import torch
from torch import nn
from d2l import torch as d2l

batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

num_inputs = 28 * 28
num_outputs = 10
num_hiddens = 256

# Note: a minibatch X drawn from train_iter has shape
# (batch_size, 1, 28, 28), so it must first be flattened to
# (batch_size, 784) before the first linear layer.
net = nn.Sequential(nn.Flatten(),
                    nn.Linear(num_inputs, num_hiddens),
                    nn.ReLU(),
                    nn.Linear(num_hiddens, num_outputs))

def init_weights(m):
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, std=0.01)

net.apply(init_weights)

loss = nn.CrossEntropyLoss(reduction='none')
trainer = torch.optim.SGD(net.parameters(), lr=0.1)
num_epochs = 5
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
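The last line hands training off to d2l.train_ch3. For readers without the d2l package, here is a minimal hand-rolled loop in the same spirit (a sketch, not d2l's actual implementation: it reuses net, train_iter, test_iter, loss, trainer, and num_epochs from above, and it only reports test accuracy, omitting train_ch3's training-loss and training-accuracy plotting):

# Minimal stand-in for d2l.train_ch3 (a sketch, not the library's code).
for epoch in range(num_epochs):
    net.train()
    for X, y in train_iter:
        l = loss(net(X), y)      # per-example losses (reduction='none')
        trainer.zero_grad()
        l.mean().backward()      # average over the batch before backprop
        trainer.step()
    # Evaluate accuracy on the test set after each epoch.
    net.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for X, y in test_iter:
            correct += (net(X).argmax(dim=1) == y).sum().item()
            total += y.numel()
    print(f'epoch {epoch + 1}, test acc {correct / total:.3f}')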
Note the following here:
1. A minibatch X drawn from train_iter has shape batch_size×1×28×28 (the 1 is the channel dimension), so it must first be flattened to batch_size×784; nn.Flatten() does exactly this, as the shape check after this list confirms.
2. The two arguments of nn.Linear() are the number of input neurons (i.e., input features) and the number of output neurons (i.e., output features) of the linear (fully connected) layer.
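To make notes 1 and 2 concrete, a quick shape check (a sketch; it assumes net and train_iter from the code above are already in scope):

X, y = next(iter(train_iter))
print(X.shape)                # torch.Size([256, 1, 28, 28]): batch, channel, height, width
print(nn.Flatten()(X).shape)  # torch.Size([256, 784]): flattened for the linear layer
# nn.Linear(in_features, out_features) stores its weight as (out_features, in_features);
# here that is (num_hiddens, num_inputs). It also prints as (256, 784), but only
# because batch_size and num_hiddens happen to both equal 256.
print(net[1].weight.shape)    # torch.Size([256, 784])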