【实验目的】
理解神经网络原理,掌握神经网络前向推理和后向传播方法;
掌握使用pytorch框架训练和推理全连接神经网络模型的编程实现方法。
【实验内容】
使用pytorch框架,设计一个包含卷积层和全连接层的神经网络,实现MNIST手写数字数据集的训练与识别。
1.准备所需安装包并导入
import torch import torchvision from torch.utils.data import DataLoader import matplotlib.pyplot as plt import torch.nn as nn import torch.nn.functional as F import torch.optim as optim
注意:这里的torch和torchvision安装包需要自行安装,可在命令行(终端)中执行以下命令安装:
pip install torch;pip install torchvision
2.运行代码
# Training configuration.
n_epochs = 3
batch_size_train = 64
batch_size_test = 1000
learning_rate = 0.01
momentum = 0.5
log_interval = 10

# Seed the RNG so successive runs are reproducible.
random_seed = 1
torch.manual_seed(random_seed)

# Shared preprocessing pipeline; 0.1307 / 0.3081 are the MNIST
# training-set mean and standard deviation used for normalisation.
mnist_transform = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize((0.1307,), (0.3081,)),
])

# Download (if needed) and wrap the MNIST splits in DataLoaders.
train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('./data/', train=True, download=True,
                               transform=mnist_transform),
    batch_size=batch_size_train, shuffle=True)

test_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('./data/', train=False, download=True,
                               transform=mnist_transform),
    batch_size=batch_size_test, shuffle=True)

# Pull a single test batch to inspect labels and tensor shape.
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_targets)
print(example_data.shape)
输出结果:
import matplotlib.pyplot as plt

# Visualise the first nine test images in a 3x3 grid, titled with
# their ground-truth labels.
fig = plt.figure()
for idx in range(9):
    plt.subplot(3, 3, idx + 1)
    plt.tight_layout()  # auto-adjust axis labels, ticks and titles
    plt.imshow(example_data[idx][0], cmap='gray')  # render the digit tensor
    plt.title("Ground Truth: {}".format(example_targets[idx]))
    # hide the axis ticks
    plt.xticks([])
    plt.yticks([])
plt.show()
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim


class Net(nn.Module):
    """Small CNN for MNIST: two conv+pool stages, then two fully
    connected layers, producing log-probabilities over the 10 digits."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)  # 20 channels * 4 * 4 spatial = 320
        self.fc2 = nn.Linear(50, 10)   # 10 output classes

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        # BUG FIX: the original used dim=0, which normalises across the
        # BATCH dimension, coupling predictions of different samples and
        # making nll_loss meaningless. Softmax must run over the class
        # dimension, dim=1.
        return F.log_softmax(x, dim=1)


network = Net()
optimizer = optim.SGD(network.parameters(), lr=learning_rate,
                      momentum=momentum)

# Histories for later plotting of the loss curves.
train_losses = []
train_counter = []
test_losses = []
test_counter = [i * len(train_loader.dataset) for i in range(n_epochs + 1)]


def train(epoch):
    """Run one training epoch over train_loader.

    Logs the loss every `log_interval` batches and checkpoints the
    model and optimizer state to ./model.pth / ./optimizer.pth.
    """
    network.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        output = network(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
            train_losses.append(loss.item())
            # Use the named batch size instead of a hard-coded 64 so the
            # counter stays correct if batch_size_train is changed.
            train_counter.append(
                (batch_idx * batch_size_train)
                + ((epoch - 1) * len(train_loader.dataset)))
            torch.save(network.state_dict(), './model.pth')
            torch.save(optimizer.state_dict(), './optimizer.pth')


train(1)
标签:nn,self,torch,batch,神经网络,train,实验,import,手写 From: https://www.cnblogs.com/cmx3186588507-/p/16933634.html