【Experiment Objectives】
Understand the principles of neural networks and master forward inference and backpropagation;
Master how to implement training and inference of a fully connected neural network model using the PyTorch framework.
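Before the full lab script, a minimal illustration of the two objectives may help; the sketch below (illustrative only, not part of the lab code) runs one forward pass through a single fully connected layer and lets PyTorch's autograd perform the backward pass:

# Minimal forward/backward sketch (illustrative, not part of the lab script).
import torch
import torch.nn as nn
import torch.nn.functional as F

layer = nn.Linear(784, 10)            # one fully connected layer: 784 inputs -> 10 class scores
x = torch.randn(8, 784)               # a fake batch of 8 flattened 28x28 images
target = torch.randint(0, 10, (8,))   # fake labels

logits = layer(x)                     # forward inference
loss = F.cross_entropy(logits, target)
loss.backward()                       # backpropagation fills layer.weight.grad / layer.bias.grad
print(layer.weight.grad.shape)        # torch.Size([10, 784])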
【Experiment Content】
1. Using the PyTorch framework, design a fully connected neural network and implement training and recognition on the MNIST handwritten-digit dataset.
【Report Requirements】
Modify the network structure by changing the number of layers, and observe the effect on training time, testing time, accuracy, and related metrics;
Modify the learning rate and observe the effect on training and testing results;
Modify the network structure by increasing or decreasing the number of neurons, and observe the effect on training and testing results (a configurable sketch follows this list).
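One way to run these ablations without rewriting the model each time is to make the layer sizes a constructor argument. The sketch below is a hedged example: the MLP class and the hidden_sizes parameter are illustrative names of our own, not part of the lab script.

# A configurable fully connected network for the ablation experiments.
# "MLP" and "hidden_sizes" are illustrative names, not from the original script.
import torch.nn as nn
import torch.nn.functional as F

class MLP(nn.Module):
    def __init__(self, hidden_sizes=(128, 64)):
        super(MLP, self).__init__()
        sizes = [28 * 28] + list(hidden_sizes) + [10]
        layers = []
        for i in range(len(sizes) - 1):
            layers.append(nn.Linear(sizes[i], sizes[i + 1]))
            if i < len(sizes) - 2:      # no activation after the output layer
                layers.append(nn.ReLU())
        self.layers = nn.Sequential(*layers)

    def forward(self, x):
        x = x.view(x.size(0), -1)       # flatten 1x28x28 images to 784-vectors
        return F.log_softmax(self.layers(x), dim=1)

For example, MLP((256, 128, 64)) adds a hidden layer, MLP((32,)) shrinks the network, and the learning rate is varied through the existing optim.SGD(..., lr=...) call.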
Reference implementation (note that besides the two fully connected layers fc1 and fc2, this network also places two convolutional layers in front):

import torch
import torchvision
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt

# Hyperparameters
n_epochs = 3
batch_size_train = 64
batch_size_test = 1000
learning_rate = 0.01
momentum = 0.5
log_interval = 10

random_seed = 1
torch.manual_seed(random_seed)

# MNIST data loaders; Normalize uses the dataset's global mean and std
train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('./data/', train=True, download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize((0.1307,), (0.3081,))
                               ])),
    batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('./data/', train=False, download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize((0.1307,), (0.3081,))
                               ])),
    batch_size=batch_size_test, shuffle=True)

examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
# print(example_targets)
# print(example_data.shape)

# Show six test images with their ground-truth labels
fig = plt.figure()
for i in range(6):
    plt.subplot(2, 3, i + 1)
    plt.tight_layout()
    plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
    plt.title("Ground Truth: {}".format(example_targets[i]))
    plt.xticks([])
    plt.yticks([])
plt.show()


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)             # flatten 20x4x4 feature maps to 320-vectors
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)


network = Net()
optimizer = optim.SGD(network.parameters(), lr=learning_rate, momentum=momentum)

train_losses = []
train_counter = []
test_losses = []
test_counter = [i * len(train_loader.dataset) for i in range(n_epochs + 1)]


def train(epoch):
    network.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        output = network(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
            train_losses.append(loss.item())
            train_counter.append((batch_idx * batch_size_train) + ((epoch - 1) * len(train_loader.dataset)))
            # Checkpoint model and optimizer state for the resume demo below
            torch.save(network.state_dict(), './model.pth')
            torch.save(optimizer.state_dict(), './optimizer.pth')


def test():
    network.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            output = network(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).sum()
    test_loss /= len(test_loader.dataset)
    test_losses.append(test_loss)
    print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))


train(1)
test()  # without this initial test(), the plot below fails: x and y must be the same size
for epoch in range(1, n_epochs + 1):
    train(epoch)
    test()

# Plot training loss per logged batch and test loss per epoch
fig = plt.figure()
plt.plot(train_counter, train_losses, color='blue')
plt.scatter(test_counter, test_losses, color='red')
plt.legend(['Train Loss', 'Test Loss'], loc='upper right')
plt.xlabel('number of training examples seen')
plt.ylabel('negative log likelihood loss')

# Show six test images with the model's predictions
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
with torch.no_grad():
    output = network(example_data)
fig = plt.figure()
for i in range(6):
    plt.subplot(2, 3, i + 1)
    plt.tight_layout()
    plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
    plt.title("Prediction: {}".format(output.data.max(1, keepdim=True)[1][i].item()))
    plt.xticks([])
    plt.yticks([])
plt.show()

# ----------------------------------------------------------- #
# Resume training from the saved checkpoints
continued_network = Net()
continued_optimizer = optim.SGD(continued_network.parameters(), lr=learning_rate, momentum=momentum)
network_state_dict = torch.load('model.pth')
continued_network.load_state_dict(network_state_dict)
optimizer_state_dict = torch.load('optimizer.pth')
continued_optimizer.load_state_dict(optimizer_state_dict)

# Note: do not comment out the "for epoch in range(1, n_epochs + 1):" loop above,
# otherwise plotting fails with: x and y must be the same size.
# The range starts at 4 because n_epochs = 3 and the loop above covered [1, n_epochs + 1).
# Also note: train()/test() operate on the global `network`, which is the same model
# that was just checkpointed, so training simply continues; loading into
# continued_network demonstrates how to restore a saved checkpoint.
for i in range(4, 9):
    test_counter.append(i * len(train_loader.dataset))
    train(i)
    test()

fig = plt.figure()
plt.plot(train_counter, train_losses, color='blue')
plt.scatter(test_counter, test_losses, color='red')
plt.legend(['Train Loss', 'Test Loss'], loc='upper right')
plt.xlabel('number of training examples seen')
plt.ylabel('negative log likelihood loss')
plt.show()
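To collect the training- and testing-time numbers the report asks for, one simple approach (an assumption, not part of the original script) is to wrap the existing train()/test() calls with time.perf_counter(); this sketch assumes the script above has already been run so that n_epochs, train, and test are defined:

# Hedged timing sketch: wraps the script's train()/test() with wall-clock timers.
import time

start = time.perf_counter()
for epoch in range(1, n_epochs + 1):
    train(epoch)
print('training time: {:.1f}s'.format(time.perf_counter() - start))

start = time.perf_counter()
test()
print('test time: {:.1f}s'.format(time.perf_counter() - start))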