8、对多分类任务中的模型,评估隐藏层层数和隐藏单元个数对实验结果的影响
1.确定隐藏层的层数
对于一些很简单的数据集,一层甚至两层隐藏层都已经足够了,隐藏层的层数并非设置得越多越好,过多的隐藏层可能会导致模型过拟合。对于自然语言处理以及CV领域,则建议增加网络层数。
层数越深,理论上来说模型拟合函数的能力增强,效果会更好,但是实际上更深的层数可能会带来过拟合的问题,同时也会增加训练难度,使模型难以收敛。
因此这里给出的建议是,在使用神经网络时,最好可以参照已有的性能良好的模型。
尝试迁移和微调已有的预训练模型,能取得事半功倍的效果。
2.确定隐藏层中的神经元数量
在隐藏层中使用太少的神经元将导致欠拟合(underfitting)。
相反,使用过多的神经元同样会导致一些问题。首先,隐藏层中的神经元过多可能会导致过拟合(overfitting)。
当神经网络具有过多的节点时,训练集中包含的有限信息量不足以训练隐藏层中的所有神经元,因此就会导致过拟合。即使训练数据包含的信息量足够,隐藏层中过多的神经元会增加训练时间,从而难以达到预期的效果。显然,选择一个合适的隐藏层神经元数量是至关重要的。
通常对于某些数据集,拥有较大的第一层并在其后跟随较小的层将导致更好的性能,因为第一层可以学习很多低阶的特征,这些较低层的特征可以馈入后续层中,提取出较高阶特征。
需要注意的是,与在每一层中添加更多的神经元相比,增加层数将获得更大的性能提升。因此,不要在一个隐藏层中加入过多的神经元。
按照经验来说,神经元数量通常可以由以下几个原则大致确定:
1 隐藏神经元的数量应在输入层的大小和输出层的大小之间。
2 隐藏神经元的数量应为输入层大小的2/3加上输出层大小。
3 隐藏神经元的数量应小于输入层大小的两倍。
总而言之,隐藏层神经元的最佳数量需要自己通过不断试验来进行微调,建议从一个较小数值比如1到3层和1到100个神经元开始。
如果欠拟合然后慢慢添加更多的层和神经元,如果过拟合就减小层数和神经元。此外,在实际过程中还可以考虑引入Batch Normalization, Dropout, 正则化等降低过拟合的方法。
代码部分
#导入必要的包
import torch
import torch.nn as nn
import numpy as np
from torchvision import datasets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader,TensorDataset
# Load MNIST (assumes the files already exist under ./data; set
# download=True on first run).
mnist_train = datasets.MNIST(root='./data', train=True, download=False,
                             transform=transforms.ToTensor())
mnist_test = datasets.MNIST(root='./data', train=False, download=False,
                            transform=transforms.ToTensor())
# Pick the compute device once; fall back to CPU when CUDA is unavailable.
# (The original unconditional .cuda() crashed on CPU-only machines.)
_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Accessing .data directly bypasses the ToTensor transform declared above,
# so scale the raw uint8 pixels (0-255) to [0, 1] floats here to match it.
train_x = mnist_train.data.to(_device).type(torch.float32) / 255.0
train_y = mnist_train.targets.to(_device)
test_x = mnist_test.data.to(_device).type(torch.float32) / 255.0
test_y = mnist_test.targets.to(_device)
batch_size = 64
# Training set loader (shuffled every epoch).
train_data = TensorDataset(train_x, train_y)
train_iter = DataLoader(
    dataset=train_data,
    shuffle=True,
    batch_size=batch_size
)
# Test set loader.
test_data = TensorDataset(test_x, test_y)
test_iter = DataLoader(
    dataset=test_data,
    shuffle=True,
    batch_size=batch_size
)
# Flatten layer: collapses all non-batch dimensions into one.
class flatten(nn.Module):
    """Flatten an input tensor to shape (batch, features).

    Generalized from the original hard-coded 784 (28*28 MNIST images):
    -1 lets PyTorch infer the feature count, so any input spatial size
    works while 28x28 inputs behave exactly as before.
    """
    def __init__(self):
        super(flatten, self).__init__()

    def forward(self, x):
        return x.view(x.shape[0], -1)
# MLP with a single hidden layer.
class Linear1(nn.Module):
    """Flatten -> Linear -> ReLU -> Linear classifier head."""
    def __init__(self, num_input, num_hidden, num_output):
        super(Linear1, self).__init__()
        self.linear1 = nn.Linear(num_input, num_hidden)
        self.linear2 = nn.Linear(num_hidden, num_output)
        self.flatten = flatten()
        self.relu = nn.ReLU()

    def forward(self, input):
        hidden = self.relu(self.linear1(self.flatten(input)))
        return self.linear2(hidden)
# MLP with two hidden layers.
class Linear2(nn.Module):
    """Flatten -> (Linear -> ReLU) x2 -> Linear classifier head."""
    def __init__(self, num_input, num_hidden1, num_hidden2, num_output):
        super(Linear2, self).__init__()
        self.linear1 = nn.Linear(num_input, num_hidden1)
        self.linear2 = nn.Linear(num_hidden1, num_hidden2)
        self.linear3 = nn.Linear(num_hidden2, num_output)
        self.flatten = flatten()
        self.relu = nn.ReLU()

    def forward(self, input):
        out = self.flatten(input)
        for hidden_layer in (self.linear1, self.linear2):
            out = self.relu(hidden_layer(out))
        return self.linear3(out)
# MLP with three hidden layers.
class Linear3(nn.Module):
    """Flatten -> (Linear -> ReLU) x3 -> Linear classifier head."""
    def __init__(self, num_input, num_hidden1, num_hidden2, num_hidden3, num_output):
        super(Linear3, self).__init__()
        self.linear1 = nn.Linear(num_input, num_hidden1)
        self.linear2 = nn.Linear(num_hidden1, num_hidden2)
        self.linear3 = nn.Linear(num_hidden2, num_hidden3)
        self.linear4 = nn.Linear(num_hidden3, num_output)
        self.flatten = flatten()
        self.relu = nn.ReLU()

    def forward(self, input):
        out = self.flatten(input)
        for hidden_layer in (self.linear1, self.linear2, self.linear3):
            out = self.relu(hidden_layer(out))
        return self.linear4(out)
# Experiment 1 setup: one hidden layer with 256 units.
num_input,num_hidden,num_output = 784,256,10
lr = 0.001
net = Linear1(num_input,num_hidden,num_output)
# NOTE(review): train_x/test_x were moved to CUDA unconditionally above, so
# this CPU fallback only takes effect if the data-loading code is also fixed.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = net.to(device) # move the model to the selected device
# Mean-reduced cross-entropy over the 10 digit classes.
loss = nn.CrossEntropyLoss(reduction='mean')
# Alternative optimizer kept from the original experiments (unused):
# optimizer = torch.optim.Adam(net.parameters(),lr = lr)
optimizer = torch.optim.SGD(net.parameters(),lr = lr)
# Training loop shared by all the experiments below.
def train(net, train_iter, test_iter, loss, num_epochs, batch_size, optimizer):
    """Train `net` and evaluate it on the test set after every epoch.

    Args:
        net: model to optimize.
        train_iter, test_iter: iterables yielding (x, y) batches.
        loss: criterion returning a scalar loss tensor for a batch.
        num_epochs: number of passes over the training data.
        batch_size: unused (batching is done by the loaders); kept for
            backward compatibility with existing call sites.
        optimizer: optimizer bound to net.parameters().

    Returns:
        (train_ls, test_ls): per-epoch summed batch losses, kept as 0-dim
        tensors so existing `.cpu().item()` call sites keep working.
    """
    train_ls, test_ls, train_acc, test_acc = [], [], [], []
    for epoch in range(num_epochs):
        net.train()  # ensure layers like Dropout/BatchNorm are in train mode
        train_ls_sum, train_acc_sum, n = 0, 0, 0
        for x, y in train_iter:
            y_pred = net(x)
            l = loss(y_pred, y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            # detach: accumulating the raw loss tensor would keep every
            # batch's autograd graph alive and leak memory across the epoch
            train_ls_sum += l.detach()
            train_acc_sum += (y_pred.argmax(dim=1) == y).sum().item()
            n += x.shape[0]
        train_ls.append(train_ls_sum)
        train_acc.append(train_acc_sum / n)

        net.eval()
        test_ls_sum, test_acc_sum, n = 0, 0, 0
        with torch.no_grad():  # evaluation needs no gradients
            for x, y in test_iter:
                y_pred = net(x)
                l = loss(y_pred, y)
                test_ls_sum += l
                test_acc_sum += (y_pred.argmax(dim=1) == y).sum().item()
                n += x.shape[0]
        test_ls.append(test_ls_sum)
        test_acc.append(test_acc_sum / n)
        print('epoch: %d, train loss: %f, test loss: %f , train acc: %f, test acc: %f '
              % (epoch + 1, train_ls[-1], test_ls[-1], train_acc[-1], test_acc[-1]))
    return train_ls, test_ls
# Run experiment 1 and plot the loss curves.
num_epochs = 40
train_ls, test_ls = train(net, train_iter, test_iter, loss, num_epochs, batch_size, optimizer)
# Convert the accumulated GPU loss tensors to plain floats for plotting.
train_ls = [v.cpu().item() for v in train_ls]
test_ls = [v.cpu().item() for v in test_ls]
x = np.linspace(0, len(train_ls), len(train_ls))
for series, lbl in ((train_ls, "train_loss"), (test_ls, "test_loss")):
    plt.plot(x, series, label=lbl, linewidth=1.5)
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.show()
# Experiment 2: one hidden layer with only 10 hidden units.
num_input, num_hidden, num_output = 784, 10, 10
lr = 0.001
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = Linear1(num_input, num_hidden, num_output).to(device)
loss = nn.CrossEntropyLoss(reduction='mean')
optimizer = torch.optim.SGD(net.parameters(), lr=lr)
# Train and plot the loss curves.
num_epochs = 40
train_ls, test_ls = train(net, train_iter, test_iter, loss, num_epochs, batch_size, optimizer)
# Convert the accumulated GPU loss tensors to plain floats for plotting.
train_ls = [v.cpu().item() for v in train_ls]
test_ls = [v.cpu().item() for v in test_ls]
x = np.linspace(0, len(train_ls), len(train_ls))
for series, lbl in ((train_ls, "train_loss"), (test_ls, "test_loss")):
    plt.plot(x, series, label=lbl, linewidth=1.5)
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.show()
# Experiment 3: two hidden layers with 256 and 128 hidden units.
num_input, num_hidden1, num_hidden2, num_output = 784, 256, 128, 10
lr = 0.001
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = Linear2(num_input, num_hidden1, num_hidden2, num_output).to(device)
loss = nn.CrossEntropyLoss(reduction='mean')
optimizer = torch.optim.SGD(net.parameters(), lr=lr)
# Train and plot the loss curves.
num_epochs = 40
train_ls, test_ls = train(net, train_iter, test_iter, loss, num_epochs, batch_size, optimizer)
# Convert the accumulated GPU loss tensors to plain floats for plotting.
train_ls = [v.cpu().item() for v in train_ls]
test_ls = [v.cpu().item() for v in test_ls]
x = np.linspace(0, len(train_ls), len(train_ls))
for series, lbl in ((train_ls, "train_loss"), (test_ls, "test_loss")):
    plt.plot(x, series, label=lbl, linewidth=1.5)
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.show()
标签:num,层层,self,个数,train,ls,test,隐藏,out
From: https://www.cnblogs.com/cyberbase/p/16821147.html