2. Manually implementing a feedforward neural network to solve a binary classification problem
# Import the required packages
import numpy as np
import torch
from torch import nn
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from torch.utils.data import TensorDataset, DataLoader
# Create the dataset: two Gaussian clusters, one per class
num_example, num_input = 10000, 200
x1 = torch.normal(-2, 1, size=(num_example, num_input))  # mean -2, std 1
y1 = torch.ones((num_example, 1))                        # label 1
x2 = torch.normal(2, 1, size=(num_example, num_input))   # mean 2, std 1
y2 = torch.zeros((num_example, 1))                       # label 0
x = torch.cat((x1, x2), dim=0)
y = torch.cat((y1, y2), dim=0)
# Split into training and test sets, train:test = 7:3
train_x, test_x, train_y, test_y = train_test_split(
    x, y, shuffle=True, stratify=y, random_state=1, test_size=0.3)
print(train_x.shape)
print(test_x.shape)
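# Quick sanity check (a minimal sketch, not in the original post): the
# stratified split should keep the 1:1 class ratio in both subsets, so the
# mean of the 0/1 labels should be close to 0.5 on each side.
print(train_y.mean().item())  # expected: ~0.5
print(test_y.mean().item())   # expected: ~0.5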
# Load the data in mini-batches
batch_size = 50
# Training set
train_dataset = TensorDataset(train_x, train_y)
train_iter = DataLoader(
    dataset=train_dataset,
    shuffle=True,
    batch_size=batch_size,
    num_workers=0
)
# Test set
test_dataset = TensorDataset(test_x, test_y)
test_iter = DataLoader(
    dataset=test_dataset,
    shuffle=False,  # no need to shuffle for evaluation
    batch_size=batch_size,
    num_workers=0
)
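# Optional check (illustrative, not in the original post): pull one
# mini-batch to confirm the shapes the DataLoader delivers.
sample_x, sample_y = next(iter(train_iter))
print(sample_x.shape)  # expected: torch.Size([50, 200])
print(sample_y.shape)  # expected: torch.Size([50, 1])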
# Initialize parameters for a logistic-regression baseline
w = torch.tensor(np.random.normal(0, 0.01, (num_input, 1)), dtype=torch.float32)
b = torch.zeros(1, dtype=torch.float32)
# Track gradients for both parameters
w.requires_grad_(requires_grad=True)
b.requires_grad_(requires_grad=True)
# Logistic regression (a simple baseline; the MLP below is what gets trained)
def logits(X, w, b):
    y = torch.mm(X, w) + b
    return torch.sigmoid(y)  # the sigmoid squashes the output into (0, 1)
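# Usage sketch (illustrative only; this baseline is not trained below):
# logits maps each sample to a probability in (0, 1).
p = logits(train_x[:5], w, b)
print(p.shape)                         # torch.Size([5, 1])
print(p.min().item(), p.max().item())  # both values lie inside (0, 1)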
# Optional sanity checks on the labels
# print(np.unique(y))
# np.sum([ele.data == 1 for ele in train_y.flatten()])
# np.sum([ele.data == 0 for ele in train_y.flatten()])
# Hidden layer with 256 units, one output unit
num_hidden, num_output = 256, 1
# torch.normal already returns float32, so no explicit casting is needed
w1 = torch.normal(0, 0.001, size=(num_hidden, num_input))
b1 = torch.ones(1)
print(w1.dtype)
print(b1.dtype)
w2 = torch.normal(0, 0.001, size=(num_output, num_hidden))
b2 = torch.ones(1)
params = [w1, w2, b1, b2]
for param in params:
    param.requires_grad_(requires_grad=True)
# Activation function
def ReLU(x):
    return torch.max(x, other=torch.tensor(0.0))
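# Sanity check (a minimal sketch): the hand-written ReLU should agree
# elementwise with PyTorch's built-in torch.relu.
t = torch.randn(3, 4)
print(torch.equal(ReLU(t), torch.relu(t)))  # expected: True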
# BCEWithLogitsLoss fuses the sigmoid with binary cross-entropy, so the
# network below returns raw logits rather than probabilities
loss = nn.BCEWithLogitsLoss()
# loss = nn.BCELoss()  # would require the net to end with a sigmoid
# Define the model: a single-hidden-layer MLP
def net(x):
    H1 = ReLU(torch.matmul(x, w1.t()) + b1)
    H2 = torch.matmul(H1, w2.t()) + b2
    return H2
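# Shape check (illustrative): a batch of 50 samples should come out as one
# raw logit per sample; BCEWithLogitsLoss applies the sigmoid internally.
out = net(train_x[:batch_size])
print(out.shape)  # expected: torch.Size([50, 1])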
# Optimizer: mini-batch SGD. The loss uses mean reduction, so gradients are
# already averaged over the batch and no extra division is needed.
def SGD(params, lr):
    for param in params:
        param.data -= lr * param.grad
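# Equivalent alternative (a sketch, assuming the same parameters): the more
# common idiom wraps the manual update in torch.no_grad() instead of
# touching param.data directly.
def sgd_no_grad(params, lr):
    with torch.no_grad():
        for param in params:
            param -= lr * param.grad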
# Training function
def train(net, train_iter, test_iter, lr, num_epochs, params):
    train_l, test_l = [], []
    for epoch in range(num_epochs):
        train_l_sum, n = 0, 0
        for x, y in train_iter:
            n += y.shape[0]
            y_pred = net(x)
            l = loss(y_pred, y)
            train_l_sum += l.item() * y.shape[0]  # undo the mean reduction
            # Clear stale gradients before backpropagating
            if params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            l.backward()
            SGD(params, lr)
        train_l.append(train_l_sum / n)  # average loss per sample
        test_l_sum, n = 0, 0
        with torch.no_grad():  # no gradients needed for evaluation
            for x, y in test_iter:
                n += y.shape[0]
                y_pred = net(x)
                l = loss(y_pred, y)
                test_l_sum += l.item() * y.shape[0]
        test_l.append(test_l_sum / n)
        print('epoch %d, train_loss %.6f, test_loss %.6f'
              % (epoch + 1, train_l[epoch], test_l[epoch]))
    return train_l, test_l
lr, num_epochs = 0.01, 100  # learning rate and number of epochs
train_loss, test_loss = train(net, train_iter, test_iter, lr, num_epochs, params)  # start training
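# Post-training evaluation (a minimal sketch, not in the original post):
# test-set accuracy, thresholding the predicted probability at 0.5.
with torch.no_grad():
    correct, total = 0, 0
    for x_b, y_b in test_iter:
        pred = (torch.sigmoid(net(x_b)) > 0.5).float()
        correct += (pred == y_b).sum().item()
        total += y_b.shape[0]
print('test accuracy: %.4f' % (correct / total))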
# Plot the training and test loss curves
epochs = np.arange(1, len(train_loss) + 1)
plt.plot(epochs, train_loss, label="train_loss", linewidth=1.5)
plt.plot(epochs, test_loss, label="test_loss", linewidth=1.5)
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.show()