Linear Regression from Scratch
Tutorial link. Open issue: plt.show() blocks the code that follows it.
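One workaround worth trying for the blocking plt.show() (a sketch I have not verified in this setup; it assumes an interactive matplotlib backend):

import matplotlib.pyplot as plt
plt.plot([0, 1], [0, 1])
plt.show(block=False)  # return immediately instead of blocking the script
plt.pause(0.5)         # give the GUI event loop a moment to render the window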
import torch
from IPython import display
from matplotlib import pyplot as plt
import numpy as np
import random
from tqdm import tqdm
# generate dataset
true_w = torch.Tensor([2, -3.4])
true_w = torch.unsqueeze(true_w, 1)
true_b = torch.Tensor([4.2])
feature_num = 2
data_num = 10000
features = torch.randn(data_num, feature_num, dtype=torch.float32)
true_labels = features.mm(true_w) + true_b
# add Gaussian noise
noise_labels = true_labels + torch.tensor(np.random.normal(0, 0.01, size=true_labels.size()), dtype=torch.float32)
# visualize
def use_svg_display():
    # render matplotlib figures as SVG in Jupyter
    display.set_matplotlib_formats('svg')

def set_figsize(figsize=(3.5, 2.5)):
    use_svg_display()
    plt.rcParams['figure.figsize'] = figsize
# hand-written mini-batch data iterator
def data_iter(batch_size, features, labels):
    data_num = len(features)
    indices = list(range(data_num))
    random.shuffle(indices)  # shuffle the index order in place
    for i in range(0, data_num, batch_size):  # step from 0 to data_num-1 in strides of batch_size
        # build a LongTensor of indices; min() keeps the last batch valid when it falls short
        j = torch.LongTensor(indices[i: min(i + batch_size, data_num)])
        # yield acts like return, but the generator resumes right here on the next call, and it saves memory
        # dim 0 means index by rows; j says which rows to take
        yield features.index_select(0, j), labels.index_select(0, j)
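A quick sanity check on the iterator (my own snippet, not from the tutorial): pull one batch and confirm the shapes.

for X, y in data_iter(10, features, noise_labels):
    print(X.shape, y.shape)  # torch.Size([10, 2]) torch.Size([10, 1])
    break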
# initialize
w = torch.tensor(np.random.normal(0, 0.01, (feature_num, 1)), dtype=torch.float32)
b = torch.zeros(1, dtype=torch.float32)
w.requires_grad_(requires_grad=True)
b.requires_grad_(requires_grad=True)
# linear regression forward pass
def linreg(X, w, b):  # this function is saved in the d2lzh_pytorch package for later use
    return torch.mm(X, w) + b

def squared_loss(y_hat, y):  # this function is saved in the d2lzh_pytorch package for later use
    # note this returns a vector; also, PyTorch's MSELoss does not divide by 2
    # the labels here are already shaped (N, 1), so no reshape is needed:
    # return (y_hat - y.view(y_hat.size())) ** 2 / 2
    return (y_hat - y) ** 2 / 2
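To make the factor-of-2 remark concrete, here is a small comparison against nn.MSELoss (my own check; import torch.nn as nn is assumed in this first script):

import torch.nn as nn
y_hat = torch.tensor([[1.0], [2.0]])
y = torch.tensor([[0.5], [2.5]])
print(squared_loss(y_hat, y).mean().item())  # 0.125: mean of diff**2 / 2
print(nn.MSELoss()(y_hat, y).item())         # 0.25:  mean of diff**2, no division by 2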
def sgd(params, lr, batch_size):  # this function is saved in the d2lzh_pytorch package for later use
    for param in params:
        # update through param.data so the in-place change is not tracked by autograd:
        # gradients come from backprop on the loss, and mutating param directly would
        # either pull the update into the graph or make param a non-leaf whose grad is None
        param.data -= lr * param.grad / batch_size
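The same update can be written without touching .data by wrapping it in torch.no_grad(), which keeps the in-place change out of the autograd graph (an equivalent sketch, not the tutorial's version):

def sgd_no_grad(params, lr, batch_size):
    with torch.no_grad():
        for param in params:
            param -= lr * param.grad / batch_size  # in-place update, untracked by autograd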
# configuration
batch_size = 100
lr = 0.03
num_epochs = 10
# function handles
net = linreg
loss = squared_loss
for epoch in tqdm(range(num_epochs)):  # training takes num_epochs full passes
    # each epoch uses every sample in the training set once (assuming the sample
    # count is divisible by the batch size); X and y are the mini-batch features and labels
    for X, y in data_iter(batch_size, features, noise_labels):
        l = loss(net(X, w, b), y).sum()  # l is the loss on the mini-batch X and y
        l.backward()  # gradient of the mini-batch loss w.r.t. the model parameters
        sgd([w, b], lr, batch_size)  # update the parameters with mini-batch SGD
        # do not forget to zero the gradients
        w.grad.data.zero_()
        b.grad.data.zero_()
    train_l = loss(net(features, w, b), noise_labels)
    print('epoch %d, loss %f' % (epoch + 1, train_l.mean().item()))
# print the results
print(true_w, '\n', w)
print(true_b, '\n', b)
# plotting earlier in the script would block everything after it
set_figsize()
plt.scatter(features[:, 1].numpy(), noise_labels.numpy(), 1, c='r')
plt.scatter(features[:, 1].numpy(), true_labels.numpy(), 1, c='g')
plt.ioff()
plt.show()
Output
Using nn.Linear with a custom Module class
import torch
import torch.utils.data as Data
import torch.nn as nn
import numpy as np
# generate data
num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
features = torch.tensor(np.random.normal(1, 1, (num_examples, num_inputs)), dtype=torch.float)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
batch_size = 10
# combine features and labels into a dataset
dataset = Data.TensorDataset(features, labels)
data_iter = Data.DataLoader(dataset, batch_size, shuffle=True)
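As before, one batch can be pulled out to check the shapes (my own check; note that labels is 1-D here, unlike in the first script):

X, y = next(iter(data_iter))
print(X.shape, y.shape)  # torch.Size([10, 2]) torch.Size([10])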
class LinearNet(nn.Module):
    def __init__(self, n_feature):
        ## Python 2 style: pass the class and self explicitly
        # super(LinearNet, self).__init__()
        # Python 3 also accepts the short form
        super().__init__()
        self.linear = nn.Linear(in_features=n_feature, out_features=1, bias=True)

    def forward(self, x):
        y = self.linear(x)
        return y
net = LinearNet(num_inputs)
print(net)
nn.init.normal_(net.linear.weight, mean=0, std=0.01)
nn.init.constant_(net.linear.bias, val=0)
loss = nn.MSELoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.03)
num_epochs = 3
for epoch in range(1, num_epochs + 1):
    for X, y in data_iter:
        output = net(X)
        # output has shape (10, 1), so y must be reshaped to match
        l = loss(output, y.view(-1, 1))
        optimizer.zero_grad()
        l.backward()
        optimizer.step()
    print('epoch %d, loss:%f' % (epoch, l.item()))
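The y.view(-1, 1) above is not cosmetic: with shapes (N, 1) and (N,), MSELoss broadcasts the difference to (N, N) and silently computes the wrong value (newer PyTorch versions at least warn about it). A minimal demonstration:

mse = nn.MSELoss()
a = torch.arange(4.).view(-1, 1)  # shape (4, 1)
b = torch.arange(4.)              # shape (4,)
print(mse(a, b.view(-1, 1)).item())  # 0.0, shapes match element-wise
print(mse(a, b).item())              # 2.5, silently broadcast to (4, 4)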
Output:
Building the network with nn.Sequential
import torch
import torch.utils.data as Data
import torch.nn as nn
import numpy as np
# generate data
num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
features = torch.tensor(np.random.normal(1, 1, (num_examples, num_inputs)), dtype=torch.float)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
batch_size = 10
# combine features and labels into a dataset
dataset = Data.TensorDataset(features, labels)
data_iter = Data.DataLoader(dataset, batch_size, shuffle=True)
net = nn.Sequential()
net.add_module('linear', nn.Linear(num_inputs, 1))
print(net)
# print layer 0
print(net[0])
# print all learnable parameters
for param in net.parameters():
    print(param)
nn.init.normal_(net[0].weight, mean=0, std=0.01)
nn.init.constant_(net[0].bias, val=0)
loss = nn.MSELoss()
# different parts of the network can be given different learning rates
optimizer = torch.optim.SGD([
    {'params': net.linear.parameters()},  # e.g. add 'lr': 0.01 here
], lr=0.03)
print(optimizer)
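The commented-out lr above hints at per-group learning rates. With more than one sub-module, each parameter group can carry its own lr while the constructor argument serves as the default (a hypothetical sketch; the body/head modules are made up for illustration):

two_part = nn.Sequential()
two_part.add_module('body', nn.Linear(num_inputs, 4))
two_part.add_module('head', nn.Linear(4, 1))
opt = torch.optim.SGD([
    {'params': two_part.body.parameters(), 'lr': 0.01},  # group-specific lr
    {'params': two_part.head.parameters()},              # falls back to the default lr=0.03
], lr=0.03)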
num_epochs = 3
for epoch in range(1, num_epochs + 1):
    for X, y in data_iter:
        output = net(X)
        # output has shape (10, 1), so y must be reshaped to match
        l = loss(output, y.view(-1, 1))
        optimizer.zero_grad()
        l.backward()
        optimizer.step()
    print('epoch %d, loss:%f' % (epoch, l.item()))
Output: