import torch
from matplotlib import pyplot as plt

# learning rate
learn_rate = 0.1

# training data: y = 3x + 0.8
x = torch.rand([500, 1])
y = 3 * x + 0.8

# parameters to learn
w = torch.rand([1, 1], requires_grad=True)
b = torch.tensor(0, requires_grad=True, dtype=torch.float32)

for i in range(500):
    # forward pass: predicted values
    y_predict = torch.matmul(x, w) + b
    # mean squared error loss
    loss = (y - y_predict).pow(2).mean()

    # zero the gradients before calling backward(); otherwise PyTorch
    # accumulates this step's gradients onto the ones from the previous step
    if w.grad is not None:
        w.grad.data.zero_()
    if b.grad is not None:
        b.grad.data.zero_()

    # backward pass, then update the parameters
    loss.backward()
    w.data = w.data - learn_rate * w.grad
    b.data = b.data - learn_rate * b.grad

# plot the training data and the fitted line
plt.figure(figsize=(20, 8))
plt.scatter(x.numpy().reshape(-1), y.numpy().reshape(-1))
plt.plot(x.numpy().reshape(-1), y_predict.detach().numpy().reshape(-1), c="red")
plt.show()
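For comparison (not part of the original post), the same regression can also be written with nn.Linear, nn.MSELoss, and torch.optim.SGD, which take over the gradient zeroing and parameter updates that the loop above performs by hand on w.data and b.data. A minimal sketch, using the same synthetic data and learning rate:

import torch
from torch import nn, optim

# same synthetic data as above
x = torch.rand([500, 1])
y = 3 * x + 0.8

# a single linear layer replaces the hand-built w and b
model = nn.Linear(1, 1)
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.1)

for i in range(500):
    y_predict = model(x)
    loss = criterion(y_predict, y)

    optimizer.zero_grad()  # replaces the manual w.grad / b.grad zeroing
    loss.backward()
    optimizer.step()       # replaces the manual w.data / b.data updates

print(model.weight.item(), model.bias.item())  # should approach 3 and 0.8

With either version, the learned weight and bias converge toward the true values 3 and 0.8 used to generate the data.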