pytorch
PyTorch 是一个针对深度学习, 并且使用 GPU 和 CPU 来优化的 tensor library (tensor库)
梯度/导数计算
# linear.py
# Minimal autograd demo: compute dy/dx, dy/dw, dy/db for y = w * x + b.
import torch
import numpy as np

# BUG FIX: the original used torch.tensor(3,) — an integer tensor without
# requires_grad — so x.grad printed None. Gradients are only tracked for
# floating-point tensors with requires_grad=True.
x = torch.tensor(3., requires_grad=True)
w = torch.tensor(4., requires_grad=True)
b = torch.tensor(5., requires_grad=True)

y = w * x + b   # y = 4 * 3 + 5 = 17
y.backward()    # populate .grad on every leaf tensor that requires grad

print('dy/dx:', x.grad)  # dy/dx = w = 4
print('dy/dw:', w.grad)  # dy/dw = x = 3
print('dy/db:', b.grad)  # dy/db = 1
# print(x,w.item,b)
线性回归
# linear_stastic.py
# Linear regression from scratch: manually initialised weights, a hand-written
# MSE loss, and manual full-batch gradient descent.
import numpy as np
import torch

# Input (temp, rainfall, humidity)
inputs = np.array([[73, 67, 43],
                   [91, 88, 64],
                   [87, 134, 58],
                   [102, 43, 37],
                   [69, 96, 70]], dtype='float32')
# Targets (apples, oranges)
targets = np.array([[56, 70],
                    [81, 101],
                    [119, 133],
                    [22, 37],
                    [103, 119]], dtype='float32')
inputs = torch.from_numpy(inputs)
targets = torch.from_numpy(targets)

# Randomly initialise weight matrix and bias, tracked by autograd.
w = torch.randn(2, 3, requires_grad=True)
b = torch.randn(2, requires_grad=True)


def model(x):
    """Linear model mapping 3 input features to 2 outputs: x @ w.T + b."""
    return x @ w.t() + b


# FIX: renamed from the original typo `mes` — this computes mean squared error.
def mse(t1, t2):
    """Mean squared error between two tensors of the same shape."""
    diff = t1 - t2
    return torch.sum(diff * diff) / diff.numel()


# Train for 1000 epochs of full-batch gradient descent.
for i in range(1000):
    predict = model(inputs)
    loss = mse(predict, targets)
    print("loss", i + 1, "=", loss)
    loss.backward()
    # Update parameters outside autograd tracking; 1e-5 is the learning rate.
    with torch.no_grad():
        w -= w.grad * 1e-5
        b -= b.grad * 1e-5
        # Zero the gradients so they do not accumulate across epochs.
        w.grad.zero_()
        b.grad.zero_()
    print("w,b", i + 1, "=", w, b)

print(targets, '\n', predict)
torch内置函数实现线性回归
# linear_stastic_intorch.py
# Linear regression using PyTorch built-ins: nn.Linear for the model,
# TensorDataset/DataLoader for batching, F.mse_loss, and optim.SGD.
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset
from torch.utils.data import DataLoader
import torch.nn.functional as F

# Training data.
# Input (temp, rainfall, humidity)
inputs = np.array([[73, 67, 43],
                   [91, 88, 64],
                   [87, 134, 58],
                   [102, 43, 37],
                   [69, 96, 70],
                   [74, 66, 43],
                   [91, 87, 65],
                   [88, 134, 59],
                   [101, 44, 37],
                   [68, 96, 71],
                   [73, 66, 44],
                   [92, 87, 64],
                   [87, 135, 57],
                   [103, 43, 36],
                   [68, 97, 70]],
                  dtype='float32')
# Targets (apples, oranges)
targets = np.array([[56, 70],
                    [81, 101],
                    [119, 133],
                    [22, 37],
                    [103, 119],
                    [57, 69],
                    [80, 102],
                    [118, 132],
                    [21, 38],
                    [104, 118],
                    [57, 69],
                    [82, 100],
                    [118, 134],
                    [20, 38],
                    [102, 120]],
                   dtype='float32')
inputs = torch.from_numpy(inputs)
targets = torch.from_numpy(targets)

# Wrap the tensors in a Dataset so DataLoader can batch and shuffle them.
train_dataset = TensorDataset(inputs, targets)
# print(train_dataset[0:5])
batch_size = 5
train_dataloader = DataLoader(train_dataset, batch_size, shuffle=True)
'''
for x,y in train_dataloader:
print(x,y)
'''
# nn.Linear automatically initialises model.weight (2x3) and model.bias (2,).
model = nn.Linear(3, 2)
# print(model.weight,model.bias)
# print(list(model.parameters()))

# Built-in mean-squared-error loss.
loss_fn = F.mse_loss
loss = loss_fn(model(inputs), targets)

# SGD optimiser over the model's parameters (gradient descent on the loss).
opt = torch.optim.SGD(model.parameters(), lr=1e-5)


def fit(num_epoch, model, loss_fn, opt):
    """Train `model` for `num_epoch` epochs over train_dataloader.

    Logs the last batch's loss every 10 epochs.
    """
    for epoch in range(num_epoch):
        for xb, yb in train_dataloader:
            # Forward pass.
            predict = model(xb)
            # Loss computation.
            loss = loss_fn(predict, yb)
            # Backward pass: accumulate gradients.
            loss.backward()
            # BUG FIX: in the original this comment ("gradient descent step")
            # was a bare phrase missing its '#', which is a SyntaxError.
            opt.step()
            # Reset gradients so they do not accumulate across batches.
            opt.zero_grad()
        # Periodic logging.
        if epoch % 10 == 0:
            print("Epoch:{}/{},Loss:{:.4f}".format(epoch + 1, num_epoch, loss.item()))


fit(100, model, loss_fn, opt)
标签:loss,2024.4,torch,pytorch,print,import,model,grad,初上
From: https://www.cnblogs.com/jibinghu/p/18121044