import math

import matplotlib.pyplot as plt
import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l

batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)

# One-hot encoding: each token index becomes a vector of length len(vocab)
print(F.one_hot(torch.tensor([0, 2]), len(vocab)))

# A minibatch has shape (batch_size, num_steps); transposing it before
# one-hot encoding gives (num_steps, batch_size, vocab_size)
x = torch.arange(10).reshape((2, 5))
print(F.one_hot(x.T, 28).shape)


# Initialize the model parameters
def get_params(vocab_size, num_hiddens, device):
    num_inputs = num_outputs = vocab_size

    def normal(shape):
        return torch.randn(size=shape, device=device) * 0.01

    # Hidden-layer parameters
    w_xh = normal((num_inputs, num_hiddens))
    w_hh = normal((num_hiddens, num_hiddens))
    b_h = torch.zeros(num_hiddens, device=device)
    # Output-layer parameters
    w_hq = normal((num_hiddens, num_outputs))
    b_q = torch.zeros(num_outputs, device=device)
    # Attach gradients
    params = [w_xh, w_hh, b_h, w_hq, b_q]
    for param in params:
        param.requires_grad_(True)
    return params


# Build the network
def init_rnn_state(batch_size, num_hiddens, device):
    return (torch.zeros((batch_size, num_hiddens), device=device),)


def rnn(inputs, state, params):
    w_xh, w_hh, b_h, w_hq, b_q = params
    h, = state
    outputs = []
    # Shape of x: (batch_size, vocab_size)
    for x in inputs:
        h = torch.tanh(torch.mm(x, w_xh) + torch.mm(h, w_hh) + b_h)
        y = torch.mm(h, w_hq) + b_q
        outputs.append(y)
    return torch.cat(outputs, dim=0), (h,)


class RNNModelScratch:  # @save
    """An RNN model implemented from scratch."""
    def __init__(self, vocab_size, num_hiddens, device,
                 get_params, init_state, forward_fn):
        self.vocab_size, self.num_hiddens = vocab_size, num_hiddens
        self.params = get_params(vocab_size, num_hiddens, device)
        self.init_state, self.forward_fn = init_state, forward_fn

    def __call__(self, x, state):
        x = F.one_hot(x.T, self.vocab_size).type(torch.float32)
        return self.forward_fn(x, state, self.params)

    def begin_state(self, batch_size, device):
        return self.init_state(batch_size, self.num_hiddens, device)


num_hiddens = 512
net = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params,
                      init_rnn_state, rnn)
state = net.begin_state(x.shape[0], d2l.try_gpu())
y, new_state = net(x.to(d2l.try_gpu()), state)
print(y.shape, len(new_state), new_state[0].shape)


# Prediction
def predict_ch8(prefix, num_preds, net, vocab, device):
    """Generate new characters following prefix."""
    state = net.begin_state(batch_size=1, device=device)
    outputs = [vocab[prefix[0]]]
    get_input = lambda: torch.tensor([outputs[-1]],
                                     device=device).reshape((1, 1))
    for y in prefix[1:]:  # Warm-up period
        _, state = net(get_input(), state)
        outputs.append(vocab[y])
    for _ in range(num_preds):  # Predict num_preds steps
        y, state = net(get_input(), state)
        outputs.append(int(y.argmax(dim=1).reshape(1)))
    return ''.join([vocab.idx_to_token[i] for i in outputs])


print(predict_ch8('time traveller ', 10, net, vocab, d2l.try_gpu()))


def grad_clipping(net, theta):  # @save
    """Clip the gradient."""
    if isinstance(net, nn.Module):
        params = [p for p in net.parameters() if p.requires_grad]
    else:
        params = net.params
    norm = torch.sqrt(sum(torch.sum((p.grad ** 2)) for p in params))
    if norm > theta:
        for param in params:
            param.grad[:] *= theta / norm


# Training
# @save
def train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):
    """Train the network for one epoch (defined in Chapter 8)."""
    state, timer = None, d2l.Timer()
    metric = d2l.Accumulator(2)  # Sum of training loss, number of tokens
    for X, Y in train_iter:
        if state is None or use_random_iter:
            # Initialize state on the first iteration or when using
            # random sampling
            state = net.begin_state(batch_size=X.shape[0], device=device)
        else:
            if isinstance(net, nn.Module) and not isinstance(state, tuple):
                # For nn.GRU, state is a tensor
                state.detach_()
            else:
                # For nn.LSTM, or for our model implemented from scratch,
                # state is a tuple of tensors
                for s in state:
                    s.detach_()
        y = Y.T.reshape(-1)
        X, y = X.to(device), y.to(device)
        y_hat, state = net(X, state)
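        # y_hat has shape (num_steps * batch_size, vocab_size), because rnn()
        # concatenates the per-step outputs along dim=0; transposing Y before
        # reshaping it above flattens the labels in the same time-major order,
        # so y_hat and y line up token by token for the loss computed next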
        l = loss(y_hat, y.long()).mean()
        if isinstance(updater, torch.optim.Optimizer):
            updater.zero_grad()
            l.backward()
            grad_clipping(net, 1)
            updater.step()
        else:
            l.backward()
            grad_clipping(net, 1)
            # Since mean() was already applied to the loss, use batch_size=1
            updater(batch_size=1)
        metric.add(l * y.numel(), y.numel())
    return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()


# @save
def train_ch8(net, train_iter, vocab, lr, num_epochs, device,
              use_random_iter=False):
    """Train the model (defined in Chapter 8)."""
    loss = nn.CrossEntropyLoss()
    animator = d2l.Animator(xlabel='epoch', ylabel='perplexity',
                            legend=['train'], xlim=[10, num_epochs])
    # Initialize
    if isinstance(net, nn.Module):
        updater = torch.optim.SGD(net.parameters(), lr)
    else:
        updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)
    predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)
    # Train and predict
    for epoch in range(num_epochs):
        ppl, speed = train_epoch_ch8(
            net, train_iter, loss, updater, device, use_random_iter)
        if (epoch + 1) % 10 == 0:
            print(predict('time traveller'))
            animator.add(epoch + 1, [ppl])
    print(f'perplexity {ppl:.1f}, {speed:.1f} tokens/sec on {str(device)}')
    print(predict('time traveller'))
    print(predict('traveller'))
    plt.show()


num_epochs, lr = 500, 1
train_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu())
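For comparison, the same recurrent forward pass can also be run through PyTorch's built-in nn.RNN layer (the concise implementation covered later in the chapter). Below is a minimal sketch, reusing batch_size, num_steps, and num_hiddens from above; note that nn.RNN only computes the hidden states, so a full model would still add the output projection (w_hq, b_q) on top.

# Built-in recurrent layer: input size is the vocabulary size
rnn_layer = nn.RNN(len(vocab), num_hiddens)
# Initial hidden state has shape (num_layers, batch_size, num_hiddens)
state = torch.zeros((1, batch_size, num_hiddens))
# Dummy time-major input of shape (num_steps, batch_size, vocab_size)
X = torch.rand(size=(num_steps, batch_size, len(vocab)))
# Y stacks the hidden state at every time step; state_new is the final state
Y, state_new = rnn_layer(X, state)
print(Y.shape, state_new.shape)  # (35, 32, 512) and (1, 32, 512)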