
"PyTorch Deep Learning Practice" (《PyTorch 深度学习实践》) by 刘二大人, Lecture 13

Posted: 2022-10-24 22:37:28

With the same hyperparameters, training takes about 15 min on the CPU and 2 min 43 s on the GPU.
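The listing below hard-codes USE_GPU = True. As a hedged variant of my own (not part of the lecture code), the flag could instead be derived from torch.cuda.is_available(), so the same script also runs on a CPU-only machine; the complete program follows.

# Hypothetical device guard (my addition): fall back to the CPU when no CUDA device is present.
import torch

USE_GPU = torch.cuda.is_available()
device = torch.device("cuda:0" if USE_GPU else "cpu")
print(f"Running on {device}")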

# Classify the country of origin from a surname
import math
import time
import torch
# plotting
import matplotlib.pyplot as plt
import numpy as np
# reading the data
import gzip
import csv

from torch.nn.utils.rnn import pack_padded_sequence
from torch.utils.data import Dataset, DataLoader
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

# ------------0 Parameters-------------#
HIDDEN_SIZE = 100
BATCH_SIZE = 256
N_LAYER = 2
N_EPOCHS = 100
N_CHARS = 128   # size of the character dictionary (ASCII)
USE_GPU = True  # set to False to train on the CPU

# ---------------------1 Prepare Data and DataLoader-------------------------------#
class NameDataset(Dataset):
    def __init__(self, is_train_set=True):
        filename = 'names_train.csv.gz' if is_train_set else 'names_test.csv.gz'

        # read the dataset with the gzip and csv modules
        with gzip.open(filename, 'rt') as f:
            reader = csv.reader(f)
            rows = list(reader)  # read row by row: (name, country)

        self.names = [row[0] for row in rows]
        self.len = len(self.names)
        self.countries = [row[1] for row in rows]
        self.country_list = list(sorted(set(self.countries)))  # set: drop duplicates, sorted: sort, list: convert back to a list
        self.country_dict = self.getCountryDict()
        self.country_num = len(self.country_list)

    def __getitem__(self, index):
        # the name is returned as a string, the country as its index from country_dict
        return self.names[index], self.country_dict[self.countries[index]]

    def __len__(self):
        return self.len

    def getCountryDict(self):  # convert the country list into a dictionary
        country_dict = dict()
        for idx, country_name in enumerate(self.country_list, 0):
            country_dict[country_name] = idx
        return country_dict

    def idx2country(self, index):  # return the country name for a given index
        return self.country_list[index]

    def getCountriesNum(self):  # return the number of countries
        return self.country_num


# DataLoader
trainset = NameDataset(is_train_set=True)
trainloader = DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True)
testset = NameDataset(is_train_set=False)
testloader = DataLoader(testset, batch_size=BATCH_SIZE, shuffle=False)
N_COUNTRY = trainset.getCountriesNum()

# ------------------------------2 Design Model-----------------------------------#
def create_tensor(tensor):
    if USE_GPU:
        device = torch.device("cuda:0")
        tensor = tensor.to(device)
    return tensor


class RNNClassifier(torch.nn.Module):
    def __init__(self, input_size, hidden_size, output_size, n_layers=1, bidirectional=True):
        super(RNNClassifier, self).__init__()
        self.hidden_size = hidden_size
        self.n_layers = n_layers
        self.n_directions = 2 if bidirectional else 1  # a bidirectional RNN runs in two directions
        self.embedding = torch.nn.Embedding(input_size, hidden_size)
        self.gru = torch.nn.GRU(hidden_size, hidden_size, n_layers, bidirectional=bidirectional)
        self.fc = torch.nn.Linear(hidden_size * self.n_directions, output_size)

    def _init_hidden(self, batch_size):
        hidden = torch.zeros(self.n_layers * self.n_directions, batch_size, self.hidden_size)
        return create_tensor(hidden)

    def forward(self, input, seq_lengths):
        input = input.t()  # transpose: input shape B x S -> S x B
        batch_size = input.size(1)

        hidden = self._init_hidden(batch_size)  # h0
        embedding = self.embedding(input)  # (seqLen, batchSize, hiddenSize)

        # PackedSequence: remove the zero padding, record each sample's true length,
        # and concatenate the sequences after sorting them by length
        gru_input = pack_padded_sequence(embedding, seq_lengths)

        output, hidden = self.gru(gru_input, hidden)
        if self.n_directions == 2:  # a bidirectional RNN yields two final hidden states
            hidden_cat = torch.cat([hidden[-1], hidden[-2]], dim=1)
        else:
            hidden_cat = hidden[-1]

        fc_output = self.fc(hidden_cat)
        return fc_output


classifier = RNNClassifier(N_CHARS, HIDDEN_SIZE, N_COUNTRY, N_LAYER)

# ----------------------3 Construct Loss and Optimizer------------------------------------#
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(classifier.parameters(), lr=0.001)


# -----------------------------------4 Train and Test----------------------------------------------------#
def time_since(since):
    s = time.time() - since
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)

def name2list(name):
    arr = [ord(c) for c in name]  # ASCII code of every character
    return arr, len(arr)  # return a tuple: the list itself and its length


def make_tensors(names, countries):
    sequences_and_lengths = [name2list(name) for name in names]
    name_sequences = [sl[0] for sl in sequences_and_lengths]
    seq_lengths = torch.LongTensor([sl[1] for sl in sequences_and_lengths])
    countries = countries.long()  # countries holds the country indices

    # make a tensor of names, BatchSize x SeqLen:
    # create an all-zero tensor first, then copy each name onto it
    seq_tensor = torch.zeros(len(name_sequences), seq_lengths.max()).long()
    for idx, (seq, seq_len) in enumerate(zip(name_sequences, seq_lengths), 0):
        seq_tensor[idx, :seq_len] = torch.LongTensor(seq)

    # sort by length (descending) so pack_padded_sequence can be used;
    # sort returns the sorted lengths and perm_idx, the indices of the sorted elements
    seq_lengths, perm_idx = seq_lengths.sort(dim=0, descending=True)
    seq_tensor = seq_tensor[perm_idx]  # reorder the padded sequences
    countries = countries[perm_idx]  # reorder the labels
    return create_tensor(seq_tensor), create_tensor(seq_lengths), create_tensor(countries)

def trainModel():
    total_loss = 0
    for i, (names, countries) in enumerate(trainloader, 1):
        inputs, seq_lengths, target = make_tensors(names, countries)
        output = classifier(inputs, seq_lengths.to('cpu'))  # pack_padded_sequence expects the lengths on the CPU
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_loss += loss.item()
        if i % 10 == 0:
            print(f'[{time_since(start)}] Epoch {epoch} ', end='')
            print(f'[{i * len(inputs)}/{len(trainset)}] ', end='')
            print(f'loss={total_loss / (i * len(inputs))}')
    return total_loss


# test module
def testModel():
    correct = 0
    total = len(testset)
    print("evaluating trained model ...")
    with torch.no_grad():
        for i, (names, countries) in enumerate(testloader, 1):
            inputs, seq_lengths, target = make_tensors(names, countries)
            output = classifier(inputs, seq_lengths.to('cpu'))
            pred = output.max(dim=1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
        percent = '%.2f' % (100 * correct / total)
        print(f'Test set: Accuracy {correct}/{total} {percent}%')
    return correct / total

if __name__ == '__main__':
    if USE_GPU:
        device = torch.device("cuda:0")
        classifier.to(device)
    start = time.time()
    print("Training for %d epochs..." % N_EPOCHS)
    acc_list = []
    # training cycle: in every epoch, train the model once and then test it once
    for epoch in range(1, N_EPOCHS + 1):
        trainModel()
        acc = testModel()
        acc_list.append(acc)

    # plot the test accuracy against the epoch number
    epoch = np.arange(1, len(acc_list) + 1, 1)
    acc_list = np.array(acc_list)
    plt.plot(epoch, acc_list)
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.grid()
    plt.show()
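The key step in forward() is pack_padded_sequence, which removes the zero padding and keeps only the real time steps of each sample; this is also why make_tensors sorts the batch by length in descending order. A tiny self-contained sketch (my own illustration, independent of the name data) shows what the resulting PackedSequence contains:

import torch
from torch.nn.utils.rnn import pack_padded_sequence

# Two sequences of lengths 3 and 1, already sorted by length (descending)
# and padded into a (seqLen x batchSize x feature) tensor, as in forward().
padded = torch.tensor([[[1.], [4.]],
                       [[2.], [0.]],
                       [[3.], [0.]]])   # shape: (3, 2, 1)
lengths = torch.tensor([3, 1])          # true lengths, kept on the CPU

packed = pack_padded_sequence(padded, lengths)
print(packed.data.squeeze(-1))   # tensor([1., 4., 2., 3.]) -- the padding is gone
print(packed.batch_sizes)        # tensor([2, 1, 1]) -- active sequences per time step

Because batch_sizes shrinks as the shorter sequences end, the GRU spends no computation on padded positions.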

Tail of the training log from the GPU run:

evaluating trained model ...
Test set: Accuracy 5599/6700 83.57%
[2m 41s] Epoch 100 [2560/13374] loss=0.00011349248889018782
[2m 42s] Epoch 100 [5120/13374] loss=0.00012008407356915996
[2m 42s] Epoch 100 [7680/13374] loss=0.0001346439957463493
[2m 42s] Epoch 100 [10240/13374] loss=0.00013780106764897936
[2m 43s] Epoch 100 [12800/13374] loss=0.00014130977695458568
evaluating trained model ...
Test set: Accuracy 5607/6700 83.69%
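After training, make_tensors can be reused for single-name inference. The predict helper below is a hedged sketch of my own (it does not appear in the original post) and assumes the trained classifier, make_tensors, and trainset defined above are still in scope:

def predict(name):
    """Return the predicted country label for one surname."""
    with torch.no_grad():
        # the label tensor is a dummy; only the name matters at inference time
        inputs, seq_lengths, _ = make_tensors([name], torch.zeros(1).long())
        output = classifier(inputs, seq_lengths.to('cpu'))
        idx = output.max(dim=1)[1].item()
        return trainset.idx2country(idx)

print(predict('Nakamura'))  # the printed label depends on the trained weights and the dataset's country strings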

From: https://www.cnblogs.com/zhouyeqin/p/16823279.html
