These study notes are based on the Bilibili tutorial "Deep Learning: Transfer Learning Project in Practice with a Custom Pokémon Dataset".
The key things to learn from this pretrain-and-finetune code are: loading the classic resnet18 architecture together with its officially released pretrained weights, replacing the final fully connected layer with one that fits your own task, then training the resulting network and saving the best model. (Note that the script below passes all of the model's parameters to the optimizer, so the pretrained backbone is fine-tuned along with the new layer; a sketch of training only the new head follows the main program.)
Unlike the previous study notes on pretraining and fine-tuning, the video tutorial here shows a different way of splitting the classic architecture off from its pretrained classifier; only two places in the code change.
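For comparison, the earlier notes keep the resnet18 module intact and simply swap out its fc attribute. A minimal sketch of that alternative (reconstructed here as an assumption, since this post does not show it):
from torch import nn
from torchvision.models import resnet18

trained_model = resnet18(pretrained=True)
trained_model.fc = nn.Linear(512, 5)  # resnet18's original fc is Linear(512, 1000)
Both variants end with a 512-dimensional feature vector feeding a 5-way linear classifier; the nn.Sequential version used below just makes the split between backbone and new head explicit.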
Change 1:
from torchvision.models import resnet18 # resnet18 architecture from torchvision, with official pretrained weights available
Change 2:
trained_model = resnet18(pretrained=True)
# pretrained=True loads weights pretrained on a large-scale dataset (ImageNet).
model = nn.Sequential(*list(trained_model.children())[:-1],  # [b, 512, 1, 1]
                      # trained_model.children() returns an iterator over the model's top-level
                      # child modules; list(...) turns it into a list, and the [:-1] slice drops
                      # the last child (the fc classification layer), keeping everything up to
                      # and including global average pooling (the first 17 of ResNet18's 18 weight layers).
                      nn.Flatten(),      # flatten the features: [b, 512, 1, 1] => [b, 512]
                      nn.Linear(512, 5)  # new fully connected (linear) layer for the 5 classes
                      ).to(device)
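Note: on torchvision 0.13 and later, pretrained=True still works but is deprecated in favor of the weights argument. A minimal equivalent for newer versions:
from torchvision.models import resnet18, ResNet18_Weights

trained_model = resnet18(weights=ResNet18_Weights.IMAGENET1K_V1)  # same ImageNet weights as pretrained=True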
The complete main program is as follows:
import torch
from torch import optim, nn
import visdom
from torch.utils.data import DataLoader
from pokemon import Pokemon
############################################ Change 1
from torchvision.models import resnet18 # resnet18 architecture from torchvision, with official pretrained weights available
############################################
batchsz = 32
lr = 1e-3
epochs = 10
device = torch.device('cuda')
torch.manual_seed(1234)
train_db = Pokemon('pokemon', 224, mode='train')
val_db = Pokemon('pokemon', 224, mode='val')
test_db = Pokemon('pokemon', 224, mode='test')
train_loader = DataLoader(train_db, batch_size=batchsz, shuffle=True,
num_workers=2)
val_loader = DataLoader(val_db, batch_size=batchsz, num_workers=2)
test_loader = DataLoader(test_db, batch_size=batchsz, num_workers=2)
viz = visdom.Visdom()
def evalute(model, loader):
model.eval()
correct = 0
total = len(loader.dataset)
for x,y in loader:
x,y = x.to(device), y.to(device)
with torch.no_grad():
logits = model(x)
pred = logits.argmax(dim=1)
correct += torch.eq(pred, y).sum().float().item()
return correct / total
def main():
    ############################################ Change 2
    trained_model = resnet18(pretrained=True)
    # pretrained=True loads weights pretrained on a large-scale dataset (ImageNet).
    model = nn.Sequential(*list(trained_model.children())[:-1],  # [b, 512, 1, 1]
                          # children() yields the model's top-level child modules; list(...)[:-1]
                          # drops the last one (the fc classification layer), keeping everything up
                          # to and including global average pooling (the first 17 of ResNet18's
                          # 18 weight layers).
                          nn.Flatten(),      # [b, 512, 1, 1] => [b, 512]
                          nn.Linear(512, 5)  # new fully connected layer for the 5 classes
                          ).to(device)
    ############################################
print(model)
optimizer = optim.Adam(model.parameters(), lr=lr)
criteon = nn.CrossEntropyLoss()
best_acc, best_epoch = 0, 0
global_step = 0
viz.line([0], [-1], win='loss', opts=dict(title='loss'))
viz.line([0], [-1], win='val_acc', opts=dict(title='val_acc'))
for epoch in range(epochs):
for step, (x,y) in enumerate(train_loader):
x, y = x.to(device), y.to(device)
model.train()
logits = model(x)
loss = criteon(logits, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
viz.line([loss.item()], [global_step], win='loss', update='append')
global_step += 1
if epoch % 1 == 0:
val_acc = evalute(model, val_loader)
            if val_acc > best_acc:
best_epoch = epoch
best_acc = val_acc
torch.save(model.state_dict(), 'best.mdl')
viz.line([val_acc], [global_step], win='val_acc', update='append')
print('best acc:', best_acc, 'best epoch:', best_epoch)
model.load_state_dict(torch.load('best.mdl'))
print('loaded from ckpt!')
test_acc = evalute(model, test_loader)
print('test acc:', test_acc)
if __name__ == '__main__':
main()
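As noted above, optim.Adam(model.parameters(), lr=lr) updates every layer, so the pretrained backbone is fine-tuned together with the new head. If the goal is to train only the new fully connected layer, one option (a sketch under that assumption, not part of the original script) is to freeze the backbone inside main() and hand only the trainable parameters to the optimizer:
# Sketch: freeze the pretrained backbone so only the new head is trained.
trained_model = resnet18(pretrained=True)
for param in trained_model.parameters():
    param.requires_grad = False  # no gradient updates for the pretrained layers

model = nn.Sequential(*list(trained_model.children())[:-1],
                      nn.Flatten(),
                      nn.Linear(512, 5)  # only this layer keeps requires_grad=True
                      ).to(device)

# Pass only the parameters that still require gradients to the optimizer.
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
Even when frozen this way, the backbone's BatchNorm layers keep updating their running statistics while the model is in train() mode; putting those modules in eval() during training is a separate design choice.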
The main program imports the Pokemon dataset class from pokemon.py, whose code is as follows:
import visdom
import time
import torch
import os, glob
import random, csv
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image
class Pokemon(Dataset):
def __init__(self, root, resize, mode):
super(Pokemon, self).__init__()
self.root = root
self.resize = resize
self.name2label = {}
for name in sorted(os.listdir(os.path.join(root))):
if not os.path.isdir(os.path.join(root, name)):
continue
self.name2label[name] = len(self.name2label.keys())
self.images, self.labels = self.load_csv('images.csv')
if mode=='train': # 60%
self.images = self.images[:int(0.6*len(self.images))]
self.labels = self.labels[:int(0.6*len(self.labels))]
elif mode=='val': # 20% = 60%->80%
self.images = self.images[int(0.6*len(self.images)):int(0.8*len(self.images))]
self.labels = self.labels[int(0.6*len(self.labels)):int(0.8*len(self.labels))]
else: # 20% = 80%->100%
self.images = self.images[int(0.8*len(self.images)):]
self.labels = self.labels[int(0.8*len(self.labels)):]
def load_csv(self, filename):
if not os.path.exists(os.path.join(self.root, filename)):
images = []
for name in self.name2label.keys():
images += glob.glob(os.path.join(self.root, name, '*.png'))
images += glob.glob(os.path.join(self.root, name, '*.jpg'))
images += glob.glob(os.path.join(self.root, name, '*.jpeg'))
print(len(images), images)
random.shuffle(images)
with open(os.path.join(self.root, filename), mode='w', newline='') as f:
writer = csv.writer(f)
for img in images:
name = img.split(os.sep)[-2]
label = self.name2label[name]
writer.writerow([img, label])
                print('written into csv file:', filename)
# read from csv file
images, labels = [], []
with open(os.path.join(self.root, filename)) as f:
reader = csv.reader(f)
for row in reader:
img, label = row
label = int(label)
images.append(img)
labels.append(label)
assert len(images) == len(labels)
return images, labels
def __len__(self):
return len(self.images)
def denormalize(self, x_hat):
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
mean = torch.tensor(mean).unsqueeze(1).unsqueeze(1)
std = torch.tensor(std).unsqueeze(1).unsqueeze(1)
x = x_hat * std + mean
return x
def __getitem__(self, idx):
img, label = self.images[idx], self.labels[idx]
tf = transforms.Compose([
lambda x:Image.open(x).convert('RGB'),
transforms.Resize((int(self.resize*1.25), int(self.resize*1.25))),
transforms.RandomRotation(15),
transforms.CenterCrop(self.resize),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
img = tf(img)
label = torch.tensor(label)
return img, label
def main():
viz = visdom.Visdom()
db = Pokemon('pokemon', 64, 'train')
x,y = next(iter(db))
print('sample:', x.shape, y.shape, y)
viz.image(db.denormalize(x), win='sample_x', opts=dict(title='sample_x'))
loader = DataLoader(db, batch_size=32, shuffle=True, num_workers=2)
for x,y in loader:
viz.images(db.denormalize(x), nrow=8, win='batch', opts=dict(title='batch'))
viz.text(str(y.numpy()), win='label', opts=dict(title='batch-y'))
time.sleep(10)
if __name__ == '__main__':
main()
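Note that both the training script and the demo main() in pokemon.py create a visdom.Visdom() client, so a Visdom server must already be running. Start it in a separate terminal with python -m visdom.server (it listens on port 8097 by default) and open http://localhost:8097 in a browser to watch the loss curve, the validation accuracy, and the sample batches.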
Running the training script prints the assembled model, streams the training loss and validation accuracy curves to Visdom, and finally reports the best validation accuracy, the epoch at which it was reached, and the test accuracy of the best checkpoint.
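Once best.mdl has been saved, it can be reloaded for inference outside the training loop. A minimal sketch (an assumption, not shown in the original post; it relies on the same pokemon/ folder, the saved checkpoint, and an identically constructed model):
import torch
from torch import nn
from torchvision.models import resnet18
from pokemon import Pokemon

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Rebuild exactly the same architecture that was trained; otherwise
# load_state_dict() will complain about missing or unexpected keys.
trained_model = resnet18(pretrained=False)
model = nn.Sequential(*list(trained_model.children())[:-1],
                      nn.Flatten(),
                      nn.Linear(512, 5)).to(device)
model.load_state_dict(torch.load('best.mdl', map_location=device))
model.eval()

# Reuse the dataset's own preprocessing by pulling one test sample.
db = Pokemon('pokemon', 224, mode='test')
x, y = db[0]
with torch.no_grad():
    pred = model(x.unsqueeze(0).to(device)).argmax(dim=1).item()
idx2name = {v: k for k, v in db.name2label.items()}
print('predicted:', idx2name[pred], 'ground truth:', idx2name[y.item()])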