
06-DenseNet Image Classification

DenseNet implementation (PyTorch):

import torch
import torch.nn as nn
import torchvision

print("PyTorch Version: ", torch.__version__)
print("Torchvision Version: ", torchvision.__version__)

__all__ = ['DenseNet121', 'DenseNet169', 'DenseNet201', 'DenseNet264']

def Conv1(in_planes, out_planes, stride=2):
    # Stem: 7x7 conv (stride 2) + BN + ReLU, followed by a 3x3 max pool (stride 2)
    return nn.Sequential(
        nn.Conv2d(in_channels=in_planes, out_channels=out_planes, kernel_size=7, stride=stride, padding=3, bias=False),
        nn.BatchNorm2d(out_planes),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    )

class _TransitionLayer(nn.Module):
    # Transition layer: BN-ReLU-1x1 conv to compress the channels,
    # then 2x2 average pooling to halve the spatial resolution
    def __init__(self, in_planes, out_planes):
        super(_TransitionLayer, self).__init__()
        self.transition_layer = nn.Sequential(
            nn.BatchNorm2d(in_planes),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=in_planes, out_channels=out_planes, kernel_size=1, stride=1, padding=0, bias=False),
            nn.AvgPool2d(kernel_size=2, stride=2),
        )

    def forward(self, x):
        return self.transition_layer(x)


class _DenseLayer(nn.Module):
    # Bottleneck dense layer: BN-ReLU-1x1 conv (to bn_size * growth_rate channels),
    # then BN-ReLU-3x3 conv producing growth_rate new channels,
    # which are concatenated with the layer's input
    def __init__(self, in_planes, growth_rate, bn_size, drop_rate=0):
        super(_DenseLayer, self).__init__()
        self.drop_rate = drop_rate
        self.dense_layer = nn.Sequential(
            nn.BatchNorm2d(in_planes),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=in_planes, out_channels=bn_size * growth_rate, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(bn_size * growth_rate),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=bn_size * growth_rate, out_channels=growth_rate, kernel_size=3, stride=1, padding=1, bias=False),
        )
        self.dropout = nn.Dropout(p=self.drop_rate)

    def forward(self, x):
        y = self.dense_layer(x)
        if self.drop_rate > 0:
            y = self.dropout(y)
        return torch.cat([x, y], 1)


class DenseBlock(nn.Module):
    # A dense block stacks num_layers dense layers; layer i receives
    # in_planes + i * growth_rate input channels
    def __init__(self, num_layers, in_planes, growth_rate, bn_size, drop_rate=0):
        super(DenseBlock, self).__init__()
        layers = []
        for i in range(num_layers):
            layers.append(_DenseLayer(in_planes + i * growth_rate, growth_rate, bn_size, drop_rate))
        self.layers = nn.Sequential(*layers)

    def forward(self, x):
        return self.layers(x)


class DenseNet(nn.Module):
    def __init__(self, init_channels=64, growth_rate=32, blocks=[6, 12, 24, 16], num_classes=1000):
        super(DenseNet, self).__init__()
        bn_size = 4
        drop_rate = 0
        self.conv1 = Conv1(in_planes=3, out_planes=init_channels)

        # Each dense block adds blocks[i] * growth_rate channels;
        # each transition layer then halves the channel count
        num_features = init_channels
        self.layer1 = DenseBlock(num_layers=blocks[0], in_planes=num_features, growth_rate=growth_rate, bn_size=bn_size, drop_rate=drop_rate)
        num_features = num_features + blocks[0] * growth_rate
        self.transition1 = _TransitionLayer(in_planes=num_features, out_planes=num_features // 2)
        num_features = num_features // 2
        self.layer2 = DenseBlock(num_layers=blocks[1], in_planes=num_features, growth_rate=growth_rate, bn_size=bn_size, drop_rate=drop_rate)
        num_features = num_features + blocks[1] * growth_rate
        self.transition2 = _TransitionLayer(in_planes=num_features, out_planes=num_features // 2)
        num_features = num_features // 2
        self.layer3 = DenseBlock(num_layers=blocks[2], in_planes=num_features, growth_rate=growth_rate, bn_size=bn_size, drop_rate=drop_rate)
        num_features = num_features + blocks[2] * growth_rate
        self.transition3 = _TransitionLayer(in_planes=num_features, out_planes=num_features // 2)
        num_features = num_features // 2
        self.layer4 = DenseBlock(num_layers=blocks[3], in_planes=num_features, growth_rate=growth_rate, bn_size=bn_size, drop_rate=drop_rate)
        num_features = num_features + blocks[3] * growth_rate

        # 7x7 average pooling assumes 224x224 inputs, i.e. 7x7 final feature maps
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(num_features, num_classes)

    def forward(self, x):
        x = self.conv1(x)

        x = self.layer1(x)
        x = self.transition1(x)
        x = self.layer2(x)
        x = self.transition2(x)
        x = self.layer3(x)
        x = self.transition3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x

def DenseNet121():
    return DenseNet(init_channels=64, growth_rate=32, blocks=[6, 12, 24, 16], num_classes=10)

def DenseNet169():
    return DenseNet(init_channels=64, growth_rate=32, blocks=[6, 12, 32, 32], num_classes=10)

def DenseNet201():
    return DenseNet(init_channels=64, growth_rate=32, blocks=[6, 12, 48, 32], num_classes=10)

def DenseNet264():
    return DenseNet(init_channels=64, growth_rate=32, blocks=[6, 12, 64, 48], num_classes=10)

if __name__ == '__main__':
    # model = torchvision.models.densenet121()
    model = DenseNet121()
    print(model)

    x = torch.randn(1, 3, 224, 224)
    out = model(x)
    print(out.shape)
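Each dense layer appends growth_rate channels to its input, so a block of n layers grows the channel count by n * growth_rate, and each transition layer then halves it. A minimal sketch (plain Python, using the DenseNet121 configuration above) that reproduces the constructor's channel bookkeeping and shows why the final classifier is nn.Linear(1024, num_classes):

# Channel bookkeeping for DenseNet121 (init_channels=64, growth_rate=32, blocks=[6, 12, 24, 16])
init_channels, growth_rate = 64, 32
blocks = [6, 12, 24, 16]

num_features = init_channels
for i, num_layers in enumerate(blocks):
    num_features += num_layers * growth_rate  # each dense layer adds growth_rate channels
    print("after block %d: %d" % (i + 1, num_features))
    if i < len(blocks) - 1:
        num_features //= 2                    # each transition layer halves the channels
        print("after transition %d: %d" % (i + 1, num_features))
# Prints 256, 128, 512, 256, 1024, 512, 1024 -> the fc layer sees 1024 features.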

ClassfyNet_main.py

import torch
from torch.utils.data import DataLoader
from torch import nn, optim
from torchvision import datasets, transforms
from torchvision.transforms.functional import InterpolationMode

from matplotlib import pyplot as plt

import time

from Lenet5 import Lenet5_new
from Resnet18 import ResNet18, ResNet18_new
from AlexNet import AlexNet
from Vgg16 import VGGNet16
from Densenet import DenseNet121, DenseNet169, DenseNet201, DenseNet264

def main():

    print("Load datasets...")

    # transforms.RandomHorizontalFlip(p=0.5) -- flip the image horizontally with probability 0.5
    # transforms.ToTensor() -- reshape from (H, W, C) to (C, H, W) and map each pixel from [0, 255] to [0, 1] by dividing by 255
    # transforms.Normalize -- with inputs already in [0, 1], apply (x - mean) / std per channel
    transform_train = transforms.Compose([
                        transforms.Resize((224, 224), interpolation=InterpolationMode.BICUBIC),
                        # transforms.RandomCrop(32, padding=4),  # zero-pad the borders, then randomly crop to 32x32
                        transforms.RandomHorizontalFlip(p=0.5),
                        transforms.ToTensor(),
                        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
                    ])

    transform_test = transforms.Compose([
                        transforms.Resize((224, 224), interpolation=InterpolationMode.BICUBIC),
                        # transforms.RandomCrop(32, padding=4),  # zero-pad the borders, then randomly crop to 32x32
                        transforms.ToTensor(),
                        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
                    ])

    # Download the datasets with the built-in helpers
    train_dataset = datasets.CIFAR10(root="./data/Cifar10/", train=True,
                                     transform=transform_train,
                                     download=True)
    test_dataset = datasets.CIFAR10(root="./data/Cifar10/",
                                    train=False,
                                    transform=transform_test,
                                    download=True)

    print(len(train_dataset), len(test_dataset))

    Batch_size = 64
    train_loader = DataLoader(train_dataset, batch_size=Batch_size, shuffle=True, num_workers=4)
    test_loader = DataLoader(test_dataset, batch_size=Batch_size, shuffle=False, num_workers=4)

    # Select the CUDA device
    device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")

    # Initialize the model
    # To try another architecture, just swap the model; nothing else needs to change
    # model = Lenet5_new().to(device)
    # model = ResNet18().to(device)
    # model = ResNet18_new().to(device)
    # model = VGGNet16().to(device)

    model = DenseNet121().to(device)
    # model = DenseNet169().to(device)

    # model = AlexNet(num_classes=10, init_weights=True).to(device)
    print("DenseNet121 train...")

    # Build the loss function and the optimizer
    criterion = nn.CrossEntropyLoss()  # softmax cross-entropy loss for multi-class classification
    # opt = optim.SGD(model.parameters(), lr=0.01, momentum=0.8, weight_decay=0.001)
    opt = optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0005)

    # Learning-rate schedule: every step_size epochs, lr = lr * gamma
    schedule = optim.lr_scheduler.StepLR(opt, step_size=10, gamma=0.6, last_epoch=-1)

    # Start training
    print("Start Train...")

    epochs = 100

    loss_list = []
    train_acc_list = []
    test_acc_list = []
    epochs_list = []

    for epoch in range(0, epochs):

        start = time.time()

        model.train()

        running_loss = 0.0
        batch_num = 0

        for i, (inputs, labels) in enumerate(train_loader):

            inputs, labels = inputs.to(device), labels.to(device)

            # Forward pass
            outputs = model(inputs)
            # Compute the loss
            loss = criterion(outputs, labels)

            # Reset the gradients
            opt.zero_grad()
            # Backpropagate to compute the gradients
            loss.backward()
            # Update the parameters from the gradients
            opt.step()

            # Accumulate the loss over the epoch
            running_loss += loss.item()
            # loss_list.append(loss.item())
            batch_num += 1

        epochs_list.append(epoch)

        # Print the current learning rate at the end of each epoch
        lr_1 = opt.param_groups[0]['lr']
        print("learn_rate:%.15f" % lr_1)
        schedule.step()

        end = time.time()
        print('epoch = %d/%d, batch_num = %d, loss = %.6f, time = %.3f' % (epoch + 1, epochs, batch_num, running_loss / batch_num, end - start))

        # Evaluate on both splits at the end of every training epoch
        model.eval()
        train_correct = 0.0
        train_total = 0

        test_correct = 0.0
        test_total = 0

        # No gradients are needed during evaluation
        with torch.no_grad():

            # print("=======================train=======================")
            for inputs, labels in train_loader:
                inputs, labels = inputs.to(device), labels.to(device)
                outputs = model(inputs)

                pred = outputs.argmax(dim=1)  # index of the largest logit in each row
                train_total += inputs.size(0)
                train_correct += torch.eq(pred, labels).sum().item()

            # print("=======================test=======================")
            for inputs, labels in test_loader:
                inputs, labels = inputs.to(device), labels.to(device)
                outputs = model(inputs)

                pred = outputs.argmax(dim=1)  # index of the largest logit in each row
                test_total += inputs.size(0)
                test_correct += torch.eq(pred, labels).sum().item()

            print("train_total = %d, Accuracy = %.5f %%,  test_total= %d, Accuracy = %.5f %%" % (train_total, 100 * train_correct / train_total, test_total, 100 * test_correct / test_total))

            train_acc_list.append(100 * train_correct / train_total)
            test_acc_list.append(100 * test_correct / test_total)

        # print("Accuracy of the network on the 10000 test images:%.5f %%" % (100 * test_correct / test_total))
        # print("===============================================")

    fig = plt.figure(figsize=(4, 4))

    plt.plot(epochs_list, train_acc_list, label='train_acc_list')
    plt.plot(epochs_list, test_acc_list, label='test_acc_list')
    plt.legend()
    plt.title("train_test_acc")
    plt.savefig('DenseNet121_acc_epoch_{:04d}.png'.format(epochs))
    plt.close()

if __name__ == "__main__":

    main()
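Note that the script above never saves the trained network, so the weights are lost when the process exits. A minimal sketch of how they could be persisted and restored with the standard state_dict API; the checkpoint filename here is an assumption, not part of the original post:

# Hypothetical checkpoint path -- the original script does not save the model.
ckpt_path = "DenseNet121_cifar10.pth"

# After training: persist only the parameters (state_dict), not the whole object.
torch.save(model.state_dict(), ckpt_path)

# Later, e.g. for inference: rebuild the architecture, load the weights,
# and switch to eval mode (fixes BatchNorm statistics, disables dropout).
model = DenseNet121()
model.load_state_dict(torch.load(ckpt_path, map_location="cpu"))
model.eval()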

 

Figure 4: DenseNet121_acc_epoch_0100 (train/test accuracy over 100 epochs)

 

From: https://www.cnblogs.com/zhaopengpeng/p/17045663.html
