
Diabetic Retinopathy Classification with ResNet50 on the Messidor Dataset


import os
import csv

import numpy as np
import cv2
import tifffile
from PIL import Image

import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset, SubsetRandomSampler
from torchvision import datasets, transforms
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD

# Use the first GPU if one is available, otherwise fall back to the CPU.
if torch.cuda.is_available():
    torch.cuda.set_device(0)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
if device.type == 'cuda':
    print(f"Default device set successfully; using CUDA device {device}")
else:
    print("No CUDA device available; using the CPU")
import torchvision.models as models
import torch.optim as optim
from torchvision.datasets import ImageFolder

# Build a ResNet50 and load the locally downloaded ImageNet weights.
resnet50 = models.resnet50()
checkpoint = torch.load(r'/home/guoliang/CV/LJQ/0/RETFound_MAE-main/models/resnet50-19c8e357.pth')
resnet50.load_state_dict(checkpoint)

# Every parameter is left trainable here, i.e. the whole backbone is fine-tuned;
# set requires_grad = False instead if you want to freeze the pretrained layers.
for param in resnet50.parameters():
    param.requires_grad = True

# Replace the final fully connected layer to match the number of target classes.
num_classes = 4  # the four Messidor retinopathy grades (0-3)
resnet50.fc = nn.Linear(resnet50.fc.in_features, num_classes)
print(resnet50)
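# The freeze/fine-tune choice above can also go the other way. A minimal sketch
# (hypothetical, not used in this script) that freezes the pretrained backbone
# and trains only the newly added head would look like this:
#
#     for param in resnet50.parameters():
#         param.requires_grad = False
#     resnet50.fc = nn.Linear(resnet50.fc.in_features, num_classes)  # new head stays trainable
#     optimizer = optim.Adam((p for p in resnet50.parameters() if p.requires_grad), lr=1e-3)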
# Data transforms (the remaining code follows the usual fine-tuning recipe).
from timm.data import create_transform
mean = IMAGENET_DEFAULT_MEAN
std = IMAGENET_DEFAULT_STD  # note: the transforms below normalize with 0.5 rather than these ImageNet statistics

# Evaluation transform: resize only, no augmentation.
transform1 = transforms.Compose([
    transforms.Resize(400),
    # transforms.CenterCrop(300),  # optional center crop
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
# Training transform: mild photometric and geometric augmentation.
transform = transforms.Compose([
    # transforms.Resize(400, interpolation=transforms.InterpolationMode.BICUBIC),
    transforms.Resize(400),
    # transforms.CenterCrop(300),  # optional center crop
    transforms.ColorJitter(brightness=(0.95, 1.05),
                           contrast=(0.95, 1.05),
                           saturation=(0.95, 1.05),
                           hue=0.05),
    transforms.RandomAffine(5, translate=(0.01, 0.01)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
train_dataset = datasets.ImageFolder(r'/home/guoliang/CV/dataset/messidor5/train', transform=transform)
test_dataset = datasets.ImageFolder(r'/home/guoliang/CV/dataset/messidor/test', transform=transform1)
val_dataset = datasets.ImageFolder(r'/home/guoliang/CV/dataset/messidor/val', transform=transform1)
train_loader = DataLoader(train_dataset, batch_size=256, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=128)
val_loader = DataLoader(val_dataset, batch_size=128)
# Class weights for the imbalanced grades (unused here; see the weighted-loss sketch further down):
# class_counts = torch.bincount(torch.tensor(train_dataset.targets))
# total_samples = len(train_dataset.targets)
# class_weights = total_samples / (2 * class_counts.float())
def accuracy_test_evaluation(model):
    """Top-1 accuracy on the test set."""
    model.eval()
    right = 0
    total = 0
    with torch.no_grad():
        for x, y in test_loader:
            x, y = x.to(device), y.to(device)
            y_hat = model(x)
            _, predicted = torch.max(y_hat, 1)
            right += (predicted == y).sum().item()
            total += y.size(0)
    print(f"correct test predictions: {right}")
    return right / total

def accuracy_train_evaluation(model):
    """Top-1 accuracy on the training set."""
    model.eval()
    right = 0
    total = 0
    with torch.no_grad():
        for x, y in train_loader:
            x, y = x.to(device), y.to(device)
            y_hat = model(x)
            _, predicted = torch.max(y_hat, 1)
            right += (predicted == y).sum().item()
            total += y.size(0)
    print(f"correct train predictions: {right}")
    return right / total
def eval_val(model):
    """Summed cross-entropy loss over the validation loader."""
    model.eval()
    loss1 = nn.CrossEntropyLoss()
    total_loss = 0.0
    with torch.no_grad():
        for x, y in val_loader:
            x, y = x.to(device), y.to(device)
            y_hat = model(x)
            # CrossEntropyLoss expects raw logits, so no softmax is applied here.
            total_loss += loss1(y_hat, y).item()
    return total_loss

def eval_train(model):
    """Summed cross-entropy loss over the training loader."""
    model.eval()
    loss2 = nn.CrossEntropyLoss()
    total_loss = 0.0
    with torch.no_grad():
        for x, y in train_loader:
            x, y = x.to(device), y.to(device)
            y_hat = model(x)
            total_loss += loss2(y_hat, y).item()
    return total_loss
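# Optional: a per-class view of test performance, which is more informative than
# overall accuracy because the Messidor grades are imbalanced. This is a sketch
# assuming scikit-learn is installed; it reuses test_loader, device and num_classes
# from above and is not called anywhere by default.
from sklearn.metrics import confusion_matrix
def confusion_matrix_test(model):
    model.eval()
    all_preds, all_labels = [], []
    with torch.no_grad():
        for x, y in test_loader:
            preds = model(x.to(device)).argmax(dim=1).cpu()
            all_preds.extend(preds.tolist())
            all_labels.extend(y.tolist())
    # Rows are true grades, columns are predicted grades.
    return confusion_matrix(all_labels, all_preds, labels=list(range(num_classes)))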
from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau
loss = nn.CrossEntropyLoss()
# For imbalanced training sets, CrossEntropyLoss accepts a 1-D per-class `weight`
# tensor (n elements for n classes) that rescales each sample's loss by the weight
# of its class; note that `pos_weight` belongs to BCEWithLogitsLoss, not
# CrossEntropyLoss. For example:
# weights = torch.FloatTensor([1, 4, 2, 2])
# loss = nn.CrossEntropyLoss(weight=weights.to(device))
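# A working sketch of that class-weighted loss, using inverse-frequency weights
# computed from the training labels. `weighted_loss` is defined here but only
# takes effect if you pass it to train_batch below in place of `loss`.
class_counts = torch.bincount(torch.tensor(train_dataset.targets))
class_weights = class_counts.sum() / (len(class_counts) * class_counts.float())
weighted_loss = nn.CrossEntropyLoss(weight=class_weights.to(device))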
net = resnet50.to(device)
# optimizer = torch.optim.SGD(net.parameters(), lr=0.1)
# scheduler = StepLR(optimizer, step_size=50, gamma=0.1)
optimizer = optim.Adam(resnet50.parameters(), lr=0.001)
scheduler = StepLR(optimizer, step_size=80, gamma=0.1)
sum_loss = []
sum_train_acc = []
sum_test_acc = []
def train_batch(model, loss, optimizer):
    # def init_weights(m):
    #     if isinstance(m, (nn.Linear, nn.Conv2d)):
    #         nn.init.xavier_uniform_(m.weight)
    # model.apply(init_weights)
    epochs = 500
    # train_loader was built with shuffle=True, so the data is reshuffled every epoch.
    for ix in range(epochs):
        model.train()
        for x, y in train_loader:
            optimizer.zero_grad()
            x, y = x.to(device), y.to(device)
            y_hat = model(x)
            # CrossEntropyLoss works on raw logits, so no softmax is applied here.
            l = loss(y_hat, y)
            l.backward()
            optimizer.step()
        val_loss = eval_val(model)
        # train_loss = eval_train(model)
        acc_test = accuracy_test_evaluation(model)
        # acc_train = accuracy_train_evaluation(model)
        scheduler.step()
        # sum_train_acc.append(acc_train)
        sum_test_acc.append(acc_test)
        print("epoch " + str(ix) + " finished")
        # print("train_loss: " + str(train_loss))
        # print("acc_train: " + str(acc_train))
        print("acc_test: " + str(acc_test))
        print("val_loss: " + str(val_loss))
        with open(r'/home/guoliang/CV/LJQ/0/log/output1.log', 'a') as f:
            f.write("epoch " + str(ix) + " finished\n")
            # f.write("train_loss: " + str(train_loss) + "\n")
            # f.write("acc_train: " + str(acc_train) + "\n")
            f.write("acc_test: " + str(acc_test) + "\n")
            f.write("val_loss: " + str(val_loss) + "\n")

train_batch(net, loss, optimizer)
torch.cuda.empty_cache()
print("done")
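# Closing sketch: persist the fine-tuned weights and plot the per-epoch test
# accuracy collected in sum_test_acc. The output file names are placeholders
# (not from the original post) and matplotlib is assumed to be available.
torch.save(net.state_dict(), 'resnet50_messidor_finetuned.pth')

import matplotlib.pyplot as plt
plt.plot(sum_test_acc)
plt.xlabel('epoch')
plt.ylabel('test accuracy')
plt.savefig('test_accuracy.png')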

 

From: https://www.cnblogs.com/ljq20204136/p/18149444
