首页 > 其他分享 >pytorch 使用示例

pytorch 使用示例

时间:2023-06-14 16:55:37浏览次数:42  
标签:nn val 示例 self torch pytorch train 使用 size

记录通过 PyTorch 编写 CNN 模型的示例,包括训练、模型定义、预测的全流程代码结构;数据采用公开的调制方式识别数据集。代码编写简单,便于进行 PyTorch 学习。

train.py

import os
import numpy as np

import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from multi_scale_module import GoogLeNet
from center_loss import center_loss


# Torch的核心是流行的神经网络和简单易用的优化库
# 使用Torch能在实现复杂的神经网络拓扑结构的时候保持最大的灵活性
# 同时可以使用并行的方式对CPU和GPU进行更有效率的操作。

# tqdm 显示进度条

def main():
    """Train the GoogLeNet-based modulation classifier.

    Loads ``train.npy`` / ``train_label.npy`` from ``data_root``, holds out
    10% for validation, trains with SGD + cross-entropy plus a weighted
    center loss on the auxiliary embedding, and saves the best-accuracy
    weights to ``./multiScaleNet.pth``.
    """
    # Prefer GPU #2 when CUDA is available, otherwise fall back to CPU.
    device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")
    print("using {} device.".format(device))

    # data_root = r'/home/wangchao/location/core/model/multi_scale_2/'
    data_root = r'/home/wc/'

    # Raw samples and labels stored as numpy arrays on disk.
    train_dataset = np.load(os.path.join(data_root, 'train.npy'))
    labels = np.load(os.path.join(data_root, 'train_label.npy'))

    # 90/10 train/validation split, fixed seed for reproducibility.
    x_train, x_test, y_train, y_test = train_test_split(
        train_dataset, labels, test_size=0.1, random_state=0)

    # First column of each label row holds the integer class id.
    # FIX: the original cast train labels with int() but left validation
    # labels as raw numpy scalars; both are cast the same way now.
    train_labels = [int(row[0]) for row in y_train]
    val_labels = [int(row[0]) for row in y_test]

    # Batch size and dataloader worker count.
    batch_size = 128
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # number of workers
    print('Using {} dataloader workers every process'.format(nw))

    # FIX: the original wrapped each tensor in torch.tensor() twice, which
    # copies the data and raises a UserWarning; build each tensor once.
    x = torch.as_tensor(np.asarray(x_train), dtype=torch.float)
    y = torch.as_tensor(np.asarray(train_labels), dtype=torch.long)
    train_dataset = torch.utils.data.TensorDataset(x, y)

    x_val1 = torch.as_tensor(np.asarray(x_test), dtype=torch.float)
    y_val1 = torch.as_tensor(np.asarray(val_labels), dtype=torch.long)
    val_dataset = torch.utils.data.TensorDataset(x_val1, y_val1)

    train_num = len(train_dataset)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size, shuffle=True,
                                               num_workers=nw, drop_last=True)

    # FIX: drop_last=True on the validation loader silently skipped the tail
    # batch while `val_num` still counted those samples, deflating accuracy.
    # Keep every validation sample; the reshape below handles partial batches.
    val_num = len(val_dataset)
    validate_loader = torch.utils.data.DataLoader(val_dataset,
                                                  batch_size=batch_size, shuffle=False,
                                                  num_workers=nw, drop_last=False)

    print("using {} images for training, {} images for validation.".format(train_num,
                                                                           val_num))

    # Model: 11-way classifier; during training the aux output is the 72-d
    # embedding used by the center loss.
    net = GoogLeNet(num_classes=11, aux_logits=True, init_weights=True)
    net.to(device)
    # Loss and optimizer.
    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.003, momentum=0.9)

    epochs = 500      # number of training epochs
    best_acc = 0.0    # best validation accuracy seen so far

    # Checkpoint path for the best-performing weights.
    save_path = './multiScaleNet.pth'

    train_steps = len(train_loader)
    for epoch in range(epochs):
        net.train()
        running_loss = 0.0
        train_bar = tqdm(train_loader)
        for step, data in enumerate(train_bar):
            images, labels = data
            # FIX: use -1 instead of the hard-coded batch size 128 so the
            # reshape also works for any batch size / a partial final batch.
            images = images.reshape(-1, 1024, 2, 1)

            optimizer.zero_grad()

            logits, aux_logits = net(images.to(device))
            aux_logits = torch.squeeze(aux_logits)
            # Total loss = cross-entropy + 0.5 * center loss on the embedding.
            loss0 = loss_function(logits, labels.to(device))
            loss_center = center_loss(aux_logits, labels.to(device), 0.5)
            loss = loss0 + loss_center * 0.5

            loss.backward()
            optimizer.step()

            running_loss += loss.item()

            train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(epoch + 1,
                                                                     epochs,
                                                                     loss)

        # Validation pass: eval mode, no gradients.
        net.eval()
        acc = 0.0
        with torch.no_grad():
            val_bar = tqdm(validate_loader)
            for val_data in val_bar:
                val_images, val_labels_batch = val_data
                val_images = val_images.reshape(-1, 1024, 2, 1)
                outputs = net(val_images.to(device))
                predict_y = torch.max(outputs, dim=1)[1]
                acc += torch.eq(predict_y, val_labels_batch.to(device)).sum().item()

        val_accurate = acc / val_num
        print('[epoch %d] train_loss: %.3f  val_accuracy: %.3f' %
              (epoch + 1, running_loss / train_steps, val_accurate))

        # Keep only the weights with the best validation accuracy.
        if val_accurate > best_acc:
            best_acc = val_accurate
            torch.save(net.state_dict(), save_path)

    print('Finished Training')


# Script entry point: run training when executed directly.
if __name__ == '__main__':
    main()

 

predict.py

import os
import json
import numpy as np
import torch
from tqdm import tqdm

from multi_scale_module import GoogLeNet


def main(validate_loader):
    """Evaluate the trained classifier on ``validate_loader`` and print accuracy.

    Args:
        validate_loader: a DataLoader yielding (images, labels) batches where
            each image flattens to 1024*2 values.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # read class_indict (label index -> class name); kept for reporting use.
    json_path = './class_indices.json'
    assert os.path.exists(json_path), "file: '{}' does not exist.".format(json_path)
    # FIX: use a context manager so the file handle is closed.
    with open(json_path, "r") as json_file:
        class_indict = json.load(json_file)

    # create model (no aux head at inference time)
    model = GoogLeNet(num_classes=11, aux_logits=False).to(device)

    # load model weights
    weights_path = r"E:\python\modulation_identification\core\model\multi_scale_2\multiScaleNet.pth"
    assert os.path.exists(weights_path), "file: '{}' does not exist.".format(weights_path)
    # FIX: the original only asserted the path existed but never loaded the
    # weights, so the model ran with randomly initialized parameters.
    model.load_state_dict(torch.load(weights_path, map_location=device))

    model.eval()
    acc = 0.0
    seen = 0  # FIX: count samples actually evaluated instead of relying on a
              # global `val_num` (which over-counted when drop_last dropped a batch).
    with torch.no_grad():
        # predict class
        val_bar = tqdm(validate_loader)
        for val_data in val_bar:
            val_images, val_labels = val_data
            # FIX: -1 instead of hard-coded batch size 32 handles any batch size.
            val_images = val_images.reshape(-1, 1024, 2, 1)
            outputs = torch.squeeze(model(val_images.to(device))).cpu()
            predicts = torch.max(outputs, dim=1)[1]
            # FIX: compare on CPU — the original compared CPU predictions with
            # labels moved to the device, which fails on CUDA.
            acc += torch.eq(predicts, val_labels).sum().item()
            seen += val_labels.size(0)

    val_accurate = acc / max(seen, 1)
    print('val_accuracy: %.3f' % (val_accurate))

# Script entry point: build the test DataLoader from .npy files, then evaluate.
if __name__ == '__main__':
    data_root = r'E:\python\modulation_identification\data'
    test_dataset = np.load(os.path.join(data_root, 'test1.npy'))
    labels = np.load(os.path.join(data_root, 'test1_label.npy'))

    # First column of each label row holds the integer class id.
    test_labels = torch.as_tensor(np.asarray([int(row[0]) for row in labels]),
                                  dtype=torch.long)

    # FIX: the original built a tensor from a Python list of ndarrays, which
    # is slow and raises a UserWarning; go through one numpy array instead.
    test_set = torch.as_tensor(np.asarray(list(test_dataset)), dtype=torch.float)

    dataset = torch.utils.data.TensorDataset(test_set, test_labels)

    batch_size = 32
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # workers
    # NOTE: kept at module level because main() reads it as a global.
    val_num = len(dataset)
    validate_loader = torch.utils.data.DataLoader(dataset,
                                                  batch_size=batch_size, shuffle=False,
                                                  num_workers=nw, drop_last=True)

    main(validate_loader)

 

model.py

import torch.nn as nn
import torch
import torch.nn.functional as F


class GoogLeNet(nn.Module):
    """Narrow GoogLeNet-style classifier for signal tensors.

    The forward pass alternates two strided conv reductions with two
    Inception stages, then global-average-pools into a two-layer head.
    In training mode the 72-d ``fc1`` embedding is returned alongside the
    logits (used externally, e.g. by a center loss).
    """

    def __init__(self, num_classes=1000, aux_logits=True, init_weights=False):
        super(GoogLeNet, self).__init__()
        self.aux_logits = aux_logits

        # Feature extractor: conv reduction -> inception -> conv reduction -> inception.
        self.conv4 = BasicConv2d(1024, 512, kernel_size=(3, 1), stride=2, padding=(1, 0))
        self.inception3a = Inception(512, 256, 256, 128, 128, 64, 64, 32)
        self.conv5 = BasicConv2d(480, 256, kernel_size=(3, 1), stride=2, padding=(1, 0))
        self.inception3b = Inception(256, 64, 128, 32, 64, 32, 32, 16)

        # Classifier head: global average pool, then 144 -> 72 -> num_classes.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc1 = nn.Linear(144, 72)
        self.fc2 = nn.Linear(72, num_classes)

        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        # Run the feature stages in order.
        for stage in (self.conv4, self.inception3a, self.conv5, self.inception3b):
            x = stage(x)

        # Pool, flatten along channels, and regularize with dropout.
        flat = torch.flatten(self.avgpool(x), 1)
        flat = F.dropout(flat, 0.5, training=self.training)
        embedding = self.fc1(flat)
        # x = F.dropout(embedding, 0.5, training=self.training)
        logits = self.fc2(embedding)

        # Training mode also exposes the intermediate embedding.
        if self.training:
            return logits, embedding
        return logits

    def _initialize_weights(self):
        # Kaiming init for conv layers, small-normal for linear layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)


class Inception(nn.Module):
    """Inception block with four parallel branches concatenated on channels.

    Branch 1 is a plain 1x1 conv; branches 2-4 are a 1x1 bottleneck followed
    by a (k, 1) conv (k = 3, 5, 7) whose padding preserves spatial size.
    """

    def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, ch7x7red, ch7x7):
        super(Inception, self).__init__()

        self.branch1 = BasicConv2d(in_channels, ch1x1, kernel_size=1)

        self.branch2 = nn.Sequential(
            BasicConv2d(in_channels, ch3x3red, kernel_size=1),
            # padding keeps the output height/width equal to the input's
            BasicConv2d(ch3x3red, ch3x3, kernel_size=(3, 1), padding=(1, 0)),
        )

        self.branch3 = nn.Sequential(
            BasicConv2d(in_channels, ch5x5red, kernel_size=1),
            # padding keeps the output size equal to the input size
            BasicConv2d(ch5x5red, ch5x5, kernel_size=(5, 1), padding=(2, 0)),
        )

        self.branch4 = nn.Sequential(
            BasicConv2d(in_channels, ch7x7red, kernel_size=1),
            BasicConv2d(ch7x7red, ch7x7, kernel_size=(7, 1), padding=(3, 0)),
        )

    def _forward(self, x):
        # Apply every branch to the same input.
        branches = (self.branch1, self.branch2, self.branch3, self.branch4)
        return [branch(x) for branch in branches]

    def forward(self, x):
        # Concatenate branch outputs along the channel dimension.
        return torch.cat(self._forward(x), 1)


# 辅助分类器
# Auxiliary classifier head.
class InceptionAux(nn.Module):
    """Auxiliary classifier: pool, flatten, dropout, then one linear layer."""

    def __init__(self, in_channels, num_classes):
        super(InceptionAux, self).__init__()
        # NOTE(review): a 1x1 average pool with stride 1 is an identity op —
        # presumably kept for parity with the reference GoogLeNet; confirm.
        self.averagePool = nn.AvgPool2d(kernel_size=1, stride=1)
        # NOTE(review): self.conv is defined but never applied in forward();
        # the flattened pool output feeds self.fc directly — confirm intent.
        self.conv = BasicConv2d(in_channels, 34, kernel_size=1)
        self.fc = nn.Linear(70176, num_classes)

    def forward(self, x):
        pooled = self.averagePool(x)
        # Flatten everything but the batch dim, regularize, then classify.
        flat = torch.flatten(pooled, 1)
        flat = F.dropout(flat, 0.5, training=self.training)
        return F.relu(self.fc(flat), inplace=True)


# 基础卷积层
class BasicConv2d(nn.Module):
    def __init__(self, in_channels, out_channels, **kwargs):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, **kwargs)
        self.relu = nn.ReLU(inplace=True)
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        x = self.conv(x)
        x = self.relu(x)
        x = self.bn(x)
        return x

 

标签:nn,val,示例,self,torch,pytorch,train,使用,size
From: https://www.cnblogs.com/pass-ion/p/17480752.html

相关文章

  • Java中时间戳的使用
    原文链接当前时间importjava.sql.Timestamp;//导包TimestampnowTime=newTimestamp(System.currentTimeMillis());System.out.println(nowTime);输出:2022-06-0811:15:51.014Long型时间戳LongtimeLong=System.currentTimeMillis();System.out.println("timeLon......
  • 使用Leangoo领歌敏捷项目管理工具自动生成燃尽图
    ​在上一篇,我为大家介绍了如何使用Leangoo领歌敏捷工具管理SprintBacklog_哆啦B梦_的博客-CSDN博客,今天我们一起来看看Leangoo领歌敏捷工具是如何自动生成Scrum燃尽图的什么是Scrum燃尽图?燃尽图是在项目完成之前,对需要完成的工作的一种可视化表示。能形象地展示当前迭代中的剩......
  • 通过使用动态ip解决网络连接问题
     互联网的使用已经成为我们生活中不可或缺的一种生活方式。但是,有时候我们会遇到网络连接问题,如IP地址冲突、网络瘫痪等等。这种情况往往会影响我们的工作,现在我们一般使用动态ip地址来解决这些问题。  一、什么是动态ip地址  动态IP地址是由互联网服务提供商临时分配给......
  • 使用iPhone相机和OpenCV来完成3D重建(第三部分)
    正文字数:4509 阅读时长:2分钟欢迎来到本教程的第三部分,也是最后一部分关于立体重建的教程。Postedby OmarPadierna url: https://medium.com/@omar.ps16/stereo-3d-reconstruction-with-opencv-using-an-iphone-camera-part-iii-95460d3eddf0快速回顾:在第一部分中,我们简要介......
  • 使用iPhone相机和OpenCV来完成3D重建(第一部分)
    正文字数:1497 阅读时长:2分钟这个教程将带你使用自己的手机摄像头和图片实现从零开始到点云。Postedby OmarPadierna https://becominghuman.ai/stereo-3d-reconstruction-with-opencv-using-an-iphone-camera-part-i-c013907d1ab5这是一个由3部分组成的系列文章。我注意到,其......
  • C++ OpenMP、TBB库的简单使用
    1.OpenMP的简单使用OpenMP可以用来并行计算for循环,提高程序运行速度。首先要打开OpenMP支持:“配置属性”——“C/C++”——“语言”——“1.OpenMP支持”后选择“是”。1omp_get_num_procs()//获取系统中处理器的个数2omp_set_num_threads(num_count)/......
  • 如何使用三个有用的工具提升你的React技能
    自2013年发布以来,React.js一直是使用最广泛的JavaScript工具之一,这要归功于其无与伦比的多功能性和效率。在其基于组件的架构和虚拟DOM实现的支持下,React.js在构建现代动态Web应用程序方面表现出色。它使开发人员能够创建可重用的UI元素,从而提高生产力。由于其广泛......
  • 如何使用MySQL存储过程简化数据库操作
    在数据库管理领域,MySQL已成为最受欢迎和最可靠的选择之一。MySQL不仅提供了强大的数据存储能力,还提供了一种称为“过程”的强大功能,使开发人员能够简化复杂的数据库操作。在本教程中,我们将深入研究MySQL过程的概念并探索它们的好处。然后,我将提供有关如何有效使用它们的分步......
  • Java8-Consumer的使用场景
    Java8的Consumer比较抽象。结合几个例子来看看常用的使用场景有以下几个:把方法作为函数的入参Java8中可以使用Consumer来实现在函数的入参中传递方法,这个如果熟悉js的话可能会比较好理解一些。在某些情况下,不能直接使用某个对象的方法,需要把方法传递到另一个函数里面去执行,那么......
  • 云原生之使用docker部署httpd服务
    (云原生之使用docker部署httpd服务)一、检查系统版本[root@node~]#cat/etc/os-releaseNAME="CentOSLinux"VERSION="7(Core)"ID="centos"ID_LIKE="rhelfedora"VERSION_ID="7"PRETTY_NAME="CentOSLinux7(Core)"AN......