
Fully-Connected Classification of One-Dimensional Data (CSV Files)


Given CSV files like this, the task is to classify the one-dimensional data (and handle similar tasks) with a fully-connected network.
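The screenshot that illustrated the CSV in the original post is not reproduced here. Judging from the loading code below, each file is assumed to hold one feature value per row in its first column, roughly like this (the numbers are placeholders for illustration):

0.1325
0.0871
0.4410
...

One file corresponds to one sample, and every file is expected to contain the same number of rows (765, matching the input size of the network defined later).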


Imports
# Step 1: read the CSV files (one per sample, in a loop)
# Step 2: convert the data to tensors
# Step 3: collect the tensors in a list
# Step 4: assign labels
import csv
import os
import sys

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader
from tqdm import tqdm
import matplotlib.pyplot as plt
Read the training set
# Training set
# Directory that holds one CSV file per training sample
path_train = r"D:\BaiduNetdiskDownload\pear\浅层特征\csv_hebing82\train"
train_files = sorted(os.listdir(path_train))   # sort so the label ranges below line up with file order
train_feature_list = []
for name in train_files:
    with open(os.path.join(path_train, name), 'r') as f:
        # first column of every row -> one float feature
        train_feature_list.append([float(row[0]) for row in csv.reader(f)])

total_train = 1054
train_label = np.zeros(total_train)    # assign labels by index range
train_label[0:135] = 0
train_label[135:288] = 1
train_label[288:368] = 2
train_label[368:593] = 3
train_label[593:867] = 4
train_label[867:1054] = 5
train_label_tensor = torch.tensor(train_label, dtype=torch.long)

train_tensor = torch.tensor(train_feature_list, dtype=torch.float32)
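As a quick sanity check (not in the original post): assuming 1054 training files of 765 values each, the shapes should come out as follows.

print(train_tensor.shape)        # expected: torch.Size([1054, 765])
print(train_label_tensor.shape)  # expected: torch.Size([1054])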

Read the validation set

# Validation set
# Directory that holds one CSV file per validation sample
path_val = r"D:\BaiduNetdiskDownload\pear\浅层特征\csv_hebing82\val"
val_files = sorted(os.listdir(path_val))
val_feature_list = []
for name in val_files:
    with open(os.path.join(path_val, name), 'r') as f:
        val_feature_list.append([float(row[0]) for row in csv.reader(f)])

total_val = 260
val_label = np.zeros(total_val)
val_label[0:33] = 0
val_label[33:71] = 1
val_label[71:90] = 2
val_label[90:146] = 3
val_label[146:214] = 4
val_label[214:260] = 5
val_label_tensor = torch.tensor(val_label, dtype=torch.long)

val_tensor = torch.tensor(val_feature_list, dtype=torch.float32)
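The training and validation loops above are nearly identical, so they could be replaced by a small helper. load_features below is a name introduced here, not something from the original post; it is just a sketch of the same logic.

def load_features(folder):
    # Read every CSV in `folder` (sorted by filename) and return one list of
    # float features per file, taking the first column of each row.
    samples = []
    for name in sorted(os.listdir(folder)):
        with open(os.path.join(folder, name), 'r') as f:
            samples.append([float(row[0]) for row in csv.reader(f)])
    return samples

# train_feature_list = load_features(path_train)
# val_feature_list = load_features(path_val)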
Build the DataLoaders
# Wrap the tensors in datasets and batch them
train_dataset = TensorDataset(train_tensor, train_label_tensor)
train_loader = DataLoader(dataset=train_dataset, batch_size=32, shuffle=True)

val_dataset = TensorDataset(val_tensor, val_label_tensor)
val_loader = DataLoader(dataset=val_dataset, batch_size=32, shuffle=False)
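To confirm the loaders deliver what the network expects, one batch can be inspected (an added check, not part of the original post):

features, labels = next(iter(train_loader))
print(features.shape, features.dtype)   # expected: torch.Size([32, 765]) torch.float32
print(labels.shape, labels.dtype)       # expected: torch.Size([32]) torch.int64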
Define the fully-connected network
# Fully-connected network: 765 input features -> 6 classes
models = torch.nn.Sequential(
    torch.nn.BatchNorm1d(765),   # normalize the raw input features
    torch.nn.Linear(765, 388),
    torch.nn.ReLU(),
    torch.nn.Dropout(0.5),
    torch.nn.Linear(388, 388),
    torch.nn.ReLU(),
    torch.nn.Dropout(0.5),
    torch.nn.Linear(388, 6)
)
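The network maps a 765-dimensional feature vector to 6 class logits, with BatchNorm1d normalizing the raw inputs first. A quick forward pass on random data (an added sanity check, run before the model is moved to the GPU below) should return one 6-dimensional row of logits per sample:

models.eval()                 # eval mode so this check does not touch the BatchNorm running stats
dummy = torch.randn(4, 765)   # a batch of 4 fake samples
print(models(dummy).shape)    # expected: torch.Size([4, 6])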
Device, loss function, and optimizer
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
LR = 0.001
epochs = 150
net = models.to(device)
criterion = nn.CrossEntropyLoss()
# Use the Adam adaptive optimizer
optimizer = optim.Adam(
    net.parameters(),
    lr=LR,
)
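train_one_epoch below contains a commented-out lr_scheduler.step() call. If a learning-rate schedule is wanted, one simple option (a sketch, not part of the original post) is StepLR, stepped once per epoch in the training loop:

lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.1)  # decay LR by 10x every 50 epochs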
Training function
def train_one_epoch(model, optimizer, data_loader, device, epoch):
    model.train()
    loss_function = torch.nn.CrossEntropyLoss()
    accu_loss = torch.zeros(1).to(device)  # accumulated loss
    accu_num = torch.zeros(1).to(device)   # accumulated number of correct predictions
    optimizer.zero_grad()

    sample_num = 0
    data_loader = tqdm(data_loader, file=sys.stdout)
    for step, data in enumerate(data_loader):
        images, labels = data
        sample_num += images.shape[0]
        pred = model(images.to(device))
        pred_classes = torch.max(pred, dim=1)[1]
        accu_num += torch.eq(pred_classes, labels.to(device)).sum()

        loss = loss_function(pred, labels.to(device))
        loss.backward()
        accu_loss += loss.detach()

        data_loader.desc = "[train epoch {}] loss: {:.3f}, acc: {:.3f}, lr: {:.5f}".format(
            epoch + 1,
            accu_loss.item() / (step + 1),
            accu_num.item() / sample_num,
            optimizer.param_groups[0]["lr"]
        )

        if not torch.isfinite(loss):
            print('WARNING: non-finite loss, ending training ', loss)
            sys.exit(1)

        optimizer.step()
        optimizer.zero_grad()
        # update lr
        # lr_scheduler.step()

    return accu_loss.item() / (step + 1), accu_num.item() / sample_num
Validation function
@torch.no_grad()  # no gradients are needed during validation
def evaluate(model, data_loader, device, epoch):
    loss_function = torch.nn.CrossEntropyLoss()

    model.eval()

    accu_num = torch.zeros(1).to(device)   # accumulated number of correct predictions
    accu_loss = torch.zeros(1).to(device)  # accumulated loss

    sample_num = 0
    data_loader = tqdm(data_loader, file=sys.stdout)
    for step, data in enumerate(data_loader):
        images, labels = data
        sample_num += images.shape[0]

        pred = model(images.to(device))
        pred_classes = torch.max(pred, dim=1)[1]
        accu_num += torch.eq(pred_classes, labels.to(device)).sum()

        loss = loss_function(pred, labels.to(device))
        accu_loss += loss

        data_loader.desc = "[valid epoch {}] loss: {:.3f}, acc: {:.3f}".format(
            epoch + 1,
            accu_loss.item() / (step + 1),
            accu_num.item() / sample_num
        )

    # return the same average loss that the progress bar reports
    return accu_loss.item() / (step + 1), accu_num.item() / sample_num

Run the training loop
train_loss_array = []
train_acc_array = []
val_loss_array = []
val_acc_array = []
for epoch in range(epochs):
    # train
    train_loss, train_acc = train_one_epoch(model=net,
                                            optimizer=optimizer,
                                            data_loader=train_loader,
                                            device=device,
                                            epoch=epoch)

    # validate
    val_loss, val_acc = evaluate(model=net,
                                 data_loader=val_loader,
                                 device=device,
                                 epoch=epoch)

    train_loss_array.append(train_loss)
    train_acc_array.append(train_acc)
    val_loss_array.append(val_loss)
    val_acc_array.append(val_acc)
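matplotlib is imported at the top but never used in the original post, presumably because the curves were plotted separately. A minimal sketch for visualizing the recorded loss and accuracy:

epochs_range = range(1, epochs + 1)
plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, train_loss_array, label='train loss')
plt.plot(epochs_range, val_loss_array, label='val loss')
plt.xlabel('epoch')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_acc_array, label='train acc')
plt.plot(epochs_range, val_acc_array, label='val acc')
plt.xlabel('epoch')
plt.legend()
plt.show()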

From: https://www.cnblogs.com/kafukasy/p/17378404.html
