# Step 1: read the CSV files (in a loop)
# Step 2: convert the data to tensors
# Step 3: collect the tensors in a list
# Step 4: build the labels
import csv
import os
import sys

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader
from tqdm import tqdm
import matplotlib.pyplot as plt
# Training set
path_train = r"D:\BaiduNetdiskDownload\pear\csv_hebing82\train"
# Sort the listing: os.listdir returns files in arbitrary order, and the
# hard-coded label ranges below assume the samples are grouped by class.
train_files = sorted(os.listdir(path_train))
train_feature_list = []
for name in train_files:
    # Each CSV holds one sample: a single column of feature values.
    with open(os.path.join(path_train, name), 'r') as f:
        # Wrap each value in its own list so one sample has shape (seq_len, 1).
        sample = [[float(row[0])] for row in csv.reader(f)]
    train_feature_list.append(sample)
# Hard-coded class boundaries: the files are grouped by class,
# 6 classes and 1054 training samples in total.
total_train = 1054
train_label = np.zeros(total_train)
train_label[0:135] = 0
train_label[135:288] = 1
train_label[288:368] = 2
train_label[368:593] = 3
train_label[593:867] = 4
train_label[867:1054] = 5
train_label_tensor = torch.tensor(train_label, dtype=torch.long)
train_tensor = torch.tensor(train_feature_list)   # shape: (1054, 765, 1)
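# Note (an addition, not in the original post): torch.tensor() on a deeply
# nested Python list is slow; going through a NumPy array first is the usual
# fix, e.g.:
#   train_tensor = torch.tensor(np.array(train_feature_list, dtype=np.float32))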
# Validation set
path_val = r"D:\BaiduNetdiskDownload\pear\csv_hebing82\val"
val_files = sorted(os.listdir(path_val))
val_feature_list = []
for name in val_files:
    with open(os.path.join(path_val, name), 'r') as f:
        sample = [[float(row[0])] for row in csv.reader(f)]
    val_feature_list.append(sample)
# Same grouped-by-class layout as the training set: 260 validation samples.
total_val = 260
val_label = np.zeros(total_val)
val_label[0:33] = 0
val_label[33:71] = 1
val_label[71:90] = 2
val_label[90:146] = 3
val_label[146:214] = 4
val_label[214:260] = 5
val_label_tensor = torch.tensor(val_label, dtype=torch.long)
val_tensor = torch.tensor(val_feature_list)   # shape: (260, 765, 1)
# Wrap the tensors in DataLoaders
train_dataset = TensorDataset(train_tensor, train_label_tensor)
train_loader = DataLoader(dataset=train_dataset, batch_size=16, shuffle=True)
val_dataset = TensorDataset(val_tensor, val_label_tensor)
val_loader = DataLoader(dataset=val_dataset, batch_size=16, shuffle=False)
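# Sanity check (an addition, not in the original post): one batch should yield
# features of shape (batch, seq_len, 1) and a vector of integer class labels.
xb, yb = next(iter(train_loader))
print(xb.shape, yb.shape)   # expected: torch.Size([16, 765, 1]) torch.Size([16])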
class Attention(nn.Module):
    def __init__(self, embed_dim=1):
        super(Attention, self).__init__()
        # batch_first=True makes the layer read the input as
        # (batch, seq_len, embed_dim); the default layout is
        # (seq_len, batch, embed_dim) and would attend across the batch.
        self.features = nn.MultiheadAttention(embed_dim=embed_dim,
                                              num_heads=1,
                                              batch_first=True)

    def forward(self, input):
        # Self-attention: query, key and value are all the same sequence.
        attn_output, attn_output_weights = self.features(input, input, input)
        return attn_output
class ClassificationHead(nn.Sequential):
    def __init__(self, input_size: int = 765, n_classes: int = 6):
        # input_size = seq_len * embed_dim = 765 * 1 after Flatten
        super().__init__(
            nn.Flatten(),
            nn.BatchNorm1d(input_size),
            nn.Linear(input_size, input_size),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(input_size, 388),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(388, n_classes)
        )
class Module(nn.Sequential):
    def __init__(self,
                 emb_size: int = 1,
                 input_size: int = 765,
                 n_classes: int = 6):
        super().__init__(
            Attention(emb_size),
            ClassificationHead(input_size, n_classes)
        )

models = Module()
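# Shape walk-through for one batch (an added note, assuming the batch_first
# attention above):
#   (16, 765, 1) --Attention--> (16, 765, 1)
#                --Flatten----> (16, 765)
#                --MLP--------> (16, 6) class logits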
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
LR = 0.001
epochs = 150
net = models.to(device)
criterion = nn.CrossEntropyLoss()
# Adam: an adaptive-learning-rate optimizer
optimizer = optim.Adam(
    net.parameters(),
    lr=LR,
)
def train_one_epoch(model, optimizer, data_loader, device, epoch):
    model.train()
    loss_function = torch.nn.CrossEntropyLoss()
    accu_loss = torch.zeros(1).to(device)   # accumulated loss
    accu_num = torch.zeros(1).to(device)    # accumulated number of correct predictions
    optimizer.zero_grad()
    sample_num = 0
    data_loader = tqdm(data_loader, file=sys.stdout)
    for step, data in enumerate(data_loader):
        images, labels = data
        sample_num += images.shape[0]
        pred = model(images.to(device))
        pred = pred.reshape(len(pred), -1)
        pred_classes = torch.max(pred, dim=1)[1]
        accu_num += torch.eq(pred_classes, labels.to(device)).sum()
        loss = loss_function(pred, labels.to(device))
        loss.backward()
        accu_loss += loss.detach()
        data_loader.desc = "[train epoch {}] loss: {:.3f}, acc: {:.3f}, lr: {:.5f}".format(
            epoch + 1,
            accu_loss.item() / (step + 1),
            accu_num.item() / sample_num,
            optimizer.param_groups[0]["lr"]
        )
        if not torch.isfinite(loss):
            print('WARNING: non-finite loss, ending training ', loss)
            sys.exit(1)
        optimizer.step()
        optimizer.zero_grad()
    return accu_loss.item() / (step + 1), accu_num.item() / sample_num
@torch.no_grad()
def evaluate(model, data_loader, device, epoch):
    loss_function = torch.nn.CrossEntropyLoss()
    model.eval()
    accu_num = torch.zeros(1).to(device)    # accumulated number of correct predictions
    accu_loss = torch.zeros(1).to(device)   # accumulated loss
    sample_num = 0
    data_loader = tqdm(data_loader, file=sys.stdout)
    for step, data in enumerate(data_loader):
        images, labels = data
        sample_num += images.shape[0]
        pred = model(images.to(device))
        pred = pred.reshape(len(pred), -1)
        pred_classes = torch.max(pred, dim=1)[1]
        accu_num += torch.eq(pred_classes, labels.to(device)).sum()
        loss = loss_function(pred, labels.to(device))
        accu_loss += loss
        data_loader.desc = "[valid epoch {}] loss: {:.3f}, acc: {:.3f}".format(
            epoch + 1,
            accu_loss.item() / (step + 1),
            accu_num.item() / sample_num
        )
    # Return the same per-batch average loss that the progress bar reports.
    return accu_loss.item() / (step + 1), accu_num.item() / sample_num
train_loss_array = []
train_acc_array = []
val_loss_array = []
val_acc_array = []
for epoch in range(epochs):
    # train
    train_loss, train_acc = train_one_epoch(model=net,
                                            optimizer=optimizer,
                                            data_loader=train_loader,
                                            device=device,
                                            epoch=epoch)
    # validate
    val_loss, val_acc = evaluate(model=net,
                                 data_loader=val_loader,
                                 device=device,
                                 epoch=epoch)
    train_loss_array.append(train_loss)
    train_acc_array.append(train_acc)
    val_loss_array.append(val_loss)
    val_acc_array.append(val_acc)
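# The four arrays above are collected but never used; a minimal sketch of
# plotting them with the matplotlib import at the top (an assumed intent,
# not in the original post):
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.plot(train_loss_array, label='train loss')
ax1.plot(val_loss_array, label='val loss')
ax1.set_xlabel('epoch')
ax1.legend()
ax2.plot(train_acc_array, label='train acc')
ax2.plot(val_acc_array, label='val acc')
ax2.set_xlabel('epoch')
ax2.legend()
plt.show()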
Before the input reaches the attention mechanism, the data should be given a positional encoding. You can use the fixed sinusoidal encoding from the original Transformer, or the learnable positional embedding, updated during training, that ViT uses.
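A minimal sketch of the learnable variant (the module name and its wiring are illustrative assumptions, not part of the code above): the encoding is just a trainable parameter with one offset per sequence position, added to the input before the attention layer.

import torch
import torch.nn as nn

class LearnablePositionalEncoding(nn.Module):
    def __init__(self, seq_len: int = 765, embed_dim: int = 1):
        super().__init__()
        # One trainable offset per position, updated with the rest of the model.
        self.pos = nn.Parameter(torch.zeros(1, seq_len, embed_dim))

    def forward(self, x):
        # x: (batch, seq_len, embed_dim)
        return x + self.pos

# It would slot in ahead of the attention layer, e.g.
#   nn.Sequential(LearnablePositionalEncoding(), Attention(), ClassificationHead())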