
Attention Mechanisms for Feature Extraction on One-Dimensional Data


# Step 1: read the CSV files (in a loop)
# Step 2: convert the data to tensors
# Step 3: collect the tensors in a list
# Step 4: assign labels
import csv
import os
import sys

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from tqdm import tqdm

# Training set
# Root directory of the training CSVs
path_train = r"D:\BaiduNetdiskDownload\pear\csv_hebing82\train" 
train_files = sorted(os.listdir(path_train))  # sort so the label ranges below are deterministic
train_feature_list=[]

for i in train_files:
    # Each CSV holds one sample: a single column of values, one value per row.
    with open(os.path.join(path_train, i), 'r') as f:
        sample = [[float(row[0])] for row in csv.reader(f)]  # (seq_len, 1)
    train_feature_list.append(sample)

    
# Files are grouped by class, so labels can be assigned by index range
total_train = 1054
train_label = np.zeros(total_train)
train_label[0:135] = 0
train_label[135:288] = 1
train_label[288:368] = 2
train_label[368:593] = 3
train_label[593:867] = 4
train_label[867:1054] = 5

train_label_tensor = torch.tensor(train_label, dtype=torch.long)
train_tensor = torch.tensor(train_feature_list, dtype=torch.float32)

# Validation set
path_val = r"D:\BaiduNetdiskDownload\pear\csv_hebing82\val" 
val_files = sorted(os.listdir(path_val))  # sorted for the same reason as above
val_feature_list=[]
for i in val_files:
    with open(os.path.join(path_val, i), 'r') as f:
        sample = [[float(row[0])] for row in csv.reader(f)]  # (seq_len, 1)
    val_feature_list.append(sample)
    

total_val = 260
val_label = np.zeros(total_val)
val_label[0:33] = 0
val_label[33:71] = 1
val_label[71:90] = 2
val_label[90:146] = 3
val_label[146:214] = 4
val_label[214:260] = 5


val_label_tensor = torch.tensor(val_label, dtype=torch.long)
val_tensor = torch.tensor(val_feature_list, dtype=torch.float32)

# Build the DataLoaders
train_dataset = TensorDataset(train_tensor, train_label_tensor)
train_loader = DataLoader(dataset=train_dataset, batch_size=16, shuffle=True)

val_dataset = TensorDataset(val_tensor, val_label_tensor)
val_loader = DataLoader(dataset=val_dataset, batch_size=16, shuffle=False)
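
# Shape check (assuming each CSV holds 765 values, which the classifier head below expects):
# train_tensor: (1054, 765, 1) = (num_samples, seq_len, embed_dim)
# each batch from train_loader: images (16, 765, 1), labels (16,)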



class Attention(nn.Module):
    def __init__(self, embed_dim=1):
        super(Attention, self).__init__()
        # Single-head self-attention. batch_first=True makes the expected
        # input shape (batch, seq_len, embed_dim); without it,
        # nn.MultiheadAttention expects (seq_len, batch, embed_dim).
        self.features = nn.MultiheadAttention(embed_dim=embed_dim,
                                              num_heads=1,
                                              batch_first=True)

    def forward(self, input):
        # Self-attention: query, key and value are all the same input.
        attn_output, attn_output_weights = self.features(input, input, input)
        return attn_output
    
class ClassificationHead(nn.Sequential):
    def __init__(self, input_size: int = 765, n_classes: int = 6):
        super().__init__(
            nn.Flatten(),                    # (batch, 765, 1) -> (batch, 765)
            nn.BatchNorm1d(input_size),
            nn.Linear(input_size, input_size),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(input_size, 388),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(388, n_classes)
        )
        
class Module(nn.Sequential):
    def __init__(self,     
                emb_size: int = 1,
                input_size: int = 765,
                n_classes: int = 6):
        super().__init__(
            Attention(emb_size),
            ClassificationHead(input_size, n_classes)
        )
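
# End-to-end shapes (assuming 765-step sequences as above):
# (batch, 765, 1) -> Attention -> (batch, 765, 1) -> ClassificationHead -> (batch, 6) logits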
models = Module()

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
LR = 0.001
epochs=150
net = models.to(device)
criterion = nn.CrossEntropyLoss()
# Adam adaptive optimizer
optimizer = optim.Adam(
    net.parameters(),
    lr=LR,
)
def train_one_epoch(model, optimizer, data_loader, device, epoch):
    model.train()
    loss_function = torch.nn.CrossEntropyLoss()
    accu_loss = torch.zeros(1).to(device)  # running loss
    accu_num = torch.zeros(1).to(device)   # running count of correct predictions
    optimizer.zero_grad()

    sample_num = 0
    data_loader = tqdm(data_loader, file=sys.stdout)
    for step, data in enumerate(data_loader):
        images, labels = data
        sample_num += images.shape[0]
        pred = model(images.to(device))
        pred = pred.reshape(len(pred), -1)
        pred_classes = torch.max(pred, dim=1)[1]
        accu_num += torch.eq(pred_classes, labels.to(device)).sum()

        loss = loss_function(pred, labels.to(device))
        loss.backward()
        accu_loss += loss.detach()

        data_loader.desc = "[train epoch {}] loss: {:.3f}, acc: {:.3f}, lr: {:.5f}".format(
            epoch+1,
            accu_loss.item() / (step + 1),
            accu_num.item() / sample_num,
            optimizer.param_groups[0]["lr"]
        )

        if not torch.isfinite(loss):
            print('WARNING: non-finite loss, ending training ', loss)
            sys.exit(1)

        optimizer.step()
        optimizer.zero_grad()

    return accu_loss.item() / (step + 1), accu_num.item() / sample_num

@torch.no_grad()  # no gradients are needed during evaluation
def evaluate(model, data_loader, device, epoch):
    loss_function = torch.nn.CrossEntropyLoss()

    model.eval()

    accu_num = torch.zeros(1).to(device)   # running count of correct predictions
    accu_loss = torch.zeros(1).to(device)  # running loss

    sample_num = 0
    data_loader = tqdm(data_loader, file=sys.stdout)
    for step, data in enumerate(data_loader):
        images, labels = data
        sample_num += images.shape[0]

        pred = model(images.to(device))
        pred = pred.reshape(len(pred),-1)
        pred_classes = torch.max(pred, dim=1)[1]
        accu_num += torch.eq(pred_classes, labels.to(device)).sum()

        loss = loss_function(pred, labels.to(device))
        accu_loss += loss

        data_loader.desc = "[valid epoch {}] loss: {:.3f}, acc: {:.3f}".format(
            epoch+1,
            accu_loss.item() / (step + 1),
            accu_num.item() / sample_num
        )

    return accu_loss.item() / (step + 1), accu_num.item() / sample_num

train_loss_array = []
train_acc_array = []
val_loss_array = []
val_acc_array = []
for epoch in range(epochs):
    # train
    train_loss, train_acc = train_one_epoch(model=net,
                                            optimizer=optimizer,
                                            data_loader=train_loader,
                                            device=device,
                                            epoch=epoch)
    # validate
    val_loss, val_acc = evaluate(model=net,
                                 data_loader=val_loader,
                                 device=device,
                                 epoch=epoch)
    train_loss_array.append(train_loss)
    train_acc_array.append(train_acc)

    val_loss_array.append(val_loss)
    val_acc_array.append(val_acc)

Before the data is fed into the attention mechanism, it should be given a positional encoding. You can use the fixed sinusoidal encoding of the original Transformer, or learnable positional embeddings that are updated during training, as in ViT.
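
A minimal sketch of both options (the module names are illustrative; seq_len=765 and embed_dim=1 match the data above, and the sinusoidal version assumes embed_dim is 1 or even; torch and nn are as imported earlier):

import math

class FixedPositionalEncoding(nn.Module):
    """Sinusoidal encoding as in the original Transformer (not trained)."""
    def __init__(self, seq_len=765, embed_dim=1):
        super().__init__()
        pe = torch.zeros(seq_len, embed_dim)
        position = torch.arange(seq_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, embed_dim, 2).float()
                             * (-math.log(10000.0) / embed_dim))
        pe[:, 0::2] = torch.sin(position * div_term)
        if embed_dim > 1:
            pe[:, 1::2] = torch.cos(position * div_term)
        self.register_buffer('pe', pe.unsqueeze(0))  # (1, seq_len, embed_dim)

    def forward(self, x):  # x: (batch, seq_len, embed_dim)
        return x + self.pe[:, :x.size(1)]

class LearnedPositionalEncoding(nn.Module):
    """Learnable positional embeddings as in ViT (updated by the optimizer)."""
    def __init__(self, seq_len=765, embed_dim=1):
        super().__init__()
        self.pe = nn.Parameter(torch.zeros(1, seq_len, embed_dim))

    def forward(self, x):
        return x + self.pe

# Either module would go in front of Attention in the Module class above, e.g.:
# super().__init__(FixedPositionalEncoding(input_size, emb_size),
#                  Attention(emb_size),
#                  ClassificationHead(input_size, n_classes))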

From: https://www.cnblogs.com/kafukasy/p/17378416.html

    文章目录前缀和二维前缀和总结3956.截断数组99.激光炸弹前缀和前缀和是一种常见的算法,用于快速计算数组中某一段区间的和。前缀和的思想就是预处理出数组中前缀和,然后用后缀和减去前缀和,即可快速计算区间和。以一维数组为例,设表示数组中第个元素的值,表示数组中前个元素的......