
Used-Car Price Prediction - Alibaba Cloud Tianchi

Date: 2024-07-28 21:06:30  Views: 7
Tags: nn, used car, self, torch, Alibaba, train, test, Tianchi, data

%matplotlib inline
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset, DataLoader
import pandas as pd
import zipfile  
import re
import numpy as np
import torch
from torch import nn
from matplotlib_inline import backend_inline
import matplotlib.pyplot as plt  
import matplotlib.image as mpimg  
from IPython import display
# Seed the random number generators so results are reproducible
# cpu
# torch.manual_seed(42)

# gpu
if torch.cuda.is_available():  
    torch.cuda.manual_seed(42)
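For fully reproducible runs one would usually also seed Python, NumPy, and make cuDNN deterministic; a minimal sketch (optional, not part of the original pipeline):

# Optional fuller reproducibility setup (sketch; assumes the imports above)
import random
random.seed(42)
np.random.seed(42)
torch.manual_seed(42)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(42)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False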
# Helper function to extract a .zip archive
def unzip_file(zip_filepath, dest_path): 
    with zipfile.ZipFile(zip_filepath, 'r') as zip_ref:  
        zip_ref.extractall(dest_path) 
# Unpack the .zip archives
unzip_file('used_car_train_20200313.zip','./')
unzip_file('used_car_testB_20200421.zip','./')
test_data = pd.read_csv('used_car_testB_20200421.csv', sep=' ')
train_data = pd.read_csv('used_car_train_20200313.csv', sep=' ')
test_data.to_csv('used_car_testB.csv')
train_data.to_csv('used_car_train.csv')
data = pd.concat([train_data, test_data])
data = data.replace('-', '-1')  # '-' placeholders become '-1' so the column can be cast to numeric
data.notRepairedDamage = data.notRepairedDamage.astype('float32')
data.loc[data['power'] > 600, 'power'] = 600  # clip extreme power values at 600
# Categorical vs. continuous columns
cate_cols=['model', 'brand', 'bodyType', 'fuelType', 'gearbox', 'seller', 'notRepairedDamage']
num_cols=['regDate', 'creatDate', 'power', 'kilometer', 'v_0', 'v_1', 'v_2', 'v_3', 'v_4', 'v_5', 'v_6', 'v_7', 'v_8', 'v_9', 'v_10','v_11', 'v_12', 'v_13', 'v_14']
# One-hot encoding helper
def oneHotEncode(df, colNames):
    for col in colNames:
        dummies = pd.get_dummies(df[col], prefix=col)
        df = pd.concat([df, dummies],axis=1)
        df.drop([col], axis=1, inplace=True)
    return df
# Categorical columns: fill missing values, then one-hot encode
for col in cate_cols:
    data[col] = data[col].fillna('-1')
data = oneHotEncode(data, cate_cols)
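As a toy illustration (a hypothetical mini-frame, not part of the pipeline), pd.get_dummies expands one categorical column into indicator columns, which is exactly what oneHotEncode does for every column in cate_cols:

# Toy illustration of what oneHotEncode produces (hypothetical data)
demo = pd.DataFrame({'gearbox': ['0.0', '1.0', '-1']})
print(pd.get_dummies(demo['gearbox'], prefix='gearbox'))
# yields indicator columns gearbox_-1, gearbox_0.0 and gearbox_1.0, with one 1 per row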

# Continuous columns: fill missing values, then min-max scale
for col in num_cols:
    data[col] = data[col].fillna(0)
    data[col] = (data[col]-data[col].min()) / (data[col].max()-data[col].min())
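Each continuous column is min-max scaled to [0, 1]: x' = (x − min) / (max − min). For instance, if a column ranges from 0 to 600 after the earlier clipping, a value of 300 maps to 0.5.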

# Drop columns that are (probably) irrelevant
data.drop(['name', 'regionCode'], axis=1, inplace=True)

data.columns
Index(['SaleID', 'regDate', 'power', 'kilometer', 'offerType', 'creatDate',
       'price', 'v_0', 'v_1', 'v_2',
       ...
       'fuelType_6.0', 'fuelType_-1', 'gearbox_0.0', 'gearbox_1.0',
       'gearbox_-1', 'seller_0', 'seller_1', 'notRepairedDamage_-1.0',
       'notRepairedDamage_0.0', 'notRepairedDamage_1.0'],
      dtype='object', length=336)
# Extract the submission set: the rows with no price label
data=data.reset_index(drop=True)
data = data.astype(float)
test_data = data[pd.isna(data.price)]
X_id=test_data['SaleID']
del test_data['SaleID']
del test_data['price']
X_result=torch.tensor(test_data.values, dtype=torch.float32)
test_data.to_csv('one_hot_testB.csv') 
# Extract the training set: the rows with a known price
train_data = data.drop(data[pd.isna(data.price)].index)
train_data.to_csv('one_hot_train.csv') 
y=train_data['price']
del train_data['price']
del train_data['SaleID']
X=torch.tensor(train_data.values, dtype=torch.float32)
y=torch.Tensor(y)
X=X.reshape(-1,334)
y=y.reshape(-1,1)
# Hold out 25% of the labelled rows for validation
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=512)
X_train, X_test, y_train, y_test = torch.Tensor(X_train), torch.Tensor(X_test), torch.Tensor(y_train), torch.Tensor(y_test)
# TensorDataset packs feature and label tensors into a single dataset
train_dataset = TensorDataset(X_train, y_train)  
test_dataset = TensorDataset(X_test, y_test)   
# DataLoader is an iterable that yields mini-batches from a TensorDataset
train_iter = DataLoader(train_dataset, batch_size=512, shuffle=True,num_workers=3)  
test_iter = DataLoader(test_dataset, batch_size=512, shuffle=False,num_workers=3)  
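A quick sanity check of the loaders (illustrative only, not in the original) is to pull one mini-batch and inspect its shapes:

Xb, yb = next(iter(train_iter))
print(Xb.shape, yb.shape)   # expected: torch.Size([512, 334]) torch.Size([512, 1])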
X_train.shape,y_train.shape
(torch.Size([112500, 334]), torch.Size([112500, 1]))
net = nn.Sequential(
            nn.BatchNorm1d(334),
            nn.Linear(334, 568),
            nn.BatchNorm1d(568),
            nn.ReLU(),
            nn.Linear(568, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(),
            nn.Linear(256, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(),
            nn.Linear(256,256),
            nn.BatchNorm1d(256),
            nn.ReLU(),
            nn.Linear(256,256),
            nn.BatchNorm1d(256),
            nn.ReLU(),
            nn.Linear(256,128),
            nn.BatchNorm1d(128),
            nn.ReLU(),
            nn.Linear(128,1))
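To get a rough sense of model capacity, one can count the trainable parameters of the MLP above (an illustrative check, not in the original code):

# Count trainable parameters of the network defined above
num_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
print(f'{num_params:,} trainable parameters')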

def init_weights(m):
    if type(m) == nn.Linear:
        nn.init.xavier_uniform_(m.weight)

net.apply(init_weights);
def try_gpu(i=0):
    if torch.cuda.device_count() >= i + 1:
        return torch.device(f'cuda:{i}')
    return torch.device('cpu')
def use_svg_display():
    backend_inline.set_matplotlib_formats('svg')
class Accumulator:
    """在n个变量上累加"""
    def __init__(self, n):
        self.data = [0.0] * n

    def add(self, *args):
        self.data = [a + float(b) for a, b in zip(self.data, args)]

    def reset(self):
        self.data = [0.0] * len(self.data)  # reset all accumulated values back to 0.0

    def __getitem__(self, idx):
        # __getitem__ lets an instance be indexed: acc[i] returns the i-th accumulated value
        return self.data[idx]
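A tiny usage sketch (not in the original) of how Accumulator is used below:

acc = Accumulator(2)
acc.add(3.0, 1)          # absolute-error sum 3.0 over 1 sample
acc.add(5.0, 1)
print(acc[0] / acc[1])   # running mean absolute error: 4.0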
def evaluate_accuracy(net, device, loss, data_iter):
    """Evaluate the model on data_iter and return its mean absolute error."""
    net.eval()
    metric = Accumulator(2)  # sum of absolute errors, number of samples
    with torch.no_grad():
        for X, y in data_iter:
            X = X.to(device)
            y = y.to(device)
            # accumulate the batch's absolute-error sum and sample count
            metric.add(abs(net(X) - y).sum().item(), y.numel())  # y.numel() is the batch size
    return metric[0] / metric[1]
# .item() converts a one-element tensor to a Python scalar
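Dividing the accumulated absolute-error sum by the sample count gives the mean absolute error, MAE = (1/n) · Σ|ŷᵢ − yᵢ|, the natural metric for this regression task (and, if I recall correctly, the metric used to score this Tianchi competition).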
# Training loop for a single epoch
def train_epoch_ch3(net, device, train_iter, loss, updater):
    """Train the model for one epoch and return its mean absolute training error."""
    # Put the model in training mode
    net.train()
    # sum of absolute errors, number of samples
    metric = Accumulator(2)
    for X, y in train_iter:
        # forward pass, then compute gradients and update the parameters
        X = X.to(device)
        y = y.to(device)
        y_hat = net(X)
        l = loss(y_hat, y)
        updater.zero_grad()
        l.backward()
        updater.step()
        metric.add(abs(y_hat - y).sum().item(), y.numel())  # accumulate absolute error and sample count
    # return the mean absolute error over the epoch
    return metric[0] / metric[1]
def set_axes(axes,xlabel,ylabel,xlim,ylim,xscale,yscale,legend):
    axes.set_xlabel(xlabel)
    axes.set_ylabel(ylabel)
    axes.set_xscale(xscale)
    axes.set_yscale(yscale)
    axes.set_xlim(xlim)
    axes.set_ylim(ylim)
    if legend:
        axes.legend(legend)
    axes.grid()
class Animator:
    """在动画中绘制数据"""
    def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,
                 ylim=None, xscale='linear', yscale='linear',
                 fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1,
                 figsize=(7, 5)):
        # Draw multiple curves incrementally
        if legend is None:
            legend = []
        use_svg_display()
        self.fig, self.axes = plt.subplots(nrows, ncols, figsize=figsize)
        if nrows * ncols == 1:
            self.axes = [self.axes, ]
        # Use a lambda to capture the axis configuration arguments
        self.config_axes = lambda: set_axes(
            self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
        self.X, self.Y, self.fmts = None, None, fmts

    def add(self, x, y):
        # Append one or more data points to the figure
        if not hasattr(y, "__len__"):
            y = [y]
        n = len(y)
        if not hasattr(x, "__len__"):
            x = [x] * n
        if not self.X:
            self.X = [[] for _ in range(n)]
        if not self.Y:
            self.Y = [[] for _ in range(n)]
        for i, (a, b) in enumerate(zip(x, y)):
            if a is not None and b is not None:
                self.X[i].append(a)
                self.Y[i].append(b)
        self.axes[0].cla()
        for x, y, fmt in zip(self.X, self.Y, self.fmts):
            self.axes[0].plot(x, y, fmt)
        self.config_axes()
        display.display(self.fig)
        display.clear_output(wait=True)
def train_ch3(net, device,train_iter, test_iter, loss, num_epochs, updater):
    net.to(device)
    animator = Animator(xlabel='epoch', xlim=[1, num_epochs],
                        legend=['train loss',  'test loss'])
    for epoch in range(num_epochs):
        train_loss = train_epoch_ch3(net, device,train_iter, loss, updater)
        test_loss = evaluate_accuracy(net, device,loss,test_iter)
        animator.add(epoch + 1, (train_loss,)+ (test_loss,))
    return train_loss,test_loss
def predict_ch3(net, X_result):
    """Run inference on the submission features (uses the global `device`)."""
    net.eval()
    X_result = X_result.to(device)
    with torch.no_grad():
        y_hat = net(X_result)
    return y_hat
# Training
lr, num_epochs =  0.01, 150
loss = nn.MSELoss()
trainer = torch.optim.Adam(net.parameters(), lr=lr)
device=try_gpu()

train_loss,test_loss=train_ch3(net, device,train_iter, test_iter, loss, num_epochs, trainer)
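One detail worth noting: the network is optimized with nn.MSELoss, while the curves below report mean absolute error; switching to nn.L1Loss would optimize MAE directly, but that is a possible variation rather than what this notebook does.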

[Figure: train-loss and test-loss curves over the 150 training epochs]

pred=predict_ch3(net,X_result)
pred=pred.to('cpu')
pred = pred.detach().numpy()  # detach from the autograd graph and convert the tensor to a NumPy array
res=pd.DataFrame(pred, columns=['price']) 
X_id=X_id.reset_index(drop=True)
submission = pd.concat([X_id, res['price']], axis=1)
submission.to_csv('submission.csv',index=False)
train_loss,test_loss
(518.0879554166667, 586.9690177083334)
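In other words, the final model's mean absolute error is roughly 518 on the training split and 587 on the held-out validation split.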

From: https://www.cnblogs.com/1019-Yan/p/18328872
