
Kaggle house price prediction


import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
import hashlib
import tarfile
import zipfile
import requests
import numpy as np
import pandas as pd
import torch
from torch import nn
from d2l import torch as d2l

DATA_HUB = dict()
DATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/'

def download(name, cache_dir=os.path.join('..', 'data')):
    """下载一个DATA_HUB中的文件,返回本地文件名"""
    assert name in DATA_HUB, f"{name} 不存在于 {DATA_HUB}"
    url, sha1_hash = DATA_HUB[name]
    os.makedirs(cache_dir, exist_ok=True)
    fname = os.path.join(cache_dir, url.split('/')[-1])
    if os.path.exists(fname):
        sha1 = hashlib.sha1()
        with open(fname, 'rb') as f:
            while True:
                data = f.read(1048576)
                if not data:
                    break
                sha1.update(data)
        if sha1.hexdigest() == sha1_hash:
            return fname  # cache hit
    print(f'Downloading {fname} from {url}...')
    r = requests.get(url, stream=True, verify=True)
    with open(fname, 'wb') as f:
        f.write(r.content)
    return fname

def download_extract(name, folder=None):
    """下载并解压zip/tar文件"""
    fname = download(name)
    base_dir = os.path.dirname(fname)
    data_dir, ext = os.path.splitext(fname)
    if ext == '.zip':
        fp = zipfile.ZipFile(fname, 'r')
    elif ext in ('.tar', '.gz'):
        fp = tarfile.open(fname, 'r')
    else:
        assert False, 'Only zip/tar files can be extracted'
    fp.extractall(base_dir)
    return os.path.join(base_dir, folder) if folder else data_dir

def download_all():
    """下载DATA_HUB中的所有文件"""
    for name in DATA_HUB:
        download(name)

DATA_HUB['kaggle_house_train'] = (
    DATA_URL + 'kaggle_house_pred_train.csv',
    '585e9cc93e70b39160e7921475f9bcd7d31219ce')

DATA_HUB['kaggle_house_test'] = (
    DATA_URL + 'kaggle_house_pred_test.csv',
    'fa19780a7b011d9b009e8bff8e99922a8ee2eb90')

train_data = pd.read_csv(download('kaggle_house_train'))
test_data = pd.read_csv(download('kaggle_house_test'))
print(train_data.shape)
print(test_data.shape)

# Take a quick look at the data
print(train_data.iloc[0:4, [0, 1, 2, 3, -3, -2, -1]])
# Drop the Id column, then concatenate the training and test features
all_features = pd.concat((train_data.iloc[:,1:-1],test_data.iloc[:,1:]))
# If test data were unavailable, the mean and standard deviation could be computed from the training data alone
# Find all numeric feature columns
numeric_features = all_features.dtypes[all_features.dtypes!='object'].index
# Standardize the numeric columns to zero mean and unit variance
all_features[numeric_features] = all_features[numeric_features].apply(
    lambda x:((x-x.mean())/(x.std()))
)
# After standardization every mean is zero, so missing values can simply be set to 0
all_features[numeric_features] = all_features[numeric_features].fillna(0)
# dummy_na=True treats NA as a valid category and creates an indicator feature for it;
# discrete values are replaced with one-hot encodings. dtype=float keeps the indicator
# columns numeric (recent pandas versions would otherwise create bool columns, which
# breaks the torch.tensor conversion below)
all_features = pd.get_dummies(all_features, dummy_na=True, dtype=float)
shape = all_features.shape
print(shape)
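
# To make the preprocessing above concrete, here is a minimal sketch on a toy
# DataFrame (the column names 'Area' and 'Street' are made up for illustration,
# not taken from the Kaggle data): numeric columns are standardized and NA-filled
# with 0, and the categorical column is expanded into one-hot indicator columns,
# including an extra indicator for NaN.
toy = pd.DataFrame({'Area': [100.0, 200.0, np.nan],
                    'Street': ['Pave', np.nan, 'Grvl']})
toy_numeric = toy.dtypes[toy.dtypes != 'object'].index
toy[toy_numeric] = toy[toy_numeric].apply(lambda x: (x - x.mean()) / x.std())
toy[toy_numeric] = toy[toy_numeric].fillna(0)
toy = pd.get_dummies(toy, dummy_na=True, dtype=float)
print(toy)  # columns: Area, Street_Grvl, Street_Pave, Street_nan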

# Number of training examples
n_train = train_data.shape[0]
print('n_train:',n_train)
# Split the combined features back into train and test
train_features = torch.tensor(all_features[:n_train].values,dtype=torch.float32)
test_features = torch.tensor(all_features[n_train:].values,dtype=torch.float32)
train_labels = torch.tensor(train_data.SalePrice.values.reshape(-1,1),dtype=torch.float32)

# Loss function
loss = nn.MSELoss()
# Number of input features (331 columns after preprocessing)
in_features = train_features.shape[1]

def get_net():
    # A single-layer linear regression model
    net = nn.Sequential(nn.Linear(in_features,1))
    return net
# Compute the relative error on a log scale
def log_rmse(net,features,labels):
    # To further stabilize the value when taking the logarithm, set values smaller than 1 to 1
    clipped_preds = torch.clamp(net(features),1,float('inf'))
    rmse = torch.sqrt(loss(torch.log(clipped_preds),torch.log(labels)))
    return rmse.item()
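
# The metric above is the RMSE between log-prices, sqrt(mean((log y_hat - log y)^2)),
# so it measures relative rather than absolute error. A quick sanity check with
# made-up numbers (not from the competition data): over-predicting a 100k house by
# 10k and a 200k house by 20k contributes the same error, since both are 10% off.
example_preds = torch.tensor([[110000.0], [220000.0]])
example_labels = torch.tensor([[100000.0], [200000.0]])
print(torch.sqrt(loss(torch.log(example_preds), torch.log(example_labels))))  # ~0.0953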

def train(net,train_features,train_labels,test_features,test_labels,
          num_epochs,learning_rate,weight_decay,batch_size):
    train_ls,test_ls = [],[]
    train_iter = d2l.load_array((train_features,train_labels),batch_size)
    print('train_iter:',train_iter)
    # Use the Adam optimizer, which is less sensitive to the learning rate
    optimizer = torch.optim.Adam(
        net.parameters(),
        lr=learning_rate,
        weight_decay=weight_decay
    )
    for epoch in range(num_epochs):
        for x,y in train_iter:
            # print('x is:', x)
            # print('y is:', y)
            optimizer.zero_grad()
            l=loss(net(x),y)
            l.backward()
            optimizer.step()
        train_ls.append(log_rmse(net,train_features,train_labels))
        if test_labels is not None:
            test_ls.append(log_rmse(net,test_features,test_labels))
    print('train_ls:',train_ls)
    print('test_ls:',test_ls)
    return train_ls,test_ls

# k-fold cross-validation: return the training and validation data for fold i
def get_k_fold_data(k,i,x,y):
    assert k>1
    # // is integer (floor) division
    fold_size = x.shape[0] // k
    x_train,y_train = None,None
    for j in range(k):
        # Slice for the j-th fold
        idx = slice(j *fold_size,(j+1)*fold_size)
        print('idx:',idx)
        x_part,y_part = x[idx,:],y[idx]
        print('x_part:',x_part)
        print('y_part:',y_part)
        # Fold i is held out as the validation set
        if j==i:
            x_valid,y_valid = x_part,y_part
        # Merge the remaining folds into the training set
        elif x_train is None:
            x_train,y_train = x_part,y_part
        else:
            x_train = torch.cat([x_train,x_part],0)
            y_train = torch.cat([y_train,y_part],0)
    print('x_train:',x_train)
    print('y_train:',y_train)
    print('x_valid:',x_valid)
    print('y_valid:',y_valid)
    # Return the training set and the validation set
    return x_train,y_train,x_valid,y_valid
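
# A small sanity check of the split logic with made-up data (10 samples, 2 features,
# k=5): fold i is held out for validation and the remaining folds are concatenated
# into the training set, so the shapes should be 8 vs. 2 rows.
toy_x = torch.arange(20, dtype=torch.float32).reshape(10, 2)
toy_y = torch.arange(10, dtype=torch.float32).reshape(10, 1)
tx, ty, vx, vy = get_k_fold_data(5, 0, toy_x, toy_y)
print(tx.shape, vx.shape)  # torch.Size([8, 2]) torch.Size([2, 2])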

# Run k-fold cross-validation
def k_fold(k,x_train,y_train,num_epochs,learning_rate,weight_decay,batch_size):
    train_l_sum,valid_l_sum=0,0
    for i in range(k):
        # Get the data for fold i (training and validation splits)
        data = get_k_fold_data(k,i,x_train,y_train)
        # Initialize the network
        net = get_net()
        train_ls,valid_ls = train(net,*data,num_epochs,learning_rate,weight_decay,batch_size)
        train_l_sum += train_ls[-1]
        valid_l_sum += valid_ls[-1]
        if i == 0:
            d2l.plot(list(range(1, num_epochs + 1)), [train_ls, valid_ls],
                     xlabel='epoch', ylabel='rmse', xlim=[1, num_epochs],
                     legend=['train', 'valid'], yscale='log')
        print(f'Fold {i + 1}, train log rmse {float(train_ls[-1]):f}, '
              f'valid log rmse {float(valid_ls[-1]):f}')
    # Sum the training and validation losses, then average over the k folds
    return train_l_sum / k, valid_l_sum / k


k, num_epochs, lr, weight_decay, batch_size = 5, 100, 5, 0, 64
train_l, valid_l = k_fold(k, train_features, train_labels, num_epochs, lr,
                          weight_decay, batch_size)
print(f'{k}-fold validation: average train log rmse: {float(train_l):f}, '
      f'average valid log rmse: {float(valid_l):f}')

# Submit predictions to Kaggle

def train_and_pred(train_features,test_features,train_labels,test_data,num_epochs,lr,weight_decay,batch_size):
    net = get_net()
    train_ls,_=train(net,train_features,train_labels,None,None,
                     num_epochs,lr,weight_decay,batch_size)
    print(f'Train log rmse: {float(train_ls[-1]):f}')
    # Apply the network to the test set
    preds = net(test_features).detach().numpy()
    # Reformat the predictions for export to Kaggle
    test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
    submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)
    submission.to_csv('submission.csv', index=False)

train_and_pred(train_features, test_features, train_labels, test_data,
               num_epochs, lr, weight_decay, batch_size)
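
# A quick check of the generated file (assuming submission.csv was written to the
# current working directory by train_and_pred above): Kaggle expects exactly two
# columns, Id and SalePrice, with one row per test example.
print(pd.read_csv('submission.csv').head())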

 

From: https://www.cnblogs.com/jinbb/p/17609345.html
