【学习记录】Kaggle房价预测

问题描述:

        让购房者描述他们梦想中的房子时,他们可能不会从地下室天花板的高度或者与铁路的距离来考虑。但是这个数据集包含了许多影响房价的因素:79 个解释变量(几乎)完整地描述了爱荷华州艾姆斯市住宅的各个方面。

数据集下载:

House Prices - Advanced Regression Techniques | Kaggle

实现代码:

读取数据文件和对数据处理

import numpy as np
import pandas as pd
import torch 
from torch import nn 
from d2l import torch as d2l

train_data = pd.read_csv('datasets/house-prices/train.csv')

test_data =  pd.read_csv('datasets/house-prices/test.csv')

# Peek at the first four rows: the first four feature columns plus the
# last three columns (which include the SalePrice label).
train_data.iloc[0:4,[0,1,2,3,-3,-2,-1]]

# Drop the Id column (first column) and, for the training frame, the
# SalePrice label (last column); then stack train and test so both get
# identical preprocessing below.
all_features = pd.concat((train_data.iloc[:,1:-1],test_data.iloc[:,1:]))
all_features

数据预处理:

        将缺失值替换为平均值

        离散值用独热编码来替换

        数据转换为张量,为训练做准备


# Standardize the numeric columns. NOTE(review): statistics here are
# computed over train+test combined; if the test data were unavailable,
# the mean and std would have to come from the training data alone.
numeric_features = all_features.dtypes[all_features.dtypes != 'object'].index

all_features[numeric_features] = all_features[numeric_features].apply(
    lambda x: (x - x.mean()) / (x.std())
)
# After standardization every numeric column has zero mean, so missing
# values can simply be imputed with 0.
all_features[numeric_features] = all_features[numeric_features].fillna(0)


# One-hot encode the discrete (object-dtype) features such as "MSZoning".
# dummy_na=True treats NaN as a valid category with its own indicator
# column. dtype=float forces numeric indicator columns: recent pandas
# versions emit bool dummies by default, which would later break
# torch.tensor(...) on the mixed-dtype frame.
all_features = pd.get_dummies(all_features, dummy_na=True, dtype=float)
all_features.shape


# Convert the preprocessed frames into float32 tensors for training.
n_train = train_data.shape[0]
# to_numpy(dtype=...) guards against object/bool columns (e.g. bool dummy
# columns produced by pd.get_dummies on recent pandas), which
# torch.tensor would otherwise reject.
train_features = torch.tensor(all_features[:n_train].to_numpy(dtype=np.float32))
test_features = torch.tensor(all_features[n_train:].to_numpy(dtype=np.float32))

# Labels as an (n_train, 1) column vector.
train_labels = torch.tensor(
    train_data.SalePrice.values.reshape(-1, 1), dtype=torch.float32
)

定义模型和定义损失函数


# Squared-error loss used during optimization; the reported metric is
# log-RMSE, computed separately by log_rmse below.
loss = nn.MSELoss()

# One model input per (post-preprocessing) feature column.
in_features = train_features.shape[1]

def get_model():
    """Build the baseline model: a single linear layer (in_features -> 1)."""
    return nn.Sequential(nn.Linear(in_features, 1))

# Evaluation metric: RMSE between the logs of predictions and labels.
def log_rmse(model, features, labels):
    """Return the log-RMSE of ``model`` on (features, labels) as a float.

    Predictions are clamped to a minimum of 1 so that log() stays finite
    even if the network outputs values <= 0.
    """
    clipped_preds = torch.clamp(model(features), 1, float('inf'))
    # Inline mean-squared error instead of the module-level `loss` object,
    # so the metric is self-contained (same result as nn.MSELoss()).
    rmse = torch.sqrt(
        nn.functional.mse_loss(torch.log(clipped_preds), torch.log(labels)))
    return rmse.item()

训练函数(Adam优化器)

def train(model, train_features, train_labels, test_features,
          test_labels, num_epochs, learning_rate, weight_decay,
          batch_size):
    """Train ``model`` with Adam and record per-epoch log-RMSE curves.

    Returns (train_ls, test_ls); ``test_ls`` stays empty when
    ``test_labels`` is None (e.g. when training on the full dataset).
    """
    train_ls, test_ls = [], []
    data_iter = d2l.load_array((train_features, train_labels), batch_size)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=learning_rate,
                                 weight_decay=weight_decay)

    for _ in range(num_epochs):
        for batch_x, batch_y in data_iter:
            optimizer.zero_grad()
            loss(model(batch_x), batch_y).backward()
            optimizer.step()
        # Epoch-level metric on the full training set.
        train_ls.append(log_rmse(model, train_features, train_labels))
        if test_labels is not None:
            test_ls.append(log_rmse(model, test_features, test_labels))

    return train_ls, test_ls

验证函数(K折交叉验证)

# K-fold cross-validation: build the train/validation split for round i.
def get_k_fold_data(k, i, x, y):
    """Return (x_train, y_train, x_valid, y_valid) for fold ``i`` of ``k``.

    Fold ``i`` becomes the validation set; the remaining folds are
    concatenated into the training set. The last fold absorbs the
    remainder rows when x.shape[0] is not divisible by k — the original
    code silently dropped those rows from every split.
    """
    assert k > 1
    n = x.shape[0]
    fold_size = n // k
    x_train, y_train = None, None
    x_valid, y_valid = None, None
    for j in range(k):
        # The last fold extends to the end so that no sample is discarded.
        stop = n if j == k - 1 else (j + 1) * fold_size
        idx = slice(j * fold_size, stop)
        x_part, y_part = x[idx, :], y[idx]
        if j == i:
            x_valid, y_valid = x_part, y_part
        elif x_train is None:
            x_train, y_train = x_part, y_part
        else:
            x_train = torch.cat([x_train, x_part], 0)
            y_train = torch.cat([y_train, y_part], 0)

    return x_train, y_train, x_valid, y_valid

返回训练和验证误差的平均值


def k_fold(k, x_train, y_train, num_epochs, learning_rate, weight_decay,
           batch_size):
    """Run k-fold cross-validation with a fresh model per fold.

    Returns the final-epoch train and validation log-RMSE averaged over
    the k folds; also plots the learning curves of the first fold.
    """
    train_l_sum = 0.0
    valid_l_sum = 0.0
    for fold in range(k):
        fold_data = get_k_fold_data(k, fold, x_train, y_train)
        model = get_model()
        train_ls, valid_ls = train(model, *fold_data, num_epochs,
                                   learning_rate, weight_decay, batch_size)
        train_l_sum += train_ls[-1]
        valid_l_sum += valid_ls[-1]
        # Only the first fold's curves are plotted, to avoid k figures.
        if fold == 0:
            d2l.plot(list(range(1, num_epochs + 1)),
                     [train_ls, valid_ls],
                     xlabel='epoch', ylabel='rmse',
                     xlim=[1, num_epochs],
                     legend=['train', 'valid'],
                     yscale='log')
        print(f'折{fold + 1} 训练log rmse{float(train_ls[-1]):f}, '
              f'验证log rmse{float(valid_ls[-1]):f}')

    return train_l_sum / k, valid_l_sum / k

模型训练与可视化

'''模型選擇'''
# Hyperparameters: 5 folds, 100 epochs, lr=5 (a large lr is workable here
# because the features are standardized), no weight decay, batch size 64.
k, num_epochs, lr, weight_decay, batch_size = 5, 100, 5, 0, 64
train_l, valid_l = k_fold(k, train_features, train_labels, num_epochs, lr,weight_decay, batch_size) 
print(f'{k}-折验证: 平均训练log rmse: {float(train_l):f}, 'f'平均验证log rmse: {float(valid_l):f}')

模型预测以及生成预测新文件

# Final training on all data + Kaggle submission generation.
def train_and_pred(train_features, test_feature, train_labels, test_data,
                   num_epochs, lr, weight_decay, batch_size):
    """Retrain on the full training set, predict the test set, and write
    a Kaggle submission file (submission.csv).

    Bug fix: the body previously read the *global* ``test_features`` and
    silently ignored the ``test_feature`` parameter; the parameter is now
    actually used (the signature is unchanged).
    """
    net = get_model()
    # No held-out set here: test_features/test_labels are passed as None.
    train_ls, _ = train(net, train_features, train_labels, None, None,
                        num_epochs, lr, weight_decay, batch_size)

    d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch',
             ylabel='log rmse', xlim=[1, num_epochs], yscale='log')

    print(f'训练log rmse:{float(train_ls[-1]):f}')
    # Apply the trained network to the test set.
    preds = net(test_feature).detach().numpy()
    # Reformat predictions into the two-column CSV Kaggle expects.
    test_data['SalePrice'] = pd.Series(preds.reshape(-1))
    submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)
    submission.to_csv('submission.csv', index=False)


'''生成文件'''
# Train on the full training set and write submission.csv for Kaggle,
# reusing the hyperparameters chosen via k-fold cross-validation above.
train_and_pred(train_features, test_features, 
        train_labels, test_data, 
        num_epochs, lr, weight_decay, 
        batch_size)

参考:

《动手学深度学习》

  • 0
    点赞
  • 9
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值