Multilayer Perceptron for a Kaggle Competition: House Price Prediction

I'm new to PyTorch and have recently been learning about multilayer perceptrons; this post records my implementation of house price prediction.

import torch
import torch.optim as optim
import torch.nn as nn
import numpy as np
import pandas as pd
import os
import netlayer  # local module containing the MLPnet definition shown below
from torch.nn import init
from sklearn.model_selection import train_test_split
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

# Use float as the default tensor type
torch.set_default_tensor_type(torch.FloatTensor)

train_path = 'D:/user/python project/homeprice/house-prices-advanced-regression-techniques/train.csv'
test_path = 'D:/user/python project/homeprice/house-prices-advanced-regression-techniques/test.csv'
train_data = pd.read_csv(train_path)
test_data = pd.read_csv(test_path)

# Concatenate train and test features row-wise (Id column and label dropped)
all_features = pd.concat((train_data.iloc[:, 1:-1], test_data.iloc[:, 1:]))

# Standardize the training labels (SalePrice) and keep the statistics for later use
gross_train_labels = np.array(train_data.iloc[:, -1])
gross_train_labels_mean = gross_train_labels.mean()
gross_train_labels_std = gross_train_labels.std()
norm_train_labels = (gross_train_labels - gross_train_labels_mean) / gross_train_labels_std

# Label range, used later to map predictions back onto the SalePrice scale
train_max = gross_train_labels.max()
train_min = gross_train_labels.min()

# Indices of the features whose dtype is not object, i.e. the numeric features
numeric_features = all_features.dtypes[all_features.dtypes != 'object'].index
all_features[numeric_features] = all_features[numeric_features].apply(lambda x: (x - x.mean()) / (x.std()))

# After standardization every numeric feature has zero mean, so missing values can simply be filled with 0
all_features[numeric_features] = all_features[numeric_features].fillna(0)
# One-hot encode the discrete (categorical) features, with an extra indicator column for NaN
all_features = pd.get_dummies(all_features, dummy_na=True)
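# Hedged toy illustration (the small frame below is invented for the example, not competition data):
# get_dummies with dummy_na=True turns every categorical level, including "missing", into its own indicator column.
_demo = pd.get_dummies(pd.DataFrame({'MSZoning': ['RL', 'RM', None]}), dummy_na=True)
# _demo now contains the indicator columns MSZoning_RL, MSZoning_RM and MSZoning_nan.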

# Total number of training samples
num_train = train_data.shape[0]

# Number of input features (after preprocessing)
num_inputs = all_features.shape[1]

# Total number of test samples
num_test = test_data.shape[0]

# Split the training portion into training and validation sets (80/20, without shuffling)
data_train, data_valid = train_test_split(all_features.iloc[:num_train], test_size=0.2, train_size=0.8, shuffle=False)
n_train = data_train.shape[0]
n_valid = data_valid.shape[0]

# Features for the training, validation, and test sets
# (.astype keeps the arrays numeric even if get_dummies produced bool columns)
train_features = torch.tensor(all_features.iloc[:n_train].values.astype(np.float32), dtype=torch.float)
valid_features = torch.tensor(all_features.iloc[n_train:num_train].values.astype(np.float32), dtype=torch.float)
test_features = torch.tensor(all_features.iloc[num_train:].values.astype(np.float32), dtype=torch.float)


# Labels for the training and validation sets
train_labels = torch.tensor(norm_train_labels[:n_train], dtype=torch.float).view(-1, 1)
valid_labels = torch.tensor(norm_train_labels[n_train:], dtype=torch.float).view(-1, 1)


net = netlayer.MLPnet(num_inputs, 256)  # instantiate the model with 256 hidden units
loss = nn.MSELoss()  # mean squared error loss
optimizer = torch.optim.SGD(net.parameters(), lr=0.03)
batch_size = 16
train_batches = n_train // batch_size  # number of mini-batches in one pass over the training set
valid_batches = n_valid // batch_size  # number of mini-batches in one pass over the validation set

# Train the model: a single pass over the training set, one mini-batch at a time
for i in range(train_batches):
    start, end = i * batch_size, min((i + 1) * batch_size, n_train)
    output = net(train_features[start:end, :])
    l = loss(output, train_labels[start:end, :])
    optimizer.zero_grad()  # clear the gradients (equivalent to net.zero_grad())
    l.backward()           # back-propagate
    optimizer.step()       # update the parameters
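
# Hedged sketch: the loop above makes only a single pass over the training data.
# A more conventional multi-epoch version built on torch.utils.data.DataLoader could
# look like the helper below (defined for reference, not called here; num_epochs is an assumed value).
from torch.utils.data import TensorDataset, DataLoader

def train_multi_epoch(net, features, labels, loss, optimizer, num_epochs=20, batch_size=16):
    data_iter = DataLoader(TensorDataset(features, labels), batch_size=batch_size, shuffle=True)
    for _ in range(num_epochs):
        for X, y in data_iter:
            l = loss(net(X), y)    # forward pass and mini-batch loss
            optimizer.zero_grad()  # clear gradients
            l.backward()           # back-propagate
            optimizer.step()       # update parameters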

# Evaluate the model on the validation set
loss_sum = 0
with torch.no_grad():
    for i in range(valid_batches):
        start, end = i * batch_size, min((i + 1) * batch_size, n_valid)
        output = net(valid_features[start:end, :])
        l = loss(output, valid_labels[start:end, :])  # compare against the validation labels
        loss_sum += l.item()
loss_mean = loss_sum / valid_batches  # average mini-batch validation loss

# Predict on the test set
prediction = []
with torch.no_grad():
    for i in range(num_test):
        output = net(test_features[i:i + 1, :])  # keep a batch dimension of 1
        prediction.append(output.item())
prediction = torch.tensor(prediction)

prediction_min = torch.min(prediction)
prediction_max = torch.max(prediction)

prediction = train_min + (prediction - prediction_min) * (train_max - train_min) / (prediction_max - prediction_min)  # linearly rescale the predictions onto the original SalePrice range
prediction = prediction.view(-1, 1)
print(prediction)
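
Since the training labels were standardized with their mean and standard deviation, an alternative to the min-max rescaling above is to invert that standardization on the raw network outputs. A minimal hedged sketch (the denormalize helper is added here for illustration and is not used by the script above):

# Hedged alternative: undo the z-score normalization that was applied to the labels.
def denormalize(raw_outputs, mean=gross_train_labels_mean, std=gross_train_labels_std):
    return raw_outputs * std + mean  # inverse of (label - mean) / std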

The network model (the netlayer module) is as follows:

import torch
import torch.nn as nn
from torch.nn import init


def relu(x):
    return torch.max(input=x, other=torch.tensor(0.0))


# A multilayer perceptron with a single hidden layer
class MLPnet(nn.Module):
    def __init__(self, num_input, num_hidden):  # number of input features and of hidden units
        super(MLPnet, self).__init__()
        w_xh_tensor = torch.Tensor(num_input, num_hidden)
        w_ho_tensor = torch.Tensor(num_hidden, 1)
        # Use a separate bias tensor per layer: wrapping one shared tensor in two
        # Parameters would make the two biases share the same underlying storage.
        b_xh_tensor = torch.Tensor(1, num_hidden)
        b_ho_tensor = torch.Tensor(1, 1)
        self.w_xh = nn.Parameter(init.normal_(w_xh_tensor, mean=0, std=0.01))
        self.b_xh = nn.Parameter(init.constant_(b_xh_tensor, val=0))
        self.w_ho = nn.Parameter(init.normal_(w_ho_tensor, mean=0, std=0.01))
        self.b_ho = nn.Parameter(init.constant_(b_ho_tensor, val=0))

    def forward(self, x):
        xh = torch.matmul(x, self.w_xh) + self.b_xh  # input -> hidden (affine)
        h = relu(xh)                                 # hidden activation
        ho = torch.matmul(h, self.w_ho) + self.b_ho  # hidden -> output
        return ho
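
For comparison, the same one-hidden-layer architecture can also be written with PyTorch's built-in layers. A minimal sketch (not used by the script above; note that nn.Linear uses its own default initialization rather than the std=0.01 normal initialization above):

import torch.nn as nn

# Equivalent MLP built from standard modules (sketch only)
def make_mlp(num_input, num_hidden):
    return nn.Sequential(
        nn.Linear(num_input, num_hidden),  # input -> hidden, weight and bias included
        nn.ReLU(),                         # hidden activation
        nn.Linear(num_hidden, 1),          # hidden -> single regression output
    )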

If anything is wrong or could be improved, I'd appreciate comments and suggestions.
