"Second Classroom" AI Practice: Notes No. 3

Used Car Price Prediction

%matplotlib inline

from sklearn.model_selection import train_test_split

from torch.utils.data import TensorDataset, DataLoader

import pandas as pd

import zipfile  

import re

import numpy as np

import torch

from torch import nn

from matplotlib_inline import backend_inline

import matplotlib.pyplot as plt  

import matplotlib.image as mpimg  

from IPython import display

# Set random seeds so that the results are reproducible

# cpu

# torch.manual_seed(42)

# gpu

if torch.cuda.is_available():  

    torch.cuda.manual_seed(42)
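The CPU seed above is left commented out; as a minimal sketch (assuming full reproducibility is wanted), the CPU RNG and NumPy can be seeded as well so that weight initialisation and shuffling also repeat:

# Sketch: seed the CPU RNG and NumPy in addition to CUDA (assumption:
# exact reproducibility of weight init and shuffling is desired).
torch.manual_seed(42)
np.random.seed(42)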

# Helper function to extract a .zip archive

def unzip_file(zip_filepath, dest_path):

    with zipfile.ZipFile(zip_filepath, 'r') as zip_ref:  

        zip_ref.extractall(dest_path)

# Unpack the .zip archives

unzip_file('used_car_train_20200313.zip','./')

unzip_file('used_car_testB_20200421.zip','./')

test_data = pd.read_csv('used_car_testB_20200421.csv', sep=' ')

train_data = pd.read_csv('used_car_train_20200313.csv', sep=' ')

test_data.to_csv('used_car_testB.csv')

train_data.to_csv('used_car_train.csv')

data = pd.concat([train_data, test_data])

data = data.replace('-', '-1')  # 'notRepairedDamage' uses '-' for missing values; map it to -1

data.notRepairedDamage = data.notRepairedDamage.astype('float32')

data.loc[data['power']>600,'power'] = 600  # clip outliers in 'power' at 600

cate_cols=['model', 'brand', 'bodyType', 'fuelType', 'gearbox', 'seller', 'notRepairedDamage']

num_cols=['regDate', 'creatDate', 'power', 'kilometer', 'v_0', 'v_1', 'v_2', 'v_3', 'v_4', 'v_5', 'v_6', 'v_7', 'v_8', 'v_9', 'v_10','v_11', 'v_12', 'v_13', 'v_14']

# Define a one-hot encoding helper

def oneHotEncode(df, colNames):

    for col in colNames:

        dummies = pd.get_dummies(df[col], prefix=col)

        df = pd.concat([df, dummies],axis=1)

        df.drop([col], axis=1, inplace=True)

    return df

# Categorical features: fill missing values, then one-hot encode

for col in cate_cols:

    data[col] = data[col].fillna('-1')

data = oneHotEncode(data, cate_cols)
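As a quick illustration of what oneHotEncode produces, here is a toy single-column frame (the values are made up and not taken from the real data):

# Illustrative only: a made-up column run through the same helper.
demo = pd.DataFrame({'gearbox': [0.0, 1.0, 0.0, -1.0]})
print(oneHotEncode(demo, ['gearbox']).columns.tolist())
# expected: something like ['gearbox_-1.0', 'gearbox_0.0', 'gearbox_1.0']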

# Continuous features: fill missing values, then min-max normalise

for col in num_cols:

    data[col] = data[col].fillna(0)

    data[col] = (data[col]-data[col].min()) / (data[col].max()-data[col].min())
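A quick sanity check of the min-max scaling (a sketch; it assumes no column in num_cols is constant, otherwise the division above would have produced NaN):

# All scaled numeric columns should now lie in [0, 1].
print(data[num_cols].min().min(), data[num_cols].max().max())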

# Drop (probably) irrelevant columns

data.drop(['name', 'regionCode'], axis=1, inplace=True)

data.columns

Index(['SaleID', 'regDate', 'power', 'kilometer', 'offerType', 'creatDate', 'price', 'v_0', 'v_1', 'v_2', ... 'fuelType_6.0', 'fuelType_-1', 'gearbox_0.0', 'gearbox_1.0', 'gearbox_-1', 'seller_0', 'seller_1', 'notRepairedDamage_-1.0', 'notRepairedDamage_0.0', 'notRepairedDamage_1.0'], dtype='object', length=336)

# Split out the test set (rows whose price is NaN)

data=data.reset_index(drop=True)

data = data.astype(float)

test_data = data[pd.isna(data.price)]

X_id=test_data['SaleID']

del test_data['SaleID']

del test_data['price']

X_result=torch.tensor(test_data.values, dtype=torch.float32)

test_data.to_csv('one_hot_testB.csv')

# Split out the training set

train_data = data.drop(data[pd.isna(data.price)].index)

train_data.to_csv('one_hot_train.csv')

y=train_data['price']

del train_data['price']

del train_data['SaleID']

X=torch.tensor(train_data.values, dtype=torch.float32)

y=torch.Tensor(y)

X=X.reshape(-1,334)

y=y.reshape(-1,1)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,random_state=512)

X_train, X_test, y_train, y_test=torch.Tensor(X_train), torch.Tensor(X_test), torch.Tensor(y_train), torch.Tensor(y_test)

train_dataset = TensorDataset(X_train, y_train)  

test_dataset = TensorDataset(X_test, y_test)  

train_iter = DataLoader(train_dataset, batch_size=512, shuffle=True,num_workers=3)  

test_iter = DataLoader(test_dataset, batch_size=512, shuffle=False,num_workers=3)  
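Optionally, one mini-batch can be pulled from the loader to confirm the batch shapes (an illustrative check, not required for training):

# Fetch a single batch and inspect its shapes.
xb, yb = next(iter(train_iter))
print(xb.shape, yb.shape)  # expected: torch.Size([512, 334]) torch.Size([512, 1])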

X_train.shape,y_train.shape

(torch.Size([112500, 334]), torch.Size([112500, 1]))

net = nn.Sequential(

            nn.BatchNorm1d(334),

            nn.Linear(334, 568),

            nn.BatchNorm1d(568),

            nn.ReLU(),

            nn.Linear(568, 256),

            nn.BatchNorm1d(256),

            nn.ReLU(),

            nn.Linear(256, 256),

            nn.BatchNorm1d(256),

            nn.ReLU(),

            nn.Linear(256,256),

            nn.BatchNorm1d(256),

            nn.ReLU(),

            nn.Linear(256,256),

            nn.BatchNorm1d(256),

            nn.ReLU(),

            nn.Linear(256,128),

            nn.BatchNorm1d(128),

            nn.ReLU(),

            nn.Linear(128,1))

def init_weights(m):

    if type(m) == nn.Linear:

        nn.init.xavier_uniform_(m.weight)

net.apply(init_weights);
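A dummy batch can be pushed through the untrained network to confirm it emits one value per sample (a sketch; the batch size of 4 is arbitrary, and eval mode is used so BatchNorm1d falls back on its default running statistics):

# Optional shape check with a random dummy batch (illustrative only).
net.eval()
with torch.no_grad():
    print(net(torch.randn(4, 334)).shape)  # expected: torch.Size([4, 1])
net.train()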

def try_gpu(i=0):

    if torch.cuda.device_count() >= i + 1:

        return torch.device(f'cuda:{i}')

    return torch.device('cpu')

def use_svg_display():

    backend_inline.set_matplotlib_formats('svg')

class Accumulator:

    """在n个变量上累加"""

    def __init__(self, n):

        self.data = [0.0] * n

    def add(self, *args):

        self.data = [a + float(b) for a, b in zip(self.data, args)]

    def reset(self):

        self.data = [0.0] * len(self.data)  # reset() sets every element of self.data back to 0.0

    def __getitem__(self, idx):

        # __getitem__ makes the instance indexable: acc[i] returns the i-th accumulated value.

        return self.data[idx]
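A tiny usage example of Accumulator (illustrative only):

# Accumulate an error sum and a sample count, then read them back by index.
acc = Accumulator(2)
acc.add(3.0, 1)
acc.add(5.0, 1)
print(acc[0] / acc[1])  # 4.0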

def evaluate_accuracy(net, device, loss, data_iter):

    """Despite its name, this returns the mean absolute error of net on data_iter."""

    net.eval()

    metric = Accumulator(2)  # summed absolute error, number of samples

    with torch.no_grad():

        for X, y in data_iter:        

            X=X.to(device)

            y=y.to(device)

            metric.add(abs(net(X)-y).sum().item(), y.numel())  # y.numel() gives the number of samples in the batch

            # accumulate each batch's absolute-error sum and sample count

    return metric[0] / metric[1]

# .item() converts a one-element tensor to a Python scalar

# Train the model

def train_epoch_ch3(net, device,train_iter, loss, updater):

    """训练模型一个迭代周期"""

    # Put the model in training mode

    net.train()

    # summed absolute error, number of samples

    metric = Accumulator(2)

    for X, y in train_iter:

        # Compute gradients and update the parameters

        X=X.to(device)

        y=y.to(device)

        y_hat = net(X)

        l = loss(y_hat, y)                    

        updater.zero_grad()

        l.backward()

        updater.step()

        metric.add(abs(y_hat-y).sum().item(), y.numel())  # accumulate the summed absolute error and sample count

    # Return the mean absolute error over the epoch

    return metric[0] / metric[1]

def set_axes(axes,xlabel,ylabel,xlim,ylim,xscale,yscale,legend):

    axes.set_xlabel(xlabel)

    axes.set_ylabel(ylabel)

    axes.set_xscale(xscale)

    axes.set_yscale(yscale)

    axes.set_xlim(xlim)

    axes.set_ylim(ylim)

    if legend:

        axes.legend(legend)

    axes.grid()

class Animator:

    """在动画中绘制数据"""

    def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,

                 ylim=None, xscale='linear', yscale='linear',

                 fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1,

                 figsize=(7, 5)):

        # Draw multiple lines incrementally

        if legend is None:

            legend = []

        use_svg_display()

        self.fig, self.axes = plt.subplots(nrows, ncols, figsize=figsize)

        if nrows * ncols == 1:

            self.axes = [self.axes, ]

        # Use a lambda to capture the axis-configuration arguments

        self.config_axes = lambda: set_axes(

            self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)

        self.X, self.Y, self.fmts = None, None, fmts

    def add(self, x, y):

        # Add multiple data points to the chart

        if not hasattr(y, "__len__"):

            y = [y]

        n = len(y)

        if not hasattr(x, "__len__"):

            x = [x] * n

        if not self.X:

            self.X = [[] for _ in range(n)]

        if not self.Y:

            self.Y = [[] for _ in range(n)]

        for i, (a, b) in enumerate(zip(x, y)):

            if a is not None and b is not None:

                self.X[i].append(a)

                self.Y[i].append(b)

        self.axes[0].cla()

        for x, y, fmt in zip(self.X, self.Y, self.fmts):

            self.axes[0].plot(x, y, fmt)

        self.config_axes()

        display.display(self.fig)

        display.clear_output(wait=True)

def train_ch3(net, device,train_iter, test_iter, loss, num_epochs, updater):

    net.to(device)

    animator = Animator(xlabel='epoch', xlim=[1, num_epochs],

                        legend=['train loss',  'test loss'])

    for epoch in range(num_epochs):

        train_loss = train_epoch_ch3(net, device,train_iter, loss, updater)

        test_loss = evaluate_accuracy(net, device,loss,test_iter)

        animator.add(epoch + 1, (train_loss, test_loss))

    return train_loss,test_loss

def predict_ch3(net, device, X_result):

    net.eval()

    X_result = X_result.to(device)

    y_hat=net(X_result)

    return y_hat

# Training

lr, num_epochs =  0.01, 150

loss = nn.MSELoss()

trainer = torch.optim.Adam(net.parameters(), lr=lr)

device=try_gpu()

train_loss,test_loss=train_ch3(net, device,train_iter, test_iter, loss, num_epochs, trainer)

pred = predict_ch3(net, device, X_result)

pred=pred.to('cpu')

pred = pred.detach().numpy()  # detach from the computation graph and convert the tensor to a NumPy array

res=pd.DataFrame(pred, columns=['price'])

X_id=X_id.reset_index(drop=True)

submission = pd.concat([X_id, res['price']], axis=1)

submission.to_csv('submission.csv',index=False)
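To double-check the file just written, the first few rows can be read back (optional):

# Peek at the generated submission file.
print(pd.read_csv('submission.csv').head())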

train_loss, test_loss  # both values are mean absolute errors in price units

(508.2661855555556, 549.8478175)
