Hung-yi Lee (李宏毅) Machine Learning 2022 HW1

A while ago I watched Prof. Hung-yi Lee's deep learning course but never got around to the homework, so I recently decided to work through it. The course materials and videos are here: (强推)李宏毅2021/2022春机器学习课程_哔哩哔哩_bilibili

The download links for the slides and the dataset are in the video description.

At the moment my submission passes the strong baseline but is still some distance from the boss baseline; closing that gap probably needs further fine-tuning, so I'll leave it here for now and try to work through all the homeworks first. The scores for each baseline are shown below; lower is better.

The screenshot below shows my submission result.

Below I share my approach; the main changes are the following:

1. First, the instructor stressed in class that feature selection is very important, and this step alone already raises the score a lot. I used two approaches. The first follows the hints given in class about which features matter, e.g. the state indicators at the front and each day's tested_positive, and simply picks the corresponding column indices. Later I found that sklearn's feature-selection function SelectKBest can pick the best n features automatically; the following post explains it in detail:

特征选择函数--selectKBest_夺笋123的博客-CSDN博客

Here is the corresponding feature-selection function:

def feature_get(k):  # k: number of features to keep
    with open('covid.train.csv', 'r') as f:  # read the training csv
        csv_data = list(csv.reader(f))
        csv_title = csv_data[0][1:]
        csv_data = np.array(csv_data[1:])[:, 1:].astype(float)
    # Build the feature matrix and label vector
    labels_list = []
    data_list = []
    for i in csv_data:  # labels_list is the target y, data_list is the input x
        labels_list.append(i[-1])
        data_list = np.append(data_list, i[:-1])  # drop the label column
    data_list = data_list.reshape(2699, -1)
    labels_list = np.array(labels_list)

    model = SelectKBest(chi2, k=k)
    x_new = model.fit_transform(data_list, labels_list.astype('int32'))  # x_new holds the selected columns; e.g. with k=20 it keeps the 20 best columns merged into one array

    data_list = data_list.transpose()
    x_new = x_new.transpose()

    feature_list = []
    for i in x_new:  # find each selected column's index in the original data by matching values (the value at row 10 is assumed to identify the column)
        for j in range(len(data_list)):
            if i[10] == data_list[j][10]:
                feature_list.append(j+1)
    return feature_list

Running it to extract the top 24 best features gives the result below; each number is the position (column index) of a selected feature.
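A note on the last loop in feature_get: it recovers each selected column's position by comparing the value at row 10, which assumes that value is unique to the column. SelectKBest can also return the selected column indices directly through get_support(indices=True); below is a minimal alternative sketch under the same data layout (feature_get_v2 is an illustrative name of mine, not part of the submitted code):

def feature_get_v2(k):
    # Same loading as feature_get: drop the header row and the id column.
    with open('covid.train.csv', 'r') as f:
        csv_data = np.array(list(csv.reader(f))[1:])[:, 1:].astype(float)
    x, y = csv_data[:, :-1], csv_data[:, -1]
    selector = SelectKBest(chi2, k=k)  # chi2 requires non-negative features
    selector.fit(x, y.astype('int32'))
    # get_support(indices=True) returns the selected column positions directly,
    # so no value-matching loop is needed. +1 shifts the indices past the id
    # column that select_feat keeps in raw_x_train.
    return (selector.get_support(indices=True) + 1).tolist()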

2. Replace ReLU() with LeakyReLU(0.1) and increase the learning rate; here I made it ten times larger. I also switched the optimizer to Adam (the instructor pointed out that the optimizer can be changed) and added cosine annealing. These later tweaks only improve the model slightly and count as fine-tuning; the main gain still comes from feature selection. A minimal sketch of these changes is shown below.
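For reference, here is a minimal sketch of the changes in point 2; the layer sizes, learning rate, and T_max are simply the values described above, and the complete training code follows in the next section:

import torch
import torch.nn as nn
from torch.optim.lr_scheduler import CosineAnnealingLR

model = nn.Sequential(
    nn.Linear(24, 16),
    nn.LeakyReLU(0.1),  # LeakyReLU(0.1) instead of ReLU()
    nn.Linear(16, 8),
    nn.LeakyReLU(0.1),
    nn.Linear(8, 1),
)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)  # Adam, learning rate raised 10x
scheduler = CosineAnnealingLR(optimizer, T_max=3000)       # cosine annealing over all epochs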

The complete code is below. I suggest running it locally: Colab usually assigns a T4 GPU, which in my experience is slower than the GPUs in most current laptops, so running locally is much faster.

# Numerical Operations
import math
import numpy as np
from sklearn.feature_selection import SelectKBest, chi2
# Reading/Writing Data
import pandas as pd
import os
import csv

# For Progress Bar
from tqdm import tqdm

# Pytorch
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader, random_split
from torch.optim.lr_scheduler import CosineAnnealingLR
# For plotting learning curve
from torch.utils.tensorboard import SummaryWriter

def feature_get(k):  # k: number of features to keep
    with open('covid.train.csv', 'r') as f:  # read the training csv
        csv_data = list(csv.reader(f))
        csv_title = csv_data[0][1:]
        csv_data = np.array(csv_data[1:])[:, 1:].astype(float)
    # Build the feature matrix and label vector
    labels_list = []
    data_list = []
    for i in csv_data:  # labels_list is the target y, data_list is the input x
        labels_list.append(i[-1])
        data_list = np.append(data_list, i[:-1])  # drop the label column
    data_list = data_list.reshape(2699, -1)
    labels_list = np.array(labels_list)

    model = SelectKBest(chi2, k=k)
    x_new = model.fit_transform(data_list, labels_list.astype('int32'))  # x_new holds the selected columns; e.g. with k=20 it keeps the 20 best columns merged into one array

    data_list = data_list.transpose()
    x_new = x_new.transpose()

    feature_list = []
    for i in x_new:  # find each selected column's index in the original data by matching values (the value at row 10 is assumed to identify the column)
        for j in range(len(data_list)):
            if i[10] == data_list[j][10]:
                feature_list.append(j+1)
    return feature_list




def same_seed(seed):
    '''Fixes random number generator seeds for reproducibility.'''
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

def train_valid_split(data_set, valid_ratio, seed):
    '''Split provided training data into training set and validation set'''
    valid_set_size = int(valid_ratio * len(data_set))
    train_set_size = len(data_set) - valid_set_size
    train_set, valid_set = random_split(data_set, [train_set_size, valid_set_size], generator=torch.Generator().manual_seed(seed))
    return np.array(train_set), np.array(valid_set)

def predict(test_loader, model, device):
    model.eval() # Set your model to evaluation mode.
    preds = []
    for x in tqdm(test_loader):
        x = x.to(device)
        with torch.no_grad():
            pred = model(x)
            preds.append(pred.detach().cpu())
    preds = torch.cat(preds, dim=0).numpy()
    return preds

class COVID19Dataset(Dataset):
    '''
    x: Features.
    y: Targets, if none, do prediction.
    '''
    def __init__(self, x, y=None):
        if y is None:
            self.y = y
        else:
            self.y = torch.FloatTensor(y)
        self.x = torch.FloatTensor(x)

    def __getitem__(self, idx):
        if self.y is None:
            return self.x[idx]
        else:
            return self.x[idx], self.y[idx]

    def __len__(self):
        return len(self.x)

class My_Model(nn.Module):
    def __init__(self, input_dim):
        super(My_Model, self).__init__()
        # TODO: modify model's structure, be aware of dimensions.
        self.layers = nn.Sequential(
            nn.Linear(input_dim, 16),
            nn.LeakyReLU(0.1),
            nn.Linear(16, 8),
            nn.LeakyReLU(0.1),
            nn.Linear(8, 1)
        )

    def forward(self, x):
        x = self.layers(x)
        x = x.squeeze(1) # (B, 1) -> (B)
        return x


def select_feat(feature_list,train_data, valid_data, test_data, select_all=True):
    '''Selects useful features to perform regression'''
    y_train, y_valid = train_data[:, -1], valid_data[:, -1]
    raw_x_train, raw_x_valid, raw_x_test = train_data[:, :-1], valid_data[:, :-1], test_data

    if select_all:
        feat_idx = list(range(raw_x_train.shape[1]))
    else:
        #feat_idx = [1, 2, 3, 4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,53,69,85,101]  # TODO: Select suitable feature columns.
        feat_idx=feature_list
    return raw_x_train[:, feat_idx], raw_x_valid[:, feat_idx], raw_x_test[:, feat_idx], y_train, y_valid


def trainer(train_loader, valid_loader, model, config, device):
    criterion = nn.MSELoss(reduction='mean')  # Define your loss function, do not modify this.

    # Define your optimization algorithm.
    # TODO: Please check https://pytorch.org/docs/stable/optim.html to get more available algorithms.
    # TODO: L2 regularization (optimizer(weight decay...) or implement by your self).
    # = torch.optim.SGD(model.parameters(), lr=config['learning_rate'], momentum=0.9)
    optimizer = torch.optim.Adam(model.parameters(), lr=config['learning_rate'])
    scheduler = CosineAnnealingLR(optimizer, T_max=config['n_epochs'])

    writer = SummaryWriter()  # Writer of tensorboard.

    if not os.path.isdir('./models'):
        os.mkdir('./models')  # Create directory of saving models.

    n_epochs, best_loss, step, early_stop_count = config['n_epochs'], math.inf, 0, 0

    for epoch in range(n_epochs):
        model.train()  # Set your model to train mode.
        loss_record = []

        # tqdm is a package to visualize your training progress.
        train_pbar = tqdm(train_loader, position=0, leave=True)

        for x, y in train_pbar:
            optimizer.zero_grad()  # Set gradient to zero.
            x, y = x.to(device), y.to(device)  # Move your data to device.
            pred = model(x)
            loss = criterion(pred, y)
            loss.backward()  # Compute gradient(backpropagation).
            optimizer.step()  # Update parameters.
            step += 1
            loss_record.append(loss.detach().item())

            # Display current epoch number and loss on tqdm progress bar.
            train_pbar.set_description(f'Epoch [{epoch + 1}/{n_epochs}]')
            train_pbar.set_postfix({'loss': loss.detach().item()})

        mean_train_loss = sum(loss_record) / len(loss_record)
        writer.add_scalar('Loss/train', mean_train_loss, step)
        scheduler.step()  # Step the cosine-annealing schedule once per epoch, after the optimizer updates.

        model.eval()  # Set your model to evaluation mode.
        loss_record = []
        for x, y in valid_loader:
            x, y = x.to(device), y.to(device)
            with torch.no_grad():
                pred = model(x)
                loss = criterion(pred, y)

            loss_record.append(loss.item())

        mean_valid_loss = sum(loss_record) / len(loss_record)
        print(f'Epoch [{epoch + 1}/{n_epochs}]: Train loss: {mean_train_loss:.4f}, Valid loss: {mean_valid_loss:.4f}')
        writer.add_scalar('Loss/valid', mean_valid_loss, step)

        if mean_valid_loss < best_loss:
            best_loss = mean_valid_loss
            torch.save(model.state_dict(), config['save_path'])  # Save your best model
            print('Saving model with loss {:.3f}...'.format(best_loss))
            early_stop_count = 0
        else:
            early_stop_count += 1

        if early_stop_count >= config['early_stop']:
            print('\nModel is not improving, so we halt the training session.')
            return


device = 'cuda' if torch.cuda.is_available() else 'cpu'
config = {
    'seed': 5201314,      # Your seed number, you can pick your lucky number. :)
    'select_all': False,   # Whether to use all features.
    'valid_ratio': 0.2,   # validation_size = train_size * valid_ratio
    'n_epochs': 3000,     # Number of epochs.
    'batch_size': 256,
    'learning_rate': 1e-4,
    'early_stop': 400,    # If model has not improved for this many consecutive epochs, stop training.
    'save_path': './models/model.ckpt'  # Your model will be saved here.
}


# Set seed for reproducibility
same_seed(config['seed'])


# train_data size: 2699 x 118 (id + 37 states + 16 features x 5 days)
# test_data size: 1078 x 117 (without last day's positive rate)
train_data, test_data = pd.read_csv('./covid.train.csv').values, pd.read_csv('./covid.test.csv').values
train_data, valid_data = train_valid_split(train_data, config['valid_ratio'], config['seed'])

# Print out the data size.
print(f"""train_data size: {train_data.shape} 
valid_data size: {valid_data.shape} 
test_data size: {test_data.shape}""")

# Select features
feature_list=feature_get(24)
print(feature_list)
x_train, x_valid, x_test, y_train, y_valid = select_feat(feature_list,train_data, valid_data, test_data, config['select_all'])

# Print out the number of features.
print(f'number of features: {x_train.shape[1]}')

train_dataset, valid_dataset, test_dataset = COVID19Dataset(x_train, y_train), \
                                            COVID19Dataset(x_valid, y_valid), \
                                            COVID19Dataset(x_test)

# Pytorch data loader loads pytorch dataset into batches.
train_loader = DataLoader(train_dataset, batch_size=config['batch_size'], shuffle=True, pin_memory=True)
valid_loader = DataLoader(valid_dataset, batch_size=config['batch_size'], shuffle=True, pin_memory=True)
test_loader = DataLoader(test_dataset, batch_size=config['batch_size'], shuffle=False, pin_memory=True)

model = My_Model(input_dim=x_train.shape[1]).to(device) # put your model and data on the same computation device.
trainer(train_loader, valid_loader, model, config, device)

def save_pred(preds, file):
    ''' Save predictions to specified file '''
    with open(file, 'w') as fp:
        writer = csv.writer(fp)
        writer.writerow(['id', 'tested_positive'])
        for i, p in enumerate(preds):
            writer.writerow([i, p])

model = My_Model(input_dim=x_train.shape[1]).to(device)
model.load_state_dict(torch.load(config['save_path']))
preds = predict(test_loader, model, device)
save_pred(preds, 'pred.csv')

Finally, I'm still a beginner myself, so if you find mistakes or have better approaches, please share them in the comments. If you have questions, feel free to comment as well; I'll reply as soon as I see them. Let's improve together!
