机器学习与深度学习——入门自用(2.2)
文章目录
前言
对应李老师春季课堂hw1作业。这是最基础的机器学习程序,即supervised learning,也就是learning from labeled data(从已标注数据中学习),这个过程需要我们提供带标签(labeled)的数据。
一、如何实现 PyCharm 环境的配置
参考网站:
https://blog.csdn.net/qq_42196241/article/details/121309063?spm=1001.2101.3001.6650.2&utm_medium=distribute.pc_relevant.none-task-blog-2%7Edefault%7ECTRLIST%7ERate-2-121309063-blog-107099933.235%5Ev43%5Epc_blog_bottom_relevance_base5&depth_1-utm_source=distribute.pc_relevant.none-task-blog-2%7Edefault%7ECTRLIST%7ERate-2-121309063-blog-107099933.235%5Ev43%5Epc_blog_bottom_relevance_base5&utm_relevant_index=5
二、Training&Testing
Training很好理解,就是机器通过样本集构建函数的过程,要想Training,一定有两个基础的样本集:training set&validation set,前者用来生成函数并纠正,后者用来衡量取更为合理的函数。
在验证通过后,我们就可以用测试集进行测试。
三、完成一个预测的机器学习的项目
参考
#https://blog.csdn.net/MrR1ght/article/details/116804409
#https://blog.csdn.net/weixin_37198422/article/details/124985891
1.预先引用及工具定义
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader, random_split
from torch import nn
import pandas as pd
import argparse
import csv
import os
import math
from test import * # my own .py file of helper functions
# Define the command-line arguments first.
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, default=2022, help="seed number")
parser.add_argument("--select_all", type=bool, default=True, help="Whether to use all features")
parser.add_argument("--valid_ratio", type=float, default=0.2, help="validation_size = train_size * valid_ratio")
parser.add_argument("--n_epochs", type=int, default=5, help="Number of epochs")
parser.add_argument("--batch_size", type=int, default=255, help="")
parser.add_argument("--lr", type=float, default=1e-5, help="learning rate")
parser.add_argument("--early_stop", type=int, default=400, help="If model has not improved for this epochs, stop training.")
parser.add_argument("--save_path", type=str, default="./model/my_model.pth", help="the directory of save model ")
args = parser.parse_args()
print(args) # echo the parsed arguments as a quick sanity check
# Select the device: GPU if CUDA is available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device) # show which device will actually be used
几个重要的参数说明:
1.batch size:批次大小
2.lr:参数调整的步长
3.n_epochs:训练时完整遍历训练集的轮数(epoch 数)
2.读入文件,预处理—划分训练集和验证集,划分x(feature)与y(label)
def train_valid_split(data_set, valid_ratio, seed):
    """Randomly split a dataset into training and validation parts.

    :param data_set: indexable dataset (here a numpy array of rows)
    :param valid_ratio: fraction of samples assigned to the validation set
    :param seed: seed for the reproducible random split
    :return: (train_set, valid_set) as numpy arrays
    """
    # Validation size first; the training set takes whatever remains.
    n_valid = int(valid_ratio * len(data_set))
    n_train = len(data_set) - n_valid
    # random_split(dataset, lengths, generator) -> List[Subset];
    # a seeded Generator makes the partition reproducible across runs.
    gen = torch.Generator().manual_seed(seed)
    train_subset, valid_subset = random_split(data_set, [n_train, n_valid], generator=gen)
    # Convert the Subset views back into plain numpy arrays.
    return np.array(train_subset), np.array(valid_subset)
def select_feat(train_data, valid_data, test_data, select_all=True):
    """Split arrays into features (x) and labels (y), and pick feature columns.

    The label is the last column of the train/valid arrays; the test array
    contains features only.
    """
    y_train = train_data[:, -1]
    y_valid = valid_data[:, -1]
    raw_x_train = train_data[:, :-1]
    raw_x_valid = valid_data[:, :-1]
    raw_x_test = test_data
    if select_all:
        # Keep every feature column.
        feat_idx = list(range(raw_x_train.shape[1]))
    else:
        # Hand-picked columns believed to correlate with the label.
        feat_idx = [0, 1, 2, 3, 4]
    return (raw_x_train[:, feat_idx],
            raw_x_valid[:, feat_idx],
            raw_x_test[:, feat_idx],
            y_train,
            y_valid)
# Load the raw CSVs as numpy arrays (.values drops the pandas index/header).
train_data = pd.read_csv('D:/BaiduNetdiskDownload/hw1/covid.train.csv').values
test_data = pd.read_csv('D:/BaiduNetdiskDownload/hw1/covid.test.csv').values
# Split the training CSV into train/validation subsets (seeded, reproducible).
train_data, valid_data = train_valid_split(train_data, args.valid_ratio, args.seed)
""" 将数据划分为训练集和验证集 """
# Print out the data size.
print("训练集的尺寸是:{}".format(train_data.shape))
print("验证集的尺寸是:{}".format(valid_data.shape))
print("测试集的尺寸是:{}".format(test_data.shape))
# Separate features (x) from the label column (y); the test set has no label.
x_train, x_valid, x_test, y_train, y_valid = select_feat(train_data, valid_data, test_data, args.select_all)
这里生成的train_data、test_data、valid_data等是numpy多维数组(ndarray),在后面送入Dataset时才会被转换为Tensor。
3. pytorch的数据读取
参考:https://blog.csdn.net/MrR1ght/article/details/116804409
1.首先,定义一个Dataset的子类,此子类的示例用来给DataLoader处理。
class Covid2019Data(Dataset):
    """Dataset wrapper for the COVID-19 regression data.

    x: Features.
    y: Targets, if none, do prediction.
    """
    def __init__(self, x, y):
        # Features are always stored as float tensors; targets only when given
        # (y is None for the unlabeled test set).
        self.x = torch.FloatTensor(x)
        self.y = None if y is None else torch.FloatTensor(y)

    def __getitem__(self, idx):
        # Labeled samples come back as (x, y); unlabeled ones as x alone.
        sample = self.x[idx]
        if self.y is None:
            return sample
        return sample, self.y[idx]

    def __len__(self):
        # Number of samples equals the number of feature rows.
        return len(self.x)
- Dataloader :进行采样,采样后使用batch_sampler生成长度为batch_size的索引列表(实际是使用sampler采样batch_size次),使用collate_fn将batch_size长度的列表整理成batch样本(tensor格式)。
# Wrap the numpy splits in Dataset objects; the test set carries no labels.
train_dataset = Covid2019Data(x_train,y_train)
valid_dataset= Covid2019Data(x_valid, y_valid)
test_dataset = Covid2019Data(x_test,None)
# Pytorch data loader loads pytorch dataset into batches.
# NOTE(review): shuffling the validation loader is harmless but unnecessary;
# the test loader must keep shuffle=False so row order matches the output ids.
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, pin_memory=True)
valid_loader = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=True, pin_memory=True)
test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, pin_memory=True)
4. 构建数据模型
定义自己的网络模型类,模型可以用linear和relu拟合,也可以用sigmoid函数拟合。
class MyModel(nn.Module):
    """Small feed-forward regressor: input_dim -> 16 -> 8 -> 1 with ReLU."""

    def __init__(self, input_dim):
        super().__init__()
        # Architecture is deliberately tiny and easy to tweak.
        # (Attribute name `model` is kept so state_dict keys stay stable.)
        self.model = nn.Sequential(
            nn.Linear(input_dim, 16),
            nn.ReLU(),
            nn.Linear(16, 8),
            nn.ReLU(),
            nn.Linear(8, 1),
        )

    def forward(self, x):
        # The last Linear produces shape (N, 1); squeeze it to (N,) so the
        # output lines up with the 1-D target vector in the loss.
        return self.model(x).squeeze(1)
5. 选择Criterion
6. Optimization
7. Training loop
def trainer():
    """Train MyModel on the global loaders and save the best checkpoint.

    Uses MSE loss with SGD (momentum 0.9). After every epoch the mean
    validation loss is compared with the best seen so far: on improvement the
    model is saved to args.save_path and the early-stop counter resets;
    otherwise the counter grows and training halts once it reaches
    args.early_stop.

    Relies on module-level globals: x_train, train_loader, valid_loader,
    args, device. Returns None.
    """
    # Build the model and move it to the chosen device.
    model = MyModel(input_dim=x_train.shape[1]).to(device)
    # Linear regression target -> mean squared error loss.
    criterion = nn.MSELoss(reduction='mean').to(device)
    optim = torch.optim.SGD(model.parameters(), args.lr, momentum=0.9)
    # Create directory of saving models.
    if not os.path.isdir('./model'):
        os.mkdir('./model')
    best_loss = math.inf   # lowest mean validation loss seen so far
    early_stop_count = 0   # epochs since the last improvement
    for epoch in range(args.n_epochs):
        # ---- training phase ----
        model.train()
        print("第{}轮训练开始:".format(epoch + 1))
        train_loss_record = []
        for x, y in train_loader:
            x = x.to(device)
            y = y.to(device)
            pred = model(x)
            loss = criterion(pred, y)
            optim.zero_grad()   # gradients accumulate by default; clear first
            loss.backward()
            optim.step()
            # .detach().item() keeps only the scalar, not the autograd graph.
            train_loss_record.append(loss.detach().item())
        mean_train_loss = sum(train_loss_record) / len(train_loss_record)
        # ---- validation phase (no gradients) ----
        model.eval()
        valid_loss_record = []
        with torch.no_grad():
            for x, y in valid_loader:
                x, y = x.to(device), y.to(device)
                pred = model(x)
                loss = criterion(pred, y)
                # .item() avoids holding tensors (and GPU memory) in the list.
                valid_loss_record.append(loss.item())
        mean_valid_loss = sum(valid_loss_record) / len(valid_loss_record)
        print("Epoch:{}/{},train loss:{},valid loss:{}".format(epoch, args.n_epochs, mean_train_loss, mean_valid_loss))
        if mean_valid_loss < best_loss:
            best_loss = mean_valid_loss
            torch.save(model.state_dict(), args.save_path)  # Save your best model
            print('Saving model with loss {:.3f}...'.format(best_loss))
            early_stop_count = 0
        else:
            # No improvement this epoch: bump the early-stop counter.
            early_stop_count += 1
        if early_stop_count >= args.early_stop:
            print('\nModel is not improving, so we halt the training session.')
            return
if __name__ == '__main__':
    # Train and save the best checkpoint.
    trainer()
    # Reload the best checkpoint into a fresh model for inference.
    my_model = MyModel(input_dim=x_train.shape[1]).to(device)
    my_model.load_state_dict(torch.load(args.save_path))
    # NOTE(review): predict() is defined *below* this block in the file; run
    # top-to-bottom as-is this raises NameError unless a predict is supplied
    # by `from test import *` — move the definition above this guard.
    preds = predict(test_loader, my_model, device)
    # Write the predictions as (id, tested_positive) rows.
    # NOTE(review): csv on Windows should use open(..., 'w', newline='') to
    # avoid blank lines between rows — confirm on the target platform.
    with open('D:/BaiduNetdiskDownload/hw1/co.csv', 'w') as fp:
        writer = csv.writer(fp)
        writer.writerow(['id', 'tested_positive'])
        for i, p in enumerate(preds):
            writer.writerow([i, p])
8. predict
def predict(test_loader, model, device):
    """Run inference over an unlabeled loader and return a numpy array of predictions.

    :param test_loader: DataLoader yielding feature batches only (no labels)
    :param model: trained nn.Module
    :param device: torch.device to run inference on
    :return: numpy array of all predictions concatenated along dim 0
    """
    model.eval()  # inference mode for dropout/batch-norm layers
    preds = []
    # Enter no_grad once around the whole loop (the original re-entered it per
    # batch); no autograd graphs are built, so .detach() is also unnecessary.
    with torch.no_grad():
        for x in test_loader:
            x = x.to(device)
            pred = model(x)
            # Move each batch back to CPU so the results can become numpy.
            preds.append(pred.cpu())
    # Flatten the list of per-batch tensors into one tensor, then to numpy.
    preds = torch.cat(preds, dim=0).numpy()
    return preds
大概流程明白 但是具体细节还是不怎么懂 后面慢慢补充吧……