DAY 36 Review Day
Carefully review the neural-network material covered so far; if you have fallen behind, use today to catch up.
- Homework: train a neural network on the earlier credit-default project, and apply what you have learned so far to make the code more standardized and readable.
- Exploratory homework (optional): step into nn.Module and inspect its methods (a short sketch follows below).
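For the exploratory task, a minimal sketch (standard library plus PyTorch only) that lists the public methods nn.Module defines:

import inspect
import torch.nn as nn

# Collect the public functions defined on nn.Module and print their names.
methods = [name for name, _ in inspect.getmembers(nn.Module, predicate=inspect.isfunction)
           if not name.startswith('_')]
print(methods)  # expect names such as forward, parameters, train, eval, to, state_dict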
# Data preprocessing
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
dt = pd.read_csv(r'data.csv')
dt.head()
dt.info()
dt.isnull().sum()
# Missing-value handling: split columns into discrete vs. continuous
discrete_features = []
continuous_features = []
for feature in dt.columns:
    if dt[feature].dtype == 'object':
        discrete_features.append(feature)
    else:
        continuous_features.append(feature)
print(f'Discrete features: {discrete_features}')
print(f'Continuous features: {continuous_features}')
# Fill discrete columns with the mode; assign back instead of calling
# fillna(inplace=True) on a column view, which recent pandas warns about
for feature in discrete_features:
    mode_value = dt[feature].mode()[0]
    dt[feature] = dt[feature].fillna(mode_value)
    print(f"Column '{feature}': filled missing values with mode {mode_value}")
# Fill continuous columns with the median
for feature in continuous_features:
    median_value = dt[feature].median()
    dt[feature] = dt[feature].fillna(median_value)
    print(f"Column '{feature}': filled missing values with median {median_value}")
# Label encoding / one-hot encoding for the discrete features
dt["Home Ownership"].value_counts()
dt['Years in current job'].value_counts()
dt['Purpose'].value_counts()
dt['Term'].value_counts()
mapping = {
'Home Ownership': {
'Own Home': 0,
'Rent': 1,
'Have Mortgage': 2,
'Home Mortgage': 3
},
'Term': {
'Short Term': 0,
'Long Term': 1
},
'Purpose': {
'debt_consolidation': 2,
'buy house': 1,
'business loan': 1,
'major purchase': 1,
'small business': 1,
'other': 0,
'home improvements': 0,
'buy a car': 0,
'medical bills': 0,
'take a trip': 0,
'wedding': 0,
'moving': 0,
'educational expenses': 0,
'vacation': 0,
'renewable energy': 0
},
'Years in current job': {
'10+ years': 0,
'9 years': 1,
'8 years': 1,
'7 years': 2,
'6 years': 2,
'5 years': 3,
'4 years': 3,
'3 years': 4,
'2 years': 4,
'< 1 year': 5
}
}
dt["Home Ownership"] = dt["Home Ownership"].map(mapping["Home Ownership"])
dt["Term"] = dt["Term"].map(mapping["Term"])
dt["Purpose"] = dt["Purpose"].map(mapping["Purpose"])
dt["Years in current job"] = dt["Years in current job"].map(mapping["Years in current job"])
dt.head()
dt = dt.drop(['Purpose', 'Id'], axis=1)  # drop the Id column and the encoded Purpose column
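The encoding header above also mentions one-hot encoding, though only manual label maps are used. As a hedged alternative, pandas' get_dummies can one-hot categorical columns directly (a sketch only; on the raw data it would replace the .map calls above, and the column names are taken from this dataset):

# One-hot alternative to the manual label maps: each listed column expands
# into 0/1 indicator columns. Kept in a separate frame so the pipeline
# below is unaffected.
dt_onehot = pd.get_dummies(dt, columns=['Home Ownership', 'Term'], dtype=int)
print(dt_onehot.columns.tolist())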
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import time
X = dt.drop('Credit Default', axis=1)  # features
y = dt['Credit Default']  # label
# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Scale features to [0, 1]; fit the scaler on the training set only to avoid leakage
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# Convert the data to PyTorch tensors
X_train = torch.FloatTensor(X_train)
y_train = torch.LongTensor(y_train.to_numpy())
X_test = torch.FloatTensor(X_test)
y_test = torch.LongTensor(y_test.to_numpy())
# Sanity-check the data
assert torch.isnan(X_train).sum() == 0, "X_train contains NaN"
assert torch.unique(y_train).tolist() == [0, 1], "labels must be 0 or 1"
# Number of feature columns
print(X_train.shape[1])
# Define the neural-network model
class MLP(nn.Module):
    def __init__(self):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(15, 30)  # input layer -> hidden layer: 15 features, 30 neurons
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(30, 2)   # hidden layer -> output layer: 30 neurons, 2 classes

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out
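In the spirit of the homework prompt about tidier code, an equivalent model can be built with nn.Sequential, with the input width taken from the data rather than hard-coded; this is a sketch, not the version trained below:

class MLPSequential(nn.Module):
    def __init__(self, in_features=15, hidden=30, n_classes=2):
        super().__init__()
        # nn.Sequential applies its layers in order, so no hand-written forward body is needed
        self.net = nn.Sequential(
            nn.Linear(in_features, hidden),
            nn.ReLU(),
            nn.Linear(hidden, n_classes),
        )

    def forward(self, x):
        return self.net(x)

# e.g. model = MLPSequential(in_features=X_train.shape[1])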
# Instantiate the model, loss function, and optimizer
model = MLP()
criterion = nn.CrossEntropyLoss()  # cross-entropy over the two output logits (binary task treated as 2-class)
optimizer = optim.Adam(model.parameters(), lr=0.0001)  # Adam optimizer
# Train the model
num_epochs = 20000  # number of training epochs
# Store the loss every 200 epochs together with the epoch index
losses = []
epochs = []
from tqdm import tqdm  # tqdm provides the progress bar
start_time = time.time()  # record the start time
# Create the tqdm progress bar
with tqdm(total=num_epochs, desc="Training", unit="epoch") as pbar:
    # Training loop (full-batch: the whole training set in every step)
    for epoch in range(num_epochs):
        # Forward pass (implicitly calls forward())
        outputs = model(X_train)
        loss = criterion(outputs, y_train)
        # Backward pass and optimization
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Record the loss every 200 epochs and refresh the bar's postfix
        if (epoch + 1) % 200 == 0:
            losses.append(loss.item())
            epochs.append(epoch + 1)
            pbar.set_postfix({'Loss': f'{loss.item():.4f}'})
        # Advance the bar every 1000 epochs
        if (epoch + 1) % 1000 == 0:
            pbar.update(1000)
    # Make sure the bar reaches 100%
    if pbar.n < num_epochs:
        pbar.update(num_epochs - pbar.n)
time_all = time.time() - start_time  # total training time
print(f'Training time: {time_all:.2f} seconds')
# Plot the loss curve (x-axis uses the recorded epoch indices)
plt.plot(epochs, losses)
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Training Loss over Epochs')
plt.show()
# Evaluate the model
model.eval()  # evaluation mode (affects layers such as dropout/batch norm; it does not disable gradients)
with torch.no_grad():  # disable gradient tracking to save memory
    outputs = model(X_test)  # predictions on the test set
    _, predicted = torch.max(outputs, 1)  # index of the largest logit = predicted class
    correct = (predicted == y_test).sum().item()  # number of correct predictions
    total = y_test.size(0)  # number of test samples
    accuracy = correct / total
    print(f'Test Accuracy: {accuracy:.4f}')
Training: 100%|██████████| 20000/20000 [00:37<00:00, 540.26epoch/s, Loss=0.4421]
Training time: 37.02 seconds
Test Accuracy: 0.7687
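Accuracy alone can be misleading on an imbalanced credit-default set, since it may hide poor recall on the default class. An optional extension (not produced in the original run) using scikit-learn on the predictions above:

from sklearn.metrics import classification_report, confusion_matrix

# predicted and y_test come from the evaluation block above
print(confusion_matrix(y_test.numpy(), predicted.numpy()))
print(classification_report(y_test.numpy(), predicted.numpy(), digits=4))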