🍨 本文为[🔗365天深度学习训练营](https://mp.weixin.qq.com/s/0dvHCaOoFnW8SCp3JpzKxg) 中的学习记录博客
🍖 原作者:[K同学啊](https://mtyjkh.blog.csdn.net/)
运行代码为:
# 一、前期准备
# 1.1 导入所需包和设置GPU
import os
import torch
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import matplotlib.pyplot as plt
# Pick the compute device: prefer the CUDA GPU when available, else fall back to CPU.
cuda_available = torch.cuda.is_available()
device = torch.device("cuda" if cuda_available else "cpu")
print(f"Using device: {device}")
# ---- Data loading & preprocessing -------------------------------------------
# Load the heart-disease dataset; the last column is the binary `target` label.
df = pd.read_csv('D:/研究生课题/深度学习-代码/heart.csv')
print(df)
# Report missing values per column. (Previously the result of
# df.isnull().sum() was computed but discarded, so in a script the null
# check had no visible effect — print it so it actually serves as a check.)
print(df.isnull().sum())

# Split features / label and hold out 10% of the rows for validation.
x = df.iloc[:, :-1].values
y = df.iloc[:, -1].values
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=1)
print(x_train.shape, y_train.shape)

# Standardise each feature column to zero mean / unit variance.
# The scaler is fitted on the training split only, to avoid test-set leakage.
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)

# Convert to float32 tensors on the chosen device.
x_train = torch.tensor(x_train, dtype=torch.float32).to(device)
y_train = torch.tensor(y_train, dtype=torch.float32).to(device)
x_test = torch.tensor(x_test, dtype=torch.float32).to(device)
y_test = torch.tensor(y_test, dtype=torch.float32).to(device)

# Reshape to (batch, seq_len=1, features) as expected by batch-first RNNs.
x_train = x_train.reshape(x_train.shape[0], 1, x_train.shape[1])
x_test = x_test.reshape(x_test.shape[0], 1, x_test.shape[1])

# Wrap in datasets / loaders; shuffle only the training data.
train_dataset = TensorDataset(x_train, y_train)
test_dataset = TensorDataset(x_test, y_test)
train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=128, shuffle=False)
# ---- Model ------------------------------------------------------------------
class SimpleRNNModel(nn.Module):
    """Three stacked ReLU RNNs followed by a two-layer classifier head.

    Input shape: (batch, seq_len, 13); output: (batch, 1) sigmoid probability.
    """

    def __init__(self):
        super(SimpleRNNModel, self).__init__()
        self.rnn1 = nn.RNN(13, 128, batch_first=True, nonlinearity='relu')
        self.rnn2 = nn.RNN(128, 64, batch_first=True, nonlinearity='relu')
        self.rnn3 = nn.RNN(64, 32, batch_first=True, nonlinearity='relu')
        self.fc1 = nn.Linear(32, 64)
        self.fc2 = nn.Linear(64, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Pass through each recurrent layer in turn, ignoring the returned
        # hidden states.
        for rnn in (self.rnn1, self.rnn2, self.rnn3):
            x, _ = rnn(x)
        # Keep only the final time step, then run the classifier head.
        last_step = x[:, -1, :]
        hidden = self.fc1(last_step)
        logits = self.fc2(hidden)
        return self.sigmoid(logits)
# ---- Model / loss / optimiser setup -----------------------------------------
model = SimpleRNNModel()
model = model.to(device)
print(model)
# Binary cross-entropy on sigmoid probabilities; Adam with a small learning rate.
criterion = nn.BCELoss()
optimizer = optim.Adam(params=model.parameters(), lr=1e-4)
# ---- Training loop ----------------------------------------------------------
epochs = 100
train_losses = []
val_losses = []
train_accs = []
val_accs = []

for epoch in range(epochs):
    # --- training phase ---
    model.train()
    running_loss = 0.0
    correct = 0
    total = 0
    for inputs, labels in train_loader:
        optimizer.zero_grad()
        outputs = model(inputs)
        # view(-1) flattens (B, 1) -> (B,) safely; a plain squeeze() would
        # collapse a size-1 batch to a 0-d tensor and break the loss /
        # accuracy comparisons against the (B,) labels.
        probs = outputs.view(-1)
        loss = criterion(probs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        preds = (probs >= 0.5).float()
        correct += (preds == labels).sum().item()
        total += labels.size(0)
    train_loss = running_loss / len(train_loader)
    train_acc = correct / total
    train_losses.append(train_loss)
    train_accs.append(train_acc)

    # --- validation phase (no gradient tracking) ---
    model.eval()
    val_loss = 0.0
    correct = 0
    total = 0
    with torch.no_grad():
        for inputs, labels in test_loader:
            outputs = model(inputs)
            probs = outputs.view(-1)
            loss = criterion(probs, labels)
            val_loss += loss.item()
            preds = (probs >= 0.5).float()
            correct += (preds == labels).sum().item()
            total += labels.size(0)
    val_loss /= len(test_loader)
    val_acc = correct / total
    val_losses.append(val_loss)
    val_accs.append(val_acc)

    print(f"Epoch [{epoch + 1}/{epochs}], Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.4f}, "
          f"Val Loss: {val_loss:.4f}, Val Acc: {val_acc:.4f}")
# ---- Curves: accuracy and loss over epochs ----------------------------------
epochs_range = range(epochs)
plt.figure(figsize=(14, 4))
# One panel per metric pair: (subplot index, train curve, val curve,
# train label, val label, legend location, title).
panels = [
    (1, train_accs, val_accs, 'Training Accuracy', 'Validation Accuracy',
     'lower right', 'Training and Validation Accuracy'),
    (2, train_losses, val_losses, 'Training Loss', 'Validation Loss',
     'upper right', 'Training and Validation Loss'),
]
for idx, train_curve, val_curve, train_lbl, val_lbl, loc, title in panels:
    plt.subplot(1, 2, idx)
    plt.plot(epochs_range, train_curve, label=train_lbl)
    plt.plot(epochs_range, val_curve, label=val_lbl)
    plt.legend(loc=loc)
    plt.title(title)
plt.show()
# ---- Final evaluation on the held-out test set ------------------------------
model.eval()
correct = 0
total = 0
with torch.no_grad():
    for inputs, labels in test_loader:
        outputs = model(inputs)
        # view(-1) avoids squeeze()'s 0-d result on a size-1 batch, which
        # would make the comparison against (B,) labels misbehave.
        preds = (outputs.view(-1) >= 0.5).float()
        correct += (preds == labels).sum().item()
        total += labels.size(0)
accuracy = correct / total
print(f'Accuracy: {accuracy:.2%}')
运行结果为:
C:\Users\dell\anaconda3\envs\pytorch-gpu\python.exe C:\Users\dell\PycharmProjects\pythonProject5\rnn.py
Using device: cuda
age sex cp trestbps chol fbs ... exang oldpeak slope ca thal target
0 63 1 3 145 233 1 ... 0 2.3 0 0 1 1
1 37 1 2 130 250 0 ... 0 3.5 0 0 2 1
2 41 0 1 130 204 0 ... 0 1.4 2 0 2 1
3 56 1 1 120 236 0 ... 0 0.8 2 0 2 1
4 57 0 0 120 354 0 ... 1 0.6 2 0 2 1
.. ... ... .. ... ... ... ... ... ... ... .. ... ...
298 57 0 0 140 241 0 ... 1 0.2 1 0 3 0
299 45 1 3 110 264 0 ... 0 1.2 1 0 3 0
300 68 1 0 144 193 1 ... 0 3.4 1 2 3 0
301 57 1 0 130 131 0 ... 1 1.2 1 1 3 0
302 57 0 1 130 236 0 ... 0 0.0 1 1 2 0
[303 rows x 14 columns]
(272, 13) (272,)
SimpleRNNModel(
(rnn1): RNN(13, 128, batch_first=True)
(rnn2): RNN(128, 64, batch_first=True)
(rnn3): RNN(64, 32, batch_first=True)
(fc1): Linear(in_features=32, out_features=64, bias=True)
(fc2): Linear(in_features=64, out_features=1, bias=True)
(sigmoid): Sigmoid()
)
Epoch [1/100], Train Loss: 0.7162, Train Acc: 0.4485, Val Loss: 0.6927, Val Acc: 0.5161
Epoch [2/100], Train Loss: 0.7116, Train Acc: 0.4485, Val Loss: 0.6921, Val Acc: 0.5161
Epoch [3/100], Train Loss: 0.7030, Train Acc: 0.4485, Val Loss: 0.6915, Val Acc: 0.5161
Epoch [4/100], Train Loss: 0.6955, Train Acc: 0.4485, Val Loss: 0.6909, Val Acc: 0.5161
Epoch [5/100], Train Loss: 0.7044, Train Acc: 0.4485, Val Loss: 0.6902, Val Acc: 0.5161
Epoch [6/100], Train Loss: 0.6974, Train Acc: 0.4485, Val Loss: 0.6895, Val Acc: 0.5161
Epoch [7/100], Train Loss: 0.7161, Train Acc: 0.4485, Val Loss: 0.6887, Val Acc: 0.5161
Epoch [8/100], Train Loss: 0.6897, Train Acc: 0.4485, Val Loss: 0.6879, Val Acc: 0.5161
Epoch [9/100], Train Loss: 0.6955, Train Acc: 0.4485, Val Loss: 0.6871, Val Acc: 0.5161
Epoch [10/100], Train Loss: 0.6951, Train Acc: 0.4485, Val Loss: 0.6863, Val Acc: 0.5161
Epoch [11/100], Train Loss: 0.6971, Train Acc: 0.4485, Val Loss: 0.6854, Val Acc: 0.5161
Epoch [12/100], Train Loss: 0.6983, Train Acc: 0.4485, Val Loss: 0.6844, Val Acc: 0.5161
Epoch [13/100], Train Loss: 0.6913, Train Acc: 0.4485, Val Loss: 0.6834, Val Acc: 0.5161
Epoch [14/100], Train Loss: 0.6956, Train Acc: 0.4485, Val Loss: 0.6822, Val Acc: 0.5161
Epoch [15/100], Train Loss: 0.6979, Train Acc: 0.4485, Val Loss: 0.6809, Val Acc: 0.5161
Epoch [16/100], Train Loss: 0.6917, Train Acc: 0.4485, Val Loss: 0.6795, Val Acc: 0.5161
Epoch [17/100], Train Loss: 0.6934, Train Acc: 0.4485, Val Loss: 0.6781, Val Acc: 0.5161
Epoch [18/100], Train Loss: 0.6904, Train Acc: 0.4485, Val Loss: 0.6765, Val Acc: 0.5161
Epoch [19/100], Train Loss: 0.6934, Train Acc: 0.4485, Val Loss: 0.6749, Val Acc: 0.5161
Epoch [20/100], Train Loss: 0.6866, Train Acc: 0.4485, Val Loss: 0.6731, Val Acc: 0.5161
Epoch [21/100], Train Loss: 0.6930, Train Acc: 0.4485, Val Loss: 0.6713, Val Acc: 0.5161
Epoch [22/100], Train Loss: 0.6854, Train Acc: 0.4485, Val Loss: 0.6693, Val Acc: 0.5161
Epoch [23/100], Train Loss: 0.6896, Train Acc: 0.4485, Val Loss: 0.6673, Val Acc: 0.5161
Epoch [24/100], Train Loss: 0.6880, Train Acc: 0.4485, Val Loss: 0.6651, Val Acc: 0.5161
Epoch [25/100], Train Loss: 0.6864, Train Acc: 0.4485, Val Loss: 0.6628, Val Acc: 0.5161
Epoch [26/100], Train Loss: 0.6852, Train Acc: 0.4485, Val Loss: 0.6604, Val Acc: 0.5161
Epoch [27/100], Train Loss: 0.6726, Train Acc: 0.4559, Val Loss: 0.6579, Val Acc: 0.5806
Epoch [28/100], Train Loss: 0.6680, Train Acc: 0.4596, Val Loss: 0.6551, Val Acc: 0.6129
Epoch [29/100], Train Loss: 0.6740, Train Acc: 0.4853, Val Loss: 0.6520, Val Acc: 0.6452
Epoch [30/100], Train Loss: 0.6706, Train Acc: 0.5184, Val Loss: 0.6487, Val Acc: 0.6774
Epoch [31/100], Train Loss: 0.6603, Train Acc: 0.5625, Val Loss: 0.6451, Val Acc: 0.7097
Epoch [32/100], Train Loss: 0.6662, Train Acc: 0.6066, Val Loss: 0.6411, Val Acc: 0.7742
Epoch [33/100], Train Loss: 0.6671, Train Acc: 0.6507, Val Loss: 0.6369, Val Acc: 0.7742
Epoch [34/100], Train Loss: 0.6536, Train Acc: 0.6985, Val Loss: 0.6325, Val Acc: 0.7742
Epoch [35/100], Train Loss: 0.6624, Train Acc: 0.7463, Val Loss: 0.6278, Val Acc: 0.7742
Epoch [36/100], Train Loss: 0.6555, Train Acc: 0.7500, Val Loss: 0.6232, Val Acc: 0.7742
Epoch [37/100], Train Loss: 0.6496, Train Acc: 0.7831, Val Loss: 0.6183, Val Acc: 0.8065
Epoch [38/100], Train Loss: 0.6469, Train Acc: 0.7868, Val Loss: 0.6130, Val Acc: 0.8065
Epoch [39/100], Train Loss: 0.6350, Train Acc: 0.7868, Val Loss: 0.6073, Val Acc: 0.8387
Epoch [40/100], Train Loss: 0.6267, Train Acc: 0.8015, Val Loss: 0.6011, Val Acc: 0.8387
Epoch [41/100], Train Loss: 0.6287, Train Acc: 0.8015, Val Loss: 0.5943, Val Acc: 0.8710
Epoch [42/100], Train Loss: 0.6220, Train Acc: 0.8199, Val Loss: 0.5873, Val Acc: 0.8710
Epoch [43/100], Train Loss: 0.6305, Train Acc: 0.8199, Val Loss: 0.5801, Val Acc: 0.8710
Epoch [44/100], Train Loss: 0.6167, Train Acc: 0.8309, Val Loss: 0.5730, Val Acc: 0.8387
Epoch [45/100], Train Loss: 0.6232, Train Acc: 0.8309, Val Loss: 0.5658, Val Acc: 0.8387
Epoch [46/100], Train Loss: 0.6110, Train Acc: 0.8346, Val Loss: 0.5586, Val Acc: 0.8387
Epoch [47/100], Train Loss: 0.6092, Train Acc: 0.8419, Val Loss: 0.5511, Val Acc: 0.8710
Epoch [48/100], Train Loss: 0.5939, Train Acc: 0.8456, Val Loss: 0.5433, Val Acc: 0.8710
Epoch [49/100], Train Loss: 0.5915, Train Acc: 0.8493, Val Loss: 0.5353, Val Acc: 0.8710
Epoch [50/100], Train Loss: 0.5743, Train Acc: 0.8529, Val Loss: 0.5267, Val Acc: 0.8710
Epoch [51/100], Train Loss: 0.5873, Train Acc: 0.8419, Val Loss: 0.5178, Val Acc: 0.8710
Epoch [52/100], Train Loss: 0.5656, Train Acc: 0.8382, Val Loss: 0.5087, Val Acc: 0.8710
Epoch [53/100], Train Loss: 0.5860, Train Acc: 0.8456, Val Loss: 0.4995, Val Acc: 0.8710
Epoch [54/100], Train Loss: 0.5626, Train Acc: 0.8529, Val Loss: 0.4907, Val Acc: 0.8710
Epoch [55/100], Train Loss: 0.5273, Train Acc: 0.8529, Val Loss: 0.4815, Val Acc: 0.8710
Epoch [56/100], Train Loss: 0.5473, Train Acc: 0.8529, Val Loss: 0.4718, Val Acc: 0.8710
Epoch [57/100], Train Loss: 0.4958, Train Acc: 0.8529, Val Loss: 0.4618, Val Acc: 0.8710
Epoch [58/100], Train Loss: 0.5281, Train Acc: 0.8529, Val Loss: 0.4515, Val Acc: 0.8710
Epoch [59/100], Train Loss: 0.5039, Train Acc: 0.8529, Val Loss: 0.4413, Val Acc: 0.8710
Epoch [60/100], Train Loss: 0.4875, Train Acc: 0.8529, Val Loss: 0.4311, Val Acc: 0.8710
Epoch [61/100], Train Loss: 0.4740, Train Acc: 0.8529, Val Loss: 0.4208, Val Acc: 0.8710
Epoch [62/100], Train Loss: 0.4801, Train Acc: 0.8529, Val Loss: 0.4106, Val Acc: 0.8710
Epoch [63/100], Train Loss: 0.4951, Train Acc: 0.8566, Val Loss: 0.4009, Val Acc: 0.8710
Epoch [64/100], Train Loss: 0.4613, Train Acc: 0.8566, Val Loss: 0.3919, Val Acc: 0.8710
Epoch [65/100], Train Loss: 0.4372, Train Acc: 0.8566, Val Loss: 0.3831, Val Acc: 0.8710
Epoch [66/100], Train Loss: 0.4354, Train Acc: 0.8566, Val Loss: 0.3745, Val Acc: 0.8710
Epoch [67/100], Train Loss: 0.4409, Train Acc: 0.8529, Val Loss: 0.3665, Val Acc: 0.9032
Epoch [68/100], Train Loss: 0.4356, Train Acc: 0.8529, Val Loss: 0.3593, Val Acc: 0.9032
Epoch [69/100], Train Loss: 0.4179, Train Acc: 0.8529, Val Loss: 0.3528, Val Acc: 0.9032
Epoch [70/100], Train Loss: 0.4368, Train Acc: 0.8493, Val Loss: 0.3467, Val Acc: 0.9032
Epoch [71/100], Train Loss: 0.4249, Train Acc: 0.8456, Val Loss: 0.3412, Val Acc: 0.9032
Epoch [72/100], Train Loss: 0.3705, Train Acc: 0.8456, Val Loss: 0.3358, Val Acc: 0.9032
Epoch [73/100], Train Loss: 0.4140, Train Acc: 0.8456, Val Loss: 0.3310, Val Acc: 0.9032
Epoch [74/100], Train Loss: 0.3580, Train Acc: 0.8382, Val Loss: 0.3268, Val Acc: 0.9032
Epoch [75/100], Train Loss: 0.4012, Train Acc: 0.8382, Val Loss: 0.3228, Val Acc: 0.9032
Epoch [76/100], Train Loss: 0.3408, Train Acc: 0.8419, Val Loss: 0.3194, Val Acc: 0.9032
Epoch [77/100], Train Loss: 0.4313, Train Acc: 0.8419, Val Loss: 0.3161, Val Acc: 0.9032
Epoch [78/100], Train Loss: 0.3920, Train Acc: 0.8456, Val Loss: 0.3134, Val Acc: 0.9032
Epoch [79/100], Train Loss: 0.4273, Train Acc: 0.8456, Val Loss: 0.3113, Val Acc: 0.9032
Epoch [80/100], Train Loss: 0.4027, Train Acc: 0.8456, Val Loss: 0.3100, Val Acc: 0.9032
Epoch [81/100], Train Loss: 0.3329, Train Acc: 0.8456, Val Loss: 0.3089, Val Acc: 0.9032
Epoch [82/100], Train Loss: 0.3454, Train Acc: 0.8456, Val Loss: 0.3074, Val Acc: 0.9032
Epoch [83/100], Train Loss: 0.3247, Train Acc: 0.8456, Val Loss: 0.3061, Val Acc: 0.9032
Epoch [84/100], Train Loss: 0.4069, Train Acc: 0.8456, Val Loss: 0.3049, Val Acc: 0.9032
Epoch [85/100], Train Loss: 0.3228, Train Acc: 0.8456, Val Loss: 0.3044, Val Acc: 0.9032
Epoch [86/100], Train Loss: 0.3383, Train Acc: 0.8456, Val Loss: 0.3036, Val Acc: 0.9032
Epoch [87/100], Train Loss: 0.3300, Train Acc: 0.8456, Val Loss: 0.3026, Val Acc: 0.9032
Epoch [88/100], Train Loss: 0.2983, Train Acc: 0.8529, Val Loss: 0.3015, Val Acc: 0.9032
Epoch [89/100], Train Loss: 0.3488, Train Acc: 0.8566, Val Loss: 0.3007, Val Acc: 0.9032
Epoch [90/100], Train Loss: 0.3353, Train Acc: 0.8566, Val Loss: 0.3003, Val Acc: 0.9032
Epoch [91/100], Train Loss: 0.3324, Train Acc: 0.8566, Val Loss: 0.2999, Val Acc: 0.9032
Epoch [92/100], Train Loss: 0.3730, Train Acc: 0.8566, Val Loss: 0.2995, Val Acc: 0.9032
Epoch [93/100], Train Loss: 0.3356, Train Acc: 0.8566, Val Loss: 0.2990, Val Acc: 0.9032
Epoch [94/100], Train Loss: 0.3115, Train Acc: 0.8603, Val Loss: 0.2991, Val Acc: 0.9032
Epoch [95/100], Train Loss: 0.3469, Train Acc: 0.8603, Val Loss: 0.2994, Val Acc: 0.9032
Epoch [96/100], Train Loss: 0.3202, Train Acc: 0.8603, Val Loss: 0.2999, Val Acc: 0.9032
Epoch [97/100], Train Loss: 0.3344, Train Acc: 0.8603, Val Loss: 0.3004, Val Acc: 0.9032
Epoch [98/100], Train Loss: 0.2741, Train Acc: 0.8640, Val Loss: 0.3013, Val Acc: 0.9032
Epoch [99/100], Train Loss: 0.3008, Train Acc: 0.8640, Val Loss: 0.3023, Val Acc: 0.9032
Epoch [100/100], Train Loss: 0.2924, Train Acc: 0.8676, Val Loss: 0.3034, Val Acc: 0.9032
Accuracy: 90.32%
进程已结束,退出代码为 0
总结心得:通过这次 RNN 模型识别心脏病的实战,我学到了数据预处理的重要性,尤其是标准化和调整数据形状对模型性能的影响。RNN 模型擅长处理时间序列数据,但需注意梯度消失问题。模型调优、训练和评估中,选择合适的优化器和参数,以及监控训练过程中的指标至关重要。