pytorch神经网络学习笔记07----一个使用pytorch对卷积神经网络做交叉验证的例子

下面是一个使用 PyTorch 对卷积神经网络进行交叉验证的示例。假设我们有一个模型类,名为 “CNN”。

import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import KFold

# 定义模型类
class CNN(nn.Module):
    """Small CNN for 3x32x32 images: two conv/pool stages, two FC layers, 10 logits.

    Architecture: (3,32,32) -> conv 16ch -> pool -> conv 32ch -> pool
    -> flatten 32*8*8 -> fc 128 -> fc 10.
    """

    def __init__(self):
        super(CNN, self).__init__()
        # Feature extractor: 3 -> 16 -> 32 channels; each 3x3 conv keeps H/W
        # (padding=1) and the shared 2x2 max-pool halves them afterwards.
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        # Classifier head: flattened 32*8*8 feature map -> 128 -> 10 classes.
        self.fc1 = nn.Linear(32 * 8 * 8, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        # Two conv -> ReLU -> pool stages: 32x32 -> 16x16 -> 8x8 spatially.
        for conv in (self.conv1, self.conv2):
            x = self.pool(nn.functional.relu(conv(x)))
        # Flatten per-sample feature maps, then run the fully connected head.
        x = x.view(-1, 32 * 8 * 8)
        x = nn.functional.relu(self.fc1(x))
        return self.fc2(x)

# ---- Cross-validation hyperparameters ----
k = 5            # number of folds
epochs = 10
batch_size = 64
lr = 0.001

# Loss function is stateless and can be shared across folds; the optimizer
# cannot — it must be created per fold, after the fold's fresh model.
criterion = nn.CrossEntropyLoss()

# Initialize the k-fold splitter.
# NOTE(review): `trainset` is not defined in this file — it is assumed to be a
# torch Dataset yielding (image, label) pairs of 3x32x32 tensors; confirm with
# the surrounding code.
kf = KFold(n_splits=k, shuffle=True)

# Cross-validation training loop: one fresh model per fold.
for fold, (train_indices, val_indices) in enumerate(kf.split(trainset)):
    # Split the dataset into training / validation subsets for this fold.
    train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_indices)
    val_sampler = torch.utils.data.sampler.SubsetRandomSampler(val_indices)
    train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, sampler=val_sampler)

    # Fresh model AND optimizer for every fold.
    # Bug fix: the original created the optimizer once at module level, before
    # `net` existed (NameError), and would also have bound it to a stale model
    # instead of the fold's fresh CNN().
    net = CNN()
    optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)

    # Train the fold's model.
    for epoch in range(epochs):
        running_loss = 0.0
        num_batches = 0
        for i, data in enumerate(train_loader):
            inputs, labels = data
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            num_batches += 1

        # Report average training loss for this epoch (guard against an
        # empty loader, which previously left `i` undefined).
        print("Fold [%d]/[%d] Epoch [%d]/[%d] Loss: %.3f" % (fold+1, k, epoch+1, epochs, running_loss / max(num_batches, 1)))

        # Validate on the held-out fold; no gradients needed.
        correct = 0
        total = 0
        with torch.no_grad():
            for data in val_loader:
                images, labels = data
                outputs = net(images)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

        accuracy = 100 * correct / total
        print('Validation Accuracy: %d %%' % (accuracy))

下面是一个使用 PyTorch 对卷积神经网络进行交叉验证的示例。假设我们有一个模型类,名为 “CNN”。

Copy code
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import KFold

定义模型类

class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
self.pool = nn.MaxPool2d(2, 2)
self.fc1 = nn.Linear(32 * 8 * 8, 128)
self.fc2 = nn.Linear(128, 10)

def forward(self, x):
    x = self.pool(nn.functional.relu(self.conv1(x)))
    x = self.pool(nn.functional.relu(self.conv2(x)))
    x = x.view(-1, 32 * 8 * 8)
    x = nn.functional.relu(self.fc1(x))
    x = self.fc2(x)
    return x

定义交叉验证参数

k = 5
epochs = 10
batch_size = 64
lr = 0.001

定义损失函数和优化器

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)

加载数据集

train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True)

初始化 k-fold

kf = KFold(n_splits=k, shuffle=True)

交叉验证训练

for fold, (train_indices, val_indices) in enumerate(kf.split(trainset)):

数据分为训练集和验证集

train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_indices)
val_sampler = torch.utils.data.sampler.SubsetRandomSampler(val_indices)
train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, sampler=val_sampler)

初始化模型

net = CNN()

训练模型

for epoch in range(epochs):
running_loss = 0.0
for i, data in enumerate(train_loader):
inputs, labels = data
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()

# 输出训练结果
print("Fold [%d]/[%d] Epoch [%d]/[%d] Loss: %.3f" % (fold+1, k, epoch+1, epochs, running_loss/(i+1)))

# 验证模型
correct = 0
total = 0
with torch.no_grad():
    for data in val_loader:
        images, labels = data
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

accuracy = 100 * correct / total
print('Validation Accuracy: %d %%' % (accuracy))

这个示例代码中,我们首先定义了我们的 CNN 模型,并设置了一些超参数和优化器和损失函数。

然后我们使用 K-Fold 方法将数据拆分为训练和验证集,并利用 PyTorch 中的 DataLoader 将数据加载至模型中。

接着我们对每个 fold 进行训练和验证,跟踪损失并输出结果。最后计算交叉验证的准确率(accuracy)并打印出来。

  • 6
    点赞
  • 26
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
神经网络交叉验证(Cross-Validation for Neural Networks)是一种评估模型性能并防止过拟合的方法。在训练神经网络时,我们通常会将数据集划分为训练集、验证集和测试集。交叉验证则是在这些基本划分之上进行的更细致的过程,以便更准确地估计模型在未知数据上的表现。

以下是一个简单的K-Fold交叉验证神经网络代码示例,使用Python和深度学习库Keras(假设你已经安装了TensorFlow或PyTorch作为后端):

```python
# 导入必要的库
from keras.models import Sequential
from keras.layers import Dense
from sklearn.model_selection import KFold

# 假设我们有一个数据集X_data和对应的标签y_data
X_data = ...
y_data = ...

# 定义神经网络模型
def create_model():
    model = Sequential()
    model.add(Dense(units=64, activation='relu', input_dim=X_data.shape[1]))  # input_dim 应为特征数,而非整个 shape 元组
    model.add(Dense(units=32, activation='relu'))
    model.add(Dense(units=1, activation='sigmoid'))  # 输出层假设为二分类问题
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return model

# K-Fold交叉验证
kfold = KFold(n_splits=5, shuffle=True)
scores = []

for train_index, val_index in kfold.split(X_data):
    X_train, X_val = X_data[train_index], X_data[val_index]
    y_train, y_val = y_data[train_index], y_data[val_index]

    model = create_model()
    model.fit(X_train, y_train, epochs=50, batch_size=32, validation_data=(X_val, y_val))

    # 记录每个折叠的验证准确率(evaluate 返回 [loss, accuracy],取第二项)
    scores.append(model.evaluate(X_val, y_val, verbose=0)[1])

# 打印平均性能
mean_accuracy = sum(scores) / len(scores)
print(f"Average accuracy: {mean_accuracy}")
```

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值