Four-Fold Cross-Validation Training Function

Notes

This is the preliminary setup: before each fold we need to reset the model's parameters with net.apply(weights_init), so this helper has to be defined first.

import torch
import torch.nn as nn
import torch.nn.init as init
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset

def weights_init(m):
    # Re-initialize convolutional and linear layers with Xavier-uniform weights
    if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
        init.xavier_uniform_(m.weight)
        if m.bias is not None:
            init.constant_(m.bias, 0)
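
As a quick sanity check, net.apply(fn) calls fn recursively on every submodule, which is what lets a single call reset all Conv2d and Linear layers at once. A minimal sketch (the two-layer toy model is hypothetical, only for illustration):

# Hypothetical toy model, just to demonstrate how net.apply(weights_init) works
toy = nn.Sequential(nn.Linear(8, 4), nn.ReLU(), nn.Linear(4, 2))
before = toy[0].weight.clone()
toy.apply(weights_init)  # visits every submodule, re-initializing the Linear layers
print(torch.equal(before, toy[0].weight))  # almost surely False: weights were re-drawn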

Alternatively, weights_init can be defined inside train_with_cross_validation itself, before the fold loop; the body is identical to the module-level version above, so it is not repeated here.

Below is the training function itself. Note the placement of net.apply(weights_init): it must come after the model is moved to the GPU; placing it before the move causes the training function to exit immediately.

def train_with_cross_validation(net, x_train, y_train, x_test, y_test, epochs, try_gpu, batch_size):
    # Convert the data to tensors; CrossEntropyLoss expects integer (long) class labels
    x_train = torch.Tensor(x_train)
    y_train = torch.as_tensor(y_train, dtype=torch.long)
    x_test = torch.Tensor(x_test)
    y_test = torch.as_tensor(y_test, dtype=torch.long)

    # Define the loss function
    criterion = nn.CrossEntropyLoss()

    # Split the data into four folds (any remainder beyond 4 * fold_size is dropped)
    fold_size = len(x_train) // 4
    folds = [(x_train[i:i + fold_size], y_train[i:i + fold_size])
             for i in range(0, 4 * fold_size, fold_size)]

    # Train and validate on each of the four folds
    for fold in range(4):
        # Build the training set (all folds except the current one) and the validation set
        train_set = folds[:fold] + folds[fold + 1:]
        train_set = torch.cat([x[0] for x in train_set]), torch.cat([x[1] for x in train_set])
        val_set = folds[fold]

        # Create the data loaders
        train_loader = DataLoader(TensorDataset(*train_set), batch_size=batch_size, shuffle=True)
        val_loader = DataLoader(TensorDataset(*val_set), batch_size=batch_size)

        # Move the model to the GPU (if available), then reset its parameters
        if try_gpu and torch.cuda.is_available():
            net = net.cuda()
        net.apply(weights_init)

        # Re-create the optimizer each fold so Adam's moment estimates don't leak across folds
        optimizer = optim.Adam(net.parameters())

        # Train the model
        for epoch in range(epochs):
            net.train()
            for inputs, labels in train_loader:
                if try_gpu and torch.cuda.is_available():
                    inputs = inputs.cuda()
                    labels = labels.cuda()

                optimizer.zero_grad()
                outputs = net(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()

            # Evaluate the model on the validation set
            net.eval()
            val_loss = 0.0
            correct = 0
            total = 0
            with torch.no_grad():
                for inputs, labels in val_loader:
                    if try_gpu and torch.cuda.is_available():
                        inputs = inputs.cuda()
                        labels = labels.cuda()

                    outputs = net(inputs)
                    val_loss += criterion(outputs, labels).item()
                    _, predicted = torch.max(outputs, 1)
                    total += labels.size(0)
                    correct += (predicted == labels).sum().item()

            # Print training and validation results (train loss is that of the last batch)
            print(f"Fold: {fold+1}, Epoch: {epoch+1}, Train Loss: {loss.item():.4f}, "
                  f"Val Loss: {val_loss/len(val_loader):.4f}, Val Acc: {(correct/total)*100:.2f}%")

    # Evaluate the model on the test set
    test_set = TensorDataset(x_test, y_test)
    test_loader = DataLoader(test_set, batch_size=batch_size)
    net.eval()
    test_loss = 0.0
    correct = 0
    total = 0
    with torch.no_grad():
        for inputs, labels in test_loader:
            if try_gpu and torch.cuda.is_available():
                inputs = inputs.cuda()
                labels = labels.cuda()

            outputs = net(inputs)
            test_loss += criterion(outputs, labels).item()
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    # Print test results
    print(f"Test Loss: {test_loss/len(test_loader):.4f}, Test Acc: {(correct/total)*100:.2f}%")

How it is called:

train_with_cross_validation(EEGNet(), normalized_train_X, train_y, normalized_test_X, test_y, epochs, try_gpu, batch_size)
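
For reference, here is a self-contained smoke test on random data. The shapes, the TinyNet stand-in for EEGNet, and the hyperparameter values are all assumptions for illustration, not from the original post:

import numpy as np

# Hypothetical stand-in for EEGNet, just to exercise the training loop
class TinyNet(nn.Module):
    def __init__(self, n_features=16, n_classes=2):
        super().__init__()
        self.fc = nn.Linear(n_features, n_classes)

    def forward(self, x):
        return self.fc(x)

# Random stand-ins for the normalized EEG features and integer class labels
normalized_train_X = np.random.randn(80, 16).astype("float32")
train_y = np.random.randint(0, 2, size=80)
normalized_test_X = np.random.randn(20, 16).astype("float32")
test_y = np.random.randint(0, 2, size=20)

train_with_cross_validation(TinyNet(), normalized_train_X, train_y,
                            normalized_test_X, test_y,
                            epochs=2, try_gpu=False, batch_size=16)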