一个比较完整的pytorch项目

模型定义:

import torch
import torch.nn as nn

class DF(nn.Module):
    """Deep Fingerprinting 1-D CNN.

    Four convolutional blocks (channels 1->32->64->128->256) feed two
    fully-connected layers; ``forward`` returns ``(logits, features)``
    where ``features`` is the 512-d penultimate activation.

    Args:
        nb_classes: number of output classes for the final linear layer.
    """

    @staticmethod
    def _conv_block(c_in, c_out, activation, p_drop=0.1):
        # Two (Conv1d -> BatchNorm -> activation) pairs, then overlapping
        # max-pooling (kernel 8, stride 4) and light dropout.  Layer order
        # mirrors the canonical DF architecture exactly.
        return nn.Sequential(
            nn.Conv1d(c_in, c_out, kernel_size=8, stride=1, padding=0),
            nn.BatchNorm1d(c_out),
            activation(),
            nn.Conv1d(c_out, c_out, kernel_size=8, stride=1, padding=0),
            nn.BatchNorm1d(c_out),
            activation(),
            nn.MaxPool1d(kernel_size=8, stride=4, padding=0),
            nn.Dropout(p_drop),
        )

    @staticmethod
    def _fc_block(n_in, n_out, p_drop, flatten=False):
        # Linear -> BatchNorm -> ReLU -> Dropout, optionally preceded by a
        # Flatten (used only for the first FC block, which consumes the
        # conv feature map).
        layers = [nn.Flatten()] if flatten else []
        layers += [
            nn.Linear(n_in, n_out),
            nn.BatchNorm1d(n_out),
            nn.ReLU(),
            nn.Dropout(p_drop),
        ]
        return nn.Sequential(*layers)

    def __init__(self, nb_classes):
        super().__init__()
        # Only the first block uses ELU; the rest use ReLU (per the DF paper).
        self.block1 = self._conv_block(1, 32, nn.ELU)
        self.block2 = self._conv_block(32, 64, nn.ReLU)
        self.block3 = self._conv_block(64, 128, nn.ReLU)
        self.block4 = self._conv_block(128, 256, nn.ReLU)
        # 3328 = 256 channels * 13 positions, i.e. a 5000-sample input after
        # the four conv/pool blocks — assumes fixed input length 5000.
        self.fc1 = self._fc_block(3328, 512, p_drop=0.7, flatten=True)
        self.fc2 = self._fc_block(512, 512, p_drop=0.5)
        # Kept as a Sequential so parameter names match the original layout.
        self.out = nn.Sequential(nn.Linear(512, nb_classes))

    def forward(self, x):
        """Run the network.

        Args:
            x: tensor of shape (batch, 1, 5000).

        Returns:
            (logits, features): class scores of shape (batch, nb_classes)
            and the 512-d penultimate features.
        """
        for block in (self.block1, self.block2, self.block3, self.block4):
            x = block(x)
        features = self.fc2(self.fc1(x))
        return self.out(features), features

模型训练:

# ---------------------------------------------------------------------------
# Training script: fit the DF model on `train_dl`, evaluating on `test_dl`
# after every training step and logging loss/accuracy histories.
# ---------------------------------------------------------------------------
train_loader = train_dl
NB_CLASSES = 50
EPOCH = 100
BATCH_SIZE = 128
LR = 0.001

cnn = DF(NB_CLASSES).float().cuda()

loss_func = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(cnn.parameters(), lr=LR)
validation_loader = test_dl
validation_size = 450
train_size = 4050

# Histories (were referenced but never defined in the original — NameError).
resnet_loss = []
resnet_accuracy = []


def _evaluate(model, loader, size):
    """Return (avg_loss, accuracy%, corrects) of `model` over `loader`.

    Switches to eval mode and disables autograd so BatchNorm running
    statistics are not polluted by validation batches and dropout is off;
    restores train mode before returning.
    """
    model.eval()
    corrects = 0
    total_loss = 0.0
    with torch.no_grad():
        for v_x, v_y in loader:
            v_x = v_x.cuda()
            v_y = v_y.cuda()
            logit = model(v_x.unsqueeze(-2).float())[0]
            total_loss += loss_func(logit, v_y.squeeze().long()).item()
            preds = torch.max(logit, 1)[1].view(v_y.size())
            # .item() so we accumulate a Python int, not a CUDA tensor.
            corrects += (preds == v_y).sum().item()
    model.train()
    return total_loss / size, 100.0 * corrects / size, corrects


cnn.train()
for epoch in range(EPOCH):
    for step, (b_x, b_y) in enumerate(train_loader):
        # b_x: (batch, 5000) sequence; unsqueeze(-2) adds the channel dim.
        b_x = b_x.cuda()
        b_y = b_y.cuda()
        output = cnn(b_x.unsqueeze(-2).float())[0]
        loss = loss_func(output, b_y.squeeze().long())
        # .item(): storing the raw tensor would keep the whole autograd
        # graph alive and leak GPU memory across steps.
        resnet_loss.append(loss.item())

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # NOTE(review): validating after *every* step is very expensive;
        # kept for parity with the original, raise the modulus to speed up.
        if step % 1 == 0:
            avg_loss, accuracy, corrects = _evaluate(
                cnn, validation_loader, validation_size
            )
            resnet_accuracy.append(accuracy)

            print('Epoch: {:2d}({:6d}/{}) Evaluation - loss: {:.6f}  acc: {:3.4f}%({}/{})'.format(
                                                                            epoch,
                                                                            step * BATCH_SIZE,
                                                                            train_size,
                                                                            avg_loss,
                                                                            accuracy,
                                                                            corrects,
                                                                            validation_size))
  • 0
    点赞
  • 3
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值