pytorch 高级CNN模型

该博客展示了如何利用PyTorch构建一个包含Inception模块的卷积神经网络(CNN)。首先,对MNIST数据集进行预处理并加载。接着,定义了一个Inception模块,它包含不同大小的卷积核和池化操作。然后,构建了整体的CNN模型,包括输入1通道、输出10通道、卷积核为5×5的卷积层、Inception层和全连接层。训练过程使用Adam优化器和交叉熵损失函数,并在每个epoch结束后评估模型在测试集上的准确率。
摘要由CSDN通过智能技术生成
import torch
from torch import optim
from torch.autograd.grad_mode import no_grad
from torch.nn import parameter
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
import torch.nn.functional as F
 
batch_size = 64

# Preprocessing: convert PIL images to tensors and normalize with the
# standard MNIST mean/std.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307, ), (0.3081, ))
])
# FIX: the original name was misspelled "transfrom"; keep it as an alias
# so any external code referring to the old name still works.
transfrom = transform

# Load the MNIST training and test sets (downloaded on first run).

train_dataset = datasets.MNIST(root="dataset/mnist/",
                               train=True,
                               download=True,
                               transform=transform)

train_load = DataLoader(train_dataset,
                        batch_size=batch_size,
                        shuffle=True,
                        num_workers=6)

test_dataset = datasets.MNIST(root="dataset/mnist/",
                              train=False,
                              download=True,
                              transform=transform)

# FIX: shuffle=False for evaluation — order does not affect accuracy, and a
# fixed order makes test runs reproducible and slightly cheaper.
test_load = DataLoader(test_dataset,
                       batch_size=batch_size,
                       shuffle=False,
                       num_workers=6)
 
 
class Inception(torch.nn.Module):
    """GoogLeNet-style Inception block.

    Four parallel branches — avg-pool + 1x1 conv, plain 1x1 conv,
    1x1 -> 5x5, and 1x1 -> 3x3 -> 3x3 — produce 24/16/24/24 channels
    respectively and are concatenated to 88 output channels. All paddings
    are chosen so the spatial size is preserved.

    Note: the 16-channel 1x1 reduction conv is shared by the last three
    branches (a deliberate weight-sharing choice in this implementation).
    """

    def __init__(self, in_channels):
        super(Inception, self).__init__()
        # Module creation order is kept stable: it determines how seeded
        # RNG state is consumed during weight initialization.
        self.conv2dpool_1x1 = torch.nn.Conv2d(in_channels=in_channels, out_channels=24, kernel_size=1)
        self.conv2d_1x1 = torch.nn.Conv2d(in_channels=in_channels, out_channels=16, kernel_size=1)
        self.conv2d_3x3_1 = torch.nn.Conv2d(16, 24, kernel_size=3, padding=1)
        self.conv2d_3x3_2 = torch.nn.Conv2d(24, 24, kernel_size=3, padding=1)
        self.conv2d_5x5 = torch.nn.Conv2d(16, 24, kernel_size=5, padding=2)

    def forward(self, x):
        # Branch 1: 3x3 average pool (stride 1, padded) followed by 1x1 conv.
        pooled = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        pooled = self.conv2dpool_1x1(pooled)

        # The shared 1x1 reduction feeds the three remaining branches;
        # computing it once is numerically identical to calling it per branch.
        reduced = self.conv2d_1x1(x)
        via_5x5 = self.conv2d_5x5(reduced)
        via_3x3 = self.conv2d_3x3_2(self.conv2d_3x3_1(reduced))

        # Concatenate along the channel axis: 24 + 16 + 24 + 24 = 88.
        return torch.cat([pooled, reduced, via_5x5, via_3x3], dim=1)
 
class CNNnet(torch.nn.Module):
    """
    CNN with Inception blocks for MNIST (N x 1 x 28 x 28 input).

    conv(1->10, 5x5) + max-pool 2x2 + relu   -> N x 10 x 12 x 12
    Inception(10)                            -> N x 88 x 12 x 12
    conv(88->20, 5x5) + max-pool 2x2 + relu  -> N x 20 x 4 x 4
    Inception(20)                            -> N x 88 x 4 x 4
    flatten                                  -> N x 1408
    fully connected 1408 -> 10               -> N x 10 class logits
    """
    def __init__(self):
        super(CNNnet, self).__init__()
        self.conv2d1 = torch.nn.Conv2d(in_channels=1, out_channels=10, kernel_size=5)
        self.conv2d2 = torch.nn.Conv2d(in_channels=88, out_channels=20, kernel_size=5)
        self.mp = torch.nn.MaxPool2d(2)
        self.fc = torch.nn.Linear(1408, 10)
        self.inception1 = Inception(in_channels=10)
        self.inception2 = Inception(in_channels=20)

    def forward(self, x):
        batch_size = x.size()[0]
        x = F.relu(self.mp(self.conv2d1(x)))   # -> N x 10 x 12 x 12
        x = self.inception1(x)                 # -> N x 88 x 12 x 12
        x = F.relu(self.mp(self.conv2d2(x)))   # -> N x 20 x 4 x 4
        x = self.inception2(x)                 # -> N x 88 x 4 x 4
        x = x.view(batch_size, -1)             # -> N x 1408
        # BUG FIX: the original returned the flattened 1408-dim features and
        # never applied self.fc, so the classifier head defined in __init__
        # was dead code and CrossEntropyLoss saw 1408 "classes" instead of 10.
        return self.fc(x)
 
# Build the model and pick the training device (GPU when available).
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = CNNnet()
model.to(device)
# reduction='sum' accumulates per-sample losses over each batch; the training
# loop later averages the running total over 300 batches before printing.
criterion = torch.nn.CrossEntropyLoss(reduction='sum')
optimizer = torch.optim.Adam(model.parameters(), lr=0.0003)
 
def train(epoch):
    """Run one training epoch over train_load.

    Prints the average accumulated loss every 300 batches. Relies on the
    module-level model, optimizer, criterion, device and train_load.
    """
    running_loss = 0
    for batch_idx, (inputs, labels) in enumerate(train_load):
        inputs = inputs.to(device)
        labels = labels.to(device)

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        # Report and reset the running loss once per 300 batches.
        if batch_idx % 300 == 299:
            print("epoch %d  %5d张图片  loss: %.3f" % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0
 
 
def run_data():
    """Evaluate the model on test_load and print overall accuracy.

    Runs under torch.no_grad(); relies on the module-level model, device
    and test_load.
    """
    correct = 0
    total = 0
    with torch.no_grad():
        for inputs, labels in test_load:
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = model(inputs)
            # Predicted class = index of the max logit per sample.
            predictions = outputs.argmax(dim=1)
            total += labels.size(0)
            correct += (predictions == labels).sum().item()

    print("Accuracy on test : %.2f %%   [ %d, %d ]" % (100 * correct / total, correct, total))
 
 
 
if __name__ == '__main__':
    # Train for 30 epochs, evaluating on the test set after each one.
    # (The guard is also required: the DataLoaders use worker processes.)
    NUM_EPOCHS = 30
    for epoch in range(NUM_EPOCHS):
        train(epoch)
        run_data()
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值