Reproducing Classic Networks in PyTorch: GoogLeNet

import numpy as np
import torch
from torch import nn
from torch.autograd import Variable  # Variable is a no-op wrapper in PyTorch >= 0.4, kept for the original code's style
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10

# Helper: conv + BN + ReLU block. Args: [in channels, out channels, kernel size, stride, padding]
def conv_relu(in_channel, out_channel, kernel, stride=1, padding=0):
    layer = nn.Sequential(
        nn.Conv2d(in_channel, out_channel, kernel, stride, padding),
        nn.BatchNorm2d(out_channel, eps=1e-3),  # batch normalization
        nn.ReLU(True)
    )
    return layer
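
# A quick sanity check of conv_relu (an illustrative addition, not in the
# original post): with kernel=3 and padding=1 the spatial size is preserved,
# only the channel count changes.
_check = conv_relu(3, 16, kernel=3, padding=1)
print(_check(torch.zeros(1, 3, 32, 32)).shape)  # torch.Size([1, 16, 32, 32])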

# Inception module with four parallel branches:
# 1) 1x1 conv; 2) 1x1 conv then 3x3 conv; 3) 1x1 conv then 5x5 conv; 4) 3x3 max pool then 1x1 conv
class inception(nn.Module):
    def __init__(self, in_channel, out1_1, out2_1, out2_3, out3_1, out3_5, out4_1):
        super(inception, self).__init__()
        self.branch1x1 = conv_relu(in_channel, out1_1, 1)

        self.branch3x3 = nn.Sequential(
            conv_relu(in_channel, out2_1, 1),
            conv_relu(out2_1, out2_3, 3, padding=1)
        )

        self.branch5x5 = nn.Sequential(
            conv_relu(in_channel, out3_1, 1),
            conv_relu(out3_1, out3_5, 5, padding=2),
        )

        self.branch_pool = nn.Sequential(
            nn.MaxPool2d(3, stride=1, padding=1),
            conv_relu(in_channel, out4_1, 1),
        )

    def forward(self, x):
        f1 = self.branch1x1(x)
        # print("f1:", f1.shape)
        f2 = self.branch3x3(x)
        # print("f2:", f2.shape)
        f3 = self.branch5x5(x)
        # print("f3:", f3.shape)
        f4 = self.branch_pool(x)
        # print("f4:", f4.shape)
        output = torch.cat((f1, f2, f3, f4), dim=1)
        # print("out:", output.shape)
        # 输出会发现图像的大小没有改变,通道数增加了
        return output
# A quick test of the inception module
test_net = inception(3, 64, 48, 64, 64, 96, 32)
test_x = Variable(torch.zeros(1, 3, 96, 96))
test_y = test_net(test_x)
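# Printing the result confirms the comment above (illustrative addition):
# the 96x96 spatial size is preserved and the channels sum to
# 64 + 64 + 96 + 32 = 256.
print("input:", test_x.shape, "output:", test_y.shape)  # [1, 3, 96, 96] -> [1, 256, 96, 96]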

# GoogLeNet: several inception modules chained in sequence
class googlenet(nn.Module):
    def __init__(self, in_channel, num_classes, verbose=False):
        super(googlenet, self).__init__()
        self.verbose = verbose

        self.block1 = nn.Sequential(
            conv_relu(in_channel, out_channel=64, kernel=7, stride=2, padding=3),
            nn.MaxPool2d(3, 2)
        )

        self.block2 = nn.Sequential(
            conv_relu(64, 64, kernel=1),
            conv_relu(64, 192, kernel=3, padding=1),
            nn.MaxPool2d(3, 2)
        )

        self.block3 = nn.Sequential(
            inception(192, 64, 96, 128, 16, 32, 32),
            inception(256, 128, 128, 192, 32, 96, 64),
            nn.MaxPool2d(3, 2)
        )

        self.block4 = nn.Sequential(
            inception(480, 192, 96, 208, 16, 48, 64),
            inception(512, 160, 112, 224, 24, 64, 64),
            inception(512, 128, 128, 256, 24, 64, 64),
            inception(512, 112, 144, 288, 32, 64, 64),
            inception(528, 256, 160, 320, 32, 128, 128),
            nn.MaxPool2d(3, 2)
        )

        self.block5 = nn.Sequential(
            inception(832, 256, 160, 320, 32, 128, 128),
            inception(832, 384, 192, 384, 48, 128, 128),
            nn.AvgPool2d(2)
        )

        # block5 outputs 384 + 384 + 128 + 128 = 1024 channels, pooled down to 1x1
        self.classifier = nn.Linear(1024, num_classes)


    def forward(self, x):
        x = self.block1(x)
        if self.verbose:
            print("block1:", x.shape)

        x = self.block2(x)
        if self.verbose:
            print("block2:", x.shape)

        x = self.block3(x)
        if self.verbose:
            print("block3:", x.shape)

        x = self.block4(x)
        if self.verbose:
            print("block4:", x.shape)

        x = self.block5(x)
        if self.verbose:
            print("block5:", x.shape)

        x = x.view(x.shape[0], -1)
        x = self.classifier(x)
        return x
# A test run of the full network (verbose=True prints the shape after each block):
"""
test_net = googlenet(3, 10, True)
test_x = Variable(torch.zeros(1, 3, 96, 96))
test_y = test_net(test_x)
print("out:", test_y.shape)
"""
# Expected output:
# block1: torch.Size([1, 64, 23, 23])
# block2: torch.Size([1, 192, 11, 11])
# block3: torch.Size([1, 480, 5, 5])
# block4: torch.Size([1, 832, 2, 2])
# block5: torch.Size([1, 1024, 1, 1])
# out: torch.Size([1, 10])

def data_tf(x):
    x = x.resize((96, 96), 2)  # PIL resize to 96x96; 2 = Image.BILINEAR
    x = np.array(x, dtype="float32") / 255  # scale to [0, 1]
    x = (x - 0.5) / 0.5  # normalize to [-1, 1]
    # shape here is 96 x 96 x 3 (H x W x C)
    x = x.transpose((2, 0, 1))  # convert to PyTorch's 3 x 96 x 96 (C x H x W)
    x = torch.from_numpy(x)
    return x
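
# The same preprocessing can also be written with torchvision.transforms
# (a sketch, assuming torchvision is available; data_tf_alt is an illustrative
# name, not from the original post, and should behave like data_tf above):
from torchvision import transforms

data_tf_alt = transforms.Compose([
    transforms.Resize((96, 96)),                              # bilinear resize by default
    transforms.ToTensor(),                                    # HWC uint8 [0, 255] -> CHW float [0, 1]
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),   # -> [-1, 1]
])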

train_set = CIFAR10("./data_cifar10", train=True, transform=data_tf, download=True)
train_data = DataLoader(train_set, batch_size=64, shuffle=True)
test_set = CIFAR10("./data_cifar10", train=False, transform=data_tf, download=True)
test_data = DataLoader(test_set, batch_size=128, shuffle=False)

net = googlenet(3, 10)
optimizer = torch.optim.SGD(net.parameters(), lr=1e-1)
criterion = nn.CrossEntropyLoss()

i = 0

for e in range(20):
    losses = 0
    acces = 0
    net.train()
    for im, label in train_data:
        i = i + 1
        im = Variable(im)
        label = Variable(label)
        out = net(im)
        loss = criterion(out, label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        losses = losses + loss.item()

        _, pred = out.max(1)
        acc = (pred == label).sum().item() / im.shape[0]
        acces = acces + acc
        print("iteration =", i, "loss =", loss.item(), "acc =", acc)
    print("epoch: {}, Train Loss: {:.6f}, Train Acc: {:.6f}"
          .format(e + 1, losses / len(train_data), acces / len(train_data)))
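
# The post builds test_data but never uses it; a minimal evaluation sketch
# (illustrative addition) that reports accuracy on the CIFAR10 test split:
net.eval()
correct = 0
total = 0
with torch.no_grad():
    for im, label in test_data:
        out = net(im)
        _, pred = out.max(1)
        correct += (pred == label).sum().item()
        total += im.shape[0]
print("Test Acc: {:.6f}".format(correct / total))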
