GoogLeNet

Structure

(figure: overall structure of GoogLeNet)

Inception

(figure: the Inception module)

The Inception module runs four parallel branches over the same input: a 1×1 convolution, a 1×1 reduction followed by a 3×3 convolution, a 1×1 reduction followed by a 5×5 convolution, and a 3×3 max pool followed by a 1×1 convolution. The four outputs are concatenated along the channel dimension, so the module's output depth is simply the sum of the four branch widths, as the sketch below shows.
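
A minimal sketch of this channel arithmetic (Inception here is the class defined in model.py below):

import torch
from model import Inception  # the module class from model.py below

# inception_3a: ch1x1=64, ch3x3=128, ch5x5=32, pool_proj=32
block = Inception(192, 64, 96, 128, 16, 32, 32)
x = torch.randn(1, 192, 28, 28)    # dummy input feature map
print(block(x).shape)              # torch.Size([1, 256, 28, 28]); 64 + 128 + 32 + 32 = 256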

1×1 Convolutions for Dimensionality Reduction

(figure: 1×1 convolutions for dimensionality reduction)

A 1×1 convolution placed before the 3×3 and 5×5 convolutions shrinks the channel depth those expensive kernels operate on, which cuts the parameter count sharply; the sketch below quantifies this for one branch.
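
Comparing the 5×5 branch of inception_3a (192 input channels, 32 output channels) with and without its 16-channel bottleneck, ignoring biases (a minimal back-of-the-envelope sketch):

# direct 5x5 conv, 192 -> 32 channels
direct = 5 * 5 * 192 * 32                        # 153,600 weights
# 1x1 reduction to 16 channels, then 5x5 conv to 32
reduced = 1 * 1 * 192 * 16 + 5 * 5 * 16 * 32     # 3,072 + 12,800 = 15,872 weights
print(direct / reduced)                          # ~9.7x fewer parameters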

Auxiliary Classifier

(figure: structure of the auxiliary classifier)

The two auxiliary heads, attached after inception_4a and inception_4d, share this structure:
AveragePool: kernel_size=5, stride=3
Conv: 1×1, 128 channels
FC1: 1024
Dropout: 0.7
FC2: num_classes
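
Tracing the shapes for the first auxiliary head, fed from inception_4a (output 512×14×14): the 5×5 average pool with stride 3 gives (14 - 5) / 3 + 1 = 4, so the map becomes 512×4×4; the 1×1 convolution reduces it to 128×4×4; flattening then yields 128 * 4 * 4 = 2048 inputs to FC1. A minimal sketch checking this against the InceptionAux class from model.py below:

import torch
from model import InceptionAux  # the aux-head class from model.py below

aux = InceptionAux(512, num_classes=5)    # first aux head, attached after inception_4a
x = torch.randn(1, 512, 14, 14)           # dummy inception_4a output
print(aux(x).shape)                       # torch.Size([1, 5])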

Network Architecture

(figures: layer-by-layer configuration of GoogLeNet)

The per-layer output shapes are also traced in the comments of model.py below.

Code


model.py

import torch
import torch.nn as nn


class GoogleNet(nn.Module):
    def __init__(self, num_classes, aux_logits=True):
        super(GoogleNet, self).__init__()
        self.aux_logits = aux_logits

        self.front = nn.Sequential(
            BasicConv2d(3, 64, kernel_size=7, stride=2, padding=3),    # input(3, 224, 224), output(64, 112, 112)
            nn.MaxPool2d(3, stride=2, ceil_mode=True),    # (64, 56, 56); (112 - 3) / 2 + 1 = 55.5, floored to 55 if ceil_mode=False, rounded up to 56 if ceil_mode=True
            BasicConv2d(64, 64, kernel_size=1),    # (64, 56, 56)
            BasicConv2d(64, 192, kernel_size=3, padding=1),    # (192, 56, 56)
            nn.MaxPool2d(3, stride=2, ceil_mode=True)    # (192, 28, 28)
        )

        self.inception_3a = Inception(192, 64, 96, 128, 16, 32, 32)    # (256, 28, 28)
        self.inception_3b = Inception(256, 128, 128, 192, 32, 96, 64)    # (480, 28, 28)
        self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True)   # (480, 14, 14)

        self.inception_4a = Inception(480, 192, 96, 208, 16, 48, 64)    # (512, 14, 14)
        self.inception_4b = Inception(512, 160, 112, 224, 24, 64, 64)    # (512, 14, 14)
        self.inception_4c = Inception(512, 128, 128, 256, 24, 64, 64)    # (512, 14, 14)
        self.inception_4d = Inception(512, 112, 144, 288, 32, 64, 64)    # (528, 14, 14)
        self.inception_4e = Inception(528, 256, 160, 320, 32, 128, 128)    # (832, 14, 14)
        self.maxpool4 = nn.MaxPool2d(3, stride=2, ceil_mode=True)    # (832, 7, 7)

        self.inception_5a = Inception(832, 256, 160, 320, 32, 128, 128)    # (832, 7, 7)
        self.inception_5b = Inception(832, 384, 192, 384, 48, 128, 128)    # (1024, 7, 7)

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))    # (1024, 1, 1)
        self.dropout = nn.Dropout(0.4)
        self.fc = nn.Linear(1024, num_classes)

        if self.aux_logits:
            self.aux1 = InceptionAux(512, num_classes)
            self.aux2 = InceptionAux(528, num_classes)

    def forward(self, x):
        x = self.front(x)

        x = self.inception_3a(x)
        x = self.inception_3b(x)
        x = self.maxpool3(x)

        x = self.inception_4a(x)
        if self.training and self.aux_logits:    # aux1 is used only in training mode
            aux1 = self.aux1(x)
        x = self.inception_4b(x)
        x = self.inception_4c(x)
        x = self.inception_4d(x)
        if self.training and self.aux_logits:    # aux2 is used only in training mode
            aux2 = self.aux2(x)
        x = self.inception_4e(x)
        x = self.maxpool4(x)

        x = self.inception_5a(x)
        x = self.inception_5b(x)
        x = self.avgpool(x)

        x = torch.flatten(x, 1)
        # N x 1024
        x = self.dropout(x)
        x = self.fc(x)
        # N x num_classes
        if self.training and self.aux_logits:   # aux outputs are returned only in training mode
            return x, aux1, aux2
        return x


class BasicConv2d(nn.Module):
    def __init__(self, in_channel, out_channel, **kwargs):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_channel, out_channel, **kwargs),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        x = self.conv(x)
        return x


class Inception(nn.Module):
    def __init__(self, in_channel, ch1x1, ch3x3Red, ch3x3, ch5x5Red, ch5x5, pool_proj):
        super(Inception, self).__init__()
        self.branch1 = BasicConv2d(in_channel, ch1x1, kernel_size=1)
        self.branch2 = nn.Sequential(
            BasicConv2d(in_channel, ch3x3Red, kernel_size=1),
            BasicConv2d(ch3x3Red, ch3x3, kernel_size=3, padding=1)
        )
        self.branch3 = nn.Sequential(
            BasicConv2d(in_channel, ch5x5Red, kernel_size=1),
            BasicConv2d(ch5x5Red, ch5x5, kernel_size=5, padding=2)
        )
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            BasicConv2d(in_channel, pool_proj, kernel_size=1)
        )

    def forward(self, x):
        branch1 = self.branch1(x)
        branch2 = self.branch2(x)
        branch3 = self.branch3(x)
        branch4 = self.branch4(x)

        outputs = [branch1, branch2, branch3, branch4]
        x = torch.cat(outputs, 1)
        return x


class InceptionAux(nn.Module):
    def __init__(self, in_channel, num_classes):
        super(InceptionAux, self).__init__()
        self.features = nn.Sequential(
            nn.AvgPool2d(kernel_size=5, stride=3),
            BasicConv2d(in_channel, 128, kernel_size=1)    # (128, 4, 4)
        )
        self.classifier = nn.Sequential(
            nn.Dropout(0.7),    # the paper specifies p=0.7 for the aux classifiers
            nn.Linear(128 * 4 * 4, 1024),
            nn.ReLU(inplace=True),
            nn.Linear(1024, num_classes)
        )

    def forward(self, x):
        x = self.features(x)
        x = torch.flatten(x, start_dim=1)
        x = self.classifier(x)
        return x
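
A quick smoke test of the model (a minimal sketch; the dummy batch is only for illustration). In train mode with aux_logits=True the network returns three outputs; in eval mode only the main one:

import torch
from model import GoogleNet

net = GoogleNet(num_classes=5, aux_logits=True)
net.train()
x = torch.randn(2, 3, 224, 224)            # dummy batch of two 224x224 RGB images
main, aux1, aux2 = net(x)
print(main.shape, aux1.shape, aux2.shape)  # each torch.Size([2, 5])

net.eval()
print(net(x).shape)                        # torch.Size([2, 5]); aux heads are skipped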

train.py

from model import GoogleNet
import torch
import torchvision as tv
import torchvision.transforms as transforms
import json

data_transform = {
    "train":
    transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]),
    "val":
    transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
}

train_set = tv.datasets.ImageFolder(root="C:/Users/14251/Desktop/workspace/GoogleNet/flower_data/train",
                                    transform=data_transform["train"])
val_set = tv.datasets.ImageFolder(root="C:/Users/14251/Desktop/workspace/GoogleNet/flower_data/val",
                                  transform=data_transform["val"])

train_loader = torch.utils.data.DataLoader(train_set,
                                           batch_size=32,
                                           shuffle=True,
                                           num_workers=0)
val_loader = torch.utils.data.DataLoader(val_set,
                                         batch_size=32,
                                         shuffle=True,
                                         num_workers=0)

flower_list = train_set.class_to_idx
flower_dict = dict((val, key) for key, val in flower_list.items())
json_str = json.dumps(flower_dict, indent=4)
with open("class_indices.json", "w") as json_file:
    json_file.write(json_str)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

net = GoogleNet(num_classes=5, aux_logits=True).to(device)

loss_fun = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.0003)

best_accurate = 0.0
for epoch in range(20):
    net.train()
    running_loss = 0.0
    for step, train_data in enumerate(train_loader, start=0):
        train_images, train_labels = train_data
        optimizer.zero_grad()
        outputs_main, outputs_aux1, outputs_aux2 = net(
            train_images.to(device))
        loss_main = loss_fun(outputs_main, train_labels.to(device))
        loss_aux1 = loss_fun(outputs_aux1, train_labels.to(device))
        loss_aux2 = loss_fun(outputs_aux2, train_labels.to(device))
        loss = loss_main + loss_aux1 * 0.3 + loss_aux2 * 0.3    # aux losses weighted by 0.3, as in the paper
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        # print train process
        rate = (step + 1) / len(train_loader)
        a = "*" * int(rate * 50)
        b = "." * int((1 - rate) * 50)
        print("\rtrain loss: {:^3.0f}%[{}->{}]{:.3f}".format(
            int(rate * 100), a, b, loss),
              end="")
    print()

    net.eval()
    accurate = 0.0
    with torch.no_grad():
        for val_data in val_loader:
            val_images, val_labels = val_data
            outputs = net(val_images.to(device))
            pred = torch.max(outputs, dim=1)[1]
            accurate += (pred == val_labels.to(device)).sum().item()
        val_accurate = accurate / len(val_set)
        if val_accurate > best_accurate:
            best_accurate = val_accurate
            torch.save(
                net.state_dict(),
                "C:/Users/14251/Desktop/workspace/GoogleNet/GoogleNet_dict.pth"
            )
        print('[epoch %d] train_loss: %.3f  val_accuracy: %.3f' %
              (epoch + 1, running_loss / len(train_loader), val_accurate))

print("Finished Training")

predict.py

import torch
import json
import torchvision.transforms as transforms
from model import GoogleNet
from PIL import Image

transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

img = Image.open("C:/Users/14251/Desktop/workspace/GoogleNet/test.jpg").convert("RGB")    # convert to RGB in case the image has an alpha channel
img = transform(img)
img = img.unsqueeze(dim=0)    # add a batch dimension

try:
    with open("C:/Users/14251/Desktop/workspace/GoogleNet/class_indices.json", "r") as json_file:
        class_indices = json.load(json_file)
except Exception as e:
    print(e)
    exit(-1)

net = GoogleNet(num_classes=5, aux_logits=False)
net.load_state_dict(
    torch.load("C:/Users/14251/Desktop/workspace/GoogleNet/GoogleNet_dict.pth"),
    strict=False)    # strict=False skips the aux1/aux2 weights saved during training

net.eval()    # switch to eval mode so dropout is disabled during inference
with torch.no_grad():
    output = net(img).squeeze()
    predict = torch.softmax(output, dim=0)
    predict_cla = torch.argmax(predict).numpy()
    print(class_indices[str(predict_cla)], predict[predict_cla].item())
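
To inspect more than the single best class, a small extension continuing from the script above (predict and class_indices are already defined):

top_p, top_i = torch.topk(predict, k=3)    # the three most likely classes
for p, i in zip(top_p, top_i):
    print(class_indices[str(i.item())], p.item())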
