# PyTorch 实现 VGG

import numpy as np
import torch
from torch import nn
from torch.autograd import Variable

## 定义 VGG 的 block

VGG 几乎全部使用 3x3 的卷积核以及 2x2 的池化层。多层堆叠的小卷积核与单个大卷积核具有相同的感受野,但参数更少,并且能让网络结构更深。

def vgg_block(num_convs, in_channels, out_channels):
    """Build one VGG block: `num_convs` 3x3 conv+ReLU layers followed by a 2x2 max-pool.

    Args:
        num_convs: number of 3x3 convolution layers in the block (>= 1).
        in_channels: channels of the block input.
        out_channels: channels produced by every conv in the block.

    Returns:
        nn.Sequential containing the conv/ReLU stack and the final pooling layer.
        Spatial size is preserved by the convs (padding=1) and halved by the pool.
    """
    # First conv maps in_channels -> out_channels; collect layers in a list.
    net = [nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
           nn.ReLU(True)]

    # Remaining convs keep the channel count at out_channels.
    for _ in range(num_convs - 1):
        net.append(nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1))
        net.append(nn.ReLU(True))

    # 2x2 max-pool halves the spatial resolution at the end of the block.
    net.append(nn.MaxPool2d(kernel_size=2, stride=2))
    return nn.Sequential(*net)
# Implementing VGG in PyTorch, step by step:
# 1. Required modules: torch, torch.nn (imported at the top of the file).
# 2. Define the VGG network structure (feature extractor + classifier).

class VGG(nn.Module):
    """VGG network: a convolutional feature extractor plus a 3-layer classifier.

    Args:
        features: nn.Module producing a (N, 512, H, W) feature map (see make_layers).
        num_classes: size of the final classification layer.
        init_weights: when True, initialize weights as in the original paper.
    """

    def __init__(self, features, num_classes=1000, init_weights=True):
        super(VGG, self).__init__()
        self.features = features
        # Adaptive pooling fixes the spatial size at 7x7 so the classifier
        # works for any input resolution.
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        """Run the full network: features -> pool -> flatten -> classifier."""
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        # Kaiming init for convs, constant init for BN, small-normal for
        # linear layers — same scheme as torchvision's reference VGG.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)


# 3. Layer configurations for the VGG variants.
#    Numbers are conv output channels; 'M' marks a 2x2 max-pool.
cfgs = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}


def make_layers(cfg, batch_norm=False):
    """Build the convolutional feature extractor from a cfg list.

    Args:
        cfg: one of the lists in `cfgs` — ints are conv output channels,
            'M' inserts a 2x2 max-pool.
        batch_norm: insert a BatchNorm2d after every conv when True.

    Returns:
        nn.Sequential of the conv/ReLU(/BN)/pool layers, starting from
        3 input channels (RGB).
    """
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)
# 4. Factory functions for the standard VGG variants. Each one builds the
#    feature extractor from its cfg and optionally loads torchvision's
#    pretrained weights from a local checkpoint file.

def vgg11(pretrained=False, **kwargs):
    """VGG 11-layer model (configuration 'A')."""
    if pretrained:
        kwargs['init_weights'] = False  # weights come from the checkpoint
    model = VGG(make_layers(cfgs['A']), **kwargs)
    if pretrained:
        model.load_state_dict(torch.load('vgg11-bbd30ac9.pth'))
    return model


def vgg13(pretrained=False, **kwargs):
    """VGG 13-layer model (configuration 'B')."""
    if pretrained:
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfgs['B']), **kwargs)
    if pretrained:
        model.load_state_dict(torch.load('vgg13-c768596a.pth'))
    return model


def vgg16(pretrained=False, **kwargs):
    """VGG 16-layer model (configuration 'D')."""
    if pretrained:
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfgs['D']), **kwargs)
    if pretrained:
        model.load_state_dict(torch.load('vgg16-397923af.pth'))
    return model


def vgg19(pretrained=False, **kwargs):
    """VGG 19-layer model (configuration 'E')."""
    if pretrained:
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfgs['E']), **kwargs)
    if pretrained:
        model.load_state_dict(torch.load('vgg19-dcbb9e9d.pth'))
    return model


# 5.-7. Example: load pretrained VGG16, preprocess one image, run inference.
#       Guarded so importing this module has no side effects; requires the
#       checkpoint file and 'test.jpg' to exist locally.
if __name__ == '__main__':
    from PIL import Image
    from torchvision import transforms

    model = vgg16(pretrained=True)
    model.eval()  # disable dropout for deterministic inference

    # Standard ImageNet preprocessing: resize, center-crop, normalize.
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])

    img = Image.open('test.jpg')
    img = transform(img).unsqueeze(0)  # add batch dimension -> (1, 3, 224, 224)
    with torch.no_grad():
        output = model(img)