记录贴:pytorch学习Part4

记录贴:pytorch学习Part4

一、卷积

import torch
import torch.nn as nn
from torch.nn import functional as F

# --- Convolution -----------------------------------------------------------
# Approach 1: the nn.Module layer API.
layer = nn.Conv2d(1, 3, kernel_size=3, stride=1, padding=0)
x = torch.rand(1, 1, 28, 28)
# FIX: call the layer as layer(x) instead of layer.forward(x) —
# __call__ runs registered hooks; invoking .forward() directly skips them.
out = layer(x)
layer = nn.Conv2d(1, 3, kernel_size=3, stride=1, padding=1)
out = layer(x)
layer = nn.Conv2d(1, 3, kernel_size=3, stride=2, padding=1)
out = layer(x)
out.shape
layer.weight  # shape (3, 1, 3, 3): 3 filters, 1 input channel, 3x3 kernel
layer.bias    # one bias per output channel (3,)

# Approach 2: the functional API with explicit weight/bias tensors.
w = torch.rand(16, 3, 5, 5)  # 16 filters, 3 input channels, 5x5 kernel
b = torch.rand(16)           # one bias per filter
x = torch.randn(1, 3, 28, 28)
out = F.conv2d(x, w, b, stride=1, padding=1)

二、下采样和上采样

# --- Down-sampling / up-sampling -------------------------------------------
x = torch.randn(1, 16, 14, 14)

# Pooling with window 2 and stride 2 halves the spatial size: 14x14 -> 7x7.
pool = nn.MaxPool2d(2, stride=2)   # max pooling, module form
out = pool(x)
out = F.avg_pool2d(x, 2, stride=2)  # average pooling, functional form

# Upsampling with nearest-neighbour interpolation (7x7 -> 14x14, then 21x21).
x = out
out = F.interpolate(x, scale_factor=2, mode='nearest')
out = F.interpolate(x, scale_factor=3, mode='nearest')

三、标准化

# --- Normalization ---------------------------------------------------------
# Image normalization with per-channel ImageNet mean/std.
# FIX: the original snippet used `transforms` without importing it, which
# raises NameError; `transforms` comes from torchvision (not the stdlib).
from torchvision import transforms

normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])

# Batch normalization over a (N, C, L) batch: one statistic per channel C.
x = torch.rand(100, 16, 784)
layer = nn.BatchNorm1d(16)
out = layer(x)
layer.running_mean  # running estimate of the per-channel mean
layer.running_var   # running estimate of the per-channel variance

# 2-D variant for (N, C, H, W) feature maps.
x = torch.rand(1, 16, 7, 7)
layer = nn.BatchNorm2d(16)

四、Resnet

class ResBlk(nn.Module):
    """Basic ResNet residual block: two 3x3 conv+BN stages plus a shortcut.

    The shortcut is the identity when ch_in == ch_out; otherwise a 1x1
    convolution (+BN) projects the input to the right channel count so the
    residual addition is shape-compatible.
    """

    def __init__(self, ch_in, ch_out):
        # FIX: super().__init__() was missing; without it nn.Module's
        # bookkeeping is uninitialized and assigning submodules raises.
        super().__init__()
        self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1)
        self.bn1 = nn.BatchNorm2d(ch_out)
        # FIX: conv2 consumes conv1's output, so its input channel count is
        # ch_out (the original used ch_in, failing whenever ch_in != ch_out).
        self.conv2 = nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(ch_out)

        # Identity shortcut, replaced by a 1x1 projection when the channel
        # count changes.  FIX: the original used kernel_size=4 with no
        # padding, which shrinks the spatial size and breaks the addition.
        self.extra = nn.Sequential()
        if ch_out != ch_in:
            self.extra = nn.Sequential(
                nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=1),
                nn.BatchNorm2d(ch_out))

    def forward(self, x):
        """Return shortcut(x) + bn2(conv2(relu(bn1(conv1(x))))).

        Note: following the original, no ReLU is applied after the addition.
        """
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = self.extra(x) + out
        return out

五、类

# Network structure
class MLP(nn.Module):
    """Three-stage MLP (784 -> 200 -> 200 -> 10) built from custom Mylinear
    layers, each followed by BatchNorm1d and LeakyReLU.

    NOTE(review): `Mylinear` is defined elsewhere in this project and is
    presumably a hand-rolled nn.Linear replacement — confirm against its
    definition.  Also note the final LeakyReLU is applied to the logits,
    as in the original.
    """

    def __init__(self):
        super(MLP, self).__init__()

        self.model = nn.Sequential(
            Mylinear(784, 200),
            nn.BatchNorm1d(200, eps=1e-8),
            nn.LeakyReLU(inplace=True),
            Mylinear(200, 200),
            nn.BatchNorm1d(200, eps=1e-8),
            nn.LeakyReLU(inplace=True),
            Mylinear(200, 10),
            nn.LeakyReLU(inplace=True),
        )

    def forward(self, x):
        # FIX: the original class defined no forward(); calling an
        # nn.Module without one raises NotImplementedError.
        return self.model(x)

# Container example: a small CNN feature extractor as one nn.Sequential.
# NOTE(review): `self` is undefined at module level — this snippet was
# evidently lifted from inside some nn.Module's __init__ and raises
# NameError if run as-is.  Repeating pattern: Conv -> (MaxPool) -> ReLU -> BN.
self.net = nn.Sequential(
    nn.Conv2d(1,32,5,1,1),    # 1 -> 32 channels, 5x5 kernel, stride 1, pad 1
    nn.MaxPool2d(2,2),        # halve spatial resolution
    nn.ReLU(True),
    nn.BatchNorm2d(32),
    nn.Conv2d(32,64,3,1,1),   # 32 -> 64 channels, 3x3 kernel
    nn.ReLU(True),
    nn.BatchNorm2d(64),
    nn.Conv2d(64,64,3,1,1),
    nn.MaxPool2d(2,2),        # halve spatial resolution again
    nn.ReLU(True),
    nn.BatchNorm2d(64),
    nn.Conv2d(64,128,3,1,1),  # 64 -> 128 channels
    nn.ReLU(True),
    nn.BatchNorm2d(128))
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值