PyTorch (2) -- Small Networks I Often Use

1. Introduction

    This post records a couple of small networks I use regularly; they give reasonably good results when classifying simple, small images.

2. Code

import torch
import torch.nn as nn


class Net(nn.Module):
    """Four conv/pool blocks plus an MLP head; expects 3x48x48 inputs
    (48 -> 24 -> 12 -> 6 -> 3 after the four poolings)."""
    def __init__(self):
        super(Net, self).__init__()
        # Each block: 5x5 conv (padding=2 preserves spatial size),
        # then a 2x2 max-pool that halves it.
        self.layer1 = nn.Sequential(
            nn.Conv2d(3, 6, kernel_size=5, padding=2),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.layer2 = nn.Sequential(
            nn.Conv2d(6, 12, kernel_size=5, padding=2),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.layer3 = nn.Sequential(
            nn.Conv2d(12, 24, kernel_size=5, padding=2),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.layer4 = nn.Sequential(
            nn.Conv2d(24, 36, kernel_size=5, padding=2),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )

        self.fc = nn.Sequential(
            nn.Linear(36 * 3 * 3, 128),
            nn.Dropout(0.2),
            nn.ReLU(inplace=True),
            nn.Linear(128, 64),
            nn.Dropout(0.2),
            nn.ReLU(inplace=True),
            nn.Linear(64, 7)  # note: set this to your number of classes
        )

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 36*3*3)
        x = self.fc(x)
        return x
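
A quick sanity check for Net (a minimal sketch: the 3x48x48 input size is inferred from the 36 * 3 * 3 flatten size, and the batch size of 4 is an arbitrary assumption):

net = Net()
dummy = torch.randn(4, 3, 48, 48)  # (batch, channels, H, W); 48x48 assumed
print(net(dummy).shape)            # torch.Size([4, 7])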
    
class Net_2(nn.Module):
    """Classic Conv-(BN)-ReLU-Pool blocks; expects 1x96x32 inputs,
    per the shape comments on each block."""
    def __init__(self):
        super(Net_2, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=3, padding=1),
            # nn.BatchNorm2d(16),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2)  # -> 16 x 48 x 16
        )
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=3, padding=1),
            # nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2)  # -> 32 x 24 x 8
        )
        self.layer3 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2)  # -> 64 x 12 x 4
        )
        self.layer4 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2)  # -> 128 x 6 x 2
        )

        self.fc1 = nn.Sequential(
            nn.Linear(128 * 6 * 2, 256),
            nn.Dropout(0.8),
            nn.ReLU(inplace=True),
        )
        self.fc2 = nn.Sequential(
            nn.Linear(256, 128),
            nn.Dropout(0.8),
            nn.ReLU(inplace=True),
        )
        self.fc = nn.Sequential(
            nn.Linear(128, 2)  # note: set this to your number of classes
        )

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 128*6*2)
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc(x)
        return x
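
A matching sanity check for Net_2 (the 1x96x32 input size is read off the per-block shape comments; the batch size is again an arbitrary assumption):

net2 = Net_2()
dummy = torch.randn(4, 1, 96, 32)  # 96x32 single-channel inputs assumed
print(net2(dummy).shape)           # torch.Size([4, 2])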

The first network (Net) follows LeNet-5, while Net_2 uses the classic CBRP (Conv-BatchNorm-ReLU-Pool) block structure. Either one could of course also be designed as a fully convolutional classification network, which would accept inputs of different sizes; see the sketch below.
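
As a sketch of that fully convolutional idea (my own illustration, not one of the networks above): replace the flatten + Linear head with a 1x1 convolution and a global average pool, and the fixed-input-size constraint disappears.

class Net_2_FCN(nn.Module):
    # Hypothetical fully convolutional variant of Net_2: the Linear head is
    # swapped for a 1x1 conv + global average pooling, so any input of at
    # least 16x16 (enough to survive four 2x2 poolings) is accepted.
    def __init__(self, num_classes=2):
        super(Net_2_FCN, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 16, 3, padding=1), nn.ReLU(inplace=True), nn.MaxPool2d(2, 2),
            nn.Conv2d(16, 32, 3, padding=1), nn.ReLU(inplace=True), nn.MaxPool2d(2, 2),
            nn.Conv2d(32, 64, 3, padding=1), nn.ReLU(inplace=True), nn.MaxPool2d(2, 2),
            nn.Conv2d(64, 128, 3, padding=1), nn.ReLU(inplace=True), nn.MaxPool2d(2, 2),
        )
        self.head = nn.Conv2d(128, num_classes, kernel_size=1)  # per-location class scores
        self.pool = nn.AdaptiveAvgPool2d(1)                     # average over all locations

    def forward(self, x):
        x = self.features(x)
        x = self.pool(self.head(x))
        return x.view(x.size(0), -1)  # (batch, num_classes) for any input size

print(Net_2_FCN()(torch.randn(2, 1, 64, 64)).shape)   # torch.Size([2, 2])
print(Net_2_FCN()(torch.randn(2, 1, 128, 48)).shape)  # torch.Size([2, 2])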
