Pytorch基础2——神经网络模板

1. 神经网络模板

在Pytorch中编写神经网络,所有的层结构和损失函数都来自于torch.nn,所有的模型构建都是从基类nn.Module继承的,于是有了下面这个模板。

import torch
import torch.nn as nn


# Training data: one dummy sample shaped (N, C, H, W) so it is compatible
# with the Conv2d(in_channels=1, out_channels=2, kernel_size=3) layer below.
# The original 1-D tensors of shape (1,) would crash: Conv2d requires 4-D input.
x_train = torch.ones(1, 1, 5, 5, dtype=torch.float)
# Target matches the conv output shape: a 5x5 input with a 3x3 kernel (no
# padding) yields (1, 2, 3, 3).
y_train = torch.ones(1, 2, 3, 3, dtype=torch.float)


# 定义网络模型
class Model_name(nn.Module):
    """Minimal nn.Module template: a single Conv2d layer as a placeholder."""

    def __init__(self):
        super().__init__()
        # Placeholder layer; register additional layers here as attributes.
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=2, kernel_size=3)

    def forward(self, x):
        # Run the input through each registered layer in turn.
        out = self.conv1(x)
        return out


model = Model_name()  # instantiate the network defined above

loss_func = nn.MSELoss()  # mean-squared-error loss

optimizer = torch.optim.SGD(model.parameters(), lr=0.1)  # plain SGD optimizer


num_iter = 100  # maximum number of training iterations
for _ in range(num_iter):
    # Forward pass: compute predictions, then the loss.
    prediction = model(x_train)
    loss = loss_func(prediction, y_train)

    # Backward pass: gradients accumulate across calls by default, so they
    # must be cleared before each backward(); then backpropagate and step.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    

2. 应用1——回归

import torch
import torch.nn as nn
import torch.nn.functional as F


# Training data: 100 points on [-1, 1] as a column vector, with a noisy
# quadratic target y = x^2 + Gaussian noise.
x_train = torch.linspace(-1, 1, 100).reshape(-1, 1)
y_train = x_train ** 2 + 0.2 * torch.randn_like(x_train)


# 定义网络模型
class Model_name(nn.Module):
    """Two-layer MLP for regression: Linear -> ReLU -> Linear."""

    def __init__(self, n_feature, n_hidden, n_predict):
        super().__init__()
        self.hidden = nn.Linear(n_feature, n_hidden)
        self.predict = nn.Linear(n_hidden, n_predict)

    def forward(self, x):
        # Hidden layer with ReLU nonlinearity, then the linear output head.
        hidden_out = F.relu(self.hidden(x))
        return self.predict(hidden_out)


model = Model_name(1, 10, 1)  # 1 input feature -> 10 hidden units -> 1 output
print(model)                  # print the architecture as a sanity check
loss_func = torch.nn.MSELoss()  # mean-squared-error loss for regression

optimizer = torch.optim.SGD(model.parameters(), lr=0.05)  # plain SGD optimizer


num_iter = 1000  # maximum number of training iterations
for step in range(num_iter):
    # Forward pass: compute predictions, then the loss.
    prediction = model(x_train)
    loss = loss_func(prediction, y_train)

    # Backward pass: gradients accumulate by default, so clear them first,
    # then backpropagate and take one optimizer step.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # loss.item() is the supported way to read a scalar tensor's value.
    # The original `loss.data.numpy()` used the discouraged `.data`
    # attribute, which bypasses autograd's version tracking.
    print(loss.item())

3. 应用2——图像分类

import torch
import torch.nn as nn


class SimpleCnn(nn.Module):
    """Three conv/pool stages followed by a three-layer classifier head.

    Expects 3-channel input. With 32x32 images, three 2x2 max-pools reduce
    the spatial size to 4x4, so the flattened feature size is
    128 * 4 * 4 = 2048, matching the first fully-connected layer.
    """

    def __init__(self):
        super().__init__()

        # Convolutional stages; submodule names (conv1/relu1/pool1, ...)
        # match the original layout so state_dict keys are unchanged.
        self.layer1 = self._conv_stage(1, 3, 32)
        self.layer2 = self._conv_stage(2, 32, 64)
        self.layer3 = self._conv_stage(3, 64, 128)

        # Fully-connected classifier head: 2048 -> 512 -> 64 -> 10 classes.
        head = nn.Sequential()
        head.add_module("fc1", nn.Linear(2048, 512))
        head.add_module("fc_relu1", nn.ReLU(True))
        head.add_module("fc2", nn.Linear(512, 64))
        head.add_module("fc_relu2", nn.ReLU(True))
        head.add_module("fc3", nn.Linear(64, 10))
        self.layer4 = head

    @staticmethod
    def _conv_stage(idx, in_ch, out_ch):
        """Build one conv -> relu -> maxpool stage with numbered module names."""
        stage = nn.Sequential()
        stage.add_module("conv%d" % idx, nn.Conv2d(in_ch, out_ch, 3, 1, padding=1))
        stage.add_module("relu%d" % idx, nn.ReLU(True))
        stage.add_module("pool%d" % idx, nn.MaxPool2d(2, 2))
        return stage

    def forward(self, x):
        # Convolutional feature extraction, flatten per sample, classify.
        features = self.layer3(self.layer2(self.layer1(x)))
        flat = features.view(features.size(0), -1)
        return self.layer4(flat)


# Instantiate the model and print its structure for inspection.
model = SimpleCnn()
print(model)

打印网络结构如下:

SimpleCnn(
  (layer1): Sequential(
    (conv1): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (relu1): ReLU(inplace=True)
    (pool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (layer2): Sequential(
    (conv2): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (relu2): ReLU(inplace=True)
    (pool2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (layer3): Sequential(
    (conv3): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (relu3): ReLU(inplace=True)
    (pool3): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (layer4): Sequential(
    (fc1): Linear(in_features=2048, out_features=512, bias=True)
    (fc_relu1): ReLU(inplace=True)
    (fc2): Linear(in_features=512, out_features=64, bias=True)
    (fc_relu2): ReLU(inplace=True)
    (fc3): Linear(in_features=64, out_features=10, bias=True)
  )
)

实用技巧:

  • print(model)可将网络结构打印出来,方便查看和检验。
  • 可以在 forward() 的 return 语句中一并返回网络的中间结果,这样即可获得中间层的输出。
  • 2
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值