模型的整体框架
import torch


class NetWord(torch.nn.Module):
    """Skeleton of a PyTorch model.

    Parameter layers belong in __init__; the computation graph /
    propagation belongs in forward().
    """

    def __init__(self):
        # Must call the parent constructor so nn.Module can set up its
        # internal registries (_modules, _parameters, ...). Without it,
        # print(model) and model(x) raise AttributeError.
        super(NetWord, self).__init__()
        # Parameter layers (conv, linear, ...) would be defined here.

    def forward(self, x):
        """Build the model and propagate the input.

        This skeleton simply returns its input unchanged.
        """
        return x


model = NetWord()  # instantiate the model
print(model)       # print the model architecture
一、模型参数层
class NetWord(torch.nn.Module):
    """Minimal subclass showing the mandatory parent-class initialisation."""

    def __init__(self):
        # Initialise nn.Module's internal state (inherits the parent's
        # attributes and uses the parent's method to set up the subclass).
        super().__init__()
super(NetWord,self).__init__() 继承了nn.Module的属性并使用父类的方法初始化子类
解释不错的文章推荐: super(NetWord,self).__init__()函数分析
1、nn.Conv2d()函数(卷积)
import torch

# A batch of 8 samples, each a 4-channel 28x28 image
# (8 = batch size, 4 = input channels, 28x28 = spatial size).
x = torch.randn(8, 4, 28, 28)
print(x.shape)  # torch.Size([8, 4, 28, 28])

# Conv layer: 4 input channels -> 2 output channels, 2x2 kernel, stride 1.
conv = torch.nn.Conv2d(in_channels=4, out_channels=2, kernel_size=2, stride=1, padding=0)
# The first parameter is the weight tensor, shaped (out_ch, in_ch, kH, kW).
print(list(conv.parameters())[0].shape)  # torch.Size([2, 4, 2, 2])
2、nn.Linear()函数(全连接)
import torch

# A batch of 8 four-channel 28x28 images.
x = torch.randn(8, 4, 28, 28)
print(x.shape)  # torch.Size([8, 4, 28, 28])

# For a conv layer the key chaining constraint is the input channel count.
conv = torch.nn.Conv2d(in_channels=4, out_channels=2, kernel_size=2, stride=1, padding=0)
x = conv(x)
print(x.shape)  # torch.Size([8, 2, 27, 27])

# Flatten the 4-D tensor to 2-D before feeding it to a fully connected layer.
x = x.view(8, -1)
# For a linear layer the key constraint is the flattened feature size.
linear = torch.nn.Linear(in_features=2 * 27 * 27, out_features=2)
x = linear(x)
print(x.shape)  # torch.Size([8, 2])
3、nn.MaxPool2d()和nn.AvgPool2d()函数(最大池化和平均池化)
提示:MaxPool1d()输入输出是一维数据,而MaxPool2d()输入输出是二维数据
import torch

# A batch of 8 four-channel 28x28 images.
x = torch.randn(8, 4, 28, 28)
print(x.shape)

conv = torch.nn.Conv2d(in_channels=4, out_channels=2, kernel_size=2, stride=1, padding=0)
x = conv(x)
print(x.shape)  # torch.Size([8, 2, 27, 27])

# Max pooling: 2x2 window, stride 1 -> each spatial dim shrinks by 1.
maxpool = torch.nn.MaxPool2d(kernel_size=2, stride=1, padding=0)
x = maxpool(x)
print(x.shape)  # torch.Size([8, 2, 26, 26])

# Average pooling with the same window and stride.
avgpool = torch.nn.AvgPool2d(kernel_size=2, stride=1, padding=0)
x = avgpool(x)
print(x.shape)  # torch.Size([8, 2, 25, 25])
4、nn.Dropout()函数(随机将输入张量中部分元素设置为 0)
import torch

# A batch of 8 four-channel 28x28 images.
x = torch.randn(8, 4, 28, 28)
conv = torch.nn.Conv2d(in_channels=4, out_channels=2, kernel_size=2, stride=1, padding=0)
x = conv(x)
print(x)

# Dropout zeroes each element with probability p (default: 0.5).
dropout = torch.nn.Dropout(p=0.1)
x = dropout(x)
print(x)
5、nn.ReLU()函数()
import torch

# A batch of 8 four-channel 28x28 images.
x = torch.randn(8, 4, 28, 28)
conv = torch.nn.Conv2d(in_channels=4, out_channels=2, kernel_size=2, stride=1, padding=0)
x = conv(x)
print(x)

# ReLU clamps every negative element to zero.
relu = torch.nn.ReLU()
x = relu(x)
print(x)
输出:conv(x)
5.6277e-02, -7.1074e-01],
[-2.5850e-01, 2.6672e-01, 5.4095e-01, ..., 3.0650e-02,
-5.7501e-03, 3.7112e-01],
[-3.6782e-01, -2.2672e-01, 3.5944e-01, ..., 8.1378e-01,
2.3800e-01, 7.4787e-01]
输出:relu(x)
5.6277e-02, 0.0000e+00],
[0.0000e+00, 2.6672e-01, 5.4095e-01, ..., 3.0650e-02,
0.0000e+00, 3.7112e-01],
[0.0000e+00, 0.0000e+00, 3.5944e-01, ..., 8.1378e-01,
2.3800e-01, 7.4787e-01]
6、nn.Sigmoid()函数()
注意:非常适合于二进制分类问题
import torch

# A batch of 8 four-channel 28x28 images.
x = torch.randn(8, 4, 28, 28)
conv = torch.nn.Conv2d(in_channels=4, out_channels=2, kernel_size=2, stride=1, padding=0)
x = conv(x)
print(x)
print('------')

# Sigmoid squashes every element into the open interval (0, 1).
sig = torch.nn.Sigmoid()
x = sig(x)
print(x)
输出1:conv(x)
4.0350e-02, -9.1782e-01],
[ 1.0022e-01, -1.2966e-01, -1.5330e+00, ..., -9.9505e-01,
4.7113e-01, -3.8614e-02],
[ 3.2308e-02, -6.6265e-01, 3.6953e-02, ..., 7.8304e-01,
3.3069e-01, 1.0838e+00]
输出2:Sigmoid(x)
[0.5250, 0.4676, 0.1775, ..., 0.2699, 0.6157, 0.4903],
[0.5081, 0.3401, 0.5092, ..., 0.6863, 0.5819, 0.7472]
二、模型构建及传播
forward(self, x)类似于 class 中的__call__和__init__方法
import torch  # needed for torch.randn below; `import torch.nn as nn` alone does not bind `torch`
import torch.nn as nn


class NetWord(nn.Module):
    """Demonstrates when __init__ and forward run.

    forward() is invoked through nn.Module.__call__, i.e. by model(x) —
    similar to a class's __call__ / __init__ protocol.
    """

    def __init__(self):
        print('1')  # printed once, at construction time
        super(NetWord, self).__init__()
        self.conv = nn.Conv2d(in_channels=4, out_channels=2, kernel_size=2, stride=1, padding=0)

    def forward(self, x):
        print('2')  # printed on every call model(x)
        # Deliberately returns the input unchanged: this example only
        # traces the call sequence; it does not apply self.conv.
        return x


model = NetWord()  # instantiating prints "1"; commenting out the lines below leaves only "1"
x = torch.randn(8, 4, 28, 28)
x = model(x)       # calling the model additionally prints "2"
print(x)