# PyTorch Learning Notes (3): Neural Networks

Neural networks in PyTorch are built with the torch.nn package. An nn.Module contains layers and a forward(input) method that returns the output. As an example, consider a network that classifies digit images (a LeNet-style convolutional network). A typical training procedure for a neural network is as follows:

1. Define a neural network with learnable parameters
2. Iterate over a dataset of inputs
3. Process each input through the network
4. Compute the loss (how far the output is from being correct)
5. Propagate gradients back into the network's parameters
6. Update the network's weights, typically with a simple rule: weight = weight - learning_rate * gradient

# 1. Define the Neural Network

```python
# -*- coding:utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F

# 1. Define the neural network

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()  # initialize the attributes inherited from nn.Module

        self.conv1 = nn.Conv2d(1, 6, 5)   # 1 input image channel, 6 output channels, 5x5 square convolution
        self.conv2 = nn.Conv2d(6, 16, 5)

        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))  # max pooling over a (2, 2) window
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)       # if the size is a square you can specify a single number
        x = x.view(-1, self.num_flat_features(x))        # flatten to a vector (total feature count unchanged) for the fully connected layers
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features

net = Net()
print(net)
```


```
Net(
  (conv1): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1))
  (conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))
  (fc1): Linear(in_features=400, out_features=120, bias=True)
  (fc2): Linear(in_features=120, out_features=84, bias=True)
  (fc3): Linear(in_features=84, out_features=10, bias=True)
)
```

The learnable parameters of a model are returned by `net.parameters()`:

```python
params = list(net.parameters())
print(len(params))
print(params[0].size())  # conv1's weight
```


```
10
torch.Size([6, 1, 5, 5])
```
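Why does `fc1` take `16*5*5` inputs? The network expects a 32x32 input image, and each conv/pool stage shrinks the feature maps. A quick sketch (reusing `net` from above) that traces the shapes:

```python
# Sketch: trace feature-map shapes through the conv/pool stages
# for a 32x32 single-channel input.
x = torch.randn(1, 1, 32, 32)
x = F.max_pool2d(F.relu(net.conv1(x)), 2)  # 32 -> 28 after the 5x5 conv, -> 14 after pooling
print(x.size())                            # torch.Size([1, 6, 14, 14])
x = F.max_pool2d(F.relu(net.conv2(x)), 2)  # 14 -> 10 after the 5x5 conv, -> 5 after pooling
print(x.size())                            # torch.Size([1, 16, 5, 5]), i.e. 16*5*5 = 400 features
```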

# 2. Iterate Over the Input

Reusing the imports and the `Net` class from Section 1:
```python
net = Net()

# 2. Feed a random 32x32 input through the network
input = torch.randn(1, 1, 32, 32)
out = net(input)
print(out)
```


```
tensor([[-0.0142, -0.1713, -0.0990,  0.0307,  0.2069, -0.0126,  0.1045,  0.0479,
```
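Note that the whole `torch.nn` package only supports mini-batches, not single samples: `nn.Conv2d` takes a 4D tensor of `nSamples x nChannels x Height x Width`. A minimal sketch of adding a fake batch dimension to a single sample:

```python
# Sketch: torch.nn layers expect a batch dimension. For a single
# sample, insert a fake batch axis with unsqueeze(0).
single = torch.randn(1, 32, 32)  # one 1-channel 32x32 image, no batch dim
batched = single.unsqueeze(0)    # shape becomes [1, 1, 32, 32]
out = net(batched)
```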

# 3. Process the Input Through the Network

Again reusing the imports and the `Net` class from Section 1:
```python
net = Net()

# 2. Feed an input through the network
input = torch.randn(1, 1, 32, 32)
out = net(input)

# 3. Zero the gradient buffers of all parameters, then backprop with random gradients
net.zero_grad()
out.backward(torch.randn(1, 10))
```
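Why zero first? PyTorch accumulates gradients into each parameter's `.grad` field on every `backward()` call. A small sketch continuing from the code above (printed values will vary):

```python
# Sketch: .grad accumulates across backward() calls until cleared.
print(net.conv1.bias.grad)  # gradients left over from the backward call above
net.zero_grad()             # clear them before the next backward pass
print(net.conv1.bias.grad)  # zeros (or None, depending on the PyTorch version)
```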


# 4. Compute the Loss

Once more reusing the imports and the `Net` class from Section 1:
```python
net = Net()

# 2. Feed an input through the network
input = torch.randn(1, 1, 32, 32)
out = net(input)

# 4. Compute the loss.
# A loss function takes the (output, target) pair and computes a value
# estimating how far the output is from the target. The nn package has
# several different loss functions; a simple one is nn.MSELoss, which
# computes the mean squared error.
target = torch.randn(10)     # a dummy target, for example
target = target.view(1, -1)  # reshape to match the output's shape
criterion = nn.MSELoss()

loss = criterion(out, target)
print(loss)
```
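As a sanity check, here is a minimal sketch verifying what `nn.MSELoss` (with its default `reduction='mean'`) computes, namely the mean of the squared element-wise differences:

```python
# Sketch: nn.MSELoss equals the mean squared difference computed by hand.
manual = ((out - target) ** 2).mean()
print(torch.allclose(loss, manual))  # True
```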



If you follow `loss` backwards along its `grad_fn` attribute, you see a graph of computations that looks like this:

```
input -> conv2d -> relu -> maxpool2d -> conv2d -> relu -> maxpool2d
      -> view -> linear -> relu -> linear -> relu -> linear
      -> MSELoss
      -> loss
```

```python
print(loss.grad_fn)  # MSELoss
```


```
<MseLossBackward object at 0x0000023E639F6400>
```
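You can keep stepping backwards through this graph via `next_functions`; a short sketch (the exact `Backward` object names vary across PyTorch versions):

```python
# Sketch: walk back through the autograd graph one node at a time.
print(loss.grad_fn.next_functions[0][0])                       # Linear (Addmm)
print(loss.grad_fn.next_functions[0][0].next_functions[0][0])  # ReLU
```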

# 5. Backpropagate Gradients to the Network's Parameters

Reusing the imports and the `Net` class from Section 1 once again:
```python
net = Net()

# 2. Feed an input through the network
input = torch.randn(1, 1, 32, 32)
out = net(input)
out.backward(torch.randn(1, 10))  # backprop with random gradients, as in Section 3
out = net(input)                  # run the forward pass again

target = torch.randn(10)
target = target.view(1, -1)
criterion = nn.MSELoss()

loss = criterion(out, target)

# 5. Backpropagate the loss into the network's parameters.
# All we need is loss.backward(), but the existing gradients must be
# cleared first, otherwise the new gradients are accumulated onto them.
# Watch conv1's bias gradient before and after the backward call.
net.zero_grad()  # zero the gradient buffers of all parameters

print('conv1.bias.grad before backward')
print(net.conv1.bias.grad)

loss.backward()

print('conv1.bias.grad after backward')
print(net.conv1.bias.grad)
```



```
conv1.bias.grad before backward
tensor([0., 0., 0., 0., 0., 0.])
conv1.bias.grad after backward
tensor([ 9.7322e-03, -6.5905e-05,  2.7879e-03, -2.1420e-03,  2.8334e-03,
         1.1446e-02])
```

# 6. Update the Network's Parameters

The simplest update rule used in practice is stochastic gradient descent (SGD):

```
weight = weight - learning_rate * gradient
```
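This rule can be implemented by hand in a few lines; a sketch that applies it in place to every parameter after `loss.backward()` has filled the `.grad` fields:

```python
# Sketch: hand-rolled SGD, weight -= learning_rate * gradient.
learning_rate = 0.01
for f in net.parameters():
    f.data.sub_(f.grad.data * learning_rate)
```

In practice you rarely write this yourself: `torch.optim` packages SGD and other update rules (Adam, RMSprop, and more), as the script below shows.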

Reusing the `Net` class from Section 1:
```python
import torch.optim as optim

net = Net()

# 2. Input and target
input = torch.randn(1, 1, 32, 32)
target = torch.randn(10)
target = target.view(1, -1)

criterion = nn.MSELoss()

# 6. Use an optimizer to update the network's parameters
optimizer = optim.SGD(net.parameters(), lr=0.01)

optimizer.zero_grad()             # zero the gradient buffers, otherwise gradients accumulate
output = net(input)               # forward pass
loss = criterion(output, target)  # compute the loss
loss.backward()                   # backpropagate
optimizer.step()                  # does the update
```
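Putting it together: in a real training script these steps sit inside a loop over the data. A minimal sketch, assuming a hypothetical `dataloader` that yields `(input, target)` batches (for example a `torch.utils.data.DataLoader`):

```python
# Sketch: the standard training loop over a hypothetical dataloader.
for input, target in dataloader:
    optimizer.zero_grad()             # clear gradients from the previous step
    output = net(input)               # forward pass
    loss = criterion(output, target)  # compute the loss
    loss.backward()                   # backpropagate
    optimizer.step()                  # update the parameters
```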

