PyTorch Official Tutorials, Part 2: Neural Networks in PyTorch

# From the "PyTorch Official Tutorials (Chinese Edition)": Neural Networks in PyTorch
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchviz import make_dot
from torchsummary import summary
import hiddenlayer as h
import torch.optim as optim


# 1. Define the neural network (the model): usually subclass nn.Module and implement your own layers
# LeNet, named after its author Yann LeCun
class Net(nn.Module):

    def __init__(self):
        super(Net, self).__init__()
        # 1 input channel, 6 output channels, 5x5 square convolution kernel
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # An affine operation: y = Wx + b
        # nn.Linear applies a linear transformation to the incoming data and is
        # used here to define the fully connected layers.
        # For a 32x32 input: conv1 (5x5) -> 28x28 -> pool -> 14x14 -> conv2 (5x5)
        # -> 10x10 -> pool -> 5x5, so the flattened size is 16 channels * 5 * 5.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)  # in_features=16 * 5 * 5, out_features=120
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    # Define the forward function; the backward function is then defined automatically by autograd
    def forward(self, x):
        # Max pooling over a (2, 2) window, stride = 2
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        # If the size is a square, you can specify it with a single number
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = x.view(-1, self.num_flat_features(x))  # reshape: flatten all dimensions except the batch dimension
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)  # no ReLU here: the final layer returns raw scores (logits)
        return x

    def num_flat_features(self, x):
        size = x.size()[1:] # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features
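# Aside (a minimal alternative, assuming a reasonably recent PyTorch, >= 1.0):
# torch.flatten(x, 1) collapses all dimensions except the batch dimension, so the
# reshape in forward() could be written without the num_flat_features() helper:
#     x = torch.flatten(x, 1)  # same result as x.view(-1, self.num_flat_features(x))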


net = Net()
print('model: ', net)

# summary(your_model, input_size=(channels, H, W))
summary(net, input_size=(1, 32, 32))
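
# The learnable parameters of a model are returned by net.parameters(); a quick
# sanity check of how many there are and the shape of conv1's weights:
params = list(net.parameters())
print(len(params))       # 10: a weight and a bias for each of conv1, conv2, fc1, fc2, fc3
print(params[0].size())  # conv1's weight: torch.Size([6, 1, 5, 5])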

# 2. Feed the network an input
# Let's try a random 32x32 input.
# Note: the expected input size for this net (LeNet) is 32x32. To use this net
# on the MNIST dataset, resize the images from the dataset to 32x32.
# Inputs are 4D tensors of ``nSamples x nChannels x Height x Width``.
# For example, torch.randn(8, 3, 224, 224) is (batch, channels, height, width):
# batch_size=8, 3 channels (an RGB image; a grayscale image has 1), size 224x224.
# torch.randn(1, 1, 32, 32) is batch_size=1, 1 channel (grayscale), size 32x32.
input = torch.randn(1, 1, 32, 32)  # shape is (batch, channels, height, width)
print('input: ', input)
output = net(input)
print('output: ', output)  # shape (1, 10)
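
# A minimal sketch of feeding real MNIST images to this net (assumes torchvision
# is installed; Resize and ToTensor are standard torchvision transforms):
# from torchvision import datasets, transforms
# transform = transforms.Compose([transforms.Resize((32, 32)), transforms.ToTensor()])
# mnist = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
# img, label = mnist[0]        # img has shape (1, 32, 32)
# out = net(img.unsqueeze(0))  # unsqueeze adds the batch dimension: (1, 1, 32, 32)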

# Visualize the network with PyTorchViz
# g = make_dot(output)  # build the graph from the (1, 10) output
# g.render(filename='graph', view=False)
# g.view()


# Or: visualize the network with HiddenLayer
# h_graph = h.build_graph(net, input)
# h_graph.theme = h.graph.THEMES['blue'].copy()
# h_graph.save('graph_h')

#     input -> conv2d -> relu -> maxpool2d -> conv2d -> relu -> maxpool2d
#           -> view -> linear -> relu -> linear -> relu -> linear
#           -> MSELoss
#           -> loss

# 3. Compute the loss

# Loss function
target = torch.randn(1, 10)  # a dummy target, for example
print('target: ', target)
# Reshaping a tensor: .view(a, b) reshapes it to a x b; passing -1 for a
# dimension tells PyTorch to infer it; .view(-1) flattens the tensor to 1-D.
target = target.view(1, -1)  # reshape to (1, 10), the same shape as output
print('target reshaped: ', target)
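
# A quick illustration of .view() on a toy tensor:
t = torch.arange(6)           # tensor([0, 1, 2, 3, 4, 5]), shape (6,)
print(t.view(2, 3))           # reshaped to 2x3
print(t.view(2, -1))          # -1 infers the second dimension: also 2x3
print(t.view(2, 3).view(-1))  # back to a flat 1-D tensor of shape (6,)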
criterion = nn.MSELoss()

loss = criterion(output, target)
print('loss: ', loss)
print(loss.grad_fn)
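# Walking backwards along grad_fn.next_functions exposes the chain of operations
# that produced loss (MSELoss <- Linear <- ReLU <- ...):
print(loss.grad_fn.next_functions[0][0])                       # Linear
print(loss.grad_fn.next_functions[0][0].next_functions[0][0])  # ReLU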

# 4. Backpropagate the loss

# Zero the gradient buffers of all parameters
net.zero_grad()
# Call loss.backward() to backpropagate the error, and inspect conv1's bias
# gradients before and after the backward pass
print('conv1.bias.grad before backward:', net.conv1.bias.grad)
loss.backward()
print('conv1.bias.grad after backward:', net.conv1.bias.grad)

# 5. Update the network parameters

# The simplest update rule is Stochastic Gradient Descent (SGD):
#     weight = weight - learning_rate * gradient
# Implemented by hand:
# learning_rate = 0.01
# for f in net.parameters():
#     f.data.sub_(f.grad.data * learning_rate)
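# The same manual update in a more modern idiom (assumes PyTorch >= 0.4): do the
# in-place update under torch.no_grad() instead of going through .data:
# with torch.no_grad():
#     for p in net.parameters():
#         p -= p.grad * learning_rate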

# torch.optim provides optimizers implementing various update rules, such as SGD, Nesterov-SGD, Adam, RMSProp, etc.
optimizer = optim.SGD(net.parameters(), lr=0.01)

optimizer.zero_grad()             # zero the gradient buffers
output = net(input)               # forward pass
loss = criterion(output, target)  # compute the loss
loss.backward()                   # backpropagate
optimizer.step()                  # apply the update
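
# Putting the pieces together: a minimal sketch of a training loop over this one
# (input, target) pair (a real loop would iterate over batches from a DataLoader):
for step in range(10):
    optimizer.zero_grad()             # clear gradients from the previous step
    output = net(input)               # forward pass
    loss = criterion(output, target)  # compute the loss
    loss.backward()                   # backpropagate
    optimizer.step()                  # apply the SGD update
    print(f'step {step}: loss = {loss.item():.4f}')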