PyTorch (2): Building Neural Networks

This article explores how to build neural networks with PyTorch, covering regression (curve fitting), classification, quick network construction, batch training, optimizers such as SGD and Adam, GPU acceleration, and strategies against overfitting such as Dropout and Batch Normalization.


This post walks through building simple neural networks with PyTorch.

Curve Fitting (Regression)

import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt

x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)  # x data (tensor), shape=(100, 1)
y = x.pow(2) + 0.2*torch.rand(x.size())                 # noisy y data (tensor), shape=(100, 1)

class Net(torch.nn.Module):
    def __init__(self,n_feature,n_hidden,n_output):
        super(Net,self).__init__()
        self.hidden = torch.nn.Linear(n_feature,n_hidden)   # hidden layer
        self.predict = torch.nn.Linear(n_hidden,n_output)   # output layer

    def forward(self,x):
        x = F.relu(self.hidden(x))      # activation function for hidden layer
        x = self.predict(x)             # linear output
        return x

net = Net(n_feature=1,n_hidden=10,n_output=1)     # define the network
print(net)  # net architecture

optimizer = torch.optim.SGD(net.parameters(), lr=0.2)  # stochastic gradient descent; lr = learning rate
loss_func = torch.nn.MSELoss()  # mean squared error loss, suitable for regression

plt.ion()   # turn on interactive mode for live plotting

for t in range(200):
    prediction = net(x)     # input x and predict based on x

    loss = loss_func(prediction, y)     # must be (1. nn output, 2. target)

    optimizer.zero_grad()   # clear gradients for next train
    loss.backward()         # backpropagation,compute gradients
    optimizer.step()        # apply gradients

    if t % 5 == 0:
        # plot and show learning process
        plt.cla()
        plt.scatter(x.data.numpy(),y.data.numpy())
        plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
        plt.text(0.5, 0, 'Loss=%.4f' % loss.data.numpy(), fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)

plt.ioff()
plt.show()

Output: an animated plot of the scattered data points with the fitted curve, and the current loss value updated every 5 steps.
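Once training finishes, the trained net can be evaluated on a new input with a plain forward pass. A minimal sketch (the input value 0.5 is only an illustrative example, not from the original post):

with torch.no_grad():                   # no gradients needed for inference
    x_new = torch.tensor([[0.5]])       # shape (1, 1), same layout as the training data
    y_new = net(x_new)
    print(y_new)                        # roughly x**2 = 0.25 for a well-trained net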

Classification

import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt

# torch.manual_seed(1)    # reproducible

# make fake data
n_data = torch.ones(100, 2)
x0 = torch.normal(2*n_data, 1)      # class 0 x data (tensor), shape=(100, 2)
y0 = torch.zeros(100)               # class 0 y data (tensor), shape=(100,)
x1 = torch.normal(-2*n_data, 1)     # class 1 x data (tensor), shape=(100, 2)
y1 = torch.ones(100)                # class 1 y data (tensor), shape=(100,)
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)  # shape (200, 2); FloatTensor = 32-bit float, the inputs
y = torch.cat((y0, y1)).type(torch.LongTensor)      # shape (200,); LongTensor = 64-bit integer, the class labels

# The code below is deprecated since PyTorch 0.4. Now autograd supports tensors directly
# x,y = Variable(x),Variable(y)

# plt.scatter(x.data.numpy()[:,0],x.data.numpy()[:,1],c=y.data.numpy(),s=100,lw=0,cmap='RdYlGn')
# plt.show()


class Net(torch.nn.Module):
    def __init__(self,n_feature,n_hidden,n_output):
        super(Net,self).__init__()
        self.hidden = torch.nn.Linear(n_feature,n_hidden)   # hidden layer
        self.out = torch.nn.Linear(n_hidden,n_output)   # output layer

    def forward(self,x):
        x = F.relu(self.hidden(x))      # activation function for hidden layer
        x = self.out(x)
        return x

net = Net(n_feature=2,n_hidden=10,n_output=2)     # define the network
print(net)  # net architecture

optimizer = torch.optim.SGD(net.parameters(),lr=0.02)
# Note: when computing the loss, the targets are NOT one-hot encoded; they are a 1D tensor of class indices, shape (batch,),
# whereas the network output is a 2D tensor of shape (batch, n_classes)
loss_func = torch.nn.CrossEntropyLoss()  # cross-entropy loss; the target labels are NOT one-hot encoded

plt.ion()   # something about plotting

for t in range(100):
    out = net(x)                 # input x and predict based on x
    loss = loss_func(out, y)     # must be (1. nn output, 2. target); the target labels are NOT one-hot encoded

    optimizer.zero_grad()   # clear gradients for next train
    loss.backward()         # backpropagation,compute gradients
    optimizer.step()        # apply gradients

    if t % 2 == 0:
        # plot and show learning process
        plt.cla()
        prediction = torch.max(out, 1)[1]      # index of the largest logit = predicted class
        pred_y = prediction.data.numpy()
        target_y = y.data.numpy()
        plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn')
        accuracy = float((pred_y == target_y).astype(int).sum()) / float(target_y.size)
        plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color': 'red'})
        # plt.savefig('./classification/' + str(t) + '.jpg')
        plt.pause(0.1)

plt.ioff()
plt.show()

Output: an animated scatter plot of the two clusters colored by the predicted class, with the running accuracy updated every 2 steps.
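As noted in the comments above, CrossEntropyLoss expects class-index targets rather than one-hot labels. A small sketch of the expected shapes (the values here are made up purely for illustration):

logits = torch.randn(4, 2)              # (batch, n_classes) raw network outputs
targets = torch.tensor([0, 1, 1, 0])    # (batch,) class indices, int64, NOT one-hot
print(torch.nn.CrossEntropyLoss()(logits, targets))   # a single scalar loss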

Quick Network Construction

import torch
import torch.nn.functional as F

# method 1
# replace following class code with an easy sequential network
class Net(torch.nn.Module):
    def __init__(self,n_feature,n_hidden,n_output):
        super(Net,self).__init__()
        self.hidden = torch.nn.Linear(n_feature,n_hidden)   # hidden layer
        self.predict = torch.nn.Linear(n_hidden,n_output)   # output layer

    def forward(self,x):
        x = F.relu(self.hidden(x))      # activation function for hidden layer
        x = self.predict(x)             # linear output
        return x

net1 = Net(1, 10, 1)

# method 2
# easy and fast way to build your network
net2 = torch.nn.Sequential(
    torch.nn.Linear(1, 10),
    torch.nn.ReLU(),
    torch.nn.Linear(10, 1)
)


print(net1)     # net1 architecture
"""
Net (
  (hidden): Linear (1 -> 10)
  (predict): Linear (10 -> 1)
)
"""

print(net2)     # net2 architecture
"""
Sequential (
  (0): Linear (1 -> 10)
  (1): ReLU ()
  (2): Linear (10 -> 1)
)
"""

Saving and Restoring Parameters

import torch
import matplotlib.pyplot as plt

# torch.manual_seed(1)    # reproducible

# fake data
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)  # x data (tensor), shape=(100, 1); unsqueeze adds a dimension
y = x.pow(2) + 0.2*torch.rand(x.size())  # noisy y data (tensor), shape=(100, 1)

# The code below is deprecated since PyTorch 0.4. Now autograd supports tensors directly
# x,y = Variable(x,requires_grad=False),Variable(y,requires_grad=False)


def save():
    # save net1
    net1 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1)
    )
    optimizer = torch.optim.SGD(net1.parameters(),lr=0.5)
    loss_func = torch.nn.MSELoss()

    for t in range(100):
        prediction = net1(x)
        loss = loss_func(prediction,y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # plot result
    plt.figure(1, figsize=(10, 3))
    plt.subplot(131)
    plt.title('Net1')
    plt.scatter(x.data.numpy(),y.data.numpy())
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)

    # 2 ways to save the net
    # method 1
    torch.save(net1,'net.pkl')  # save entire net
    # method 2
    torch.save(net1.state_dict(), 'net_params.pkl')   # save only the parameters


# restore the trained network
def restore_net():
    # restore entire net1 to net2
    net2 = torch.load('net.pkl')       # load the entire net saved by save()
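    prediction = net2(x)

    # plot result (assumed to mirror the plotting code in save() above)
    plt.subplot(132)
    plt.title('Net2')
    plt.scatter(x.data.numpy(), y.data.numpy())
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)


# The function below is a minimal sketch (not from the original post) of the second restore
# method: load only the parameters saved with state_dict(), which requires rebuilding the
# same architecture first.
def restore_params():
    # net3 must have exactly the same architecture as net1
    net3 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1)
    )
    # copy the saved parameters into net3
    net3.load_state_dict(torch.load('net_params.pkl'))
    prediction = net3(x)

    # plot result
    plt.subplot(133)
    plt.title('Net3')
    plt.scatter(x.data.numpy(), y.data.numpy())
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
    plt.show()


save()            # train net1 and save it in two ways
restore_net()     # restore the entire net from 'net.pkl'
restore_params()  # restore only the parameters from 'net_params.pkl'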