PyTorch -- Neural Network Basics

PyTorch Study Notes (1)

1. Neural network basics:

1.1 Fitting a relationship (regression):

import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
#nn.Linear expects 2-D input of shape (n_samples, n_features), so the 1-D data must be reshaped first
#The difference between torch.unsqueeze and torch.squeeze:
#torch.squeeze() compresses a tensor's dimensions: it removes dimensions of size 1
#torch.unsqueeze() expands the dimensions: it inserts a dimension of size 1 at the given position
#.unsqueeze(0) is mainly used when the input contains only a single sample
x = torch.unsqueeze(torch.linspace(-1,1,100),dim=1)  # torch.linspace(-1,1,100): 100 sample points on the interval -1~1
y = x.pow(2) + 0.2*torch.randn(x.size())
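# Quick shape check (illustrative, not part of the original script): squeeze/unsqueeze only add or
# remove size-1 dimensions, the values themselves are unchanged.
print(torch.linspace(-1,1,100).shape)  # torch.Size([100])
print(x.shape)                         # torch.Size([100, 1]) after unsqueeze(dim=1)
print(x.squeeze().shape)               # torch.Size([100]): squeeze removes the size-1 dimension again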

#plot the raw data
# plt.scatter(x.data.numpy(),y.data.numpy())
# plt.show()

#Method 1: build the network as a class
# class Net(torch.nn.Module):
#     def __init__(self,n_feature,n_hidden,n_output):
#         super(Net, self).__init__()  # call the parent class __init__
#         # define the form of each layer
#         self.hidden = torch.nn.Linear(n_feature,n_hidden)
#         self.predict = torch.nn.Linear(n_hidden,n_output)
#
#
#     def forward(self,x):
#         # forward pass: the network maps the input to the predicted output
#         x = F.relu(self.hidden(x))
#         x = self.predict(x)
#         return x
#
# net = Net(n_feature=1,n_hidden=100,n_output=1)
# print(net)
#Method 2: quick build with Sequential:
net = torch.nn.Sequential(
    torch.nn.Linear(1,100),
    torch.nn.ReLU(),
    torch.nn.Linear(100,1)
)
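# print(net)  # note: the Sequential version lists its layers by index (0, 1, 2), while the class-based version above lists them by attribute name (hidden, predict)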

optimizer = torch.optim.SGD(net.parameters(),lr=0.2)
loss_func = torch.nn.MSELoss()

plt.ion()  # turn interactive mode on so the figure can be redrawn inside the training loop
for t in range(100):
    prediction = net(x)

    loss = loss_func(prediction,y)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if t % 5 == 0:
        plt.cla()  # clear the current active axes; other axes are unaffected
        plt.scatter(x.data.numpy(),y.data.numpy())
        plt.plot(x.data.numpy(),prediction.data.numpy(),'r-',lw=5)
        plt.text(0.5,0,'LOSS=%.4f'% loss.data.numpy(),fontdict={'size':20,'color':'red'})  # the first two arguments are the x, y coordinates where the text is placed
        plt.pause(0.1)  # pause briefly, otherwise the frames are drawn too fast to be displayed


plt.ioff()  # turn interactive mode off (plt.ion() is called before the loop above)
plt.show()
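As a quick sanity check after training (a minimal sketch; x_test is an illustrative name, not part of the original script), the trained network can be queried on new inputs:

with torch.no_grad():
    x_test = torch.tensor([[0.5]])  # shape (1, 1): one sample, one feature
    print(net(x_test))              # should be close to 0.5**2 = 0.25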

(Figure: scatter plot of the training data with the fitted curve and the current loss value, updated during training)

1.2 Distinguishing types (classification)

import torch
import matplotlib.pyplot as plt
import torch.nn.functional as F
from torch.autograd import Variable

n_data = torch.ones(100,2)  # base shape of the data: a 100x2 matrix of ones
x0 = torch.normal(2*n_data,1)  # class-0 samples drawn from a normal distribution with mean 2*n_data and std 1
y0 = torch.zeros(100)  # label the x0 samples as class 0

x1 = torch.normal(-2*n_data,1)  # second cluster of samples, mean -2*n_data and std 1
y1 = torch.ones(100)  # label the x1 samples as class 1

x = torch.cat((x0,x1),0).type(torch.FloatTensor)  # torch.cat concatenates the two sample sets along dimension 0
y = torch.cat((y0,y1),).type(torch.LongTensor)  # class labels must be LongTensor for CrossEntropyLoss

x = Variable(x)
y = Variable(y)  # wrap them in Variable containers (optional since PyTorch 0.4, where tensors support autograd directly)

#plt.scatter(x.data.numpy()[:,0],x.data.numpy()[:,1],c=y.data.numpy(),s=100,lw=0,cmap='RdYlGn')
#plt.show()

#method 1: build the network as a class
class Net(torch.nn.Module):
    def __init__(self,n_feature,n_hidden,n_output):  # network initialization: set the sizes of the input, hidden and output layers
        super(Net,self).__init__()  # call the parent class __init__ for the standard initialization
        self.hidden = torch.nn.Linear(n_feature,n_hidden)  # hidden layer, linear output
        self.out = torch.nn.Linear(n_hidden,n_output)  # output layer, linear output

    def forward(self,x):  # forward pass: map the input through the network to the output
        x = F.relu(self.hidden(x))  # hidden layer followed by ReLU activation
        x = self.out(x)  # pass through the output layer and return
        return x

net = Net(n_feature=2,n_hidden=10,n_output=2)  # one output unit per class
print(net)

#method 2: quick build with Sequential
net2 = torch.nn.Sequential(
    torch.nn.Linear(2,10),
    torch.nn.ReLU(),
    torch.nn.Linear(10,2)
)
print(net2)

optimizer = torch.optim.SGD(net2.parameters(),lr=0.02)  # set up the optimizer, lr is the learning rate
loss_func = torch.nn.CrossEntropyLoss()  # classification loss; expects raw logits and LongTensor class labels

plt.ion()  # interactive plotting on
for t in range(100):
    out = net2(x)  # forward pass: raw network outputs (logits) for this iteration
    loss = loss_func(out,y)  # compute the loss, i.e. the discrepancy between out and y

    optimizer.zero_grad()  # clear the gradients accumulated in the previous step
    loss.backward()  # backpropagation
    optimizer.step()  # apply one optimization step

    if t % 2 == 0:
        plt.cla()
        # after a softmax the class with the largest probability is the prediction
        prediction = torch.max(out,1)[1]  # torch.max(out,1) returns the maximum of each row and its index; [1] takes the index, i.e. 0 for the first class, 1 for the second
        pred_y = prediction.data.numpy().squeeze()
        target_y = y.data.numpy()
        plt.scatter(x.data.numpy()[:,0],x.data.numpy()[:,1],c=pred_y,s=100,lw=0,cmap='RdYlGn')
        accuracy = float((pred_y == target_y).astype(int).sum())/float(target_y.size)  # fraction of predictions that match the true labels
        plt.text(1.5,-4,'Accuracy=%.2f'% accuracy,fontdict={'size':20,'color':'red'})
        plt.pause(0.1)


plt.ioff()  # stop interactive plotting
plt.show()
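CrossEntropyLoss applies softmax internally, so net2 outputs raw logits. To turn them into explicit class probabilities (a minimal sketch, not part of the original script):

with torch.no_grad():
    logits = net2(x)
    probs = F.softmax(logits, dim=1)  # normalize each row into class probabilities
    pred = probs.argmax(dim=1)        # same predictions as torch.max(out, 1)[1] in the loop above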

1.3 Saving and loading

#save and load a network
import torch
import matplotlib.pyplot as plt
import torch.nn.functional as F


from torch.autograd import Variable

torch.manual_seed(1)    # reproducible: fix the random initialization so runs produce the same data


# fake data
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)  # x data (tensor), shape=(100, 1)
y = x.pow(2) + 0.2*torch.rand(x.size())  # noisy y data (tensor), shape=(100, 1)

# The code below is deprecated in Pytorch 0.4. Now, autograd directly supports tensors
x,y = Variable(x,requires_grad=False),Variable(y,requires_grad=False)

def save():
    #save net1
    net1 = torch.nn.Sequential(
        torch.nn.Linear(1,10),
        torch.nn.ReLU(),
        torch.nn.Linear(10,1)
    )

    optimizer = torch.optim.SGD(net1.parameters(),lr=0.5)
    loss_func = torch.nn.MSELoss()

    #train the network
    for t in range(100):
        prediction = net1(x)
        loss = loss_func(prediction,y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # plot
    plt.subplot(131)  # plt.subplot(131) splits the figure into 1 row and 3 columns and selects position 1
    plt.title('net1')
    plt.scatter(x.data.numpy(), y.data.numpy())
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
    #two ways to save
    torch.save(net1,'net.pkl')  # save the entire network
    torch.save(net1.state_dict(),'net_params.pkl')  # save only the network parameters (faster and uses less disk space)


def restore_net():
    #load the whole network saved from net1 into net2
    net2 = torch.load('net.pkl')
    prediction = net2(x)
    # plot
    plt.subplot(132)
    plt.title('net2')
    plt.scatter(x.data.numpy(), y.data.numpy())
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)


def restore_params():
    net3 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1)
    )
    #copy all of net1's parameters into net3 (the architectures must match)
    net3.load_state_dict(torch.load('net_params.pkl'))
    prediction = net3(x)
    # plot
    plt.subplot(133)
    plt.title('net3')
    plt.scatter(x.data.numpy(), y.data.numpy())
    plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
    plt.show()

#save net1
save()

restore_net()

restore_params()
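Note that load_state_dict only copies tensors, so the target network must have exactly the same architecture as the one that was saved. A small illustration (net_wrong is a hypothetical name, not part of the original script):

net_wrong = torch.nn.Sequential(torch.nn.Linear(1, 5), torch.nn.ReLU(), torch.nn.Linear(5, 1))
try:
    net_wrong.load_state_dict(torch.load('net_params.pkl'))
except RuntimeError as e:
    print(e)  # reports a size mismatch for the layer weights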

1.5 Batch training

import torch
import torch.utils.data as Data

torch.manual_seed(1)

BATCH_SIZE = 5  # number of samples per mini-batch

if __name__ == '__main__':
    x = torch.linspace(1, 10, 10) # x data (torch tensor)
    y = torch.linspace(10, 1, 10) # y data (torch tensor)



    torch_dataset = Data.TensorDataset(x,y)
    loader = Data.DataLoader(
       dataset=torch_dataset,  # the Dataset object decides where the data comes from and how each item is fetched
       batch_size=BATCH_SIZE,  # mini-batch size
       shuffle=True,  # whether to shuffle the order each epoch
       num_workers=2,  # 2 worker processes fetch batch_x, batch_y, i.e. data is read with multiprocessing
     )
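    # (added note, not in the original: setting num_workers=0 reads batches in the main process, which avoids the multiprocessing requirement for the __main__ guard on Windows)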

    for epoch in range(3):  # train on the whole dataset 3 times
        for step,(batch_x,batch_y) in enumerate(loader):  # each step the loader yields one mini-batch for training
            #training...
            #print option 1:
            print('Epoch',epoch,'| Step',step,'| batch x:',batch_x,'| batch y:',batch_y)
            #output:
# Epoch 0 | Step 0 | batch x: tensor([ 5.,  7., 10.,  3.,  4.]) | batch y: tensor([6., 4., 1., 8., 7.])
# Epoch 0 | Step 1 | batch x: tensor([2., 1., 8., 9., 6.]) | batch y: tensor([ 9., 10.,  3.,  2.,  5.])
# Epoch 1 | Step 0 | batch x: tensor([ 4.,  6.,  7., 10.,  8.]) | batch y: tensor([7., 5., 4., 1., 3.])
# Epoch 1 | Step 1 | batch x: tensor([5., 3., 2., 1., 9.]) | batch y: tensor([ 6.,  8.,  9., 10.,  2.])
# Epoch 2 | Step 0 | batch x: tensor([ 4.,  2.,  5.,  6., 10.]) | batch y: tensor([7., 9., 6., 5., 1.])
# Epoch 2 | Step 1 | batch x: tensor([3., 9., 1., 8., 7.]) | batch y: tensor([ 8.,  2., 10.,  3.,  4.])

            #print option 2 (note: batch_x.numpy is missing its parentheses below, so Python prints the bound method object rather than the array; use batch_x.numpy() to get the values):
            #print('Epoch', epoch, '| Step', step, '| batch x:', batch_x.numpy, '| batch y:', batch_y.numpy)
#output 2:
# Epoch 0 | Step 0 | batch x: <built-in method numpy of Tensor object at 0x0000027D331238B8> | batch y: <built-in method numpy of Tensor object at 0x0000027D33123908>
# Epoch 0 | Step 1 | batch x: <built-in method numpy of Tensor object at 0x0000027D331237C8> | batch y: <built-in method numpy of Tensor object at 0x0000027D33123778>
# Epoch 1 | Step 0 | batch x: <built-in method numpy of Tensor object at 0x0000027D331239F8> | batch y: <built-in method numpy of Tensor object at 0x0000027D33123BD8>
# Epoch 1 | Step 1 | batch x: <built-in method numpy of Tensor object at 0x0000027D33123CC8> | batch y: <built-in method numpy of Tensor object at 0x0000027D331237C8>
# Epoch 2 | Step 0 | batch x: <built-in method numpy of Tensor object at 0x0000027D33123E08> | batch y: <built-in method numpy of Tensor object at 0x0000027D33123DB8>
# Epoch 2 | Step 1 | batch x: <built-in method numpy of Tensor object at 0x0000027D33123C78> | batch y: <built-in method numpy of Tensor object at 0x0000027D33123D18>
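If BATCH_SIZE does not divide the dataset evenly, the last batch of each epoch is simply smaller. A quick illustration (loader8 is a hypothetical name, reusing torch_dataset from above):

    loader8 = Data.DataLoader(dataset=torch_dataset, batch_size=8, shuffle=True)
    for step, (bx, by) in enumerate(loader8):
        print('Step', step, '| batch size:', bx.size(0))  # prints 8 for step 0, then 2 for step 1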

1.6 Optimizers

import torch
import torch.nn.functional as F
import torch.utils.data as Data
from torch.autograd import Variable
import matplotlib.pyplot as plt

#hyper parameters
LR = 0.01
BATCH_SIZE = 32
EPOCH = 12

if __name__ == '__main__':  # the __main__ guard is needed (especially on Windows) because the DataLoader uses num_workers > 0, which spawns worker processes that re-import this module
    x = torch.unsqueeze(torch.linspace(-1,1,1000),dim=1)
    y = x.pow(2) + 0.1*torch.normal(torch.zeros(*x.size()))

    torch_dataset = Data.TensorDataset(x,y)
    loader = Data.DataLoader(dataset=torch_dataset,batch_size=BATCH_SIZE,shuffle=True,num_workers=2)

    class Net(torch.nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.hidden = torch.nn.Linear(1,20)
            self.predict = torch.nn.Linear(20,1)
        def forward(self,x):
            x = F.relu(self.hidden(x))
            x = self.predict(x)
            return x

    net_SGD = Net()
    net_Momentum = Net()
    net_RMSprop= Net()
    net_Adam = Net()

    nets = [net_SGD,net_Momentum,net_RMSprop,net_Adam]
    opt_SGD = torch.optim.SGD(net_SGD.parameters(),lr=LR)
    opt_Momentum = torch.optim.SGD(net_Momentum.parameters(),lr=LR,momentum=0.8)  # each optimizer must be given its own network's parameters
    opt_RMSprop = torch.optim.RMSprop(net_RMSprop.parameters(),lr=LR,alpha=0.9)
    opt_Adam = torch.optim.Adam(net_Adam.parameters(),lr=LR,betas=(0.9,0.99))
    optimizers = [opt_SGD,opt_Momentum,opt_RMSprop,opt_Adam]

    loss_func = torch.nn.MSELoss()
    loss_his = [[],[],[],[]]  # record the training loss of each network

    for epoch in range(EPOCH):
        print(epoch)
        for step,(batch_x,batch_y) in enumerate(loader):
            b_x = Variable(batch_x)
            b_y = Variable(batch_y)


            for net,opt,l_his in zip(nets,optimizers,loss_his):
                output = net(b_x)
                loss = loss_func(output,b_y)
                opt.zero_grad()
                loss.backward()
                opt.step()
                l_his.append(loss.item())

    # plot the loss curves
    labels = ['SGD','Momentum','RMSprop','Adam']
    for i,l_his in enumerate(loss_his):
        plt.plot(l_his,label=labels[i])
    plt.legend(loc='best')
    plt.xlabel('Steps')
    plt.ylabel('Loss')
    plt.ylim(0,0.2)
    plt.show()

(Figure: training loss curves of SGD, Momentum, RMSprop and Adam)
