First PyTorch example

Note: these are personal study notes, recorded for reference only.

Implementing a first two-layer neural network in PyTorch.

1. Plain PyTorch implementation (manual gradient-descent updates)

import torch

N,D_in,H,D_out = 64,1000,100,10  # 64 training samples, 1000-d input, 100-d hidden layer, 10-d output
# generate random training data
x = torch.randn(N,D_in)
y = torch.randn(N,D_out)
w1 = torch.randn(D_in,H,requires_grad=True)  # requires_grad=True tells autograd to track gradients for this tensor
w2 = torch.randn(H,D_out,requires_grad=True)
learning_rate = 1e-6

for it in range(500):
    y_pred = x.mm(w1).clamp(min=0).mm(w2)  # forward pass: linear -> ReLU -> linear
    loss = (y-y_pred).pow(2).sum()  # sum-of-squares loss
    print(it,loss.item())  # item() extracts the Python number from a scalar tensor
    # backward pass
    loss.backward()
    # update parameters with gradient descent
    with torch.no_grad():
        w1 -= learning_rate*w1.grad
        w2 -= learning_rate*w2.grad
        w1.grad.zero_()
        w2.grad.zero_()   # zero the gradients of w1 and w2 after every update
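
For reference, here is a minimal hand-written sketch of the gradients that loss.backward() computes in the loop above. The names h, h_relu and grad_* are introduced only for this illustration, and everything is wrapped in torch.no_grad() so no extra graph is built.

with torch.no_grad():
    h = x.mm(w1)                          # hidden-layer pre-activation
    h_relu = h.clamp(min=0)               # ReLU
    y_pred = h_relu.mm(w2)
    grad_y_pred = 2.0 * (y_pred - y)      # d(loss)/d(y_pred) for the sum-of-squares loss
    grad_w2 = h_relu.t().mm(grad_y_pred)  # same value autograd stores in w2.grad
    grad_h_relu = grad_y_pred.mm(w2.t())
    grad_h = grad_h_relu.clone()
    grad_h[h < 0] = 0                     # ReLU only passes gradient where h > 0
    grad_w1 = x.t().mm(grad_h)            # same value autograd stores in w1.grad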

 

2. Using torch.nn

import torch
import torch.nn as nn

N,D_in,H,D_out = 64,1000,100,10
# training data
x = torch.randn(N,D_in)
y = torch.randn(N,D_out)

# Sequential model
model = torch.nn.Sequential(
    torch.nn.Linear(D_in,H),    # weight and bias (w*x + b); the bias can be disabled with bias=False
    torch.nn.ReLU(),
    torch.nn.Linear(H,D_out)
)
#model = model.cuda()  # uncomment to run the model on the GPU
'''
torch.nn.init.normal_(model[0].weight)  # explicit initialization, used in section 3
torch.nn.init.normal_(model[2].weight)
'''
loss_fn = nn.MSELoss(reduction = 'sum')

learning_rate = 1e-6
for it in range(500):
    #forward pass
    y_pred = model(x) #model forward
    
    #compute loss
    loss = loss_fn(y_pred,y) #computation graph
    print(it,loss.item())
    
    #Backward pass
    loss.backward()
    
    #update all model parameters (the weights and biases of both Linear layers)
    with torch.no_grad():
        for param in model.parameters():
            param -= learning_rate*param.grad
    model.zero_grad()
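
The commented-out model.cuda() line hints at GPU use. A minimal sketch (assuming a CUDA-capable machine; otherwise it silently stays on the CPU) of moving both the model and the data to the same device:

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)            # parameters now live on the chosen device
x, y = x.to(device), y.to(device)   # inputs must be on the same device as the model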

 

Compared with the first version, the second one indeed performs much worse.

3. Adding explicit initialization makes it perform about the same as the first version

torch.nn.init.normal_(model[0].weight)
torch.nn.init.normal_(model[2].weight)
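
One likely reason, and a minimal sketch of how to check it (reusing the model, x, y and loss_fn from section 2): the default nn.Linear initialization keeps the weights small, so the initial loss and its gradients are tiny and 500 updates at lr=1e-6 barely move the parameters, whereas normal_ (standard-normal) initialization produces a much larger initial loss whose gradients this learning rate can actually use.

with torch.no_grad():
    print("initial loss, default init:", loss_fn(model(x), y).item())
torch.nn.init.normal_(model[0].weight)   # re-initialize both Linear layers
torch.nn.init.normal_(model[2].weight)
with torch.no_grad():
    print("initial loss, normal_ init:", loss_fn(model(x), y).item())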

 

4. Using Adam

import torch
import torch.nn as nn

N,D_in,H,D_out = 64,1000,100,10
# training data
x = torch.randn(N,D_in)
y = torch.randn(N,D_out)

model = torch.nn.Sequential(
    torch.nn.Linear(D_in,H),
    torch.nn.ReLU(),
    torch.nn.Linear(H,D_out)
)
#model = model.cuda()  # uncomment to run the model on the GPU
loss_fn = nn.MSELoss(reduction = 'sum')

learning_rate = 1e-4  # 1e-3 to 1e-4 is usually a good learning-rate range for Adam
optimizer = torch.optim.Adam(model.parameters(),lr=learning_rate)
for it in range(500):
    #forward pass
    y_pred = model(x) #model forward
    
    #compute loss
    loss = loss_fn(y_pred,y) #computation graph
    print(it,loss.item())
    
    optimizer.zero_grad()
    #Backward pass
    loss.backward()
    
    #update model parameters
    optimizer.step()     # the optimizer updates all parameters in a single step
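
For comparison, the manual update loop in the earlier sections is plain (full-batch) gradient descent, and torch.optim.SGD implements exactly that update, which makes swapping optimizers a one-line change. A minimal sketch with the learning rate taken from the earlier sections:

optimizer = torch.optim.SGD(model.parameters(), lr=1e-6)
for it in range(500):
    y_pred = model(x)
    loss = loss_fn(y_pred, y)
    optimizer.zero_grad()   # clear the old gradients before the backward pass
    loss.backward()
    optimizer.step()        # apply the gradient-descent update to every parameter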
    

5. Modularizing the code with nn.Module

import torch
import torch.nn as nn

N,D_in,H,D_out = 64,1000,100,10
# training data
x = torch.randn(N,D_in)
y = torch.randn(N,D_out)

class TwoLayerNet(torch.nn.Module):
    def __init__(self, D_in, H, D_out):
        super(TwoLayerNet,self).__init__()
        #define the model architecture
        self.linear1 = torch.nn.Linear(D_in,H,bias=False)
        self.linear2 = torch.nn.Linear(H,D_out,bias=False)

    def forward(self, x):
        y_pred = self.linear2(self.linear1(x).clamp(min=0))
        return y_pred
model = TwoLayerNet(D_in, H, D_out)
loss_fn = nn.MSELoss(reduction = 'sum')

learning_rate = 1e-4  # 1e-3 to 1e-4 is usually a good learning-rate range for Adam
optimizer = torch.optim.Adam(model.parameters(),lr=learning_rate)
for it in range(500):
    #forward pass
    y_pred = model(x) #model forward
    
    #compute loss
    loss = loss_fn(y_pred,y) #computation graph
    print(it,loss.item())
    
    optimizer.zero_grad()
    #Backward pass
    loss.backward()
    
    #update model parameters
    optimizer.step()     # the optimizer updates all parameters in a single step
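
Once trained, the modularized model behaves like any other nn.Module. A minimal usage sketch; the file name two_layer_net.pt is just an example:

with torch.no_grad():                    # no gradient tracking is needed for inference
    new_x = torch.randn(1, D_in)
    print(model(new_x).shape)            # torch.Size([1, 10])
torch.save(model.state_dict(), "two_layer_net.pt")   # persist the learned weights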
    

 

Reference video: https://www.bilibili.com/video/BV12741177Cu?p=1

The instructor explains this incredibly well, I'm seriously impressed.
