Building a Neural Network with PyTorch

Introduction: It has been a while since I started learning about neural networks. With my limited math skills, building one from scratch in numpy proved too much for me, but I did work out the basic steps, so I moved on to building networks with PyTorch. Below is a record of some simple ways to construct a neural network.

1. A simple build with raw torch tensors

import torch

N, D_in, H, D_out = 64, 1000, 100, 10
# create some random training data
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)

w1 = torch.randn(D_in, H)   # input-to-hidden weights (D_in x H)
w2 = torch.randn(H, D_out)  # hidden-to-output weights (H x D_out)

learning_rate = 1e-6  # learning rate; tiny because the summed squared-error loss yields large gradients

for it in range(500):
    # forward pass
    h = x.mm(w1)  # N*H
    h_relu = h.clamp(min=0)  # N*H
    y_pred = h_relu.mm(w2)  # N*D_out

    # compute loss (sum of squared errors)
    loss = (y_pred - y).pow(2).sum().item()
    print(it, loss)

    # Backward pass
    # compute the gradients by hand (no bias terms here)
    grad_y_pred = 2.0 * (y_pred - y)
    grad_w2 = h_relu.t().mm(grad_y_pred)
    grad_h_relu = grad_y_pred.mm(w2.t())
    grad_h = grad_h_relu.clone()
    grad_h[h < 0] = 0
    grad_w1 = x.t().mm(grad_h)

    # update weights of w1 and w2
    w1 -= learning_rate * grad_w1
    w2 -= learning_rate * grad_w2
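
A quick way to sanity-check these hand-derived gradients is to redo one forward/backward pass with autograd enabled and compare the results. A minimal sketch, using the same shapes as above and only standard torch calls:

import torch

N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
w1 = torch.randn(D_in, H, requires_grad=True)   # tracked by autograd
w2 = torch.randn(H, D_out, requires_grad=True)

h = x.mm(w1)
h_relu = h.clamp(min=0)
y_pred = h_relu.mm(w2)
loss = (y_pred - y).pow(2).sum()
loss.backward()  # autograd fills w1.grad and w2.grad

with torch.no_grad():  # manual gradients, exactly as in the loop above
    grad_y_pred = 2.0 * (y_pred - y)
    grad_w2 = h_relu.t().mm(grad_y_pred)
    grad_h = grad_y_pred.mm(w2.t()).clone()
    grad_h[h < 0] = 0
    grad_w1 = x.t().mm(grad_h)

print(torch.allclose(grad_w1, w1.grad))  # should print True (up to float tolerance)
print(torch.allclose(grad_w2, w2.grad))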

2. Using the torch.nn module

(1) With nn.Sequential
import torch
import torch.nn as nn

N, D_in, H, D_out = 64, 1000, 100, 10
# create some random training data
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)

model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H, bias=False),  # x @ w1 (bias disabled)
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out, bias=False),  # hidden -> output
)
# loss function
loss_fn = nn.MSELoss(reduction='sum')

learning_rate = 1e-4  # learning rate

optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)  # optimizer that updates the model parameters

for it in range(500):
    # forward pass
    y_pred = model(x)

    # compute loss
    loss = loss_fn(y_pred, y)  # computation graph
    print(it, loss.item())

    optimizer.zero_grad()  # clear old gradients before computing new ones

    # Backward pass
    loss.backward()

    # update model parameters
    optimizer.step()  # apply the gradients to update the parameters
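
After the loop, it can help to confirm what the optimizer actually updated and how the trained model performs. A small sketch that could be appended to the script above (names follow that script):

for name, p in model.named_parameters():
    print(name, p.shape)  # e.g. '0.weight' torch.Size([100, 1000]); the ReLU at index 1 has no parameters

with torch.no_grad():  # evaluate without building a computation graph
    print('final loss:', loss_fn(model(x), y).item())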

(2) Wrapping the model in a custom nn.Module subclass

import torch
import torch.nn as nn

N, D_in, H, D_out = 64, 1000, 100, 10
# create some random training data
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)


class TwoLayerNet(torch.nn.Module):  # subclass nn.Module
    def __init__(self, D_in, H, D_out):
        super(TwoLayerNet, self).__init__()
        # define the model architecture
        self.linear1 = torch.nn.Linear(D_in, H, bias=False)  # bias would be the b in y = ax + b; disabled here
        self.linear2 = torch.nn.Linear(H, D_out, bias=False)

    def forward(self, x):
        y_pred = self.linear2(self.linear1(x).clamp(min=0))  # clamp(min=0) acts as ReLU
        return y_pred


model = TwoLayerNet(D_in, H, D_out)
'''
# equivalent model built with nn.Sequential:
model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H, bias=False),
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out, bias=False),
)
'''
loss_fn = nn.MSELoss(reduction='sum')  # loss function
learning_rate = 1e-4  # learning rate
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)  # optimizer that updates the model parameters

for it in range(500):
    # forward pass
    y_pred = model(x)  # calls model.forward(x) under the hood

    # compute loss
    loss = loss_fn(y_pred, y)  # computation graph
    print(it, loss.item())

    optimizer.zero_grad()  # clear old gradients before computing new ones

    # Backward pass: compute gradients for all parameters
    loss.backward()

    # update model parameters
    optimizer.step()  # apply the gradients to update the parameters
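
Once training is done, the usual way to persist the result is through the model's state_dict. A minimal sketch (the file name 'two_layer_net.pt' is an arbitrary choice):

torch.save(model.state_dict(), 'two_layer_net.pt')  # save only the learned weights

# later: rebuild the same architecture and load the weights back
model2 = TwoLayerNet(D_in, H, D_out)
model2.load_state_dict(torch.load('two_layer_net.pt'))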
