A minimal example of computing dy/dx
import torch
from torch.autograd import Variable

# Use PyTorch's autograd to compute dy/dx for y = x**2 at x = 3
x = torch.Tensor([3])
x = Variable(x, requires_grad=True)  # track gradients with respect to x
y = x ** 2
y.backward()   # note: only a scalar (single-element) output can call backward() without arguments
print(y)       # tensor([9.])
print(x.grad)  # dy/dx = 2x = tensor([6.])
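Since PyTorch 0.4 the Variable wrapper has been merged into Tensor, so requires_grad can be passed to torch.tensor directly. A minimal sketch of the same example in the current API:

import torch

x = torch.tensor([3.0], requires_grad=True)
y = x ** 2
y.backward()
print(x.grad)  # tensor([6.]) — matches dy/dx = 2x at x = 3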
An example of learning the weights from inputs and outputs
import torch
from torch.autograd import Variable

x = [[0.1, 0.8, 1], [0.8, 0.2, 1]]  # inputs (the constant last column acts as a bias term)
y = [[1], [1]]                      # targets
w = [[0.1, 0.2, 0.3]]               # initial weights
x = Variable(torch.Tensor(x))
y = Variable(torch.Tensor(y))
w = Variable(torch.Tensor(w), requires_grad=True)  # w needs its gradient tracked

# training loop
for i in range(1000):
    out = torch.mm(x, w.t())  # watch the dimensions when computing the output; transpose where needed
    delta = out - y
    loss = delta[0] ** 2 + delta[1] ** 2
    print(loss)  # monitor the error
    w.grad = torch.Tensor([[0, 0, 0]])  # zero the gradient before backward(), which accumulates
    loss.backward()
    w.data -= w.grad * 0.01  # update .data only, so the step itself is not tracked by autograd
print(torch.mm(x, w.t()), w)  # inspect the result
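For this squared-error loss the gradient has a closed form, ∂loss/∂w = 2·(out − y)ᵀx, so the autograd result can be checked by hand. A minimal sketch, assuming PyTorch ≥ 0.4 (no Variable needed) and the same x, y, w as above:

import torch

x = torch.tensor([[0.1, 0.8, 1.0], [0.8, 0.2, 1.0]])
y = torch.tensor([[1.0], [1.0]])
w = torch.tensor([[0.1, 0.2, 0.3]], requires_grad=True)

out = x @ w.t()
loss = ((out - y) ** 2).sum()
loss.backward()

manual = 2 * (out - y).detach().t() @ x  # closed-form gradient of the squared error
print(torch.allclose(w.grad, manual))    # True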
Solving the same problem with the PyTorch toolkit
import torch
import torch.nn as nn
from torch.autograd import Variable

x = [[0.1, 0.8], [0.8, 0.2]]  # inputs
y = [[1], [0]]                # targets
x = Variable(torch.Tensor(x))
y = Variable(torch.Tensor(y))

class MyNet(nn.Module):  # subclass nn.Module
    def __init__(self):
        super(MyNet, self).__init__()
        self.layer = nn.Linear(2, 1)  # 2 inputs, 1 output

    def forward(self, x):
        return self.layer(x)  # nn.Linear initializes its weights and bias by default

net = MyNet()       # build the model
mls = nn.MSELoss()  # loss function: mean squared error
opt = torch.optim.Adam(net.parameters(), lr=0.01)  # optimizer (SGD, RMSprop, Adam, ...) over net's parameters
for i in range(1000):
    out = net(x)  # forward pass; calling net(x) invokes forward via __call__
    loss = mls(out, y)
    print(loss)
    opt.zero_grad()  # zero the gradients
    loss.backward()  # backpropagate
    opt.step()       # update the weights
print(net(x))
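After training, the learned parameters can be read straight off the nn.Linear layer. A quick inspection sketch, assuming the net trained above:

print(net.layer.weight)  # learned weight matrix, shape (1, 2)
print(net.layer.bias)    # learned bias, shape (1,)
for name, p in net.named_parameters():
    print(name, p.shape)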
Adding an activation function and a multi-layer network
import torch
import torch.nn as nn
from torch.autograd import Variable

x = [[0.1, 0.8], [0.8, 0.2]]  # inputs
y = [[1], [0]]                # targets
x = Variable(torch.Tensor(x))
y = Variable(torch.Tensor(y))

class MyNet(nn.Module):  # subclass nn.Module
    def __init__(self):
        super(MyNet, self).__init__()
        self.layer1 = nn.Linear(2, 4)  # hidden layer: 2 inputs, 4 units
        self.relu = nn.ReLU()          # activation function
        self.layer2 = nn.Linear(4, 1)  # output layer: 4 inputs, 1 output

    def forward(self, x):
        x = self.relu(self.layer1(x))  # nonlinearity between the two linear layers
        return self.layer2(x)

net = MyNet()
mls = nn.MSELoss()
opt = torch.optim.Adam(net.parameters(), lr=0.01)
for i in range(1000):
    out = net(x)
    loss = mls(out, y)
    print(loss)
    opt.zero_grad()  # zero the gradients
    loss.backward()  # backpropagate
    opt.step()       # update the weights
print(net(x))
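The activation is what gives the extra layer any power: without it, two stacked Linear layers collapse into a single linear map. A minimal sketch demonstrating the collapse numerically:

import torch
import torch.nn as nn

torch.manual_seed(0)
l1, l2 = nn.Linear(2, 4), nn.Linear(4, 1)
x = torch.randn(3, 2)

stacked = l2(l1(x))  # two linear layers, no activation in between
# Equivalent single linear map: W = W2 @ W1, b = W2 @ b1 + b2
W = l2.weight @ l1.weight
b = l2.weight @ l1.bias + l2.bias
merged = x @ W.t() + b
print(torch.allclose(stacked, merged, atol=1e-6))  # True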
Wrapping the network in a class with Sequential and optim.Adam
import torch
import torch.nn as nn

x = [[0.2, 0.4], [0.2, 0.3], [0.3, 0.4]]
x = torch.Tensor(x)
y = torch.Tensor([[0.6], [0.5], [0.7]])  # raw data only; no normalization or other cleaning for now

class MyNet(nn.Module):
    def __init__(self):
        super(MyNet, self).__init__()
        self.fc = nn.Sequential(  # container that holds the layers and calls them in order
            nn.Linear(2, 4),
            nn.ReLU(),
            nn.Linear(4, 4),
            nn.ReLU(),
            nn.Linear(4, 1)
        )
        self.opt = torch.optim.Adam(self.parameters())  # optimizer over this module's parameters
        self.mls = torch.nn.MSELoss()

    def forward(self, inputs):
        return self.fc(inputs)  # produce the output

    def train_model(self, x, y):  # one training step
        out = self.forward(x)
        self.loss = self.mls(out, y)
        self.opt.zero_grad()
        self.loss.backward()
        self.opt.step()  # update the weights

    def test(self, x):
        return self.forward(x)

net = MyNet()
for i in range(5000):
    net.train_model(x, y)
    print("Loss is {}".format(net.loss.item()))
out = net.test(x)
print(out)
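Once trained, the model can be applied to unseen inputs and its parameters persisted with PyTorch's standard state_dict mechanism. A minimal sketch, assuming the net above (the file name "mynet.pt" is just an example):

new_x = torch.Tensor([[0.25, 0.35]])  # unseen input
print(net.test(new_x))                # prediction

torch.save(net.state_dict(), "mynet.pt")      # save the learned parameters
net2 = MyNet()
net2.load_state_dict(torch.load("mynet.pt"))  # restore them into a fresh instance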