import torch
# Coerce a Python list into a torch.Tensor with float dtype.
print(torch.tensor([1, 2, 3], dtype=torch.float))
# A 5x3 tensor filled with standard-normal random values.
print(torch.randn(5, 3))
1、构建模型
2、定义一个损失函数
3、定义一个优化器
4、将训练数据代入模型得到预测值
5、将梯度清零
6、获得损失
7、进行优化
import torch
from torch.autograd import Variable
# First look at constructing Tensor data.
def one():
    """Demonstrate basic tensor construction and Tensor/numpy interop.

    Prints a few factory-made tensors, then shows the round trip
    Tensor -> numpy array -> Tensor. Returns None.
    """
    print(torch.tensor([1, 2, 3], dtype=torch.float))  # list coerced to a float Tensor
    print(torch.randn(5, 3))                           # 5x3 standard-normal random values
    print(torch.zeros((2, 3)))                         # 2x3 all-zeros matrix
    print(torch.ones((2, 3)))                          # 2x3 all-ones matrix
    a = torch.randn((2, 3))
    b = a.numpy()            # Tensor -> numpy (shares memory with `a`)
    c = torch.from_numpy(b)  # numpy -> Tensor (shares memory with `b`)
    print(a)
    print(b)
    print(c)
# Automatic differentiation with Variable.
def two():
    """Differentiate y = w * x**2 + b elementwise and print the gradients.

    NOTE(review): Variable is deprecated since PyTorch 0.4; plain tensors
    created with requires_grad=True behave identically.

    Returns:
        tuple: (x.grad, w.grad, b.grad) so callers can inspect the results
        (previously the function only printed them and returned None).
    """
    # Leaf variables that track gradients.
    x = Variable(torch.Tensor([1, 2, 3]), requires_grad=True)
    w = Variable(torch.Tensor([4, 5, 6]), requires_grad=True)
    b = Variable(torch.Tensor([7, 8, 9]), requires_grad=True)
    # The function to differentiate.
    y = w * x ** 2 + b
    # Backprop with an all-ones upstream gradient: yields the elementwise
    # partials dy/dx = 2*w*x, dy/dw = x**2, dy/db = 1.
    y.backward(torch.Tensor([1, 1, 1]))
    print(x.grad)
    print(w.grad)
    print(b.grad)
    return x.grad, w.grad, b.grad
线性回归例子:
import torch
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
from torch import nn
# 100 evenly spaced points in [-1, 1], shaped (100, 1): one sample per row.
x = torch.linspace(-1, 1, 100).unsqueeze(1)
# Targets: the line y = 3x + 10 plus uniform noise drawn from [0, 1).
y = 3 * x + 10 + torch.rand(x.size())
class LinearRegression(nn.Module):
    """Single-feature linear model y = w*x + b, implemented as one nn.Linear(1, 1)."""

    def __init__(self):
        super(LinearRegression, self).__init__()
        self.Linear = nn.Linear(1, 1)  # one input feature -> one output

    def forward(self, x):
        # x is expected to be (batch, 1); output has the same shape.
        return self.Linear(x)
# Model, loss function, and optimizer (steps 1-3 of the recipe above).
model = LinearRegression()
Loss = nn.MSELoss()
Opt = torch.optim.SGD(model.parameters(), lr=0.01)

# Training loop: forward pass, compute loss, zero gradients, backprop, step.
for i in range(1000):
    # NOTE(review): Variable is deprecated; it is a no-op wrapper on
    # modern PyTorch, so these are effectively just x and y.
    inputs = Variable(x)
    targets = Variable(y)
    outputs = model(inputs)
    loss = Loss(outputs, targets)
    Opt.zero_grad()  # clear gradients accumulated by the previous iteration
    loss.backward()
    Opt.step()

# Switch to eval mode, predict over the training inputs, and plot the
# fitted line (blue) against the noisy samples (red dots).
model.eval()
predict = model(Variable(x))
plt.plot(x.numpy(), y.numpy(), 'ro')
plt.plot(x.numpy(), predict.data.numpy())
plt.show()