import torch
import numpy as np
from torch import nn
from torch.autograd import Variable
import visdom
import matplotlib.pyplot as plt
import random
# Visdom dashboard: connect to the 'train' environment and create a line-plot
# window; the training loop appends the running loss to this window.
viz = visdom.Visdom(env='train')
loss_win = viz.line(np.arange(10))
# torch.cat() concatenates tensors along a dimension.
def make_features(x):
    """Build the polynomial feature matrix [x, x^2, x^3] from a 1-D tensor.

    Args:
        x: 1-D tensor of sample points, shape (batch,).

    Returns:
        Tensor of shape (batch, 3) whose columns are x, x**2, x**3.
    """
    column = x.unsqueeze(1)  # (batch,) -> (batch, 1)
    powers = [column ** degree for degree in range(1, 4)]
    return torch.cat(powers, 1)
# Ground-truth polynomial the model should recover:
#   y = 0.5*x + 3*x^2 + 2.4*x^3 + 0.9
# unsqueeze turns the 3-element weight vector into a (3, 1) column matrix.
W_target = torch.FloatTensor([0.5,3,2.4]).unsqueeze(1)
b_target = torch.FloatTensor([0.9])
def f(x):
    """Evaluate the target polynomial on a (batch, 3) feature matrix."""
    bias = b_target[0]
    return x.mm(W_target) + bias
# Draw one training batch: sample x values, expand them into polynomial
# features, and evaluate the ground-truth polynomial for the targets.
def get_batch(batch_size=32, random=None):
    """Return a (features, targets) pair for one training batch.

    Args:
        batch_size: number of points sampled from N(0, 1) when `random`
            is not supplied; ignored otherwise.
        random: optional 1-D tensor of x values to use instead of sampling.
            NOTE: the name shadows the stdlib `random` module inside this
            function; kept as-is for backward compatibility with callers
            that pass it by keyword.

    Returns:
        (x, y): features of shape (batch, 3) and targets of shape (batch, 1).
    """
    if random is None:
        random = torch.randn(batch_size)
    x = make_features(random)
    y = f(x)
    # torch.autograd.Variable is deprecated since PyTorch 0.4 (merged into
    # Tensor); plain tensors participate in autograd directly.
    return x, y
# Model definition.
# (Equivalent to a bare `model = nn.Linear(3, 1)`.)
n = 3  # number of polynomial features: x, x^2, x^3
class poly_model(nn.Module):
    """Linear map from n polynomial features to a scalar prediction."""

    def __init__(self, n):
        super().__init__()
        # Single fully-connected layer: (batch, n) -> (batch, 1).
        self.poly = nn.Linear(n, 1)

    def forward(self, x):
        prediction = self.poly(x)
        return prediction
# Model, loss, and optimizer for the regression fit.
model = poly_model(n)
criterion = nn.MSELoss()  # mean squared error against the true polynomial
optimizer = torch.optim.SGD(model.parameters(),lr=1e-3)
print('****')
# Train until the batch loss drops below 1e-3, logging every 20 epochs.
epoch = 0
while True:
    batch_x, batch_y = get_batch()
    # Forward
    output = model(batch_x)
    loss = criterion(output, batch_y)
    print_loss = loss.item()  # hoisted: reused for logging and the stop check
    if (epoch + 1) % 20 == 0:
        print('Epoch[{}],loss:{:.6f}'.format(epoch + 1, print_loss))
        # NOTE(review): plotted on the same 20-epoch cadence as the print,
        # matching the original line layout — confirm this is intended.
        viz.line(Y=np.array([print_loss]), X=np.array([epoch]),
                 update='append', win=loss_win)
    # Backward
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    epoch += 1
    if print_loss < 1e-3:
        print('Loss:{} after {} batchs'.format(print_loss, epoch))
        break
# Scatter 20 random sample points of the true polynomial ("Original Data").
sample_points = [random.randint(-200, 200) * 0.01 for i in range(20)]
sample_points = np.array(sorted(sample_points))
feature_x, true_y = get_batch(random=torch.from_numpy(sample_points).float())
plt.plot(sample_points, true_y.numpy(), 'ro', label='Original Data')

# Overlay the fitted curve on a dense grid ("Fitting Line").
model.eval()
x_sample = np.arange(-2, 2, 0.01)
features, _ = get_batch(random=torch.from_numpy(x_sample).float())
with torch.no_grad():  # inference only: skip building an autograd graph
    y_sample = model(features).numpy()
plt.plot(x_sample, y_sample, label='Fitting Line')
plt.show()