# Simple linear regression implemented without torch (manual MSE sweep over w)
import numpy as np
import matplotlib.pyplot as mplot
# Toy training set: targets follow y = 2*x exactly.
x_data, y_data = [1, 2, 3], [2, 4, 6]
def forward(x):
    """Linear model prediction y = w * x (w is a module-level global set by the sweep loop)."""
    return w * x
def loss(x, y):
    """Squared error between the model prediction forward(x) and target y."""
    residual = forward(x) - y
    return residual * residual
# Sweep candidate weights w in [0, 4] and record the mean squared error
# of y = w*x on the training set, then plot MSE vs. w.
w_list = []
mse_list = []
n_samples = len(x_data)  # was hard-coded as 3; use the dataset size so it generalizes
for w in np.arange(0, 4.1, 0.1):
    print('w=', w)
    l_sum = 0
    for x_val, y_val in zip(x_data, y_data):
        y_pred_val = forward(x_val)
        loss_val = loss(x_val, y_val)
        l_sum += loss_val
        print('\t', x_val, y_val, y_pred_val, loss_val)
    print('MSE=', l_sum / n_samples)
    w_list.append(w)
    mse_list.append(l_sum / n_samples)
mplot.plot(w_list, mse_list)
mplot.ylabel('Loss')
mplot.xlabel('w')
mplot.show()
# Linear regression using the torch library
import torch
import numpy as np
# Training data as float32 column vectors (3 samples, 1 feature); targets follow y = 2x.
x_data = torch.Tensor([[1.0], [2.0], [3.0]])
y_data = torch.Tensor([[2.0], [4.0], [6.0]])
class LinearModule(torch.nn.Module):
    """Single-feature linear regression model: y = w*x + b."""

    def __init__(self):
        super().__init__()
        # nn.Linear(1, 1) owns one weight and one bias, initialized automatically.
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        """Apply the affine map to x; x is expected as a (N, 1) tensor."""
        return self.linear(x)
# Fit the model with plain SGD; MSELoss(reduction='sum') gives the summed squared error.
model = LinearModule()
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# Standard loop: forward -> loss -> clear grads -> backprop -> update.
for _ in range(500):
    prediction = model(x_data)
    fit_error = criterion(prediction, y_data)
    optimizer.zero_grad()
    fit_error.backward()
    optimizer.step()

print('w', model.linear.weight.item())
print('b', model.linear.bias.item())