import numpy as np
import matplotlib.pyplot as plt
from torch import nn, optim
import torch
# Linear regression: generate training data y = 0.1x + 0.2 + noise
x_data = np.random.rand(100)  # ndarray, shape (100,)
noise = np.random.normal(0, 0.01, x_data.shape)
y_data = x_data * 0.1 + 0.2 + noise
x_data = x_data.reshape(-1, 1)  # ndarray, shape (100, 1): reshape into a column vector
y_data = y_data.reshape(-1, 1)
# Nonlinear regression: y = x^2 + noise (note: this block overwrites the linear
# data above; comment out whichever block you do not want)
x_data = np.linspace(-2, 2, 200)[:, np.newaxis]  # ndarray, shape (200, 1): another way to get a column vector
noise = np.random.normal(0, 0.2, x_data.shape)
y_data = np.square(x_data) + noise
plt.scatter(x_data, y_data)
plt.show()
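# nn.Linear(1, ...) expects a batch of shape (N, 1), which is why both data
# blocks above reshape the samples into column vectors.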
# Convert the numpy arrays to tensors
x_data = torch.FloatTensor(x_data)
y_data = torch.FloatTensor(y_data)
# Since PyTorch 0.4, Variable has been merged into Tensor, so no wrapping is needed
inputs = x_data
target = y_data
# Linear regression
# Build the neural network model
# Layers with learnable parameters usually go in __init__()
class LinearRegression(nn.Module):
    # Define the network structure
    def __init__(self):
        # Initialize nn.Module
        super(LinearRegression, self).__init__()
        self.fc = nn.Linear(1, 1)

    # Define the forward computation
    def forward(self, x):
        out = self.fc(x)
        return out
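# A minimal sanity check (illustrative only, not part of the course code):
# the model maps a batch of shape (N, 1) to (N, 1).
# assert LinearRegression()(torch.zeros(5, 1)).shape == (5, 1)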
# Nonlinear regression
class NonLinearRegression(nn.Module):
    # Define the network structure
    def __init__(self):
        # Initialize nn.Module
        super(NonLinearRegression, self).__init__()
        # units: 1-10-1
        self.fc1 = nn.Linear(1, 10)
        self.tanh = nn.Tanh()
        self.fc2 = nn.Linear(10, 1)

    # Define the forward computation
    def forward(self, x):
        x = self.fc1(x)
        x = self.tanh(x)
        x = self.fc2(x)
        return x
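# Parameter count for the 1-10-1 network: fc1 holds 10 weights + 10 biases and
# fc2 holds 10 weights + 1 bias, i.e. 31 parameters in total. Illustrative check:
# sum(p.numel() for p in NonLinearRegression().parameters())  # -> 31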
# Create the model
model = LinearRegression()  # use NonLinearRegression() for the quadratic data
# Define the cost function
mse_loss = nn.MSELoss()
# Define the optimizer
optimizer = optim.SGD(model.parameters(), lr=0.1)  # for nonlinear regression use a larger learning rate, e.g. lr=0.3
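# SGD updates every parameter as w <- w - lr * dL/dw on each step; with a very
# small lr the quadratic fit converges slowly, hence the larger lr suggested above.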
for i in range(1001):  # for nonlinear regression use more iterations, e.g. range(2001)
    out = model(inputs)
    # Compute the loss
    loss = mse_loss(out, target)
    # Zero the gradients
    optimizer.zero_grad()
    # Backpropagate to compute gradients
    loss.backward()
    # Update the weights
    optimizer.step()
    if i % 200 == 0:
        print(i, loss.item())
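# Prediction for plotting does not need gradients; a slightly more idiomatic
# form (equivalent here, it just skips building the autograd graph) would be:
# with torch.no_grad():
#     y_pred = model(inputs)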
y_pred = model(inputs)
plt.scatter(x_data.numpy(), y_data.numpy())
plt.plot(x_data.numpy(), y_pred.detach().numpy(), 'r-', lw=3)
plt.show()
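# For the linear data the learned parameters should approach the ground truth
# (w ~ 0.1, b ~ 0.2); an illustrative check:
# print(model.fc.weight.item(), model.fc.bias.item())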
# Reference:
# Qinbf's PyTorch introductory course (GitHub)