算法:贪心算法,穷举法,分治法,动态规划
visdom可视化工具
np.meshgrid()用于三维图
训练失败可能是学习率太大
线性模型
import numpy as np
import matplotlib.pyplot as plt

# Training data for the linear model y = w * x (the true weight is 2.0).
x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]


def forward(x):
    """Linear model prediction y_hat = x * w (w is the module-level weight)."""
    return x * w


def loss(x, y):
    """Squared error of a single sample."""
    y_pred = forward(x)
    return (y_pred - y) * (y_pred - y)


# Exhaustive search: sweep w over [0.0, 4.0] and record the MSE at each value.
w_list = []
mse_list = []
for w in np.arange(0.0, 4.1, 0.1):
    print('w=', w)
    l_sum = 0
    for x_val, y_val in zip(x_data, y_data):
        y_pred_val = forward(x_val)
        loss_val = loss(x_val, y_val)
        l_sum += loss_val
        print('\t', x_val, y_val, y_pred_val, loss_val)
    print('MSE=', l_sum / 3)
    w_list.append(w)
    mse_list.append(l_sum / 3)

# Plot MSE as a function of w; the curve bottoms out at w = 2.
plt.plot(w_list, mse_list)
plt.ylabel('Loss')
plt.xlabel('w')
plt.show()
梯度下降
# Training data; the underlying relation is y = 2 * x.
x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]

w = 1.0  # initial guess for the weight


def forward(x):
    """Predict y_hat = x * w with the current module-level weight."""
    return x * w


def cost(xs, ys):
    """Mean squared error over the whole training set."""
    total = 0  # renamed from `cost` to avoid shadowing this function's name
    for x, y in zip(xs, ys):
        y_pred = forward(x)
        total += (y_pred - y) ** 2
    return total / len(xs)


def gradient(xs, ys):
    """Analytic gradient of the MSE w.r.t. w: mean of 2 * x * (x * w - y)."""
    grad = 0
    for x, y in zip(xs, ys):
        grad += 2 * x * (x * w - y)
    return grad / len(xs)


print('predict (before training)', 4, forward(4))
# Batch gradient descent: one update per epoch using the full-dataset gradient.
for epoch in range(100):
    cost_val = cost(x_data, y_data)
    grad_val = gradient(x_data, y_data)
    w -= 0.01 * grad_val  # learning rate 0.01
    print('epoch:', epoch, 'w=', w, 'loss=', cost_val)
print('predict (after training)', 4, forward(4))
随机梯度下降:每次只用单个样本的损失来更新权重(之前是用全部样本损失的平均来更新)。原因有二:一是引入噪声有助于跳出局部极值/鞍点;二是样本量大时整批计算梯度的开销太大。
# Stochastic gradient descent: update w from a single sample's loss instead of
# the average over the whole dataset (noisier, but cheap per step on big data).
x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]

w = 1.0  # initial weight


def forward(x):
    """Predict y_hat = x * w."""
    return x * w


def loss(x, y):
    """Squared error of one sample."""
    y_pred = forward(x)
    return (y_pred - y) ** 2


def gradient(x, y):
    """Gradient of the single-sample loss w.r.t. w: 2 * x * (x * w - y)."""
    return 2 * x * (x * w - y)


print('predict (before training)', 4, forward(4))
for epoch in range(100):
    for x, y in zip(x_data, y_data):
        grad = gradient(x, y)
        # Immediate per-sample update: this serial dependency is why pure SGD
        # cannot be parallelised the way a batched update can.
        w -= 0.01 * grad
        print('\tgrad:', x, y, grad)
        l = loss(x, y)
    print("progress:", epoch, "w=", w, "loss", l)
print('predict (after training)', 4, forward(4))
反向传播
import torch

# Training data; true relation y = 2 * x.
x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]

# The weight is a tensor that records gradients so autograd can build the
# computation graph through it.
w = torch.Tensor([1.0])
w.requires_grad = True


def forward(x):
    """Predict y_hat = x * w; the result is a tensor in the autograd graph."""
    return x * w


def loss(x, y):
    """Squared error of one sample, as a tensor."""
    y_pred = forward(x)
    return (y_pred - y) ** 2


print('predict (before training)', 4, forward(4).item())
for epoch in range(100):
    for x, y in zip(x_data, y_data):
        l = loss(x, y)  # forward pass builds the graph and the loss value
        l.backward()    # backward pass; the graph is freed once it completes
        # .item() extracts a plain Python number; keeping tensors here would
        # keep extending the computation graph.
        print('\tgrad:', x, y, w.grad.item())
        # Update through .data so the update itself is not tracked by autograd.
        w.data -= 0.01 * w.grad.data
        w.grad.data.zero_()  # gradients accumulate unless explicitly zeroed
    print("progress:", epoch, l.item())
print('predict (after training)', 4, forward(4).item())
pytorch 实现线性回归
import torch
# Training inputs and targets as 3x1 tensors: three samples with one feature
# each, matching nn.Linear's expected (N, in_features) layout.
x_data = torch.Tensor([[1.0], [2.0], [3.0]])
y_data = torch.Tensor([[2.0], [4.0], [6.0]])  # 3x1, one target per sample
class LinearModel(torch.nn.Module):
    """One-feature linear regression: y = w * x + b, built on nn.Module."""

    def __init__(self):
        # Register a Linear layer that owns the weight and the bias;
        # both in_features and out_features are 1.
        super().__init__()
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        # nn.Module wires up backpropagation automatically; defining the
        # forward computation is all that is required here.
        return self.linear(x)
# Instantiate the model; calling model(x) dispatches to forward via __call__.
model = LinearModel()
# Sum-reduced MSE loss. The original `size_average=False` keyword is
# deprecated in PyTorch; `reduction='sum'` is the documented equivalent.
criterion = torch.nn.MSELoss(reduction='sum')
optimizer