NumPy backpropagation to fit sin():
1. Create the data
Loop:
2. Compute the model output
3. Compute the loss
4. Compute the partial derivatives (see the derivation right after this list)
5. Update the parameters
End of loop
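For step 4, the partial derivatives follow directly from the chain rule, and they are exactly what the code below computes by hand:

loss = Σ (y_pred − y)²,  with y_pred = a + b·x + c·x² + d·x³
∂loss/∂a = Σ 2·(y_pred − y)
∂loss/∂b = Σ 2·(y_pred − y)·x
∂loss/∂c = Σ 2·(y_pred − y)·x²
∂loss/∂d = Σ 2·(y_pred − y)·x³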
# -*- coding: utf-8 -*-
# @Time : 2021/1/3 21:38
# @Author : wwq_biubiu!!
# @FileName: grand.py
# @Software: PyCharm
import matplotlib.pyplot as plt
import numpy as np
import math
# Create the input and output data
x = np.linspace(-math.pi, math.pi, 200)
y = np.sin(x)
# Randomly initialize the weights
a = np.random.randn()
b = np.random.randn()
c = np.random.randn()
d = np.random.randn()
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(x,y)
plt.ion()
plt.ylim((-2, 2))
learning_rate = 1e-6
for t in range(6000):
    # Forward pass: evaluate the cubic polynomial
    y_pred = a + b * x + c * x ** 2 + d * x ** 3
    # Sum-of-squares loss
    loss = np.square(y_pred - y).sum()
    if t % 100 == 99:
        print(t, loss)
        # Redraw the current fit every 100 iterations
        try:
            ax.lines.remove(lines[0])
        except Exception:
            pass
        lines = ax.plot(x, y_pred)
        ax.set_xlabel('Epoch:' + str(t))
        plt.pause(0.1)
    # Backward pass: gradients of the loss with respect to a, b, c, d
    grad_y_pred = 2.0 * (y_pred - y)
    grad_a = grad_y_pred.sum()
    grad_b = (grad_y_pred * x).sum()
    grad_c = (grad_y_pred * x ** 2).sum()
    grad_d = (grad_y_pred * x ** 3).sum()
    # Update the weights
    a -= learning_rate * grad_a
    b -= learning_rate * grad_b
    c -= learning_rate * grad_c
    d -= learning_rate * grad_d
print(f'Result: y = {a} + {b} x + {c} x^2 + {d} x^3')
plt.pause(0)
Output: an animated plot of the fitted polynomial gradually approaching sin(x), plus the final coefficients printed to the console.
Using GPU tensors
For large-scale numerical work, a GPU can greatly speed up computation. In PyTorch, a Tensor supports many of the same array operations as NumPy; in addition it can record a computation graph for automatic differentiation, and it can run on a GPU.
Here we use PyTorch Tensors, in the same way as the NumPy code above, to fit the sin() function. A short device-selection sketch comes first, followed by the full script.
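As a minimal sketch (assuming the script may also be run on a machine without a GPU, torch.cuda.is_available() can be used to fall back to the CPU; the full script below simply hard-codes cuda:0):

import torch
import math

# Pick the device once, then create tensors directly on it
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
x = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=torch.float)
print(x.device)  # cuda:0 if a GPU is available, otherwise cpu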
# -*- coding: utf-8 -*-
# @Time : 2021/1/9 18:15
# @Author : wwq_biubiu!!
# @FileName: chapter1_2_tensor.py
# @Software: PyCharm
import torch
import math
dtype = torch.float
device = torch.device("cuda:0")
# Create the input and output data
x = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=dtype)
y = torch.sin(x)
# Randomly initialize the weights
a = torch.rand((), device=device, dtype=dtype)
b = torch.rand((), device=device, dtype=dtype)
c = torch.rand((), device=device, dtype=dtype)
d = torch.rand((), device=device, dtype=dtype)
y_pred = torch.rand((), device=device, dtype=dtype)
loss=torch.rand((), device=device, dtype=dtype)
learning_rate = 1e-6
for t in range(2000):
    # Forward pass
    y_pred = a + b * x + c * x ** 2 + d * x ** 3
    # .item() extracts a plain Python number from a one-element tensor
    loss = (y_pred - y).pow(2).sum().item()
    if t % 99 == 0:
        print(t, loss)
    # Backward pass: gradients computed by hand, as in the NumPy version
    grad_y_pred = 2 * (y_pred - y)
    grad_a = grad_y_pred.sum()
    grad_b = (grad_y_pred * x).sum()
    grad_c = (grad_y_pred * x ** 2).sum()
    grad_d = (grad_y_pred * x ** 3).sum()
    # Update the weights
    a -= learning_rate * grad_a
    b -= learning_rate * grad_b
    c -= learning_rate * grad_c
    d -= learning_rate * grad_d
print(f'Result: y = {a.item()} + {b.item()} x + {c.item()} x^2 + {d.item()} x^3')
Automatic gradient computation in PyTorch
For a small model, computing the backward pass by hand is manageable, but for complex networks it quickly becomes error-prone. PyTorch provides the autograd package, which computes the backward pass of a neural network automatically. A minimal autograd example is shown first; then we use PyTorch tensors and autograd to fit sin() with a third-order polynomial.
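A toy scalar loss, only to show the requires_grad / backward() / .grad pattern used in the full script below:

import torch

w = torch.tensor(2.0, requires_grad=True)  # parameter tracked by autograd
loss = (3.0 * w - 1.0) ** 2                # toy scalar loss
loss.backward()                            # fills w.grad with d(loss)/dw
print(w.grad)                              # tensor(30.) = 2 * (3*2 - 1) * 3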
# -*- coding: utf-8 -*-
# @Time : 2021/1/9 21:53
# @Author : wwq_biubiu!!
# @FileName: chapter1_3_autograd.py
# @Software: PyCharm
import torch
import math
dtype = torch.float
device = torch.device('cuda:0')
# create input and output data
x = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=dtype)
y = torch.sin(x)
a = torch.randn((), device=device, dtype=dtype, requires_grad=True)
b = torch.randn((), device=device, dtype=dtype, requires_grad=True)
c = torch.randn((), device=device, dtype=dtype, requires_grad=True)
d = torch.randn((), device=device, dtype=dtype, requires_grad=True)
learning_rate = 1e-6
for t in range(20000):
    # Forward pass
    y_pred = a + b * x + c * x ** 2 + d * x ** 3
    loss = (y_pred - y).pow(2).sum()
    if t % 100 == 99:
        print(t, loss.item())
    # Backward pass with autograd: this computes the gradient of the loss
    # with respect to every tensor that has requires_grad=True
    loss.backward()
    with torch.no_grad():
        # torch.no_grad(): do not record these operations in the graph,
        # since gradient tracking is not needed while updating the weights
        a -= learning_rate * a.grad
        b -= learning_rate * b.grad
        c -= learning_rate * c.grad
        d -= learning_rate * d.grad
        # After the update, reset the gradients manually,
        # otherwise they would accumulate across iterations
        a.grad = None
        b.grad = None
        c.grad = None
        d.grad = None
print(f'Result: y = {a.item()} + {b.item()} x + {c.item()} x^2 + {d.item()} x^3')
Custom PyTorch modules
Sometimes we need to compose a series of operations into a more complex model. We can do this by subclassing nn.Module and defining forward(), which receives the input tensor and returns the output tensor.
Again we use the same fitting problem as the example:
# -*- coding: utf-8 -*-
# @Time : 2021/1/9 23:13
# @Author : wwq_biubiu!!
# @FileName: chapter1_4_module.py
# @Software: PyCharm
import torch
import math
class Sin_net(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # Register a, b, c, d as learnable parameters
        # (note: torch.nn.Parameter with a capital P, not torch.nn.parameter)
        self.a = torch.nn.Parameter(torch.randn(()))
        self.b = torch.nn.Parameter(torch.randn(()))
        self.c = torch.nn.Parameter(torch.randn(()))
        self.d = torch.nn.Parameter(torch.randn(()))

    def forward(self, x):
        return self.a + self.b * x + self.c * x ** 2 + self.d * x ** 3

    def string(self):
        return f'y = {self.a.item()} + {self.b.item()} x + {self.c.item()} x^2 + {self.d.item()} x^3'
x = torch.linspace(-math.pi, math.pi, 2000)
y = torch.sin(x)
model = Sin_net()
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-6)
for t in range(2000):
    # Forward pass through the module
    y_pred = model(x)
    loss = criterion(y_pred, y)
    if t % 100 == 99:
        print(t, loss.item())
    # Zero the gradients, run the backward pass, and take an SGD step
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
print(f'Result: {model.string()}')
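After training, the learned coefficients can also be read back from the module's registered parameters (a short usage sketch, not part of the script above):

for name, p in model.named_parameters():
    print(name, p.item())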