1. Gradient descent with NumPy and PyTorch
a. Set an initial value
b. Compute the gradient
c. Update the parameter along the negative gradient direction
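Concretely, the example below minimizes y = x ** 2 + 2 * x + 1 = (x + 1) ** 2: the gradient is dy/dx = 2 * x + 2, the minimum sits at x = -1, and starting from x = 1 each update x <- x - lr * (2 * x + 2) moves x toward -1.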
NumPy:
x = 1
learning_rate = 0.1
epochs = 10
y = lambda x: x ** 2 + 2 * x + 1  # f(x) = (x + 1)^2

for epoch in range(epochs):
    dx = 2 * x + 2                  # analytic gradient dy/dx
    x = x - learning_rate * dx      # gradient descent update
    print(x)
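With lr = 0.1 the update reduces to x <- 0.8 * x - 0.2, a contraction whose fixed point is -1; after 10 epochs x = -1 + 2 * 0.8 ** 10 ≈ -0.785, and more epochs bring it arbitrarily close to the minimum.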
PyTorch:
import torch
from torch.autograd import Variable

x = torch.Tensor([1])
x = Variable(x, requires_grad=True)
print('grad', x.grad, 'data', x.data)

learning_rate = 0.1
epochs = 10

for epoch in range(epochs):
    y = x ** 2 + 2 * x + 1
    y.backward()                                    # autograd fills x.grad
    print('grad', x.grad.data)
    x.data = x.data - learning_rate * x.grad.data   # gradient descent update
    x.grad.data.zero_()                             # clear the gradient before the next step
    print(x.data)
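Variable is a relic of the pre-0.4 API; on current PyTorch the same loop can be written on a plain tensor with requires_grad=True. A minimal sketch, assuming PyTorch >= 0.4:

import torch

x = torch.tensor([1.0], requires_grad=True)
learning_rate = 0.1
for epoch in range(10):
    y = x ** 2 + 2 * x + 1
    y.backward()                     # autograd computes dy/dx into x.grad
    with torch.no_grad():            # update the parameter outside the autograd graph
        x -= learning_rate * x.grad
    x.grad.zero_()                   # clear the gradient for the next step
    print(x.item())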
2. Linear regression with NumPy and PyTorch
(1) NumPy:
import numpy as np

x_data = np.array([1, 2, 3])
y_data = np.array([2, 4, 6])
epochs = 10
lr = 0.1
w = 0
cost = []

for epoch in range(epochs):
    yhat = x_data * w                        # prediction with the current weight
    loss = np.average((yhat - y_data) ** 2)  # mean squared error
    cost.append(loss)
    dw = -2 * (y_data - yhat) @ x_data.T / x_data.shape[0]  # dL/dw
    w = w - lr * dw                          # gradient descent update
    print(w)
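As a sanity check, this one-parameter least-squares problem has a closed-form solution, and the loop above should converge toward it. A minimal sketch:

import numpy as np

x_data = np.array([1, 2, 3])
y_data = np.array([2, 4, 6])
# Closed-form least squares for y = w * x (no bias): w* = (x . y) / (x . x)
w_star = (x_data @ y_data) / (x_data @ x_data)
print(w_star)  # 28 / 14 = 2.0, consistent with y_data = 2 * x_data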
(2) PyTorch:
import torch

num_samples = 64                          # N
dim_in, dim_hid, dim_out = 1000, 100, 10  # IN, H, OUT
x = torch.randn(num_samples, dim_in)      # N * IN
y = torch.randn(num_samples, dim_out)     # N * OUT
w1 = torch.randn(dim_in, dim_hid)         # IN * H
w2 = torch.randn(dim_hid, dim_out)        # H * OUT
eta = 1e-6

for i in range(1000):
    # Forward pass
    h = x @ w1                 # N * H
    h_relu = h.clamp(min=0)    # N * H (ReLU)
    y_pred = h_relu @ w2       # N * OUT
    # Loss
    loss = (y_pred - y).pow(2).sum().item()
    print('times is {}, loss is {}'.format(i, loss))
    # Backward pass
    grad_y_pred = 2.0 * (y_pred - y)    # N * OUT
    grad_w2 = h_relu.t() @ grad_y_pred  # H * OUT = (H * N) * (N * OUT), where (H * N) = (N * H).T
    grad_h_relu = grad_y_pred @ w2.t()  # N * H = (N * OUT) * (OUT * H), where (OUT * H) = (H * OUT).T
    grad_h = grad_h_relu.clone()
    grad_h[h < 0] = 0                   # ReLU gradient: zero wherever the input was negative
    grad_w1 = x.t() @ grad_h            # IN * H = (IN * N) * (N * H)
    w1 = w1 - eta * grad_w1
    w2 = w2 - eta * grad_w2
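The same network can hand the backward pass to autograd instead of deriving each gradient by hand. A minimal sketch, assuming PyTorch >= 0.4, mirroring the shapes and learning rate above:

import torch

num_samples, dim_in, dim_hid, dim_out = 64, 1000, 100, 10
x = torch.randn(num_samples, dim_in)
y = torch.randn(num_samples, dim_out)
w1 = torch.randn(dim_in, dim_hid, requires_grad=True)
w2 = torch.randn(dim_hid, dim_out, requires_grad=True)
eta = 1e-6

for i in range(1000):
    y_pred = (x @ w1).clamp(min=0) @ w2  # forward pass: linear, ReLU, linear
    loss = (y_pred - y).pow(2).sum()
    loss.backward()                      # autograd fills w1.grad and w2.grad
    with torch.no_grad():
        w1 -= eta * w1.grad
        w2 -= eta * w2.grad
        w1.grad.zero_()
        w2.grad.zero_()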
3. A simple neural network in PyTorch
import torch
from torch.autograd import Variable

torch.manual_seed(2)
x_data = Variable(torch.Tensor([[1.0], [2.0], [3.0]]))
y_data = Variable(torch.Tensor([[2.0], [4.0], [6.0]]))

class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.linear = torch.nn.Linear(1, 1, bias=False)

    def forward(self, x):
        y_pred = self.linear(x)
        return y_pred

model = Model()
criterion = torch.nn.MSELoss(reduction='sum')  # sum of squared errors
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

epochs = 20
cost = []
for epoch in range(epochs):
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    cost.append(loss.item())    # .item() extracts the scalar loss value
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

print(list(model.parameters()))
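Since y_data = 2 * x_data and the model is a single weight with no bias, 20 SGD steps should drive the weight close to 2. A quick check, reusing the trained model from above (on PyTorch >= 0.4 a plain tensor can be passed in directly):

print(model.linear.weight.item())           # close to 2.0
print(model(torch.Tensor([[4.0]])).item())  # prediction for x = 4, close to 8.0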
Unfortunately, as a Python beginner I have not managed to debug these to a final result; I will fill this in step by step later.