import torch

# --- Part 1: fit y = 2x + 3 by manual (hand-derived) gradient descent ---
# Synthetic data: 200 scalar samples with an exact linear relation (no noise).
x = torch.randn(200, 1)
y = x * 2 + 3

# Parameters: random initial weight, zero bias.
w = torch.randn(1)
b = torch.tensor([0.0])
lr = 0.01  # learning rate
global_step = 0

for epoch in range(3):
    for i in range(200):
        # Forward pass for one sample.
        y_pre = x[i] * w + b
        # Halved squared error; the 1/2 cancels the factor 2 in the gradient.
        loss = (y_pre - y[i]).pow(2) / 2
        # Analytic gradients of the loss w.r.t. w and b.
        dw = (y_pre - y[i]) * x[i]
        db = y_pre - y[i]
        # Plain SGD update.
        w -= lr * dw
        b -= lr * db
        print("step = {}\tloss = {}\tw = {}\tb = {}".format(global_step, loss.item(), w.item(), b.item()))
        global_step += 1
import torch

# Synthetic regression data for the nn.Module version below:
# 200 samples of shape (1, 1), targets on the line y = 2x + 3 plus a
# small amount of Gaussian noise.
x = torch.randn(200, 1, 1)
noise = torch.randn(x.shape) * 0.01
y = x * 2 + 3 + noise
class Liner(torch.nn.Module):
    """Single-layer linear model: one input feature mapped to one output."""

    def __init__(self):
        super().__init__()
        # One affine transform holding the learnable weight and bias.
        self.layer1 = torch.nn.Linear(1, 1)

    def forward(self, x):
        # Delegate straight to the linear layer.
        return self.layer1(x)
# --- Part 2: train the Liner model with autograd + torch.optim.SGD ---
liner = Liner()
# Create the optimizer ONCE, before training. The original rebuilt it on
# every inner-loop step, which is wasteful and would discard accumulated
# state for stateful optimizers (momentum-SGD, Adam, ...).
optimizer = torch.optim.SGD(params=liner.parameters(), lr=0.01)
global_step = 0

for epoch in range(5):
    for ind, i in enumerate(x):
        # Forward pass on one sample of shape (1, 1).
        y_pre = liner(i)
        # Halved squared error for this sample.
        loss = ((y_pre - y[ind]) ** 2) / 2
        # Standard step: clear stale grads, backprop, apply the update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print("loss= {}\tstep= {}\t w = {}\tb = {}".format(loss.item(), global_step, liner.layer1.weight.item(), liner.layer1.bias.item()))
        global_step += 1