import torch
from visdom import Visdom
epochs = 5000
global_step = 0
# Training data: y = 2x, so the learned mapping should approach slope 2
# (torch.autograd.Variable is deprecated since PyTorch 0.4; plain tensors carry autograd directly)
x_data = torch.Tensor([[1.0], [2.0], [3.0]])
y_data = torch.Tensor([[2.0], [4.0], [6.0]])
class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.linear1 = torch.nn.Linear(1, 2)  # [3, 1] -> [3, 2]
        self.linear2 = torch.nn.Linear(2, 1)  # [3, 2] -> [3, 1]

    def forward(self, x):
        y_pred = self.linear1(x)       # [3, 1] -> [3, 2]
        y_pred = self.linear2(y_pred)  # [3, 2] -> [3, 1]
        return y_pred
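# Note: without a nonlinearity between them, linear1 and linear2 compose into a
# single affine map, so this two-layer model is no more expressive than a
# single torch.nn.Linear(1, 1); the extra layer here only illustrates shapes.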
model = Model()
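# Visdom needs a running server: start one with `python -m visdom.server`
# and open http://localhost:8097 to watch the live loss curve.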
viz = Visdom()
viz.line([0.], [0.], win='linear_loss', opts=dict(title='linear loss'))
# size_average=False is deprecated; reduction='sum' is the modern equivalent
criterion = torch.nn.MSELoss(reduction='sum')  # loss function
optimizer = torch.optim.SGD(model.parameters(), lr=0.004)  # optimizer
# Training loop: forward, loss, backward, step
for epoch in range(epochs):
    # Forward pass
    y_pred = model(x_data)
    # Compute loss
    loss = criterion(y_pred, y_data)
    # Zero gradients from the previous step
    optimizer.zero_grad()
    # Backward pass
    loss.backward()
    # Update weights
    optimizer.step()
    global_step += 1
    viz.line([loss.item()], [global_step], win='linear_loss', update='append')
    if loss.item() >= 1e-5:
        if epoch % 10 == 0:
            print("Epoch {}:\t[{}/{} ({:.0f}%)]\tLoss {:.6f}".format(
                epoch, global_step, epochs, 100. * global_step / epochs, loss.item()))
    else:
        print("Loss below 1e-5, stopping early.")
        break
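Running the script produces output like the following; the loss drops below the 1e-5 threshold shortly after epoch 710 and training stops early: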
Epoch 0: [1/5000 (0%)] Loss 140.462097
Epoch 10: [11/5000 (0%)] Loss 0.889901
Epoch 20: [21/5000 (0%)] Loss 0.594535
Epoch 30: [31/5000 (1%)] Loss 0.518817
Epoch 40: [41/5000 (1%)] Loss 0.452415
Epoch 50: [51/5000 (1%)] Loss 0.394169
Epoch 60: [61/5000 (1%)] Loss 0.343085
Epoch 70: [71/5000 (1%)] Loss 0.298305
Epoch 80: [81/5000 (2%)] Loss 0.259079
Epoch 90: [91/5000 (2%)] Loss 0.224751
Epoch 100: [101/5000 (2%)] Loss 0.194743
Epoch 110: [111/5000 (2%)] Loss 0.168544
Epoch 120: [121/5000 (2%)] Loss 0.145697
Epoch 130: [131/5000 (3%)] Loss 0.125802
Epoch 140: [141/5000 (3%)] Loss 0.108501
Epoch 150: [151/5000 (3%)] Loss 0.093476
Epoch 160: [161/5000 (3%)] Loss 0.080445
Epoch 170: [171/5000 (3%)] Loss 0.069159
Epoch 180: [181/5000 (4%)] Loss 0.059398
Epoch 190: [191/5000 (4%)] Loss 0.050966
Epoch 200: [201/5000 (4%)] Loss 0.043692
Epoch 210: [211/5000 (4%)] Loss 0.037423
Epoch 220: [221/5000 (4%)] Loss 0.032028
Epoch 230: [231/5000 (5%)] Loss 0.027389
Epoch 240: [241/5000 (5%)] Loss 0.023405
Epoch 250: [251/5000 (5%)] Loss 0.019987
Epoch 260: [261/5000 (5%)] Loss 0.017057
Epoch 270: [271/5000 (5%)] Loss 0.014548
Epoch 280: [281/5000 (6%)] Loss 0.012401
Epoch 290: [291/5000 (6%)] Loss 0.010565
Epoch 300: [301/5000 (6%)] Loss 0.008996
Epoch 310: [311/5000 (6%)] Loss 0.007657
Epoch 320: [321/5000 (6%)] Loss 0.006514
Epoch 330: [331/5000 (7%)] Loss 0.005540
Epoch 340: [341/5000 (7%)] Loss 0.004710
Epoch 350: [351/5000 (7%)] Loss 0.004002
Epoch 360: [361/5000 (7%)] Loss 0.003400
Epoch 370: [371/5000 (7%)] Loss 0.002888
Epoch 380: [381/5000 (8%)] Loss 0.002452
Epoch 390: [391/5000 (8%)] Loss 0.002081
Epoch 400: [401/5000 (8%)] Loss 0.001766
Epoch 410: [411/5000 (8%)] Loss 0.001498
Epoch 420: [421/5000 (8%)] Loss 0.001271
Epoch 430: [431/5000 (9%)] Loss 0.001078
Epoch 440: [441/5000 (9%)] Loss 0.000914
Epoch 450: [451/5000 (9%)] Loss 0.000775
Epoch 460: [461/5000 (9%)] Loss 0.000657
Epoch 470: [471/5000 (9%)] Loss 0.000557
Epoch 480: [481/5000 (10%)] Loss 0.000472
Epoch 490: [491/5000 (10%)] Loss 0.000400
Epoch 500: [501/5000 (10%)] Loss 0.000339
Epoch 510: [511/5000 (10%)] Loss 0.000287
Epoch 520: [521/5000 (10%)] Loss 0.000243
Epoch 530: [531/5000 (11%)] Loss 0.000206
Epoch 540: [541/5000 (11%)] Loss 0.000174
Epoch 550: [551/5000 (11%)] Loss 0.000148
Epoch 560: [561/5000 (11%)] Loss 0.000125
Epoch 570: [571/5000 (11%)] Loss 0.000106
Epoch 580: [581/5000 (12%)] Loss 0.000090
Epoch 590: [591/5000 (12%)] Loss 0.000076
Epoch 600: [601/5000 (12%)] Loss 0.000064
Epoch 610: [611/5000 (12%)] Loss 0.000054
Epoch 620: [621/5000 (12%)] Loss 0.000046
Epoch 630: [631/5000 (13%)] Loss 0.000039
Epoch 640: [641/5000 (13%)] Loss 0.000033
Epoch 650: [651/5000 (13%)] Loss 0.000028
Epoch 660: [661/5000 (13%)] Loss 0.000024
Epoch 670: [671/5000 (13%)] Loss 0.000020
Epoch 680: [681/5000 (14%)] Loss 0.000017
Epoch 690: [691/5000 (14%)] Loss 0.000014
Epoch 700: [701/5000 (14%)] Loss 0.000012
Epoch 710: [711/5000 (14%)] Loss 0.000010
Loss below 1e-5, stopping early.
![Visdom plot of the training loss curve](https://i-blog.csdnimg.cn/blog_migrate/58ed88ec4d298ce234c67db3b6c76eae.png)
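As a quick sanity check, the fitted model can be evaluated on an unseen input. Since the training data follow y = 2x, a prediction for x = 4.0 should land near 8.0. Here is a minimal sketch, assuming the `model` object from the script above is still in scope:

```python
# Evaluate the trained model on a new input; gradients are not needed here
with torch.no_grad():
    x_test = torch.Tensor([[4.0]])
    y_test = model(x_test)
    print("Prediction for x = 4.0: {:.4f}".format(y_test.item()))  # expected ~8.0
```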