# 1 第四次改进——optim更新参数 (Fourth improvement: update parameters with torch.optim)
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pandas as pd
import random, math
import sklearn
import scipy
# Problem sizes: batch, input width, hidden width, output width.
N, D_in, H, D_out = 64, 1000, 100, 10

# Random inputs and regression targets.
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)

# Two-layer net: Linear -> ReLU -> Linear, both layers bias-free.
model = nn.Sequential(
    nn.Linear(D_in, H, bias=False),
    nn.ReLU(),
    nn.Linear(H, D_out, bias=False),
)

# Overwrite the default init with standard-normal weights
# (matches the hand-rolled versions from earlier sections).
nn.init.normal_(model[0].weight)
nn.init.normal_(model[2].weight)

# Sum-reduced squared error; the tiny lr compensates for the large
# gradients this loss/init combination produces.
loss_fn = nn.MSELoss(reduction='sum')
learning_rate = 1e-7
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

for step in range(5000):
    y_pred = model(x)
    loss = loss_fn(y_pred, y)
    if step % 500 == 0:          # log every 500 steps
        print(step, loss.item())
    optimizer.zero_grad()        # clear accumulated gradients
    loss.backward()              # backprop through the whole model
    optimizer.step()             # SGD parameter update
# 2 第五次改进——自定义nn.Modules (Fifth improvement: a custom nn.Module subclass)
import torch.nn as nn
N = 64        # batch size
D_in = 1000   # input feature width
H = 100       # hidden layer width
D_out = 10    # output width

x = torch.randn(N, D_in)   # random inputs
y = torch.randn(N, D_out)  # random regression targets
class TwoLayerNet(nn.Module):
    """Fully-connected net: Linear -> ReLU -> Linear, both layers bias-free."""

    def __init__(self, D_in, H, D_out):
        super(TwoLayerNet, self).__init__()
        # Two learnable affine maps; biases omitted to mirror the
        # hand-written versions from the earlier sections.
        self.linear1 = nn.Linear(D_in, H, bias=False)
        self.linear2 = nn.Linear(H, D_out, bias=False)

    def forward(self, x):
        # clamp(min=0) is the ReLU nonlinearity, kept for parity with
        # the manual implementation.
        hidden = self.linear1(x)
        activated = hidden.clamp(min=0)
        return self.linear2(activated)
# Instantiate the custom module and train it with Adam.
model = TwoLayerNet(D_in, H, D_out)
loss_fn = nn.MSELoss(reduction='sum')
learning_rate = 1e-5
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

for step in range(5000):
    y_pred = model(x)            # forward pass
    loss = loss_fn(y_pred, y)    # sum-of-squares error
    if step % 500 == 0:          # log every 500 steps
        print(step, loss.item())
    optimizer.zero_grad()        # clear accumulated gradients
    loss.backward()              # backprop
    optimizer.step()             # Adam parameter update
# 3 总结 (Summary)
# - 手动神经网络是一个具体的细节的过程,我们需要做的是把它逐步抽象;
#   (Hand-writing a neural network is a concrete, detailed process; our job is to abstract it step by step.)
# - 把神经网络项目的通性从具体的过程中抽象出来聚合成类,将具体的过程转移到初始化部分;
#   (Extract what is common across network projects into classes, moving the concrete details into initialization.)
# - 很好地体现了面向对象的继承和类抽象的特点。
#   (This nicely illustrates object-oriented inheritance and class abstraction.)