import torch.nn as nn
import torch
class TwoLayerNet(nn.Module):
    """Two-layer fully connected net: Linear -> ReLU -> Linear, no bias terms."""

    def __init__(self, dim_in, dim_hide, dim_out):
        super().__init__()
        self.linear1 = nn.Linear(dim_in, dim_hide, bias=False)
        self.linear2 = nn.Linear(dim_hide, dim_out, bias=False)

    def forward(self, x):
        # clamp(min=0) is exactly ReLU; keep the activation in a named temp.
        hidden = self.linear1(x).clamp(min=0)
        return self.linear2(hidden)
def main(steps=500, batch_size=64, dim_in=1000, dim_hidden=100, dim_out=10, lr=1e-4):
    """Train a TwoLayerNet on random data and return the final loss value.

    Args:
        steps: number of optimization steps (default 500, as in the original script).
        batch_size: number of training samples (N).
        dim_in: input feature dimension.
        dim_hidden: hidden-layer width.
        dim_out: output dimension.
        lr: Adam learning rate.

    Returns:
        The final scalar loss (float), or None if steps == 0.
    """
    # Randomly generated training data: batch_size x dim_in inputs,
    # batch_size x dim_out targets (the original comment wrongly said
    # the output was 100-dim; D_out is 10).
    x_data = torch.randn(batch_size, dim_in)
    y = torch.randn(batch_size, dim_out)

    model = TwoLayerNet(dim_in, dim_hidden, dim_out)
    loss_fn = nn.MSELoss(reduction='sum')            # summed squared error
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    loss = None
    for t in range(steps):
        y_pred = model(x_data)          # forward pass
        loss = loss_fn(y_pred, y)       # compute loss
        print(t, loss.item())           # progress log
        optimizer.zero_grad()           # clear accumulated gradients
        loss.backward()                 # backpropagation
        optimizer.step()                # update weights
    return loss.item() if loss is not None else None


if __name__ == "__main__":
    main()
# PyTorch neural-network training demo.
# Originally published 2024-05-09 14:16:02.