import torch
from torch import nn
import numpy as np
class LinearModel(torch.nn.Module):
    """A minimal regression network: a single fully-connected layer.

    Maps ``input_dim`` features to ``output_dim`` outputs via y = Wx + b.
    """

    def __init__(self, input_dim, output_dim):
        super().__init__()
        # The whole model is one affine transformation.
        self.linear_layer = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        """Apply the linear layer to a batch ``x`` of shape (N, input_dim)."""
        return self.linear_layer(x)
def train(model, input_data, labels):
    """Train ``model`` with SGD on MSE loss for 5000 epochs.

    Args:
        model: a torch.nn.Module mapping inputs to predictions.
        input_data: numpy float32 array of shape (N, in_features).
        labels: numpy float32 array of shape (N, out_features).

    The model (and data) are moved to GPU when one is available, CPU otherwise.
    Prints the loss every 500 epochs.
    """
    learning_rate = 0.01
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    criterion = nn.MSELoss()
    # Move the model to GPU if available, otherwise stay on CPU.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)
    # Convert the dataset ONCE, outside the loop — the original re-ran
    # this numpy->tensor->device transfer on every one of the 5000 epochs.
    x_train = torch.from_numpy(input_data).to(device)
    y_train = torch.from_numpy(labels).to(device)
    for epoch in range(5000):
        # Gradients accumulate by default; reset them each step.
        optimizer.zero_grad()
        outputs = model(x_train)
        loss = criterion(outputs, y_train)
        # Backpropagate, then update the weights.
        loss.backward()
        optimizer.step()
        if epoch % 500 == 0:
            print("epoch{}, loss:{} ".format(epoch, loss.item()))
def test(model):
lst = [6,7,8,10]
ts = torch.from_numpy(np.array(lst,dtype=np.float32).reshape(-1,1))
x = model(ts)
print(x.data.numpy())
if __name__ == '__main__':
    # Build the training inputs: 0..9 as a float32 column vector (N, 1).
    input_data = np.arange(10, dtype=np.float32).reshape(-1, 1)
    print(input_data.shape)
    # Targets follow y = 2x + 5, so training should recover w=2, b=5.
    labels = (2 * input_data + 5).astype(np.float32)
    print(labels.shape)
    # Instantiate the one-in, one-out regression network.
    model = LinearModel(1, 1)
    # Fit it to the data.
    train(model, input_data, labels)
    # Check how well the fitted line extrapolates.
    test(model)
# PyTorch learning: building, training, and testing a regression network.
# (Blog-scrape metadata: latest recommended article published 2024-07-27 12:20:46.)