tensor
张量(tensor)是深度学习框架中数据的基本单位,PyTorch 用它表示标量、向量和矩阵
# Common ways to construct a tensor.
x = torch.empty((3, 5))       # uninitialized 3x5 tensor (arbitrary values)
x = torch.zeros((5, 4))       # 5x4 tensor of zeros
x = torch.rand((5, 3))        # uniform samples on [0, 1)
x = torch.randn((5, 3))       # standard-normal N(0, 1) samples
x = torch.randn_like(x)       # normal samples with the same shape/dtype as x
x = torch.tensor([[1, 2], [3, 4.3]])  # from a Python list; float dtype inferred
view
# view reshapes without copying: the result shares storage with x.
z = x.view(-1, 8)  # -1 lets torch infer the leading dim; trailing dim is 8
y = x.view(16)     # flatten into a 1-D tensor of 16 elements
与numpy的转化
# Round-trip between torch tensors and NumPy arrays.
a = torch.ones(5)
b = a.numpy()            # CPU tensor -> ndarray; the two share memory
a = np.ones(5)
b = torch.from_numpy(a)  # ndarray -> tensor; also shares memory
线性回归模型
# Toy dataset: the integers 0..99 as a float32 column vector —
# shape (num_samples, 1), the layout a Linear layer expects.
x_values = list(range(100))
x_train = np.array(x_values, dtype=np.float32).reshape(-1, 1)
class LinearRegressionModel(nn.Module):
    """Single-layer affine model: y = W @ x + b."""

    def __init__(self, input_dim, output_dim):
        super().__init__()
        # One fully-connected layer maps input_dim features to output_dim outputs.
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        """Apply the affine transform to a batch of inputs."""
        return self.linear(x)
# Scalar regression: one input feature, one output value per sample.
# (input_dim/output_dim were referenced but never defined — fixed here.)
input_dim = 1
output_dim = 1
model = LinearRegressionModel(input_dim, output_dim)

epochs = 2000            # number of full passes over the training data
learning_rate = 0.01     # optimizer step size
# Adam adapts a per-parameter learning rate; a robust default optimizer.
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
criterion = nn.MSELoss()  # mean-squared-error loss for regression
# Training loop: fit the model to (x_train, y_train).
# NOTE(review): y_train is never defined in this file — it must be a float32
# numpy array with the same shape as x_train; define it before this loop.
# The numpy->tensor conversion is loop-invariant, so do it once up front.
inputs = torch.from_numpy(x_train)
labels = torch.from_numpy(y_train)
for epoch in range(1, epochs + 1):
    # Gradients accumulate by default; clear them every iteration.
    optimizer.zero_grad()
    # Forward pass.
    outputs = model(inputs)
    # Compute the loss.
    loss = criterion(outputs, labels)
    # Backpropagate from the loss.
    loss.backward()
    # Update the weights.
    optimizer.step()
    if epoch % 50 == 0:
        print('epoch {}, loss {}'.format(epoch, loss.item()))
# Inference needs no gradients — disable autograd instead of the deprecated
# requires_grad_() + .data pattern.
with torch.no_grad():
    predicted = model(torch.from_numpy(x_train)).numpy()
# Persist only the learned parameters (state_dict), not the whole module.
torch.save(model.state_dict(), 'model.pkl')
# Restore parameters into an already-constructed model of the same architecture.
model.load_state_dict(torch.load('model.pkl'))