一、函数讲解
"""
nn.Linear(in_features, out_features)
in_features:指输入张量最后一维的大小,即输入 [batch_size, size] 中的 size(实际上 nn.Linear 支持任意维度的输入,只要最后一维等于 in_features)。
out_features:指输出张量最后一维的大小,即输出形状为 [batch_size, output_size];它也等于该全连接层的神经元个数。
Examples::
m = nn.Linear(20, 30)
input = torch.randn(128, 20)
output = m(input)
print(output.size())
结果:
torch.Size([128, 30])
"""
二、房价预测
import torch
import numpy as np
import re
# ---- Data preparation ----
# Each line of housing.data holds 13 feature columns plus the target
# (median house value), separated by runs of whitespace.
# `with` ensures the file handle is closed (the original leaked it).
with open('data/housing.data') as ff:
    data = []
    for item in ff:
        # Collapse runs of whitespace to a single space so split(' ')
        # yields clean fields. Raw string avoids the invalid-escape
        # warning for '\s'.
        out = re.sub(r'\s{2,}', ' ', item).strip()
        data.append(out.split(' '))
# np.float was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin float (== np.float64) is the correct replacement.
data = np.array(data).astype(float)
Y = data[:, -1]     # target column: house price
X = data[:, 0:-1]   # 13 feature columns
print(X.shape)  # (506, 13)
print(Y.shape)  # (506,)
# Training split: first 496 samples
X_train = X[:496]
Y_train = Y[:496]
# Test split: remaining 10 samples
X_test = X[496:]
Y_test = Y[496:]
# 定义网络结构
class Net(torch.nn.Module):
    """Single-layer linear regression network.

    Maps an input of shape (batch, n_feature) to an output of
    shape (batch, n_output) through one fully connected layer.
    """

    def __init__(self, n_feature, n_output):
        super(Net, self).__init__()
        # The whole model is just one linear (fully connected) layer.
        self.predict = torch.nn.Linear(in_features=n_feature, out_features=n_output)

    def forward(self, x):
        """Apply the linear layer to ``x`` and return the result."""
        return self.predict(x)
net = Net(13, 1)
# Mean squared error — the standard loss for regression.
loss_func = torch.nn.MSELoss()
# Adam optimizer, learning rate 0.0001.
op = torch.optim.Adam(net.parameters(), lr=0.0001)

# Convert the numpy splits to tensors ONCE, outside the loop —
# they never change between iterations (the original rebuilt them
# 1000 times).
x_data = torch.tensor(X_train, dtype=torch.float32)
y_data = torch.tensor(Y_train, dtype=torch.float32)

# ---- Training loop ----
for i in range(1000):
    # Call the module itself, not .forward(), so registered hooks run.
    pred = net(x_data)
    pred = torch.squeeze(pred)  # (N, 1) -> (N,) to match y_data
    # The 0.01 factor only rescales the loss (and hence gradients);
    # kept to preserve the original training dynamics.
    loss = loss_func(pred, y_data) * 0.01
    op.zero_grad()   # clear gradients accumulated from the last step
    loss.backward()  # backpropagate
    op.step()        # apply the parameter update
    # .item() prints a plain float instead of a tensor repr.
    print(i, loss.item())
    print(pred[:10])
    print(y_data[:10])
# ---- Evaluation on the held-out test split ----
x_data = torch.tensor(X_test, dtype=torch.float32)
y_data = torch.tensor(Y_test, dtype=torch.float32)
# no_grad: no gradients are needed for evaluation.
with torch.no_grad():
    # Call the module itself, not .forward(), so registered hooks run.
    pred = net(x_data)
    pred = torch.squeeze(pred)  # (N, 1) -> (N,)
    loss_test = loss_func(pred, y_data) * 0.01
# BUG FIX: the original printed the training `loss` here instead of
# the test loss it just computed.
print(i, loss_test.item())
print(pred[:10])
print(y_data[:10])
# ---- Persist the model ----
# Option 1: save the whole module (pickles the class definition too,
# so loading requires the same code layout).
torch.save(net, 'model/regresion.pkl')
# BUG FIX: the original called torch.load but discarded its return
# value, making the load a no-op. Bind the result.
net_loaded = torch.load('model/regresion.pkl')
# Option 2 (recommended): save only the parameters (state dict) —
# smaller and independent of the class's module path.
torch.save(net.state_dict(), 'model/regresion_param.pkl')
# Restore parameters into an existing module instance.
net.load_state_dict(torch.load('model/regresion_param.pkl'))