PyTorch实现Boston房产数据回归分析
要求提前安装 torch、scikit-learn（sklearn）库
1.简单回归网络的定义
# Simple regression network: one hidden layer with ReLU activation.
class Net(torch.nn.Module):
    """Fully-connected regressor mapping n_feature -> 100 -> n_output."""

    def __init__(self, n_feature, n_output):
        super(Net, self).__init__()
        # Hidden layer widens the input to 100 units; predict maps to output size.
        self.hidden = torch.nn.Linear(n_feature, 100)
        self.predict = torch.nn.Linear(100, n_output)

    def forward(self, x):
        # Linear -> ReLU -> Linear; no final activation (regression targets
        # are unbounded real values).
        return self.predict(torch.relu(self.hidden(x)))
2.主函数部分
2.1 配置部分
2.1.1 导入数据
# Load the Boston housing dataset (506 samples, 13 numeric features).
# NOTE(review): load_boston was removed in scikit-learn 1.2 — this code
# requires scikit-learn < 1.2, or the dataset must be obtained another way.
boston = load_boston()
base_data = boston.data
target_data = boston.target
X,Y = base_data,target_data
2.1.2 划分训练集与测试集
# Hold out the last rows as a test set; the first 496 samples train the model.
split = 496
X_train, X_test = X[:split, :], X[split:, :]
Y_train, Y_test = Y[:split], Y[split:]
2.1.3 配置网络结构
# Instantiate the network: 13 input features (Boston columns) -> 1 predicted price.
net = Net(13,1)
2.1.4 配置损失函数、求解器设置
# Loss: mean squared error, the standard criterion for regression.
loss_func = torch.nn.MSELoss()
# Optimizer: Adam with learning rate 0.01.
optimizer = torch.optim.Adam(net.parameters(),lr=0.01)
2.2 训练部分
包含测试与验证
# Convert the numpy arrays to float32 tensors ONCE, outside the loop —
# the original re-created the training tensors every iteration and re-wrapped
# the test tensors with torch.tensor(Tensor), which copies and warns.
X_data = torch.tensor(X_train, dtype=torch.float32)
Y_data = torch.tensor(Y_train, dtype=torch.float32)
X_test_t = torch.tensor(X_test, dtype=torch.float32)
Y_test_t = torch.tensor(Y_test, dtype=torch.float32)
for i in range(10000):
    # Training step. Call net(x) rather than net.forward(x) so module hooks run.
    pred = torch.squeeze(net(X_data))
    # The 0.001 factor scales the raw MSE (order of hundreds) down to tame the
    # gradient magnitude; it is undone in the printout below.
    loss = loss_func(pred, Y_data) * 0.001
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    print("ite:{},loss:{}".format(i, loss * 1000))
    # Evaluation on the held-out set — no gradients needed.
    with torch.no_grad():
        pred = torch.squeeze(net(X_test_t))
        loss_test = loss_func(pred, Y_test_t)
    print('ite:{},test_loss:{}'.format(i, loss_test))
2.3 模型保存
# Save the trained model (pickles the entire Module object).
# NOTE(review): torch.save(net.state_dict(), ...) is the recommended, more
# portable format — confirm before changing the on-disk contract.
torch.save(net,"data_model.pkl")
3. 结果可视化
4.完整代码
import torch
from sklearn.datasets import load_boston
# Simple regression network definition.
class Net(torch.nn.Module):
    """Two-layer MLP for regression: n_feature -> 100 -> n_output."""

    def __init__(self, n_feature, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, 100)
        self.predict = torch.nn.Linear(100, n_output)

    def forward(self, x):
        # ReLU between the two linear layers; the output is left linear
        # because regression targets are unbounded.
        hidden_act = torch.relu(self.hidden(x))
        return self.predict(hidden_act)
if __name__ == "__main__":
    # Load the Boston housing data (506 samples, 13 features).
    # NOTE(review): load_boston was removed in scikit-learn 1.2; this script
    # needs scikit-learn < 1.2 or an alternative data source.
    boston = load_boston()
    X, Y = boston.data, boston.target

    # Train/test split: first 496 rows train, remaining rows test.
    X_train, Y_train = X[0:496, :], Y[0:496]
    X_test, Y_test = X[496:, :], Y[496:]

    # Network: 13 input features -> 1 predicted price.
    net = Net(13, 1)
    # Mean-squared-error loss and Adam optimizer.
    loss_func = torch.nn.MSELoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=0.01)

    # Convert to float32 tensors ONCE — the original re-created the training
    # tensors every iteration and re-wrapped the already-converted test
    # tensors with torch.tensor(Tensor), which copies and warns.
    X_data = torch.tensor(X_train, dtype=torch.float32)
    Y_data = torch.tensor(Y_train, dtype=torch.float32)
    X_test_t = torch.tensor(X_test, dtype=torch.float32)
    Y_test_t = torch.tensor(Y_test, dtype=torch.float32)

    for i in range(10000):
        # Training step; net(x) (not net.forward) so module hooks run.
        # The 0.001 factor scales the raw MSE (hundreds) down to tame the
        # gradient magnitude; the printout undoes it to show the true MSE.
        pred = torch.squeeze(net(X_data))
        loss = loss_func(pred, Y_data) * 0.001
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print("ite:{},loss:{}".format(i, loss * 1000))

        # Evaluation on the held-out set — no gradients needed.
        with torch.no_grad():
            pred = torch.squeeze(net(X_test_t))
            loss_test = loss_func(pred, Y_test_t)
        print('ite:{},test_loss:{}'.format(i, loss_test))

    # Save the trained model (pickles the whole Module; saving state_dict is
    # the more portable alternative but would change the on-disk format).
    torch.save(net, "data_model.pkl")