构造训练数据 (Construct the training data)
def get_train_data():
    """Build a toy training set of 200 samples with 5 features each.

    Features are random integers in [0, 10) standardized per column to
    zero mean / unit variance; labels are random 0/1 integers. Because
    the data is pure noise, a model trained on it will not learn anything
    meaningful -- this is demo data only.

    :return: tuple ``(x, y)`` of float32 tensors with shapes
        ``(200, 5)`` and ``(200,)``.
    """
    import numpy as np
    import pandas as pd

    def get_tensor_from_pd(dataframe_series) -> torch.Tensor:
        # Works for both DataFrame and Series: both expose ``.values``.
        return torch.tensor(data=dataframe_series.values)

    features = np.random.randint(0, 10, size=(200, 5)).astype(np.float64)
    # Standardize each column (same math as sklearn's StandardScaler with
    # default settings, ddof=0) -- drops the sklearn dependency for what
    # is a two-line numpy computation.
    std = features.std(axis=0)
    std[std == 0] = 1.0  # guard: leave constant columns unscaled
    features = (features - features.mean(axis=0)) / std
    df = pd.DataFrame(data=features)
    y = pd.Series(np.random.randint(0, 2, 200))
    return get_tensor_from_pd(df).float(), get_tensor_from_pd(y).float()
构造LSTM模型 (Build the LSTM model)
class LSTM(nn.Module):
    """Single-layer LSTM binary classifier.

    The input batch of feature vectors is reshaped into one sequence of
    length ``batch`` with batch size 1, run through the LSTM, and every
    step's hidden state is mapped through a linear layer and a sigmoid
    to a probability in [0, 1].

    :param input_size: dimensionality of each input feature vector
    :param hidden_layer_size: number of hidden units in the LSTM
    :param output_size: outputs per step (1 for binary classification)
    """

    def __init__(self, input_size=5, hidden_layer_size=100, output_size=1):
        super().__init__()
        self.hidden_layer_size = hidden_layer_size
        self.lstm = nn.LSTM(input_size, hidden_layer_size)
        self.linear = nn.Linear(hidden_layer_size, output_size)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input_x):
        """Return per-sample probabilities with shape ``(len(input_x), 1)``."""
        # (N, input_size) -> (seq_len=N, batch=1, input_size)
        seq = input_x.view(len(input_x), 1, -1)
        # Let nn.LSTM supply its default zero-initialized (h_0, c_0):
        # identical values to the original explicit torch.zeros(1, 1, H)
        # pair, but allocated on the input's device/dtype -- the original
        # hand-built CPU tensors would crash once the model was moved to
        # GPU.
        lstm_out, _ = self.lstm(seq)
        linear_out = self.linear(lstm_out.view(len(seq), -1))
        return self.sigmoid(linear_out)
全部代码 (Complete code listing)
import torch
import torch.nn as nn
import torch.utils.data as Data
def get_train_data():
    """Build a toy training set of 200 samples with 5 features each.

    Features are random integers in [0, 10) standardized per column to
    zero mean / unit variance; labels are random 0/1 integers. Because
    the data is pure noise, a model trained on it will not learn anything
    meaningful -- this is demo data only.

    :return: tuple ``(x, y)`` of float32 tensors with shapes
        ``(200, 5)`` and ``(200,)``.
    """
    import numpy as np
    import pandas as pd

    def get_tensor_from_pd(dataframe_series) -> torch.Tensor:
        # Works for both DataFrame and Series: both expose ``.values``.
        return torch.tensor(data=dataframe_series.values)

    features = np.random.randint(0, 10, size=(200, 5)).astype(np.float64)
    # Standardize each column (same math as sklearn's StandardScaler with
    # default settings, ddof=0) -- drops the sklearn dependency for what
    # is a two-line numpy computation.
    std = features.std(axis=0)
    std[std == 0] = 1.0  # guard: leave constant columns unscaled
    features = (features - features.mean(axis=0)) / std
    df = pd.DataFrame(data=features)
    y = pd.Series(np.random.randint(0, 2, 200))
    return get_tensor_from_pd(df).float(), get_tensor_from_pd(y).float()
class LSTM(nn.Module):
    """Single-layer LSTM binary classifier.

    The input batch of feature vectors is reshaped into one sequence of
    length ``batch`` with batch size 1, run through the LSTM, and every
    step's hidden state is mapped through a linear layer and a sigmoid
    to a probability in [0, 1].

    :param input_size: dimensionality of each input feature vector
    :param hidden_layer_size: number of hidden units in the LSTM
    :param output_size: outputs per step (1 for binary classification)
    """

    def __init__(self, input_size=5, hidden_layer_size=100, output_size=1):
        super().__init__()
        self.hidden_layer_size = hidden_layer_size
        self.lstm = nn.LSTM(input_size, hidden_layer_size)
        self.linear = nn.Linear(hidden_layer_size, output_size)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input_x):
        """Return per-sample probabilities with shape ``(len(input_x), 1)``."""
        # (N, input_size) -> (seq_len=N, batch=1, input_size)
        seq = input_x.view(len(input_x), 1, -1)
        # Let nn.LSTM supply its default zero-initialized (h_0, c_0):
        # identical values to the original explicit torch.zeros(1, 1, H)
        # pair, but allocated on the input's device/dtype -- the original
        # hand-built CPU tensors would crash once the model was moved to
        # GPU.
        lstm_out, _ = self.lstm(seq)
        linear_out = self.linear(lstm_out.view(len(seq), -1))
        return self.sigmoid(linear_out)
if __name__ == '__main__':
    # Build random demo data and wrap it in a shuffled mini-batch loader.
    x, y = get_train_data()
    train_loader = Data.DataLoader(
        dataset=Data.TensorDataset(x, y),
        batch_size=20,
        shuffle=True,
        num_workers=2,
    )
    model = LSTM()
    # BCELoss expects probabilities; the model already applies sigmoid.
    loss_function = nn.BCELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    epochs = 150

    # --- training ---
    model.train()
    for epoch in range(epochs):
        for seq, labels in train_loader:
            optimizer.zero_grad()
            y_pred = model(seq).squeeze()
            single_loss = loss_function(y_pred, labels)
            single_loss.backward()
            optimizer.step()
            # .item() prints a plain float instead of a tensor repr.
            print("Train Step:", epoch, " loss: ", single_loss.item())

    # --- evaluation ---
    # One pass over the data is enough: the original re-ran the identical
    # evaluation `epochs` (150) times. Also disable autograd so no
    # gradient graph is built while only computing losses.
    model.eval()
    with torch.no_grad():
        for seq, labels in train_loader:
            y_pred = model(seq).squeeze()
            single_loss = loss_function(y_pred, labels)
            print("EVAL loss: ", single_loss.item())