类别型特征可以通过 one-hot 或 multi-hot 编码生成特征向量,数值型特征可以直接作为特征向量。Deep Crossing 模型完整解决了从特征工程、稀疏向量稠密化到多层神经网络拟合优化目标等一系列问题,可以将各类业务数据特征直接带入模型。总体来看,Deep Crossing 解决了:
- 通过Embedding 解决了某些离散型特征编码过于稀疏化的问题
- 神经网络解决了特征自动交叉组合的问题
- 输出层直接对 CTR 进行预估
网络的各部分在现在看来都很常见和合理,但在当时能设计出这样的网络结构,并充分考虑到各类特征变量的特点,使得 Deep Crossing 在实际应用中能够处理各种类型的特征。
因为各部分都很常见,直接看代码复现就好~
代码复现:
数据下载链接:
criteo 数据用于推荐系统学习(CSDN 文库):https://download.csdn.net/download/Big_Huang/85155340?spm=1001.2014.3001.5501

代码最初编写得比较耦合,后参考链接 3 中网友的代码结构进行了重组,精炼了很多。
from torch.autograd import Variable
import torch
import torch.nn as nn
import pandas as pd
from sklearn.model_selection import train_test_split
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score
class ResidualUnit(nn.Module):
    """One residual unit of the Deep Crossing MLP.

    Two fully connected layers expand the input to ``hidden_dim`` and project
    it back to ``input_dim``; the projection is added element-wise to the
    input (shortcut connection) before the final ReLU.
    """

    def __init__(self, input_dim, hidden_dim):
        super().__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim, bias=True)
        self.relu1 = nn.ReLU()
        # fc2 maps back to input_dim so the element-wise add with x is valid.
        self.fc2 = nn.Linear(hidden_dim, input_dim, bias=True)
        self.relu2 = nn.ReLU()

    def forward(self, x):
        residual = self.fc1(x)
        residual = self.relu1(residual)
        residual = self.fc2(residual)
        # Shortcut add, then the output activation.
        return self.relu2(x + residual)
class DeepCrossing(nn.Module):
    """Deep Crossing CTR model: embedding -> stacking -> residual units -> scoring.

    Input batches are laid out as ``[dense columns | sparse columns]``: the
    first ``len(dense_features)`` columns are numeric values, the remaining
    columns are label-encoded categorical ids (one per sparse feature).
    """

    def __init__(self, features_info, hidden_unit, embedding_dim=10, outout_dim=1):
        """
        Args:
            features_info: (dense_features, sparse_features, sparse_features_nunique)
                — numeric column names, categorical column names, and a
                {name: cardinality} dict used to size each embedding table.
            hidden_unit: list of hidden widths; one ResidualUnit per entry.
            embedding_dim: width of every sparse-feature embedding.
            outout_dim: size of the final linear layer (parameter name kept
                as-is — sic — for backward compatibility with keyword callers).
        """
        super().__init__()
        # Unpack feature metadata.
        self.dense_features, self.sparse_features, self.sparse_features_nunique = features_info
        self.__dense_features_nums = len(self.dense_features)
        self.__sparse_features_nums = len(self.sparse_features)
        # One embedding table per sparse feature, sized by its cardinality.
        self.embedding_layers = nn.ModuleDict({
            'embed_' + str(key): nn.Embedding(num_embeddings=fea_num, embedding_dim=embedding_dim)
            for key, fea_num in self.sparse_features_nunique.items()
        })
        # Stacking-layer width: all embeddings concatenated plus raw dense inputs.
        self.stacking_dim = self.__sparse_features_nums * embedding_dim + self.__dense_features_nums
        # Multiple Residual Units operating on the stacked vector.
        self.residual_layers = nn.ModuleList([
            ResidualUnit(self.stacking_dim, unit) for unit in hidden_unit
        ])
        # Scoring layer.
        self.linear = nn.Linear(self.stacking_dim, outout_dim)

    def forward(self, x):
        # Split the batch into dense columns and sparse (id) columns.
        dense_inputs, sparse_inputs = x[:, :self.__dense_features_nums], x[:, self.__dense_features_nums:]
        # Embedding lookups require int64 indices.
        sparse_inputs = sparse_inputs.long()
        # Look up and concatenate every sparse-feature embedding.
        sparse_embeds = [self.embedding_layers['embed_' + fea_name](sparse_inputs[:, index])
                         for index, fea_name in enumerate(self.sparse_features)]
        sparse_embeds = torch.cat(sparse_embeds, dim=-1)
        # Stacking: embeddings + dense features in one vector.
        output = torch.cat([sparse_embeds, dense_inputs], dim=-1)
        # Residual MLP.
        for residual_unit in self.residual_layers:
            output = residual_unit(output)
        # CTR estimate in (0, 1); torch.sigmoid replaces the deprecated F.sigmoid.
        output = torch.sigmoid(self.linear(output))
        return output
def getCriteo(data_path='./criteo/train.csv'):
    """Load the Criteo sample, preprocess it, and describe its features.

    Returns:
        (df_data, label, features_info) where features_info is
        [dense_feature_names, sparse_feature_names, {sparse_name: nunique}].
    """
    df_data = pd.read_csv(data_path, sep=',')
    df_data.drop(['Id'], axis=1, inplace=True)

    dense_features = ['I{}'.format(i) for i in range(1, 14)]
    sparse_features = ['C{}'.format(i) for i in range(1, 27)]

    # Missing categoricals become the literal token '-1'; missing numerics 0.
    df_data[sparse_features] = df_data[sparse_features].fillna('-1')
    df_data[dense_features] = df_data[dense_features].fillna(0)

    # Integer-encode every categorical column.
    for feature in sparse_features:
        df_data[feature] = LabelEncoder().fit_transform(df_data[feature])

    # Scale numeric columns into [0, 1].
    df_data[dense_features] = MinMaxScaler().fit_transform(df_data[dense_features])

    # Separate the target column from the feature frame.
    label = df_data.pop('Label')

    # Cardinality of each categorical column (sizes the embedding tables).
    sparse_features_nunique = {fea: df_data[fea].nunique() for fea in sparse_features}

    features_info = [dense_features, sparse_features, sparse_features_nunique]
    return df_data, label, features_info
class TrainTask:
    """Runs the train/evaluate loop for a binary-classification model.

    Records per-epoch mean losses in ``train_loss`` / ``eval_loss`` so they
    can be visualized with ``plot_loss_curve``.
    """

    def __init__(self, model, lr=0.001, use_cuda=False):
        """
        Args:
            model: an nn.Module whose forward returns probabilities in (0, 1)
                (required by BCELoss).
            lr: Adam learning rate.
            use_cuda: move model and batches to GPU when one is available.
        """
        self.__device = torch.device("cuda" if torch.cuda.is_available() and use_cuda else "cpu")
        self.__model = model.to(self.__device)
        self.__loss_fn = nn.BCELoss().to(self.__device)
        # Optimize the device-moved model's parameters (was: bare `model`).
        self.__optimizer = torch.optim.Adam(self.__model.parameters(), lr=lr)
        self.train_loss = []
        self.eval_loss = []
        self.train_metric = []
        self.eval_metric = []

    def __train_one_batch(self, feas, labels):
        """Run one optimization step; returns (loss_value, outputs)."""
        self.__optimizer.zero_grad()
        # 1. forward
        outputs = self.__model(feas)
        # 2. loss — view(-1) (not squeeze) keeps a 1-D shape even when the
        #    final batch holds a single sample, matching `labels`.
        loss = self.__loss_fn(outputs.view(-1), labels)
        # 3. backward + parameter update
        loss.backward()
        self.__optimizer.step()
        return loss.item(), outputs

    def __train_one_epoch(self, train_dataloader, epoch_id):
        """Train over every batch once and record the epoch's mean loss."""
        self.__model.train()
        loss_sum = 0
        batch_id = 0
        for batch_id, (feas, labels) in enumerate(train_dataloader):
            # Variable is a no-op since PyTorch 0.4 — plain .to(device) suffices.
            feas, labels = feas.to(self.__device), labels.to(self.__device)
            loss, outputs = self.__train_one_batch(feas, labels)
            loss_sum += loss
        self.train_loss.append(loss_sum / (batch_id + 1))
        print("Training Epoch: %d, mean loss: %.5f" % (epoch_id, loss_sum / (batch_id + 1)))

    def train(self, train_dataset, eval_dataset, epochs, batch_size):
        """Alternate one training epoch and one evaluation pass, `epochs` times."""
        train_data_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
        eval_data_loader = DataLoader(dataset=eval_dataset, batch_size=batch_size, shuffle=True)
        for epoch in range(epochs):
            print('-' * 20 + ' Epoch {} starts '.format(epoch) + '-' * 20)
            # One training round
            self.__train_one_epoch(train_data_loader, epoch_id=epoch)
            # One validation pass
            self.__eval(eval_data_loader, epoch_id=epoch)

    def __eval(self, eval_dataloader, epoch_id):
        """One pass over the validation set; records the mean loss."""
        batch_id = 0
        loss_sum = 0
        self.__model.eval()
        # No gradients needed during evaluation — hoisted around the loop.
        with torch.no_grad():
            for batch_id, (feas, labels) in enumerate(eval_dataloader):
                feas, labels = feas.to(self.__device), labels.to(self.__device)
                # 1. forward
                outputs = self.__model(feas)
                # 2. loss
                loss = self.__loss_fn(outputs.view(-1), labels)
                loss_sum += loss.item()
        self.eval_loss.append(loss_sum / (batch_id + 1))
        print("Evaluate Epoch: %d, mean loss: %.5f" % (epoch_id, loss_sum / (batch_id + 1)))

    def __plot_metric(self, train_metrics, val_metrics, metric_name):
        """Plot train vs. validation curves for one metric."""
        epochs = range(1, len(train_metrics) + 1)
        plt.plot(epochs, train_metrics, 'bo--')
        plt.plot(epochs, val_metrics, 'ro-')
        plt.title('Training and validation '+ metric_name)
        plt.xlabel("Epochs")
        plt.ylabel(metric_name)
        plt.legend(["train_"+metric_name, 'val_'+metric_name])
        plt.show()

    def plot_loss_curve(self):
        """Visualize the recorded train/eval loss history."""
        self.__plot_metric(self.train_loss, self.eval_loss, "Loss")
if __name__ == "__main__":
    # Load the Criteo sample and its feature metadata.
    df_data, label, features_info = getCriteo()

    # Hold out 20% for validation, then wrap everything as float tensors.
    x_train, x_val, y_train, y_val = train_test_split(df_data, label, test_size=0.2, random_state=2022)
    train_dataset = TensorDataset(torch.tensor(x_train.values).float(),
                                  torch.tensor(y_train.values).float())
    val_dataset = TensorDataset(torch.tensor(x_val.values).float(),
                                torch.tensor(y_val.values).float())

    # Build the Deep Crossing model and run the training task.
    model = DeepCrossing(features_info, hidden_unit=[512, 256, 128, 64, 32], embedding_dim=8)
    task = TrainTask(model, use_cuda=True)
    task.train(train_dataset, val_dataset, 20, 16)
    task.plot_loss_curve()
参考:
1. 《深度学习推荐系统》