ESMM Multi-Task Learning in Practice

A Python walkthrough of the ESMM (Entire Space Multi-task Model) multi-objective model.

The imports used throughout:

import tensorflow as tf
from tensorflow.python.framework import ops

Building the feature columns
Index the variable-length, numeric, string, and crossed features, and collect them into two lists of feature columns: one feeding the wide (linear) network and one feeding the deep network.

def build_feature():
    first_feature_columns = []   # raw categorical/crossed columns for the wide part
    second_feature_columns = []  # embedding/numeric columns for the deep part
    parms = Config.init_model_args()  # init_model_args() is defined below
    string_fea = parms.string_fea
    var_fea = parms.var_fea
    double_fea = parms.double_fea
    cross_fea = parms.cross_fea

    # Single-valued string features: hash bucket for wide, embedding for deep.
    for fea, size in string_fea.items():
        temp = tf.feature_column.categorical_column_with_hash_bucket(fea, size[0], dtype=tf.string)
        emb = tf.feature_column.embedding_column(temp, size[1])
        first_feature_columns.append(temp)
        second_feature_columns.append(emb)
    # Variable-length string features: the same hash-bucket column handles
    # multi-valued input; the embedding column averages over the values.
    for fea, size in var_fea.items():
        temp = tf.feature_column.categorical_column_with_hash_bucket(fea, size[0], dtype=tf.string)
        emb = tf.feature_column.embedding_column(temp, size[1])
        first_feature_columns.append(temp)
        second_feature_columns.append(emb)
    # Numeric features go to the deep part directly.
    for elem in double_fea:
        temp = tf.feature_column.numeric_column(elem)
        second_feature_columns.append(temp)
    # Crossed features: fea is a (name1, name2) key, size is (bucket, emb_dim).
    for fea, size in cross_fea.items():
        temp = tf.feature_column.crossed_column([fea[0], fea[1]], size[0])
        emb = tf.feature_column.embedding_column(temp, size[1])
        first_feature_columns.append(temp)
        second_feature_columns.append(emb)
    print("Number of feature column features:%d" % (len(second_feature_columns) + len(first_feature_columns)))
    return first_feature_columns, second_feature_columns
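
To make the wide/deep split concrete, here is a minimal, self-contained sketch (toy feature name and sizes, not from the real config files) showing how the same hash-bucket column feeds the wide part raw and the deep part through an embedding:

import tensorflow as tf

# One categorical feature hashed into 100 buckets (toy sizes).
item_cate = tf.feature_column.categorical_column_with_hash_bucket(
    "item_cate1", 100, dtype=tf.string)
item_cate_emb = tf.feature_column.embedding_column(item_cate, 8)

features = {"item_cate1": tf.constant([["a"], ["b"]])}

# Wide input: the raw categorical column through a linear model -> [batch, 1] logits.
wide_logits = tf.feature_column.linear_model(features, [item_cate], units=1)
# Deep input: the embedding column densified into a [batch, 8] tensor.
deep_input = tf.feature_column.input_layer(features, [item_cate_emb])

with tf.Session() as sess:
    sess.run([tf.global_variables_initializer(), tf.tables_initializer()])
    print(sess.run([wide_logits, deep_input]))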

Building the shallow (wide) network

def build_linear(features, first_column, name):
    # The wide part: a linear model over the raw categorical/crossed columns,
    # producing one logit per example, scoped per tower ("ctr" / "cvr").
    with tf.variable_scope("Linear"):
        with tf.variable_scope(name):
            wide_logits = tf.feature_column.linear_model(features, first_column, units=1)
    return wide_logits

Building the deep network
hidden_units is a comma-separated string describing the layer sizes, e.g. "200,128,64".

def build_deep_layers(input_x, parms, name, mode):
    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    with tf.variable_scope("Deep"):
        with tf.variable_scope(name):
            deep_emb = input_x
            for elem in parms['hidden_units'].split(","):
                deep_emb = tf.layers.dense(deep_emb, units=int(elem), activation=None)

                if parms["is_bn"]:
                    # `training` (not `trainable`) is what switches batch norm
                    # between batch statistics (train) and moving averages (eval).
                    deep_emb = tf.layers.batch_normalization(deep_emb, momentum=0.95,
                                                             training=is_training)

                deep_emb = tf.nn.relu(deep_emb)

                if parms["is_dropout"]:
                    # `rate` is the drop fraction; without training=True,
                    # tf.layers.dropout is a no-op.
                    deep_emb = tf.layers.dropout(deep_emb, rate=parms["dropout_rate"],
                                                 training=is_training)

            # Final projection to a single logit for this tower.
            deep_emb = tf.layers.dense(deep_emb, units=1, activation=None)
    return deep_emb
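
A quick shape check of the tower (a sketch with toy params; the real params dict is assembled in build_model below):

params = {"hidden_units": "200,128,64", "is_bn": True,
          "is_dropout": True, "dropout_rate": 0.1}
x = tf.placeholder(tf.float32, [None, 32])  # stand-in for the deep input_layer output
logit = build_deep_layers(x, params, "ctr", tf.estimator.ModeKeys.TRAIN)
print(logit.shape)  # (?, 1) -- one logit per example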

Choosing the optimizer

def get_optimizer(optimizer, learning_rate, global_step=0):
    if optimizer == "sgd":
        return tf.train.GradientDescentOptimizer(learning_rate)
    elif optimizer == "adadelta":
        return tf.train.AdadeltaOptimizer(learning_rate=learning_rate,
                                          rho=0.95,
                                          epsilon=1e-08)
    elif optimizer == "adagrad":
        return tf.train.AdagradOptimizer(learning_rate=learning_rate)
    elif optimizer == "adam":
        return tf.train.AdamOptimizer(learning_rate=learning_rate,
                                      beta1=0.95,
                                      beta2=0.999,
                                      epsilon=1e-09)
    elif optimizer == "momentum":
        return tf.train.MomentumOptimizer(learning_rate=learning_rate,
                                          momentum=0.9)
    elif optimizer == "ftrl":
        return tf.train.FtrlOptimizer(learning_rate)
    elif optimizer == "rmsprop":
        return tf.train.RMSPropOptimizer(learning_rate=learning_rate,
                                         decay=0.9,
                                         momentum=0.0,
                                         epsilon=1e-10)
    elif optimizer == "adagradDA":
        # AdagradDA requires the global step as an int64 tensor.
        global_step = tf.cast(global_step, tf.int64)
        return tf.train.AdagradDAOptimizer(learning_rate=learning_rate, global_step=global_step)
    else:
        raise ValueError("unknown optimizer: %s" % optimizer)
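
Note that the decay arguments parsed below (is_decy, decay_steps, decay_rate) are never applied in the original flow; a hypothetical way to wire them in is to pass a decayed learning-rate tensor, since every tf.train optimizer accepts one:

# Hypothetical wiring of the decay args; not part of the original code.
def decayed_lr(base_lr, decay_steps, decay_rate):
    return tf.train.exponential_decay(base_lr,
                                      tf.train.get_or_create_global_step(),
                                      decay_steps, decay_rate,
                                      staircase=True)

# opt = get_optimizer("adam", decayed_lr(0.001, 5000, 0.95))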

Building the training procedure
Per-tower bias terms for ctr and cvr are added on top of the wide and deep logits.
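The two supervised losses follow the ESMM factorization over the entire impression space:

pCTCVR(x) = pCTR(x) * pCVR(x), i.e. P(click=1, convert=1 | x) = P(click=1 | x) * P(convert=1 | click=1, x)

Only pCTR and pCTCVR receive direct supervision, so the CVR tower is trained implicitly over all impressions, avoiding the sample-selection bias of fitting CVR on clicked samples only.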

def esmm_train(features, labels, mode, params):
    # "cvr_label" here is the conversion label over ALL impressions,
    # i.e. the CTCVR target (click AND convert), not a clicked-only label.
    ctr_labels = tf.cast(tf.reshape(labels["ctr_label"], [-1, 1]), dtype=tf.float32)
    ctcvr_labels = tf.cast(tf.reshape(labels["cvr_label"], [-1, 1]), dtype=tf.float32)

    # Reuse the columns built in build_model() instead of re-reading the
    # feature config files inside the model_fn.
    first_column = params['first_feature_columns']
    second_feature_columns = params['second_feature_columns']

    ctr_wide_logit = build_linear(features, first_column, "ctr")
    cvr_wide_logit = build_linear(features, first_column, "cvr")

    deep_emb = tf.feature_column.input_layer(features=features,feature_columns=second_feature_columns)

    """
    ctr
    """
    ctr_deep_logits = build_deep_layers(deep_emb, params, "ctr", mode)

    # Create the bias under the "Deep" scope so the scope-filtered deep
    # optimizer below actually updates it.
    with tf.variable_scope("Deep"):
        ctr_BIAS = tf.get_variable(name='ctr_bias', shape=[1], initializer=tf.constant_initializer(0.0))

    ctr_out = ctr_deep_logits + ctr_BIAS + ctr_wide_logit
    ctr_score = tf.identity(tf.nn.sigmoid(ctr_out),name="ctr_score")

    """
    cvr
    """
    cvr_deep_logits = build_deep_layers(deep_emb, params, "cvr", mode)

    with tf.variable_scope("Deep"):
        cvr_BIAS = tf.get_variable(name='cvr_bias', shape=[1], initializer=tf.constant_initializer(0.0))

    cvr_out = cvr_deep_logits + cvr_BIAS + cvr_wide_logit
    cvr_score = tf.identity(tf.nn.sigmoid(cvr_out),name="cvr_score")

    """
    ctcvr
    """
    ctcvr_score = ctr_score * cvr_score
    ctcvr_score = tf.identity(ctcvr_score,name="ctr_cvr_score")

    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'ctcvr_score': ctcvr_score,
            'ctr_score': ctr_score,
            'cvr_score': cvr_score,
            'userid': features['userid'],
            'itemid': features['itemid']
        }
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)

    ctr_labels = tf.identity(ctr_labels,name="ctr_labels")
    ctcvr_labels = tf.identity(ctcvr_labels,name="ctcvr_labels")

    ctr_auc = tf.metrics.auc(labels=ctr_labels, predictions=ctr_score,name="ctr_auc")
    ctr_cvr_auc = tf.metrics.auc(labels=ctcvr_labels, predictions=ctcvr_score,name="ctcvr_auc")

    # tf.losses.log_loss already returns a scalar mean over the batch.
    ctr_loss = tf.losses.log_loss(labels=ctr_labels, predictions=ctr_score)
    ctr_cvr_loss = tf.losses.log_loss(labels=ctcvr_labels, predictions=ctcvr_score)

    all_loss = ctr_loss + ctr_cvr_loss

    true_ctr = tf.metrics.mean(ctr_labels)
    pred_ctr = tf.metrics.mean(ctr_score)

    true_ctcvr = tf.metrics.mean(ctcvr_labels)
    pred_ctcvr = tf.metrics.mean(ctcvr_score)

    eval_metric_ops = {
        "ctr_auc": ctr_auc,
        "ctr_cvr_auc": ctr_cvr_auc,
        "true_ctr":true_ctr,
        "pred_ctr":pred_ctr,
        "true_ctcvr":true_ctcvr,
        "pred_ctcvr":pred_ctcvr
    }

    tf.summary.scalar('ctr_auc', ctr_auc[1])
    tf.summary.scalar("ctr_loss", ctr_loss)
    tf.summary.scalar('ctr_cvr_auc', ctr_cvr_auc[1])
    tf.summary.scalar('ctr_cvr_loss', ctr_cvr_loss)

    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(
            mode, loss=all_loss, eval_metric_ops=eval_metric_ops)
    assert mode == tf.estimator.ModeKeys.TRAIN

    nn_optimizer = get_optimizer(params['optimizer'], learning_rate=params['learning_rate'])
    nn_train = nn_optimizer.minimize(all_loss,
                                     global_step=tf.train.get_global_step(),
                                     var_list=ops.get_collection(
                                        ops.GraphKeys.TRAINABLE_VARIABLES,
                                        scope='Deep'))

    lr_optimizer = tf.train.FtrlOptimizer(learning_rate=0.005,
                                          l1_regularization_strength=0.0001,
                                          l2_regularization_strength=0.01)
    lr_train = lr_optimizer.minimize(all_loss,
                                     global_step=tf.train.get_global_step(),
                                     var_list=ops.get_collection(
                                        ops.GraphKeys.TRAINABLE_VARIABLES,
                                        scope='Linear'))

    # Batch-norm moving-average updates must run alongside the train ops.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    return tf.estimator.EstimatorSpec(
        mode=tf.estimator.ModeKeys.TRAIN,
        loss=all_loss,
        eval_metric_ops=eval_metric_ops,
        train_op=tf.group(nn_train, lr_train, *update_ops)
    )
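
The two optimizers deliberately touch disjoint variable sets, filtered by the variable_scope prefixes set in build_linear and build_deep_layers (which is also why the ctr/cvr bias variables are created under the "Deep" scope above). After the graph is built, the split can be inspected directly:

# Run after building the graph: list what each optimizer will update.
deep_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Deep')
linear_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Linear')
print([v.name for v in deep_vars])    # Deep/ctr/..., Deep/cvr/..., Deep/ctr_bias, ...
print([v.name for v in linear_vars])  # Linear/ctr/..., Linear/cvr/...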

Parameter initialization

import argparse

'''
Sample lines in var_feature.txt (feature name, hash-bucket size, embedding dim):
user_cate1_var 100000 16
user_cate2_var 100000 16
'''
def read_var_fea():
    path = "var_feature.txt"
    fea = {}
    with open(path, "r") as f:
        line = f.readline()
        while line != "":
            fields = line.rstrip("\n").split(" ")
            if len(fields) == 3:
                name = fields[0]
                bucket = int(fields[1])
                emb = int(fields[2])
                fea[name] = (bucket, emb)
            line = f.readline()
    return fea

'''
Sample lines in double_feature.txt (one numeric feature name per line):
day1_ctr_double
day2_ctr_double
day7_ctr_double
'''
def read_double_feature():
    path = "double_feature.txt"
    fea = []
    with open(path, "r") as f:
        line = f.readline()
        while line != "":
            fea.append(line.rstrip("\n"))
            line = f.readline()
    return fea

'''
Sample lines in string_feature.txt (feature name, hash-bucket size, embedding dim):
itemid 10000 8
item_cate1 1000 16
item_cate2 1000 16
'''
def read_string_feature():
    path = "string_feature.txt"
    fea = {}
    with open(path, "r") as f:
        line = f.readline()
        while line != "":
            fields = line.rstrip("\n").split(" ")
            if len(fields) == 3:
                name = fields[0]
                bucket = int(fields[1])
                emb = int(fields[2])
                fea[name] = (bucket, emb)
            line = f.readline()
    return fea

'''
Sample lines in cross_feature.txt (two feature names, hash-bucket size, embedding dim):
gender itemid 1000000 16
age itemid 1000000 16
'''
def read_cross_feature():
    path = "cross_feature.txt"
    fea = {}
    with open(path, "r") as f:
        for line in f:
            if line != "":
                fields = line.strip().split(" ")
                if len(fields) == 4:
                    name1 = fields[0]
                    name2 = fields[1]
                    bucket = int(fields[2])
                    emb = int(fields[3])
                    key = (name1, name2)
                    val = (bucket, emb)
                    fea[key] = val
    return fea

'''
Sample lines in seq_feature.txt (one sequence feature name per line):
click_seq
'''
def read_seq_feature():
    path = "seq_feature.txt"
    fea = []
    with open(path, "r") as f:
        line = f.readline()
        while line != "":
            fea.append(line.rstrip("\n"))
            line = f.readline()
    return fea
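
The readers above expect four small space-separated text files plus seq_feature.txt. For a local smoke test, a helper (hypothetical, mirroring the sample lines in the docstrings) can generate them:

# Writes toy config files matching the formats documented above.
def write_sample_feature_files():
    samples = {
        "string_feature.txt": "itemid 10000 8\nitem_cate1 1000 16\nitem_cate2 1000 16\n",
        "var_feature.txt": "user_cate1_var 100000 16\nuser_cate2_var 100000 16\n",
        "double_feature.txt": "day1_ctr_double\nday2_ctr_double\nday7_ctr_double\n",
        "cross_feature.txt": "gender itemid 1000000 16\nage itemid 1000000 16\n",
        "seq_feature.txt": "click_seq\n",
    }
    for path, body in samples.items():
        with open(path, "w") as f:
            f.write(body)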


def init_model_args():
    """
    Basic but important params for training
    """
    parser = argparse.ArgumentParser()
 
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--embedding_size', type=int, default=12)
    parser.add_argument('--num_epochs', type=int, default=20)
    parser.add_argument('--hidden_units', default="200,128,64")  # comma-separated; split in build_deep_layers
    parser.add_argument('--num_cross_layers', type=int, default=3)
    parser.add_argument('--num_sampled', type=int, default=32)
    parser.add_argument('--rnn_hidden',type=int,default=5)

    parser.add_argument('--optimizer', default='adam')
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument("--is_decy",default="true")
    parser.add_argument("--decay_steps", default=5000)
    parser.add_argument("--decay_rate", default=0.95)

    parser.add_argument("--is_bn", default=True)
    parser.add_argument("--is_dropout", default="true")
    parser.add_argument("--dropout_rate", default=0.1)

    parser.add_argument('--save_model_dir', default='')
    parser.add_argument('--training_path', default="")
    parser.add_argument('--validation_path', default="")
    parser.add_argument('--test_path', default="")
    parser.add_argument('--feature_dict', default=[])
    parser.add_argument('--log_step_count_steps',type=int, default=100)
    parser.add_argument('--result_path', default="")
    parser.add_argument('--export_path', default="")
    parser.add_argument('--mapPath', default="")
    parser.add_argument('--eval_monitor_path', default="")

    parser.add_argument('--date_num', type=int, default=1)
    parser.add_argument('--part_num', type=int, default=6000)
    parser.add_argument('--train_begin_date', default="")
    parser.add_argument('--val_date', default="")

    parser.add_argument('--eval_steps', type=int, default = 1000)
    parser.add_argument('--eval_start_delay', type=int, default = 30) 
    parser.add_argument('--eval_throttle_secs', type=int, default = 30) 
    parser.add_argument('--num_ps', type=int, default=4)
    parser.add_argument('--task_type', default="train")
    # distribution args referenced by run() below
    parser.add_argument('--num_workers', type=int, default=0)
    parser.add_argument('--task_id', type=int, default=0)
    parser.add_argument('--job_type', default='chief')

    # feature configs; argument names must match the attributes read in
    # build_feature() and getFeatureDict() (string_fea, var_fea, ...)
    string_feature = read_string_feature()
    parser.add_argument('--string_fea', default=string_feature)
    double_feature = read_double_feature()
    parser.add_argument('--double_fea', default=double_feature)
    var_feature = read_var_fea()
    parser.add_argument('--var_fea', default=var_feature)
    seq_feature = read_seq_feature()
    parser.add_argument('--seq_fea', default=seq_feature)
    fea_list = double_feature + list(string_feature.keys()) + list(var_feature.keys())
    parser.add_argument('--fea_list', default=fea_list)
    cross_feature = read_cross_feature()
    parser.add_argument('--cross_fea', default=cross_feature)

    args = parser.parse_args()

    return args
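
A quick check of the parsed defaults (hypothetical invocation in the comment):

# e.g. python esmm.py --optimizer adam --lr 0.001 --hidden_units 256,128,64
args = init_model_args()
print(args.optimizer, args.lr, args.hidden_units)  # hidden_units stays a comma-separated string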

Building the model

import tensorflow as tf

# Build the tf.parse_example spec: feature name -> FixedLen/VarLen feature
def getFeatureDict(label):
    params = init_model_args()
    features = params.fea_list
    doubleFea = params.double_fea
    varFea = params.var_fea
    temp = {}
    for elem in features:
        if elem in varFea:
            temp[elem] = tf.VarLenFeature(tf.string)
        elif elem in doubleFea:
            temp[elem] = tf.FixedLenFeature([],tf.float32)
        elif elem not in ['extra_label_float','label_float']:
            temp[elem] = tf.FixedLenFeature([], tf.string)

    seq_fea = params.seq_fea
    for fea in seq_fea:
        temp[fea] = tf.VarLenFeature(dtype=tf.float32)

    if label:
        temp["label_float"] = tf.FixedLenFeature([], tf.float32)
        temp["extra_label_float"] = tf.FixedLenFeature([], tf.float32)
    temp["userid"] = tf.FixedLenFeature([], tf.string)

    return temp
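
The tools module referenced by the Model class below (train_input_fn, eval_input_fn, predict_input_fn, getFeaDict) is not shown in this post. As a rough sketch of what eval_input_fn could look like with this parsing spec, assuming the data is stored as TFRecord files of tf.Example protos:

# A sketch only: assumes TFRecord inputs and that the *_float fields
# above carry the click (ctr) and conversion (ctcvr) labels.
def eval_input_fn(file, batch_size):
    spec = getFeatureDict(label=True)

    def parse(serialized):
        parsed = tf.parse_example(serialized, spec)
        labels = {"ctr_label": parsed.pop("label_float"),
                  "cvr_label": parsed.pop("extra_label_float")}
        return parsed, labels

    dataset = tf.data.TFRecordDataset(file)
    return dataset.batch(batch_size).map(parse)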

class Model(object):
    def __init__(self, train_dataset_path, val_dataset_path, save_model_dir,
                 num_epochs, batch_size, embedding_size, learning_rate,export_path,test_path,
                 optimizer, task,is_decy,decay_steps,decay_rate,is_bn,is_dropout,dropout_rate,hidden_units,
                 num_cross_layers,num_threads=1,num_workers=0,task_id=0,job_type='chief',eval_steps=1000,
                 eval_start_delay=30,eval_throttle_secs=30):
        self._num_threads = num_threads
        self._num_epochs = num_epochs
        self._batch_size = batch_size
        self._embedding_size = embedding_size
        self._learning_rate = learning_rate
        self._optimizer = optimizer
        self._train_dataset_path = train_dataset_path
        self._val_dataset_path = val_dataset_path
        self._save_model_dir = save_model_dir
        self._task = task
        self.is_decy = is_decy
        self.decay_steps = decay_steps
        self.decay_rate = decay_rate
        self.is_bn = is_bn
        self.is_dropout = is_dropout
        self.dropout_rate = dropout_rate
        self.hidden_units = hidden_units
        self.num_cross_layers = num_cross_layers
        self.num_workers = num_workers
        self.task_id = task_id
        self.job_type = job_type
        self.export_path = export_path
        self.test_path = test_path
        self.eval_steps = eval_steps
        self.eval_start_delay = eval_start_delay
        self.eval_throttle_secs = eval_throttle_secs


    def build_model(self):
        first_feature_columns, second_feature_columns = build_feature()
        session_config = tf.ConfigProto(intra_op_parallelism_threads = 1, inter_op_parallelism_threads = 4, device_count = {'CPU': 4})

        config = tf.estimator.RunConfig().replace(save_checkpoints_steps=20000,
                                                  log_step_count_steps=500,session_config=session_config)
        params = {
            'learning_rate': self._learning_rate,
            'optimizer': self._optimizer,
            'first_feature_columns': first_feature_columns,
            'second_feature_columns': second_feature_columns,
            'is_decy': self.is_decy,
            'decay_steps': self.decay_steps,
            'decay_rate': self.decay_rate,
            'is_bn': self.is_bn,
            'is_dropout': self.is_dropout,
            'dropout_rate': self.dropout_rate,
            "hidden_units": self.hidden_units,
            "num_cross_layers":self.num_cross_layers,
            "batch_size":self._batch_size
        }
        model = tf.estimator.Estimator(
            model_fn=esmm_train,  # the training model_fn built above
            model_dir=self._save_model_dir,
            params=params,
            config=config
        )
        return model

    def train(self):
        model = self.build_model()

        train_spec = tf.estimator.TrainSpec(
            input_fn=lambda: tools.train_input_fn(file=self._train_dataset_path,num_epochs=self._num_epochs,batch_size=self._batch_size,
                                            num_workers=self.num_workers,task_id=self.task_id,job_type=self.job_type))

        val_spec = tf.estimator.EvalSpec(
            input_fn=lambda: tools.eval_input_fn(file=self._val_dataset_path, batch_size=self._batch_size),
            steps=self.eval_steps,
            start_delay_secs=self.eval_start_delay,
            throttle_secs=self.eval_throttle_secs)
        tf.estimator.train_and_evaluate(model, train_spec, val_spec)

    def export(self):  # export a SavedModel for serving
        model = self.build_model()
        # tools.getFeaDict() is assumed to return the serving-time parsing
        # spec, i.e. getFeatureDict(label=False).
        temp = tools.getFeaDict()
        serving_input_receiver_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(temp)
        model.export_savedmodel(self.export_path, serving_input_receiver_fn)

    def predict(self):
        model = self.build_model()
        predictions = model.predict(input_fn=lambda: tools.predict_input_fn(file=self.test_path,batch_size=self._batch_size))
        return predictions

    def evaluate(self):
        model = self.build_model()
        ret = model.evaluate(input_fn=lambda: tools.eval_input_fn(file=self._val_dataset_path, batch_size=self._batch_size),
                             steps=self.eval_steps)
        return ret

Finally, run it:

def run(args):

    model = Model(
        train_dataset_path=args.training_path,
        val_dataset_path=args.validation_path,
        save_model_dir=args.save_model_dir,
        num_epochs=args.num_epochs,
        batch_size=args.batch_size,
        optimizer=args.optimizer,
        embedding_size=args.embedding_size,
        learning_rate=args.lr,
        task=args.task_type,
        is_decy=args.is_decy,
        decay_steps=args.decay_steps,
        decay_rate=args.decay_rate,
        is_bn=args.is_bn,
        is_dropout=args.is_dropout,
        dropout_rate=args.dropout_rate,
        hidden_units=args.hidden_units,
        num_cross_layers=args.num_cross_layers,
        num_workers = args.num_workers,
        job_type = args.job_type,
        task_id = args.task_id,
        export_path=args.export_path,
        test_path=args.test_path,
        eval_steps=args.eval_steps,
        eval_start_delay=args.eval_start_delay,
        eval_throttle_secs=args.eval_throttle_secs
    )
    model.train()
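
A minimal entry point tying it together (a sketch; dispatching on args.task_type for export/predict is left out):

if __name__ == "__main__":
    run(init_model_args())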