Hive's TRANSFORM makes it fairly easy to plug custom row-level operations into a query. Below is a demo that uses TRANSFORM to run model prediction in a distributed fashion and load the predictions into Redis.
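Under TRANSFORM, Hive streams each input row to the user script as a tab-separated line on stdin, and every tab-separated line the script prints to stdout becomes an output row. A minimal pass-through transformer illustrates this contract (this sketch is not part of the original demo):
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Minimal TRANSFORM script: echoes every input row back to Hive.
# Hive streams rows as tab-separated lines on stdin; each line printed
# to stdout becomes one output row.
import sys

for line in sys.stdin:
    fields = line.rstrip('\n').split('\t')
    print '\t'.join(fields)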
The Hive script that invokes the transformer:
#!/bin/bash
. /etc/profile
field='user_info'
base_path=xxxxxxxxxxxx
table_path=xxxxxxxxxxxx
echo `date`": start ..."
sql_cmd="
SET hive.hadoop.supports.splittable.combineinputformat=true;
SET mapreduce.input.fileinputformat.split.maxsize=32000000;
SET mapreduce.input.fileinputformat.split.minsize.per.node=32000000;
SET mapreduce.input.fileinputformat.split.minsize.per.rack=32000000;
add file load_to_redis.py;
add file lib/python/dist-packages/rediscluster;
add file lib/python/dist-packages/redis;
select transform(user_id, xxx)
using 'python load_to_redis.py ${field} add'
as (info string)
from (
    select user_id, cost
    from tbname
    distribute by user_id
    sort by user_id
) tmp
"
echo `date`": finish loading data into redis."
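Before submitting the full job, the transformer can be smoke-tested locally by piping a few tab-separated rows through it, which exercises exactly the stdin/stdout contract Hive uses. A sketch (the sample rows are fabricated, and a reachable Redis cluster is required since load_to_redis.py connects at startup):
# -*- coding: utf-8 -*-
# Feed a couple of fabricated rows to the transformer exactly as Hive would:
# tab-separated lines on stdin. Column values here are illustrative.
import subprocess

sample = 'u_001\titem_42\t3\nu_002\titem_7\t5\n'
p = subprocess.Popen(['python', 'load_to_redis.py', 'user_info', 'add'],
                     stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, _ = p.communicate(sample)
print out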
The Python distributed prediction script:
# -*- coding: utf-8 -*-
"""
Run model prediction in a distributed fashion via Hive TRANSFORM.
Scoring the input in fixed-size blocks keeps memory bounded and
avoids OOM errors on the task nodes.
"""
import os
import sys
import numpy as np
import cPickle as pickle
from collections import defaultdict
from operator import itemgetter
from sklearn.ensemble import GradientBoostingClassifier

sys.path.append(os.getcwd())

if __name__ == '__main__':
    # Load the trained model and the bid data.
    model_file = sys.argv[1]
    model, bid_list, bid_price_list = pickle.load(open(model_file, 'rb'))
    # Score the input in blocks to bound memory usage.
    block_size = xxxxxxx
    previous_role = '-1'
    role_list, item_list, X = [], [], []
    u_avg_single_pay, price, u_total_cost = [], [], []
    for line in sys.stdin:
        line_split = line.strip().split('\t')
        role = line_split[0]
        item = line_split[1]
        # Flush only at a role boundary once the buffer is full, so that
        # one role's rows are never split across two blocks.
        if len(item_list) >= block_size and role != previous_role:
            # Score one block of rows.
            y_pred = model.predict_proba(np.stack(X, axis=0))[:, 1]
            # Emit the recommendation list for each role in the block.
            get_role_rec()
            # Reset the block buffers.
            role_list, item_list, X = [], [], []
            u_avg_single_pay, price, u_total_cost = [], [], []
        role_list.append(role)
        item_list.append(item)
        X.append([float(v) for v in line_split[2:]])  # feature columns (layout elided in the original)
        previous_role = role
    # The last block may hold fewer than block_size rows but still needs scoring.
    if X:
        y_pred = model.predict_proba(np.stack(X, axis=0))[:, 1]
        # Emit the recommendation list for each remaining role.
        get_role_rec()
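The get_role_rec() helper is elided in the original post. A minimal sketch of what it might do, assuming it ranks each role's buffered items by predicted probability and prints the top-N per role as TRANSFORM output; the function body, top_n, and the output layout are all assumptions (it would also account for the defaultdict and itemgetter imports above):
def get_role_rec(top_n=10):
    """Emit one recommendation line per role in the current block (sketch)."""
    # Pair each buffered item with its predicted probability, grouped by role.
    # role_list, item_list, and y_pred are the module-level block buffers.
    rec = defaultdict(list)
    for r, i, s in zip(role_list, item_list, y_pred):
        rec[r].append((i, s))
    for r, pairs in rec.iteritems():
        # Keep the top_n highest-scoring items for this role.
        top = sorted(pairs, key=itemgetter(1), reverse=True)[:top_n]
        # One line per role; Hive reads each stdout line as an output row.
        print '{}\t{}'.format(r, ','.join(i for i, _ in top))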
The Python script that writes to Redis:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import json
import datetime
sys.path.append(os.getcwd())
from rediscluster import StrictRedisCluster
field = sys.argv[1] # field name
op = sys.argv[2] # op
startup_nodes = [
{"host": "xxxxx", "port": "xxxxx"},
]
if __name__ == '__main__':
    # Connect to the Redis cluster.
    rs = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True, skip_full_coverage_check=True)
    p = rs.pipeline()
    vName = 'xxxxx'  # key prefix; the actual value is elided in the original
    cnt = 0
    for line in sys.stdin:
        line_split = line.strip().split('\t')
        rec_key = '_'.join([vName, line_split[0], line_split[1]])
        # Decide which field to write.
        if field == 'xxx':
            # Read the existing value first and update only its xxx field.
            former_value = rs.hget(rec_key, field)
            if former_value:
                v = json.loads(former_value)
                v['xxx'] = int(line_split[2])
                rec_value = json.dumps(v)
            else:
                rec_value = json.dumps({'xxx': int(line_split[2])})
        elif field == 'xxxxxxx':
            rec_value = json.dumps(int(line_split[3]))
        # Decide the operation: add or delete.
        if op == 'add':
            p.hset(rec_key, field, rec_value)
        elif op == 'del':
            p.hdel(rec_key, field)
        cnt += 1
        if cnt % 50000 == 0:
            # Flush the pipeline in batches to cap memory use and round trips.
            p.execute()
            print datetime.datetime.now(), ", {} processed".format(cnt)
    # Flush whatever is still buffered in the pipeline.
    p.execute()
    # Log the last record and the total count as a quick sanity check.
    print "{},{},{},{}".format(op, rec_key, field, rec_value)
    print "{} processed in total".format(cnt)
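Once the job finishes, a spot check confirms the writes landed. A minimal sketch, assuming the same cluster nodes as above; the key prefix and the sample user_id/item pair are illustrative placeholders:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Spot-check a written key after the load finishes.
from rediscluster import StrictRedisCluster

startup_nodes = [{"host": "xxxxx", "port": "xxxxx"}]
rs = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True,
                        skip_full_coverage_check=True)

rec_key = '_'.join(['xxxxx', 'u_001', 'item_42'])  # prefix_userid_item (illustrative)
# hgetall shows every field of the hash, so a stale or missing field is obvious.
print rs.hgetall(rec_key)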