Key points about jg

1. jg data is split into vertex tables and edge tables. Each vertex has its own jid plus its attribute fields; the jid is globally unique, so no two vertices share the same jid.

2. An edge table stores two jids: start_jid for the start vertex and end_jid for the end vertex, along with the edge's own attribute fields.

3. Because jg data is stored in HBase, the index (i.e. the jid) has to be salted/hashed so that writes do not pile up on a single region (hotspot); see the sketch right below.
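As a minimal sketch of point 3 (make_jid is an illustrative helper name, not a fixed API; the full script later in this post builds the jid the same way inline):

import random

def make_jid(index, table_index):
    # Prepend a random digit 1-9 so that rows generated in index order
    # scatter across HBase regions instead of hitting a single one (hotspot).
    # table_index gives each vertex table its own non-overlapping id range,
    # which keeps jids globally unique across tables.
    return int(str(random.randint(1, 9)) + str(index + table_index))

# e.g. row 5 of vertex_phone (offset 2000000000) might become 72000000005
print(make_jid(5, 2000000000))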

 

A worked example of writing data from Hive into jg, including the rowkey (jid) design: the script reads the vertex and edge source tables from Hive, assigns each vertex a salted, globally unique jid, resolves start_id/end_id for every edge, and writes the results back as *_jg tables.

# encoding=utf-8
import time
import os
import sys
import random
import copy, re, math
import types
import json
# from pyspark.sql.functions import Column
import logging
from collections import OrderedDict
from datetime import datetime, timedelta
from pyspark import SparkConf
from pyspark.sql import functions as fun, SparkSession
from pyspark.sql.window import Window
from pyspark.sql.types import Row, StringType, StructField, StructType, IntegerType, LongType

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

reload(sys)
sys.setdefaultencoding('utf-8')  # Python 2 idiom: default to utf-8 so Chinese field values can be encoded implicitly

warehouse_location = '/user/hive/warehouse'
conf = SparkConf()  #     .set('spark.driver.maxResultSize', '10g')
# conf.set('spark.yarn.executor.memoryOverhead', '30g')
# conf.set('spark.yarn.am.cores', 5)
# conf.set('spark.executor.memory', '30g')
# conf.set('spark.executor.instance', 50)
# conf.set('spark.executor.cores', 8)
# conf.set('spark.executor.extraJavaOptions', '-XX:+PrintGCTimeStamps -XX:+PrintGCDetails -XX:+UseG1GC')
conf.set('spark.sql.warehouse.dir', warehouse_location)

#
spark = SparkSession \
    .builder \
    .master("local[*]") \
    .appName("vertex_edge") \
    .config(conf=conf) \
    .enableHiveSupport() \
    .getOrCreate()

# from pangu.common import *
#
# commonUtil = CommonUdf(spark)

hive_cp = 20181013
path_prefix = '/user/parquet/edge_person_app_person_detail'

def read_parquet(path):
    '''Read a parquet file under path_prefix.'''
    df = spark.read.parquet(os.path.join(path_prefix, path))
    return df
def write_parquet(df, path):
    '''Write a DataFrame as parquet under path_prefix, overwriting any existing data.'''
    df.write.mode("overwrite").parquet(os.path.join(path_prefix, path))

def write_csv(df, path, header=False, delimiter='\t'):
    '''Write a DataFrame as csv under path_prefix/csv/.'''
    df.write.mode("overwrite").csv(os.path.join(path_prefix, 'csv/', path), header=header, sep=delimiter, quote='"', escape='"')

def get_table_index(tablename):
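    '''Return the jid offset for a vertex table: each table owns its own
    non-overlapping id range so jids never collide across tables.'''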
    table_index={
        'vertex_person':0,
        'vertex_phone':2000000000,
    }
    return table_index[tablename]



vertex_table_info = OrderedDict()

# Vertex field configuration (columns read from each source vertex table)
vertex_table_info['vertex_person'] = ['zjhm','zjzl','gj','xm',]
vertex_table_info['vertex_phone'] = ['phone','xm','province','city','isp']


# Edge field configuration (columns read from each source edge table)
edge_table_info = OrderedDict()

edge_table_info['edge_person_fatherIs_person'] = ['sfzh','fqsfzh','start_time','end_time']
edge_table_info['edge_groupcall'] = ['start_phone','end_phone','start_time','end_time','call_total_duration','call_total_times']

# Relation configuration for every edge table.
# key: edge table name; values, in order: table the start_id comes from, its join field,
# table the end_id comes from, its join field (direction: start vertex -> end vertex)
edge_info = {
    'edge_person_fatherIs_person':['vertex_person_jg','zjhm','vertex_person_jg','zjhm'],
    'edge_groupcall':['vertex_phone_jg','phone','vertex_phone_jg','phone'],
}


index_list = ['jid']
def deal_vertex(tablename):
    logger.info('dealing %s'%tablename)

    source_table_info = vertex_table_info[tablename]

    zd = ','.join(source_table_info)

    table_index = get_table_index(tablename)
    sql = 'select %s from bbd.%s'%(zd,tablename)
    df = spark.sql(sql)

    def map_rdd(data):
        ret = []
        row,index = data
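        # Salt the jid: a random leading digit 1-9 spreads sequential indexes
        # across HBase regions (hotspot prevention); table_index keeps the
        # id ranges of different vertex tables from overlapping.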
        jid = int(str(random.randint(1,9)) + str(index + table_index))
        ret.append(jid)

        row = list(row)
        for item in row:
            ret.append(item)
        # returns (jid, zjhm, zjzl, gj, xm): effectively replaces the zipWithIndex index on each row with the salted jid before saving
        return tuple(ret)

    rdd = df.rdd.zipWithIndex().map(map_rdd)
    # New schema: the index column becomes a jid field (kept strictly unique), and the result is re-saved as the jg vertex table
    new_schema = index_list + source_table_info
    res = spark.createDataFrame(rdd,new_schema)
    res.write.mode('overwrite').format('orc').saveAsTable('bbd.%s_jg'%tablename)
    logger.info('bbd.%s_jg done' % tablename)


edge_zd = ['start_id', 'end_id']
def deal_edge(tablename):

    source_table_info = edge_table_info[tablename]
    start_tablename = edge_info[tablename][0]
    start_id = edge_info[tablename][1]
    end_tablename = edge_info[tablename][2]
    end_id = edge_info[tablename][3]

    # Start and end node fields in the source edge table
    s_start_id = source_table_info[0]
    s_end_id = source_table_info[1]

    zd = ','.join(source_table_info)

    df = spark.sql('select %s from bbd.%s'%(zd,tablename))
    df.createOrReplaceTempView('df')


    df_1 = spark.sql('select jid as start_id,%s as s_start from bbd.%s'%(start_id, start_tablename))
    df_2 = spark.sql('select jid as end_id,%s as s_end from bbd.%s'%(end_id, end_tablename))

    df_1.createOrReplaceTempView('df_1')
    df_2.createOrReplaceTempView('df_2')

    # Using the relations already present in the edge table, look up the corresponding vertex rows and write
    # their jids in as start_id and end_id, ready for the graph store; the remaining edge fields are kept as well.
    # df1 = df.join(df_1,Column(s_start_id) == Column('s_start'),'left').drop('s_start')
    # df2 = df1.join(df_2,Column(s_end_id) == Column('s_end'),'left').drop('s_end')

    sql1 = 'select a.*,b.start_id from df a left join df_1 b on a.%s=b.s_start'%s_start_id
    spark.sql(sql1).createOrReplaceTempView('df_3')
    sql2 = 'select a.*,b.end_id from df_3 a left join df_2 b on a.%s=b.s_end'%s_end_id
    df2 = spark.sql(sql2)

    # df2 = df1.join(df_2,df.columns(s_end_id) == df_2.columns('s_end'),'left').drop('s_end')
    # df1 = df.join(df_1,df.columns(s_start_id) == df_1.columns('s_start'),'left').drop('s_start')

    res_zd = edge_zd + source_table_info

    res = df2.select(res_zd).where('start_id is not null and end_id is not null')
    res.write.mode('overwrite').format('orc').saveAsTable('bbd.%s_jg'%tablename)
    logger.info('bbd.%s_jg done' % tablename)

def get_partitions():
    for key,value in vertex_table_info.iteritems():
        logger.info(key)
        df = spark.sql('show partitions bbd.%s' % key)
        df.show()
    for key,value in edge_table_info.iteritems():
        logger.info(key)
        df = spark.sql('show partitions bbd.%s' % key)
        df.show()

if __name__ == '__main__':

    logger.info(
        '================================start time:%s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))

    for key,value in vertex_table_info.iteritems():
        deal_vertex(key)
    for key,value in edge_table_info.iteritems():
        deal_edge(key)



    logger.info('================================end time:%s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))

 
