# 读取Hive日志,将字段血缘关系发送到Datahub的demo
# (Demo: read Hive lineage logs and emit column-level lineage to DataHub.)

import json
from json import JSONDecodeError

import datahub.emitter.mce_builder as builder
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.emitter.rest_emitter import DatahubRestEmitter
from datahub.metadata.com.linkedin.pegasus2avro.dataset import (
    DatasetLineageType,
    FineGrainedLineage,
    FineGrainedLineageDownstreamType,
    FineGrainedLineageUpstreamType,
    Upstream,
    UpstreamLineage,
)

# Marker written by Hive's lineage hook; only log lines containing it are parsed.
search_keyword = "hooks.LineageLogger:"
# DataHub access token; empty string when the server does not require auth.
token = ''


def parse_vertice(vertices: list):
    """Map each vertex id to its database/table/column names.

    A vertex's "vertexId" is a dot-separated name such as "db.table.column"
    for COLUMN vertices. Entries with fewer than three dot-separated parts
    are skipped; non-COLUMN vertices get an empty "col" value.
    """
    result = {}
    for vertex in vertices:
        parts = vertex.get("vertexId", "").split(".")
        if len(parts) < 3:
            continue
        is_column = vertex.get("vertexType", "") == "COLUMN"
        result.setdefault(
            vertex.get("id", ""),
            {"db": parts[0], "tb": parts[1], "col": parts[-1] if is_column else ""},
        )
    return result


def parse_edge(edges: list):
    """Normalize raw edge dicts into {source, target, exp, type} records."""
    return [
        {
            "source": edge.get("sources", []),
            "target": edge.get("targets", []),
            "exp": edge.get("expression", ""),
            "type": edge.get("edgeType", ""),
        }
        for edge in edges
    ]


def get_column_depend(vertex_dict: dict, edge_list: list):
    """Resolve edges into concrete column-level dependencies.

    Args:
        vertex_dict: vertex id -> {"db", "tb", "col"}, as built by parse_vertice.
        edge_list: edge records, as built by parse_edge.

    Returns:
        A (tb, column_info_list) tuple. ``tb`` is the "db.tb" name of the last
        resolved target table (the downstream table of the statement);
        ``column_info_list`` holds tuples of
        (target_db, target_tb, target_col, source_db, source_tb, source_col, expression).

    PREDICATE edges (filter conditions) still contribute to ``tb`` but never
    produce column dependencies, matching the original behavior.
    """
    column_info_list = []
    tb = ""
    for edge in edge_list:
        source_arr = edge.get('source')
        target_arr = edge.get('target')
        expression = edge.get('exp')
        # Hoisted loop-invariant: whether this edge is a filter predicate.
        is_predicate = edge.get('type') == 'PREDICATE'

        for target_id in target_arr:
            target_dict = vertex_dict.get(target_id)
            if target_dict is None:
                continue
            # NOTE(review): last resolved target wins; assumes every target in
            # one statement belongs to the same output table — TODO confirm.
            tb = target_dict.get('db') + "." + target_dict.get('tb')
            if is_predicate:
                # Predicate edges carry no column lineage; skip the source scan.
                continue
            for source_id in source_arr:
                source_dict = vertex_dict.get(source_id)
                if source_dict is not None:
                    column_info_list.append(
                        (target_dict.get('db', ''),
                         target_dict.get('tb', ''),
                         target_dict.get('col', ''),
                         source_dict.get('db', ''),
                         source_dict.get('tb', ''),
                         source_dict.get('col', ''),
                         expression))

    return tb, column_info_list


class DataHubEmitter:
    """Reads Hive lineage-hook log lines and emits column-level lineage to DataHub."""

    def __init__(self, file_path, datahubUrl):
        # Platform name used when building dataset URNs.
        self.dataType = 'hive'
        self.emitter = DatahubRestEmitter(datahubUrl, token=token)
        self.file_path = file_path

    def read_hive_log(self):
        """Return the JSON payload of every LineageLogger line in the log file.

        Best-effort: any read failure is reported and an empty list returned.
        """
        content = []
        try:
            with open(self.file_path, 'r', encoding='utf-8', errors='replace') as log_file:
                for line in log_file:
                    if search_keyword in line:
                        # Keep only the JSON document that follows the hook marker.
                        content.append(line.split(search_keyword)[-1])
        except Exception as e:
            print(f"读取Hive日志文件失败:{e}")
        return content

    def genLineage(self, content):
        """Parse lineage JSON lines into emit-ready tuples.

        Args:
            content: list of JSON strings, as returned by read_hive_log().

        Returns:
            A list of (upstream_tables: set[str], downstream_table: str,
            fine_grained_lineages: list[FineGrainedLineage]) tuples; an empty
            list when ``content`` is empty. Unparseable lines are skipped
            with a message.
        """
        # BUGFIX: the original tested the module-level global ``file_content``
        # instead of the ``content`` parameter, and implicitly returned None
        # on empty input, which breaks iteration at the call site.
        if not content:
            print("日志文件没有数据, 请检查日志文件后再试")
            return []

        contents = []
        for line in content:
            upTbls = set()
            fLs = []
            column_dict = {}
            try:
                lineage_dict = json.loads(line)
            except JSONDecodeError:
                print("json解析错误: {}".format(line))
                continue
            vertex_dict = parse_vertice(lineage_dict.get('vertices', []))
            edge_list = parse_edge(lineage_dict.get('edges', []))
            downTbl, column_info = get_column_depend(vertex_dict, edge_list)
            for tp in column_info:
                # tp = (tgt_db, tgt_tb, tgt_col, src_db, src_tb, src_col, expr)
                upTbls.add(tp[3] + "." + tp[4])
                # Group upstream field URNs under their downstream field URN.
                column_dict.setdefault(
                    self.fldUrn(tp[0] + "." + tp[1], tp[2]), []
                ).append(self.fldUrn(tp[3] + "." + tp[4], tp[5]))

            for down_fld, up_flds in column_dict.items():
                fLs.append(FineGrainedLineage(
                    upstreamType=FineGrainedLineageUpstreamType.FIELD_SET,
                    upstreams=up_flds,
                    downstreamType=FineGrainedLineageDownstreamType.FIELD,
                    downstreams=[down_fld])
                )
            contents.append((upTbls, downTbl, fLs))
        return contents

    def datasetUrn(self, tbl):
        """Build a DataHub dataset URN for a "db.tb" table name."""
        return builder.make_dataset_urn(self.dataType, tbl)

    def fldUrn(self, tbl, fld):
        """Build a DataHub schema-field URN for a column of a "db.tb" table."""
        return builder.make_schema_field_urn(self.datasetUrn(tbl), fld)

    def emitterLineageMcp(self, upTbs, downTb, fineGrainedLineages):
        """Emit one UpstreamLineage aspect (table- plus column-level) to DataHub.

        Args:
            upTbs: iterable of upstream "db.tb" names.
            downTb: downstream "db.tb" name.
            fineGrainedLineages: list of FineGrainedLineage objects.
        """
        upstreams = [
            Upstream(dataset=self.datasetUrn(upTb), type=DatasetLineageType.TRANSFORMED)
            for upTb in upTbs
        ]
        fieldLineages = UpstreamLineage(upstreams=upstreams, fineGrainedLineages=fineGrainedLineages)

        lineageMcp: MetadataChangeProposalWrapper = MetadataChangeProposalWrapper(
            entityUrn=self.datasetUrn(downTb), aspect=fieldLineages)

        self.emitter.emit_mcp(lineageMcp)
        print(f"血缘发送成功: {upTbs} <> {downTb}")


if __name__ == '__main__':
    # First argument is the Hive log path, second the DataHub GMS endpoint.
    datahubEmitter = DataHubEmitter("日志路径", "http://xxxx:8080/")
    file_content = datahubEmitter.read_hive_log()
    cs = datahubEmitter.genLineage(file_content)
    # Guard: genLineage may yield nothing (e.g. empty log file); avoid
    # iterating over None / doing needless work.
    for upTables, downTable, fLineages in cs or []:
        datahubEmitter.emitterLineageMcp(upTables, downTable, fLineages)

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值