PySpark Structured Streaming demo: read two Kafka topics and keep a running count of IP occurrences

# -*- coding: utf-8 -*-
"""
 @CreateTime :2021/1/5 15:26
 @Author : Liangde
 @Description :

 @Modify:

"""

from pyspark import SparkConf
from pyspark.sql import SparkSession
from conf.setting import KAFKA_CONFIG




"""
    设置任务 常量
"""

TOPIC = KAFKA_CONFIG["TOPIC"]
MAX_OFFSETS_PER_TRIGGER = KAFKA_CONFIG["MAX_OFFSETS_PER_TRIGGER"]
PROCESSING_TIME = KAFKA_CONFIG["PROCESSING_TIME"]

BOOTSTRAP_SERVERS = KAFKA_CONFIG["BOOTSTRAP_SERVERS"]
STARTING_OFFSETS = KAFKA_CONFIG["STARTING_OFFSETS"]


"""

    1、 订阅 kafka 的两个主题

"""

if __name__ == '__main__':

    """
        初始化SparkConf shuffle 分区设为 60
        提交给 yarn 来处理
        建立 spark 对象,并支持启用 hive 的 UDF 函数
    """
    conf = SparkConf() \
        .setAppName('structuredStreamingCleanFFile') \
        .set("spark.sql.shuffle.partitions", "60") \
        .set("spark.streaming.stopGracefullyOnShutdown", "true") \
        .set("spark.cleaner.referenceTracking.cleanCheckpoints", "true") \
        .set("spark.executor.memoryOverhead", "2G")

    # enableHiveSupport() makes Hive UDFs available, as described above
    spark = SparkSession.builder.config(conf=conf).enableHiveSupport().getOrCreate()

    """
        创建 读取 Kafka Source 流
    """

    kafkaSourceFDF = spark.readStream.format("kafka") \
        .option("kafka.bootstrap.servers", BOOTSTRAP_SERVERS) \
        .option("subscribe", TOPIC) \
        .option("startingOffsets", STARTING_OFFSETS) \
        .option("maxOffsetsPerTrigger", MAX_OFFSETS_PER_TRIGGER) \
        .load() \
        .selectExpr("CAST(value AS STRING)", "CAST(key AS STRING) AS key")

    # createOrReplaceTempView() returns None, so register the view in a
    # separate statement instead of chaining it into the assignment.
    kafkaSourceFDF.createOrReplaceTempView("dfTable")

    kafkaSourceFDF2 = spark.sql("""
        with temp as (
            select
                line_list[0] as f_ip,
                line_list[2] as t_ip,
                key as cseq
            from (
                select split(value, ',') as line_list, key from dfTable
            ) t0
        )

        select t1.ip, t1.cseq, count(1) as cnt
        from (
            select f_ip as ip, cseq from temp
            union all
            select t_ip as ip, cseq from temp
        ) t1
        group by t1.ip, t1.cseq
        order by cnt desc
    """)



    consoleQuery = kafkaSourceFDF2.writeStream \
        .trigger(processingTime=PROCESSING_TIME) \
        .format("console") \
        .outputMode("complete") \
        .start()

    consoleQuery.awaitTermination()
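
For reference, the same union-and-count logic can be expressed with the DataFrame API instead of a temp view. A minimal sketch, assuming the same comma-separated value layout and the kafkaSourceFDF stream defined above (names here are illustrative):

from pyspark.sql import functions as F

# Split the CSV payload once and pull out both IP fields plus the key.
parsed = kafkaSourceFDF \
    .withColumn("line_list", F.split(F.col("value"), ",")) \
    .select(F.col("line_list")[0].alias("f_ip"),
            F.col("line_list")[2].alias("t_ip"),
            F.col("key").alias("cseq"))

# Stack f_ip and t_ip into a single ip column, then count per (ip, cseq).
ipCounts = parsed.select(F.col("f_ip").alias("ip"), "cseq") \
    .union(parsed.select(F.col("t_ip").alias("ip"), "cseq")) \
    .groupBy("ip", "cseq") \
    .count() \
    .orderBy(F.col("count").desc())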


outputMode("") has three options:
Complete Mode (the only valid choice here: this query both aggregates and sorts, and ORDER BY on a streaming aggregation is only supported in complete mode)
Append Mode (not supported for aggregated queries unless a watermark is defined on an event-time column)
Update Mode (supports aggregation, but not the ORDER BY used in this query)
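
As a concrete illustration of the last point: if the ORDER BY is dropped, the same kind of aggregation is accepted in update mode, which only prints the rows whose counts changed in the current trigger. A hypothetical sketch reusing the dfTable view registered above (this variant counts only the first IP field):

updateQuery = spark.sql("""
    select split(value, ',')[0] as ip, key as cseq, count(1) as cnt
    from dfTable
    group by split(value, ',')[0], key
""").writeStream \
    .trigger(processingTime=PROCESSING_TIME) \
    .format("console") \
    .outputMode("update") \
    .start()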

setting.py

# -*- coding: utf-8 -*-
"""
 @CreateTime :2020/12/14 18:23
 @Author : Liangde
 @Description :
        Stores the Kafka-related configuration
 @Modify:

"""

KAFKA_CONFIG = {
    'TOPIC': "sip_kfk_fmt_f_r2p6,sip_kfk_fmt_s_r2p6",
    'MAX_OFFSETS_PER_TRIGGER': 1000,
    'PROCESSING_TIME': '5 seconds',
    'BOOTSTRAP_SERVERS': "cdhtest005:9092,cdhtest002:9092,cdhtest003:9092",
    'STARTING_OFFSETS': "latest"
}
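
To run the demo on YARN, the Kafka source connector has to be supplied at submit time, since it is not bundled with Spark. A hedged example command (the artifact version must match your cluster's Spark/Scala build; shown here for Spark 2.4 with Scala 2.11, and the script name is illustrative):

spark-submit \
    --master yarn \
    --deploy-mode cluster \
    --packages org.apache.spark:spark-sql-kafka-0-10_2.11:2.4.0 \
    structured_streaming_ip_count.py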


