Counting distinct users in Flink (deduplicated counting)

1. Data

An order table; each row carries a shop id, a user id, and a payment amount:

"shop_id,user_id,pay_amount",
"shop-1,user-1,1",
"shop-1,user-2,1",
"shop-1,user-2,1",
"shop-1,user-3,1",
"shop-1,user-3,1",
"shop-1,user-1,1",
"shop-1,user-2,1",
"shop-1,user-4,1",
"shop-2,user-4,1",
"shop-2,user-4,1",
"shop-2,user-2,1"

2. Runnable example

import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;


public class Test03 {
    public static void main(String[] args) throws Exception {
        // 1. Create the streaming execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // 2. Create the table environment
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
        // 3. Read the source data (one CSV line per order)
        SingleOutputStreamOperator<String> lineStream = env
                .fromElements("shop-1,user-1,1",
                        "shop-1,user-2,1",
                        "shop-1,user-2,1",
                        "shop-1,user-3,1",
                        "shop-1,user-3,1",
                        "shop-1,user-1,1",
                        "shop-1,user-2,1",
                        "shop-1,user-4,1",
                        "shop-2,user-4,1",
                        "shop-2,user-4,1",
                        "shop-2,user-2,1"
                );
        // 4. Convert the stream into a table
        Table table = tableEnv.fromDataStream(lineStream);

        // 5. Register the table as a temporary view
        tableEnv.createTemporaryView("tableTmp", table);

        // 6. Count distinct users (num) and total payment amount (gmv) per shop
        Table table1 = tableEnv.sqlQuery("select shop_id,sum(num) as num,sum(gmv) as gmv from (select shop_id,user_id, 1 as num,sum(gmv) as gmv from (select SPLIT_INDEX(f0,',',0) as shop_id,SPLIT_INDEX(f0,',',1) as user_id,cast(SPLIT_INDEX(f0,',',2) as bigint) as gmv from tableTmp) t1 group by shop_id,user_id) t2 group by shop_id");

        // 7. Print the result as a retract stream
        tableEnv.toRetractStream(table1, Row.class).print(">>>>>>");

        // 8. Execute the job
        env.execute("test");
    }
}
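
A side note on step 7: on recent Flink versions the toRetractStream conversion used above is considered legacy, and toChangelogStream is the newer equivalent. A minimal sketch, assuming Flink 1.13 or later (not part of the original example):

```
// Emits the same changelog; each Row carries its change kind (+I, -U, +U) instead of a boolean flag
tableEnv.toChangelogStream(table1).print(">>>>>>");
```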

The SQL from step 6, formatted:

select
  shop_id,
  sum(num) as num,
  sum(gmv) as gmv
from
  (
    select
      shop_id,
      user_id,
      1 as num,
      sum(gmv) as gmv
    from
      (
        select
          SPLIT_INDEX(f0, ',', 0) as shop_id,
          SPLIT_INDEX(f0, ',', 1) as user_id,
          cast(SPLIT_INDEX(f0, ',', 2) as bigint) as gmv
        from
          tableTmp
      ) t1
    group by
      shop_id,
      user_id
  ) t2
group by
  shop_id
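
The nested GROUP BY above deduplicates by first collapsing the rows to one per (shop_id, user_id). A more direct formulation is sketched below as an alternative; Flink SQL also supports COUNT(DISTINCT) on unbounded streams and produces the same kind of retractions:

select
  SPLIT_INDEX(f0, ',', 0) as shop_id,
  count(distinct SPLIT_INDEX(f0, ',', 1)) as num,
  sum(cast(SPLIT_INDEX(f0, ',', 2) as bigint)) as gmv
from
  tableTmp
group by
  SPLIT_INDEX(f0, ',', 0)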

3. Output

The final value for each shop is the last accumulate message emitted for it:

>>>>>>:7> (true,+U[shop-2, 2, 3])
>>>>>>:1> (true,+U[shop-1, 4, 8])

The complete retract-stream log:

>>>>>>:7> (true,+I[shop-2, 1, 1])
>>>>>>:1> (true,+I[shop-1, 1, 1])
>>>>>>:1> (false,-U[shop-1, 1, 1])
>>>>>>:7> (false,-U[shop-2, 1, 1])
>>>>>>:1> (true,+U[shop-1, 2, 2])
>>>>>>:7> (true,+U[shop-2, 2, 2])
>>>>>>:1> (false,-U[shop-1, 2, 2])
>>>>>>:7> (false,-U[shop-2, 2, 2])
>>>>>>:1> (true,+U[shop-1, 1, 1])
>>>>>>:7> (true,+U[shop-2, 1, 1])
>>>>>>:1> (false,-U[shop-1, 1, 1])
>>>>>>:7> (false,-U[shop-2, 1, 1])
>>>>>>:7> (true,+U[shop-2, 2, 3])
>>>>>>:1> (true,+U[shop-1, 2, 3])
>>>>>>:1> (false,-U[shop-1, 2, 3])
>>>>>>:1> (true,+U[shop-1, 3, 4])
>>>>>>:1> (false,-U[shop-1, 3, 4])
>>>>>>:1> (true,+U[shop-1, 2, 3])
>>>>>>:1> (false,-U[shop-1, 2, 3])
>>>>>>:1> (true,+U[shop-1, 3, 5])
>>>>>>:1> (false,-U[shop-1, 3, 5])
>>>>>>:1> (true,+U[shop-1, 2, 3])
>>>>>>:1> (false,-U[shop-1, 2, 3])
>>>>>>:1> (true,+U[shop-1, 3, 6])
>>>>>>:1> (false,-U[shop-1, 3, 6])
>>>>>>:1> (true,+U[shop-1, 4, 7])
>>>>>>:1> (false,-U[shop-1, 4, 7])
>>>>>>:1> (true,+U[shop-1, 3, 6])
>>>>>>:1> (false,-U[shop-1, 3, 6])
>>>>>>:1> (true,+U[shop-1, 4, 8])

4. How it works

How Flink retract streams work
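
In the output above each message is a pair of a boolean flag and a row: true (+I/+U) accumulates a new result row, false (-U) retracts the previously emitted row for the same group. Every incoming order therefore retracts the shop's old (num, gmv) pair and then emits the updated one, and the last true message per shop is the current answer. Below is a minimal sketch (the class and method names are illustrative, not Flink API) of how a consumer of the retract stream could rebuild the current result table from these messages:

```
import org.apache.flink.types.Row;

import java.util.HashMap;
import java.util.Map;

// Illustrative only: rebuild the current result table from (flag, row) retract messages.
public class RetractApplier {

    // Multiset of the result rows that are currently valid
    private final Map<Row, Integer> resultTable = new HashMap<>();

    public void apply(boolean accumulate, Row row) {
        if (accumulate) {
            // true -> the new row is added to the result
            resultTable.merge(row, 1, Integer::sum);
        } else {
            // false -> a previously emitted row is retracted
            resultTable.merge(row, -1, Integer::sum);
            resultTable.remove(row, 0);
        }
    }

    public Map<Row, Integer> current() {
        return resultTable;
    }
}
```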

Besides the SQL approach above, Flink can implement the same full-history deduplicated count directly with keyed state (MapState). The steps are as follows:

1. Define a MapState that remembers which values have already been seen for the current key (the handle is obtained from the runtime context inside open(), as shown in the next step):

```
MapState<String, Long> countState = getRuntimeContext().getMapState(
        new MapStateDescriptor<>("countState", String.class, Long.class));
```

2. In the processElement method of a KeyedProcessFunction, check whether the current value already exists in the state; if not, add it and emit the updated distinct count:

```
public static class CountDistinct
        extends KeyedProcessFunction<String, Tuple2<String, Long>, Long> {

    private transient MapState<String, Long> countState;

    @Override
    public void open(Configuration parameters) {
        // State handles must be obtained in open(), not at construction time
        countState = getRuntimeContext().getMapState(
                new MapStateDescriptor<>("countState", String.class, Long.class));
    }

    @Override
    public void processElement(Tuple2<String, Long> value,
                               Context ctx,
                               Collector<Long> out) throws Exception {
        // Only a value that has not been seen for this key changes the count
        if (!countState.contains(value.f1.toString())) {
            countState.put(value.f1.toString(), 1L);
            // Emit the current distinct count by counting the stored entries
            long distinct = 0L;
            for (String ignored : countState.keys()) {
                distinct++;
            }
            out.collect(distinct);
        }
    }
}
```

3. Configure a state backend in the job and run it:

```
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setStateBackend(new RocksDBStateBackend("hdfs://localhost:9000/flink/checkpoints"));

DataStream<Tuple2<String, Long>> input = env.fromElements(
        Tuple2.of("key", 1L),
        Tuple2.of("key", 1L),
        Tuple2.of("key", 2L),
        Tuple2.of("key", 3L),
        Tuple2.of("key", 2L),
        Tuple2.of("key", 4L),
        Tuple2.of("key", 5L),
        Tuple2.of("key", 3L)
);

input.keyBy(value -> value.f0)
        .process(new CountDistinct())
        .print();

env.execute();
```

This gives a full-history deduplicated count: for the sample input above, the emitted counts for the key "key" are 1, 2, 3, 4, 5, one value per previously unseen element.
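
A note on state size: both the SQL job and the MapState version keep one state entry per distinct user per shop, so the state grows with the cardinality of the input. On a truly unbounded stream it is usually bounded with a time-to-live, at the cost of users being counted again after their entry expires. A sketch of attaching a TTL to the dedup state, assuming that trade-off is acceptable:

```
// Sketch: expire dedup entries after one day so the state does not grow forever
StateTtlConfig ttl = StateTtlConfig.newBuilder(Time.days(1)).build();
MapStateDescriptor<String, Long> descriptor =
        new MapStateDescriptor<>("countState", String.class, Long.class);
descriptor.enableTimeToLive(ttl);
```

For the SQL variant, the corresponding knob is the table config's idle state retention (the table.exec.state.ttl option on newer Flink versions).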