Monitoring a Data Source's Message Interval and Alerting

This post shows how to build a real-time job with Apache Flink that monitors the interval between messages from a data source and raises an alert when the source falls silent. An unbounded stream acts as a periodic heartbeat and is connected to the actual data stream; a CoProcessFunction processes both streams, detects how long the data source has gone without sending a message, and prints the delay.

Monitoring the interval between messages from a data source and alerting

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.state.ListState;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.CoProcessFunction;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.util.Collector;

import java.util.Iterator;

public class MoniterSourceDemo {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();
        environment.setParallelism(1);
        environment.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime);

        /**
         *
         * Sample input for the socket source (fields: source, destination, eventTime, timestamp, md5)
         * 10.14.125.1,10.14.125.2,2021-04-15 01:00:33,1000,ab1
         * 10.14.125.1,10.14.125.2,2021-04-15 01:00:33,1200,ab2
         * 10.14.125.1,10.14.125.2,2021-04-15 01:00:33,1500,ab3
         * 10.14.125.1,10.14.125.2,2021-04-15 01:00:33,2000,ab4
         * 10.14.125.1,10.14.125.2,2021-04-15 01:00:33,3000,ab4
         * 10.14.125.1,10.14.125.2,2021-04-15 01:00:33,4000,ab4
         * 10.14.125.1,10.14.125.2,2021-04-15 01:00:33,5000,ab5
         * 10.14.125.1,10.14.125.2,2021-04-15 01:00:33,5600,ab1
         * 10.14.125.1,10.14.125.2,2021-04-15 01:00:33,10000,ab4
         * 10.14.125.1,10.14.125.2,2021-04-15 01:00:33,20000,ab4
         * 10.14.125.1,10.14.125.2,2021-04-15 01:00:33,30000,ab4
         *
         *
         * 10.14.125.2,10.14.125.1,2021-04-15 01:00:33,900,ab1
         * 10.14.125.2,10.14.125.1,2021-04-15 01:00:33,1200,ab2
         * 10.14.125.2,10.14.125.1,2021-04-15 01:00:33,2000,ab2
         * 10.14.125.2,10.14.125.1,2021-04-15 01:00:33,3000,ab4
         * 10.14.125.2,10.14.125.1,2021-04-15 01:00:33,6000,ab1
         * 10.14.125.2,10.14.125.1,2021-04-15 01:00:33,2000,ab3
         * 10.14.125.1,10.14.125.2,2021-04-15 01:00:33,500,ab4
         * 10.14.125.2,10.14.125.1,2021-04-15 01:00:33,5000,ab5
         */

        // Unbounded heartbeat stream: emits ("key", current time in ms) every 2 seconds
        KeyedStream<Tuple2<String, Long>, String> monitorStream = environment.addSource(new SourceFunction<Tuple2<String, Long>>() {
            private Boolean flag = true;

            @Override
            public void run(SourceContext<Tuple2<String, Long>> ctx) throws Exception {
                while (flag) {
                    Thread.sleep(2000L);
                    ctx.collect(new Tuple2<>("key", System.currentTimeMillis()));
                }
            }

            @Override
            public void cancel() {
                flag = false;
            }
        }).keyBy(new KeySelector<Tuple2<String, Long>, String>() {
            @Override
            public String getKey(Tuple2<String, Long> value) throws Exception {
                return value.f0;
            }
        });

        // Actual data source: parse each socket line into ("key", Event, receive time in ms)
        DataStreamSource<String> upStreamSource = environment.socketTextStream("hadoop01", 4401);
        SingleOutputStreamOperator<Tuple3<String, Event, Long>> upStream = upStreamSource.map(new MapFunction<String, Tuple3<String, Event, Long>>() {
            @Override
            public Tuple3<String, Event, Long> map(String value) throws Exception {
                String[] split = value.split(",");
                Event upEvent = Event.builder().source(split[0]).destination(split[1]).eventTime(split[2]).timestamp(split[3]).md5(split[4]).build();
                return new Tuple3<String, Event, Long>("key", upEvent, System.currentTimeMillis());
            }
        });

        KeyedStream<Tuple3<String, Event, Long>, String> keyedUpStream = upStream.keyBy(new KeySelector<Tuple3<String, Event, Long>, String>() {
            @Override
            public String getKey(Tuple3<String, Event, Long> value) throws Exception {
                return value.f0;
            }
        });

        SingleOutputStreamOperator<String> monitorResult = monitorStream.connect(keyedUpStream).process(new CoProcessFunction<Tuple2<String, Long>, Tuple3<String, Event, Long>, String>() {

            ListState<Tuple2<String, Long>> monitorState;
            ValueState<Tuple3<String, Event, Long>> upStreamState;

            @Override
            public void open(Configuration parameters) throws Exception {
                monitorState = getRuntimeContext().getListState(new ListStateDescriptor<Tuple2<String, Long>>("monitorState", TypeInformation.of(new TypeHint<Tuple2<String, Long>>(){})));
                upStreamState = getRuntimeContext().getState(new ValueStateDescriptor<Tuple3<String, Event, Long>>("upStreamState", TypeInformation.of(new TypeHint<Tuple3<String, Event, Long>>(){})));
            }

            @Override
            public void processElement1(Tuple2<String, Long> value, Context ctx, Collector<String> out) throws Exception {
                if (upStreamState.value() != null) {
                    monitorState.clear();
                    upStreamState.clear();
                } else {
                    monitorState.add(value);
                    // Find the earliest heartbeat recorded since the last data-source message
                    Iterator<Tuple2<String, Long>> iterator = monitorState.get().iterator();
                    Long minLong = Long.MAX_VALUE;
                    while (iterator.hasNext()) {
                        Tuple2<String, Long> next = iterator.next();
                        if (next.f1 < minLong) {
                            minLong = next.f1;
                        }
                    }
                    // Report how long no message has been received from the data source
                    out.collect("No message received from the data source for " + (value.f1 - minLong) + " ms!");
                }
            }

            @Override
            public void processElement2(Tuple3<String, Event, Long> value, Context ctx, Collector<String> out) throws Exception {
                upStreamState.update(value);
            }


        });

        monitorResult.print("result");

        environment.execute("Monitor data source");

    }

}
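
The Event class is not included in the listing above. Below is a minimal sketch of what the map function assumes: a POJO whose fields mirror the five comma-separated columns, with Lombok's @Data and @Builder generating the Event.builder()...build() chain (a hand-written builder would work just as well).

import lombok.Builder;
import lombok.Data;

// Minimal sketch of the Event POJO assumed by the map function above.
// Field names mirror the five comma-separated columns of the sample input.
@Data
@Builder
public class Event {
    private String source;       // source IP, e.g. 10.14.125.1
    private String destination;  // destination IP, e.g. 10.14.125.2
    private String eventTime;    // event time string, e.g. 2021-04-15 01:00:33
    private String timestamp;    // numeric column, kept as a String by the builder call
    private String md5;          // message identifier, e.g. ab1
}

To try the job, start a listener on hadoop01 (for example with nc -lk 4401) and paste the sample lines. While lines keep arriving, each heartbeat tick clears the state; once input stops, the job prints how long it has gone without a message, growing by roughly 2 seconds per tick.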

Specifying TypeInformation for tuple-typed state

getRuntimeContext().getListState(new ListStateDescriptor<Tuple2<String, Long>>("monitorState", TypeInformation.of(new TypeHint<Tuple2<String, Long>>(){})));
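
An equivalent way to build the same descriptor, shown here only as an alternative to the anonymous TypeHint (the post itself uses the TypeHint form), is the Types factory:

import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;

// Same state descriptor, with the tuple TypeInformation built via the Types factory
ListStateDescriptor<Tuple2<String, Long>> monitorStateDescriptor =
        new ListStateDescriptor<>("monitorState", Types.TUPLE(Types.STRING, Types.LONG));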

 
