Flink KeyedState

State

StateBackend

Checkpointing

Restart strategies

CheckpointingMode

Restart strategies in detail

Fixed-delay restart strategy (fixed number of restarts)

 

package cn._51doit.flink.day06;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * Restart strategies of a Flink job
 *
 * 1. Restart a fixed number of times, with a configurable delay before each attempt.
 * 2. The number of restarts cannot exceed the configured limit; once it does, the job exits.
 */
public class RestartStrategyDemo1 {

    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        //Configure the restart strategy. Strictly speaking it is not the JobManager or a TaskManager that restarts, but the subTasks inside the TaskManagers.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, Time.seconds(5)));//restart 5 seconds after each failure; at most 3 failures are tolerated, the 4th failure makes the job exit
      //Although a restart strategy is configured, the intermediate results accumulated before a failure are lost after the restart, because checkpointing is not enabled.

        DataStreamSource<String> lines = env.socketTextStream("localhost", 8888);

        SingleOutputStreamOperator<Tuple2<String, Integer>> wordAndOne = lines.flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
            @Override
            public void flatMap(String line, Collector<Tuple2<String, Integer>> out) throws Exception {
                if (line.startsWith("error")) {
                    throw new RuntimeException("Bad record encountered, throwing an exception!");
                }
                String[] words = line.split(" ");
                for (String word : words) {
                    out.collect(Tuple2.of(word, 1));
                }
            }
        });

        wordAndOne.keyBy(t -> t.f0).sum(1).print();

        env.execute();
      
      //the intermediate results are lost on every restart

    }
}

With checkpointing enabled, the default restart strategy is to restart without limit (effectively a fixed-delay strategy with Integer.MAX_VALUE attempts).

package cn._51doit.flink.day06;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * Restart strategies of a Flink job
 *
 * If checkpointing is enabled, the default restart strategy is to restart without limit!
 */
public class RestartStrategyDemo2 {

    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        //Enable checkpointing every 5 seconds, i.e. every 5 seconds the state is snapshotted to the state backend (by default the JobManager's memory)
        env.enableCheckpointing(5000);

        DataStreamSource<String> lines = env.socketTextStream("localhost", 8888);

        SingleOutputStreamOperator<Tuple2<String, Integer>> wordAndOne = lines.flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
            @Override
            public void flatMap(String line, Collector<Tuple2<String, Integer>> out) throws Exception {
                if (line.startsWith("error")) {
                    throw new RuntimeException("Bad record encountered, throwing an exception!");
                }
                String[] words = line.split(" ");
                for (String word : words) {
                    out.collect(Tuple2.of(word, 1));
                }
            }
        });

        wordAndOne.keyBy(t -> t.f0).sum(1).print();

        env.execute();
		
      //the intermediate results are not lost on restart

    }
}

Failure-rate restart strategy

Only a limited number of restarts is allowed within a given time interval; when the next interval starts, the failure count is measured anew.

package cn._51doit.flink.day06;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * Restart strategies of a Flink job
 *
 * Failure-rate restart strategy (only a limited number of restarts is allowed within a time interval; when the interval is over, the failures are counted anew)
 */
public class RestartStrategyDemo3 {

    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        //at most 3 restarts within 30 seconds, with a 2-second delay between restart attempts
        env.setRestartStrategy(RestartStrategies.failureRateRestart(3, Time.seconds(30), Time.seconds(2)));

        DataStreamSource<String> lines = env.socketTextStream("localhost", 8888);

        SingleOutputStreamOperator<Tuple2<String, Integer>> wordAndOne = lines.flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
            @Override
            public void flatMap(String line, Collector<Tuple2<String, Integer>> out) throws Exception {
                if (line.startsWith("error")) {
                    throw new RuntimeException("Bad record encountered, throwing an exception!");
                }
                String[] words = line.split(" ");
                for (String word : words) {
                    out.collect(Tuple2.of(word, 1));
                }
            }
        });

        wordAndOne.keyBy(t -> t.f0).sum(1).print();

        env.execute();


    }
}

State in detail

Flink has two kinds of state: KeyedState (the state used after a keyBy) and OperatorState (the state used without a keyBy).

KeyedState: ValueState, conceptually Map<Key, Value>

You do not need to deal with the key yourself, only with the value.

Hand-rolling the state logic instead of using Flink's state API

For ValueState, Flink's built-in state handling is what you get when you call sum or reduce on the KeyedStream produced by keyBy.

Here, instead of calling sum or reduce, we implement the same behaviour ourselves.

package cn._51doit.flink.day06;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * The underlying implementation idea of ValueState
 *
 * Flink has two kinds of state: KeyedState (state after a keyBy) and OperatorState (state without a keyBy)
 *
 * ValueState is one kind of KeyedState
 *
 */
public class ValueStateDemo1 {

    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        //configure the restart strategy
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(5, 5000));

        DataStreamSource<String> lines = env.socketTextStream("localhost", 8888);

        SingleOutputStreamOperator<Tuple2<String, Integer>> wordAndOne = lines.flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
            @Override
            public void flatMap(String line, Collector<Tuple2<String, Integer>> out) throws Exception {
                if (line.startsWith("error")) {
                    throw new RuntimeException("Bad record encountered, throwing an exception!");
                }
                String[] words = line.split(" ");
                for (String word : words) {
                    out.collect(Tuple2.of(word, 1));
                }
            }
        });

        KeyedStream<Tuple2<String, Integer>, String> keyedStream = wordAndOne.keyBy(t -> t.f0);

        SingleOutputStreamOperator<Tuple2<String, Integer>> res = keyedStream.map(new MapFunction<Tuple2<String, Integer>, Tuple2<String, Integer>>() {

            private Integer counter = 0;

            @Override
            public Tuple2<String, Integer> map(Tuple2<String, Integer> tp) throws Exception {
                Integer current = tp.f1;
                counter += current;
                tp.f1 = counter;
                return tp;
            }
        });

        res.print();

        env.execute();
      
      //in:
      //spark
      //spark
      //flink
      //flink
      //flink
      //flink
      //hive
      
      //out:
      //1>(spark,1)
      //1>(spark,2)
      //4>(flink,1)
      //4>(flink,2)
      //4>(flink,3)
      //4>(flink,4)
      //1>(hive,3)

	//this hand-rolled version cannot tell apart different groups (keys) that land in the same partition
    //after a restart the counter variable is garbage-collected, so the counts reset to zero
    //to distinguish the groups within one partition, the next version uses a Map instead of a single Integer

    }
}

To tell the different groups within the same partition apart, the next version uses a Map instead of a single Integer.

package cn._51doit.flink.day06;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

import java.util.HashMap;
import java.util.Map;

/**
 * The underlying implementation idea of ValueState
 * <p>
 * Flink has two kinds of state: KeyedState (state after a keyBy) and OperatorState (state without a keyBy)
 * <p>
 * ValueState is one kind of KeyedState
 *
 * 1. KeyedState is backed by a Map structure.
 * 2. For fault tolerance you must enable checkpointing AND use Flink's state programming API (i.e. store the intermediate results in Flink's dedicated state variables).
 *
 */
public class ValueStateDemo2 {

    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        //enable checkpointing
        env.enableCheckpointing(5000);
        //configure the restart strategy
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(5, 5000));

        DataStreamSource<String> lines = env.socketTextStream("localhost", 8888);

        SingleOutputStreamOperator<Tuple2<String, Integer>> wordAndOne = lines.flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
            @Override
            public void flatMap(String line, Collector<Tuple2<String, Integer>> out) throws Exception {
                if (line.startsWith("error")) {
                    throw new RuntimeException("Bad record encountered, throwing an exception!");
                }
                String[] words = line.split(" ");
                for (String word : words) {
                    out.collect(Tuple2.of(word, 1));
                }
            }
        });

        KeyedStream<Tuple2<String, Integer>, String> keyedStream = wordAndOne.keyBy(t -> t.f0);

        SingleOutputStreamOperator<Tuple2<String, Integer>> res = keyedStream.map(new MapFunction<Tuple2<String, Integer>, Tuple2<String, Integer>>() {

            private Map<String, Integer> counter = new HashMap<>();

            @Override
            public Tuple2<String, Integer> map(Tuple2<String, Integer> tp) throws Exception {
                String word = tp.f0;
                Integer current = tp.f1;
                Integer historyCount = counter.get(word);
                if (historyCount == null) {
                    historyCount = 0;
                }
                int sum = historyCount + current;
                //update the counter
                counter.put(word, sum);

                //emit the data
                tp.f1 = sum;
                return tp;
            }
        });

        res.print();

        env.execute();


    }
}

Now the different groups within a partition are handled correctly, but no matter whether checkpointing is enabled or not, the data is still lost after a restart.

Flink knows nothing about a plain Java Map, so it has no way to snapshot its contents. We still have to use Flink's state API.

Using Flink's state API to manage the state

package cn._51doit.flink.day06;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

import java.util.HashMap;
import java.util.Map;

/**
 * The underlying implementation idea of ValueState
 *
 * Flink has two kinds of state: KeyedState (state after a keyBy) and OperatorState (state without a keyBy)
 *
 * ValueState is one kind of KeyedState
 *
 * 1. KeyedState is backed by a Map structure.
 * 2. For fault tolerance you must enable checkpointing AND use Flink's state programming API (i.e. store the intermediate results in Flink's dedicated state variables).
 *
 * Implements the WordCount logic with Flink's ValueState programming API
 *
 */
public class ValueStateDemo3 {

    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        //enable checkpointing
        env.enableCheckpointing(5000);
        //configure the restart strategy
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(5, 5000));

        DataStreamSource<String> lines = env.socketTextStream("localhost", 8888);

        SingleOutputStreamOperator<Tuple2<String, Integer>> wordAndOne = lines.flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
            @Override
            public void flatMap(String line, Collector<Tuple2<String, Integer>> out) throws Exception {
                if (line.startsWith("error")) {
                    throw new RuntimeException("Bad record encountered, throwing an exception!");
                }
                String[] words = line.split(" ");
                for (String word : words) {
                    out.collect(Tuple2.of(word, 1));
                }
            }
        });

        KeyedStream<Tuple2<String, Integer>, String> keyedStream = wordAndOne.keyBy(t -> t.f0);

        SingleOutputStreamOperator<Tuple2<String, Integer>> res = keyedStream.map(new RichMapFunction<Tuple2<String, Integer>, Tuple2<String, Integer>>() {

            private ValueState<Integer> valueState;//with ValueState you never deal with the key yourself; everything key-related is handled internally

          
          //The previous version stored the counts in a Map, while here there is only a single Integer. It looks as if the value were not tied to any key, but because this is keyed state, every read and write is automatically scoped to the current key.
          
            //initialize or restore the state in the open() method
            @Override
            public void open(Configuration parameters) throws Exception {
                //define a state descriptor (which describes the state's type and name)
                ValueStateDescriptor<Integer> stateDescriptor = new ValueStateDescriptor<>("wc-state", Integer.class);//if the state type were generic you would pass TypeInformation.of(new TypeHint<...>() {}) instead of the Class
                //initialize or restore the state (read it back from wherever the state is stored)
                valueState = getRuntimeContext().getState(stateDescriptor);
            }

            @Override
            public Tuple2<String, Integer> map(Tuple2<String, Integer> input) throws Exception {
                //String word = input.f0; //the key is not needed here, update() resolves the current key internally
                Integer current = input.f1;
        //it looks as if we read the value without specifying a key, but internally the current key is used to look up the corresponding value
                Integer history = valueState.value();
                if (history == null) {
                    history = 0;
                }
                current += history;
                //update the state
                valueState.update(current);
                //emit the data
                input.f1 = current;
                return input;
            }
        });

        res.print();

        env.execute();


    }
}

Even with Flink's state programming API, the state is not preserved unless checkpointing is enabled: after a restart the counts reset to zero. So checkpointing must be enabled to persist the intermediate state.
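
All of the demos above keep their checkpoints in the default in-memory state backend. As a reference, the following is a minimal sketch (not part of the original demos; the class name, HDFS path and host are placeholders) of how the checkpoint interval, the CheckpointingMode and a file-system state backend could be configured together with the same Flink 1.x DataStream API used in this post:

import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class CheckpointConfigSketch {

    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        //checkpoint every 5 seconds with exactly-once semantics (EXACTLY_ONCE is also the default mode)
        env.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE);

        //keep the state in a file-system state backend instead of the JobManager's memory
        //(the path below is a placeholder, adjust it to your environment)
        env.setStateBackend(new FsStateBackend("hdfs://node-1:9000/flink/checkpoints"));

        //retain the last externalized checkpoint when the job is cancelled,
        //so the job can later be resumed from it
        env.getCheckpointConfig().enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        //...build the DataStream pipeline here, then call env.execute();
    }
}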

KeyedState: MapState, conceptually Map<Key, Map<k, v>>

You do not need to deal with the outer key yourself, only with the inner Map<k, v>.

package cn._51doit.flink.day06;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.state.MapState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * The underlying implementation idea of ValueState
 *
 * Flink has two kinds of state: KeyedState (state after a keyBy) and OperatorState (state without a keyBy)
 *
 * ValueState is one kind of KeyedState
 *
 * 1. KeyedState is backed by a Map structure.
 * 2. For fault tolerance you must enable checkpointing AND use Flink's state programming API (i.e. store the intermediate results in Flink's dedicated state variables).
 *
 * ValueState : Map<Key, Value>
 * MapState   : Map<Key, Map<k, v>>
 * ListState  : Map<Key, List<v>>
 *
 */
public class MapStateDemo {

    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        //enable checkpointing
        env.enableCheckpointing(5000);

        //Liaoning,Shenyang,3000
        //Liaoning,Dalian,4000
        //Liaoning,Anshan,4000
        //Hebei,Langfang,2000
        //Hebei,Xingtai,3000
        //Hebei,Shijiazhuang,2000
        DataStreamSource<String> lines = env.socketTextStream("localhost", 8888, "\n", 5);

        //parse the input into (province, city, money)
        SingleOutputStreamOperator<Tuple3<String, String, Integer>> tpStream = lines.map(new MapFunction<String, Tuple3<String, String, Integer>>() {
            @Override
            public Tuple3<String, String, Integer> map(String line) throws Exception {
                String[] fields = line.split(",");
                String province = fields[0];
                String city = fields[1];
                int money = Integer.parseInt(fields[2]);
                return Tuple3.of(province, city, money);
            }
        });

        //keyBy province, so that all records of one province go to the same partition, then accumulate the amount per city
      //keying by (province, city) would not do here: records of the same province could then end up in different partitions
        KeyedStream<Tuple3<String, String, Integer>, String> keyedStream = tpStream.keyBy(t -> t.f0);

        SingleOutputStreamOperator<Tuple3<String, String, Integer>> res = keyedStream.map(new CityMoneyFunction());

        res.print();

        env.execute();

    }


    private static class CityMoneyFunction extends RichMapFunction<Tuple3<String, String, Integer>, Tuple3<String, String, Integer>> {

        private MapState<String, Integer> mapState;

        @Override
        public void open(Configuration parameters) throws Exception {
            //define the MapStateDescriptor
            MapStateDescriptor<String, Integer> stateDescriptor = new MapStateDescriptor<>("city-money-state", String.class, Integer.class);
            //initialize or restore the state
            mapState = getRuntimeContext().getMapState(stateDescriptor);
        }

        @Override
        public Tuple3<String, String, Integer> map(Tuple3<String, String, Integer> input) throws Exception {
            String city = input.f1;
            Integer money = input.f2;
            Integer history = mapState.get(city);//look up the inner value by the inner key (the city)
            if (history == null) {
                history = 0;
            }
            money += history;
            //update the state
            mapState.put(city, money);
            //emit the data
            input.f2 = money;
            return input;
        }
    }
}

KeyedState: ListState, conceptually Map<Key, List<v>>

You do not need to deal with the key yourself, only with the List<v>.

package cn._51doit.flink.day06;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.common.state.ListState;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.api.common.state.MapState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

import java.util.ArrayList;
import java.util.List;

/**
 * The underlying implementation idea of ValueState
 *
 * Flink has two kinds of state: KeyedState (state after a keyBy) and OperatorState (state without a keyBy)
 *
 * ValueState is one kind of KeyedState
 *
 * 1. KeyedState is backed by a Map structure.
 * 2. For fault tolerance you must enable checkpointing AND use Flink's state programming API (i.e. store the intermediate results in Flink's dedicated state variables).
 *
 * ValueState : Map<Key, Value>
 * MapState   : Map<Key, Map<k, v>>
 * ListState  : Map<Key, List<v>>
 *
 */
public class ListStateDemo {

    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        //enable checkpointing
        env.enableCheckpointing(5000);

        //keep the 10 most recent events of each user
        //u001,view
        //u001,pay
        //u002,view
        //u002,view
        DataStreamSource<String> lines = env.socketTextStream("localhost", 8888, "\n", 5);

        SingleOutputStreamOperator<Tuple2<String, String>> tpStream = lines.map(new MapFunction<String, Tuple2<String, String>>() {
            @Override
            public Tuple2<String, String> map(String value) throws Exception {
                if (value.startsWith("error")) {
                    throw new RuntimeException("Bad record encountered!");
                }
                String[] fields = value.split(",");
                String uid = fields[0];
                String event = fields[1];
                return Tuple2.of(uid, event);
            }
        });

        KeyedStream<Tuple2<String, String>, String> keyedStream = tpStream.keyBy(t -> t.f0);

        //store each user's events in arrival order
        SingleOutputStreamOperator<Tuple2<String, List<String>>> res = keyedStream.map(new UserEventFunction());

        res.print();

        env.execute();


    }

    private static class UserEventFunction extends RichMapFunction<Tuple2<String, String>, Tuple2<String, List<String>>> {

        private ListState<String> listState;

        @Override
        public void open(Configuration parameters) throws Exception {
            //define the state descriptor
            ListStateDescriptor<String> stateDescriptor = new ListStateDescriptor<>("event-state", String.class);
            //initialize or restore the state
            listState = getRuntimeContext().getListState(stateDescriptor);
        }

        @Override
        public Tuple2<String, List<String>> map(Tuple2<String, String> input) throws Exception {
            String event = input.f1;
            listState.add(event);
            ArrayList<String> events = (ArrayList<String>) listState.get();
            if (events.size() > 10) {
                events.remove(0);
            }
            return Tuple2.of(input.f0, events);
          
          // events is a reference to the state's internal list (this relies on the heap-based state backend), so no explicit update is needed
        }
    }
}
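
Note that the cast of listState.get() to ArrayList in UserEventFunction works only because the heap-based (memory/fs) state backends hand back their internal list; the ListState contract only promises an Iterable. A more portable sketch of the same map() method, using nothing beyond ListState.get() and ListState.update(), copies the elements into a fresh list and writes the trimmed list back (a drop-in replacement for UserEventFunction.map above):

        @Override
        public Tuple2<String, List<String>> map(Tuple2<String, String> input) throws Exception {
            listState.add(input.f1);
            //copy the state into a regular list so we do not depend on the backend's internal list type
            List<String> events = new ArrayList<>();
            for (String event : listState.get()) {
                events.add(event);
            }
            //keep only the 10 most recent events and write the trimmed list back into the state
            if (events.size() > 10) {
                events.remove(0);
            }
            listState.update(events);
            return Tuple2.of(input.f0, events);
        }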

KeyedState exercise

The input looks like this (fields: user ID, activity ID, event type); the same user may view an activity repeatedly:

user01,activity01,view
user01,activity01,join
user01,activity02,view
user02,activity02,view
user02,activity02,view
user03,activity02,view
user02,activity02,join
user03,activity01,view


Compute, in real time, the number of occurrences and the number of distinct users for each activity and each event type (occurrences are simply accumulated; user counts are deduplicated by user ID):
activity01,view,2,2
activity01,join,1,1
activity02,view,4,3
activity02,join,1,1
package cn._51doit.flink.day06;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.api.java.tuple.Tuple4;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.util.Collector;

import java.util.HashSet;

/**
 * Uses Flink state to count, per activity and per event type, the number of occurrences and the number of distinct users
 *
 */
public class ActivityCount {

    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        env.enableCheckpointing(10000);

        DataStreamSource<String> lines = env.socketTextStream("localhost", 8888);

        SingleOutputStreamOperator<Tuple3<String, String, String>> tpStream = lines.map(new MapFunction<String, Tuple3<String, String, String>>() {
            @Override
            public Tuple3<String, String, String> map(String value) throws Exception {
                String[] fields = value.split(",");
                String uid = fields[0];
                String aid = fields[1];
                String event = fields[2];
                return Tuple3.of(uid, aid, event);
            }
        });

        //keyBy on the combination of activity ID and event type
        KeyedStream<Tuple3<String, String, String>, Tuple2<String, String>> keyedStream = tpStream.keyBy(new KeySelector<Tuple3<String, String, String>, Tuple2<String, String>>() {
            @Override
            public Tuple2<String, String> getKey(Tuple3<String, String, String> value) throws Exception {
                return Tuple2.of(value.f1, value.f2);
            }
        });

        SingleOutputStreamOperator<Tuple4<String, String, Integer, Integer>> res = keyedStream.process(new ActivityCountFunction());
      //process is similar to map; map emits exactly one record per input, while process may emit any number of records per input by calling collect() on the Collector

        res.print();

        env.execute();

    }

    private static class ActivityCountFunction extends KeyedProcessFunction<Tuple2<String, String>, Tuple3<String, String, String>, Tuple4<String, String, Integer, Integer>> {
      //the type parameters are <K, I, O>: key, input and output. The key is the (activity id, event) pair, the input is the Tuple3, and the output is that pair plus the occurrence count and the user count

        private ValueState<Integer> countState;
        private ValueState<HashSet<String>> uidState;
        @Override
        public void open(Configuration parameters) throws Exception {
            //initialize the state
            //state that tracks the number of occurrences
            ValueStateDescriptor<Integer> countStateDescriptor = new ValueStateDescriptor<>("count-state", Integer.class);
            countState = getRuntimeContext().getState(countStateDescriptor);
            //state that tracks the user IDs (for the user count): a HashSet deduplicates them, so a repeated user ID simply overwrites the previous entry
            ValueStateDescriptor<HashSet<String>> uidStateDescriptor = new ValueStateDescriptor<>("uid-state", TypeInformation.of(new TypeHint<HashSet<String>>() {}));
            uidState = getRuntimeContext().getState(uidStateDescriptor);
        }

        @Override
        public void processElement(Tuple3<String, String, String> input, Context ctx, Collector<Tuple4<String, String, Integer, Integer>> out) throws Exception {
            String uid = input.f0;
            //count the occurrences
            Integer history = countState.value();
            if (history == null) {
                history = 0;
            }
            int count = history + 1;
            //update the state
            countState.update(count);

            //count the distinct users
            HashSet<String> set = uidState.value();
            if (set == null) {
                set = new HashSet<>();
            }
            set.add(uid);
            uidState.update(set);

            //emit the result
            out.collect(Tuple4.of(ctx.getCurrentKey().f0, ctx.getCurrentKey().f1, count, set.size()));

        }
    }
}
