import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.runtime.state.StateBackend;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011;
import org.apache.flink.util.Collector;

import java.io.IOException;

/**
 * Created by shuiyu lei
 * date 2019/6/21
 */
public class TestRock {
    public static void main(String[] args) {
        StreamExecutionEnvironment en = StreamExecutionEnvironment.getExecutionEnvironment();
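        // Ingestion time: Flink attaches timestamps and emits watermarks at the
        // source automatically, so the event-time window below needs no assigner.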
        en.setStreamTimeCharacteristic(TimeCharacteristic.IngestionTime);
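        // Take a checkpoint every 5 seconds (interval is in milliseconds).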
        en.enableCheckpointing(5000);
        RocksDBStateBackend rock = null;
        try {
            // RocksDB state backend with checkpoints on HDFS; the second
            // argument enables incremental checkpoints.
            rock = new RocksDBStateBackend("hdfs://centos-6:8020/flink/ch/", true);
        } catch (IOException e) {
            e.printStackTrace();
        }
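        // The cast selects the non-deprecated setStateBackend(StateBackend) overload.
        // Note: rock is still null at this point if the constructor above threw.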
        en.setStateBackend((StateBackend) rock);
        // KafkaUtil is a user-defined wrapper that builds the consumer; the
        // arguments are presumably the topic ("dsf") and consumer group ("te"),
        // and the records are assumed to be deserialized as Strings.
        KafkaUtil util = new KafkaUtil();
        FlinkKafkaConsumer011<String> consumer = util.getConsumer("dsf", "te");
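        // Word count: split each record into (word, 1) pairs, key by the word
        // (tuple field 0), and sum the counts (field 1) per 10-second tumbling window.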
        en.addSource(consumer).flatMap(new Tokenizer())
                .keyBy(0)
                .window(TumblingEventTimeWindows.of(Time.seconds(10)))
                .sum(1)
                .print();
        try {
            en.execute("print dwf log");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    /**
     * Implements the string tokenizer that splits sentences into words as a user-defined
     * FlatMapFunction. The function takes a line (String) and splits it into
     * multiple pairs in the form of "(word,1)" ({@code Tuple2<String, Integer>}).
     */
    public static final class Tokenizer implements FlatMapFunction<String, Tuple2<String, Integer>> {
        @Override
        public void flatMap(String value, Collector<Tuple2<String, Integer>> out) {
            // normalize and split the line
            String[] tokens = value.toLowerCase().split("\\W+");
            // emit the pairs
            for (String token : tokens) {
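                // split("\\W+") yields an empty first token when the line starts
                // with a non-word character, so skip empty strings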
                if (token.length() > 0) {
                    out.collect(new Tuple2<>(token, 1));
                }
            }
        }
    }
}