写文章不容易,喜欢的请赞一赞。如有疑问,请加微信wx15151889890,谢谢。
Flink相比于Spark,还是比较好用的。话不多说,上代码:
// Set up the stream processing environment.
final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
// Checkpointing is essential here: the Kafka consumer commits offsets on
// checkpoints, and BucketingSink relies on checkpoints to move part files
// from "pending" to "final" (exactly-once to HDFS).
env.enableCheckpointing(5000);
// NOTE(review): the original set TimeCharacteristic.EventTime but never
// assigned timestamps/watermarks, and this job uses no event-time operator;
// the setting was inert (and would stall any future windowed operator), so
// it was removed. DateTimeBucketer below buckets by system time regardless.

// Kafka consumer configuration.
// NOTE(review): "zookeeper.connect" was dropped — it is only read by the
// Kafka 0.8 connector; FlinkKafkaConsumer010 uses bootstrap.servers.
Properties props = new Properties();
props.setProperty("bootstrap.servers", "hadoop01:9092,hadoop02:9092");
props.setProperty("group.id", "kafka_to_hdfs");

// Consume topic "test1" as plain UTF-8 strings, starting from the latest
// offsets (records published before a fresh start are skipped).
FlinkKafkaConsumer010<String> consumer =
        new FlinkKafkaConsumer010<>("test1", new SimpleStringSchema(), props);
consumer.setStartFromLatest();

// Kafka is the single source of this job.
DataStream<String> stream = env.addSource(consumer);

// Sink: write records to HDFS, one bucket (directory) per day.
BucketingSink<String> hdfsSink = new BucketingSink<>("hdfs:///data/flink/fromKafka");
hdfsSink.setBatchSize(1024 * 1024 * 400);                          // roll part file at 400 MB...
hdfsSink.setBucketer(new DateTimeBucketer<String>("yyyy-MM-dd"));  // daily buckets
hdfsSink.setBatchRolloverInterval(3600000);                        // ...or after 1 hour (ms)

stream.addSink(hdfsSink);
// Lazily-built dataflow starts executing here; the name shows up in the UI.
env.execute("Kafka-to-HDFS");
然后提交到Flink控制台运行即可。