POM file
<dependencies>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>4.12</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hbase</groupId>
        <artifactId>hbase-client</artifactId>
        <version>1.2.1</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hbase</groupId>
        <artifactId>hbase-common</artifactId>
        <version>1.2.1</version>
    </dependency>
    <dependency>
        <groupId>org.apache.spark</groupId>
        <artifactId>spark-core_2.11</artifactId>
        <version>2.0.2</version>
    </dependency>
    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-connector-kafka-0.8_2.11</artifactId>
        <version>1.6.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-connector-filesystem_2.11</artifactId>
        <version>1.6.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-scala_2.11</artifactId>
        <version>1.6.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-clients_2.11</artifactId>
        <version>1.6.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-streaming-scala_2.11</artifactId>
        <version>1.6.0</version>
    </dependency>
    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-table_2.11</artifactId>
        <!-- keep all Flink artifacts on the same version -->
        <version>1.6.0</version>
    </dependency>
</dependencies>
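If the job is launched from the IDE and writes directly to HDFS, the Hadoop client libraries also have to be on the classpath. A minimal sketch, assuming they are not already provided transitively (the hadoop-client artifact and the 2.7.3 version here are example values; match them to your cluster):

    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>2.7.3</version>
    </dependency>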
Code
import java.util.Properties

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.flink.streaming.connectors.fs.StringWriter
import org.apache.flink.streaming.connectors.fs.bucketing.BucketingSink
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer08

object KafkaTest {
  def main(args: Array[String]): Unit = {
    val environment: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Kafka 0.8 consumer properties: brokers, ZooKeeper quorum and consumer group
    val properties: Properties = new Properties()
    properties.setProperty("bootstrap.servers", "192.168.96.182:9092,192.168.96.183:9092,192.168.96.184:9092")
    properties.setProperty("zookeeper.connect", "192.168.96.182:2181,192.168.96.183:2181,192.168.96.184:2181")
    properties.setProperty("group.id", "test")

    // Read the "test" topic as a stream of plain strings
    val stream: DataStream[String] =
      environment.addSource(new FlinkKafkaConsumer08[String]("test", new SimpleStringSchema(), properties))

    // Word count: split each line on spaces, key by the word, keep a running sum
    val counts: DataStream[(String, Int)] = stream
      .flatMap(x => x.split(" "))
      .map(x => (x, 1))
      .keyBy(0)
      .sum(1)
    counts.print()

    // Write the results to HDFS, bucketed by time (default bucketer)
    val sink: BucketingSink[(String, Int)] = new BucketingSink[(String, Int)]("hdfs://192.168.96.181:9000/date/")
    sink.setWriter(new StringWriter[(String, Int)]())
    // roll a part file once it reaches 400 MB ...
    sink.setBatchSize(1024 * 1024 * 400L)
    // ... or after 60 minutes (the interval is in milliseconds)
    sink.setBatchRolloverInterval(60 * 60 * 1000L)
    counts.addSink(sink)

    environment.execute("KafkaTest")
  }
}
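Note that BucketingSink only finalizes part files when a checkpoint completes; with checkpointing disabled, rolled files stay in the pending state. A minimal sketch of enabling it on the environment used above, where the 10 000 ms interval is an example value rather than something from the original setup:

// enable checkpointing so BucketingSink can move pending part files to the finished state
environment.enableCheckpointing(10000)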