Four Ways to Read Data Sources in Flink

import java.util.Properties

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011

object SourceDemo {
  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    // Data sources
    // 1. Read from a text file
    val inpath = "D:\\programs\\sparkPrograms\\FlinkProgarm\\src\\main\\resources\\hello.txt"
    val stream1 = env.readTextFile(inpath)
    // 2. Read from a socket stream
    val stream2 = env.socketTextStream("hadoop01", 7777)
    // 3. Read from Kafka
    val properties = new Properties()
    properties.setProperty("bootstrap.servers", "hadoop01:9092")
    properties.setProperty("group.id", "consumer-group")
    properties.setProperty("key.deserializer",
      "org.apache.kafka.common.serialization.StringDeserializer")
    properties.setProperty("value.deserializer",
      "org.apache.kafka.common.serialization.StringDeserializer")
    properties.setProperty("auto.offset.reset", "latest")
    val stream3 = env.addSource(
      new FlinkKafkaConsumer011[String]("sensor", new SimpleStringSchema(), properties))
    // Print the Kafka stream so the job has at least one sink to run
    stream3.print("stream3")
    // 4. Read from a custom source (a sketch follows after this listing)

    env.execute("SourceDemo")
  }
}
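The fourth way, which the listing above only marks with a comment, is a user-defined source: implement Flink's SourceFunction interface and hand an instance to env.addSource, exactly as with the Kafka consumer. The class below is a minimal sketch, not code from the original article: the name MySensorSource, the one-second emit interval, and the CSV-style record layout are all assumptions made for illustration.

import scala.util.Random
import org.apache.flink.streaming.api.functions.source.SourceFunction

// Hypothetical custom source: emits one random sensor reading per second
// until the job cancels it.
class MySensorSource extends SourceFunction[String] {
  @volatile private var running = true

  override def run(ctx: SourceFunction.SourceContext[String]): Unit = {
    val rand = new Random()
    while (running) {
      // Assumed record layout: id,timestamp,temperature
      val reading =
        s"sensor_${rand.nextInt(10)},${System.currentTimeMillis()},${60 + rand.nextGaussian() * 10}"
      ctx.collect(reading)
      Thread.sleep(1000)
    }
  }

  override def cancel(): Unit = running = false
}

It plugs in just like the built-in sources: val stream4 = env.addSource(new MySensorSource()) followed by stream4.print("stream4").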
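Two practical notes. The socket source expects a process listening on hadoop01:7777; a quick way to test locally is to start one with nc -lk 7777. And the Kafka source only compiles with the 0.11 connector on the classpath; a plausible sbt coordinate follows, where the version number is an assumption and should match your Flink distribution.

// Hypothetical build.sbt line; swap in your own Flink version.
libraryDependencies += "org.apache.flink" %% "flink-connector-kafka-0.11" % "1.7.2"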