flink_初识02kafkawordcount

1.启动zookeeper服务

./bin/zookeeper-server-start.sh config/zookeeper.properties

2.开启kafka服务

.\bin\windows\kafka-server-start.bat .\config\server.properties
./bin/kafka-server-start.sh config/server.properties

3.创建topic

.\bin\windows\kafka-topics.bat --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test_flink
./bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test_flink

4.创建生产者

.\bin\windows\kafka-console-producer.bat --broker-list localhost:9092 --topic test_flink
./bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test_flink

--5.创建消费者（可选，用于验证；新版 Kafka 已移除 --zookeeper 选项，请改用 --bootstrap-server localhost:9092）
--.\bin\windows\kafka-console-consumer.bat --zookeeper localhost:2181 --topic test_flink
--./bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic test_flink --from-beginning

5.Flink 消费 Kafka 的 WordCount 程序（Scala）

package flink

import java.util.Properties

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.core.fs.FileSystem.WriteMode
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011
import org.apache.flink.streaming.api.scala._

/**
 * Streaming word count: reads whitespace-separated text from the Kafka topic
 * "test_flink", counts words over 2-second processing-time windows, and both
 * prints the counts and overwrites a local result file.
 */
object KafkaWordCount {

  /**
   * Builds a `FlinkKafkaConsumer011[String]` for topic "test_flink" with a
   * plain UTF-8 string deserializer.
   *
   * @return consumer bound to brokers at localhost:9092, consumer group "group1"
   */
  def getFlinkKafkaConsumer() = {
    val prop = new Properties()

    // NOTE(review): the 0.11 consumer talks to the brokers directly and does
    // not read "zookeeper.connect"; the property is kept to match the setup
    // notes above, but only "bootstrap.servers" is actually required.
    prop.setProperty("zookeeper.connect", "localhost:2181") // ZOOKEEPER_HOST
    prop.setProperty("bootstrap.servers", "localhost:9092") // KAFKA_BROKER
    prop.setProperty("group.id", "group1") // TRANSACTION_GROUP
    new FlinkKafkaConsumer011[String]("test_flink", new SimpleStringSchema(), prop) // TOPIC
  }

  def main(args: Array[String]): Unit = {

    val env = StreamExecutionEnvironment.getExecutionEnvironment

    val source = getFlinkKafkaConsumer()

    // Re-read the topic from the beginning on every run (ignores committed
    // group offsets), so repeated runs reprocess all messages.
    source.setStartFromEarliest()

    val dStream = env.addSource(source)

    // "\\s+" (the original used "\\s"): splitting on a single-whitespace
    // pattern produces empty tokens for consecutive spaces, and those empty
    // strings would be counted as words.
    val result = dStream.flatMap(x => x.split("\\s+"))
      .map(x => (x, 1))
      .keyBy(0)
      .timeWindow(Time.seconds(2L)) // uppercase L: lowercase '2l' reads as '21'
      .sum(1)

    result.setParallelism(1).print()

    // Path uses single (escaped) backslashes; the original listing had them
    // doubled by the blog engine, yielding the invalid path E:\\sparkproject\...
    // setParallelism(1) on the sink writes one file instead of a directory of
    // part-files.
    result
      .writeAsText("E:\\sparkproject\\src\\test\\data\\result2.txt", WriteMode.OVERWRITE)
      .setParallelism(1)

    env.execute("KafkaWordCount")
  }

}

 

转载于:https://www.cnblogs.com/yin-fei/p/11165559.html

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值