本案例使用 Spark Streaming 接收并处理 Kafka 消费的数据，再将处理好的数据写入 Kafka 的指定主题
-
kafkaParams：用于存放消费者（consumer）的配置信息
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord} import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord} import org.apache.spark.SparkConf import org.apache.spark.streaming.dstream.InputDStream import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies} import org.apache.spark.streaming.{Seconds, StreamingContext} object UserFriendRawToUserFriend { def main(args: Array[String]): Unit = { val conf: SparkConf = new SparkConf().setAppName("sparkStream").setMaster("local[*]") val sc = new StreamingContext(conf, Seconds(3)) //kafkaParams,存放消费者的配置信息 val kafkaParams = Map(
-
Kafka 集群地址（bootstrap servers）
(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> "192.168.89.140:9092"),
-
key、value 的反序列化器（均为 StringDeserializer）
(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer"), (ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer"),
</