Spark Streaming output to MySQL, Redis, and Kafka
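The three examples below are Spark Streaming jobs written in Scala. They assume that Spark Streaming, the Kafka 0.10 integration, the MySQL JDBC driver, and Jedis are on the classpath; a minimal build.sbt sketch follows (the version numbers are illustrative assumptions, not taken from this post):

// Dependencies assumed by the examples below; versions are illustrative
libraryDependencies ++= Seq(
  "org.apache.spark" %% "spark-streaming" % "2.4.8",
  "org.apache.spark" %% "spark-streaming-kafka-0-10" % "2.4.8",
  "mysql" % "mysql-connector-java" % "5.1.49",
  "redis.clients" % "jedis" % "3.7.0"
)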

MySQL

import java.sql.{Connection, DriverManager, PreparedStatement, Timestamp}

import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

object streaming_mysql {
  def main(args: Array[String]): Unit = {

    // Initialize the Spark configuration
    val conf = new SparkConf().setMaster("local[*]").setAppName(this.getClass.getName)
    // Initialize the StreamingContext with a 5-second batch interval
    val ssc = new StreamingContext(conf, Seconds(5))

    val map: Map[String, String] = Map[String, String](
      ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
      ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
      ConsumerConfig.GROUP_ID_CONFIG -> "g1",
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> "hadoop11:9092,hadoop12:9092,hadoop13:9092"
    )

    val dStream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream(ssc,
      LocationStrategies.PreferConsistent, ConsumerStrategies.Subscribe[String, String](List("topica"), map))

    // Note: window() returns a new DStream; calling it without using the return value has no effect
    // val windowed: DStream[ConsumerRecord[String, String]] = dStream.window(Seconds(15), Seconds(10))

    // Word count over each batch
    val res: DStream[(String, Int)] = dStream.map(record => record.value).flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _)
    res.foreachRDD((rdd, time) => {
      rdd.foreachPartition(iter => {
        // One connection and one prepared statement per partition, reused for every record in it
        val conn: Connection = DriverManager.getConnection("jdbc:mysql://hadoop13:3306/test1?characterEncoding=UTF-8", "root", "119131Zrf0...")
        val ps: PreparedStatement = conn.prepareStatement("insert into t_words values (null,?,?,?)")
        iter.foreach(v => {
          ps.setString(1, v._1)
          ps.setInt(2, v._2)
          ps.setTimestamp(3, new Timestamp(time.milliseconds))
          ps.executeUpdate()
        })
        ps.close()
        conn.close()
      })
    })
    ssc.start()
    ssc.awaitTermination()
  }

}
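Executing one INSERT per record costs a round trip to MySQL for every word. If throughput matters, JDBC batching can queue the rows and flush them once per partition. The following is a sketch of a drop-in replacement for the res.foreachRDD block above, using the same table and connection settings; the batching variant is an assumption, not part of the original code:

    // Sketch: same output as above, but the rows of a partition are sent in one batch
    res.foreachRDD((rdd, time) => {
      rdd.foreachPartition(iter => {
        val conn: Connection = DriverManager.getConnection("jdbc:mysql://hadoop13:3306/test1?characterEncoding=UTF-8", "root", "119131Zrf0...")
        val ps: PreparedStatement = conn.prepareStatement("insert into t_words values (null,?,?,?)")
        iter.foreach(v => {
          ps.setString(1, v._1)
          ps.setInt(2, v._2)
          ps.setTimestamp(3, new Timestamp(time.milliseconds))
          ps.addBatch() // queue the row instead of executing it immediately
        })
        ps.executeBatch() // send all queued rows in one round trip
        ps.close()
        conn.close()
      })
    })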

Redis

import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.{Seconds, StreamingContext}
import redis.clients.jedis.Jedis

object streaming_Redis {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName(this.getClass.getName)
    val scc = new StreamingContext(conf, Seconds(10))
    scc.sparkContext.setLogLevel("WARN")


    // Read lines from a TCP socket source (e.g. one started with: nc -lk 9999)
    val dStream: ReceiverInputDStream[String] = scc.socketTextStream("hadoop10", 9999)
    dStream
      .flatMap(_.split(" "))
      .map((_,1))
      .reduceByKey(_+_)
      .foreachRDD((rdd,time)=>{
        rdd.foreachPartition(iter => {
          // One Jedis connection per partition; connections are not serializable,
          // so they must be created inside foreachPartition rather than on the driver
          val jedis = new Jedis("hadoop12", 6379)
          jedis.auth("123")

          iter.foreach(v => {
            // Keep a running total per word: add this batch's count to the value already in Redis
            if (jedis.exists(v._1)) {
              val c1: String = jedis.get(v._1)
              jedis.set(v._1,(c1.toInt+v._2).toString)
            }else{
              jedis.set(v._1,v._2.toString)
            }
          })
          jedis.close()
        })

      })
    scc.start()
    scc.awaitTermination()
  }

}
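The exists/get/set sequence above is a read-modify-write, so two partitions updating the same word in parallel can lose an update. Redis has an atomic INCRBY command, exposed in Jedis as incrBy, which collapses the update into a single call; a sketch of the loop body under that approach (not from the original post):

          iter.foreach(v => {
            // INCRBY treats a missing key as 0 and adds the delta atomically
            jedis.incrBy(v._1, v._2.toLong)
          })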

Kafka

import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

object streamingWC_Kafka {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName(this.getClass.getName)
    val scc = new StreamingContext(conf, Seconds(10))
    scc.sparkContext.setLogLevel("WARN")

    val map: Map[String, String] = Map[String, String](
      ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
      ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
      ConsumerConfig.GROUP_ID_CONFIG -> "g1",
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> "hadoop11:9092,hadoop12:9092,hadoop13:9092"
    )

    val dstream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream(scc,
      LocationStrategies.PreferConsistent, ConsumerStrategies.Subscribe[String, String](List("topica"), map))

    // Word count over each 10-second batch, printed to the console
    dstream.map(record => record.value)
      .flatMap(line=>line.split(" "))
      .map(v=>(v,1))
      .reduceByKey(_+_)
      .print()

    scc.start()
    scc.awaitTermination()
  }

}
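The word count above only reads from Kafka and prints to the console. To actually write the result back to Kafka, as the section title suggests, a KafkaProducer can be created per partition inside foreachRDD, mirroring the MySQL and Redis examples. Below is a sketch that would replace the .print() pipeline inside streamingWC_Kafka; the output topic name topicb and the producer settings are assumptions:

    import java.util.Properties
    import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}

    // Sketch: write each batch's (word, count) pairs to an assumed output topic "topicb"
    dstream.map(record => record.value)
      .flatMap(_.split(" "))
      .map((_, 1))
      .reduceByKey(_ + _)
      .foreachRDD(rdd => {
        rdd.foreachPartition(iter => {
          // One producer per partition; producers are not serializable
          val props = new Properties()
          props.put("bootstrap.servers", "hadoop11:9092,hadoop12:9092,hadoop13:9092")
          props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
          props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
          val producer = new KafkaProducer[String, String](props)
          iter.foreach { case (word, count) =>
            producer.send(new ProducerRecord[String, String]("topicb", word, count.toString))
          }
          producer.close()
        })
      })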