spark streaming 处理kafka数据并写入mysql

spark streaming 从kafka读取数据,将流处理结果写入mysql

//spark streaming 从kafka读取数据,将流处理结果写入mysql
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import java.sql.{PreparedStatement,Connection,DriverManager}
import java.util.concurrent.atomic.AtomicInteger
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.StreamingContext._
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, Minutes}
import org.apache.log4j.Logger
import org.apache.log4j.Level

// Spark Streaming job: consumes messages from a Kafka topic, maintains a
// sliding-window word count, and persists each window's results to MySQL.
object KafkaWordCount {
    def main(args: Array[String]): Unit = {
        // Silence noisy framework logging so streaming output stays readable.
        Logger.getLogger("org.apache.spark").setLevel(Level.ERROR)
        Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)

        val kafkaParams = Map[String, Object](
            "bootstrap.servers" -> "localhost:9092",
            "key.deserializer" -> classOf[StringDeserializer],
            "value.deserializer" -> classOf[StringDeserializer],
            "group.id" -> "use_a_separate_group_id_for_each_stream",
            "auto.offset.reset" -> "latest",
            "enable.auto.commit" -> (false: java.lang.Boolean))

        val sc = new SparkConf().setAppName("ZkWordCount")
        val ssc = new StreamingContext(sc, Seconds(10))
        // Checkpointing is mandatory for reduceByKeyAndWindow with an
        // inverse-reduce function (Spark tracks old window state in it).
        ssc.checkpoint("hdfs://nameservice1/test/checkpoint")

        val topics = Array("zklog")
        val stream = KafkaUtils.createDirectStream[String, String](
            ssc,
            PreferConsistent,
            Subscribe[String, String](topics, kafkaParams))

        // Each Kafka record value is a comma-separated token list; explode
        // it into (word, 1) pairs.
        val lines = stream.map(record => record.value)
        val words = lines.flatMap(_.split(","))
        val pair = words.map(x => (x, 1))
        // 1-minute window sliding every 10 seconds, over 3 partitions.
        // The inverse function (_ - _) lets Spark incrementally subtract
        // batches that leave the window instead of recomputing it.
        val wordCounts = pair.reduceByKeyAndWindow(_ + _, _ - _, Minutes(1), Seconds(10), 3)
        wordCounts.print()

        // Persist every window's counts into MySQL, one JDBC connection per
        // RDD partition (connections are not serializable, so they must be
        // created inside foreachPartition, on the executor).
        wordCounts.foreachRDD(rdd => {
            // Writes one partition's (word, count) pairs to the zklog table.
            def func(records: Iterator[(String, Int)]): Unit = {
                var conn: Connection = null
                var stmt: PreparedStatement = null
                try {
                    val url = "jdbc:mysql://172.xx.xx.xx:3306/bigdata"
                    val user = "admin"
                    val password = "admin"
                    conn = DriverManager.getConnection(url, user, password)
                    // Prepare the statement ONCE and reuse it for every row.
                    // The original code prepared a new statement per record,
                    // leaking all but the last one (only the final statement
                    // was still referenced by `stmt` when finally ran).
                    stmt = conn.prepareStatement(
                        "insert into zklog(information,count) values (?,?)")
                    records.foreach { case (word, count) =>
                        stmt.setString(1, word.trim)
                        stmt.setInt(2, count) // already an Int; no .toInt needed
                        stmt.executeUpdate()
                    }
                } catch {
                    // Best-effort persistence: log the failure and let the
                    // streaming job keep running.
                    case e: Exception => e.printStackTrace()
                } finally {
                    if (stmt != null) {
                        stmt.close()
                    }
                    if (conn != null) {
                        conn.close()
                    }
                }
            }

            // Cap the write fan-out at 3 concurrent DB connections.
            val repartitionedRDD = rdd.repartition(3)
            repartitionedRDD.foreachPartition(func)
        })

        ssc.start()
        ssc.awaitTermination()
    }
}
  • 3
    点赞
  • 3
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 1
    评论
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

鱼遇雨愈愉

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值