Spark Streaming (2.4.0): writing data to HBase

// Spark Streaming writing data to HBase.
// Improvement planned, following the official documentation: the current write path is the typical
// first anti-pattern described there. The connection setup is repeated for every RDD, because a
// connection created in the driver cannot be serialized and shipped to the workers. A sketch of the
// recommended per-partition pattern follows the listing below.
// The code as written currently runs correctly.
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, Minutes, StreamingContext}
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.hadoop.hbase._
import org.apache.hadoop.hbase.client._
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapreduce.Job
import org.apache.log4j.{Logger, Level}

object SparkStreamingToHbase {
    def main(args: Array[String]) {
        Logger.getLogger("org.apache.spark").setLevel(Level.ERROR)
        Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)

        val kafkaParams = Map[String, Object](
            "bootstrap.servers" -> "localhost:9092",
            "key.deserializer" -> classOf[StringDeserializer],
            "value.deserializer" -> classOf[StringDeserializer],
            "group.id" -> "use_a_separate_group_id_for_each_stream",
            "auto.offset.reset" -> "latest",
            "enable.auto.commit" -> (false: java.lang.Boolean))
                
        //val numThreads = 3  // number of partitions per topic
        //val topicMap = topics.split(",").map((_, numThreads.toInt)).toMap
        val sc = new SparkConf().setAppName("ZkWordCount")
        val ssc = new StreamingContext(sc, Seconds(10))
        ssc.checkpoint("hdfs://nameservice1/test/checkpoint") // set the checkpoint directory
        val topics = Array("zklog")
        val stream = KafkaUtils.createDirectStream[String, String](
            ssc,
            PreferConsistent,
            Subscribe[String, String](topics, kafkaParams)
         )
        
        //val line = stream.map(record => (record.key, record.value))
        val lines = stream.map(record => record.value)
        val words = lines.flatMap(_.split("org.apache.zookeeper.server.")).flatMap(_.split(","))

        val pair = words.map(x => (x, 1))
        val wordCounts = pair.reduceByKeyAndWindow(_ + _, _ - _, Minutes(1), Seconds(10), 3) // windowed count; this call is explained in the next section on window operations
        wordCounts.print()	
        wordCounts.foreachRDD(rdd => {
            // Per-batch Hadoop job configuration for TableOutputFormat (see the improved pattern below)
            val hbaseConf = HBaseConfiguration.create()
            hbaseConf.set(TableOutputFormat.OUTPUT_TABLE, "zklog")
            hbaseConf.set("mapreduce.output.fileoutputformat.outputdir", "/tmp")
            val job = Job.getInstance(hbaseConf)
            job.setOutputKeyClass(classOf[ImmutableBytesWritable])
            job.setOutputValueClass(classOf[Put])
            job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])
            rdd.map(x => {
                val put = new Put(Bytes.toBytes(x._1.trim))   // row key: the word
                put.addImmutable(Bytes.toBytes("zk"), Bytes.toBytes("count"), Bytes.toBytes(x._2.toString)) // column zk:count
                (new ImmutableBytesWritable, put)
            }).saveAsNewAPIHadoopDataset(job.getConfiguration())
        })
        ssc.start()
        ssc.awaitTermination()
    }
}
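
The improvement hinted at in the comments at the top would follow the "Design Patterns for using foreachRDD" section of the official Spark Streaming programming guide: open the HBase connection on the executors, once per partition, instead of configuring a Hadoop job in the driver for every micro-batch. Below is a minimal sketch of that approach, assuming the zklog table with column family zk already exists; the table name and column family come from the listing above, while the ConnectionFactory-based client code is only an illustrative alternative and has not replaced the job-based version above.

// Sketch: write each partition through one HBase client connection (runs on the executor)
import scala.collection.JavaConverters._
import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
import org.apache.hadoop.hbase.client.{ConnectionFactory, Put}
import org.apache.hadoop.hbase.util.Bytes

wordCounts.foreachRDD { rdd =>
    rdd.foreachPartition { partition =>
        val conf = HBaseConfiguration.create()                     // picks up hbase-site.xml on the executor
        val connection = ConnectionFactory.createConnection(conf)  // one connection per partition, not per record
        val table = connection.getTable(TableName.valueOf("zklog"))
        try {
            val puts = partition.map { case (word, count) =>
                val put = new Put(Bytes.toBytes(word.trim))
                put.addColumn(Bytes.toBytes("zk"), Bytes.toBytes("count"), Bytes.toBytes(count.toString))
                put
            }.toList
            if (puts.nonEmpty) table.put(puts.asJava)              // single batched write per partition
        } finally {
            table.close()
            connection.close()
        }
    }
}

A connection pool shared across batches (also suggested in the guide) would cut the per-partition setup cost further, but even this version keeps all HBase objects on the executors, so nothing HBase-specific has to be serialized from the driver.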

