Integrating Kafka with Spark Streaming to Write Data into HBase
Add the following Maven dependencies:
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-core_2.11</artifactId>
    <version>2.4.3</version>
</dependency>
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-streaming_2.11</artifactId>
    <version>2.4.3</version>
</dependency>
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-streaming-kafka-0-10_2.11</artifactId>
    <version>2.4.3</version>
</dependency>
<dependency>
    <groupId>org.apache.hbase</groupId>
    <artifactId>hbase-common</artifactId>
    <version>2.1.3</version>
</dependency>
<dependency>
    <groupId>org.apache.hbase</groupId>
    <artifactId>hbase-server</artifactId>
    <version>2.1.3</version>
</dependency>
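The code below writes to the table lbz:spark with column family value, which must already exist in HBase. A minimal setup sketch using the HBase 2.x Admin API (a hypothetical helper; the object name CreateTable is not part of the original, and the connection settings mirror the streaming code below):

import org.apache.hadoop.hbase.{HBaseConfiguration, NamespaceDescriptor, TableName}
import org.apache.hadoop.hbase.client.{ColumnFamilyDescriptorBuilder, ConnectionFactory, TableDescriptorBuilder}
object CreateTable {
  def main(args: Array[String]): Unit = {
    val conf = HBaseConfiguration.create()
    conf.set("hbase.zookeeper.quorum", "CentOSA")
    conf.set("hbase.zookeeper.property.clientPort", "2181")
    val conn = ConnectionFactory.createConnection(conf)
    val admin = conn.getAdmin
    admin.createNamespace(NamespaceDescriptor.create("lbz").build()) // namespace "lbz"
    admin.createTable(
      TableDescriptorBuilder.newBuilder(TableName.valueOf("lbz:spark"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("value"))  // column family "value"
        .build())
    admin.close()
    conn.close()
  }
}

Run it once (or create the namespace and table via the HBase shell) before starting the streaming job.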
Spark Streaming code:
package demo2
import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
import org.apache.hadoop.hbase.client.{ConnectionFactory, Put}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka010.{KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.kafka010.ConsumerStrategies._
object test2 {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("test").setMaster("local[3]") // local mode for testing
    val ssc = new StreamingContext(conf, Seconds(1))
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "CentOSA:9092,CentOSB:9092,CentOSC:9092", // your Kafka cluster
      "key.deserializer" -> classOf[StringDeserializer],               // key/value deserializers
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "group",                                           // consumer group id
      "auto.offset.reset" -> "latest",                                 // start from the latest offset
      "enable.auto.commit" -> (false: java.lang.Boolean)               // disable automatic offset commits
    )
    KafkaUtils.createDirectStream[String, String](ssc,                 // direct stream via KafkaUtils
      LocationStrategies.PreferConsistent,
      Subscribe[String, String](Array("topic01"), kafkaParams))
      .map(record => record.value())
      .flatMap(_.split("\\s+"))
      .map((_, 1))
      .reduceByKey(_ + _)
      .foreachRDD(rdd => {
        rdd.foreachPartition(partitionOfRecords => {        // one HBase connection per partition, not per record
          val tableName = "lbz:spark"                       // namespace:table
          val hbaseConf = HBaseConfiguration.create()       // HBase client configuration
          hbaseConf.set("hbase.zookeeper.quorum", "CentOSA")           // ZooKeeper quorum
          hbaseConf.set("hbase.zookeeper.property.clientPort", "2181") // ZooKeeper client port
          val conn = ConnectionFactory.createConnection(hbaseConf)
          val statTable = conn.getTable(TableName.valueOf(tableName))
          partitionOfRecords.foreach(pair => {
            val put = new Put(Bytes.toBytes(pair._1))       // the word is the row key
            put.addColumn(Bytes.toBytes("value"), Bytes.toBytes("count"),
              Bytes.toBytes(pair._2.toString))              // (family, qualifier, value): store the count
            statTable.put(put)
          })
          statTable.close()                                 // release the table and connection
          conn.close()
        })
      })
    ssc.sparkContext.setLogLevel("FATAL") // silence Spark logs during the demo
    ssc.start()
    ssc.awaitTermination()
  }
}
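One remark on throughput (a sketch under the same names as above, not part of the original): statTable.put is called once per record, i.e. one RPC per word. The inner loop can instead buffer the Puts and flush them once per partition, since Table.put also accepts a java.util.List[Put]:

val puts = new java.util.ArrayList[Put]()
partitionOfRecords.foreach { case (word, count) =>
  val put = new Put(Bytes.toBytes(word))
  put.addColumn(Bytes.toBytes("value"), Bytes.toBytes("count"), Bytes.toBytes(count.toString))
  puts.add(put)
}
if (!puts.isEmpty) statTable.put(puts) // one batched RPC per partition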
With this, data is read from Kafka, processed by Spark Streaming, and finally written to HBase.
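Note that with enable.auto.commit set to false the example never commits offsets, so a restarted job falls back to auto.offset.reset instead of resuming where it stopped. A minimal sketch of manual commits with the spark-streaming-kafka-0-10 API (assuming the same ssc, kafkaParams, and topic as above; keep a reference to the stream instead of chaining on it):

import org.apache.spark.streaming.kafka010.{CanCommitOffsets, HasOffsetRanges}
val stream = KafkaUtils.createDirectStream[String, String](ssc,
  LocationStrategies.PreferConsistent,
  Subscribe[String, String](Array("topic01"), kafkaParams))
stream.foreachRDD { rdd =>
  val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
  // ... transform and write the batch to HBase as above ...
  stream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges) // commit only after the write succeeds
}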