Spark: writing a DataFrame to HBase

The code below reads a CSV file from HDFS into a DataFrame, fills nulls, adjusts one column, and writes each row into an HBase table through TableOutputFormat via saveAsNewAPIHadoopDataset.
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.sql.SparkSession
object Hdfs_To_Hbase {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("Spark Write data to HBase")
      .config("spark.debug.maxToStringFields", "100")
      .config("spark.eventLog.enabled", "true")
      .getOrCreate()
    import spark.implicits._
val o_Sense = spark.read.format("csv").
option("sep", ",").
option("inferSchema", "true").
option("header", "true").
load("hdfs://nameservice1/test/test.csv")
val o_Sense2 = o_Sense.na.fill(0, cols=o_Sense.columns.drop(3))
val o_Sense3 = o_Sense2.withColumn("telephone", $"telephone"+1010101010)
o_Sense3.show()
    // HBase connection details come from hbase-site.xml on the classpath;
    // otherwise set hbase.zookeeper.quorum explicitly here
    val hbaseConf = HBaseConfiguration.create()
    hbaseConf.set(TableOutputFormat.OUTPUT_TABLE, "o_Sense")
    // Some Hadoop/Spark combinations insist on an output directory even for
    // TableOutputFormat; /tmp is only a placeholder to satisfy that check
    hbaseConf.set("mapreduce.output.fileoutputformat.outputdir", "/tmp")

    // Configure a Hadoop Job so TableOutputFormat knows its key/value types
    val job = Job.getInstance(hbaseConf)
    job.setOutputKeyClass(classOf[ImmutableBytesWritable])
    job.setOutputValueClass(classOf[Put])
    job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])
    // Convert each Row into a Put: the row key is built from the second and
    // first columns, and the remaining columns land in column family "Sense"
    val rdd1 = o_Sense3.rdd.map { p =>
      val put = new Put(Bytes.toBytes(p(1).toString + "_" + p(0).toString))
      put.addColumn(Bytes.toBytes("Sense"), Bytes.toBytes("live_cells"), Bytes.toBytes(p(2).toString))
      put.addColumn(Bytes.toBytes("Sense"), Bytes.toBytes("value1"), Bytes.toBytes(p(3).toString))
      put.addColumn(Bytes.toBytes("Sense"), Bytes.toBytes("value2"), Bytes.toBytes(p(4).toString))
      // TableOutputFormat ignores the key, so an empty ImmutableBytesWritable works
      (new ImmutableBytesWritable(), put)
    }

    // Write every Put into the o_Sense table
    rdd1.saveAsNewAPIHadoopDataset(job.getConfiguration)
  }
}
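
TableOutputFormat does not create the target table; the table and its column family must exist before the write, or the job fails. A minimal sketch of creating them up front, assuming the HBase 2.x Admin API (on 1.x you would use HTableDescriptor/HColumnDescriptor instead); only the table name "o_Sense" and family "Sense" come from the code above:

import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.{ColumnFamilyDescriptorBuilder, ConnectionFactory, TableDescriptorBuilder}

val conn = ConnectionFactory.createConnection(hbaseConf)
val admin = conn.getAdmin
val tableName = TableName.valueOf("o_Sense")
if (!admin.tableExists(tableName)) {
  // One column family, "Sense", matching the Puts above
  admin.createTable(
    TableDescriptorBuilder.newBuilder(tableName)
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("Sense"))
      .build())
}
admin.close()
conn.close()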
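
To sanity-check the write, the table can be read back through TableInputFormat. A rough sketch, assuming the same SparkSession and table name as above:

import org.apache.hadoop.hbase.client.Result
import org.apache.hadoop.hbase.mapreduce.TableInputFormat

val readConf = HBaseConfiguration.create()
readConf.set(TableInputFormat.INPUT_TABLE, "o_Sense")
val hbaseRdd = spark.sparkContext.newAPIHadoopRDD(
  readConf,
  classOf[TableInputFormat],
  classOf[ImmutableBytesWritable],
  classOf[Result])

// Print a few row keys together with the live_cells column
hbaseRdd.map { case (key, result) =>
  val rowKey = Bytes.toString(key.get())
  val liveCells = Bytes.toString(result.getValue(Bytes.toBytes("Sense"), Bytes.toBytes("live_cells")))
  s"$rowKey -> $liveCells"
}.take(5).foreach(println)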