# 1) Create a table named "test" with a single column family "cf".
#    Run these inside the HBase shell (start it with `hbase shell`).
#    Reference: https://www.w3cschool.cn/hbase_doc/hbase_doc-7hpa2llr.html
#    NOTE: the original used typographic quotes (‘ ’), which the shell rejects;
#    straight single quotes are required. `#` is the shell's comment marker, not `//`.
create 'test', 'cf'
# List tables (use `describe 'test'` for column-family details).
list 'test'
# Insert cells: put <table>, <rowkey>, <family:qualifier>, <value>
put 'test', 'row1', 'cf:a', 'value1'
put 'test', 'row1', 'cf:b', 'value2'
put 'test', 'row2', 'cf:a', 'value21'
put 'test', 'row2', 'cf:b', 'value22'
# Scan every row in the table.
scan 'test'
# Fetch a single row.
get 'test', 'row1'
# Disable and re-enable the table.
disable 'test'
enable 'test'
# Drop the table (it must be disabled first).
disable 'test'
drop 'test'
# 2) Calling HBase from spark-shell.
# 2.1 Copy the HBase client jars somewhere Spark can see them (the exact
#     directory does not matter: SPARK_CLASSPATH below is what counts).
cp hbase* /home/hadoop/app/spark-1.6.1-bin-hadoop2.6/lib
# NOTE(review): the classpath below references guava-12.0.1.jar, but this copies
# 14.0.1 — confirm which guava version your HBase 1.5.0 lib dir actually ships.
cp guava-14.0.1.jar /home/hadoop/app/spark-1.6.1-bin-hadoop2.6/lib
cp htrace-core-3.1.0-incubating.jar /home/hadoop/app/spark-1.6.1-bin-hadoop2.6/lib
cp protobuf-java-2.5.0.jar /home/hadoop/app/spark-1.6.1-bin-hadoop2.6/lib
# 2.2 Sanity-check that the key jars exist before launching.
ls /home/hadoop/app/hive-1.1.0-cdh5.7.0/lib/mysql-connector-java-5.1.45-bin.jar
ls /home/hadoop/app/hbase-1.5.0/lib/hbase-server-1.5.0.jar
ls /home/hadoop/app/hbase-1.5.0/lib/hbase-protocol-1.5.0.jar
ls /home/hadoop/app/hbase-1.5.0/lib/hbase-hadoop2-compat-1.5.0.jar
ls /home/hadoop/app/hbase-1.5.0/lib/hbase-client-1.5.0.jar
ls /home/hadoop/app/hbase-1.5.0/lib/hbase-common-1.5.0.jar
ls /home/hadoop/app/hbase-1.5.0/lib/guava-12.0.1.jar
ls /home/hadoop/app/hbase-1.5.0/lib/htrace-core-3.1.0-incubating.jar
ls /home/hadoop/app/hbase-1.5.0/lib/protobuf-java-2.5.0.jar
ls /home/hadoop/app/hbase-1.5.0/lib/metrics-core-3.1.2.jar
# 2.3 Critical: every jar below must be on the classpath or spark-shell fails.
#     The original export was hard-wrapped across two lines mid-path (which
#     breaks the shell) and listed several jars twice; rebuilt here as one
#     logical command using backslash continuations, de-duplicated, with a
#     variable for the HBase lib directory.
HB=/home/hadoop/app/hbase-1.5.0/lib
export SPARK_CLASSPATH=/home/hadoop/app/hive-1.1.0-cdh5.7.0/lib/mysql-connector-java-5.1.45-bin.jar:\
$HB/hbase-annotations-1.5.0.jar:\
$HB/hbase-hadoop2-compat-1.5.0.jar:\
$HB/hbase-prefix-tree-1.5.0.jar:\
$HB/hbase-server-1.5.0-tests.jar:\
$HB/hbase-annotations-1.5.0-tests.jar:\
$HB/hbase-hadoop-compat-1.5.0.jar:\
$HB/hbase-procedure-1.5.0.jar:\
$HB/hbase-shaded-gson-3.0.0.jar:\
$HB/hbase-client-1.5.0.jar:\
$HB/hbase-hbtop-1.5.0.jar:\
$HB/hbase-protocol-1.5.0.jar:\
$HB/hbase-shell-1.5.0.jar:\
$HB/hbase-common-1.5.0.jar:\
$HB/hbase-it-1.5.0.jar:\
$HB/hbase-resource-bundle-1.5.0.jar:\
$HB/hbase-thrift-1.5.0.jar:\
$HB/hbase-common-1.5.0-tests.jar:\
$HB/hbase-it-1.5.0-tests.jar:\
$HB/hbase-rest-1.5.0.jar:\
$HB/hbase-examples-1.5.0.jar:\
$HB/hbase-metrics-1.5.0.jar:\
$HB/hbase-rsgroup-1.5.0.jar:\
$HB/hbase-external-blockcache-1.5.0.jar:\
$HB/hbase-metrics-api-1.5.0.jar:\
$HB/hbase-server-1.5.0.jar:\
$HB/guava-12.0.1.jar:\
$HB/htrace-core-3.1.0-incubating.jar:\
$HB/protobuf-java-2.5.0.jar:\
$HB/metrics-core-3.1.2.jar:\
$HB/metrics-core-2.2.0.jar
# 2.4 Launch spark-shell against the standalone master.
spark-shell --master spark://localhost:7077 --executor-memory 1g --total-executor-cores 2
// 3 导入所有需要的文件
import java.io.IOException
import org.apache.spark.rdd.NewHadoopRDD
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hbase.client.{HBaseAdmin, Put, Result}
import org.apache.hadoop.hbase.HColumnDescriptor
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase._
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles
import org.apache.hadoop.hbase.{HBaseConfiguration, HColumnDescriptor, HTableDescriptor, TableName}
import org.apache.hadoop.hbase.client._
import org.apache.hadoop.hbase.client.HBaseAdmin
import org.apache.hadoop.hbase.client.HTable
import org.apache.hadoop.hbase.client.Scan
import org.apache.hadoop.hbase.client.Get
import org.apache.hadoop.hbase.protobuf.ProtobufUtil
import org.apache.hadoop.hbase.util.{Base64,Bytes}
import org.apache.hadoop.hbase.KeyValue
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.util.JsonMapper
import org.apache.commons.codec.digest.DigestUtils
import org.apache.hadoop.hbase.metrics
import org.apache.spark._
// 3.1) Scan the "test" table through a Spark RDD and count its rows.
// Reference: https://www.cnblogs.com/zzhangyuhang/p/9018739.html
// NOTE: the original used typographic quotes (“ ”), which do not compile;
// straight double quotes are required throughout.
val conf = HBaseConfiguration.create()
conf.set(TableInputFormat.INPUT_TABLE, "test")
val stuRdd = sc.newAPIHadoopRDD(conf, classOf[TableInputFormat],
  classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
  classOf[org.apache.hadoop.hbase.client.Result])
stuRdd.cache() // cached because the RDD is reused by count() below
// Number of rows currently in the table.
val count = stuRdd.count()
println("Student rdd count:" + count)
// 3.2) Write a single record.
// NOTE(review): the original used `table` without ever defining it; create the
// client handle here. (HTable(conf, name) is the HBase 1.x-era constructor;
// ConnectionFactory.createConnection(conf).getTable(...) is the modern form.)
val table = new HTable(conf, "test")
val i = 3
val put = new Put(Bytes.toBytes("row" + i))
// column family: cf, qualifier: a, value: "value" + i
// (Put.add is deprecated in HBase 1.x in favour of Put.addColumn, same args.)
put.add(Bytes.toBytes("cf"), Bytes.toBytes("a"), Bytes.toBytes("value" + i))
table.put(put)
// 3.3) Write rows row5..row100 in a loop.
// Assumes an HTable named `table` is in scope,
// e.g. val table = new HTable(conf, "test")
for (i <- 5 to 100) {
  val put = new Put(Bytes.toBytes("row" + i))
  // cf:a -> "value" + i
  put.add(Bytes.toBytes("cf"), Bytes.toBytes("a"), Bytes.toBytes("value" + i))
  // cf:b -> "value" + (i + 1).
  // NOTE(review): the original wrote "value" + i + 1, which string-concatenates
  // the literal "1" (e.g. "value51" for i = 5); (i + 1) — yielding "value6" —
  // appears to be the intent. Confirm against the expected data.
  put.add(Bytes.toBytes("cf"), Bytes.toBytes("b"), Bytes.toBytes("value" + (i + 1)))
  table.put(put)
}
// 3.4) Build a Scan restricted to column family "cf" and serialize it into the
// configuration so TableInputFormat applies it to subsequent RDD reads.
// (The original also created a throwaway `hbaseRdd` here that its own comment
// called useless and that was immediately shadowed below; it has been removed.)
val scan = new Scan()
scan.addFamily(Bytes.toBytes("cf"))
// TableInputFormat.SCAN expects the protobuf-serialized scan, base64-encoded.
val proto = ProtobufUtil.toScan(scan)
val scanToString = Base64.encodeBytes(proto.toByteArray())
conf.set(TableInputFormat.SCAN, scanToString)
// 3.5) Read the full dataset (honouring the SCAN set on `conf`) and print
// each row as "<rowkey>:<cf:a value>" on the driver.
val hbaseRdd = sc.newAPIHadoopRDD(
  conf,
  classOf[TableInputFormat],
  classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
  classOf[org.apache.hadoop.hbase.client.Result]
)
val datas = hbaseRdd
  .map(_._2) // keep only the Result; the key is the row in writable form
  .map(result => (result.getRow, result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("a"))))
  // NOTE(review): getValue returns null when a row lacks cf:a, so new String(...)
  // would NPE on sparse rows — TODO guard with Option if the table can be sparse.
  .map(row => (new String(row._1), new String(row._2)))
  .collect
  .foreach(r => println(r._1 + ":" + r._2))