Big Data: HBase Java API

HBase API

The most commonly used official API is the Java API

  • Supports the complete set of HBase commands

External APIs: REST, Scala, Python, etc.

  • http://hbase.apache.org/book.html#external_apis
API type and documentation:

  • Java: https://hbase.apache.org/apidocs/
  • Python: https://happybase.readthedocs.org/en/latest/
  • Scala: through Java/Spark
  • Thrift: https://wiki.apache.org/hadoop/Hbase/ThriftApi
  • REST: http://blog.cloudera.com/blog/2013/03/how-to-use-the-apache-hbase-rest-interface-part-1/

HBase Java API

Operating HBase with the Java API

  • Create a Maven project and add the dependencies

  • Develop the code with the Java API

 Create, view, and delete tables
 Insert, view, and delete data

Package the code, upload the JAR, and test

// Get a client Admin handle
Configuration config = HBaseConfiguration.create();
config.addResource(new Path("/etc/hbase/conf/hbase-site.xml"));
config.addResource(new Path("/etc/hadoop/conf/core-site.xml"));
Connection connect = ConnectionFactory.createConnection(config);
Admin admin = connect.getAdmin();
// Perform table operations
admin.createTable()
admin.disableTable()
admin.deleteTable()
admin.listTables()
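
A table must be disabled before it can be deleted; calling deleteTable() on an enabled table throws an exception. A minimal sketch, continuing from the admin handle above (the table name "hello" is only an example):

// Disable, then drop, a table: HBase rejects deleteTable() on an enabled table
TableName tn = TableName.valueOf("hello");
if (admin.tableExists(tn)) {
    if (admin.isTableEnabled(tn)) {
        admin.disableTable(tn);
    }
    admin.deleteTable(tn);
}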

HBase REST API

Operating HBase with the REST API

  • Start/stop the REST service
./hbase-daemon.sh start rest -p 9081
./hbase-daemon.sh stop rest -p 9081
http://localhost:9081/version
http://localhost:9081/<table_name>/schema
http://localhost:9081/<table_name>/<row_key>
[root@hadoop100 ~]# hbase-daemon.sh start rest -p 9081
[root@hadoop100 ~]# jps

Verify the REST service in a browser:

192.168.136.100:9081
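
The endpoints can also be smoke-tested from Java using only the JDK's built-in HttpURLConnection, so no extra dependency is needed. A sketch; the host and port assume the REST server started above:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class RestVersionCheck {
    public static void main(String[] args) throws Exception {
        // GET /version from the REST server started above
        URL url = new URL("http://192.168.136.100:9081/version");
        HttpURLConnection http = (HttpURLConnection) url.openConnection();
        http.setRequestMethod("GET");
        http.setRequestProperty("Accept", "application/json");
        try (BufferedReader in = new BufferedReader(
                new InputStreamReader(http.getInputStream()))) {
            String line;
            while ((line = in.readLine()) != null) {
                System.out.println(line);
            }
        }
        http.disconnect();
    }
}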

Implementing HBase Operations in Java

Creating a Table

  • Create a Maven project
  • Add the dependency
<dependency>
    <groupId>org.apache.hbase</groupId>
    <artifactId>hbase-client</artifactId>
    <version>1.2.0</version>
</dependency>
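
Note: hbase-client 1.2.0 matches the HBase 1.x API used in the code below; HTableDescriptor and HColumnDescriptor were deprecated in HBase 2.x in favor of builder-style descriptors, so the client version should match your cluster.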

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

import java.io.IOException;

public class CreateTable {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // The connection settings can be set directly...
        //conf.set("hbase.zookeeper.quorum", "192.168.136.100");
        //conf.set("hbase.zookeeper.property.clientPort", "2181");
        //conf.set("hbase.master", "192.168.136.100:16000");
        // ...or loaded from the cluster configuration files:
        conf.addResource(new Path("/opt/hbase/conf/hbase-site.xml"));
        conf.addResource(new Path("/opt/hadoop/etc/hadoop/core-site.xml"));
        Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin();
        // args[0] is the table name; args[1] and args[2] are column families
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(args[0]));
        htd.addFamily(new HColumnDescriptor(args[1]));
        htd.addFamily(new HColumnDescriptor(args[2]));
        admin.createTable(htd);
        admin.close();
        conn.close();
    }
}
  • Build the JAR
  • Copy the JAR to the home directory
  • Configure hbase-site.xml
[root@hadoop100 opt]# vi hbase/conf/hbase-site.xml
// change the property value from false to true
true

[root@hadoop100 ~]# hadoop jar /root/testhbase.jar cn.kgc.kb09.test.CreateTable "hello" "name" "addr"

  • No errors at this point means the table was created successfully

Inserting Data

  • The HBaseConfs class
package cn.kgc.kb09.test.util;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

import java.io.IOException;

public class HBaseConfs {
    // Utility class: private constructor prevents instantiation
    private HBaseConfs(){}

    // Build a Configuration from the cluster's configuration files
    private static Configuration getconf() {
        Configuration conf = HBaseConfiguration.create();
        conf.addResource(new Path("/opt/hbase/conf/hbase-site.xml"));
        conf.addResource(new Path("/opt/hadoop/etc/hadoop/core-site.xml"));
        return conf;
    }

    // Open a Connection to the cluster
    public static Connection getConn(){
        Connection conn = null;
        try {
            conn = ConnectionFactory.createConnection(getconf());
        } catch (IOException e) {
            e.printStackTrace();
        }
        return conn;
    }

    // Get an Admin handle from a fresh Connection
    public static Admin getadmin(){
        Admin admin = null;
        try {
            admin = getConn().getAdmin();
        } catch (IOException e) {
            e.printStackTrace();
        }
        return admin;
    }
}
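
A design note on this helper: ConnectionFactory.createConnection() builds a heavyweight object (ZooKeeper session, RPC connection pool), and getadmin() opens a fresh Connection on every call without closing it. That is tolerable for the short-lived command-line tools below, but a long-running application should create one Connection, share it, and close it on shutdown.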

The CreateTable class

package cn.kgc.kb09.test;

import cn.kgc.kb09.test.util.HBaseConfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

import java.io.IOException;

public class CreateTable {
    public static void main(String[] args) {
        // Alternative: build the Configuration and Connection by hand
        //Configuration conf = HBaseConfiguration.create();
        //conf.set("hbase.zookeeper.quorum", "192.168.136.100");
        //conf.set("hbase.zookeeper.property.clientPort", "2181");
        //conf.set("hbase.master", "192.168.136.100:16000");
        //conf.addResource(new Path("/opt/hbase/conf/hbase-site.xml"));
        //conf.addResource(new Path("/opt/hadoop/etc/hadoop/core-site.xml"));
        //Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = HBaseConfs.getadmin();
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(args[0]));
        for (int i = 1; i < args.length; i++) {
            HColumnDescriptor family = new HColumnDescriptor(args[i]);
            htd.addFamily(family);
        }
        //HColumnDescriptor add = new HColumnDescriptor(args[1]);
        //HColumnDescriptor time = new HColumnDescriptor(args[2]);
        //htd.addFamily(add);
        //htd.addFamily(time);
        try {
            admin.createTable(htd);
        } catch (IOException e) {
            e.printStackTrace();
        }finally {
            try {
                admin.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}
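
Because the loop treats every argument after the first as a column family, the same JAR can now create tables with any number of families, e.g. hadoop jar /root/testhbase.jar cn.kgc.kb09.test.CreateTable hello name addr (table and family names here are only examples).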

The InsertTable class

package cn.kgc.kb09.test;

import cn.kgc.kb09.test.util.HBaseConfs;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class InsertTable {
    public static void main(String[] args) throws Exception{
        Connection conn = HBaseConfs.getConn();
        Admin admin = HBaseConfs.getadmin();
        TableName[] tableNames = admin.listTableNames();
        for (TableName tableName : tableNames) {
            System.out.println(tableName.getNameAsString());
        }
        Table table = conn.getTable(TableName.valueOf("hello"));
        String[][] values = {
                {"1","胡", "哥", "1st White House", "WDC"},
                {"2","诸葛", "正我", "10th 唐宁街", "London"},
                {"3","赵", "丽颖", "111th 中南海", "Beijing"},
                {"4","百度","一下","地下核基地","PingRang"}
        };
        for (int i = 0; i < values.length; i++) {
            Put put = new Put(values[i][0].getBytes());
            put.addColumn("name".getBytes(), "fname".getBytes(), values[i][1].getBytes());
            put.addColumn("name".getBytes(), "lname".getBytes(), values[i][2].getBytes());
            put.addColumn("addr".getBytes(), "address".getBytes(), values[i][3].getBytes());
            put.addColumn("addr".getBytes(), "city".getBytes(), values[i][4].getBytes());
            table.put(put);
        }
        table.close();
        admin.close();
        conn.close();
    }
}
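
A small performance note: table.put(put) inside the loop issues one RPC per row. Table also accepts a List<Put>, so collecting the Puts and calling table.put(list) once would batch them into a single round trip; it makes no difference for four rows, but it matters at scale.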
  • Build the JAR
  • Copy the JAR to the home directory
  • Insert the data
[root@hadoop100 ~]# hadoop jar /root/testhbase.jar cn.kgc.kb09.test.InsertTable
  • Reaching this point without errors means the data was inserted successfully
  • View the data (a read-side sketch follows below)
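
The inserted rows can be checked with the HBase shell, but since this section is about the Java API, here is a minimal read-side sketch under the same assumptions as above (the hello table and the HBaseConfs helper):

package cn.kgc.kb09.test;

import cn.kgc.kb09.test.util.HBaseConfs;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanTable {
    public static void main(String[] args) throws Exception {
        Connection conn = HBaseConfs.getConn();
        Table table = conn.getTable(TableName.valueOf("hello"));
        // Full-table scan; use setStartRow()/setStopRow() to narrow the range
        Scan scan = new Scan();
        ResultScanner scanner = table.getScanner(scan);
        for (Result result : scanner) {
            for (Cell cell : result.rawCells()) {
                System.out.println(Bytes.toString(CellUtil.cloneRow(cell))
                        + " " + Bytes.toString(CellUtil.cloneFamily(cell))
                        + ":" + Bytes.toString(CellUtil.cloneQualifier(cell))
                        + " = " + Bytes.toString(CellUtil.cloneValue(cell)));
            }
        }
        scanner.close();
        table.close();
        conn.close();
    }
}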
