HBase Java Client Implementation

HBase Installation

Pseudo-Distributed Installation

Download and Extract

Download page: http://hbase.apache.org/downloads.html
Extract command:

	tar -xzvf hbase-1.4.4.tar.gz -C targetDir

Configuration Changes

hbase-env.sh

Add the JAVA_HOME and HBASE_HOME environment variables to hbase-env.sh.
If there is no separate ZooKeeper installation, you can let HBase manage its own ZooKeeper:

export HBASE_MANAGES_ZK=true
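
Put together, the additions to hbase-env.sh look roughly like this (the JDK and HBase paths below are assumptions; point them at your own installation):

	export JAVA_HOME=/usr/lib/jvm/java-8-openjdk
	export HBASE_HOME=/opt/hbase-1.4.4
	export HBASE_MANAGES_ZK=true
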
hbase-site.xml
<configuration>
	<!-- Storage directory -->
	<property>
		<name>hbase.rootdir</name>
		<value>file:///root/hbase/data</value>
		<description>The directory shared by region servers.</description>
	</property>
	<!-- ZooKeeper session timeout -->
	<property>
		<name>zookeeper.session.timeout</name>
		<value>120000</value>
	</property>
	<property>
		<name>hbase.tmp.dir</name>
		<value>/tmp/hbase/tmp</value>
	</property>
	<!-- false = standalone mode, true = distributed mode -->
	<property>
		<name>hbase.cluster.distributed</name>
		<value>true</value>
	</property>
</configuration>
regionservers

Add the list of region server hosts to the regionservers file.
Here I only added hbase-host, and added an IP mapping for hbase-host to /etc/hosts.
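
A sketch of the two files for this single-node setup. conf/regionservers:

	hbase-host

/etc/hosts (the IP address is an assumption; use your own host's address):

	192.168.8.104   hbase-host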

Start

Start command:

cd $HBASE_HOME/bin
./start-hbase.sh	

hbase shell client

./hbase shell
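
Inside the shell, a couple of commands are enough to verify that the master and region server are up, for example:

	status
	list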

Stop HBase command:

./stop-hbase.sh

Java Client

pom.xml

<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-client</artifactId>
    <version>2.4.2</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.hbase/hbase-client -->
<dependency>
    <groupId>org.apache.hbase</groupId>
    <artifactId>hbase-client</artifactId>
    <version>1.5.2</version>
</dependency>

Note: even when hbase.rootdir is a local path such as file:///root/hbase/data, HBase does not read and write the data files directly itself, so the Hadoop jars are still required to support HBase's read and write operations.

Add HBASE_CONF_DIR to the system environment, pointing at $HBASE_HOME/conf, so that the settings in hbase-site.xml are loaded into the client Configuration:

this.config = HBaseConfiguration.create();
if (System.getProperty("HBASE_CONF_DIR") != null) {
    config.addResource(new Path(System.getProperty("HBASE_CONF_DIR"), "hbase-site.xml"));
} else {
    config.addResource(new Path(System.getenv("HBASE_CONF_DIR"), "hbase-site.xml"));
//  logger.info("HBASE_CONF_DIR : {}", System.getenv("HBASE_CONF_DIR"));
    File p = new File(System.getenv("HBASE_CONF_DIR") + "/" + "hbase-site.xml");
//  logger.info("HBASE_CONF_DIR : file {}", p.exists());
}
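
The snippet above looks for HBASE_CONF_DIR first as a JVM system property and then as an environment variable, so either of the following makes the configuration visible to the client (the conf path is an assumption; point it at your own $HBASE_HOME/conf):

	export HBASE_CONF_DIR=$HBASE_HOME/conf
	java -DHBASE_CONF_DIR=$HBASE_HOME/conf -cp ... HBaseClient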

Code Implementation

/**
 * Note:
 * 1. Even if the HBase data is stored locally, with
 *    {hbase.rootdir} set to "file:///Users/cwiz/hbase/data",
 *    a compatible Hadoop jar is still needed to perform
 *    the actual storage operations.
 */

public class HBaseClient implements Closeable {

    protected Connection connection;
    protected Configuration config;

    protected String tableName = "javaClient";
    protected String cf_create = "create";
    protected String cf_delete = "delete";
    {
        this.config = HBaseConfiguration.create();
//        config.set("hbase.zookeeper.quorum", "192.168.8.104");
//        config.set("hbase.zookeeper.property.clientPort", "2181");
        if (System.getProperty("HBASE_CONF_DIR") != null) {
            config.addResource(new Path(System.getProperty("HBASE_CONF_DIR"), "hbase-site.xml"));
        } else {
            config.addResource(new Path(System.getenv("HBASE_CONF_DIR"), "hbase-site.xml"));
//            logger.info("HBASE_CONF_DIR : {}", System.getenv("HBASE_CONF_DIR"));
            File p = new File(System.getenv("HBASE_CONF_DIR")+"/"+ "hbase-site.xml");
//            logger.info("HBASE_CONF_DIR : file {}", p.exists() );
        }

        try {
            this.connection  = ConnectionFactory.createConnection(this.config);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public HBaseClient(){
    }

    public void createTable(String tableName, Configuration conf, String... columnFamilyNames) {
        System.out.println("start create table " + tableName);
        // HBaseAdmin/HTableDescriptor are the pre-2.0 admin APIs; they are deprecated but still work with hbase-client 1.x.
        // try-with-resources makes sure the admin connection is closed on every exit path.
        try (HBaseAdmin hBaseAdmin = new HBaseAdmin(conf)) {
            if (hBaseAdmin.tableExists(tableName)) {
                System.out.println(tableName + " already exists");
                //hBaseAdmin.disableTable(tableName);
                //hBaseAdmin.deleteTable(tableName);
                return;
            }
            HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
            for (String columnFamilyName : columnFamilyNames) {
                tableDescriptor.addFamily(new HColumnDescriptor(columnFamilyName));
            }
            hBaseAdmin.createTable(tableDescriptor);
        } catch (Exception ex) {
            ex.printStackTrace();
        }
        System.out.println("end create table " + tableName);
    }

    public TableName getTableName(String tableName){
        return TableName.valueOf(tableName);
    }

    public Table getTable(TableName tableName){
        try {
            return this.connection.getTable(tableName);
        } catch (IOException e) {
            e.printStackTrace();
        }
        return null;
    }
    public void write(String tableName, String rowkey, Map<String, Object> keyValues) throws IOException {
        Table table = this.getTable(getTableName(tableName));
        Put put = generatePut(rowkey, keyValues);
        try {
            table.put(put);
        } finally {
            table.close();   // Table instances are lightweight but should be closed after use
        }
    }

    public Put generatePut(String rowkey, Map<String, Object> keyValues){
        Put put = new Put(Bytes.toBytes(rowkey));
        ObjectMapper mapper = new ObjectMapper();
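        // Each key is expected in "columnFamily:qualifier" form; values are JSON-serialized before being stored.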
        for(Map.Entry<String,Object> entry : keyValues.entrySet()){
            String[] cols = entry.getKey().split(":");
            try {
                String value  = mapper.writeValueAsString( entry.getValue());
                put.addColumn(Bytes.toBytes(cols[0]), Bytes.toBytes(cols[1]), Bytes.toBytes( value ));
            } catch (JsonProcessingException e) {
                e.printStackTrace();
            }
        }

        //System.out.println(put.toString());
        return put;
    }

    public Put generatePut(String rowkey, String columnFamily, Map<String, String> keyValues){
        Put put = new Put(Bytes.toBytes(rowkey));
        for(Map.Entry<String,String> entry : keyValues.entrySet()){
            put.addColumn(Bytes.toBytes(columnFamily), Bytes.toBytes(entry.getKey()), Bytes.toBytes(entry.getValue()));
        }
        //System.out.println(put.toString());
        return put;
    }

    public boolean isTableExists(TableName tableName) throws IOException {
        try (Admin admin = this.connection.getAdmin()) {
            return admin.tableExists(tableName);
        }
    }

    public Result read(String tableName , String rowkey){
        Get get = new Get(Bytes.toBytes(rowkey));
        get.addFamily(Bytes.toBytes(cf_create));
        Table table = getTable(getTableName(tableName));
        Result result = null;
        try {
            result = table.get(get);
            table.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
        ObjectMapper mapper = new ObjectMapper();
        ObjectWriter writer = mapper.writerWithDefaultPrettyPrinter();
        try {
            System.out.println(writer.writeValueAsString(result));
        } catch (JsonProcessingException e) {
            e.printStackTrace();
        }
        return result;
    }
    public void close() {
        try {
            this.connection.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    public static void main(String[] args) {
        HBaseClient hBaseClient = new HBaseClient();
        hBaseClient.createTable(hBaseClient.tableName,
                hBaseClient.config ,
                hBaseClient.cf_create,
                hBaseClient.cf_delete);
        Map<String, Object>  keyValue = new HashMap<String, Object>();
        keyValue.put(hBaseClient.cf_create+":c1", "v1");
        keyValue.put(hBaseClient.cf_create+":c2", "v2");
        String rowkey = "row1";
        try {
            hBaseClient.write(hBaseClient.tableName , rowkey, keyValue);
        } catch (IOException e) {
            e.printStackTrace();
        }

        Result result= hBaseClient.read(hBaseClient.tableName, rowkey);
        System.out.println();
        System.out.println("result :" + result );

        System.out.println(Bytes.toString(result.getRow()));
        System.out.println("rawCells");
        Cell[] cells = result.rawCells();
        for(Cell cell : cells){
            System.out.println(Bytes.toString(CellUtil.cloneRow(cell)) +" , "
                    + Bytes.toString(CellUtil.cloneFamily(cell)) +" , "
                    + Bytes.toString(CellUtil.cloneQualifier(cell)) +" , "
                    + Bytes.toString(CellUtil.cloneValue(cell)) +" , "
                    + cell.getTimestamp());
        }
//        CellScanner scanner = CellUtil.createCellScanner(cells)
    }
}
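
The class above only reads single rows with Get; range reads use Scan over the same Connection. A minimal sketch of such a helper, under the same setup (the method name is hypothetical and not part of the original class; it additionally needs imports for org.apache.hadoop.hbase.client.Scan and org.apache.hadoop.hbase.client.ResultScanner):

    // Hypothetical helper: print every cell of one column family across all rows of a table.
    public void scanFamily(String tableName, String columnFamily) throws IOException {
        Scan scan = new Scan();
        scan.addFamily(Bytes.toBytes(columnFamily));              // limit the scan to one column family
        try (Table table = connection.getTable(TableName.valueOf(tableName));
             ResultScanner scanner = table.getScanner(scan)) {    // both are Closeable
            for (Result row : scanner) {                          // rows are fetched lazily in batches
                for (Cell cell : row.rawCells()) {
                    System.out.println(Bytes.toString(CellUtil.cloneRow(cell)) + " , "
                            + Bytes.toString(CellUtil.cloneQualifier(cell)) + " , "
                            + Bytes.toString(CellUtil.cloneValue(cell)));
                }
            }
        }
    }

For the sample data written in main(), calling hBaseClient.scanFamily(hBaseClient.tableName, hBaseClient.cf_create) would print both cells stored under row1.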