Basic HBase table management and access

1. Import the JARs that HBase depends on:

hadoop-core-1.1.2.jar

zookeeper-3.4.5.jar

hbase-0.94.20-tests.jar

hbase-0.94.20.jar

plus the JARs under the lib directory of the HBase installation.

2. The program code is as follows:

package com.hadoop.hbase;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Basic HBase table management and access.
 * 
 * @author root 2014-6-27
 */
public class ExampleClient {

	private static HBaseAdmin admin;
	private static HTable table;

	public static void main(String[] args) throws Exception {
		Configuration config = HBaseConfiguration.create();

		// create a table named "test" with a single column family "data"
		admin = new HBaseAdmin(config);
		HTableDescriptor htd = new HTableDescriptor("test");
		HColumnDescriptor hcd = new HColumnDescriptor("data");
		htd.addFamily(hcd);
		admin.createTable(htd);

		// verify the table was created (assumes "test" is the only table in the cluster)
		byte[] tablename = htd.getName();
		HTableDescriptor[] tables = admin.listTables();
		if (tables.length != 1 || !Bytes.equals(tablename, tables[0].getName())) {
			throw new Exception("Failed create of table");
		}

		// insert one cell: row "row1", family "data", qualifier "1", value "values"
		table = new HTable(config, tablename);
		byte[] row1 = Bytes.toBytes("row1");
		Put p1 = new Put(row1);
		byte[] databytes = Bytes.toBytes("data");
		p1.add(databytes, Bytes.toBytes("1"), Bytes.toBytes("values"));
		table.put(p1);

		// read the row back with a Get
		Get g = new Get(row1);
		Result result = table.get(g);
		System.out.println("GET:" + result);

		// scan the whole table
		Scan scan = new Scan();
		ResultScanner scanner = table.getScanner(scan);
		try {
			for (Result scannerResult : scanner) {
				System.out.println("Scan:" + scannerResult);
			}
		} finally {
			scanner.close();
		}

		// clean up: close the table, then disable and drop it
		table.close();
		admin.disableTable(tablename);
		admin.deleteTable(tablename);
		admin.close();
	}
}
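Note: the GET and Scan statements above only print the Result object's default toString(), which shows raw KeyValue metadata. A minimal illustrative sketch of how the stored value itself could be read back from the Result (this snippet is not part of the original program and assumes the same "data" family and "1" qualifier used above):

		// extract the cell value from the Result returned by table.get(g)
		byte[] value = result.getValue(Bytes.toBytes("data"), Bytes.toBytes("1"));
		System.out.println("value = " + Bytes.toString(value)); // expected to print "values"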


3. Output like the following indicates the program ran correctly:

14/06/27 18:34:51 INFO zookeeper.ZooKeeper: Client environment:zookeeper.version=3.4.5-1392090, built on 09/30/2012 17:52 GMT
14/06/27 18:34:51 INFO zookeeper.ZooKeeper: Client environment:host.name=znb
14/06/27 18:34:51 INFO zookeeper.ZooKeeper: Client environment:java.version=1.8.0_05
14/06/27 18:34:51 INFO zookeeper.ZooKeeper: Client environment:java.vendor=Oracle Corporation
14/06/27 18:34:51 INFO zookeeper.ZooKeeper: Client environment:java.home=/usr/lib/java/jdk1.8/jre
14/06/27 18:34:51 INFO zookeeper.ZooKeeper: Client environment:java.class.path=/home/znb/workspace/hadoop/hadoop/bin:/home/znb/workspace/hadoop/hadoop/lib/activation-1.1.jar:/home/znb/workspace/hadoop/hadoop/lib/asm-3.1.jar:/home/znb/workspace/hadoop/hadoop/lib/avro-1.5.3.jar:/home/znb/workspace/hadoop/hadoop/lib/avro-ipc-1.5.3.jar:/home/znb/workspace/hadoop/hadoop/lib/commons-beanutils-1.7.0.jar:/home/znb/workspace/hadoop/hadoop/lib/commons-beanutils-core-1.8.0.jar:/home/znb/workspace/hadoop/hadoop/lib/commons-cli-1.2.jar:/home/znb/workspace/hadoop/hadoop/lib/commons-codec-1.4.jar:/home/znb/workspace/hadoop/hadoop/lib/commons-collections-3.2.1.jar:/home/znb/workspace/hadoop/hadoop/lib/commons-configuration-1.6.jar:/home/znb/workspace/hadoop/hadoop/lib/commons-digester-1.8.jar:/home/znb/workspace/hadoop/hadoop/lib/commons-el-1.0.jar:/home/znb/workspace/hadoop/hadoop/lib/commons-httpclient-3.1.jar:/home/znb/workspace/hadoop/hadoop/lib/commons-io-2.1.jar:/home/znb/workspace/hadoop/hadoop/lib/commons-lang-2.5.jar:/home/znb/workspace/hadoop/hadoop/lib/commons-logging-1.1.1.jar:/home/znb/workspace/hadoop/hadoop/lib/commons-math-2.1.jar:/home/znb/workspace/hadoop/hadoop/lib/commons-net-1.4.1.jar:/home/znb/workspace/hadoop/hadoop/lib/core-3.1.1.jar:/home/znb/workspace/hadoop/hadoop/lib/guava-11.0.2.jar:/home/znb/workspace/hadoop/hadoop/lib/hadoop-core-1.1.2.jar:/home/znb/workspace/hadoop/hadoop/lib/hamcrest-core-1.3.jar:/home/znb/workspace/hadoop/hadoop/lib/hbase-0.94.20-tests.jar:/home/znb/workspace/hadoop/hadoop/lib/hbase-0.94.20.jar:/home/znb/workspace/hadoop/hadoop/lib/high-scale-lib-1.1.1.jar:/home/znb/workspace/hadoop/hadoop/lib/httpclient-4.1.2.jar:/home/znb/workspace/hadoop/hadoop/lib/httpcore-4.1.3.jar:/home/znb/workspace/hadoop/hadoop/lib/jackson-core-asl-1.8.8.jar:/home/znb/workspace/hadoop/hadoop/lib/jackson-jaxrs-1.8.8.jar:/home/znb/workspace/hadoop/hadoop/lib/jackson-mapper-asl-1.8.8.jar:/home/znb/workspace/hadoop/hadoop/lib/jackson-xc-1.8.8.jar:/home/znb/workspace/hadoop/hadoop/lib/jamon-runtime-2.3.1.jar:/home/znb/workspace/hadoop/hadoop/lib/jasper-compiler-5.5.23.jar:/home/znb/workspace/hadoop/hadoop/lib/jasper-runtime-5.5.23.jar:/home/znb/workspace/hadoop/hadoop/lib/jaxb-api-2.1.jar:/home/znb/workspace/hadoop/hadoop/lib/jaxb-impl-2.2.3-1.jar:/home/znb/workspace/hadoop/hadoop/lib/jersey-core-1.8.jar:/home/znb/workspace/hadoop/hadoop/lib/jersey-json-1.8.jar:/home/znb/workspace/hadoop/hadoop/lib/jersey-server-1.8.jar:/home/znb/workspace/hadoop/hadoop/lib/jettison-1.1.jar:/home/znb/workspace/hadoop/hadoop/lib/jetty-6.1.26.jar:/home/znb/workspace/hadoop/hadoop/lib/jetty-util-6.1.26.jar:/home/znb/workspace/hadoop/hadoop/lib/jruby-complete-1.6.5.jar:/home/znb/workspace/hadoop/hadoop/lib/jsp-2.1-6.1.14.jar:/home/znb/workspace/hadoop/hadoop/lib/jsp-api-2.1-6.1.14.jar:/home/znb/workspace/hadoop/hadoop/lib/jsr305-1.3.9.jar:/home/znb/workspace/hadoop/hadoop/lib/junit-4.11.jar:/home/znb/workspace/hadoop/hadoop/lib/libthrift-0.8.0.jar:/home/znb/workspace/hadoop/hadoop/lib/log4j-1.2.16.jar:/home/znb/workspace/hadoop/hadoop/lib/metrics-core-2.1.2.jar:/home/znb/workspace/hadoop/hadoop/lib/netty-3.2.4.Final.jar:/home/znb/workspace/hadoop/hadoop/lib/protobuf-java-2.4.0a.jar:/home/znb/workspace/hadoop/hadoop/lib/servlet-api-2.5-6.1.14.jar:/home/znb/workspace/hadoop/hadoop/lib/slf4j-api-1.4.3.jar:/home/znb/workspace/hadoop/hadoop/lib/slf4j-log4j12-1.4.3.jar:/home/znb/workspace/hadoop/hadoop/lib/snappy-java-1.0.3.2.jar:/home/znb/workspace/hadoop/hadoop/lib/stax-api-1.0.1.jar:/home/znb/workspace/hadoop
/hadoop/lib/velocity-1.7.jar:/home/znb/workspace/hadoop/hadoop/lib/xmlenc-0.52.jar:/home/znb/workspace/hadoop/hadoop/lib/zookeeper-3.4.5.jar
14/06/27 18:34:51 INFO zookeeper.ZooKeeper: Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib
14/06/27 18:34:51 INFO zookeeper.ZooKeeper: Client environment:java.io.tmpdir=/tmp
14/06/27 18:34:51 INFO zookeeper.ZooKeeper: Client environment:java.compiler=<NA>
14/06/27 18:34:51 INFO zookeeper.ZooKeeper: Client environment:os.name=Linux
14/06/27 18:34:51 INFO zookeeper.ZooKeeper: Client environment:os.arch=amd64
14/06/27 18:34:51 INFO zookeeper.ZooKeeper: Client environment:os.version=3.11.0-23-generic
14/06/27 18:34:51 INFO zookeeper.ZooKeeper: Client environment:user.name=root
14/06/27 18:34:51 INFO zookeeper.ZooKeeper: Client environment:user.home=/root
14/06/27 18:34:51 INFO zookeeper.ZooKeeper: Client environment:user.dir=/home/znb/workspace/hadoop/hadoop
14/06/27 18:34:51 INFO zookeeper.ZooKeeper: Initiating client connection, connectString=localhost:2181 sessionTimeout=180000 watcher=hconnection
14/06/27 18:34:51 INFO zookeeper.RecoverableZooKeeper: The identifier of this process is 14511@znb
14/06/27 18:34:51 INFO zookeeper.ClientCnxn: Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error)
14/06/27 18:34:51 INFO zookeeper.ClientCnxn: Socket connection established to localhost/127.0.0.1:2181, initiating session
14/06/27 18:34:51 INFO zookeeper.ClientCnxn: Session establishment complete on server localhost/127.0.0.1:2181, sessionid = 0x146dca9d17c0016, negotiated timeout = 180000
14/06/27 18:34:52 INFO zookeeper.ZooKeeper: Initiating client connection, connectString=localhost:2181 sessionTimeout=180000 watcher=catalogtracker-on-org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation@26aa12dd
14/06/27 18:34:52 INFO zookeeper.ClientCnxn: Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error)
14/06/27 18:34:52 INFO zookeeper.ClientCnxn: Socket connection established to localhost/127.0.0.1:2181, initiating session
14/06/27 18:34:52 INFO zookeeper.RecoverableZooKeeper: The identifier of this process is 14511@znb
14/06/27 18:34:52 INFO zookeeper.ClientCnxn: Session establishment complete on server localhost/127.0.0.1:2181, sessionid = 0x146dca9d17c0017, negotiated timeout = 180000
14/06/27 18:34:52 INFO zookeeper.ZooKeeper: Session: 0x146dca9d17c0017 closed
14/06/27 18:34:52 INFO zookeeper.ClientCnxn: EventThread shut down
GET:keyvalues={row1/data:1/1403865292631/Put/vlen=6/ts=0}
Scan:keyvalues={row1/data:1/1403865292631/Put/vlen=6/ts=0}
14/06/27 18:34:52 INFO client.HBaseAdmin: Started disable of test
14/06/27 18:34:52 INFO zookeeper.ZooKeeper: Initiating client connection, connectString=localhost:2181 sessionTimeout=180000 watcher=catalogtracker-on-org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation@26aa12dd
14/06/27 18:34:52 INFO zookeeper.RecoverableZooKeeper: The identifier of this process is 14511@znb
14/06/27 18:34:52 INFO zookeeper.ClientCnxn: Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error)
14/06/27 18:34:52 INFO zookeeper.ClientCnxn: Socket connection established to localhost/127.0.0.1:2181, initiating session
14/06/27 18:34:52 INFO zookeeper.ClientCnxn: Session establishment complete on server localhost/127.0.0.1:2181, sessionid = 0x146dca9d17c0018, negotiated timeout = 180000
14/06/27 18:34:52 INFO zookeeper.ZooKeeper: Session: 0x146dca9d17c0018 closed
14/06/27 18:34:52 INFO zookeeper.ClientCnxn: EventThread shut down
14/06/27 18:34:53 INFO zookeeper.ZooKeeper: Initiating client connection, connectString=localhost:2181 sessionTimeout=180000 watcher=catalogtracker-on-org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation@26aa12dd
14/06/27 18:34:53 INFO zookeeper.RecoverableZooKeeper: The identifier of this process is 14511@znb
14/06/27 18:34:53 INFO zookeeper.ClientCnxn: Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error)
14/06/27 18:34:53 INFO zookeeper.ClientCnxn: Socket connection established to localhost/127.0.0.1:2181, initiating session
14/06/27 18:34:53 INFO zookeeper.ClientCnxn: Session establishment complete on server localhost/127.0.0.1:2181, sessionid = 0x146dca9d17c0019, negotiated timeout = 180000
14/06/27 18:34:53 INFO zookeeper.ZooKeeper: Session: 0x146dca9d17c0019 closed
14/06/27 18:34:53 INFO zookeeper.ClientCnxn: EventThread shut down
14/06/27 18:34:53 INFO client.HBaseAdmin: Disabled test
14/06/27 18:34:54 INFO client.HBaseAdmin: Deleted test
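
The last two log lines show that the "test" table was disabled and then deleted. A minimal sketch of how the drop could be verified programmatically (illustrative only, not part of the original program, reusing the same admin instance before it is closed):

		// after deleteTable, the table should no longer exist
		if (!admin.tableExists("test")) {
			System.out.println("table 'test' has been dropped");
		}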
