java通过API操作HBase
- 依赖包安装
<!-- 在pom.xml文件里添加以下依赖，version版本为对应的HBase版本 -->
<dependencies>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-server</artifactId>
<version>1.4.13</version>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>
<version>1.4.13</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>3.1.1</version>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-common</artifactId>
<version>1.4.13</version>
</dependency>
</dependencies>
如果使用的是IntelliJ IDEA，在联网状态下仍无法下载依赖，可在 File -> Settings -> Maven 下查看 user settings file 的路径，然后在该xml文件里添加镜像源
<!-- 配置阿里云仓库 -->
<mirror>
<id>alimaven</id>
<name>aliyun maven</name>
<url>https://maven.aliyun.com/repository/public</url>
<mirrorOf>central</mirrorOf>
</mirror>
配置完后如仍无法下载,可重启IDEA
- DDL操作
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import java.io.IOException;
/**
 * DDL operations against HBase: table existence check, create/drop table,
 * and namespace creation. Connects via the ZooKeeper quorum host "dong".
 */
public class hbase {
    private static Connection connection = null;
    private static Admin admin = null;
    static{
        // Build the client configuration and open a shared connection + Admin
        // handle once, when the class is first loaded.
        Configuration configuration = HBaseConfiguration.create();
        configuration.set("hbase.zookeeper.quorum","dong");
        try {
            connection = ConnectionFactory.createConnection(configuration);
            admin = connection.getAdmin();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    /**
     * Releases the Admin handle and the underlying cluster connection.
     * Fix: the original closed only the Admin and leaked the Connection;
     * null guards protect against a failed static initializer.
     */
    private static void close() throws IOException {
        if (admin != null) {
            admin.close();
        }
        if (connection != null) {
            connection.close();
        }
    }
    /**
     * Returns true if the named table exists in the cluster.
     */
    public static boolean isTableExist(String tableName) throws IOException {
        /*
        Legacy API (HBase < 1.0):
        HBaseAdmin admin = new HBaseAdmin(configuration);
        boolean exists = admin.tableExists(tableName);
        */
        /*
        Current API:
        Configuration configuration = HBaseConfiguration.create();
        configuration.set("hbase.zookeeper.quorum","dong");
        Connection connection = ConnectionFactory.createConnection(configuration);
        Admin admin = connection.getAdmin();
        */
        return admin.tableExists(TableName.valueOf(tableName));
    }
    /**
     * Creates a table with one column family per varargs entry.
     * No-op (with a console message) if no families are given or the
     * table already exists.
     *
     * @param tableName table to create
     * @param params    one or more column family names
     */
    public static void createTable(String tableName,String... params) throws IOException {
        if(params.length == 0){
            System.out.println("parameter.length must be greater than 0");
            return;
        }
        if(isTableExist(tableName)){
            System.out.println("the table already exists");
            return;
        }
        HTableDescriptor hTableDescriptor = new HTableDescriptor(TableName.valueOf(tableName));
        for(String param:params){
            // Fix: the original passed String.valueOf(params) — the whole array —
            // which yields a single bogus family name like "[Ljava.lang.String;@1a2b3c"
            // instead of each requested family. Use the loop element.
            HColumnDescriptor hColumnDescriptor = new HColumnDescriptor(param);
            hTableDescriptor.addFamily(hColumnDescriptor);
        }
        admin.createTable(hTableDescriptor);
    }
    /**
     * Disables then deletes the named table (HBase requires disable-before-delete).
     * No-op with a console message if the table does not exist.
     */
    public static void dropTable(String tableName) throws IOException {
        if(! isTableExist(tableName)){
            System.out.println("the table does not exist");
            return ;
        }
        admin.disableTable(TableName.valueOf(tableName));
        admin.deleteTable(TableName.valueOf(tableName));
    }
    /**
     * Creates a namespace; reports (rather than fails) if it already exists.
     */
    public static void createNameSpace(String nameSpace) {
        NamespaceDescriptor namespaceDescriptor = NamespaceDescriptor.create(nameSpace).build();
        try{
            admin.createNamespace(namespaceDescriptor);
        } catch (NamespaceExistException e){
            System.out.println("the namespace already exists");
        } catch (IOException e){
            e.printStackTrace();
        }
    }
    public static void main(String [] args) throws IOException {
        //System.out.println(isTableExist("stu"));
        //createTable("stuTest","info");
        //dropTable("stuTest");
        createNameSpace("Test");
        close();
    }
}
- DML操作
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import java.io.IOException;
/**
 * DML operations against HBase: put, scan, get, and delete on a table.
 * Connects via the ZooKeeper quorum host "dong".
 */
public class dml {
    private static Connection connection = null;
    static{
        // Open one shared connection when the class is first loaded.
        Configuration configuration = HBaseConfiguration.create();
        configuration.set("hbase.zookeeper.quorum","dong");
        try {
            connection = ConnectionFactory.createConnection(configuration);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    /**
     * Writes one cell: (rowKey, colFamily:col) = value.
     * Fix: the original kept the Table in a shared static field (not thread-safe)
     * and leaked it on exception; try-with-resources closes it on every path.
     */
    public static void putData(String tableName,String rowKey,String colFamily,
                               String col,String value) throws IOException {
        try (Table table = connection.getTable(TableName.valueOf(tableName))) {
            Put put = new Put(Bytes.toBytes(rowKey));
            put.addColumn(Bytes.toBytes(colFamily),Bytes.toBytes(col),Bytes.toBytes(value));
            table.put(put);
        }
    }
    /**
     * Full-table scan; prints every cell as rowKey/columnFamily/column/value.
     */
    public static void scanTable(String tableName) throws IOException {
        try (Table table = connection.getTable(TableName.valueOf(tableName));
             ResultScanner resultScan = table.getScanner(new Scan())) {
            for(Result result:resultScan){
                for(Cell cell:result.rawCells()){
                    String rowKey = Bytes.toString(CellUtil.cloneRow(cell));
                    String columnFamily = Bytes.toString(CellUtil.cloneFamily(cell));
                    String column = Bytes.toString(CellUtil.cloneQualifier(cell));
                    String value = Bytes.toString(CellUtil.cloneValue(cell));
                    System.out.println("rowKey:" + rowKey + "," +
                            "columnFamily:" + columnFamily + "," +
                            "column:" + column + "," +
                            "value:" + value);
                }
            }
        }
    }
    /**
     * Reads one row and prints its cells. Scope narrows with the arguments:
     * empty colFamily and col = whole row; non-empty colFamily with empty col
     * = one family; both non-empty = one column.
     */
    public static void getData(String tableName,String rowKey,
                               String colFamily,String col) throws IOException {
        try (Table table = connection.getTable(TableName.valueOf(tableName))) {
            Get get = new Get(Bytes.toBytes(rowKey));
            if(!colFamily.equals("") && col.equals("")) {
                get.addFamily(Bytes.toBytes(colFamily));
            } else if(!colFamily.equals("") && !col.equals("")) {
                get.addColumn(Bytes.toBytes(colFamily), Bytes.toBytes(col));
            }
            // else: no restriction — fetch the entire row.
            Result result = table.get(get);
            for(Cell cell:result.rawCells()){
                String columnFamily = Bytes.toString(CellUtil.cloneFamily(cell));
                String column = Bytes.toString(CellUtil.cloneQualifier(cell));
                String value = Bytes.toString(CellUtil.cloneValue(cell));
                System.out.println("rowKey:" + rowKey + "," +
                        "columnFamily:" + columnFamily + "," +
                        "column:" + column + "," +
                        "value:" + value);
            }
        }
    }
    /**
     * Deletes data from one row. Non-empty colFamily with empty col deletes
     * the whole family; both non-empty deletes all versions of one column.
     */
    public static void deleteData(String tableName,String rowKey,String colFamily,String col)
            throws IOException {
        try (Table table = connection.getTable(TableName.valueOf(tableName))) {
            Delete delete = new Delete(Bytes.toBytes(rowKey));
            if(!colFamily.equals("") && col.equals("")) {
                delete.addFamily(Bytes.toBytes(colFamily));
            } else if(!colFamily.equals("") && !col.equals("")) {
                /*
                addColumn  deletes only the newest version of the cell; with a
                           timestamp, only the version whose timestamp equals it.
                addColumns deletes all versions of the cell; with a timestamp,
                           every version whose timestamp is <= the given one.
                */
                delete.addColumns(Bytes.toBytes(colFamily), Bytes.toBytes(col));
            }
            table.delete(delete);
        }
    }
    public static void main(String [] args) throws IOException {
        //putData("stu","1001","info","name","lis9i");
        //putData("stu","1001","info","age","129");
        //putData("stu","1001","info","name","lisi");
        //putData("stu","1001","info","age","1");
        deleteData("stu","1001","info","name");
        scanTable("stu");
        //getData("stu","1001","","");
        // Fix: release the shared cluster connection before exiting
        // (the original never closed it).
        if (connection != null) {
            connection.close();
        }
    }
}
- 报错
Exception in thread "main" org.apache.hadoop.hbase.client.RetriesExhaustedException: Can't get the location for replica 0
at org.apache.hadoop.hbase.client.RpcRetryingCallerWithReadReplicas.getRegionLocations(RpcRetryingCallerWithReadReplicas.java:372)
at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.java:153)
at org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.call(ScannerCallableWithReplicas.java:58)
at org.apache.hadoop.hbase.client.RpcRetryingCaller.callWithoutRetries(RpcRetryingCaller.java:219)
at org.apache.hadoop.hbase.client.ClientScanner.call(ClientScanner.java:275)
at org.apache.hadoop.hbase.client.ClientScanner.loadCache(ClientScanner.java:436)
at org.apache.hadoop.hbase.client.ClientScanner.next(ClientScanner.java:310)
at org.apache.hadoop.hbase.MetaTableAccessor.fullScan(MetaTableAccessor.java:639)
at org.apache.hadoop.hbase.MetaTableAccessor.tableExists(MetaTableAccessor.java:366)
at org.apache.hadoop.hbase.client.HBaseAdmin.tableExists(HBaseAdmin.java:409)
at hbase_hello.isTableExist(hbase_hello.java:46)
at hbase_hello.main(hbase_hello.java:93)
解决办法:
配置参数
configuration.set("zookeeper.znode.parent","/hbase-unsecure");
或者在src--> main -->resources下放置配置文件hbase-site.xml,文件内需要配置上述参数