HBase Java API examples: get, put, namespace, scan, filter, cache
Code
package com.sid.hbase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.filter.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IOUtils;
public class HBaseTest {
    static Configuration conf = null;

    static {
        conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "node1:2181,node2:2181,node3:2181");
        conf.set("hbase.rootdir", "hdfs://hadoopcluster/hbase");
    }
    public static void main(String[] args) throws Exception {
        /*
        createNamespace("ns1");
        createTable("apitable", "info");
        addColumnFamily("apitable", "secret");
        putData("apitable", "1", "info", "name", "sid");
        putData("apitable", "1", "info", "age", "27");
        putData("apitable", "1", "secret", "idcard", "123");
        putData("apitable", "2", "info", "age", "28");
        putData("apitable", "2", "info", "name", "zhangsi");
        putData("apitable", "3", "info", "name", "lisi");
        putData("apitable", "3", "info", "school", "cqut");
        putData("apitable", "4", "info", "name", "wangwu");
        getData("apitable", "1", "info", "name");
        */
        //scanData("apitable", "1", "3", "info");
        filter("apitable", "1");
    }
    /**
     * Create a namespace.
     */
    public static void createNamespace(String namespace) throws Exception {
        HBaseAdmin admin = new HBaseAdmin(conf);
        // create the namespace
        NamespaceDescriptor descriptor = NamespaceDescriptor.create(namespace).build();
        admin.createNamespace(descriptor);
        admin.close();
    }
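    /*
     * A table is placed inside a namespace by prefixing the table name with it,
     * e.g. createTable("ns1:apitable", "info") creates apitable in ns1.
     * Without a prefix the table goes into the "default" namespace.
     */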
    /**
     * put
     */
    public static void putData(String tablename, String rowkey, String columnfamily, String column, String value) throws Exception {
        HTable table = getTable(tablename);
        Put put = new Put(Bytes.toBytes(rowkey));
        // addColumn replaces the deprecated Put.add(family, qualifier, value)
        put.addColumn(Bytes.toBytes(columnfamily), Bytes.toBytes(column), Bytes.toBytes(value));
        table.put(put);
        table.close();
    }
    /**
     * get
     */
    public static void getData(String tablename, String rowkey, String columnfamily, String column) throws Exception {
        HTable table = getTable(tablename);
        // a get always requires the row key
        Get get = new Get(Bytes.toBytes(rowkey));
        Result result = table.get(get);
        // when the column family and qualifier are known, read one specific cell
        byte[] value = result.getValue(Bytes.toBytes(columnfamily), Bytes.toBytes(column));
        System.out.println(Bytes.toString(value));
        // when only the row key is known, iterate over every cell in the row
        printResult(result);
        table.close();
    }
    /**
     * scan
     * A Result holds one row of data;
     * a ResultScanner iterates over multiple rows.
     */
    public static void scanData(String tablename, String startRowKey, String stopRowKey, String columnfamily) {
        HTable table = null;
        try {
            table = getTable(tablename);
            // an empty Scan is a full-table scan
            Scan scan = new Scan();
            // restrict by row-key range: the start row is inclusive, the stop row is exclusive
            scan.setStartRow(Bytes.toBytes(startRowKey));
            scan.setStopRow(Bytes.toBytes(stopRowKey));
            // restrict the scan to one column family
            scan.addFamily(Bytes.toBytes(columnfamily));
            // whether scanned blocks go into the block cache; the default is true,
            // disable it for large one-off scans
            scan.setCacheBlocks(false);
            // column-oriented batching: each Result carries at most 3 cells
            scan.setBatch(3);
            // row-oriented caching: 2 rows per RPC; with 10 rows in total,
            // the scanner makes 5 round trips
            scan.setCaching(2);
            // access control settings
            //scan.setACL();
            ResultScanner rs = table.getScanner(scan);
            for (Result r : rs) {
                printResult(r);
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (table != null) {
                IOUtils.closeStream(table);
            }
        }
    }
    /**
     * Filters
     */
    public static void filter(String tablename, String prefix) {
        HTable table = null;
        try {
            table = getTable(tablename);
            Scan scan = new Scan();
            Filter filter = null;
            // match by column-qualifier prefix (not the column family!)
            //filter = new ColumnPrefixFilter(Bytes.toBytes(prefix));
            // match by row-key prefix
            //filter = new PrefixFilter(Bytes.toBytes(prefix));
            // pagination filter: return at most 3 rows
            //filter = new PageFilter(3);
            // value comparison:
            // return rows where the value of info:name contains "lisi"
            // (SubstringComparator does substring matching, not exact equality)
            ByteArrayComparable comp = null;
            comp = new SubstringComparator("lisi");
            filter = new SingleColumnValueFilter(Bytes.toBytes("info"), Bytes.toBytes("name"), CompareFilter.CompareOp.EQUAL, comp);
            scan.setFilter(filter);
            ResultScanner rs = table.getScanner(scan);
            for (Result r : rs) {
                printResult(r);
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (table != null) {
                IOUtils.closeStream(table);
            }
        }
    }
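    /**
     * A minimal sketch of combining several filters with FilterList
     * (MUST_PASS_ALL = logical AND, MUST_PASS_ONE = logical OR).
     * The row-key prefix "1" and the value "sid" reuse the sample data above.
     */
    public static void filterList(String tablename) {
        HTable table = null;
        try {
            table = getTable(tablename);
            Scan scan = new Scan();
            // rows whose key starts with "1" AND whose info:name equals "sid"
            FilterList filters = new FilterList(FilterList.Operator.MUST_PASS_ALL,
                    new PrefixFilter(Bytes.toBytes("1")),
                    new SingleColumnValueFilter(Bytes.toBytes("info"), Bytes.toBytes("name"),
                            CompareFilter.CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("sid"))));
            scan.setFilter(filters);
            ResultScanner rs = table.getScanner(scan);
            for (Result r : rs) {
                printResult(r);
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (table != null) {
                IOUtils.closeStream(table);
            }
        }
    }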
    /**
     * Print every cell of one row.
     */
    public static void printResult(Result rs) {
        for (Cell cell : rs.rawCells()) {
            System.out.println("row key:" + Bytes.toString(CellUtil.cloneRow(cell)));
            System.out.println("column family:" + Bytes.toString(CellUtil.cloneFamily(cell)));
            System.out.println("column name:" + Bytes.toString(CellUtil.cloneQualifier(cell)));
            System.out.println("value:" + Bytes.toString(CellUtil.cloneValue(cell)));
        }
    }
    /**
     * Get a table handle.
     */
    public static HTable getTable(String tablename) throws Exception {
        return new HTable(conf, Bytes.toBytes(tablename));
    }
    /**
     * Create a table.
     */
    public static void createTable(String tablename, String columnfamily) throws Exception {
        // entry point for HBase schema (DDL) operations
        HBaseAdmin admin = new HBaseAdmin(conf);
        // check whether the table already exists
        boolean exists = admin.tableExists(tablename);
        if (exists) {
            // a table must be disabled before it can be deleted
            admin.disableTable(tablename);
            // delete the table
            admin.deleteTable(tablename);
        }
        // set the table name
        HTableDescriptor table = new HTableDescriptor(TableName.valueOf(tablename));
        // add the column family
        table.addFamily(new HColumnDescriptor(Bytes.toBytes(columnfamily)));
        // create the table
        admin.createTable(table);
        // release resources
        admin.close();
    }
    /**
     * Add a column family to an existing table.
     */
    public static void addColumnFamily(String tablename, String columnfamily) throws Exception {
        // entry point for HBase schema (DDL) operations
        HBaseAdmin admin = new HBaseAdmin(conf);
        // only add the column family if the table exists
        boolean exists = admin.tableExists(tablename);
        if (exists) {
            // add the column family
            admin.addColumn(tablename, new HColumnDescriptor(Bytes.toBytes(columnfamily)));
        }
        // release resources
        admin.close();
    }
}
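Note: HBaseAdmin and the HTable constructors used above still work in HBase 1.4.4 but are deprecated and were removed in HBase 2.0. As a minimal sketch against the non-deprecated Connection-based API (the row key "5" and value "zhaoliu" are made-up sample data; the table and column names reuse the example above), the same put looks like this:

package com.sid.hbase;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ConnectionApiSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "node1:2181,node2:2181,node3:2181");
        // a Connection is heavyweight and thread-safe: create it once and share it;
        // Table handles are lightweight and created per use
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("apitable"))) {
            Put put = new Put(Bytes.toBytes("5"));
            put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("name"), Bytes.toBytes("zhaoliu"));
            table.put(put);
        }
    }
}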
pom.xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.sid.hbase</groupId>
    <artifactId>hbase-train</artifactId>
    <version>1.0-SNAPSHOT</version>

    <properties>
        <hbase.version>1.4.4</hbase.version>
        <hadoop.version>2.9.0</hadoop.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-client</artifactId>
            <version>${hbase.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-server</artifactId>
            <version>${hbase.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
    </dependencies>
</project>