import com.alibaba.fastjson.JSONObject;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import java.io.*;
import java.util.List;
/**
*
*scan
*
*
*
*
*/
public class test2 {
public static void main(String[] args) {
//hbase 连接
try {
Configuration HBASE_CONF;
HBASE_CONF = HBaseConfiguration.create();
HBASE_CONF.set("hbase.zookeeper.property.clientPort", "2181");
HBASE_CONF.set("hbase.zookeeper.quorum", "192.168.xx.xx");
HBASE_CONF.set("hbase.master", "192.168.xx.xx:60000");
HBASE_CONF.set("zookeeper.znode.parent", "/hbase");
HBASE_CONF.setInt("hbase.hconnection.threads.max", 100);
HBASE_CONF.setInt("hbase.hconnection.threads.core", 50);
HBASE_CONF.setLong("hbase.hconnection.threads.keepalivetime", 1000);
ConnectionFactory.createConnection(HBASE_CONF);
Connection hbaseConnection = ConnectionFactory.createConnection(HBASE_CONF);
Table table = hbaseConnection.getTable(TableName.valueOf("0_library_token"));
System.out.println("Table Name: " + table.getName());
Scan scan = new Scan();
scan.addColumn(Bytes.toBytes("F"), Bytes.toBytes("F"));
scan.setMaxVersions(1111111111);//不设置默认,hbase只取row中的1个cell
ResultScanner resultScanner = table.getScanner(scan);
for (Result result : resultScanner) {
List<Cell> cells = result.listCells();//hbase的一个row对应的所有cell
byte[] row = result.getRow();
String rowKey = Bytes.toString(row);//rowkey
System.out.println("rowKey为"+rowKey);
for (Cell cell : cells) {//value
String jsonstr = Bytes.toString(CellUtil.cloneValue(cell));
//单个value中的cell
JSONObject jsonObject = JSONObject.parseObject(jsonstr);
System.out.println("path为" + jsonObject.toString );
}
System.out.println("************************");
}
} catch (IOException e) {
e.printStackTrace();
}
}
插入示例（用 Put 写入单行数据）：
Configuration HBASE_CONF;
HBASE_CONF = HBaseConfiguration.create();
HBASE_CONF.set("hbase.zookeeper.property.clientPort", "2181");
HBASE_CONF.set("hbase.zookeeper.quorum", "192.168.xx.xx");
HBASE_CONF.set("hbase.master", "192.168.xx.xx:60000");
HBASE_CONF.set("zookeeper.znode.parent", "/hbase");
HBASE_CONF.setInt("hbase.hconnection.threads.max", 100);
HBASE_CONF.setInt("hbase.hconnection.threads.core", 50);
HBASE_CONF.setLong("hbase.hconnection.threads.keepalivetime", 1000);
try {
ConnectionFactory.createConnection(HBASE_CONF);
Connection hbaseConnection = ConnectionFactory.createConnection(HBASE_CONF);
Table table = hbaseConnection.getTable(TableName.valueOf("0_file_pv"));
Put put = new Put("rk0001".getBytes()); //指定rowkey
put.addColumn("F".getBytes(), "F".getBytes(), "shandong".getBytes());
//插入数据
table.put(put);
table.close();
hbaseConnection.close();
System.out.println("结束***");
} catch (IOException e) {
e.printStackTrace();
}
所需 Maven 依赖（pom.xml 的 dependencies 部分）：
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop.version}</version>
</dependency>
<!--org.apache.hadoop.hbase.mapreduce-->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-core</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>
<version>${hbase.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-server</artifactId>
<version>${hbase.version}</version>
</dependency>
版本属性（pom.xml 的 properties 部分）：
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<hadoop.version>2.6.5</hadoop.version>
<hbase.version>1.2.5</hbase.version>
</properties>
注意：客户端机器 hosts 文件中的主机名/IP 映射必须与上面配置的连接地址保持一致 —— HBase 客户端通常按 ZooKeeper 返回的主机名去连接 RegionServer，主机名解析不到就会连接失败。