import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map.Entry;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.MD5Hash;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.cslo.lsmp.hadoop.user.commom.CounterMap.Counter;
/**
 * Utility for buffering per-row counters in memory and bulk-flushing them to a
 * single HBase table. All state lives in static fields, so this class is NOT
 * thread-safe; callers must invoke {@link #getTable(String)} before
 * {@link #flushToHBase(String)} / {@link #getFromHBase(String, String, String)},
 * and {@link #clearHbase()} when finished.
 * NOTE(review): assumes single-threaded (e.g. one MapReduce task) usage — confirm
 * before sharing across threads.
 */
public class HbaseUtil {
    private static final Logger LOGGER = LoggerFactory.getLogger(HbaseUtil.class);

    /** Client-side write buffer threshold in bytes (6 MB). */
    private static final long WRITE_BUFFER_SIZE = 6L * 1024 * 1024;

    /** In-memory counters keyed by logical row key; drained by flushToHBase(). */
    static HashMap<String, CounterMap> rowKeyCounterMap = new HashMap<String, CounterMap>();
    static Configuration hConfig;
    static HTable hTable;

    static {
        hConfig = HBaseConfiguration.create();
        // Local/dev ZooKeeper quorum — consider externalizing instead of hard-coding.
        hConfig.set("hbase.zookeeper.quorum", "cdh1,cdh2,cdh3");
    }

    /**
     * (Re)creates a table, dropping any existing table of the same name first.
     * Use only when the previous data is meant to be deleted.
     *
     * @param tableName table to create
     * @param familys   column families to add; may be null or empty
     */
    public static void createTable(String tableName, String[] familys) {
        HBaseAdmin hBaseAdmin = null;
        try {
            hBaseAdmin = new HBaseAdmin(hConfig);
            if (hBaseAdmin.tableExists(tableName)) {
                // An existing table must be disabled before it can be deleted.
                hBaseAdmin.disableTable(tableName);
                hBaseAdmin.deleteTable(tableName);
                LOGGER.info("{} already existed and was deleted before re-creation", tableName);
            }
            HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
            if (familys != null && familys.length > 0) {
                for (String family : familys) {
                    tableDescriptor.addFamily(new HColumnDescriptor(family));
                }
            }
            hBaseAdmin.createTable(tableDescriptor);
        } catch (Exception e) {
            // Pass the exception itself so the stack trace is logged (was e.toString()).
            LOGGER.error("createTable failed for table {}", tableName, e);
        } finally {
            // HBaseAdmin holds a cluster connection; always release it (was leaked).
            if (hBaseAdmin != null) {
                try {
                    hBaseAdmin.close();
                } catch (IOException ignored) {
                    // best-effort close; nothing further to do
                }
            }
        }
    }

    /**
     * Opens the shared HTable handle used by the flush/get methods, with a 6 MB
     * client-side write buffer and auto-flush disabled so writes are batched.
     *
     * @param tableName table to open
     */
    public static void getTable(String tableName) {
        try {
            hTable = new HTable(hConfig, tableName);
            hTable.setWriteBufferSize(WRITE_BUFFER_SIZE); // 6MB (old comment wrongly said 4MB)
            hTable.setAutoFlush(false);
        } catch (IOException e) {
            LOGGER.error("getTable failed for table {}", tableName, e);
        }
    }

    /**
     * Increments the in-memory counter for (rowKey, key) by the given value,
     * creating the per-row CounterMap lazily.
     * (Method name keeps its original spelling for caller compatibility.)
     */
    public static void incerment(String rowKey, String key, Object value) {
        CounterMap counterMap = rowKeyCounterMap.get(rowKey);
        if (counterMap == null) {
            counterMap = new CounterMap();
            rowKeyCounterMap.put(rowKey, counterMap);
        }
        counterMap.increment(key, value);
    }

    /**
     * Writes all buffered counters to HBase under the given column family,
     * committing intermediate batches whenever the accumulated Put heap size
     * exceeds the write buffer threshold. Uses Durability.SKIP_WAL for
     * throughput (data loss is possible on a region-server crash). Clears the
     * in-memory buffer only after a fully successful flush.
     *
     * @param tablelie column family name
     * @return true on success, false if an IOException occurred
     */
    public static Boolean flushToHBase(String tablelie) {
        boolean ok = true;
        List<Put> puts = new ArrayList<Put>();
        long writebufsize = 0L; // primitive long avoids autoboxing in the hot loop
        byte[] familyBytes = Bytes.toBytes(tablelie); // hoisted loop invariant
        try {
            for (Entry<String, CounterMap> entry : rowKeyCounterMap.entrySet()) {
                CounterMap pastCounterMap = entry.getValue();
                Put put = new Put(HashRowKey(entry.getKey()));
                for (Entry<String, Counter> entry2 : pastCounterMap.entrySet()) {
                    Counter counter = entry2.getValue();
                    String values = "";
                    if (counter != null && counter.value != null) {
                        values = counter.value.toString();
                    }
                    put.addColumn(familyBytes, Bytes.toBytes(entry2.getKey()),
                            Bytes.toBytes(values));
                }
                // Count the Put's heap size once per row; the old code re-added the
                // growing put.heapSize() after every column, grossly over-counting.
                writebufsize += put.heapSize();
                put.setDurability(Durability.SKIP_WAL);
                if (!put.isEmpty()) { // HBase rejects a Put with no columns
                    puts.add(put);
                }
                if (writebufsize > WRITE_BUFFER_SIZE) {
                    hTable.put(puts);
                    hTable.flushCommits();
                    LOGGER.info("flushToHBase intermediate commit: bytes={} puts={}",
                            writebufsize, puts.size());
                    puts.clear();
                    writebufsize = 0L;
                }
            }
            if (!puts.isEmpty()) {
                hTable.put(puts);
            }
            hTable.flushCommits();
            // Only discard the buffered counters once everything was written.
            rowKeyCounterMap.clear();
        } catch (IOException e) {
            ok = false;
            LOGGER.error("flushToHBase failed", e);
        }
        return ok;
    }

    /**
     * Reads a single cell from HBase.
     *
     * @param rowKey row key, used as-is (NOT salted via HashRowKey — callers that
     *               wrote via flushToHBase must pre-hash; NOTE(review): confirm)
     * @param family column family
     * @param name   column qualifier
     * @return the cell value decoded as a String, or "" if absent or on error
     */
    public static String getFromHBase(String rowKey, String family, String name) {
        String result = "";
        try {
            Get get = new Get(Bytes.toBytes(rowKey));
            Result res = hTable.get(get);
            if (res != null && res.getRow() != null) {
                byte[] value = res.getValue(Bytes.toBytes(family), Bytes.toBytes(name));
                // Bytes.toString decodes the stored bytes; the old byte[].toString()
                // returned the array identity (e.g. "[B@1a2b3c"), and a missing cell
                // caused a NullPointerException.
                if (value != null) {
                    result = Bytes.toString(value);
                }
            }
        } catch (IOException e) {
            // Was e.printStackTrace(); use the logger and keep the stack trace.
            LOGGER.error("getFromHBase failed for rowKey {}", rowKey, e);
        }
        return result;
    }

    /** Closes the shared HTable handle (if open) and clears the static reference. */
    public static void clearHbase() {
        try {
            if (hTable != null) {
                hTable.close();
            }
        } catch (IOException e) {
            LOGGER.error("clearHbase failed", e);
        } finally {
            hTable = null;
        }
    }

    /**
     * Builds a salted row key: the first 4 hex chars of MD5(rowkey) prepended to
     * the raw key bytes. The prefix spreads sequential keys across regions to
     * avoid write hot-spotting, at the cost of losing ordered scans on the raw key.
     * (Method name keeps its original capitalization for caller compatibility.)
     *
     * @param rowkey the logical row key
     * @return salted row key bytes
     */
    public static byte[] HashRowKey(String rowkey) {
        byte[] bytes = Bytes.toBytes(rowkey);
        String hashPrefix = MD5Hash.getMD5AsHex(bytes).substring(0, 4);
        return Bytes.add(Bytes.toBytes(hashPrefix), bytes);
    }
}
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map.Entry;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.MD5Hash;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.cslo.lsmp.hadoop.user.commom.CounterMap.Counter;
/**
 * Utility for buffering per-row counters in memory and bulk-flushing them to a
 * single HBase table. All state lives in static fields, so this class is NOT
 * thread-safe; callers must invoke {@link #getTable(String)} before
 * {@link #flushToHBase(String)} / {@link #getFromHBase(String, String, String)},
 * and {@link #clearHbase()} when finished.
 * NOTE(review): this file contains a second, duplicated copy of this class —
 * the duplication itself should be removed at file level.
 */
public class HbaseUtil {
    private static final Logger LOGGER = LoggerFactory.getLogger(HbaseUtil.class);

    /** Client-side write buffer threshold in bytes (6 MB). */
    private static final long WRITE_BUFFER_SIZE = 6L * 1024 * 1024;

    /** In-memory counters keyed by logical row key; drained by flushToHBase(). */
    static HashMap<String, CounterMap> rowKeyCounterMap = new HashMap<String, CounterMap>();
    static Configuration hConfig;
    static HTable hTable;

    static {
        hConfig = HBaseConfiguration.create();
        // Local/dev ZooKeeper quorum — consider externalizing instead of hard-coding.
        hConfig.set("hbase.zookeeper.quorum", "cdh1,cdh2,cdh3");
    }

    /**
     * (Re)creates a table, dropping any existing table of the same name first.
     * Use only when the previous data is meant to be deleted.
     *
     * @param tableName table to create
     * @param familys   column families to add; may be null or empty
     */
    public static void createTable(String tableName, String[] familys) {
        HBaseAdmin hBaseAdmin = null;
        try {
            hBaseAdmin = new HBaseAdmin(hConfig);
            if (hBaseAdmin.tableExists(tableName)) {
                // An existing table must be disabled before it can be deleted.
                hBaseAdmin.disableTable(tableName);
                hBaseAdmin.deleteTable(tableName);
                LOGGER.info("{} already existed and was deleted before re-creation", tableName);
            }
            HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
            if (familys != null && familys.length > 0) {
                for (String family : familys) {
                    tableDescriptor.addFamily(new HColumnDescriptor(family));
                }
            }
            hBaseAdmin.createTable(tableDescriptor);
        } catch (Exception e) {
            // Pass the exception itself so the stack trace is logged (was e.toString()).
            LOGGER.error("createTable failed for table {}", tableName, e);
        } finally {
            // HBaseAdmin holds a cluster connection; always release it (was leaked).
            if (hBaseAdmin != null) {
                try {
                    hBaseAdmin.close();
                } catch (IOException ignored) {
                    // best-effort close; nothing further to do
                }
            }
        }
    }

    /**
     * Opens the shared HTable handle used by the flush/get methods, with a 6 MB
     * client-side write buffer and auto-flush disabled so writes are batched.
     *
     * @param tableName table to open
     */
    public static void getTable(String tableName) {
        try {
            hTable = new HTable(hConfig, tableName);
            hTable.setWriteBufferSize(WRITE_BUFFER_SIZE); // 6MB (old comment wrongly said 4MB)
            hTable.setAutoFlush(false);
        } catch (IOException e) {
            LOGGER.error("getTable failed for table {}", tableName, e);
        }
    }

    /**
     * Increments the in-memory counter for (rowKey, key) by the given value,
     * creating the per-row CounterMap lazily.
     * (Method name keeps its original spelling for caller compatibility.)
     */
    public static void incerment(String rowKey, String key, Object value) {
        CounterMap counterMap = rowKeyCounterMap.get(rowKey);
        if (counterMap == null) {
            counterMap = new CounterMap();
            rowKeyCounterMap.put(rowKey, counterMap);
        }
        counterMap.increment(key, value);
    }

    /**
     * Writes all buffered counters to HBase under the given column family,
     * committing intermediate batches whenever the accumulated Put heap size
     * exceeds the write buffer threshold. Uses Durability.SKIP_WAL for
     * throughput (data loss is possible on a region-server crash). Clears the
     * in-memory buffer only after a fully successful flush.
     *
     * @param tablelie column family name
     * @return true on success, false if an IOException occurred
     */
    public static Boolean flushToHBase(String tablelie) {
        boolean ok = true;
        List<Put> puts = new ArrayList<Put>();
        long writebufsize = 0L; // primitive long avoids autoboxing in the hot loop
        byte[] familyBytes = Bytes.toBytes(tablelie); // hoisted loop invariant
        try {
            for (Entry<String, CounterMap> entry : rowKeyCounterMap.entrySet()) {
                CounterMap pastCounterMap = entry.getValue();
                Put put = new Put(HashRowKey(entry.getKey()));
                for (Entry<String, Counter> entry2 : pastCounterMap.entrySet()) {
                    Counter counter = entry2.getValue();
                    String values = "";
                    if (counter != null && counter.value != null) {
                        values = counter.value.toString();
                    }
                    put.addColumn(familyBytes, Bytes.toBytes(entry2.getKey()),
                            Bytes.toBytes(values));
                }
                // Count the Put's heap size once per row; the old code re-added the
                // growing put.heapSize() after every column, grossly over-counting.
                writebufsize += put.heapSize();
                put.setDurability(Durability.SKIP_WAL);
                if (!put.isEmpty()) { // HBase rejects a Put with no columns
                    puts.add(put);
                }
                if (writebufsize > WRITE_BUFFER_SIZE) {
                    hTable.put(puts);
                    hTable.flushCommits();
                    LOGGER.info("flushToHBase intermediate commit: bytes={} puts={}",
                            writebufsize, puts.size());
                    puts.clear();
                    writebufsize = 0L;
                }
            }
            if (!puts.isEmpty()) {
                hTable.put(puts);
            }
            hTable.flushCommits();
            // Only discard the buffered counters once everything was written.
            rowKeyCounterMap.clear();
        } catch (IOException e) {
            ok = false;
            LOGGER.error("flushToHBase failed", e);
        }
        return ok;
    }

    /**
     * Reads a single cell from HBase.
     *
     * @param rowKey row key, used as-is (NOT salted via HashRowKey — callers that
     *               wrote via flushToHBase must pre-hash; NOTE(review): confirm)
     * @param family column family
     * @param name   column qualifier
     * @return the cell value decoded as a String, or "" if absent or on error
     */
    public static String getFromHBase(String rowKey, String family, String name) {
        String result = "";
        try {
            Get get = new Get(Bytes.toBytes(rowKey));
            Result res = hTable.get(get);
            if (res != null && res.getRow() != null) {
                byte[] value = res.getValue(Bytes.toBytes(family), Bytes.toBytes(name));
                // Bytes.toString decodes the stored bytes; the old byte[].toString()
                // returned the array identity (e.g. "[B@1a2b3c"), and a missing cell
                // caused a NullPointerException.
                if (value != null) {
                    result = Bytes.toString(value);
                }
            }
        } catch (IOException e) {
            // Was e.printStackTrace(); use the logger and keep the stack trace.
            LOGGER.error("getFromHBase failed for rowKey {}", rowKey, e);
        }
        return result;
    }

    /** Closes the shared HTable handle (if open) and clears the static reference. */
    public static void clearHbase() {
        try {
            if (hTable != null) {
                hTable.close();
            }
        } catch (IOException e) {
            LOGGER.error("clearHbase failed", e);
        } finally {
            hTable = null;
        }
    }

    /**
     * Builds a salted row key: the first 4 hex chars of MD5(rowkey) prepended to
     * the raw key bytes. The prefix spreads sequential keys across regions to
     * avoid write hot-spotting, at the cost of losing ordered scans on the raw key.
     * (Method name keeps its original capitalization for caller compatibility.)
     *
     * @param rowkey the logical row key
     * @return salted row key bytes
     */
    public static byte[] HashRowKey(String rowkey) {
        byte[] bytes = Bytes.toBytes(rowkey);
        String hashPrefix = MD5Hash.getMD5AsHex(bytes).substring(0, 4);
        return Bytes.add(Bytes.toBytes(hashPrefix), bytes);
    }
}