java 访问HBase 例子
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Put;
public class TestHBase {
    // Path to the hbase-site.xml config file. Currently NOT loaded (see the
    // commented-out addResource call below); quorum/port are set directly instead.
    static String hbase_xml = "***/conf/hbase-site.xml";
    static HBaseConfiguration cfg = null;

    static {
        Configuration conf = new Configuration();
        //conf.addResource(new Path(hbase_xml));
        conf.set("hbase.zookeeper.quorum", "******");             // HBase ZooKeeper quorum host(s)
        conf.set("hbase.zookeeper.property.clientPort", "*****"); // ZooKeeper client port
        cfg = new HBaseConfiguration(conf);
    }

    /**
     * Creates a table with the given column families.
     * If the table already exists, logs that fact and returns without changes.
     *
     * @param tableName     name of the table to create
     * @param columnFamilys column family names to add to the new table
     * @throws Exception if the HBase admin operations fail
     */
    public static void createTable(String tableName, String[] columnFamilys) throws Exception {
        HBaseAdmin admin = new HBaseAdmin(cfg);
        try {
            if (admin.tableExists(tableName)) {
                // BUG FIX: the original printed "不存在!" ("does not exist") in the
                // branch where the table DOES exist, then called System.exit(0),
                // killing the whole JVM. Report the real condition and return.
                System.out.println(tableName + " already exists!");
                return;
            }
            HTableDescriptor tableDesc = new HTableDescriptor(tableName);
            for (String columnFamily : columnFamilys) {
                tableDesc.addFamily(new HColumnDescriptor(columnFamily));
            }
            admin.createTable(tableDesc);
            System.out.println("success to add table " + tableName);
        } finally {
            // Release the admin connection even on failure.
            // NOTE(review): HBaseAdmin.close() requires HBase 0.94+; drop this
            // call if building against an older client.
            admin.close();
        }
    }

    /**
     * Inserts a single cell (row / column family / qualifier / value).
     *
     * @param tableName    table to write to
     * @param row          row key
     * @param columnFamily column family name
     * @param column       column qualifier
     * @param data         cell value
     * @throws Exception if the put fails
     */
    public static void addData(String tableName, String row, String columnFamily,
                               String column, String data) throws Exception {
        HTable table = new HTable(cfg, tableName);
        try {
            Put putrow = new Put(row.getBytes());
            putrow.add(columnFamily.getBytes(), column.getBytes(), data.getBytes());
            table.put(putrow);
            System.out.println("success to add data for table " + tableName
                    + " columnFamily " + columnFamily);
        } finally {
            table.close(); // flushes any buffered writes and releases the connection
        }
    }

    /**
     * Full-table scan: prints every cell as "&lt;rowKey&gt;&lt;value&gt;" to stdout.
     *
     * @param tableName table to scan
     * @throws Exception if the scan fails
     */
    public static void getAllData(String tableName) throws Exception {
        HTable table = new HTable(cfg, tableName);
        try {
            Scan scan = new Scan();
            ResultScanner rs = table.getScanner(scan);
            try {
                for (Result r : rs) {
                    for (KeyValue kv : r.raw()) {
                        System.out.println(new String(kv.getRow()) + new String(kv.getValue()));
                    }
                }
            } finally {
                rs.close(); // scanners hold server-side resources; always close
            }
        } finally {
            table.close();
        }
    }

    /** Demo driver: creates a table, inserts three cells, scanning after each. */
    public static void main(String[] args) {
        try {
            String tableName = "xiaowang";
            String[] familynames = {"place", "school"};
            TestHBase.createTable(tableName, familynames);
            TestHBase.addData(tableName, "1", "place", "province", "ShanXi");
            TestHBase.getAllData(tableName);
            TestHBase.addData(tableName, "2", "place", "province", "Beijing");
            TestHBase.getAllData(tableName);
            TestHBase.addData(tableName, "1", "school", "high", "MIT");
            TestHBase.getAllData(tableName);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
http://doudouclever.blog.163.com/blog/static/17511231020121013836168/
---------------------------------------------------------------------------------------------------------------------------------------------
<h3 style="margin: 0px; padding: 0px; font-size: 20px; font-family: 黑体; text-align: center; background-color: rgb(248, 248, 248);">java调用HBase api创建表,插入数据</h3>
今天学习了一下通过 Java 调用 HBase API 实现表的创建和数据插入的操作,贴下代码:
package com.lyq.study.api;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import com.lyq.study.util.HBaseConfigUtils;
public class HBase_API {
    /**
     * Demo: creates table "hbasetest" with a single column family "info",
     * then inserts one row via a single Put and three rows via a batch List&lt;Put&gt;.
     *
     * @throws MasterNotRunningException     if the HBase master is unreachable
     * @throws ZooKeeperConnectionException if ZooKeeper cannot be contacted
     * @throws IOException                  on any other HBase I/O failure
     */
    public static void main(String[] args) throws MasterNotRunningException,
            ZooKeeperConnectionException, IOException {
        String tableName = "hbasetest";
        String familyName = "info";
        Configuration conf = HBaseConfigUtils.getHBaseConfig(1);

        // Create the table. BUG FIX: the original called createTable
        // unconditionally, which throws TableExistsException on every re-run.
        HBaseAdmin admin = new HBaseAdmin(conf);
        if (!admin.tableExists(tableName)) {
            HTableDescriptor tableDesc = new HTableDescriptor(tableName);
            tableDesc.addFamily(new HColumnDescriptor(familyName));
            admin.createTable(tableDesc);
        }

        HTable table = new HTable(conf, tableName);
        try {
            // Single insert; "row1" is the row key.
            table.put(personPut("row1", familyName, "zhangsan", "24", "chengde", "male"));

            // Batch insert: one put(List<Put>) round trip for three rows.
            List<Put> list = new ArrayList<Put>();
            list.add(personPut("rowkey1", familyName, "wangwu", "25", "beijing", "male"));
            list.add(personPut("rowkey2", familyName, "zhangliu", "28", "handan", "male"));
            list.add(personPut("rowkey3", familyName, "liqing", "18", "guangzhou", "female"));
            table.put(list);
        } finally {
            // BUG FIX: the original never closed the table, so writes sitting in
            // the client write buffer could be lost; close() flushes and releases.
            table.close();
        }
    }

    /** Builds a Put holding the name/age/city/sex columns for one person row. */
    private static Put personPut(String rowKey, String family,
                                 String name, String age, String city, String sex) {
        Put p = new Put(rowKey.getBytes());
        p.add(family.getBytes(), "name".getBytes(), name.getBytes());
        p.add(family.getBytes(), "age".getBytes(), age.getBytes());
        p.add(family.getBytes(), "city".getBytes(), city.getBytes());
        p.add(family.getBytes(), "sex".getBytes(), sex.getBytes());
        return p;
    }
}
创建表直接调用了HBaseAdmin对象的createTable(HTableDescriptor desc)方法,
插入数据分别调用了HTable对象的put(Put put)方法,和put(List&lt;Put&gt; list)方法:
其中put(Put put)方法实现了数据的单条插入,
put(List&lt;Put&gt; list)方法实现了数据的多条批量插入。
Configuration conf = HBaseConfigUtils.getHBaseConfig(1);
这句代码中的HBaseConfigUtils.getHBaseConfig(int flag)方法代码如下:
package com.lyq.study.util;
import org.apache.hadoop.conf.Configuration;
public class HBaseConfigUtils {
    /**
     * Builds a Hadoop/HBase Configuration for the selected deployment.
     *
     * @param flag deployment selector: 0 (or negative) = standalone, positive = cluster
     * @return a Configuration with filesystem, job tracker and ZooKeeper quorum set
     */
    public static Configuration getHBaseConfig(int flag) {
        boolean cluster = flag > 0;
        String fsDefault = cluster ? "hdfs://master129:9000/" : "hdfs://ubuntu:9000/";
        String jobTracker = cluster ? "master129:9001" : "ubuntu:9001";
        String quorum = cluster ? "master129,slave130,slave131,slave132" : "ubuntu";

        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", fsDefault);
        conf.set("mapreduce.framework.name", "local");
        conf.set("mapred.job.tracker", jobTracker);
        conf.set("hbase.zookeeper.quorum", quorum);
        return conf;
    }
}
编辑文件如下列代码所示。注意项有2:
1.其中首先需要注意hdfs://dm4:9000/hbase这里,必须与你的Hadoop集群的core-site.xml文件配置保持完全一致才行,如果你Hadoop的hdfs使用了其它端口,请在这里也修改。再者就是Hbase该项并不识别机器IP,只能使用机器hostname才可行,即若使用dm4的IP(192.168.0.10)是会抛出java错误,至于具体的错误由于时间久远,我就懒得去翻查那大量的log了。
2.hbase.zookeeper.quorum 的个数必须是奇数。
org.apache.hadoop.hbase.ZooKeeperConnectionException:
这个是你没连上zookeeper造成的。我给你一个连接代码看看吧,要保证机器IP是对的。Configuration conf = HBaseConfiguration.create(); conf.set("hbase.zookeeper.quorum", ip); conf.set("hbase.zookeeper.property.clientPort", port); conf.set("hbase.master", master);然后再用HTable table = new HTable(conf,"表名");来获得 HTable句柄。如果还是不行,要在zookeeper机器上用netstat -pan|grep 2181看看这个端口上的zookeeper是不是正常
hbase-site.xml
http://www.cnblogs.com/zhenjing/p/hbase_example.html
http://www.linuxidc.com/Linux/2012-07/65667.htm
http://www.linuxidc.com/Linux/2012-07/65667.htm
http://blog.csdn.net/kky2010_110/article/details/7865550
http://www.cnblogs.com/panfeng412/archive/2011/08/14/2137984.html
http://www.cnblogs.com/zhenjing/p/hbase_example.html
http://blog.csdn.net/franklysun/article/details/6443027
http://blog.sina.com.cn/s/blog_6aa9c7380102uwkv.html
http://bbs.csdn.net/topics/390033213
http://www.verydemo.com/demo_c447_i9472.html
http://bbs.csdn.net/topics/390033213
http://bbs.csdn.net/topics/390033213
http://biancheng.dnbcw.info/sql/464391.html
http://blog.csdn.net/renren000/article/details/6662595
http://blog.csdn.net/feixiangcq/article/details/5495027
http://www.cxyclub.cn/n/54759/
http://doudouclever.blog.163.com/blog/static/17511231020121013836168/