Developing an HBase Java Client with Eclipse on Windows 7

This article shows how to build an HBase client application with Eclipse on Windows.
  1. Set up an HBase cluster; see: Configuring an HBase 0.98.10-hadoop2 cluster on CentOS
  2. Create a Maven project in Eclipse (a command-line alternative is sketched below)
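
    If you prefer the command line to the Eclipse wizard, an equivalent quickstart project can be generated with Maven and then imported into Eclipse. The artifactId below is only a placeholder; the groupId matches the package used in step 6:

    	mvn archetype:generate -DgroupId=com.eric.hbase -DartifactId=hbase-client-demo -DarchetypeArtifactId=maven-archetype-quickstart -DinteractiveMode=false
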
  3. Copy the cluster's hbase-site.xml into the project's classes directory so that it ends up on the runtime classpath (a minimal example is sketched below)
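
    For reference, a minimal hbase-site.xml might look like the following. The quorum hosts are taken from the program output in step 7, and the client port is assumed to be the ZooKeeper default of 2181; adjust both to match your own cluster:

    	<configuration>
    		<property>
    			<name>hbase.zookeeper.quorum</name>
    			<value>hadoop107,hadoop108,hadoop104</value>
    		</property>
    		<property>
    			<name>hbase.zookeeper.property.clientPort</name>
    			<value>2181</value>
    		</property>
    	</configuration>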

  4. Edit the hosts file at C:\windows\system32\drivers\etc\hosts and map the IP address of each HBase cluster node to its hostname:
    192.168.40.108   hadoop108
    192.168.40.148   hadoop148 
    192.168.40.104   hadoop104 
    192.168.40.107   hadoop107 
    192.168.40.105   hadoop105
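
    You can check that the names resolve correctly from a Windows command prompt, for example:

    	ping hadoop108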
  5. Create the Maven pom.xml file with the following dependencies
    	<dependencies>
    
    		<dependency>
    			<groupId>org.apache.avro</groupId>
    			<artifactId>avro</artifactId>
    			<version>1.7.7</version>
    		</dependency>
    
    		<dependency>
    			<groupId>org.apache.avro</groupId>
    			<artifactId>avro-tools</artifactId>
    			<version>1.7.7</version>
    		</dependency>
    
    		<dependency>
    			<groupId>org.apache.avro</groupId>
    			<artifactId>avro-maven-plugin</artifactId>
    			<version>1.7.7</version>
    		</dependency>
    		<dependency>
    			<groupId>org.apache.avro</groupId>
    			<artifactId>avro-compiler</artifactId>
    			<version>1.7.7</version>
    		</dependency>
    
    		<dependency>
    			<groupId>org.apache.hbase</groupId>
    			<artifactId>hbase-client</artifactId>
    			<version>0.98.8-hadoop1</version>
    		</dependency>
    
    		<dependency>
    			<groupId>org.apache.hbase</groupId>
    			<artifactId>hbase</artifactId>
    			<version>0.90.2</version>
    		</dependency>
    		<dependency>
    			<groupId>org.apache.hadoop</groupId>
    			<artifactId>hadoop-core</artifactId>
    			<version>1.2.1</version>
    		</dependency>
    
    		<dependency>
    			<groupId>junit</groupId>
    			<artifactId>junit</artifactId>
    			<version>3.8.1</version>
    			<scope>test</scope>
    		</dependency>
    	</dependencies>


  6. Write the Java source code
    package com.eric.hbase;
    
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.MasterNotRunningException;
    import org.apache.hadoop.hbase.ZooKeeperConnectionException;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.HBaseAdmin;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;
    
    public class BaseOperation {
    
    	private static final String TABLE_NAME = "demo_table";
    
    	public static Configuration conf = null;
    	public HTable table = null;
    	public HBaseAdmin admin = null;
    
    	static {
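    		// HBaseConfiguration.create() loads hbase-site.xml from the classpath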
    		conf = HBaseConfiguration.create();
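    		// print the ZooKeeper quorum to confirm the configuration was picked up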
    		System.out.println(conf.get("hbase.zookeeper.quorum"));
    	}
    
    	/**
    	 * Create a table
    	 */
    	public static void creatTable(String tableName, String[] familys)
    			throws Exception {
    		HBaseAdmin admin = new HBaseAdmin(conf);
    		if (admin.tableExists(tableName)) {
    			System.out.println("table already exists!");
    		} else {
    			HTableDescriptor tableDesc = new HTableDescriptor(tableName);
    			for (int i = 0; i < familys.length; i++) {
    				tableDesc.addFamily(new HColumnDescriptor(familys[i]));
    			}
    			admin.createTable(tableDesc);
    			System.out.println("create table " + tableName + " ok.");
    		}
    	}
    
    	/**
    	 * Delete a table
    	 */
    	public static void deleteTable(String tableName) throws Exception {
    		try {
    			HBaseAdmin admin = new HBaseAdmin(conf);
    			admin.disableTable(tableName);
    			admin.deleteTable(tableName);
    			System.out.println("delete table " + tableName + " ok.");
    		} catch (MasterNotRunningException e) {
    			e.printStackTrace();
    		} catch (ZooKeeperConnectionException e) {
    			e.printStackTrace();
    		}
    	}
    
    	/**
    	 * Insert a row
    	 */
    	public static void addRecord(String tableName, String rowKey,
    			String family, String qualifier, String value) throws Exception {
    		try {
    			HTable table = new HTable(conf, tableName);
    			Put put = new Put(Bytes.toBytes(rowKey));
    			put.add(Bytes.toBytes(family), Bytes.toBytes(qualifier),
    					Bytes.toBytes(value));
    			table.put(put);
    			System.out.println("insert recored " + rowKey + " to table "
    					+ tableName + " ok.");
    		} catch (IOException e) {
    			e.printStackTrace();
    		}
    	}
    
    	/**
    	 * Delete a row
    	 */
    	public static void delRecord(String tableName, String rowKey)
    			throws IOException {
    		HTable table = new HTable(conf, tableName);
    		List<Delete> list = new ArrayList<Delete>();
    		Delete del = new Delete(rowKey.getBytes());
    		list.add(del);
    		table.delete(list);
    		System.out.println("del recored " + rowKey + " ok.");
    	}
    
    	/**
    	 * Get a single row
    	 */
    	public static void getOneRecord(String tableName, String rowKey)
    			throws IOException {
    		HTable table = new HTable(conf, tableName);
    		Get get = new Get(rowKey.getBytes());
    		Result rs = table.get(get);
    		for (KeyValue kv : rs.raw()) {
    			System.out.print(new String(kv.getRow()) + " ");
    			System.out.print(new String(kv.getFamily()) + ":");
    			System.out.print(new String(kv.getQualifier()) + " ");
    			System.out.print(kv.getTimestamp() + " ");
    			System.out.println(new String(kv.getValue()));
    		}
    	}
    
    	/**
    	 * Scan and print all rows
    	 */
    	public static void getAllRecord(String tableName) {
    		try {
    			HTable table = new HTable(conf, tableName);
    			Scan s = new Scan();
    			ResultScanner ss = table.getScanner(s);
    			for (Result r : ss) {
    				for (KeyValue kv : r.raw()) {
    					System.out.print(new String(kv.getRow()) + " ");
    					System.out.print(new String(kv.getFamily()) + ":");
    					System.out.print(new String(kv.getQualifier()) + " ");
    					System.out.print(kv.getTimestamp() + " ");
    					System.out.println(new String(kv.getValue()));
    				}
    			}
    		} catch (IOException e) {
    			e.printStackTrace();
    		}
    	}
    
    	public static void main(String[] agrs) {
    		try {
    			String tablename = "scores";
    			String[] familys = { "grade", "course" };
    			BaseOperation.creatTable(tablename, familys);
    
    			// add record zkb
    			BaseOperation.addRecord(tablename, "zkb", "grade", "", "5");
    			BaseOperation.addRecord(tablename, "zkb", "course", "", "90");
    			BaseOperation.addRecord(tablename, "zkb", "course", "math", "97");
    			BaseOperation.addRecord(tablename, "zkb", "course", "art", "87");
    			// add record baoniu
    			BaseOperation.addRecord(tablename, "baoniu", "grade", "", "4");
    			BaseOperation
    					.addRecord(tablename, "baoniu", "course", "math", "89");
    
    			System.out.println("===========get one record========");
    			BaseOperation.getOneRecord(tablename, "zkb");
    
    			System.out.println("===========show all record========");
    			BaseOperation.getAllRecord(tablename);
    
    			System.out.println("===========del one record========");
    			BaseOperation.delRecord(tablename, "baoniu");
    			BaseOperation.getAllRecord(tablename);
    
    			System.out.println("===========show all record========");
    			BaseOperation.getAllRecord(tablename);
    		} catch (Exception e) {
    			e.printStackTrace();
    		}
    	}
    
    }
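
    Note that the example above never closes its HTable or HBaseAdmin instances. Below is a minimal sketch of the same put operation with explicit cleanup, using the same 0.98-era client API; the column qualifier and value here are only placeholders:

    package com.eric.hbase;
    
    import java.io.IOException;
    
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;
    
    public class AddRecordWithCleanup {
    
    	public static void main(String[] args) throws IOException {
    		// picks up hbase-site.xml from the classpath, just like BaseOperation
    		HTable table = new HTable(HBaseConfiguration.create(), "scores");
    		try {
    			Put put = new Put(Bytes.toBytes("zkb"));
    			// placeholder qualifier and value; same Put.add() call as addRecord()
    			put.add(Bytes.toBytes("course"), Bytes.toBytes("english"),
    					Bytes.toBytes("85"));
    			table.put(put);
    		} finally {
    			// release the connection resources held by the table
    			table.close();
    		}
    	}
    }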
    
  7. Run the program; the output is as follows:
    hadoop107,hadoop108,hadoop104
    log4j:WARN No appenders could be found for logger (org.apache.hadoop.metrics2.lib.MutableMetricsFactory).
    log4j:WARN Please initialize the log4j system properly.
    log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
    table already exists!
    insert recored zkb to table scores ok.
    insert recored zkb to table scores ok.
    insert recored zkb to table scores ok.
    insert recored zkb to table scores ok.
    insert recored baoniu to table scores ok.
    insert recored baoniu to table scores ok.
    ===========get one record========
    zkb course: 1425258910718 90
    zkb course:art 1425258910727 87
    zkb course:math 1425258910722 97
    zkb grade: 1425258910705 5
    ===========show all record========
    baoniu course:math 1425258910734 89
    baoniu grade: 1425258910730 4
    zkb course: 1425258910718 90
    zkb course:art 1425258910727 87
    zkb course:math 1425258910722 97
    zkb grade: 1425258910705 5
    ===========del one record========
    del recored baoniu ok.
    zkb course: 1425258910718 90
    zkb course:art 1425258910727 87
    zkb course:math 1425258910722 97
    zkb grade: 1425258910705 5
    ===========show all record========
    zkb course: 1425258910718 90
    zkb course:art 1425258910727 87
    zkb course:math 1425258910722 97
    zkb grade: 1425258910705 5
    

