[Hadoop家族] Spring整合Hbase

1、需要加入的JAR包


2、配置文件spring-hbase.xml

<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
	xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:hdp="http://www.springframework.org/schema/hadoop"
	xmlns:beans="http://www.springframework.org/schema/beans"
	xsi:schemaLocation="  
    http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd  
    http://www.springframework.org/schema/hadoop http://www.springframework.org/schema/hadoop/spring-hadoop.xsd">
	<!-- ZooKeeper/HBase connection settings, used when connecting to a remote
	     HBase cluster; loaded from the standard hbase-site.xml on the classpath. -->
	<hdp:configuration resources="classpath:/config/hbase/hbase-site.xml" />
	<!-- Derives an HBase configuration from the Hadoop configuration above. -->
	<hdp:hbase-configuration configuration-ref="hadoopConfiguration" />
	<!-- HbaseTemplate: Spring Data Hadoop helper used by controllers for HBase get/put/scan. -->
	<bean id="hbaseTemplate" class="org.springframework.data.hadoop.hbase.HbaseTemplate">
		<property name="configuration" ref="hbaseConfiguration"></property>
		<property name="encoding" value="UTF-8"></property>
	</bean>
</beans>  


这里使用了hbase-site.xml的配置方式,可以直接把linux部署的hbase-site.xml拷过来就行

3、hbase-site.xml

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
	<!-- Required settings: hbase.rootdir, hbase.zookeeper.property.clientPort,
	     hbase.zookeeper.quorum. The rest are tuning values and can be adjusted later. -->
	<property>
		<name>hbase.rootdir</name>
		<value>hdfs://hadoop-master:9000/hbase20</value>
		<description>The directory shared by region servers.</description>
	</property>
	<property>
		<name>hbase.zookeeper.property.clientPort</name>
		<value>2181</value>
		<description>Property from ZooKeeper's config zoo.cfg. The port at
			which the clients will connect.
		</description>
	</property>
	<property>
		<name>zookeeper.session.timeout</name>
		<value>500000</value>
	</property>
	<property>
		<name>hbase.regionserver.restart.on.zk.expire</name>
		<value>true</value>
	</property>
	<property>
		<name>hbase.zookeeper.quorum</name>
		<value>hadoop-master,hadoop-slave1,hadoop-slave2</value>
	</property>
	<property>
		<name>hbase.tmp.dir</name>
		<value>/data/hbase/tmp</value>
	</property>
	<property>
		<name>hbase.cluster.distributed</name>
		<value>true</value>
	</property>
	<property>
		<name>hbase.balancer.period</name>
		<value>30000</value>
	</property>
	<!-- NOTE(review): this property was declared twice (500000, then 180000).
	     Hadoop Configuration keeps the last declaration, so the effective
	     value 180000 is retained and the shadowed duplicate removed. -->
	<property>
		<name>hbase.regionserver.lease.period</name>
		<value>180000</value>
	</property>
	<property>
		<name>hbase.rpc.timeout</name>
		<value>500000</value>
	</property>
	<property>
		<name>hfile.block.cache.size</name>
		<value>0.4</value>
	</property>
</configuration>

其中的

hbase.rootdir   

hbase.zookeeper.property.clientPort   

hbase.zookeeper.quorum   

是必配的几个配置,其他的是hbase优化配置,可以后期调整


4、spring整合后访问的示例代码

package com.fhzz.business.controller.hbase;

import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.quartz.SchedulerException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.hadoop.hbase.HbaseTemplate;
import org.springframework.data.hadoop.hbase.RowMapper;
import org.springframework.stereotype.Controller;
import org.springframework.ui.ModelMap;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.ResponseBody;

import com.fhzz.business.controller.db.DatabaseDemoAction;
import com.fhzz.core.controller.BaseAction;

/**
 * Demo controller showing HBase access through Spring Data Hadoop's
 * {@link HbaseTemplate} (configured in spring-hbase.xml / hbase-site.xml).
 *
 * @author YangYi
 */
@Controller
public class HBaseDemoAction extends BaseAction {

	// Bug fix: the logger was created with DatabaseDemoAction.class, so all
	// log output from this controller was attributed to the wrong class.
	private static final Log logger = LogFactory.getLog(HBaseDemoAction.class);

	@Autowired
	private HbaseTemplate hbaseTemplate;

	/**
	 * Forwards to the HBase demo page.
	 *
	 * @param request  current servlet request (unused)
	 * @param response current servlet response (unused)
	 * @param model    view model (unused)
	 * @return logical view name of the demo page
	 */
	@RequestMapping("toHbaseDemo")
	public String hbaseTest(HttpServletRequest request, HttpServletResponse response, ModelMap model) {
		// Required on Windows dev machines so the Hadoop client can locate winutils.exe.
		System.setProperty("hadoop.home.dir", "D:\\hadoop-2.8.3");
		return "hbase/hbaseDemo";
	}

	/**
	 * Scans the demo table between the given row keys and returns the rows in
	 * a grid-friendly JSON map ({@code success}, {@code total}, {@code rows}).
	 *
	 * @param startRow first row key of the scan (inclusive)
	 * @param stopRow  last row key of the scan (exclusive)
	 * @return result map consumed by the front-end grid
	 * @throws IOException        declared for framework compatibility
	 * @throws SchedulerException declared for framework compatibility
	 */
	@RequestMapping("scanWithStartAndStop")
	@ResponseBody
	public Map<String, Object> scanWithStartAndStop(@RequestParam("startRow") String startRow,
			@RequestParam("stopRow") String stopRow) throws IOException, SchedulerException {
		Map<String, Object> result = new HashMap<>();
		result.put("success", true);
		List<Map<String, Object>> list = scan("DEMO_TABLE3", startRow, stopRow);
		result.put("total", list.size());
		result.put("rows", list);
		return result;
	}

	/**
	 * Fetches rows from {@code tableName} between {@code startRow} (inclusive)
	 * and {@code stopRow} (exclusive). Each result row is flattened into a map
	 * keyed by {@code family_qualifier}, plus a {@code "row"} entry holding the
	 * row key.
	 *
	 * @param tableName HBase table to scan
	 * @param startRow  start row key; {@code null} is treated as "" (table start)
	 * @param stopRow   stop row key; {@code null} is treated as "" (table end)
	 * @return one map per row; empty list when nothing matches
	 */
	public List<Map<String, Object>> scan(String tableName, String startRow,
			String stopRow) {
		Scan scan = new Scan();
		// An empty byte[] means an unbounded scan edge, so null degrades gracefully.
		if (startRow == null) {
			startRow = "";
		}
		if (stopRow == null) {
			stopRow = "";
		}
		scan.setStartRow(Bytes.toBytes(startRow));
		scan.setStopRow(Bytes.toBytes(stopRow));
		/*
		 * PageFilter filter = new PageFilter(5); scan.setFilter(filter);
		 */
		return hbaseTemplate.find(tableName, scan,
				new RowMapper<Map<String, Object>>() {
					public Map<String, Object> mapRow(Result result, int rowNum)
							throws Exception {
						List<Cell> ceList = result.listCells();
						Map<String, Object> map = new HashMap<>();
						String row = "";
						if (ceList != null && ceList.size() > 0) {
							for (Cell cell : ceList) {
								// Decode row key, value, family and qualifier from
								// the cell's backing array using offset/length.
								row = Bytes.toString(cell.getRowArray(),
										cell.getRowOffset(),
										cell.getRowLength());
								String value = Bytes.toString(
										cell.getValueArray(),
										cell.getValueOffset(),
										cell.getValueLength());
								String family = Bytes.toString(
										cell.getFamilyArray(),
										cell.getFamilyOffset(),
										cell.getFamilyLength());
								String quali = Bytes.toString(
										cell.getQualifierArray(),
										cell.getQualifierOffset(),
										cell.getQualifierLength());
								map.put(family + "_" + quali, value);
							}
							map.put("row", row);
						}
						// Replaced System.out.println with class logger.
						if (logger.isDebugEnabled()) {
							logger.debug(map);
						}
						return map;
					}
				});
	}
}

5、这里注意由于使用了windows的开发环境,需要设置下

System.setProperty("hadoop.home.dir", "D:\\hadoop-2.8.3"); // 必备条件之一

此处的hadoop包里包含了一个名为  winutils.exe  的文件

如果没有的话,可以github下载

https://github.com/srccodes/hadoop-common-2.2.0-bin

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值