
Operating HBase with MapReduce

package man.ludq.hbase;

import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;

public class ExampleTotalMapReduce {
	public static void main(String[] args) {
		try{
			Configuration config = HBaseConfiguration.create();
			Job job = Job.getInstance(config, "ExampleSummary");  // the Job(Configuration, String) constructor is deprecated
			job.setJarByClass(ExampleTotalMapReduce.class);     // class that contains mapper and reducer

			Scan scan = new Scan();
			scan.setCaching(500);        // 1 is the default in Scan, which will be bad for MapReduce jobs
			scan.setCacheBlocks(false);  // don't set to true for MR jobs
			// set other scan attrs
			//scan.addColumn(family, qualifier);
			TableMapReduceUtil.initTableMapperJob(
					"access-log",        // input table
					scan,               // Scan instance to control CF and attribute selection
					MyMapper.class,     // mapper class
					Text.class,         // mapper output key
					IntWritable.class,  // mapper output value
					job);
			TableMapReduceUtil.initTableReducerJob(
					"total-access",        // output table
					MyTableReducer.class,    // reducer class
					job);
			job.setNumReduceTasks(1);   // at least one, adjust as required

			boolean b = job.waitForCompletion(true);
			if (!b) {
				throw new IOException("error with job!");
			} 
		} catch(Exception e){
			e.printStackTrace();
		}
	}

	public static class MyMapper extends TableMapper<Text, IntWritable>  {

		private final IntWritable ONE = new IntWritable(1);
		private Text text = new Text();

		@Override
		public void map(ImmutableBytesWritable row, Result value, Context context) throws IOException, InterruptedException {
			// Row keys are assumed to have the form "ip-timestamp"; keep the ip part.
			String ip = Bytes.toString(row.get(), row.getOffset(), row.getLength()).split("-")[0];
			// Bytes.toString decodes consistently as UTF-8 and returns null for a
			// missing cell, where new String(null) would throw a NullPointerException.
			String url = Bytes.toString(value.getValue(Bytes.toBytes("info"), Bytes.toBytes("url")));
			text.set(ip + "&" + url);
			context.write(text, ONE);
		}
	}

	public static class MyTableReducer extends TableReducer<Text, IntWritable, ImmutableBytesWritable>  {
		@Override
		public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
			int sum = 0;
			for (IntWritable val : values) {
				sum += val.get();
			}

			// Text#getBytes() returns the backing array, which may be longer than the
			// actual content; copy only getLength() bytes for the row key.
			Put put = new Put(Arrays.copyOf(key.getBytes(), key.getLength()));
			put.add(Bytes.toBytes("info"), Bytes.toBytes("count"), Bytes.toBytes(String.valueOf(sum)));

			context.write(null, put);  // TableOutputFormat ignores the key; the Put itself names the row
		}
	}
}
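
For reference: the mapper assumes row keys of the form ip-timestamp in the input table (it splits on "-") and reads the URL from the info:url column, while the reducer writes counts into info:count of the output table. A minimal HBase shell sketch to create matching tables (layout inferred from the code above; the sample row is illustrative):

create 'access-log', 'info'
create 'total-access', 'info'
put 'access-log', '10.0.0.1-1439888888', 'info:url', '/index.html'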

Package the code above into a jar and put it on the Hadoop cluster. Before it can run, the HBase jars must be made available to Hadoop:

Copy the required jars from hbase/lib to the Hadoop nodes, e.g. into /usr/local/hbaselib. Since I am using hbase-0.98.14, and to avoid duplicating jars that Hadoop already ships, I copy only the following:

hbase-*.jar
high-scale-lib-1.1.1.jar
htrace-core-2.04.jar
metrics-core-2.2.0.jar
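
The copy step might look like this (paths are illustrative; $HBASE_HOME stands for your HBase installation directory):

mkdir -p /usr/local/hbaselib
cp $HBASE_HOME/lib/hbase-*.jar /usr/local/hbaselib/
cp $HBASE_HOME/lib/high-scale-lib-1.1.1.jar \
   $HBASE_HOME/lib/htrace-core-2.04.jar \
   $HBASE_HOME/lib/metrics-core-2.2.0.jar /usr/local/hbaselib/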

Then add the following to hadoop/etc/hadoop/hadoop-env.sh:

# Append every jar in /usr/local/hbaselib to HADOOP_CLASSPATH.
for f in /usr/local/hbaselib/*.jar; do
  if [ "$HADOOP_CLASSPATH" ]; then
    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
  else
    export HADOOP_CLASSPATH=$f
  fi
done
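
A quick way to confirm the jars were picked up (illustrative check):

hadoop classpath | tr ':' '\n' | grep -i hbase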

After that, run the MapReduce job on Hadoop; it completes successfully.
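
The submit step might look like this (the jar name is illustrative; the main class is the one defined above), followed by a scan of the output table to check the counts:

hadoop jar example-total-mr.jar man.ludq.hbase.ExampleTotalMapReduce
echo "scan 'total-access'" | hbase shell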
