Mapper function (shown with the imports the complete class needs):
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;

static class AnalyzMapper extends TableMapper<Text, IntWritable> {

    private static final IntWritable ONE = new IntWritable(1);
    private final JSONParser parser = new JSONParser();

    @Override
    protected void map(ImmutableBytesWritable row, Result columns, Context context)
            throws IOException, InterruptedException {
        // Iterate over Cells rather than the deprecated KeyValue API, which wastes resources.
        for (Cell cell : columns.listCells()) {
            // CellUtil's clone methods are static, so no CellUtil instance is needed.
            String qualifier = Bytes.toStringBinary(CellUtil.cloneQualifier(cell)); // the column name inside the family
            String value = Bytes.toStringBinary(CellUtil.cloneValue(cell));         // the cell's value
            try {
                // If the cell stores JSON, a single field could be extracted instead:
                // JSONObject json = (JSONObject) parser.parse(value);
                // String author = (String) json.get("author");
                context.write(new Text(qualifier + value), ONE);
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }
}
As the code above shows, Result columns hands the mapper every column under the scanned column family, and each emitted record is the pair (qualifier + value, 1), which the reducer below turns into occurrence counts. We can restrict the input to only the family and columns we need through the Scan configured in the main function.
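For example, a minimal sketch of narrowing the Scan (the "data" family matches the configuration in the main function below; the "author" qualifier is a hypothetical example):

Scan scan = new Scan();
scan.addFamily(Bytes.toBytes("data"));                          // every column in the "data" family
scan.addColumn(Bytes.toBytes("data"), Bytes.toBytes("author")); // or only one specific column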
Reducer function:
static class AnalyzReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        int count = 0;
        // Sum the values instead of merely counting iterations, so pre-aggregated
        // counts (e.g. from a combiner) are handled correctly as well.
        for (IntWritable one : values) {
            count += one.get();
        }
        context.write(key, new IntWritable(count));
    }
}
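Because the reducer sums the incoming values rather than counting them, it could optionally double as a combiner to cut shuffle traffic. One extra line in the main function would enable that (not part of the original job setup):

job.setCombinerClass(AnalyzReducer.class);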
Main function:
public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("conf.column", "data");
    // Configure conf for the cluster
    conf.set("hbase.rootdir", "hdfs://master:9000/hbase");
    conf.set("hbase.master", "219.217.203.1:60000");
    // The next two settings are required when running from Eclipse;
    // without them the client cannot locate the cluster.
    conf.set("hbase.zookeeper.property.clientPort", "2181");
    conf.set("hbase.zookeeper.quorum", "219.217.203.24,219.217.203.25,219.217.203.26");

    Scan scan = new Scan();
    // scan.addColumn(Bytes.toBytes("data"), Bytes.toBytes("qual"));

    Job job = Job.getInstance(conf);
    job.setJarByClass(AnalyzFromHBase.class);
    // Set up the mapper with the utility class the HBase library provides.
    TableMapReduceUtil.initTableMapperJob("hbase-book", scan, AnalyzMapper.class,
            Text.class, IntWritable.class, job);
    // Set up the reducer: regular MapReduce job settings.
    job.setReducerClass(AnalyzReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    job.setNumReduceTasks(1);

    String outpath = "hdfs://master:9000/tim/hbase-book/json-test-out";
    FileOutputFormat.setOutputPath(job, new Path(outpath));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
}
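Two practical notes: FileOutputFormat refuses to run if the output directory already exists, so it has to be removed between runs, and the HBase jars must be on Hadoop's classpath when the job is submitted. A hypothetical launch (the jar name is an assumption):

$ hadoop fs -rm -r /tim/hbase-book/json-test-out
$ HADOOP_CLASSPATH=$(hbase classpath) hadoop jar analyz-from-hbase.jar AnalyzFromHBase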