Implementing word count with Hadoop, saving the results to HBase, and troubleshooting: java.lang.ClassNotFoundException: org.apache.hadoop.io.compress.SnappyCodec
Design idea: use the MapReduce (MR) framework to count the words in a given file, then save the results into HBase.
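The data layout is simple: each word becomes one row in the wordcount table, with its count stored under column family content, qualifier count. Once the job has run, a count can be read straight back for verification. The sketch below is illustrative only, not part of the original program: the ReadCount class name and the example word "hello" are assumptions, while the cluster addresses and table/column names are the ones used by the program. It uses the same 0.9x-era HBase client API as the listing:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadCount {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "172.16.2.34,172.16.2.54,172.16.2.57");
        conf.set("hbase.zookeeper.property.clientPort", "2181");
        HTable table = new HTable(conf, "wordcount");
        // the row key is the word itself; "hello" is just an example
        Get get = new Get(Bytes.toBytes("hello"));
        Result result = table.get(get);
        byte[] raw = result.getValue(Bytes.toBytes("content"), Bytes.toBytes("count"));
        // the job stores the count as a string, so decode it the same way
        System.out.println("hello -> " + (raw == null ? "not found" : Bytes.toString(raw)));
        table.close();
    }
}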
The program is as follows:
import java.io.IOException;
import java.util.Iterator;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
//import org.apache.hadoop.io.compress.SnappyCodec;
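// Note: the SnappyCodec import above is deliberately commented out. It is tied to
// the error in the title: if org.apache.hadoop.io.compress.SnappyCodec is not on
// the runtime classpath (e.g. the Hadoop snappy jars/native libraries are missing),
// referencing it fails with java.lang.ClassNotFoundException.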
public class WordCountHBase {
// The Map class: emits (word, 1) for every token in the input line
public static class Map extends
Mapper<LongWritable, Text, Text, IntWritable> {
private final static IntWritable one = new IntWritable(1);
private Text word = new Text();
public void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {
StringTokenizer itr = new StringTokenizer(value.toString());
while (itr.hasMoreTokens()) {
word.set(itr.nextToken());
context.write(word, one);
}
}
}
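// Between map and reduce, the framework shuffles and groups the (word, 1)
// pairs by key, so each reduce() call sees one word with all of its counts.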
// The Reduce class: sums the counts for each word and writes one row to HBase
public static class Reduce extends
TableReducer<Text, IntWritable, NullWritable> {
public void reduce(Text key, Iterable<IntWritable> values,
Context context) throws IOException, InterruptedException {
int sum = 0;
for (IntWritable val : values) {
sum += val.get();
}
// Instantiate a Put: each word is stored in its own row (row key = the word)
Put put = new Put(Bytes.toBytes(key.toString()));
// column family "content", column qualifier "count", value = the count
put.add(Bytes.toBytes("content"), Bytes.toBytes("count"),
Bytes.toBytes(String.valueOf(sum)));
context.write(NullWritable.get(), put);
}
}
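// Because Reduce extends TableReducer and the job uses TableOutputFormat,
// every Put written above goes straight into the HBase table named by
// TableOutputFormat.OUTPUT_TABLE; no HDFS output path is needed.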
// Create the HBase table
public static void createHBaseTable(String tableName)
throws IOException {
// table descriptor
HTableDescriptor htd = new HTableDescriptor(tableName);
// column family descriptor
HColumnDescriptor col = new HColumnDescriptor("content");
htd.addFamily(col);
// configure HBase
Configuration conf = HBaseConfiguration.create();
// configure the ZooKeeper quorum (slave nodes)
conf.set("hbase.zookeeper.quorum", "172.16.2.34,172.16.2.54,172.16.2.57");
conf.set("hbase.zookeeper.property.clientPort", "2181");
// configure the master
conf.set("hbase.master", "172.16.2.42:60000");
HBaseAdmin hAdmin = new HBaseAdmin(conf);
if (hAdmin.tableExists(tableName)) {
System.out.println("该数据表已经存在,正在重新创建。");
hAdmin.disableTable(tableName);
hAdmin.deleteTable(tableName);
}
System.out.println("创建表:" + tableName);
hAdmin.createTable(htd);
// close the admin handle to release its connection
hAdmin.close();
}
public static void main(String[] args) throws Exception {
String tableName = "wordcount";
// Step 1: create the HBase table
WordCountHBase.createHBaseTable(tableName);
// Step 2: run the MapReduce job
// configure HBase
Configuration conf = new Configuration();
conf.set("hbase.zookeeper.quorum", "172.16.2.34,172.16.2.54,172.16.2.57");
conf.set("hbase.zookeeper.property.clientPort", "2181");
conf.set("hbase.master", "172.16.2.42:60000");
// configure Hadoop
conf.set("fs.default.name", "hdfs://172.16.2.42:9000/");
conf.set("hadoop.job.user", "hadoop");
conf.set("mapred.job.tracker", "172.16.2.42:9001");
// Submitting to the cluster from Eclipse requires a packaged jar. In theory this
// should not be necessary, but I never managed to get it to work without one.
conf.set("mapred.jar", "WordCountHBase.jar");
conf.set(TableOutputFormat.OUTPUT_TABLE, tableName);
Job job = new Job(conf, "New Word Count");
job.setJarByClass(WordCountHBase.class);
// set the Map and Reduce classes
job.setMapperClass(Map.class);
job.setReducerClass(Reduce.class);
// set the map output types
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(IntWritable.class);
// set the input and output formats
job.setInputFormatClass(TextInputFormat.class);
job.setOutputFormatClass(TableOutputFormat.class);
// set the input directory
FileInputFormat.addInputPath(job, new Path("hdfs://172.16