Hadoop's Hello World: WordCount Explained

For an illustrated, detailed walkthrough of the overall MapReduce process, see: http://www.cnblogs.com/xia520pi/archive/2012/05/16/2504205.html

package hadoop_test;

import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class WordCountTest {
	// Mapper: tokenizes each input line and emits <word, 1> for every token.
	public static class TokenizerMapper extends Mapper<Object,Text,Text,IntWritable>{
		private final static IntWritable one = new IntWritable(1);
		private Text word = new Text();
		
		@Override
		public void map(Object key,Text value,Context context) throws IOException, InterruptedException{
			StringTokenizer st = new StringTokenizer(value.toString());
			while(st.hasMoreTokens())
			{
				word.set(st.nextToken());
				context.write(word, one);
			}
		}
	}
	// Reducer (also used below as the Combiner): sums the counts for each word.
	public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable>{
		private IntWritable result = new IntWritable();
		
		@Override
		public void reduce(Text key,Iterable<IntWritable> values,Context context) throws IOException,InterruptedException{
			int sum=0;
			for(IntWritable val : values)
				sum+=val.get();//get() returns the underlying int
			result.set(sum);//set() stores the summed count
			context.write(key, result);
		}
	}
	public static void main(String[] args) throws Exception {
		//Hard-coded HDFS paths for running from the IDE; delete these three
		//lines to pass the paths on the command line instead.
		args = new String[2];
		args[0]="hdfs://hadoop1:9000/input";
		args[1]="hdfs://hadoop1:9000/output";
		//Load the Configuration (reads core-default.xml and core-site.xml)
		Configuration conf = new Configuration();
		//Strip generic Hadoop options and keep the job arguments,
		//e.g. [hdfs://localhost:9000/input, hdfs://localhost:9000/output]
		String[] otherArgs = new GenericOptionsParser(conf,args).getRemainingArgs();
		if (otherArgs.length != 2) {//exit if there are not exactly two arguments
			System.err.println("usage: wordcount <in> <out>");
			System.exit(2);
		}
		//Configure the job.
		//The IDE strikes through new Job(...) because that constructor is
		//deprecated; use Job.getInstance(...) instead:
		//Job job = new Job(conf, "WordCountTest");
		Job job = Job.getInstance(conf, "WordCountTest");
		job.setJarByClass(WordCountTest.class);
		job.setMapperClass(TokenizerMapper.class);
		job.setCombinerClass(IntSumReducer.class);//locally merge the map outputs on each node before the shuffle
		job.setReducerClass(IntSumReducer.class);
		job.setOutputKeyClass(Text.class);//job output is <word, count>, i.e. <Text, IntWritable>
		job.setOutputValueClass(IntWritable.class);
		FileInputFormat.addInputPath(job, new Path(otherArgs[0]));//set the input and output paths
		FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
		System.exit(job.waitForCompletion(true)?0:1);
	}
}
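
To submit the job on a cluster, package the class into a jar and run it with hadoop jar, passing the input and output HDFS paths as the two arguments; note that the output directory must not already exist.

Because IntSumReducer is also registered as the Combiner, per-word counts are partially summed on each map node before the shuffle, which shrinks the intermediate data; this is only safe because integer addition is commutative and associative. A common alternative is "in-mapper combining", where the mapper buffers counts itself and flushes them once in cleanup(). The sketch below illustrates the idea; the class name InMapperCombiningMapper is hypothetical and not part of the original program:

```java
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.StringTokenizer;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Hypothetical alternative to setCombinerClass: aggregate counts inside the
// mapper and emit one <word, localCount> pair per word per map task.
public class InMapperCombiningMapper extends Mapper<Object, Text, Text, IntWritable> {
    private final Map<String, Integer> counts = new HashMap<>();

    @Override
    public void map(Object key, Text value, Context context) {
        StringTokenizer st = new StringTokenizer(value.toString());
        while (st.hasMoreTokens()) {
            // merge() increments the running count for this word
            counts.merge(st.nextToken(), 1, Integer::sum);
        }
    }

    @Override
    protected void cleanup(Context context) throws IOException, InterruptedException {
        // Flush the locally aggregated counts once, at the end of the map task.
        Text word = new Text();
        IntWritable total = new IntWritable();
        for (Map.Entry<String, Integer> e : counts.entrySet()) {
            word.set(e.getKey());
            total.set(e.getValue());
            context.write(word, total);
        }
    }
}
```

Whether this beats the framework Combiner depends on how many distinct words fit in the map task's heap; the Combiner is the safer default because the framework also applies it automatically when map output spills to disk.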
WordCount is the classic MapReduce program: it counts how many times each word occurs in a body of text. The input is split into pieces, and in the Map phase each piece is tokenized into words, with every word emitted as the key-value pair <word, 1>. The Shuffle and Sort phase then groups together all pairs that share the same word. Finally, the Reduce phase sums the grouped counts, yielding each word's total number of occurrences. The complete source is the annotated listing above.
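
To make the three phases concrete, here is a small self-contained Java sketch (no Hadoop required) that simulates the same map / group-by-key / reduce flow; the two input lines are made up for illustration:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class WordCountFlowDemo {
    public static void main(String[] args) {
        // Input split: two "lines", as FileInputFormat would hand them to mappers.
        String[] lines = {"hello hadoop", "hello world"};

        // Map phase: emit <word, 1> for every token.
        List<Map.Entry<String, Integer>> mapped = new ArrayList<>();
        for (String line : lines)
            for (String token : line.split("\\s+"))
                mapped.add(Map.entry(token, 1));
        System.out.println("map output:    " + mapped);
        // map output:    [hello=1, hadoop=1, hello=1, world=1]

        // Shuffle & sort phase: group values by key (TreeMap also sorts the keys).
        Map<String, List<Integer>> grouped = new TreeMap<>();
        for (Map.Entry<String, Integer> kv : mapped)
            grouped.computeIfAbsent(kv.getKey(), k -> new ArrayList<>()).add(kv.getValue());
        System.out.println("after shuffle: " + grouped);
        // after shuffle: {hadoop=[1], hello=[1, 1], world=[1]}

        // Reduce phase: sum each word's list of ones.
        Map<String, Integer> reduced = new TreeMap<>();
        grouped.forEach((word, ones) ->
            reduced.put(word, ones.stream().mapToInt(Integer::intValue).sum()));
        System.out.println("reduce output: " + reduced);
        // reduce output: {hadoop=1, hello=2, world=1}
    }
}
```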