Another introductory Hadoop demo: computing averages. It is a small modification of the WordCount demo. The input is a set of score lists of (name, score) pairs, and the goal is to compute each person's average score. For example:
// subject1.txt
a 90
b 80
c 70
// subject2.txt
a 100
b 90
c 80
Compute the average score for each of a, b, and c. The approach is straightforward: in the map phase, output the name as the key and the score as the value. In the reduce phase, each key (a name) arrives together with all the scores recorded for that name, so the reducer just averages them.
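For the sample input above, the data flow through the job looks like this (a sketch of what the framework does, not actual program output):

map output:     (a,90) (b,80) (c,70) (a,100) (b,90) (c,80)
after shuffle:  a -> [90, 100]    b -> [80, 90]    c -> [70, 80]
reduce output:  (a,95) (b,85) (c,75)

One caveat compared with WordCount: the reducer here cannot simply be reused as a combiner, because an average of partial averages is not in general the overall average.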
We implemented two versions of the code, using TextInputFormat and KeyValueTextInputFormat respectively as the input format. With TextInputFormat the mapper receives the byte offset of each line as the key and the whole line as the value, so it must split the line itself; KeyValueTextInputFormat splits each line at a separator and hands the mapper a ready-made (key, value) pair.
TextInputFormat version:
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class AveScore {

    // Mapper: parse "name score" lines and emit (name, score) pairs.
    // With TextInputFormat the input key is the byte offset of the line
    // and the value is the whole line.
    public static class AveMapper extends Mapper<Object, Text, Text, IntWritable>
    {
        @Override
        public void map(Object key, Text value, Context context) throws IOException, InterruptedException
        {
            String line = value.toString();
            String[] strs = line.split("\\s+");   // split on whitespace, tolerating tabs and repeated spaces
            String name = strs[0];
            int score = Integer.parseInt(strs[1]);
            context.write(new Text(name), new IntWritable(score));
        }
    }

    // Reducer: receives (name, [score1, score2, ...]) and emits (name, average).
    public static class AveReducer extends Reducer<Text, IntWritable, Text, IntWritable>
    {
        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException
        {
            int sum = 0;
            int count = 0;
            for (IntWritable val : values)
            {
                sum += val.get();
                count++;
            }
            int aveScore = sum / count;   // integer division: the fractional part is truncated
            context.write(key, new IntWritable(aveScore));
        }
    }

    public static void main(String[] args) throws Exception
    {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "AverageScore");   // new Job(conf, ...) is deprecated
        job.setJarByClass(AveScore.class);
        job.setMapperClass(AveMapper.class);
        job.setReducerClass(AveReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));     // args[0]: input directory
        FileOutputFormat.setOutputPath(job, new Path(args[1]));   // args[1]: output directory (must not exist yet)
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
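To try it out, assuming the class has been packaged into a jar named AveScore.jar and the two score files uploaded to an HDFS directory (the jar name and paths below are only placeholders):

$ hadoop jar AveScore.jar AveScore /input/scores /output/avescore
$ hadoop fs -cat /output/avescore/part-r-00000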
KeyValueTextInputFormat version:
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
public class AveScore_KeyValue {

    // Mapper: with KeyValueTextInputFormat each line is already split at the
    // first separator, so the key is the name and the value is the score text.
    public static class AveMapper extends Mapper<Text, Text, Text, IntWritable>
    {
        @Override
        public void map(Text key, Text value, Context context) throws IOException, InterruptedException
        {
            int score = Integer.parseInt(value.toString());
            context.write(key, new IntWritable(score));
        }
    }

    // Reducer: identical to the TextInputFormat version.
    public static class AveReducer extends Reducer<Text, IntWritable, Text, IntWritable>
    {
        @Override
        public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException
        {
            int sum = 0;
            int count = 0;
            for (IntWritable val : values)
            {
                sum += val.get();
                count++;
            }
            int aveScore = sum / count;   // integer division
            context.write(key, new IntWritable(aveScore));
        }
    }

    public static void main(String[] args) throws Exception
    {
        Configuration conf = new Configuration();
        // KeyValueTextInputFormat splits at a tab by default; our files use a space.
        conf.set("mapreduce.input.keyvaluelinerecordreader.key.value.separator", " ");
        Job job = Job.getInstance(conf, "AverageScore");
        job.setJarByClass(AveScore_KeyValue.class);
        job.setMapperClass(AveMapper.class);
        job.setReducerClass(AveReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        job.setInputFormatClass(KeyValueTextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);   // TextOutputFormat is the default anyway
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
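Note that the separator property set above is the Hadoop 2.x name; on the old 1.x releases, if memory serves, the same setting went by a different name (only needed on those old versions):

conf.set("key.value.separator.in.input.line", " ");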
The output is:
a 95
b 85
c 75
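One thing to watch: sum / count is integer division, so the average is truncated (for this sample the averages happen to be exact). If fractional averages are needed, here is a minimal sketch of a reducer that emits a DoubleWritable instead (only the parts that change are shown; org.apache.hadoop.io.DoubleWritable must be imported):

public static class AveReducer extends Reducer<Text, IntWritable, Text, DoubleWritable>
{
    @Override
    public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException
    {
        int sum = 0;
        int count = 0;
        for (IntWritable val : values)
        {
            sum += val.get();
            count++;
        }
        // floating-point division keeps the fractional part
        context.write(key, new DoubleWritable((double) sum / count));
    }
}

In the driver, the map output value is then still IntWritable while the final output value is DoubleWritable, so both must be declared:

job.setMapOutputValueClass(IntWritable.class);
job.setOutputValueClass(DoubleWritable.class);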