数据集
有如下数据集,每行以空格分开,前一个为单词,后一个为该单词出现次数,如下。
All 1
An 1
DFS 1
Distributed 1
DistributedFileSystem. 1
File 1
FileSystem 1
Hadoop 3
It 1
It's 1
LocalFileSystem 1
System 1
The 3
a 6
abstract 1
and 3
appears 1
as 3
base 1
be 2
because 1
capacity. 1
class 1
code 1
disk. 2
distributed 2
exists 1
fairly 1
fault 1
filesystem, 1
filesystem. 1
for 3
generic 1
implementation 2
implemented 1
instances 1
is 3
its 1
large 1
local 2
locally-connected 1
may 2
multi-machine 1
object. 1
of 1
one 1
or 1
potentially 2
reflects 1
should 1
single 1
small 1
system 1
testing. 1
that 3
the 2
to 1
tolerance 1
use 2
useful 1
user 1
version 1
very 1
written 1
实现功能
将单词出现次数从多到少排序,即出现次数从大到小排序。排序后如下。每行有两个元素,第一个为单词,第二个为出现次数。如单词a出现的次数为6,是出现次数最多的单词,所以排在第一位。
a 6
Hadoop 3
The 3
and 3
as 3
for 3
is 3
that 3
be 2
disk. 2
distributed 2
implementation 2
local 2
may 2
potentially 2
the 2
use 2
All 1
An 1
DFS 1
Distributed 1
DistributedFileSystem. 1
File 1
FileSystem 1
It 1
It's 1
LocalFileSystem 1
System 1
abstract 1
appears 1
base 1
because 1
capacity. 1
class 1
code 1
exists 1
fairly 1
fault 1
filesystem, 1
filesystem. 1
generic 1
implemented 1
instances 1
its 1
large 1
locally-connected 1
multi-machine 1
object. 1
of 1
one 1
or 1
reflects 1
should 1
single 1
small 1
system 1
testing. 1
to 1
tolerance 1
useful 1
user 1
version 1
very 1
written 1
实现代码
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
/**
 * MapReduce job that re-sorts word-count pairs (one "word count" pair per input line)
 * by count descending, breaking ties by word ascending (lexicographic).
 *
 * <p>The trick: the pair itself is the map output key ({@link TextIntWritable}), so the
 * shuffle's key sort produces the desired global order; the reducer simply echoes keys.
 */
public class WordCountSort {

    /**
     * Composite key: a word plus its occurrence count.
     *
     * <p>Sort order (see {@link #compareTo}): higher count first; equal counts fall back
     * to lexicographic word order.
     */
    public static class TextIntWritable implements WritableComparable<TextIntWritable> {
        Text word;          // the word
        IntWritable count;  // its occurrence count

        public TextIntWritable() {
            // Hadoop instantiates keys reflectively and fills them via readFields,
            // so the no-arg constructor must leave both fields non-null.
            set(new Text(), new IntWritable());
        }

        public void set(Text word, IntWritable count) {
            this.word = word;
            this.count = count;
        }

        @Override
        public void write(DataOutput out) throws IOException {
            word.write(out);
            count.write(out);
        }

        @Override
        public void readFields(DataInput in) throws IOException {
            word.readFields(in);
            count.readFields(in);
        }

        @Override
        public String toString() {
            return word.toString() + " " + count.toString();
        }

        /**
         * equals is overridden together with hashCode so hash-based partitioning and
         * grouping see a consistent contract (the original class had hashCode only).
         */
        @Override
        public boolean equals(Object obj) {
            if (this == obj) {
                return true;
            }
            if (!(obj instanceof TextIntWritable)) {
                return false;
            }
            TextIntWritable other = (TextIntWritable) obj;
            return word.equals(other.word) && count.equals(other.count);
        }

        @Override
        public int hashCode() {
            // Standard 31-multiplier combination, consistent with equals above.
            return 31 * word.hashCode() + count.hashCode();
        }

        @Override
        public int compareTo(TextIntWritable o) {
            // Compare counts first; negate for descending order.
            int result = -1 * this.count.compareTo(o.count);
            if (result != 0) {
                return result;
            }
            // Equal counts: lexicographic word order.
            return this.word.compareTo(o.word);
        }
    }

    /**
     * Parses each "word count" line and emits the pair as the map output key with a
     * NullWritable value; the shuffle's key sort then orders the records.
     */
    public static class WordCountMapper
            extends Mapper<LongWritable, Text, TextIntWritable, NullWritable> {
        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Split on any whitespace run so extra spacing cannot break parsing.
            String[] fields = value.toString().trim().split("\\s+");
            if (fields.length < 2) {
                return; // skip blank or malformed lines instead of crashing the job
            }
            TextIntWritable k = new TextIntWritable();
            k.set(new Text(fields[0]), new IntWritable(Integer.parseInt(fields[1])));
            context.write(k, NullWritable.get());
        }
    }

    /**
     * Identity reducer: keys arrive already sorted, so each one is written straight
     * through (once per associated NullWritable value).
     */
    public static class WordCountReducer
            extends Reducer<TextIntWritable, NullWritable, TextIntWritable, NullWritable> {
        @Override
        public void reduce(TextIntWritable key, Iterable<NullWritable> value, Context context)
                throws IOException, InterruptedException {
            for (NullWritable v : value) {
                context.write(key, v);
            }
        }
    }

    /**
     * Configures and submits the job.
     *
     * @param args args[0] = input path, args[1] = output path
     */
    public static void main(String[] args)
            throws IOException, ClassNotFoundException, InterruptedException {
        if (args.length < 2) {
            System.err.println("Usage: WordCountSort <input path> <output path>");
            return;
        }
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "WordCountSort");
        job.setJarByClass(WordCountSort.class);
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);
        job.setOutputKeyClass(TextIntWritable.class);
        job.setOutputValueClass(NullWritable.class);
        // A single reducer is required for one globally sorted output file;
        // do not rely on the cluster's default reducer count.
        job.setNumReduceTasks(1);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}