1. The words.txt file contains the following tab-separated content:
1 Smith
3 Alice
2 Tom
4 Tony
2. Analysis
(1) Define an entity class that implements the WritableComparable interface and overrides the compareTo method.
package com.qujiuge.sort_;

import org.apache.hadoop.io.WritableComparable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

public class Bean implements WritableComparable<Bean> {
    private int id;
    private String name;

    public int getId() {
        return id;
    }

    public void setId(int id) {
        this.id = id;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    // Serialization: write the fields in a fixed order.
    @Override
    public void write(DataOutput dataOutput) throws IOException {
        dataOutput.writeInt(id);
        dataOutput.writeUTF(name);
    }

    // Deserialization: read the fields in the same order they were written.
    @Override
    public void readFields(DataInput dataInput) throws IOException {
        id = dataInput.readInt();
        name = dataInput.readUTF();
    }

    @Override
    public String toString() {
        return id + "\t" + name;
    }

    // Sort by id in descending order. Returning 0 for equal ids is required
    // by the compareTo contract; equal keys are grouped together in the
    // reduce phase, so compareTo must be able to report equality.
    @Override
    public int compareTo(Bean o) {
        return Integer.compare(o.getId(), this.id);
    }
}
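To sanity-check the ordering locally, here is a minimal sketch (the BeanSortCheck class is hypothetical, purely for illustration, and not part of the job): it sorts a few Beans with Arrays.sort, which uses the same compareTo that the MapReduce shuffle relies on.

package com.qujiuge.sort_;

import java.util.Arrays;

// Hypothetical local check, not part of the MapReduce job.
public class BeanSortCheck {
    public static void main(String[] args) {
        int[] ids = {2, 4, 1};
        String[] names = {"Tom", "Tony", "Smith"};
        Bean[] beans = new Bean[ids.length];
        for (int i = 0; i < ids.length; i++) {
            beans[i] = new Bean();
            beans[i].setId(ids[i]);
            beans[i].setName(names[i]);
        }
        Arrays.sort(beans); // uses Bean.compareTo
        // Prints the beans in descending id order: 4 Tony, 2 Tom, 1 Smith
        System.out.println(Arrays.toString(beans));
    }
}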
(2) Map phase
(2.1) MapReduce reads the input file line by line; each line's text is passed to the mapper as the value.
(2.2) Split the line on the tab character and wrap each well-formed record in a Bean, which becomes the map output key so the framework sorts by it.
(3) Reduce phase
(3.1) Write the keys straight to the output.
3. After creating a Maven project, add the following dependencies (pick the Hadoop version that matches your cluster; 2.9.2 is used here):
<dependencies>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>2.9.2</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId>
        <version>2.9.2</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-mapreduce-client-core</artifactId>
        <version>2.9.2</version>
    </dependency>
</dependencies>
4. Write the MapReduce program
package com.qujiuge.sort_;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class Driver {

    static class SortMapper extends Mapper<LongWritable, Text, Bean, NullWritable> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Split the line on the tab character; skip malformed records.
            String[] fields = value.toString().split("\t");
            if (fields.length == 2) {
                Bean bean = new Bean();
                bean.setId(Integer.parseInt(fields[0]));
                bean.setName(fields[1]);
                // The Bean is the key, so the framework sorts by Bean.compareTo.
                context.write(bean, NullWritable.get());
            }
        }
    }

    static class SortReducer extends Reducer<Bean, NullWritable, Bean, NullWritable> {
        @Override
        protected void reduce(Bean key, Iterable<NullWritable> values, Context context)
                throws IOException, InterruptedException {
            // Write the key once per value so records with equal ids are all kept.
            for (NullWritable value : values) {
                context.write(key, value);
            }
        }
    }

    public static void main(String[] args)
            throws IOException, ClassNotFoundException, InterruptedException {
        Configuration config = new Configuration();
        Job job = Job.getInstance(config);

        job.setJarByClass(Driver.class);
        job.setMapperClass(SortMapper.class);
        job.setReducerClass(SortReducer.class);
        job.setMapOutputKeyClass(Bean.class);
        job.setMapOutputValueClass(NullWritable.class);
        job.setOutputKeyClass(Bean.class);
        job.setOutputValueClass(NullWritable.class);

        // Delete the output directory if it already exists, so reruns do not fail.
        FileSystem fs = FileSystem.get(config);
        Path outPath = new Path(args[1]);
        if (fs.exists(outPath)) {
            fs.delete(outPath, true);
        }

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, outPath);

        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
Note that with the default single reducer the output is totally ordered; with more than one reducer, each part file is sorted only within itself. The project structure is as follows:
5. Upload words.txt to HDFS
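For example, assuming /input as the target directory (any HDFS path works, as long as it matches the job's first argument):

hdfs dfs -mkdir -p /input
hdfs dfs -put words.txt /input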
6. Package the project as a jar file and run it with the hadoop jar command
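For example, assuming the jar produced by mvn package is named sort.jar (the actual name depends on your pom.xml):

hadoop jar sort.jar com.qujiuge.sort_.Driver /input /output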
Run result:
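Assuming the job completes successfully, the output file (part-r-00000 under /output) should contain the input records sorted by id in descending order:

4 Tony
3 Alice
2 Tom
1 Smith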