package org.apache.hadoop.examples;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class Sort
{
    public static class Map extends Mapper<Object, Text, IntWritable, IntWritable>
    {
        private IntWritable num = new IntWritable(1); // constant value 1; the rank is assigned in the reducer
        private IntWritable val = new IntWritable();  // the number parsed from the current line
        @Override
        public void map(Object key, Text value, Context context) throws IOException, InterruptedException
        {
            // Each line holds one number; emit it as the key (so the shuffle
            // phase sorts by it) with the constant 1 as the value.
            val.set(Integer.parseInt(value.toString().trim()));
            context.write(val, num);
        }
    }
    public static class Reduce extends Reducer<IntWritable, IntWritable, IntWritable, IntWritable>
    {
        private IntWritable num = new IntWritable(1); // rank (sequence number), starting at 1
        @Override
        public void reduce(IntWritable key, Iterable<IntWritable> value, Context context) throws IOException, InterruptedException
        {
            // The shuffle phase between Map and Reduce automatically sorts keys
            // in ascending order, so the numbers arrive here already sorted
            // (see the sketch after the listing for descending order).
            for (IntWritable val : value)
            {
                // Write a "rank number" pair to the output directory.
                context.write(num, key);
                // Increment the rank after each number written.
                num.set(num.get() + 1);
            }
        }
    }
    public static void main(String[] args) throws Exception
    {
        // Input and output paths are hard-coded here; validate them before use.
        String[] otherArgs = new String[]{"input2", "output2"};
        if (otherArgs.length != 2)
        {
            System.err.println("Usage: Sort <input> <output>");
            System.exit(2);
        }
        final String OUTPUT_PATH = otherArgs[1];
        // Load the configuration.
        Configuration conf = new Configuration();
        Path path = new Path(OUTPUT_PATH);
        FileSystem fileSystem = path.getFileSystem(conf);
        // Delete the output directory if it already exists, so the job can be rerun.
        if (fileSystem.exists(new Path(OUTPUT_PATH)))
        {
            fileSystem.delete(new Path(OUTPUT_PATH), true);
        }
        // Job setup.
        Job job = Job.getInstance(conf, "sort");
        job.setJarByClass(Sort.class);
        job.setMapperClass(Map.class);     // use the custom Map class
        job.setReducerClass(Reduce.class); // use the custom Reduce class
        job.setOutputKeyClass(IntWritable.class);   // type of the output key
        job.setOutputValueClass(IntWritable.class); // type of the output value
        // FileInputFormat splits each input file (if larger than 64 MB, the
        // classic HDFS block size) into input splits; each split is processed
        // by one Mapper task.
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
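A side note on the shuffle sort used above: MapReduce sorts map output keys in ascending order by default. If you wanted the numbers in descending order instead, a common approach is to register a custom sort comparator before submitting the job. Below is a minimal sketch of that idea; the class name DescendingIntComparator is my own and not part of the original code:

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
public class DescendingIntComparator extends WritableComparator
{
    protected DescendingIntComparator()
    {
        super(IntWritable.class, true); // true: instantiate keys so compare() receives deserialized IntWritables
    }
    @Override
    @SuppressWarnings({"rawtypes", "unchecked"})
    public int compare(WritableComparable a, WritableComparable b)
    {
        return -a.compareTo(b); // negate the default comparison to flip ascending into descending
    }
}

Then, in main() before waitForCompletion: job.setSortComparatorClass(DescendingIntComparator.class);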
The code is commented fairly thoroughly; if anything is unclear, leave me a comment. The input directory files and the resulting output directory are as follows:
DFS directory:
/input/1.txt 2.txt 3.txt 4.txt
Each file contains some test numbers; I won't screenshot them all here.
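For illustration, here is a hypothetical example (my own sample data, not from the original post): if the input files together contained the lines 7, 3, 25 and 1, the output file part-r-00000 would look like

1	1
2	3
3	7
4	25

with the rank in the first column and the sorted number in the second. Note this assumes the default single reducer; with multiple reducers each output file would only be sorted within itself.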