This experiment was performed on the Windows operating system, compiled and run with IDEA.
1. Environment Preparation
2. Complete Code:
The dataset consists of 1000 random numbers in the range [1, 10000].
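The test data can be produced with a small helper like the sketch below (the GenSortData class name and the local sort.txt file name are assumptions; the file is then uploaded to /user/hduser/sort/sort.txt on HDFS, for example with hdfs dfs -put sort.txt /user/hduser/sort/):

package com.hadoop.demo.impl;

import java.io.PrintWriter;
import java.util.Random;

// Hypothetical helper: writes 1000 random integers in [1, 10000], one per line.
public class GenSortData {
public static void main(String[] args) throws Exception {
Random rnd = new Random();
try (PrintWriter out = new PrintWriter("sort.txt")) {
for (int i = 0; i < 1000; i++) {
// nextInt(10000) yields 0..9999, +1 shifts it to 1..10000
out.println(rnd.nextInt(10000) + 1);
}
}
}
}
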
package com.hadoop.demo.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
import java.util.StringTokenizer;
/**
* @author: 易霭珞
* @description Sorts the input data
* @date: 2022/10/26 11:14
*/
public class InputSort {
public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
System.setProperty("HADOOP_USER_NAME","hduser");
Configuration conf = new Configuration();
conf.set("fs.defaultFs","hdfs://192.168.56.100:9000");
//设置job属性
Job job = Job.getInstance(conf);
job.setJarByClass(InputSort.class);
//Set the input path
Path inPath = new Path("hdfs://192.168.56.100:9000/user/hduser/sort/sort.txt");
FileInputFormat.addInputPath(job, inPath);
//Set the Mapper class and its output key/value types
job.setMapperClass(InputSort.sortMapper.class);
job.setMapOutputKeyClass(IntWritable.class);
job.setMapOutputValueClass(NullWritable.class);
//Set the Reducer class and its output key/value types
job.setReducerClass(InputSort.sortReduce.class);
job.setOutputKeyClass(IntWritable.class);
job.setOutputValueClass(NullWritable.class);
job.setNumReduceTasks(100);//100 reduce tasks, so each partition produces one sorted output file
//Set the custom Partitioner class
job.setPartitionerClass(InputSort.sortPartition.class);
//Set the output path
Path outPath = new Path("hdfs://192.168.56.100:9000/user/hduser/sort/out");
FileOutputFormat.setOutputPath(job, outPath);
System.exit(job.waitForCompletion(true)?0:1);
}
/**
* Extends Mapper. The input value (a line of Text) becomes the output key;
* since sorting does not need an output value, NullWritable is used.
*/
public static class sortMapper extends Mapper<Object,Text,IntWritable,NullWritable>{
public sortMapper(){}
private IntWritable line = new IntWritable();
/**
* map reads each token of the input value, parses it as an int, and emits it
* as an IntWritable output key with a NullWritable value.
* @param key the input offset (unused)
* @param value one line of input text
* @param context the MapReduce context
* @throws IOException
* @throws InterruptedException
*/
@Override
protected void map(Object key, Text value, Context context) throws IOException, InterruptedException {
StringTokenizer itr = new StringTokenizer(value.toString());
while (itr.hasMoreTokens()){
line.set(Integer.parseInt(itr.nextToken()));
context.write(line,NullWritable.get());
}
}
}
//Extends Reducer; simply writes every key to the context
public static class sortReduce extends Reducer<IntWritable,NullWritable,IntWritable,NullWritable>
{
/**
* reduce writes the key once per value, so a number that occurs n times in
* the input also appears n times in the output.
* @param key the number being sorted
* @param values one NullWritable per occurrence of the key
* @param context the MapReduce context
* @throws IOException
* @throws InterruptedException
*/
@Override
protected void reduce(IntWritable key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException {
for (NullWritable ignored : values){
context.write(key,NullWritable.get());
}
}
}
//Custom partitioner: spreads keys roughly evenly across partitions to avoid data skew,
//and keeps the key ranges ordered across partitions so the partition outputs are globally sorted.
public static class sortPartition extends Partitioner<IntWritable,NullWritable>{
/**
* Maps a key to a partition index based on the numeric range it falls into.
* @param key the number being sorted
* @param nullWritable unused value
* @param numReduceTasks the number of reduce tasks configured on the job
* @return the partition index for this key
*/
@Override
public int getPartition(IntWritable key, NullWritable nullWritable, int numReduceTasks) {
int maxValue = 10000;//assume the values to sort never exceed 10000, matching the dataset range [1,10000]
int parts = maxValue / numReduceTasks;//width of each partition's key range, e.g. 10000 / 100 = 100
int value = key.get();
//A key goes to the partition i whose range (parts*i, parts*(i+1)] contains it,
//e.g. with parts = 100 the key 4321 lands in partition 43.
for (int i = 0; i < numReduceTasks; i++) {
if(value > parts*i && value <= parts*(i+1))
return i;
}
//Values outside (0, maxValue] fall back to partition 0.
return 0;
}
}
}
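After the job finishes, each reduce task writes one file (part-r-00000 through part-r-00099) under /user/hduser/sort/out, and because the partitioner keeps key ranges ordered across partitions, reading the part files in name order yields the globally sorted sequence. The following is a minimal verification sketch (the SortOutputCheck class name is an assumption; it reuses the HDFS address, user, and output path from above):

package com.hadoop.demo.impl;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URI;
import java.util.Arrays;
import java.util.Comparator;

public class SortOutputCheck {
public static void main(String[] args) throws Exception {
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(URI.create("hdfs://192.168.56.100:9000"), conf, "hduser");
//List the reduce output files and sort them by name (part-r-00000, part-r-00001, ...)
FileStatus[] parts = fs.listStatus(new Path("/user/hduser/sort/out"),
p -> p.getName().startsWith("part-r-"));
Arrays.sort(parts, Comparator.comparing(s -> s.getPath().getName()));
int prev = Integer.MIN_VALUE;
long count = 0;
for (FileStatus part : parts) {
try (BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(part.getPath())))) {
String line;
while ((line = in.readLine()) != null) {
if (line.trim().isEmpty()) continue;
int v = Integer.parseInt(line.trim());
//Every value must be >= the previous one across all part files
if (v < prev) {
throw new IllegalStateException("Output is not globally sorted at value " + v);
}
prev = v;
count++;
}
}
}
System.out.println(count + " values read, output is globally sorted.");
}
}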