Tip
When walking through values with the iterator, do not assign the value object directly to a variable: the assignment copies a reference, so when the object the iterator points to changes later, the variable you copied changes with it.
protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
    IntWritable max = new IntWritable(0);
    // To save memory, MapReduce reuses the same object while iterating:
    // the value object handed out by the iterator is created only once.
    // IntWritable val = new IntWritable();
    // val.set(312);
    // val.get() > max.get() -> 312 > 0 -> true
    // max = val;    // val and max are objects, so this copies the reference -> max and val now share one address
    // val.set(684); // since max and val share the same address, max's value also becomes 684
    // val.get() > max.get() -> 684 > 684 -> false
    // val.set(340); // max's value follows along and becomes 340
    // Iterating like this, max would end up holding whatever the last value happened to be.
    for (IntWritable val : values) {
        if (val.get() > max.get()) {
            // max = val; // assigning val directly would alias max to val, and later
            //            // changes to val would corrupt max; copy the primitive int instead
            max.set(val.get());
        }
    }
    context.write(key, max);
}
Data: a person's name with several scores (name: score1, score2, score3, score4);
find the maximum score per person.
package maxscore;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
public class maxMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Each input line is "name score", separated by a space
        String[] s = value.toString().split(" ");
        context.write(new Text(s[0]), new IntWritable(Integer.parseInt(s[1])));
    }
}
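As a quick sanity check of the parsing logic, a plain-Java sketch; the sample line "Bob 90" is a hypothetical input, not from the original data:

// Checks the mapper's split logic on one hypothetical input line.
public class SplitCheck {
    public static void main(String[] args) {
        String line = "Bob 90"; // one input line: name<space>score
        String[] s = line.split(" ");
        System.out.println(s[0] + " -> " + Integer.parseInt(s[1])); // prints: Bob -> 90
    }
}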
package maxscore;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
public class maxReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        // Find the maximum value in values.
        // maxScore is a primitive int, so the iterator's object reuse cannot affect it.
        int maxScore = 0;
        for (IntWritable i : values) {
            if (i.get() >= maxScore) {
                maxScore = i.get();
            }
        }
        context.write(key, new IntWritable(maxScore));
    }
}
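For example, with hypothetical grouped input where key Bob maps to the values [90, 85, 95], this reducer writes (Bob, 95).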
package maxscore;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
public class maxDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(maxDriver.class);
        job.setMapperClass(maxMapper.class);
        job.setReducerClass(maxReducer.class);
        // Set the mapper's output types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // Set the reducer's output types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // Input path
        FileInputFormat.addInputPath(job, new Path("hdfs://hadoop01:9000/txt/score2.txt"));
        // Output path: must not already exist on HDFS
        FileOutputFormat.setOutputPath(job, new Path("hdfs://hadoop01:9000/result/score"));
        // Submit the job and wait for it to finish
        job.waitForCompletion(true);
    }
}
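One common variant (an assumption, not part of the original code): waitForCompletion returns true on success, so the last line of main can be written as `System.exit(job.waitForCompletion(true) ? 0 : 1);` to make a failed job visible through the process exit code.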