This section continues "Hadoop offline stage (Part 8-2)", which covered the MapReduce principle and driving MapReduce through the Java API. All of the demos are built as different packages inside the same project, so the pom.xml is identical to the one in Part 8-2.
Implementing data splitting with a custom Partitioner
The raw data is shown in the figure below. The goal is to send every record whose sixth column is greater than 15 to partition 0, and every record whose sixth column is less than or equal to 15 to partition 1.
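Since the figure does not reproduce in text, the data can be pictured as tab-separated lines whose sixth field is an integer; the two lines below are invented purely for illustration:
a	b	c	d	e	22
a	b	c	d	e	9
Under the rule above, the first line would land in partition 0 (22 > 15) and the second in partition 1 (9 <= 15).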
- Write a class that extends org.apache.hadoop.mapreduce.Mapper to handle the Map phase
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;

public class PartitionMapper extends Mapper<LongWritable, Text, Text, NullWritable>
{
    //the map phase does no real work here: each whole input line becomes k2,
    //with a NullWritable placeholder as v2, so the Partitioner can inspect the line
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException
    {
        context.write(value, NullWritable.get());
    }
}
- Write a class that extends org.apache.hadoop.mapreduce.Partitioner to handle the partitioning (a small local check of its logic follows the code)
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

//the Partitioner receives k2 and v2 from the Mapper, inspects them, and returns a partition number
public class MyPartitioner extends Partitioner<Text, NullWritable>
{
    @Override
    public int getPartition(Text text, NullWritable nullWritable, int numReduceTasks)
    {
        //split the line on tabs and take the sixth column
        String num = text.toString().split("\t")[5];
        if (Integer.parseInt(num) > 15)
            return 0;
        else
            return 1;
    }
}
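As a quick sanity check, getPartition can be exercised locally without running a job. This harness is not part of the tutorial's project, and the sample record values are invented:

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;

public class MyPartitionerCheck
{
    public static void main(String[] args)
    {
        MyPartitioner partitioner = new MyPartitioner();
        //hypothetical tab-separated records; only the sixth column matters
        Text over = new Text("a\tb\tc\td\te\t22");
        Text under = new Text("a\tb\tc\td\te\t9");
        System.out.println(partitioner.getPartition(over, NullWritable.get(), 2));  //prints 0
        System.out.println(partitioner.getPartition(under, NullWritable.get(), 2)); //prints 1
    }
}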
- Write a class that extends org.apache.hadoop.mapreduce.Reducer to handle the Reduce phase
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;

public class PartitionReduce extends Reducer<Text, NullWritable, Text, NullWritable>
{
    //the reduce phase simply passes each key through unchanged;
    //note that identical input lines are grouped into one key here, so duplicates collapse
    @Override
    protected void reduce(Text key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException
    {
        context.write(key, NullWritable.get());
    }
}
- Write the driver class
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class PartitionRun extends Configured implements Tool
{
    @Override
    public int run(String[] strings) throws Exception
    {
        Job job = Job.getInstance(super.getConf(), "partition-test");
        job.setJarByClass(PartitionRun.class);
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path(strings[0]));
        job.setMapperClass(PartitionMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);
        job.setPartitionerClass(MyPartitioner.class);
        job.setReducerClass(PartitionReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);
        //set the number of reduce tasks; one output file is produced per reduce task
        job.setNumReduceTasks(2);
        //if there are more reduce tasks than partition numbers the Partitioner returns, the extra tasks produce empty files
        //job.setNumReduceTasks(3);
        //if there are fewer reduce tasks than partition numbers, some reduce tasks process data from more than one partition
        //job.setNumReduceTasks(1);
        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job, new Path(strings[1]));
        return job.waitForCompletion(true) ? 0 : 1;
    }

    public static void main(String[] args) throws Exception
    {
        System.exit(ToolRunner.run(new Configuration(), new PartitionRun(), args));
    }
}
Note: any MapReduce program that involves a custom partitioner must be shipped to a Linux machine in the cluster and run there (in local mode the job runner generally uses a single reducer, so the partitioning would not be visible), and the input and output paths must be on HDFS.
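For example, after packaging the project into a jar, the job could be submitted like this; the jar name, the fully qualified class name, and the HDFS paths are placeholders rather than values from the original project:

hadoop jar hadoop-demo.jar com.example.partition.PartitionRun /input/partition.txt /output/partition_out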
Implementing a custom sort, custom counters, and a custom combiner
The raw data:
The sorted result data:
- Write a custom class that wraps the letter and the number of each line and implements the WritableComparable interface, so that records can be compared with each other (a small ordering check follows the class)
import org.apache.hadoop.io.WritableComparable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

/*
This class splits each raw record into its letter and number parts
and compares records letter first, then number:
letters ascending,
numbers descending.
*/
public class Key2Bean implements WritableComparable<Key2Bean>
{
    private String first;
    private Integer second;

    public Key2Bean()
    {
    }

    public Key2Bean(String first, Integer second)
    {
        this.first = first;
        this.second = second;
    }

    @Override
    public int compareTo(Key2Bean o)
    {
        int f = this.first.compareTo(o.first);
        if (f == 0)
        {
            int s = 0 - this.second.compareTo(o.second); //negate the comparison to sort the numbers in descending order
            return s;
        }
        else
            return f;
    }

    //serialization: write the fields in a fixed order...
    @Override
    public void write(DataOutput dataOutput) throws IOException
    {
        dataOutput.writeUTF(first);
        dataOutput.writeInt(second);
    }

    //...and read them back in exactly the same order
    @Override
    public void readFields(DataInput dataInput) throws IOException
    {
        this.first = dataInput.readUTF();
        this.second = dataInput.readInt();
    }

    public String getFirst()
    {
        return first;
    }

    public void setFirst(String first)
    {
        this.first = first;
    }

    public Integer getSecond()
    {
        return second;
    }

    public void setSecond(Integer second)
    {
        this.second = second;
    }

    /*
    Since Key2Bean records are ultimately written out through TextOutputFormat,
    toString must be overridden to produce exactly the output layout we want.
    */
    @Override
    public String toString()
    {
        return first + "\t" + second;
    }
}
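To make the ordering concrete, here is a small local check of compareTo outside of any MapReduce job; the letter/number pairs are invented for illustration:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class Key2BeanCheck
{
    public static void main(String[] args)
    {
        List<Key2Bean> beans = new ArrayList<>();
        beans.add(new Key2Bean("b", 2));
        beans.add(new Key2Bean("a", 1));
        beans.add(new Key2Bean("a", 3));
        Collections.sort(beans); //uses Key2Bean.compareTo
        for (Key2Bean b : beans)
            System.out.println(b); //prints: a 3, a 1, b 2 (tab-separated)
    }
}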
- Write the Mapper subclass
import org.