Problem Description
We have product sales data for a number of regions (simulated, not real). The task is to find the top 5 products by sales amount in each region. The data looks like this:
Each record has three fields, separated by spaces: region, product name, and sales amount.
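For illustration, using the sample values that appear in the reducer comments below, a few input lines would look like this (the full dataset is not reproduced here):
上海 手机 233434
上海 耳机 85478
...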
Solution
This is a classic grouped top-N problem; you will run into variants of it in Spark as well as in MapReduce. The goal is to understand it thoroughly enough to apply the same ideas elsewhere.
Solution 1
After reading each line in the map phase, we emit the region as the key and the product name concatenated with the sales amount as the value, e.g. a pair like (上海, 耳机&67643). The reducer then receives all sales records for one region together. We define a POJO for a sales record; on the reduce side each value is unpacked into an object, the objects are collected into a list, and the list is sorted by sales amount.
The code is as follows:
package com.liu.hadoop.topN;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
public class ProcessTest {
static class TopNMapper extends Mapper<LongWritable, Text, Text, Text>{
Text outKey = new Text();
Text outValue = new Text();
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
String[] line = value.toString().split(" ");
outKey.set(line[0]);
outValue.set(line[1] + "&" + line[2]);
context.write(outKey, outValue);
}
}
static class TopReduce extends Reducer<Text, Text, Text, Text>{
// example grouped input for key 上海: values 手机&233434, 耳机&85478
@Override
protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
List<ProductSale> res = new ArrayList<>();
for (Text value : values) {
String[] split = value.toString().split("&");
ProductSale productSale = new ProductSale(key.toString(), split[0], Double.parseDouble(split[1]));
res.add(productSale);
}
// sort by sales amount in descending order
res.sort(new Comparator<ProductSale>() {
@Override
public int compare(ProductSale o1, ProductSale o2) {
return -Double.compare(o1.getValue(), o2.getValue());
}
});
// emit the top 5; Math.min guards against regions with fewer than 5 products
for (int i = 0; i < Math.min(5, res.size()); i++) {
context.write(new Text(res.get(i).getArea()), new Text(res.get(i).getProductName() + " " + res.get(i).getValue()));
}
}
}
public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
// input format: region productName salesAmount
Configuration conf = new Configuration();
Job job = Job.getInstance(conf, "topN"); // create and configure the job
job.setMapperClass(TopNMapper.class);
job.setReducerClass(TopReduce.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
FileInputFormat.setInputPaths(job, new Path("/data/saleData.txt"));
FileOutputFormat.setOutputPath(job, new Path("/data/topN"));
job.setJar("/data/HadoopMr-1.0-SNAPSHOT.jar");
job.waitForCompletion(true);
}
}
The sales-record POJO:
package com.liu.hadoop.topN;
public class ProductSale {
private String area;
private String productName;
private double value;
public ProductSale(String area, String productName, double value) {
this.area = area;
this.productName = productName;
this.value = value;
}
public String getArea() {
return area;
}
public void setArea(String area) {
this.area = area;
}
public String getProductName() {
return productName;
}
public void setProductName(String productName) {
this.productName = productName;
}
public double getValue() {
return value;
}
public void setValue(double value) {
this.value = value;
}
@Override
public String toString() {
return "ProductSale{" +
"area='" + area + '\'' +
", productName='" + productName + '\'' +
", value=" + value +
'}';
}
}
The results are as follows:
Summary: this approach is simple to write, but every reduce call builds an in-memory list of all records for one region and then sorts it. If a region such as 上海 has tens of thousands of sales records, the list and the sort can consume a lot of memory.
Improvement: instead of collecting everything, keep a bounded structure of size 5 (for example a length-5 array, or a small min-heap), so that it always holds the current top 5 records, as sketched below.
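A minimal sketch of that improvement (illustrative, not the author's original code), written as a drop-in replacement for TopReduce and reusing the ProductSale POJO above. It keeps a min-heap of at most 5 elements, so memory per region stays constant regardless of how many records the region has (java.util.PriorityQueue would need to be imported):
static class BoundedTopReduce extends Reducer<Text, Text, Text, Text> {
private static final int N = 5;
@Override
protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
// min-heap ordered by sales amount; the head is always the smallest record kept
PriorityQueue<ProductSale> topN = new PriorityQueue<>(Comparator.comparingDouble(ProductSale::getValue));
for (Text value : values) {
String[] split = value.toString().split("&");
topN.offer(new ProductSale(key.toString(), split[0], Double.parseDouble(split[1])));
if (topN.size() > N) {
topN.poll(); // evict the smallest, keeping only the N largest
}
}
// drain the heap and emit in descending order of sales amount
List<ProductSale> res = new ArrayList<>(topN);
res.sort(Comparator.comparingDouble(ProductSale::getValue).reversed());
for (ProductSale p : res) {
context.write(new Text(p.getArea()), new Text(p.getProductName() + " " + p.getValue()));
}
}
}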
Solution 2
Solution 1 does not exploit MapReduce's built-in sorting. Instead, we can make the map output key a POJO for the whole sales record, and use a custom partitioner to send all records of the same region to the same reducer; the records then arrive at the reducer already sorted by sales amount.
The sales-record class must implement the WritableComparable interface. The Product class:
package com.liu.hadoop.topN1;
import org.apache.hadoop.io.WritableComparable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
public class Product implements WritableComparable<Product> {
private String areaName;
private String productName;
private Double saleValue;
@Override
public int compareTo(Product o) {
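// order by region first; within a region, by sales amount descending (hence the negation)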
if (this.areaName.compareTo(o.areaName) == 0){
return -Double.compare(this.saleValue, o.saleValue);
}
return this.areaName.compareTo(o.areaName);
}
@Override
public void write(DataOutput dataOutput) throws IOException {
dataOutput.writeUTF(areaName);
dataOutput.writeUTF(productName);
dataOutput.writeDouble(saleValue);
}
@Override
public void readFields(DataInput dataInput) throws IOException {
this.areaName = dataInput.readUTF();
this.productName = dataInput.readUTF();
this.saleValue = dataInput.readDouble();
}
public String getAreaName() {
return areaName;
}
public void setAreaName(String areaName) {
this.areaName = areaName;
}
public String getProductName() {
return productName;
}
public void setProductName(String productName) {
this.productName = productName;
}
public double getSaleValue() {
return saleValue;
}
public void setSaleValue(double saleValue) {
this.saleValue = saleValue;
}
}
The custom partitioner:
package com.liu.hadoop.topN1;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.Partitioner;
public class GroupPartition extends Partitioner<Product, LongWritable> {
@Override
public int getPartition(Product product, LongWritable longWritable, int numPartitions) {
// route records with the same region to the same reducer; masking with Integer.MAX_VALUE keeps the hash non-negative
return (product.getAreaName().hashCode() & Integer.MAX_VALUE) % numPartitions;
}
}
The custom grouping comparator. Without it, every Product key (which includes the product name and sales amount) would form its own reduce group; comparing only the region makes all records of one region arrive in a single reduce call:
package com.liu.hadoop.topN1;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
public class MyGroupComparator extends WritableComparator {
public MyGroupComparator() {
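// 'true' asks WritableComparator to instantiate Product objects so compare() receives deserialized keys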
super(Product.class,true);
}
@Override
public int compare(WritableComparable a, WritableComparable b) {
Product first = (Product) a;
Product second = (Product) b;
return first.getAreaName().compareTo(second.getAreaName());
}
}
The main program:
package com.liu.hadoop.topN1;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
public class ProductTopN {
static class ProductMapper extends Mapper<LongWritable, Text, Product, LongWritable> {
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
String[] s = value.toString().split(" ");
Product product = new Product();
product.setAreaName(s[0]);
product.setProductName(s[1]);
product.setSaleValue(Double.parseDouble(s[2]));
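// the value is only a placeholder; all the information travels in the key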
context.write(product, new LongWritable(1));
}
}
static class ProductReducer extends Reducer<Product, LongWritable, Text, Text> {
@Override
protected void reduce(Product key, Iterable<LongWritable> values, Context context) throws IOException, InterruptedException {
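// With the grouping comparator in place, one reduce call covers an entire region.
// As the values iterator advances, Hadoop deserializes each record into `key` in place,
// so key.getProductName() and key.getSaleValue() change on every iteration.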
int i = 0;
for (LongWritable value : values) {
i++;
if (i <= 5)
context.write(new Text(key.getAreaName() + " " + key.getProductName() + " " + key.getSaleValue()), new Text(i + ""));
else
break;
}
}
}
public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
Configuration conf = new Configuration();
Job job = Job.getInstance(conf, "topN");
job.setMapperClass(ProductMapper.class);
job.setMapOutputKeyClass(Product.class);
job.setMapOutputValueClass(LongWritable.class);
job.setPartitionerClass(GroupPartition.class);
job.setGroupingComparatorClass(MyGroupComparator.class);
job.setReducerClass(ProductReducer.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
FileInputFormat.setInputPaths(job, new Path("/data/saleData.txt"));
FileOutputFormat.setOutputPath(job, new Path("/data/topN"));
job.setJar("/data/HadoopMr-1.0-SNAPSHOT.jar");
job.waitForCompletion(true);
}
}
Here each record is wrapped in a custom key class. The custom partitioner and grouping comparator ensure that all records of one region reach the same reduce call, and the map-phase sort orders the records by the key class's compareTo method, i.e. by region and then by sales amount descending. The reducer therefore only needs to take the first five records of each group.
The results are as follows:
Both jobs select the same top-5 records per region (the two reducers merely format their output slightly differently).
Summary: Solution 2 makes full use of the sorting that MapReduce already performs in the map phase. Because that sort happens in the in-memory spill buffer, it is fast, which makes Solution 2 the more efficient approach.