Big Data Training Course: Hands-On GroupingComparator Grouping Case


1. Requirements

Suppose we have the following order data:

Table 4-2 Order data

The task is to find the most expensive item in each order.

(1) Input data

(2) Expected output data

1       222.8

2       722.4

3       232.8

2. Requirements Analysis

(1) Use the combination of order id and transaction amount as the key. The shuffle then sorts all order records read in the Map phase by id in ascending order and, within the same id, by amount in descending order, before sending them to Reduce.

(2) On the Reduce side, use a GroupingComparator to gather the key-value pairs that share the same order id into one group; the first record of each group is then the most expensive item of that order, as shown in Figure 4-18. A plain-Java sketch of the same idea follows the figure.

Figure 4-18 Process analysis
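Before turning to the MapReduce code, the analysis above can be sketched in plain Java, independent of Hadoop: sort the records by order id ascending and price descending, then keep the first record of each id. The class name GroupingIdeaSketch and the values below are hypothetical (chosen to be consistent with the expected output above, not the actual contents of Table 4-2).

import java.util.*;

public class GroupingIdeaSketch {

    static class Order {
        final int id;
        final double price;
        Order(int id, double price) { this.id = id; this.price = price; }
    }

    public static void main(String[] args) {
        // Hypothetical (id, price) records standing in for the Table 4-2 input
        List<Order> records = Arrays.asList(
                new Order(2, 722.4), new Order(1, 33.8), new Order(1, 222.8),
                new Order(3, 232.8), new Order(2, 122.4));

        // Mirrors OrderBean.compareTo: id ascending, price descending within the same id
        records.sort(Comparator.comparingInt((Order o) -> o.id)
                .thenComparing(Comparator.comparingDouble((Order o) -> o.price).reversed()));

        // Mirrors the grouping comparator plus the reducer: the first record of each id wins
        int lastId = Integer.MIN_VALUE;
        for (Order o : records) {
            if (o.id != lastId) {
                System.out.println(o.id + "\t" + o.price);
                lastId = o.id;
            }
        }
    }
}

The MapReduce implementation below distributes exactly this logic: OrderBean.compareTo supplies the sort, and the grouping comparator supplies the "first record of each id" step.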

3. Code Implementation

(1) Define the OrderBean class that holds the order information

package com.atguigu.mapreduce.order;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;

public class OrderBean implements WritableComparable<OrderBean> {

    private int order_id; // order id
    private double price; // price

    public OrderBean() {
        super();
    }

    public OrderBean(int order_id, double price) {
        super();
        this.order_id = order_id;
        this.price = price;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeInt(order_id);
        out.writeDouble(price);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        order_id = in.readInt();
        price = in.readDouble();
    }

    @Override
    public String toString() {
        return order_id + "\t" + price;
    }

    public int getOrder_id() {
        return order_id;
    }

    public void setOrder_id(int order_id) {
        this.order_id = order_id;
    }

    public double getPrice() {
        return price;
    }

    public void setPrice(double price) {
        this.price = price;
    }

    // Secondary sort: order id ascending, price descending within the same id
    @Override
    public int compareTo(OrderBean o) {

        int result;

        if (order_id > o.getOrder_id()) {
            result = 1;
        } else if (order_id < o.getOrder_id()) {
            result = -1;
        } else {
            // sort by price in descending order
            result = price > o.getPrice() ? -1 : 1;
        }

        return result;
    }
}
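One detail worth noting in compareTo above: when two records of the same order have exactly the same price, the ternary returns 1 in both comparison directions, which technically breaks the compareTo contract. It does not change the result here, because grouping looks only at the order id, but a stricter variant could be written as the sketch below (an alternative method body, not the original code).

    // Sketch of a stricter compareTo that could replace the one above:
    // same ordering (id ascending, price descending), but 0 on exact price ties
    @Override
    public int compareTo(OrderBean o) {
        int result = Integer.compare(order_id, o.getOrder_id()); // id ascending
        if (result == 0) {
            result = -Double.compare(price, o.getPrice());       // price descending
        }
        return result;
    }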

(2) Write the OrderMapper class

package com.atguigu.mapreduce.order;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class OrderMapper extends Mapper<LongWritable, Text, OrderBean, NullWritable> {

    OrderBean k = new OrderBean();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

        // 1 Read one line
        String line = value.toString();

        // 2 Split the line (columns: order id, product id, price)
        String[] fields = line.split("\t");

        // 3 Populate the key object
        k.setOrder_id(Integer.parseInt(fields[0]));
        k.setPrice(Double.parseDouble(fields[2]));

        // 4 Write out
        context.write(k, NullWritable.get());
    }
}

(3) Write the OrderGroupingComparator class

package com.atguigu.mapreduce.order;

import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;

public class OrderGroupingComparator extends WritableComparator {

    protected OrderGroupingComparator() {
        // true: create OrderBean instances so keys can be deserialized for comparison
        super(OrderBean.class, true);
    }

    @Override
    public int compare(WritableComparable a, WritableComparable b) {

        // Group only by order id: keys with the same id are treated as equal
        OrderBean aBean = (OrderBean) a;
        OrderBean bBean = (OrderBean) b;

        int result;
        if (aBean.getOrder_id() > bBean.getOrder_id()) {
            result = 1;
        } else if (aBean.getOrder_id() < bBean.getOrder_id()) {
            result = -1;
        } else {
            result = 0;
        }

        return result;
    }
}
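As a quick sanity check of the grouping rule: two beans with the same order id but different prices compare as equal under this comparator, which is why they end up in the same reduce() call. A minimal standalone check is sketched below; the class name GroupingComparatorCheck and the values are hypothetical, and the class sits in the same package so the protected constructor is accessible.

package com.atguigu.mapreduce.order;

public class GroupingComparatorCheck {

    public static void main(String[] args) {
        OrderGroupingComparator comparator = new OrderGroupingComparator();

        // Same order id, different prices: the grouping comparator sees them as equal
        OrderBean a = new OrderBean(1, 222.8);
        OrderBean b = new OrderBean(1, 33.8);

        System.out.println(comparator.compare(a, b)); // prints 0 -> same group in the reduce phase
    }
}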

(4) Write the OrderReducer class

package com.atguigu.mapreduce.order;

import java.io.IOException;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Reducer;

public class OrderReducer extends Reducer<OrderBean, NullWritable, OrderBean, NullWritable> {

    @Override
    protected void reduce(OrderBean key, Iterable<NullWritable> values, Context context)
            throws IOException, InterruptedException {

        // The keys of each group arrive sorted by price descending, so the key passed
        // in here is the most expensive item of the order
        context.write(key, NullWritable.get());
    }
}
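Because the grouping comparator collapses all records of an order into a single reduce() call, and those records arrive sorted by price in descending order, writing the incoming key once is enough to emit the most expensive item. If you iterate the values inside a group, Hadoop refills the same key object as the iterator advances, so a variant that emits, say, the top 2 prices per order could look like the sketch below (a method body for a variant reducer, not part of the original requirement).

    // Variant sketch: emit the top 2 prices of each order. As the values iterator
    // advances, Hadoop refills the same key object with the next record of the group.
    @Override
    protected void reduce(OrderBean key, Iterable<NullWritable> values, Context context)
            throws IOException, InterruptedException {

        int emitted = 0;
        for (NullWritable value : values) {
            context.write(key, NullWritable.get());
            if (++emitted == 2) {
                break; // any remaining records of this order are skipped
            }
        }
    }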

(5) Write the OrderDriver class

package com.atguigu.mapreduce.order;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class OrderDriver {

    public static void main(String[] args) throws Exception {

        // Set the input and output paths according to the actual paths on your own machine
        args = new String[]{"e:/input/inputorder", "e:/output1"};

        // 1 Get the configuration information
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // 2 Set the jar load path
        job.setJarByClass(OrderDriver.class);

        // 3 Set the Mapper and Reducer classes
        job.setMapperClass(OrderMapper.class);
        job.setReducerClass(OrderReducer.class);

        // 4 Set the key and value types of the map output
        job.setMapOutputKeyClass(OrderBean.class);
        job.setMapOutputValueClass(NullWritable.class);

        // 5 Set the key and value types of the final output
        job.setOutputKeyClass(OrderBean.class);
        job.setOutputValueClass(NullWritable.class);

        // 6 Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // 7 Set the grouping comparator used on the reduce side
        job.setGroupingComparatorClass(OrderGroupingComparator.class);

        // 8 Submit the job
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
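The driver above relies on the default single reduce task, so every key reaches the same reducer. If the job were run with more than one reduce task, records of the same order id would also have to be routed to the same reducer. A custom Partitioner keyed on the order id (an optional addition that is not part of the original code) could look like the sketch below, registered in the driver with job.setPartitionerClass(OrderPartitioner.class) alongside job.setNumReduceTasks(n).

package com.atguigu.mapreduce.order;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Partitioner;

// Optional sketch: partition by order id only, so all records of one order reach
// the same reduce task when more than one reduce task is configured.
public class OrderPartitioner extends Partitioner<OrderBean, NullWritable> {

    @Override
    public int getPartition(OrderBean key, NullWritable value, int numPartitions) {
        return (key.getOrder_id() & Integer.MAX_VALUE) % numPartitions;
    }
}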