Hadoop (Day 07) -- MapReduce (Day 02), Eclipse

I. Versions:

1. MapReduce v1.0: the JobTracker handles both resource management and job scheduling, and TaskTrackers run the map and reduce tasks on the worker nodes.

2. MapReduce v2.0: runs on YARN; the ResourceManager manages cluster resources, a per-application ApplicationMaster schedules each job's tasks, and NodeManagers launch the task containers.


II. Code:

Example:

1. Counting phone traffic data (with a custom partitioner):

FlowBean.java:

package com.bsr.bigdata.mapreduce.part_pd;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;

public class FlowBean implements Writable{
	
	private long upFlow;
	private long dFlow;
	private long sumFlow;
	
	//Deserialization uses reflection to call the no-arg constructor, so one must be defined explicitly
	public FlowBean(){}
	
	public long getUpFlow() {
		return upFlow;
	}
	public void setUpFlow(long upFlow) {
		this.upFlow = upFlow;
	}
	public long getdFlow() {
		return dFlow;
	}
	public void setdFlow(long dFlow) {
		this.dFlow = dFlow;
	}
	public long getSumFlow() {
		return sumFlow;
	}
	public void setSumFlow(long sumFlow) {
		this.sumFlow = sumFlow;
	}
	public FlowBean(long upFlow, long dFlow) {
		super();
		this.upFlow = upFlow;
		this.dFlow = dFlow;
		this.sumFlow = upFlow + dFlow;
	}
	
	/**
	 * Serialization method
	 */
	@Override
	public void write(DataOutput out) throws IOException {
		out.writeLong(upFlow);
		out.writeLong(dFlow);
		out.writeLong(sumFlow);		
	}
	
	/**
	 * Deserialization method
	 * Note: fields must be read back in exactly the same order they were written
	 * (a quick round-trip check is sketched after this class)
	 */
	@Override
	public void readFields(DataInput in) throws IOException {
		upFlow = in.readLong();
		dFlow = in.readLong();
		sumFlow = in.readLong();
	}

	@Override
	public String toString() {
		return upFlow + "\t" + dFlow + "\t" + sumFlow;
	}
}
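
Since write() and readFields() must stay in sync, a quick way to sanity-check the bean is to serialize it into a byte array and read it back. Below is a minimal standalone sketch, placed in the same package as FlowBean (it is not part of the original post; the class name is made up for illustration):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class FlowBeanRoundTripTest {
	public static void main(String[] args) throws IOException {
		FlowBean original = new FlowBean(1024L, 2048L);
		
		//serialize: write() emits upFlow, dFlow and sumFlow, in that order
		ByteArrayOutputStream buffer = new ByteArrayOutputStream();
		original.write(new DataOutputStream(buffer));
		
		//deserialize: readFields() must read the three longs back in the same order
		FlowBean copy = new FlowBean();
		copy.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
		
		//both lines should print: 1024	2048	3072
		System.out.println(original);
		System.out.println(copy);
	}
}
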
PhonePartitioner.java:

package com.bsr.bigdata.mapreduce.part_pd;

import java.util.HashMap;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;


public class PhonePartitioner extends Partitioner<Text, FlowBean>{
	
	//map the first four digits of a phone number to a partition id
	public static HashMap<String, Integer> phoneDict = new HashMap<String, Integer>();
	static{
		phoneDict.put("0431", 0);
		phoneDict.put("0451", 1);
		phoneDict.put("0461", 2);
		phoneDict.put("0471", 3);
	}
	
	@Override
	public int getPartition(Text key, FlowBean value, int numPartitions) {
		String prefix = key.toString().substring(0, 4);
		Integer phoneId = phoneDict.get(prefix);
		//phone numbers with an unknown prefix all go to the extra partition 4
		return phoneId == null ? 4 : phoneId;
	}
	}

}
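
To see how keys are routed, getPartition() can also be exercised directly outside of a job. A rough sketch, placed in the same package as PhonePartitioner (the demo class and phone numbers below are made up for illustration):

import org.apache.hadoop.io.Text;

public class PhonePartitionerDemo {
	public static void main(String[] args) {
		PhonePartitioner partitioner = new PhonePartitioner();
		String[] samples = {"04311234567", "04519876543", "09990000000"};
		for (String phone : samples) {
			//the FlowBean value is never read by getPartition(), so null is passed here
			int partition = partitioner.getPartition(new Text(phone), null, 5);
			System.out.println(phone + " -> partition " + partition);
		}
	}
}

Expected output: the 0431 and 0451 numbers land in partitions 0 and 1, and the unknown 0999 prefix falls into the default partition 4.
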
FlowCount.java:

package com.bsr.bigdata.mapreduce.part_pd;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class FlowCount {
	
	static class FlowCountMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
		@Override
		protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException{
			//convert the line of input into a String
			String line = value.toString();
			//split the line into fields
			String[] fields = line.split("\t");
			//phone number
			String phoneNbr = fields[1];
			//extract the upstream traffic
			long upFlow = Long.parseLong(fields[fields.length-3]);
			//extract the downstream traffic
			long dFlow = Long.parseLong(fields[fields.length-2]);
			
			context.write(new Text(phoneNbr), new FlowBean(upFlow, dFlow));
		}
	}
	
	static class FlowCountReducer extends Reducer<Text, FlowBean, Text, FlowBean>{
		@Override
		protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException{
			long sum_upFlow = 0;
			long sum_dFlow = 0;
			//iterate over all the beans and accumulate upstream and downstream traffic separately
			for(FlowBean bean:values){
				sum_upFlow += bean.getUpFlow();
				sum_dFlow += bean.getdFlow();
			}
			//wrap the totals in a result bean
			FlowBean resultBean = new FlowBean(sum_upFlow, sum_dFlow);
			context.write(key, resultBean);
		}
	}
	public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
		Configuration conf = new Configuration();
		Job job = Job.getInstance(conf);
		//identify the jar that contains this program by one of its classes
		job.setJarByClass(FlowCount.class);
		
		//specify the Mapper/Reducer classes this job uses
		job.setMapperClass(FlowCountMapper.class);
		job.setReducerClass(FlowCountReducer.class);
		
		//specify the output key/value types (the map and reduce outputs use the same types)
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(FlowBean.class);
		
		//specify our custom partitioner
		job.setPartitionerClass(PhonePartitioner.class);
		//and set the number of reduce tasks to match the number of partitions (4 known prefixes + 1 default)
		job.setNumReduceTasks(5);
		
		//directory of the job's raw input files
		FileInputFormat.setInputPaths(job, new Path("D:\\flow\\in"));
		//directory of the job's output results
		FileOutputFormat.setOutputPath(job, new Path("D:\\flow\\out"));
		
		boolean res = job.waitForCompletion(true);
		System.exit(res?0:1);
	}

}
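
With five reduce tasks and the partitioner above, the output directory should contain one file per partition (part-r-00000 through part-r-00004): one for each of the known prefixes 0431/0451/0461/0471 and one for all remaining phone numbers. The exact input log format is not shown in the post; the sketch below uses a made-up tab-separated line purely to illustrate which columns the mapper reads:

public class FieldExtractionDemo {
	public static void main(String[] args) {
		//a made-up record: the second field is the phone number,
		//and the last three fields are upFlow, dFlow and a status code
		String line = "1\t04311234567\thost.example.com\t100\t200\t200";
		String[] fields = line.split("\t");
		String phoneNbr = fields[1];                              //"04311234567"
		long upFlow = Long.parseLong(fields[fields.length - 3]);  //100
		long dFlow = Long.parseLong(fields[fields.length - 2]);   //200
		System.out.println(phoneNbr + " up=" + upFlow + " down=" + dFlow);
	}
}
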


2. WordCountCombiner:

package com.bsr.bigdata.mapreduce.combiner;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;  
  
/** 
 * Q: Why use a Combiner?
 * A: The Combiner runs on the map side and performs local aggregation, so the amount of
 *    data shuffled to the reduce side shrinks, transfer time drops, and the overall job
 *    finishes faster.
 * 
 * Q: Why is the Combiner an optional step instead of a standard part of every MR job?
 * A: Because not every algorithm can safely use a Combiner, e.g. computing an average
 *    (see the sketch after this class).
 * 
 * Q: The Combiner already performs a reduce-style operation, so why does the Reducer
 *    phase still have to reduce?
 * A: The Combiner runs on the map side and only sees the data handled by a single map
 *    task; it cannot aggregate across map tasks. Only the reduce phase receives the
 *    output of multiple map tasks.
 */  
public class WordCountCombiner {  
    /** 
     * KEYIN    i.e. k1    the byte offset of the line
     * VALUEIN  i.e. v1    the text content of the line
     * KEYOUT   i.e. k2    a word appearing in the line
     * VALUEOUT i.e. v2    the count for that word, a fixed value of 1
     */  
    static class MyMapper extends Mapper<LongWritable, Text, Text, LongWritable>{  
        @Override
        protected void map(LongWritable k1, Text v1, Context context) throws java.io.IOException, InterruptedException {  
            final String[] splited = v1.toString().split(" ");  
            for (String word : splited) {  
                context.write(new Text(word), new LongWritable(1));  
                System.out.println("Mapper output <" + word + "," + 1 + ">");  
            }  
        }  
    }  
      
    /** 
     * KEYIN    i.e. k2    a word appearing in the line
     * VALUEIN  i.e. v2    the count for that word
     * KEYOUT   i.e. k3    a distinct word appearing in the text
     * VALUEOUT i.e. v3    the total number of occurrences of that word
     */  
    static class MyReducer extends Reducer<Text, LongWritable, Text, LongWritable>{  
        @Override
        protected void reduce(Text k2, java.lang.Iterable<LongWritable> v2s, Context ctx) throws java.io.IOException, InterruptedException {  
            //printed once per call to reduce(), i.e. once per k2 group  
            System.out.println("MyReducer input group <" + k2.toString() + ",...>");  
            long times = 0L;  
            for (LongWritable count : v2s) {  
                times += count.get();  
                //printed once per incoming <k2,v2> pair  
                System.out.println("MyReducer input pair <" + k2.toString() + "," + count.get() + ">");  
            }  
            ctx.write(k2, new LongWritable(times));  
        }  
    }  
      
      
    static class MyCombiner extends Reducer<Text, LongWritable, Text, LongWritable>{  
        @Override
        protected void reduce(Text k2, java.lang.Iterable<LongWritable> v2s, Context ctx) throws java.io.IOException, InterruptedException {  
            //printed once per call to reduce(), i.e. once per k2 group on the map side  
            System.out.println("Combiner input group <" + k2.toString() + ",...>");  
            long times = 0L;  
            for (LongWritable count : v2s) {  
                times += count.get();  
                //printed once per incoming <k2,v2> pair  
                System.out.println("Combiner input pair <" + k2.toString() + "," + count.get() + ">");  
            }  
              
            ctx.write(k2, new LongWritable(times));  
            //printed once per outgoing <k2,v2> pair  
            System.out.println("Combiner output pair <" + k2.toString() + "," + times + ">");  
        }  
    }   
    public static void main(String[] args) throws Exception {  
        Configuration conf = new Configuration();  
        
 
        Job job = Job.getInstance(conf);
        //identify the jar that contains this program by one of its classes
        job.setJarByClass(WordCountCombiner.class);
          
        //specify how to parse the input files into key/value pairs, one pair per line  
        //job.setInputFormatClass(TextInputFormat.class);  
          
        //1.2 specify the custom map class  
        job.setMapperClass(MyMapper.class);  
        //the <k,v> types of the map output; can be omitted when <k3,v3> has the same types as <k2,v2>  
        job.setMapOutputKeyClass(Text.class);  
        job.setMapOutputValueClass(LongWritable.class);  
         
         
        //1.5 local aggregation (combine)  
        job.setCombinerClass(MyCombiner.class);  
          
        //2.2 specify the custom reduce class  
        job.setReducerClass(MyReducer.class);  
        //specify the reduce output types  
        job.setOutputKeyClass(Text.class);  
        job.setOutputValueClass(LongWritable.class);  
          
       //directory of the job's raw input files
       FileInputFormat.setInputPaths(job, new Path("D:\\word\\in"));
       //directory of the job's output results
       Path outPath = new Path("D:\\word\\out");
       
       FileOutputFormat.setOutputPath(job, outPath);
      
       FileSystem fs = FileSystem.get(conf);
      
       //delete the output directory if it already exists, otherwise the job would fail  
       if(fs.exists(outPath)){  
           fs.delete(outPath, true);  
       }  
      		
      //submit the job's configured parameters and the jar containing the job's classes for execution
      		/*job.submit();*/
       boolean res = job.waitForCompletion(true);
       System.exit(res?0:1);
    }  
}  
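
As the comments above note, a Combiner only works when the per-map aggregation can be merged again on the reduce side without changing the result (sums and counts qualify, plain averages do not). A minimal standalone sketch of the averaging pitfall (the class name and values below are made up for illustration):

public class AverageCombinerPitfall {
	public static void main(String[] args) {
		//pretend two map tasks each hold part of the data; the true average of all five values is 3.0
		double[] mapTask1 = {1, 2, 3};   //local average = 2.0
		double[] mapTask2 = {4, 5};      //local average = 4.5

		//if a Combiner pre-averaged each map task's values, the reducer would
		//average the two local averages and get the wrong answer: 3.25
		double wrong = (avg(mapTask1) + avg(mapTask2)) / 2;

		//combining (sum, count) pairs instead keeps the result correct: 15 / 5 = 3.0
		double correct = (sum(mapTask1) + sum(mapTask2)) / (mapTask1.length + mapTask2.length);

		System.out.println("wrong = " + wrong + ", correct = " + correct);
	}

	static double avg(double[] xs) { return sum(xs) / xs.length; }

	static double sum(double[] xs) {
		double s = 0;
		for (double x : xs) s += x;
		return s;
	}
}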


