Computing Carrier Upstream/Downstream Traffic with MapReduce

Upstream/downstream traffic aggregation

	package cn.itcast.bigdata.mr.flowsum;
	
	import java.io.IOException;
	
	import org.apache.hadoop.conf.Configuration;
	import org.apache.hadoop.fs.Path;
	import org.apache.hadoop.io.LongWritable;
	import org.apache.hadoop.io.Text;
	import org.apache.hadoop.mapreduce.Job;
	import org.apache.hadoop.mapreduce.Mapper;
	import org.apache.hadoop.mapreduce.Reducer;
	import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
	import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
	
	public class FlowCount {
	//Because both upstream and downstream traffic must be emitted and context.write can carry only one value, the two are wrapped together in a FlowBean object
	static class FlowCountMapper extends Mapper<LongWritable, Text, Text, FlowBean>{
		
		@Override
		protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
			 
			//convert the line to a String
			String line = value.toString();
			//split into fields
			String[] fields = line.split("\t");
			//extract the phone number
			String phoneNbr = fields[1]; 
			//extract upstream and downstream traffic
			long upFlow = Long.parseLong(fields[fields.length-3]);
			long dFlow = Long.parseLong(fields[fields.length-2]);
			
			context.write(new Text(phoneNbr), new FlowBean(upFlow, dFlow));
			
			
		}
		
		
		
	}
	


	static class FlowCountReducer extends Reducer<Text, FlowBean, Text, FlowBean>{
		//what the data entering reduce looks like:
		//<183323,bean1><183323,bean2><183323,bean3><183323,bean4>.......
		@Override
		protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {

			long sum_upFlow = 0;
			long sum_dFlow = 0;
		
			//iterate over all beans and accumulate their upstream and downstream traffic separately
			for(FlowBean bean: values){
				sum_upFlow += bean.getUpFlow();
				sum_dFlow += bean.getdFlow();
			}
			
			FlowBean resultBean = new FlowBean(sum_upFlow, sum_dFlow);
			//when the object is written to the text output, its toString() is called; without overriding toString() you would get the default object representation, so the bean must define one
			context.write(key, resultBean);
			
			
		}
		
	}
	
	
	
	public static void main(String[] args) throws Exception {
		Configuration conf = new Configuration();
		/*conf.set("mapreduce.framework.name", "yarn");
		conf.set("yarn.resoucemanager.hostname", "mini1");*/
		Job job = Job.getInstance(conf);
		
		/*job.setJar("/home/hadoop/wc.jar");*/
		//specify the jar containing this program (located via this class)
		job.setJarByClass(FlowCount.class);
		
		//specify the Mapper/Reducer classes this job uses
		job.setMapperClass(FlowCountMapper.class);
		job.setReducerClass(FlowCountReducer.class);
		 
		

		//specify the key/value types of the mapper output
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(FlowBean.class);
		
		//specify the key/value types of the final output
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(FlowBean.class);
		
		//Optional: local aggregation with a Combiner.
		//A combiner runs on the map side to locally aggregate the map output before it is
		//sent over the network. For example, in wordcount a map output of
		//<hello,1><hello,1><car,1><car,1> becomes <hello,2><car,2> after the combiner, so
		//the reducer receives much less data and network transfer is reduced.
		//A combiner must not change the job's business logic; its logic is usually the same
		//as the reducer's.
		//job.setCombinerClass(FlowCountReducer.class);
			
		//Optional: merging many small input files.
		//The default input format is TextInputFormat, and FileInputFormat.getSplits() creates
		//at least one split per file. CombineTextInputFormat instead groups multiple small
		//files into a single logical split, so they are processed by one map task.
		//job.setInputFormatClass(CombineTextInputFormat.class);
		//maximum split size: 4 MB
		//CombineTextInputFormat.setMaxInputSplitSize(job, 4194304);
		//minimum split size: 2 MB
		//CombineTextInputFormat.setMinInputSplitSize(job, 2097152);
	

		//specify the directory containing the job's raw input files
		FileInputFormat.setInputPaths(job, new Path(args[0]));
		//specify the directory for the job's output
		FileOutputFormat.setOutputPath(job, new Path(args[1]));
		
		//submit the job configuration and the jar containing the job's classes to YARN to run
		/*job.submit();*/
		boolean res = job.waitForCompletion(true);
		System.exit(res?0:1);
		
	}
	
	}

Object serialization

	package cn.itcast.bigdata.mr.flowsum;
	
	import java.io.DataInput;
	import java.io.DataOutput;
	import java.io.IOException;
	
	import org.apache.hadoop.io.Writable;
	
	public class FlowBean implements Writable{
	
	private long upFlow;
	private long dFlow;
	private long sumFlow;
	
	//During deserialization the framework instantiates the bean via reflection using the no-arg constructor; defining the parameterized constructor removes the implicit default one, so an explicit no-arg constructor must be declared
	public FlowBean(){}
	
	public FlowBean(long upFlow, long dFlow) {
		this.upFlow = upFlow;
		this.dFlow = dFlow;
		this.sumFlow = upFlow + dFlow;
	}
	public void set(long upFlow, long dFlow) {
		this.upFlow = upFlow;
		this.dFlow = dFlow;
		this.sumFlow = upFlow + dFlow;
	}
	
	
	public long getUpFlow() {
		return upFlow;
	}
	public void setUpFlow(long upFlow) {
		this.upFlow = upFlow;
	}
	public long getdFlow() {
		return dFlow;
	}
	public void setdFlow(long dFlow) {
		this.dFlow = dFlow;
	}


	public long getSumFlow() {
		return sumFlow;
	}


	public void setSumFlow(long sumFlow) {
		this.sumFlow = sumFlow;
	}


	/**
	 * serialization method
	 */
	@Override
	public void write(DataOutput out) throws IOException {
		out.writeLong(upFlow);
		out.writeLong(dFlow);
		out.writeLong(sumFlow);
		
	}


	/**
	 * deserialization method
	 * Note: fields must be read in exactly the same order they were written
	 */
	@Override
	public void readFields(DataInput in) throws IOException {
		 this.upFlow = in.readLong();
		 this.dFlow = in.readLong();
		 this.sumFlow = in.readLong();
	}
	
	@Override
	public String toString() {
		 
		return upFlow + "\t" + dFlow + "\t" + sumFlow;
	}}
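
To make the write/readFields contract concrete, here is a minimal round-trip sketch (a standalone, hypothetical test class, not part of the original tutorial) that serializes a bean the way Hadoop would and reads it back; this is also why the no-arg constructor is required:

	package cn.itcast.bigdata.mr.flowsum;
	
	import java.io.ByteArrayInputStream;
	import java.io.ByteArrayOutputStream;
	import java.io.DataInputStream;
	import java.io.DataOutputStream;
	import java.io.IOException;
	
	public class FlowBeanSerDemo {
		public static void main(String[] args) throws IOException {
			FlowBean original = new FlowBean(2481, 24681);
	
			//serialize to a byte array, as the framework would between map and reduce
			ByteArrayOutputStream bos = new ByteArrayOutputStream();
			original.write(new DataOutputStream(bos));
	
			//deserialize into a fresh bean created via the no-arg constructor
			FlowBean copy = new FlowBean();
			copy.readFields(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
	
			System.out.println(copy);	//prints: 2481	24681	27162
		}
	}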

Sample data

1363157985066 13726230503 00-FD-07-A4-72-B8:CMCC 120.196.100.82 i02.c.aliimg.com 24 27 2481 24681 200
1363157995052 13826544101 5C-0E-8B-C7-F1-E0:CMCC 120.197.40.4 4 0 264 0 200
1363157991076 13926435656 20-10-7A-28-CC-0A:CMCC 120.196.100.99 2 4 132 1512 200
1363154400022 13926251106 5C-0E-8B-8B-B1-50:CMCC 120.197.40.4 4 0 240 0 200
1363157993044 18211575961 94-71-AC-CD-E6-18:CMCC-EASY 120.196.100.99 iface.qiyi.com 视频网站 15 12 1527 2106 200
1363157995074 84138413 5C-0E-8B-8C-E8-20:7DaysInn 120.197.40.4 122.72.52.12 20 16 4116 1432 200
1363157993055 13560439658 C4-17-FE-BA-DE-D9:CMCC 120.196.100.99 18 15 1116 954 200
1363157995033 15920133257 5C-0E-8B-C7-BA-20:CMCC 120.197.40.4 sug.so.360.cn 信息安全 20 20 3156 2936 200
1363157983019 13719199419 68-A1-B7-03-07-B1:CMCC-EASY 120.196.100.82 4 0 240 0 200
1363157984041 13660577991 5C-0E-8B-92-5C-20:CMCC-EASY 120.197.40.4 s19.cnzz.com 站点统计 24 9 6960 690 200
1363157973098 15013685858 5C-0E-8B-C7-F7-90:CMCC 120.197.40.4 rank.ie.sogou.com 搜索引擎 28 27 3659 3538 200
1363157986029 15989002119 E8-99-C4-4E-93-E0:CMCC-EASY 120.196.100.99 www.umeng.com 站点统计 3 3 1938 180 200
1363157992093 13560439658 C4-17-FE-BA-DE-D9:CMCC 120.196.100.99 15 9 918 4938 200
1363157986041 13480253104 5C-0E-8B-C7-FC-80:CMCC-EASY 120.197.40.4 3 3 180 180 200
1363157984040 13602846565 5C-0E-8B-8B-B6-00:CMCC 120.197.40.4 2052.flash2-http.qq.com 综合门户 15 12 1938 2910 200
1363157995093 13922314466 00-FD-07-A2-EC-BA:CMCC 120.196.100.82 img.qfc.cn 12 12 3008 3720 200
1363157982040 13502468823 5C-0A-5B-6A-0B-D4:CMCC-EASY 120.196.100.99 y0.ifengimg.com 综合门户 57 102 7335 110349 200
1363157986072 18320173382 84-25-DB-4F-10-1A:CMCC-EASY 120.196.100.99 input.shouji.sogou.com 搜索引擎 21 18 9531 2412 200
1363157990043 13925057413 00-1F-64-E1-E6-9A:CMCC 120.196.100.55 t3.baidu.com 搜索引擎 69 63 11058 48243 200
1363157988072 13760778710 00-FD-07-A4-7B-08:CMCC 120.196.100.82 2 2 120 120 200
1363157985066 13726238888 00-FD-07-A4-72-B8:CMCC 120.196.100.82 i02.c.aliimg.com 24 27 2481 24681 200
1363157993055 13560436666 C4-17-FE-BA-DE-D9:CMCC 120.196.100.99 18 15 1116 954 200

Adapted from itcast (传智播客) video tutorial content.
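
To make the field positions concrete: the mapper reads fields[1] as the phone number and the third- and second-to-last fields as upstream and downstream traffic. Computed by hand from the sample rows above, the aggregation job's output for two of the phone numbers would be (phone, upFlow, dFlow, sumFlow):

	13726230503	2481	24681	27162
	13560439658	2034	5892	7926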

After the map phase, a custom partitioner can be configured so that different phone-number prefixes are placed in different partitions.

When a custom partitioner is used, the following configuration must be added in main:
//specify our custom data partitioner
job.setPartitionerClass(ProvincePartitioner.class);
//and set the number of reduce tasks to match the number of partitions
job.setNumReduceTasks(5);

package cn.itcast.bigdata.mr.provinceflow;

import java.util.HashMap;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

import cn.itcast.bigdata.mr.flowsum.FlowBean;

/**
 * K2, V2 correspond to the key/value types of the map output
 * @author
 *
 */
public class ProvincePartitioner extends Partitioner<Text, FlowBean>{

	public static HashMap<String, Integer> proviceDict = new HashMap<String, Integer>();
	static{
		proviceDict.put("136", 0);
		proviceDict.put("137", 1);
		proviceDict.put("138", 2);
		proviceDict.put("139", 3);
	}
	
	
	
	@Override
	public int getPartition(Text key, FlowBean value, int numPartitions) {
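		//use the first three digits of the phone number as the province prefix; prefixes not in the dictionary fall into partition 4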
		String prefix = key.toString().substring(0, 3);
		Integer provinceId = proviceDict.get(prefix);
		
		return provinceId==null?4:provinceId;
	}



}
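
As a quick sanity check, here is a minimal standalone sketch (a hypothetical demo class, not part of the original tutorial; it assumes the FlowBean from the cn.itcast.bigdata.mr.flowsum package shown above) showing which partition two sample phone numbers land in:

package cn.itcast.bigdata.mr.provinceflow;

import org.apache.hadoop.io.Text;

import cn.itcast.bigdata.mr.flowsum.FlowBean;

public class ProvincePartitionerDemo {

	public static void main(String[] args) {
		ProvincePartitioner partitioner = new ProvincePartitioner();
		//prefix "137" is in the dictionary -> partition 1
		System.out.println(partitioner.getPartition(new Text("13726230503"), new FlowBean(2481, 24681), 5));
		//prefix "159" is not in the dictionary -> default partition 4
		System.out.println(partitioner.getPartition(new Text("15920133257"), new FlowBean(3156, 2936), 5));
	}
}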

Logic for sorting the per-phone traffic totals produced by the reduce output above:

package cn.itcast.bigdata.mr.flowsum;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * The map input here is the output of the previous aggregation job: the key is still
 * the byte offset and the value is a line of text, e.g.
 * 13480253104	180	180	360
 * 13502468823	7335	110349	117684
 * 13560436666	1116	954	2070
 */
public class FlowCountSort {

static class FlowCountSortMapper extends Mapper<LongWritable, Text, FlowBean, Text> {

	FlowBean bean = new FlowBean();
	Text v = new Text();

	@Override
	protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

		// the value is a line from the previous aggregation job's output: the total traffic for one phone number
		String line = value.toString();

		String[] fields = line.split("\t");

		String phoneNbr = fields[0];

		long upFlow = Long.parseLong(fields[1]);
		long dFlow = Long.parseLong(fields[2]);

		bean.set(upFlow, dFlow);
		v.set(phoneNbr);
		//emit the bean as the key; the framework serializes the beans one by one and sorts by them
		context.write(bean, v);

	}

}

/**
 * Grouping happens by key. The keys arriving here are FlowBean objects and each one
 * is distinct, so reduce is called once per object.
 */
static class FlowCountSortReducer extends Reducer<FlowBean, Text, Text, FlowBean> {

	// the data reduce receives looks like <bean, phoneNbr>
	
	@Override
	protected void reduce(FlowBean bean, Iterable<Text> values, Context context) throws IOException, InterruptedException {

//the first call to Iterator.next() returns the first element of the sequence
		context.write(values.iterator().next(), bean);

	}

}

public static void main(String[] args) throws Exception {

	Configuration conf = new Configuration();
	/*conf.set("mapreduce.framework.name", "yarn");
	conf.set("yarn.resoucemanager.hostname", "mini1");*/
	Job job = Job.getInstance(conf);
	
	/*job.setJar("/home/hadoop/wc.jar");*/
	//specify the jar containing this program (located via this class)
	job.setJarByClass(FlowCountSort.class);
	
	//specify the Mapper/Reducer classes this job uses
	job.setMapperClass(FlowCountSortMapper.class);
	job.setReducerClass(FlowCountSortReducer.class);
	
	//specify the key/value types of the mapper output
	job.setMapOutputKeyClass(FlowBean.class);
	job.setMapOutputValueClass(Text.class);
	
	//specify the key/value types of the final output
	job.setOutputKeyClass(Text.class);
	job.setOutputValueClass(FlowBean.class);
	
	//specify the directory containing the job's input files
	FileInputFormat.setInputPaths(job, new Path(args[0]));
	//specify the directory for the job's output
	
	Path outPath = new Path(args[1]);
	/*FileSystem fs = FileSystem.get(conf);
	if(fs.exists(outPath)){
		fs.delete(outPath, true);
	}*/
	FileOutputFormat.setOutputPath(job, outPath);
	
	//submit the job configuration and the jar containing the job's classes to YARN to run
	/*job.submit();*/
	boolean res = job.waitForCompletion(true);
	System.exit(res?0:1);
	

}}
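
Applied to the three example records quoted in the class comment above, this sort job emits the phone numbers in descending order of total traffic:

13502468823	7335	110349	117684
13560436666	1116	954	2070
13480253104	180	180	360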

Because the beans must be compared for ordering, FlowBean now implements WritableComparable.

Any object used as a map output key must implement Comparable (here via WritableComparable), because the MapReduce framework always sorts keys; if the object provides no comparison method, the job will fail.

package cn.itcast.bigdata.mr.flowsum;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;

public class FlowBean implements WritableComparable<FlowBean>{
	
private long upFlow;
private long dFlow;
private long sumFlow;

//during deserialization the framework creates the bean via reflection using the no-arg constructor, so one must be declared explicitly
public FlowBean(){}

public FlowBean(long upFlow, long dFlow) {
	this.upFlow = upFlow;
	this.dFlow = dFlow;
	this.sumFlow = upFlow + dFlow;
}


public void set(long upFlow, long dFlow) {
	this.upFlow = upFlow;
	this.dFlow = dFlow;
	this.sumFlow = upFlow + dFlow;
}




public long getUpFlow() {
	return upFlow;
}
public void setUpFlow(long upFlow) {
	this.upFlow = upFlow;
}
public long getdFlow() {
	return dFlow;
}
public void setdFlow(long dFlow) {
	this.dFlow = dFlow;
}


public long getSumFlow() {
	return sumFlow;
}


public void setSumFlow(long sumFlow) {
	this.sumFlow = sumFlow;
}


/**
 * serialization method
 */
@Override
public void write(DataOutput out) throws IOException {
	out.writeLong(upFlow);
	out.writeLong(dFlow);
	out.writeLong(sumFlow);
	
}


/**
 * deserialization method
 * Note: fields must be read in exactly the same order they were written
 */
@Override
public void readFields(DataInput in) throws IOException {
	 upFlow = in.readLong();
	 dFlow = in.readLong();
	 sumFlow = in.readLong();
}

@Override
public String toString() {
	 
	return upFlow + "\t" + dFlow + "\t" + sumFlow;
}

@Override
public int compareTo(FlowBean o) {
//beans are compared by their total traffic
	return this.sumFlow>o.getSumFlow()?-1:1;	//descending order: if this object's sumFlow is larger than the other's, return -1 so it sorts first
}}
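
A minimal standalone sketch (a hypothetical demo class, not part of the original tutorial) showing the descending order this compareTo produces; the traffic values are taken from the raw sample data:

package cn.itcast.bigdata.mr.flowsum;

import java.util.Arrays;

public class FlowBeanSortDemo {

	public static void main(String[] args) {
		FlowBean[] beans = { new FlowBean(264, 0), new FlowBean(2481, 24681), new FlowBean(132, 1512) };
		//Arrays.sort uses FlowBean.compareTo, so the bean with the largest sumFlow comes first
		Arrays.sort(beans);
		for (FlowBean bean : beans) {
			System.out.println(bean);	//totals print in the order 27162, 1644, 264
		}
	}
}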