Training Diary 1 ----- Mobile Phone Traffic Statistics Project
1. Project Requirements
In the Map stage, each input line must record the upstream traffic, downstream traffic, and total traffic. Keeping these in separate primitive values is awkward, so a custom bean object is introduced to hold them; because the bean travels between Map and Reduce, it must be serializable.
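In Hadoop, "serializable" here means implementing Hadoop's own org.apache.hadoop.io.Writable interface rather than java.io.Serializable. The interface consists of just two methods, which the Access class in section 4 implements:
public interface Writable {
    void write(DataOutput out) throws IOException;    // serialize the object's fields
    void readFields(DataInput in) throws IOException; // deserialize them in the same order
}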
Data format
Input data:
1363157993055 13560436666 C4-17-FE-BA-DE-D9:CMCC 120.196.100.99 18 15 1116 954 200
(the second field is the phone number; 1116 and 954 are the upstream and downstream traffic)
Output data:
13560436666 1116 954 2070
(phone number, upstream traffic, downstream traffic, total traffic; the total is simply upstream plus downstream: 1116 + 954 = 2070)
2. Basic Approach
Map stage:
(1) Read a line of input and split it into fields.
(2) Extract the phone number, upstream traffic, and downstream traffic.
(3) Output the phone number as the key and the bean object as the value, i.e. context.write(phone, bean);
Reduce stage:
(1) Sum the upstream and downstream traffic to get the total traffic.
(2) Implement a custom bean to encapsulate the traffic information; in this project the bean is transmitted as the map output value, with the phone number as the key.
(3) The MR framework sorts the data as it processes it (map output key-value pairs are sorted before they reach reduce), and the sort order is determined by the map output key.
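Here the map output key is the phone number, a Text, which already sorts lexicographically. If the bean itself were used as the map output key, for instance to order results by total traffic, it would have to implement WritableComparable so the framework can sort it. A hypothetical sketch (FlowKey is not part of this project):
package org.mapreduce;
import org.apache.hadoop.io.WritableComparable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
public class FlowKey implements WritableComparable<FlowKey> {
    private long sumFlow;
    public FlowKey() {}
    public FlowKey(long sumFlow) { this.sumFlow = sumFlow; }
    @Override
    public void write(DataOutput out) throws IOException { out.writeLong(sumFlow); }
    @Override
    public void readFields(DataInput in) throws IOException { sumFlow = in.readLong(); }
    @Override
    public int compareTo(FlowKey o) {
        // sort descending by total traffic
        return Long.compare(o.sumFlow, this.sumFlow);
    }
}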
3. Development Steps
(1) Define a custom Access class
with fields for the phone number's upstream traffic, downstream traffic, and total traffic.
(2) Define a custom Map task class (Map Task)
Split each log line into fields; the Map output is phone ==> Access(that line's upstream traffic, that line's downstream traffic).
(3) Write the Reduce task class (Reduce Task)
Sum the traffic for each phone number; the Reduce output is phone ==> Access(summed upstream traffic, summed downstream traffic).
This can also be optimized so the value carries a NullWritable placeholder instead of repeating the phone number alongside the summed traffic.
(4) Write the partitioner class
Extend org.apache.hadoop.mapreduce.Partitioner: phone numbers starting with "13" go to the first ReduceTask and end up in partition 0; numbers starting with "15" go to the second ReduceTask and partition 1; all other numbers go to the third ReduceTask and partition 2. The two job settings this requires are shown right after this list.
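A custom partitioner only takes effect once it is registered on the job, and one ReduceTask is needed per partition. The two calls (shown in context in the driver classes in section 4) are:
job.setPartitionerClass(PhonePartitioner.class);
job.setNumReduceTasks(3); // one ReduceTask each for partitions 0, 1, 2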
4. Code
1. The Access class
A minimal reconstruction of the bean, assuming exactly the interface the Mapper and Reducer below rely on (the two-argument constructor, set(up, down), and the getUpflow/getDownflow getters); the phone number travels separately as the Text key:
package org.mapreduce;
import org.apache.hadoop.io.Writable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
public class Access implements Writable {
    private long upFlow;
    private long downFlow;
    private long sumFlow;
    // Hadoop instantiates Writables by reflection, so a no-arg constructor is required
    public Access() {}
    public Access(long upFlow, long downFlow) {
        set(upFlow, downFlow);
    }
    public void set(long upFlow, long downFlow) {
        this.upFlow = upFlow;
        this.downFlow = downFlow;
        this.sumFlow = upFlow + downFlow; // total traffic = upstream + downstream
    }
    public long getUpflow() { return upFlow; }
    public long getDownflow() { return downFlow; }
    public long getSumflow() { return sumFlow; }
    @Override
    public void write(DataOutput out) throws IOException {
        // serialize the fields in a fixed order ...
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }
    @Override
    public void readFields(DataInput in) throws IOException {
        // ... and deserialize them in exactly the same order
        upFlow = in.readLong();
        downFlow = in.readLong();
        sumFlow = in.readLong();
    }
    @Override
    public String toString() {
        // TextOutputFormat writes value.toString(), so this defines the last
        // three columns of the output: upstream, downstream, total
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }
}
2. The FlowMapper class
package org.mapreduce;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
public class FlowMapper extends Mapper<LongWritable, Text, Text, Access> {
    // reuse the same key/value objects across map() calls to avoid churning the GC
    private Text k = new Text();
    private Access v = new Access();
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // split the tab-separated log line into fields
        String line = value.toString();
        String[] fields = line.split("\t");
        // field 1 is the phone number; counting from the end tolerates lines whose
        // middle fields vary: the 3rd- and 2nd-to-last fields are up/down traffic
        String phNum = fields[1];
        long upFlow = Long.parseLong(fields[fields.length - 3]);
        long downFlow = Long.parseLong(fields[fields.length - 2]);
        k.set(phNum);
        v.set(upFlow, downFlow);
        context.write(k, v); // phone number ==> Access(up, down)
    }
}
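A quick sanity check of the field indexing against the sample record from section 1 (FieldCheck is a hypothetical throwaway class, assuming tab-separated input):
public class FieldCheck {
    public static void main(String[] args) {
        String line = "1363157993055\t13560436666\tC4-17-FE-BA-DE-D9:CMCC\t120.196.100.99\t18\t15\t1116\t954\t200";
        String[] fields = line.split("\t");
        System.out.println(fields[1]);                 // 13560436666 (phone number)
        System.out.println(fields[fields.length - 3]); // 1116 (upstream)
        System.out.println(fields[fields.length - 2]); // 954  (downstream)
    }
}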
3. The FlowReducer class
package org.mapreduce;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
public class FlowReducer extends Reducer<Text, Access, Text, Access> {
    @Override
    protected void reduce(Text key, Iterable<Access> values, Context context)
            throws IOException, InterruptedException {
        // accumulate all traffic records for this phone number
        long sumUpFlow = 0;
        long sumDownFlow = 0;
        for (Access access : values) {
            sumUpFlow += access.getUpflow();
            sumDownFlow += access.getDownflow();
        }
        // phone number ==> Access(summed up, summed down); the constructor computes the total
        Access v = new Access(sumUpFlow, sumDownFlow);
        context.write(key, v);
    }
}
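Because the reduce logic is a pure sum (associative and commutative), the same class can safely double as a map-side combiner to shrink the shuffle, which is exactly what the FlowHDFSApp class below does:
job.setCombinerClass(FlowReducer.class);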
4. The FlowDriver class
package org.mapreduce;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
public class FlowDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        if (args.length < 2) {
            System.err.println("Usage: FlowDriver <inputPath> <outputPath>");
            System.exit(1);
        }
        Configuration configuration = new Configuration();
        Job job = Job.getInstance(configuration, "Flow Calculation");
        job.setJarByClass(FlowDriver.class);
        // custom Mapper and Reducer classes
        job.setMapperClass(FlowMapper.class);
        job.setReducerClass(FlowReducer.class);
        // key and value types of the map output ...
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Access.class);
        // ... and of the final reduce output
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Access.class);
        // register the custom partitioner and run one ReduceTask per partition,
        // so "13"/"15"/other numbers land in partitions 0/1/2 (see section 5)
        job.setPartitionerClass(PhonePartitioner.class);
        job.setNumReduceTasks(3);
        // input and output paths come from the command line
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
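After packaging the project into a jar, the driver can be launched from the command line; the jar name and the input/output paths here are placeholders:
hadoop jar flow.jar org.mapreduce.FlowDriver /flow/input /flow/output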
5. The PhonePartitioner class
package org.mapreduce;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;
public class PhonePartitioner extends Partitioner<Text, Access> {
    @Override
    public int getPartition(Text key, Access value, int numPartitions) {
        // route by the first two digits of the phone number
        String phonePrefix = key.toString().substring(0, 2);
        switch (phonePrefix) {
            case "13":
                return 0; // "13" numbers -> first ReduceTask, partition 0
            case "15":
                return 1; // "15" numbers -> second ReduceTask, partition 1
            default:
                return 2; // everything else -> third ReduceTask, partition 2
        }
    }
}
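A quick local check of the routing logic, calling getPartition directly (PartitionCheck is a hypothetical helper, the phone numbers are made up, and the unused Access value may be null):
package org.mapreduce;
import org.apache.hadoop.io.Text;
public class PartitionCheck {
    public static void main(String[] args) {
        PhonePartitioner p = new PhonePartitioner();
        System.out.println(p.getPartition(new Text("13500000000"), null, 3)); // 0
        System.out.println(p.getPartition(new Text("15900000000"), null, 3)); // 1
        System.out.println(p.getPartition(new Text("18600000000"), null, 3)); // 2
    }
}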
6. The FlowHDFSApp class
You can pick any name for this class; I named mine wcl.
package org.mapreduce;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.net.URI;
public class FlowHDFSApp {
    public static void main(String[] args) throws Exception {
        System.setProperty("HADOOP_USER_NAME", "root");
        Configuration configuration = new Configuration();
        configuration.set("fs.defaultFS", "hdfs://192.168.10.100:9000");
        // create a Job
        Job job = Job.getInstance(configuration);
        // job parameter: the main class
        job.setJarByClass(FlowHDFSApp.class);
        // job parameters: the custom Mapper and Reducer classes
        job.setMapperClass(FlowMapper.class);
        job.setReducerClass(FlowReducer.class);
        // the reducer doubles as a combiner (summing is associative and commutative)
        job.setCombinerClass(FlowReducer.class);
        // job parameters: key and value types of the Mapper output
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Access.class);
        // job parameters: key and value types of the Reduce output
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Access.class);
        // route phone numbers to partitions 0/1/2 with one ReduceTask each
        job.setPartitionerClass(PhonePartitioner.class);
        job.setNumReduceTasks(3);
        // if the output directory already exists, delete it first
        FileSystem fileSystem = FileSystem.get(new URI("hdfs://192.168.10.100:9000"), configuration, "root");
        Path outputPath = new Path("/wordcount/output");
        if (fileSystem.exists(outputPath)) {
            fileSystem.delete(outputPath, true);
        }
        // job parameters: the job's input and output paths
        FileInputFormat.setInputPaths(job, new Path("/home/alice/桌面"));
        FileOutputFormat.setOutputPath(job, outputPath);
        // submit the job and wait for completion
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : -1);
    }
}
5. Results
Run the code, then inspect the job's output directory on the HDFS cluster.
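For example, with the output directory used by FlowHDFSApp above:
hdfs dfs -ls /wordcount/output
hdfs dfs -cat /wordcount/output/part-r-00000
Each of the three ReduceTasks writes one file: part-r-00000, part-r-00001, and part-r-00002.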
"13"开头的手机号最终输出到0号分区,四个数字对应:手机号,对应手机号的上行流量,下行流量,总流量
"15"开头的手机号最终输出到1号分区
其他开头的手机号最终输出到2号分区
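For the single sample record from section 1, partition 0 (part-r-00000) would contain one line:
13560436666	1116	954	2070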