Preface
In today's digital age, the mobile phone has become a daily necessity: many people are online anytime, anywhere. Mobile data, however, is metered, and exceeding your plan's allowance incurs extra charges, so tracking and managing data usage matters.
The background of this traffic-statistics project is to help users control and manage their mobile data usage, with statistics and alerting on top. By monitoring and analyzing their traffic, users can understand their own consumption and avoid the extra cost of going over their limit.
I. Project Requirements
The input is an access.log log file, in which:
- the second field is the phone number;
- the third-from-last field is the upstream traffic;
- the second-from-last field is the downstream traffic.
For each phone number, compute the sum of its upstream traffic, the sum of its downstream traffic, and its total traffic (upstream sum + downstream sum). In addition, split the results by phone-number prefix and write each group to a separate output file:
13* ==> …
15* ==> …
other ==> …
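For reference, a single log line might look like this (a hypothetical example; fields are tab-separated, and only the field positions described above matter to the code):

1363157985066	13726230503	00-FD-07-A4-72-B8:CMCC	120.196.100.82	i02.c.aliimg.com	24	27	2481	24681	200

Here 13726230503 is the phone number (second field), 2481 the upstream traffic (third from last), and 24681 the downstream traffic (second from last).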
II. Development Steps
1. Create a Maven project
Create the project and configure your local repository.
Then edit pom.xml and add the Hadoop client dependency:
<dependencies>
<!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-client -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>3.2.0</version> <!-- replace with your own Hadoop version -->
</dependency>
</dependencies>
2. Java code
Access.java
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;
// Custom Writable value type; the framework instantiates it via reflection
public class Access implements Writable {
private long upFlow;
private long downFlow;
private long sumFlow;
// Deserialization instantiates this class reflectively, so a no-arg constructor is required
public Access() {
super();
}
public Access(long upFlow, long downFlow) {
super();
this.upFlow = upFlow;
this.downFlow = downFlow;
this.sumFlow = upFlow + downFlow;
}
public long getSumFlow() {
return sumFlow;
}
public void setSumFlow(long sumFlow) {
this.sumFlow = sumFlow;
}
public long getUpFlow() {
return upFlow;
}
public void setUpFlow(long upFlow) {
this.upFlow = upFlow;
}
public long getDownFlow() {
return downFlow;
}
public void setDownFlow(long downFlow) {
this.downFlow = downFlow;
}
/**
 * Serialization: write the fields to the output stream.
 *
 * @param out
 * @throws IOException
 */
@Override
public void write(DataOutput out) throws IOException {
out.writeLong(upFlow);
out.writeLong(downFlow);
out.writeLong(sumFlow);
}
/**
 * Deserialization: read the fields back in.
 * Note: the read order must exactly match the order written in write().
 *
 * @param in
 * @throws IOException
 */
@Override
public void readFields(DataInput in) throws IOException {
upFlow = in.readLong();
downFlow = in.readLong();
sumFlow = in.readLong();
}
@Override
public String toString() {
return upFlow + "\t" + downFlow + "\t" + sumFlow;
}
public void set(long upFlow, long downFlow) {
this.upFlow = upFlow;
this.downFlow = downFlow;
this.sumFlow = upFlow + downFlow;
}
}
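As a quick sanity check outside MapReduce, you can round-trip an Access instance through Java's data streams. This is a minimal sketch (the class name AccessRoundTripTest and the sample numbers are made up for illustration):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class AccessRoundTripTest {
    public static void main(String[] args) throws IOException {
        Access original = new Access(2481L, 24681L);

        // Serialize using the same write() the framework calls
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bos));

        // Deserialize via the no-arg constructor + readFields()
        Access copy = new Access();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));

        System.out.println(copy); // expected: 2481	24681	27162
    }
}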
Map.java
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
public class Map extends Mapper<LongWritable, Text, Text, Access>{
//Do not allocate new objects inside map(): it is called once per input record, so per-call allocation wastes memory. A single instance is created here (via the no-arg constructor) and reused with set().
Access access = new Access();
Text k = new Text();
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
//1. Read one line of input
String line = value.toString();
//2. Split the line into fields
String[] fields = line.split("\t");
//3. Extract the phone number and populate the Access object (the core business logic)
String phoneNum = fields[1];
long upFlow = Long.parseLong(fields[fields.length - 3]);
long downFlow = Long.parseLong(fields[fields.length - 2]);
access.set(upFlow, downFlow);
k.set(phoneNum);
//4. Emit the key/value pair: phone number -> traffic record
context.write(k, access);
}
}
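One caveat: if a line has too few fields or non-numeric traffic values, Long.parseLong will throw and fail the task. If your log may contain malformed lines, the body of map() could be hardened like this (an optional sketch, not part of the original code; the minimum field count of 5 is an assumption):

String[] fields = value.toString().split("\t");
if (fields.length < 5) {
    return; // skip lines too short to contain all the fields we need
}
try {
    access.set(Long.parseLong(fields[fields.length - 3]),
               Long.parseLong(fields[fields.length - 2]));
    k.set(fields[1]);
    context.write(k, access);
} catch (NumberFormatException e) {
    // skip lines whose traffic fields are not numeric
}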
Reduce.java
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
public class Reduce extends Reducer<Text, Access, Text, Access>{
@Override
protected void reduce(Text key, Iterable<Access> values, Context context)
throws IOException, InterruptedException {
//1. Sum the upstream and downstream traffic for this phone number
long sum_upFlow = 0;
long sum_downFlow = 0;
for(Access access : values){
sum_upFlow += access.getUpFlow();
sum_downFlow += access.getDownFlow();
}
//2. Emit the totals
context.write(key, new Access(sum_upFlow,sum_downFlow));
}
}
ProvincePartitioner.java
Handles partitioning of the map output by phone-number prefix.
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;
/**
 * K2/V2 here match the mapper's output key/value types.
 */
public class ProvincePartitioner extends Partitioner<Text, Access> {
@Override
public int getPartition(Text key, Access value, int numPartitions) {
// 1 Take the first two digits of the phone number
String preNum = key.toString().substring(0, 2);
// 2 Decide the partition; default to the "other" bucket
int partition = 2;
if ("13".equals(preNum)) {
partition = 0;
}else if ("15".equals(preNum)) {
partition = 1;
}
return partition;
}
}
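With three reduce tasks (set in the driver below), the job writes three files into the output directory: part-r-00000 for numbers starting with 13, part-r-00001 for numbers starting with 15, and part-r-00002 for everything else, matching the 13*/15*/other split in the requirements.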
Driver.java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class Driver {
public static void main(String[] args) throws Exception {
//1. Get the configuration and create the job
Configuration conf = new Configuration();
Job job = Job.getInstance(conf);
//2. Set the jar by locating the driver class
job.setJarByClass(Driver.class);
//3. Configure the mapper and reducer classes
job.setMapperClass(Map.class);
job.setReducerClass(Reduce.class);
//4. Configure the mapper output key/value types
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(Access.class);
//5. Configure the final output key/value types
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Access.class);
//Set the custom partitioner
job.setPartitionerClass(ProvincePartitioner.class);
//Set the number of reduce tasks to match the number of partitions in ProvincePartitioner
job.setNumReduceTasks(3);
//6. Configure the input and output paths
FileInputFormat.setInputPaths(job, new Path(args[0]));
FileOutputFormat.setOutputPath(job, new Path(args[1]));
//7. Submit the job and wait for completion
boolean result = job.waitForCompletion(true);
System.exit(result?0:1);
}
}
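Optionally, because the reducer's input and output types are identical (Text/Access) and summation is associative, the same Reduce class could also be registered as a combiner to shrink the data shuffled between map and reduce. This is a tweak not present in the original driver:

job.setCombinerClass(Reduce.class);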
3. Packaging
Package the project into a jar, ready to run.
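With a standard Maven layout this is a single command, and the jar is written to the target/ directory (the exact file name depends on your artifactId and version):

mvn clean package

Since the cluster supplies the Hadoop libraries at runtime through the hadoop command, a plain (non-shaded) jar is normally sufficient here.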
III. Running the Project
1. Import the data
Upload the access.log file to HDFS.
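For example (the HDFS paths are hypothetical; adjust them to your environment):

hdfs dfs -mkdir -p /flow/input
hdfs dfs -put access.log /flow/input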
2. Run the program
Run the jar with the following command:
hadoop jar <path to jar> <main class> <input path> <output path (must not already exist)>
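A concrete invocation might look like this (the jar name and paths are hypothetical; the main class is Driver, since the classes above use the default package):

hadoop jar flow-stat-1.0.jar Driver /flow/input/access.log /flow/output

Note that MapReduce refuses to start if the output directory already exists, so remove it between runs with hdfs dfs -rm -r /flow/output.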