Requirement: aggregate the upstream and downstream traffic in the log data, summed per phone number.
Sample data. Each line carries: timestamp, phone number, MAC address:operator, IP address, upstream/downstream packet counts, upstream bytes, downstream bytes, and an HTTP status code. The code below reads the byte counts relative to the end of the line (third- and second-to-last fields), which stays correct even if the middle fields vary:
1363157985066 13726230503 00-FD-07-A4-72-B8:CMCC 120.196.100.82 24 27 2481 24681 200
1363157995052 13826544101 5C-0E-8B-C7-F1-E0:CMCC 120.197.40.4 4 0 264 0 200
1363157991076 13926435656 20-10-7A-28-CC-0A:CMCC 120.196.100.99 2 4 132 1512 200
1363154400022 13926251106 5C-0E-8B-8B-B1-50:CMCC 120.197.40.4 4 0 240 0 200
(1) Define a custom bean
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;
/**
 * Encapsulates the upstream/downstream traffic of one log record as a
 * Hadoop Writable so it can travel between the mapper and the reducer.
 */
public class FlowBean implements Writable {

    private long upFlow;
    private long dFlow;
    private long sumFlow;

    // Writables need a no-arg constructor so the framework can
    // instantiate them during deserialization.
    public FlowBean() {
        super();
    }

    public FlowBean(long upFlow, long dFlow) {
        super();
        this.upFlow = upFlow;
        this.dFlow = dFlow;
        // Derive the total here; without this line sumFlow would always
        // serialize (and print) as 0.
        this.sumFlow = upFlow + dFlow;
    }

    public long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public long getdFlow() {
        return dFlow;
    }

    public void setdFlow(long dFlow) {
        this.dFlow = dFlow;
    }

    public long getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow(long sumFlow) {
        this.sumFlow = sumFlow;
    }

    // Serialization: write the fields to the output stream
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeLong(upFlow);
        out.writeLong(dFlow);
        out.writeLong(sumFlow);
    }

    // Deserialization: read the fields back from the input stream.
    // Note: fields must be read in exactly the order they were written.
    @Override
    public void readFields(DataInput in) throws IOException {
        upFlow = in.readLong();
        dFlow = in.readLong();
        sumFlow = in.readLong();
    }

    // toString() controls how the bean is rendered in the job's text output
    @Override
    public String toString() {
        return upFlow + "\t" + dFlow + "\t" + sumFlow;
    }
}
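A quick way to verify that write() and readFields() agree on field order is a round trip through an in-memory buffer. This is a minimal sketch, not part of the job itself; the class name FlowBeanTest and the sample values are made up for illustration:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

public class FlowBeanTest {
    public static void main(String[] args) throws Exception {
        FlowBean original = new FlowBean(2481, 24681);

        // Serialize into an in-memory buffer.
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        original.write(new DataOutputStream(buf));

        // Deserialize the same bytes into a fresh bean.
        FlowBean copy = new FlowBean();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));

        // Prints the three fields tab-separated: 2481 24681 27162
        System.out.println(copy);
    }
}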
(2) Create the mapper
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
public class FlowCountMapper extends Mapper<LongWritable, Text, Text, FlowBean> {

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Convert the line to a String
        String line = value.toString();
        // Split into fields; \s+ covers both tab- and space-separated input
        String[] split = line.split("\\s+");
        // Field 2 is the phone number
        String phoneNum = split[1];
        // Count from the end of the line: the HTTP status is last, downstream
        // bytes are second-to-last, upstream bytes third-to-last
        long upFlow = Long.parseLong(split[split.length - 3]);
        long dFlow = Long.parseLong(split[split.length - 2]);
        context.write(new Text(phoneNum), new FlowBean(upFlow, dFlow));
    }
}
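Because the field arithmetic counts from the end of the line, it is worth sanity-checking it against one sample record before submitting a job. A hypothetical standalone check (the class name ParseCheck is invented for this sketch):

public class ParseCheck {
    public static void main(String[] args) {
        String line = "1363157985066 13726230503 00-FD-07-A4-72-B8:CMCC 120.196.100.82 24 27 2481 24681 200";
        String[] split = line.split("\\s+");
        System.out.println(split[1]);                // 13726230503 (phone number)
        System.out.println(split[split.length - 3]); // 2481  (upstream bytes)
        System.out.println(split[split.length - 2]); // 24681 (downstream bytes)
    }
}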
(3) Create the reducer
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
public class FlowCountReduce extends Reducer<Text, FlowBean, Text, FlowBean> {

    @Override
    protected void reduce(Text key, Iterable<FlowBean> values, Context context)
            throws IOException, InterruptedException {
        long sum_upFlow = 0;
        long sum_dFlow = 0;
        // Accumulate upstream and downstream totals separately. Hadoop reuses
        // the same FlowBean instance across iterations, so copy the primitive
        // values out rather than holding on to the bean itself.
        for (FlowBean flowBean : values) {
            sum_upFlow += flowBean.getUpFlow();
            sum_dFlow += flowBean.getdFlow();
        }
        FlowBean res = new FlowBean(sum_upFlow, sum_dFlow);
        context.write(key, res);
    }
}
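Since the reduce step is a pure sum and its input and output key/value types are identical (<Text, FlowBean>), the same class can optionally double as a combiner to pre-aggregate on the map side and shrink the shuffle. If desired, register it in the driver of step (4) right after setReducerClass:

// Optional: safe here because summation is associative and the
// combiner's input/output types match the reducer's.
job.setCombinerClass(FlowCountReduce.class);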
(4) Create the FlowCount driver
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class FlowCount {

    public static void main(String[] args) throws Exception {
        // Fail fast with a usage hint instead of passing empty paths to the job
        if (args.length != 2) {
            System.err.println("Usage: FlowCount <input path> <output path>");
            System.exit(2);
        }
        String inPath = args[0];
        String outPath = args[1];

        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // Identify the jar that carries this job by its main class
        job.setJarByClass(FlowCount.class);

        // Set the mapper and reducer classes
        job.setMapperClass(FlowCountMapper.class);
        job.setReducerClass(FlowCountReduce.class);

        // Set the mapper's output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);

        // Set the final (reducer) output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(FlowBean.class);

        // Set the job's input and output directories
        FileInputFormat.setInputPaths(job, new Path(inPath));
        FileOutputFormat.setOutputPath(job, new Path(outPath));

        // Submit the job, wait for it to finish, and report success or failure
        boolean res = job.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }
}
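To run the job, package the three classes into a jar and submit it with hadoop jar; the jar name and HDFS paths below are placeholders:

hadoop jar flowcount.jar FlowCount /flow/input /flow/output

For the four sample records above, the output file (part-r-00000) holds one line per phone number with upstream bytes, downstream bytes, and the total:

13726230503	2481	24681	27162
13826544101	264	0	264
13926251106	240	0	240
13926435656	132	1512	1644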