Assignment: design a MapReduce job that computes, for each phone number, the upstream traffic total, the downstream traffic total, and the overall traffic total (upstream total + downstream total).
Day 2 work:
Write the MapReduce code in IDEA.
FlowMapper class:
package com.colin.bigdata.mapreduce;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class FlowMapper extends Mapper<LongWritable, Text, Text, Access> {
    // The input key is the byte offset of the line within the file (usually unused);
    // the input value is one line of text.
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String line = value.toString();
        String[] fields = line.split("\t");
        String phone = fields[1];
        // The upstream and downstream byte counts are located by counting from the end of the line
        long upFlow = Long.parseLong(fields[fields.length - 3]);
        long downFlow = Long.parseLong(fields[fields.length - 2]);
        context.write(new Text(phone), new Access(phone, upFlow, downFlow));
    }
}
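The mapper assumes tab-separated lines with the phone number in the second field and the upstream/downstream byte counts in the third-from-last and second-from-last fields. The original access.log is not reproduced in this log, so the following sample line is purely hypothetical:

1363157985066	13726230503	00-FD-07-A4-72-B8:CMCC	120.196.100.82	i02.c.aliimg.com	24	27	2481	24681	200

For such a line, fields[1] would be 13726230503 (the phone number), fields[fields.length - 3] would be 2481 (upstream), and fields[fields.length - 2] would be 24681 (downstream).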
FlowReducer class:
package com.colin.bigdata.mapreduce;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class FlowReducer extends Reducer<Text, Access, Text, Access> {
    // The input key is a phone number; values holds every Access record emitted for it.
    @Override
    protected void reduce(Text key, Iterable<Access> values, Context context) throws IOException, InterruptedException {
        long upFlowSum = 0;
        long downFlowSum = 0;
        // Hadoop reuses a single Access instance while iterating, so the fields are
        // read out immediately rather than holding on to the objects themselves.
        for (Access access : values) {
            upFlowSum += access.getUpFlow();
            downFlowSum += access.getDownFlow();
        }
        Access result = new Access(upFlowSum, downFlowSum);
        context.write(key, result);
    }
}
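To make the aggregation concrete: if the map phase emitted two records for the same phone, say (13726230503, up=2481, down=24681) and (13726230503, up=100, down=200), the shuffle groups both under the single key 13726230503, and this reducer outputs up=2581, down=24881, sum=27462 (illustrative numbers, not taken from the real data).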
FlowJob class:
package com.colin.bigdata.mapreduce;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class FlowJob {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(FlowJob.class);
        job.setMapperClass(FlowMapper.class);
        job.setReducerClass(FlowReducer.class);
        job.setPartitionerClass(FlowPartitioner.class); // sketched after this class
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Access.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Access.class);
        job.setNumReduceTasks(3); // must match the number of partitions FlowPartitioner produces

        // Delete the output directory if it already exists, otherwise the job refuses to start.
        // Note: this must be the same path that is passed to setOutputPath below.
        Path outputPath = new Path("D:/out");
        FileSystem fileSystem = FileSystem.get(conf);
        fileSystem.delete(outputPath, true);

        // Set the input path
        FileInputFormat.setInputPaths(job, new Path("D:/access.log"));
        // Set the output path
        FileOutputFormat.setOutputPath(job, outputPath);

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
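The job registers a FlowPartitioner that this log never shows. Here is a minimal sketch of what it could look like, assuming records are routed to the three reducers by phone-number prefix; the prefixes 136 and 137 are illustrative assumptions, not taken from the original code:

package com.colin.bigdata.mapreduce;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

public class FlowPartitioner extends Partitioner<Text, Access> {
    @Override
    public int getPartition(Text key, Access value, int numPartitions) {
        // Route by the first three digits of the phone number (assumed rule). The return
        // value must stay within [0, numReduceTasks), i.e. 0..2 for the three reducers above.
        String prefix = key.toString().substring(0, 3);
        if ("136".equals(prefix)) {
            return 0;
        } else if ("137".equals(prefix)) {
            return 1;
        } else {
            return 2;
        }
    }
}

Whatever the real partitioning rule is, it has to return values in 0..2 only; anything else makes the job fail with an illegal-partition error.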
Take care with the input and output paths; if they are wrong, the job either fails to start or writes the results somewhere you will not find them.
Access class:
package com.colin.bigdata.mapreduce;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;

public class Access implements Writable {
    private String phone;
    private long upFlow;
    private long downFlow;
    private long sumFlow;

    // Hadoop requires a no-arg constructor so it can instantiate the object before readFields()
    public Access() {
    }

    public Access(String phone, long upFlow, long downFlow) {
        this.phone = phone;
        this.upFlow = upFlow;
        this.downFlow = downFlow;
        this.sumFlow = upFlow + downFlow;
    }

    // Used for the reducer's output; phone stays null, which is safe because the
    // final result is rendered via toString(), not serialized via write().
    public Access(long upFlow, long downFlow) {
        this.upFlow = upFlow;
        this.downFlow = downFlow;
        this.sumFlow = upFlow + downFlow;
    }

    // write() and readFields() must handle the fields in the same order
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(phone);
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        this.phone = in.readUTF();
        this.upFlow = in.readLong();
        this.downFlow = in.readLong();
        this.sumFlow = in.readLong();
    }

    @Override
    public String toString() {
        return upFlow + "\t" + downFlow + "\t" + sumFlow;
    }

    public String getPhone() {
        return phone;
    }

    public void setPhone(String phone) {
        this.phone = phone;
    }

    public long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public long getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }

    public long getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow(long sumFlow) {
        this.sumFlow = sumFlow;
    }
}
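Because Writable serialization is positional, write() and readFields() must process the fields in exactly the same order. A quick round-trip check (a hypothetical test class, not part of the original project):

package com.colin.bigdata.mapreduce;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Hypothetical sanity check: serialize an Access and read it back.
public class AccessRoundTrip {
    public static void main(String[] args) throws IOException {
        Access original = new Access("13726230503", 2481L, 24681L);
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));

        Access copy = new Access();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(copy); // prints 2481, 24681, 27162 separated by tabs
    }
}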
I chose to install IDEA inside the virtual machine and run the code there.
Results:
Viewing the result directory out2 in the virtual machine's target output directory:
The output:
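Each result file (part-r-00000 through part-r-00002, one per reducer) contains one line per phone number in the layout phone, tab, Access.toString(), i.e. phone / upFlow / downFlow / sumFlow separated by tabs, which is how TextOutputFormat renders each key/value pair. An illustrative line with made-up numbers:

13726230503	2481	24681	27162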