1.创建本地文件。需要注意的是,创建文件时尽量不要有多余的字符或者换行,否则会报数组越界异常;解决方法是在map中分割文件时添加if判断语句。
13726230501 200 1100
13396230502 300 1200
13897230503 400 1300
13897230503 100 300
13597230534 500 1400
13597230534 300 1200
2.上传至HDFS文件系统中
hadoop fs -put /phone.txt /data/phone/input
3.编写代码,代码如下
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
public class FlowSumWritable {
public static void main(String[] args) throws Exception{
Configuration conf = new Configuration();
Job job = Job.getInstance(conf);
Path inputPath = new Path(args[0]);
Path outputPath = new Path(args[1]);
job.setJarByClass(FlowSumWritable.class);
job.setJobName("Flow");
job.setMapperClass(Map.class);
job.setReducerClass(Red.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(FlowWritable.class);
FileInputFormat.setInputPaths(job, inputPath);
FileOutputFormat.setOutputPath(job, outputPath);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(FlowWritable.class);
job.waitForCompletion(true);
}
public static class Map extends Mapper<LongWritable,Text,Text,FlowWritable>{
public void map(LongWritable key,Text value,Context context) throws IOException, InterruptedException{
String[] line = value.toString().split(" ");
if(line.length>2){
FlowWritable fwValue = new FlowWritable(Integer.parseInt(line[1]),Integer.parseInt(line[2]));
context.write(new Text(line[0]), fwValue);
}
}
}
public static class Red extends Reducer<Text, FlowWritable, Text, Text>{
public void reduce(Text key,Iterable<FlowWritable> value,Context context)throws IOException, InterruptedException{
@SuppressWarnings("unused")
String[] str;
int upSum = 0;
int downSum = 0;
int totSum = 0;
for (FlowWritable val : value) {
//str = val.toString().split(" ");
//upSum+=Integer.parseInt(str[0]);
upSum+=val.getUpFlow();
//downSum+=Integer.parseInt(str[1]);
downSum+=val.getDownFlow();
}
totSum = upSum + downSum;
//FlowWritable fw = new FlowWritable(upSum, downSum);
context.write(key, new Text(upSum+":"+downSum+":"+totSum));
}
}
}
4.导出jar包并运行
hadoop jar /home/cloudera/jarfile/Flow.jar /data/phone/input /data/phone/output
5.查看运行结果
hadoop fs -cat /data/phone/output/part-r-00000