Source file: web.txt
Mapper class
java
package telephone;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
public class LogCleanMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // 1. Read one line of input
        String line = value.toString();
        // 2. Drop the record if it does not pass the validity check
        boolean valid = parseLog(line, context);
        if (!valid) {
            return;
        }
        // 3. Write the valid line out as the key; no value is needed
        context.write(value, NullWritable.get());
    }

    // Check whether the record is well formed and update the counters
    public boolean parseLog(String line, Context context) {
        // Split the line into fields on spaces
        String[] words = line.split(" ");
        // A valid record has more than 11 fields
        if (words.length > 11) {
            context.getCounter("map", "valid records").increment(1);
            return true;
        } else {
            context.getCounter("map", "invalid records").increment(1);
            return false;
        }
    }
}
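To see what the length-based filter actually does, here is a minimal standalone sketch of the same check without the Hadoop Context or counters. The ParseLogSketch class name and the two sample log lines are made up for illustration; only the split(" ") / length > 11 logic mirrors parseLog above.
java
public class ParseLogSketch {

    // Same rule as LogCleanMapper.parseLog: keep lines with more than 11 space-separated fields
    static boolean isValid(String line) {
        String[] words = line.split(" ");
        return words.length > 11;
    }

    public static void main(String[] args) {
        // Hypothetical sample lines: the first has 12 fields, the second only 10
        String good = "194.237.142.21 - - [18/Sep/2013:06:49:18 +0000] \"GET /wp-content/uploads/2013/07/rstudio-git3.png HTTP/1.1\" 304 0 \"-\" \"Mozilla/4.0\"";
        String bad  = "163.177.71.12 - - [18/Sep/2013:06:49:33 +0000] \"HEAD / HTTP/1.1\" 200 20";

        System.out.println(isValid(good)); // true  -> the mapper would write this line out
        System.out.println(isValid(bad));  // false -> the mapper would drop this line
    }
}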
Driver class (map-only job, no Reducer)
java
package telephone;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
public class LogCleanDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // Hardcoded local input file and output directory
        args = new String[]{"G:\\a\\web.txt", "G:\\a\\OutWeb1.txt"};
        Job job = Job.getInstance(new Configuration());
        job.setJarByClass(LogCleanDriver.class);
        // Configure the mapper class and its output types
        job.setMapperClass(LogCleanMapper.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);
        // Number of reduce tasks (default is 1); 0 makes this a map-only job
        job.setNumReduceTasks(0);
        // Input path
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        // Output path (must not already exist)
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // Submit the job and wait for it to finish
        job.waitForCompletion(true);
    }
}
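If the hardcoded paths get in the way, the driver can also be wired through Hadoop's standard Tool/ToolRunner API so the input and output paths come from the command line. The sketch below is only one way to do that; the LogCleanTool class name and the "log-clean" job name are assumptions, not part of the original code.
java
package telephone;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class LogCleanTool extends Configured implements Tool {

    @Override
    public int run(String[] args) throws Exception {
        // Same job setup as LogCleanDriver, but paths come from args[0] and args[1]
        Job job = Job.getInstance(getConf(), "log-clean");
        job.setJarByClass(LogCleanTool.class);
        job.setMapperClass(LogCleanMapper.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);
        job.setNumReduceTasks(0); // map-only job, no reduce phase
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        return job.waitForCompletion(true) ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        // ToolRunner parses generic options (-D, -files, ...) before calling run()
        System.exit(ToolRunner.run(new Configuration(), new LogCleanTool(), args));
    }
}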