1) Requirement:
Clean the log: split each line on spaces; any line that yields 11 or fewer fields is considered invalid and must be removed.
Before cleaning, the log file contains 14,619 lines in total.
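Before the MapReduce code, here is the filtering rule on a single line: split on single spaces and keep the line only if it yields more than 11 fields. A minimal standalone sketch; the sample log line is made up for illustration and is not taken from web.txt:
public class FieldCountDemo {
    public static void main(String[] args) {
        // Hypothetical access-log line; real input comes from the log file
        String line = "194.237.142.21 - - [18/Sep/2013:06:49:18 +0000] \"GET /wp-content/uploads/2013/07/rstudio-git3.png HTTP/1.1\" 304 0 \"-\" \"Mozilla/4.0\"";
        String[] fields = line.split(" ");
        // 11 or fewer fields => invalid; more than 11 => keep
        System.out.println("field count = " + fields.length + ", valid = " + (fields.length > 11));
    }
}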
2) Implementation:
(1) Mapper class (LogCleanMap)
package com.itstar.mr.wc0908.mr.bigdata_13.clean;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
/**
* @author Jackson
* 2019-9-05
*/
public class LogCleanMap extends Mapper<LongWritable, Text, Text, NullWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // 1. Read one line of input
        String line = value.toString();
        // 2. Split the line on spaces
        String[] word = line.split(" ");
        // 3. Keep the line only if it has more than 11 fields
        if (word.length > 11) {
            // Only the key carries data; NullWritable.get() supplies the singleton empty value
            context.write(value, NullWritable.get());
        }
    }
}
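Note that line.split(" ") splits on every single space, so consecutive spaces produce empty strings that still count as fields. If the logs may contain runs of spaces, splitting on a whitespace pattern is an alternative. A small sketch of the difference (plain Java, independent of Hadoop):
public class SplitDemo {
    public static void main(String[] args) {
        String line = "a  b c";                          // two spaces between "a" and "b"
        System.out.println(line.split(" ").length);      // 4 fields: "a", "", "b", "c"
        System.out.println(line.split("\\s+").length);   // 3 fields: "a", "b", "c"
    }
}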
(2) Driver class (LogCleanDriver):
package com.itstar.mr.wc0908.mr.bigdata_13.clean;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.io.Text;
import java.io.IOException;
public class LogCleanDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // Hard-coded local paths for testing
        args = new String[]{"D:\\input\\test\\plus\\web.txt", "D:\\input\\test\\plus\\outweb.txt"};
        Job job = Job.getInstance(new Configuration());
        job.setJarByClass(LogCleanDriver.class);
        // Configure the mapper class and its output key/value types
        job.setMapperClass(LogCleanMap.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);
        // Run with 0 reducers (default is 1); map output is written directly
        job.setNumReduceTasks(0);
        // Input path
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        // Output path (must not already exist)
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // Submit the job and wait for completion
        job.waitForCompletion(true);
    }
}
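One practical note on the output path: FileOutputFormat refuses to start the job if the output directory already exists. When re-running locally it can be convenient to delete a leftover output directory in the driver before setting the output path. A minimal sketch, not part of the original code, requiring an extra import of org.apache.hadoop.fs.FileSystem:
// Place before FileOutputFormat.setOutputPath(job, new Path(args[1]))
FileSystem fs = FileSystem.get(job.getConfiguration());
Path outPath = new Path(args[1]);
if (fs.exists(outPath)) {
    fs.delete(outPath, true);   // true = delete recursively
}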
Result:
After cleaning, the log file contains 13,770 lines in total.
Improvement: add counters to report how many records are valid and how many are invalid.
The validation and counting logic is wrapped in a separate parseLog method:
Mapper class:
package com.itstar.mr.wc0908.mr.bigdata_13.clean;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
/**
* @author Jackson
* 2019-11-05
*/
public class LogCleanMap extends Mapper<LongWritable, Text, Text, NullWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // 1. Read one line of input
        String line = value.toString();
        // 2. Delegate validation (and counting) to parseLog
        boolean b = parseLog(line, context);
        // If the line is invalid, skip it
        if (!b) {
            return;
        }
        context.write(value, NullWritable.get());
    }

    public boolean parseLog(String line, Context context) {
        // Split the line on spaces
        String[] word = line.split(" ");
        // Check the field count and update the matching counter
        if (word.length > 11) {
            // getCounter(group, name) creates the counter on first use
            context.getCounter("map", "valid records").increment(1);
            return true;
        } else {
            context.getCounter("map", "invalid records").increment(1);
            return false;
        }
    }
}
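As a side note, counters can also be keyed by an enum instead of a group/name string pair, which avoids typos when the same counter is referenced in several places. A minimal sketch of that variant; the enum below is illustrative and not part of the original code:
public enum LogQuality { VALID, INVALID }
// Inside parseLog, the string-based calls would then become:
// context.getCounter(LogQuality.VALID).increment(1);
// context.getCounter(LogQuality.INVALID).increment(1);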
Driver class:
package com.itstar.mr.wc0908.mr.bigdata_13.clean;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.io.Text;
import java.io.IOException;
public class LogCleanDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // Hard-coded local paths for testing
        args = new String[]{"D:\\input\\test\\plus\\web.txt", "D:\\input\\test\\plus\\outweb4.txt"};
        Job job = Job.getInstance(new Configuration());
        job.setJarByClass(LogCleanDriver.class);
        // Configure the mapper class and its output key/value types
        job.setMapperClass(LogCleanMap.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(NullWritable.class);
        // Run with 0 reducers (default is 1); map output is written directly
        job.setNumReduceTasks(0);
        // Input path
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        // Output path (must not already exist)
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // Submit the job and wait for completion
        job.waitForCompletion(true);
    }
}
Result: when the job finishes, the custom counters are printed together with the built-in job counters under the group "map", showing how many records were counted as valid and how many as invalid.
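If the driver should also use these values programmatically (for example to log or assert on them), they can be read from the Job object after waitForCompletion returns. A minimal sketch, assuming the group and counter names used in the mapper above and an extra import of org.apache.hadoop.mapreduce.Counters:
boolean success = job.waitForCompletion(true);
if (success) {
    Counters counters = job.getCounters();
    long valid = counters.findCounter("map", "valid records").getValue();
    long invalid = counters.findCounter("map", "invalid records").getValue();
    System.out.println("valid records = " + valid + ", invalid records = " + invalid);
}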