Project Requirements
Based on the e-commerce log file, analyze:
- Total page views (each log line counts as one view)
- Page views per province (requires resolving the IP address)
- ETL of the logs (ETL: the process of Extracting data from the source, Transforming it, and Loading it into the target)
Why ETL: there is no need to parse every field; only the fields that carry value need to be extracted. In this project those are: ip, url, pageId (the page Id corresponding to the topicId), country, province, and city.
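The field extraction is done by a utils.LogParser class that is used by the Mapper below but not listed here. The following is only a minimal sketch of what such a parser could look like, assuming tab-separated log lines; the field positions (URL_INDEX, IP_INDEX) are placeholders that would have to be adjusted to the real trackinfo log layout.

import java.util.HashMap;
import java.util.Map;

// Minimal sketch only, NOT the project's utils.LogParser.
// Assumes tab-separated records; the field positions are illustrative placeholders.
public class LogParser {
    private static final int URL_INDEX = 1;   // hypothetical position of the url field
    private static final int IP_INDEX  = 13;  // hypothetical position of the ip field

    public Map<String, String> parse(String line) {
        Map<String, String> info = new HashMap<>();
        if (line == null || line.isEmpty()) {
            return info;
        }
        String[] fields = line.split("\t");
        if (fields.length > IP_INDEX) {
            info.put("url", fields[URL_INDEX]);
            info.put("ip", fields[IP_INDEX]);
        }
        return info;
    }
}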
Requirement 2
Count page views per province (requires resolving the IP address).
Building on Requirement 1, create the following classes.
ProvinceMapper class
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import utils.IPParser;
import utils.LogParser;

import java.io.IOException;
import java.util.Map;

public class ProvinceMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String log = value.toString();
        // Extract the fields of interest (ip, url, ...) from the raw log line
        LogParser parser = new LogParser();
        Map<String, String> logInfo = parser.parse(log);
        if (StringUtils.isNotBlank(logInfo.get("ip"))) {
            // Resolve the IP to a region and emit <province, 1>
            IPParser.RegionInfo regionInfo = IPParser.getInstance().analyseIp(logInfo.get("ip"));
            String province = regionInfo.getProvince();
            if (StringUtils.isNotBlank(province)) {
                context.write(new Text(province), new IntWritable(1));
            } else {
                // IP resolved but no province information available
                context.write(new Text("-"), new IntWritable(1));
            }
        } else {
            // No usable IP in this record
            context.write(new Text("-"), new IntWritable(1));
        }
    }
}
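The Mapper also depends on a utils.IPParser singleton that resolves an IP address to a region. Its implementation (normally backed by an IP geolocation database) is not shown in this project listing; the sketch below only illustrates the interface the Mapper assumes, with the actual lookup left out.

// Interface sketch of the IP-resolution utility used above; the real
// utils.IPParser would wire analyseIp() to an IP geolocation database.
public class IPParser {
    private static final IPParser INSTANCE = new IPParser();

    public static IPParser getInstance() {
        return INSTANCE;
    }

    public RegionInfo analyseIp(String ip) {
        RegionInfo info = new RegionInfo();
        // The real lookup against an IP library goes here; left empty in this sketch,
        // so country/province/city stay null and the Mapper would emit "-".
        return info;
    }

    public static class RegionInfo {
        private String country;
        private String province;
        private String city;

        public String getCountry()  { return country; }
        public String getProvince() { return province; }
        public String getCity()     { return city; }
        // setters omitted for brevity
    }
}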
ProvinceReducer class
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class ProvinceReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        // Sum the 1s emitted by the mapper for this province
        int sum = 0;
        for (IntWritable value : values) {
            sum += value.get();
        }
        context.write(key, new IntWritable(sum));
    }
}
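Because the per-province counts are plain sums, the same reducer can optionally also be registered as a combiner to shrink the shuffle. This is an optional extra line for the driver below, not part of the original setup:

// Optional: run the same summing logic on the map side before the shuffle.
job.setCombinerClass(ProvinceReducer.class);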
ProvinceDriver class
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class ProvinceDriver {

    public static void main(String[] args) throws Exception {
        // 1. Load the configuration and create the job
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // 2. Set the jar that contains the driver
        job.setJarByClass(ProvinceDriver.class);

        // 3. Set the mapper and reducer classes
        job.setMapperClass(ProvinceMapper.class);
        job.setReducerClass(ProvinceReducer.class);

        // 4. Set the mapper output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        // 5. Set the final output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // Optional: set a custom partitioner
        // job.setPartitionerClass(.class);
        // Optional: set the number of reduce tasks to match the number of partitions the partitioner defines
        // job.setNumReduceTasks();

        // 6. Set the input and output paths
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // 7. Submit the job and wait for completion
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
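The commented-out setPartitionerClass / setNumReduceTasks lines are where a custom partitioner would be plugged in. Purely as an illustration (not part of this project), a partitioner that routes one example province to its own reducer could look like the sketch below; it would be paired with job.setPartitionerClass(ProvincePartitioner.class) and job.setNumReduceTasks(2) in the driver, since the number of reduce tasks must match the number of partitions it produces.

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

// Illustrative sketch: send records for one example province to reducer 0,
// everything else to reducer 1. Pair with job.setNumReduceTasks(2).
public class ProvincePartitioner extends Partitioner<Text, IntWritable> {
    @Override
    public int getPartition(Text key, IntWritable value, int numPartitions) {
        // "上海市" is only a placeholder province name for this example.
        return "上海市".equals(key.toString()) ? 0 : 1;
    }
}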
Packaging works the same way as in Project 1.
Submit the job with the Hadoop command:
hadoop jar province.jar /dianshang/trackinfo_20130721.txt /dianshang/out3
The result can also be viewed directly from the terminal:
hdfs dfs -cat /dianshang/out3/part-r-00000