1. The concept of UV
UV: Unique Visitor, the number of distinct users who visit the site within one day. By contrast, PV (page view) counts every request, so one user opening ten pages contributes ten PVs but only one UV.
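To make the difference concrete, here is a minimal standalone sketch with made-up user ids (not from the real logs): five visits by three distinct users give PV = 5 but UV = 3.

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class UVDemo {
    public static void main(String[] args) {
        // Five visits (PV = 5) produced by three distinct users (UV = 3); ids are synthetic.
        List<String> visits = Arrays.asList("u1", "u1", "u2", "u3", "u1");
        Set<String> uniqueUsers = new HashSet<>(visits);
        System.out.println("PV = " + visits.size());       // 5
        System.out.println("UV = " + uniqueUsers.size());  // 3
    }
}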
Below is the code that computes UV over the web logs.
First, the driver (job) class:
package com.huadian.bigdata.webloguv07;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class UVWebLogMapReduce extends Configured implements Tool {

    @Override
    public int run(String[] args) throws Exception {
        // 1. create the job
        Job job = Job.getInstance(this.getConf(), "UVWebLogMapReduce");
        job.setJarByClass(UVWebLogMapReduce.class);

        // 2. input
        Path inputPath = new Path(args[0]);
        FileInputFormat.setInputPaths(job, inputPath);

        // 3. mapper
        job.setMapperClass(UVWebLogMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        // job.setNumReduceTasks(2);

        // 4. shuffle is handled by the framework

        // 5. reducer
        job.setReducerClass(UVWebLogReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // 6. output: if the output path already exists, delete it first,
        //    otherwise the job fails at submission
        Path outputPath = new Path(args[1]);
        FileSystem hdfs = FileSystem.get(this.getConf());
        if (hdfs.exists(outputPath)) {
            // boolean delete(Path f, boolean recursive)
            hdfs.delete(outputPath, true);
        }
        FileOutputFormat.setOutputPath(job, outputPath);

        boolean isSuccess = job.waitForCompletion(true);
        return isSuccess ? 0 : 1;
    }

    public static void main(String[] args) {
        Configuration configuration = new Configuration();
        try {
            // public static int run(Configuration conf, Tool tool, String[] args)
            int status = ToolRunner.run(configuration, new UVWebLogMapReduce(), args);
            System.exit(status);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
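After packaging the three classes into a jar, the job can be submitted as shown below. The jar name and the two HDFS paths are placeholders for illustration; substitute your own.

hadoop jar weblog-uv.jar com.huadian.bigdata.webloguv07.UVWebLogMapReduce \
    /input/weblog /output/weblog-uv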
Next, the Mapper class:
package com.huadian.bigdata.webloguv07;

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class UVWebLogMapper extends Mapper<LongWritable, Text, Text, Text> {

    private Text outputKey = new Text();
    private Text outputValue = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        /*
         * (1) split the line into 36 fields; if the array has fewer than 36 entries, drop the record
         * (2) guid field: index 5
         * (3) the 25th field is cityId: index 24
         * emit: <cityId, guid>
         */
        String valueStr = value.toString();
        String[] items = valueStr.split("\t");
        // allCount: total number of input records
        context.getCounter("user defined", "allCount").increment(1L);
        if (items.length >= 36) {
            // count1: records with the expected number of fields
            context.getCounter("user defined", "count1").increment(1L);
            if (StringUtils.isBlank(items[5])) {
                // skip records without a guid
                return;
            }
            outputKey.set(items[24]);
            outputValue.set(items[5]);
            context.write(outputKey, outputValue);
        } else {
            // count2: malformed records that were dropped
            context.getCounter("user defined", "count2").increment(1L);
        }
    }
}
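To make the field layout concrete, here is a small standalone sketch that builds a synthetic 36-field tab-separated line (the guid value and cityId value are made up; only indexes 5 and 24 matter) and extracts the pair the mapper would emit:

public class FieldLayoutDemo {
    public static void main(String[] args) {
        // Synthetic 36-field record; only guid (index 5) and cityId (index 24)
        // carry meaningful values here.
        String[] fields = new String[36];
        java.util.Arrays.fill(fields, "-");
        fields[5] = "guid-ab12"; // hypothetical guid
        fields[24] = "101";      // hypothetical cityId
        String line = String.join("\t", fields);

        String[] items = line.split("\t");
        System.out.println(items.length);                // 36, so the record is kept
        System.out.println(items[24] + "\t" + items[5]); // the pair the mapper emits: <cityId, guid>
    }
}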
Finally, the Reducer class:
package com.huadian.bigdata.webloguv07;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

public class UVWebLogReducer extends Reducer<Text, Text, Text, IntWritable> {

    private IntWritable outputValue = new IntWritable();

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        // UV for one city = the number of distinct guids, collected into a set
        Set<String> set = new HashSet<>();
        for (Text value : values) {
            set.add(value.toString());
        }
        outputValue.set(set.size());
        context.write(key, outputValue);
    }
}
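Since the reducer only needs the set of distinct guids per city, an optional combiner can drop duplicate <cityId, guid> pairs on the map side and shrink the shuffle. The sketch below is an addition of mine, not part of the original program; enabling it would also require job.setCombinerClass(UVWebLogCombiner.class) in the driver. It is safe here because its output types match the mapper's output types and de-duplication is idempotent.

package com.huadian.bigdata.webloguv07;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

// Optional combiner: locally de-duplicates guids for each cityId before the shuffle.
public class UVWebLogCombiner extends Reducer<Text, Text, Text, Text> {

    @Override
    protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        Set<String> seen = new HashSet<>();
        for (Text value : values) {
            // forward each distinct guid only once per cityId on this map task
            if (seen.add(value.toString())) {
                context.write(key, value);
            }
        }
    }
}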
I split this MapReduce program into three classes (driver, Mapper, Reducer) to keep the code easier to maintain and read. How the statistic is computed depends on the data: here UV is computed per city by de-duplicating the user ids (guid), so each user is counted once per city per day, and each line of the job output is a cityId and its UV separated by a tab.