This example uses Redis as the target database. The goal is to count the number of visits per hour on a given day from a log file. The log format looks like this:
2014-02-10 04:52:34 127.0.0.1 xxx
As we know, when writing a MapReduce job we configure the input and output and then write the Mapper and Reducer classes. By default Hadoop writes the output to files on HDFS, for example:
job.setOutputFormatClass(TextOutputFormat.class);
Now suppose we want the job's results written to a database (Redis) instead. How do we do that? We can extend FileOutputFormat with a class of our own. Here is the code:
import java.io.IOException;

import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import redis.clients.jedis.Jedis;

public class LoginLogOutputFormat<K, V> extends FileOutputFormat<K, V> {
    /**
     * The key part is the custom RecordWriter: every record coming out of the
     * reducer passes through write(), where we push it into the database.
     */
    protected static class RedisRecordWriter<K, V> extends RecordWriter<K, V> {
        private Jedis jedis; // Redis client instance

        public RedisRecordWriter(Jedis jedis) {
            this.jedis = jedis;
        }

        @Override
        public void write(K key, V value) throws IOException, InterruptedException {
            if (key == null || value == null) return;
            String[] sKey = key.toString().split("-");
            // the zset key is yyyy-MM-dd_login_stat
            String outKey = sKey[0] + "-" + sKey[1] + "-" + sKey[2] + "_login_stat";
            // zadd; the member format is hour:count
            jedis.zadd(outKey.getBytes("UTF-8"), -1,
                    (sKey[3] + ":" + value).getBytes("UTF-8"));
        }

        @Override
        public void close(TaskAttemptContext context) throws IOException, InterruptedException {
            if (jedis != null) jedis.disconnect(); // close the connection
        }
    }

    @Override
    public RecordWriter<K, V> getRecordWriter(TaskAttemptContext job)
            throws IOException, InterruptedException {
        // build a Redis connection; adapt this to however you create your own connection object
        Jedis jedis = RedisClient.newJedis();
        return new RedisRecordWriter<K, V>(jedis);
    }
}
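The getRecordWriter method calls RedisClient.newJedis(), a helper the post does not show. Below is a minimal sketch of what such a helper might look like, assuming the plain Jedis client and a Redis server reachable on localhost:6379; the host and port are placeholder assumptions, so build the connection however fits your environment.

import redis.clients.jedis.Jedis;

// Hypothetical helper behind RedisClient.newJedis(); not part of the original post.
public class RedisClient {
    private static final String REDIS_HOST = "127.0.0.1"; // assumed host
    private static final int REDIS_PORT = 6379;           // default Redis port

    public static Jedis newJedis() {
        // one connection per RecordWriter; the writer closes it in close()
        return new Jedis(REDIS_HOST, REDIS_PORT);
    }
}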
And here is the complete job implementation:
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class LoginLogStatTask extends Configured implements Tool {

    public static class MyMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            if (value == null || value.toString().trim().isEmpty()) return;
            // parse the value, e.g.: 2014-02-10 04:52:34 127.0.0.1 xxx
            String[] fields = value.toString().split(" ");
            String date = fields[0];
            String time = fields[1];
            String hour = time.split(":")[0];
            String outKey = date + "-" + hour;
            context.write(new Text(outKey), new IntWritable(1));
        }
    }

    public static class MyReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int count = 0;
            for (IntWritable v : values) { // sum up the visits for this hour
                count += v.get();
            }
            context.write(key, new IntWritable(count));
        }
    }

    @Override
    public int run(String[] args) throws Exception {
        Configuration conf = getConf();
        List<Path> inputs = new ArrayList<>();
        String inputPath = args[0];
        if (inputPath.endsWith("/")) { // a directory
            inputs.addAll(HdfsUtil.listFiles(inputPath, conf));
        } else {                       // a single file
            inputs.add(new Path(inputPath));
        }
        long ts = System.currentTimeMillis();
        String jobName = "login_logs_stat_job_" + ts;
        Job job = Job.getInstance(conf, jobName);
        job.setJarByClass(LoginLogStatTask.class);
        // add the input file paths
        for (Path p : inputs) {
            FileInputFormat.addInputPath(job, p);
        }
        // set the output path, using jobName.out as the output directory
        Path out = new Path(jobName + ".out");
        FileOutputFormat.setOutputPath(job, out);
        job.setMapperClass(MyMapper.class);                    // mapper
        job.setReducerClass(MyReducer.class);                  // reducer
        job.setInputFormatClass(TextInputFormat.class);        // input format
        job.setOutputFormatClass(LoginLogOutputFormat.class);  // output format
        job.setOutputKeyClass(Text.class);                     // output key type
        job.setOutputValueClass(IntWritable.class);            // output value type
        job.waitForCompletion(true);
        return job.isSuccessful() ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        int res = ToolRunner.run(conf, new LoginLogStatTask(), args);
        System.exit(res);
    }
}
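The run method above also relies on HdfsUtil.listFiles, which the post does not include either. A minimal sketch, assuming it simply returns the files sitting directly under an HDFS directory (the class and method names are taken from the call site; the body is only an illustration):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical stand-in for the HdfsUtil helper used in run(); not from the original post.
public class HdfsUtil {
    public static List<Path> listFiles(String dir, Configuration conf) throws IOException {
        List<Path> result = new ArrayList<>();
        FileSystem fs = FileSystem.get(conf);
        // non-recursive: only files directly under the given directory
        for (FileStatus status : fs.listStatus(new Path(dir))) {
            if (status.isFile()) {
                result.add(status.getPath());
            }
        }
        return result;
    }
}

With that in place the job is submitted in the usual way, e.g. hadoop jar login-log-stat.jar LoginLogStatTask /logs/2014-02-10/ (the jar name and input path here are just examples).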
After running the job, the corresponding key appears in the Redis database:
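For the sample date above the zset key would be 2014-02-10_login_stat. As a quick check (not part of the original post), the zset can be read back with Jedis; because every member was added with the same score of -1, ZRANGE returns the members sorted lexicographically, i.e. in hour order:

import redis.clients.jedis.Jedis;

// Small verification snippet; the key name assumes the 2014-02-10 date from the sample log line.
public class CheckLoginStat {
    public static void main(String[] args) {
        try (Jedis jedis = new Jedis("127.0.0.1", 6379)) {
            // each member has the form hour:count, e.g. "04:123"
            for (String member : jedis.zrange("2014-02-10_login_stat", 0, -1)) {
                System.out.println(member);
            }
        }
    }
}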
