package com.imooc.bigdata.hadoop.hdfs.mr.wc;
import org.apache.hadoop.conf.Configuration;
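import org.apache.hadoop.fs.FileSystem; // not in the original listing; used by the output-path cleanup added below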
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
/**
 * Use MapReduce to count word frequencies in files on HDFS.
 *
 * Driver: configures the Mapper- and Reducer-related properties of the job.
 *
 * Submits the job to run locally: used during development.
 */
public class WordCountApp {
    public static void main(String[] args) throws Exception {
        System.setProperty("HADOOP_USER_NAME", "hadoop");
        Configuration configuration = new Configuration();
        configuration.set("fs.defaultFS", "hdfs://192.168.0.233:8020");
        // Create a Job
        Job job = Job.getInstance(configuration);
        // Set job parameters: the custom Mapper and Reducer classes
        job.setJarByClass(WordCountApp.class);
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);
        // Set job parameters: the Mapper's output key and value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // Set job parameters: the Reducer's output key and value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // Set job parameters: the job's input and output paths
        FileInputFormat.setInputPaths(job, new Path("/wordcount/input"));
        FileOutputFormat.setOutputPath(job, new Path("/wordcount/output"));
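        // Not in the original listing: the job refuses to run if the output
        // path already exists, so a common convenience during development is
        // to delete it before submitting. A minimal sketch:
        FileSystem fileSystem = FileSystem.get(configuration);
        Path outputPath = new Path("/wordcount/output");
        if (fileSystem.exists(outputPath)) {
            fileSystem.delete(outputPath, true);
        }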
        // Submit the job and wait for it to finish
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : -1);
    }
}
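
The driver references WordCountMapper and WordCountReducer from the same package; those classes are not part of this listing. A minimal sketch of what they might look like, matching the Text/IntWritable output types configured above (LongWritable byte offsets as map input keys and splitting lines on single spaces are assumptions):

WordCountMapper.java
package com.imooc.bigdata.hadoop.hdfs.mr.wc;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;

public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    private static final IntWritable ONE = new IntWritable(1);
    private final Text word = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Emit (word, 1) for every space-separated token on the line
        for (String token : value.toString().split(" ")) {
            word.set(token);
            context.write(word, ONE);
        }
    }
}

WordCountReducer.java
package com.imooc.bigdata.hadoop.hdfs.mr.wc;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;

public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        // Sum all partial counts for the word and emit the total
        int count = 0;
        for (IntWritable value : values) {
            count += value.get();
        }
        context.write(key, new IntWritable(count));
    }
}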
log4j.properties
log4j.rootLogger=INFO,stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.Target=System.out
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
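# Pattern reference: %-5p = log level left-justified in a 5-character field,
# %l = caller location (class.method(file:line)), %m = log message, %n = newline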
log4j.appender.stdout.layout.ConversionPattern=[%-5p] method:%l%n%m%n