POM
<properties>
    <hadoop.version>2.6.0</hadoop.version>
</properties>
<dependencies>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-mapreduce-client-core</artifactId>
        <version>${hadoop.version}</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-mapreduce-client-common</artifactId>
        <version>${hadoop.version}</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
        <version>${hadoop.version}</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>${hadoop.version}</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId>
        <version>${hadoop.version}</version>
    </dependency>
    <!-- https://mvnrepository.com/artifact/org.apache.mrunit/mrunit -->
    <dependency>
        <groupId>org.apache.mrunit</groupId>
        <artifactId>mrunit</artifactId>
        <version>1.1.0</version>
        <!--<scope>test</scope>-->
        <!-- without the hadoop2 classifier, resolving/importing this dependency may fail -->
        <classifier>hadoop2</classifier>
    </dependency>
</dependencies>
WordCountMapper.java
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
/**
 * KEYIN: by default, the starting byte offset of the line of text the MR framework has read, a Long;
 *        Hadoop ships its own, more compact serialization interface, so LongWritable is used instead of Long.
 * VALUEIN: by default, the content of the line of text the MR framework has read, a String; Text is used here.
 *
 * KEYOUT: the key of the data emitted after the user-defined logic runs; here it is the word, a String, so Text is used.
 * VALUEOUT: the value of the data emitted after the user-defined logic runs; here it is the word count, an Integer, so IntWritable is used.
 */
public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    /**
     * The business logic of the map phase lives in this custom map() method.
     * The map task calls our map() method once for every line of input.
     */
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // 1. Convert the line of text handed to us by the map task into a String
        String line = value.toString();
        // 2. Split the line into words on spaces
        String[] words = line.split(" ");
        // 3. Emit each word as <word, 1>
        for (String word : words) {
            // Use the word as the key and the count 1 as the value, so the shuffle can
            // partition by word and identical words end up in the same reduce task
            context.write(new Text(word), new IntWritable(1));
        }
    }
}
WordCountReducer.java
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
/**
 * KEYIN, VALUEIN correspond to the mapper's output KEYOUT, VALUEOUT types.
 *
 * KEYOUT, VALUEOUT are the output types of the custom reduce logic:
 * KEYOUT is the word, VALUEOUT is its total count.
 */
public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    /**
     * key is the shared key of a group of kv pairs with the same word
     */
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        int count = 0;
        // 1. Sum up the counts for this key
        for (IntWritable value : values) {
            count += value.get();
        }
        // 2. Emit the total count for this key
        context.write(key, new IntWritable(count));
    }
}
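The mrunit dependency declared in the POM lets us unit-test the mapper and reducer above without starting a cluster. Below is a minimal sketch of such a test; the file name WordCountTest.java and the sample inputs are illustrative, and it assumes JUnit 4 is on the classpath (MRUnit declares it as a dependency).
WordCountTest.java
import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mrunit.mapreduce.MapDriver;
import org.apache.hadoop.mrunit.mapreduce.ReduceDriver;
import org.junit.Before;
import org.junit.Test;

public class WordCountTest {
    private MapDriver<LongWritable, Text, Text, IntWritable> mapDriver;
    private ReduceDriver<Text, IntWritable, Text, IntWritable> reduceDriver;

    @Before
    public void setUp() {
        mapDriver = MapDriver.newMapDriver(new WordCountMapper());
        reduceDriver = ReduceDriver.newReduceDriver(new WordCountReducer());
    }

    @Test
    public void testMapper() throws IOException {
        // One input line should produce one <word, 1> pair per word, in order
        mapDriver.withInput(new LongWritable(0), new Text("hello hello world"))
                .withOutput(new Text("hello"), new IntWritable(1))
                .withOutput(new Text("hello"), new IntWritable(1))
                .withOutput(new Text("world"), new IntWritable(1))
                .runTest();
    }

    @Test
    public void testReducer() throws IOException {
        // A group of 1s for the same key should be summed into a single total
        reduceDriver.withInput(new Text("hello"), Arrays.asList(new IntWritable(1), new IntWritable(1)))
                .withOutput(new Text("hello"), new IntWritable(2))
                .runTest();
    }
}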
WordCountDriver.java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
// Java type <---> Hadoop Writable type
// boolean         BooleanWritable
// byte            ByteWritable
// int             IntWritable
// float           FloatWritable
// long            LongWritable
// double          DoubleWritable
// String          Text
// map             MapWritable
// array           ArrayWritable
/**
 * Driver (main program)
 */
public class WordCountDriver {
    public static void main(String[] args) throws Exception {
        // 1. Get the configuration and a job instance
        Configuration configuration = new Configuration();
        // Even without the following line the job runs locally, because the default value in
        // mapred-default.xml (inside hadoop-mapreduce-client-core.jar) is already "local"
        configuration.set("mapreduce.framework.name", "local");
        Job job = Job.getInstance(configuration);
        // 2. Specify the main class of this MR job's jar
        job.setJarByClass(WordCountDriver.class);
        // 3. Specify the Mapper/Reducer classes this job uses
        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);
        // 4. Specify the kv types of the mapper's output
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // 5. Specify the kv types of the final output
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // 6. Specify the directory holding the job's raw input files; it may contain
        //    multiple files, e.g. data1.txt, data2.txt, ...
        FileInputFormat.setInputPaths(job, new Path("C:\\Users\\Administrator\\Desktop\\input"));
        // 7. Specify the output directory; note: it must not exist before the job runs,
        //    otherwise the job fails
        FileOutputFormat.setOutputPath(job, new Path("C:\\Users\\Administrator\\Desktop\\output"));
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
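As a quick sanity check, suppose the input directory contains a hypothetical file data1.txt with:
hello world
hello hadoop
Then the job writes the following to output\part-r-00000 (one key per line, key and count separated by a tab; keys appear in sorted order because the framework sorts map output keys before the reduce phase):
hadoop	1
hello	2
world	1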
If you see the error:
Exception in thread "main" java.lang.UnsatisfiedLinkError: org.apache.hadoop.io.nativeio.NativeIO$Windows.access0(Ljava/lang/String;I)Z
configure the Hadoop environment on Windows and then restart. (Hadoop Windows archive)
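A typical setup, assuming the Hadoop 2.6.0 Windows binaries are unpacked to C:\hadoop (the path is illustrative): set the HADOOP_HOME environment variable to C:\hadoop, make sure %HADOOP_HOME%\bin contains winutils.exe and hadoop.dll, add that bin directory to PATH, and restart the IDE so the new environment is picked up. The home directory can also be set in code, before the Configuration is created:
// Hypothetical unpack location of the Hadoop Windows binaries;
// bin\winutils.exe must exist under it (hadoop.dll must also be loadable for native IO)
System.setProperty("hadoop.home.dir", "C:\\hadoop");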
The code is on GitHub: https://github.com/666wg/mapreduce