1. Create the Project
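Create a new Maven project (for example, in IntelliJ IDEA) with groupId com.soft863 and artifactId mrdemo, matching the coordinates in the pom.xml below.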
2. Add pom.xml Dependencies
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.soft863</groupId>
    <artifactId>mrdemo</artifactId>
    <version>1.0-SNAPSHOT</version>

    <properties>
        <maven.compiler.source>8</maven.compiler.source>
        <maven.compiler.target>8</maven.compiler.target>
        <hadoop.version>3.2.0</hadoop.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.10</version>
            <!--<scope>test</scope>-->
        </dependency>
        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
            <version>1.6.4</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-common</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-core</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
    </dependencies>
</project>
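Note: hadoop-client transitively pulls in most of the other Hadoop artifacts listed above, so the explicit entries are redundant but harmless; what matters is keeping <hadoop.version> in step with the version installed on the cluster (3.2.0 here).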
3. Create the Package
Create the com.soft863 package and add the following classes under it:
WordCountMapper
package com.soft863;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class WordCountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // key is the byte offset of the line within the file; value is the line itself
        System.out.println("key:" + key.toString());
        System.out.println("value:" + value.toString());
        // Split the line on spaces and emit (word, 1) for every word
        String line = value.toString();
        String[] words = line.split(" ");
        for (String word : words) {
            context.write(new Text(word), new IntWritable(1));
        }
    }
}
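For an input line such as "hello world hello", the mapper emits (hello,1), (world,1), (hello,1); the framework then sorts and groups these pairs by key before handing them to the reducer. The System.out.println calls are purely for tracing and can be removed.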
WordCountReducer
package com.soft863;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class WordCountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        // Sum all the counts emitted for this word and write the total
        int count = 0;
        for (IntWritable value : values) {
            count += value.get();
        }
        context.write(key, new IntWritable(count));
    }
}
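Continuing the example above, the reducer receives ("hello", [1, 1]) and ("world", [1]) and writes (hello, 2) and (world, 1).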
WordCountPartitioner
package com.soft863;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

public class WordCountPartitioner extends Partitioner<Text, IntWritable> {
    @Override
    public int getPartition(Text text, IntWritable intWritable, int numPartitions) {
        // Send words starting with "h" to reducer 1; everything else to reducer 0
        if (text.toString().startsWith("h")) {
            return 1;
        }
        return 0;
    }
}
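Note that this only takes effect when the job has more than one reduce task; the driver below sets job.setNumReduceTasks(2) accordingly. With a single reduce task, the custom partitioner is not consulted.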
WordCountCombiner
package com.soft863;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class WordCountCombiner extends Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        // Pre-aggregate counts on the map side to reduce the data shuffled to the reducers
        int count = 0;
        for (IntWritable value : values) {
            count += value.get();
        }
        context.write(key, new IntWritable(count));
    }
}
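The combiner's body is identical to the reducer's: because addition is associative and commutative, counts can be partially summed on the map side without changing the final result. This is also why the driver can simply reuse WordCountReducer as the combiner (see the commented-out line in WordCountDriver below).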
WordCountDriver
package com.soft863;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class WordCountDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://hadoop100:9000");
        Job job = Job.getInstance(conf);
        // Important: specify the jar that contains this job
        job.setJarByClass(WordCountDriver.class);
        // Set the mapper class for this job
        job.setMapperClass(WordCountMapper.class);
        // Set the reducer class for this job
        job.setReducerClass(WordCountReducer.class);
        // Set the key/value types of the map output
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        // Set the key/value types of the final output
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // Set the input path of the text data and the output path of the results
        Path sourceFile = new Path(args[0]);
        Path targetFile = new Path(args[1]);
        FileInputFormat.setInputPaths(job, sourceFile);
        FileOutputFormat.setOutputPath(job, targetFile);
        job.setPartitionerClass(WordCountPartitioner.class);
        job.setNumReduceTasks(2);
        job.setCombinerClass(WordCountCombiner.class);
        // WordCountCombiner is identical to WordCountReducer, so the reducer can be reused directly:
        // job.setCombinerClass(WordCountReducer.class);
        // Get the file system
        FileSystem fs = FileSystem.get(conf);
        // The output directory must not already exist, otherwise the job fails, so delete it first
        if (fs.exists(targetFile)) {
            fs.delete(targetFile, true);
        }
        // Submit the job to the Hadoop cluster and wait for completion
        boolean success = job.waitForCompletion(true);
        System.exit(success ? 0 : 1);
    }
}
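Because fs.defaultFS is hard-coded to hdfs://hadoop100:9000, the two arguments may be passed either as fully qualified URIs (as in the run configuration below) or as plain HDFS paths such as /bigdata/EnglishPaper.txt /result1.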
4. Add the log4j.properties File
Create log4j.properties under the resources folder (optional; it prints the job's progress logs):
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/spring.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n
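As written, only the stdout appender is attached to the root logger; the logfile appender is defined but stays inactive unless it is also listed, e.g. log4j.rootLogger=INFO, stdout, logfile.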
5. Run the Program
Open Edit Configurations and set the program arguments to the input and output paths:
hdfs://hadoop100:9000/bigdata/EnglishPaper.txt hdfs://hadoop100:9000/result1
Click Run and check the results.
Package the project into a jar.
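A typical way to build it, assuming the standard Maven layout used above: run mvn clean package in the project root; the jar is then produced under target/ as mrdemo-1.0-SNAPSHOT.jar.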
6. Submit the Jar to the Cluster
Upload the jar to the /usr/local/app/ directory on the Linux system.
Run the MR job on the cluster:
cd /usr/local/app/
hadoop jar mrdemo-1.0-SNAPSHOT.jar com.soft863.WordCountDriver hdfs://hadoop100:9000/bigdata/EnglishPaper.txt hdfs://hadoop100:9000/result1
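Once the job finishes, the output can be inspected with, for example:
hdfs dfs -cat /result1/part-r-00000
hdfs dfs -cat /result1/part-r-00001
With the custom partitioner and two reduce tasks, part-r-00001 holds the counts for words starting with "h" and part-r-00000 holds everything else.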