pom.xml dependencies:
```xml
<dependencies>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>2.7.5</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>2.7.5</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId>
        <version>2.7.5</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-mapreduce-client-core</artifactId>
        <version>2.7.5</version>
    </dependency>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>RELEASE</version>
    </dependency>
    <!-- Lombok: a tool that reduces Java boilerplate -->
    <dependency>
        <groupId>org.projectlombok</groupId>
        <artifactId>lombok</artifactId>
        <version>1.18.0</version>
        <scope>provided</scope>
    </dependency>
</dependencies>
<build>
    <plugins>
        <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-compiler-plugin</artifactId>
            <version>3.1</version>
            <configuration>
                <source>1.8</source>
                <target>1.8</target>
                <encoding>UTF-8</encoding>
                <!-- <verbose>true</verbose> -->
            </configuration>
        </plugin>
        <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-shade-plugin</artifactId>
            <version>2.4.3</version>
            <executions>
                <execution>
                    <phase>package</phase>
                    <goals>
                        <goal>shade</goal>
                    </goals>
                    <configuration>
                        <minimizeJar>true</minimizeJar>
                    </configuration>
                </execution>
            </executions>
        </plugin>
    </plugins>
</build>
```
WordCountMapper class
```java
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * Four generic type parameters:
 * KEYIN:    type of K1
 * VALUEIN:  type of V1
 * KEYOUT:   type of K2
 * VALUEOUT: type of V2
 *
 * Hadoop uses its own serializable wrapper classes (Text, LongWritable, ...)
 * around the Java primitive types.
 */
public class WordCountMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
    /**
     * The map method converts K1/V1 pairs into K2/V2 pairs.
     * @param key     K1: the line's byte offset in the file
     * @param value   V1: the text of the line
     * @param context the context object, which bridges to the rest of the framework
     * @throws IOException
     * @throws InterruptedException
     */
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        Text text = new Text();
        LongWritable longWritable = new LongWritable();
        // 1. Split the line of text
        String[] split = value.toString().split(",");
        // 2. Iterate over the words and build K2/V2 pairs
        for (String word : split) {
            // 3. Write the K2/V2 pair to the context
            text.set(word);
            longWritable.set(1);
            context.write(text, longWritable);
        }
    }
}
```
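Since the javadoc above describes the K1/V1 to K2/V2 transformation abstractly, here is a minimal, dependency-free sketch of what the mapper emits for one input line. The class name and sample line are illustrative, not part of the original code:

```java
public class MapperDryRun {
    public static void main(String[] args) {
        // One hypothetical input line (V1); its byte offset in the file would be K1.
        String line = "hello,hadoop,hello";
        // Mirrors the mapper: split on commas, emit (word, 1) for each token.
        for (String word : line.split(",")) {
            System.out.println(word + "\t1"); // like context.write(new Text(word), new LongWritable(1))
        }
        // Prints: hello 1 / hadoop 1 / hello 1 -- the shuffle later groups these by key.
    }
}
```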
WordCountReducer class
```java
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * Four generic type parameters:
 * KEYIN:    type of the new K2
 * VALUEIN:  type of the new V2
 * KEYOUT:   type of K3
 * VALUEOUT: type of V3
 */
public class WordCountReducer extends Reducer<Text, LongWritable, Text, LongWritable> {
    /**
     * The reduce method converts the new K2/V2 pairs into K3/V3
     * and writes K3/V3 to the context.
     * @param key     the new K2
     * @param values  the collection of new V2 values grouped under this key
     * @param context the context object
     * @throws IOException
     * @throws InterruptedException
     */
    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context) throws IOException, InterruptedException {
        // 1. Iterate over the collection and sum the numbers to get V3
        long count = 0;
        for (LongWritable value : values) {
            count += value.get();
        }
        // 2. Write the result to the context
        context.write(key, new LongWritable(count));
    }
}
```
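Between map and reduce, the shuffle groups all values that share a key, so the reducer only has to sum each group. A dependency-free sketch with made-up data (the class name, key, and values are illustrative):

```java
import java.util.Arrays;
import java.util.List;

public class ReducerDryRun {
    public static void main(String[] args) {
        // Hypothetical post-shuffle input: key "hello" with the grouped values [1, 1].
        String key = "hello";
        List<Long> values = Arrays.asList(1L, 1L);
        // Mirrors the reducer: sum the grouped counts to produce V3.
        long count = 0;
        for (long value : values) {
            count += value;
        }
        System.out.println(key + "\t" + count); // prints: hello 2
    }
}
```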
JobMain driver class
```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class JobMain extends Configured implements Tool {
    /**
     * This method defines and configures the job.
     * @param strings command-line arguments
     * @return exit code
     * @throws Exception
     */
    @Override
    public int run(String[] strings) throws Exception {
        // 1. Create a job object
        Job job = Job.getInstance(super.getConf(), "wordcount");
        // When the packaged jar runs on the cluster, this setting is required
        // so Hadoop can locate the program's main class
        job.setJarByClass(JobMain.class);
        // 2. Configure the job (eight steps)
        // Step 1: specify the input format and input path
        job.setInputFormatClass(TextInputFormat.class);
        // Local file system
        TextInputFormat.addInputPath(job, new Path("file:///D:\\mapreduce\\input"));
        // TextInputFormat.addInputPath(job, new Path("hdfs://node01:8020/wordcount"));
        // Step 2: specify the map phase's processing class and its data types
        job.setMapperClass(WordCountMapper.class);
        job.setMapOutputKeyClass(Text.class);           // type of K2 (map output key)
        job.setMapOutputValueClass(LongWritable.class); // type of V2 (map output value)
        // Steps 3-6: the shuffle phase uses the defaults, nothing to configure
        // Step 7: specify the reduce phase's processing class and its data types
        job.setReducerClass(WordCountReducer.class);
        job.setOutputKeyClass(Text.class);           // type of K3
        job.setOutputValueClass(LongWritable.class); // type of V3
        // Step 8: set the output format and path
        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job, new Path("file:///D:\\mapreduce\\output")); // the target folder must not already exist, otherwise the job fails
        // TextOutputFormat.setOutputPath(job, new Path("hdfs://node01:8020/wordcount_out")); // output path on HDFS
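        // Optional sketch (not in the original): because TextOutputFormat fails when
        // the output directory already exists, repeated local runs often delete it
        // first. This uses the standard FileSystem API and would additionally need
        // "import org.apache.hadoop.fs.FileSystem;" at the top of the file.
        // Path outputPath = new Path("file:///D:\\mapreduce\\output");
        // FileSystem fs = FileSystem.get(outputPath.toUri(), super.getConf());
        // if (fs.exists(outputPath)) {
        //     fs.delete(outputPath, true); // true = delete recursively
        // }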
        // Wait for the job to finish
        boolean b = job.waitForCompletion(true);
        return b ? 0 : 1;
    }
    public static void main(String[] args) throws Exception {
        Configuration entries = new Configuration();
        // Launch the job
        int run = ToolRunner.run(entries, new JobMain(), args);
        System.exit(run); // 0 means success
    }
}
```
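The input and output paths above are hardcoded for a local run. A common variant, sketched below, reads them instead from the command-line arguments that ToolRunner forwards into run(); the argument positions are an illustrative assumption, not part of the original code:

```java
// Inside run(), replace the two hardcoded path lines with:
TextInputFormat.addInputPath(job, new Path(strings[0]));   // first arg: input path
TextOutputFormat.setOutputPath(job, new Path(strings[1])); // second arg: output path
```

With that change, the shaded jar built by `mvn clean package` can be submitted either locally or on the cluster as `hadoop jar <jar-file> JobMain <input> <output>` (prefix JobMain with its package name if it has one).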