WordCount Implementation (IDEA)
Development Environment
- JDK 1.8
- IDEA 2020.3.1
- Hadoop 2.10.1
pom.xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>cn.yangxinn.hadoop</groupId>
    <artifactId>demo</artifactId>
    <version>1.0-SNAPSHOT</version>

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <hadoop.version>2.10.1</hadoop.version>
        <maven.compiler.source>8</maven.compiler.source>
        <maven.compiler.target>8</maven.compiler.target>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.11</version>
            <scope>test</scope>
        </dependency>
    </dependencies>
</project>
WordCount
Notes:
- The input and output paths must be HDFS addresses.
- job.setMapOutputKeyClass(Text.class); declares the map output Key type.
- job.setMapOutputValueClass(IntWritable.class); declares the map output Value type.
- For FileInputFormat.addInputPath(job, inPath); make sure to import org.apache.hadoop.mapreduce.lib.input.FileInputFormat (the new mapreduce API), not the old org.apache.hadoop.mapred version.
package cn.yangxinn.hadoop.demo;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class WordCount {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(WordCount.class); // needed when submitting to a distributed cluster

        // Specify various job-specific parameters
        job.setJobName("myjob");

        // Without a global HDFS configuration, write the address of the active NameNode here
        Path inPath = new Path("hdfs://node1:8020/wc/inpath");
        FileInputFormat.addInputPath(job, inPath);

        Path outPath = new Path("hdfs://node1:8020/wc/outpath");
        if (outPath.getFileSystem(conf).exists(outPath)) {
            outPath.getFileSystem(conf).delete(outPath, true); // true = delete recursively
        }
        FileOutputFormat.setOutputPath(job, outPath);

        job.setMapperClass(MyMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setReducerClass(MyReducer.class);
        // The final (reducer) output types match the map output types here
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // Submit the job, then poll for progress until the job is complete
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
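If you would rather not hard-code the HDFS paths, the driver can also be written against Hadoop's Tool/ToolRunner interface so the paths arrive as command-line arguments. The following is a minimal sketch, not part of the original program; the class name WordCountTool and the argument order (input first, output second) are illustrative assumptions.

package cn.yangxinn.hadoop.demo;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

// Hypothetical variant: same job wiring as WordCount, but paths come from args
public class WordCountTool extends Configured implements Tool {
    @Override
    public int run(String[] args) throws Exception {
        Job job = Job.getInstance(getConf(), "myjob");
        job.setJarByClass(WordCountTool.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));

        Path outPath = new Path(args[1]);
        FileSystem fs = outPath.getFileSystem(getConf());
        if (fs.exists(outPath)) {
            fs.delete(outPath, true); // recursive delete, same as the hard-coded driver
        }
        FileOutputFormat.setOutputPath(job, outPath);

        job.setMapperClass(MyMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setReducerClass(MyReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        return job.waitForCompletion(true) ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        // ToolRunner parses generic options (-D key=value, -fs, ...) before calling run()
        System.exit(ToolRunner.run(new Configuration(), new WordCountTool(), args));
    }
}

It would then be invoked with explicit URIs, e.g. hadoop jar wc.jar cn.yangxinn.hadoop.demo.WordCountTool hdfs://node1:8020/wc/inpath hdfs://node1:8020/wc/outpath.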
MyMapper
- Use Hadoop's writable wrapper classes (Text, IntWritable, ...), not Java primitive types.
- The generic parameters of Mapper must match the Key/Value types actually used: <input key, input value, output key, output value>.
package cn.yangxinn.hadoop.demo;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;
import java.util.StringTokenizer;

// Input: <byte offset of the line, line text>; output: <word, 1>
public class MyMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text(); // reused across calls to avoid creating objects per token

    @Override
    public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Split the line on whitespace and emit <word, 1> for each token
        StringTokenizer itr = new StringTokenizer(value.toString());
        while (itr.hasMoreTokens()) {
            word.set(itr.nextToken());
            context.write(word, one);
        }
    }
}
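StringTokenizer with no explicit delimiter splits on whitespace (spaces, tabs, newlines), which is why a line such as "hello welcome world" yields three tokens. A quick standalone check, purely illustrative:

import java.util.StringTokenizer;

public class TokenizerDemo {
    public static void main(String[] args) {
        StringTokenizer itr = new StringTokenizer("hello welcome world");
        while (itr.hasMoreTokens()) {
            System.out.println(itr.nextToken()); // prints hello, welcome, world on separate lines
        }
    }
}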
MyReducer
- The generic parameters of Reducer must match the Key/Value types actually used.
- During the shuffle, keys are sorted (Text compares lexicographically by bytes, i.e. ASCII order for ASCII text) and all values for the same key are grouped together, which makes the per-key iteration convenient.
package cn.yangxinn.hadoop.demo;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * reduce: called once per distinct key, iterating over the collection of values for that key.
 */
public class MyReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    private IntWritable result = new IntWritable();

    @Override
    public void reduce(Text key, Iterable<IntWritable> values,
                       Context context) throws IOException, InterruptedException {
        int sum = 0;
        for (IntWritable val : values) {
            sum += val.get(); // add up the 1s emitted by the mapper for this word
        }
        result.set(sum);
        context.write(key, result); // <word, total count>
    }
}
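The byte-order sorting mentioned above can be checked directly with Text's compareTo; this small snippet is illustrative only:

import org.apache.hadoop.io.Text;

public class TextOrderDemo {
    public static void main(String[] args) {
        // Text compares by raw UTF-8 bytes, so "Hi" < "haha" < "hi"
        System.out.println(new Text("Hi").compareTo(new Text("haha"))); // negative
        System.out.println(new Text("haha").compareTo(new Text("hi"))); // negative
    }
}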
Running the Job
Package the project as a jar (wc.jar) and upload it to the cluster; a build snippet follows.
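One way to build the jar with Maven is the maven-jar-plugin. This fragment is a sketch and is not in the original pom.xml; the finalName wc is an assumption chosen to match the wc.jar used in the commands below. Setting Main-Class in the manifest also lets you run hadoop jar wc.jar without spelling out the class name.

<build>
    <finalName>wc</finalName> <!-- assumption: produces target/wc.jar -->
    <plugins>
        <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-jar-plugin</artifactId>
            <configuration>
                <archive>
                    <manifest>
                        <!-- with Main-Class set, "hadoop jar wc.jar" needs no class argument -->
                        <mainClass>cn.yangxinn.hadoop.demo.WordCount</mainClass>
                    </manifest>
                </archive>
            </configuration>
        </plugin>
    </plugins>
</build>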
Create the input directory on HDFS:
hdfs dfs -mkdir -p /wc/inpath
Create a file with the words to count:
vim hello.txt
Contents:
hello welcome world
hi nihao hello
wordl hi haha
hi hi welcome well
Upload the file to HDFS:
hdfs dfs -put hello.txt /wc/inpath
Run the WordCount job:
hadoop jar wc.jar cn.yangxinn.hadoop.demo.WordCount
View the results:
hdfs dfs -cat /wc/outpath/*
haha 1
hello 2
hi 4
nihao 1
welcome 2
well 1
wordl 1
world 1