Edited in IntelliJ IDEA 2021; the code actually runs on an Ubuntu 18.04 virtual machine.
Hadoop version: 3.0.0.
1. Import the Hadoop dependency JARs
How to import: Project Structure -> Project Settings -> Modules -> Dependencies -> "+", then add:
{HADOOP_HOME}/share/hadoop/common/hadoop-common-3.0.0.jar
{HADOOP_HOME}/share/hadoop/common/hadoop-nfs-3.0.0.jar
{HADOOP_HOME}/share/hadoop/common/lib/*
{HADOOP_HOME}/share/hadoop/hdfs/hadoop-hdfs-3.0.0.jar
{HADOOP_HOME}/share/hadoop/hdfs/hadoop-hdfs-nfs-3.0.0.jar
{HADOOP_HOME}/share/hadoop/hdfs/lib/*
{HADOOP_HOME}/share/hadoop/mapreduce/*
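If the project is managed with Maven instead of manually attached JARs, a single hadoop-client dependency is a common alternative that pulls in the same classes (a sketch; match the version to your cluster):

<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-client</artifactId>
    <version>3.0.0</version>
</dependency>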
2. WordCount code
The code comes from the official example: https://hadoop.apache.org/docs/r1.0.4/cn/mapred_tutorial.html (note that this page documents the old org.apache.hadoop.mapred API, which Hadoop 3 still supports).
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.*;

import java.io.IOException;
import java.util.Iterator;
import java.util.StringTokenizer;

public class WordCount {

    // Mapper: splits each input line into tokens and emits a (word, 1) pair per token.
    public static class Map extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> {
        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        public void map(LongWritable key, Text value, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
            String line = value.toString();
            StringTokenizer tokenizer = new StringTokenizer(line);
            while (tokenizer.hasMoreTokens()) {
                word.set(tokenizer.nextToken());
                output.collect(word, one);
            }
        }
    }

    // Reducer: sums the counts for each word; also reused as the combiner below.
    public static class Reduce extends MapReduceBase implements Reducer<Text, IntWritable, Text, IntWritable> {
        public void reduce(Text key, Iterator<IntWritable> values, OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
            int sum = 0;
            while (values.hasNext()) {
                sum += values.next().get();
            }
            output.collect(key, new IntWritable(sum));
        }
    }

    public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf(WordCount.class);
        conf.setJobName("wordcount");

        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);

        conf.setMapperClass(Map.class);
        conf.setCombinerClass(Reduce.class);  // combiner pre-aggregates counts on the map side
        conf.setReducerClass(Reduce.class);

        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);

        // args[0] = HDFS input path, args[1] = HDFS output path (must not exist yet)
        FileInputFormat.setInputPaths(conf, new Path(args[0]));
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));

        JobClient.runJob(conf);
    }
}
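For comparison, here is the same job written against the newer org.apache.hadoop.mapreduce API, the style used in the current Hadoop MapReduce tutorial; the class name WordCountNewApi is mine, everything else is the standard new-API equivalent:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;
import java.util.StringTokenizer;

public class WordCountNewApi {

    // New-API mapper: emits (word, 1) per token; output goes through Context.
    public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {
        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        @Override
        protected void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }

    // New-API reducer: values arrive as an Iterable instead of an Iterator.
    public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
        private IntWritable result = new IntWritable();

        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "word count");
        job.setJarByClass(WordCountNewApi.class);
        job.setMapperClass(TokenizerMapper.class);
        job.setCombinerClass(IntSumReducer.class);
        job.setReducerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}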
3. Package into a JAR
The compiler needs the Hadoop JARs on the classpath; the hadoop classpath command prints them:
javac -classpath $(hadoop classpath) WordCount.java
jar -cvf WordCount.jar ./WordCount*.class
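To double-check that the class files made it into the archive, list its contents:
jar -tf WordCount.jar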
4. Run the MapReduce job
First, check the wordcount directory on HDFS:
hadoop fs -ls /wordcount/
input holds the input files, and sampleoutput is the output directory.
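If the input directory does not exist yet, create it and upload some text first (input.txt is a hypothetical local file name):
hadoop fs -mkdir -p /wordcount/input
hadoop fs -put input.txt /wordcount/input/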
The output directory must be deleted before every run; otherwise the job fails with org.apache.hadoop.mapred.FileAlreadyExistsException:
hadoop fs -rm -r /wordcount/sampleoutput
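Adding -f keeps the command from complaining when the directory is already gone:
hadoop fs -rm -r -f /wordcount/sampleoutput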
Then run the MapReduce program:
hadoop jar WordCount.jar WordCount /wordcount/input/ /wordcount/sampleoutput
The first argument, WordCount, is the class name; if the class is declared inside a package, write the fully qualified name (package + class).
The second argument is the input path and the third is the output path.
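For example, if WordCount were declared in a hypothetical package com.example, the invocation would become:
hadoop jar WordCount.jar com.example.WordCount /wordcount/input/ /wordcount/sampleoutput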
If the job succeeds, you should see a line like this in the log:
INFO mapreduce.Job: Job job_local1565413150_0001 completed successfully
5. Check the output
hadoop fs -cat /wordcount/sampleoutput/* | head -n 5
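Each output line is a word and its count separated by a tab (TextOutputFormat's default separator). To copy the whole result directory to the local file system instead:
hadoop fs -get /wordcount/sampleoutput ./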
6. Monitor the cluster status
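Note that the job ID in the log above starts with job_local, which means the job ran in the LocalJobRunner rather than on the cluster. Jobs actually submitted to YARN can be listed from the command line, and with a default configuration they also show up in the ResourceManager web UI on port 8088:
yarn application -list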