1) Create a Maven project with Eclipse or IDEA
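If you prefer the command line, the quick-start archetype is a convenient way to generate the same project skeleton; the groupId and artifactId below are assumptions, chosen to match the jar name shiyan4-1.0-SNAPSHOT.jar used in step 5:
mvn archetype:generate -DgroupId=org.example -DartifactId=shiyan4 -DarchetypeArtifactId=maven-archetype-quickstart -DinteractiveMode=false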
2) Edit pom.xml and add <dependencies></dependencies> and <build></build> nodes, as follows:
Add the dependencies:
<dependencies>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>2.7.7</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId>
        <version>2.7.7</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-mapreduce-client-core</artifactId>
        <version>2.7.7</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>2.7.7</version>
    </dependency>
</dependencies>
Then add the build section:
<build>
    <plugins>
        <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-jar-plugin</artifactId>
            <version>2.6</version>
            <configuration>
                <archive>
                    <manifest>
                        <!-- the class containing main(); change as needed -->
                        <mainClass>org.example.WordCountMain</mainClass>
                    </manifest>
                </archive>
            </configuration>
        </plugin>
    </plugins>
</build>
Change org.example.WordCountMain above to match your own main class.
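For reference, here is a minimal sketch of how the whole pom.xml fits together; the groupId, artifactId, and version are assumptions matching the jar name shiyan4-1.0-SNAPSHOT.jar used in step 5:
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>org.example</groupId>
    <artifactId>shiyan4</artifactId>
    <version>1.0-SNAPSHOT</version>
    <dependencies>
        <!-- the four hadoop dependencies shown above -->
    </dependencies>
    <build>
        <!-- the maven-jar-plugin configuration shown above -->
    </build>
</project>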
3) Write the WordCount code
First create a Java class and add the following code:
package org.example;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
import java.util.StringTokenizer;
public class WordCountMain {

    // Mapper: splits each input line into tokens and emits (word, 1).
    public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {
        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        @Override
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }
    // Reducer (also used as the combiner): sums the counts for each word.
    public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
        private IntWritable result = new IntWritable();

        @Override
        public void reduce(Text key, Iterable<IntWritable> values,
                           Context context) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }
    public static void main(String[] args) throws Exception {
        if (args == null || args.length < 3) {
            // Fall back to defaults: job name, input path, output path.
            // (Assigning into the original args array would throw when it is null or too short.)
            args = new String[]{"wordcount", "/input/word.txt", "/output/wordcountpara1"};
        }
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, args[0]);
        job.setJarByClass(WordCountMain.class);
        job.setMapperClass(TokenizerMapper.class);
        job.setCombinerClass(IntSumReducer.class);
        job.setReducerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // NLineInputFormat gives each map task N lines of input (1 by default).
        job.setInputFormatClass(NLineInputFormat.class);
        // input file path
        FileInputFormat.addInputPath(job, new Path(args[1]));
        // output directory path
        FileOutputFormat.setOutputPath(job, new Path(args[2]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
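A note on the input format: NLineInputFormat assigns each map task N input lines (1 by default), so a large file spawns many map tasks. If that is not what you want, either raise N or drop the setInputFormatClass call to fall back to the default TextInputFormat. A minimal sketch of raising N (the value 100 is an arbitrary example):
// give each map task up to 100 input lines instead of the default 1
NLineInputFormat.setNumLinesPerSplit(job, 100);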
4) Package the project into a jar with mvn clean package, copy the jar to your desktop, and then upload it to the Ubuntu server
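Before running, the input file must already be on HDFS. If it is not, something like the following uploads it (the local file name data.txt is an assumption matching the command in step 5):
hdfs dfs -mkdir -p /input
hdfs dfs -put data.txt /input/data.txt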
5) Run the jar with the following command:
hadoop jar shiyan4-1.0-SNAPSHOT.jar org.example.WordCountMain wordcount /input/data.txt /output/wx
shiyan4-1.0-SNAPSHOT.jar is the name of the jar
org.example.WordCountMain is the main class (the same one configured in pom.xml above)
wordcount is the job name (args[0] in the code, which is why the command passes three arguments)
/input/data.txt is the path of the input file on HDFS (args[1])
/output/wx is the output path (args[2])
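Note that the job fails if the output directory already exists (FileOutputFormat checks for this), so remove it before re-running:
hdfs dfs -rm -r /output/wx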
6) When the job finishes, view the result with the following command:
hdfs dfs -cat /output/wx/part-r-00000
/output/wx/part-r-00000 is the output file written by the reducer (you can browse the output directory on HDFS to confirm the name)
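To see all of the files the job produced (including the _SUCCESS marker), list the output directory:
hdfs dfs -ls /output/wx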