Driver class code:
package part4;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class WordCount extends Configured implements Tool {
    public static void main(String[] args) throws Exception {
        String[] myArgs = {"/user/root/wordcountin/a.txt", "/user/root/wordcountout", "Hello", "\\s"};
        System.exit(ToolRunner.run(new WordCount(), myArgs));
    }

    @Override
    public Configuration getConf() {
        Configuration conf = new Configuration();
        // Required when submitting from Windows to a Linux cluster
        conf.setBoolean("mapreduce.app-submission.cross-platform", true);
        conf.set("fs.defaultFS", "hdfs://master:8020");
        conf.set("mapreduce.framework.name", "yarn");
        conf.set("yarn.resourcemanager.address", "master:8032");
        conf.set("yarn.resourcemanager.scheduler.address", "master:8030");
        conf.set("mapreduce.jobhistory.address", "master:10020");
        // Local path of the job jar to ship to the cluster
        conf.set("mapreduce.job.jar", "D:\\Documents\\大数据\\作业\\homework2\\target\\homework2-1.0-SNAPSHOT.jar");
        return conf;
    }

    @Override
    public int run(String[] args) throws Exception {
        // Get the configuration
        Configuration conf = getConf();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length < 4) {
            System.err.println("Must have 4 arguments: input_path, output_path, word, split");
            System.exit(2);
        }
        // Pass the target word and the split regex to the tasks
        conf.set("word", otherArgs[2]);
        conf.set("split", otherArgs[3]);
        // Set up the job
        Job job = Job.getInstance(conf);
        job.setJarByClass(WordCount.class);
        job.setMapperClass(MyMapper.class);
        job.setReducerClass(MyReducer.class);
        job.setJobName("homework2 part4");
        // Input path
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        // Output path
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        // Mapper output key class
        job.setMapOutputKeyClass(NullWritable.class);
        // Reducer output key class
        job.setOutputKeyClass(Text.class);
        // Output value class for both Mapper and Reducer
        job.setOutputValueClass(IntWritable.class);
        return job.waitForCompletion(true) ? 0 : 1;
    }
}
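For context, the MyMapper and MyReducer classes referenced by the driver are not shown in the original post. Below is a minimal sketch of what they might look like, inferred from the driver's type setup (NullWritable/IntWritable map output, Text/IntWritable final output) and the word/split values stored in the Configuration; the class bodies are my assumption, not the original code:

// MyMapper.java (hypothetical reconstruction)
package part4;

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class MyMapper extends Mapper<LongWritable, Text, NullWritable, IntWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // "word" and "split" were set on the Configuration by the driver
        String word = context.getConfiguration().get("word");
        String split = context.getConfiguration().get("split");
        int count = 0;
        for (String token : value.toString().split(split)) {
            if (token.equals(word)) {
                count++;
            }
        }
        // Emit the per-line count of the target word
        context.write(NullWritable.get(), new IntWritable(count));
    }
}

// MyReducer.java (hypothetical reconstruction)
package part4;

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class MyReducer extends Reducer<NullWritable, IntWritable, Text, IntWritable> {
    @Override
    protected void reduce(NullWritable key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        // Sum the per-line counts and emit the word with its total
        int sum = 0;
        for (IntWritable v : values) {
            sum += v.get();
        }
        context.write(new Text(context.getConfiguration().get("word")), new IntWritable(sum));
    }
}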
My Hadoop version is 2.6.5, and I got the following error:
Exception in thread "main" java.io.IOException: Cannot initialize Cluster. Please check your configuration for mapreduce.framework.name and the correspond server addresses.
After some testing I narrowed the problem down to the line conf.set("mapreduce.framework.name", "yarn");. If I deleted it, the job ran (it fell back to the local runner), but then it was no longer submitted to YARN.
It turned out the real cause was an incomplete set of dependencies. Here is the dependency section of my pom.xml:
<dependencies>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId>
        <version>2.6.5</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-common</artifactId>
        <version>2.6.5</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-mapreduce-client-core</artifactId>
        <version>2.6.5</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-mapreduce-client-common</artifactId>
        <version>2.6.5</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-auth</artifactId>
        <version>2.6.5</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-mapreduce-client-app</artifactId>
        <version>2.6.5</version>
    </dependency>
    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
        <version>2.6.5</version>
    </dependency>
</dependencies>
In the end it was hadoop-mapreduce-client-jobclient that did the trick. That artifact contains YarnClientProtocolProvider, which the MapReduce Cluster class discovers via Java's ServiceLoader; without it on the classpath there is no provider that can handle mapreduce.framework.name=yarn, hence the "Cannot initialize Cluster" error. If you are not sure which dependency is missing, try adding all of the Hadoop artifacts listed above, or depend on the aggregator artifact hadoop-client, which pulls in hadoop-mapreduce-client-jobclient transitively.
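If you want to verify which protocol providers are actually visible on your classpath, a quick diagnostic (my own addition, not part of the original troubleshooting) is to list the ClientProtocolProvider implementations the same way Cluster does:

// ProviderCheck.java (hypothetical helper, not part of the homework code)
package part4;

import java.util.ServiceLoader;
import org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider;

public class ProviderCheck {
    public static void main(String[] args) {
        // Cluster initializes itself from this same ServiceLoader lookup;
        // YarnClientProtocolProvider should appear here once
        // hadoop-mapreduce-client-jobclient is on the classpath.
        for (ClientProtocolProvider p : ServiceLoader.load(ClientProtocolProvider.class)) {
            System.out.println(p.getClass().getName());
        }
    }
}

Running mvn dependency:tree is another way to confirm which Hadoop artifacts actually end up on the classpath.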