Eclipse远程调用hadoop出错:java.lang.RuntimeException: java.lang.ClassNotFoundException: Job$Mapper
转自:
http://my.oschina.net/mkh/blog/340112
http://www.cnblogs.com/chenying99/archive/2013/06/02/3113474.html
http://www.linuxidc.com/Linux/2012-02/54711.htm (http://techme.lofter.com/post/ccd6e_1e772b)
eclipse直接提交mapreduce任务所需环境配置代码如下所示:
添加类EJob
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
|
import
java.io.File;
import
java.io.FileInputStream;
import
java.io.FileOutputStream;
import
java.io.IOException;
import
java.net.URL;
import
java.net.URLClassLoader;
import
java.util.ArrayList;
import
java.util.Iterator;
import
java.util.List;
import
java.util.jar.JarEntry;
import
java.util.jar.JarOutputStream;
import
java.util.jar.Manifest;
public class EJob {

    // Classpath entries registered via addClasspath(); consumed by getClassLoader().
    private static List<URL> classPath = new ArrayList<URL>();

    /**
     * Packages the compiled classes under {@code root} into a temporary jar so a
     * MapReduce job submitted from the IDE can ship its classes to the cluster.
     * The jar is deleted on JVM exit via a shutdown hook.
     *
     * @param root directory holding the compiled classes (e.g. "bin")
     * @return the temporary jar file, or {@code null} when {@code root} does not exist
     * @throws IOException if the jar cannot be written
     */
    public static File createTempJar(String root) throws IOException {
        if (!new File(root).exists()) {
            return null;
        }
        Manifest manifest = new Manifest();
        manifest.getMainAttributes().putValue("Manifest-Version", "1.0");
        final File jarFile = File.createTempFile("EJob-", ".jar",
                new File(System.getProperty("java.io.tmpdir")));
        // Best-effort cleanup of the temp jar when the JVM exits.
        Runtime.getRuntime().addShutdownHook(new Thread() {
            public void run() {
                jarFile.delete();
            }
        });
        JarOutputStream out = new JarOutputStream(new FileOutputStream(jarFile), manifest);
        try {
            createTempJarInner(out, new File(root), "");
            out.flush();
        } finally {
            // FIX: close the jar stream even when writing fails; the original
            // only closed on success and leaked the file handle on exception.
            out.close();
        }
        return jarFile;
    }

    /**
     * Recursively writes {@code f} into the jar. Directories recurse with their
     * name appended to {@code base}; regular files become jar entries named
     * {@code base} and have their bytes copied in.
     */
    private static void createTempJarInner(JarOutputStream out, File f,
            String base) throws IOException {
        if (f.isDirectory()) {
            File[] fl = f.listFiles();
            // FIX: listFiles() returns null on I/O error or permission denial;
            // skip such directories instead of throwing NullPointerException.
            if (fl == null) {
                return;
            }
            if (base.length() > 0) {
                base = base + "/";
            }
            for (int i = 0; i < fl.length; i++) {
                createTempJarInner(out, fl[i], base + fl[i].getName());
            }
        } else {
            out.putNextEntry(new JarEntry(base));
            FileInputStream in = new FileInputStream(f);
            try {
                byte[] buffer = new byte[1024];
                int n = in.read(buffer);
                while (n != -1) {
                    out.write(buffer, 0, n);
                    n = in.read(buffer);
                }
            } finally {
                // FIX: guarantee the input stream is closed even if a write
                // fails (the original leaked it on exception).
                in.close();
            }
        }
    }

    /**
     * Returns a class loader exposing every entry registered through
     * {@link #addClasspath(String)}, delegating to the thread-context loader
     * (falling back to this class's loader, then the system loader).
     */
    public static ClassLoader getClassLoader() {
        ClassLoader parent = Thread.currentThread().getContextClassLoader();
        if (parent == null) {
            parent = EJob.class.getClassLoader();
        }
        if (parent == null) {
            parent = ClassLoader.getSystemClassLoader();
        }
        return new URLClassLoader(classPath.toArray(new URL[0]), parent);
    }

    /**
     * Registers an existing file or directory as a classpath entry.
     * Null/empty/missing paths and duplicates are ignored.
     */
    public static void addClasspath(String component) {
        if ((component != null) && (component.length() > 0)) {
            try {
                File f = new File(component);
                if (f.exists()) {
                    // FIX: toURI().toURL() instead of the deprecated File.toURL(),
                    // which does not escape special characters correctly.
                    URL key = f.getCanonicalFile().toURI().toURL();
                    if (!classPath.contains(key)) {
                        classPath.add(key);
                    }
                }
            } catch (IOException e) {
                // FIX: don't swallow silently — report the unusable entry while
                // keeping best-effort semantics (a bad path must not abort the job).
                System.err.println("EJob.addClasspath: cannot resolve \"" + component + "\": " + e);
            }
        }
    }
}
|
修改后的wordcount代码如下(转自别人的,我没测过)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
14
7
148
149
|
import
java.io.File;
import
java.io.IOException;
import
java.text.SimpleDateFormat;
import
java.util.Date;
import
java.util.StringTokenizer;
import
org.apache.hadoop.conf.Configuration;
import
org.apache.hadoop.fs.Path;
import
org.apache.hadoop.fs.permission.FsPermission;
import
org.apache.hadoop.io.IntWritable;
import
org.apache.hadoop.io.Text;
import
org.apache.hadoop.mapred.JobConf;
import
org.apache.hadoop.mapreduce.Job;
import
org.apache.hadoop.mapreduce.Mapper;
import
org.apache.hadoop.mapreduce.Reducer;
import
org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import
org.apache.hadoop.util.GenericOptionsParser;
public
class
WordCount {
/*
* 用户自定义map函数,对以<key, value>为输入的结果文件进行处理
* Map过程需要继承org.apache.hadoop.mapreduce包中Mapper类,并重写其map方法。
* 通过在map方法中添加两句把key值和value值输出到控制台的代码
* ,可以发现map方法中value值存储的是文本文件中的一行(以回车符为行结束标记),而key值为该行的首字母相对于文本文件的首地址的偏移量。
* 然后StringTokenizer类将每一行拆分成为一个个的单词
* ,并将<word,1>作为map方法的结果输出,其余的工作都交有MapReduce框架处理。 每行数据调用一次 Tokenizer:单词分词器
*/
public
static
class
TokenizerMapper
extends
Mapper<Object, Text, Text, IntWritable> {
private
final
static
IntWritable one =
new
IntWritable(
1
);
private
Text word =
new
Text();
/*
* 重写Mapper类中的map方法
*/
public
void
map(Object key, Text value, Context context)
throws
IOException, InterruptedException {
StringTokenizer itr =
new
StringTokenizer(value.toString());
//System.out.println(value.toString());
while
(itr.hasMoreTokens()) {
word.set(itr.nextToken());
// 获取下个字段的值并写入文件
context.write(word, one);
}
}
}
/*
* 用户自定义reduce函数,如果有多个热度测,则每个reduce处理自己对应的map结果数据
* Reduce过程需要继承org.apache.hadoop.mapreduce包中Reducer类,并重写其reduce方法。
* Map过程输出<key,values>中key为单个单词,而values是对应单词的计数值所组成的列表,Map的输出就是Reduce的输入,
* 所以reduce方法只要遍历values并求和,即可得到某个单词的总次数。
*/
public
static
class
IntSumReducer
extends
Reducer<Text, IntWritable, Text, IntWritable> {
private
IntWritable result =
new
IntWritable();
public
void
reduce(Text key, Iterable<IntWritable> values,
Context context)
throws
IOException, InterruptedException {
int
sum =
0
;
for
(IntWritable val : values) {
sum += val.get();
}
result.set(sum);
context.write(key, result);
}
}
public
static
void
main(String[] args)
throws
Exception {
/**
* 环境变量配置
*/
File jarFile = EJob.createTempJar( "bin" );
ClassLoader classLoader = EJob.getClassLoader();
Thread.currentThread().setContextClassLoader(classLoader);
/**
* 连接hadoop集群配置
*/
Configuration conf =
new
Configuration(
true
);
conf.set(
"fs.default.name"
,
"hdfs://192.168.1.111:9000"
);
conf.set(
"hadoop.job.user"
,
"hadoop"
);
conf.set(
"mapreduce.framework.name"
,
"yarn"
);
conf.set(
"mapreduce.jobtracker.address"
,
"192.168.1.100:9001"
);
conf.set(
"yarn.resourcemanager.hostname"
,
"192.168.1.100"
);
conf.set(
"yarn.resourcemanager.admin.address"
,
"192.168.1.100:8033"
);
conf.set(
"yarn.resourcemanager.address"
,
"192.168.1.100:8032"
);
conf.set(
"yarn.resourcemanager.resource-tracker.address"
,
"192.168.1.100:8036"
);
conf.set(
"yarn.resourcemanager.scheduler.address"
,
"192.168.1.100:8030"
);
String[] otherArgs =
new
String[
2
];
otherArgs[
0
] =
"hdfs://192.168.1.111:9000/test_in"
;//计算原文件目录,需提前在里面存入文件
String time =
new
SimpleDateFormat(
"yyyyMMddHHmmss"
).format(
new
Date());
otherArgs[
1
] =
"hdfs://192.168.1.111:9000/test_out/"
+ time;//计算后的计算结果存储目录,每次程序执行的结果目录不能相同,所以添加时间标签
/*
* setJobName()方法命名这个Job。对Job进行合理的命名有助于更快地找到Job,
* 以便在JobTracker和Tasktracker的页面中对其进行监视
*/
Job job =
new
Job(conf,
"word count"
);
job.setJarByClass(WordCount.
class
);
((JobConf) job.getConfiguration()).setJar(jarFile.toString()); /
/环境变量调用,添加此句则可在eclipse中直接提交mapreduce任务,如果将该java文件打成jar包,需要将该句注释掉,否则在执行时反而找不到环境变量
// job.setMaxMapAttempts(100);//设置最大试图产生底map数量,该命令不一定会设置该任务运行过车中的map数量
// job.setNumReduceTasks(5);//设置reduce数量,即最后生成文件的数量
/*
* Job处理的Map(拆分)、Combiner(中间结果合并)以及Reduce(合并)的相关处理类。
* 这里用Reduce类来进行Map产生的中间结果合并,避免给网络数据传输产生压力。
*/
job.setMapperClass(TokenizerMapper.
class
);
// 执行用户自定义map函数
job.setCombinerClass(IntSumReducer.
class
);
// 对用户自定义map函数的数据处理结果进行合并,可以减少带宽消耗
job.setReducerClass(IntSumReducer.
class
);
// 执行用户自定义reduce函数
/*
* 接着设置Job输出结果<key,value>的中key和value数据类型,因为结果是<单词,个数>,
* 所以key设置为"Text"类型,相当于Java中String类型
* 。Value设置为"IntWritable",相当于Java中的int类型。
*/
job.setOutputKeyClass(Text.
class
);
job.setOutputValueClass(IntWritable.
class
);
/*
* 加载输入文件夹或文件路径,即输入数据的路径
* 将输入的文件数据分割成一个个的split,并将这些split分拆成<key,value>对作为后面用户自定义map函数的输入
* 其中,每个split文件的大小尽量小于hdfs的文件块大小
* (默认64M),否则该split会从其它机器获取超过hdfs块大小的剩余部分数据,这样就会产生网络带宽造成计算速度影响
* 默认使用TextInputFormat类型,即输入数据形式为文本类型数据文件
*/
System.out.println(
"Job start!"
);
FileInputFormat.addInputPath(job,
new
Path(otherArgs[
0
]));
/*
* 设置输出文件路径 默认使用TextOutputFormat类型,即输出数据形式为文本类型文件,字段间默认以制表符隔开
*/
FileOutputFormat.setOutputPath(job,
new
Path(otherArgs[
1
]));
/*
* 开始运行上面的设置和算法
*/
if
(job.waitForCompletion(
true
)) {
System.out.println(
"ok!"
);
}
else
{
System.out.println(
"error!"
);
System.exit(
0
);
}
}
}
|
或者这样调用(本人测试过)
public static void main(String[] args) throws Exception {
    // Bundle the compiled classes under "bin" into a temp jar and make them
    // visible to the submitting thread, so the job can run from Eclipse.
    File jobJar = EJob.createTempJar("bin");
    EJob.addClasspath("/usr/hadoop-1.2.1/conf"); // seems optional; appears to have no effect
    ClassLoader submitLoader = EJob.getClassLoader();
    Thread.currentThread().setContextClassLoader(submitLoader);

    // Point the client at the remote JobTracker; without this the job
    // submission fails (see the error discussed below).
    Configuration conf = new Configuration();
    conf.set("mapred.job.tracker", "Master.Hadoop:54311");

    // Input and output paths come from the command line.
    String[] remaining = new GenericOptionsParser(conf, args).getRemainingArgs();
    if (remaining.length != 2) {
        System.err.println("Usage: wordcount <in> <out>");
        System.exit(2);
    }

    Job wordCountJob = new Job(conf, "word count");
    // Ship the temp jar so the cluster can load the job classes.
    ((JobConf) wordCountJob.getConfiguration()).setJar(jobJar.toString());
    wordCountJob.setJarByClass(WordCount.class);
    wordCountJob.setMapperClass(TokenizerMapper.class);
    wordCountJob.setCombinerClass(IntSumReducer.class);
    wordCountJob.setReducerClass(IntSumReducer.class);
    wordCountJob.setOutputKeyClass(Text.class);
    wordCountJob.setOutputValueClass(IntWritable.class);
    FileInputFormat.addInputPath(wordCountJob, new Path(remaining[0]));
    FileOutputFormat.setOutputPath(wordCountJob, new Path(remaining[1]));

    // Block until the job finishes; exit 0 on success, 1 on failure.
    int exitCode;
    if (wordCountJob.waitForCompletion(true)) {
        exitCode = 0;
    } else {
        exitCode = 1;
    }
    System.exit(exitCode);
}
不加 conf.set("mapred.job.tracker", "Master.Hadoop:54311");会出错
参考文献:https://issues.apache.org/jira/browse/HADOOP-8089
错误信息如下:
ERROR security.UserGroupInformation: PriviledgedActionException as: hadoop cause:java.io.IOException Failed to set permissions of path:\usr\hadoop\tmp\mapred\staging\hadoop753422487\.staging to 0700 Exception in thread "main" java.io.IOException: Failed to set permissions of path: \usr\hadoop\tmp \mapred\staging\hadoop753422487\.staging to 0700
解决方法:
Configuration conf = new Configuration();
conf.set("mapred.job.tracker", "[server]:9001");