Hadoop 在 Java 项目中的应用:一般先调用本地 Hadoop 进行调试,运行成功后再上传到 Hadoop 集群中运行。
/**
 * Submits a WordCount MapReduce job to a Hadoop cluster using the classic
 * {@code org.apache.hadoop.mapred} API.
 *
 * <p>Base settings come from the shared {@code ConfigurationCommon.conf};
 * the cluster endpoints for the {@code dinfo213} cluster are overridden
 * below. Input/output paths are HDFS paths on that cluster.
 */
public class RunJob {

    public static void main(String[] args) throws IOException, InterruptedException {
        Configuration config = ConfigurationCommon.conf;
        // Cluster endpoints. "fs.default.name" is the legacy key for
        // "fs.defaultFS" and is still honored by current Hadoop releases.
        config.set("fs.default.name", "hdfs://dinfo213:9000/");
        config.set("hadoop.job.user", "hadoop");
        config.set("mapred.job.tracker", "dinfo213:9001");

        String inputPath = "/input/wordcount.txt";
        Path outputPath = new Path("/output/yjh/word");

        JobConf conf = new JobConf(config, RunJob.class);
        conf.setJobName("yangjianghong");
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);
        conf.setMapperClass(WordCountMapper.class);
        conf.setReducerClass(WordCountReducer.class);
        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);
        // Locate the job jar from this class rather than a hard-coded local path,
        // so the same code works regardless of where the jar is built.
        conf.setJarByClass(RunJob.class);
        FileInputFormat.setInputPaths(conf, new Path(inputPath));
        FileOutputFormat.setOutputPath(conf, outputPath);

        try {
            // Blocks until the job completes; throws if the job fails.
            JobClient.runJob(conf);
        } catch (Exception e) {
            // Previously the stack trace was printed and main returned
            // normally, so the process exited with status 0 even on failure.
            // Surface the failure to the caller/scheduler via a non-zero exit.
            e.printStackTrace();
            System.exit(1);
        }
    }
}
通过配置文件读取集群信息。