pom:
<dependencies> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-common</artifactId> <version>2.6.0</version> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-client</artifactId> <version>2.6.0</version> </dependency> </dependencies> <build> <resources> <resource> <directory>${project.basedir}/src/main/resources</directory> </resource> </resources> <plugins> <plugin> <!-- compiles the Java sources (Java 8, UTF-8) --> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-compiler-plugin</artifactId> <version>3.2</version> <configuration> <source>1.8</source> <target>1.8</target> <encoding>UTF-8</encoding> </configuration> <executions> <execution> <phase>compile</phase> <goals> <goal>compile</goal> </goals> </execution> </executions> </plugin> <!-- NOTE(review): no <executions> binding on this plugin, so the jar-with-dependencies assembly is only produced when 'mvn assembly:single' is invoked explicitly --> <plugin> <artifactId>maven-assembly-plugin</artifactId> <version>2.6</version> <configuration> <descriptorRefs> <descriptorRef>jar-with-dependencies</descriptorRef> </descriptorRefs> </configuration> </plugin> </plugins> </build>
java代码:
//HADOOP_USER_NAME设置hadoop用户,查询本用户的远程任务
System.setProperty( "HADOOP_USER_NAME","xiaoyuefei" ); //不要加false(有毒不然连不上resourcemanager) Configuration conf=new Configuration(); conf.set("fs.defaultFS", "hdfs://***:8020"); conf.set("mapreduce.framework.name", "yarn"); conf.set("yarn.resourcemanager.address", "http://***:8032"); conf.set("mapreduce.app-submission.cross-platform", "true"); //这个很关键(classloder加载class) conf.set("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName() ); int outtime = 120; JobClient jobClient = new JobClient(conf); System.out.println("jobClient : " + jobClient); JobStatus[] jobsToComplete = null; try { jobsToComplete = jobClient.jobsToComplete(); for (JobStatus e : jobsToComplete) { long startTime = e.getStartTime(); System.out.println(e.getJobName() + " start time : " + new Date(startTime)); if (System.currentTimeMillis() - startTime > outtime * 60 * 1000) { JobID jobID = e.getJobID(); jobClient.getJob(jobID).killJob(); System.out.println("********************************************************"); System.out.println("job killed : " + jobID + " at " + new Date()); System.out.println("********************************************************"); FileWriter fileWriter = new FileWriter("/data1/shell/job_control/job_monitor/logger.txt", true); fileWriter.write("job killed : " + jobID + " at " + new Date() + "\t"); fileWriter.write(jobID + " Start time is :" + new Date(startTime)); fileWriter.write("\n"); fileWriter.flush(); fileWriter.close(); } } } catch (IOException e2) { e2.printStackTrace(); } finally { jobClient.close(); } }
注:
1.本代码用于杀死超时任务
2.先用 mvn assembly:single 打包出 jar-with-dependencies,再用 hadoop jar 命令执行