// Imports for the packages/classes this method depends on.
// (HadoopJobUtils and HadoopSecureWrapperUtils are assumed to live in this
// class's own package, so they need no explicit import.)
import azkaban.flow.CommonJobProperties;
import azkaban.utils.Props;

import java.io.File;
import java.security.PrivilegedExceptionAction;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.log4j.Logger;
@Override
public void cancel() throws InterruptedException {
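  // First stop the local child process: try a graceful soft kill (TERM),
  // then escalate to a hard kill (KILL) if it does not exit in time.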
  if (process == null) {
    throw new IllegalStateException("Not started.");
  }
  boolean killed = process.softKill(KILL_TIME_MS, TimeUnit.MILLISECONDS);
  if (!killed) {
    warn("Kill with signal TERM failed. Killing with KILL signal.");
    process.hardKill();
  }
  // Next, kill any Hadoop jobs this job spawned on the YARN cluster.
  String azExecId = jobProps.getString(CommonJobProperties.EXEC_ID);
  final String logFilePath =
      String.format("%s/_job.%s.%s.log", getWorkingDirectory(), azExecId, getId());
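  // Scan the job's log file for the YARN application IDs it spawned.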
  Set<String> applicationIds = HadoopJobUtils.findApplicationIdFromLog(logFilePath, getLog());
  if (applicationIds != null && !applicationIds.isEmpty()) {
    info("Found " + applicationIds.size() + " spawned application ID(s) to kill.");
    Props props = new Props();
    props.putAll(getJobProps());
    props.putAll(getSysProps());
    Properties properties = new Properties();
    properties.putAll(jobProps.getFlattened());
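    // If Hadoop security is enabled, the spawned jobs must be killed as the
    // job's proxy user; otherwise they can be killed directly.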
    try {
      if (HadoopSecureWrapperUtils.shouldProxy(properties)) {
        File file = HadoopSecureWrapperUtils.getHadoopTokens(
            HadoopJobUtils.loadHadoopSecurityManager(getSysProps(), getLog()), props, getLog());
        if (file != null) {
          UserGroupInformation proxyUser =
              HadoopSecureWrapperUtils.setupProxyUser(properties, file.getAbsolutePath(), getLog());
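          // Issue the kill calls under the proxy user's credentials.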
          proxyUser.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
              HadoopJobUtils.killAllSpawnedHadoopJobs(logFilePath, getLog());
              return null;
            }
          });
        }
      } else {
        HadoopJobUtils.killAllSpawnedHadoopJobs(logFilePath, getLog());
      }
    } catch (Throwable t) {
      Logger.getRootLogger().warn("Exception while trying to kill all spawned Hadoop jobs", t);
    }
  } else {
    info("No application IDs found in the job log; nothing to kill on YARN.");
  }
  // Finished killing the spawned jobs on the YARN cluster.
}
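
// For illustration only: a minimal sketch of how application IDs can be pulled
// out of a job log, assuming they appear in YARN's standard
// "application_<clusterTimestamp>_<sequenceNumber>" form. This hypothetical
// helper is NOT HadoopJobUtils.findApplicationIdFromLog; it only shows the
// general technique such a utility can rely on.
// (Assumes additional imports: java.io.BufferedReader, java.io.FileReader,
// java.io.IOException, java.util.HashSet, java.util.regex.Matcher,
// java.util.regex.Pattern.)
private static Set<String> extractApplicationIds(String logFilePath) throws IOException {
  Set<String> ids = new HashSet<>();
  // YARN application IDs look like application_1519068123456_0042.
  Pattern idPattern = Pattern.compile("application_\\d+_\\d+");
  try (BufferedReader reader = new BufferedReader(new FileReader(logFilePath))) {
    String line;
    while ((line = reader.readLine()) != null) {
      Matcher matcher = idPattern.matcher(line);
      while (matcher.find()) {
        ids.add(matcher.group());
      }
    }
  }
  return ids;
}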