项目需求:
数据迁移进程开始执行后,启动定时任务每隔30秒计算迁移进度,并将进度入库,当迁移完成后,关闭任务。
迁移时根据任务单来执行,任务单中由多种基本业务数据组成。
实现思路:
1.迁移开始前,启动监视进度的线程,每30秒重新执行,如果数据迁移完毕,则调用关闭线程接口。
2.基本业务数据迁移完成后,迁移状态及进度入库。
3.任务单执行完毕后,调用关闭线程接口。
定时任务采用线程方式实现:由监视线程跟踪迁移进度,与迁移进程同步执行。这里不自己编写并管理线程,原因是系统中很多功能都以线程方式实现,多线程编程建议统一交给应用广泛的调度/线程池框架管理,否则容易出现死锁、线程失控等问题。因此本方案采用 quartz 框架来实现定时调度。
示例代码:
public class MoveProcessService extends BaseMgr {
    public static Logger logger = Logger.getLogger(LoggerManager.PLATFORM);
    // caseId -> Scheduler that monitors that case.
    // NOTE(review): plain HashMap is not thread-safe; if watchMoveProcess /
    // shutdownScheduler can run concurrently, wrap this in a synchronized map.
    public static Map schedulerMap = new HashMap();
    // Shared Quartz default scheduler — the same instance serves every case,
    // so individual cases must be stopped via deleteJob, never via shutdown().
    private Scheduler scheduler;

    public MoveProcessService(){
        try{
            scheduler = StdSchedulerFactory.getDefaultScheduler();
        }catch(Exception e){
            // Log with cause instead of printStackTrace so the failure is
            // visible in the application log.
            logger.error("\n\n^^^^^^^^^^^^^^^^^^^ 创建Scheduler实例异常", e);
        }
    }

    /**
     * Schedules a recurring (every 30 s) Quartz job that records the
     * migration progress of the given case.
     * <p>
     * Job and trigger names embed the caseId so that several cases can be
     * monitored concurrently without name collisions in the scheduler
     * (previously the names were hard-coded and a second case clashed).
     *
     * @param caseId         id of the task order (case) being migrated
     * @param baseTypeIdList ids of the basic business data types in the order
     */
    public void watchMoveProcess(String caseId, List baseTypeIdList){
        try{
            // 1. JobDetail carrying the case context in its JobDataMap.
            JobDetail jobDetail = new JobDetail("WatchMoveProcessJob_" + caseId, "MoveProcessGroup",
                    com.dhcc.rt.sysMonitor.service.WatchMoveProcessJob.class);
            jobDetail.getJobDataMap().put("caseId", caseId);
            jobDetail.getJobDataMap().put("baseTypeIdList", baseTypeIdList);
            WatchMoveListener moveListener = new WatchMoveListener();
            jobDetail.addJobListener(moveListener.getName());
            // 2. Trigger: fire every 30 seconds, starting immediately.
            Trigger trigger = TriggerUtils.makeSecondlyTrigger(30);
            trigger.setName("moveProcessTrigger_" + caseId);
            trigger.setStartTime(new Date());
            // 3. Register and start the scheduling engine.
            scheduler.addJobListener(moveListener);
            scheduler.scheduleJob(jobDetail, trigger);
            schedulerMap.put(caseId, scheduler);
            scheduler.start();
            // Early exit: if the task order is already finished, remove
            // the freshly scheduled job right away.
            shutdownScheduler(caseId);
        }catch (Exception e) {
            logger.error("^^^^^^^^^^^^^^^^^^^ 启动个例进度监视任务异常", e);
        }
    }

    /**
     * Stops progress monitoring for one case once its task order reaches a
     * terminal state (1 = migrated successfully, 2 = migration failed).
     * <p>
     * Only this case's job is deleted; calling {@code scheduler.shutdown()}
     * here (as the previous version did) would kill the shared default
     * scheduler and silently stop the monitors of every other case.
     *
     * @param caseId id of the task order whose monitor should be stopped
     */
    public void shutdownScheduler(String caseId){
        try{
            int taskOrderStatus = new PorcessMonitorDAO().findTaskOrderStatus(caseId);
            if(taskOrderStatus == 1 || taskOrderStatus == 2){
                logger.info("^^^^^^^^^^^^^^^^^^^ 关闭个例进度监视");
                Object schedulerObj = schedulerMap.get(caseId);
                if(schedulerObj != null){
                    Scheduler sched = (Scheduler)schedulerObj;
                    // Remove only this case's job; the shared scheduler keeps
                    // running for any other cases still being monitored.
                    sched.deleteJob("WatchMoveProcessJob_" + caseId, "MoveProcessGroup");
                    // Drop the map entry so it does not leak.
                    schedulerMap.remove(caseId);
                }
            }
        }catch(Exception e){
            logger.error("^^^^^^^^^^^^^^^^^^^ 关闭Scheduler实例异常", e);
        }
    }
}
job:
小类迁移实际进度入库
porcessMonitorDAO.updateBaseTypeProcess(caseId,baseTypeId,process);
/**
 * Quartz job fired every 30 s per migrating case: reads the actual migration
 * progress of each basic business data type and persists it via
 * {@code PorcessMonitorDAO.updateBaseTypeProcess}.
 */
public class WatchMoveProcessJob implements Job{
    public static Logger logger = Logger.getLogger(LoggerManager.PLATFORM);

    public void execute(JobExecutionContext jec) throws JobExecutionException {
        // Case context was stored in the JobDataMap when the job was scheduled.
        JobDataMap dataMap = jec.getJobDetail().getJobDataMap();
        String caseId = dataMap.getString("caseId");
        List baseTypeIdList = (List)dataMap.get("baseTypeIdList");
        logger.info("** caseId = "+ caseId);
        logger.info("** baseTypeIdList = "+ baseTypeIdList);
        PorcessMonitorDAO porcessMonitorDAO = new PorcessMonitorDAO();
        MoveStatistic moveStatistic = new MoveStatistic();
        Map moveProcessMap = TCaseData.getMoveProcessMap();
        Map baseTypeIds = (Map)moveProcessMap.get(caseId);
        if(baseTypeIds == null){
            // Previously this NPE'd: the old code null-checked keySet(), which
            // never returns null, instead of the map returned by get(caseId).
            logger.info("** 个例 " + caseId + " 尚无进度数据,本次跳过");
            return;
        }
        for(Iterator its = baseTypeIds.keySet().iterator(); its.hasNext();){
            String baseTypeId = (String)its.next();
            // Progress = local folder size / remote data size, as a percentage string.
            String process = moveStatistic.getMoveProcess(caseId, baseTypeId);
            // Guard against null as well as empty — the old code called
            // process.length() without a null check.
            process = (process != null && process.length() > 0) ? process : "0";
            int percent;
            try{
                // parseInt instead of the deprecated new Integer(...); a bad
                // value no longer aborts the whole job run.
                percent = Integer.parseInt(process.trim());
            }catch(NumberFormatException nfe){
                logger.error("** 非法的进度值, baseTypeId=" + baseTypeId + ", process=" + process, nfe);
                continue;
            }
            if(percent < 100){
                // Persist intermediate progress for this basic data type.
                // NOTE(review): a finished type (100%) is deliberately NOT
                // written here — confirm the completion status is persisted
                // elsewhere, otherwise the DB never shows 100%.
                porcessMonitorDAO.updateBaseTypeProcess(caseId, baseTypeId, process);
            }
        }
    }
}