elasticjob学习了一下,其对于分布式任务及分布式定时任务的整体设计非常不错,尤其对于java伙伴来说便捷性非常高,其对于spring与zookeeper的支持非常高,部署应用相对简单,可以快速入手并应用,总体来说很友好;
关于elastic-job-lite-console控制台的使用,目前的控制台支持任务参数的修改,任务的触发、终止、追踪、详情配置等等,唯一遗憾在于未能提供任务创建;
这就意味着创建任务的过程需要在代码阶段完成;<创建示例:纯属个人代码,大牛勿喷>
server.port=8881
zookeeper.address=192.168.85.140:2182,192.168.85.138:2182,192.168.85.141:2182
zookeeper.namespace=elastic-job
zookeeper.connectionTimeout=10000
zookeeper.sessionTimeout=50000
zookeeper.maxRetries=3
simpleJob.cron=00 30 22 * * ?
simpleJob.shardingTotalCount=5
simpleJob.shardingItemParameters=0=beijing,1=shanghai,2=changchun,3=changsha,4=hangzhou
simpleJob.jobParameter=source1=public,source2=private
simpleJob.failover=true
simpleJob.monitorExecution=true
simpleJob.monitorPort=8889
simpleJob.maxTimeDiffSeconds=-1
simpleJob.jobShardingStrategyClass=com.dangdang.ddframe.job.lite.api.strategy.impl.AverageAllocationJobShardingStrategy
package com.cc.es.config;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.cc.es.listener.SimpleJobListener;
import com.cc.es.task.MySimpleJob;
import com.dangdang.ddframe.job.api.simple.SimpleJob;
import com.dangdang.ddframe.job.config.JobCoreConfiguration;
import com.dangdang.ddframe.job.config.JobRootConfiguration;
import com.dangdang.ddframe.job.config.simple.SimpleJobConfiguration;
import com.dangdang.ddframe.job.lite.api.JobScheduler;
import com.dangdang.ddframe.job.lite.config.LiteJobConfiguration;
import com.dangdang.ddframe.job.lite.spring.api.SpringJobScheduler;
import com.dangdang.ddframe.job.reg.zookeeper.ZookeeperRegistryCenter;
@Configuration
public class MySimpleJobConfig {

    /**
     * Zookeeper registry center injected from the Spring context; Elastic-Job
     * stores job metadata and coordination state under it.
     */
    @Autowired
    private ZookeeperRegistryCenter registryCenter;

    /** The job implementation bean that the scheduler will invoke on each trigger. */
    @Bean
    public SimpleJob simpleJob() {
        return new MySimpleJob();
    }

    /**
     * Builds the job scheduler and starts it via {@code initMethod = "init"},
     * wiring the job bean, the registry center and the configuration assembled
     * from the {@code simpleJob.*} properties.
     *
     * @param simpleJob                the job bean to schedule
     * @param cron                     cron expression driving execution
     * @param shardingTotalCount       total number of shards
     * @param shardingItemParameters   per-shard parameters (e.g. {@code 0=a,1=b})
     * @param jobParameter             free-form job-level parameter string
     * @param failover                 whether failed shards are re-executed elsewhere
     * @param monitorExecution         whether to record execution state in ZK
     * @param monitorPort              dump port for the monitor service
     * @param maxTimeDiffSeconds       max clock skew allowed vs. registry (-1 disables)
     * @param jobShardingStrategyClass fully-qualified sharding strategy class name
     * @return the scheduler managing this job
     */
    @Bean(initMethod = "init")
    public JobScheduler simpleJobScheduler(final SimpleJob simpleJob,
            @Value("${simpleJob.cron}") final String cron,
            @Value("${simpleJob.shardingTotalCount}") final int shardingTotalCount,
            @Value("${simpleJob.shardingItemParameters}") final String shardingItemParameters,
            @Value("${simpleJob.jobParameter}") final String jobParameter,
            @Value("${simpleJob.failover}") final boolean failover,
            @Value("${simpleJob.monitorExecution}") final boolean monitorExecution,
            @Value("${simpleJob.monitorPort}") final int monitorPort,
            @Value("${simpleJob.maxTimeDiffSeconds}") final int maxTimeDiffSeconds,
            @Value("${simpleJob.jobShardingStrategyClass}") final String jobShardingStrategyClass
            ) {
        return new SpringJobScheduler(simpleJob,
                registryCenter,
                getLiteJobConfiguration(simpleJob.getClass(),
                        cron,
                        shardingTotalCount,
                        shardingItemParameters,
                        jobParameter,
                        failover,
                        monitorExecution,
                        monitorPort,
                        maxTimeDiffSeconds,
                        jobShardingStrategyClass
                ),
                new SimpleJobListener());
    }

    /**
     * Assembles the three-layer Elastic-Job configuration: core (name/cron/sharding),
     * simple-job type, and lite (deployment) settings.
     * NOTE(review): the core config uses {@code jobClass.getName()} as the job name
     * while the simple config uses {@code getCanonicalName()}; for a top-level class
     * these are identical strings — confirm before using nested job classes.
     */
    private LiteJobConfiguration getLiteJobConfiguration(Class<? extends SimpleJob> jobClass, String cron,
            int shardingTotalCount, String shardingItemParameters, String jobParameter, boolean failover,
            boolean monitorExecution, int monitorPort, int maxTimeDiffSeconds, String jobShardingStrategyClass) {
        JobCoreConfiguration jobCoreConfiguration = JobCoreConfiguration.newBuilder(
                jobClass.getName(), cron, shardingTotalCount)
                .misfire(true) // re-fire missed executions once the node recovers
                .failover(failover)
                .jobParameter(jobParameter)
                .shardingItemParameters(shardingItemParameters)
                .build();
        SimpleJobConfiguration simpleJobConfiguration = new SimpleJobConfiguration(jobCoreConfiguration, jobClass.getCanonicalName());
        return LiteJobConfiguration.newBuilder(simpleJobConfiguration)
                .jobShardingStrategyClass(jobShardingStrategyClass)
                .monitorExecution(monitorExecution)
                .monitorPort(monitorPort)
                .maxTimeDiffSeconds(maxTimeDiffSeconds)
                .build();
    }
}
package com.cc.es.task;
import com.dangdang.ddframe.job.api.ShardingContext;
import com.dangdang.ddframe.job.api.simple.SimpleJob;
/**
 * Demo {@code SimpleJob} implementation: dumps every field of the sharding
 * context to stdout so each shard's parameters can be inspected per execution.
 */
public class MySimpleJob implements SimpleJob {

    @Override
    public void execute(ShardingContext shardingContext) {
        print("-----开始任务-----");
        print(shardingContext.getJobName());
        print(shardingContext.getJobParameter());
        print(shardingContext.getShardingItem());
        print(shardingContext.getShardingParameter());
        print(shardingContext.getShardingTotalCount());
        print("当前线程:-----" + Thread.currentThread().getName());
        print("-----任务执行结束-----");
    }

    /** Writes one value to stdout, exactly as {@code System.out.println} would. */
    private static void print(Object value) {
        System.out.println(value);
    }
}
代码中对于创建任务的一些参数都可进行封装,由控制台通过创建任务来完成,开发人员只需要创建一个MyJob来执行分布式任务;更优的做法是创建一个自定义@simpleJob注解来标识service、method,注解配置一个name参数,name与控制台创建的作业name保持一致,在应用启动的时候自动向远程注册中心发起请求,注册中心匹配后读取自定义的配置参数(控制台创建时配置),定时向目标节点发送执行任务的请求,执行成功返回结果并记录;
可能很多大牛自己在设计项目架构的时候都有完整的一套关于分布式定时任务的完整解决方案,尤其是基础架构,尽量减轻业务线开发,都在组件支撑上保持“低耦合、低侵入”原则,简单来说即保持开发人员逻辑,该怎么写就怎么写,不需要考虑组件嵌入带来的代码变化;
今日用到,有感而发,在后续的应用中会尝试在elasticjob开源的基础上增加一些此种设想。