- Prerequisite: the thread pools used below are defined in a dedicated configuration class (see "Thread pool configuration" further down).
- Business scenario: migrate rows from table A to table B. Each row is inserted into table B, and the source row in table A has its status set to -1 to mark it as already transferred, i.e. dead data (a sketch of the assumed mapper statements follows).
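For reference, a minimal sketch of the mapper interface the job below relies on. The real statements live in mapper/queue/*.xml; the annotation form, the SQL, the table/column names and the id type here are only placeholder assumptions to show the intent of the three calls the job makes:

import com.xyc.sms.common.entity.queue.SmsQueueGn;
import org.apache.ibatis.annotations.Insert;
import org.apache.ibatis.annotations.Param;
import org.apache.ibatis.annotations.Select;
import org.apache.ibatis.annotations.Update;

import java.time.LocalDateTime;
import java.util.List;

public interface SmsTimingMapper {

    // Fetch up to 'limit' timed rows due before 'afterTime' (placeholder SQL and columns).
    @Select("SELECT * FROM SMS_TIMING WHERE SEND_TIME <= #{afterTime} AND STATUS = 0 AND ROWNUM <= #{limit}")
    List<SmsQueueGn> getQueueTimingListByAfterTime(@Param("afterTime") LocalDateTime afterTime, @Param("limit") int limit);

    // Mark the source row in table A as transferred (-1 = dead data).
    @Update("UPDATE SMS_TIMING SET STATUS = -1 WHERE ID = #{id}")
    int updateDataById(@Param("id") Long id);

    // Copy the row into table B (the queue table); column list is illustrative only.
    @Insert("INSERT INTO SMS_QUEUE (ID, MOBILE, CONTENT, SEND_TIME) VALUES (#{id}, #{mobile}, #{content}, #{sendTime})")
    int insertData(SmsQueueGn row);
}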
Dependencies
Besides Spring Boot, MyBatis-Spring and the Oracle JDBC driver, the code below relies on Guava (Lists.partition) and Apache commons-collections4 (CollectionUtils); add them if they are not already on the classpath (versions omitted here, use whatever your project standardizes on):
<dependency>
    <groupId>com.google.guava</groupId>
    <artifactId>guava</artifactId>
</dependency>
<dependency>
    <groupId>org.apache.commons</groupId>
    <artifactId>commons-collections4</artifactId>
</dependency>
Scheduled job that runs the transfer on worker threads (for reference)
- In essence this is one thread running a while(true){}-style loop; it is not necessarily best practice and is provided for reference only.
- The scheduled task is handed off to a custom thread pool purely so that the business logic does not tie up the scheduler's main thread.
- timToQueueJobThreadPool: the core size must be 1 so that one run finishes completely before the next one starts; otherwise the same rows could be selected and processed twice.
- timToQueueThreadPool: the core size is 5 because each run fans out across 5 async workers. Every run selects up to 500 rows and partitions them into chunks of 100, so 5 threads execute the transfer concurrently (a distilled sketch of this fan-out pattern follows this list).
- Note: because the database is Oracle, the transfer is iterated row by row in the service layer to avoid piling up too many cached execution plans; with MySQL, the loop/insert could instead be pushed down into the SQL itself.
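Stripped of the MyBatis details, the fan-out the job uses looks roughly like this. A minimal sketch only: FanOutSketch, transferInChunks and processChunk are placeholder names standing in for the per-chunk transfer logic shown in the full class below:

import com.google.common.collect.Lists;

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.function.Consumer;

public class FanOutSketch {

    // Partition the selected rows into chunks of 100, run each chunk on the worker pool,
    // then block until every chunk has finished: 500 rows -> 5 chunks -> 5 concurrent workers.
    static <T> void transferInChunks(List<T> rows, Executor workerPool, Consumer<List<T>> processChunk) {
        CompletableFuture[] futures = Lists.partition(rows, 100).stream()
                .map(chunk -> CompletableFuture.runAsync(() -> processChunk.accept(chunk), workerPool))
                .toArray(CompletableFuture[]::new);
        CompletableFuture.allOf(futures).join();
    }
}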
import com.google.common.collect.Lists;
import com.xyc.sms.common.entity.queue.SmsQueueGn;
import com.xyc.sms.syndata.dao.queue.SmsTimingMapper;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.ibatis.session.ExecutorType;
import org.apache.ibatis.session.SqlSession;
import org.apache.ibatis.session.SqlSessionFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.scheduling.annotation.Async;
import org.springframework.scheduling.annotation.EnableAsync;
import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

import javax.annotation.Resource;
import java.time.LocalDateTime;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ThreadPoolExecutor;

@Component
@EnableScheduling
@EnableAsync
public class InsertTimingToQueueJob {

    private static final Logger logger = LoggerFactory.getLogger(InsertTimingToQueueJob.class);

    @Qualifier("smsQueueSqlSessionFactory")
    @Resource
    private SqlSessionFactory smsQueueSqlSessionFactory;

    @Autowired
    @Qualifier("timToQueueThreadPool")
    private ThreadPoolExecutor queueThreadPool;

    @Resource
    private SmsTimingMapper smsTimingMapper;

    // Runs on the single-threaded timToQueueJobThreadPool so that one transfer pass
    // always completes before the next one starts.
    @Scheduled(fixedDelay = 100)
    @Async("timToQueueJobThreadPool")
    public void insertToQueueByTimingJob() {
        LocalDateTime time = LocalDateTime.now();
        LocalDateTime afterTime = time.plusMinutes(5);
        long selectStartL = System.currentTimeMillis();
        // Select up to 500 rows that are due within the next 5 minutes.
        List<SmsQueueGn> smsQueueGnList = smsTimingMapper.getQueueTimingListByAfterTime(afterTime, 500);
        long selectEndL = System.currentTimeMillis();
        if (CollectionUtils.isNotEmpty(smsQueueGnList)) {
            long startL = System.currentTimeMillis();
            // Partition into chunks of 100 and transfer each chunk on the 5-thread worker pool.
            CompletableFuture[] allFuture = Lists.partition(smsQueueGnList, 100).stream().map(tempQueues ->
                    CompletableFuture.runAsync(() -> {
                        long sqlL = System.currentTimeMillis();
                        SqlSession sqlSessionNew = null;
                        try {
                            // Open a dedicated BATCH session with auto-commit off; commit only after
                            // the whole chunk (update A + insert B) has been executed.
                            sqlSessionNew = smsQueueSqlSessionFactory.openSession(ExecutorType.BATCH, false);
                            SmsTimingMapper mapperNew = sqlSessionNew.getMapper(SmsTimingMapper.class);
                            for (int i = 0; i < tempQueues.size(); i++) {
                                mapperNew.updateDataById(tempQueues.get(i).getId());
                                mapperNew.insertData(tempQueues.get(i));
                            }
                            sqlSessionNew.commit();
                            logger.info("[timToQueueJob] exec sql :{}", System.currentTimeMillis() - sqlL);
                        } catch (Exception e) {
                            Optional.ofNullable(sqlSessionNew).ifPresent(SqlSession::rollback);
                            logger.error("[timToQueueJob] async chunk transfer failed:", e);
                        } finally {
                            Optional.ofNullable(sqlSessionNew).ifPresent(SqlSession::close);
                        }
                    }, queueThreadPool)
            ).toArray(CompletableFuture[]::new);
            try {
                CompletableFuture.allOf(allFuture).join();
            } catch (Exception e) {
                logger.error("[timToQueueJob] error while waiting for the async inserts into the queue table", e);
            }
            long endL = System.currentTimeMillis();
            logger.info("[timToQueueJob] select cost:{} ms, transfer cost:{} ms, total for this run:{} ms",
                    selectEndL - selectStartL, endL - startL, endL - selectStartL);
        }
    }
}
Thread pool configuration
import com.xyc.sms.common.util.ThreadUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

@Configuration
public class QueueThreadPoolConfig {

    private final static Logger logger = LoggerFactory.getLogger(QueueThreadPoolConfig.class);

    // Config holder for the single-threaded job pool.
    @Bean(name = "queueTransferJobConfig")
    public PoolConfig collectReportPoolConfig() {
        return new PoolConfig("QueueTransferJob");
    }

    // Config holder for the 5-thread SQL worker pool.
    @Bean(name = "queueTransferSqlConfig")
    public PoolConfig collectMoPoolConfig() {
        return new PoolConfig("QueueTransferSql");
    }

    // Pool that executes the chunked transfer SQL: 5 core/max threads, unbounded queue,
    // CallerRunsPolicy as a safety net if the pool were ever saturated.
    @Bean(name = "timToQueueThreadPool")
    public ThreadPoolExecutor timToQueueThreadPool(@Qualifier("queueTransferSqlConfig") PoolConfig c) {
        PoolConfig useConfig = c.printInitInfoAndReturnUse();
        return new ThreadPoolExecutor(useConfig.getPoolCoreSize(), useConfig.getPoolMaxSize(), 60, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(),
                ThreadUtil.threadFactory("timToQueueThreadPool"),
                new ThreadPoolExecutor.CallerRunsPolicy());
    }

    // Single-threaded pool that runs the scheduled job itself, so runs never overlap.
    @Bean(name = "timToQueueJobThreadPool")
    public ThreadPoolExecutor timToQueueJobThreadPool(@Qualifier("queueTransferJobConfig") PoolConfig c) {
        PoolConfig useConfig = c.printInitInfoAndReturnUse();
        return new ThreadPoolExecutor(1, 1, 60, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(),
                ThreadUtil.threadFactory("timToQueueJobThreadPool"),
                new ThreadPoolExecutor.DiscardPolicy());
    }

    private class PoolConfig {

        private int poolCoreSize = 5;
        private int poolMaxSize = 5;
        private int taskQueueMaxSize = 200;
        private String poolNamePrefix;

        private int poolCoreSize_default = 5;
        private int poolMaxSize_default = 5;
        private int taskQueueMaxSize_default = 200;

        public PoolConfig(String poolNamePrefix) {
            this.poolNamePrefix = poolNamePrefix;
            // The job pool is deliberately limited to a single thread.
            if (poolNamePrefix.equals("QueueTransferJob")) {
                poolCoreSize = 1;
                poolMaxSize = 1;
            }
        }

        public int getPoolCoreSize() {
            return poolCoreSize;
        }

        public void setPoolCoreSize(int poolCoreSize) {
            this.poolCoreSize = poolCoreSize;
        }

        public int getPoolMaxSize() {
            return poolMaxSize;
        }

        public void setPoolMaxSize(int poolMaxSize) {
            this.poolMaxSize = poolMaxSize;
        }

        public int getTaskQueueMaxSize() {
            return taskQueueMaxSize;
        }

        public void setTaskQueueMaxSize(int taskQueueMaxSize) {
            this.taskQueueMaxSize = taskQueueMaxSize;
        }

        public String getPoolNamePrefix() {
            return poolNamePrefix;
        }

        public void setPoolNamePrefix(String poolNamePrefix) {
            this.poolNamePrefix = poolNamePrefix;
        }

        public int getPoolCoreSize_default() {
            return poolCoreSize_default;
        }

        public void setPoolCoreSize_default(int poolCoreSize_default) {
            this.poolCoreSize_default = poolCoreSize_default;
        }

        public int getPoolMaxSize_default() {
            return poolMaxSize_default;
        }

        public void setPoolMaxSize_default(int poolMaxSize_default) {
            this.poolMaxSize_default = poolMaxSize_default;
        }

        public int getTaskQueueMaxSize_default() {
            return taskQueueMaxSize_default;
        }

        public void setTaskQueueMaxSize_default(int taskQueueMaxSize_default) {
            this.taskQueueMaxSize_default = taskQueueMaxSize_default;
        }

        // Builds the config actually used for the pool and logs default vs. configured vs. effective values.
        private PoolConfig printInitInfoAndReturnUse() {
            PoolConfig us = new PoolConfig(this.poolNamePrefix);
            logger.info(String.format("****************************************%-5sInitializing thread pool config for [%-25s] - begin%-5s****************************************", "", us.getPoolNamePrefix(), ""));
            us.setPoolCoreSize(this.poolCoreSize);
            // The max size always follows the core size, and the task queue is effectively unbounded.
            us.setPoolMaxSize(us.getPoolCoreSize());
            us.setTaskQueueMaxSize(Integer.MAX_VALUE);
            logger.info(String.format("**************%-4s loading %-25s - %-20s default: %-8d configured: %-8d effective: %-5d**************",
                    "", us.getPoolNamePrefix(), "poolCoreSize", us.getPoolCoreSize_default(), this.poolCoreSize, us.getPoolCoreSize()));
            logger.info(String.format("**************%-4s loading %-25s - %-20s default: %-8d configured: %-8d effective (follows core size): %-5d**************",
                    "", us.getPoolNamePrefix(), "poolMaxSize", us.getPoolMaxSize_default(), this.poolMaxSize, us.getPoolMaxSize()));
            logger.info(String.format("****************************************%-5sInitializing thread pool config for [%-25s] - end%-5s****************************************", "", us.getPoolNamePrefix(), ""));
            return us;
        }
    }
}
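In this standalone form the "configured" column of the startup log just echoes the in-code defaults. If the pool sizes should really be driven by application.yml, one possible approach is to bind a small properties holder with @ConfigurationProperties and feed it into the pool beans. A sketch only; the class, bean and property names here are hypothetical and not part of the project:

import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class QueueTransferPoolProperties {

    // Binds, for example:
    // thread-pool:
    //   queue-transfer-sql:
    //     pool-core-size: 5
    //     pool-max-size: 5
    @Bean(name = "queueTransferSqlProps")
    @ConfigurationProperties(prefix = "thread-pool.queue-transfer-sql")
    public PoolSizeProps queueTransferSqlProps() {
        return new PoolSizeProps();
    }

    public static class PoolSizeProps {
        private int poolCoreSize = 5;   // default when not set in yml
        private int poolMaxSize = 5;

        public int getPoolCoreSize() { return poolCoreSize; }
        public void setPoolCoreSize(int poolCoreSize) { this.poolCoreSize = poolCoreSize; }
        public int getPoolMaxSize() { return poolMaxSize; }
        public void setPoolMaxSize(int poolMaxSize) { this.poolMaxSize = poolMaxSize; }
    }
}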
Thread utility class
import com.google.common.collect.Lists;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BiFunction;
import java.util.function.Function;

public class ThreadUtil {

    private final static Logger logger = LoggerFactory.getLogger(ThreadUtil.class);

    // Sleep via Object.wait on a throwaway lock instead of Thread.sleep.
    public static void sleepByWait(long timeout) throws InterruptedException {
        byte[] lock = new byte[0];
        synchronized (lock) {
            lock.wait(timeout);
        }
    }

    public static RejectedExecutionHandler blockExecuteRejectHandle(String name) {
        return new BlockExecuteRejectHandle(name);
    }

    public static ThreadFactory threadFactory(String name) {
        return new TFactory(name);
    }

    // Thread factory that names threads "<name>-<n>", modeled on Executors.defaultThreadFactory().
    static class TFactory implements ThreadFactory {
        private final ThreadGroup group;
        private final AtomicInteger threadNumber = new AtomicInteger(1);
        private final String namePrefix;

        public TFactory(String name) {
            SecurityManager s = System.getSecurityManager();
            group = (s != null) ? s.getThreadGroup() : Thread.currentThread().getThreadGroup();
            namePrefix = name.concat("-");
        }

        public Thread newThread(Runnable r) {
            Thread t = new Thread(group, r, namePrefix + threadNumber.getAndIncrement(), 0);
            if (t.isDaemon())
                t.setDaemon(false);
            if (t.getPriority() != Thread.NORM_PRIORITY)
                t.setPriority(Thread.NORM_PRIORITY);
            return t;
        }
    }

    // Rejection handler that blocks the submitter until the task fits into the pool's queue.
    static class BlockExecuteRejectHandle implements RejectedExecutionHandler {
        final String name;

        public BlockExecuteRejectHandle(String name) {
            this.name = name.concat("RejectHandle");
        }

        @Override
        public void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
            if (!e.isShutdown()) {
                try {
                    logger.warn("{} blocking enqueue | pool:{}", name, e);
                    e.getQueue().put(r);
                } catch (Exception ex) {
                    logger.error("{} blocking enqueue failed", name, ex);
                }
            }
        }
    }

    // Chunk size needed to spread dataSize items over at most maxTaskNum tasks
    // (0 means the data fits in a single task).
    public static int avgCapacity(int dataSize, int maxTaskNum) {
        int _c = dataSize / maxTaskNum;
        if (_c == 0) {
            return 0;
        }
        return dataSize % maxTaskNum == 0 ? _c : _c + 1;
    }

    // Split ls over at most maxTaskNum tasks, run them on the pool and block until all finish.
    public static <T> void concurrentExecuteAndBlockResult(ThreadPoolExecutor executor, int maxTaskNum, Function<List<T>, Void> fun, List<T> ls) {
        if (ls.isEmpty()) {
            return;
        }
        int avgCapacity = ThreadUtil.avgCapacity(ls.size(), maxTaskNum);
        if (avgCapacity <= 1) {
            fun.apply(ls);
        } else {
            List<List<T>> lists = Lists.partition(ls, avgCapacity);
            CompletableFuture[] all = new CompletableFuture[lists.size()];
            for (int i = 0; i < lists.size(); i++) {
                List<T> tmp = lists.get(i);
                if (tmp.isEmpty()) {
                    continue;
                }
                all[i] = CompletableFuture.runAsync(() -> fun.apply(tmp), executor);
            }
            CompletableFuture.allOf(all).join();
        }
    }

    // Same as above, but the caller fixes the chunk size (oneTaskDataSize) instead of the task count.
    public static <T> void concurrentExecuteAndBlockResultVo(ThreadPoolExecutor executor, int oneTaskDataSize, Function<List<T>, Void> fun, List<T> ls) {
        if (ls.isEmpty()) {
            return;
        }
        if (ls.size() <= oneTaskDataSize) {
            fun.apply(ls);
        } else {
            List<List<T>> lists = Lists.partition(ls, oneTaskDataSize);
            CompletableFuture[] all = new CompletableFuture[lists.size()];
            for (int i = 0; i < lists.size(); i++) {
                List<T> tmp = lists.get(i);
                if (tmp.isEmpty()) {
                    continue;
                }
                all[i] = CompletableFuture.runAsync(() -> fun.apply(tmp), executor);
            }
            CompletableFuture.allOf(all).join();
        }
    }

    // Variant for pools using AbortPolicy: retries submission (with a short sleep) whenever the pool rejects a task.
    public static <T> void concurrentExecuteAndBlockResultVoForAbortPolicyReject(ThreadPoolExecutor executor, int oneTaskDataSize, Function<List<T>, Void> fun, List<T> ls) {
        if (ls.isEmpty()) {
            return;
        }
        if (ls.size() <= oneTaskDataSize) {
            fun.apply(ls);
        } else {
            List<List<T>> lists = Lists.partition(ls, oneTaskDataSize);
            CompletableFuture[] all = new CompletableFuture[lists.size()];
            for (int i = 0; i < lists.size(); i++) {
                List<T> tmp = lists.get(i);
                if (tmp.isEmpty()) {
                    continue;
                }
                int reNum = 0;
                while (all[i] == null) {
                    try {
                        all[i] = CompletableFuture.runAsync(() -> fun.apply(tmp), executor);
                    } catch (RejectedExecutionException e) {
                        if (reNum == 0) {
                            logger.warn("thread pool busy, task rejected: {}", e.getMessage());
                        }
                        reNum++;
                        try { Thread.sleep(3); } catch (Exception e1) { }
                    } catch (Exception e) {
                        logger.error("thread pool task submission failed", e);
                        break;
                    }
                }
                if (reNum > 0) {
                    logger.warn("thread pool busy, retry count: {}", reNum);
                }
            }
            CompletableFuture.allOf(all).join();
        }
    }

    // Overload that passes one extra shared parameter (p0) to every chunk.
    public static <T> void concurrentExecuteAndBlockResult(ThreadPoolExecutor executor, int maxTaskNum, BiFunction<List<T>, Object, Void> fun, List<T> ls, Object p0) {
        if (ls.isEmpty()) {
            return;
        }
        int avgCapacity = ThreadUtil.avgCapacity(ls.size(), maxTaskNum);
        if (avgCapacity <= 1) {
            fun.apply(ls, p0);
        } else {
            List<List<T>> lists = Lists.partition(ls, avgCapacity);
            CompletableFuture[] all = new CompletableFuture[lists.size()];
            for (int i = 0; i < lists.size(); i++) {
                List<T> tmp = lists.get(i);
                if (tmp.isEmpty()) {
                    continue;
                }
                all[i] = CompletableFuture.runAsync(() -> fun.apply(tmp, p0), executor);
            }
            CompletableFuture.allOf(all).join();
        }
    }

    // Overload that passes an array of extra shared parameters to every chunk.
    public static <T> void concurrentExecuteAndBlockResult(ThreadPoolExecutor executor, int maxTaskNum, BiFunction<List<T>, Object[], Void> fun, List<T> ls, Object... oArr) {
        if (ls.isEmpty()) {
            return;
        }
        int avgCapacity = ThreadUtil.avgCapacity(ls.size(), maxTaskNum);
        if (avgCapacity <= 1) {
            fun.apply(ls, oArr);
        } else {
            List<List<T>> lists = Lists.partition(ls, avgCapacity);
            CompletableFuture[] all = new CompletableFuture[lists.size()];
            for (int i = 0; i < lists.size(); i++) {
                List<T> tmp = lists.get(i);
                if (tmp.isEmpty()) {
                    continue;
                }
                all[i] = CompletableFuture.runAsync(() -> fun.apply(tmp, oArr), executor);
            }
            CompletableFuture.allOf(all).join();
        }
    }

    // Split, run, and merge the per-chunk results into a single list.
    public static <T, R> List<R> exec(ThreadPoolExecutor executor, int maxTaskNum,
                                      Function<List<T>, List<R>> fun,
                                      List<T> dataLs) {
        if (dataLs.isEmpty()) {
            return new ArrayList<>();
        }
        int avgCapacity = avgCapacity(dataLs.size(), maxTaskNum);
        if (avgCapacity <= 1) {
            return fun.apply(dataLs);
        } else {
            List<R> ret = new CopyOnWriteArrayList<>();
            List<List<T>> lists = Lists.partition(dataLs, avgCapacity);
            CompletableFuture<? extends List<? extends R>>[] all = new CompletableFuture[lists.size()];
            for (int i = 0; i < lists.size(); i++) {
                List<T> tmp = lists.get(i);
                if (tmp.isEmpty()) {
                    continue;
                }
                all[i] = CompletableFuture.supplyAsync(() -> fun.apply(tmp), executor).whenCompleteAsync((rv, ex) -> {
                    if (ex != null) {
                        logger.error("exec chunk failed", ex);
                    }
                    if (rv != null) {
                        ret.addAll(rv);
                    }
                });
            }
            CompletableFuture.allOf(all).join();
            return ret;
        }
    }
}
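A usage sketch of these helpers, under the assumption that ThreadUtil is on the classpath as shown above; the pool, the data and the per-chunk lambdas are throwaway placeholders, not project code:

import com.xyc.sms.common.util.ThreadUtil;

import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class ThreadUtilUsageSketch {
    public static void main(String[] args) {
        // A throwaway pool just for the example; in the project the injected pool beans would be used instead.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(5, 5, 60, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(), ThreadUtil.threadFactory("demo"),
                new ThreadPoolExecutor.CallerRunsPolicy());

        List<Integer> ids = IntStream.rangeClosed(1, 500).boxed().collect(Collectors.toList());

        // Spread the 500 ids over at most 5 tasks and block until every chunk is done.
        ThreadUtil.concurrentExecuteAndBlockResult(pool, 5, chunk -> {
            System.out.println(Thread.currentThread().getName() + " handled " + chunk.size() + " ids");
            return null;
        }, ids);

        // Same split, but each chunk returns a result list and the lists are merged.
        List<String> results = ThreadUtil.exec(pool, 5, chunk -> chunk.stream()
                .map(id -> "processed-" + id)
                .collect(Collectors.toList()), ids);
        System.out.println("collected " + results.size() + " results");

        pool.shutdown();
    }
}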
Database configuration class
- Note: the factoryBean.setTransactionFactory(new JdbcTransactionFactory()) call inside the sqlSessionFactory method below causes methods in this module annotated with Spring's transaction annotation to stop working and fail with an error, so use it with care. It is, however, what lets this demo commit each transferred chunk manually, so the transfer does not lose data if the application is shut down mid-run (an illustration of the failure mode follows the configuration class below).
import org.apache.ibatis.session.SqlSessionFactory;
import org.apache.ibatis.transaction.jdbc.JdbcTransactionFactory;
import org.mybatis.spring.SqlSessionFactoryBean;
import org.mybatis.spring.annotation.MapperScan;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.jdbc.DataSourceBuilder;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;

import javax.sql.DataSource;

@Configuration
@MapperScan(basePackages = { "com.xyc.sms.syndata.dao.queue" }, sqlSessionFactoryRef = "smsQueueSqlSessionFactory")
public class SmsQueueDBConfig {

    @Bean(name = "smsQueueDataSource")
    @ConfigurationProperties(prefix = "spring.datasource.hikari.queue-gn")
    public DataSource dataSource() {
        return DataSourceBuilder.create().build();
    }

    @Bean(name = "smsQueueTransactionManager")
    public DataSourceTransactionManager transactionManager(@Qualifier("smsQueueDataSource") DataSource dataSource) {
        return new DataSourceTransactionManager(dataSource);
    }

    @Bean(name = "smsQueueSqlSessionFactory")
    public SqlSessionFactory sqlSessionFactory(@Qualifier("smsQueueDataSource") DataSource dataSource) throws Exception {
        SqlSessionFactoryBean factoryBean = new SqlSessionFactoryBean();
        // JdbcTransactionFactory lets the job commit/rollback its own sessions,
        // but breaks Spring-managed transactions for mappers built from this factory (see the note above).
        factoryBean.setTransactionFactory(new JdbcTransactionFactory());
        factoryBean.setDataSource(dataSource);
        factoryBean.setMapperLocations(new PathMatchingResourcePatternResolver().getResources("classpath:mapper/queue/*.xml"));
        SqlSessionFactory factory = factoryBean.getObject();
        factory.getConfiguration().setCacheEnabled(false);
        return factory;
    }
}
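To illustrate the note above: with JdbcTransactionFactory in place, a Spring-managed transaction around these mappers is not honored, and as the note says the annotated method fails at runtime instead of committing or rolling back through Spring. The class below is purely hypothetical and only shows the pattern that would fail; the working alternative is the manual openSession/commit/rollback/close handling the job class uses:

import com.xyc.sms.syndata.dao.queue.SmsTimingMapper;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

import javax.annotation.Resource;

// Hypothetical service, for illustration only; it is NOT part of the project.
@Service
public class TransferServiceDontDoThis {

    @Resource
    private SmsTimingMapper smsTimingMapper;

    // Because smsQueueSqlSessionFactory was built with JdbcTransactionFactory,
    // running these mapper calls inside a Spring-managed transaction errors out at runtime
    // rather than being committed or rolled back by Spring.
    @Transactional(transactionManager = "smsQueueTransactionManager")
    public void transferOne(Long id) {
        smsTimingMapper.updateDataById(id);
        // smsTimingMapper.insertData(...);
    }
}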
application.yml database connection settings
spring:
  datasource:
    hikari:
      queue-gn:
        driver-class-name: oracle.jdbc.OracleDriver
        jdbc-url: jdbc:oracle:thin:@192.168.3.212:1521:zekidb
        username: SMS_QUEUE_DEV
        password: SMS_QUEUE_DEV
        type: com.alibaba.druid.pool.DruidDataSource
        filters: stat
        max-wait: 60000
        initial-size: 10
        maxActive: 20
        minIdle: 10
        timeBetweenEvictionRunsMillis: 60000
        minEvictableIdleTimeMillis: 300000
        validationQuery: SELECT 'z' FROM dual
        testWhileIdle: true
        testOnBorrow: false
        testOnReturn: false