前言
开发目的:
提高百万级数据插入效率。
采取方案:
利用ThreadPoolTaskExecutor多线程批量插入。
具体实现细节
application-dev.properties添加线程池配置信息
# 异步线程配置
# 配置核心线程数
async.executor.thread.core_pool_size = 30
# 配置最大线程数
async.executor.thread.max_pool_size = 30
# 配置队列大小
async.executor.thread.queue_capacity = 99988
# 配置线程池中的线程的名称前缀
async.executor.thread.name.prefix = async-importDB-
向 Spring 容器注入线程池 Bean 对象
import lombok.extern.slf4j.Slf4j;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.annotation.EnableAsync;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
import java.util.concurrent.Executor;
import java.util.concurrent.ThreadPoolExecutor;
@Configuration
@EnableAsync
@Slf4j
public class ExecutorConfig {

    // Pool sizing is externalized to application-dev.properties so it can be
    // tuned per environment without a rebuild.
    @Value("${async.executor.thread.core_pool_size}")
    private int corePoolSize;
    @Value("${async.executor.thread.max_pool_size}")
    private int maxPoolSize;
    @Value("${async.executor.thread.queue_capacity}")
    private int queueCapacity;
    @Value("${async.executor.thread.name.prefix}")
    private String namePrefix;

    /**
     * Thread pool used by {@code @Async("asyncServiceExecutor")} batch-insert
     * tasks.
     *
     * @return an initialized {@link ThreadPoolTaskExecutor} exposed as the
     *         bean named {@code asyncServiceExecutor}
     */
    @Bean(name = "asyncServiceExecutor")
    public Executor asyncServiceExecutor() {
        // Informational startup message, not a warning condition.
        log.info("start asyncServiceExecutor");
        ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
        executor.setCorePoolSize(corePoolSize);
        executor.setMaxPoolSize(maxPoolSize);
        executor.setQueueCapacity(queueCapacity);
        executor.setThreadNamePrefix(namePrefix);
        // When the pool and queue are both saturated, run the task on the
        // caller's thread instead of rejecting it: this applies back-pressure
        // to the submitter and guarantees no insert task is silently dropped.
        executor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
        // Let in-flight/queued insert tasks finish before the application
        // context shuts the pool down, so a shutdown mid-import loses no data.
        executor.setWaitForTasksToCompleteOnShutdown(true);
        executor.initialize();
        return executor;
    }
}
创建异步线程业务类
import com.newdo.base.CFunc;
import com.newdo.dynamicDataSource.datasource.DBIdentifier;
import lombok.extern.slf4j.Slf4j;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Service;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
@Service
@Slf4j
public class OldDataImportModel implements OldDataImportService {
public static final Logger log = LoggerFactory.getLogger(OldDataImportModel.class);
@Autowired
OldDataImportMapper oldDataImportMapper;
@Override
public Map<String, Object> importDataList(Map<String, Object> mapArgu) {
Map<String,Object> mapOut = new HashMap<>();
// 获取要插入的数据
List<Map<String,Object>> listMap = oldDataImportMapper.getSjTiList(mapArgu);
//每100条数据插入开一个线程
List<List<Map<String,Object>>> lists = splitList(listMap, 100);
CountDownLatch countDownLatch = new CountDownLatch(lists.size());
for (List<Map<String,Object>> listSub:lists) {
this.executeAsync(listSub, countDownLatch);
}
try {
countDownLatch.await(); //保证之前的所有的线程都执行完成,才会走下面的;
// 这样就可以在下面拿到所有线程执行完的集合结果
} catch (Exception e) {
log.error("阻塞异常:"+e.getMessage());
}
mapOut.put("status","ok");
return mapOut;
}
@Async("asyncServiceExecutor")
public void executeAsync(List<Map<String,Object>> mapList, CountDownLatch countDownLatch) {
try{
log.warn("线程开始");
//异步线程要做的事情
oldDataImportMapper.myInserSjTi(mapList);
log.warn("线程结束");
}finally {
countDownLatch.countDown();// 很关键, 无论上面程序是否异常必须执行countDown,否则await无法释放
}
}
/**
* 拆分Map集合
* @param list
* @param splitNum
* @return
*/
public List<List<Map<String,Object>>> splitList(List<Map<String,Object>> list, int splitNum){
List<List<Map<String,Object>>> resultMapList = new LinkedList<>();
List<Map<String,Object>> mapList = new LinkedList<>();
for (int i = 0;i < list.size(); i++){
Map<String,Object> map = list.get(i);
mapList.add(map);
if (i == 0) {
continue;
} else if (i == list.size() -1){ // 补充最后不满足取余数量的数据
resultMapList.add(mapList);
} else {
if (i % splitNum == 0){
resultMapList.add(mapList);
mapList = new LinkedList<>();
}
}
}
return resultMapList;
}
}
本文参考链接:
线程池ThreadPoolExecutor详解
性能爆表:SpringBoot利用线程池批量插入百万级数据实测!ThreadPoolTaskExecutor