SpringBoot利用线程池ThreadPoolTaskExecutor批量插入百万级数据

6 篇文章 1 订阅

前言
开发目的:

提高百万级数据插入效率。

采取方案:

利用ThreadPoolTaskExecutor多线程批量插入。

具体实现细节
application-dev.properties添加线程池配置信息

# 异步线程配置
# 配置核心线程数
async.executor.thread.core_pool_size = 30
# 配置最大线程数
async.executor.thread.max_pool_size = 30
# 配置队列大小
async.executor.thread.queue_capacity = 99988
# 配置线程池中的线程的名称前缀
async.executor.thread.name.prefix = async-importDB-

spring容器注入线程池bean对象

import lombok.extern.slf4j.Slf4j;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.annotation.EnableAsync;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;

import java.util.concurrent.Executor;
import java.util.concurrent.ThreadPoolExecutor;


@Configuration
@EnableAsync
@Slf4j
public class ExecutorConfig {

    // NOTE: the explicit "public static final Logger log" field that used to be
    // declared here was removed — @Slf4j already generates a "log" field, and
    // declaring both is a duplicate-field compile error.

    /** Core pool size: threads kept alive even when idle. */
    @Value("${async.executor.thread.core_pool_size}")
    private int corePoolSize;

    /** Maximum pool size: upper bound on threads created under load. */
    @Value("${async.executor.thread.max_pool_size}")
    private int maxPoolSize;

    /** Capacity of the queue that holds tasks while all core threads are busy. */
    @Value("${async.executor.thread.queue_capacity}")
    private int queueCapacity;

    /** Name prefix applied to every pool thread (useful in thread dumps/logs). */
    @Value("${async.executor.thread.name.prefix}")
    private String namePrefix;

    /**
     * Builds the executor used by {@code @Async("asyncServiceExecutor")} methods.
     *
     * @return a fully initialized {@link ThreadPoolTaskExecutor}
     */
    @Bean(name = "asyncServiceExecutor")
    public Executor asyncServiceExecutor() {
        // info, not warn: creating the bean is normal startup behavior.
        log.info("start asyncServiceExecutor");

        ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
        executor.setCorePoolSize(corePoolSize);
        executor.setMaxPoolSize(maxPoolSize);
        executor.setQueueCapacity(queueCapacity);
        executor.setThreadNamePrefix(namePrefix);
        // Rejection policy once the pool and queue are saturated:
        // CALLER_RUNS makes the submitting thread execute the task itself,
        // which throttles producers instead of dropping work.
        executor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
        // Must be called before first use when the bean is built manually.
        executor.initialize();
        return executor;
    }

}

创建异步线程 业务类

import com.newdo.base.CFunc;
import com.newdo.dynamicDataSource.datasource.DBIdentifier;
import lombok.extern.slf4j.Slf4j;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Service;

import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;

@Service
@Slf4j
public class OldDataImportModel implements OldDataImportService {

    public static final Logger log = LoggerFactory.getLogger(OldDataImportModel.class);

    @Autowired
    OldDataImportMapper oldDataImportMapper;

	@Override
    public Map<String, Object> importDataList(Map<String, Object> mapArgu) {
        Map<String,Object> mapOut = new HashMap<>();

		// 获取要插入的数据
        List<Map<String,Object>> listMap = oldDataImportMapper.getSjTiList(mapArgu);

        //每100条数据插入开一个线程
        List<List<Map<String,Object>>> lists = splitList(listMap, 100);
        CountDownLatch countDownLatch = new CountDownLatch(lists.size());

        for (List<Map<String,Object>> listSub:lists) {
            this.executeAsync(listSub, countDownLatch);
        }
        try {
            countDownLatch.await(); //保证之前的所有的线程都执行完成,才会走下面的;
            // 这样就可以在下面拿到所有线程执行完的集合结果
        } catch (Exception e) {
            log.error("阻塞异常:"+e.getMessage());
        }

        mapOut.put("status","ok");
        return mapOut;
    }

    @Async("asyncServiceExecutor")
    public void executeAsync(List<Map<String,Object>> mapList, CountDownLatch countDownLatch) {
        try{
            log.warn("线程开始");
            //异步线程要做的事情
            oldDataImportMapper.myInserSjTi(mapList);
            log.warn("线程结束");
        }finally {
            countDownLatch.countDown();// 很关键, 无论上面程序是否异常必须执行countDown,否则await无法释放
        }
    }

    /**
     * 拆分Map集合
     * @param list
     * @param splitNum
     * @return
     */
    public List<List<Map<String,Object>>> splitList(List<Map<String,Object>> list, int splitNum){
        List<List<Map<String,Object>>> resultMapList = new LinkedList<>();
        List<Map<String,Object>> mapList = new LinkedList<>();
        for (int i = 0;i < list.size(); i++){
            Map<String,Object> map = list.get(i);
            mapList.add(map);
            if (i == 0) {
                continue;
            } else if (i == list.size() -1){ // 补充最后不满足取余数量的数据
                resultMapList.add(mapList);
            } else {
                if (i % splitNum == 0){
                    resultMapList.add(mapList);
                    mapList = new LinkedList<>();
                }
            }
        }
        return resultMapList;
    }
}

本文参考链接:
线程池ThreadPoolExecutor详解
性能爆表:SpringBoot利用线程池批量插入百万级数据实测!ThreadPoolTaskExecutor

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值