Druid Source Code Walkthrough: The Connection Pool init() Method

A source-level reading of the Druid connection pool's init() method

This post walks through the initialization logic in the core DruidDataSource code of the Druid connection pool, focusing on the init method and, in particular, giving a detailed analysis of the createAndStartCreatorThread and createAndStartDestroyThread threads it starts.

Druid#init sequence diagram

(sequence diagram image omitted)

Parsing the init method

// init() is not invoked when the DataSource object is constructed; it runs lazily on the first getConnection() call
public void init() throws SQLException {
        // fast path: return if already initialized
        if (inited) {
            return;
        }

        // load the DruidDriver instance first; bug fix for a dead lock, see issue #2980
        DruidDriver.getInstance();
        // the pool lock guards the whole initialization
        final ReentrantLock lock = this.lock;
        try {
            // acquire the lock, allowing interruption
            lock.lockInterruptibly();
        } catch (InterruptedException e) {
            throw new SQLException("interrupt", e);
        }

        boolean init = false;
        try {
            // double-check under the lock
            if (inited) {
                return;
            }

            initStackTrace = Utils.toString(Thread.currentThread().getStackTrace());
            // assign a globally unique id to this data source
            this.id = DruidDriver.createDataSourceId();
            if (this.id > 1) {
                // more than one data source: offset the id seeds (AtomicLongFieldUpdater<DruidAbstractDataSource>)
                long delta = (this.id - 1) * 100000;
                this.connectionIdSeedUpdater.addAndGet(this, delta);
                this.statementIdSeedUpdater.addAndGet(this, delta);
                this.resultSetIdSeedUpdater.addAndGet(this, delta);
                this.transactionIdSeedUpdater.addAndGet(this, delta);
            }

            if (this.jdbcUrl != null) {
                // normalize the JDBC url and parse wrapped-driver settings from it
                this.jdbcUrl = this.jdbcUrl.trim();
                initFromWrapDriverUrl();
            }

            for (Filter filter : filters) {
                // initialize each configured filter
                filter.init(this);
            }

            if (this.dbTypeName == null || this.dbTypeName.length() == 0) {
                // infer the database type (dialect) from the JDBC url
                this.dbTypeName = JdbcUtils.getDbType(jdbcUrl, null);
            }

            DbType dbType = DbType.of(this.dbTypeName);
            if (dbType == DbType.mysql
                    || dbType == DbType.mariadb
                    || dbType == DbType.oceanbase
                    || dbType == DbType.ads) {
                boolean cacheServerConfigurationSet = false;
                if (this.connectProperties.containsKey("cacheServerConfiguration")) {
                    cacheServerConfigurationSet = true;
                } else if (this.jdbcUrl.indexOf("cacheServerConfiguration") != -1) {
                    cacheServerConfigurationSet = true;
                }
                if (cacheServerConfigurationSet) {
                    this.connectProperties.put("cacheServerConfiguration", "true");
                }
            }
            // parameter sanity checks (could arguably be refactored into a chain of checks)
            if (maxActive <= 0) {
                throw new IllegalArgumentException("illegal maxActive " + maxActive);
            }

            if (maxActive < minIdle) {
                throw new IllegalArgumentException("illegal maxActive " + maxActive);
            }

            if (getInitialSize() > maxActive) {
                throw new IllegalArgumentException("illegal initialSize " + this.initialSize + ", maxActive " + maxActive);
            }

            if (timeBetweenLogStatsMillis > 0 && useGlobalDataSourceStat) {
                throw new IllegalArgumentException("timeBetweenLogStatsMillis not support useGlobalDataSourceStat=true");
            }

            if (maxEvictableIdleTimeMillis < minEvictableIdleTimeMillis) {
                throw new SQLException("maxEvictableIdleTimeMillis must be grater than minEvictableIdleTimeMillis");
            }

            if (keepAlive && keepAliveBetweenTimeMillis <= timeBetweenEvictionRunsMillis) {
                throw new SQLException("keepAliveBetweenTimeMillis must be grater than timeBetweenEvictionRunsMillis");
            }

            if (this.driverClass != null) {
                this.driverClass = driverClass.trim();
            }
            // load additional filters via the SPI ServiceLoader
            initFromSPIServiceLoader();
            // resolve and load the driver class
            resolveDriver();
            // db-type specific checks
            initCheck();
            // set up the ExceptionSorter for this driver
            initExceptionSorter();
            // set up the connection validity checker
            initValidConnectionChecker();
            // sanity-check the validation query settings
            validationQueryCheck();
            // when the global data source stat is enabled
            if (isUseGlobalDataSourceStat()) {
                // reuse the shared global JdbcDataSourceStat
                dataSourceStat = JdbcDataSourceStat.getGlobal();
                if (dataSourceStat == null) {
                    // question: could this race and create more than one "global" instance?
                    dataSourceStat = new JdbcDataSourceStat("Global", "Global", this.dbTypeName);
                    JdbcDataSourceStat.setGlobal(dataSourceStat);
                }
                if (dataSourceStat.getDbType() == null) {
                    dataSourceStat.setDbType(this.dbTypeName);
                }
            } else {
                // not using the global stat: create a stat object dedicated to this data source
                dataSourceStat = new JdbcDataSourceStat(this.name, this.jdbcUrl, this.dbTypeName, this.connectProperties);
            }
            dataSourceStat.setResetStatEnable(this.resetStatEnable);
            // allocate the holder arrays, all sized to maxActive
            connections = new DruidConnectionHolder[maxActive];
            evictConnections = new DruidConnectionHolder[maxActive];
            keepAliveConnections = new DruidConnectionHolder[maxActive];

            SQLException connectError = null;
            // asynchronous init: when a createScheduler is configured, submit connection-creation tasks instead of creating connections inline
            if (createScheduler != null && asyncInit) {
                for (int i = 0; i < initialSize; ++i) {
                    // each submitted task (CreateConnectionTask) creates one connection on the scheduler
                    submitCreateTask(true);
                }
            } else if (!asyncInit) {
                // init connections
                while (poolingCount < initialSize) {
                    // poolingCount is the number of idle connections currently sitting in the connections array
                    try {
                        // create physical (JDBC) connections synchronously until initialSize is reached
                        PhysicalConnectionInfo pyConnectInfo = createPhysicalConnection();
                        DruidConnectionHolder holder = new DruidConnectionHolder(this, pyConnectInfo);
                        connections[poolingCount++] = holder;
                    } catch (SQLException ex) {
                        LOG.error("init datasource error, url: " + this.getUrl(), ex);
                        if (initExceptionThrow) {
                            connectError = ex;
                            break;
                        } else {
                            Thread.sleep(3000);
                        }
                    }
                }

                if (poolingCount > 0) {
                    // record the peak pooled-connection count and when it was reached
                    poolingPeak = poolingCount;
                    poolingPeakTime = System.currentTimeMillis();
                }
            }
            // start the stat-logging thread (analyzed separately below)
            createAndLogThread();
            // start the connection-creator thread (analyzed separately below)
            createAndStartCreatorThread();
            // start the connection-destroyer thread (analyzed separately below)
            createAndStartDestroyThread();
            // wait until the creator and destroyer threads have started (each counts the latch down)
            initedLatch.await();
            init = true;

            initedTime = new Date();
            // register the JMX MBean for this data source
            registerMbean();

            if (connectError != null && poolingCount == 0) {
                throw connectError;
            }

            if (keepAlive) {
                // async fill to minIdle
                if (createScheduler != null) {
                    for (int i = 0; i < minIdle; ++i) {
                        // asynchronously fill the pool up to minIdle
                        submitCreateTask(true);
                    }
                } else {
                    this.emptySignal();
                }
            }

        } catch (SQLException e) {
            LOG.error("{dataSource-" + this.getID() + "} init error", e);
            throw e;
        } catch (InterruptedException e) {
            throw new SQLException(e.getMessage(), e);
        } catch (RuntimeException e){
            LOG.error("{dataSource-" + this.getID() + "} init error", e);
            throw e;
        } catch (Error e){
            LOG.error("{dataSource-" + this.getID() + "} init error", e);
            throw e;

        } finally {
            // mark initialization as finished; note this is set to true even if init failed with an exception
            inited = true;
            // release the pool lock
            lock.unlock();

            if (init && LOG.isInfoEnabled()) {
                String msg = "{dataSource-" + this.getID();

                if (this.name != null && !this.name.isEmpty()) {
                    msg += ",";
                    msg += this.name;
                }

                msg += "} inited";

                LOG.info(msg);
            }
        }
    }
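
Since init() only runs lazily, the simplest way to exercise everything above is to configure a DruidDataSource and call getConnection() once. The minimal usage sketch below is my own; the JDBC url, credentials, and pool sizes are placeholders, not values taken from the source.

import java.sql.Connection;
import com.alibaba.druid.pool.DruidDataSource;

DruidDataSource dataSource = new DruidDataSource();
dataSource.setUrl("jdbc:mysql://localhost:3306/demo"); // placeholder url
dataSource.setUsername("demo");                        // placeholder credentials
dataSource.setPassword("demo");
dataSource.setInitialSize(5);   // connections created synchronously inside init()
dataSource.setMinIdle(5);
dataSource.setMaxActive(20);

// the first getConnection() call triggers init(): it validates the configuration,
// pre-creates initialSize connections and starts the creator/destroyer daemon threads
try (Connection conn = dataSource.getConnection()) {
    // use the connection ...
}

The later configuration snippets in this post reuse this dataSource variable.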

createAndLogThread

// starts the stats-logging thread (a daemon thread)
private void createAndLogThread() {
    // only start when a logging interval has been configured
    if (this.timeBetweenLogStatsMillis <= 0) {
        return;
    }

    String threadName = "Druid-ConnectionPool-Log-" + System.identityHashCode(this);
    logStatsThread = new LogStatsThread(threadName);
    logStatsThread.start();

    this.resetStatEnable = false;
}
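
createAndLogThread only starts when timeBetweenLogStatsMillis is positive; by default it is 0 and no logging thread is created. A hedged example of turning it on, reusing the dataSource from the sketch above (the interval is arbitrary, and recall from the init() checks that it cannot be combined with useGlobalDataSourceStat=true):

// print pool statistics every 5 minutes; as shown above, enabling this also forces resetStatEnable to false
dataSource.setTimeBetweenLogStatsMillis(5 * 60 * 1000L);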

createAndStartCreatorThread sequence diagram

(sequence diagram image omitted)

createAndStartCreatorThread source

// daemon thread: every connection Druid pools is created by CreateConnectionThread

protected void createAndStartCreatorThread() {
    if (createScheduler == null) {
        String threadName = "Druid-ConnectionPool-Create-" + System.identityHashCode(this);
        createConnectionThread = new CreateConnectionThread(threadName);
        createConnectionThread.start();
        return;
    }
    // question: should there be exception handling here? if an exception were thrown before countDown, the latch would never be released
    initedLatch.countDown();
}
// Whenever a new connection is needed (the underlying JDBC connections are not enough), this dedicated thread creates a JDBC connection, wraps it as a physical connection, and hands it to put(connection) to be pooled in the connections array.
// Other code paths signal the empty condition via emptySignal(); the creator thread wakes up and builds a new connection.
// When a createScheduler is configured, this.emptySignal() triggers this.submitCreateTask(false) instead.
public class CreateConnectionThread extends Thread {

    public CreateConnectionThread(String name){
        super(name);
        this.setDaemon(true);
    }

    public void run() {
        // countDown here as well: when no createScheduler is configured the outer method never counts down, so the latch is only released once this thread is actually running
        initedLatch.countDown();

        long lastDiscardCount = 0;
        int errorCount = 0;
        for (;;) {
            // addLast
            try {
                // acquire the pool lock
                lock.lockInterruptibly();
            } catch (InterruptedException e2) {
                break;
            }
            // total number of connections discarded so far
            long discardCount = DruidDataSource.this.discardCount;
            // has the discard count changed since the previous loop iteration?
            boolean discardChanged = discardCount - lastDiscardCount > 0;
            // remember the current value for the next iteration
            lastDiscardCount = discardCount;

            try {
                boolean emptyWait = true;

                if (createError != null
                        && poolingCount == 0
                        && !discardChanged) {
        
                    emptyWait = false;
                }

                if (emptyWait
                        && asyncInit && createCount < initialSize) {
                    emptyWait = false;
                }

                if (emptyWait) {
                    // create connections only when some thread is actually waiting for one
                    if (poolingCount >= notEmptyWaitThreadCount //
                            && (!(keepAlive && activeCount + poolingCount < minIdle))
                            && !isFailContinuous()
                    ) {
                        empty.await();
                    }

                    // never create more than maxActive connections in total
                    if (activeCount + poolingCount >= maxActive) {
                        empty.await();
                        continue;
                    }
                }

            } catch (InterruptedException e) {
                lastCreateError = e;
                lastErrorTimeMillis = System.currentTimeMillis();

                if ((!closing) && (!closed)) {
                    LOG.error("create connection Thread Interrupted, url: " + jdbcUrl, e);
                }
                break;
            } finally {
                lock.unlock();
            }

            PhysicalConnectionInfo connection = null;

            try {
                // create the physical JDBC connection
                connection = createPhysicalConnection();
            } catch (SQLException e) {
                LOG.error("create connection SQLException, url: " + jdbcUrl + ", errorCode " + e.getErrorCode()
                          + ", state " + e.getSQLState(), e);

                errorCount++;
                if (errorCount > connectionErrorRetryAttempts && timeBetweenConnectErrorMillis > 0) {
                    // fail over retry attempts
                    setFailContinuous(true);
                    if (failFast) {
                        lock.lock();
                        try {
                            notEmpty.signalAll();
                        } finally {
                            lock.unlock();
                        }
                    }

                    if (breakAfterAcquireFailure) {
                        break;
                    }

                    try {
                        Thread.sleep(timeBetweenConnectErrorMillis);
                    } catch (InterruptedException interruptEx) {
                        break;
                    }
                }
            } catch (RuntimeException e) {
                LOG.error("create connection RuntimeException", e);
                setFailContinuous(true);
                continue;
            } catch (Error e) {
                LOG.error("create connection Error", e);
                setFailContinuous(true);
                break;
            }

            if (connection == null) {
                continue;
            }
            // hand the connection to the pool (appended to the connections array)
            boolean result = put(connection);
            if (!result) {
                // the pool rejected it (e.g. the pool is closing), so close the physical connection
                JdbcUtils.close(connection.getPhysicalConnection());
                LOG.info("put physical connection to pool failed.");
            }

            errorCount = 0; // reset errorCount

            if (closing || closed) {
                break;
            }
        }
    }
}
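
The creator thread and the threads calling getConnection() coordinate through two Conditions on the same lock: consumers signal empty when the pool runs dry (emptySignal()), and the producer signals notEmpty after put() adds a connection. The stripped-down sketch below shows only that handshake pattern; it is not Druid's actual code, and TinyPool with its fields is a made-up example.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

class TinyPool {
    private final ReentrantLock lock = new ReentrantLock();
    private final Condition empty = lock.newCondition();     // the producer waits here
    private final Condition notEmpty = lock.newCondition();  // consumers wait here
    private final Deque<String> connections = new ArrayDeque<>();
    private final int maxActive = 8;

    // consumer side: roughly what getConnection does
    String take() throws InterruptedException {
        lock.lock();
        try {
            while (connections.isEmpty()) {
                empty.signal();      // wake the creator thread ("emptySignal")
                notEmpty.await();    // wait until a connection has been put back
            }
            return connections.pollLast();
        } finally {
            lock.unlock();
        }
    }

    // producer side: roughly what CreateConnectionThread plus put do
    void creatorLoop() throws InterruptedException {
        for (;;) {
            lock.lock();
            try {
                while (connections.size() >= maxActive) {
                    empty.await();   // nothing to do until someone drains the pool
                }
            } finally {
                lock.unlock();
            }
            String conn = "physical-connection"; // stands in for createPhysicalConnection()
            lock.lock();
            try {
                connections.addLast(conn);
                notEmpty.signal();   // wake one waiting consumer
            } finally {
                lock.unlock();
            }
        }
    }
}

Druid's real implementation differs in the details (it stores holders in a plain array, tracks notEmptyWaitThreadCount, and handles errors and maxActive as shown above), but the empty/notEmpty handshake is the same idea.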

createAndStartDestroyThread sequence diagram

(sequence diagram image omitted)

createAndStartDestroyThread source

protected void createAndStartDestroyThread() {
    // the task that shrinks the pool and reclaims abandoned connections
    destroyTask = new DestroyTask();

    if (destroyScheduler != null) {
        long period = timeBetweenEvictionRunsMillis;
        if (period <= 0) {
            period = 1000;
        }
        destroySchedulerFuture = destroyScheduler.scheduleAtFixedRate(destroyTask, period, period,
                                                                      TimeUnit.MILLISECONDS);
        initedLatch.countDown();
        return;
    }

    String threadName = "Druid-ConnectionPool-Destroy-" + System.identityHashCode(this);
    destroyConnectionThread = new DestroyConnectionThread(threadName);
    destroyConnectionThread.start();
}
public class DestroyConnectionThread extends Thread {

    public DestroyConnectionThread(String name){
        super(name);
        this.setDaemon(true);
    }

    public void run() {
        initedLatch.countDown();

        for (;;) {
            // scan and evict from the front of the pool
            try {
                // the pool is closed or closing: stop this thread
                if (closed || closing) {
                    break;
                }
                // timeBetweenEvictionRunsMillis: how often (in milliseconds) the destroy thread scans the pool.
                // On each scan: 1. idle connections beyond minIdle whose idle time exceeds minEvictableIdleTimeMillis are physically closed;
                // 2. connections within minIdle are kept, unless their idle time exceeds maxEvictableIdleTimeMillis or a keep-alive check fails.
                if (timeBetweenEvictionRunsMillis > 0) {
                    Thread.sleep(timeBetweenEvictionRunsMillis);
                } else {
                    Thread.sleep(1000); //
                }

                if (Thread.interrupted()) {
                    break;
                }
                // run the destroy task (shrink + removeAbandoned)
                destroyTask.run();
            } catch (InterruptedException e) {
                break;
            }
        }
    }

}

DestroyTask sequence diagram

(sequence diagram image omitted)

DestroyTask source analysis

// The destroy task has two core responsibilities:

// 1. evict idle connections (shrink)

// 2. reclaim connections that have been held too long (removeAbandoned)

// the destroy task
public class DestroyTask implements Runnable {
        public DestroyTask() {

        }

        @Override
        public void run() {
            // evict connections that have exceeded their allowed idle time
            shrink(true, keepAlive);

            if (isRemoveAbandoned()) {
                // reclaim connections held past the abandoned timeout
                removeAbandoned();
            }
        }

    }

Evicting idle connections

// A connection that sits unused for a long time wastes resources if it is never cleaned up, so the pool periodically checks for connections that have been idle too long and physically closes them.

// Question: why did Druid switch the pool storage from a List to a fixed-size array?
public void shrink(boolean checkTime, boolean keepAlive) {
    try {
        // acquire the pool lock
        lock.lockInterruptibly();
    } catch (InterruptedException e) {
        return;
    }

    boolean needFill = false;
    // number of connections selected for eviction
    int evictCount = 0;
    // number of connections selected for a keep-alive check
    int keepAliveCount = 0;
    // fatal errors that occurred since the last shrink run
    int fatalErrorIncrement = fatalErrorCount - fatalErrorCountLastShrink;
    fatalErrorCountLastShrink = fatalErrorCount;
    
    try {
        if (!inited) {
            // nothing to do before the pool has been initialized
            return;
        }
        // checkCount: how many idle connections exceed minIdle and are candidates for eviction
        final int checkCount = poolingCount - minIdle;
        final long currentTimeMillis = System.currentTimeMillis();
        for (int i = 0; i < poolingCount; ++i) {
            DruidConnectionHolder connection = connections[i];

            if ((onFatalError || fatalErrorIncrement > 0) && (lastFatalErrorTimeMillis > connection.connectTimeMillis))  {
                // this connection was created before the last fatal error: send it to the keep-alive check
                keepAliveConnections[keepAliveCount++] = connection;
                continue;
            }

            if (checkTime) {
                if (phyTimeoutMillis > 0) {
                    // physical connection age = now - the time the connection was established
                    long phyConnectTimeMillis = currentTimeMillis - connection.connectTimeMillis;
                    if (phyConnectTimeMillis > phyTimeoutMillis) {
                        // older than phyTimeoutMillis: mark it for eviction
                        evictConnections[evictCount++] = connection;
                        continue;
                    }
                }
                // idle time = now - the connection's last active time
                long idleMillis = currentTimeMillis - connection.lastActiveTimeMillis;

                if (idleMillis < minEvictableIdleTimeMillis
                        && idleMillis < keepAliveBetweenTimeMillis
                ) {
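                    // note (my reading of the source): the connections array is ordered from longest-idle (index 0)
                    // to most recently returned, so once one connection is still "fresh" the rest are too and the scan can stop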
                    break;
                }

                if (idleMillis >= minEvictableIdleTimeMillis) {
                    if (checkTime && i < checkCount) {
                        // idle longer than minEvictableIdleTimeMillis and i < checkCount (the surplus above minIdle): evict it.
                        // The i < checkCount guard means at most poolingCount - minIdle connections are evicted per run, so minIdle connections stay in the pool.
                        evictConnections[evictCount++] = connection;
                        continue;
                    } else if (idleMillis > maxEvictableIdleTimeMillis) { // even connections within minIdle are evicted once idle longer than maxEvictableIdleTimeMillis
                        evictConnections[evictCount++] = connection;
                        continue;
                    }
                }

                if (keepAlive && idleMillis >= keepAliveBetweenTimeMillis) {
                    // keepAlive is enabled and the connection has been idle longer than keepAliveBetweenTimeMillis: schedule a keep-alive check
                    keepAliveConnections[keepAliveCount++] = connection;
                }
            } else {
                if (i < checkCount) {
                    // checkTime == false: simply evict the first checkCount connections
                    evictConnections[evictCount++] = connection;
                } else {
                    break;
                }
            }
        }
        // slots to remove from the pool array = evicted + keep-alive candidates
        int removeCount = evictCount + keepAliveCount;
        if (removeCount > 0) {
            // compact the connections array, dropping the removed slots
            System.arraycopy(connections, removeCount, connections, 0, poolingCount - removeCount);
            Arrays.fill(connections, poolingCount - removeCount, poolingCount, null);
            poolingCount -= removeCount;
        }
        // add this run's keep-alive candidates to the cumulative keep-alive check count
        keepAliveCheckCount += keepAliveCount;

        if (keepAlive && poolingCount + activeCount < minIdle) {
            // pooled + active connections fall below minIdle, so the pool needs refilling
            needFill = true;
        }
    } finally {
        // release the pool lock
        lock.unlock();
    }

    if (evictCount > 0) {
        // physically close every connection marked for eviction
        for (int i = 0; i < evictCount; ++i) {
            DruidConnectionHolder item = evictConnections[i];
            Connection connection = item.getConnection();
            JdbcUtils.close(connection);
            destroyCountUpdater.incrementAndGet(this);
        }
        Arrays.fill(evictConnections, null);
    }

    if (keepAliveCount > 0) {
        // run a validation ("keep-alive ping") on each keep-alive candidate
        for (int i = keepAliveCount - 1; i >= 0; --i) {
            // iterate from the last candidate back to the first
            DruidConnectionHolder holer = keepAliveConnections[i];
            Connection connection = holer.getConnection();
            holer.incrementKeepAliveCheckCount();

            boolean validate = false;
            try {
                // validate the connection (e.g. run the validation query)
                this.validateConnection(connection);
                validate = true;
            } catch (Throwable error) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("keepAliveErr", error);
                }
                // skip
            }
            // discard the connection if validation failed
            boolean discard = !validate;
            if (validate) {
                // refresh the last keep-alive timestamp
                holer.lastKeepTimeMillis = System.currentTimeMillis();
                // put the DruidConnectionHolder back into the pool array
                boolean putOk = put(holer, 0L, true);
                if (!putOk) {
                    discard = true;
                }
            }

            if (discard) {
                try {
                    // close the physical connection
                    connection.close();
                } catch (Exception e) {
                    // skip
                }
                // take the pool lock to update counters
                lock.lock();
                try {
                    // bump the discard counter
                    discardCount++;

                    if (activeCount + poolingCount <= minIdle) {
                        // signal the creator thread to build a replacement connection
                        emptySignal();
                    }
                } finally {
                    // release the lock
                    lock.unlock();
                }
            }
        }
        // record these keep-alive checks in the data source stats
        this.getDataSourceStat().addKeepAliveCheckCount(keepAliveCount);
        Arrays.fill(keepAliveConnections, null);
    }
    // refill the pool when the current connection count has dropped below minIdle
    if (needFill) {
        // take the lock before signalling
        lock.lock();
        try {
        
            // connections to add = minIdle - (active + pooled + pending create tasks)
            int fillCount = minIdle - (activeCount + poolingCount + createTaskCount);
            for (int i = 0; i < fillCount; ++i) {
                // signal the creator thread once for each missing connection
                emptySignal();
            }
        } finally {
            // release the lock
            lock.unlock();
        }
    } else if (onFatalError || fatalErrorIncrement > 0) {
        lock.lock();
        try {
            emptySignal();
        } finally {
            lock.unlock();
        }
    }
}
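
All of the thresholds used by shrink() come from pool configuration. The snippet below, continuing the dataSource from the earlier sketch, shows the relevant settings with illustrative values of my own choosing; remember from the init() checks that maxEvictableIdleTimeMillis must be greater than minEvictableIdleTimeMillis, and keepAliveBetweenTimeMillis greater than timeBetweenEvictionRunsMillis:

dataSource.setTimeBetweenEvictionRunsMillis(60 * 1000L);   // how often the destroy thread scans the pool
dataSource.setMinEvictableIdleTimeMillis(5 * 60 * 1000L);  // idle connections beyond minIdle are closed after 5 minutes
dataSource.setMaxEvictableIdleTimeMillis(25 * 60 * 1000L); // even connections within minIdle are closed after 25 minutes
dataSource.setKeepAlive(true);                             // validate idle connections within minIdle instead of dropping them
dataSource.setKeepAliveBetweenTimeMillis(2 * 60 * 1000L);  // keep-alive check interval, must exceed the scan interval
dataSource.setPhyTimeoutMillis(-1);                        // optional hard cap on a physical connection's lifetime (disabled here)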

Reclaiming abandoned connections

// When a thread holds a connection for a long time without returning it (usually a program fault or a leak), the pool can run out of connections. The pool therefore periodically checks for connections that have been held longer than the configured timeout and forcibly reclaims them.

public int removeAbandoned() {
    // number of connections reclaimed in this run
    int removeCount = 0;

    long currrentNanos = System.nanoTime();
    // connections to reclaim. Question: why a List here, while the idle-eviction path uses pre-allocated arrays?
    List<DruidPooledConnection> abandonedList = new ArrayList<DruidPooledConnection>();
    // lock the active-connections map
    activeConnectionLock.lock();
    try {
        Iterator<DruidPooledConnection> iter = activeConnections.keySet().iterator();

        for (; iter.hasNext();) {
            DruidPooledConnection pooledConnection = iter.next();
            // skip connections that are still executing
            if (pooledConnection.isRunning()) {
                continue;
            }
            // how long this connection has been checked out, converted from nanoseconds to milliseconds
            long timeMillis = (currrentNanos - pooledConnection.getConnectedTimeNano()) / (1000 * 1000);

            if (timeMillis >= removeAbandonedTimeoutMillis) {
                // held longer than removeAbandonedTimeoutMillis: remove it from the active set
                iter.remove();
                pooledConnection.setTraceEnable(false);
                // and remember it for reclamation
                abandonedList.add(pooledConnection);
            }
        }
    } finally {
        // release the lock
        activeConnectionLock.unlock();
    }

    if (abandonedList.size() > 0) {
        // reclaim each abandoned connection
        for (DruidPooledConnection pooledConnection : abandonedList) {
            final ReentrantLock lock = pooledConnection.lock;
            lock.lock();
            try {
                // skip connections that have already been released/disabled
                if (pooledConnection.isDisable()) {
                    continue;
                }
            } finally {
                lock.unlock();
            }
            // close the connection and mark it abandoned
            JdbcUtils.close(pooledConnection);
            pooledConnection.abandond();
            // bump the abandoned-connections counter
            removeAbandonedCount++;
            // and the count reclaimed in this run
            removeCount++;

            if (isLogAbandoned()) {
                // log the owner thread and where the connection was opened
                StringBuilder buf = new StringBuilder();
                buf.append("abandon connection, owner thread: ");
                buf.append(pooledConnection.getOwnerThread().getName());
                buf.append(", connected at : ");
                buf.append(pooledConnection.getConnectedTimeMillis());
                buf.append(", open stackTrace\n");

                StackTraceElement[] trace = pooledConnection.getConnectStackTrace();
                for (int i = 0; i < trace.length; i++) {
                    buf.append("\tat ");
                    buf.append(trace[i].toString());
                    buf.append("\n");
                }

                buf.append("ownerThread current state is " + pooledConnection.getOwnerThread().getState()
                           + ", current stackTrace\n");
                trace = pooledConnection.getOwnerThread().getStackTrace();
                for (int i = 0; i < trace.length; i++) {
                    buf.append("\tat ");
                    buf.append(trace[i].toString());
                    buf.append("\n");
                }

                LOG.error(buf.toString());
            }
        }
    }

    return removeCount;
}

Summary

While reading through the init method today, the creator and destroyer threads were the most interesting part. The design is quite elegant: daemon threads monitor the pool, automatically create and top up connections, and automatically destroy stale ones, all without interfering with the main business flow. A few open questions remain, and I will keep them in mind while reading the rest of the source to understand why things were designed this way. One more observation: both daemon threads rely on Thread.sleep for pacing. What is the reasoning behind that, and could it be replaced with a ScheduledThreadPoolExecutor that runs these checks on a fixed schedule? That would also require working out whether the threads of a ScheduledThreadPoolExecutor can be made daemon threads, which deserves a closer look.
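
On that last question: a ScheduledThreadPoolExecutor can indeed run on daemon threads if it is given a ThreadFactory that marks them as such, and the source above shows Druid already accepts an external scheduler through its destroyScheduler (and createScheduler) fields. A minimal sketch, with made-up class and thread names, of what such a daemon-thread scheduler looks like:

import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class DaemonSchedulerSketch {
    public static void main(String[] args) throws InterruptedException {
        // a ThreadFactory that produces daemon threads, so the scheduler never keeps the JVM alive
        ScheduledThreadPoolExecutor scheduler = new ScheduledThreadPoolExecutor(1, runnable -> {
            Thread t = new Thread(runnable, "demo-destroy-scheduler");
            t.setDaemon(true);
            return t;
        });

        // periodic "destroy" check, analogous to destroyScheduler.scheduleAtFixedRate(destroyTask, ...) above
        scheduler.scheduleAtFixedRate(
                () -> System.out.println("periodic pool check"),
                1000, 1000, TimeUnit.MILLISECONDS);

        Thread.sleep(3500); // let a few runs happen before the demo JVM exits
    }
}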
