druid源码分析—DruidDataSource的基本分析(二)
getConnectionInternal基本分析
DruidDataSource 参数记录
// Number of connections currently checked out (in use by callers).
private int activeCount = 0;
// Number of idle connections held in the pool; init() creates connections and sets this.
private int poolingCount = 0;
// Cumulative count of connection acquisitions (incremented on each getConnectionInternal call).
private long connectCount = 0L;
// Highest activeCount ever observed (peak concurrent usage).
private int activePeak = 0;
// Timestamp (millis) at which activePeak was recorded.
private long activePeakTime = 0;
// Whether the pool has been closed; close() sets this to true.
private volatile boolean closed = false;
// Whether the pool is usable; when false, acquisition throws disableException.
// NOTE(review): presumably close() sets this to FALSE — the original note said
// "true", which contradicts the default value of true. Confirm against close().
private volatile boolean enable = true;
// Exception thrown to callers while the pool is disabled; created when the pool is closed/disabled.
private volatile DataSourceDisableException disableException = null;
// Maximum number of threads allowed to wait for a connection; -1 (default) means no limit.
// Settable via external properties. When > 0 and notEmptyWaitThreadCount >= maxWaitThreadCount,
// getConnectionInternal throws an SQLException instead of queueing another waiter.
protected volatile int maxWaitThreadCount = -1;
// Core storage for the pooled connections.
private volatile DruidConnectionHolder[] connections;
// Atomic field updater for connectErrorCount (count of connection-acquisition errors).
protected static final AtomicLongFieldUpdater<DruidDataSource> connectErrorCountUpdater
        = AtomicLongFieldUpdater.newUpdater(DruidDataSource.class, "connectErrorCount");
DruidAbstractDataSource 参数记录
// Default upper bound for concurrently active (checked-out) connections.
public final static int DEFAULT_MAX_ACTIVE_SIZE = 8;
// Whether a fatal error has occurred on the data source.
protected volatile boolean onFatalError = false;
// Active-connection cap while in the fatal-error state: acquisition throws when
// onFatalError is set, this value is > 0, and activeCount >= onFatalErrorMaxActive.
// (The original note said "maxActive < onFatalErrorMaxActive", which does not match the check.)
protected volatile int onFatalErrorMaxActive = 0;
// Maximum number of active (checked-out) connections.
protected volatile int maxActive = DEFAULT_MAX_ACTIVE_SIZE;
// Scheduler used to create connections asynchronously; may be null.
protected ScheduledExecutorService createScheduler;
代码分析
/**
 * Acquires a pooled connection.
 *
 * Normally the connection comes from the pool via pollLast(nanos) (bounded wait)
 * or takeLast() (unbounded wait). When the asynchronous create scheduler exists
 * but its queue is backlogged while the pool is empty, the method falls back to
 * creating a physical connection directly ({@code createDirect}).
 *
 * @param maxWait maximum time to wait for a connection, in milliseconds;
 *                <= 0 means wait indefinitely (takeLast path)
 * @return a pooled connection wrapping the acquired holder
 * @throws SQLException if the pool is closed/disabled, the waiter limit is hit,
 *                      the fatal-error cap is reached, the wait is interrupted,
 *                      or the wait times out (GetConnectionTimeoutException)
 */
private DruidPooledConnection getConnectionInternal(long maxWait) throws SQLException {
    // Fail fast if the pool has been closed.
    if (closed) {
        connectErrorCountUpdater.incrementAndGet(this);
        throw new DataSourceClosedException("dataSource already closed at " + new Date(closeTimeMillis));
    }
    // Fail fast if the pool has been disabled; prefer the recorded disable cause.
    if (!enable) {
        connectErrorCountUpdater.incrementAndGet(this);
        if (disableException != null) {
            throw disableException;
        }
        throw new DataSourceDisableException();
    }
    final long nanos = TimeUnit.MILLISECONDS.toNanos(maxWait);
    final int maxWaitThreadCount = this.maxWaitThreadCount; // snapshot of the volatile field
    // Holder for the acquired connection; null after the loop means acquisition timed out.
    DruidConnectionHolder holder;
    for (boolean createDirect = false;;) {
        if (createDirect) {
            // Direct-create path: build a physical connection ourselves instead of
            // taking one from the pool. Guarded by a CAS on creatingCount so only
            // one thread creates at a time.
            createStartNanosUpdater.set(this, System.nanoTime());
            if (creatingCountUpdater.compareAndSet(this, 0, 1)) {
                // Create the physical (JDBC) connection.
                PhysicalConnectionInfo pyConnInfo = DruidDataSource.this.createPhysicalConnection();
                // Wrap it in a holder.
                holder = new DruidConnectionHolder(this, pyConnInfo);
                // Record the last-active time.
                holder.lastActiveTimeMillis = System.currentTimeMillis();
                creatingCountUpdater.decrementAndGet(this);
                directCreateCountUpdater.incrementAndGet(this);
                if (LOG.isDebugEnabled()) {
                    LOG.debug("conn-direct_create ");
                }
                /*
                 * Under the pool lock, check activeCount against maxActive:
                 *  - below the cap: count it active (updating the peak) and keep it;
                 *  - at/over the cap: mark it for discard and close it below.
                 */
                boolean discard = false;
                lock.lock();
                try {
                    if (activeCount < maxActive) {
                        activeCount++;
                        holder.active = true;
                        if (activeCount > activePeak) {
                            activePeak = activeCount;
                            activePeakTime = System.currentTimeMillis();
                        }
                        break; // acquired: leave the retry loop
                    } else {
                        discard = true;
                    }
                } finally {
                    lock.unlock();
                }
                // Close the surplus physical connection outside the lock.
                if (discard) {
                    JdbcUtils.close(pyConnInfo.getPhysicalConnection());
                }
            }
        }
        // Acquire the pool lock interruptibly; translate interruption into SQLException.
        try {
            lock.lockInterruptibly();
        } catch (InterruptedException e) {
            connectErrorCountUpdater.incrementAndGet(this);
            throw new SQLException("interrupt", e);
        }
        // If a waiter limit is configured and already reached, refuse to queue
        // another waiter and report the current queue length.
        try {
            if (maxWaitThreadCount > 0
                    && notEmptyWaitThreadCount >= maxWaitThreadCount) {
                connectErrorCountUpdater.incrementAndGet(this);
                throw new SQLException("maxWaitThreadCount " + maxWaitThreadCount + ", current wait Thread count "
                        + lock.getQueueLength());
            }
            // In the fatal-error state, refuse acquisition once activeCount hits
            // the configured onFatalErrorMaxActive cap; the message includes the
            // last fatal error time and SQL when available.
            if (onFatalError
                    && onFatalErrorMaxActive > 0
                    && activeCount >= onFatalErrorMaxActive) {
                connectErrorCountUpdater.incrementAndGet(this);
                StringBuilder errorMsg = new StringBuilder();
                errorMsg.append("onFatalError, activeCount ")
                        .append(activeCount)
                        .append(", onFatalErrorMaxActive ")
                        .append(onFatalErrorMaxActive);
                if (lastFatalErrorTimeMillis > 0) {
                    errorMsg.append(", time '")
                            .append(StringUtils.formatDateTime19(
                                    lastFatalErrorTimeMillis, TimeZone.getDefault()))
                            .append("'");
                }
                if (lastFatalErrorSql != null) {
                    errorMsg.append(", sql \n")
                            .append(lastFatalErrorSql);
                }
                throw new SQLException(
                        errorMsg.toString(), lastFatalError);
            }
            connectCount++;
            // Fallback trigger: if the create scheduler exists, the pool is empty,
            // we are under maxActive, nothing is being created, and the scheduler's
            // queue is backlogged, switch to the direct-create path and retry.
            if (createScheduler != null
                    && poolingCount == 0
                    && activeCount < maxActive
                    && creatingCountUpdater.get(this) == 0
                    && createScheduler instanceof ScheduledThreadPoolExecutor) {
                ScheduledThreadPoolExecutor executor = (ScheduledThreadPoolExecutor) createScheduler;
                if (executor.getQueue().size() > 0) {
                    createDirect = true;
                    continue;
                }
            }
            // Take a connection from the pool: bounded wait when maxWait > 0,
            // otherwise wait indefinitely.
            if (maxWait > 0) {
                holder = pollLast(nanos);
            } else {
                holder = takeLast();
            }
            if (holder != null) {
                // A discarded holder is unusable; retry the loop.
                if (holder.discard) {
                    continue;
                }
                // Count it active and update the peak statistics.
                activeCount++;
                holder.active = true;
                if (activeCount > activePeak) {
                    activePeak = activeCount;
                    activePeakTime = System.currentTimeMillis();
                }
            }
        } catch (InterruptedException e) {
            connectErrorCountUpdater.incrementAndGet(this);
            throw new SQLException(e.getMessage(), e);
        } catch (SQLException e) {
            connectErrorCountUpdater.incrementAndGet(this);
            throw e;
        } finally {
            lock.unlock();
        }
        break;
    }
    // holder == null means the bounded wait timed out: snapshot the pool state
    // under the lock and throw a descriptive GetConnectionTimeoutException.
    if (holder == null) {
        long waitNanos = waitNanosLocal.get();
        final long activeCount;
        final long maxActive;
        final long creatingCount;
        final long createStartNanos;
        final long createErrorCount;
        final Throwable createError;
        try {
            lock.lock();
            activeCount = this.activeCount;
            maxActive = this.maxActive;
            creatingCount = this.creatingCount;
            createStartNanos = this.createStartNanos;
            createErrorCount = this.createErrorCount;
            createError = this.createError;
        } finally {
            lock.unlock();
        }
        StringBuilder buf = new StringBuilder(128);
        buf.append("wait millis ")//
                .append(waitNanos / (1000 * 1000))//
                .append(", active ").append(activeCount)//
                .append(", maxActive ").append(maxActive)//
                .append(", creating ").append(creatingCount)//
        ;
        if (creatingCount > 0 && createStartNanos > 0) {
            long createElapseMillis = (System.nanoTime() - createStartNanos) / (1000 * 1000);
            if (createElapseMillis > 0) {
                buf.append(", createElapseMillis ").append(createElapseMillis);
            }
        }
        if (createErrorCount > 0) {
            buf.append(", createErrorCount ").append(createErrorCount);
        }
        // Append the currently running SQL statements for diagnostics.
        List<JdbcSqlStatValue> sqlList = this.getDataSourceStat().getRuningSqlList();
        for (int i = 0; i < sqlList.size(); ++i) {
            if (i != 0) {
                buf.append('\n');
            } else {
                buf.append(", ");
            }
            JdbcSqlStatValue sql = sqlList.get(i);
            buf.append("runningSqlCount ").append(sql.getRunningCount());
            buf.append(" : ");
            buf.append(sql.getSql());
        }
        String errorMessage = buf.toString();
        if (createError != null) {
            throw new GetConnectionTimeoutException(errorMessage, createError);
        } else {
            throw new GetConnectionTimeoutException(errorMessage);
        }
    }
    // Success: bump the holder's use count and hand back a pooled wrapper.
    holder.incrementUseCount();
    DruidPooledConnection poolalbeConnection = new DruidPooledConnection(holder);
    return poolalbeConnection;
}
总结
createDirect参数为true时,会绕过连接池直接创建物理连接,并不是从池内获取的连接。
拿取连接主要通过pollLast(nanos)和takeLast()方法获取。拿取时,默认取出池中最后一个连接:先将poolingCount减一,再取出connections[poolingCount](即原数组末尾的连接)并将该位置置为空;拿取成功后,将activeCount计数+1。
由此得知,poolingCount + activeCount才是真正的连接总数(在拿取过程中,poolingCount已减一而activeCount尚未加一的瞬间,总数为poolingCount + 1 + activeCount)。
今日分析略微痛苦,可能是没分析init
方法直接跳到后面的连接分析,导致有些内容理解不到位。明日将分析init
方法并将这三天分析进行总结。