Alibaba Druid connection pool: custom storage of monitoring data

This post shows how to persist the SQL execution statistics and connection pool resource metrics collected by the Druid connection pool, so they can be analyzed later for operations and performance tuning. The following example has passed an initial test.

Step 1:

Create a MyDruidStatLogger class that extends DruidDataSourceStatLoggerAdapter and implements DruidDataSourceStatLogger; the full code is listed below. This example only receives the statistics and prints them to the console; a real application should implement a concrete storage scheme (see the persistence sketch after the code listing).

package com.andaily.web.context;

import com.alibaba.druid.support.logging.Log;
import com.alibaba.druid.support.logging.LogFactory;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Properties;
import static com.alibaba.druid.util.JdbcSqlStatUtils.rtrim;
import com.alibaba.druid.pool.DruidDataSourceStatLogger;
import com.alibaba.druid.pool.DruidDataSourceStatLoggerAdapter;
import com.alibaba.druid.pool.DruidDataSourceStatValue;
import com.alibaba.druid.stat.JdbcSqlStatValue;
import com.alibaba.druid.support.json.JSONUtils;

public class MyDruidStatLogger extends DruidDataSourceStatLoggerAdapter implements DruidDataSourceStatLogger {

    private static final Log LOG = LogFactory.getLog(MyDruidStatLogger.class);

    private Log logger = LOG;

    public MyDruidStatLogger() {
        this.configFromProperties(System.getProperties());
    }
	@Override
    public void configFromProperties(Properties properties) {
        String property = properties.getProperty("druid.stat.loggerName");
        if (property != null && property.length() > 0) {
            setLoggerName(property);
        }
    }
	public Log getLogger() {
        return logger;
    }

    @Override
    public void setLoggerName(String loggerName) {
        logger = LogFactory.getLog(loggerName);
    }

    @Override
    public void setLogger(Log logger) {
        if (logger == null) {
            throw new IllegalArgumentException("logger can not be null");
        }
        this.logger = logger;
    }

    public boolean isLogEnable() {
        return true;
    }

    public void log(String value) {
        logger.info(value);
    }
    @Override
	public void log(DruidDataSourceStatValue statValue) {
		 Map<String, Object> map = new LinkedHashMap<String, Object>();

	        map.put("url", statValue.getUrl());
	        map.put("dbType", statValue.getDbType());
	        map.put("name", statValue.getName());
	        map.put("activeCount", statValue.getActiveCount());

	        if (statValue.getActivePeak() > 0) {
	            map.put("activePeak", statValue.getActivePeak());
	            map.put("activePeakTime", statValue.getActivePeakTime());
	        }
	        map.put("poolingCount", statValue.getPoolingCount());
	        if (statValue.getPoolingPeak() > 0) {
	            map.put("poolingPeak", statValue.getPoolingPeak());
	            map.put("poolingPeakTime", statValue.getPoolingPeakTime());
	        }
	        map.put("connectCount", statValue.getConnectCount());
	        map.put("closeCount", statValue.getCloseCount());

	        if (statValue.getWaitThreadCount() > 0) {
	            map.put("waitThreadCount", statValue.getWaitThreadCount());
	        }

	        if (statValue.getNotEmptyWaitCount() > 0) {
	            map.put("notEmptyWaitCount", statValue.getNotEmptyWaitCount());
	        }

	        if (statValue.getNotEmptyWaitMillis() > 0) {
	            map.put("notEmptyWaitMillis", statValue.getNotEmptyWaitMillis());
	        }

	        if (statValue.getLogicConnectErrorCount() > 0) {
	            map.put("logicConnectErrorCount", statValue.getLogicConnectErrorCount());
	        }

	        if (statValue.getPhysicalConnectCount() > 0) {
	            map.put("physicalConnectCount", statValue.getPhysicalConnectCount());
	        }

	        if (statValue.getPhysicalCloseCount() > 0) {
	            map.put("physicalCloseCount", statValue.getPhysicalCloseCount());
	        }

	        if (statValue.getPhysicalConnectErrorCount() > 0) {
	            map.put("physicalConnectErrorCount", statValue.getPhysicalConnectErrorCount());
	        }

	        if (statValue.getExecuteCount() > 0) {
	            map.put("executeCount", statValue.getExecuteCount());
	        }

	        if (statValue.getErrorCount() > 0) {
	            map.put("errorCount", statValue.getErrorCount());
	        }

	        if (statValue.getCommitCount() > 0) {
	            map.put("commitCount", statValue.getCommitCount());
	        }

	        if (statValue.getRollbackCount() > 0) {
	            map.put("rollbackCount", statValue.getRollbackCount());
	        }

	        if (statValue.getPstmtCacheHitCount() > 0) {
	            map.put("pstmtCacheHitCount", statValue.getPstmtCacheHitCount());
	        }

	        if (statValue.getPstmtCacheMissCount() > 0) {
	            map.put("pstmtCacheMissCount", statValue.getPstmtCacheMissCount());
	        }

	        if (statValue.getStartTransactionCount() > 0) {
	            map.put("startTransactionCount", statValue.getStartTransactionCount());
	            map.put("transactionHistogram", rtrim(statValue.getTransactionHistogram()));
	        }

	        if (statValue.getConnectCount() > 0) {
	            map.put("connectionHoldTimeHistogram", rtrim(statValue.getConnectionHoldTimeHistogram()));
	        }

	        if (statValue.getClobOpenCount() > 0) {
	            map.put("clobOpenCount", statValue.getClobOpenCount());
	        }

	        if (statValue.getBlobOpenCount() > 0) {
	            map.put("blobOpenCount", statValue.getBlobOpenCount());
	        }

	        if (statValue.getSqlSkipCount() > 0) {
	            map.put("sqlSkipCount", statValue.getSqlSkipCount());
	        }

	        ArrayList<Map<String, Object>> sqlList = new ArrayList<Map<String, Object>>();
	        if (statValue.getSqlList().size() > 0) {
	            for (JdbcSqlStatValue sqlStat : statValue.getSqlList()) {
	                Map<String, Object> sqlStatMap = new LinkedHashMap<String, Object>();
	                sqlStatMap.put("sql", sqlStat.getSql());

	                if (sqlStat.getExecuteCount() > 0) {
	                    sqlStatMap.put("executeCount", sqlStat.getExecuteCount());
	                    sqlStatMap.put("executeMillisMax", sqlStat.getExecuteMillisMax());
	                    sqlStatMap.put("executeMillisTotal", sqlStat.getExecuteMillisTotal());

	                    sqlStatMap.put("executeHistogram", rtrim(sqlStat.getExecuteHistogram()));
	                    sqlStatMap.put("executeAndResultHoldHistogram", rtrim(sqlStat.getExecuteAndResultHoldHistogram()));
	                }

	                long executeErrorCount = sqlStat.getExecuteErrorCount();
	                if (executeErrorCount > 0) {
	                    sqlStatMap.put("executeErrorCount", executeErrorCount);
	                }

	                int runningCount = sqlStat.getRunningCount();
	                if (runningCount > 0) {
	                    sqlStatMap.put("runningCount", runningCount);
	                }

	                int concurrentMax = sqlStat.getConcurrentMax();
	                if (concurrentMax > 0) {
	                    sqlStatMap.put("concurrentMax", concurrentMax);
	                }

	                if (sqlStat.getFetchRowCount() > 0) {
	                    sqlStatMap.put("fetchRowCount", sqlStat.getFetchRowCount());
	                    sqlStatMap.put("fetchRowCount", sqlStat.getFetchRowCountMax());
	                    sqlStatMap.put("fetchRowHistogram", rtrim(sqlStat.getFetchRowHistogram()));
	                }

	                if (sqlStat.getUpdateCount() > 0) {
	                    sqlStatMap.put("updateCount", sqlStat.getUpdateCount());
	                    sqlStatMap.put("updateCountMax", sqlStat.getUpdateCountMax());
	                    sqlStatMap.put("updateHistogram", rtrim(sqlStat.getUpdateHistogram()));
	                }

	                if (sqlStat.getInTransactionCount() > 0) {
	                    sqlStatMap.put("inTransactionCount", sqlStat.getInTransactionCount());
	                }

	                if (sqlStat.getClobOpenCount() > 0) {
	                    sqlStatMap.put("clobOpenCount", sqlStat.getClobOpenCount());
	                }

	                if (sqlStat.getBlobOpenCount() > 0) {
	                    sqlStatMap.put("blobOpenCount", sqlStat.getBlobOpenCount());
	                }

	                sqlList.add(sqlStatMap);
	            }

	            map.put("sqlList", sqlList);
	        }

	        if (statValue.getKeepAliveCheckCount() > 0) {
	            map.put("keepAliveCheckCount", statValue.getKeepAliveCheckCount());
	        }

	        String text = JSONUtils.toJSONString(map);
	        System.out.println("==============:"+text);
	}
}
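
The log method above only serializes the statistics to JSON and prints them. As a minimal sketch of a concrete storage scheme, the snippet below writes each pushed snapshot into a hypothetical druid_stat_log table over plain JDBC; the table name, column names and the statDataSource field are assumptions for illustration, and any other sink (file, message queue, time-series database, etc.) could be substituted at the same place.

    // --- Sketch only: persist the snapshot instead of printing it (names below are assumptions) ---

    // A separate, plain DataSource pointing at the monitoring database,
    // so that writing statistics does not go through the monitored pool itself.
    private javax.sql.DataSource statDataSource;

    // Assumed schema:
    // CREATE TABLE druid_stat_log (log_time TIMESTAMP, pool_name VARCHAR(100), stat_json TEXT);
    private void saveStat(Map<String, Object> map) {
        String json = JSONUtils.toJSONString(map);
        String sql = "insert into druid_stat_log (log_time, pool_name, stat_json) values (?, ?, ?)";
        try (java.sql.Connection conn = statDataSource.getConnection();
             java.sql.PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setTimestamp(1, new java.sql.Timestamp(System.currentTimeMillis()));
            ps.setString(2, String.valueOf(map.get("name")));
            ps.setString(3, json);
            ps.executeUpdate();
        } catch (java.sql.SQLException e) {
            logger.error("failed to persist druid stat", e);
        }
    }

With this in place, the end of log(DruidDataSourceStatValue) would call saveStat(map) instead of System.out.println.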
Step 2: configure the Spring beans.

<bean id="myStatLogger" class="com.andaily.web.context.MyDruidStatLogger"> </bean>

<bean id="dataSource" class="com.alibaba.druid.pool.DruidDataSource" init-method="init" destroy-method="close">   
   <!-- 基本属性 url、user、password -->  
   <property name="url" value="${jdbc.url}" />  
   <property name="username" value="${jdbc.username}" />  
   <property name="password" value="${jdbc.password}" />  
       
   <!-- 配置初始化大小、最小、最大 -->  
   <property name="initialSize" value="1" />  
   <property name="minIdle" value="1" />   
   <property name="maxActive" value="20" />  
  
   <!-- 配置获取连接等待超时的时间 -->  
   <property name="maxWait" value="60000" />  
  
   <!-- 配置间隔多久才进行一次检测,检测需要关闭的空闲连接,单位是毫秒 -->  
   <property name="timeBetweenEvictionRunsMillis" value="60000" />  
   <!-- 配置一个连接在池中最小生存的时间,单位是毫秒 -->  
   <property name="minEvictableIdleTimeMillis" value="300000" />  
   <property name="validationQuery" value="SELECT 'x'" />  
   <property name="testWhileIdle" value="true" />  
   <property name="testOnBorrow" value="false" />  
   <property name="testOnReturn" value="false" />  
   <!-- 打开PSCache,并且指定每个连接上PSCache的大小 -->  
   <property name="poolPreparedStatements" value="true" />  
   <property name="maxPoolPreparedStatementPerConnectionSize" value="20" />  
  
   <!-- 配置监控统计拦截的filters,去掉后监控界面sql无法统计 -->  
   <property name="filters" value="stat" />   
   <property name="timeBetweenLogStatsMillis" value="1000" />
   <property name="statLogger" ref="myStatLogger"/>

</bean> 


After the application starts, the Druid monitoring information is printed to the console.
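
For applications that construct the data source in Java code rather than Spring XML, the same wiring can be done programmatically. A minimal sketch, assuming the JDBC settings are supplied by the caller and MyDruidStatLogger is the class from step 1:

import java.sql.SQLException;
import com.alibaba.druid.pool.DruidDataSource;

public class DataSourceConfig {

    // Hypothetical factory method; the JDBC values would normally come from a properties file.
    public static DruidDataSource buildDataSource(String url, String user, String password) throws SQLException {
        DruidDataSource dataSource = new DruidDataSource();
        dataSource.setUrl(url);
        dataSource.setUsername(user);
        dataSource.setPassword(password);
        dataSource.setInitialSize(1);
        dataSource.setMinIdle(1);
        dataSource.setMaxActive(20);
        dataSource.setFilters("stat");                     // required for SQL statistics
        dataSource.setTimeBetweenLogStatsMillis(1000);     // push stats to the statLogger every second
        dataSource.setStatLogger(new MyDruidStatLogger()); // the custom logger from step 1
        dataSource.init();
        return dataSource;
    }
}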

