Spring Boot Druid Monitor Configuration

1. Dependency Configuration

Add the following dependencies to your Spring Boot project:

<!-- Druid data source dependency -->
<dependency>
   <groupId>com.alibaba</groupId>
   <artifactId>druid</artifactId>
   <version>1.1.12</version>
</dependency>

<!-- https://mvnrepository.com/artifact/log4j/log4j -->
<dependency>
   <groupId>log4j</groupId>
   <artifactId>log4j</artifactId>
   <version>1.2.17</version>
</dependency>
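
A JDBC driver must also be on the classpath. A minimal sketch, assuming a MySQL database (swap in the driver for your own database):

<!-- MySQL JDBC driver (assumed; replace to match your database) -->
<dependency>
   <groupId>mysql</groupId>
   <artifactId>mysql-connector-java</artifactId>
   <version>8.0.33</version>
</dependency>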

2. application.properties Configuration

Add the Druid pool configuration to application.properties:

spring.datasource.initialSize = 5
spring.datasource.minIdle = 5
spring.datasource.maxActive = 20
spring.datasource.maxWait = 60000
spring.datasource.timeBetweenEvictionRunsMillis = 60000
spring.datasource.minEvictableIdleTimeMillis = 300000
spring.datasource.validationQuery = SELECT 1 FROM DUAL
spring.datasource.testWhileIdle = true
spring.datasource.testOnBorrow = false
spring.datasource.testOnReturn = false
spring.datasource.poolPreparedStatements = true
spring.datasource.filters = stat,wall,log4j
spring.datasource.maxPoolPreparedStatementPerConnectionSize = 20
spring.datasource.useGlobalDataSourceStat = false
spring.datasource.connectionProperties = druid.stat.mergeSql=true;druid.stat.slowSqlMillis=500
spring.datasource.timeBetweenLogStatsMillis = 30000
spring.datasource.statLogger.ref = myStatLogger
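
Note that these pool settings are not standard Spring Boot keys; they bind onto the DruidDataSource bean through the @ConfigurationProperties(prefix = "spring.datasource") annotation shown in the next section. The basic connection settings must also be present. A minimal sketch, assuming a local MySQL database (adjust the URL, credentials, and driver to your environment):

spring.datasource.url = jdbc:mysql://localhost:3306/test?useUnicode=true&characterEncoding=utf8
spring.datasource.username = root
spring.datasource.password = 123456
spring.datasource.driver-class-name = com.mysql.cj.jdbc.Driver
spring.datasource.type = com.alibaba.druid.pool.DruidDataSource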

3. Bean Configuration

Add a new configuration class to the project with the following code:

import com.alibaba.druid.pool.DruidDataSource;
import com.alibaba.druid.support.http.StatViewServlet;
import com.alibaba.druid.support.http.WebStatFilter;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.web.servlet.FilterRegistrationBean;
import org.springframework.boot.web.servlet.ServletRegistrationBean;
import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import javax.sql.DataSource;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

@Configuration
public class DruidConfigBean {
    // Bind all spring.datasource.* properties onto the DruidDataSource
    // and attach the custom stat logger defined later in this article.
    @ConfigurationProperties(prefix = "spring.datasource")
    @Bean
    public DataSource druidDataSource(ApplicationContext applicationContext) {
        DruidDataSource druidDataSource = new DruidDataSource();
        druidDataSource.setStatLogger(applicationContext.getBean(MyStatLogger.class));
        return druidDataSource;
    }


    // Configure the servlet for the Druid monitoring console.
    // With an embedded servlet container there is no web.xml, so the
    // servlet is registered through Spring Boot's registration bean.
    @Bean
    public ServletRegistrationBean statViewServlet() {
        ServletRegistrationBean bean = new ServletRegistrationBean(new StatViewServlet(), "/druid/*");

        // These init parameters are defined in com.alibaba.druid.support.http.StatViewServlet
        // and its parent class com.alibaba.druid.support.http.ResourceServlet.
        Map<String, String> initParams = new HashMap<>();
        initParams.put("loginUsername", "admin"); // console login username
        initParams.put("loginPassword", "123456"); // console login password

        // allow: who may access the console
        // initParams.put("allow", "localhost") allows only the local machine;
        // an empty or null value allows access from anywhere.
        initParams.put("allow", "");
        // deny: who is refused access to the console, e.g.
        // initParams.put("deny", "192.168.1.20") blocks that IP.

        // apply the init parameters
        bean.setInitParameters(initParams);
        return bean;
    }


    // Configure the filter for Druid's web request monitoring.
    // WebStatFilter links web requests to the Druid data source statistics.
    @Bean
    public FilterRegistrationBean webStatFilter() {
        FilterRegistrationBean bean = new FilterRegistrationBean();
        bean.setFilter(new WebStatFilter());

        // exclusions: requests matching these patterns are excluded from the statistics
        Map<String, String> initParams = new HashMap<>();
        initParams.put("exclusions", "*.js,*.css,/druid/*,/jdbc/*");
        bean.setInitParameters(initParams);

        // "/*" filters every request
        bean.setUrlPatterns(Arrays.asList("/*"));
        return bean;
    }

}
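
To confirm Druid is actually the pool in use, a quick startup check can log the DataSource class and open one connection. A minimal sketch (the class name DruidSmokeCheck is hypothetical):

import org.springframework.boot.CommandLineRunner;
import org.springframework.stereotype.Component;

import javax.sql.DataSource;
import java.sql.Connection;

// Hypothetical helper: verifies at startup that the injected DataSource
// is a DruidDataSource and that a connection can be obtained.
@Component
public class DruidSmokeCheck implements CommandLineRunner {
    private final DataSource dataSource;

    public DruidSmokeCheck(DataSource dataSource) {
        this.dataSource = dataSource;
    }

    @Override
    public void run(String... args) throws Exception {
        // Should print com.alibaba.druid.pool.DruidDataSource
        System.out.println("DataSource class: " + dataSource.getClass().getName());
        try (Connection conn = dataSource.getConnection()) {
            System.out.println("Connected to: " + conn.getMetaData().getURL());
        }
    }
}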

4. Viewing the Monitor

Open the monitoring console in a browser:

http://localhost:port/druid/login.html

Log in with the username and password configured above to view the statistics.

[Screenshot: Druid monitoring console]

5. Logging Monitor Statistics

In production, the monitoring statistics can be written to the log at a fixed interval so they can be queried later.

  1. Add the following configuration to application.properties:

    # interval between stat log outputs, in milliseconds
    spring.datasource.timeBetweenLogStatsMillis = 30000
    spring.datasource.statLogger.ref = myStatLogger
    
  2. Define a stat logger bean:

    import com.alibaba.druid.pool.DruidDataSourceStatLogger;
    import com.alibaba.druid.pool.DruidDataSourceStatLoggerAdapter;
    import com.alibaba.druid.pool.DruidDataSourceStatValue;
    import com.alibaba.druid.stat.JdbcSqlStatValue;
    import com.alibaba.druid.support.json.JSONUtils;
    import org.apache.log4j.Logger;
    import org.springframework.stereotype.Component;

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.LinkedHashMap;
    import java.util.Map;

    @Component
    public class MyStatLogger extends DruidDataSourceStatLoggerAdapter implements DruidDataSourceStatLogger {
        private static final Logger logger = Logger.getLogger(MyStatLogger.class);
    
        /**
         * @see com.alibaba.druid.pool.DruidDataSourceStatLoggerAdapter#log(com.alibaba.druid.pool.DruidDataSourceStatValue)
         */
        @Override
        public void log(DruidDataSourceStatValue statValue) {
    
            Map<String, Object> map = new LinkedHashMap<String, Object>();
    
            /*map.put("url", statValue.getUrl());
            map.put("dbType", statValue.getDbType());*/
            map.put("name", statValue.getName());
            map.put("activeCount", statValue.getActiveCount());
    
            if (statValue.getActivePeak() > 0) {
                map.put("activePeak", statValue.getActivePeak());
                map.put("activePeakTime", statValue.getActivePeakTime());
            }
            map.put("poolingCount", statValue.getPoolingCount());
            if (statValue.getPoolingPeak() > 0) {
                map.put("poolingPeak", statValue.getPoolingPeak());
                map.put("poolingPeakTime", statValue.getPoolingPeakTime());
            }
            map.put("connectCount", statValue.getConnectCount());
            map.put("closeCount", statValue.getCloseCount());
    
            if (statValue.getWaitThreadCount() > 0) {
                map.put("waitThreadCount", statValue.getWaitThreadCount());
            }
    
            if (statValue.getNotEmptyWaitCount() > 0) {
                map.put("notEmptyWaitCount", statValue.getNotEmptyWaitCount());
            }
    
            if (statValue.getNotEmptyWaitMillis() > 0) {
                map.put("notEmptyWaitMillis", statValue.getNotEmptyWaitMillis());
            }
    
            if (statValue.getLogicConnectErrorCount() > 0) {
                map.put("logicConnectErrorCount", statValue.getLogicConnectErrorCount());
            }
    
            if (statValue.getPhysicalConnectCount() > 0) {
                map.put("physicalConnectCount", statValue.getPhysicalConnectCount());
            }
    
            if (statValue.getPhysicalCloseCount() > 0) {
                map.put("physicalCloseCount", statValue.getPhysicalCloseCount());
            }
    
            if (statValue.getPhysicalConnectErrorCount() > 0) {
                map.put("physicalConnectErrorCount", statValue.getPhysicalConnectErrorCount());
            }
    
            if (statValue.getExecuteCount() > 0) {
                map.put("executeCount", statValue.getExecuteCount());
            }
    
            if (statValue.getErrorCount() > 0) {
                map.put("errorCount", statValue.getErrorCount());
            }
    
            if (statValue.getCommitCount() > 0) {
                map.put("commitCount", statValue.getCommitCount());
            }
    
            if (statValue.getRollbackCount() > 0) {
                map.put("rollbackCount", statValue.getRollbackCount());
            }
    
            if (statValue.getPstmtCacheHitCount() > 0) {
                map.put("pstmtCacheHitCount", statValue.getPstmtCacheHitCount());
            }
    
            if (statValue.getPstmtCacheMissCount() > 0) {
                map.put("pstmtCacheMissCount", statValue.getPstmtCacheMissCount());
            }
    
            if (statValue.getStartTransactionCount() > 0) {
                map.put("startTransactionCount", statValue.getStartTransactionCount());
                map.put("transactionHistogram", rtrim(statValue.getTransactionHistogram()));
            }
    
            if (statValue.getConnectCount() > 0) {
                map.put("connectionHoldTimeHistogram", rtrim(statValue.getConnectionHoldTimeHistogram()));
            }
    
            if (statValue.getClobOpenCount() > 0) {
                map.put("clobOpenCount", statValue.getClobOpenCount());
            }
    
            if (statValue.getBlobOpenCount() > 0) {
                map.put("blobOpenCount", statValue.getBlobOpenCount());
            }
    
            if (statValue.getSqlSkipCount() > 0) {
                map.put("sqlSkipCount", statValue.getSqlSkipCount());
            }
    
            ArrayList<Map<String, Object>> sqlList = new ArrayList<Map<String, Object>>();
            if (statValue.getSqlList().size() > 0) {
                for (JdbcSqlStatValue sqlStat : statValue.getSqlList()) {
                    Map<String, Object> sqlStatMap = new LinkedHashMap<String, Object>();
                    sqlStatMap.put("sql", sqlStat.getSql());
    
                    if (sqlStat.getExecuteCount() > 0) {
                        sqlStatMap.put("executeCount", sqlStat.getExecuteCount());
                        sqlStatMap.put("executeMillisMax", sqlStat.getExecuteMillisMax());
                        sqlStatMap.put("executeMillisTotal", sqlStat.getExecuteMillisTotal());
    
                        sqlStatMap.put("executeHistogram", rtrim(sqlStat.getExecuteHistogram()));
                        sqlStatMap.put("executeAndResultHoldHistogram", rtrim(sqlStat.getExecuteAndResultHoldHistogram()));
                    }
    
                    long executeErrorCount = sqlStat.getExecuteErrorCount();
                    if (executeErrorCount > 0) {
                        sqlStatMap.put("executeErrorCount", executeErrorCount);
                    }
    
                    int runningCount = sqlStat.getRunningCount();
                    if (runningCount > 0) {
                        sqlStatMap.put("runningCount", runningCount);
                    }
    
                    int concurrentMax = sqlStat.getConcurrentMax();
                    if (concurrentMax > 0) {
                        sqlStatMap.put("concurrentMax", concurrentMax);
                    }
    
                    if (sqlStat.getFetchRowCount() > 0) {
                        sqlStatMap.put("fetchRowCount", sqlStat.getFetchRowCount());
                        sqlStatMap.put("fetchRowCountMax", sqlStat.getFetchRowCountMax());
                        sqlStatMap.put("fetchRowHistogram", rtrim(sqlStat.getFetchRowHistogram()));
                    }
    
                    if (sqlStat.getUpdateCount() > 0) {
                        sqlStatMap.put("updateCount", sqlStat.getUpdateCount());
                        sqlStatMap.put("updateCountMax", sqlStat.getUpdateCountMax());
                        sqlStatMap.put("updateHistogram", rtrim(sqlStat.getUpdateHistogram()));
                    }
    
                    if (sqlStat.getInTransactionCount() > 0) {
                        sqlStatMap.put("inTransactionCount", sqlStat.getInTransactionCount());
                    }
    
                    if (sqlStat.getClobOpenCount() > 0) {
                        sqlStatMap.put("clobOpenCount", sqlStat.getClobOpenCount());
                    }
    
                    if (sqlStat.getBlobOpenCount() > 0) {
                        sqlStatMap.put("blobOpenCount", sqlStat.getBlobOpenCount());
                    }
    
                    sqlList.add(sqlStatMap);
                }
    
                map.put("sqlList", sqlList);
            }
    
            String text = JSONUtils.toJSONString(map);
            logger.info(text);
        }

        // Trims trailing zero buckets from a histogram array so the logged
        // JSON stays compact. Druid's built-in DruidDataSourceStatLoggerImpl
        // defines the same helper privately, so it is redefined here.
        private static long[] rtrim(long[] values) {
            int len = values.length;
            for (int i = len - 1; i >= 0; --i) {
                if (values[i] == 0) {
                    --len;
                } else {
                    break;
                }
            }
            return Arrays.copyOf(values, len);
        }
    }
    
  3. Add a log4j.properties configuration file:

    log4j.rootLogger = DEBUG, stdout, D, E
    log4j.appender.stdout = org.apache.log4j.ConsoleAppender
    log4j.appender.stdout.Target = System.out
    log4j.appender.stdout.layout = org.apache.log4j.PatternLayout
    log4j.appender.stdout.layout.ConversionPattern = [%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} method:%l%n%m%n
    log4j.appender.stdout.Threshold = DEBUG
    
    ### Log DEBUG level and above to E://logs/log.log ###
    log4j.appender.D = org.apache.log4j.DailyRollingFileAppender
    log4j.appender.D.File = E://logs/log.log
    log4j.appender.D.Append = true
    log4j.appender.D.Threshold = DEBUG
    log4j.appender.D.layout = org.apache.log4j.PatternLayout
    log4j.appender.D.layout.ConversionPattern = %-d{yyyy-MM-dd HH:mm:ss}  [ %t:%r ] - [ %p ]  %m%n
    
    ### Log ERROR level and above to E://logs/error.log ###
    log4j.appender.E = org.apache.log4j.DailyRollingFileAppender
    log4j.appender.E.File =E://logs/error.log
    log4j.appender.E.Append = true
    log4j.appender.E.Threshold = ERROR
    log4j.appender.E.layout = org.apache.log4j.PatternLayout
    log4j.appender.E.layout.ConversionPattern = %-d{yyyy-MM-dd HH:mm:ss}  [ %t:%r ] - [ %p ]  %m%n
    
  4. Run and verify. With the settings above, a JSON line of pool statistics is written to the log every 30 seconds; a query example to trigger some statistics follows below.
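
    To give the stat logger some statistics to report, run any query through the pool. A minimal sketch, assuming spring-boot-starter-jdbc is on the classpath (the class name StatLogSmokeTest is hypothetical):

    import org.springframework.boot.CommandLineRunner;
    import org.springframework.jdbc.core.JdbcTemplate;
    import org.springframework.stereotype.Component;

    // Hypothetical helper: fires one query so that pool and SQL statistics
    // show up in the next scheduled stat-log output.
    @Component
    public class StatLogSmokeTest implements CommandLineRunner {
        private final JdbcTemplate jdbcTemplate;

        public StatLogSmokeTest(JdbcTemplate jdbcTemplate) {
            this.jdbcTemplate = jdbcTemplate;
        }

        @Override
        public void run(String... args) {
            // Same statement as the validationQuery configured above
            Integer one = jdbcTemplate.queryForObject("SELECT 1 FROM DUAL", Integer.class);
            System.out.println("smoke-test query returned: " + one);
        }
    }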

References:

Druid连接池介绍 · alibaba/druid Wiki · GitHub
狂神说SpringBoot08:整合Druid (qq.com)
