Configuring a Druid Data Source in Spring Boot

Introduction to Druid

Druid is a JDBC component library that provides a database connection pool, a SQL parser, and other components. It is used or integrated by a large number of business and technical products and has survived the most demanding production scenarios, making it a technology you can rely on.
Druid homepage and official documentation

Adding the Dependencies

  1. Spring Boot version: 2.3.4
  2. Druid version: 1.2.3; a Log4j dependency also needs to be added
	<!-- Druid connection pool dependency -->
	<!-- https://mvnrepository.com/artifact/com.alibaba/druid-spring-boot-starter -->
	<dependency>
	    <groupId>com.alibaba</groupId>
	    <artifactId>druid-spring-boot-starter</artifactId>
	    <version>1.2.3</version>
	</dependency>
	<!-- Log4j dependency -->
	<!-- https://mvnrepository.com/artifact/log4j/log4j -->
	<dependency>
	    <groupId>log4j</groupId>
	    <artifactId>log4j</artifactId>
	    <version>1.2.17</version>
	</dependency>

Configuring the Druid Data Source and Enabling Monitoring in Spring Boot

For the configuration, refer to the official guide: Druid Spring Boot Starter.

Configuration-class approach

  1. Add the following to a configuration file; here it goes into application-dev.yml.
    For a detailed description of each property, see the DruidDataSource configuration property reference.

    spring:
      datasource:
        druid:
          # This part follows https://github.com/alibaba/druid/tree/master/druid-spring-boot-starter
          # Detailed configuration reference: https://github.com/alibaba/druid/wiki/DruidDataSource%E9%85%8D%E7%BD%AE%E5%B1%9E%E6%80%A7%E5%88%97%E8%A1%A8
          # JDBC settings
          driver-class-name: com.mysql.jdbc.Driver
          url: jdbc:mysql://<host>:3306/<database>?serverTimezone=GMT%2B8&autoReconnect=true&useUnicode=true&useSSL=false&characterEncoding=utf-8
          username: <username>
          password: <password>
          # Connection pool settings
          # Number of physical connections created at initialization
          initial-size: 5
          # Maximum number of connections in the pool
          max-active: 5
          # No longer used
          #max-idle: 8
          # Minimum number of idle connections in the pool
          min-idle: 2
          # Maximum wait time when acquiring a connection, in milliseconds. Once maxWait is set, a fair lock is used by default, which lowers concurrency; set useUnfairLock to true for an unfair lock if needed
          max-wait: 60000
          # Whether to cache prepared statements (PSCache). PSCache greatly improves performance on databases with server-side cursors, such as Oracle; disabling it is recommended for MySQL
          pool-prepared-statements: false
          # Must be greater than 0 to enable PSCache; when it is, the setting above is automatically switched to true. Druid does not have Oracle's problem of PSCache using too much memory, so a fairly large value such as 100 is fine
          max-pool-prepared-statement-per-connection-size: -1
          # SQL used to check whether a connection is valid. If null, testOnBorrow, testOnReturn, and testWhileIdle have no effect
          validation-query: SELECT 'x'
          # Timeout for the validity check, in seconds; internally calls the JDBC Statement method void setQueryTimeout(int seconds)
          validation-query-timeout: 10
          # Run validationQuery when borrowing a connection; enabling this lowers performance
          test-on-borrow: false
          # Run validationQuery when returning a connection; enabling this lowers performance
          test-on-return: false
          # Recommended: true. Does not hurt performance and keeps connections safe. When borrowing a connection, if its idle time exceeds timeBetweenEvictionRunsMillis, validationQuery is run to check it
          test-while-idle: true
          # For connections within the minIdle count whose idle time exceeds minEvictableIdleTimeMillis, perform the keepAlive operation
          keep-alive: false
          # Has two meanings:
          # 1) the interval at which the destroy thread checks connections; if a connection's idle time is >= minEvictableIdleTimeMillis, the physical connection is closed
          # 2) the basis for the testWhileIdle check; see the testWhileIdle description
          time-between-eviction-runs-millis: 60000
          # Minimum time a connection may sit idle before it becomes eligible for eviction
          min-evictable-idle-time-millis: 300000
          # A string of aliases that enables extension filters. Common filters:
          # stat: monitoring statistics
          # log4j: logging
          # wall: SQL injection defense
          filters: stat,wall,log4j
          # WebStatFilter monitoring configuration
          # A very hands-on tutorial for reference: https://blog.csdn.net/qq_45173404/article/details/109075810
          web-stat-filter:
            enabled: true
            url-pattern: /*
            exclusions: '*.js,*.gif,*.png,*.css,*.ico,/druid/*'
  2. Add a Druid configuration class and set up monitoring in it. The first method binds the relevant properties from the configuration file; the other two register the servlet and filter that expose Druid's monitoring.

    import com.alibaba.druid.pool.DruidDataSource;
    import com.alibaba.druid.support.http.StatViewServlet;
    import com.alibaba.druid.support.http.WebStatFilter;
    import org.springframework.boot.context.properties.ConfigurationProperties;
    import org.springframework.boot.web.servlet.FilterRegistrationBean;
    import org.springframework.boot.web.servlet.ServletRegistrationBean;
    import org.springframework.context.annotation.Bean;
    import org.springframework.context.annotation.Configuration;
    
    import javax.sql.DataSource;
    
    /**
     * Data source configuration
     *
     * @author Kay
     * @date 2020-12-7
     */
    @Configuration
    public class DruidConfig {
    
        /**
         * Data source configuration
         *
         * @return
         */
        @ConfigurationProperties(prefix = "spring.datasource.druid")
        @Bean
        public DataSource druidDataSource() {
            return new DruidDataSource();
        }
    
        /**
         * Register a StatViewServlet
         *
         * @return
         */
        @Bean
        public ServletRegistrationBean<StatViewServlet> druidStatViewServlet() {
            ServletRegistrationBean<StatViewServlet> servletRegistrationBean = new ServletRegistrationBean<>(new StatViewServlet(), "/druid/*");
            // Add init parameters
            // Allow list; if absent or empty, all addresses may access the console
            //servletRegistrationBean.addInitParameter("allow","127.0.0.1");
            // IP deny list; when both allow and deny are present, deny takes precedence
            //servletRegistrationBean.addInitParameter("deny","192.168.2.74");
            // Username and password for logging in to the console
            servletRegistrationBean.addInitParameter("loginUsername", "admin");
            servletRegistrationBean.addInitParameter("loginPassword", "123456");
            // Whether the statistics can be reset
            servletRegistrationBean.addInitParameter("resetEnable", "false");
            return servletRegistrationBean;
        }
    
        /**
         * Druid web stat filter
         *
         * @return
         */
        @Bean
        public FilterRegistrationBean<WebStatFilter> druidStatFilter() {
            FilterRegistrationBean<WebStatFilter> filterRegistrationBean = new FilterRegistrationBean<>(new WebStatFilter());
            // URL patterns to monitor
            filterRegistrationBean.addUrlPatterns("/*");
            // Patterns to exclude from monitoring
            filterRegistrationBean.addInitParameter("exclusions", "*.js,*.gif,*.jpg,*.png,*.css,*.ico,/druid/*");
            return filterRegistrationBean;
        }
    }
    
    
  3. Start the project and visit localhost:<port>/druid. If the Druid login page appears, the configuration succeeded. Log in with the username and password you configured to see the monitoring details.
    [Screenshots of the Druid console omitted]
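
    To confirm programmatically that the properties were bound to the pool, a minimal sketch like the following can be dropped into the project (the bean method name and the printed properties are my own choices, not part of the original setup):
    
    import com.alibaba.druid.pool.DruidDataSource;
    import org.springframework.boot.CommandLineRunner;
    import org.springframework.context.annotation.Bean;
    import org.springframework.context.annotation.Configuration;
    
    import javax.sql.DataSource;
    
    @Configuration
    public class DruidSanityCheck {
    
        /**
         * Prints a few pool settings at startup so you can verify that
         * spring.datasource.druid.* was actually bound to the DruidDataSource.
         */
        @Bean
        public CommandLineRunner printDruidSettings(DataSource dataSource) {
            return args -> {
                if (dataSource instanceof DruidDataSource) {
                    DruidDataSource druid = (DruidDataSource) dataSource;
                    System.out.println("initialSize = " + druid.getInitialSize());
                    System.out.println("maxActive   = " + druid.getMaxActive());
                    System.out.println("minIdle     = " + druid.getMinIdle());
                }
            };
        }
    }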

Pure YAML approach

The pure YAML approach needs no configuration class; just add the following to the YAML file. The layout differs somewhat from the configuration-class approach. At first I could not get monitoring to work at all; it turned out that spring.datasource.druid.stat-view-servlet.enabled: true must be set explicitly, because it defaults to false and monitoring stays disabled without it!

spring:
  datasource:
    url: jdbc:mysql://<host>:3306/<database>?useUnicode=true&characterEncoding=utf-8&serverTimezone=UTC
    username: <username>
    password: <password>
    driver-class-name: com.mysql.jdbc.Driver
    type: com.alibaba.druid.pool.DruidDataSource
    druid:
      initial-size: 5
      max-active: 5
      min-idle: 2
      max-wait: 60000
      pool-prepared-statements: false
      max-pool-prepared-statement-per-connection-size: -1
      validation-query: SELECT 'x'
      validation-query-timeout: 10
      test-on-borrow: false
      test-on-return: false
      test-while-idle: true
      keep-alive: false
      time-between-eviction-runs-millis: 60000
      min-evictable-idle-time-millis: 300000
      filters: stat,wall,log4j
      use-global-data-source-stat: true
      connect-properties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
      filter:
        config:
          enabled: true
      stat-view-servlet:
        enabled: true
        login-username: admin
        login-password: 123456
        reset-enable: false
        url-pattern: /druid/*
      web-stat-filter:
        enabled: true
        url-pattern: /*
        exclusions: '*.js,*.gif,*.png,*.css,*.ico,/druid/*'
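
As a quick check that this pure-YAML setup actually produced a Druid pool, a minimal smoke test along these lines can be used (the test class and method names are hypothetical):

import com.alibaba.druid.pool.DruidDataSource;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;

import javax.sql.DataSource;
import java.sql.Connection;

import static org.junit.jupiter.api.Assertions.assertTrue;

@SpringBootTest
class DruidPoolSmokeTest {

    @Autowired
    private DataSource dataSource;

    @Test
    void poolIsDruidAndConnectionsAreValid() throws Exception {
        // spring.datasource.type selected Druid as the pool implementation
        assertTrue(dataSource instanceof DruidDataSource);
        // borrowing a connection exercises the validation-query settings above
        try (Connection connection = dataSource.getConnection()) {
            assertTrue(connection.isValid(1));
        }
    }
}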

Using Log4j2 for Log Output

This also follows the official guide: using Log4j2 for log output in Druid.

  1. First, exclude Spring Boot's default Logback dependency
    <!-- Druid uses Log4j2 for log output; remove the logback dependency brought in by Spring Boot -->
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
        <exclusions>
            <exclusion>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-starter-logging</artifactId>
            </exclusion>
        </exclusions>
    </dependency>
    
  2. Add the Log4j2 dependency
    <!-- Log4j2 dependency used by Druid -->
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-log4j2</artifactId>
    </dependency>
    
  3. Add a log4j2.xml configuration file; the one provided in the official documentation can be used as-is
    <?xml version="1.0" encoding="UTF-8"?>
    <configuration status="OFF">
        <appenders>
    
            <Console name="Console" target="SYSTEM_OUT">
                <!--Accept log events at DEBUG level and above for processing-->
                <ThresholdFilter level="DEBUG" onMatch="ACCEPT" onMismatch="DENY"/>
                <PatternLayout pattern="[%d{HH:mm:ss.SSS}] %-5level %class{36} %L %M - %msg%xEx%n"/>
            </Console>
    
            <!--Handle DEBUG-level logs and write them to logs/debug.log-->
            <!--Whenever the file exceeds the configured size, that chunk of logs is archived and compressed under a folder named by year-month-->
            <RollingFile name="RollingFileDebug" fileName="./logs/debug.log"
                         filePattern="logs/$${date:yyyy-MM}/debug-%d{yyyy-MM-dd}-%i.log.gz">
                <Filters>
                    <ThresholdFilter level="DEBUG"/>
                    <ThresholdFilter level="INFO" onMatch="DENY" onMismatch="NEUTRAL"/>
                </Filters>
                <PatternLayout
                        pattern="[%d{yyyy-MM-dd HH:mm:ss}] %-5level %class{36} %L %M - %msg%xEx%n"/>
                <Policies>
                    <SizeBasedTriggeringPolicy size="500 MB"/>
                    <TimeBasedTriggeringPolicy/>
                </Policies>
            </RollingFile>
    
            <!--Handle INFO-level logs and write them to logs/info.log-->
            <RollingFile name="RollingFileInfo" fileName="./logs/info.log"
                         filePattern="logs/$${date:yyyy-MM}/info-%d{yyyy-MM-dd}-%i.log.gz">
                <Filters>
                    <!--Only accept INFO-level logs; reject everything else-->
                    <ThresholdFilter level="INFO"/>
                    <ThresholdFilter level="WARN" onMatch="DENY" onMismatch="NEUTRAL"/>
                </Filters>
                <PatternLayout
                        pattern="[%d{yyyy-MM-dd HH:mm:ss}] %-5level %class{36} %L %M - %msg%xEx%n"/>
                <Policies>
                    <SizeBasedTriggeringPolicy size="500 MB"/>
                    <TimeBasedTriggeringPolicy/>
                </Policies>
            </RollingFile>
    
            <!--Handle WARN-level logs and write them to logs/warn.log-->
            <RollingFile name="RollingFileWarn" fileName="./logs/warn.log"
                         filePattern="logs/$${date:yyyy-MM}/warn-%d{yyyy-MM-dd}-%i.log.gz">
                <Filters>
                    <ThresholdFilter level="WARN"/>
                    <ThresholdFilter level="ERROR" onMatch="DENY" onMismatch="NEUTRAL"/>
                </Filters>
                <PatternLayout
                        pattern="[%d{yyyy-MM-dd HH:mm:ss}] %-5level %class{36} %L %M - %msg%xEx%n"/>
                <Policies>
                    <SizeBasedTriggeringPolicy size="500 MB"/>
                    <TimeBasedTriggeringPolicy/>
                </Policies>
            </RollingFile>
    
            <!--Handle ERROR-level logs and write them to logs/error.log-->
            <RollingFile name="RollingFileError" fileName="./logs/error.log"
                         filePattern="logs/$${date:yyyy-MM}/error-%d{yyyy-MM-dd}-%i.log.gz">
                <ThresholdFilter level="ERROR"/>
                <PatternLayout
                        pattern="[%d{yyyy-MM-dd HH:mm:ss}] %-5level %class{36} %L %M - %msg%xEx%n"/>
                <Policies>
                    <SizeBasedTriggeringPolicy size="500 MB"/>
                    <TimeBasedTriggeringPolicy/>
                </Policies>
            </RollingFile>
    
            <!--Appender for Druid's SQL logs-->
            <RollingFile name="druidSqlRollingFile" fileName="./logs/druid-sql.log"
                         filePattern="logs/$${date:yyyy-MM}/api-%d{yyyy-MM-dd}-%i.log.gz">
                <PatternLayout pattern="[%d{yyyy-MM-dd HH:mm:ss}] %-5level %L %M - %msg%xEx%n"/>
                <Policies>
                    <SizeBasedTriggeringPolicy size="500 MB"/>
                    <TimeBasedTriggeringPolicy/>
                </Policies>
            </RollingFile>
        </appenders>
    
        <loggers>
            <root level="DEBUG">
                <appender-ref ref="Console"/>
                <appender-ref ref="RollingFileInfo"/>
                <appender-ref ref="RollingFileWarn"/>
                <appender-ref ref="RollingFileError"/>
                <appender-ref ref="RollingFileDebug"/>
            </root>
    
            <!--Druid SQL logging-->
            <logger name="druid.sql.Statement" level="debug" additivity="false">
                <appender-ref ref="druidSqlRollingFile"/>
            </logger>
    
            <!--Quiet down some noisy built-in loggers-->
            <Logger name="org.apache.catalina.startup.DigesterFactory" level="error" />
            <Logger name="org.apache.catalina.util.LifecycleBase" level="error" />
            <Logger name="org.apache.coyote.http11.Http11NioProtocol" level="warn" />
            <logger name="org.apache.sshd.common.util.SecurityUtils" level="warn"/>
            <Logger name="org.apache.tomcat.util.net.NioSelectorPool" level="warn" />
            <Logger name="org.crsh.plugin" level="warn" />
            <logger name="org.crsh.ssh" level="warn"/>
            <Logger name="org.eclipse.jetty.util.component.AbstractLifeCycle" level="error" />
            <Logger name="org.hibernate.validator.internal.util.Version" level="warn" />
            <logger name="org.springframework.boot.actuate.autoconfigure.CrshAutoConfiguration" level="warn"/>
            <logger name="org.springframework.boot.actuate.endpoint.jmx" level="warn"/>
            <logger name="org.thymeleaf" level="warn"/>
        </loggers>
    </configuration>
    
  4. Configure the log output in application.yml
    spring:
      datasource:
        druid:
          filter:
            slf4j:
              enabled: true
              statement-create-after-log-enabled: false
              statement-close-after-log-enabled: false
              result-set-open-after-log-enabled: false
              result-set-close-after-log-enabled: false
    
  5. Start the project and the logs will be written in the configured format; a quick way to sanity-check this is sketched below.
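
    For that sanity check, a couple of slf4j calls are enough to confirm that each level is routed to its file; a minimal sketch (this class is just an example, not part of the original project):
    
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;
    import org.springframework.boot.CommandLineRunner;
    import org.springframework.stereotype.Component;
    
    @Component
    public class LoggingSmokeTest implements CommandLineRunner {
    
        private static final Logger log = LoggerFactory.getLogger(LoggingSmokeTest.class);
    
        @Override
        public void run(String... args) {
            log.debug("should appear on the console and in logs/debug.log");
            log.info("should appear in logs/info.log");
            log.warn("should appear in logs/warn.log");
            log.error("should appear in logs/error.log");
        }
    }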

Multiple Data Sources

This part follows this blog post: https://www.cnblogs.com/ccsert/p/12249252.html

Modifying the configuration file

Modify the application-dev.yml configuration file, adding one more level under spring.datasource.druid for the individual data sources. I named mine after the data sources themselves, practice and police; simpler names like one and two would work just as well.

spring:
  datasource:
    url: jdbc:mysql://127.0.0.1:3306/practice?useUnicode=true&characterEncoding=utf-8&serverTimezone=UTC
    username: root
    password: 123456
    driver-class-name: com.mysql.jdbc.Driver
    type: com.alibaba.druid.pool.DruidDataSource
    druid:
      practice:
        url: jdbc:mysql://127.0.0.1:3306/practice?useUnicode=true&characterEncoding=utf-8&serverTimezone=UTC
        username: root
        password: 123456
        driver-class-name: com.mysql.jdbc.Driver
        #========== Pool parameter settings omitted; identical to the earlier configuration ==========
      police:
        url: jdbc:mysql://127.0.0.1:3306/police_data_4?useUnicode=true&characterEncoding=utf-8&serverTimezone=UTC
        username: root
        password: 123456
        driver-class-name: com.mysql.jdbc.Driver
        #========== Pool parameter settings omitted; identical to the earlier configuration ==========
      filter:
        slf4j:
          enabled: true
          statement-create-after-log-enabled: false
          statement-close-after-log-enabled: false
          result-set-open-after-log-enabled: false
          result-set-close-after-log-enabled: false
        config:
          enabled: true
      stat-view-servlet:
        enabled: true
        login-username: admin
        login-password: 123456
        reset-enable: false
        url-pattern: /druid/*
      web-stat-filter:
        enabled: true
        url-pattern: /*
        exclusions: '*.js,*.gif,*.png,*.css,*.ico,/druid/*'

Restructuring the packages

  1. Under the dao package, create a separate DAO interface sub-package for each data source.
  2. Under the resources/mapper directory, create a separate mapper XML directory for each data source.

Configuring the data sources

Create a config package and add a multi-data-source configuration class, MultiDataSourceConfig, which binds the properties defined in the configuration file.

package com.kay.practice.servicemain.config.datasource;

import com.alibaba.druid.spring.boot.autoconfigure.DruidDataSourceBuilder;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;

import javax.sql.DataSource;

/**
 * Multiple data source configuration
 *
 * @author Kay
 * @date 2021-03-09
 */
@Configuration
public class MultiDataSourceConfig {
    @Primary
    @Bean("practiceDataSource")
    @ConfigurationProperties("spring.datasource.druid.practice")
    public DataSource practiceDataSource() {
        return DruidDataSourceBuilder.create().build();
    }

    @Bean("policeDataSource")
    @ConfigurationProperties("spring.datasource.druid.police")
    public DataSource policeDataSource() {
        return DruidDataSourceBuilder.create().build();
    }

}
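
@Primary marks practiceDataSource as the default, so any unqualified DataSource injection point gets the primary pool. When a bean needs the secondary pool explicitly, inject it by qualifier; a hypothetical example:

import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.stereotype.Component;

import javax.sql.DataSource;

@Component
public class PoliceDataSourceHolder {

    private final DataSource policeDataSource;

    // "policeDataSource" matches the bean name declared in MultiDataSourceConfig
    public PoliceDataSourceHolder(@Qualifier("policeDataSource") DataSource policeDataSource) {
        this.policeDataSource = policeDataSource;
    }

    public DataSource getPoliceDataSource() {
        return policeDataSource;
    }
}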

Next, create a configuration class for each data source, wiring up the DAO interface package and mapper XML directory created above.
One thing to watch out for: the method for setting the mapper XML path is getResources (plural). I originally used getResource and kept getting a path-does-not-exist error.

The first:

package com.kay.practice.servicemain.config.datasource;

import org.apache.ibatis.session.SqlSessionFactory;
import org.mybatis.spring.SqlSessionFactoryBean;
import org.mybatis.spring.SqlSessionTemplate;
import org.mybatis.spring.annotation.MapperScan;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;

import javax.sql.DataSource;

/**
 * Primary data source configuration
 *
 * @author Kay
 * @date 2021-03-09
 */
@Configuration
@MapperScan(basePackages = "com.kay.practice.servicemain.dao.practice", sqlSessionTemplateRef = "practiceSqlSessionTemplate")
public class PracticeDataSourceConfig {

    @Bean(name = "practiceSqlSessionFactory")
    @Primary
    public SqlSessionFactory testSqlSessionFactory(@Qualifier("practiceDataSource") DataSource dataSource) throws Exception {
        SqlSessionFactoryBean sqlSessionFactoryBean = new SqlSessionFactoryBean();
        sqlSessionFactoryBean.setDataSource(dataSource);
        sqlSessionFactoryBean.setMapperLocations(new PathMatchingResourcePatternResolver().getResources("classpath:mapper/practice/*.xml"));
        return sqlSessionFactoryBean.getObject();
    }

    @Bean(name = "practiceTransactionManager")
    @Primary
    public DataSourceTransactionManager testTransactionManager(@Qualifier("practiceDataSource") DataSource dataSource) {
        return new DataSourceTransactionManager(dataSource);
    }

    @Bean(name = "practiceSqlSessionTemplate")
    @Primary
    public SqlSessionTemplate testSqlSessionTemplate(@Qualifier("practiceSqlSessionFactory") SqlSessionFactory sqlSessionFactory) {
        return new SqlSessionTemplate(sqlSessionFactory);
    }
}

The second:

package com.kay.practice.servicemain.config.datasource;

import org.apache.ibatis.session.SqlSessionFactory;
import org.mybatis.spring.SqlSessionFactoryBean;
import org.mybatis.spring.SqlSessionTemplate;
import org.mybatis.spring.annotation.MapperScan;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;

import javax.sql.DataSource;

/**
 * Secondary data source configuration
 *
 * @author Kay
 * @date 2021-03-09
 */
@Configuration
@MapperScan(basePackages = "com.kay.practice.servicemain.dao.police", sqlSessionTemplateRef = "policeSqlSessionTemplate")
public class PoliceDataSourceConfig {
    @Bean(name = "policeSqlSessionFactory")
    public SqlSessionFactory testSqlSessionFactory(@Qualifier("policeDataSource") DataSource dataSource) throws Exception {
        SqlSessionFactoryBean sqlSessionFactoryBean = new SqlSessionFactoryBean();
        sqlSessionFactoryBean.setDataSource(dataSource);
        sqlSessionFactoryBean.setMapperLocations(new PathMatchingResourcePatternResolver().getResources("classpath:mapper/police/*.xml"));
        return sqlSessionFactoryBean.getObject();
    }

    @Bean(name = "policeTransactionManager")
    public DataSourceTransactionManager testTransactionManager(@Qualifier("policeDataSource") DataSource dataSource) {
        return new DataSourceTransactionManager(dataSource);
    }

    @Bean(name = "policeSqlSessionTemplate")
    public SqlSessionTemplate testSqlSessionTemplate(@Qualifier("policeSqlSessionFactory") SqlSessionFactory sqlSessionFactory) {
        return new SqlSessionTemplate(sqlSessionFactory);
    }
}
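
Note that there are now two transaction managers, and practiceTransactionManager is the @Primary one. To run a transaction against the police data source, its manager has to be named explicitly. A minimal sketch (the service class and method are hypothetical):

import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

@Service
public class PoliceWriteService {

    // without the transactionManager attribute, the @Primary practiceTransactionManager would be used
    @Transactional(transactionManager = "policeTransactionManager")
    public void updatePoliceData() {
        // operations on the police data source run inside a policeTransactionManager transaction
    }
}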

Writing and running test code

Once everything above is in place, you can write some test code to verify it.

  1. Write a simple query against each data source, along with the corresponding DAO interface
    1. First data source
      <!-- List all users, for testing -->
      <select id="listAll" resultType="com.kay.practice.servicemain.entity.User">
          SELECT * FROM practice_user WHERE 1=1
      </select>
      
      package com.kay.practice.servicemain.dao.practice;
      
      import com.kay.practice.servicemain.entity.User;
      
      import java.util.List;
      
      /**
       * System user data access layer
       *
       * @author Kay
       * @date 2021-03-05
       */
      public interface UserMapper {
      
          /**
           * List all users, for testing
           * @return
           */
          List<User> listAll();
      }
      
    2. Second data source
      <!-- Query the latest 20 IMSI records, for testing -->
      <select id="listLatestImsi" resultType="com.kay.practice.servicemain.entity.ImsiSourceData">
          SELECT
              i.id AS id,
              i.entry_id AS entry_id,
              i.deviceId AS deviceid,
              i.imsi AS imsi,
              i.report_time AS report_time_long,
              i.home_location AS home_location
          FROM
              imsi_source_data i
          WHERE
              1=1
          ORDER BY i.report_time DESC
          LIMIT 20
      </select>
      
      package com.kay.practice.servicemain.dao.police;
      
      import com.baomidou.mybatisplus.core.mapper.BaseMapper;
      import com.kay.practice.servicemain.entity.ImsiSourceData;
      
      import java.util.List;
      
      /**
       * IMSI source data access layer
       *
       * @author Kay
       * @date 2020-10-26
       */
      public interface ImsiSourceDataMapper extends BaseMapper<ImsiSourceData> {
      
          /**
           * Query the latest 20 IMSI records, for testing
           * @return
           */
          List<ImsiSourceData> listLatestImsi();
      
      }
      
  2. Then write the service and controller layers that call the two queries
    1. Service layer
    package com.kay.practice.servicemain.service;
    
    import com.kay.practice.servicemain.entity.ImsiSourceData;
    import com.kay.practice.servicemain.entity.User;
    
    import java.util.List;
    
    /**
     * Druid multiple data source test
     *
     * @author Kay
     * @date 2021-03-05
     */
    public interface DruidTestService {
    
        /**
         * Get the latest IMSI records
         * @return
         */
        List<ImsiSourceData> listImsi();
    
        /**
         * Get all users
         * @return
         */
        List<User> listUser();
    }
    
    package com.kay.practice.servicemain.service.impl;
    
    import com.kay.practice.servicemain.dao.police.ImsiSourceDataMapper;
    import com.kay.practice.servicemain.dao.practice.UserMapper;
    import com.kay.practice.servicemain.entity.ImsiSourceData;
    import com.kay.practice.servicemain.entity.User;
    import com.kay.practice.servicemain.service.DruidTestService;
    import org.springframework.beans.factory.annotation.Autowired;
    import org.springframework.stereotype.Service;
    
    import java.util.List;
    
    /**
     * Druid multiple data source test implementation
     *
     * @author Kay
     * @date 2021-03-05
     */
    @Service("druidTestService")
    public class DruidTestServiceImpl implements DruidTestService {
    
        @Autowired
        private ImsiSourceDataMapper imsiSourceDataMapper;
    
        @Autowired
        private UserMapper userMapper;
    
        @Override
        public List<ImsiSourceData> listImsi() {
            return imsiSourceDataMapper.listLatestImsi();
        }
    
        @Override
        public List<User> listUser() {
            return userMapper.listAll();
        }
    }
    
    2. Controller layer
    package com.kay.practice.servicemain.controller;
    
    import com.kay.practice.servicemain.entity.ImsiSourceData;
    import com.kay.practice.servicemain.entity.User;
    import com.kay.practice.servicemain.service.DruidTestService;
    import io.swagger.annotations.Api;
    import io.swagger.annotations.ApiOperation;
    import org.springframework.beans.factory.annotation.Autowired;
    import org.springframework.web.bind.annotation.GetMapping;
    import org.springframework.web.bind.annotation.RequestMapping;
    import org.springframework.web.bind.annotation.RestController;
    
    import java.util.List;
    
    /**
     * Test controller
     *
     * @author Kay
     * @date 2020-10-23
     */
    @RestController
    @RequestMapping(value = "/test")
    @Api(tags = "Feature test controller")
    public class TestController {
    
        @Autowired
        private DruidTestService druidTestService;
    
        @GetMapping("/practice-source-test")
        @ApiOperation(value = "主数据源测试", notes = "主数据源测试")
        public String practiceSourceTest() {
            List<User> userList = druidTestService.listUser();
            System.out.println(userList.toString());
            return "测试成功";
        }
    
        @GetMapping("/police-source-test")
        @ApiOperation(value = "副数据源测试", notes = "副数据源测试")
        public String policeSourceTest() {
            List<ImsiSourceData> imsiList = druidTestService.listImsi();
            System.out.println(imsiList.toString());
            return "测试成功";
        }
    }
    
  3. Call the endpoints through Swagger; the query results printed to the console show that both queries succeeded.
    [Screenshots of the Swagger responses omitted]
  4. Visit the Druid monitoring page at http://localhost:8090/druid to view the SQL statistics and data source information.
    [Screenshots of the Druid SQL and data source panels omitted]