springboot 2.* 整合 hive 和 mysql 多数据源配置

话不多说,直接开搞:

  • 添加 POM 的hive 和 mysql 依赖(可能有一些没有用)
<!-- Hive JDBC client. The exclusions strip servlet/Jetty/Tomcat/log4j artifacts
     bundled by hive-jdbc that clash with Spring Boot's embedded Tomcat and Logback. -->
<dependency>
    <groupId>org.apache.hive</groupId>
    <artifactId>hive-jdbc</artifactId>
    <version>1.1.0</version>
    <exclusions>
        <exclusion>
            <groupId>org.eclipse.jetty.aggregate</groupId>
            <artifactId>jetty-all</artifactId>
        </exclusion>
        <exclusion>
            <groupId>org.apache.hive</groupId>
            <artifactId>hive-shims</artifactId>
        </exclusion>
        <exclusion>
            <artifactId>jasper-compiler</artifactId>
            <groupId>tomcat</groupId>
        </exclusion>
        <exclusion>
            <artifactId>jasper-runtime</artifactId>
            <groupId>tomcat</groupId>
        </exclusion>
        <exclusion>
            <artifactId>servlet-api</artifactId>
            <groupId>javax.servlet</groupId>
        </exclusion>
        <!-- Keep a single SLF4J binding (Spring Boot's Logback); drop competing ones. -->
        <exclusion>
            <artifactId>log4j-slf4j-impl</artifactId>
            <groupId>org.apache.logging.log4j</groupId>
        </exclusion>
        <exclusion>
            <artifactId>slf4j-log4j12</artifactId>
            <groupId>org.slf4j</groupId>
        </exclusion>
        <exclusion>
            <groupId>tomcat</groupId>
            <artifactId>*</artifactId>
        </exclusion>
        <exclusion>
            <groupId>ch.qos.logback</groupId>
            <artifactId>logback-classic</artifactId>
        </exclusion>
        <exclusion>
            <groupId>org.eclipse.jetty.orbit</groupId>
            <artifactId>*</artifactId>
        </exclusion>
        <exclusion>
            <groupId>org.eclipse.jetty.aggregate</groupId>
            <artifactId>*</artifactId>
        </exclusion>
        <exclusion>
            <groupId>javax.servlet</groupId>
            <artifactId>servlet-api</artifactId>
        </exclusion>
        <exclusion>
            <groupId>org.mortbay.jetty</groupId>
            <artifactId>*</artifactId>
        </exclusion>
    </exclusions>
</dependency>
<dependency>
    <groupId>org.springframework.data</groupId>
    <artifactId>spring-data-hadoop</artifactId>
    <version>2.4.0.RELEASE</version>
    <exclusions>
        <exclusion>
            <groupId>javax.servlet</groupId>
            <artifactId>*</artifactId>
        </exclusion>
    </exclusions>
</dependency>
<!-- Druid connection pool; DruidDataSourceFactory is used by DataSourceConfiguration. -->
<dependency>
    <groupId>com.alibaba</groupId>
    <artifactId>druid-spring-boot-starter</artifactId>
    <version>1.1.1</version>
</dependency>
<dependency>
    <groupId>org.apache.tomcat</groupId>
    <artifactId>tomcat-jdbc</artifactId>
</dependency>
  • 自定义注解类
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

/**
 * Marks a service method (or type) with the lookup key of the data source it
 * should run against. Consumed by {@code DataSourceAspect}, which pushes the
 * key into {@code DataSourceContextHolder} before the method executes and
 * clears it afterwards.
 *
 * <p>Note: the original article pasted the aspect class here twice; this is
 * the annotation definition the rest of the code actually references.
 */
@Target({ElementType.METHOD, ElementType.TYPE})
@Retention(RetentionPolicy.RUNTIME) // must be visible to the AOP proxy at runtime
@Documented
public @interface TargetDataSource {
    /** Data-source lookup key, e.g. "mysql" or "hive". Defaults to "mysql". */
    String value() default "mysql";
}
  • 自定义注解的切面类
import org.aspectj.lang.JoinPoint;
import org.aspectj.lang.annotation.After;
import org.aspectj.lang.annotation.Aspect;
import org.aspectj.lang.annotation.Before;
import org.springframework.core.annotation.Order;
import org.springframework.stereotype.Component;
/**
* Aspect that switches the active data source around any method annotated
* with {@code @TargetDataSource}: the annotation's value is written to the
* thread-local holder before the method runs and removed after it returns.
**/
@Aspect
@Order(-10) // high precedence so the key is set before other advice (e.g. transactions) obtains a connection
@Component
public class DataSourceAspect {
    // Binds the annotation instance on the intercepted method to the advice parameter.
    @Before("@annotation(targetDataSource)")
    public void changeDataSource(JoinPoint point, TargetDataSource targetDataSource) throws Throwable {
        String lookupKey = targetDataSource.value();
        // NOTE(review): prefer an SLF4J logger over System.out for this debug output.
        System.out.println("DataSource's lookupKey >> " + lookupKey);
        DataSourceContextHolder.set(lookupKey);
    }
    @After("@annotation(targetDataSource)")
    public void restoreDataSource(JoinPoint point, TargetDataSource targetDataSource) {
        // Clear the ThreadLocal so pooled worker threads do not leak the key into later requests.
        DataSourceContextHolder.remove();
    }
}
  • 多数据源的标记类
/**
* Thread-local holder for the data-source lookup key consulted by the routing
* data source (see DataSourceConfiguration). The aspect sets the key before an
* annotated method runs and removes it afterwards.
**/
public class DataSourceContextHolder {
    /** Lookup key for the default MySQL data source. */
    public static final String MYSQL = "mysql";
    /** Lookup key for the Hive data source. */
    public static final String HIVE = "hive";
    private static final ThreadLocal<String> local = new ThreadLocal<>();

    /** Static utility holder; not instantiable. */
    private DataSourceContextHolder() {
    }

    /** Exposes the raw ThreadLocal (kept for backward compatibility). */
    public static ThreadLocal<String> getLocal() {
        return local;
    }

    /** Sets the lookup key for the current thread. */
    public static void set(String lookupKey) {
        local.set(lookupKey);
    }

    /** Returns the current thread's lookup key, or null if none was set. */
    public static String get() {
        return local.get();
    }

    /** Clears the key; must be called after use to avoid leaks on pooled threads. */
    public static void remove() {
        local.remove();
    }
}
  • 多数据源的动态切换类
import org.springframework.beans.factory.InitializingBean;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;
import java.util.List;
import java.util.Properties;
/**
* Binds the {@code spring.datasource.source} list from application.yml.
* Each {@link Properties} entry carries the pool settings for one data source
* plus two routing keys, "label" and "type", read by DataSourceConfiguration.
**/
@Component
@ConfigurationProperties(prefix = "spring.datasource")
public class MatchProperties implements InitializingBean {
    private List<Properties> source;
    @Override
    public void afterPropertiesSet() throws Exception {
        // Debug dump of the bound configuration; NOTE(review): prefer a logger.
        System.out.println(this.source);
    }
    public List<Properties> getSource() {
        return source;
    }
    public void setSource(List<Properties> source) {
        this.source = source;
    }
}
  • 多数据源的bean 配置类
import com.alibaba.druid.pool.DruidDataSource;
import com.alibaba.druid.pool.DruidDataSourceFactory;
import org.apache.ibatis.session.SqlSessionFactory;
import org.mybatis.spring.SqlSessionFactoryBean;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;
import org.springframework.jdbc.datasource.lookup.AbstractRoutingDataSource;
import org.springframework.lang.Nullable;
import org.springframework.transaction.PlatformTransactionManager;

import javax.sql.DataSource;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;

/**
* Multi-datasource configuration: builds one Druid pool per entry in
* MatchProperties, designates the "mysql" entry as the default target and
* registers the rest in a routing map keyed by their "label".
**/
@Configuration
public class DataSourceConfiguration implements ApplicationContextAware, InitializingBean {
    private ApplicationContext context;
    @Nullable
    private Map<Object, Object> targetDataSources = new HashMap<>();
    @Nullable
    private Object defaultTargetDataSource;
    @Override
    public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
        if (context == null) {
            context = applicationContext;
        }
    }
    @Override
    public void afterPropertiesSet() throws Exception {
        // Build the pools from the bound YAML entries. Only Druid is supported:
        // any entry whose "type" is not DruidDataSource is silently skipped.
        MatchProperties properties = context.getBean(MatchProperties.class);
        for (Properties prop : properties.getSource()) {
            String clazz = prop.getProperty("type");
            String label = prop.getProperty("label");
            DataSource dataSource = null;
            if (DruidDataSource.class.getName().equals(clazz)) {
                dataSource = DruidDataSourceFactory.createDataSource(prop);
            }
            if (dataSource != null) {
                // NOTE(review): if "label" is missing this throws NPE; consider validating.
                if (label.equals(DataSourceContextHolder.MYSQL)) {
                    this.defaultTargetDataSource = dataSource;
                } else {
                    this.targetDataSources.put(label, dataSource);
                }
            }
        }
    }
    @Bean(name = "sqlSessionFactory")
    public SqlSessionFactory sqlSessionFactory() throws Exception {
        SqlSessionFactoryBean sqlSessionFactoryBean = new SqlSessionFactoryBean();
        sqlSessionFactoryBean.setDataSource(roundRobinDataSourceProxy());
        return sqlSessionFactoryBean.getObject();
    }
    // Core of the setup: a routing DataSource that picks the target per call.
    @Bean(name = "roundRobinDataSourceProxy")
    public DataSource roundRobinDataSourceProxy() {
        // NOTE(review): if only the default "mysql" source is configured, readSize is 0
        // and the modulo below would divide by zero for non-hive lookups — verify.
        final int readSize = targetDataSources.size();
        AbstractRoutingDataSource proxy = new AbstractRoutingDataSource() {
            private AtomicInteger count = new AtomicInteger(0);
            @Override
            protected Object determineCurrentLookupKey() {
                String lookupKey = DataSourceContextHolder.get();
                if (lookupKey == null) {
                    // Fall back to the default data source; avoids losing the source for
                    // beans injected outside the aspect (e.g. ShiroRealm-injected services).
                    return null;
                }
                if (DataSourceContextHolder.HIVE.equals(lookupKey)) {
                    return lookupKey;
                }
                // Round-robin over MySQL read replicas keyed "mysql1".."mysqlN".
                // NOTE(review): the sample YAML labels its sources "mysql" and "hive",
                // so no "mysqlN" key exists there — confirm the intended label scheme.
                // NOTE(review): count eventually overflows to negative, making idx
                // negative and the lookup fail; consider Math.floorMod or a reset.
                int number = count.getAndAdd(1);
                int idx = number % readSize;
                System.out.println("DataSource >> " + DataSourceContextHolder.MYSQL + (idx + 1));
                return DataSourceContextHolder.MYSQL + (idx + 1);
            }
        };
        proxy.setDefaultTargetDataSource(defaultTargetDataSource);
        proxy.setTargetDataSources(targetDataSources);
        return proxy;
    }
    @Bean
    public SqlSessionTemplate sqlSessionTemplate(SqlSessionFactory sqlSessionFactory) {
        return new SqlSessionTemplate(sqlSessionFactory);
    }
    @Bean
    public PlatformTransactionManager annotationDrivenTransactionManager(@Qualifier(value = "roundRobinDataSourceProxy") DataSource roundRobinDataSourceProxy) {
        // Transactions are managed against the routing proxy so the lookup key decides the connection.
        return new DataSourceTransactionManager(roundRobinDataSourceProxy);
    }
}

  • 配置文件
spring:
  datasource:
    source:
      - label: mysql
        type: com.alibaba.druid.pool.DruidDataSource
        driver-class-name: com.mysql.cj.jdbc.Driver
        url: jdbc:mysql://10.111.32.118:3306/geoc?useUnicode=true&characterEncoding=utf-8&useSSL=false&serverTimezone=Asia/Shanghai&autoReconnect=true
        username: root
        password: 123456
        # 下面为连接池的补充设置,应用到上面所有数据源中
        # 初始化大小,最小,最大
        initialSize: 1
        minIdle: 3
        maxActive: 20
        # 配置获取连接等待超时的时间
        maxWait: 60000
        # 配置间隔多久才进行一次检测,检测需要关闭的空闲连接,单位是毫秒
        timeBetweenEvictionRunsMillis: 60000
        # 配置一个连接在池中最小生存的时间,单位是毫秒
        minEvictableIdleTimeMillis: 30000
        validationQuery: select 1
        testWhileIdle: true
        testOnBorrow: false
        testOnReturn: false
        # 打开PSCache,并且指定每个连接上PSCache的大小
        poolPreparedStatements: true
        maxPoolPreparedStatementPerConnectionSize: 20
      - label: hive
        url: jdbc:hive2://h1:10000/test
        driver-class-name: org.apache.hive.jdbc.HiveDriver
        type: com.alibaba.druid.pool.DruidDataSource
        username: hadoop
        password: hadoop
        # 下面为连接池的补充设置,应用到上面所有数据源中
        # 初始化大小,最小,最大
        initialSize: 1
        minIdle: 3
        maxActive: 20
        # 配置获取连接等待超时的时间
        maxWait: 60000
        # 配置间隔多久才进行一次检测,检测需要关闭的空闲连接,单位是毫秒
        timeBetweenEvictionRunsMillis: 60000
        # 配置一个连接在池中最小生存的时间,单位是毫秒
        minEvictableIdleTimeMillis: 30000
        validationQuery: select 1
        testWhileIdle: true
        testOnBorrow: false
        testOnReturn: false
        # 打开PSCache,并且指定每个连接上PSCache的大小
        poolPreparedStatements: true
        maxPoolPreparedStatementPerConnectionSize: 20

  • 如何使用
只要在相应的 service 方法上加上注解

	@TargetDataSource(value = "mysql")

	@TargetDataSource(value = "hive")

打完收工!

  • 0
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值