Dynamically adding data sources in Spring Boot and querying sharded databases/tables with Sharding-JDBC

Recently our project needed to expose data from the data warehouse, which meant writing some queries by hand that have to work across sharded databases and tables. So I chose Google Guava for in-memory caching, Redis for persistent caching, and Sharding-JDBC for the cross-table queries.
Below are the main classes:

1. Database connection info cache

package com.tudou.potato.datagaea.apps.cache;

import com.alibaba.fastjson.JSONObject;
import com.tudou.potato.datagaea.core.wh.http.resp.RdsLinkDetailResp;
import java.util.List;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Component;

/**
 * @author: ge.li
 * @create: 2020-05-22 09:42
 **/
@Component
public class DataSourceCache {

    private static final String DATAGAEA_APPS_RDS_LINK = "datagaea_apps_rdslink:";

    @Autowired
    private RedisTemplate redisTemplate;

    /**
     * Reads the cached RDS link details for the given code; returns null on a
     * cache miss or any Redis/parsing error.
     */
    public RdsLinkDetailResp getRDSLinkInfo(String code) {
        try {
            String redisKey = DATAGAEA_APPS_RDS_LINK + code;
            String jsonStr = redisTemplate.opsForValue().get(redisKey).toString();
            return JSONObject.parseObject(jsonStr, RdsLinkDetailResp.class);
        } catch (Exception ex) {
            // A missing key or broken JSON both count as "not cached".
            return null;
        }
    }

    /** Caches the RDS link details as a JSON string, keyed by the link code. */
    public void setRDSLinkInfo(RdsLinkDetailResp data) {
        String redisKey = DATAGAEA_APPS_RDS_LINK + data.getCode();
        redisTemplate.opsForValue().set(redisKey, JSONObject.toJSONString(data));
    }
}
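
The cache above assumes a RedisTemplate whose keys and values are serialized as plain strings, so the JSON written by setRDSLinkInfo is stored readably. A minimal sketch of such a configuration (this class and bean are my own addition for illustration, not part of the original project):

package com.tudou.potato.datagaea.apps.cache;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.serializer.StringRedisSerializer;

/**
 * Hypothetical configuration (not from the original post): registers a
 * RedisTemplate that stores keys and values as plain strings, matching the
 * way DataSourceCache reads and writes JSON.
 */
@Configuration
public class RedisCacheConfig {

    @Bean
    public RedisTemplate<String, String> redisTemplate(RedisConnectionFactory factory) {
        RedisTemplate<String, String> template = new RedisTemplate<>();
        template.setConnectionFactory(factory);
        template.setKeySerializer(new StringRedisSerializer());
        template.setValueSerializer(new StringRedisSerializer());
        return template;
    }
}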

2. Data source cache

package com.tudou.potato.datagaea.apps.cache;

import com.alibaba.druid.pool.DruidDataSource;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import java.util.concurrent.TimeUnit;
import org.springframework.jdbc.datasource.lookup.AbstractRoutingDataSource;

/**
 * @author: ge.li
 * @create: 2020-05-19 09:56
 **/
public class DynamicDataSource extends AbstractRoutingDataSource {

    public final static Cache<String, DruidDataSource> cache = CacheBuilder.newBuilder()
        // Initial cache capacity of 10; tune this to the expected number of data sources
        .initialCapacity(10)
        // Concurrency level of 5: at most 5 threads may write to the cache at the same time
        .concurrencyLevel(5)
        // Entries expire 10 * 60 seconds after they are written
        .expireAfterWrite(10 * 60, TimeUnit.SECONDS)
        // Build the cache instance
        .build();

    @Override
    protected String determineCurrentLookupKey() {
        // Lookup-key routing is not used here; callers fetch data sources from the cache directly.
        return null;
    }

    public static void setDataSource(String dataSourceName, DruidDataSource dataSource) {
        DynamicDataSource.cache.put(dataSourceName, dataSource);
    }

    public static DruidDataSource getDataSource(String dataSourceName) {
        return DynamicDataSource.cache.getIfPresent(dataSourceName);
    }

    public static void clear() {
        // invalidateAll() discards every cached data source; Guava's cleanUp() would only run pending maintenance.
        DynamicDataSource.cache.invalidateAll();
    }

    public static DruidDataSource createDataSource(String url, String userName, String password) {
        DruidDataSource druidDataSource = new DruidDataSource();
        druidDataSource.setUrl(url);
        druidDataSource.setUsername(userName);
        druidDataSource.setPassword(password);
        druidDataSource.setMaxPoolPreparedStatementPerConnectionSize(30);
        return druidDataSource;
    }
}
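
One caveat with expireAfterWrite: when an entry expires, the evicted DruidDataSource (and its pooled connections) is never closed. A hedged sketch of the same Guava cache built with a removal listener that closes evicted pools (my addition, not part of the original class):

package com.tudou.potato.datagaea.apps.cache;

import com.alibaba.druid.pool.DruidDataSource;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
import java.util.concurrent.TimeUnit;

/**
 * Sketch only (not from the original post): the same cache settings as in
 * DynamicDataSource, plus a removal listener that closes evicted
 * DruidDataSource instances so their pooled connections are released.
 */
public class ClosingDataSourceCache {

    private static final RemovalListener<String, DruidDataSource> CLOSE_ON_EVICT =
        notification -> {
            DruidDataSource evicted = notification.getValue();
            if (evicted != null) {
                evicted.close();
            }
        };

    public static final Cache<String, DruidDataSource> cache = CacheBuilder.newBuilder()
        .initialCapacity(10)
        .concurrencyLevel(5)
        .expireAfterWrite(10 * 60, TimeUnit.SECONDS)
        .removalListener(CLOSE_ON_EVICT)
        .build();
}

Note that Guava invokes removal listeners lazily, during later cache reads and writes (or an explicit cleanUp()), not on a background timer.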

3. Sharding-JDBC configuration

package com.tudou.potato.datagaea.apps.cache;

import com.alibaba.druid.pool.DruidDataSource;

import com.tudou.potato.api.bean.result.APIResponse;
import com.tudou.potato.api.message.BizCommMessage;
import com.tudou.potato.common.pojo.principal.PrincipalInfoContext;
import com.tudou.potato.datagaea.apps.tls.base.OnlineQueryResult;
import com.tudou.potato.datagaea.apps.tls.repo.impl.RDBConnectionService;
import com.tudou.potato.datagaea.core.wh.http.resp.RdsLinkDetailResp;
import com.tudou.potato.datagaea.core.wh.remote.RdsLinkRemote;
import io.shardingsphere.api.config.rule.ShardingRuleConfiguration;
import io.shardingsphere.api.config.rule.TableRuleConfiguration;
import io.shardingsphere.shardingjdbc.api.ShardingDataSourceFactory;
import java.sql.Connection;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import javax.annotation.Resource;
import javax.sql.DataSource;
import org.springframework.stereotype.Component;

/**
 * @author: ge.li
 * @create: 2020-05-20 10:33
 **/
@Component
public class SharedDataSource {

    @Resource
    private RdsLinkRemote rdsLinkRemote;
    @Resource
    private RDBConnectionService rDBConnectionService;
    @Resource
    private DataSourceCache dataSourceCache;

    /**
     * Returns the RDS link details for the given code and makes sure a usable
     * DruidDataSource is registered in DynamicDataSource. If the cached data
     * source is missing or its test connection fails, the link details are
     * (re)loaded -- from Redis first, then from the remote service -- and a new
     * data source is created.
     */
    public RdsLinkDetailResp createDataSource(String code) {
        RdsLinkDetailResp rdsinfo = null;
        DruidDataSource data = DynamicDataSource.getDataSource(code);
        try {
            // Try the cached data source: a missing entry or failed connection drops into the catch block.
            rdsinfo = dataSourceCache.getRDSLinkInfo(code);
            Connection conn = data.getConnection();
            conn.close();
        } catch (Exception ex) {
            if (null == rdsinfo) {
                APIResponse<RdsLinkDetailResp> rdsinfoResp = rdsLinkRemote
                    .detailByCode(code, PrincipalInfoContext.getAuthorization(),
                        PrincipalInfoContext.getReqId());
                if (BizCommMessage.OK != rdsinfoResp.getCode()) {
                    return null;
                }
                dataSourceCache.setRDSLinkInfo(rdsinfoResp.getData());
                rdsinfo = dataSourceCache.getRDSLinkInfo(code);
            }
            DynamicDataSource.setDataSource(code, DynamicDataSource
                .createDataSource(rdsinfo.getJdbcUrl(),
                    rdsinfo.getUsername(), rdsinfo.getPassword()));
        }
        return rdsinfo;
    }

    /**
     * Builds a Sharding-JDBC data source over the given RDS instances and runs
     * the query across all matching shard tables.
     *
     * @param rdsCodes  codes of the RDS links (one per physical database)
     * @param tableName logical table name used in the SQL
     * @param areaCodes suffixes of the physical tables (tableName_areaCode)
     * @param sqlStr    SQL written against the logical table
     */
    public OnlineQueryResult sharedDataSourceSelectSQL(List<String> rdsCodes, String tableName,
        List<String> areaCodes, String sqlStr) throws Exception {
        OnlineQueryResult resultData = null;
        List<String> dbNames = new ArrayList<>();
        Map<String, DataSource> dataSourceMap = new HashMap<>();
        for (String code : rdsCodes) {
            RdsLinkDetailResp rdsinfo = createDataSource(code);
            DruidDataSource data = DynamicDataSource.getDataSource(code);
            dataSourceMap.put(rdsinfo.getRdsDbName(), data);
            dbNames.add(rdsinfo.getRdsDbName());
        }
        ShardingRuleConfiguration shardingRuleConfig = new ShardingRuleConfiguration();
        // Actual data nodes: every dbName.tableName_areaCode combination, comma separated.
        StringBuilder tables = new StringBuilder();
        for (String dbName : dbNames) {
            for (String areaCode : areaCodes) {
                if (tables.length() > 0) {
                    tables.append(",");
                }
                tables.append(String.format("%s.%s_%s", dbName, tableName, areaCode));
            }
        }
        TableRuleConfiguration orderTableRuleConfig = new TableRuleConfiguration();
        // Logical table name expected in the incoming SQL
        orderTableRuleConfig.setLogicTable(tableName);
        orderTableRuleConfig.setActualDataNodes(tables.toString());
        shardingRuleConfig.getTableRuleConfigs().add(orderTableRuleConfig);
        DataSource dataSource = ShardingDataSourceFactory
            .createDataSource(dataSourceMap, shardingRuleConfig, new HashMap<>(),
                new Properties());
        Connection conn = dataSource.getConnection();
        resultData = rDBConnectionService.executeSelectSQL(sqlStr, conn);
        return resultData;
    }
}
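
A usage sketch (the codes, table name, area suffixes and SQL below are invented for illustration; the query is written against the logical table and Sharding-JDBC rewrites it to the dbName.tableName_areaCode combinations built above):

package com.tudou.potato.datagaea.apps.cache;

import com.tudou.potato.datagaea.apps.tls.base.OnlineQueryResult;
import java.util.Arrays;
import java.util.List;
import javax.annotation.Resource;
import org.springframework.stereotype.Component;

/**
 * Hypothetical caller (not part of the original post); all literal values are examples.
 */
@Component
public class SharedDataSourceDemo {

    @Resource
    private SharedDataSource sharedDataSource;

    public OnlineQueryResult queryOrders() throws Exception {
        List<String> rdsCodes = Arrays.asList("rds_order_0", "rds_order_1"); // two RDS links
        List<String> areaCodes = Arrays.asList("310000", "320000");          // per-area table suffixes
        // SQL is written against the logical table t_order; it is routed to
        // t_order_310000 and t_order_320000 in every registered database.
        return sharedDataSource.sharedDataSourceSelectSQL(
            rdsCodes, "t_order", areaCodes,
            "SELECT * FROM t_order WHERE order_id = 1");
    }
}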
