sharding-sphere单库分表(入门级示例)

sharding-sphere单库分表(入门级示例)

环境信息

  • mybatis-plus3.4.2

  • shardingsphere-jdbc5.1.1

  • jdk8+

  • spring-boot2.7.4

  • mysql8

需求说明(以实现此需求为例)

以月为单位,拆分trans_record,实现以下功能:

  1. 自动创建对应月份表
  2. 自动更新sharding中的数据节点信息
  3. 整合mybatis-plus

注:本文着重演示说明sharding-sphere,mybatis-plus的相关内容不作介绍,需要的话,可通过文末链接,下载本项目demo代码进行相关调试

sharding-sphere单库分表

提示:下面只罗列重要步骤,细节不作详细介绍。

第一步:引入sharding-jdbc依赖

<dependency>
    <groupId>org.apache.shardingsphere</groupId>
    <artifactId>shardingsphere-jdbc-core-spring-boot-starter</artifactId>
    <version>5.1.1</version>
</dependency>

第二步:编写配置

提示:下面展示的配置已去掉不相关部分

spring:
  ### sharding configuration
  shardingsphere:
    enabled: true # enable sharding
    props:
      sql-show: true # print the actual SQL
      #sql-simple: true # print simplified SQL instead
    datasource:
      names: ds0 # custom datasource name(s); separate multiple names with commas, e.g. master,slave
      ds0:
        jdbc-url: ${spring.datasource.hikari.jdbc-url}
        username: ${spring.datasource.hikari.username}
        password: ${spring.datasource.hikari.password}
        type: ${spring.datasource.type}
        driver-class-name: ${spring.datasource.hikari.driver-class-name}
    mode:
      type: Memory
    rules: # see official docs: https://shardingsphere.apache.org/document/current/cn/user-manual/shardingsphere-jdbc/yaml-config/rules/sharding/
      sharding:
        tables: # to shard more tables, add more entries shaped like the trans_record one below
          trans_record: # logic table name (real table names such as trans_record_202210, trans_record_202211 map to the logic name trans_record)
            table-strategy: # table sharding strategy
              standard: # standard - single sharding column; complex - multiple sharding columns; hint - Hint strategy; none - no sharding
                sharding-column: trans_date # use column trans_date of trans_record as the sharding column
                sharding-algorithm-name: trans-date-algorithm # the sharding algorithm to use (any custom name, as long as it matches the one declared below)
            # all data storage nodes: datasource.realTableName
            # NOTE: the range should equal or exceed the range of real tables you will actually use
            # NOTE: if a query gives no value/range for the sharding column, ALL tables listed here are queried (i.e. ds0.trans_record_202201 - ds0.trans_record_210012)
            actual-data-nodes: ds0.trans_record_$->{2022..2100}${(1..12).collect{t ->t.toString().padLeft(2,'0')}}
        # sharding algorithm configuration
        sharding-algorithms: # to configure more algorithms, add more entries shaped like trans-date-algorithm below
          trans-date-algorithm: # sharding algorithm name (anything you like)
            props:
              strategy: standard
              algorithmClassName: com.ideaaedi.shardingdynamictable.config.sharding.DateShardingAlgorithm  # the algorithm implementation class
            type: JD_SHARDING # algorithm type; must equal the return value of getType() of the implementation class

第三步: 创建并实现上一步中指定的算法实现

提示:下面只罗列核心类,完整项目见文末下载链接

分片算法实现类
import com.google.common.collect.BoundType;
import com.google.common.collect.Lists;
import com.google.common.collect.Range;
import com.ideaaedi.shardingdynamictable.config.sharding.support.ShardingTableSupport;
import com.ideaaedi.shardingdynamictable.entity.bo.DatasourceArgs;
import com.ideaaedi.shardingdynamictable.util.DatabaseUtil;
import lombok.extern.slf4j.Slf4j;
import org.apache.shardingsphere.sharding.api.sharding.standard.PreciseShardingValue;
import org.apache.shardingsphere.sharding.api.sharding.standard.RangeShardingValue;
import org.apache.shardingsphere.sharding.api.sharding.standard.StandardShardingAlgorithm;

import javax.annotation.Resource;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * Date (yyyy-MM-dd) sharding algorithm: maps a trans_date value to a monthly real table
 * (e.g. 2022-10-06 -> trans_record_202210), creating the real table on the fly when missing.
 * <br />
 * This class is registered through ShardingSphere's SPI (see the note in
 * {@link DateShardingAlgorithm#getType()}), which also makes it eligible for Spring injection
 * via {@code @Resource} etc.
 *
 * @author JustryDeng
 * @since 2022/10/6 23:40:28
 */
@Slf4j
public class DateShardingAlgorithm implements StandardShardingAlgorithm<String> {
    
    @Resource
    ShardingTableSupport shardingTableSupport;
    
    @Resource
    DatasourceArgs datasourceArgs;
    
    /**
     * Precise sharding: shardingValue.getValue() holds a single date, return the single matching real table.
     *
     * @param availableTargetNames
     *            candidate target tables (i.e. the tables of actual-data-nodes under the current datasource)
     */
    @Override
    public String doSharding(Collection<String> availableTargetNames, PreciseShardingValue<String> shardingValue) {
        // logic table name, e.g. trans_record
        final String logicTableName = shardingValue.getLogicTableName();
        // current value of the sharding column, e.g. 2022-10-06
        final String value = shardingValue.getValue();
        // assemble the real table name, e.g. trans_record_202210
        final String realTableName = buildRealTableName(logicTableName, value);
        // make sure the real table exists (create it when necessary)
        ensureTableExist(logicTableName, realTableName);
        if (!availableTargetNames.contains(realTableName)) {
            availableTargetNames.add(realTableName);
        }
        return realTableName;
    }
    
    /**
     * Range sharding: shardingValue.getValueRange() holds a range, return all real tables inside it.
     * <br />
     * e.g. for a query with trans_date <= 2022-10-07 and trans_date >= 2022-08-07,
     *      the returned table names are trans_record_202208 ... trans_record_202210
     *
     * @param availableTargetNames
     *            candidate target tables (i.e. the tables of actual-data-nodes under the current datasource)
     */
    @Override
    @SuppressWarnings("AlibabaLowerCamelCaseVariableNaming")
    public Collection<String> doSharding(Collection<String> availableTargetNames, RangeShardingValue<String> shardingValue) {
        List<String> targetTableList = new ArrayList<>(availableTargetNames);
        
        final String logicTableName = shardingValue.getLogicTableName();
        final Range<String> valueRange = shardingValue.getValueRange();
        
        /// upper bound
        StringBuilder upperTableInfo = new StringBuilder();
        if (valueRange.hasUpperBound()) {
            final String upperTableName = buildRealTableName(logicTableName, valueRange.upperEndpoint());
            final BoundType upperBoundType = valueRange.upperBoundType();
            if (BoundType.CLOSED == upperBoundType) {
                upperTableInfo.append(" <= ").append(upperTableName);
            } else if (BoundType.OPEN == upperBoundType) {
                upperTableInfo.append(" < ").append(upperTableName);
            }
            targetTableList = targetTableList.stream().filter(realTableName -> {
                // BoundType.CLOSED - upper bound includes upperEndpoint, i.e. <= upperEndpoint
                // BoundType.OPEN - upper bound excludes upperEndpoint, i.e. < upperEndpoint
                if (BoundType.CLOSED == upperBoundType) {
                    return realTableName.compareTo(upperTableName) <= 0;
                } else if (BoundType.OPEN == upperBoundType) {
                    return realTableName.compareTo(upperTableName) < 0;
                } else {
                    throw new IllegalStateException("upperBoundType should not be null.");
                }
            }).collect(Collectors.toList());
        }
        
        /// lower bound
        StringBuilder lowerTableInfo = new StringBuilder();
        if (valueRange.hasLowerBound()) {
            final String lowerTableName = buildRealTableName(logicTableName, valueRange.lowerEndpoint());
            final BoundType lowerBoundType = valueRange.lowerBoundType();
            if (BoundType.CLOSED == lowerBoundType) {
                lowerTableInfo.append(" >= ").append(lowerTableName);
            } else if (BoundType.OPEN == lowerBoundType) {
                lowerTableInfo.append(" > ").append(lowerTableName);
            }
            targetTableList = targetTableList.stream().filter(realTableName -> {
                // BoundType.CLOSED - lower bound includes lowerEndpoint, i.e. >= lowerEndpoint
                // BoundType.OPEN - lower bound excludes lowerEndpoint, i.e. > lowerEndpoint
                if (BoundType.CLOSED == lowerBoundType) {
                    return realTableName.compareTo(lowerTableName) >= 0;
                } else if (BoundType.OPEN == lowerBoundType) {
                    return realTableName.compareTo(lowerTableName) > 0;
                } else {
                    throw new IllegalStateException("lowerBoundType should not be null.");
                }
            }).collect(Collectors.toList());
        }
        
        if (targetTableList.isEmpty()) {
            log.info("availableTargetNames -> {}", availableTargetNames);
            log.error("Cannot determine realTableName whose logicTableName is '{}'."
                                                 + (upperTableInfo.length() == 0 ? "" : "\tsatisfy: realTableName" + upperTableInfo)
                                                 + (lowerTableInfo.length() == 0 ? "" : "\tsatisfy: realTableName" + lowerTableInfo)
                    ,  logicTableName);
            return Collections.emptyList();
        }
        // make sure every matched real table exists (create them when necessary)
        for (String realTableName : targetTableList) {
            ensureTableExist(logicTableName, realTableName);
        }
        return targetTableList;
    }
    
    @Override
    public void init() {
        // nothing to initialize
    }
    
    /**
     * SPI registration.
     */
    @Override
    public String getType() {
        // NOTE: also create the file org.apache.shardingsphere.sharding.spi.ShardingAlgorithm under
        //       resources/META-INF/services/ and declare this class in it as an SPI implementation
        return "JD_SHARDING";
    }
    
    /**
     * Build the real (monthly) table name from a yyyy-MM-dd sharding value.
     *
     * @param logicTableName
     *            logic table name, e.g. trans_record
     * @param dateValue
     *            sharding column value, assumed to be in yyyy-MM-dd form
     * @return real table name, e.g. trans_record_202210
     */
    private static String buildRealTableName(String logicTableName, String dateValue) {
        final String[] arr = dateValue.split("-");
        return logicTableName + "_" + arr[0] + arr[1];
    }
    
    /**
     * Make sure the real table exists; create it from the logic table's DDL when it is missing.
     *
     * @param logicTableName
     *            logic table name
     * @param realTableName
     *            real table name
     */
    private void ensureTableExist(String logicTableName, String realTableName) {
        Set<String> realTableNameSet = shardingTableSupport.getRealTableName(datasourceArgs, logicTableName);
        if (!realTableNameSet.contains(realTableName)) {
            DatabaseUtil.executeSql(datasourceArgs, Lists.newArrayList(shardingTableSupport.obtainRealTableDdl(datasourceArgs, logicTableName, realTableName)));
            shardingTableSupport.addRealTableName(datasourceArgs, logicTableName, realTableName);
        }
    }
}

注:编写完此类后,需要将此类进行SPI注册

resources/META-INF/services下创建文件org.apache.shardingsphere.sharding.spi.ShardingAlgorithm, 文件内容指定实现类为上述类

在这里插入图片描述

actual-data-nodes刷新类

见:https://github.com/apache/shardingsphere/issues/16725

import com.ideaaedi.shardingdynamictable.entity.bo.DatasourceArgs;
import com.ideaaedi.shardingdynamictable.entity.bo.UpdateActualDataNodeBO;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.shardingsphere.driver.jdbc.core.datasource.ShardingSphereDataSource;
import org.apache.shardingsphere.infra.config.RuleConfiguration;
import org.apache.shardingsphere.mode.manager.ContextManager;
import org.apache.shardingsphere.sharding.algorithm.config.AlgorithmProvidedShardingRuleConfiguration;
import org.apache.shardingsphere.sharding.api.config.rule.ShardingTableRuleConfiguration;
import org.springframework.stereotype.Component;
import org.springframework.util.CollectionUtils;

import javax.annotation.Resource;
import java.util.Collection;
import java.util.LinkedList;
import java.util.List;
import java.util.Objects;
import java.util.TreeSet;
import java.util.stream.Collectors;

/**
 * Support class for refreshing actual-data-nodes. <br /> <br /> Reference: https://github.com/apache/shardingsphere/issues/16725 <br /> <br />
 * Background: the actual-data-nodes configured in the config file covers far more tables than
 * currently exist (future-proofing); this class rebuilds actual-data-nodes from the tables that
 * really exist in the database. <br />
 * <br /> NOTE: without this refresh, a query (e.g. a full scan) would fail because some of the
 * configured tables do not exist
 *
 * @author JustryDeng
 * @since 2022/10/7 16:06:22
 */
@Slf4j
@Component
public class RefreshActualDataNodesSupport {
    
    @Resource
    ShardingSphereDataSource shardingSphereDataSource;
    
    /**
     * Refresh actual-data-nodes of the logic table described by the given BO.
     *
     * @param bo
     *            carries the logic table name and, per datasource, the set of existing real tables
     * @throws IllegalArgumentException
     *             when the logic table name, the datasource list, a datasource name or a real-table
     *             set is missing/blank/empty
     */
    public void refresh(UpdateActualDataNodeBO bo) {
        final String logicTableName = bo.getLogicTableName();
        if (StringUtils.isBlank(logicTableName)) {
            throw new IllegalArgumentException("logicTableName cannot be null.");
        }
        final List<Pair<DatasourceArgs, TreeSet<String>>> dsAndRealTablesPairList = bo.getDsAndRealTablesPairList();
        if (CollectionUtils.isEmpty(dsAndRealTablesPairList)) {
            throw new IllegalArgumentException("dsAndRealTablesPairList cannot be empty.");
        }
        // one segment per datasource, e.g. ds0.${['trans_record_202210','trans_record_202211']}
        List<String> dataNodeSegments = new LinkedList<>();
        for (Pair<DatasourceArgs, TreeSet<String>> datasourceArgsTreeSetPair : dsAndRealTablesPairList) {
            final DatasourceArgs datasourceArgs = datasourceArgsTreeSetPair.getLeft();
            Objects.requireNonNull(datasourceArgs, "DatasourceArgs cannot be null.");
            final String datasourceName = datasourceArgs.getName();
            if (StringUtils.isBlank(datasourceName)) {
                throw new IllegalArgumentException("datasourceName cannot be null.");
            }
            final TreeSet<String> realTableNames = datasourceArgsTreeSetPair.getRight();
            if (CollectionUtils.isEmpty(realTableNames)) {
                throw new IllegalArgumentException("realTableNames cannot be empty.");
            }
            final String realTableNameInline = realTableNames.stream()
                    .map(realTableName -> "'" + realTableName + "'")
                    .collect(Collectors.joining(","));
            // e.g. "ds-0.t_order_$->{[1..8]}" is equivalent to ds-0.t_order_${[1,2,3,4,5,6,7,8]}
            // multiple datasources are comma separated, e.g.: "ds-0.t_order_$->{[1..8]},ds-1.t_order_123"
            dataNodeSegments.add(datasourceName + ".${[" + realTableNameInline + "]}");
        }
        String actualDataNodes = String.join(",", dataNodeSegments);
        log.info("refresh actual-data-nodes to {}", actualDataNodes);
        updateShardRuleActualDataNodes(shardingSphereDataSource, shardingSphereDataSource.getSchemaName(), logicTableName, actualDataNodes);
    }
    
    /**
     * Replace the actual-data-nodes of the given logic table inside the sharding rule configuration
     * and push the rebuilt configuration into the context manager.
     */
    private void updateShardRuleActualDataNodes(ShardingSphereDataSource dataSource, String schemaName,
                                                String logicTableName, String newActualDataNodes) {
        // Context manager.
        ContextManager contextManager = dataSource.getContextManager();
        
        // Rule configuration.
        Collection<RuleConfiguration> newRuleConfigList = new LinkedList<>();
        Collection<RuleConfiguration> oldRuleConfigList = contextManager
                .getMetaDataContexts()
                .getMetaData(schemaName)
                .getRuleMetaData()
                .getConfigurations();
        
        for (RuleConfiguration oldRuleConfig : oldRuleConfigList) {
            if (oldRuleConfig instanceof AlgorithmProvidedShardingRuleConfiguration) {
                
                // Algorithm provided sharding rule configuration
                AlgorithmProvidedShardingRuleConfiguration oldAlgorithmConfig =
                        (AlgorithmProvidedShardingRuleConfiguration) oldRuleConfig;
                AlgorithmProvidedShardingRuleConfiguration newAlgorithmConfig =
                        new AlgorithmProvidedShardingRuleConfiguration();
                
                // Sharding table rule configuration Collection
                Collection<ShardingTableRuleConfiguration> newTableRuleConfigList = new LinkedList<>();
                Collection<ShardingTableRuleConfiguration> oldTableRuleConfigList = oldAlgorithmConfig.getTables();
                
                oldTableRuleConfigList.forEach(oldTableRuleConfig -> {
                    if (logicTableName.equals(oldTableRuleConfig.getLogicTable())) {
                        // the new nodes may overlap with the old ones; that is fine, sharding dedupes
                        // automatically (see org.apache.shardingsphere.sharding.rule.TableRule#addActualTable)
                        ShardingTableRuleConfiguration newTableRuleConfig =
                                new ShardingTableRuleConfiguration(oldTableRuleConfig.getLogicTable(), newActualDataNodes);
                        newTableRuleConfig.setTableShardingStrategy(oldTableRuleConfig.getTableShardingStrategy());
                        newTableRuleConfig.setDatabaseShardingStrategy(oldTableRuleConfig.getDatabaseShardingStrategy());
                        newTableRuleConfig.setKeyGenerateStrategy(oldTableRuleConfig.getKeyGenerateStrategy());
                        
                        newTableRuleConfigList.add(newTableRuleConfig);
                    } else {
                        newTableRuleConfigList.add(oldTableRuleConfig);
                    }
                });
                
                // copy everything else over unchanged
                newAlgorithmConfig.setTables(newTableRuleConfigList);
                newAlgorithmConfig.setAutoTables(oldAlgorithmConfig.getAutoTables());
                newAlgorithmConfig.setBindingTableGroups(oldAlgorithmConfig.getBindingTableGroups());
                newAlgorithmConfig.setBroadcastTables(oldAlgorithmConfig.getBroadcastTables());
                newAlgorithmConfig.setDefaultDatabaseShardingStrategy(oldAlgorithmConfig.getDefaultDatabaseShardingStrategy());
                newAlgorithmConfig.setDefaultTableShardingStrategy(oldAlgorithmConfig.getDefaultTableShardingStrategy());
                newAlgorithmConfig.setDefaultKeyGenerateStrategy(oldAlgorithmConfig.getDefaultKeyGenerateStrategy());
                newAlgorithmConfig.setDefaultShardingColumn(oldAlgorithmConfig.getDefaultShardingColumn());
                newAlgorithmConfig.setShardingAlgorithms(oldAlgorithmConfig.getShardingAlgorithms());
                newAlgorithmConfig.setKeyGenerators(oldAlgorithmConfig.getKeyGenerators());
                
                newRuleConfigList.add(newAlgorithmConfig);
            }
        }
        
        // update context
        contextManager.alterRuleConfiguration(schemaName, newRuleConfigList);
    }
}

第四步:测试

测试方案:随机生成数据插入,然后查询所有

  • 单元测试类

    在这里插入图片描述

  • 运行单元测试

    • 观察控制台,单元测试未报错

      在这里插入图片描述

    • 观看库表

      x

demo代码调试说明

下载本项目后,只需将配置文件中的数据库链接地址、账号密码换成自己的,就可以运行单元测试了。

注:无需创建trans_record相关表,程序会自动创建

相关资料

  • 1
    点赞
  • 5
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值