新年发个 ShardingSphere 可扩容的自定义分表

新年发个 ShardingSphere 可扩容的自定义分表

直接上代码……哈哈

先引入jar包

<dependency>
    <groupId>org.apache.shardingsphere</groupId>
    <artifactId>shardingsphere-jdbc-core-spring-boot-starter</artifactId>
    <version>5.0.0</version>
</dependency>

application.properties 配置(Spring Boot 属性文件,并非 XML)

## 配置第 1 个数据源 注意数据源名称(hikari使用spring.*.jdbc-url)
spring.shardingsphere.datasource.db0.type=com.zaxxer.hikari.HikariDataSource
spring.shardingsphere.datasource.db0.jdbc-url=jdbc:mysql://localhost:3306/db0?useUnicode=true&serverTimezone=Asia/Shanghai&characterEncoding=UTF-8&autoReconnect=true
#spring.shardingsphere.datasource.db0.type=com.zaxxer.hikari.HikariDataSource
#spring.shardingsphere.datasource.db0.url=jdbc:mysql://localhost:3306/db_chat?useUnicode=true&serverTimezone=Asia/Shanghai&characterEncoding=UTF-8&autoReconnect=true&failOverReadOnly=false&maxReconnects=15000
spring.shardingsphere.datasource.db0.driver-class-name=com.mysql.cj.jdbc.Driver
spring.shardingsphere.datasource.db0.username=root
spring.shardingsphere.datasource.db0.password=
spring.shardingsphere.datasource.db0.maxPoolSize=50
spring.shardingsphere.datasource.db0.minPoolSize=50
spring.shardingsphere.datasource.db0.maintenanceIntervalMilliseconds=30000
spring.shardingsphere.datasource.db0.connectionTimeoutMilliseconds=30000
spring.shardingsphere.datasource.db0.idleTimeoutMilliseconds=60000
spring.shardingsphere.datasource.db0.maxLifetimeMilliseconds=1800000

##分布式序列配置(该项为必需,不是很友好)
# 分布式序列算法配置
spring.shardingsphere.rules.sharding.key-generators.snowflake.type=SNOWFLAKE
spring.shardingsphere.rules.sharding.key-generators.snowflake.props.worker-id=123

# 打印解释后的SQL语句
#spring.shardingsphere.props.sql-show=true
# 格式化SQL语句
spring.shardingsphere.props.sql-simple=true
# sharding jdbc 需要重新注入数据源,覆盖原本注入的数据源
spring.main.allow-bean-definition-overriding=true

# 分片规则配置

# chat_log分表策略
# 表达式 `ds_$->{0..1}`枚举的数据源为读写分离配置的逻辑数据源名称
spring.shardingsphere.rules.sharding.tables.chat_log.actual-data-nodes=db0.chat_log_$->{0..7}
# 用于单分片键的标准分片场景
spring.shardingsphere.rules.sharding.tables.chat_log.table-strategy.standard.sharding-column=chat_id
spring.shardingsphere.rules.sharding.tables.chat_log.table-strategy.standard.sharding-algorithm-name=chatlog-inline
# 分布式序列策略配置
spring.shardingsphere.rules.sharding.tables.chat_log.key-generate-strategy.column=chat_id
spring.shardingsphere.rules.sharding.tables.chat_log.key-generate-strategy.key-generator-name=snowflake
## 分片算法配置
#spring.shardingsphere.rules.sharding.sharding-algorithms.chatlog-inline.type=INLINE
#spring.shardingsphere.rules.sharding.sharding-algorithms.chatlog-inline.props.algorithm-expression=chat_log_$->{chat_id % 8}
# 自定义分片算法
spring.shardingsphere.rules.sharding.sharding-algorithms.chatlog-inline.type=CLASS_BASED
spring.shardingsphere.rules.sharding.sharding-algorithms.chatlog-inline.props.strategy=STANDARD
spring.shardingsphere.rules.sharding.sharding-algorithms.chatlog-inline.props.algorithmClassName=com.xxx.config.sharingshpere.TableShardingAlgorithm

自定义算法类

package com.xxx.config.sharingshpere;

import java.util.ArrayList;
import java.util.Collection;

import org.apache.shardingsphere.sharding.api.sharding.standard.PreciseShardingValue;
import org.apache.shardingsphere.sharding.api.sharding.standard.RangeShardingValue;
import org.apache.shardingsphere.sharding.api.sharding.standard.StandardShardingAlgorithm;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.alibaba.fastjson.JSONObject;
import com.xxx.config.ApplicationContextHolder;
import com.xxx.constant.ShardingConstant;
import com.google.common.collect.Range;
import com.xclj.common.redis.RedisClientTemplate;

/**
 * 自定义水平分片
 * @author xxx
 * 2022年1月4日 上午8:13:03
 */
//@Component
public class TableShardingAlgorithm implements StandardShardingAlgorithm<Long> {
	private static Logger logger = LoggerFactory.getLogger(TableShardingAlgorithm.class);

	RedisClientTemplate redisClientTemplate;

	public String getType() {
		// 分片类型 
		return ShardingConstant.CLASS_BASED;
	}

	public void init() {
		// 
		redisClientTemplate = ApplicationContextHolder.getBean("redisClientTemplate");
	}

	/**
	 * 精准分片算法(自动生成ID插入、=或in根据ID查询)
	 */
	public String doSharding(Collection<String> tableNames, PreciseShardingValue<Long> shardingValue) {
		//logger.info("精准分片算法--{}", JSONArray.toJSONString(tableNames));
		//logger.info("精准分片算法--{}", JSONObject.toJSONString(shardingValue));
				
		// 取值后4位并取整作为表名后缀判断
		String tbSuffix = checkTbSuffix(shardingValue.getValue());
		for (String tableN : tableNames) {
			if (tableN.endsWith(tbSuffix)) {
				logger.info("更新表{}--{}", tableN, JSONObject.toJSONString(shardingValue));
				return tableN;
            }
		}
    	
		// 匹配不到则取最后一个表名
        //return Lists.newArrayList(tableNames).get(tableNames.size()-1);
        return null;
	}

	/**
	 * 取后缀
	 */
	private String checkTbSuffix(final Comparable<?> value) {
		String valStr = value+"";
		valStr = valStr.substring(valStr.length()-4);
		String tbSuffix = Integer.parseInt(valStr) + "";
		return tbSuffix;
	}

	/**
	 * 范围分片算法(between and或> and <)
	 */
	public Collection<String> doSharding(Collection<String> tableNames, RangeShardingValue<Long> shardingValue) {
//		logger.info("范围分片算法--{}", JSONObject.toJSONString(shardingValue));
        Collection<String> collect = new ArrayList<>();
        Range<Long> valueRange = shardingValue.getValueRange();

//        logger.info("valueRange:{}", JSONObject.toJSONString(valueRange));
//        logger.info("最小值lowerEndpoint:{}", valueRange.lowerEndpoint());
//        logger.info("最大值upperEndpoint:{}", valueRange.upperEndpoint());
        String [] tbSuffixs = backTableSuffix(valueRange.lowerEndpoint(), valueRange.upperEndpoint());
        
        for (String tableN : tableNames) {
        	for (String tbSuffix : tbSuffixs) {
                if (tableN.endsWith(tbSuffix)) {
                    collect.add(tableN);
                }
			}
        }

        logger.info("collect:{}", JSONObject.toJSONString(collect));
        return collect;
	}
	
	private String [] backTableSuffix(final Comparable<?> lowerEndpoint, final Comparable<?> upperEndpoint) {
		int startTbSuffix = Integer.parseInt(checkTbSuffix(lowerEndpoint));
		int endSuTbffix = Integer.parseInt(checkTbSuffix(upperEndpoint));
		String [] aTables = new String[endSuTbffix - startTbSuffix + 1];
		for (int i = 0,j=startTbSuffix; j <= endSuTbffix; i++,j++) {
			aTables[i] = j+"";
		}
		return aTables;
	}

	
}

取自动生成id工具类(雪花算法+4位后缀)

package com.xxx.config.sharingshpere;

import java.util.ArrayList;
import java.util.List;

import org.apache.shardingsphere.sharding.algorithm.keygen.SnowflakeKeyGenerateAlgorithm;

import com.alibaba.fastjson.JSONObject;
import com.xxx.constant.RedisConstant;
import com.google.common.collect.Lists;
import com.xclj.common.redis.RedisClientTemplate;

/**
 * 雪花算法ID获取
 * @author xxx
 * 2022年1月5日 上午11:27:34
 */
public class SnowFlakeUtils {
	
	private static int BIT_NUM = 4;
	static SnowflakeKeyGenerateAlgorithm sn;


	// 新表数范围(新增操作时表数量只能属于其中之一,大于9按9算,小于9但不匹配的按小一个算,如指定8个表,则根据tableNames取最后的7个表)
	private static List<Integer> splitList = Lists.newArrayList(new Integer[]{2, 3, 5, 6, 7, 9});
	
	static {
    	sn = new SnowflakeKeyGenerateAlgorithm();
	}
	
    public static Comparable<?>  getId() {
        return getId(new Long(1020));
    }
    
    public static Comparable<?>  getId(Long workerId) {
    	return sn.generateKey();
    }
    
    /**
     * endSuTbffix-startSuffix的大小只能为2, 3, 5, 6, 7, 9其中之一,
     * 大于9按9算,小于9但不匹配的按小一个算,如指定8个表,则取7个表)
     * <pre>默认取尾数四位取整判断表名</pre>
     */
    public static String getTableId(int startTbSuffix, int endSuTbffix) {
		return getTableId(startTbSuffix, endSuTbffix, BIT_NUM);
    }
    
    public static String getTableId(int startTbSuffix, int endSuTbffix, int num) {
    	String [] aTables = "0,1,2,3,4,5,6,7,8".split(",");
    	if ((endSuTbffix > startTbSuffix) && (endSuTbffix - startTbSuffix) <= 9) {
    		aTables = new String[endSuTbffix - startTbSuffix + 1];
    		for (int i = 0,j=startTbSuffix; j <= endSuTbffix; i++,j++) {
    			aTables[i] = j+"";
    		}
    	}
    	
    	List<String> tableNames = new ArrayList<>();
    	for (String aSuffix : aTables) {
    		tableNames.add("tb_"+aSuffix);
    	}
    	
    	return getTableId(tableNames, aTables, num);
    }
    
    /**
     * redis的key为“split_tb_config_key”,保存为map类型("tableSuffix"为tableName对象属性)
     */
    public static String getTableIdByRedis(String tableName, RedisClientTemplate redisClientTemplate) {
		String resultStr = redisClientTemplate.getMapValue(RedisConstant.SPLIT_TB_CONFIG_KEY, tableName);
    	if (resultStr != null) {
			JSONObject jsonObject = JSONObject.parseObject(resultStr);
//			//logger.info("分表配置信息:{}", resultStr);
//			
			String [] aTables = jsonObject.getString("tableSuffix").split(",");
    		List<String> tableNames = new ArrayList<>();
    		for (String aSuffix : aTables) {
    			tableNames.add(tableName+"_"+aSuffix);
    		}
    		
    		return getTableId(tableNames, aTables, BIT_NUM);
    	}
    	
    	return null;
    }

    /**
     * redis的key为“split_tb_config_key”,保存为map类型("tableSuffix"为tableName对象属性)
     */
    public static String getTableIdByRedis(List<String> tableNames, String tableName, RedisClientTemplate redisClientTemplate) {
		String resultStr = redisClientTemplate.getMapValue(RedisConstant.SPLIT_TB_CONFIG_KEY, tableName);
    	if (resultStr != null) {
			JSONObject jsonObject = JSONObject.parseObject(resultStr);
//			//logger.info("分表配置信息:{}", resultStr);
//			
			String [] aTables = jsonObject.getString("tableSuffix").split(",");
    		List<String> newTableNames = new ArrayList<>();
			for (int i = 0,size=tableNames.size(); i < size; i++) {
				newTableNames.add(tableNames.get(i)+"_"+i);
			}
    		
    		return getTableId(newTableNames, aTables, BIT_NUM);
    	}
    	
    	return null;
    }
    
    /**
     * tableSplit为“,”分隔的表后缀
     */
    public static String getTableId(String tableSplit) {
    	if (tableSplit != null && "".equals(tableSplit.trim()) == false) {
//			JSONObject jsonObject = JSONObject.parseObject(resultStr);
//			//logger.info("分表配置信息:{}", resultStr);
//			
//			String [] aTables = jsonObject.getString("tableSuffix").split(",");
    		String [] aTables = tableSplit.split(",");
    		List<String> newTableNames = new ArrayList<>();
        	for (String aSuffix : aTables) {
        		newTableNames.add("tb_"+aSuffix);
        	}
    		
    		return getTableId(newTableNames, aTables, BIT_NUM);
    	}
    	
    	return null;
    }

	private static String getTableId(List<String> tableNames, String[] aTables, int num) {
    	long sn_id = getLongValue(sn.generateKey());
		aTables = defaultTableCheck(tableNames, aTables);
		//logger.info(JSONArray.toJSONString(aTables));
		
		String table = checkTable(tableNames, aTables, sn_id);
		if (table != null) {
			//logger.info("更新表{}--{}", table, JSONArray.toJSONString(shardingValue));
			String [] temp = table.split("_");
			return sn_id + suffixCode(temp[temp.length-1], num);
		}
		
		return null;
	}

	private static String suffixCode(String suffixNum, int num) {
		if (num > suffixNum.length()) {
			int codeNum = Integer.parseInt(suffixNum);
			int num0 = num - suffixNum.length(); 
			StringBuffer sBuffer = new StringBuffer(num);
			for (int i = 0; i < num0; i++) {
				sBuffer.append("0");
			}
			return sBuffer.append(codeNum).toString();
		}
		return suffixNum;
	}

	/**
	 * 返回表
	 */
    private static String checkTable(List<String> listTables, String[] aTables, Long val) {    	
    	for (String tableN : listTables) {
    		for (String activeT : aTables) {
				if (tableN.endsWith(activeT)) {
					if (tableN.endsWith(checkEndStr(listTables, val, aTables))) {
		                return tableN;
		            }
				}
			}
        }
		return null;
	}

	private static String checkEndStr(List<String> listTables, Long val, String [] activeTables) {
		// 活动表个数
		int count = activeTables.length;
		int oCount = Integer.parseInt(activeTables[count-1]) - count + 1;
		String result = null;
		
		switch (count) {
			case 2:
				result = String.valueOf(getLongValue(val) % 2 + oCount);
				break;
			case 3:
				result = String.valueOf(getLongValue(val) % 3 + oCount);
				break;
			case 5:
				result = String.valueOf(getLongValue(val) % 5 + oCount);
				break;
			case 6:
				result = String.valueOf(getLongValue(val) % 6 + oCount);
				break;
			case 7:
				result = String.valueOf(getLongValue(val) % 7 + oCount);
				break;
			case 9:
				result = String.valueOf(getLongValue(val) % 9 + oCount);
				break;
			default:
				
				break;
		}
		
		return result;
	}
	
    private static long getLongValue(final Comparable<?> value) {
        return Long.parseLong(value.toString());
    }

	private static String [] defaultTableCheck(List<String> listTables, String[] aTables) {
		if (splitList.contains(aTables.length) == false) {
			int size = listTables.size();
			String [] activeTables = null;
			int lastNum = splitList.get(0);
			
			for (int splitNum : splitList) {
				if (splitNum >= aTables.length && size >= lastNum) {
					activeTables = new String[lastNum];
					String [] temp = null;
					int num = lastNum-1;
					for (int i = size-1; i >= (size-lastNum); i--) {
						temp = listTables.get(i).split("_");
						activeTables[num] = temp[temp.length-1];
						num--;
					}
					return activeTables;
				} else {
					lastNum = splitNum;
				}
			}
			
			// 多于9个的按9算
			lastNum = 9;
			if (size >= lastNum) {
				activeTables = new String[lastNum];
				String [] temp = null;
				int num = lastNum-1;
				for (int i = size-1; i >= (size-lastNum); i--) {
					temp = listTables.get(i).split("_");
					activeTables[num] = temp[temp.length-1];
					num--;
				}
				return activeTables;
			}
		}
		
		return aTables;
	}


	/**
	 * 取后缀
	 */
	public static String checkTbSuffix(final Comparable<?> value) {
		String valStr = value+"";
		valStr = valStr.substring(valStr.length()-4);
		String tbSuffix = Integer.parseInt(valStr) + "";
		return tbSuffix;
	}
	
	/**
	 * 返回连续的后缀列表
	 */
	public static String [] backTableSuffix(final Comparable<?> lowerEndpoint, final Comparable<?> upperEndpoint) {
		int startTbSuffix = Integer.parseInt(checkTbSuffix(lowerEndpoint));
		int endSuTbffix = Integer.parseInt(checkTbSuffix(upperEndpoint));
		String [] aTables = new String[endSuTbffix - startTbSuffix + 1];
		for (int i = 0,j=startTbSuffix; j <= endSuTbffix; i++,j++) {
			aTables[i] = j+"";
		}
		return aTables;
	}
    
}

再加个test代码吧

		ChatLogVO chatLogNew = new ChatLogVO();
	    chatLogNew.setCompanyCode("S20211219");
	    chatLogNew.setServer("S80d10cf7");
	    chatLogNew.setClient("Cfccb060d");
	    chatLogNew.setType("1");
	    chatLogNew.setStatus("0");
	    
	    for (int i = 60; i < 75; i++) {
		    chatLogNew.setChatId(SnowFlakeUtils.getTableId("0,1,2,3,4"));
		    chatLogNew.setContent("测试数据插入--"+i);
		    chatLogNew.setCreateTime(new Date());
		    int result = chatLogMapper.insertChatLog(chatLogNew);
		    if (result > 0) {
				logger.info("新增记录成功");
			} else {
				logger.error("新增记录失败");
			}
		}

原先的想法是可以在数据库中配置好扩容的表,然后通过 Redis 缓存动态更新配置。后面考虑到根据 ID 的 =、in、between and 等操作,就把 id 类型由 bigint 改为 varchar。虽然不能由数据库自动生成 ID,但以后为表扩容基本没有问题了(这里所说的"问题"是指"数据能否均分到指定的各个表上":不需要数据迁移,也不会有热点问题。按官方的说法雪花 ID 是全局唯一的;活动表数量最大为 9,不过今天加 9 个明天再加 9 个也是可以的,真到了那个地步应该不是我现在要考虑的问题)。但还是要改一下 application.properties 配置文件(就是 xxx.actual-data-nodes=db0.xxx_$->{0..2} 这里,现在没时间了,以后等 ShardingSphere 再成熟些可以考虑通过 Redis 更新配置)。

记录在此,也许哪天工作中会用到呢……

对了,还要吐槽下……自定义算法里面能拿到的参数对象太少了,少到不方便判断当前是更新还是查询操作。

对了,还要感谢下 ShardingSphere 项目团队,真是一个好东西,有什么问题也可以在项目的 GitHub 上提 issue,回复很快,再次感谢。

  • 7
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
Sharding-JDBC 是一款基于 JDBC 的分布式数据库中间件,它提供了分库分表、读写分离、分布式事务等功能。而自定义分表策略就是 Sharding-JDBC 中非常重要的一部分。 下面我来介绍一下如何自定义分表策略以及实战实例: 1. 自定义分表策略 Sharding-JDBC 自带了一些默认的分表策略,例如按照取模分表、按照日期分表等。但如果默认分表策略不能满足我们的需求,我们可以自定义分表策略。 自定义分表策略需要实现 `PreciseShardingAlgorithm` 接口,该接口包含两个方法: - `doSharding(Collection<String> availableTargetNames, PreciseShardingValue shardingValue)`:根据分片键和可用的分片数据源名称集合进行分片计算,返回分片后的数据源名称。 - `getType()`:返回分片算法名称。 下面是一个自定义按照用户 ID 分表分表策略: ```java public class UserIdShardingAlgorithm implements PreciseShardingAlgorithm<Long> { @Override public String doSharding(Collection<String> availableTargetNames, PreciseShardingValue<Long> shardingValue) { for (String tableName : availableTargetNames) { if (tableName.endsWith(shardingValue.getValue() % 10)) { return tableName; } } throw new UnsupportedOperationException(); } @Override public String getType() { return "USER_ID"; } } ``` 2. 实战实例 下面是一个使用 Sharding-JDBC 的 Spring Boot 实战示例,该示例演示了如何使用自定义分表策略按照用户 ID 分表: 1. 引入依赖: ```xml <dependency> <groupId>org.apache.shardingsphere</groupId> <artifactId>sharding-jdbc-spring-boot-starter</artifactId> <version>${sharding-jdbc.version}</version> </dependency> ``` 2. 
编写配置文件: ```yaml spring: shardingsphere: datasource: names: ds0, ds1 ds0: driver-class-name: com.mysql.cj.jdbc.Driver url: jdbc:mysql://localhost:3306/db0?useSSL=false&serverTimezone=GMT%2B8&useUnicode=true&characterEncoding=UTF-8&allowPublicKeyRetrieval=true username: root password: root ds1: driver-class-name: com.mysql.cj.jdbc.Driver url: jdbc:mysql://localhost:3306/db1?useSSL=false&serverTimezone=GMT%2B8&useUnicode=true&characterEncoding=UTF-8&allowPublicKeyRetrieval=true username: root password: root sharding: tables: user: actual-data-nodes: ds$->{0..1}.user_$->{0..9} table-strategy: inline: sharding-column: user_id algorithm-expression: user_$->{user_id % 10} key-generator: column: user_id type: SNOWFLAKE default-database-strategy: inline: sharding-column: user_id algorithm-expression: ds$->{user_id % 2} props: sql.show: true ``` 3. 编写自定义分表策略: ```java public class UserIdShardingAlgorithm implements PreciseShardingAlgorithm<Long> { @Override public String doSharding(Collection<String> availableTargetNames, PreciseShardingValue<Long> shardingValue) { for (String tableName : availableTargetNames) { if (tableName.endsWith(shardingValue.getValue() % 10)) { return tableName; } } throw new UnsupportedOperationException(); } @Override public String getType() { return "USER_ID"; } } ``` 4. 
在 Spring Boot 中配置自定义分表策略: ```java @Configuration public class ShardingConfig { @Autowired private DataSource dataSource; @Bean public ShardingRule shardingRule() { TableRule userTableRule = TableRule.builder("user") .actualTables(Arrays.asList("user_0", "user_1", "user_2", "user_3", "user_4", "user_5", "user_6", "user_7", "user_8", "user_9")) .dataSourceRule(dataSourceRule()) .tableShardingStrategy(new TableShardingStrategy("user_id", new UserIdShardingAlgorithm())) .keyGenerator(new KeyGeneratorConfiguration("SNOWFLAKE", "user_id")) .build(); return ShardingRule.builder() .dataSourceRule(dataSourceRule()) .tableRules(Arrays.asList(userTableRule)) .databaseShardingStrategy(new DatabaseShardingStrategy("user_id", new InlineShardingStrategy("user_id", "ds$->{user_id % 2}"))) .build(); } @Bean public DataSourceRule dataSourceRule() { Map<String, DataSource> dataSourceMap = new HashMap<>(); dataSourceMap.put("ds0", createDataSource("ds0")); dataSourceMap.put("ds1", createDataSource("ds1")); return new DataSourceRule(dataSourceMap); } private DataSource createDataSource(final String dataSourceName) { DruidDataSource result = new DruidDataSource(); result.setDriverClassName("com.mysql.cj.jdbc.Driver"); result.setUrl(String.format("jdbc:mysql://localhost:3306/%s?useSSL=false&serverTimezone=GMT%%2B8&useUnicode=true&characterEncoding=UTF-8&allowPublicKeyRetrieval=true", dataSourceName)); result.setUsername("root"); result.setPassword("root"); return result; } @Bean public DataSource dataSource() throws SQLException { return new ShardingDataSource(shardingRule()); } } ``` 在上面的示例中,我们使用了自定义的按照用户 ID 分表分表策略。在 `ShardingConfig` 类中,我们使用 `DataSourceRule` 和 `ShardingRule` 配置数据源和分片规则,并且使用自定义分表策略和分库策略。在 `application.yaml` 文件中,我们配置了数据源和表的分片规则以及自定义分表策略。 以上就是关于如何自定义分表策略以及实战实例的介绍。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值