新年发个ShardingSphere可扩容的自定义分表
直接上代码……哈哈
先引入jar包
<dependency>
<groupId>org.apache.shardingsphere</groupId>
<artifactId>shardingsphere-jdbc-core-spring-boot-starter</artifactId>
<version>5.0.0</version>
</dependency>
application.properties配置(注意:是properties格式而非xml,URL中的&无需转义)
## 配置第 1 个数据源 注意数据源名称(hikari使用spring.*.jdbc-url)
spring.shardingsphere.datasource.db0.type=com.zaxxer.hikari.HikariDataSource
spring.shardingsphere.datasource.db0.jdbc-url=jdbc:mysql://localhost:3306/db0?useUnicode=true&serverTimezone=Asia/Shanghai&characterEncoding=UTF-8&autoReconnect=true
#spring.shardingsphere.datasource.db0.type=com.zaxxer.hikari.HikariDataSource
#spring.shardingsphere.datasource.db0.url=jdbc:mysql://localhost:3306/db_chat?useUnicode=true&serverTimezone=Asia/Shanghai&characterEncoding=UTF-8&autoReconnect=true&failOverReadOnly=false&maxReconnects=15000
spring.shardingsphere.datasource.db0.driver-class-name=com.mysql.cj.jdbc.Driver
spring.shardingsphere.datasource.db0.username=root
spring.shardingsphere.datasource.db0.password=
spring.shardingsphere.datasource.db0.maxPoolSize=50
spring.shardingsphere.datasource.db0.minPoolSize=50
spring.shardingsphere.datasource.db0.maintenanceIntervalMilliseconds=30000
spring.shardingsphere.datasource.db0.connectionTimeoutMilliseconds=30000
spring.shardingsphere.datasource.db0.idleTimeoutMilliseconds=60000
spring.shardingsphere.datasource.db0.maxLifetimeMilliseconds=1800000
##分布式序列配置(该项为必需,不是很友好)
# 分布式序列算法配置
spring.shardingsphere.rules.sharding.key-generators.snowflake.type=SNOWFLAKE
spring.shardingsphere.rules.sharding.key-generators.snowflake.props.worker-id=123
# 打印解释后的SQL语句
#spring.shardingsphere.props.sql-show=true
# 以简化格式打印SQL语句(sql-simple)
spring.shardingsphere.props.sql-simple=true
# sharding jdbc 需要重新注入数据源,覆盖原本注入的数据源
spring.main.allow-bean-definition-overriding=true
# 分片规则配置
# chat_log分表策略
# 表达式 `ds_$->{0..1}`枚举的数据源为读写分离配置的逻辑数据源名称
spring.shardingsphere.rules.sharding.tables.chat_log.actual-data-nodes=db0.chat_log_$->{0..7}
# 用于单分片键的标准分片场景
spring.shardingsphere.rules.sharding.tables.chat_log.table-strategy.standard.sharding-column=chat_id
spring.shardingsphere.rules.sharding.tables.chat_log.table-strategy.standard.sharding-algorithm-name=chatlog-inline
# 分布式序列策略配置
spring.shardingsphere.rules.sharding.tables.chat_log.key-generate-strategy.column=chat_id
spring.shardingsphere.rules.sharding.tables.chat_log.key-generate-strategy.key-generator-name=snowflake
## 分片算法配置
#spring.shardingsphere.rules.sharding.sharding-algorithms.chatlog-inline.type=INLINE
#spring.shardingsphere.rules.sharding.sharding-algorithms.chatlog-inline.props.algorithm-expression=chat_log_$->{chat_id % 8}
# 自定义分片算法
spring.shardingsphere.rules.sharding.sharding-algorithms.chatlog-inline.type=CLASS_BASED
spring.shardingsphere.rules.sharding.sharding-algorithms.chatlog-inline.props.strategy=STANDARD
spring.shardingsphere.rules.sharding.sharding-algorithms.chatlog-inline.props.algorithmClassName=com.xxx.config.sharingshpere.TableShardingAlgorithm
自定义算法类
package com.xxx.config.sharingshpere;
import java.util.ArrayList;
import java.util.Collection;
import org.apache.shardingsphere.sharding.api.sharding.standard.PreciseShardingValue;
import org.apache.shardingsphere.sharding.api.sharding.standard.RangeShardingValue;
import org.apache.shardingsphere.sharding.api.sharding.standard.StandardShardingAlgorithm;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.alibaba.fastjson.JSONObject;
import com.xxx.config.ApplicationContextHolder;
import com.xxx.constant.ShardingConstant;
import com.google.common.collect.Range;
import com.xclj.common.redis.RedisClientTemplate;
/**
* 自定义水平分片
* @author xxx
* 2022年1月4日 上午8:13:03
*/
//@Component
public class TableShardingAlgorithm implements StandardShardingAlgorithm<Long> {
private static Logger logger = LoggerFactory.getLogger(TableShardingAlgorithm.class);
RedisClientTemplate redisClientTemplate;
public String getType() {
// 分片类型
return ShardingConstant.CLASS_BASED;
}
public void init() {
//
redisClientTemplate = ApplicationContextHolder.getBean("redisClientTemplate");
}
/**
* 精准分片算法(自动生成ID插入、=或in根据ID查询)
*/
public String doSharding(Collection<String> tableNames, PreciseShardingValue<Long> shardingValue) {
//logger.info("精准分片算法--{}", JSONArray.toJSONString(tableNames));
//logger.info("精准分片算法--{}", JSONObject.toJSONString(shardingValue));
// 取值后4位并取整作为表名后缀判断
String tbSuffix = checkTbSuffix(shardingValue.getValue());
for (String tableN : tableNames) {
if (tableN.endsWith(tbSuffix)) {
logger.info("更新表{}--{}", tableN, JSONObject.toJSONString(shardingValue));
return tableN;
}
}
// 匹配不到则取最后一个表名
//return Lists.newArrayList(tableNames).get(tableNames.size()-1);
return null;
}
/**
* 取后缀
*/
private String checkTbSuffix(final Comparable<?> value) {
String valStr = value+"";
valStr = valStr.substring(valStr.length()-4);
String tbSuffix = Integer.parseInt(valStr) + "";
return tbSuffix;
}
/**
* 范围分片算法(between and或> and <)
*/
public Collection<String> doSharding(Collection<String> tableNames, RangeShardingValue<Long> shardingValue) {
// logger.info("范围分片算法--{}", JSONObject.toJSONString(shardingValue));
Collection<String> collect = new ArrayList<>();
Range<Long> valueRange = shardingValue.getValueRange();
// logger.info("valueRange:{}", JSONObject.toJSONString(valueRange));
// logger.info("最小值lowerEndpoint:{}", valueRange.lowerEndpoint());
// logger.info("最大值upperEndpoint:{}", valueRange.upperEndpoint());
String [] tbSuffixs = backTableSuffix(valueRange.lowerEndpoint(), valueRange.upperEndpoint());
for (String tableN : tableNames) {
for (String tbSuffix : tbSuffixs) {
if (tableN.endsWith(tbSuffix)) {
collect.add(tableN);
}
}
}
logger.info("collect:{}", JSONObject.toJSONString(collect));
return collect;
}
private String [] backTableSuffix(final Comparable<?> lowerEndpoint, final Comparable<?> upperEndpoint) {
int startTbSuffix = Integer.parseInt(checkTbSuffix(lowerEndpoint));
int endSuTbffix = Integer.parseInt(checkTbSuffix(upperEndpoint));
String [] aTables = new String[endSuTbffix - startTbSuffix + 1];
for (int i = 0,j=startTbSuffix; j <= endSuTbffix; i++,j++) {
aTables[i] = j+"";
}
return aTables;
}
}
取自动生成id工具类(雪花算法+4位后缀)
package com.xxx.config.sharingshpere;
import java.util.ArrayList;
import java.util.List;
import org.apache.shardingsphere.sharding.algorithm.keygen.SnowflakeKeyGenerateAlgorithm;
import com.alibaba.fastjson.JSONObject;
import com.xxx.constant.RedisConstant;
import com.google.common.collect.Lists;
import com.xclj.common.redis.RedisClientTemplate;
/**
* 雪花算法ID获取
* @author xxx
* 2022年1月5日 上午11:27:34
*/
public class SnowFlakeUtils {
private static int BIT_NUM = 4;
static SnowflakeKeyGenerateAlgorithm sn;
// 新表数范围(新增操作时表数量只能属于其中之一,大于9按9算,小于9但不匹配的按小一个算,如指定8个表,则根据tableNames取最后的7个表)
private static List<Integer> splitList = Lists.newArrayList(new Integer[]{2, 3, 5, 6, 7, 9});
static {
sn = new SnowflakeKeyGenerateAlgorithm();
}
public static Comparable<?> getId() {
return getId(new Long(1020));
}
public static Comparable<?> getId(Long workerId) {
return sn.generateKey();
}
/**
* endSuTbffix-startSuffix的大小只能为2, 3, 5, 6, 7, 9其中之一,
* 大于9按9算,小于9但不匹配的按小一个算,如指定8个表,则取7个表)
* <pre>默认取尾数四位取整判断表名</pre>
*/
public static String getTableId(int startTbSuffix, int endSuTbffix) {
return getTableId(startTbSuffix, endSuTbffix, BIT_NUM);
}
public static String getTableId(int startTbSuffix, int endSuTbffix, int num) {
String [] aTables = "0,1,2,3,4,5,6,7,8".split(",");
if ((endSuTbffix > startTbSuffix) && (endSuTbffix - startTbSuffix) <= 9) {
aTables = new String[endSuTbffix - startTbSuffix + 1];
for (int i = 0,j=startTbSuffix; j <= endSuTbffix; i++,j++) {
aTables[i] = j+"";
}
}
List<String> tableNames = new ArrayList<>();
for (String aSuffix : aTables) {
tableNames.add("tb_"+aSuffix);
}
return getTableId(tableNames, aTables, num);
}
/**
* redis的key为“split_tb_config_key”,保存为map类型("tableSuffix"为tableName对象属性)
*/
public static String getTableIdByRedis(String tableName, RedisClientTemplate redisClientTemplate) {
String resultStr = redisClientTemplate.getMapValue(RedisConstant.SPLIT_TB_CONFIG_KEY, tableName);
if (resultStr != null) {
JSONObject jsonObject = JSONObject.parseObject(resultStr);
// //logger.info("分表配置信息:{}", resultStr);
//
String [] aTables = jsonObject.getString("tableSuffix").split(",");
List<String> tableNames = new ArrayList<>();
for (String aSuffix : aTables) {
tableNames.add(tableName+"_"+aSuffix);
}
return getTableId(tableNames, aTables, BIT_NUM);
}
return null;
}
/**
* redis的key为“split_tb_config_key”,保存为map类型("tableSuffix"为tableName对象属性)
*/
public static String getTableIdByRedis(List<String> tableNames, String tableName, RedisClientTemplate redisClientTemplate) {
String resultStr = redisClientTemplate.getMapValue(RedisConstant.SPLIT_TB_CONFIG_KEY, tableName);
if (resultStr != null) {
JSONObject jsonObject = JSONObject.parseObject(resultStr);
// //logger.info("分表配置信息:{}", resultStr);
//
String [] aTables = jsonObject.getString("tableSuffix").split(",");
List<String> newTableNames = new ArrayList<>();
for (int i = 0,size=tableNames.size(); i < size; i++) {
newTableNames.add(tableNames.get(i)+"_"+i);
}
return getTableId(newTableNames, aTables, BIT_NUM);
}
return null;
}
/**
* tableSplit为“,”分隔的表后缀
*/
public static String getTableId(String tableSplit) {
if (tableSplit != null && "".equals(tableSplit.trim()) == false) {
// JSONObject jsonObject = JSONObject.parseObject(resultStr);
// //logger.info("分表配置信息:{}", resultStr);
//
// String [] aTables = jsonObject.getString("tableSuffix").split(",");
String [] aTables = tableSplit.split(",");
List<String> newTableNames = new ArrayList<>();
for (String aSuffix : aTables) {
newTableNames.add("tb_"+aSuffix);
}
return getTableId(newTableNames, aTables, BIT_NUM);
}
return null;
}
private static String getTableId(List<String> tableNames, String[] aTables, int num) {
long sn_id = getLongValue(sn.generateKey());
aTables = defaultTableCheck(tableNames, aTables);
//logger.info(JSONArray.toJSONString(aTables));
String table = checkTable(tableNames, aTables, sn_id);
if (table != null) {
//logger.info("更新表{}--{}", table, JSONArray.toJSONString(shardingValue));
String [] temp = table.split("_");
return sn_id + suffixCode(temp[temp.length-1], num);
}
return null;
}
private static String suffixCode(String suffixNum, int num) {
if (num > suffixNum.length()) {
int codeNum = Integer.parseInt(suffixNum);
int num0 = num - suffixNum.length();
StringBuffer sBuffer = new StringBuffer(num);
for (int i = 0; i < num0; i++) {
sBuffer.append("0");
}
return sBuffer.append(codeNum).toString();
}
return suffixNum;
}
/**
* 返回表
*/
private static String checkTable(List<String> listTables, String[] aTables, Long val) {
for (String tableN : listTables) {
for (String activeT : aTables) {
if (tableN.endsWith(activeT)) {
if (tableN.endsWith(checkEndStr(listTables, val, aTables))) {
return tableN;
}
}
}
}
return null;
}
private static String checkEndStr(List<String> listTables, Long val, String [] activeTables) {
// 活动表个数
int count = activeTables.length;
int oCount = Integer.parseInt(activeTables[count-1]) - count + 1;
String result = null;
switch (count) {
case 2:
result = String.valueOf(getLongValue(val) % 2 + oCount);
break;
case 3:
result = String.valueOf(getLongValue(val) % 3 + oCount);
break;
case 5:
result = String.valueOf(getLongValue(val) % 5 + oCount);
break;
case 6:
result = String.valueOf(getLongValue(val) % 6 + oCount);
break;
case 7:
result = String.valueOf(getLongValue(val) % 7 + oCount);
break;
case 9:
result = String.valueOf(getLongValue(val) % 9 + oCount);
break;
default:
break;
}
return result;
}
private static long getLongValue(final Comparable<?> value) {
return Long.parseLong(value.toString());
}
private static String [] defaultTableCheck(List<String> listTables, String[] aTables) {
if (splitList.contains(aTables.length) == false) {
int size = listTables.size();
String [] activeTables = null;
int lastNum = splitList.get(0);
for (int splitNum : splitList) {
if (splitNum >= aTables.length && size >= lastNum) {
activeTables = new String[lastNum];
String [] temp = null;
int num = lastNum-1;
for (int i = size-1; i >= (size-lastNum); i--) {
temp = listTables.get(i).split("_");
activeTables[num] = temp[temp.length-1];
num--;
}
return activeTables;
} else {
lastNum = splitNum;
}
}
// 多于9个的按9算
lastNum = 9;
if (size >= lastNum) {
activeTables = new String[lastNum];
String [] temp = null;
int num = lastNum-1;
for (int i = size-1; i >= (size-lastNum); i--) {
temp = listTables.get(i).split("_");
activeTables[num] = temp[temp.length-1];
num--;
}
return activeTables;
}
}
return aTables;
}
/**
* 取后缀
*/
public static String checkTbSuffix(final Comparable<?> value) {
String valStr = value+"";
valStr = valStr.substring(valStr.length()-4);
String tbSuffix = Integer.parseInt(valStr) + "";
return tbSuffix;
}
/**
* 返回连续的后缀列表
*/
public static String [] backTableSuffix(final Comparable<?> lowerEndpoint, final Comparable<?> upperEndpoint) {
int startTbSuffix = Integer.parseInt(checkTbSuffix(lowerEndpoint));
int endSuTbffix = Integer.parseInt(checkTbSuffix(upperEndpoint));
String [] aTables = new String[endSuTbffix - startTbSuffix + 1];
for (int i = 0,j=startTbSuffix; j <= endSuTbffix; i++,j++) {
aTables[i] = j+"";
}
return aTables;
}
}
再加个test代码吧
// Smoke test: insert 15 chat-log rows whose ids are snowflake ids suffixed by
// the table layout "0,1,2,3,4", letting the sharding algorithm route each row.
ChatLogVO chatLogNew = new ChatLogVO();
chatLogNew.setCompanyCode("S20211219");
chatLogNew.setServer("S80d10cf7");
chatLogNew.setClient("Cfccb060d");
chatLogNew.setType("1");
chatLogNew.setStatus("0");
for (int i = 60; i < 75; i++) {
// Generate an id targeting one of the 5 active tables (suffixes 0..4).
chatLogNew.setChatId(SnowFlakeUtils.getTableId("0,1,2,3,4"));
chatLogNew.setContent("测试数据插入--"+i);
chatLogNew.setCreateTime(new Date());
// NOTE(review): the same VO instance is reused across iterations — fine for
// this insert loop, but copy it if the mapper retains references.
int result = chatLogMapper.insertChatLog(chatLogNew);
if (result > 0) {
logger.info("新增记录成功");
} else {
logger.error("新增记录失败");
}
}
原先的想法是可以在数据库中配置好扩容的表,然后通过redis缓存动态更新配置文件。后面考虑到根据ID =、in、between and等操作就把id类型由bigint设为varchar,虽然不能自动生成ID但以后为表扩容的话基本没有问题了(这个所指的问题是"数据能否均分到指定的各个表上",不需要数据迁移,也不会有热点问题,照官方的说法id是全网唯一?……数量最大9?……这个今天加9个明天加9个也是可以的,但真到了这个地步应该不是我现在要考虑的问题),但还是要改一下application.properties配置文件(就是xxx.actual-data-nodes=db0.xxx_$->{0..2}这里,现在没时间了以后等ShardingSphere再成熟些可以考虑通过redis更新配置)。
记录在此,也许哪天工作中会用到呢……
对了,还要吐槽下……自定义算法里面得到的参数对象太少了太少了太少了……少到不方便判断是更新还是查询操作。
对了,还要感谢下ShardingSphere项目团队,真是一个好东东啊,有什么问题也可以在项目github那里提,秒回答,再次感谢。