前言
实际开发中,分片规则有可能是我们自定义的。这时我们怎么让sharding-proxy读到自己的分片规则呢?其实sharding-proxy提供了SPI机制,
允许我们扩展分片规则。
这个分片规则是参考之前github给出的分片规则,链接为 https://github.com/fafeidou/fast-cloud-nacos/blob/master/fast-common-examples/fast-common-sharding-simple-example/
有些步骤省略,需查看上篇博客。
搭建步骤
编写规则,打成jar包
pom依赖
<dependency>
<groupId>org.apache.shardingsphere</groupId>
<artifactId>sharding-jdbc-core</artifactId>
<version>4.0.0-RC1</version>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<version>1.18.10</version>
<scope>provided</scope>
</dependency>
自定义分片规则
- ShardingDbUtil,分片策略工具类
/**
 * Utility methods for computing sharded (physical) database/table names and
 * indexes from a numeric sharding key.
 *
 * <p>Naming convention: physical name = logical name + "_" + index.
 * Database indexes are 0-based ({@code hash % dbNum}); table indexes are
 * 1-based ({@code hash % tableNum + 1}).
 */
public class ShardingDbUtil {

    /** Non-instantiable utility class. */
    private ShardingDbUtil() {
    }

    /**
     * Builds the physical database name from the logical name and a shard index.
     *
     * @param logicalDatabaseName logical database name
     * @param index               0-based database shard index
     * @return physical database name, e.g. {@code order_db_0}
     */
    public static String getActualDatabaseName(String logicalDatabaseName, Integer index) {
        return logicalDatabaseName + "_" + index;
    }

    /**
     * Maps a sharding key to a physical database index.
     *
     * @param shardingInfo sharding key (hashed as its decimal string)
     * @param dbNum        number of physical databases
     * @return 0-based database index in {@code [0, dbNum)}
     */
    public static int getActualDatabaseIndex(Long shardingInfo, int dbNum) {
        return RSHash(shardingInfo.toString()) % dbNum;
    }

    /**
     * Builds the physical table name from the logical name and a shard index.
     *
     * @param logicalTableName logical table name
     * @param index            1-based table shard index
     * @return physical table name, e.g. {@code t_order_1}
     */
    public static String getActualTableName(String logicalTableName, Integer index) {
        return logicalTableName + "_" + index;
    }

    /**
     * Maps a sharding key to a physical table index.
     *
     * @param shardingInfo sharding key (truncated to int before hashing)
     * @param tableNum     number of physical tables per database
     * @return 1-based table index in {@code [1, tableNum]}
     */
    public static int getActualTableIndex(Long shardingInfo, int tableNum) {
        return intHash(shardingInfo.intValue()) % tableNum + 1;
    }

    /**
     * RS string hash (Robert Sedgewick).
     *
     * @param str input string
     * @return non-negative hash value
     */
    public static int RSHash(String str) {
        int b = 378551;
        int a = 63689;
        int hash = 0;
        for (int i = 0; i < str.length(); i++) {
            hash = hash * a + str.charAt(i);
            a = a * b;
        }
        // Masking the sign bit already guarantees a non-negative result;
        // the previous Math.abs(...) wrapper was redundant.
        return hash & 0x7FFFFFFF;
    }

    /**
     * Integer bit-mixing hash (Thomas Wang style avalanche mix).
     *
     * @param key input value
     * @return non-negative hash value
     */
    public static int intHash(int key) {
        key += ~(key << 15);
        key ^= (key >>> 10);
        key += (key << 3);
        key ^= (key >>> 6);
        key += ~(key << 11);
        key ^= (key >>> 16);
        // Math.abs(Integer.MIN_VALUE) == Integer.MIN_VALUE (still negative),
        // which would propagate a negative shard index; guard that edge case.
        return key == Integer.MIN_VALUE ? 0 : Math.abs(key);
    }

    /**
     * Concatenates the database and table shard indexes for storage,
     * e.g. {@code "0,2"}.
     *
     * @param shardingInfo sharding key
     * @param dbNum        number of physical databases
     * @param tableNum     number of physical tables per database
     * @return "{dbIndex},{tableIndex}"
     */
    public static String getShardingMeta(Long shardingInfo, int dbNum, int tableNum) {
        Integer dbIndex = getActualDatabaseIndex(shardingInfo, dbNum);
        Integer tableIndex = getActualTableIndex(shardingInfo, tableNum);
        return dbIndex.toString() + "," + tableIndex.toString();
    }
}
- 实现
PreciseShardingAlgorithm
类OrderShardingAlgorithm
@Slf4j
public class OrderShardingAlgorithm implements PreciseShardingAlgorithm<Long> {

    /**
     * Template for physical database/table names: {@code <logicalName>_<index>}.
     */
    private static final String ACTUAL_TARGET_TEMPLATE = "%s_%d";

    /** Number of physical databases. */
    private static final long DATABASE_NUM = 1L;

    /**
     * Number of physical tables per database.
     * Declared as a constant; it was previously a mutable, boxed instance field.
     */
    private static final int TABLE_NUM = 2;

    /**
     * Routes a precise (equality) sharding value either to a physical table or
     * to a physical database, depending on whether the available targets are
     * table names or data source names.
     *
     * @param availableTargetNames available data sources or tables' names
     * @param shardingValue        sharding value
     * @return sharding result for data source or table's name
     * @throws IllegalArgumentException if {@code availableTargetNames} is empty
     */
    @Override
    public String doSharding(Collection<String> availableTargetNames, PreciseShardingValue<Long> shardingValue) {
        // Peek at one candidate to decide whether we are routing tables or
        // databases; fail loudly instead of an unchecked Optional.get().
        String firstTarget = availableTargetNames.stream()
                .findFirst()
                .orElseThrow(() -> new IllegalArgumentException("availableTargetNames must not be empty"));
        if (firstTarget.startsWith(shardingValue.getLogicTableName())) {
            // Table routing: candidates look like t_order_<n>.
            int tableIndex = ShardingDbUtil.getActualTableIndex(shardingValue.getValue(), TABLE_NUM);
            String targetNode = String.format(ACTUAL_TARGET_TEMPLATE, shardingValue.getLogicTableName(), tableIndex);
            log.info("sharding table: {}", targetNode);
            return targetNode;
        } else {
            // Database routing. NOTE(review): with DATABASE_NUM == 1 this always
            // resolves to order_db_1; the "order_db" prefix is hard-coded.
            long databaseIndex = shardingValue.getValue() % DATABASE_NUM + 1;
            String targetNode = String
                .format(ACTUAL_TARGET_TEMPLATE, "order_db", databaseIndex);
            log.info("sharding database: {}", targetNode);
            return targetNode;
        }
    }
}
- 将该项目打成jar包
mvn clean package
- 将jar包放到proxy的lib目录下
- 修改配置
config-sharding.yml
schemaName: sharding_db # logical schema name exposed by the proxy
dataSources:
order_db_1: # data source alias; append more entries below for additional sources
url: jdbc:mysql://192.168.56.121:33065/order_db_1?useUnicode=true
username: root
password: root
connectionTimeoutMilliseconds: 30000
idleTimeoutMilliseconds: 60000
maxLifetimeMilliseconds: 1800000
maxPoolSize: 50
shardingRule: # database/table sharding rules
tables: # append more logical tables below
t_order: # logical table name
actualDataNodes: order_db_1.t_order_$->{1..2} # physical nodes the logical table maps to
databaseStrategy: # NOTE(review): no tableStrategy is configured although actualDataNodes lists t_order_1..2 — confirm table routing is handled by the custom algorithm
standard:
shardingColumn: order_id # sharding column for the database strategy (original comment mislabeled it as the table-sharding field)
preciseAlgorithmClassName: com.algorithm.OrderShardingAlgorithm # custom sharding algorithm class (must be on the proxy's classpath)
keyGenerator:
type: SNOWFLAKE # distributed primary-key generator type
column: order_id # distributed primary-key column
bindingTables:
- t_order # tables bound together for sharding
defaultDatabaseStrategy:
none:
defaultTableStrategy:
none:
启动
cd ../bin
./start.sh
tail -f ../logs/stdout.log