写在前面:
sharding-jdbc适用于对简单业务的分表处理,比如日志服务。
在sql中一定要带上分表键!!!
在sql中一定要带上分表键!!!
在sql中一定要带上分表键!!!
引入依赖
4.1.1版本使用的是原生的druid依赖,所以需要替换掉spring-boot封装的druid依赖
<!--sharding-jdbc-->
<!-- for spring boot -->
<dependency>
<groupId>org.apache.shardingsphere</groupId>
<artifactId>sharding-jdbc-spring-boot-starter</artifactId>
<version>4.1.1</version>
</dependency>
<!-- for spring namespace -->
<dependency>
<groupId>org.apache.shardingsphere</groupId>
<artifactId>sharding-jdbc-spring-namespace</artifactId>
<version>4.1.1</version>
</dependency>
<!-- <dependency>-->
<!-- <groupId>com.alibaba</groupId>-->
<!-- <artifactId>druid-spring-boot-starter</artifactId>-->
<!-- <version>1.1.21</version>-->
<!-- </dependency>-->
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>1.1.21</version>
</dependency>
yml文件
spring:
  shardingsphere:
    datasource:
      names: ds0 # 定义数据库别名
      ds0: # 配置数据库
        type: com.alibaba.druid.pool.DruidDataSource
        driverClassName: com.mysql.cj.jdbc.Driver
        url: jdbc:mysql://localhost:3306/ws_relayer?serverTimezone=GMT%2B0&useUnicode=true&characterEncoding=utf-8
        username: root
        password: 123456
        # 数据源其他配置
        initialSize: 5
        minIdle: 5
        maxActive: 20
        maxWait: 60000
        timeBetweenEvictionRunsMillis: 60000
        minEvictableIdleTimeMillis: 300000
        validationQuery: SELECT 1 FROM DUAL
        testWhileIdle: true
        testOnBorrow: false
        testOnReturn: false
        poolPreparedStatements: true
    sharding:
      tables:
        mpc_ws_relayer_message:
          actual-data-nodes: ds0.message_${0..100} # 表名规则
          # 按月分表写法:
          # actual-data-nodes: ds0.message_$->{Calendar.getInstance().get(Calendar.YEAR)}$->{(1..12).collect{t ->t.toString().padLeft(2,'0')}}
          key-generator: # 配置主键自动生成
            column: id
            props:
              worker:
                id: ${workerId} # 这里是为了解决雪花算法多机部署时id重复的问题,需要搭配代码的配置使用
            type: SNOWFLAKE # 使用雪花算法
          table-strategy:
            standard:
              sharding-column: gmt_create # 定义分表键,这里选择了按创建时间分表
              precise-algorithm-className: com.***.strategy.sharding.DatePreciseShardingAlgorithm # 自定义精确分表逻辑,适用于sql中使用=的情况
              range-algorithm-className: com.***.strategy.sharding.DateRangeShardingAlgorithm # 自定义范围分表逻辑,适用于sql中使用between,>,<的情况
分表算法
精确分表
需要实现org.apache.shardingsphere.api.sharding.standard.PreciseShardingAlgorithm
/**
 * Precise sharding algorithm for equality (=) queries on the sharding column.
 * Routes a gmt_create timestamp to a physical table whose suffix is
 * (value - startTime) / interval.
 */
public class DatePreciseShardingAlgorithm implements PreciseShardingAlgorithm<LocalDateTime> {

    /** DateTimeFormatter is immutable and thread-safe; build it once, not per call. */
    private static final DateTimeFormatter START_TIME_FORMAT =
            DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");

    /**
     * @param collection            table names generated by sharding-jdbc from the
     *                              actual-data-nodes rule in the yml
     * @param preciseShardingValue  the sharding-key value passed in, e.g.
     *                              gmt_create='2022-10-01 18:00:00'
     * @return the physical table name to route the statement to
     */
    @SneakyThrows
    @Override
    public String doSharding(Collection<String> collection, PreciseShardingValue<LocalDateTime> preciseShardingValue) {
        LocalDateTime localDateTime = preciseShardingValue.getValue();
        if (localDateTime == null) {
            localDateTime = DateTimeUtils.now();
        }
        // Idea: (value - sharding start time) / shard interval; the quotient is the table suffix.
        LocalDateTime startTime = LocalDateTime.parse(ShardingJdbcConstants.startTime, START_TIME_FORMAT);
        if (localDateTime.isBefore(startTime)) {
            // Values earlier than the sharding start time all live in the default table.
            return Constants.MESSAGE_TABLE_NAME;
        }
        long seconds = Duration.between(startTime, localDateTime).getSeconds();
        // NOTE(review): the range algorithm divides by ShardingJdbcConstants.interval;
        // this hard-coded 7200L must equal that constant, otherwise the two algorithms
        // route the same row to different tables — TODO unify on the constant.
        long result = seconds / 7200L;
        String tableName = Constants.MESSAGE_TABLE_NAME + "_" + result;
        if (collection.contains(tableName)) {
            return tableName;
        }
        // Bug fix: the original called new SQLException("...{}", tableName), which has no
        // slf4j-style placeholders — the second argument is the SQLState, so the table
        // name was silently dropped from the message. Build the message explicitly.
        throw new SQLException("The table does not exist, tableName: " + tableName);
    }
}
范围分表
需要实现org.apache.shardingsphere.api.sharding.standard.RangeShardingAlgorithm
/**
 * Binary search for {@code target} in the sorted table-name list {@code arr}.
 * Table names are compared by their numeric suffix (the part after
 * "&lt;tableName&gt;_"), not lexicographically.
 *
 * @param arr    physical table names, sorted ascending by numeric suffix
 * @param target table name to locate
 * @param left   left search bound (inclusive)
 * @param right  right search bound (inclusive)
 * @return index of {@code target} in {@code arr}, or -1 if it is outside the
 *         [left, right] suffix range or absent
 */
public static int binarySearch(List<String> arr, String target , int left , int right) {
    String prefix = Constants.MESSAGE_TABLE_NAME + "_";
    // Use substring instead of split(prefix): the prefix is a literal, not a regex
    // (split would break on regex metacharacters and recompiles a Pattern each call).
    long targetL = Long.parseLong(target.substring(prefix.length()));
    if (Long.parseLong(arr.get(left).substring(prefix.length())) > targetL
            || Long.parseLong(arr.get(right).substring(prefix.length())) < targetL) {
        return -1;
    }
    // Unsigned shift avoids the classic (left + right) / 2 int-overflow bug.
    int mid = (left + right) >>> 1;
    long midL = Long.parseLong(arr.get(mid).substring(prefix.length()));
    if (midL > targetL) {
        // Target is in the left half.
        return binarySearch(arr, target , left , mid - 1);
    } else if (midL < targetL) {
        // Target is in the right half.
        return binarySearch(arr, target, mid + 1 , right);
    } else {
        // Midpoint equals the target.
        return mid;
    }
}
//RangeShardingValue carries the range's upper and lower endpoints; for business
//reasons they are intentionally ignored here — routing starts from "now" instead.
//rangeShardingValue.getValueRange().upperEndpoint(); maps to gmt_create<${upperEndpoint} in the SQL
//rangeShardingValue.getValueRange().lowerEndpoint(); maps to gmt_create>${lowerEndpoint} in the SQL
@SneakyThrows
@Override
public Collection<String> doSharding(Collection<String> collection, RangeShardingValue<LocalDateTime> rangeShardingValue) {
List<String> list = new LinkedList<>();
LocalDateTime startTime = LocalDateTime.parse(ShardingJdbcConstants.startTime, DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"));
// Index of the shard "now" falls into: elapsed seconds / shard interval.
long cur = Duration.between(startTime, DateTimeUtils.now()).getSeconds() / ShardingJdbcConstants.interval;
if (cur < 0){
// "now" precedes the sharding start time: everything lives in the default table.
list.add(Constants.MESSAGE_TABLE_NAME);
return list;
}
String curTableName = Constants.MESSAGE_TABLE_NAME + "_" + cur;
if (!collection.contains(curTableName)){
throw new SQLException("The table is not exists,tableName:"+curTableName);
}
// NOTE(review): binarySearch requires collect to be sorted ascending by numeric
// suffix; this copy preserves whatever order sharding-jdbc supplies — TODO confirm.
List<String> collect = new ArrayList<>(collection);
// Query the current table plus the tables immediately before it.
int index = binarySearch(collect, curTableName, 0, collect.size() - 1);
if (index == -1){
throw new SQLException("The table is not exists,tableName:"+curTableName);
}
// Walk backwards from the current shard until tableCount tables are collected.
while(index > -1 && list.size() < ShardingJdbcConstants.tableCount){
list.add(collect.get(index));
index--;
}
// Still fewer tables than requested: include the initial (default) table as well.
if (list.size() < ShardingJdbcConstants.tableCount){
list.add(Constants.MESSAGE_TABLE_NAME);
}
return list;
}
踩坑记录
雪花算法
多机部署时需要配置不同的机器id,减少高并发情况下不同机器生成同一个id的概率。也可以在部署的时候去手动指定机器id。即使如此,仍然有可能生成重复的id,所以在insert的时候需要做好充分的预案,防止insert失败,数据丢失。
@Configuration
public class SnowFlakeWorkerIdConfig {
/**
 * Dynamically sets the worker.id property used by sharding-jdbc's snowflake
 * key generator, via System.setProperty(); a container id or a per-machine
 * identifier could be used instead of a random value.
 * Max workerId is 1L << 10 = 1024, i.e. 0 <= workerId < 1024.
 * (The original comment said "1L << 100" — that is wrong; 2^10 = 1024.)
 * NOTE(review): a random id does not guarantee uniqueness across instances —
 * two machines can still draw the same value; prefer a deterministic machine id.
 * {@link SnowflakeShardingKeyGenerator#getWorkerId()}
 */
static {
Random random = new Random();
int workId = random.nextInt(1024);
System.setProperty("workerId", workId + "");
}
}
LocalDateTime
sharding-jdbc4.1.1版本不兼容LocalDateTime
@Component
// Java type this converter handles
@MappedTypes(LocalDateTime.class)
// JDBC type this converter handles
@MappedJdbcTypes(value = JdbcType.TIMESTAMP, includeNullJdbcType = true)
public class LocalDateTimeTypeHandle extends BaseTypeHandler<LocalDateTime> {

    /** DateTimeFormatter is immutable and thread-safe; one shared instance suffices. */
    private final DateTimeFormatter dateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");

    @Override
    public void setNonNullParameter(PreparedStatement ps, int i, LocalDateTime parameter, JdbcType jdbcType)
            throws SQLException {
        // NOTE(review): setObject(LocalDateTime) is exactly the call sharding-jdbc 4.1.1
        // is said to be incompatible with; if writes fail, switch to
        // ps.setString(i, parameter.format(dateTimeFormatter)) — TODO confirm.
        ps.setObject(i, parameter);
    }

    @Override
    public LocalDateTime getNullableResult(ResultSet rs, String columnName) throws SQLException {
        return parse(rs.getString(columnName));
    }

    @Override
    public LocalDateTime getNullableResult(ResultSet rs, int columnIndex) throws SQLException {
        return parse(rs.getString(columnIndex));
    }

    @Override
    public LocalDateTime getNullableResult(CallableStatement cs, int columnIndex) throws SQLException {
        return parse(cs.getString(columnIndex));
    }

    /**
     * Null-safe parse shared by the three getters — replaces three byte-identical
     * bodies with one helper (DRY).
     */
    private LocalDateTime parse(String target) {
        if (StringUtil.isEmpty(target)) {
            return null;
        }
        return LocalDateTime.parse(target, dateTimeFormatter);
    }
}