Scenario: table sharding only (no database sharding), with one table per month.
1. Add the dependency
<dependency>
    <groupId>org.apache.shardingsphere</groupId>
    <artifactId>sharding-jdbc-spring-boot-starter</artifactId>
    <version>4.1.1</version>
</dependency>
2. Write the configuration file
spring:
  main:
    allow-bean-definition-overriding: true
  shardingsphere:
    # whether to print the actual SQL (set to true when debugging)
    props:
      sql:
        show: false
    # data source configuration
    datasource:
      # alias for each data source; ds0, ds1, ... can be any names
      names: ds0
      # connection settings for data source ds0
      ds0:
        # use the Druid connection pool
        type: com.alibaba.druid.pool.DruidDataSource
        driver-class-name: com.mysql.cj.jdbc.Driver
        url: jdbc:mysql://127.0.0.1:3306/cloud?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&useSSL=true&serverTimezone=GMT%2B8
        username: root
        password: 123456
        # additional connection pool settings for the data source above
        # initial pool size
        initial-size: 5
        # minimum number of idle connections
        min-idle: 5
        # maximum number of active connections
        max-active: 300
        # maximum wait time (ms) when acquiring a connection
        max-wait: 5000
        # when borrowing a connection, run validationQuery if it has been idle longer than timeBetweenEvictionRunsMillis
        test-while-idle: true
        # interval (ms) between eviction runs; also the idle threshold used by testWhileIdle
        time-between-eviction-runs-millis: 60000
        # a connection is closed by the eviction thread once it has been idle longer than this value (ms)
        min-evictable-idle-time-millis: 30000
        # SQL used to validate connections; must be a query
        # MSSQL:  select 1
        # MySQL:  select 'x'
        # Oracle: select 1 from dual
        validation-query: select 1
        # run validationQuery when borrowing a connection; disabled here because it hurts performance
        test-on-borrow: false
        # run validationQuery when returning a connection; disabled here because it hurts performance
        test-on-return: false
        # discard a connection when the database throws an unrecoverable exception
        #exception-sorter: true
        # cache PreparedStatements; recommended for MySQL 5.5+
        pool-prepared-statements: true
        # any value greater than 0 automatically turns poolPreparedStatements on
        max-pool-prepared-statement-per-connection-size: 20
        # extension plugins
        # filters for monitoring; removing them disables SQL statistics on the monitor page, 'wall' is the SQL firewall
        filters: stat,wall,slf4j
        # enable mergeSql and slow-SQL logging via connectProperties
        connection-properties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
        # merge monitoring data from multiple DruidDataSources
        use-global-data-source-stat: true
        # credentials for the Druid monitor page (http://localhost:port/druid/login.html); none by default
        stat-view-servlet:
          login-username: admin
          login-password: admin
          enabled: true
        # enable the web filter
        web-stat-filter:
          enabled: true
    # sharding rules; ds0 is the default data source
    sharding:
      # default data source
      default-data-source-name: ds0
      # table sharding rules
      tables:
        # logical table name (the table to shard)
        septic_tank:
          # actual data nodes: data-source$->{0..N}.logic-table$->{0..N}
          actual-data-nodes: ds0.septic_tank_$->{2022}${(1..12).collect{t -> t.toString().padLeft(2,'0')}}
          key-generator:
            column: id
            type: SNOWFLAKE # snowflake id
          # table sharding strategy (configured the same way as a database sharding strategy)
          table-strategy:
            standard:
              sharding-column: create_time
              precise-algorithm-class-name: com.central.internet.config.shardingsphere.ModuloShardingDatabaseAlgorithm
              range-algorithm-class-name: com.central.internet.config.shardingsphere.StatTimeRangeShardingAlgorithm
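To make the routing concrete, here is a minimal usage sketch. It is not part of the original setup: the SepticTankDemoService class, the name column, and the use of JdbcTemplate are assumptions; only the septic_tank table and the create_time column come from the configuration above. Also note that sharding-jdbc only routes SQL, so the physical tables septic_tank_202201 .. septic_tank_202212 listed in actual-data-nodes must already exist in the cloud database.

import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.stereotype.Service;

import java.util.List;
import java.util.Map;

@Service
public class SepticTankDemoService {

    private final JdbcTemplate jdbcTemplate;

    public SepticTankDemoService(JdbcTemplate jdbcTemplate) {
        this.jdbcTemplate = jdbcTemplate;
    }

    // An equality condition on create_time (an INSERT here) goes through the precise
    // algorithm, which always picks the current month's table, e.g. septic_tank_202204
    // when run in April 2022. The id column is filled in by the SNOWFLAKE key generator.
    public void insert() {
        jdbcTemplate.update(
                "INSERT INTO septic_tank (name, create_time) VALUES (?, ?)",
                "demo", "2022-04-01 10:03:00");
    }

    // A BETWEEN ... AND condition on create_time goes through the range algorithm,
    // so only septic_tank_202201, septic_tank_202202 and septic_tank_202203 are queried.
    public List<Map<String, Object>> queryFirstQuarter() {
        return jdbcTemplate.queryForList(
                "SELECT * FROM septic_tank WHERE create_time BETWEEN ? AND ?",
                "2022-01-01 00:00:00", "2022-03-31 23:59:59");
    }
}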
3. Custom sharding algorithms
ModuloShardingDatabaseAlgorithm
package com.central.internet.config.shardingsphere;

import lombok.extern.slf4j.Slf4j;
import org.apache.shardingsphere.api.sharding.standard.PreciseShardingAlgorithm;
import org.apache.shardingsphere.api.sharding.standard.PreciseShardingValue;
import org.springframework.stereotype.Component;

import java.time.LocalDateTime;
import java.util.Collection;

@Slf4j
@Component
public class ModuloShardingDatabaseAlgorithm implements PreciseShardingAlgorithm<String> {

    private static final String SPLITTER = "_";

    /**
     * Precise sharding for equality conditions on create_time (e.g. INSERT, "create_time = ?").
     * Routes to the table of the current month: logicTable_yyyyMM.
     */
    @Override
    public String doSharding(Collection<String> availableTargetNames, PreciseShardingValue<String> shardingValue) {
        LocalDateTime now = LocalDateTime.now();
        // zero-pad the month so the suffix is always yyyyMM
        String month = String.format("%02d", now.getMonthValue());
        String tbName = shardingValue.getLogicTableName() + SPLITTER + now.getYear() + month;
        log.info("shardingsphere target table: {}", tbName);
        return tbName;
    }
}
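A quick way to sanity-check the precise algorithm is to call it directly. The sketch below is only illustrative (the demo class and the candidate table names are assumptions); it mirrors how sharding-jdbc invokes the algorithm for an equality condition on create_time.

import org.apache.shardingsphere.api.sharding.standard.PreciseShardingValue;

import java.util.Arrays;
import java.util.Collection;

public class PreciseAlgorithmDemo {
    public static void main(String[] args) {
        Collection<String> tables = Arrays.asList(
                "septic_tank_202203", "septic_tank_202204", "septic_tank_202205");
        PreciseShardingValue<String> value =
                new PreciseShardingValue<>("septic_tank", "create_time", "2022-04-01 10:03:00");
        // The algorithm ignores the value and always routes to the current month's table,
        // e.g. septic_tank_202204 when run in April 2022.
        String table = new ModuloShardingDatabaseAlgorithm().doSharding(tables, value);
        System.out.println(table);
    }
}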
StatTimeRangeShardingAlgorithm
package com.central.internet.config.shardingsphere;

import org.apache.shardingsphere.api.sharding.standard.RangeShardingAlgorithm;
import org.apache.shardingsphere.api.sharding.standard.RangeShardingValue;
import org.springframework.stereotype.Component;

import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

/**
 * Range sharding for create_time, used for BETWEEN ... AND conditions
 * (both endpoints must be present).
 *
 * @author: wangql
 * @create: 2022-04-01 10:03
 **/
@Component
public class StatTimeRangeShardingAlgorithm implements RangeShardingAlgorithm<String> {

    /**
     * Picks the monthly tables that fall inside the queried time range.
     *
     * @param tableNames         all actual tables configured in actual-data-nodes
     * @param rangeShardingValue the range of create_time in the query
     * @return the tables that need to be queried
     */
    @Override
    public Collection<String> doSharding(Collection<String> tableNames, RangeShardingValue<String> rangeShardingValue) {
        Set<String> result = new LinkedHashSet<>();
        DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");
        // lower and upper bounds of the BETWEEN ... AND condition
        LocalDateTime lower = LocalDateTime.parse(rangeShardingValue.getValueRange().lowerEndpoint(), formatter);
        LocalDateTime upper = LocalDateTime.parse(rangeShardingValue.getValueRange().upperEndpoint(), formatter);
        // the list of yyyyMM suffixes covered by the range
        List<String> timeList = packDateStrListByInterval(lower.toLocalDate(), upper.toLocalDate());
        // keep only the actual tables whose suffix falls inside the range
        for (String time : timeList) {
            for (String tableName : tableNames) {
                if (tableName.endsWith(time)) {
                    result.add(tableName);
                }
            }
        }
        return result;
    }

    /**
     * Returns every month between startDate and endDate (inclusive) formatted as yyyyMM.
     */
    private List<String> packDateStrListByInterval(LocalDate startDate, LocalDate endDate) {
        List<String> list = new ArrayList<>();
        if (endDate.isBefore(startDate)) {
            return list;
        }
        DateTimeFormatter monthFormatter = DateTimeFormatter.ofPattern("yyyyMM");
        // compare at month granularity so the last month is never skipped
        LocalDate tmp = startDate.withDayOfMonth(1);
        LocalDate last = endDate.withDayOfMonth(1);
        while (!tmp.isAfter(last)) {
            list.add(tmp.format(monthFormatter));
            tmp = tmp.plusMonths(1);
        }
        return list;
    }
}
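The range algorithm can be exercised on its own in the same way. The sketch below is illustrative (the demo class and the candidate table names are assumptions); it shows how a BETWEEN ... AND condition from mid-January to mid-March narrows the query to the first three monthly tables.

import com.google.common.collect.Range;
import org.apache.shardingsphere.api.sharding.standard.RangeShardingValue;

import java.util.Arrays;
import java.util.Collection;

public class RangeAlgorithmDemo {
    public static void main(String[] args) {
        Collection<String> tables = Arrays.asList(
                "septic_tank_202201", "septic_tank_202202",
                "septic_tank_202203", "septic_tank_202204");
        RangeShardingValue<String> value = new RangeShardingValue<>(
                "septic_tank", "create_time",
                Range.closed("2022-01-15 00:00:00", "2022-03-20 23:59:59"));
        // Prints [septic_tank_202201, septic_tank_202202, septic_tank_202203]
        System.out.println(new StatTimeRangeShardingAlgorithm().doSharding(tables, value));
    }
}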