Springboot 集成 dynamic-datasource-spring-boot-starter 和 Sharding-jdbc 5.x,实现项目中原有的数据源作为主数据源,并保证分库分表有效
对于Sharding-jdbc 5.x的使用,只使用了分表,分库暂未测试
引入多数据源切换依赖:
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>dynamic-datasource-spring-boot-starter</artifactId>
<version>3.6.1</version>
</dependency>
ShardingJDBC依赖:
<dependency>
<groupId>org.apache.shardingsphere</groupId>
<artifactId>shardingsphere-jdbc-core-spring-boot-starter</artifactId>
<version>5.1.1</version>
</dependency>
使用的数据库连接池依赖:
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid-spring-boot-starter</artifactId>
<version>1.2.15</version>
</dependency>
以下相关代码编写基于的配置文件:
mysql: &db-mysql
username: 'xxxx'
password: 'xxxxx'
driverClassName: com.mysql.cj.jdbc.Driver
url: jdbc:mysql://localhost:3306/test?serverTimezone=Asia/Shanghai&characterEncoding=utf8&useUnicode=true&useSSL=false&autoReconnect=true&zeroDateTimeBehavior=convertToNull&allowMultiQueries=true&nullCatalogMeansCurrent=true
spring:
datasource:
<<: *db-mysql
shardingsphere:
mode:
type: Memory
props:
# 打开sql输出日志
sql-show: true
datasource:
names: test
# 数据源名必须与上面 names 中声明的 "test" 以及 actual-data-nodes 中的前缀一致
test:
type: com.alibaba.druid.pool.DruidDataSource
driver-class-name: com.mysql.cj.jdbc.Driver
url: jdbc:mysql://localhost:3306/test?serverTimezone=Asia/Shanghai&characterEncoding=utf8&useUnicode=true&useSSL=false&autoReconnect=true&zeroDateTimeBehavior=convertToNull&allowMultiQueries=true&nullCatalogMeansCurrent=true
username: root
password: 123456
rules:
sharding:
tables:
# 支付订单表
test_order:
actual-data-nodes: test.test_order_$->{2023..2024}
table-strategy:
# 标准分片算法
standard:
# 分片列名称
sharding-column: created_time
# 分片算法名称
sharding-algorithm-name: test-order-inline
key-generate-strategy:
# 分布式序列列名称
column: id
# 分布式序列算法名称
key-generator-name: snowflake-id-key-generator
# 分片算法配置
sharding-algorithms:
# 分片算法名
# 支付订单表
test-order-inline:
# 使用的自定义标准分片算法,具体请看下面
type: CUSTOM_YEAR_STANDARD
props:
# 根据创建时间的年限进行分表处理
algorithm-expression: test_order_$->{created_time.toString().substring(0, 4)}
# 分布式序列算法配置
key-generators:
# 分布式序列名
snowflake-id-key-generator:
# 分布式序列算法类型 雪花算法
type: SNOWFLAKE
# 分布式序列算法属性配置
# props:
动态配置类(重要):
@Configuration
@EnableConfigurationProperties({DynamicDataSourceProperties.class})
@AutoConfigureBefore({DynamicDataSourceAutoConfiguration.class, SpringBootConfiguration.class})
public class DataSourceConfiguration {

    // Loggers should be static final: one shared instance per class instead of one per bean,
    // and bound to the concrete class rather than this.getClass().
    private static final Logger logger = LoggerFactory.getLogger(DataSourceConfiguration.class);

    /**
     * dynamic-datasource configuration properties (spring.datasource.dynamic.*).
     */
    @Autowired
    private DynamicDataSourceProperties properties;

    /**
     * Plain Spring datasource properties (spring.datasource.*), used to build the default
     * primary datasource when the dynamic configuration does not override it.
     */
    @Autowired
    private DataSourceProperties dataSourceProperties;

    /**
     * ShardingSphere exposes several datasource flavors; inject the one matching the business need:
     *
     * <p>1. sharding only / none (default name): shardingDataSource;
     * <p>2. master-slave datasource: masterSlaveDataSource;
     * <p>3. encrypt datasource: encryptDataSource;
     * <p>4. shadow datasource: shadowDataSource
     *
     * <p>{@code @Lazy} breaks the startup dependency cycle with the ShardingSphere
     * auto-configuration that creates the "shardingSphereDataSource" bean.
     */
    @Lazy
    @Resource(name = "shardingSphereDataSource")
    private AbstractDataSourceAdapter shardingDataSource;

    /**
     * Builds the provider dynamic-datasource uses to load every datasource, including
     * the ShardingSphere one, so the original primary datasource and the sharding rules
     * can coexist.
     *
     * @return provider combining the Spring default datasource, any extra dynamic
     *         datasources, and the ShardingSphere datasource
     */
    @Bean
    public DynamicDataSourceProvider dynamicDataSourceProvider() {
        // By default, build the primary datasource from the plain Spring datasource settings.
        DataSourceProperty masterProperty = new DataSourceProperty();
        masterProperty.setType(dataSourceProperties.getType())
                .setDriverClassName(dataSourceProperties.getDriverClassName())
                .setUrl(dataSourceProperties.getUrl())
                .setUsername(dataSourceProperties.getUsername())
                .setPassword(dataSourceProperties.getPassword());
        // If dynamic-datasource declares its own entries and one of them matches the
        // configured primary name, let it override the plain Spring settings.
        // Null-guarded: getDatasource() may be absent when no dynamic entries are configured.
        Map<String, DataSourceProperty> datasource = properties.getDatasource();
        if (datasource != null && !datasource.isEmpty()) {
            DataSourceProperty dynamicMasterProperty = datasource.get(properties.getPrimary());
            if (dynamicMasterProperty != null) {
                masterProperty.setType(dynamicMasterProperty.getType())
                        .setDriverClassName(dynamicMasterProperty.getDriverClassName())
                        .setUrl(dynamicMasterProperty.getUrl())
                        .setUsername(dynamicMasterProperty.getUsername())
                        .setPassword(dynamicMasterProperty.getPassword());
            }
        }
        return new ShardingDataSourceProvider(masterProperty, properties, shardingDataSource);
    }

    /**
     * Registers the dynamic routing datasource as the primary {@link DataSource}.
     * When several DataSource beans exist, Spring injects the {@code @Primary} one;
     * routing through it keeps ShardingSphere's native configuration style working.
     *
     * @return the routing datasource configured from the dynamic-datasource properties
     */
    @Primary
    @Bean
    public DataSource dataSource() {
        DynamicRoutingDataSource dataSource = new DynamicRoutingDataSource();
        dataSource.setPrimary(properties.getPrimary());
        dataSource.setStrict(properties.getStrict());
        dataSource.setStrategy(properties.getStrategy());
        dataSource.setP6spy(properties.getP6spy());
        dataSource.setSeata(properties.getSeata());
        return dataSource;
    }
}
继承 AbstractDataSourceProvider 类,实现 loadDataSources 方法:
@AllArgsConstructor
public class ShardingDataSourceProvider extends AbstractDataSourceProvider {

    /**
     * Primary datasource properties resolved by {@code DataSourceConfiguration}.
     */
    private final DataSourceProperty masterProperty;

    /**
     * dynamic-datasource configuration (primary name, druid defaults, extra datasources).
     */
    private final DynamicDataSourceProperties dynamicDataSourceProperties;

    /**
     * The ShardingSphere datasource, registered under {@link #SHARDING_DATA_SOURCE_NAME}.
     */
    private final AbstractDataSourceAdapter shardingSphereDataSource;

    /**
     * Name of the sharding datasource; must match the logical datasource name declared
     * under spring.shardingsphere.datasource.names and used in actual-data-nodes.
     */
    private static final String SHARDING_DATA_SOURCE_NAME = "test";

    /**
     * Loads every datasource managed by dynamic-datasource: the primary one, any
     * additional configured datasources, and the ShardingSphere datasource.
     *
     * @return map of datasource name to {@link DataSource} instance
     */
    @Override
    public Map<String, DataSource> loadDataSources() {
        // Diamond operator instead of the raw `new HashMap(16)` of the original.
        Map<String, DataSourceProperty> map = new HashMap<>(16);
        // Apply the global druid defaults to the primary datasource before creation.
        masterProperty.setDruid(this.dynamicDataSourceProperties.getDruid());
        map.put(this.dynamicDataSourceProperties.getPrimary(), masterProperty);
        // Null-guarded: no extra datasources may be configured at all.
        Map<String, DataSourceProperty> datasource = this.dynamicDataSourceProperties.getDatasource();
        if (datasource != null && !datasource.isEmpty()) {
            map.putAll(datasource);
        }
        Map<String, DataSource> dataSourceMap = this.createDataSourceMap(map);
        // Static constant accessed statically (the original used `this.`).
        dataSourceMap.put(SHARDING_DATA_SOURCE_NAME, this.shardingSphereDataSource);
        return dataSourceMap;
    }
}
自定义标准分片算法类:
/**
 * Custom standard sharding algorithm: routes tables by the year of a single
 * {@link LocalDateTime} sharding column. Only table sharding has been verified;
 * database sharding is untested.
 *
 * <p>Modeled after:
 * org.apache.shardingsphere.sharding.algorithm.sharding.inline.InlineShardingAlgorithm
 *
 * @author hellfs
 * created 2024/2/27 10:59
 */
public class CustomYEARShardingAlgorithm implements StandardShardingAlgorithm<LocalDateTime> {

    // Bug fix: the original logger was bound to CustomShardingAlgorithm.class, so every
    // log record reported the wrong source class. Also made static final per convention.
    private static final Logger logger = LoggerFactory.getLogger(CustomYEARShardingAlgorithm.class);

    // Groovy inline expression, e.g. test_order_$->{created_time.toString().substring(0, 4)}.
    // (The original also declared an unused DateTimeFormatter field; removed.)
    private String algorithmExpression;

    // Algorithm properties injected by ShardingSphere from the YAML `props` section.
    @Getter
    @Setter
    private Properties props = new Properties();

    /**
     * Called by ShardingSphere after {@link #props} is injected; caches the
     * mandatory "algorithm-expression" property.
     */
    @Override
    public void init() {
        this.algorithmExpression = this.getAlgorithmExpression();
        logger.info("[自定义标准分片算法-初始化]处理成功,配置信息:\n\nalgorithm-expression:{}\n",algorithmExpression);
    }

    /**
     * Reads and validates the mandatory "algorithm-expression" property.
     *
     * @return the trimmed expression with its inline placeholders resolved
     * @throws IllegalStateException when the expression is missing or empty
     */
    private String getAlgorithmExpression() {
        String expression = this.props.getProperty("algorithm-expression");
        Preconditions.checkState(null != expression && !expression.isEmpty(), "Custom standard sharding standard algorithm expression cannot be null or empty.");
        return InlineExpressionParser.handlePlaceHolder(expression.trim());
    }

    /**
     * Precise sharding (= / IN): evaluates the inline expression against the single
     * sharding value and returns the resulting physical table name.
     */
    @Override
    public String doSharding(Collection<String> availableTargetNames, PreciseShardingValue<LocalDateTime> preciseShardingValue) {
        // The original also extracted logicTableName but never used it; removed.
        return this.doSharding(preciseShardingValue.getColumnName(), preciseShardingValue.getValue());
    }

    /**
     * Range sharding (BETWEEN / &lt; / &gt;): returns every configured physical table
     * whose year lies inside the range. Falls back to all candidate tables when the
     * range is open-ended, when no computed table exists, or when the range is inverted.
     */
    @Override
    public Collection<String> doSharding(Collection<String> availableTargetNames, RangeShardingValue<LocalDateTime> rangeShardingValue) {
        String columnName = rangeShardingValue.getColumnName();
        Range<LocalDateTime> valueRange = rangeShardingValue.getValueRange();
        // Robustness fix: an open-ended range (e.g. "created_time < ?") has no lower
        // and/or upper endpoint; the original called lowerEndpoint()/upperEndpoint()
        // unconditionally, which throws IllegalStateException on such ranges.
        if (!valueRange.hasLowerBound() || !valueRange.hasUpperBound()) {
            return availableTargetNames;
        }
        // Range minimum
        LocalDateTime lowerEndpoint = valueRange.lowerEndpoint();
        int lowerYear = lowerEndpoint.getYear();
        // Range maximum
        LocalDateTime upperEndpoint = valueRange.upperEndpoint();
        int upperYear = upperEndpoint.getYear();
        if (lowerYear == upperYear) {
            String availableTargetName = this.doSharding(columnName, lowerEndpoint);
            // If the computed table is not among the configured ones, scan all tables.
            if (!availableTargetNames.contains(availableTargetName)) {
                logger.info("[自定义标准分片算法]未获取到有效表名列表,默认全表处理," +
                        "参数列表:{availableTargetNames:{},rangeShardingValue:{}}",
                        JSON.toJSONString(availableTargetNames), JSON.toJSONString(rangeShardingValue));
                return availableTargetNames;
            }
            return Collections.singletonList(availableTargetName);
        } else if (lowerYear < upperYear) {
            List<String> data = new ArrayList<>();
            LocalDateTime start = lowerEndpoint;
            // Walk year by year from the lower to the upper endpoint, keeping only
            // tables that actually exist among the configured target names.
            while (start.getYear() <= upperYear) {
                String availableTargetName = this.doSharding(columnName, start);
                if (availableTargetNames.contains(availableTargetName)) {
                    data.add(availableTargetName);
                }
                start = start.plusYears(1L);
            }
            if (data.isEmpty()) {
                logger.info("[自定义标准分片算法]未获取到有效表名列表,默认全表处理," +
                        "参数列表:{availableTargetNames:{},rangeShardingValue:{}}",
                        JSON.toJSONString(availableTargetNames), JSON.toJSONString(rangeShardingValue));
                return availableTargetNames;
            }
            return data;
        } else {
            // Inverted range (minimum greater than maximum): nonsensical, so fall
            // back to scanning every candidate table.
            return availableTargetNames;
        }
    }

    /**
     * Evaluates the inline expression with the sharding column bound to the given
     * value, producing the physical table name.
     */
    private String doSharding(String columnName, LocalDateTime value) {
        // A fresh closure per call: setProperty mutates the closure instance.
        Closure<?> closure = this.createClosure(this.algorithmExpression);
        closure.setProperty(columnName, value);
        return this.getTargetShardingNode(closure, columnName, this.algorithmExpression);
    }

    /**
     * Builds a Groovy closure for the inline expression whose property lookups
     * resolve only against the fresh Expando delegate.
     */
    private Closure<?> createClosure(String algorithmExpression) {
        Closure<?> result = new InlineExpressionParser(algorithmExpression).evaluateClosure().rehydrate(new Expando(), null, null);
        // Named constant instead of the magic number 3 used by the original.
        result.setResolveStrategy(Closure.DELEGATE_ONLY);
        return result;
    }

    /**
     * Invokes the closure and returns its result as the target table name.
     *
     * @throws ShardingSphereException when the expression and sharding column do not match
     */
    private String getTargetShardingNode(Closure<?> closure, String columnName, String algorithmExpression) {
        try {
            return closure.call().toString();
        } catch (NullPointerException | MissingMethodException ex) {
            throw new ShardingSphereException("Custom standard sharding algorithms expression `%s` and sharding column `%s` not match.", new Object[]{algorithmExpression, columnName});
        }
    }

    /**
     * SPI type name; must match the `type` used in the YAML sharding-algorithms section.
     */
    @Override
    public String getType() {
        return "CUSTOM_YEAR_STANDARD";
    }
}
注册自定义分片算法:在 resources 下新增 META-INF/services 文件夹,并在 services 下新增文件,文件名为 org.apache.shardingsphere.sharding.spi.ShardingAlgorithm,文件内容为自定义分片算法类的全限定名 io.github.hellfs.shardingjdbc.shardingalgorithm.CustomYEARShardingAlgorithm(注意:全限定名必须与上面定义的类名 CustomYEARShardingAlgorithm 完全一致,否则 SPI 加载不到该算法)。
如果按照现在的配置同时使用分库和分表时,查询时分库分表规则失效,请查看下方参考文档。