ShardingSphere作为一个分库分表组件,在处理分表业务时,比起自己实现分表功能,还是有很多明显优势的。
对于一个从0开始的Spring Boot项目,添加ShardingSphere分库分表是比较简单的;有时需要在已有的程序框架中添加ShardingSphere的分库分表功能,这就需要根据框架本身的特点进行特定的配置了。
1. 添加依赖
在ruoyi-framework\pom.xml
模块添加sharding-jdbc整合依赖:
<!-- sharding-jdbc分库分表 -->
<dependency>
<groupId>org.apache.shardingsphere</groupId>
<artifactId>shardingsphere-jdbc-core</artifactId>
<version>5.0.0</version>
</dependency>
2. 创建测试库
这里使用和主程序相同的数据库,所以不需要单独的创建数据库。
3. 创建两个测试订单表
create table sys_order
(
order_id bigint(20) not null comment '订单ID',
user_id bigint(64) not null comment '用户编号',
status char(1) not null comment '状态(0交易成功 1交易失败)',
order_no varchar(64) default null comment '订单流水',
primary key (order_id)
) engine=innodb comment = '订单信息表';
create table sys_order_0
(
order_id bigint(20) not null comment '订单ID',
user_id bigint(64) not null comment '用户编号',
status char(1) not null comment '状态(0交易成功 1交易失败)',
order_no varchar(64) default null comment '订单流水',
primary key (order_id)
) engine=innodb comment = '订单信息表';
create table sys_order_1
(
order_id bigint(20) not null comment '订单ID',
user_id bigint(64) not null comment '用户编号',
status char(1) not null comment '状态(0交易成功 1交易失败)',
order_no varchar(64) default null comment '订单流水',
primary key (order_id)
) engine=innodb comment = '订单信息表';
4. 配置文件添加数据源
在application-druid.yml
添加测试数据源,位置和主数据源同级位置:
# 数据源配置
spring:
datasource:
type: com.alibaba.druid.pool.DruidDataSource
driverClassName: com.mysql.cj.jdbc.Driver
druid:
# 主库数据源
master:
url: jdbc:mysql://localhost:3306/ry473?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&useSSL=true&serverTimezone=GMT%2B8
username: root
password: password
# 订单库1
shardsource:
enabled: true
url: jdbc:mysql://localhost:3306/ry473?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&useSSL=true&serverTimezone=GMT%2B8
username: root
password: password
5. 代码生成工具生成代码组件
通过代码生成功能生成sys_order相关组件并添加到项目中,包括页面、控制器、服务层、mapper以及mapper.xml文件等。
6. 添加shardingsphere配置类
package com.ruoyi.framework.config;
import java.sql.SQLException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import javax.sql.DataSource;
import org.apache.shardingsphere.driver.api.ShardingSphereDataSourceFactory;
import org.apache.shardingsphere.infra.config.algorithm.ShardingSphereAlgorithmConfiguration;
import org.apache.shardingsphere.sharding.api.config.ShardingRuleConfiguration;
import org.apache.shardingsphere.sharding.api.config.rule.ShardingTableRuleConfiguration;
import org.apache.shardingsphere.sharding.api.config.strategy.keygen.KeyGenerateStrategyConfiguration;
import org.apache.shardingsphere.sharding.api.config.strategy.sharding.StandardShardingStrategyConfiguration;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.alibaba.druid.pool.DruidDataSource;
import com.alibaba.druid.spring.boot.autoconfigure.DruidDataSourceBuilder;
import com.ruoyi.framework.config.properties.DruidProperties;
/**
* sharding 配置信息
*
* @author ruoyi
*/
/**
 * ShardingSphere data source configuration.
 *
 * Builds a Druid-backed physical data source for the order tables and wraps it
 * in a ShardingSphere logical data source ("shardingDataSource") that splits
 * sys_order across sys_order_0 / sys_order_1 by order_id.
 *
 * @author ruoyi
 */
@Configuration
public class ShardingDataSourceConfig
{
    /**
     * Physical Druid data source backing the sharded order tables.
     * Only created when spring.datasource.druid.shardsource.enabled=true.
     *
     * @param druidProperties common Druid pool settings applied to the pool
     * @return the configured Druid data source
     */
    @Bean
    @ConfigurationProperties("spring.datasource.druid.shardsource")
    @ConditionalOnProperty(prefix = "spring.datasource.druid.shardsource", name = "enabled", havingValue = "true")
    public DataSource shardDataSource(DruidProperties druidProperties)
    {
        DruidDataSource dataSource = DruidDataSourceBuilder.create().build();
        return druidProperties.dataSource(dataSource);
    }

    /**
     * ShardingSphere logical data source wrapping {@link #shardDataSource}.
     *
     * @param shardDataSource the physical data source registered under the
     *                        logical name "order"
     * @return a ShardingSphere data source enforcing the sys_order table rule
     * @throws SQLException if ShardingSphere fails to build the data source
     */
    @Bean(name = "shardingDataSource")
    public DataSource shardingDataSource(@Qualifier("shardDataSource") DataSource shardDataSource) throws SQLException
    {
        Map<String, DataSource> dataSourceMap = new HashMap<>();
        // Logical data source name "order" is referenced by the actual-data-nodes expression below.
        dataSourceMap.put("order", shardDataSource);

        // sys_order logical table maps to physical tables order.sys_order_0 and order.sys_order_1.
        ShardingTableRuleConfiguration orderTableRuleConfig = new ShardingTableRuleConfiguration("sys_order", "order.sys_order_$->{0..1}");
        // Standard (single sharding column) strategy on order_id, delegated to the
        // custom algorithm registered below under the name "tableShardingAlgorithm".
        orderTableRuleConfig.setTableShardingStrategy(
                new StandardShardingStrategyConfiguration("order_id", "tableShardingAlgorithm"));
        // Distributed primary keys for order_id via the snowflake generator.
        orderTableRuleConfig.setKeyGenerateStrategy(new KeyGenerateStrategyConfiguration("order_id", "snowflake"));

        // Assemble the sharding rule.
        ShardingRuleConfiguration shardingRuleConfig = new ShardingRuleConfiguration();
        shardingRuleConfig.getTables().add(orderTableRuleConfig);

        // Custom table-sharding algorithm (SPI type ORDER_ID_SHARD); the
        // "shardCount" property is read by TableShardingAlgorithm at runtime.
        Properties tableShardProps = new Properties();
        tableShardProps.setProperty("shardCount", "2");
        shardingRuleConfig.getShardingAlgorithms().put("tableShardingAlgorithm",
                new ShardingSphereAlgorithmConfiguration("ORDER_ID_SHARD", tableShardProps));

        // Built-in snowflake distributed sequence algorithm.
        Properties snowflakeProp = new Properties();
        snowflakeProp.setProperty("worker-id", "1");
        shardingRuleConfig.getKeyGenerators().put("snowflake",
                new ShardingSphereAlgorithmConfiguration("SNOWFLAKE", snowflakeProp));

        return ShardingSphereDataSourceFactory.createDataSource(dataSourceMap,
                Collections.singleton(shardingRuleConfig),
                getProperties());
    }

    /**
     * Global ShardingSphere runtime properties.
     *
     * @return properties enabling logical/actual SQL logging
     */
    private Properties getProperties()
    {
        Properties shardingProperties = new Properties();
        // BUG FIX: ShardingSphere 5.x reads the key "sql-show" (the 4.x key was
        // "sql.show"), and the value must be stored as a String — a Boolean put
        // via put() is not visible through Properties.getProperty().
        shardingProperties.setProperty("sql-show", "true");
        return shardingProperties;
    }
}
7. druid配置类配置
/**
 * Primary dynamic routing data source: dispatches to master/slave/sharding
 * targets based on the @DataSource annotation of the executing method.
 *
 * @param masterDataSource the default (master) data source
 * @return the routing data source with all available targets registered
 */
@Bean(name = "dynamicDataSource")
@Primary
public DynamicDataSource dataSource(DataSource masterDataSource)
{
    // Routing table: data-source-type name -> actual DataSource bean.
    Map<Object, Object> targets = new HashMap<>();
    targets.put(DataSourceType.MASTER.name(), masterDataSource);
    // Optional targets are only registered when the named bean exists.
    setDataSource(targets, DataSourceType.SLAVE.name(), "slaveDataSource");
    setDataSource(targets, DataSourceType.SHARDING.name(), "shardingDataSource");
    // Master is the fallback when no routing key is set.
    return new DynamicDataSource(masterDataSource, targets);
}
8. 添加分表策略
package com.ruoyi.framework.config;
import java.util.Collection;
import java.util.Properties;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.shardingsphere.sharding.api.sharding.standard.PreciseShardingValue;
import org.apache.shardingsphere.sharding.api.sharding.standard.RangeShardingValue;
import org.apache.shardingsphere.sharding.api.sharding.standard.StandardShardingAlgorithm;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
/**
 * Custom standard sharding algorithm for sys_order: routes a row to the
 * physical table whose numeric suffix equals order_id % shardCount.
 * shardCount comes from the algorithm's "shardCount" property (default 2).
 *
 * Registered through SPI under type ORDER_ID_SHARD (see
 * META-INF/services/org.apache.shardingsphere.sharding.spi.ShardingAlgorithm).
 */
@Component
public class TableShardingAlgorithm implements StandardShardingAlgorithm<Long>{
    private static Logger log = LoggerFactory.getLogger(TableShardingAlgorithm.class);

    // Algorithm properties injected by ShardingSphere (may be null when unset).
    private Properties props;

    // Number of physical shard tables; overridden by the "shardCount" property.
    private Long shardCount = 2L;

    public Properties getProps() {
        return props;
    }

    public void setProps(Properties props) {
        this.props = props;
    }

    /**
     * Precise routing for equality conditions (e.g. WHERE order_id = ?).
     *
     * @param tableNames    candidate physical table names (sys_order_0, sys_order_1, ...)
     * @param shardingValue the order_id value being routed
     * @return the matching physical table, or "" when no candidate matches
     */
    @Override
    public String doSharding(Collection<String> tableNames, PreciseShardingValue<Long> shardingValue) {
        Long value = shardingValue.getValue();
        // Refresh shardCount from the configured algorithm properties.
        if (props != null)
        {
            shardCount = Long.parseLong(props.getProperty("shardCount", "2"));
        }
        // BUG FIX: the original hard-coded "% 2" and ignored the configured
        // shardCount it had just parsed; use the configured value instead.
        String suffix = "_" + (value % shardCount);
        for (String table : tableNames) {
            if (table.endsWith(suffix)) {
                return table;
            }
        }
        return "";
    }

    @Override
    public String getType() {
        // SPI type name referenced by ShardingSphereAlgorithmConfiguration("ORDER_ID_SHARD", ...).
        return "ORDER_ID_SHARD";
    }

    @Override
    public void init() {
        // No initialization required.
    }

    /**
     * Range routing (e.g. WHERE order_id BETWEEN ? AND ?).
     *
     * @return all candidate tables — a range of ids may span every shard
     */
    @Override
    public Collection<String> doSharding(Collection<String> tableNames,
            RangeShardingValue<Long> shardingValue) {
        // BUG FIX: the original returned null, which NPEs on any range query;
        // fall back to scanning all candidate tables.
        return tableNames;
    }
}
9. 通过SPI设置分表策略
在resources目录下创建META-INF/services目录,在其中创建文件org.apache.shardingsphere.sharding.spi.ShardingAlgorithm,并在文件中添加如下内容:
com.ruoyi.framework.config.TableShardingAlgorithm
自定义的分片策略,需要通过SPI机制进行注册后才能被ShardingSphere发现并使用。
10. 在服务的实现文件中添加注解
通过注解@DataSource(DataSourceType.SHARDING),框架可以判断对应的操作需要使用分表的数据库连接。
package com.ruoyi.system.service.impl;
import java.util.List;
import com.ruoyi.common.annotation.DataSource;
import com.ruoyi.common.enums.DataSourceType;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import com.ruoyi.system.mapper.SysOrderMapper;
import com.ruoyi.system.domain.SysOrder;
import com.ruoyi.system.service.ISysOrderService;
import com.ruoyi.common.core.text.Convert;
/**
* 订单信息Service业务层处理
*
* @author ruoyi
* @date 2022-06-27
*/
@Service
public class SysOrderServiceImpl implements ISysOrderService
{
    @Autowired
    private SysOrderMapper sysOrderMapper;
    /**
     * Query one order by primary key.
     * Routed to the sharding data source via @DataSource, so ShardingSphere
     * resolves which physical sys_order_N table to read.
     *
     * @param orderId order primary key (also the sharding column)
     * @return the order info returned by the mapper
     */
    @Override
    @DataSource(DataSourceType.SHARDING)
    public SysOrder selectSysOrderByOrderId(Long orderId)
    {
        return sysOrderMapper.selectSysOrderByOrderId(orderId);
    }
    /**
     * Query a list of orders matching the given criteria.
     *
     * @param sysOrder query criteria
     * @return list of matching orders
     */
    @Override
    @DataSource(DataSourceType.SHARDING)
    public List<SysOrder> selectSysOrderList(SysOrder sysOrder)
    {
        return sysOrderMapper.selectSysOrderList(sysOrder);
    }
    /**
     * Insert a new order.
     *
     * @param sysOrder order info to insert
     * @return affected row count
     */
    @Override
    @DataSource(DataSourceType.SHARDING)
    public int insertSysOrder(SysOrder sysOrder)
    {
        return sysOrderMapper.insertSysOrder(sysOrder);
    }
    /**
     * Update an existing order.
     *
     * @param sysOrder order info to update
     * @return affected row count
     */
    @Override
    @DataSource(DataSourceType.SHARDING)
    public int updateSysOrder(SysOrder sysOrder)
    {
        return sysOrderMapper.updateSysOrder(sysOrder);
    }
    /**
     * Batch-delete orders.
     *
     * @param orderIds comma-separated order primary keys to delete
     * @return affected row count
     */
    @Override
    @DataSource(DataSourceType.SHARDING)
    public int deleteSysOrderByOrderIds(String orderIds)
    {
        return sysOrderMapper.deleteSysOrderByOrderIds(Convert.toStrArray(orderIds));
    }
    /**
     * Delete one order by primary key.
     *
     * @param orderId order primary key
     * @return affected row count
     */
    @Override
    @DataSource(DataSourceType.SHARDING)
    public int deleteSysOrderByOrderId(Long orderId)
    {
        return sysOrderMapper.deleteSysOrderByOrderId(orderId);
    }
}
11. 执行测试
通过页面,可以添加和删除数据,在数据库表中,可以看到数据被保存到不同的表中了。
表sys_order_0中的数据:
表sys_order_1中的数据:
可以看出尾数为奇数的数据到了sys_order_1,尾数为偶数的数据到了sys_order_0。