ShardingJdbc 按月分表示例
在业务中通常会遇到数据表数据过大的问题,处理这种问题有常见的几种办法
1.分库分表
2.定时清理数据
第一种处理办法涉及 水平分表、垂直分库等操作,由于我们公司涉及的系统数据量过大的基本为一些日志表等,因此采用水平分表的方式。接下来 记录一下单库水平分表的操作。
依赖信息:
JDK1.8
<parent>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>2.3.3.RELEASE</version>
<!-- <version>2.3.3.RELEASE</version>-->
<relativePath/>
</parent>
<dependency>
<groupId>org.apache.shardingsphere</groupId>
<artifactId>sharding-jdbc-spring-boot-starter</artifactId>
<version>4.1.1</version>
</dependency>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus-boot-starter</artifactId>
<version>3.3.1</version>
<exclusions>
<exclusion>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus-generator</artifactId>
</exclusion>
</exclusions>
</dependency>
在添加上述依赖后在 配置文件中添加如下信息:
spring:
  shardingsphere:
    datasource:
      # Data-source alias. NOTE: every later reference to the data source must
      # use this alias; a wrong name can make startup fail without a useful
      # error message.
      names: ds0
      #,slave1
      ds0:
        type: com.alibaba.druid.pool.DruidDataSource
        url: jdbc:mysql://******?useUnicode=true&characterEncoding=UTF-8&serverTimezone=Asia/Shanghai&useSSL=false
        username: *****
        password: *****
      # Read-replica (slave) data-source configuration:
      # slave1:
      #   type: com.alibaba.druid.pool.DruidDataSource
      #   url: jdbc:mysql://******?useUnicode=true&characterEncoding=UTF-8&serverTimezone=Asia/Shanghai&useSSL=false
      #   username: ******
      #   password: ******
    # masterslave:
    #   load-balance-algorithm-type: round_robin # round-robin load balancing when several masters/slaves exist
    #   name: ms
    #   master-data-source-name: ds0 # master data source
    #   slave-data-source-names: slave1
    sharding:
      tables:
        # Logical tables to shard; each must exist as an entity table in the database.
        staff_access_pass_record:
          # Physical node layout: defines how many physical tables exist and how
          # they are named (rewritten at startup by ShardingJdbcTableNamesReLoad).
          # Expands to staff_access_pass_record_202205 .. _202212; adjust the
          # range to your needs. NOTE: ShardingSphere verifies at startup that
          # all of these tables exist and fails otherwise.
          actual-data-nodes: ds0.staff_access_pass_record_$->{2022..2022}${(5..12).collect{t ->t.toString().padLeft(2,'0')}}
          # ds0.staff_access_pass_record_$->{0..2}
          # Sharding strategy.
          table-strategy:
            # Custom algorithms require the "standard" strategy.
            standard:
              # Sharding key: the column whose value selects the physical table.
              sharding-column: event_time
              # Algorithm for exact-match (=) routing, e.g. inserts; must be a
              # fully-qualified class name.
              precise-algorithm-class-name: com.datawells.config.MyTablePreciseShardingAlgorithm
              # Algorithm for range (BETWEEN / < / >) routing; must be a
              # fully-qualified class name.
              range-algorithm-class-name: com.datawells.config.MyTableRangeShardingAlgorithm
        staff_access_sign_record:
          # Same physical layout as above (see ShardingJdbcTableNamesReLoad).
          actual-data-nodes: ds0.staff_access_sign_record_$->{2022..2022}${(5..12).collect{t ->t.toString().padLeft(2,'0')}}
          # ds0.staff_access_pass_record_$->{0..2}
          # Sharding strategy.
          table-strategy:
            # Custom algorithms require the "standard" strategy.
            standard:
              # Sharding key for this table.
              sharding-column: record_time
              # Exact-match (=) routing algorithm; fully-qualified class name.
              precise-algorithm-class-name: com.datawells.config.MyTablePreciseShardingAlgorithm
              # Range routing algorithm; fully-qualified class name.
              range-algorithm-class-name: com.datawells.config.MyTableRangeShardingAlgorithm
        engy_iot_message:
          # Same physical layout as above (see ShardingJdbcTableNamesReLoad).
          actual-data-nodes: ds0.engy_iot_message_$->{2022..2022}${(5..12).collect{t ->t.toString().padLeft(2,'0')}}
          # ds0.staff_access_pass_record_$->{0..2}
          # Sharding strategy.
          table-strategy:
            # Custom algorithms require the "standard" strategy.
            standard:
              # Sharding key for this table.
              sharding-column: create_time
              # Exact-match (=) routing algorithm; fully-qualified class name.
              precise-algorithm-class-name: com.datawells.config.MyTablePreciseShardingAlgorithm
              # Range routing algorithm; fully-qualified class name.
              range-algorithm-class-name: com.datawells.config.MyTableRangeShardingAlgorithm
    props:
      sql:
        # Whether to print the rewritten SQL statements.
        show: true
  # Plain (non-sharding) data-source settings; also read by the dynamic
  # table-creation helper via spring.datasource.druid.*.
  datasource:
    type: com.alibaba.druid.pool.DruidDataSource
    druid:
      driver-class-name: com.mysql.cj.jdbc.Driver
      url: jdbc:mysql://********?useUnicode=true&characterEncoding=UTF-8&serverTimezone=Asia/Shanghai&useSSL=false
      username: ********
      password: ********
      # url: jdbc:mysql://********?useUnicode=true&characterEncoding=UTF-8&serverTimezone=Asia/Shanghai&useSSL=false
      # username: ********
      # password: ********
      validation-query: select 1
注意事项:由于 shardingjdbc 会在启动的时候检测数据库中是否存在配置文件中配置好的节点信息对应的表,如果不存在则会报错。上述配置是按月分表进行的配置;如果不想一开始就生成很多张表,可以采用下方的方法实现动态修改节点信息。不同版本可能会存在不同的情况,下方部分参考了他人的资料,并进行了一些适配修改。
import com.datawells.utils.ShardingSpringContextUtils;
import lombok.AllArgsConstructor;
import org.apache.shardingsphere.core.yaml.config.sharding.YamlTableRuleConfiguration;
import org.apache.shardingsphere.shardingjdbc.spring.boot.sharding.SpringBootShardingRuleConfigurationProperties;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.core.annotation.Order;
import org.springframework.stereotype.Component;
import javax.annotation.PostConstruct;
import javax.annotation.Resource;
import java.util.Map;
/**
 * Startup hook that reloads the physical-table-name cache and creates any
 * missing monthly tables for the sharded logical tables, so ShardingSphere's
 * startup check does not fail on tables that have not been created yet.
 */
@AllArgsConstructor
@Component
@Order(99)
public class ShardingJdbcTableNamesReLoad {

    // Sharding rule properties bound from spring.shardingsphere.sharding.*;
    // kept so actual-data-nodes can be rewritten dynamically in the future.
    private final SpringBootShardingRuleConfigurationProperties shardingRuleConfigurationProperties;

    @Resource
    ShardingSpringContextUtils shardingSpringContextUtils;

    @PostConstruct
    public void init() {
        // @Value injection was unreliable for this bean (see the original
        // commented-out fields), so the connection settings are read straight
        // from the Spring Environment instead.
        String url = requiredProperty("spring.datasource.druid.url");
        String userName = requiredProperty("spring.datasource.druid.username");
        String password = requiredProperty("spring.datasource.druid.password");

        // Prime the cache with the tables that already exist in the schema.
        ShardingAlgorithmTool.tableNameCacheReload(url, userName, password);

        // Ensure every monthly table for 2022-05 .. 2022-12 exists; missing
        // ones are created from the logical table's CREATE TABLE template.
        for (int year = 2022; year <= 2022; year++) {
            for (int month = 5; month <= 12; month++) {
                String suffix = String.format("%d%02d", year, month);
                ShardingAlgorithmTool.shardingTablesCheckAndCreatAndReturn(
                        "staff_access_pass_record", "staff_access_pass_record_" + suffix, url, userName, password);
                ShardingAlgorithmTool.shardingTablesCheckAndCreatAndReturn(
                        "engy_iot_message", "engy_iot_message_" + suffix, url, userName, password);
                ShardingAlgorithmTool.shardingTablesCheckAndCreatAndReturn(
                        "staff_access_sign_record", "staff_access_sign_record_" + suffix, url, userName, password);
            }
        }
    }

    /**
     * Reads a mandatory property from the Environment.
     *
     * @param key property name
     * @return the property value as a String
     * @throws IllegalStateException when the property is missing (the original
     *         {@code getProperty(key).toString()} produced a bare NPE instead)
     */
    private String requiredProperty(String key) {
        Object value = shardingSpringContextUtils.getProperty(key);
        if (value == null) {
            throw new IllegalStateException("Missing required property: " + key);
        }
        return value.toString();
    }
}
import com.datawells.utils.SpringUtil;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.core.env.Environment;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
@Slf4j
public class ShardingAlgorithmTool {
private static final HashSet<String> tableNameCache = new HashSet<>();
/**
* 判断 分表获取的表名是否存在 不存在则自动建表
*
* @param logicTableName 逻辑表名(表头)
* @param resultTableName 真实表名
* @return 确认存在于数据库中的真实表名*/
public static String shardingTablesCheckAndCreatAndReturn(String logicTableName, String resultTableName,String url,String username,String password) {
synchronized (logicTableName.intern()) {
// 缓存中有此表 返回
if (tableNameCache.contains(resultTableName)) {
return resultTableName;
}
// 缓存中无此表 建表 并添加缓存
List<String> sqlList = selectTableCreateSql(logicTableName);
for (int i = 0; i < sqlList.size(); i++) {
sqlList.set(i, sqlList.get(i).replace("CREATE TABLE", "CREATE TABLE IF NOT EXISTS").replace(logicTableName, resultTableName));
}
executeSql(sqlList,url,username,password);
tableNameCache.add(resultTableName);
}
return resultTableName;
}
/*
*
* 缓存重载方法
*/
public static void tableNameCacheReload(String url,String userName,String password) {
// 读取数据库中所有表名
List<String> tableNameList = getAllTableNameBySchema(url,userName,password);
// 删除旧的缓存(如果存在)
ShardingAlgorithmTool.tableNameCache.clear();
// 写入新的缓存
ShardingAlgorithmTool.tableNameCache.addAll(tableNameList);
}
private static void executeSql(List<String> sqlList,String url,String username,String password) {
// Environment env = SpringUtil.getApplicationContext().getEnvironment();
try (Connection conn = DriverManager.getConnection(Objects.requireNonNull(url), username, password)) {
//"spring.datasource.druid.url"
//"spring.datasource.druid.username"
//"spring.datasource.druid.password"
try (Statement st = conn.createStatement()) {
conn.setAutoCommit(false);
for (String sql : sqlList) {
st.execute(sql);
}
conn.commit();
} catch (Exception ex) {
System.out.println(ex.toString());
conn.rollback();
}
} catch (Exception ex) {
ex.printStackTrace();
}
}
private static List<String> selectTableCreateSql(String tableName) {
List<String> res = new ArrayList<>();
if (tableName.equals("staff_access_pass_record")) {
res.add("CREATE TABLE `staff_access_pass_record` (\n" +
" `id` varchar(32) NOT NULL COMMENT '主键',\n" +
+
") ENGINE = InnoDB CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci COMMENT = '表' ROW_FORMAT = Dynamic;");
} else if(tableName.equals("staff_access_sign_record")){
res.add("CREATE TABLE `staff_access_sign_record` (\n" +
" `id` varchar(60) NOT NULL COMMENT '主键',\n" +
") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 ROW_FORMAT=DYNAMIC COMMENT='表';");
} else if(tableName.equals("engy_iot_message")){
res.add( "CREATE TABLE `engy_iot_message` (\n" +
" `id` varchar(32) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL COMMENT 'uuid',\n" +
") ENGINE = InnoDB CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci COMMENT = '物' ROW_FORMAT = Dynamic;");
}
return res;
}
public static List<String> getAllTableNameBySchema(String url,String userName,String password) {
List<String> res = new ArrayList<>();
// Environment env = SpringUtil.getApplicationContext().getEnvironment();
try (Connection connection = DriverManager.getConnection(url, userName, password);
Statement st = connection.createStatement()) {
try (ResultSet rs = st.executeQuery("show TABLES like 'staff_access_pass_record%'")) {
while (rs.next()) {
res.add(rs.getString(1));
}
}
} catch (Exception e) {
e.printStackTrace();
}
return res;
// return null;
}
public static HashSet<String> cacheTableNames() {
return tableNameCache;
}
}
import org.nutz.lang.Strings;
import org.springframework.beans.BeansException;
import org.springframework.boot.env.OriginTrackedMapPropertySource;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.core.env.ConfigurableEnvironment;
import org.springframework.stereotype.Component;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
@Component
public class ShardingSpringContextUtils implements ApplicationContextAware {
private static ApplicationContext applicationContext;
/**
 * Stores the injected {@link ApplicationContext} in the static holder so the
 * static accessors of this class can use it.
 *
 * @param applicationContext the Spring context, injected by the framework
 * @throws BeansException per the {@code ApplicationContextAware} contract
 */
@Override
public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
    // Assign via the class name: writing a static field through `this`
    // compiles but is misleading and flagged by linters/compilers.
    ShardingSpringContextUtils.applicationContext = applicationContext;
}
/**
 * Returns the Spring application context captured at startup.
 *
 * @return the context, or null if this bean has not been initialized yet
 */
public static ApplicationContext getApplicationContext(){
return applicationContext;
}
/**
 * Returns the names of all beans defined in the Spring container.
 *
 * @return bean definition names
 */
public static String[] getBeanDefinitionNames(){
return applicationContext.getBeanDefinitionNames();
}
/**
 * Looks up a bean by name.
 *
 * @param name bean name
 * @return the bean instance
 */
public static Object getBean(String name){
return applicationContext.getBean(name);
}
/**
 * Looks up a bean by type.
 *
 * @param clazz bean type
 * @param <T> bean type parameter
 * @return the bean instance
 */
public static <T> T getBean(Class<T> clazz){
return applicationContext.getBean(clazz);
}
/**
 * Returns the runtime class of the bean registered under the given name.
 *
 * @param name bean name
 * @return the bean's class, or null if undeterminable
 */
public static Class<?> getType(String name){
return applicationContext.getType(name);
}
/**
 * Reads a single configuration property from the Spring Environment.
 *
 * @param propertyName property key
 * @return the property value, or null when the key is not defined
 */
public static Object getProperty(String propertyName){
    // Environment#getProperty is declared on the base interface, so the
    // ConfigurableEnvironment cast the original performed is unnecessary.
    return applicationContext.getEnvironment().getProperty(propertyName);
}
/**
 * Lists all configuration property names starting with the given prefix.
 *
 * <p>Fix: the original overwrote the result for each matching property source
 * (only the last {@code OriginTrackedMapPropertySource} "won") and returned
 * null when no such source existed. This version accumulates matches across
 * all sources and always returns a (possibly empty) list.
 *
 * @param propertyPrefix prefix to match property names against
 * @return all matching property names, never null
 */
public static List<String> getPropertyList(String propertyPrefix){
    ConfigurableEnvironment env = (ConfigurableEnvironment) applicationContext.getEnvironment();
    return env.getPropertySources().stream()
            .filter(ps -> ps instanceof OriginTrackedMapPropertySource)
            .flatMap(ps -> Arrays.stream(((OriginTrackedMapPropertySource) ps).getPropertyNames()))
            .filter(name -> name.startsWith(propertyPrefix))
            .collect(Collectors.toList());
}
/**
 * Tells whether a configuration property exists.
 *
 * @param propertyName property key to look up
 * @param notBlank when true, a blank/empty value also counts as "absent"
 * @return true when the property is present (and non-blank, if required)
 */
public static boolean containProperty(String propertyName,boolean notBlank){
    Object prop = applicationContext.getEnvironment().getProperty(propertyName);
    // Absent key -> false; otherwise only a blank value can disqualify it,
    // and only when the caller asked for notBlank.
    return prop != null && !(notBlank && Strings.isBlank(prop.toString()));
}
自定义查询策略实现:
import com.google.common.collect.Range;
import groovy.util.logging.Slf4j;
import org.apache.shardingsphere.api.sharding.standard.RangeShardingAlgorithm;
import org.apache.shardingsphere.api.sharding.standard.RangeShardingValue;
import java.util.Collection;
import java.util.LinkedHashSet;
import java.util.Set;
// NOTE(review): this @Slf4j resolves to groovy.util.logging.Slf4j (see the
// imports above), which does not generate a `log` field for a Java class -
// it should be lombok.extern.slf4j.Slf4j. System.out is kept as a stand-in
// until the import is corrected.
@Slf4j
public class MyTableRangeShardingAlgorithm implements RangeShardingAlgorithm<String> {
    /**
     * Selects, from the candidate physical tables, those whose yyyyMM suffix
     * falls inside the queried range of the sharding column.
     *
     * @param tableNameList candidate physical table names (each assumed to end
     *                      with a 6-digit yyyyMM suffix)
     * @param shardingValue range condition on the sharding column; endpoint
     *                      strings are assumed to start with "yyyy-MM"
     *                      (e.g. "2022-05-01 ...") - TODO confirm format.
     *                      Unbounded ranges would throw from lowerEndpoint()/
     *                      upperEndpoint(); verify callers always pass both.
     * @return the physical tables the query must be routed to
     */
    @Override
    public Collection<String> doSharding(Collection<String> tableNameList, RangeShardingValue<String> shardingValue) {
        // The original mixed an SLF4J "{}" placeholder with string
        // concatenation, printing the literal braces; use plain concatenation.
        System.out.println("[MyTableRangeShardingAlgorithm] shardingValue: " + shardingValue);
        Set<String> tableNameResultList = new LinkedHashSet<>();
        Range<String> rangeValue = shardingValue.getValueRange();
        // "2022-05-..." -> 202205 (strip the dash from the first 7 chars).
        int lowInt = Integer.parseInt(rangeValue.lowerEndpoint().substring(0, 7).replaceAll("-", ""));
        int upperInt = Integer.parseInt(rangeValue.upperEndpoint().substring(0, 7).replaceAll("-", ""));
        for (String tableNameItem : tableNameList) {
            // Compare each table's trailing yyyyMM suffix against the range.
            int tableItem = Integer.parseInt(tableNameItem.substring(tableNameItem.length() - 6));
            if (tableItem >= lowInt && tableItem <= upperInt) {
                tableNameResultList.add(tableNameItem);
            }
        }
        return tableNameResultList;
    }
}
注意:采用mybatis-plus 则需要配置分页插件,否则会产生分不了页的情况:
import com.baomidou.mybatisplus.extension.plugins.PaginationInterceptor;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
 * MyBatis-Plus configuration.
 */
@Configuration
public class MybatisPlusConfig {
    /**
     * Pagination plugin. Without it, pagination does not take effect when
     * MyBatis-Plus is used together with sharding.
     */
    @Bean
    public PaginationInterceptor paginationInterceptor() {
        return new PaginationInterceptor();
    }
}
mybatis-plus 配置
mybatis-plus:
  mapper-locations: classpath*:/mapper/**/*.xml
  # Entity scan; separate multiple packages with commas or semicolons.
  typeAliasesPackage: com.datawells.modules.*.model
  global-config:
    # Database-related settings.
    db-config:
      # Primary key type. AUTO: database auto-increment, INPUT: user-supplied,
      # ID_WORKER: globally unique numeric id, UUID: globally unique UUID.
      id-type: AUTO
      logic-delete-value: 1
      logic-not-delete-value: 0
    banner: false
  # Native MyBatis configuration.
  configuration:
    map-underscore-to-camel-case: true
    cache-enabled: false
    call-setters-on-nulls: true
    jdbc-type-for-null: 'null'