配置依赖包
<properties>
<maven.compiler.source>8</maven.compiler.source>
<maven.compiler.target>8</maven.compiler.target>
<spring.boot.version>2.7.17</spring.boot.version>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
</properties>
<dependencies>
<!-- Spring Boot core + web + test, all pinned to the same version property -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter</artifactId>
<version>${spring.boot.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
<version>${spring.boot.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<version>${spring.boot.version}</version>
<scope>test</scope>
</dependency>
<!-- JDBC driver + Druid connection pool (referenced by the data-source config below) -->
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>8.0.28</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>1.2.19</version>
</dependency>
<!-- Sharding-JDBC 4.x starter; note the property names it expects differ from 5.x -->
<dependency>
<groupId>org.apache.shardingsphere</groupId>
<artifactId>sharding-jdbc-spring-boot-starter</artifactId>
<version>4.1.1</version>
</dependency>
<!-- MyBatis-Plus ORM layer used by the mappers in the tests -->
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus-boot-starter</artifactId>
<version>3.5.3.2</version>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<version>1.18.26</version>
</dependency>
<!-- Hutool utility library (DateUtil is used by the sharding algorithms) -->
<dependency>
<groupId>cn.hutool</groupId>
<artifactId>hutool-all</artifactId>
<version>5.8.25</version>
</dependency>
</dependencies>
增加mybatisplus和shardingsphere配置
server:
  port: 8080

spring:
  main:
    # Both sharding-jdbc and druid try to register a DataSource bean; allow the
    # ShardingSphere one to override instead of failing at startup.
    allow-bean-definition-overriding: true
  shardingsphere:
    # Data sources taking part in sharding (single database "db0" here).
    datasource:
      # Comma-separated list of data-source names declared below.
      names: db0
      db0:
        type: com.alibaba.druid.pool.DruidDataSource
        driver-class-name: com.mysql.cj.jdbc.Driver
        url: jdbc:mysql://localhost:3306/test?serverTimezone=GMT%2B8&useUnicode=true&characterEncoding=utf-8&useSSL=false&autoReconnect=true&rewriteBatchedStatements=true
        username: root
        password: 123456
    props:
      sql:
        show: true # print logic + actual SQL to the console for easy inspection
    sharding:
      tables:
        # Logic table to shard.
        trace:
          # Distributed primary-key generation.
          key-generator:
            # Column the generated key is written to.
            column: id
            # Key generation strategy.
            type: SNOWFLAKE
          # Physical tables trace_2401 .. trace_2912 (yyMM-style suffixes; the
          # inline {2401..2912} range also emits non-month values such as 2413 —
          # only tables that actually exist are loaded, per the startup log).
          actual-data-nodes: db0.trace_$->{2401..2912}
          table-strategy: # table sharding strategy
            standard:
              sharding-column: trace_time
              precise-algorithm-class-name: com.tom.configuration.TraceShardingAlgorithm
              range-algorithm-class-name: com.tom.configuration.TraceRangeShardingAlgorithm

# mybatis-plus
mybatis-plus:
  configuration:
    # snake_case columns -> camelCase properties
    map-underscore-to-camel-case: true
  global-config:
    # logical (soft) delete values
    db-config:
      logic-delete-value: true
      logic-not-delete-value: false
  mapper-locations: classpath:mapper/*.xml
  type-handlers-package: com.tom.typehandler
这里需要重点关注上面的 precise-algorithm-class-name 和 range-algorithm-class-name 两个配置:它们分别指定自定义的分片算法类,前者处理分片键的等值查询(= / IN),后者处理范围查询(BETWEEN / > / <)。两个类的实现如下。
import org.apache.shardingsphere.api.sharding.standard.PreciseShardingAlgorithm;
import org.apache.shardingsphere.api.sharding.standard.PreciseShardingValue;

import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Collection;

/**
 * Precise (equality / IN) sharding algorithm for the {@code trace} logic table.
 *
 * <p>Routes a row to the physical table whose name ends with the "yyMM" suffix
 * of the {@code trace_time} sharding value, e.g. 2024-04-19 -> {@code trace_2404}.
 *
 * @Date 2024/4/30 13:51
 * @Author tom
 **/
public class TraceShardingAlgorithm implements PreciseShardingAlgorithm<LocalDateTime> {

    /** Formats the sharding value into the two-digit-year + month table suffix. */
    private static final DateTimeFormatter SUFFIX_FORMAT = DateTimeFormatter.ofPattern("yyMM");

    /**
     * @param availableTargetNames physical table names configured in actual-data-nodes
     * @param shardingValue        the trace_time value from the SQL
     * @return the single physical table name to route to
     * @throws IllegalStateException if no configured table matches the value
     */
    @Override
    public String doSharding(Collection<String> availableTargetNames,
                             PreciseShardingValue<LocalDateTime> shardingValue) {
        // Derive "yyMM" directly from the LocalDateTime instead of formatting a
        // full date string with hutool and slicing substrings — the substring
        // indices silently depended on hutool's default "yyyy-MM-dd HH:mm:ss" pattern.
        String tableSuffix = shardingValue.getValue().format(SUFFIX_FORMAT);
        for (String target : availableTargetNames) {
            // endsWith is stricter than the original contains(): a table name
            // should only match when its trailing suffix is the target month.
            if (target.endsWith(tableSuffix)) {
                return target;
            }
        }
        // The original returned null here, which later surfaced as an obscure
        // NPE inside ShardingSphere's routing engine. Fail fast with context.
        throw new IllegalStateException("No actual table found for sharding value "
                + shardingValue.getValue() + " (expected suffix _" + tableSuffix + ")");
    }
}
import com.google.common.collect.Range;
import org.apache.shardingsphere.api.sharding.standard.RangeShardingAlgorithm;
import org.apache.shardingsphere.api.sharding.standard.RangeShardingValue;

import java.time.Duration;
import java.time.LocalDateTime;
import java.time.YearMonth;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

/**
 * Range (BETWEEN / &gt; / &lt;) sharding algorithm for the {@code trace} logic table.
 *
 * <p>Expands a trace_time range into the list of monthly physical tables
 * ({@code trace_yyMM}) covering every calendar month in the range.
 *
 * @Date 2024/4/30 14:18
 * @Author tom
 **/
public class TraceRangeShardingAlgorithm implements RangeShardingAlgorithm<LocalDateTime> {

    /** Two-digit-year + month suffix, e.g. 2024-04 -> "2404". */
    private static final DateTimeFormatter SUFFIX_FORMAT = DateTimeFormatter.ofPattern("yyMM");

    /**
     * @param availableTargetNames physical table names configured in actual-data-nodes
     * @param shardingValue        the trace_time range condition from the SQL
     * @return the physical table names the query must be routed to
     * @throws RuntimeException if the bounded range spans more than one year
     */
    @Override
    public Collection<String> doSharding(Collection<String> availableTargetNames,
                                         RangeShardingValue<LocalDateTime> shardingValue) {
        Range<LocalDateTime> valueRange = shardingValue.getValueRange();
        // Open-ended conditions (trace_time > ? / < ?) produce a Range without
        // one endpoint; lowerEndpoint()/upperEndpoint() would throw. Route to
        // every configured table in that case rather than crashing the query.
        if (!valueRange.hasLowerBound() || !valueRange.hasUpperBound()) {
            return availableTargetNames;
        }
        LocalDateTime lowerEndpoint = valueRange.lowerEndpoint();
        LocalDateTime upperEndpoint = valueRange.upperEndpoint();
        if (Duration.between(lowerEndpoint, upperEndpoint).toDays() > 365) {
            throw new RuntimeException("不能查询大于一年的数据");
        }
        String logicTableName = shardingValue.getLogicTableName();
        // Iterate over calendar months rather than stepping the raw lower
        // timestamp with plusMonths(1): the original could skip the last month
        // (e.g. Jan 31 -> Feb 29 -> Mar 29 overshoots an upper bound of Mar 1,
        // silently dropping trace_2403 from the route).
        List<String> tables = new ArrayList<>();
        YearMonth current = YearMonth.from(lowerEndpoint);
        YearMonth last = YearMonth.from(upperEndpoint);
        while (!current.isAfter(last)) {
            tables.add(logicTableName + "_" + current.format(SUFFIX_FORMAT));
            current = current.plusMonths(1);
        }
        return tables;
    }
}
开始测试
/**
 * Integration tests exercising the precise (insert) and range (between select)
 * sharding algorithms through the MyBatis-Plus mapper.
 */
@SpringBootTest
public class ShardingTest {

    @Resource
    private TraceMapper traceMapper;

    /** Inserts one row; the precise algorithm should route it to trace_2404. */
    @Test
    public void testInsertTrace() {
        TraceDO traceDO = new TraceDO();
        traceDO.setTraceInfo("traceInfo");
        // Decimal literals here: the original "04"/"00"/"01"/"02" are OCTAL
        // integer literals in Java (same values for digits 0-7, but a trap —
        // e.g. 09 would not even compile).
        traceDO.setTraceTime(LocalDateTime.of(2024, 4, 19, 0, 1, 2));
        traceMapper.insert(traceDO);
    }

    /** Range query spanning April-May 2024; should fan out to trace_2404 and trace_2405. */
    @Test
    public void testSelectTrace() {
        LocalDateTime start = LocalDateTime.of(2024, 4, 1, 0, 0, 0);
        LocalDateTime end = LocalDateTime.of(2024, 5, 19, 0, 1, 2);
        List<TraceDO> traceDOS = traceMapper.selectList(new LambdaQueryWrapper<TraceDO>()
                .between(TraceDO::getTraceTime, start, end)
                .orderByDesc(TraceDO::getTraceTime)
                .last("limit 1"));
        traceDOS.forEach(System.out::println);
    }
}
执行插入方法
2024-05-07 15:49:03.530 INFO 15316 --- [ main] com.alibaba.druid.pool.DruidDataSource : {dataSource-1} inited
2024-05-07 15:49:03.675 INFO 15316 --- [ main] o.a.s.core.log.ConfigurationLogger : ShardingRuleConfiguration:
tables:
trace:
actualDataNodes: db0.trace_$->{2401..2912}
keyGenerator:
column: id
type: SNOWFLAKE
logicTable: trace
tableStrategy:
standard:
preciseAlgorithmClassName: com.tom.configuration.TraceShardingAlgorithm
rangeAlgorithmClassName: com.tom.configuration.TraceRangeShardingAlgorithm
shardingColumn: trace_time
2024-05-07 15:49:03.677 INFO 15316 --- [ main] o.a.s.core.log.ConfigurationLogger : Properties:
sql.show: 'true'
2024-05-07 15:49:03.682 INFO 15316 --- [ main] ShardingSphere-metadata : Loading 1 logic tables' meta data.
2024-05-07 15:49:03.700 INFO 15316 --- [ main] ShardingSphere-metadata : Loading 9 tables' meta data.
2024-05-07 15:49:03.763 INFO 15316 --- [ main] ShardingSphere-metadata : Meta data load finished, cost 85 milliseconds.
_ _ |_ _ _|_. ___ _ | _
| | |\/|_)(_| | |_\ |_)||_|_\
/ |
3.5.3.2
2024-05-07 15:49:04.252 INFO 15316 --- [ main] com.tom.test.ShardingTest : Started ShardingTest in 2.855 seconds (JVM running for 3.531)
2024-05-07 15:49:04.850 INFO 15316 --- [ main] ShardingSphere-SQL : Logic SQL: INSERT INTO trace ( id, trace_info, trace_time ) VALUES ( ?, ?, ? )
2024-05-07 15:49:04.850 INFO 15316 --- [ main] ShardingSphere-SQL : SQLStatement: InsertStatementContext(super=CommonSQLStatementContext(sqlStatement=org.apache.shardingsphere.sql.parser.sql.statement.dml.InsertStatement@15cee630, tablesContext=org.apache.shardingsphere.sql.parser.binder.segment.table.TablesContext@2e40fdbd), tablesContext=org.apache.shardingsphere.sql.parser.binder.segment.table.TablesContext@2e40fdbd, columnNames=[id, trace_info, trace_time], insertValueContexts=[InsertValueContext(parametersCount=3, valueExpressions=[ParameterMarkerExpressionSegment(startIndex=61, stopIndex=61, parameterMarkerIndex=0), ParameterMarkerExpressionSegment(startIndex=64, stopIndex=64, parameterMarkerIndex=1), ParameterMarkerExpressionSegment(startIndex=67, stopIndex=67, parameterMarkerIndex=2)], parameters=[1787751489014648833, traceInfo, 2024-04-19T00:01:02])], generatedKeyContext=Optional.empty)
2024-05-07 15:49:04.850 INFO 15316 --- [ main] ShardingSphere-SQL : Actual SQL: db0 ::: INSERT INTO trace_2404 ( id, trace_info, trace_time ) VALUES (?, ?, ?) ::: [1787751489014648833, traceInfo, 2024-04-19T00:01:02]
2024-05-07 15:49:04.889 INFO 15316 --- [ionShutdownHook] com.alibaba.druid.pool.DruidDataSource : {dataSource-1} closing ...
2024-05-07 15:49:04.984 INFO 15316 --- [ionShutdownHook] com.alibaba.druid.pool.DruidDataSource : {dataSource-1} closed
执行查询方法
2024-05-07 15:50:05.585 INFO 4632 --- [ main] o.a.s.core.log.ConfigurationLogger : ShardingRuleConfiguration:
tables:
trace:
actualDataNodes: db0.trace_$->{2401..2912}
keyGenerator:
column: id
type: SNOWFLAKE
logicTable: trace
tableStrategy:
standard:
preciseAlgorithmClassName: com.tom.configuration.TraceShardingAlgorithm
rangeAlgorithmClassName: com.tom.configuration.TraceRangeShardingAlgorithm
shardingColumn: trace_time
2024-05-07 15:50:05.587 INFO 4632 --- [ main] o.a.s.core.log.ConfigurationLogger : Properties:
sql.show: 'true'
2024-05-07 15:50:05.591 INFO 4632 --- [ main] ShardingSphere-metadata : Loading 1 logic tables' meta data.
2024-05-07 15:50:05.610 INFO 4632 --- [ main] ShardingSphere-metadata : Loading 9 tables' meta data.
2024-05-07 15:50:05.679 INFO 4632 --- [ main] ShardingSphere-metadata : Meta data load finished, cost 92 milliseconds.
_ _ |_ _ _|_. ___ _ | _
| | |\/|_)(_| | |_\ |_)||_|_\
/ |
3.5.3.2
2024-05-07 15:50:06.207 INFO 4632 --- [ main] com.tom.test.ShardingTest : Started ShardingTest in 2.971 seconds (JVM running for 3.65)
2024-05-07 15:50:06.973 INFO 4632 --- [ main] ShardingSphere-SQL : Logic SQL: SELECT id,trace_info,trace_time,deleted,version FROM trace WHERE deleted=false AND (trace_time BETWEEN ? AND ?) ORDER BY trace_time DESC limit 1
2024-05-07 15:50:06.973 INFO 4632 --- [ main] ShardingSphere-SQL : SQLStatement: SelectStatementContext(super=CommonSQLStatementContext(sqlStatement=org.apache.shardingsphere.sql.parser.sql.statement.dml.SelectStatement@283bb8b7, tablesContext=org.apache.shardingsphere.sql.parser.binder.segment.table.TablesContext@25e95af1), tablesContext=org.apache.shardingsphere.sql.parser.binder.segment.table.TablesContext@25e95af1, projectionsContext=ProjectionsContext(startIndex=8, stopIndex=47, distinctRow=false, projections=[ColumnProjection(owner=null, name=id, alias=Optional.empty), ColumnProjection(owner=null, name=trace_info, alias=Optional.empty), ColumnProjection(owner=null, name=trace_time, alias=Optional.empty), ColumnProjection(owner=null, name=deleted, alias=Optional.empty), ColumnProjection(owner=null, name=version, alias=Optional.empty)]), groupByContext=org.apache.shardingsphere.sql.parser.binder.segment.select.groupby.GroupByContext@4fa8297b, orderByContext=org.apache.shardingsphere.sql.parser.binder.segment.select.orderby.OrderByContext@6644bdf5, paginationContext=org.apache.shardingsphere.sql.parser.binder.segment.select.pagination.PaginationContext@13cf5f8a, containsSubquery=false)
2024-05-07 15:50:06.973 INFO 4632 --- [ main] ShardingSphere-SQL : Actual SQL: db0 ::: SELECT id,trace_info,trace_time,deleted,version FROM trace_2404 WHERE deleted=false AND (trace_time BETWEEN ? AND ?) ORDER BY trace_time DESC limit 1 ::: [2024-04-01T00:00, 2024-05-19T00:01:02]
2024-05-07 15:50:06.973 INFO 4632 --- [ main] ShardingSphere-SQL : Actual SQL: db0 ::: SELECT id,trace_info,trace_time,deleted,version FROM trace_2405 WHERE deleted=false AND (trace_time BETWEEN ? AND ?) ORDER BY trace_time DESC limit 1 ::: [2024-04-01T00:00, 2024-05-19T00:01:02]
TraceDO(id=1785190384815861761, traceInfo=traceInfo, traceTime=2024-05-19T00:01:02)
2024-05-07 15:50:07.135 INFO 4632 --- [ionShutdownHook] com.alibaba.druid.pool.DruidDataSource : {dataSource-1} closing ...
2024-05-07 15:50:07.140 INFO 4632 --- [ionShutdownHook] com.alibaba.druid.pool.DruidDataSource : {dataSource-1} closed
Process finished with exit code 0