# ShardingSphere configuration
# NOTE(review): with shardingsphere-jdbc-core-spring-boot-starter the whole
# tree must live under the `spring:` prefix (spring.shardingsphere.*);
# without it the auto-configuration never sees these properties.
spring:
  shardingsphere:
    datasource:
      # logical data source name(s)
      names: superid-dev
      superid-dev:
        type: com.alibaba.druid.pool.DruidDataSource
        driver-class-name: com.mysql.cj.jdbc.Driver
        url: jdbc:mysql://rm-uf6r4x96k689
        username: aa
        password: aa
    rules:
      sharding:
        tables:
          # logical table name
          action_log:
            # data nodes action_log_0 .. action_log_9 (Groovy expression)
            actual-data-nodes: superid-dev.action_log_$->{0..9}
            table-strategy:
              # standard strategy: single sharding column
              standard:
                sharding-column: id
                sharding-algorithm-name: id_inline
        sharding-algorithms:
          id_inline:
            type: INLINE
            props:
              # was `action_log_${id % 10}`: the `${...}` form collides with
              # Spring's property-placeholder resolution; ShardingSphere's
              # inline expressions use `$->{...}` (matching actual-data-nodes)
              algorithm-expression: action_log_$->{id % 10}
    props:
      # print the actual routed SQL for debugging
      sql-show: true
我的配置如下。
注意：引入依赖后一定要刷新 Maven 配置并执行一次 clean，否则会一直报数据源错误。
<!-- sharding-jdbc -->
<dependency>
<groupId>org.apache.shardingsphere</groupId>
<artifactId>shardingsphere-jdbc-core-spring-boot-starter</artifactId>
<version>${shardingsphere.version}</version>
</dependency>
<!-- NOTE(review): this property line belongs inside the <properties> section
     of the POM, not next to the <dependency> entry -->
<shardingsphere.version>5.1.2</shardingsphere.version>
spring:
  shardingsphere:
    # NOTE(review): the original paste had a stray "是否开启" (enabled?) comment
    # with no key under it — possibly an `enabled: true` line was lost; confirm.
    datasource:
      # logical data source name
      names: m1
      # physical data source backing the logical name
      m1:
        type: com.zaxxer.hikari.HikariDataSource
        driver-class-name: com.mysql.cj.jdbc.Driver
        url: jdbc:mysql://localhost:3306/test?useSSL=false&autoReconnect=true&characterEncoding=UTF-8&serverTimezone=UTC
        username: root
        password: root
    # sharding rules
    rules:
      sharding:
        tables:
          # logical table name
          user:
            # data nodes m1.user_0 and m1.user_1 (Groovy expression)
            actual-data-nodes: m1.user_$->{0..1}
            table-strategy:
              # standard strategy: single sharding column
              standard:
                sharding-column: cid
                sharding-algorithm-name: user_inline
            # primary-key generation strategy
            key-generate-strategy:
              # key column
              column: cid
              # generator algorithm name (snowflake is the common choice)
              key-generator-name: snowflake
        key-generators:
          snowflake:
            type: SNOWFLAKE
        sharding-algorithms:
          user_inline:
            # was `inline`: algorithm type names are the uppercase SPI
            # identifiers (cf. SNOWFLAKE above and INLINE elsewhere);
            # lowercase is not matched by some 5.x versions
            type: INLINE
            props:
              algorithm-expression: user_$->{cid % 2}
    props:
      # print the actual routed SQL for debugging
      sql-show: true
可以参考这篇文章，它的配置是正确的：
https://blog.csdn.net/qq_52423918/article/details/125004312?ops_request_misc=&request_id=&biz_id=102&utm_term=Insert%20statement%20does%20not%20supp&utm_medium=distribute.pc_search_result.none-task-blog-2allsobaiduweb~default-0-125004312.142v59control_1,201v3control_1&spm=1018.2226.3001.4187