1. 添加 Maven 依赖
<!-- ShardingSphere-JDBC Spring Boot starter: provides data sharding and read/write splitting -->
<dependency>
<groupId>org.apache.shardingsphere</groupId>
<artifactId>shardingsphere-jdbc-core-spring-boot-starter</artifactId>
<version>5.1.1</version>
</dependency>
<!-- Druid connection pool Spring Boot starter: used as the pooled DataSource implementation below -->
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid-spring-boot-starter</artifactId>
<version>1.1.13</version>
</dependency>
2. 配置 application.yml 文件
# Shared Druid connection-pool settings, merged into every data source below
# via the YAML anchor / merge key (<<: *common).
common: &common
  type: com.alibaba.druid.pool.DruidDataSource
  driver-class-name: com.mysql.cj.jdbc.Driver
  initial-size: 5
  min-idle: 5
  maxActive: 20
  # Max time (ms) to wait when acquiring a connection from the pool
  maxWait: 60000
  # Interval (ms) between idle-connection eviction runs
  timeBetweenEvictionRunsMillis: 60000
  # Minimum time (ms) a connection must stay idle before it can be evicted
  minEvictableIdleTimeMillis: 300000
  # For Oracle, uncomment the validation query below
  # validationQuery: SELECT 1 FROM DUAL
  testWhileIdle: true
  testOnBorrow: false
  testOnReturn: false
  # Enable PSCache and set its per-connection size
  poolPreparedStatements: true
  maxPoolPreparedStatementPerConnectionSize: 20
  # Monitoring filters; removing them disables SQL statistics on the stat page,
  # 'wall' is the SQL firewall
  filters: stat,wall,slf4j
  # Enables mergeSql and slow-SQL logging.
  # FIX: the original value used "\=" — that is a .properties escape; in YAML the
  # backslash is literal and would corrupt the property string.
  connectionProperties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
  wall:
    # Allow multiple statements in one request (needed by allowMultiQueries usage)
    multi-statement-allow: true

spring:
  autoconfigure:
    # Exclude Druid auto-configuration so it does not clash with the
    # ShardingSphere-managed data sources
    exclude: com.alibaba.druid.spring.boot.autoconfigure.DruidDataSourceAutoConfigure
  datasource:
    druid:
      stat-view-servlet:
        enabled: true
        loginUsername: admin
        # Quoted: an unquoted all-digit value is parsed as an integer by YAML
        loginPassword: "123456"
        # Empty = no IP allow-list restriction on the monitoring page
        allow: ""
      web-stat-filter:
        enabled: true
  shardingsphere:
    props:
      # Print executed SQL in the log; more properties:
      # https://shardingsphere.apache.org/document/current/cn/user-manual/shardingsphere-jdbc/props/
      sql-show: true
    datasource:
      names: master,slave1,slave2
      master:
        url: jdbc:mysql://192.168.88.131:3307/travel_0?characterEncoding=UTF-8&useUnicode=true&useSSL=false&tinyInt1isBit=false&allowPublicKeyRetrieval=true&allowMultiQueries=true&serverTimezone=Asia/Shanghai
        username: root
        password: "123456"
        <<: *common
      slave1:
        url: jdbc:mysql://192.168.88.131:3307/travel_1?characterEncoding=UTF-8&useUnicode=true&useSSL=false&tinyInt1isBit=false&allowPublicKeyRetrieval=true&allowMultiQueries=true&serverTimezone=Asia/Shanghai
        username: root
        password: "123456"
        <<: *common
      # FIX: slave2 was declared in `names` and referenced by
      # read-data-source-names but never defined, which fails at startup.
      # NOTE(review): adjust the URL to point at your real second replica.
      slave2:
        url: jdbc:mysql://192.168.88.131:3307/travel_2?characterEncoding=UTF-8&useUnicode=true&useSSL=false&tinyInt1isBit=false&allowPublicKeyRetrieval=true&allowMultiQueries=true&serverTimezone=Asia/Shanghai
        username: root
        password: "123456"
        <<: *common
    rules:
      # Read/write splitting: writes go to the master, reads are load-balanced
      # across the slaves. NOTE: inside a transaction everything is routed to
      # the master to guarantee consistency, so splitting will not apply there.
      readwrite-splitting:
        data-sources:
          ms:
            # Static or Dynamic; Dynamic requires the HA feature, see
            # https://blog.csdn.net/ShardingSphere/article/details/123243843
            type: Static
            loadBalancerName: round-robin
            props:
              write-data-source-name: master
              read-data-source-names: slave1,slave2
        load-balancers:
          # The key name is arbitrary but must match loadBalancerName above
          round-robin:
            type: RANDOM  # one of RANDOM, ROUND_ROBIN, WEIGHT
      # Sharding steps: declare the actual data nodes (how many physical tables
      # the logical table is split into) -> the sharding algorithm on the key
      # column -> optionally a key generator (UUID or SNOWFLAKE).
      sharding:
        tables:
          # Sharding rule for the logical table cl_user
          cl_user:
            # Four physical shards cl_user_0..cl_user_3; expression syntax:
            # https://shardingsphere.apache.org/document/current/cn/features/sharding/concept/inline-expression/
            actual-data-nodes: slave1.cl_user_$->{0..3}
            table-strategy:
              standard:
                sharding-column: id
                sharding-algorithm-name: table-inline
            # key-generate-strategy:
            #   column: id
            #   key-generator-name: snowflake
        # Sharding algorithm definitions; built-in algorithm types:
        # https://shardingsphere.apache.org/document/current/cn/user-manual/shardingsphere-jdbc/builtin-algorithm/sharding/
        sharding-algorithms:
          table-inline:
            type: HASH_MOD
            props:
              sharding-count: 4
        # key-generators:
        #   snowflake:
        #     type: SNOWFLAKE
3. 注意事项
当前环境为 Spring Boot 2.7、MySQL 8、ShardingSphere-JDBC 5.1.1。ShardingSphere 每个版本的区别都很大,配置方式也大不相同,学习时请注意与自己使用的版本保持一致。