引入依赖
<dependencies>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <dependency>
        <groupId>org.mybatis.spring.boot</groupId>
        <artifactId>mybatis-spring-boot-starter</artifactId>
        <version>2.0.1</version>
    </dependency>
    <!-- plain druid artifact: do NOT use the druid-spring-boot-starter,
         its auto-configuration conflicts with sharding-jdbc's data sources -->
    <dependency>
        <groupId>com.alibaba</groupId>
        <artifactId>druid</artifactId>
        <version>1.1.14</version>
    </dependency>
    <dependency>
        <groupId>org.apache.shardingsphere</groupId>
        <artifactId>sharding-jdbc-spring-boot-starter</artifactId>
        <version>4.0.0</version>
    </dependency>
    <dependency>
        <groupId>mysql</groupId>
        <artifactId>mysql-connector-java</artifactId>
        <!-- version managed by the spring-boot parent -->
        <scope>runtime</scope>
    </dependency>
</dependencies>
数据分片
配置
# Sharding configuration (ShardingSphere 4.x spring-boot starter).
# NOTE: indentation reconstructed; `worker.id = 1` fixed to a real YAML
# mapping entry (`worker.id: 1`) — with `=` the property never bound.
spring:
  shardingsphere:
    datasource:
      # Data source names; comma separated, no spaces (4.0.x splits on ","
      # without trimming, so a space would break the data source lookup)
      names: ds0,ds1
      ds0:
        type: com.alibaba.druid.pool.DruidDataSource
        # NOTE(review): com.mysql.jdbc.Driver is the 5.x driver class;
        # switch to com.mysql.cj.jdbc.Driver if connector/J 8.x is used
        driver-class-name: com.mysql.jdbc.Driver
        url: jdbc:mysql://192.168.211.128:3306/sharding-jdbc?useUnicode=true&characterEncoding=UTF-8&useSSL=false
        username: root
        password: root
      ds1:
        type: com.alibaba.druid.pool.DruidDataSource
        driver-class-name: com.mysql.jdbc.Driver
        url: jdbc:mysql://192.168.211.129:3306/sharding-jdbc?useUnicode=true&characterEncoding=UTF-8&useSSL=false
        username: root
        password: root
    sharding:
      tables:
        # Logical table name (free to choose); this is the name used in SQL
        t_order:
          # Actual tables behind the logical one: data source + real table.
          # ds$->{0..1}    -> ds0 or ds1 (database strategy picks which)
          # t_order$->{0..1} -> t_order0 or t_order1 (table strategy picks which)
          actual-data-nodes: ds$->{0..1}.t_order$->{0..1}
          # Automatic primary key generation
          key-generator:
            # column that receives the generated key
            column: order_id
            # snowflake algorithm
            type: SNOWFLAKE
            props:
              # worker.id must be unique per instance in a distributed setup
              worker.id: 1
          # Table sharding strategy
          table-strategy:
            # inline row expression sharding
            inline:
              # sharding column
              sharding-column: order_id
              # groovy inline expression: order_id % 2 decides whether the
              # SQL runs against t_order0 or t_order1
              algorithm-expression: t_order$->{order_id % 2}
          # Database sharding strategy
          database-strategy:
            inline:
              sharding-column: user_id
              algorithm-expression: ds$->{user_id % 2}
        # Order item table
        t_order_item:
          actual-data-nodes: ds$->{0..1}.t_order_item$->{0..1}
          key-generator:
            column: order_item_id
            type: SNOWFLAKE
            props:
              worker.id: 1
          table-strategy:
            inline:
              sharding-column: order_id
              algorithm-expression: t_order_item$->{order_id % 2}
          database-strategy:
            inline:
              sharding-column: user_id
              algorithm-expression: ds$->{user_id % 2}
        # Common (broadcast) table
        t_dict:
          key-generator:
            column: id
            type: SNOWFLAKE
            props:
              worker.id: 1
      # Default database strategy, usable when all tables shard the same way
      # default-database-strategy:
      #   inline:
      #     sharding-column: user_id
      #     algorithm-expression: ds$->{user_id % 2}
      # Binding tables: tables sharded together must be bound, otherwise
      # joins between them produce a cartesian product of routes.
      # Bound tables must share the same sharding-column configuration.
      binding-tables:
        - t_order,t_order_item
      # Broadcast tables: writes are replicated to every data source
      broadcast-tables: t_dict
    props:
      # print logical and actual SQL
      sql.show: true
logging:
  level:
    root: error
    # sharding-jdbc emits its SQL log at info level
    ShardingSphere-SQL: info
分片测试
// DAO for the sharded order tables. All SQL references the LOGICAL table
// names (t_order, t_order_item) declared in the sharding configuration;
// sharding-jdbc rewrites them to the actual tables at runtime.
public interface OrderDAO {
// use the logical table name from the configuration in the SQL
@Insert("insert into t_order(user_id, product_name) values(#{userId}, #{productName})")
// write the generated key back into order.orderId
@Options(useGeneratedKeys = true, keyProperty = "orderId")
int insertOrder(Order order);
@Insert("insert into t_order_item(order_id, create_time, user_id) values(#{orderId}, #{createTime}, #{userId})")
int insertOrderItem(OrderItem item);
// table aliases must use "as"; omitting it makes the statement fail here
@Select("select * from t_order as o left join t_order_item as item on o.order_id = item.order_id order by o.order_id desc")
List<Map> selectAll();
}
@Transactional
public void addOrder() {
    // Insert 10 orders, alternating the user id so that the database
    // sharding strategy (user_id % 2) routes rows to both data sources.
    for (int i = 0; i < 10; i++) {
        // even iterations -> user 2, odd iterations -> user 1
        Long userId = (i % 2 == 0) ? 2L : 1L;

        // insert the order; the generated order_id is written back
        // into the entity by MyBatis (useGeneratedKeys)
        Order order = new Order();
        order.setProductName("test");
        order.setUserId(userId);
        orderDAO.insertOrder(order);

        // insert the matching order item, reusing the generated order id
        OrderItem item = new OrderItem();
        item.setUserId(userId);
        item.setOrderId(order.getOrderId());
        item.setCreateTime(new Date());
        orderDAO.insertOrderItem(item);
    }
}
控制台日志
//逻辑sql
Logic SQL: insert into t_order(user_id, product_name) values(?, ?)
//实际执行的sql, ds0为数据源名称
Actual SQL: ds0 ::: insert into t_order0(user_id, product_name, order_id) values(?, ?, ?) ::: [2, test, 447816871484329984]
Logic SQL: insert into t_order_item(order_id, create_time, user_id) values(?, ?, ?)
Actual SQL: ds0 ::: insert into t_order_item0(order_id, create_time, user_id, order_item_id) values(?, ?, ?, ?) ::: [447816871484329984, 2020-03-20 17:44:22.352, 2, 447816872205750272]
公共表和其他测试查看日志即可
读写分离
读写分离需要先配置mysql主从同步
配置
# Read/write splitting configuration (requires MySQL master/slave
# replication to be set up first).
# NOTE: indentation reconstructed; `worker.id = 1` fixed to a real YAML
# mapping entry (`worker.id: 1`) — with `=` the property never bound.
spring:
  shardingsphere:
    datasource:
      # comma separated, no spaces (4.0.x splits on "," without trimming)
      names: master,slave
      master:
        type: com.alibaba.druid.pool.DruidDataSource
        driver-class-name: com.mysql.jdbc.Driver
        url: jdbc:mysql://192.168.211.128:3306/sharding-jdbc?useUnicode=true&characterEncoding=UTF-8&useSSL=false
        username: root
        password: root
      slave:
        type: com.alibaba.druid.pool.DruidDataSource
        driver-class-name: com.mysql.jdbc.Driver
        url: jdbc:mysql://192.168.211.130:3306/sharding-jdbc?useUnicode=true&characterEncoding=UTF-8&useSSL=false
        username: root
        password: root
    sharding:
      # sharding-jdbc supports one master with multiple slaves
      master-slave-rules:
        # name of the combined read/write-splitting data source
        ds0:
          # master data source name, matching the datasource section above
          master-data-source-name: master
          # slave data source name(s)
          slave-data-source-names: slave
          # slave load-balance algorithm: ROUND_ROBIN or RANDOM
          # load-balance-algorithm-type: ROUND_ROBIN
      # Sharding rules are unchanged; only actual-data-nodes now points at
      # the read/write-splitting data source name (ds0)
      tables:
        t_order:
          actual-data-nodes: ds0.t_order$->{0..1}
          key-generator:
            column: order_id
            type: SNOWFLAKE
            props:
              worker.id: 1
          table-strategy:
            inline:
              sharding-column: order_id
              algorithm-expression: t_order$->{order_id % 2}
        t_order_item:
          actual-data-nodes: ds0.t_order_item$->{0..1}
          key-generator:
            column: order_item_id
            type: SNOWFLAKE
            props:
              worker.id: 1
          table-strategy:
            inline:
              sharding-column: order_id
              algorithm-expression: t_order_item$->{order_id % 2}
    props:
      sql.show: true
logging:
  level:
    root: error
    ShardingSphere-SQL: info
由于表结构未改变,直接运行分片中的测试
修改操作全部使用master数据源
查询操作全部使用slave数据源
作者公众号