一、 业务场景(订单系统)
订单系统保存订单
订单系统调用库存服务,减少商品库存
订单系统调用账户服务,扣减用户金额
二、Seata AT基本原理
Seata AT 事务分两个阶段来管理全局事务:
第一阶段: 执行各分支事务
第二阶段: 控制全局事务最终提交或回滚
1. 第一阶段:执行各分支事务
(1. 事务协调器:协调各个服务的运行状态。这个服务称为 TC(Transaction Coordinator)。
(2. 订单系统开始执行,保存订单之前,首先启动 TM(Transaction Manager,事务管理器),由 TM 向 TC 申请开启一个全局事务
(3. 这时TC会产生一个全局事务ID,称为 XID,并将 XID 传回 TM
这样就开启了全局事务!
(4. 全局事务开启后,开始执行创建订单的业务。首先执行保存订单,这时会先启动一个 RM(Resource Manager,资源管理器),并将 XID 传递给 RM。
(5. RM 负责对分支事务(即微服务的本地事务)进行管理,并与 TC 通信,上报分支事务的执行状态、接收全局事务的提交或回滚指令,RM 首先会使用 XID 向 TC 注册分支事务,将分支事务纳入对应的全局事务管辖。
(6. 现在可以执行保存订单的分支事务了。一旦分支事务执行成功,RM 会上报事务状态:
(7. TC 收到后,会将该状态信息传递到 TM:
到此,保存订单过程结束。下面是调用库存服务,减少商品库存,与订单的执行过程相同。
(8. 首先调用库存服务,启动 RM,并传递 XID:
(9. 库存服务的 RM 使用 XID 向 TC 进行注册,纳入全局事务管辖:
(10. 执行本地事务成功后上报状态,TC会将状态发送给TM:
(11. 相同的,完成账户分支事务:
第二阶段:控制全局事务最终提交或回滚
1. 假设订单业务执行过程中,扣减账户金额这一步分支事务执行失败,那么失败状态会向 TC 上报,然后再发送给 TM:
2.TM 会进行决策,确定全局事务失败,向 TC 发送全局事务的回滚请求:
3. 然后,TC 会向所有 RM 发送回滚操作指令,RM 会完成最终回滚操作:
三、Seata AT具体工作机制
1. 执行修改库存业务操作前, 会先取出旧的库存信息:
2. 修改库存:
3. 接着,取出更新后的新数据:
4. 接下来,会把旧数据和新数据合并起来,保存到一个事务回滚日志表:undo_log表:
5. 至此,第一阶段,分支事务完成,将状态上报给TC:
6. 如果失败,根据事务回滚日志(undo_log)表的记录,将商品恢复成旧的库存数据:
7. 然后删除事务日志,最终完成第二阶段回滚操作:
四、代码操作步骤
启动事务协调器(TC) – seata server
解压“课前资料/分布式事务/seata-server-1.3.zip”
三个配置文件
registry.conf – 向注册中心注册
file.conf – seata server 运行过程中在数据库中记录日志
seata-server.bat – 将启动内存设置为 256M
执行seata-server.bat启动
1. application.yml
# Spring Boot / Spring Cloud configuration for the Order microservice
# (Seata AT distributed-transaction demo).
spring:
  application:
    name: order                                # service name registered with Eureka
  datasource:
    driver-class-name: com.mysql.cj.jdbc.Driver
    url: jdbc:mysql://localhost/seata_order?useUnicode=true&characterEncoding=UTF-8&serverTimezone=GMT%2B8
    username: root
    password: root
    # NOTE(review): Seata's auto-configured data-source proxy (HikariCP) reads
    # "jdbcUrl" rather than "url", so the value is mirrored here — confirm
    # against the starter version in use.
    jdbcUrl: ${spring.datasource.url}
  cloud:
    alibaba:
      seata:
        # Transaction service group; must match the
        # "vgroupMapping.order_tx_group" key in file.conf.
        tx-service-group: order_tx_group

server:
  port: 8083

eureka:
  client:
    service-url:
      defaultZone: http://localhost:8761/eureka
  instance:
    prefer-ip-address: true

mybatis-plus:
  type-aliases-package: cn.tedu.order.entity
  mapper-locations: classpath:mapper/*.xml
  configuration:
    map-underscore-to-camel-case: true

logging:
  level:
    cn.tedu.order.mapper: debug
2. file.conf
# Seata client-side configuration (file.conf): network transport,
# transaction-group-to-TC mapping, and RM/TM retry behavior.
transport {
# tcp udt unix-domain-socket
type = "TCP"
#NIO NATIVE
server = "NIO"
#enable heartbeat
heartbeat = true
# the client batch send request enable
enableClientBatchSendRequest = true
#thread factory for netty
threadFactory {
bossThreadPrefix = "NettyBoss"
workerThreadPrefix = "NettyServerNIOWorker"
serverExecutorThread-prefix = "NettyServerBizHandler"
shareBossWorker = false
clientSelectorThreadPrefix = "NettyClientSelector"
clientSelectorThreadSize = 1
clientWorkerThreadPrefix = "NettyClientWorkerThread"
# netty boss thread size,will not be used for UDT
bossThreadSize = 1
#auto default pin or 8
workerThreadSize = "default"
}
shutdown {
# when destroy server, wait seconds
wait = 3
}
serialization = "seata"
compressor = "none"
}
service {
#transaction service group mapping
# "order_tx_group" must match the "tx-service-group: order_tx_group"
# setting in application.yml
# "seata-server" must match the name under which the TC server is
# registered in the registry (Eureka)
# the client looks up seata-server's address from Eureka, then registers
# itself with that TC under this group
# maps the current transaction group to the coordinator (TC) it uses
vgroupMapping.order_tx_group = "seata-server"
#only support when registry.type=file, please don't set multiple addresses
order_tx_group.grouplist = "127.0.0.1:8091"
#degrade, current not support
enableDegrade = false
#disable seata
disableGlobalTransaction = false
}
client {
# Resource Manager (branch-transaction) settings
rm {
asyncCommitBufferLimit = 10000
lock {
retryInterval = 10
retryTimes = 30
retryPolicyBranchRollbackOnConflict = true
}
reportRetryCount = 5
tableMetaCheckEnable = false
reportSuccessEnable = false
}
# Transaction Manager (global-transaction) retry settings
tm {
commitRetryCount = 5
rollbackRetryCount = 5
}
# undo_log: the rollback-log table used by AT mode (see section 三 above)
undo {
dataValidation = true
logSerialization = "jackson"
logTable = "undo_log"
}
log {
exceptionRate = 100
}
}
3. registry.conf
# Seata registry/config lookup (registry.conf).
# "registry" tells the client how to discover the TC server;
# "config" tells it where to load the rest of its configuration from.
# Only the section matching "type" is used; the others are ignored.
registry {
# file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
# this demo discovers the TC ("seata-server") via Eureka
type = "eureka"
nacos {
serverAddr = "localhost"
namespace = ""
cluster = "default"
}
eureka {
serviceUrl = "http://localhost:8761/eureka"
# application = "default"
# weight = "1"
}
redis {
serverAddr = "localhost:6379"
db = "0"
password = ""
cluster = "default"
timeout = "0"
}
zk {
cluster = "default"
serverAddr = "127.0.0.1:2181"
session.timeout = 6000
connect.timeout = 2000
username = ""
password = ""
}
consul {
cluster = "default"
serverAddr = "127.0.0.1:8500"
}
etcd3 {
cluster = "default"
serverAddr = "http://localhost:2379"
}
sofa {
serverAddr = "127.0.0.1:9603"
application = "default"
region = "DEFAULT_ZONE"
datacenter = "DefaultDataCenter"
cluster = "default"
group = "SEATA_GROUP"
addressWaitTime = "3000"
}
file {
name = "file.conf"
}
}
config {
# file、nacos 、apollo、zk、consul、etcd3、springCloudConfig
# this demo loads the remaining configuration from the local file.conf
type = "file"
nacos {
serverAddr = "localhost"
namespace = ""
group = "SEATA_GROUP"
}
consul {
serverAddr = "127.0.0.1:8500"
}
apollo {
app.id = "seata-server"
apollo.meta = "http://192.168.1.204:8801"
namespace = "application"
}
zk {
serverAddr = "127.0.0.1:2181"
session.timeout = 6000
connect.timeout = 2000
username = ""
password = ""
}
etcd3 {
serverAddr = "http://localhost:2379"
}
file {
name = "file.conf"
}
}