Seata deployment and distributed transaction configuration
seata-server
For details on how Seata works internally and on distributed transaction principles in general, see: https://blog.csdn.net/xinzhifu1/article/details/109727873
Downloading the server
Download the server package from the official releases page: https://github.com/seata/seata/releases
After unpacking you get the seata-server-1.4.2 folder. The related parameters must be configured first. The configuration files are under the conf folder, and two of them need to be edited:
file.conf and registry.conf
Server persistence configuration
This article uses MySQL as the persistence layer. Create a database for the Seata server and create the three global bookkeeping tables in it; the DDL script is available at https://github.com/seata/seata/blob/develop/script/server/db/mysql.sql
-- -------------------------------- The script used when storeMode is 'db' --------------------------------
-- the table to store GlobalSession data
CREATE TABLE IF NOT EXISTS `global_table`
(
    `xid` VARCHAR(128) NOT NULL,
    `transaction_id` BIGINT,
    `status` TINYINT NOT NULL,
    `application_id` VARCHAR(32),
    `transaction_service_group` VARCHAR(32),
    `transaction_name` VARCHAR(128),
    `timeout` INT,
    `begin_time` BIGINT,
    `application_data` VARCHAR(2000),
    `gmt_create` DATETIME,
    `gmt_modified` DATETIME,
    PRIMARY KEY (`xid`),
    KEY `idx_gmt_modified_status` (`gmt_modified`, `status`),
    KEY `idx_transaction_id` (`transaction_id`)
) ENGINE = InnoDB
  DEFAULT CHARSET = utf8;

-- the table to store BranchSession data
CREATE TABLE IF NOT EXISTS `branch_table`
(
    `branch_id` BIGINT NOT NULL,
    `xid` VARCHAR(128) NOT NULL,
    `transaction_id` BIGINT,
    `resource_group_id` VARCHAR(32),
    `resource_id` VARCHAR(256),
    `branch_type` VARCHAR(8),
    `status` TINYINT,
    `client_id` VARCHAR(64),
    `application_data` VARCHAR(2000),
    `gmt_create` DATETIME(6),
    `gmt_modified` DATETIME(6),
    PRIMARY KEY (`branch_id`),
    KEY `idx_xid` (`xid`)
) ENGINE = InnoDB
  DEFAULT CHARSET = utf8;

-- the table to store lock data
CREATE TABLE IF NOT EXISTS `lock_table`
(
    `row_key` VARCHAR(128) NOT NULL,
    `xid` VARCHAR(128),
    `transaction_id` BIGINT,
    `branch_id` BIGINT NOT NULL,
    `resource_id` VARCHAR(256),
    `table_name` VARCHAR(32),
    `pk` VARCHAR(36),
    `gmt_create` DATETIME,
    `gmt_modified` DATETIME,
    PRIMARY KEY (`row_key`),
    KEY `idx_branch_id` (`branch_id`)
) ENGINE = InnoDB
  DEFAULT CHARSET = utf8;
Once the database has been created, edit file.conf to complete the server-side configuration: change mode to "db", and in the db {} block adjust the database settings, including the JDBC URL and connection properties.
## transaction log store, only used in seata-server
store {
  ## store mode: file、db、redis
  # changed to db
  mode = "db"
  ## rsa decryption public key
  publicKey = ""
  ## file store property
  file {
    ## store location dir
    dir = "sessionStore"
    # branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
    maxBranchSessionSize = 16384
    # globe session size , if exceeded throws exceptions
    maxGlobalSessionSize = 512
    # file buffer size , if exceeded allocate new buffer
    fileWriteBufferCacheSize = 16384
    # when recover batch read size
    sessionReloadReadSize = 100
    # async, sync
    flushDiskMode = async
  }
  ## database store property
  db {
    ## the implement of javax.sql.DataSource, such as DruidDataSource(druid)/BasicDataSource(dbcp)/HikariDataSource(hikari) etc.
    datasource = "druid"
    ## mysql/oracle/postgresql/h2/oceanbase etc.
    dbType = "mysql"
    driverClassName = "com.mysql.jdbc.Driver"
    ## if using mysql to store the data, recommend add rewriteBatchedStatements=true in jdbc connection param
    url = "jdbc:mysql://127.0.0.1:3306/seata?rewriteBatchedStatements=true"
    user = "mysql"
    password = "mysql"
    minConn = 5
    maxConn = 100
    globalTable = "global_table"
    branchTable = "branch_table"
    lockTable = "lock_table"
    queryLimit = 100
    maxWait = 5000
  }
  ## redis store property
  redis {
    ## redis mode: single、sentinel
    mode = "single"
    ## single mode property
    single {
      host = "127.0.0.1"
      port = "6379"
    }
    ## sentinel mode property
    sentinel {
      masterName = ""
      ## such as "10.28.235.65:26379,10.28.235.65:26380,10.28.235.65:26381"
      sentinelHosts = ""
    }
    password = ""
    database = "0"
    minConn = 1
    maxConn = 10
    maxTotal = 100
    queryLimit = 100
  }
}
Server registry configuration
This article uses Eureka as the registry. First bring up a Eureka server, then edit registry.conf: change type to "eureka" and fill in the properties in the eureka {} block. The application value can be customized; it is the name under which seata-server is registered in Eureka:
registry {
  # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
  # changed type to eureka
  type = "eureka"
  nacos {
    application = "seata-server"
    serverAddr = "127.0.0.1:8848"
    group = "SEATA_GROUP"
    namespace = ""
    cluster = "default"
    username = ""
    password = ""
  }
  eureka {
    serviceUrl = "http://localhost:8761/eureka"
    application = "default"
    weight = "1"
  }
  redis {
    serverAddr = "localhost:6379"
    db = 0
    password = ""
    cluster = "default"
    timeout = 0
  }
  zk {
    cluster = "default"
    serverAddr = "127.0.0.1:2181"
    sessionTimeout = 6000
    connectTimeout = 2000
    username = ""
    password = ""
  }
  consul {
    cluster = "default"
    serverAddr = "127.0.0.1:8500"
    aclToken = ""
  }
  etcd3 {
    cluster = "default"
    serverAddr = "http://localhost:2379"
  }
  sofa {
    serverAddr = "127.0.0.1:9603"
    application = "default"
    region = "DEFAULT_ZONE"
    datacenter = "DefaultDataCenter"
    cluster = "default"
    group = "SEATA_GROUP"
    addressWaitTime = "3000"
  }
  file {
    name = "file.conf"
  }
}

config {
  # file、nacos 、apollo、zk、consul、etcd3
  type = "file"
  nacos {
    serverAddr = "127.0.0.1:8848"
    namespace = ""
    group = "SEATA_GROUP"
    username = ""
    password = ""
    dataId = "seataServer.properties"
  }
  consul {
    serverAddr = "127.0.0.1:8500"
    aclToken = ""
  }
  apollo {
    appId = "seata-server"
    ## apolloConfigService will cover apolloMeta
    apolloMeta = "http://192.168.1.204:8801"
    apolloConfigService = "http://192.168.1.204:8080"
    namespace = "application"
    apolloAccesskeySecret = ""
    cluster = "seata"
  }
  zk {
    serverAddr = "127.0.0.1:2181"
    sessionTimeout = 6000
    connectTimeout = 2000
    username = ""
    password = ""
    nodePath = "/seata/seata.properties"
  }
  etcd3 {
    serverAddr = "http://localhost:2379"
  }
  file {
    name = "file.conf"
  }
}
Starting the Seata server
Running seata-server.sh in the bin directory starts Seata. This article uses supervisor to keep the process alive; the supervisor configuration is as follows:
[program:seata-server]
command=sh /home/seata/seata/seata-server-1.4.2/bin/seata-server.sh
directory=/home/seata/seata/seata-server-1.4.2/bin
user=root
startsecs=20
stopwaitsecs=1
startretries=3
autostart=true
autorestart=true
redirect_stderr=true
stdout_logfile=/var/log/seata-server/stdout.log
stderr_logfile=/var/log/seata-server/stderr.log
logfile=/var/log/seata-server/seata-server.log
When the server starts successfully, the log output looks like this:
Integrating seata-client with Spring Boot
To implement a distributed transaction, the client side also needs configuration: the data source, the connection to seata-server, and so on.
Adding the dependencies
Add the following dependencies to pom.xml; the version of seata-spring-boot-starter must match the server version:
<!-- seata -->
<dependency>
    <groupId>io.seata</groupId>
    <artifactId>seata-spring-boot-starter</artifactId>
    <version>1.4.2</version>
</dependency>
<dependency>
    <groupId>com.alibaba.cloud</groupId>
    <artifactId>spring-cloud-starter-alibaba-seata</artifactId>
    <version>2.1.2.RELEASE</version>
    <exclusions>
        <exclusion>
            <groupId>io.seata</groupId>
            <artifactId>seata-spring-boot-starter</artifactId>
        </exclusion>
    </exclusions>
</dependency>
seata-server connection configuration
Add the following entries to application.properties, including the registry address and the transaction group name. The value of seata.service.vgroup-mapping.my_test_tx_group must match the name under which seata-server is registered in Eureka:
#################################################################
##seata
seata.tx-service-group=my_test_tx_group
seata.service.vgroup-mapping.my_test_tx_group=default
seata.registry.type=eureka
seata.registry.eureka.service-url=http://10.18.226.80:8762/eureka
seata.registry.eureka.weight=1
Seata data source configuration
seata-client uses DataSourceProxy as a data source proxy. We need to register DataSourceProxy as the primary javax.sql.DataSource implementation and hand it to the other frameworks in use (MyBatis, JdbcTemplate):
import javax.sql.DataSource;

import com.alibaba.druid.pool.DruidDataSource;
import io.seata.rm.datasource.DataSourceProxy;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;

@Configuration
public class DataSourceProxyConfig {

    // Build the underlying Druid data source from the spring.datasource.* properties
    @Bean
    @ConfigurationProperties(prefix = "spring.datasource")
    public DruidDataSource druidDataSource() {
        return new DruidDataSource();
    }

    // Wrap it in Seata's DataSourceProxy and expose the proxy as the primary DataSource bean
    @Primary
    @Bean("dataSource")
    public DataSource dataSource(DruidDataSource druidDataSource) {
        return new DataSourceProxy(druidDataSource);
    }
}
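If MyBatis is the persistence framework, the proxied DataSource can be handed to it explicitly. The following is a minimal sketch, assuming mybatis-spring is on the classpath; the class and bean names are illustrative and not part of the original configuration:

import javax.sql.DataSource;

import org.apache.ibatis.session.SqlSessionFactory;
import org.mybatis.spring.SqlSessionFactoryBean;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class MyBatisConfig {

    // Wire MyBatis with the DataSourceProxy bean defined above so that every SQL
    // statement goes through Seata (undo-log generation, branch registration).
    @Bean
    public SqlSessionFactory sqlSessionFactory(DataSource dataSource) throws Exception {
        SqlSessionFactoryBean factoryBean = new SqlSessionFactoryBean();
        factoryBean.setDataSource(dataSource);
        return factoryBean.getObject();
    }
}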
Client-side database setup
On the client side, create the undo_log table; the SQL script is located at https://github.com/seata/seata/blob/develop/script/client/at/db/mysql.sql
-- For AT mode you must init this SQL in your business database; the seata server does not need it.
CREATE TABLE IF NOT EXISTS `undo_log`
(
    `branch_id` BIGINT NOT NULL COMMENT 'branch transaction id',
    `xid` VARCHAR(128) NOT NULL COMMENT 'global transaction id',
    `context` VARCHAR(128) NOT NULL COMMENT 'undo_log context,such as serialization',
    `rollback_info` LONGBLOB NOT NULL COMMENT 'rollback info',
    `log_status` INT(11) NOT NULL COMMENT '0:normal status,1:defense status',
    `log_created` DATETIME(6) NOT NULL COMMENT 'create datetime',
    `log_modified` DATETIME(6) NOT NULL COMMENT 'modify datetime',
    UNIQUE KEY `ux_undo_log` (`xid`, `branch_id`)
) ENGINE = InnoDB
  AUTO_INCREMENT = 1
  DEFAULT CHARSET = utf8 COMMENT ='AT transaction mode undo table';
Implementing the distributed transaction
To test the distributed transaction, start two services, test and test2. In the test service, add the @GlobalTransactional annotation to the relevant method to mark the start of the global transaction:
@Override
@GlobalTransactional(name = "create-order", rollbackFor = Exception.class)
public JSONObject testSeata(Name name) {
    String xid = RootContext.getXID();
    System.out.println(xid);
    try {
        nameMapper.addname(name); // local database operations
        nameMapper.change();
    } catch (Exception e) {
        e.printStackTrace();
    }
    try {
        Thread.sleep(20 * 1000); // delay inside the transaction to make the effect easier to observe
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
    JSONObject jsonObject = test2Client.testSeata(name.getScore()); // call the other service
    return jsonObject;
}
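The test2Client used above is a Feign client whose source is not shown in this article. The sketch below is a hypothetical version (service name, request path and the JSON type are assumptions) just to illustrate the call; with spring-cloud-starter-alibaba-seata on the classpath, the Seata XID is propagated to test2 through the Feign request automatically:

import com.alibaba.fastjson.JSONObject; // assumed JSON type
import org.springframework.cloud.openfeign.FeignClient;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestParam;

// Hypothetical Feign client for the test2 service (name and path are assumptions)
@FeignClient(name = "test2")
public interface Test2Client {

    @GetMapping("/testSeata")
    JSONObject testSeata(@RequestParam("score") Integer score);
}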
In the test2 service, add an exception to the called method to test the rollback:
@Override
public JSONObject testSeata(Integer score) {
    try {
        nameMapper.addscore(score);
    } catch (Exception e) {
        e.printStackTrace();
    }
    float a = 1 / 0; // deliberately throws ArithmeticException to trigger a global rollback
    return null;
}
Testing the result
After calling the interface you can see that the global transaction has started; the tables in the server-side (global) database contain records, and the client-side undo_log table has data as well:
- Global transaction started:
- Server-side table: branch_table
- Server-side table: global_table
- Server-side table: lock_table
- Client side: undo_log
- Test data:
- Transaction rollback:
- Data after rollback:
Row-level lock test
While the transaction is still waiting (during the sleep above), another attempt to modify the same data is blocked by the row-level lock:
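For example, a second entry point like the hypothetical sketch below (not part of the original code) that updates the same row inside its own global transaction will block when Seata tries to acquire the global row lock, and only proceeds, or fails with a lock conflict, once the first transaction commits or rolls back:

// Hypothetical second service method touching the same row; while the first global
// transaction above is still sleeping, this branch waits on the global row lock
// and is retried/rejected until that transaction finishes.
@GlobalTransactional(name = "update-same-row", rollbackFor = Exception.class)
public void updateSameRow(Name name) {
    nameMapper.change();
}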