Spring Boot + Docker deployment of seata-server:latest

// pull the Seata server image with Docker
docker pull seataio/seata-server:latest

On the Linux host, create nacos-config.txt in a working directory and push it to your own Nacos server.

//nacos-config.txt
transport.type=TCP
transport.server=NIO
transport.heartbeat=true
transport.thread-factory.boss-thread-prefix=NettyBoss
transport.thread-factory.worker-thread-prefix=NettyServerNIOWorker
transport.thread-factory.server-executor-thread-prefix=NettyServerBizHandler
transport.thread-factory.share-boss-worker=false
transport.thread-factory.client-selector-thread-prefix=NettyClientSelector
transport.thread-factory.client-selector-thread-size=1
transport.thread-factory.client-worker-thread-prefix=NettyClientWorkerThread
transport.thread-factory.boss-thread-size=1
transport.thread-factory.worker-thread-size=8
transport.shutdown.wait=3
service.vgroupMapping.my_test_tx_group=default
service.enableDegrade=false
service.disable=false
service.max.commit.retry.timeout=-1
service.max.rollback.retry.timeout=-1
service.default.grouplist=192.168.0.253:8859
service.enableDegrade=false
service.disableGlobalTransaction=false
client.async.commit.buffer.limit=10000
client.lock.retry.internal=10
client.lock.retry.times=30
client.lock.retry.policy.branch-rollback-on-conflict=true
client.table.meta.check.enable=true
client.report.retry.count=5
client.tm.commit.retry.count=1
client.tm.rollback.retry.count=1
store.mode=file
store.file.dir=file_store/data
store.file.max-branch-session-size=16384
store.file.max-global-session-size=512
store.file.file-write-buffer-cache-size=16384
store.file.flush-disk-mode=async
store.file.session.reload.read_size=100
store.db.datasource=dbcp
store.db.db-type=mysql
store.db.driver-class-name=com.mysql.jdbc.Driver
store.db.url=jdbc:mysql://192.168.0.253:3306/nacos_devtest?useUnicode=true
store.db.user=root
store.db.password=xxx
store.db.min-conn=1
store.db.max-conn=3
store.db.global.table=global_table
store.db.branch.table=branch_table
store.db.query-limit=100
store.db.lock-table=lock_table
recovery.committing-retry-period=1000
recovery.asyn-committing-retry-period=1000
recovery.rollbacking-retry-period=1000
recovery.timeout-retry-period=1000
transaction.undo.data.validation=true
transaction.undo.log.serialization=jackson
transaction.undo.log.save.days=7
transaction.undo.log.delete.period=86400000
transaction.undo.log.table=undo_log
transport.serialization=seata
transport.compressor=none
metrics.enabled=false
metrics.registry-type=compact
metrics.exporter-list=prometheus
metrics.exporter-prometheus-port=9898
support.spring.datasource.autoproxy=false

The nacos.sh script that pushes the configuration to Nacos:

// nacos.sh
#!/usr/bin/env bash
if [ $# != 1 ]; then
  echo "Usage: ./nacos.sh nacosIp"
  exit 1
fi

nacosIp=$1
echo "set nacosIp=$nacosIp"
error=0

# push every key=value line of nacos-config.txt to Nacos under group SEATA_GROUP
for line in $(cat nacos-config.txt)
do
  key=${line%%=*}
  value=${line#*=}
  echo -e "\r\n set ${key} = ${value}"

  result=$(curl -s -X POST "http://$nacosIp:8848/nacos/v1/cs/configs?dataId=$key&group=SEATA_GROUP&content=$value")

  if [ "$result"x == "true"x ]; then
    echo -e "\033[42;37m $result \033[0m"
  else
    echo -e "\033[41;37m $result \033[0m"
    let error++
  fi
done

if [ $error -eq 0 ]; then
  echo -e "\r\n\033[42;37m init nacos config finished, please start seata-server. \033[0m"
else
  echo -e "\r\n\033[41;33m init nacos config fail. \033[0m"
fi

Run the script from the same directory to complete the push: ./nacos.sh <nacos ip>

./nacos.sh 192.168.0.253
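
To spot-check that a key actually reached Nacos, you can read it back through the same open API the script posts to. A minimal sketch, using the Nacos address from the example above and the vgroupMapping key as a sample (any dataId from nacos-config.txt works):

# read one pushed key back from Nacos; should print its value, e.g. "default"
curl "http://192.168.0.253:8848/nacos/v1/cs/configs?dataId=service.vgroupMapping.my_test_tx_group&group=SEATA_GROUP"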

Create the database tables (nacos_devtest.sql):

-- the table to store GlobalSession data
drop table if exists `global_table`;
create table `global_table` (
  `xid` varchar(128)  not null,
  `transaction_id` bigint,
  `status` tinyint not null,
  `application_id` varchar(32),
  `transaction_service_group` varchar(32),
  `transaction_name` varchar(128),
  `timeout` int,
  `begin_time` bigint,
  `application_data` varchar(2000),
  `gmt_create` datetime,
  `gmt_modified` datetime,
  primary key (`xid`),
  key `idx_gmt_modified_status` (`gmt_modified`, `status`),
  key `idx_transaction_id` (`transaction_id`)
);

-- the table to store BranchSession data
drop table if exists `branch_table`;
create table `branch_table` (
  `branch_id` bigint not null,
  `xid` varchar(128) not null,
  `transaction_id` bigint ,
  `resource_group_id` varchar(32),
  `resource_id` varchar(256) ,
  `lock_key` varchar(128) ,
  `branch_type` varchar(8) ,
  `status` tinyint,
  `client_id` varchar(64),
  `application_data` varchar(2000),
  `gmt_create` datetime,
  `gmt_modified` datetime,
  primary key (`branch_id`),
  key `idx_xid` (`xid`)
);

-- the table to store lock data
drop table if exists `lock_table`;
create table `lock_table` (
  `row_key` varchar(128) not null,
  `xid` varchar(96),
  `transaction_id` bigint,
  `branch_id` bigint,
  `resource_id` varchar(256) ,
  `table_name` varchar(32) ,
  `pk` varchar(36) ,
  `gmt_create` datetime ,
  `gmt_modified` datetime,
  primary key(`row_key`)
);
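
These three tables must exist in the database that seata-server's db store points at. A minimal import sketch, assuming the connection details from the store.db settings above (swap in your own host, port and user):

# load the schema into the target database
mysql -h 192.168.0.253 -P 3306 -u root -p nacos_devtest < nacos_devtest.sql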



In the same directory, create file.conf and registry.conf, which will be mapped into the Docker container below.

file.conf

service {
    vgroup_mapping.my_test_tx_group =  "default"
    default.grouplist = "192.168.0.253:8859"
    disableGlobalTransaction = false
}

store {
  ## store mode: file、db
  mode = "db"
 
  ## file store property
  file {
    ## store location dir
    dir = "sessionStore"
    # branch session size , if exceeded first try compress lockkey, still exceeded throws exceptions
    maxBranchSessionSize = 16384
    # global session size, if exceeded throws exceptions
    maxGlobalSessionSize = 512
    # file buffer size , if exceeded allocate new buffer
    fileWriteBufferCacheSize = 16384
    # when recover batch read size
    sessionReloadReadSize = 100
    # async, sync
    flushDiskMode = async
  }
 
  ## database store property
  db {
    ## the implement of javax.sql.DataSource, such as DruidDataSource(druid)/BasicDataSource(dbcp) etc.
    datasource = "druid"
    ## mysql/oracle/postgresql/h2/oceanbase etc.
    dbType = "mysql"
    driverClassName = "com.mysql.jdbc.Driver"
    url = "jdbc:mysql://192.168.0.253:3310/nacos_devtest"
    user = "nacos"
    password = "xxx"
    minConn = 5
    maxConn = 30
    globalTable = "global_table"
    branchTable = "branch_table"
    lockTable = "lock_table"
    queryLimit = 100
    maxWait = 5000
  }
}

registry.conf

registry {
  # file, nacos, eureka, redis, zk, consul, etcd3, sofa
  type = "nacos"
 
  nacos {
    serverAddr = "192.168.0.253:8848"
    namespace = "public"
    cluster = "default"
  }
 
}
 
config {
  # file
  type = "file"
 
 
 file {
    name = "file:/seata-server/resources/file.conf"
  }
 
}
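
Before starting the container, make sure both files are placed at the host paths referenced by the -v mounts in the docker run command below; this sketch assumes /root/seata as the host directory, matching that command:

# expected host layout for the bind mounts
mkdir -p /root/seata
ls /root/seata
# should list: file.conf  registry.conf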

Run the Docker container:
docker run --name seata -p 8859:8091 -v /root/seata/registry.conf:/seata-server/resources/registry.conf -v /root/seata/file.conf:/seata-server/resources/file.conf seataio/seata-server:latest
The server starts successfully.
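
If the container does not come up cleanly, the standard Docker commands are enough to check on it (the container name "seata" comes from the run command above):

# confirm the container is running and follow the seata-server startup log
docker ps --filter name=seata
docker logs -f seata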
Spring Boot pom configuration (make sure the jar version matches your seata-server version):

        <dependency>
            <groupId>io.seata</groupId>
            <artifactId>seata-spring-boot-starter</artifactId>
            <version>1.2.0</version>
        </dependency>

application.yml configuration:

seata:
  tx-service-group: my_test_tx_group # when using file-based configuration, the transaction group must be specified
  service:
    disable-global-transaction: false
    grouplist:
      default: 192.168.0.253:8859 # must be written as a key/value entry; the key here has to match the name used in the config file

At this point the deployment is complete.

Summary: Seata's documentation is rather sparse, so I ran into a few pitfalls while configuring it; the configuration write-ups I had found earlier on CSDN blogs all seemed to have problems to some degree.
