dble安装zk及配置mysql主从模式
下载dble:latest,mysql:5.7,zookeeper:latest镜像
# 下载dble最新新镜像
docker pull actiontech/dble
# 下载mysql:5.7镜像
docker pull mysql:5.7
# 下载zookeeper最新镜像
docker pull zookeeper
# 检查镜像是否下载成功
docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
actiontech/dble latest 9988614a8e4b 7 months ago 755MB
mysql 5.7 efa50097efbd 3 days ago 462MB
zookeeper latest 6c0a15fc2e1a 7 days ago 279MB
创建目录,上传配置文件并执行docker-compose.yml
#创建目录
mkdir -p /app/dble_cluster
#切换到创建的目录下
cd /app/dble_cluster
#按docker-compose.yml中的挂载目录(volumes标签下:前的目录),创建对应的目录
mkdir -p zoo1/conf
mkdir -p zoo1/data
mkdir -p zoo1/datalog
cd zoo1
ls -l
# 看到有刚才创建的目录,再创建其他目录
cd /app/dble_cluster
mkdir -p zoo2/conf
mkdir -p zoo2/data
mkdir -p zoo2/datalog
mkdir -p zoo3/conf
mkdir -p zoo3/data
mkdir -p zoo3/datalog
mkdir -p mysql1/conf
mkdir -p mysql1/logs
mkdir -p mysql1/data
mkdir -p mysql2/conf
mkdir -p mysql2/logs
mkdir -p mysql2/data
mkdir -p dble1/conf/
mkdir -p dble1/logs
mkdir -p dble2/conf/
mkdir -p dble2/logs
#创建完目录,再上传对应的配置到相应目录。参考yml中的挂载来上传
mysql1/conf/my.cnf 和 mysql2/conf/my.cnf 中的 server-id 要保持唯一(例如分别设为 1 和 2)
上传start.sh到/app/dble_cluster目录下
#上传docker-compose.yml到该目录下
rz
#选择本地的docker-compose.yml
#执行docker-compose命令
docker-compose up -d
[root@localhost dble_cluster]# docker-compose up -d
[+] Running 7/0
⠿ Container mysql2 Running 0.0s
⠿ Container zoo3 Running 0.0s
⠿ Container mysql1 Running 0.0s
⠿ Container dble1 Running 0.0s
⠿ Container zoo1 Running 0.0s
⠿ Container zoo2 Running 0.0s
⠿ Container dble2 Running
#查看dble,mysql,zookeeper的运行状态
docker ps -a | grep dble
docker ps -a | grep mysql
docker ps -a | grep zookeeper
#再查看dble的日志,看下有没有成功启动
[root@localhost dble_cluster]# docker logs -f dble1
wait-for-it.sh: waiting 120 seconds for mysql1:3306
wait-for-it.sh: mysql1:3306 is available after 0 seconds
wait-for-it.sh: waiting 120 seconds for mysql2:3306
wait-for-it.sh: mysql2:3306 is available after 0 seconds
dble start in docker
Starting dble-server...
dble start finish
INFO | jvm 1 | 2022/07/04 10:33:54 | Server execute ShutdownHook.
INFO | jvm 1 | 2022/07/04 10:33:54 | 2022-07-04 10:33:54,086 Thread-2 WARN Unable to register Log4j shutdown hook because JVM is shutting down. Using SimpleLogger
INFO | jvm 1 | 2022/07/04 10:33:54 | You use OuterHa or Cluster, no need to clean up ha process
STATUS | wrapper | 2022/07/04 10:33:56 | <-- Wrapper Stopped
STATUS | wrapper | 2022/07/04 10:46:27 | --> Wrapper Started as Daemon
STATUS | wrapper | 2022/07/04 10:46:27 | Java Service Wrapper Community Edition 64-bit 3.5.40
STATUS | wrapper | 2022/07/04 10:46:27 | Copyright (C) 1999-2019 Tanuki Software, Ltd. All Rights Reserved.
STATUS | wrapper | 2022/07/04 10:46:27 | http://wrapper.tanukisoftware.com
STATUS | wrapper | 2022/07/04 10:46:27 |
STATUS | wrapper | 2022/07/04 10:46:27 | Launching a JVM...
INFO | jvm 1 | 2022/07/04 10:46:27 | OpenJDK 64-Bit Server VM warning: UseCMSCompactAtFullCollection is deprecated and will likely be removed in a future release.
INFO | jvm 1 | 2022/07/04 10:46:27 | OpenJDK 64-Bit Server VM warning: CMSFullGCsBeforeCompaction is deprecated and will likely be removed in a future release.
INFO | jvm 1 | 2022/07/04 10:46:27 | Listening for transport dt_socket at address: 8088
INFO | jvm 1 | 2022/07/04 10:46:27 | WrapperManager: Initializing...
INFO | jvm 1 | 2022/07/04 10:46:30 | Server startup successfully. dble version is [5.7.11-dble-3.21.10.0-97a3b6d73a49cb374020f930c59f82746220b424-20211119064810]. Please see logs in logs/dble.log
#以上就是正常启动的日志
#通过客户端登录dble的账号,或者命令行登录dble的账号
docker exec -it mysql1 mysql -uroot -p
Enter password:
mysql> show databases;
+--------------------+
| Database |
+--------------------+
| information_schema |
| mysql |
| performance_schema |
| sys |
+--------------------+
4 rows in set (0.00 sec)
#这样就是启动成功了。
在已有mysql数据库前提下,配置mysql主从同步,验证高可用场景
- 先查看mysql容器的挂载目录
docker inspect gaea-mysql | grep Mounts --color -a50
"Mounts": [
{
"Type": "bind",
"Source": "/app/gaea/mysql/conf",
"Destination": "/etc/mysql/conf.d",
"Mode": "rw",
"RW": true,
"Propagation": "rprivate"
},{
"Type": "bind",
"Source": "/app/gaea/mysql/logs",
"Destination": "/logs",
"Mode": "rw",
"RW": true,
"Propagation": "rprivate"
},
{
"Type": "bind",
"Source": "/app/gaea/mysql/data",
"Destination": "/var/lib/mysql",
"Mode": "rw",
"RW": true,
"Propagation": "rprivate"
}
],
可以看到Source目录在/app/gaea/mysql
-
先停止容器,再备份mysql数据库
docker stop gaea-mysql
备份mysql数据库
cd /app/gaea/mysql/ mv mysql mysql1
-
修改docker-compose.yml
把dble1启动参数修改端口为3306,将mysql1的目录挂载到备份的目录下,在备份的conf目录下添加主节点配置。
[mysqld]
server-id=1
log-bin=mysql-bin
symbolic-links=0
mysql2的目录挂载到新的目录下/app/gaea/mysql2
先把mysql1复制一份为mysql2,再删除data,db,logs目录,再修改conf目录下的配置为slave节点
[mysqld]
server-id=2
#Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0
#也可以设置log-bin,让他在某个时刻成为主节点
log-bin=mysql-bin
#忽略表
replicate-wild-ignore-table=mysql.*
配置主从同步
有很多种配置主从同步的方法,可以总结为如下的步骤:
- 1.在主服务器上,必须开启二进制日志机制和配置一个独立的ID
- 2.在每一个从服务器上,配置一个唯一的ID,创建一个用来专门复制主服务器数据的账号
- 3.在开始复制进程前,在主服务器上记录二进制文件的位置信息
- 4.如果在开始复制之前,数据库中已经有数据,就必须先创建一个数据快照(可以使用mysqldump导出数据库,或者直接复制数据文件)
- 5.配置从服务器要连接的主服务器的IP地址和登陆授权,二进制日志文件名和位置
-
上传docker-compose.yml到/root/dble,再使用docker-compose up -d 启动
-
启动成功后,docker 登录mysql1,在mysql中执行创建从库脚本
# 先查看master节点已创建成功 mysql> show master status; +------------------+----------+--------------+------------------+-------------------+ | File | Position | Binlog_Do_DB | Binlog_Ignore_DB | Executed_Gtid_Set | +------------------+----------+--------------+------------------+-------------------+ | mysql-bin.000005 | 154 | | | | +------------------+----------+--------------+------------------+-------------------+ 1 row in set (0.00 sec)
再创建备份用户
create user 'slave'@'%' identified by '123456';
再授权
GRANT REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'slave'@'%'; flush privileges;
重启mysql1
docker restart mysql1
如果存在原始数据,则需要执行该步骤,没有则跳过
首先登陆数据库,然后刷新所有的表,同时给数据库加上一把锁,阻止对数据库进行任何的写操作
flush tables with read lock;
在锁表之后,再导出数据库里的数据(如果数据库里没有数据,可以忽略这一步)
#先登录到mysql容器中 docker exec -it mysql1 bash # 在mysql容器中创建backup目录 mkdir backup # 再进行数据备份 mysqldump -uroot -ppassword -A > /backup/all.sql # 再把容器中的备份数据复制到宿主机中 docker cp 容器名/容器id:要拷贝的文件在容器里面的路径 要拷贝到宿主机的相应路径 sudo docker cp mysql1:/backup/all.sql /app/gaea
如果数据量很大,可以在导出时就压缩为原来的大概三分之一
mysqldump -uroot -p'123456' -S /data/3306/data/mysql.sock --all-databases | gzip > /server/backup/mysql_bak.$(date +%F).sql.gz
这时可以对数据库解锁,恢复对主数据库的操作
mysql > unlock tables;
再登录从数据库导入备份的数据
~~~bash
# 先把宿主的备份文件复制到容器里面
docker cp 要拷贝的文件路径 容器名:要拷贝到容器里面对应的路径
docker cp /app/gaea/all.sql mysql2:/home
# 登录容器
docker exec -it mysql2 bash
# 导入备份数据
mysql -uroot -ppassword < /home/all.sql
执行完这步可以看到从库中也有了主库的数据。 接下来执行从库主备关联
# 先登录mySql
docker exec -it mysql2 mysql -uroot -ppassword
# 再执行主备关联
change master to master_host='172.18.1.13', master_user='slave', master_password='123456', master_port=3307, master_log_file='mysql-bin.000005', master_log_pos=154, master_connect_retry=30;
/** 上面的
master_host是主节点ip
master_user和master_password是刚才在master创建的备份用户和密码
master_port是主节点的mysql端口号
master_log_file是查看master的show master status;对应的file值。
master_log_pos的值是查看master的show master status;对应的Position的值。
**/
#启动从库拷贝
start slave;
# 查看主从同步状态
show slave status \G;
# 正常会看到这样的,Slave_IO_Running: Yes
Slave_SQL_Running: Yes
show slave status \G;
*************************** 1. row ***************************
Slave_IO_State: Waiting for master to send event
Master_Host: 172.18.1.13
Master_User: slave
Master_Port: 3307
Connect_Retry: 30
Master_Log_File: mysql-bin.000006
Read_Master_Log_Pos: 154
Relay_Log_File: mysql2-relay-bin.000003
Relay_Log_Pos: 367
Relay_Master_Log_File: mysql-bin.000006
Slave_IO_Running: Yes
Slave_SQL_Running: Yes
-
验证主从同步,及dble停止一个,另一个还能不能正常运行,验证mysql主从节点是否可以同步数据。读写是否分离。
#验证主从同步,在主节点创建数据库,并创建表,观察从节点是否会同步新建的数据 docker exec -it mysql1 mysql -uroot -ppassword mysql> create database test; Query OK, 1 row affected (0.03 sec) mysql> use test; Database changed mysql> create table user(id bigint(20) not null auto_increment primary key, name varchar(20)); Query OK, 0 rows affected (0.30 sec) mysql> insert into user(name) values("zs"); Query OK, 1 row affected (0.05 sec) mysql> insert into user(name) values("lisi"); Query OK, 1 row affected (0.10 sec) mysql> select * from user; +----+------+ | id | name | +----+------+ | 1 | zs | | 2 | lisi | +----+------+ 2 rows in set (0.00 sec) #登录到mysql从节点查看 docker exec -it mysql2 mysql -uroot -ppassword 可以看到user表及有对应的内容 #先停止一个dble docker stop dble1 #查看另一个dble的wrapper.log和dble.log看下是否都正常 #wrapper.log日志正常 INFO | jvm 1 | 2022/07/01 15:14:32 | Server startup successfully. dble version is [5.7.11-dble-3.21.10.0-97a3b6d73a49cb374020f930c59f82746220b424-20211119064810]. Please see logs in logs/dble.log
验证高可用结果
可以看到当停止一个dble库时是可以工作的。
但是如果停了一个mysql,另一个mysql可以作为主节点,当重启停止的mysql时,mysql并不能自动恢复原来的主从配置。mysql高可用的特性只在dble的商业版本中提供。
配置文件
start.sh
#!/bin/sh
# Container entrypoint for dble: set the timezone, wait for both MySQL
# backends to accept connections, start dble, then follow the wrapper log
# so the container keeps a foreground process.
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'Asia/Shanghai' >/etc/timezone
# Block (up to 120s each) until the MySQL backends are reachable.
sh /opt/dble/bin/wait-for-it.sh mysql1:3306 -t 120
sh /opt/dble/bin/wait-for-it.sh mysql2:3306 -t 120
echo "dble start in docker"
sh /opt/dble/bin/dble start
echo "dble start finish"
# 'tailf' has been removed from modern util-linux; use 'tail -f' instead.
# 'exec' replaces the shell so the tail process receives container signals.
exec tail -f /opt/dble/logs/wrapper.log
dble-conf文件
bootstrap.cnf
#encoding=UTF-8
-agentlib:jdwp=transport=dt_socket,server=y,address=8088,suspend=n
-server
-XX:+AggressiveOpts
-Dfile.encoding=UTF-8
-Dcom.sun.management.jmxremote
-Dcom.sun.management.jmxremote.port=1984
-Dcom.sun.management.jmxremote.authenticate=false
-Dcom.sun.management.jmxremote.ssl=false
-Dcom.sun.management.jmxremote.host=127.0.0.1
-Xmx4G
-Xms1G
-Xss256k
-XX:MaxDirectMemorySize=2G
-XX:MetaspaceSize=100M
# GC Log
-XX:+PrintHeapAtGC
-XX:+PrintGCDateStamps
-Xloggc:./logs/gc_%WRAPPER_TIME_YYYYMMDDHHIISS%_%p.log
-XX:+PrintGCTimeStamps
-XX:+PrintGCDetails
-XX:+PrintTenuringDistribution
# CMS
-XX:+UseConcMarkSweepGC
-XX:+UseParNewGC
-XX:+CMSParallelRemarkEnabled
-XX:+UseCMSCompactAtFullCollection
-XX:CMSFullGCsBeforeCompaction=0
-XX:+CMSClassUnloadingEnabled
-XX:LargePageSizeInBytes=128M
-XX:+UseFastAccessorMethods
-XX:+UseCMSInitiatingOccupancyOnly
-XX:CMSInitiatingOccupancyFraction=70
# base config
-DhomePath=.
-DinstanceName=1
# NOTE: in cluster mode each dble node must use unique instanceName/instanceId/serverId
# (mount a separate bootstrap.cnf for dble1 and dble2 with different values)
# valid for sequenceHandlerType=2 or 3
-DinstanceId=1
-DserverId=xxx1
#-DbindIp=0.0.0.0
#-DserverPort=8066
#-DmanagerPort=9066
#-DmaxCon=1024
#-Dprocessors=4
#-DbackendProcessors=12
#-DprocessorExecutor=4
#-DbackendProcessorExecutor=12
#-DcomplexExecutor=8
#-DwriteToBackendExecutor=4
-DfakeMySQLVersion=5.7.11
#-DtraceEndPoint=http://10.186.60.96:14268/api/traces
# serverBacklog size,default 2048
-DserverBacklog=2048
#-DusePerformanceMode=0
# if need out HA
-DuseOuterHa=true
# connection
#-Dcharset=utf8mb4
-DmaxPacketSize=167772160
-DtxIsolation=2
#-Dautocommit=1
#-DidleTimeout=60000
# option
#-DuseCompression=1
#-DcapClientFoundRows=false
-DusingAIO=0
-DuseThreadUsageStat=1
# query time cost statistics
#-DuseCostTimeStat=0
#-DmaxCostStatSize=100
#-DcostSamplePercent=1
# consistency
# check the consistency of table structure between nodes,default not
-DcheckTableConsistency=0
# check period, he default period is 60000 milliseconds
-DcheckTableConsistencyPeriod=60000
# processor check conn
-DprocessorCheckPeriod=1000
-DsqlExecuteTimeout=3000
#-DbackSocket unit:bytes
#-DbackSocketSoRcvbuf=4194304
#-DbackSocketSoSndbuf=1048576
#-DbackSocketNoDelay=1
# frontSocket
#-DfrontSocketSoRcvbuf=1048576
#-DfrontSocketSoSndbuf=4194304
#-DfrontSocketNoDelay=1
# query memory used for per session,unit is M
-DotherMemSize=4
-DorderMemSize=4
-DjoinMemSize=4
# off Heap unit:bytes
-DbufferPoolChunkSize=32767
-DbufferPoolPageNumber=512
-DbufferPoolPageSize=2097152
#-DmappedFileSize=2097152
# sql statistics
# 1 means use SQL statistics, 0 means not
-DuseSqlStat=1
#-DbufferUsagePercent=80
-DclearBigSQLResultSetMapMs=600000
#-DsqlRecordCount=10
#-DmaxResultSet=524288
# transaction log
# 1 enable record the transaction log, 0 disable ,the unit of transactionRotateSize is M
-DrecordTxn=0
#-DtransactionLogBaseDir=/txlogs
#-DtransactionLogBaseName=server-tx
#-DtransactionRotateSize=16
# XA transaction
# use XA transaction ,if the mysql service crash,the unfinished XA commit/rollback will retry for several times , it is the check period for ,default is 1000 milliseconds
-DxaSessionCheckPeriod=1000
# use XA transaction ,the finished XA log will removed. the default period is 1000 milliseconds
-DxaLogCleanPeriod=1000
# XA Recovery Log path
# -DxaRecoveryLogBaseDir=/xalogs/
# XA Recovery Log name
#-DxaRecoveryLogBaseName=xalog
# XA Retry count, retry times in backend, 0 means always retry until success
#-DxaRetryCount=0
#-DviewPersistenceConfBaseDir=/viewPath
#-DviewPersistenceConfBaseName=viewJson
# for join tmp results
#-DmergeQueueSize=1024
#-DorderByQueueSize=1024
#-DjoinQueueSize=1024
# true is use JoinStrategy, default false
#-DuseJoinStrategy=true
-DnestLoopConnSize=4
-DnestLoopRowsSize=2000
# if enable the slow query log
-DenableSlowLog=1
# the slow query log location
#-DslowLogBaseDir=./slowlogs
#-DslowLogBaseName=slow-query
# the max period for flushing the slow query log from memory to disk after last time , unit is second
-DflushSlowLogPeriod=1
# the max records for flushing the slow query log from memory to disk after last time
-DflushSlowLogSize=1000
# the threshold for judging if the query is slow , unit is millisecond
-DsqlSlowTime=100
# used for load data,maxCharsPerColumn means max chars length for per column when load data
#-DmaxCharsPerColumn=65535
# used for load data, because dble need save to disk if loading file contains large size
#-DmaxRowSizeToFile=10000
#-DenableFlowControl=false
#-DflowControlHighLevel=4194304
#-DflowControlLowLevel=262144
cluster.cnf
#
# Copyright (C) 2016-2022 ActionTech.
# License: http://www.gnu.org/licenses/gpl.html GPL version 2 or higher.
#
clusterEnable=true
# cluster ucore/zk
clusterMode=zk
# zk: clusterIP=10.186.19.aa:2281,10.186.60.bb:2281
# fixed: the original line duplicated the key ("clusterIP=clusterIP=..."),
# which made the property value invalid
clusterIP=zoo1:2181,zoo2:2181,zoo3:2181
# zk not need cluster.port
clusterPort=5700
rootPath=/dble
#cluster namespace, please use the same one in one cluster
clusterId=cluster-1
# if HA need sync by cluster, only useful when useOuterHa=true
needSyncHa=false
# unit is millisecond
showBinlogStatusTimeout=60000
sequenceHandlerType=2
# valid for sequenceHandlerType=2 or 3
#sequenceStartTime=2010-11-04 09:42:54
# valid for sequenceHandlerType=3 and clusterMode is zk, default true
#sequenceInstanceByZk=true
db.xml
<?xml version="1.0"?>
<!DOCTYPE dble:db SYSTEM "db.dtd">
<dble:db xmlns:dble="http://dble.cloud/" version="4.0">
    <dbGroup name="host_1" rwSplitMode="0" delayThreshold="100">
        <heartbeat>select 1</heartbeat>
        <!-- Containers communicate over the compose network, so each backend
             must be addressed by its service name on the in-container port
             3306; the original "mysql1:3307" pointed at the host-mapped port
             of the wrong host and is unreachable from inside the network. -->
        <dbInstance name="mysql1" url="mysql1:3306" user="root" password="password" maxCon="200" minCon="50" primary="true" />
        <dbInstance name="mysql2" url="mysql2:3306" user="root" password="password" maxCon="200" minCon="50" primary="false" />
    </dbGroup>
</dble:db>
log4j2.xml
<?xml version="1.0" encoding="UTF-8"?>
<Configuration status="WARN" monitorInterval="30">
<Appenders>
<Console name="Console" target="SYSTEM_OUT">
<PatternLayout pattern="%d [%-5p][%t] %m %throwable{full} (%C:%F:%L) %n"/>
</Console>
<RollingRandomAccessFile name="RollingFile" fileName="${sys:homePath}/logs/dble.log"
filePattern="${sys:homePath}/logs/$${date:yyyy-MM}/dble-%d{MM-dd}-%i.log.gz">
<PatternLayout>
<Pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %5p [%t] (%l) - %m%n</Pattern>
</PatternLayout>
<Policies>
<OnStartupTriggeringPolicy/>
<SizeBasedTriggeringPolicy size="250 MB"/>
<TimeBasedTriggeringPolicy/>
</Policies>
<DefaultRolloverStrategy max="100">
<Delete basePath="logs" maxDepth="2">
<IfFileName glob="*/dble-*.log.gz">
<IfLastModified age="30d">
<IfAny>
<IfAccumulatedFileSize exceeds="1 GB"/>
<IfAccumulatedFileCount exceeds="10"/>
</IfAny>
</IfLastModified>
</IfFileName>
</Delete>
</DefaultRolloverStrategy>
</RollingRandomAccessFile>
<!-- independent log file for new ha interface, for use useOuterHa only
<RollingFile name="ha_log" fileName="logs/ha.log"
filePattern="logs/$${date:yyyy-MM}/ha-%d{MM-dd}-%i.log.gz">
<PatternLayout>
<Pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %5p [%t] (%l) - %m%n</Pattern>
</PatternLayout>
<Policies>
<OnStartupTriggeringPolicy/>
<SizeBasedTriggeringPolicy size="250 MB"/>
<TimeBasedTriggeringPolicy/>
</Policies>
<DefaultRolloverStrategy max="10"/>
</RollingFile>
-->
<!-- <RollingFile name="DumpFileLog" fileName="logs/dump.log"-->
<!-- filePattern="logs/$${date:yyyy-MM}/dump-%d{MM-dd}-%i.log.gz">-->
<!-- <PatternLayout>-->
<!-- <Pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %5p [%t] (%l) - %m%n</Pattern>-->
<!-- </PatternLayout>-->
<!-- <Policies>-->
<!-- <OnStartupTriggeringPolicy/>-->
<!-- <SizeBasedTriggeringPolicy size="250 MB"/>-->
<!-- <TimeBasedTriggeringPolicy/>-->
<!-- </Policies>-->
<!-- <DefaultRolloverStrategy max="10"/>-->
<!-- </RollingFile>-->
<!--independent log file for ddl trace
<RollingFile name="DDL_TRACE" fileName="logs/ddl.log"
filePattern="logs/$${date:yyyy-MM}/ddl-%d{MM-dd}-%i.log.gz">
<PatternLayout>
<Pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %5p [%t] (%l) - %m%n</Pattern>
</PatternLayout>
<Policies>
<OnStartupTriggeringPolicy/>
<SizeBasedTriggeringPolicy size="250 MB"/>
<TimeBasedTriggeringPolicy/>
</Policies>
<DefaultRolloverStrategy max="10"/>
</RollingFile>
-->
</Appenders>
<Loggers>
<!-- independent log file for new ha interface, for use useOuterHa only
<Logger name="ha_log" additivity="false" includeLocation="false" >
<AppenderRef ref="ha_log" />
<AppenderRef ref="RollingFile"/>
</Logger>-->
<!--independent log file for ddl trace
<Logger name="DDL_TRACE" additivity="false" includeLocation="false">
<AppenderRef ref="DDL_TRACE"/>
<AppenderRef ref="Console"/>
<AppenderRef ref="RollingFile"/>
</Logger> -->
<!-- <Logger name="dumpFileLog" additivity="false" includeLocation="false" >-->
<!-- <AppenderRef ref="DumpFileLog" />-->
<!-- <AppenderRef ref="RollingFile"/>-->
<!-- </Logger>-->
<asyncRoot level="info" includeLocation="true">
<!--<AppenderRef ref="Console" />-->
<AppenderRef ref="RollingFile"/>
</asyncRoot>
</Loggers>
</Configuration>
user.xml
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE dble:user SYSTEM "user.dtd">
<dble:user xmlns:dble="http://dble.cloud/" version="4.0">
<managerUser name="admin" password="admin"/>
<!-- <shardingUser name="test" password="123456" schemas="testdb" readOnly="false" maxCon="20"/> -->
<rwSplitUser name="root" password="password" dbGroup="host_1" maxCon="64"/>
</dble:user>
Wrapper.conf
#********************************************************************
# Wrapper Properties
#********************************************************************
# Java Application
wrapper.java.command=java
wrapper.working.dir=..
# Java Main class. This class must implement the WrapperListener interface
# or guarantee that the WrapperManager class is initialized. Helper
# classes are provided to do this for you. See the Integration section
# of the documentation for details.
wrapper.java.mainclass=org.tanukisoftware.wrapper.WrapperSimpleApp
set.default.REPO_DIR=lib
set.APP_BASE=.
# Java Classpath (include wrapper.jar) Add class path elements as
# needed starting from 1
wrapper.java.classpath.1=lib/wrapper.jar
wrapper.java.classpath.2=conf
wrapper.java.classpath.3=%REPO_DIR%/*
wrapper.java.classpath.4=algorithm/*
# Java Library Path (location of Wrapper.DLL or libwrapper.so)
wrapper.java.library.path.1=lib
# Java Additional Parameters
#wrapper.java.additional.1=
# Initial Java Heap Size (in MB)
#wrapper.java.initmemory=3
# Maximum Java Heap Size (in MB)
#wrapper.java.maxmemory=64
# Application parameters. Add parameters as needed starting from 1
wrapper.app.parameter.1=com.actiontech.dble.DbleStartup
wrapper.app.parameter.2=start
#********************************************************************
# Wrapper Logging Properties
#********************************************************************
# Format of output for the console. (See docs for formats)
wrapper.console.format=PM
# Log Level for console output. (See docs for log levels)
wrapper.console.loglevel=INFO
# Log file to use for wrapper output logging.
wrapper.logfile=logs/wrapper.log
# Format of output for the log file. (See docs for formats)
wrapper.logfile.format=LPTM
# Log Level for log file output. (See docs for log levels)
wrapper.logfile.loglevel=INFO
# Maximum size that the log file will be allowed to grow to before
# the log is rolled. Size is specified in bytes. The default value
# of 0, disables log rolling. May abbreviate with the 'k' (kb) or
# 'm' (mb) suffix. For example: 10m = 10 megabytes.
wrapper.logfile.maxsize=0
# Maximum number of rolled log files which will be allowed before old
# files are deleted. The default value of 0 implies no limit.
wrapper.logfile.maxfiles=0
# Log Level for sys/event log output. (See docs for log levels)
wrapper.syslog.loglevel=NONE
#********************************************************************
# Wrapper Windows Properties
#********************************************************************
# Title to use when running as a console
wrapper.console.title=dble-server
#********************************************************************
# Wrapper Windows NT/2000/XP Service Properties
#********************************************************************
# WARNING - Do not modify any of these properties when an application
# using this configuration file has been installed as a service.
# Please uninstall the service before modifying this section. The
# service can then be reinstalled.
# Name of the service
wrapper.ntservice.name=dble
# Display name of the service
wrapper.ntservice.displayname=dble-server
# Description of the service
wrapper.ntservice.description=The project of dble-server
# Service dependencies. Add dependencies as needed starting from 1
wrapper.ntservice.dependency.1=
# Mode in which the service is installed. AUTO_START or DEMAND_START
wrapper.ntservice.starttype=AUTO_START
# Allow the service to interact with the desktop.
wrapper.ntservice.interactive=false
configuration.directory.in.classpath.last=algorithm/*
wrapper.java.additional_file=conf/bootstrap.cnf
wrapper.ping.timeout=120
configuration.directory.in.classpath.first=conf
wrapper.pidfile=$BASEDIR
mysql-conf
my.cnf
[mysqld]
# 该字段控制时间字段使用 CURRENT_TIMESTAMP 是否可以工作
explicit_defaults_for_timestamp=0
# 最大连接数
max_connections = 4001
# 表名存储为给定的大小和比较:
# 0-表名存储为给定的大小和比较是区分大小写的
# 1-在磁盘是小写的,但是比较的时候是不区分大小写
# 2-表名存储为给定的大小写但是比较的时候是小写的
lower_case_table_names=1
# 修改时区
default-time-zone = '+08:00'
#禁用DNS反向解析配
skip-name-resolve
#skip-host-cache
#慢日志时间设置/ 秒级
long_query_time=0.2
#开通慢sql
slow_query_log=1
#慢sql日志文件
slow_query_log_file=/var/lib/mysql/mysql-slow.log
#mysql的同步的数据中是包含server-id的,
#用于标识该语句最初是从哪个server写入的
server-id=1
log-bin=/var/lib/mysql/mysql-bin
symbolic-links=0
binlog_format=ROW
innodb_large_prefix=on
innodb_file_format=BARRACUDA
innodb_buffer_pool_size=12288M
innodb_buffer_pool_instances=8
innodb_write_io_threads=4
innodb_read_io_threads=4
zoo-conf
zoo.cfg
dataDir=/data
dataLogDir=/datalog
tickTime=2000
initLimit=5
syncLimit=2
autopurge.snapRetainCount=3
autopurge.purgeInterval=0
maxClientCnxns=60
standaloneEnabled=true
admin.enableServer=true
server.1=zoo1:2888:3888;2181
server.2=zoo2:2888:3888;2181
server.3=zoo3:2888:3888;2181
zoo.cfg.dynamic.next
server.1=zoo1:2888:3888:participant;0.0.0.0:2181
server.2=zoo2:2888:3888:participant;0.0.0.0:2181
server.3=zoo3:2888:3888:participant;0.0.0.0:2181
version=100000000
docker-compose.yml
docker-compose.yml
# docker-compose for a 3-node ZooKeeper ensemble, 2 MySQL backends and
# 2 dble nodes. Indentation restored (it was lost in the paste) and the
# dble1 depends_on entries fixed: they referenced nonexistent services
# zk1/zk2/zk3 instead of zoo1/zoo2/zoo3, which makes compose refuse to start.
version: '3.8'
services:
  zoo1:
    container_name: zoo1
    image: zookeeper:3.5.7
    restart: always
    hostname: zoo1
    volumes:
      - ./zoo1/conf:/conf
      - ./zoo1/data:/data
      - ./zoo1/datalog:/datalog
    ports:
      - 2181:2181
    environment:
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181
  zoo2:
    container_name: zoo2
    image: zookeeper:3.5.7
    restart: always
    hostname: zoo2
    volumes:
      - ./zoo2/conf:/conf
      - ./zoo2/data:/data
      - ./zoo2/datalog:/datalog
    ports:
      - 2182:2181
    environment:
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181
  zoo3:
    image: zookeeper:3.5.7
    container_name: zoo3
    restart: always
    hostname: zoo3
    volumes:
      - ./zoo3/conf:/conf
      - ./zoo3/data:/data
      - ./zoo3/datalog:/datalog
    ports:
      - 2183:2181
    environment:
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181
  mysql1:
    image: mysql:5.7
    container_name: mysql1
    restart: always
    hostname: mysql1
    volumes:
      - ./mysql1/conf:/etc/mysql/conf.d
      - ./mysql1/logs:/logs
      - ./mysql1/data:/var/lib/mysql
    ports:
      - 3307:3306
    environment:
      MYSQL_ROOT_PASSWORD: password
    command: [
      'mysqld',
      '--innodb-buffer-pool-size=80M',
      '--character-set-server=utf8mb4',
      '--collation-server=utf8mb4_unicode_ci',
      '--default-time-zone=+8:00',
      '--lower-case-table-names=1',
      '--max_connections=1024'
    ]
  mysql2:
    image: mysql:5.7
    container_name: mysql2
    restart: always
    hostname: mysql2
    volumes:
      - ./mysql2/conf:/etc/mysql/conf.d
      - ./mysql2/logs:/logs
      - ./mysql2/data:/var/lib/mysql
    ports:
      - 3308:3306
    environment:
      MYSQL_ROOT_PASSWORD: password
    command: [
      'mysqld',
      '--innodb-buffer-pool-size=80M',
      '--character-set-server=utf8mb4',
      '--collation-server=utf8mb4_unicode_ci',
      '--default-time-zone=+8:00',
      '--lower-case-table-names=1',
      '--max_connections=1024'
    ]
  dble1:
    image: actiontech/dble:latest
    hostname: dble1
    container_name: dble1
    privileged: true
    stdin_open: true
    tty: true
    ports:
      - 8066:8066
    volumes:
      - ./start.sh:/opt/dble/bin/start.sh
      - ./dble1/conf/user.xml:/opt/dble/conf/user.xml:rw
      - ./dble1/conf/db.xml:/opt/dble/conf/db.xml:rw
      - ./dble1/conf/cluster.cnf:/opt/dble/conf/cluster.cnf:rw
      - ./dble1/conf/bootstrap.cnf:/opt/dble/conf/bootstrap.cnf:rw
      - ./dble1/logs/:/opt/dble/logs/:rw
    command: ["sh", "/opt/dble/bin/start.sh"]
    depends_on:
      - mysql1
      - mysql2
      - zoo1
      - zoo2
      - zoo3
  dble2:
    image: actiontech/dble:latest
    privileged: true
    stdin_open: true
    tty: true
    container_name: dble2
    hostname: dble2
    ports:
      - 8067:8066
    volumes:
      - ./start.sh:/opt/dble/bin/start.sh
      - ./dble2/conf/user.xml:/opt/dble/conf/user.xml:rw
      - ./dble2/conf/db.xml:/opt/dble/conf/db.xml:rw
      - ./dble2/conf/cluster.cnf:/opt/dble/conf/cluster.cnf:rw
      - ./dble2/conf/bootstrap.cnf:/opt/dble/conf/bootstrap.cnf:rw
      - ./dble2/logs/:/opt/dble/logs/:rw
    command: ["sh", "/opt/dble/bin/start.sh"]
    depends_on:
      - mysql1
      - mysql2
      - zoo1
      - zoo2
      - zoo3