GBase8s Informix Docker 高可用集群自恢复集群启动命令oninitdb的设计与实现

自恢复集群启动命令oninitdb的设计与实现:

  • 出发点

    • 解决gbase8s集群在某节点宕机后,拉起数据库不能自动恢复集群的问题。
  • 解题思路

    • 用oninitdb命令替代oninit命令

创建gbase01/02/03/cm1/cm2容器

Dockerfile参考

# Base image for every container in the cluster (3 DB nodes + 2 CMs).
FROM centos:7.8.2003
# Create the gbasedbt user/group (uid/gid 1000) with a fixed password.
RUN groupadd -g 1000 gbasedbt && \
    useradd -g gbasedbt -u 1000 -d /home/gbase -m -s /bin/bash gbasedbt && \
     echo "gbasedbt:GBase123" | chpasswd
# Pre-baked environment file and JDBC driver.
COPY profile /home/gbase/.bash_profile
COPY gbasedbtjdbc_3.3.0_2.jar /home/gbase/gbasedbtjdbc_3.3.0_2.jar
# ADD auto-extracts the server + CSDK tarball under /opt.
ADD GBase8sV8.8_3.3.0_2CSDK.tar.xz /opt
# 9088 is the DB listener (per the notes below); 9200/9300 usage is not
# shown in this article -- TODO confirm.
EXPOSE 9088 9200 9300
# Start the instance as gbasedbt, then keep an interactive shell.
CMD su - gbasedbt -c "oninit" && /bin/bash

docker run

# One container per DB node. Host port x8080 -> container 8080 (info5 REST
# API); x9088 -> container x9088 (DB listener, see sqlhosts below).
docker run -p 18080:8080 -p 19088:19088 --name gbase001 -itd docker.io/liaosnet/gbase8s /bin/bash
docker run -p 28080:8080 -p 29088:29088 --name gbase002 -itd docker.io/liaosnet/gbase8s /bin/bash
docker run -p 38080:8080 -p 39088:39088 --name gbase003 -itd docker.io/liaosnet/gbase8s /bin/bash
# Connection-manager containers; x9099 is the CM proxy port clients connect to.
docker run -p 19099:19099 --name cm1 -itd docker.io/liaosnet/gbase8s /bin/bash
docker run -p 29099:29099 --name cm2 -itd docker.io/liaosnet/gbase8s /bin/bash

说明: 
	1、  .8080 端口用作集群信息通信,和集群管理命令发布。
    2、  .9088 端口用作数据库监听
    3、  .9099 端口用作连接管理器,业务可以通过这两个端口可以自动连接到数据库集群的主节点。(数据库主节点代理端口)

.bash_profile

# .bash_profile
# Per-user environment for the gbasedbt account inside each container.

# Get the aliases and functions
if [ -f ~/.bashrc ]; then
    . ~/.bashrc
fi

# User specific environment and startup programs

PATH=$PATH:$HOME/.local/bin:$HOME/bin

export PATH
# GBase 8s installation root and instance identity.
export GBASEDBTDIR=/opt/gbase
# Instance name -- g01.init.instance.name.sh rewrites this line per container.
export GBASEDBTSERVER=gbase001
export ONCONFIG=onconfig.$GBASEDBTSERVER
export PATH=$GBASEDBTDIR/bin:${PATH}
export GBASEDBTSQLHOSTS=/opt/gbase/etc/sqlhosts
export LD_LIBRARY_PATH=$GBASEDBTDIR/lib:$GBASEDBTDIR/lib/cli:$GBASEDBTDIR/lib/esql:$LD_LIBRARY_PATH

# Locale and client-side display defaults.
export DB_LOCALE=zh_CN.utf8
export CLIENT_LOCALE=zh_CN.utf8
export GL_USEGLU=1
export DBDATE=Y4MD-
export DBACCESS_SHOW_TIME=1
# Prompt shows user[instance]; also rewritten by g01.init.instance.name.sh.
export PS1=gbasedbt'[gbase001]$'

sqlhosts

gbase001 onsoctcp 1.1.1.3 19088
gbase002 onsoctcp 1.1.1.3 29088
gbase003 onsoctcp 1.1.1.3 39088

sqlhosts.cm

cat <<! > $GBASEDBTDIR/etc/sqlhosts.cm
db_group        group   -       -       i=1
gbase001    onsoctcp  1.1.1.3 19088 g=db_group
gbase002    onsoctcp  1.1.1.3 29088 g=db_group
gbase003    onsoctcp  1.1.1.3 39088 g=db_group

cm_update       group   -       -       i=2
oltp_update1    onsoctcp 1.1.1.3      19099 g=cm_update
oltp_update2    onsoctcp 1.1.1.3      29099 g=cm_update
!

构建HDR和RSS集群

root shell

​ 统一目录: /root

调整容器root环境:r01.set.os.sh
#!/bin/bash
# r01.set.os.sh -- run as root INSIDE each container:
#   docker exec -it gbase001 /bin/bash
#   ./r01.set.os.sh
# Hands .bash_profile to gbasedbt and raises nproc/nofile limits
# (fixes the "-bash: fork: retry: No child processes" failure).

chown gbasedbt:gbasedbt /home/gbase/.bash_profile

# Delete any entries from a previous run so the append below stays idempotent.
sed -i '/gbasedbt soft nproc 65535/d' /etc/security/limits.conf
sed -i '/gbasedbt hard nproc 65535/d' /etc/security/limits.conf
sed -i '/gbasedbt soft nofile 1048576/d' /etc/security/limits.conf
sed -i '/gbasedbt hard nofile 1048576/d' /etc/security/limits.conf

sed -i '/root soft nproc 65535/d' /etc/security/limits.conf
sed -i '/root hard nproc 65535/d' /etc/security/limits.conf
sed -i '/root soft nofile 1048576/d' /etc/security/limits.conf
sed -i '/root hard nofile 1048576/d' /etc/security/limits.conf


# Append the limits. (Bug fix: the root soft nofile line previously read
# "1048576g", which is not a valid limit value.)
cat >> /etc/security/limits.conf << EOF

gbasedbt soft nproc 65535
gbasedbt hard nproc 65535
gbasedbt soft nofile 1048576
gbasedbt hard nofile 1048576
root soft nproc 65535
root hard nproc 65535
root soft nofile 1048576
root hard nofile 1048576
EOF

# NOTE(review): the purpose of these marker files is not shown in this
# article -- presumably GBase/Informix trust markers; confirm before removing.
touch /gbasedbt
touch /root/gbasedbt
# Usage (kept as a comment -- an uncommented self-invocation at the end of
# the script would recurse forever):
# ./r01.set.os.sh

gbasedbt shell

​ 统一目录: /home/gbase

修改实例名称: g01.init.instance.name.sh
#!/bin/bash
# g01.init.instance.name.sh -- run as the gbasedbt user inside a container to
# rename the instance and regenerate its onconfig and sqlhosts.
#   $1 = instance name (e.g. gbase001)
#   $2 = host IP the other containers use to reach this one
# Example:
#   docker exec -it gbase001 /bin/bash ; su - gbasedbt
#   sh g01.init.instance.name.sh gbase001 1.1.1.3

INSTANCE=$1
IPADDR=$2

# Point GBASEDBTSERVER at the new instance name and rebuild the prompt.
sed -i "s/^export GBASEDBTSERVER=.*$/export GBASEDBTSERVER=$INSTANCE/" ~/.bash_profile
sed -i '/export PS1/d' ~/.bash_profile
echo "export PS1=$(whoami)'[$INSTANCE]\$'" >> ~/.bash_profile
. ~/.bash_profile

# Clone the template onconfig and adjust it for this instance.
# DRAUTO 3 = automatic failover; LOG_INDEX_BUILDS is required for RSS.
cp "$GBASEDBTDIR/etc/onconfig.gbase01" "$GBASEDBTDIR/etc/$ONCONFIG"
sed -i "s/^DRAUTO.*$/DRAUTO 3/" "$GBASEDBTDIR/etc/$ONCONFIG"
sed -i "s/DBSERVERNAME gbase01/DBSERVERNAME $INSTANCE/" "$GBASEDBTDIR/etc/$ONCONFIG"
sed -i "s/^LOG_INDEX_BUILDS.*$/LOG_INDEX_BUILDS 1/" "$GBASEDBTDIR/etc/$ONCONFIG"

# Regenerate sqlhosts: peers are reached via the host IP and mapped ports.
cat > "$GBASEDBTDIR/etc/sqlhosts" << EOF
gbase001 onsoctcp $IPADDR 19088
gbase002 onsoctcp $IPADDR 29088
gbase003 onsoctcp $IPADDR 39088
EOF
# Rewrite this instance's own entry so it listens locally on 0.0.0.0.
# (Direct sed call -- no need to pipe generated command text through sh.)
sed -i "s/^$INSTANCE onsoctcp $IPADDR/$INSTANCE onsoctcp 0.0.0.0/" "$GBASEDBTDIR/etc/sqlhosts"

# Usage:
# sh g01.init.instance.name.sh gbase001 1.1.1.3
# . .bash_profile
# oninit -v

0级别备份主节点: g02.ontape.s.L0.sh
#!/bin/bash
# g02.ontape.s.L0.sh -- take a level-0 ontape backup of the primary into ./0b.
# Run as the gbasedbt user inside the container:
#   docker exec -it gbase001 /bin/bash
#   su - gbasedbt

# Recreate the backup target; ontape -t expects an existing writable file.
rm -f 0b
touch 0b
chmod 777 0b
ontape -s -L 0 -t 0b

宿主机分发0b数据到2个备节点: s01.docker.cp.sh

​ 统一目录: 宿主机的 /root

# s01.docker.cp.sh -- run on the Docker host: distribute the level-0 backup
# from the primary container to both secondaries via /tmp.
docker cp gbase001:/home/gbase/0b /tmp/
docker cp /tmp/0b gbase002:/home/gbase/
docker cp /tmp/0b gbase003:/home/gbase/

备节点物理恢复ontap: g02.ontape.p.sh
# g02.ontape.p.sh -- on each secondary: physically restore from the copied
# level-0 backup. NOTE(review): `su - gbasedbt` opens an interactive shell,
# so as written `ontape -p` only runs after that shell exits -- this reads
# like a manual transcript, not a non-interactive script.
chown gbasedbt. /home/gbase/0b
su - gbasedbt
ontape -p -t 0b
# answer ontape's prompts: y n n

建立3节点集群RSS
# On every node: trust all remote hosts/users so cluster peers can connect
# (avoids the err -956 "not trusted by the server" failure noted below).
cat > ~/.rhosts << EOF
+
EOF


# On gbase001 (primary): register the two RSS secondaries.
onmode -d add RSS gbase002
onmode -d add RSS gbase003
# On gbase002 (RSS secondary): point it at the primary.
onmode -d RSS gbase001
# On gbase003 (RSS secondary): point it at the primary.
onmode -d RSS gbase001


# Inspect the cluster state.
onstat -g cluster
onstat -g dri
onstat -g rss


构建CM集群

  1. 构建cm,是为了让集群自动故障切换。
  2. 插件启动程序oninitdb,是为了让节点在宕机后启动时自动恢复集群。
  3. 业务只需要连接 cm_update ,就可以保证不管是cm1/cm2坏了,还是gbase01/gbase02/gbase03坏了,业务都正常。

CM1和CM2环境配置

#这两个容器上面已经建立了
docker run -p 19099:19099 --name cm1 -itd docker.io/liaosnet/gbase8s /bin/bash
docker run -p 29099:29099 --name cm2 -itd docker.io/liaosnet/gbase8s /bin/bash

#修改这两个容器里面的.profile环境变量
export GBASEDBTSERVER=gbase001
export GBASEDBTSERVER=gbase001
export GBASEDBTSQLHOSTS=/opt/gbase/etc/sqlhosts.cm
export GBASEDBTSQLHOSTS=/opt/gbase/etc/sqlhosts.cm
export PS1=`whoami`'[cm1]\$'
export PS1=`whoami`'[cm2]\$'


CM1配置文件:sqlhosts.cm
cat <<! > $GBASEDBTDIR/etc/sqlhosts.cm
db_group        group   -       -       i=1
gbase001    onsoctcp  1.1.1.3 19088 g=db_group
gbase002    onsoctcp  1.1.1.3 29088 g=db_group
gbase003    onsoctcp  1.1.1.3 39088 g=db_group

cm_update       group   -       -       i=2
oltp_update1    onsoctcp 0.0.0.0      19099 g=cm_update
oltp_update2    onsoctcp 1.1.1.3      29099 g=cm_update
!

CM1配置文件:cm1.cfg
cat <<! >$GBASEDBTDIR/etc/cm1.cfg
NAME            cm1
LOGFILE         ${GBASEDBTDIR}/cm1.log
CM_TIMEOUT 30
SECONDARY_EVENT_TIMEOUT 30
SQLHOSTS LOCAL
LOCAL_IP 1.1.1.3
EVENT_TIMEOUT 30
LOG 1
DEBUG 0
CLUSTER CLUSTER1
{
GBASEDBTSERVER       db_group
SLA oltp_update1 DBSERVERS=PRI
FOC ORDER=ENABLED TIMEOUT=1 RETRY=2 PRIORITY=98
}
!

启动CM1
oncmsm -c $GBASEDBTDIR/etc/cm1.cfg

CM2配置文件:sqlhosts.cm
cat <<! > $GBASEDBTDIR/etc/sqlhosts.cm
db_group        group   -       -       i=1
gbase001    onsoctcp  1.1.1.3 19088 g=db_group
gbase002    onsoctcp  1.1.1.3 29088 g=db_group
gbase003    onsoctcp  1.1.1.3 39088 g=db_group

cm_update       group   -       -       i=2
oltp_update1    onsoctcp 1.1.1.3      19099 g=cm_update
oltp_update2    onsoctcp 0.0.0.0      29099 g=cm_update
!

CM2配置文件:cm2.cfg
cat <<! >$GBASEDBTDIR/etc/cm2.cfg
NAME            cm2
LOGFILE         ${GBASEDBTDIR}/cm2.log
CM_TIMEOUT 30
SECONDARY_EVENT_TIMEOUT 30
SQLHOSTS LOCAL
LOCAL_IP 1.1.1.3
EVENT_TIMEOUT 30
LOG 1
DEBUG 0
CLUSTER CLUSTER2
{
GBASEDBTSERVER       db_group
SLA oltp_update2 DBSERVERS=PRI
FOC ORDER=ENABLED TIMEOUT=1 RETRY=2 PRIORITY=99
}
!

启动CM2
oncmsm -c $GBASEDBTDIR/etc/cm2.cfg

配置完成结果

gbasedbt[gbase01]$onstat -g cmsm
On-Line (Prim) -- Up 02:22:30 -- 833360 Kbytes
Unified Connection Manager: cm1                      Hostname: gateway

CLUSTER         CLUSTER1        LOCAL
        GBasedbt Servers: db_group
        SLA                    Connections   Service/Protocol   Rule
        oltp_update1                     1      9099/onsoctcp   DBSERVERS=PRI

        Failover Arbitrator: Active Arbitrator, Primary is up
        ORDER=SDS,HDR,RSS PRIORITY=98 TIMEOUT=1

Unified Connection Manager: cm2                      Hostname: gateway

CLUSTER         CLUSTER2        LOCAL
        GBasedbt Servers: db_group
        SLA                    Connections   Service/Protocol   Rule
        oltp_update2                     0      9099/onsoctcp   DBSERVERS=PRI

        Failover Arbitrator: Failover is enabled
        ORDER=SDS,HDR,RSS PRIORITY=99 TIMEOUT=1


gbasedbt[gbase01]$

关闭cm1测试结果

gbasedbt[cm1]$oncmsm -k cm1
Shut down Connection Manager cm1
gbasedbt[cm1]$

gbasedbt[gbase01]$onstat -g cmsm
On-Line (Prim) -- Up 02:27:14 -- 833360 Kbytes
Unified Connection Manager: cm2                      Hostname: gateway

CLUSTER         CLUSTER2        LOCAL
        GBasedbt Servers: db_group
        SLA                    Connections   Service/Protocol   Rule
        oltp_update2                     1      9099/onsoctcp   DBSERVERS=PRI

        Failover Arbitrator: Active Arbitrator, Primary is up
        ORDER=SDS,HDR,RSS PRIORITY=99 TIMEOUT=1


gbasedbt[gbase01]$

其他备注

#遇到错误
gbasedbt[gbase01]$onstat -
-bash: fork: retry: No child processes
-bash: fork: retry: No child processes

#处理办法,到docker容器里面配置。当然宿主机也要配置。
[root@localhost ~]#vi /etc/security/limits.conf
gbasedbt	soft	nproc	65535
gbasedbt	hard	nproc	65535
gbasedbt	soft	nofile	1048576
gbasedbt	hard	nofile	1048576
root	soft	nproc	65535
root	hard	nproc	65535
root	soft	nofile	1048576
root	hard	nofile	1048576

#rss的日志里面报错:
05:52:59  listener-thread: err = -956: oserr = 0: errstr = gbasedbt@gateway[7178077f54c9]: Client host or user gbasedbt@gateway[7178077f54c9] is not trusted by the server.
05:53:07  listener-thread: err = -956: oserr = 0: errstr = gbasedbt@gateway[ac5159d24585]: Client host or user gbasedbt@gateway[ac5159d24585] is not trusted by the server.
05:53:12  listener-thread: err = -956: oserr = 0: errstr = gbasedbt@gateway[ac5159d24585]: Client host or user gbasedbt@gateway[ac5159d24585] is not trusted by the server.
#同时cm日志里面报错
05:52:26 The server type of cluster CLUSTER1 server gbase03 is RSS.
05:52:39 The server type of cluster CLUSTER1 server gbase03 is RSS.
05:52:59 The server type of cluster CLUSTER1 server gbase03 is RSS.
...两个cm都报这个
05:51:03 The server type of cluster CLUSTER2 server gbase03 is RSS.
05:51:09 The server type of cluster CLUSTER2 server gbase03 is RSS.
05:51:22 The server type of cluster CLUSTER2 server gbase03 is RSS.

#处理方法添加.rhosts
05:56:39 Connection Manager successfully connected to gbase03

编写插件启动程序–oninitdb

​ 此时重复上面的测试关闭001的docker,再次启动docker,可以启动,但是不能oninit启动数据库。因为数据库集群里面已经有了新的主节点。此时只需要写oninitdb插件启动脚本,用它来替代oninit即可。

配置插件启动程序的前提环境

​ 容器间的命令和信息如何共享,不同宿主机,用docker cp 路径太长, 用ssh需要容器内配置sshd服务, 都不是很好。 那么用REST API吧。

程序和API设计

每个节点暴露自己的信息 (info5)

servername:
mode:
running:

节点间API传递命令 (info5)

/primary
/secondary
/addrss
/rss
/phy
/killy

oninitdb启动插件 (oninitdb)

​ 根据上面暴露的信息,判断应该如何启动并加入集群;再调用api实现之。

逐一访问每个节点暴露的信息
知道001挂了
知道002是主
知道003是rss
去PHY启动001
去002指定添加一个rss
去001指定角色为rss
等待集群恢复完成

程序和API实现

info5 web api 服务(把此info5服务设置为 随docker启动)

package main

import (
	"bytes"
	"fmt"
	"os/exec"

	"github.com/gin-gonic/gin"
)

// Info holds the most recent node status sampled by the qu.info5.*.sh
// helper scripts (refreshed by the /info handler in main).
type Info struct {
	servername string // value of $GBASEDBTSERVER on this node
	mode       string // HA role from `onstat -g rss`, or "unknown"
	running    string // "OK" when a local dbaccess probe succeeds, else "ERR"
}

// info is the single package-wide status cache, refreshed on every /info
// request.
var info = &Info{
	servername: "",
	mode:       "",
	running:    "",
}

// main serves the per-node info5 REST API on :8080.
//   GET /info            -- report this node's name, HA mode and health
//   GET /addrss?my=NAME  -- on the primary: register NAME as an RSS secondary
func main() {

	r := gin.Default()

	// Refresh and report this node's identity, HA mode and health.
	r.GET("/info", func(c *gin.Context) {
		info.servername = Shell("qu.info5.servername.sh")
		info.mode = Shell("qu.info5.mode.sh")
		info.running = Shell("qu.info5.running.sh")
		c.JSON(200, gin.H{
			"servername": info.servername,
			"mode":       info.mode,
			"running":    info.running,
		})
	})

	// Register the caller as an RSS secondary via `onmode -d add RSS <my>`.
	r.GET("/addrss", func(ctx *gin.Context) {
		my := ctx.Query("my")
		if my == "" {
			ctx.String(200, "my is null")
			return
		}
		cmd := exec.Command("onmode", "-d", "add", "RSS", my)
		var stdout, stderr bytes.Buffer
		cmd.Stdout = &stdout // capture stdout
		cmd.Stderr = &stderr // capture stderr
		err := cmd.Run()
		fmt.Printf("out:\n%s\nerr:\n%s\n", stdout.String(), stderr.String())
		// BUG FIX: the success/failure branches were inverted. oninitdb
		// string-matches "run addrss ok", so report ok only when the
		// command actually succeeded.
		if err != nil {
			fmt.Printf("cmd.Run() failed with %s\n", err)
			ctx.String(200, "run addrss err")
		} else {
			ctx.String(200, "run addrss ok")
		}
	})

	r.Run(":8080")

}

// Shell runs `sh <shellfile>`, echoes the captured stdout/stderr to this
// process's stdout for logging, and returns whatever the script wrote to
// stdout (empty when the script could not run).
func Shell(shellfile string) string {
	var outBuf, errBuf bytes.Buffer
	proc := exec.Command("sh", shellfile)
	proc.Stdout = &outBuf
	proc.Stderr = &errBuf
	runErr := proc.Run()
	fmt.Printf("out:\n%s\nerr:\n%s\n", outBuf.String(), errBuf.String())
	if runErr != nil {
		fmt.Printf("cmd.Run() failed with %s\n", runErr)
	}
	return outBuf.String()
}


oninitdb 命令(把此oninitdb加入docker里/opt/gbase/bin/目录)

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"os/exec"
)

type Info struct {
	Servername string `JSON:"servername"`
	Mode       string `JSON:"mode"`
	Running    string `JSON:"running"`
}

var infoList [3]Info

// web describes how to reach one node's info5 REST service.
type web struct {
	servername string // instance name, must match the /info "servername" field
	ip         string // Docker host IP
	port       string // host port mapped to the container's :8080
}

// webList is the static three-node topology (hard-coded; see the closing
// notes about generalizing this).
var webList [3]web

// init fills in the fixed host/port mapping for gbase001..gbase003.
func init() {
	webList[0].servername = "gbase001"
	webList[0].ip = "1.1.1.3"
	webList[0].port = "18080"

	webList[1].servername = "gbase002"
	webList[1].ip = "1.1.1.3"
	webList[1].port = "28080"

	webList[2].servername = "gbase003"
	webList[2].ip = "1.1.1.3"
	webList[2].port = "38080"
}

// main is the oninitdb startup plugin: when this node was down it restarts
// the instance in physical-recovery mode, asks the current primary (via the
// info5 API) to add it back as an RSS secondary, and rejoins the cluster.
func main() {

	// Who am I? (instance name from the local helper script)
	fmt.Println("我是:")
	my := Shell("qu.info5.servername.sh")
	myrunning := ""
	zhu := "null" // primary server name; "null" means none found
	zhuip := ""
	zhuport := ""

	// Poll every node's /info endpoint.
	info()

	// If this node is already healthy there is nothing to do.
	for _, v := range infoList {
		if v.Servername == my {
			myrunning = v.Running
			break
		}
	}
	if myrunning == "OK" {
		fmt.Println("我运行好好的不用任何操作")
		return
	}

	// Locate the current healthy primary and its info5 endpoint.
	for _, v := range infoList {
		if v.Mode == "Primary" && v.Running == "OK" {
			zhu = v.Servername
			fmt.Println("主是:", zhu)
			break
		}
	}
	for _, v := range webList {
		if v.servername == zhu {
			zhuip = v.ip
			zhuport = v.port
			fmt.Println("主的webIP是:", zhuip)
			fmt.Println("主的webPort是:", zhuport)
			break
		}
	}

	// No live primary -> nothing to rejoin; the cluster design guarantees
	// one should normally exist.
	if zhu == "null" {
		fmt.Println("集群里面没有OK的主节点")
		return
	}

	// Clean up any leftover server processes before restarting.
	fmt.Println("清理残留")
	cmd := exec.Command("onclean", "-ky")
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout // capture stdout
	cmd.Stderr = &stderr // capture stderr
	err := cmd.Run()
	fmt.Printf("out:\n%s\nerr:\n%s\n", stdout.String(), stderr.String())
	if err != nil {
		fmt.Printf("cmd.Run() failed with %s\n", err)
	}

	// Bring the instance up in physical-recovery-only mode.
	cmd = exec.Command("oninit", "-PHY")
	stdout.Reset()
	stderr.Reset()
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	err = cmd.Run()
	fmt.Println("物理启动")
	if err != nil {
		fmt.Printf("cmd.Run() failed with %s\n", err)
	}

	// Ask the primary's info5 API to register us as an RSS secondary.
	fmt.Println("远程添加")
	u := "http://" + zhuip + ":" + zhuport + "/addrss?my=" + my
	fmt.Println(u)
	response, err := http.Get(u)
	if err != nil {
		// BUG FIX: the original fell through and dereferenced a nil
		// response after a failed GET; abort instead.
		fmt.Println("http.Get addrss 时遇到异常")
		return
	}
	buf, _ := ioutil.ReadAll(response.Body)
	response.Body.Close() // avoid leaking the connection
	s := string(buf)
	if s == "run addrss ok" {
		fmt.Println("run addrss ok")
	} else {
		fmt.Println("run addrss err")
	}

	// Join the cluster as an RSS secondary of the primary.
	cmd = exec.Command("onmode", "-d", "RSS", zhu)
	stdout.Reset()
	stderr.Reset()
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	err = cmd.Run()
	fmt.Println("加入集群")
	if err != nil {
		fmt.Printf("cmd.Run() failed with %s\n", err)
	}

	// Re-read cluster state so the operator can see the result.
	info()
}

// info polls every node's /info endpoint and stores the decoded reply in
// infoList (same index as webList). Unreachable nodes are reported and
// skipped, leaving their previous (or zero) entry in place.
func info() {
	fmt.Println("查看集群")
	for i, n := range webList {
		response, err := http.Get("http://" + n.ip + ":" + n.port + "/info")
		if err != nil {
			fmt.Println("info API 未正常提供服务,Docker可能已经异常。")
			continue
		}
		buf, _ := ioutil.ReadAll(response.Body)
		// BUG FIX: the response body was never closed (connection/fd leak).
		response.Body.Close()
		json.Unmarshal(buf, &infoList[i])
		fmt.Println(infoList[i])
	}
}

// Shell runs `sh <shellfile>`, logs the captured stdout/stderr, and returns
// the script's stdout (empty when the script could not run).
func Shell(shellfile string) string {
	var capturedOut, capturedErr bytes.Buffer
	script := exec.Command("sh", shellfile)
	script.Stdout = &capturedOut
	script.Stderr = &capturedErr
	execErr := script.Run()
	fmt.Printf("out:\n%s\nerr:\n%s\n", capturedOut.String(), capturedErr.String())
	if execErr != nil {
		fmt.Printf("cmd.Run() failed with %s\n", execErr)
	}
	return capturedOut.String()
}


API调用的脚本

取servername等3: cat > qu.info5.sh
# qu.info5.sh -- print all three info5 items, one "info5 <key> <value>" line each.

# servername: first word of $GBASEDBTSERVER (quoted; $() instead of backticks).
echo "$GBASEDBTSERVER" | awk '{print "info5 servername "$1}'

# mode: `onstat -g rss` emits <3 lines when the server is down / not clustered.
n=$(onstat -g rss | wc -l)
if [ "$n" -gt 2 ]; then
	onstat -g rss | grep "Local server type" | awk '{print "info5 mode "$4}'
else
	echo "info5 mode unknown"
fi

# running: probe with a trivial query; short connect timeouts keep a dead
# server from blocking the probe.
export INFORMIXCONTIME=3
export INFORMIXCONRETRY=0
export TMPRINTOFFSETS=3
export GBASEDBTCONTIME=3
export GBASEDBTCONRETRY=0
dbaccess sysmaster <<! >/dev/null 2>&1
	select first 1 1 from systables;
!
i=$?
if [ "$i" -eq 0 ]; then
	echo "info5 running OK"
else
	echo "info5 running ERR"
fi

取servername等1/3: cat > qu.info5.servername.sh
# qu.info5.servername.sh -- print the first word of $GBASEDBTSERVER, no trailing newline.
echo $GBASEDBTSERVER | awk '{printf("%s",$1)}'

取servername等2/3: cat > qu.info5.mode.sh
# qu.info5.mode.sh -- print the local HA role (e.g. Primary/RSS) with no
# trailing newline, or "unknown" when `onstat -g rss` has no real output
# (fewer than 3 lines = server down or not clustered).
n=$(onstat -g rss | wc -l)
if [ "$n" -gt 2 ]; then
	onstat -g rss | grep "Local server type" | awk '{printf("%s",$4)}'
else
	awk 'BEGIN {printf("%s","unknown")}'
fi

取servername等3/3: cat > qu.info5.running.sh
# qu.info5.running.sh -- probe the local instance with a trivial query and
# print "OK"/"ERR" (no trailing newline).
# Short connect timeouts keep the probe fast when the server is down.
export INFORMIXCONTIME=3
export INFORMIXCONRETRY=0
export TMPRINTOFFSETS=3
export GBASEDBTCONTIME=3
export GBASEDBTCONRETRY=0
# Run a minimal query; only the exit status matters.
dbaccess sysmaster <<! >/dev/null 2>&1
	select first 1 1 from systables;
!
i=$?
if [ $i -eq 0 ]; then
	awk 'BEGIN {printf("%s","OK")}'
else
	awk 'BEGIN {printf("%s","ERR")}'
fi

结语

  • 让info5进程随docker启动
  • 用oninitdb命令替代oninit命令
  • 实现了gbase8s集群的可扩展功能: 1、任意节点宕机后,集群正常有主节点对外提供服务; 2、宕机节点再拉起,自动加入集群,集群自动恢复。
  • 遗留问题: 1、这里写了3个节点的集群,有些命名写死了。 2、对于数据库损坏严重需要重新备份恢复的情况没有处理。3、如有需要再做更新。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值