文章目录
docker-compose 部署
三节点
+Keepalived
一、机器规划
准备三台机器,在每台机器上,计划各安装一个 etcd
| 机器IP | 节点 | 节点端口 |
| --- | --- | --- |
10.250.81.129 | etcd | 2379/2380 |
10.250.81.130 | etcd | 2379/2380 |
10.250.81.131 | etcd | 2379/2380 |
二、etcd 集群准备工作
创建 nfs 目录
在 nfs 服务的机器上创建挂载目录,etcd 容器中的 /bitnami/etcd 目录都持久化到 nfs 中
#!/bin/bash
# Create one NFS export directory per etcd member, open up permissions,
# register the exports, and reload the NFS export table.
mkdir -p /data/nfs/etcd{1,2,3}
chmod -R 777 /data/nfs
for member in etcd1 etcd2 etcd3; do
  echo "/data/nfs/${member} *(rw,sync,no_root_squash)" >> /etc/exports
done
exportfs -r
在三台机器上创建
docker-compose.yml
文件
- etcd1 docker-compose.yml 文件
version: "3"
services:
  etcd1:
    image: bitnami/etcd:latest
    container_name: etcd1
    restart: always
    ports:
      - "2379:2379"
      - "2380:2380"
    environment:
      # Explicit string: a bare `BITNAMI_DEBUG:` parses as YAML null.
      BITNAMI_DEBUG: "false"
      # Demo only -- no client authentication. Quoted so the YAML parser
      # does not coerce `yes` into a boolean (Compose env values must be
      # strings).
      ALLOW_NONE_AUTHENTICATION: "yes"
      ETCD_NAME: etcd1  # this member's own name
      ETCD_DATA_DIR: /bitnami/etcd/data  # data directory inside the container
      ETCD_LOG_LEVEL: INFO
      # NOTE(review): etcd docs expect each member to advertise only its own
      # client URL; listing all three is kept from the original -- confirm.
      ETCD_ADVERTISE_CLIENT_URLS: http://10.250.81.129:2379,http://10.250.81.130:2379,http://10.250.81.131:2379
      ETCD_LISTEN_CLIENT_URLS: http://0.0.0.0:2379  # client listen URLs
      ETCD_INITIAL_ADVERTISE_PEER_URLS: http://10.250.81.129:2380  # this member's peer URL
      ETCD_LISTEN_PEER_URLS: http://0.0.0.0:2380  # peer listen URLs
      ETCD_INITIAL_CLUSTER_TOKEN: etcd-cluster  # bootstrap token, same on all members
      ETCD_INITIAL_CLUSTER: etcd1=http://10.250.81.129:2380,etcd2=http://10.250.81.130:2380,etcd3=http://10.250.81.131:2380  # full member list
      ETCD_INITIAL_CLUSTER_STATE: new  # bootstrapping a brand-new cluster
    volumes:
      - volume-etcd:/bitnami/etcd
    deploy:
      resources:
        limits:
          cpus: '4'
          memory: 8G
        reservations:
          cpus: '2'
          memory: 4G
volumes:
  volume-etcd:
    driver_opts:
      type: nfs
      o: addr=10.250.81.131,nolock,soft,rw
      # BUG FIX: was ":/data/nfs/etcd0" -- the NFS script above exports
      # /data/nfs/etcd1 for this node (etcd2/etcd3 mount etcd2/etcd3).
      device: ":/data/nfs/etcd1"
- etcd2 docker-compose.yml 文件
version: "3"
services:
  etcd2:
    image: bitnami/etcd:latest
    container_name: etcd2
    restart: always
    ports:
      - "2379:2379"
      - "2380:2380"
    environment:
      # Explicit string: a bare `BITNAMI_DEBUG:` parses as YAML null.
      BITNAMI_DEBUG: "false"
      # Demo only -- no client authentication. Quoted so the YAML parser
      # does not coerce `yes` into a boolean.
      ALLOW_NONE_AUTHENTICATION: "yes"
      ETCD_NAME: etcd2  # this member's own name
      ETCD_DATA_DIR: /bitnami/etcd/data  # data directory inside the container
      ETCD_LOG_LEVEL: INFO
      # NOTE(review): etcd docs expect each member to advertise only its own
      # client URL; listing all three is kept from the original -- confirm.
      ETCD_ADVERTISE_CLIENT_URLS: http://10.250.81.129:2379,http://10.250.81.130:2379,http://10.250.81.131:2379
      ETCD_LISTEN_CLIENT_URLS: http://0.0.0.0:2379  # client listen URLs
      ETCD_INITIAL_ADVERTISE_PEER_URLS: http://10.250.81.130:2380  # this member's peer URL
      ETCD_LISTEN_PEER_URLS: http://0.0.0.0:2380  # peer listen URLs
      ETCD_INITIAL_CLUSTER_TOKEN: etcd-cluster  # bootstrap token, same on all members
      ETCD_INITIAL_CLUSTER: etcd1=http://10.250.81.129:2380,etcd2=http://10.250.81.130:2380,etcd3=http://10.250.81.131:2380  # full member list
      ETCD_INITIAL_CLUSTER_STATE: new  # bootstrapping a brand-new cluster
    volumes:
      - volume-etcd:/bitnami/etcd
    deploy:
      resources:
        limits:
          cpus: '4'
          memory: 8G
        reservations:
          cpus: '2'
          memory: 4G
volumes:
  volume-etcd:
    driver_opts:
      type: nfs
      o: addr=10.250.81.131,nolock,soft,rw
      device: ":/data/nfs/etcd2"
- etcd3 docker-compose.yml 文件
version: "3"
services:
  etcd3:
    image: bitnami/etcd:latest
    container_name: etcd3
    restart: always
    ports:
      - "2379:2379"
      - "2380:2380"
    environment:
      # Explicit string: a bare `BITNAMI_DEBUG:` parses as YAML null.
      BITNAMI_DEBUG: "false"
      # Demo only -- no client authentication. Quoted so the YAML parser
      # does not coerce `yes` into a boolean.
      ALLOW_NONE_AUTHENTICATION: "yes"
      ETCD_NAME: etcd3  # this member's own name
      ETCD_DATA_DIR: /bitnami/etcd/data  # data directory inside the container
      ETCD_LOG_LEVEL: INFO
      # NOTE(review): etcd docs expect each member to advertise only its own
      # client URL; listing all three is kept from the original -- confirm.
      ETCD_ADVERTISE_CLIENT_URLS: http://10.250.81.129:2379,http://10.250.81.130:2379,http://10.250.81.131:2379
      ETCD_LISTEN_CLIENT_URLS: http://0.0.0.0:2379  # client listen URLs
      ETCD_INITIAL_ADVERTISE_PEER_URLS: http://10.250.81.131:2380  # this member's peer URL
      ETCD_LISTEN_PEER_URLS: http://0.0.0.0:2380  # peer listen URLs
      ETCD_INITIAL_CLUSTER_TOKEN: etcd-cluster  # bootstrap token, same on all members
      ETCD_INITIAL_CLUSTER: etcd1=http://10.250.81.129:2380,etcd2=http://10.250.81.130:2380,etcd3=http://10.250.81.131:2380  # full member list
      ETCD_INITIAL_CLUSTER_STATE: new  # bootstrapping a brand-new cluster
    volumes:
      - volume-etcd:/bitnami/etcd
    deploy:
      resources:
        limits:
          cpus: '4'
          memory: 8G
        reservations:
          cpus: '2'
          memory: 4G
volumes:
  volume-etcd:
    driver_opts:
      type: nfs
      o: addr=10.250.81.131,nolock,soft,rw
      device: ":/data/nfs/etcd3"
三、 启动 etcd 服务
分别在三台机器上执行启动命令
# 启动命令
docker-compose -f docker-compose.yml up -d
验证集群状态
- 查看集群成员
I have no name!@ac43b7254a32:/opt/bitnami/etcd$ etcdctl --write-out=table member list
+------------------+---------+-------+---------------------------+-------------------------------------------------------------------------------+------------+
| ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS | IS LEARNER |
+------------------+---------+-------+---------------------------+-------------------------------------------------------------------------------+------------+
| 2500541642bb58d | started | etcd2 | http://10.250.81.130:2380 | http://10.250.81.129:2379,http://10.250.81.130:2379,http://10.250.81.131:2379 | false |
| 75fa006823810232 | started | etcd3 | http://10.250.81.131:2380 | http://10.250.81.129:2379,http://10.250.81.130:2379,http://10.250.81.131:2379 | false |
| a6cb2842d9d7cff8 | started | etcd1 | http://10.250.81.129:2380 | http://10.250.81.129:2379,http://10.250.81.130:2379,http://10.250.81.131:2379 | false |
+------------------+---------+-------+---------------------------+-------------------------------------------------------------------------------+------------+
- 查看本节点状态
I have no name!@ac43b7254a32:/opt/bitnami/etcd$ etcdctl --write-out=table endpoint status
+----------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+----------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| 127.0.0.1:2379 | a6cb2842d9d7cff8 | 3.5.1 | 20 kB | true | false | 2 | 12 | 12 | |
+----------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
- 查看本节点健康状态
I have no name!@ac43b7254a32:/opt/bitnami/etcd$ etcdctl --write-out=table endpoint health
+----------------+--------+------------+-------+
| ENDPOINT | HEALTH | TOOK | ERROR |
+----------------+--------+------------+-------+
| 127.0.0.1:2379 | true | 7.549647ms | |
+----------------+--------+------------+-------+
测试读写
- 在其中一个节点上进行写操作
I have no name!@ac43b7254a32:/opt/bitnami/etcd$ etcdctl put name "test"
OK
- 在另一个节点上进行读操作
# docker exec -it etcd3 bash
I have no name!@ff3a9e476bda:/opt/bitnami/etcd$ etcdctl get name
name
test
四、安装 keepalived
并设置 VIP
安装keepalived
# Install keepalived from the distro repositories.
yum install -y keepalived;
# Enable start on boot.
systemctl enable keepalived
# NOTE(review): copying the keepalived BINARY into /etc/init.d is dubious --
# /etc/init.d expects SysV init scripts, and `systemctl enable` above already
# handles boot startup. Confirm whether these two lines are actually needed.
cp /usr/sbin/keepalived /etc/init.d/keepalived
chmod +x /etc/init.d/keepalived
check_haproxy_etcd.sh
当 etcd 挂掉时,脚本返回 1;由于 vrrp_script 未配置 weight,检测失败会使本节点进入 FAULT 状态并释放 VIP,由另外一台机器接管
vi /etc/keepalived/check_haproxy_etcd.sh
#!/bin/bash
# Health check for keepalived: exit 0 when something is listening on the
# etcd client port (2379), exit 1 otherwise. A non-zero exit makes
# keepalived consider this node unhealthy.
if netstat -na | grep "LISTEN" | grep "2379" > /dev/null; then
  exit 0
else
  exit 1
fi
确保这个脚本有执行权限,并且正确地配置了监听端口。如果监控的服务端口不可用,脚本将返回非零值,这将导致Keepalived认为本地服务器失效,并开始VIP转移的过程。
chmod +x /etc/keepalived/check_haproxy_etcd.sh
keepalived.conf
编辑 keepalived 配置文件 /etc/keepalived/keepalived.conf
参考资料:https://blog.csdn.net/cnskylee/article/details/131591733
vi /etc/keepalived/keepalived.conf
- master 机器配置内容
! Configuration File for keepalived
global_defs {
# Identifier for this host (ha01 = master machine)
router_id ha01
script_user root
}
vrrp_script check_haproxy_etcd {
# Health-check script; non-zero exit marks this node as failed
script "/etc/keepalived/check_haproxy_etcd.sh"
# Run the check every 2 seconds
interval 2
}
vrrp_instance VI_5 {
# All of ha1/ha2/ha3 start as BACKUP; priority decides the initial master
state BACKUP
# No preemption: if m1 dies and m2 takes the VIP, a restarted m1 does not reclaim it
nopreempt
# Adjust to the actual NIC on this machine
interface bond0
garp_master_delay 10
# NOTE(review): smtp_alert normally needs smtp_server etc. in global_defs -- confirm
smtp_alert
# Virtual router ID; must be identical on ha1/ha2/ha3
virtual_router_id 70
# Priority: 100 here, 80 on ha2, 60 on ha3
priority 100
# This host's own IP
unicast_src_ip 10.250.81.129
unicast_peer {
# Peer IPs
10.250.81.130
10.250.81.131
}
advert_int 1
authentication {
auth_type PASS
# Auth password; must be identical on ha1/ha2/ha3
auth_pass 123456
}
virtual_ipaddress {
# The VIP; must be identical on ha1/ha2/ha3
10.250.81.248 dev bond0 label bond0:5
}
track_script {
# Run the script defined above
check_haproxy_etcd
}
}
- node1 机器配置内容
! Configuration File for keepalived
global_defs {
    ! BUG FIX: was "ha01", copy-pasted from the master config. The comments
    ! in these files name the hosts ha01/ha02/ha03, so this host is ha02
    ! and router_id should be unique per machine.
    router_id ha02
    script_user root
}
vrrp_script check_haproxy_etcd {
    ! Health-check script; non-zero exit marks this node as failed
    script "/etc/keepalived/check_haproxy_etcd.sh"
    ! Run the check every 2 seconds
    interval 2
}
vrrp_instance VI_5 {
    ! All of ha1/ha2/ha3 start as BACKUP; priority decides the initial master
    state BACKUP
    ! No preemption: a recovered node does not reclaim the VIP
    nopreempt
    ! Adjust to the actual NIC on this machine
    interface bond0
    garp_master_delay 10
    ! NOTE(review): smtp_alert normally needs smtp_server etc. in global_defs -- confirm
    smtp_alert
    ! Virtual router ID; must be identical on ha1/ha2/ha3
    virtual_router_id 70
    ! Priority: 100 on ha1, 80 here, 60 on ha3
    priority 80
    ! This host's own IP
    unicast_src_ip 10.250.81.130
    unicast_peer {
        ! Peer IPs
        10.250.81.129
        10.250.81.131
    }
    advert_int 1
    authentication {
        auth_type PASS
        ! Auth password; must be identical on ha1/ha2/ha3
        auth_pass 123456
    }
    virtual_ipaddress {
        ! The VIP; must be identical on ha1/ha2/ha3
        10.250.81.248 dev bond0 label bond0:5
    }
    track_script {
        ! Run the script defined above
        check_haproxy_etcd
    }
}
- node2 机器配置内容
! Configuration File for keepalived
global_defs {
    ! BUG FIX: was "ha01", copy-pasted from the master config. The comments
    ! in these files name the hosts ha01/ha02/ha03, so this host is ha03
    ! and router_id should be unique per machine.
    router_id ha03
    script_user root
}
vrrp_script check_haproxy_etcd {
    ! Health-check script; non-zero exit marks this node as failed
    script "/etc/keepalived/check_haproxy_etcd.sh"
    ! Run the check every 2 seconds
    interval 2
}
vrrp_instance VI_5 {
    ! All of ha1/ha2/ha3 start as BACKUP; priority decides the initial master
    state BACKUP
    ! No preemption: a recovered node does not reclaim the VIP
    nopreempt
    ! Adjust to the actual NIC on this machine
    interface bond0
    garp_master_delay 10
    ! NOTE(review): smtp_alert normally needs smtp_server etc. in global_defs -- confirm
    smtp_alert
    ! Virtual router ID; must be identical on ha1/ha2/ha3
    virtual_router_id 70
    ! Priority: 100 on ha1, 80 on ha2, 60 here
    priority 60
    ! This host's own IP
    unicast_src_ip 10.250.81.131
    unicast_peer {
        ! Peer IPs
        10.250.81.130
        10.250.81.129
    }
    advert_int 1
    authentication {
        auth_type PASS
        ! Auth password; must be identical on ha1/ha2/ha3
        auth_pass 123456
    }
    virtual_ipaddress {
        ! The VIP; must be identical on ha1/ha2/ha3
        10.250.81.248 dev bond0 label bond0:5
    }
    track_script {
        ! Run the script defined above
        check_haproxy_etcd
    }
}
启动keepalived
systemctl start keepalived
查看keepalived状态
systemctl status keepalived
参考资料
https://blog.csdn.net/xingzuo_1840/article/details/125558919