k8s---多节点部署
环境:
负载均衡
192.168.188.20
192.168.188.60
master节点:
192.168.188.30 master01
192.168.188.10 master02
node节点
192.168.188.40 node01
192.168.188.50 node02
master02部署
优先关闭防火墙和selinux服务
在master01上操作
复制kubernetes目录到master02
如果出现这种现象,先查看环境变量
[root@master ~]# kubectl get nodes
bash: kubectl: 未找到命令.
[root@master ~]# cd /opt/kubernetes/
[root@master kubernetes]# ls
bin cfg ssl
[root@master kubernetes]# cd bin/
[root@master bin]# ls
kube-apiserver kube-controller-manager kubectl kube-scheduler
[root@master bin]# vim /etc/profile
[root@master bin]# source /etc/profile
底部添加
export PATH=$PATH:/opt/kubernetes/bin
[root@master bin]# source /etc/profile
[root@master bin]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
192.168.188.40 Ready <none> 12h v1.12.3
192.168.188.50 Ready <none> 12h v1.12.3
[root@master01 k8s]# scp -r /opt/kubernetes/ root@192.168.188.10:/opt
The authenticity of host '192.168.188.10 (192.168.188.10)' can't be established.
ECDSA key fingerprint is SHA256:ESP5W3/eWNvEQmepDb2teD9WEDiNaXOEleRLozWINns.
ECDSA key fingerprint is MD5:2f:ae:0f:0a:c5:d9:8f:be:ed:77:de:08:9a:3b:90:ff.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '192.168.188.10' (ECDSA) to the list of known hosts.
root@192.168.188.10's password:
token.csv 100% 84 168.5KB/s 00:00
kube-apiserver 100% 934 934.8KB/s 00:00
kube-scheduler 100% 94 182.5KB/s 00:00
kube-controller-manager 100% 483 789.2KB/s 00:00
kube-apiserver 100% 184MB 53.1MB/s 00:03
kubectl 100% 55MB 43.8MB/s 00:01
kube-controller-manager 100% 155MB 42.0MB/s 00:03
kube-scheduler 100% 55MB 46.7MB/s 00:01
ca-key.pem 100% 1679 2.5MB/s 00:00
ca.pem 100% 1359 1.2MB/s 00:00
server-key.pem 100% 1679 2.3MB/s 00:00
server.pem 100% 1643 1.7MB/s 00:00
复制master中的三个组件启动脚本kube-apiserver.service kube-controller-manager.service kube-scheduler.service
[root@master01 k8s]# scp /usr/lib/systemd/system/{kube-apiserver,kube-controller-manager,kube-scheduler}.service root@192.168.188.10:/usr/lib/systemd/system/
root@192.168.188.10's password:
kube-apiserver.service 100% 282 500.8KB/s 00:00
kube-controller-manager.service 100% 317 267.2KB/s 00:00
kube-scheduler.service 100% 281 551.6KB/s 00:00
master02上操作
修改配置文件kube-apiserver中的IP
[root@master02 ~]# cd /opt/kubernetes/cfg/
[root@master02 cfg]# ls
kube-apiserver kube-controller-manager kube-scheduler token.csv
[root@master02 cfg]# vim kube-apiserver
[root@master02 cfg]# cat kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=true \
--v=4 \
--etcd-servers=https://192.168.188.30:2379,https://192.168.188.40:2379,https://192.168.188.50:2379 \
--bind-address=192.168.188.10 \
--secure-port=6443 \
--advertise-address=192.168.188.10 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--kubelet-https=true \
--enable-bootstrap-token-auth \
--token-auth-file=/opt/kubernetes/cfg/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/opt/kubernetes/ssl/server.pem \
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/opt/etcd/ssl/ca.pem \
--etcd-certfile=/opt/etcd/ssl/server.pem \
--etcd-keyfile=/opt/etcd/ssl/server-key.pem"
特别注意:master02一定要有etcd证书
需要拷贝master01上已有的etcd证书给master02使用
[root@master01 k8s]# scp -r /opt/etcd/ root@192.168.188.10:/opt/
root@192.168.188.10's password:
etcd 100% 516 335.9KB/s 00:00
etcd 100% 18MB 63.1MB/s 00:00
etcdctl 100% 15MB 63.6MB/s 00:00
ca-key.pem 100% 1679 2.2MB/s 00:00
ca.pem 100% 1265 450.7KB/s 00:00
server-key.pem 100% 1679 2.1MB/s 00:00
server.pem 100% 1338 460.4KB/s 00:00
启动master02中的三个组件服务
[root@master02 cfg]# systemctl start kube-apiserver.service
[root@master02 cfg]# systemctl start kube-controller-manager.service
[root@master02 cfg]# systemctl start kube-scheduler.service
增加环境变量
[root@master02 cfg]# vim /etc/profile
末尾添加
export PATH=$PATH:/opt/kubernetes/bin/
[root@master02 cfg]# source /etc/profile
[root@master02 cfg]# kubectl get node
NAME STATUS ROLES AGE VERSION
192.168.188.40 Ready <none> 13h v1.12.3
192.168.188.50 Ready <none> 13h v1.12.3
负载均衡
lb01 lb02操作
安装nginx服务,把nginx.sh和keepalived.conf脚本拷贝到家目录
[root@nginx02 ~]# vim /etc/yum.repos.d/nginx.repo
[nginx]
name=nginx repo
baseurl=http://nginx.org/packages/centos/7/$basearch/
gpgcheck=0
[root@nginx02 ~]# yum -y install nginx
添加四层转发
[root@nginx01 ~]# vim /etc/nginx/nginx.conf
events {
worker_connections 1024;
}
stream {
log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
access_log /var/log/nginx/k8s-access.log main;
upstream k8s-apiserver {
server 192.168.188.30:6443;
server 192.168.188.10:6443;
}
server {
listen 6443;
proxy_pass k8s-apiserver;
}
}
http {
[root@nginx01 ~]# systemctl start nginx
[root@nginx01 ~]# netstat -anpt | grep nginx
tcp 0 0 0.0.0.0:6443 0.0.0.0:* LISTEN 40635/nginx: master
tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 40635/nginx: master
部署keepalived服务
[root@nginx01 ~]# yum install keepalived -y
修改配置文件
[root@nginx01 ~]# cp keepalived.conf /etc/keepalived/keepalived.conf
cp:是否覆盖"/etc/keepalived/keepalived.conf"? yes
注意:lb01是Master配置如下:
[root@nginx01 ~]# cd /etc/keepalived/
[root@nginx01 keepalived]# ls
keepalived.conf
[root@nginx01 keepalived]# vim keepalived.conf
[root@nginx01 keepalived]# cat keepalived.conf
! Configuration File for keepalived
global_defs {
# 接收邮件地址
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
# 邮件发送地址
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id NGINX_MASTER
}
vrrp_script check_nginx {
script "/etc/nginx/check_nginx.sh"
}
vrrp_instance VI_1 {
state MASTER
interface ens33
virtual_router_id 51 # VRRP 路由 ID实例,每个实例是唯一的
priority 100 # 优先级,备服务器设置 90
advert_int 1 # 指定VRRP 心跳包通告间隔时间,默认1秒
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.188.100/24
}
track_script {
check_nginx
}
}
[root@nginx01 keepalived]# cat /etc/nginx/check_nginx.sh
# keepalived health-check script: if nginx is no longer running, stop
# keepalived so the VIP (192.168.188.100) fails over to the backup node.
# Count nginx processes, excluding the grep command itself and this
# script's own PID ($$) so the check does not count itself.
count=$(ps -ef |grep nginx |egrep -cv "grep|$$")
# No nginx process found -> surrender MASTER role by stopping keepalived.
if [ "$count" -eq 0 ];then
systemctl stop keepalived
fi
[root@nginx01 keepalived]# cd /etc/nginx/
[root@nginx01 nginx]# chmod +x check_nginx.sh
注意:lb02是Backup配置如下:
[root@nginx02 ~]# cd /etc/keepalived/
[root@nginx02 keepalived]# ls
keepalived.conf
[root@nginx02 keepalived]# vim keepalived.conf
[root@nginx02 keepalived]# cat keepalived.conf
! Configuration File for keepalived
global_defs {
# 接收邮件地址
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
# 邮件发送地址
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id NGINX_BACKUP
}
vrrp_script check_nginx {
script "/etc/nginx/check_nginx.sh"
}
vrrp_instance VI_1 {
state BACKUP
interface ens33
virtual_router_id 51 # VRRP 路由 ID实例,每个实例是唯一的
priority 90 # 优先级,备服务器设置 90
advert_int 1 # 指定VRRP 心跳包通告间隔时间,默认1秒
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.188.100/24
}
track_script {
check_nginx
}
}
[root@nginx02 keepalived]# vim /etc/nginx/check_nginx.sh
[root@nginx02 keepalived]# cat /etc/nginx/check_nginx.sh
# keepalived health-check script (same as on lb01): if nginx is not
# running, stop keepalived so the VIP can move to the peer node.
# Count nginx processes, excluding the grep itself and this script's own PID ($$).
count=$(ps -ef |grep nginx |egrep -cv "grep|$$")
# No nginx process found -> stop keepalived to trigger VRRP failover.
if [ "$count" -eq 0 ];then
systemctl stop keepalived
fi
[root@nginx02 keepalived]# cd /etc/nginx/
[root@nginx02 nginx]# chmod +x check_nginx.sh
启动服务,查看是否开启
[root@nginx01 nginx]# systemctl start keepalived
[root@nginx01 nginx]# systemctl status keepalived.service
● keepalived.service - LVS and VRRP High Availability Monitor
Loaded: loaded (/usr/lib/systemd/system/keepalived.service; disabled; vendor preset: disabled)
Active: active (running) since 四 2021-03-25 11:34:32 CST; 15s ago
Process: 41970 ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS (code=exited, status=0/SUCCESS)
Main PID: 41971 (keepalived)
CGroup: /system.slice/keepalived.service
├─41971 /usr/sbin/keepalived -D
├─41972 /usr/sbin/keepalived -D
└─41973 /usr/sbin/keepalived -D
3月 25 11:34:34 nginx01 Keepalived_vrrp[41973]: Sending gratuitous ARP on ens33 fo...0
3月 25 11:34:34 nginx01 Keepalived_vrrp[41973]: Sending gratuitous ARP on ens33 fo...0
3月 25 11:34:34 nginx01 Keepalived_vrrp[41973]: Sending gratuitous ARP on ens33 fo...0
3月 25 11:34:34 nginx01 Keepalived_vrrp[41973]: Sending gratuitous ARP on ens33 fo...0
3月 25 11:34:39 nginx01 Keepalived_vrrp[41973]: Sending gratuitous ARP on ens33 fo...0
3月 25 11:34:39 nginx01 Keepalived_vrrp[41973]: VRRP_Instance(VI_1) Sending/queuei...0
3月 25 11:34:39 nginx01 Keepalived_vrrp[41973]: Sending gratuitous ARP on ens33 fo...0
3月 25 11:34:39 nginx01 Keepalived_vrrp[41973]: Sending gratuitous ARP on ens33 fo...0
3月 25 11:34:39 nginx01 Keepalived_vrrp[41973]: Sending gratuitous ARP on ens33 fo...0
3月 25 11:34:39 nginx01 Keepalived_vrrp[41973]: Sending gratuitous ARP on ens33 fo...0
Hint: Some lines were ellipsized, use -l to show in full.
查看lb01地址信息
[root@nginx01 nginx]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:39:6f:76 brd ff:ff:ff:ff:ff:ff
inet 192.168.188.20/24 brd 192.168.188.255 scope global ens33
valid_lft forever preferred_lft forever
inet 192.168.188.100/24 scope global secondary ens33 //漂移地址在lb01中
valid_lft forever preferred_lft forever
inet6 fe80::2480:efd0:c29d:d370/64 scope link
valid_lft forever preferred_lft forever
3: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN qlen 1000
link/ether 52:54:00:e6:08:e7 brd ff:ff:ff:ff:ff:ff
inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
valid_lft forever preferred_lft forever
4: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN qlen 1000
link/ether 52:54:00:e6:08:e7 brd ff:ff:ff:ff:ff:ff
验证
验证地址漂移(lb01中使用pkill nginx,再在lb02中使用ip a 查看)
恢复操作(在lb01中先启动nginx服务,再启动keepalived服务)
nginx站点/usr/share/nginx/html
[root@nginx01 nginx]# cd
[root@nginx01 ~]# pkill nginx
[root@nginx01 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:39:6f:76 brd ff:ff:ff:ff:ff:ff
inet 192.168.188.20/24 brd 192.168.188.255 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::2480:efd0:c29d:d370/64 scope link
valid_lft forever preferred_lft forever
3: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN qlen 1000
link/ether 52:54:00:e6:08:e7 brd ff:ff:ff:ff:ff:ff
inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
valid_lft forever preferred_lft forever
4: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN qlen 1000
link/ether 52:54:00:e6:08:e7 brd ff:ff:ff:ff:ff:ff
[root@nginx02 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:d1:9b:c1 brd ff:ff:ff:ff:ff:ff
inet 192.168.188.60/24 brd 192.168.188.255 scope global ens33
valid_lft forever preferred_lft forever
inet 192.168.188.100/24 scope global secondary ens33
valid_lft forever preferred_lft forever
inet6 fe80::f0c9:6be3:90af:39fa/64 scope link
valid_lft forever preferred_lft forever
3: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN qlen 1000
link/ether 52:54:00:97:44:1f brd ff:ff:ff:ff:ff:ff
inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
valid_lft forever preferred_lft forever
4: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN qlen 1000
link/ether 52:54:00:97:44:1f brd ff:ff:ff:ff:ff:ff
恢复 先启动nginx 再启动keepalived 服务
[root@nginx01 ~]# systemctl start nginx
[root@nginx01 ~]# systemctl start keepalived.service
[root@nginx01 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:39:6f:76 brd ff:ff:ff:ff:ff:ff
inet 192.168.188.20/24 brd 192.168.188.255 scope global ens33
valid_lft forever preferred_lft forever
inet 192.168.188.100/24 scope global secondary ens33
valid_lft forever preferred_lft forever
inet6 fe80::2480:efd0:c29d:d370/64 scope link
valid_lft forever preferred_lft forever
3: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN qlen 1000
link/ether 52:54:00:e6:08:e7 brd ff:ff:ff:ff:ff:ff
inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
valid_lft forever preferred_lft forever
4: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN qlen 1000
link/ether 52:54:00:e6:08:e7 brd ff:ff:ff:ff:ff:ff
[root@nginx02 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:d1:9b:c1 brd ff:ff:ff:ff:ff:ff
inet 192.168.188.60/24 brd 192.168.188.255 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::f0c9:6be3:90af:39fa/64 scope link
valid_lft forever preferred_lft forever
3: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN qlen 1000
link/ether 52:54:00:97:44:1f brd ff:ff:ff:ff:ff:ff
inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
valid_lft forever preferred_lft forever
4: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN qlen 1000
link/ether 52:54:00:97:44:1f brd ff:ff:ff:ff:ff:ff