1. Lab environment
server1 172.25.25.1 Harbor registry
servera 172.25.25.11 haproxy+pcs
serverb 172.25.25.12 haproxy+pcs
serverc 172.25.25.13 k8s master
serverd 172.25.25.14 k8s master
servere 172.25.25.15 k8s master
2. Deploy haproxy + pcs
Configure the yum repository on servera and serverb
[root@servera ~]# vim /etc/yum.repos.d/dvd.repo
[root@servera ~]# cat /etc/yum.repos.d/dvd.repo
[dvd]
name=rhel7.6
baseurl=http://172.25.25.250/rhel7.6
gpgcheck=0
[HighAvailability]
name=rhel7.6 HighAvailability
baseurl=http://172.25.25.250/rhel7.6/addons/HighAvailability
gpgcheck=0
[root@servera ~]# scp /etc/yum.repos.d/dvd.repo 172.25.25.12:/etc/yum.repos.d/
Install haproxy
[root@servera ~]# yum install haproxy.x86_64 -y
Edit the configuration file
[root@servera ~]# vim /etc/haproxy/haproxy.cfg
[root@servera ~]# scp /etc/haproxy/haproxy.cfg 172.25.25.12:/etc/haproxy/haproxy.cfg
[root@servera ~]# systemctl enable --now haproxy
In the configuration file, add the listen sections: the monitoring port and the backend server names.
Add the backend servers, as sketched below.
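A minimal sketch of the added sections (the monitor port 8080 and the stats URI are assumptions; the backend addresses come from the environment table above):
\\\
listen stats
    bind *:8080                # monitoring page, port and URI are assumptions
    mode http
    stats enable
    stats uri /status

listen k8s-apiserver
    bind *:6443                # the VIP:6443 endpoint used later by kubeadm
    mode tcp
    balance roundrobin
    server serverc 172.25.25.13:6443 check
    server serverd 172.25.25.14:6443 check
    server servere 172.25.25.15:6443 check
///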
At this point the haproxy monitoring page should be reachable on both pcs nodes (servera and serverb).
Install pcs
[root@servera ~]# yum install -y pacemaker pcs psmisc policycoreutils-python
[root@servera ~]# systemctl enable --now pcsd.service
Authenticate the two nodes and create the cluster
[root@servera ~]# echo westos | passwd --stdin hacluster
[root@servera ~]# ssh serverb 'echo westos | passwd --stdin hacluster'
[root@servera ~]# pcs cluster auth servera serverb
[root@servera ~]# pcs cluster setup --name mycluster servera serverb
[root@servera ~]# pcs cluster start --all
[root@servera ~]# pcs status
pcs status reports WARNINGs; resolve them
[root@servera ~]# pcs cluster enable --all
[root@servera ~]# pcs property set stonith-enabled=false
[root@servera ~]# pcs status
Configure the VIP and haproxy resources
[root@servera ~]# pcs resource create VIP ocf:heartbeat:IPaddr2 ip=172.25.25.100 op monitor interval=30s
[root@servera ~]# pcs resource create haproxy systemd:haproxy op monitor interval=30s
[root@servera ~]# pcs resource group add Group VIP haproxy
[root@servera ~]# pcs status
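A quick check that the resources landed on the active node (the interface name eth0 is an assumption):
[root@servera ~]# ip addr show eth0 | grep 172.25.25.100    # the VIP should be configured here
[root@servera ~]# systemctl status haproxy                  # haproxy is running under pacemaker control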
3. Deploy Docker
Configure the yum repository on the k8s nodes (serverc, serverd, servere), then install and start Docker
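The docker.repo file itself is not shown; a minimal sketch, assuming a local docker-ce mirror on the classroom server (the baseurl is an assumption):
\\\
[docker]
name=docker-ce
baseurl=http://172.25.25.250/docker-ce
gpgcheck=0
///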
[root@server1 /etc/yum.repos.d]# scp docker.repo serverc:/etc/yum.repos.d/
[root@server1 /etc/yum.repos.d]# scp docker.repo serverd:/etc/yum.repos.d/
[root@server1 /etc/yum.repos.d]# scp docker.repo servere:/etc/yum.repos.d/
[root@serverc ~]# yum install -y docker-ce
[root@serverc ~]# systemctl enable --now docker
Resolve the warnings reported by docker info
docker info    # view docker info and the warnings
sysctl -a | grep bridge-nf-call-iptables
vim /etc/sysctl.d/docker.conf
\\\
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
///
sysctl --system    # reload all sysctl settings
[root@serverc ~]# scp /etc/sysctl.d/docker.conf serverd:/etc/sysctl.d/
[root@serverc ~]# scp /etc/sysctl.d/docker.conf servere:/etc/sysctl.d/
Prepare daemon.json
[root@serverc ~]# cat /etc/docker/daemon.json
{
  "registry-mirrors": ["https://reg.westos.org"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
[root@serverc ~]# scp /etc/docker/daemon.json serverd:/etc/docker/
[root@serverc ~]# scp /etc/docker/daemon.json servere:/etc/docker/
Copy the registry certificates: on the Harbor node server1, copy the certs to serverc, serverd, and servere
[root@server1 /etc/docker/certs.d]# scp -r /etc/docker/certs.d serverc:/etc/docker/
[root@server1 /etc/docker/certs.d]# scp -r /etc/docker/certs.d serverd:/etc/docker/
[root@server1 /etc/docker/certs.d]# scp -r /etc/docker/certs.d servere:/etc/docker/
Restart Docker and check the info
[root@serverc /etc/docker]# systemctl restart docker
[root@serverc /etc/docker]# docker info
4. Deploy k8s
Configure host name resolution on all nodes
Disable the swap partition on all nodes (see the sketch after the commands below)
[root@servere ~]# swapoff -a
[root@servere ~]# vim /etc/fstab
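A minimal sketch of the two edits, assuming the registry name reg.westos.org points at server1 and that the swap entry in fstab is the stock rhel-swap volume:
\\\
# /etc/hosts on every node
172.25.25.1    server1 reg.westos.org
172.25.25.11   servera
172.25.25.12   serverb
172.25.25.13   serverc
172.25.25.14   serverd
172.25.25.15   servere
///
\\\
# /etc/fstab: comment out the swap line so swap stays off after a reboot
#/dev/mapper/rhel-swap   swap   swap   defaults   0 0
///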
Install the k8s packages on serverc, serverd, and servere, then enable kubelet
[root@serverd /mnt]# tar zxf kubeadm-1.21.3.tar.gz
[root@serverc /mnt]# cd packages/
[root@serverc /mnt/packages]# yum install -y *
[root@serverc /mnt/packages]# systemctl enable --now kubelet
Install ipvsadm (the ip_vs kernel modules need to be loaded)
[root@serverc /mnt/packages]# yum install -y ipvsadm
[root@serverc /mnt/packages]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
[root@serverc /mnt/packages]# lsmod | grep ip_vs
ip_vs 145497 0
nf_conntrack 133095 7 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_ipv4
libcrc32c 12644 4 xfs,ip_vs,nf_nat,nf_conntrack
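kube-proxy normally loads the ip_vs modules itself when it starts in ipvs mode; to keep them loaded across reboots, a persistent module list can be added. A sketch (the file name and module set are assumptions):
[root@serverc ~]# vim /etc/modules-load.d/ipvs.conf
\\\
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
///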
Prepare the deployment file (generate the defaults, then edit; the finished file is shown below)
[root@serverc /mnt/packages]# kubeadm config print init-defaults > kubeadm-init.yaml
[root@serverc /mnt/packages]# vim kubeadm-init.yaml
[root@serverc /mnt/packages]# kubeadm init --config kubeadm-init.yaml --upload-certs
[root@serverc /mnt/packages]# cat kubeadm-init.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.25.25.13        # local IP
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: serverc                         # local hostname
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master # taint on the master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "172.25.25.100:6443"  # access through the VIP created by pcs
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: reg.westos.org/k8s
kind: ClusterConfiguration
kubernetesVersion: 1.21.3               # k8s version
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16              # pod subnet
  serviceSubnet: 10.96.0.0/12           # service subnet
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1  # kube-proxy configuration
kind: KubeProxyConfiguration
mode: ipvs                              # use ipvs mode
Join serverd and servere to the cluster as additional control-plane nodes with the kubeadm join command printed by kubeadm init, as sketched below
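A sketch of the join command; the token is the one from kubeadm-init.yaml, while the CA cert hash and certificate key are placeholders taken from the kubeadm init output on serverc:
[root@serverd ~]# kubeadm join 172.25.25.100:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:<hash-from-init-output> \
    --control-plane --certificate-key <key-from-init-output>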
Set the KUBECONFIG export, otherwise kubectl on the master has no credentials to query the cluster, as shown below
[root@serverc /mnt/packages]# kubectl get pod
The connection to the server localhost:8080 was refused - did you specify the right host or port?
[root@serverc /mnt/packages]# export KUBECONFIG=/etc/kubernetes/admin.conf
[root@serverc /mnt/packages]# kubectl get pod
No resources found in default namespace.
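To keep the variable across logins, it can be appended to the shell profile (a sketch):
[root@serverc ~]# echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >> ~/.bash_profile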
Deploy the flannel network add-on
[root@serverc /mnt]# kubectl apply -f kube-flannel.yml
Warning: policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
Finally, check the status of the network add-on
Check the node status, for example:
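These checks can be done with kubectl on any of the masters:
[root@serverc ~]# kubectl get pod -n kube-system -o wide   # the flannel and coredns pods should be Running
[root@serverc ~]# kubectl get nodes                        # serverc, serverd and servere should all be Ready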
5. Test high availability
Refresh the web pages: the cluster nodes are all up, and the haproxy monitoring page shows serverc, serverd, and servere working normally.
Create a pod on serverc; with kubectl get pod on serverd and servere, the other two masters, the same pod is visible.
(Since no worker nodes are configured, the pod stays in Pending state.)
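A sketch of the test, assuming an nginx image available in the local registry (pod name and image are assumptions):
[root@serverc ~]# kubectl run demo --image=nginx
[root@serverd ~]# kubectl get pod    # the same demo pod shows up here, in Pending state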
Shut down serverc; the monitoring page shows it as down.
serverd and servere keep working as backup masters, and the pod created earlier on serverc is still visible there.
Test pcs high availability
When we take down one pcs node (servera), the service fails over to serverb, as sketched below:
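A sketch of the failover test:
[root@servera ~]# pcs cluster stop servera        # or simply power servera off
[root@serverb ~]# pcs status                      # the resource group (VIP + haproxy) now runs on serverb
[root@serverb ~]# ip addr | grep 172.25.25.100    # the VIP has moved to serverb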