[root@m01 ~]# swapoff -a
[root@m01 ~]# sed -i 's/.*swap.*/#&/' /etc/fstab
6、配置镜像源
#安装镜像源
[root@m01 ~]# curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
00000000 --:--:-- --:--:-- -100 2523100252300606510 --:--:-- --:--:-- --:--:-- 61536
#初始化
[root@m01 ~]# kubeadm init --config init-config.yaml --upload-certs
W0808 21:13:32.175756   92363 strict.go:54] configuration schema.GroupVersionKind{Group:"kubeadm.k8s.io", Version:"v1beta2", Kind:"InitConfiguration"}: error unmarshaling JSON: while decoding JSON: json: unknown field "imagePullPolicy"
[init] Using Kubernetes version: v1.21.3
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
········
····
#实时查看拉取过程(实时监控拉取过程)
[root@m01 ~]# while true; do docker images; echo ; sleep 3; clear; done
REPOSITORY TAG IMAGE ID CREATED SIZE
calico/node v3.20.0 5ef66b403f4f 8 days ago 170MB
calico/pod2daemon-flexvol v3.20.0 5991877ebc11 8 days ago 21.7MB
calico/cni v3.20.0 4945b742b8e6 8 days ago 146MB
calico/kube-controllers v3.20.0 76ba70f4748f 8 days ago 63.2MB
registry.cn-hangzhou.aliyuncs.com/k8sos/kube-apiserver v1.21.3 3d174f00aa39 3 weeks ago 126MB
registry.cn-hangzhou.aliyuncs.com/k8sos/kube-scheduler v1.21.3 6be0dc1302e3 3 weeks ago 50.6MB
registry.cn-hangzhou.aliyuncs.com/k8sos/kube-proxy v1.21.3 adb2816ea823 3 weeks ago 103MB
registry.cn-shanghai.aliyuncs.com/hzl-images/kube-proxy v1.21.2 adb2816ea823 3 weeks ago 103MB
registry.cn-hangzhou.aliyuncs.com/k8sos/kube-controller-manager v1.21.3 bc2bb319a703 3 weeks ago 120MB
registry.cn-shanghai.aliyuncs.com/hzl-images/kube-apiserver v1.21.2 106ff58d4308 7 weeks ago 126MB
registry.cn-shanghai.aliyuncs.com/hzl-images/kube-controller-manager v1.21.2 ae24db9aa2cc 7 weeks ago 120MB
registry.cn-shanghai.aliyuncs.com/hzl-images/kube-scheduler v1.21.2 f917b8c8f55b 7 weeks ago 50.6MB
registry.cn-hangzhou.aliyuncs.com/k8sos/pause 3.4.1 0f8457a4c2ec 6 months ago 683kB
registry.cn-shanghai.aliyuncs.com/hzl-images/pause 3.4.1 0f8457a4c2ec 6 months ago 683kB
registry.cn-hangzhou.aliyuncs.com/k8sos/coredns v1.8.0 7916bcd0fd70 9 months ago 42.5MB
registry.cn-shanghai.aliyuncs.com/hzl-images/coredns v1.8.0 7916bcd0fd70 9 months ago 42.5MB
registry.cn-hangzhou.aliyuncs.com/k8sos/etcd 3.4.13-0 8855aefc3b26 11 months ago 253MB
registry.cn-shanghai.aliyuncs.com/hzl-images/etcd 3.4.13-0 8855aefc3b26 11 months ago 253MB
#初始化完成,并保存生成的指令
-----------------------------------------------------------------------
(加入所有master)
kubeadm join 192.168.15.69:8443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:552a8af303d27bdcf20700ef6a318e002dca6fad97abb24be68e71d587f4c6c3 \
--control-plane --certificate-key 623076797c43edd46533603426006d0e8c4c4bad5ec91b72a807bf82529270fd
(加入所有node)
kubeadm join 192.168.15.69:8443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:552a8af303d27bdcf20700ef6a318e002dca6fad97abb24be68e71d587f4c6c3
------------------------------------------------------------------------
# 小插曲 ################################ 错错错 ###################################################
搭建时出现以下错误:
[root@m01 ~]# kubeadm init --config init-config.yaml --upload-certs
[init] Using Kubernetes version: v1.22.3
[preflight] Running pre-flight checks
error execution phase preflight: [preflight] Some fatal errors occurred:
	[ERROR FileContent--proc-sys-net-bridge-bridge-nf-call-iptables]: /proc/sys/net/bridge/bridge-nf-call-iptables contents are not set to 1
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
To see the stack trace of this error execute with --v=5 or higher
............
################################# 方案 ########################################
解决方案:
[root@m01 ~]# echo "1" > /proc/sys/net/bridge/bridge-nf-call-iptables
#下载网络插件
[root@m01 ~]# curl https://docs.projectcalico.org/manifests/calico.yaml -O
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100  197k  100  197k    0     0  83317      0  0:00:02  0:00:02 --:--:-- 83353
#添加环境变量
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> /etc/profile
source /etc/profile
#部署calico网络插件
[root@m01 ~]# kubectl create -f calico.yaml
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico
·······
····
#部署后即可查看pod创建状态
[root@m01 ~]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-58497c65d5-kchf8 1/1 Running 0 43s
calico-node-cv7wm 1/1 Running 0 43s
coredns-978bbc4b6-bc6pp 1/1 Running 0 7m1s
coredns-978bbc4b6-hn7hw 1/1 Running 0 7m1s
etcd-m01 1/1 Running 0 7m6s
kube-apiserver-m01 1/1 Running 0 7m6s
kube-controller-manager-m01 1/1 Running 0 7m6s
kube-proxy-n427d 1/1 Running 0 7m1s
kube-scheduler-m01 1/1 Running 0 7m7s
七、初始化加入(m02)
1、加入集群
[root@m02 ~]# kubeadm join 192.168.15.69:8443 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:552a8af303d27bdcf20700ef6a318e002dca6fad97abb24be68e71d587f4c6c3 \
> --control-plane --certificate-key 623076797c43edd46533603426006d0e8c4c4bad5ec91b72a807bf82529270fd
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[preflight] Running pre-flight checks before initializing the new control plane instance
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
········
·····
2、配置用户信息
#添加用户信息
[root@m02 ~]# mkdir -p $HOME/.kube
[root@m02 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@m02 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
#查看node状态
[root@m02 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
m01 Ready control-plane,master 119m v1.21.3
m02 Ready control-plane,master 98m v1.21.3
node01 Ready <none> 91m v1.21.3
八、初始化加入(node01)
[root@node01 ~]# kubeadm join 192.168.15.69:8443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:552a8af303d27bdcf20700ef6a318e002dca6fad97abb24be68e71d587f4c6c3
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
········
····
#查看镜像拉取状态(从初始化成功)
[root@node01 ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
calico/pod2daemon-flexvol v3.20.0 5991877ebc11 8 days ago 21.7MB
calico/cni v3.20.0 4945b742b8e6 8 days ago 146MB
registry.cn-hangzhou.aliyuncs.com/k8sos/kube-proxy v1.21.3 adb2816ea823 3 weeks ago 103MB
registry.cn-hangzhou.aliyuncs.com/k8sos/pause 3.4.1 0f8457a4c2ec 6 months ago 683kB
九、检查集群状态
################################## 检查集群状态 ######################################
#查看集群服务配置状态(m01与m02)
[root@m01 /]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-58497c65d5-kchf8 1/1 Running 0 118m
calico-node-cv7wm 1/1 Running 0 118m
calico-node-wpgm2 1/1 Running 0 103m
calico-node-zmkl5 1/1 Running 0 96m
coredns-978bbc4b6-bc6pp 1/1 Running 0 124m
coredns-978bbc4b6-hn7hw 1/1 Running 0 124m
etcd-m01 1/1 Running 0 124m
etcd-m02 1/1 Running 0 103m
kube-apiserver-m01 1/1 Running 0 124m
kube-apiserver-m02 1/1 Running 0 103m
kube-controller-manager-m01 1/1 Running 1 124m
kube-controller-manager-m02 1/1 Running 0 103m
kube-proxy-52r2q 1/1 Running 0 96m
kube-proxy-hwdxr 1/1 Running 0 103m
kube-proxy-n427d 1/1 Running 0 124m
kube-scheduler-m01 1/1 Running 1 124m
kube-scheduler-m02 1/1 Running 0 103m
#查看节点状态(全部正常)
[root@m01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
m01 Ready control-plane,master 32m v1.21.3
m02 Ready control-plane,master 10m v1.21.3
node01 Ready <none> 4m13s v1.21.3
#查看vip
[root@m01 /]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:a2:de:c6 brd ff:ff:ff:ff:ff:ff
inet 192.168.15.66/24 brd 192.168.15.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
inet 192.168.15.69/32 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::57eb:becd:18b6:b774/64 scope link noprefixroute
valid_lft forever preferred_lft forever
十、集群测试
1、keepalived测试
#停止m01上的keepalived服务
[root@m01 /]# systemctl stop keepalived.service
#vip已经漂移了
[root@m01 /]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:a2:de:c6 brd ff:ff:ff:ff:ff:ff
inet 192.168.15.66/24 brd 192.168.15.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
inet6 fe80::57eb:becd:18b6:b774/64 scope link noprefixroute
valid_lft forever preferred_lft forever
#查看m02
[root@m02 ~]# ip a
#vip已经漂移到m02上
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:d8:98:7b brd ff:ff:ff:ff:ff:ff
inet 192.168.15.67/24 brd 192.168.15.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
inet 192.168.15.69/32 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::abd2:cfda:7db:56ff/64 scope link tentative noprefixroute dadfailed
valid_lft forever preferred_lft forever
inet6 fe80::57eb:becd:18b6:b774/64 scope link tentative noprefixroute dadfailed
valid_lft forever preferred_lft forever
inet6 fe80::a95:bcab:b48f:c797/64 scope link noprefixroute
valid_lft forever preferred_lft forever
#重新启动keepalived服务(根据设定的权重,可以来回让vip漂移)
[root@m01 /]# systemctl restart keepalived.service
#vip又漂移到m01
[root@m01 /]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:a2:de:c6 brd ff:ff:ff:ff:ff:ff
inet 192.168.15.66/24 brd 192.168.15.255 scope global noprefixroute eth0
valid_lft forever preferred_lft forever
inet 192.168.15.69/32 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::57eb:becd:18b6:b774/64 scope link noprefixroute
valid_lft forever preferred_lft forever
#简单测试keepalived;针对后续可能出现的脑裂问题,可以添加配置检测脚本解决(此处略),详情如下