Kubernetes multi-master setup: installing HAProxy and Keepalived

Table of Contents

1. Versions

2. Hostnames

3. System configuration (all nodes)

4. Prerequisites for enabling IPVS (all nodes)

5. Docker installation (all nodes)

6. Configure HAProxy and Keepalived on the master nodes

7. Install kubeadm on the master node

8. Other master nodes

9. Install Calico on the master node


Package and file downloads

These are the installation packages I used (kubelet, calico, haproxy, keepalived, etcd, pause, and so on) together with some YAML files. The set may not be complete; these are just notes from my own learning, and I hope they are helpful.

1. Versions

Docker: 18.09.7-3.el7
kubernetes: v1.19.0
calico: v3.21.2

2. Hostnames

cat /etc/hosts
192.168.1.161 master01
192.168.1.162 master02
192.168.1.163 master03
192.168.1.160 vip
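
These entries need to be present on every node; a minimal sketch for appending them (assuming they are not already there):

# Append the cluster hostnames on every node
cat >> /etc/hosts <<EOF
192.168.1.161 master01
192.168.1.162 master02
192.168.1.163 master03
192.168.1.160 vip
EOF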

3. System configuration (all nodes)

# Install dependencies (network access required)
# To install a package:  yum install -y <package name>
[root@master01 ~]# yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl 
[root@master01 ~]# yum install -y sysstat libseccomp wget vim net-tools git

# Firewall settings
# Stop and disable firewalld
[root@master01 ~]# systemctl stop firewalld && systemctl disable firewalld 
# Install iptables-services and start iptables (network access required)
[root@master01 ~]# yum -y install iptables-services && systemctl start iptables
# Enable iptables at boot, flush the rules, and save
[root@master01 ~]# systemctl enable iptables && iptables -F && service iptables save

# Disable swap and SELinux
[root@master01 ~]# swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab 
[root@master01 ~]# setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

# Tune kernel parameters
[root@master01 ~]# cat > kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0 # do not use swap; only allow it when the system is about to OOM
vm.overcommit_memory=1 # do not check whether enough physical memory is available
vm.panic_on_oom=0 # do not panic on OOM; let the OOM killer handle it
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
# Install kubernetes.conf into /etc/sysctl.d/
[root@master01 ~]# cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
# Apply the settings
[root@master01 ~]# sysctl -p /etc/sysctl.d/kubernetes.conf
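
Note: the net.bridge.* keys only exist once the br_netfilter module is loaded (done in step 4); if sysctl -p complains that they are missing, load the module first and re-apply:

# Load br_netfilter, re-apply the settings, then spot-check a key value
[root@master01 ~]# modprobe br_netfilter
[root@master01 ~]# sysctl -p /etc/sysctl.d/kubernetes.conf
[root@master01 ~]# sysctl net.bridge.bridge-nf-call-iptables
net.bridge.bridge-nf-call-iptables = 1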

# Adjust the system time zone
# Set the time zone to Asia/Shanghai
[root@master01 ~]# timedatectl set-timezone Asia/Shanghai
# Keep the hardware clock (RTC) in UTC
[root@master01 ~]# timedatectl set-local-rtc 0
# Restart services that depend on the system time
[root@master01 ~]# systemctl restart rsyslog
[root@master01 ~]# systemctl restart crond
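
To double-check the clock settings, timedatectl can be queried; it should report the Asia/Shanghai time zone and "RTC in local TZ: no". Since ntp/ntpdate were installed earlier, an optional one-shot sync against any reachable NTP server (ntp.aliyun.com is only an example) also helps keep the nodes in step:

# Verify time zone / RTC settings and (optionally) sync the clock once
[root@master01 ~]# timedatectl status
[root@master01 ~]# ntpdate ntp.aliyun.com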

# Stop services that are not needed
[root@master01 ~]# systemctl stop postfix && systemctl disable postfix

# Set up the log directory
# Directory for persistent journal storage
[root@master01 ~]# mkdir /var/log/journal
[root@master01 ~]# mkdir /etc/systemd/journald.conf.d
[root@master01 ~]# cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# Persist logs to disk
Storage=persistent
# Compress archived logs
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# Cap total disk usage at 10G
SystemMaxUse=10G
# Cap each journal file at 200M
SystemMaxFileSize=200M
# Keep logs for 2 weeks
MaxRetentionSec=2week
# Do not forward logs to syslog
ForwardToSyslog=no
EOF
# Restart journald
[root@master01 ~]# systemctl restart systemd-journald
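
A quick way to confirm that journald is now writing to /var/log/journal:

# Show how much disk space the persistent journal uses
[root@master01 ~]# journalctl --disk-usage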

# Disable NUMA
[root@master01 ~]# cp /etc/default/grub{,.bak}
# Add numa=off to the GRUB_CMDLINE_LINUX line
[root@master01 ~]# vim /etc/default/grub 
diff /etc/default/grub.bak /etc/default/grub
6c6
< GRUB_CMDLINE_LINUX="crashkernel=auto rd.lvm.lv=centos/root rhgb quiet"
---
> GRUB_CMDLINE_LINUX="crashkernel=auto rd.lvm.lv=centos/root rhgb quiet numa=off"
[root@master01 ~]# cp /boot/grub2/grub.cfg{,.bak}
[root@master01 ~]# grub2-mkconfig -o /boot/grub2/grub.cfg
Generating grub configuration file ...
Found linux image: /boot/vmlinuz-3.10.0-1160.el7.x86_64
Found initrd image: /boot/initramfs-3.10.0-1160.el7.x86_64.img
Found linux image: /boot/vmlinuz-0-rescue-ef24e2b8071740a59fdf3258ac2f9987
Found initrd image: /boot/initramfs-0-rescue-ef24e2b8071740a59fdf3258ac2f9987.img
done
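
grub2-mkconfig only regenerates the boot configuration; numa=off takes effect after the next reboot. A quick check once the node is back up:

# Reboot when convenient, then confirm the parameter is on the kernel command line
[root@master01 ~]# reboot
[root@master01 ~]# cat /proc/cmdline | grep numa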

4. Prerequisites for enabling IPVS (all nodes)

[root@master01 ~]# modprobe br_netfilter
[root@master01 ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
# Make the script executable, run it, and verify the modules are loaded
[root@master01 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
ip_vs_sh               12688  0 
ip_vs_wrr              12697  0 
ip_vs_rr               12600  0 
ip_vs                 145458  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack_ipv4      15053  0 
nf_defrag_ipv4         12729  1 nf_conntrack_ipv4
nf_conntrack          139264  3 ip_vs,xt_conntrack,nf_conntrack_ipv4
libcrc32c              12644  3 xfs,ip_vs,nf_conntrack
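
Loading these modules does not by itself switch kube-proxy to IPVS; that is selected in the kubeadm configuration (see the sketch in section 7). Once the cluster is running, the ipvsadm tool installed in step 3 can be used to confirm that IPVS virtual servers exist:

# List IPVS rules (entries only appear once kube-proxy runs in IPVS mode)
[root@master01 ~]# ipvsadm -Ln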

5. Docker installation (all nodes)

# Install the required packages
[root@master01 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
# Add the Docker CE yum repository (Aliyun mirror)
[root@master01 ~]# yum-config-manager \
--add-repo \
http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Install Docker
[root@master01 ~]# yum -y install docker-ce-18.09.7-3.el7
# Create the /etc/docker directory (usually created automatically)
[root@master01 ~]# mkdir /etc/docker
# Configure the daemon
[root@master01 ~]# cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  }
}
EOF
[root@master01 ~]# mkdir -p /etc/systemd/system/docker.service.d
# Reload systemd, restart Docker, and enable it at boot
[root@master01 ~]# systemctl daemon-reload && systemctl restart docker && systemctl enable docker
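
A quick check that Docker picked up the systemd cgroup driver configured above (kubelet expects the two to match):

# The output should contain "Cgroup Driver: systemd"
[root@master01 ~]# docker info | grep -i cgroup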

6. Configure HAProxy and Keepalived on the master nodes

# Create the directory /usr/local/kubernetes/install
[root@master01 ~]# mkdir /usr/local/kubernetes
[root@master01 kubernetes]# cd /usr/local/kubernetes && mkdir install
[root@master01 install]# cd install
# Upload the prepared images and configuration files (e.g. via XFTP)
[root@master01 install]# ll
-rw-r--r--. 1 root root    221964 Dec 15 14:26 calico.yaml
-rw-r--r--. 1 root root      1038 Dec 15 11:56 kubeadm-config.yaml
-rwxr-xr-x. 1 root root       233 Dec 15 11:24 load-images.sh
-rw-r--r--. 1 root root      1480 Dec 15 11:22 start.keep.tar.gz
# Extract the image tarball
[root@master01 install]# tar -zxvf images.tar.gz
images/
images/calico-cni.tar
images/calico-node.tar
images/calico-pod2daemon-flexvol.tar
images/coredns.tar
images/etcd.tar
images/kube-apiserver.tar
images/kube-controller-manager.tar
images/kube-controllers.tar
images/kube-proxy.tar
images/kube-scheduler.tar
images/pause.tar
images/haproxy.tar
images/keepalived.tar

# The load script
# /usr/local/kubernetes/images/ is the directory containing the image tarballs (adjust to your environment)
[root@master01 ~]# vim load-images.sh
#!/bin/bash

cd /usr/local/kubernetes/images/

# Build a list of every image tarball in the directory (excluding this script itself)
ls /usr/local/kubernetes/images/ | grep -v load-images.sh > /tmp/k8s-images.txt

# Load each tarball into the local Docker image store
for i in $( cat /tmp/k8s-images.txt )
do
    docker load -i $i
done

# Clean up the temporary list
rm -rf /tmp/k8s-images.txt
# The image tarballs include the kubeadm components, calico, haproxy and keepalived
[root@master01 install]# cd images
[root@master01 images]# ll
-rw-------. 1 root root 239771136 Dec 15 13:44 calico-cni.tar
-rw-------. 1 root root 218337280 Dec 13 10:56 calico-node.tar
-rw-------. 1 root root  21450752 Dec 13 10:55 calico-pod2daemon-flexvol.tar
-rw-------. 1 root root  45365760 Dec 15 12:49 coredns.tar
-rw-------. 1 root root 254629888 Dec 15 12:48 etcd.tar
-rw-------. 1 root root 120040960 Dec 15 12:48 kube-apiserver.tar
-rw-------. 1 root root 112045568 Dec 15 13:06 kube-controller-manager.tar
-rw-------. 1 root root 133185536 Dec 13 10:52 kube-controllers.tar
-rw-------. 1 root root 119695360 Dec 15 12:48 kube-proxy.tar
-rw-------. 1 root root  46919168 Dec 15 12:48 kube-scheduler.tar
-rw-------. 1 root root    692736 Dec 15 12:49 pause.tar
-rw-r--r--. 1 root root  74437120 Dec 15 11:22 haproxy.tar
-rw-r--r--. 1 root root  16337408 Dec 15 11:22 keepalived.tar
# Image directory path
[root@master01 images]# pwd
/usr/local/kubernetes/images
# Make the script executable
[root@master01 install]# chmod a+x load-images.sh
# Load the images
[root@master01 install]# ./load-images.sh
225df95e717c: Loading layer [==================================================>]  336.4kB/336.4kB
96d17b0b58a7: Loading layer [==================================================>]  45.02MB/45.02MB
Loaded image: k8s.gcr.io/coredns:1.7.0
79d541cda6cb: Loading layer [==================================================>]  3.041MB/3.041MB
e9933a1f21f5: Loading layer [==================================================>]  1.734MB/1.734MB
c3a6120d2fd6: Loading layer [==================================================>]  115.2MB/115.2MB
Loaded image: k8s.gcr.io/kube-apiserver:v1.19.0
...... (similar output for the remaining images)
# Extract start.keep.tar.gz
[root@master01 install]# tar -zxvf start.keep.tar.gz
data/
data/lb/
data/lb/start-keepalived.sh
data/lb/kubeadm-config.yaml
data/lb/etc/
data/lb/etc/haproxy.cfg
data/lb/start-haproxy.sh
# Move the data directory to the filesystem root
[root@master01 install]# mv data/ /
[root@master01 ~]# cd /data/lb/ && ll
drwxr-xr-x. 2 root root  25 Dec 15 11:27 etc
-rw-r--r--. 1 root root 832 Aug  5 2019 kubeadm-config.yaml
-rwxr-xr-x. 1 root root 404 Dec 15 11:28 start-haproxy.sh
-rwxr-xr-x. 1 root root 479 Dec 15 11:30 start-keepalived.sh
# Edit the haproxy.cfg file in the etc directory and adjust the backend server list
[root@master01 etc]# vim haproxy.cfg
# At the end of the file, modify the backend so that only one master server entry is kept for now (only master01 exists at this point)
backend be_k8s_6443
  mode tcp
  timeout queue 1h
  timeout server 1h
  timeout connect 1h
  log global
  balance roundrobin
  server rancher01 192.168.1.161:6443
# Edit start-haproxy.sh: set MasterIP1..MasterIP3 to the master node IPs; /usr/local/etc/haproxy/haproxy.cfg inside the container is mapped to the haproxy.cfg from the previous step
[root@master01 lb]# vim start-haproxy.sh
#!/bin/bash
MasterIP1=192.168.1.161
MasterIP2=192.168.1.162
MasterIP3=192.168.1.163
MasterPort=6443

docker run -d --restart=always --name HAProxy-K8S -p 6444:6444 \
        -e MasterIP1=$MasterIP1 \
        -e MasterIP2=$MasterIP2 \
        -e MasterIP3=$MasterIP3 \
        -e MasterPort=$MasterPort \
        -v /data/lb/etc/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg \
        wise2c/haproxy-k8s
# Run the script to start HAProxy
[root@master01 lb]# ./start-haproxy.sh
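
Before continuing, it is worth confirming that the container came up and that port 6444 is listening on master01 (the same check is done later on master02/master03):

# Confirm the HAProxy container is running and listening on 6444
[root@master01 lb]# docker ps | grep HAProxy-K8S
[root@master01 lb]# netstat -anpt | grep 6444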
# Edit start-keepalived.sh: set the VIP address and the network interface
[root@master01 lb]# vim start-keepalived.sh
#!/bin/bash
VIRTUAL_IP=192.168.1.160
INTERFACE=eth0
# kube-ipvs uses a /32 netmask by default; if http://VIP:nodePort is unreachable after deployment, simply leave this unset
# (in that case also remove the '-e NETMASK_BIT=$NETMASK_BIT \' line from the docker run arguments below)
#NETMASK_BIT=24
CHECK_PORT=6444
RID=10
VRID=160
MCAST_GROUP=224.0.0.18

docker run -itd --restart=always --name=Keepalived-K8S \
        --net=host --cap-add=NET_ADMIN \
        -e VIRTUAL_IP=$VIRTUAL_IP \
        -e INTERFACE=$INTERFACE \
        -e CHECK_PORT=$CHECK_PORT \
        -e RID=$RID \
        -e VRID=$VRID \
        -e NETMASK_BIT=$NETMASK_BIT \
        -e MCAST_GROUP=$MCAST_GROUP \
        wise2c/keepalived-k8s
# Check the VIP: run ip add; if 192.168.1.160 shows up on the eth0 interface, the VIP has been brought up successfully
[root@master01 lb]# ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:15:5d:01:6e:37 brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.161/24 brd 192.168.1.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet 192.168.1.160/24 scope global secondary eth0
       valid_lft forever preferred_lft forever
......

7. Install kubeadm on the master node

# Configure the Kubernetes yum repository
[root@master01 install]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Install kubelet, kubeadm and kubectl
[root@master01 install]# yum install -y kubelet-1.19.0-0 kubeadm-1.19.0-0 kubectl-1.19.0-0

# Enable kubelet at boot
[root@master01 install]# systemctl enable kubelet.service

# The /usr/local/kubernetes/install directory contains the kubeadm-config.yaml file
# --experimental-upload-certs has been replaced by --upload-certs
[root@master01 install]# kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs | tee kubeadm-init.log # this form is only for kubeadm 1.15 or earlier
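
For reference, a minimal sketch of what kubeadm-config.yaml might contain for this topology is shown below. The controlPlaneEndpoint (the Keepalived VIP plus the HAProxy port 6444), the pod subnet and the IPVS proxy mode are assumptions inferred from the rest of this walkthrough, so the actual file shipped in the install directory may differ:

apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.19.0
# All API traffic goes through the VIP and the HAProxy frontend port
controlPlaneEndpoint: "192.168.1.160:6444"
networking:
  podSubnet: "10.244.0.0/16"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
# Use the IPVS modules loaded in step 4
mode: ipvs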

# Initialize the cluster (run the command below on ONE node only; the other masters join the control plane later with kubeadm join, as described in section 8)
[root@master01 install]# kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log
......
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 192.168.1.160:6444 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:26a028f21b0173c40480253a6aec8b7e27fa8dae12d0c8bdf0abfe3cb291e449 \
    --control-plane --certificate-key a64c8668c2d3867429f1c43b9fca17c49c8e72b4689112a02dcd54898ef51fbb

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.1.160:6444 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:26a028f21b0173c40480253a6aec8b7e27fa8dae12d0c8bdf0abfe3cb291e449

# Run the post-init commands printed above
[root@master01 install]# mkdir -p $HOME/.kube
[root@master01 install]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master01 install]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

# List the cluster nodes
[root@master01 install]# kubectl get nodes
NAME       STATUS     ROLES    AGE     VERSION
master01   NotReady   master   79m     v1.19.0

# List the pods in the kube-system namespace; coredns stays Pending for now and will switch to Running once the Calico network plugin is installed
[root@master01 install]# kubectl get pods -o wide -n kube-system
NAME                               READY   STATUS    RESTARTS   AGE     IP              NODE       NOMINATED NODE   READINESS GATES
coredns-f9fd979d6-ghx8z            0/1     Pending   0          79m     <none>          <none>     <none>           <none>
coredns-f9fd979d6-qvhd4            0/1     Pending   0          79m     <none>          <none>     <none>           <none>
etcd-master01                      1/1     Running   0          79m     192.168.1.161   master01   <none>           <none>
kube-apiserver-master01            1/1     Running   0          79m     192.168.1.161   master01   <none>           <none>
kube-controller-manager-master01   1/1     Running   1          79m     192.168.1.161   master01   <none>           <none>
kube-proxy-6rphx                   1/1     Running   0          79m     192.168.1.161   master01   <none>           <none>
kube-scheduler-master01            1/1     Running   1          79m     192.168.1.161   master01   <none>           <none>

8. Other master nodes

# The remaining master nodes must go through the same steps described above
# EXCEPT for the kubeadm init step; do NOT run the command below on them
[root@master01 install]# kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log

# After finishing the system configuration, IPVS setup and Docker installation
# Create the directory on the other nodes (master02 and master03 shown here)
[root@master02 ~]# mkdir /usr/local/kubernetes
[root@master03 ~]# mkdir /usr/local/kubernetes
# From master01, distribute the install directory to master02 and master03
# Change into /usr/local/kubernetes first
[root@master01 kubernetes]# cd /usr/local/kubernetes
[root@master01 kubernetes]# scp -r install/ root@master02:/usr/local/kubernetes
[root@master01 kubernetes]# scp -r install/ root@master03:/usr/local/kubernetes

# On master02 and master03
# Load the images
[root@master02 install]# ./load-images.sh
225df95e717c: Loading layer [==================================================>]  336.4kB/336.4kB
96d17b0b58a7: Loading layer [==================================================>]  45.02MB/45.02MB
Loaded image: k8s.gcr.io/coredns:1.7.0
...... (similar output)
[root@master03 install]# ./load-images.sh
225df95e717c: Loading layer [==================================================>]  336.4kB/336.4kB
96d17b0b58a7: Loading layer [==================================================>]  45.02MB/45.02MB
Loaded image: k8s.gcr.io/coredns:1.7.0
...... (similar output)

# From master01, copy the data directory to master02 and master03
# (note: start-haproxy.sh mounts /data/lb/etc/haproxy.cfg, so on master02/master03 the data directory must end up at /data just as on master01; move it there if it is copied somewhere else)
[root@master01 kubernetes]# scp -r /data/ root@master02:/usr/local/kubernetes
[root@master01 kubernetes]# scp -r /data/ root@master03:/usr/local/kubernetes

# On master02 and master03, run the scripts
[root@master02 lb]# ./start-haproxy.sh 
12699c44a540736b7261e014da2dc0f2f43d8e531655252706b05c067373d978
[root@master02 lb]# ./start-keepalived.sh 
932deec00fd85c152ccd7bafea0ccd7980f93f2e46ce3f353a9b3e66b353b6b3

[root@master03 lb]# ./start-haproxy.sh 
0550bfef07b2ba01835711a000c92d4b7a458ba43e039c4ecc98daf71bedaddf
[root@master03 lb]# ./start-keepalived.sh 
5f068e3e418eee99a12cb24797a2b604b9e0b7a64e71dd5716c74991323c7fb9

# Use netstat to confirm that port 6444 is being listened on
[root@master02 lb]# netstat -anpt | grep 6444
tcp6       0      0 :::6444                 :::*                    LISTEN      21358/docker-proxy  
[root@master03 lb]# netstat -anpt | grep 6444
tcp6       0      0 :::6444                 :::*                    LISTEN      21358/docker-proxy  

# Configure the Kubernetes yum repository (this and the following install steps also need to be run on master02 and master03)
[root@master01 install]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF


# Install kubelet, kubeadm and kubectl
[root@master01 install]# yum install -y kubelet-1.19.0-0 kubeadm-1.19.0-0 kubectl-1.19.0-0
# Enable kubelet at boot
[root@master01 install]# systemctl enable kubelet.service


# Join each node to the control plane with the join command (use the command printed when master01 was initialized; the one below is only an example)
# Both master02 and master03 need to run this command
[root@master02 lb]# kubeadm join 192.168.1.160:6444 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:26a028f21b0173c40480253a6aec8b7e27fa8dae12d0c8bdf0abfe3cb291e449 \
    --control-plane --certificate-key a64c8668c2d3867429f1c43b9fca17c49c8e72b4689112a02dcd54898ef51fbb

[root@master03 lb]# kubeadm join 192.168.1.160:6444 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:26a028f21b0173c40480253a6aec8b7e27fa8dae12d0c8bdf0abfe3cb291e449 \
    --control-plane --certificate-key a64c8668c2d3867429f1c43b9fca17c49c8e72b4689112a02dcd54898ef51fbb

# After joining successfully
This node has joined the cluster and a new control plane instance was created:

* Certificate signing request was sent to apiserver and approval was received.
* The Kubelet was informed of the new secure connection details.
* Control plane (master) label and taint were applied to the new node.
* The Kubernetes control plane instances scaled up.
* A new etcd member was added to the local/stacked etcd cluster.

To start administering your cluster from this node, you need to run the following as a regular user:

	mkdir -p $HOME/.kube
	sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
	sudo chown $(id -u):$(id -g) $HOME/.kube/config

Run 'kubectl get nodes' to see this node join the cluster.

# Run the kubeconfig setup commands (the other masters are control-plane nodes too and need the admin kubeconfig)
[root@master02 install]# mkdir -p $HOME/.kube
[root@master02 install]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master02 install]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

[root@master03 install]# mkdir -p $HOME/.kube
[root@master03 install]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master03 install]# sudo chown $(id -u):$(id -g) $HOME/.kube/config


# The other nodes can now list the cluster nodes as well
[root@master02 install]# kubectl get nodes
NAME       STATUS     ROLES    AGE     VERSION
master01   NotReady   master   79m     v1.19.0
master02   NotReady   master   8m13s   v1.19.0
master03   NotReady   master   65s     v1.19.0

# The kube-system namespace now also contains the apiserver, scheduler and controller-manager pods of the other master nodes
# Apart from coredns, all of them are in the Running state
[root@master02 install]# kubectl get pods -o wide -n kube-system
NAME                               READY   STATUS    RESTARTS   AGE   IP              NODE       NOMINATED NODE   READINESS GATES
coredns-f9fd979d6-ghx8z            0/1     Pending   0          91m   <none>          <none>     <none>           <none>
coredns-f9fd979d6-qvhd4            0/1     Pending   0          91m   <none>          <none>     <none>           <none>
etcd-master01                      1/1     Running   0          91m   192.168.1.161   master01   <none>           <none>
etcd-master02                      1/1     Running   0          20m   192.168.1.162   master02   <none>           <none>
etcd-master03                      1/1     Running   0          13m   192.168.1.163   master03   <none>           <none>
kube-apiserver-master01            1/1     Running   0          91m   192.168.1.161   master01   <none>           <none>
kube-apiserver-master02            1/1     Running   1          20m   192.168.1.162   master02   <none>           <none>
kube-apiserver-master03            1/1     Running   0          13m   192.168.1.163   master03   <none>           <none>
kube-controller-manager-master01   1/1     Running   1          91m   192.168.1.161   master01   <none>           <none>
kube-controller-manager-master02   1/1     Running   0          20m   192.168.1.162   master02   <none>           <none>
kube-controller-manager-master03   1/1     Running   0          13m   192.168.1.163   master03   <none>           <none>
kube-proxy-6rphx                   1/1     Running   0          91m   192.168.1.161   master01   <none>           <none>
kube-proxy-dx9ww                   1/1     Running   0          20m   192.168.1.162   master02   <none>           <none>
kube-proxy-l66zg                   1/1     Running   0          13m   192.168.1.163   master03   <none>           <none>
kube-scheduler-master01            1/1     Running   1          91m   192.168.1.161   master01   <none>           <none>
kube-scheduler-master02            1/1     Running   0          20m   192.168.1.162   master02   <none>           <none>
kube-scheduler-master03            1/1     Running   0          13m   192.168.1.163   master03   <none>           <none>

9. Install Calico on the master node

# On master01, change into the /usr/local/kubernetes/install directory
# Apply the calico.yaml manifest
# First check which images the file references and make sure they have been loaded
[root@master01 install]# cat calico.yaml | grep image
          image: docker.io/calico/cni:v3.21.2
          image: docker.io/calico/cni:v3.21.2
          image: docker.io/calico/pod2daemon-flexvol:v3.21.2
          image: docker.io/calico/node:v3.21.2
          image: docker.io/calico/kube-controllers:v3.21.2

# Check that every node (master01, master02 and master03) has these images
[root@master01 install]# docker images | grep calico
calico/node                          v3.21.2   f1bca4d4ced2   9 days ago      214MB
calico/pod2daemon-flexvol            v3.21.2   7778dd57e506   10 days ago     21.3MB
calico/cni                           v3.21.2   4c5c32530391   10 days ago     239MB
calico/kube-controllers              v3.21.2   b20652406028   10 days ago     132MB
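
If either of the other masters is missing one of these images, the easiest fix is to re-run load-images.sh there, or load the individual tarball by hand, for example:

# Re-load a single Calico image from the tarball distributed earlier
[root@master02 images]# docker load -i calico-node.tar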

# Apply the manifest
[root@master01 install]# kubectl apply -f calico.yaml
configmap/calico-config configured
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/caliconodestatuses.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipreservations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers configured
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers unchanged
clusterrole.rbac.authorization.k8s.io/calico-node configured
clusterrolebinding.rbac.authorization.k8s.io/calico-node configured
daemonset.apps/calico-node configured
serviceaccount/calico-node configured
deployment.apps/calico-kube-controllers configured
serviceaccount/calico-kube-controllers configured
poddisruptionbudget.policy/calico-kube-controllers configured



# This can take a while; check with the command below and wait until every pod is Running
[root@master01 install]# kubectl get pods -o wide -n kube-system 
NAME                                       READY   STATUS    RESTARTS   AGE     IP              NODE       NOMINATED NODE   READINESS GATES
calico-kube-controllers-558995777d-kg7bv   1/1     Running   0          4m27s   10.244.241.66   master01   <none>           <none>
calico-node-5hlnh                          1/1     Running   0          4m28s   192.168.1.161   master01   <none>           <none>
calico-node-vmqv2                          1/1     Running   1          4m28s   192.168.1.163   master03   <none>           <none>
calico-node-zvfg4                          1/1     Running   2          4m28s   192.168.1.162   master02   <none>           <none>
coredns-f9fd979d6-ghx8z                    1/1     Running   0          154m    10.244.59.192   master02   <none>           <none>
coredns-f9fd979d6-qvhd4                    1/1     Running   0          154m    10.244.241.65   master01   <none>           <none>
etcd-master01                              1/1     Running   3          154m    192.168.1.161   master01   <none>           <none>
etcd-master02                              1/1     Running   1          82m     192.168.1.162   master02   <none>           <none>
etcd-master03                              1/1     Running   1          75m     192.168.1.163   master03   <none>           <none>
kube-apiserver-master01                    1/1     Running   3          154m    192.168.1.161   master01   <none>           <none>
kube-apiserver-master02                    1/1     Running   1          83m     192.168.1.162   master02   <none>           <none>
kube-apiserver-master03                    1/1     Running   1          75m     192.168.1.163   master03   <none>           <none>
kube-controller-manager-master01           1/1     Running   7          154m    192.168.1.161   master01   <none>           <none>
kube-controller-manager-master02           1/1     Running   4          83m     192.168.1.162   master02   <none>           <none>
kube-controller-manager-master03           1/1     Running   4          75m     192.168.1.163   master03   <none>           <none>
kube-proxy-6rphx                           1/1     Running   0          154m    192.168.1.161   master01   <none>           <none>
kube-proxy-dx9ww                           1/1     Running   0          83m     192.168.1.162   master02   <none>           <none>
kube-proxy-l66zg                           1/1     Running   0          75m     192.168.1.163   master03   <none>           <none>
kube-scheduler-master01                    1/1     Running   5          154m    192.168.1.161   master01   <none>           <none>
kube-scheduler-master02                    1/1     Running   6          83m     192.168.1.162   master02   <none>           <none>
kube-scheduler-master03                    1/1     Running   5          75m     192.168.1.163   master03   <none>           <none>

# When listing the nodes again, they are all Ready
[root@master01 images]# kubectl get nodes
NAME       STATUS   ROLES    AGE    VERSION
master01   Ready    master   155m   v1.19.0
master02   Ready    master   83m    v1.19.0
master03   Ready    master   76m    v1.19.0

# Finally, on every node edit the server address in /root/.kube/config and change it to the node's own IP and port 6443 (a sed sketch follows the file below)
[root@master03 ~]# vim /root/.kube/config
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeE1USXhOVEF6TlRneU1Wb1hEVE14TVRJeE16QXpOVGd5TVZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTVNhCnZNOGsyb2ZkRHZyanZZNHJLN204NU5McS9ZNWoyZlVnZ0tOSFd0b2hKTnVlM0dVbGFKeXNtNUVNYTlzY1ZHeE4KTkJaTGd2Ujcvc3BIdk5YbU1tRHBMUXlYWU9hZlhmM0tJd3BTamFLS01peXBDcXhRZEtFUGRyVkY0RjZSbEJpegp2MFdiRWM4NHdkU2lRYWNqaXo4dkVEUWt5T1E3ckswemZUZ3MrdzIrRkNyb2F6S1hFSXVkNEllWC9ZbWpXNjlqCmtNYklTMmtVcVZYSzRBNXJIbFdPZkltSXNRWG1zRkVvRkwxdjFyZ1U1dE93WldwSGl0OXduR3g2L3ozK05FYkoKc0Z3dzl5QUlnRzhZNUlxZWVqczMxTTlaNDk2Rkt2ZUNPM29DZ1JlOVdpNDBQMHBCa1paN2J1V0lFY0psdTgvNwpjVEU2eFZmSUlacDRVOXoxYmhNQ0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZJanc4VkZuNVVmSE5RMEZSeUVrd3VjZWhDUGZNQTBHQ1NxR1NJYjMKRFFFQkN3VUFBNElCQVFCSzhpaHVibFloSkZLMGpjVWFDVUVsQndwS0ZWM1pyOFcyRS9JM3lsVHlGQmc2ek5xSwo1YlcrVjdCekNZeCtVQW1EZURUaFV5Rzl5R1c2OWZtTHRXU2xidVJNSGZOWStQVUF4dTRScWM4NkllekIrOUtxClV1K1BleHRNaDA5YVIvZWRhTXpndlFMRjNNa2tDOGRHUGVjRE96R25NYWlqNXRaN0xMam1pQzZOdWp6TTBtakkKNzlMbWg5T1BmWmpnYVpwRXJQTXZXTGp5WDZmTWxLbThzanFvdkZ2RVZRVTJWSW5jczJ4ZGg3SjNpSmZaSmVZNwppcnRDb2M4bEJzVXplZ2dmVXB3aVZvNEtHZnQ4ZXFUN1BiOEttRjZFQWlEb2MrUE5WZkpVTVZDSVBEQ2tVQnVOCmlzOVVkOVVYN2RsRVNReGx3MjhKeHRINVJkVm83alk5aGxqVAotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
    server: https://192.168.1.163:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
  user:
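
Instead of editing the file by hand, the same change can be made with sed; a sketch for master03 (assuming the server line currently points at the VIP on port 6444, adjust the IP for each node):

# Point kubectl on master03 at its local apiserver instead of the VIP
[root@master03 ~]# sed -i 's#server: https://192.168.1.160:6444#server: https://192.168.1.163:6443#' /root/.kube/config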
