Kubernetes Offline Installation

Installing Kubernetes in an offline environment.

Package download: extraction code r8xw

IP Address    OS         Node Role
10.0.0.30     CentOS 7   master node; Harbor registry
10.0.0.40     CentOS 7   worker node
10.0.0.50     CentOS 7   worker node

1. Environment Preparation

Disable the firewall and clear its rules, disable SELinux, turn off the swap partition, and configure /etc/hosts name resolution.

The following operations must be performed on all three nodes.

Upgrade the system kernel.

Enable IP forwarding.

$ systemctl stop firewalld
$ systemctl disable firewalld
$ firewall-cmd --state
not running

$ iptables -F
$ iptables -X
$ iptables -Z
$ /usr/sbin/iptables-save

$ sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
$ setenforce 0
$ swapoff -a
$ sed -i.bak '/swap/s/^/#/' /etc/fstab
$ cat >>/etc/hosts<<EOF
10.0.0.30 xnode1
10.0.0.40 xnode2
10.0.0.50 xnode3
EOF
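Optionally, a quick sanity check that the new /etc/hosts entries resolve and that the nodes can reach one another; this is a minimal sketch using the hostnames defined above.

#Confirm each hostname resolves and answers a single ping
$ for h in xnode1 xnode2 xnode3; do ping -c 1 -W 1 $h >/dev/null && echo "$h OK" || echo "$h unreachable"; done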

#Upgrade the system kernel:
#Import the public key of the elrepo repository
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org

#Install the elrepo yum repository
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm

#Enable the elrepo repository and install the latest mainline kernel (kernel-ml)
yum --enablerepo=elrepo-kernel install kernel-ml -y

#List the available kernels and their boot menu entries
$ sudo awk -F\' '$1=="menuentry " {print i++ " : " $2}' /etc/grub2.cfg
0 : CentOS Linux (5.19.1-1.el7.elrepo.x86_64) 7 (Core)
1 : CentOS Linux (3.10.0-327.el7.x86_64) 7 (Core)
2 : CentOS Linux (0-rescue-1924e95064844f2ca7d9aad6ae89d2a3) 7 (Core)

#Set the first kernel in the boot menu (entry 0, the newly installed one) as the default and regenerate the grub configuration file
$ grub2-set-default 0
$ grub2-mkconfig -o /boot/grub2/grub.cfg
Generating grub configuration file ...
Found linux image: /boot/vmlinuz-5.19.1-1.el7.elrepo.x86_64
Found initrd image: /boot/initramfs-5.19.1-1.el7.elrepo.x86_64.img
Found linux image: /boot/vmlinuz-3.10.0-327.el7.x86_64
Found initrd image: /boot/initramfs-3.10.0-327.el7.x86_64.img
Found linux image: /boot/vmlinuz-0-rescue-1924e95064844f2ca7d9aad6ae89d2a3
Found initrd image: /boot/initramfs-0-rescue-1924e95064844f2ca7d9aad6ae89d2a3.img
done

#Reboot and verify
$ reboot
$ uname -r
5.19.1-1.el7.elrepo.x86_64
#Remove the old kernel packages
yum remove kernel-tools kernel -y

#Enable IP forwarding
cat >> /etc/sysctl.conf << EOF
net.ipv4.ip_forward=1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

$ modprobe br_netfilter
$ sysctl -p
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
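Note that `modprobe br_netfilter` does not survive a reboot, so the bridge sysctls would fail to apply on the next boot. A minimal sketch to load the module automatically, assuming systemd's modules-load.d mechanism:

#Load br_netfilter at boot so the bridge-nf sysctls keep applying after restarts
$ cat > /etc/modules-load.d/br_netfilter.conf << EOF
br_netfilter
EOF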

Configure IP addresses on the three hosts:

#Run the following on the xnode1 master node
[root@xnode1 ~]# nmcli connection modify eno16777736 connection.autoconnect yes ipv4.addresses 10.0.0.30/24 ipv4.gateway 10.0.0.2 ipv4.dns 8.8.8.8 ipv4.method manual
[root@xnode1 ~]# nmcli connection down eno16777736 && nmcli connection up eno16777736 && nmcli connection reload eno16777736

[root@xnode1 ~]# ifconfig | grep inet | awk NR==12'{print$2}'
10.0.0.30
[root@xnode1 ~]# ping www.baidu.com -c 4
PING www.a.shifen.com (14.215.177.38) 56(84) bytes of data.
64 bytes from 14.215.177.38: icmp_seq=1 ttl=128 time=27.3 ms
64 bytes from 14.215.177.38: icmp_seq=2 ttl=128 time=27.6 ms
64 bytes from 14.215.177.38: icmp_seq=3 ttl=128 time=27.2 ms
64 bytes from 14.215.177.38: icmp_seq=4 ttl=128 time=27.4 ms

--- www.a.shifen.com ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 3008ms
rtt min/avg/max/mdev = 27.233/27.443/27.662/0.193 ms

#Run the following on the xnode2 worker node
[root@xnode2 ~]# nmcli connection modify eno16777736 connection.autoconnect yes ipv4.addresses 10.0.0.40/24 ipv4.gateway 10.0.0.2 ipv4.dns 8.8.8.8 ipv4.method manual
[root@xnode2 ~]# nmcli connection down eno16777736 && nmcli connection up eno16777736 && nmcli connection reload eno16777736

[root@xnode2 ~]# ifconfig | grep inet | awk NR==12'{print$2}'
10.0.0.40
[root@xnode2 ~]# ping www.baidu.com -c 4
PING www.a.shifen.com (14.215.177.38) 56(84) bytes of data.
64 bytes from 14.215.177.38: icmp_seq=1 ttl=128 time=27.3 ms
64 bytes from 14.215.177.38: icmp_seq=2 ttl=128 time=27.6 ms
64 bytes from 14.215.177.38: icmp_seq=3 ttl=128 time=27.2 ms
64 bytes from 14.215.177.38: icmp_seq=4 ttl=128 time=27.4 ms

--- www.a.shifen.com ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 3008ms
rtt min/avg/max/mdev = 27.233/27.443/27.662/0.193 ms

#Run the following on the xnode3 worker node
[root@xnode3 ~]# nmcli connection modify eno16777736 connection.autoconnect yes ipv4.addresses 10.0.0.50/24 ipv4.gateway 10.0.0.2 ipv4.dns 8.8.8.8 ipv4.method manual
[root@xnode3 ~]# nmcli connection down eno16777736 && nmcli connection up eno16777736 && nmcli connection reload eno16777736

[root@xnode3 ~]# ifconfig | grep inet | awk NR==12'{print$2}'
10.0.0.50
[root@xnode3 ~]# ping www.baidu.com -c 4
PING www.a.shifen.com (14.215.177.38) 56(84) bytes of data.
64 bytes from 14.215.177.38: icmp_seq=1 ttl=128 time=27.3 ms
64 bytes from 14.215.177.38: icmp_seq=2 ttl=128 time=27.6 ms
64 bytes from 14.215.177.38: icmp_seq=3 ttl=128 time=27.2 ms
64 bytes from 14.215.177.38: icmp_seq=4 ttl=128 time=27.4 ms

2. Configure the FTP Service

[root@xnode1 ~]# yum install vsftpd.x86_64 -y
[root@xnode1 ~]# cp -rp /etc/vsftpd/vsftpd.conf{,.bak}
[root@xnode1 ~]# sed -i '1i\anon_root=/' /etc/vsftpd/vsftpd.conf #set the anonymous FTP root directory
[root@xnode1 ~]# systemctl start vsftpd && systemctl enable vsftpd #start and enable the vsftpd service
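Because anon_root is set to /, the repository directories become reachable as anonymous FTP paths. A quick check that anonymous access works before pointing the worker nodes at it, assuming curl is available:

#Should list the base and repodata directories exported earlier
$ curl --list-only ftp://10.0.0.30/opt/Docker/Docker/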

3. Install Docker

Configure the Docker and Kubernetes repositories.

The following operations must be performed on all three nodes.

①. Upload the Docker repository files to /opt.

②. Create a local Docker yum repository.

③. Configure the Aliyun registry mirror and deploy docker-compose.

[root@xnode1 ~]# ll /opt/Docker/Docker/
total 28
drwxr-xr-x 2 root root 20480 Aug 16 17:39 base
drwxr-xr-x 2 root root  4096 Aug 16 17:39 repodata

[root@xnode1 ~]# cat >>/etc/yum.repos.d/local.repo<<EOF
[centos]
name=centos
baseurl=file:///tmp/centos
gpgcheck=0
enabled=1
[Docker]
name=Docker
baseurl=file:///opt/Docker/Docker/
gpgcheck=0
enabled=1
[Kubernetes]
name=Kubernetes
baseurl=file:///root/kubernetes/Kubernetes/
gpgcheck=0
enabled=1
EOF

#Configure the local ISO repository and make the mount persistent across reboots
[root@xnode1 ~]# mkdir -p /tmp/centos && mount /dev/cdrom /tmp/centos/
[root@xnode1 ~]# echo "/dev/cdrom /tmp/centos iso9660 defaults 0 0" >> /etc/fstab

[root@xnode1 ~]# yum clean all
[root@xnode1 ~]# yum makecache fast
[root@xnode1 ~]# yum repolist 
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * elrepo: hkg.mirror.rackspace.com
 * epel: hkg.mirror.rackspace.com
repo id                    repo name                                                              status
Docker                     Docker                                                                    341
centos                     centos                                                                  3,723
elrepo                     ELRepo.org Community Enterprise Linux Repository - el7                    150
epel/x86_64                Extra Packages for Enterprise Linux 7 - x86_64                         13,758
kubernetes                 kubernetes                                                                341


#Install Docker: first the dependency packages, then Docker itself
[root@xnode1 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
[root@xnode1 ~]# yum install docker-ce-18.09.6 docker-ce-cli-18.09.6 containerd.io -y

#Configure the Docker registry mirror and cgroup driver
[root@xnode1 ~]# mkdir -p /etc/docker/
[root@xnode1 ~]# tee /etc/docker/daemon.json <<-'EOF'
{
 "registry-mirrors": ["https://5twf62k1.mirror.aliyuncs.com"], 
 "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF

[root@xnode1 ~]# systemctl daemon-reload 
[root@xnode1 ~]# systemctl start docker.service
[root@xnode1 ~]# systemctl enable docker.service

[root@xnode1 ~]# chmod +x /opt/Docker/compose/docker-compose
[root@xnode1 ~]# mv /opt/Docker/compose/docker-compose /usr/local/bin/
[root@xnode1 ~]# docker-compose --version
docker-compose version 1.24.1, build 4667896b

#xnode2 node: configure the FTP-based Docker, CentOS and Kubernetes repositories
[root@xnode2 ~]# mkdir -p /tmp/centos
[root@xnode2 ~]# mount /dev/cdrom /tmp/centos/
[root@xnode2 ~]# cat >>/etc/yum.repos.d/local.repo<<EOF
[Docker]
name=Docker
baseurl=ftp://10.0.0.30/opt/Docker
gpgcheck=0
enabled=1
[Centos]
name=Centos
baseurl=ftp://10.0.0.30/tmp/centos/
gpgcheck=0
enabled=1
[Kubernetes]
name=Kubernetes
baseurl=ftp://10.0.0.30/root/kubernetes/Kubernetes/
gpgcheck=0
enabled=1
EOF

#Clean the cache and rebuild the yum metadata
[root@xnode2 ~]# yum clean all
[root@xnode2 ~]# yum makecache fast
[root@xnode2 ~]# yum repolist 
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * elrepo: mirror.rackspace.com
repo id                repo name                                                             status
Centos                 Centos                                                                4,070
Docker                 Docker                                                                  463
elrepo                 ELRepo.org Community Enterprise Linux Repository - el7                  150
repolist: 4,683

#Install Docker on the xnode2 node and configure the Aliyun registry mirror
[root@xnode2 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
[root@xnode2 ~]# yum install docker-ce -y
[root@xnode2 opt]# chmod +x docker-compose
[root@xnode2 opt]# mv docker-compose /usr/local/bin/
[root@xnode2 opt]# tee /etc/docker/daemon.json <<-'EOF'
{
 "registry-mirrors": ["https://5twf62k1.mirror.aliyuncs.com"], 
 "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
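The daemon.json above only takes effect once the Docker daemon is (re)started, and the steps above do not start Docker on xnode2, which kubeadm will need later. A sketch of the remaining steps; the mkdir is only needed if the tee above complained that /etc/docker did not exist:

#Create the config directory if needed, then (re)start Docker with the new daemon.json and enable it at boot
[root@xnode2 opt]# mkdir -p /etc/docker
[root@xnode2 opt]# systemctl daemon-reload && systemctl restart docker && systemctl enable docker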

[root@xnode1 ~]# docker info | grep Cgroup
Cgroup Driver: systemd

#xnode3 is configured the same way as xnode2, so it is not repeated here

4. Deploy Kubernetes

Deployment environment preparation:

①. Configure chrony time synchronization.

②. Create /etc/sysctl.d/k8s.conf on all nodes.

③. Configure IPVS.

#Configure chrony time synchronization
[root@xnode1 ~]# yum install chrony -y

#On the xnode1 master node: comment out the default NTP servers, point at public upstream NTP servers, and allow the other nodes to synchronize from this node.
[root@xnode1 ~]# sed -i 's/^server/#&/' /etc/chrony.conf
[root@xnode1 ~]# cat >> /etc/chrony.conf << EOF
server 0.asia.pool.ntp.org iburst
server 1.asia.pool.ntp.org iburst
server 2.asia.pool.ntp.org iburst
server 3.asia.pool.ntp.org iburst
allow all
EOF

[root@xnode1 ~]# systemctl start chronyd.service && systemctl enable chronyd

#xnode2 and xnode3 worker nodes: use the internal master node as the upstream NTP server, then start the service and enable it at boot
sed -i 's/^server/#&/' /etc/chrony.conf
echo server 10.0.0.30 iburst >> /etc/chrony.conf
systemctl start chronyd.service && systemctl enable chronyd

#Verify
[root@xnode1 ~]# chronyc sources
210 Number of sources = 4
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^? 103-153-176-123.as131657>     0   6     0     -     +0ns[   +0ns] +/-    0ns
^+ 202.28.116.236                1   6   177     2   +157us[ +157us] +/-  195ms
^* time.cloudflare.com           3   6   177     3  +6250us[+5670us] +/-  109ms
^- 125-228-203-179.hinet-ip>     2   6    77    64    +29ms[  +28ms] +/-   57ms

[root@xnode2 ~]# chronyc sources
210 Number of sources = 1
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^* xnode1                        4   6    37    27  -2446ns[-1655us] +/-  114ms

[root@xnode3 ~]# chronyc sources
210 Number of sources = 1
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^? xnode1                        2   6     1     2  -6092us[-6092us] +/-   42ms

#Perform on xnode1, xnode2 and xnode3
#Create the k8s.conf file
cat << EOF | tee /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
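The file above is only read at boot (or when sysctl is re-run), so to apply the parameters immediately:

#Load all sysctl configuration, including /etc/sysctl.d/k8s.conf, without rebooting
sysctl --system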

#Configure IPVS
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules
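One thing to watch for: on the 5.x kernel installed earlier, the nf_conntrack_ipv4 module no longer exists (it was merged into nf_conntrack around kernel 4.19), so that modprobe line will report an error; loading nf_conntrack instead is expected. A quick verification:

#On kernels >= 4.19, nf_conntrack replaces nf_conntrack_ipv4
modprobe -- nf_conntrack
#Confirm the IPVS and conntrack modules are loaded
lsmod | grep -e ip_vs -e nf_conntrack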

#Install the ipset and ipvsadm packages
yum install ipset ipvsadm -y

Install Kubernetes:

①. Install the Kubernetes packages; perform on all nodes.

②. Initialize Kubernetes; master node only.

③. Configure the kubectl tool and install the Kubernetes network plugin; master node only.

④. Join xnode2 and xnode3 to the Kubernetes cluster; run on xnode2 and xnode3.

⑤. Install the Kubernetes dashboard; master node only.

yum install -y kubelet-1.14.1 kubeadm-1.14.1 kubectl-1.14.1
systemctl enable kubelet.service && systemctl start kubelet.service

#Initialize Kubernetes
[root@xnode1 ~]# kubeadm init --apiserver-advertise-address 10.0.0.30 \
--kubernetes-version="v1.14.1" --pod-network-cidr=10.244.0.0/16 \
--image-repository=registry.aliyuncs.com/google_containers


  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 10.0.0.30:6443 --token yqctmc.j325mz4wzhwq3nfl \
    --discovery-token-ca-cert-hash sha256:3fd5ab2000b407f01ef601461990796f84cc8a6bc5f5c99b461f7d3eda0fcc1b 
    
[root@xnode1 ~]# mkdir -p $HOME/.kube
[root@xnode1 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@xnode1 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

#Check the cluster component status
[root@xnode1 ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok                  
scheduler            Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   

#Install the Kubernetes network plugin (flannel)
[root@xnode1 ~]# wget https://raw.githubusercontent.com/coreos/flannel/a70459be0084506e4ec919aa1c114638878db11b/Documentation/kube-flannel.yml
[root@xnode1 ~]# kubectl apply -f kube-flannel.yml 
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.extensions/kube-flannel-ds-amd64 created
daemonset.extensions/kube-flannel-ds-arm64 created
daemonset.extensions/kube-flannel-ds-arm created
daemonset.extensions/kube-flannel-ds-ppc64le created
daemonset.extensions/kube-flannel-ds-s390x created

#Check the kube-system pods
[root@xnode1 ~]# kubectl get pod -n kube-system
NAME                             READY   STATUS    RESTARTS   AGE
coredns-8686dcc4fd-f9pqv         1/1     Running   0          12m
coredns-8686dcc4fd-mtcmn         1/1     Running   0          12m
etcd-xnode1                      1/1     Running   0          12m
kube-apiserver-xnode1            1/1     Running   0          11m
kube-controller-manager-xnode1   1/1     Running   0          11m
kube-flannel-ds-amd64-frh8l      1/1     Running   0          77s
kube-proxy-q7z99                 1/1     Running   0          12m
kube-scheduler-xnode1            1/1     Running   0          12m

#Join xnode2 and xnode3 to the Kubernetes cluster (run on xnode2 and xnode3)
kubeadm join 10.0.0.30:6443 --token yqctmc.j325mz4wzhwq3nfl \
--discovery-token-ca-cert-hash sha256:3fd5ab2000b407f01ef601461990796f84cc8a6bc5f5c99b461f7d3eda0fcc1b

[root@xnode1 ~]# kubectl get node
NAME     STATUS     ROLES    AGE   VERSION
xnode1   Ready      master   16m   v1.14.1
xnode2   NotReady   <none>   22s   v1.14.1
xnode3   NotReady   <none>   22s   v1.14.1
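The worker nodes show NotReady until the flannel DaemonSet pods have been pulled and started on them; this can be followed from the master with something like:

#Watch the flannel pods roll out and the nodes turn Ready
[root@xnode1 ~]# kubectl get pods -n kube-system -o wide | grep flannel
[root@xnode1 ~]# kubectl get node -w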

#Install the Kubernetes dashboard
[root@xnode1 ~]# kubectl apply -f \
https://raw.githubusercontent.com/kubernetes/dashboard/v2.3.1/aio/deploy/recommended.yaml

#Expose the access port
[root@xnode1 ~]# kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
# Change type: ClusterIP to type: NodePort

     21   ports:
     22   - port: 443
     23     protocol: TCP
     24     targetPort: 8443
     25   selector:
     26     k8s-app: kubernetes-dashboard
     27   sessionAffinity: None
     28   type: NodePort	==> changed
     
#Check the assigned NodePort
[root@xnode1 ~]# kubectl get svc -A |grep kubernetes-dashboard
kubernetes-dashboard   dashboard-metrics-scraper   ClusterIP   10.111.84.203   <none>        8000/TCP                 2m56s
kubernetes-dashboard   kubernetes-dashboard        NodePort    10.99.20.86     <none>        443:32429/TCP            2m56s
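With the service exposed as a NodePort (32429 in the output above; the value will differ per cluster), the dashboard is reachable over HTTPS on any node IP. A quick reachability check, assuming curl:

#The dashboard uses a self-signed certificate, so -k is needed; open https://10.0.0.30:32429 in a browser to log in
[root@xnode1 ~]# curl -k -I https://10.0.0.30:32429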
#Create an access account
[root@xnode1 ~]# cat >>dash.yaml<<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
EOF
[root@xnode1 ~]# kubectl apply -f dash.yaml 
serviceaccount/admin-user unchanged
clusterrolebinding.rbac.authorization.k8s.io/admin-user created

#Get the access token
[root@xnode1 ~]# kubectl -n kubernetes-dashboard get secret $(kubectl -n kubernetes-dashboard get sa/admin-user -o jsonpath="{.secrets[0].name}") -o go-template="{{.data.token | base64decode}}"
eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLXA5NjRyIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI5NjIxOTAwYy0xZGUxLTExZWQtODFjNy0wMDBjMjkyY2U5YTUiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6YWRtaW4tdXNlciJ9.D6Tq2IK2mxMkwFQ03m16VfpGfjXmtdMUdRpKmW-wsZvQqRxNXsTC1iaA4-dwqoIpRnUmBzILevZ6AtTO911zC5F2KsMSdLcuV0zHeFc1MqR14CD63r_HweGZEHU1668OLrV_1RhTVj2htPj7osA1HzQ3pev7c5I2Ro6fhVRVmnnS3Js8jdaAZMjV38e6uCuzrqOxPF4l-eBXJmtvsf4-XMioGRbkt7GOVrxzSTxBTCLm3vlPowFVRgY71qqtorr36gnaL4n7RJGS0WjOYuBKdywpyg_k5EjhqSJx3rrnvnhuUHiebfMSC6gDhpWv0HGaXYP81wWsBJo0cYwODyanpg

5. Kubernetes Optimization

Configure kubectl command completion:

[root@xnode1 ~]# source /usr/share/bash-completion/bash_completion 
[root@xnode1 ~]# source <(kubectl completion bash)
[root@xnode1 ~]# echo "source <(kubectl completion bash)" >> ~/.bashrc
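Optionally, a short alias can be added as well; this is a common convenience, not a required step, and it assumes the completion script loaded above defines the __start_kubectl function (recent kubectl versions do):

#Alias k=kubectl and keep tab completion working for the alias
[root@xnode1 ~]# echo "alias k=kubectl" >> ~/.bashrc
[root@xnode1 ~]# echo "complete -o default -F __start_kubectl k" >> ~/.bashrc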

Run kubectl commands on the xnode2 node:

[root@xnode1 ~]# scp -r ~/.kube/ 10.0.0.40:~/

[root@xnode2 ~]# kubectl get pods -n kube-system
NAME                             READY   STATUS    RESTARTS   AGE
coredns-8686dcc4fd-f9pqv         1/1     Running   1          3h48m
coredns-8686dcc4fd-mtcmn         1/1     Running   1          3h48m
etcd-xnode1                      1/1     Running   1          3h47m
kube-apiserver-xnode1            1/1     Running   1          3h47m
kube-controller-manager-xnode1   1/1     Running   1          3h47m
kube-flannel-ds-amd64-67htn      1/1     Running   2          3h32m
kube-flannel-ds-amd64-frh8l      1/1     Running   1          3h36m
kube-proxy-q7z99                 1/1     Running   1          3h48m
kube-proxy-vw9wf                 1/1     Running   1          3h32m
kube-scheduler-xnode1            1/1     Running   1          3h47m

6. Configure kube-proxy IPVS

Enable IPVS:

①. Modify the kube-proxy configuration.

②. Restart kube-proxy.

[root@xnode1 ~]# kubectl edit cm kube-proxy -n kube-system

     32     ipvs:
     33       excludeCIDRs: null
     34       minSyncPeriod: 0s
     35       scheduler: ""
     36       syncPeriod: 30s
     37     kind: KubeProxyConfiguration
     38     metricsBindAddress: 127.0.0.1:10249
     39     mode: "ipvs"				==> set to ipvs
     40     nodePortAddresses: null
     41     oomScoreAdj: -999
     42     portRange: ""
     43     resourceContainer: /kube-proxy
     44     udpIdleTimeout: 250ms
     
[root@xnode1 ~]# kubectl get pod -n kube-system | grep kube-proxy | awk '{system("kubectl delete pod "$1" -n kube-system")}'
pod "kube-proxy-q7z99" deleted
pod "kube-proxy-vw9wf" deleted

Test IPVS:

①. Since kube-proxy's configuration has been changed via its ConfigMap, any nodes added later will use IPVS mode directly; here we check the logs to confirm it.

#View the details of a kube-proxy pod

[root@xnode1 ~]# kubectl describe pod -n kube-system kube-proxy
Name:               kube-proxy-ptlm8
Namespace:          kube-system
Priority:           2000001000
PriorityClassName:  system-node-critical
Node:               xnode1/10.0.0.30
Start Time:         Wed, 17 Aug 2022 03:45:04 -0400
Labels:             controller-revision-hash=5f46cbf776
                    k8s-app=kube-proxy
                    pod-template-generation=1
Annotations:        <none>
Status:             Running
IP:                 10.0.0.30
Controlled By:      DaemonSet/kube-proxy
Containers:
  kube-proxy:
    Container ID:  docker://b7fc4a3b55063900c62415803b30097342e29a2dadd26277c7aac238cb49de55
    Image:         registry.aliyuncs.com/google_containers/kube-proxy:v1.14.1
    Image ID:      docker-pullable://registry.aliyuncs.com/google_containers/kube-proxy@sha256:44af2833c6cbd9a7fc2e9d2f5244a39dfd2e31ad91bf9d4b7d810678db738ee9
    Port:          <none>
    Host Port:     <none>
    
#View the logs of the kube-proxy-ptlm8 pod
[root@xnode1 ~]# kubectl logs kube-proxy-ptlm8 -n kube-system 
I0817 07:45:05.902030       1 server_others.go:177] Using ipvs Proxier.		==> kube-proxy is now using IPVS
W0817 07:45:05.902743       1 proxier.go:381] IPVS scheduler not specified, use rr by default
I0817 07:45:05.902982       1 server.go:555] Version: v1.14.1
I0817 07:45:05.922512       1 conntrack.go:52] Setting nf_conntrack_max to 131072
I0817 07:45:05.923301       1 config.go:102] Starting endpoints config controller
I0817 07:45:05.923338       1 controller_utils.go:1027] Waiting for caches to sync for endpoints config controller
I0817 07:45:05.923354       1 config.go:202] Starting service config controller
I0817 07:45:05.923416       1 controller_utils.go:1027] Waiting for caches to sync for service config controller
I0817 07:45:06.023978       1 controller_utils.go:1034] Caches are synced for endpoints config controller
I0817 07:45:06.024736       1 controller_utils.go:1034] Caches are synced for service config controller
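Besides the log line, the IPVS virtual server table itself can be inspected with the ipvsadm tool installed earlier; seeing the cluster service VIPs listed there is another confirmation that IPVS is in use:

#List the IPVS virtual servers and their real servers
[root@xnode1 ~]# ipvsadm -Ln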

7. Regenerate a Token to Join the k8s Cluster

#Run on the master node, where kubeadm has access to the cluster credentials
[root@xnode1 ~]# kubeadm token create --print-join-command
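The command prints a fresh kubeadm join line, which is then run as root on the node that should join; the token and hash below are placeholders, not real values:

#Example shape of the printed command; run it on the new node
kubeadm join 10.0.0.30:6443 --token <new-token> --discovery-token-ca-cert-hash sha256:<ca-cert-hash>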