1.1 Kubernetes基础环境部署
kubernetes有多种部署方式,目前主流的方式有kubeadm、minikube、二进制包
- minikube:一个用于快速搭建单节点kubernetes的工具
- kubeadm:一个用于快速搭建kubernetes集群的工具
- 二进制包:从官网下载每个组件的二进制包,依次去安装,此方式对于理解kubernetes组件更加有效
- K8s-all:表示以下操作需要在三台主机上都执行
主机名 | IP地址 | 系统 | 配置 |
---|---|---|---|
k8s-master-01 | 192.168.110.21/24 | CentOS 7.9 | 4颗CPU 8G内存 100G硬盘 |
K8s-node-01 | 192.168.110.22/24 | CentOS 7.9 | 4颗CPU 8G内存 100G硬盘 |
K8s-node-02 | 192.168.110.23/24 | CentOS 7.9 | 4颗CPU 8G内存 100G硬盘 |
注意:关闭防火墙和SElinux
1.1.1 配置hosts解析
[root@K8s-master-01 ~]# cat >> /etc/hosts << EOF 192.168.110.21 k8s-master-01 192.168.110.22 K8s-node-01 192.168.110.23 K8s-node-02 EOF [root@K8s-master-01 ~]# scp /etc/hosts K8s-node-01:/etc/ [root@K8s-master-01 ~]# scp /etc/hosts K8s-node-02:/etc/
1.1.2 配置NTP时间服务
[root@K8s-master-01 ~]# sed -i '3,6 s/^/# /' /etc/chrony.conf [root@K8s-master-01 ~]# sed -i '6 a server ntp.aliyun.com iburst' /etc/chrony.conf [root@K8s-master-01 ~]# systemctl restart chronyd.service [root@K8s-master-01 ~]# chronyc sources 210 Number of sources = 1 MS Name/IP address Stratum Poll Reach LastRx Last sample =============================================================================== ^* 203.107.6.88 2 6 17 18 +266us[+1386us] +/- 24ms [root@K8s-node-01 ~]# sed -i '3,6 s/^/# /' /etc/chrony.conf [root@K8s-node-01 ~]# sed -i '6 a server ntp.aliyun.com iburst' /etc/chrony.conf [root@K8s-node-01 ~]# systemctl restart chronyd.service [root@K8s-node-01 ~]# chronyc sources 210 Number of sources = 1 MS Name/IP address Stratum Poll Reach LastRx Last sample =============================================================================== ^* 203.107.6.88 2 6 17 18 +266us[+1386us] +/- 24ms [root@K8s-node-02 ~]# sed -i '3,6 s/^/# /' /etc/chrony.conf [root@K8s-node-02 ~]# sed -i '6 a server ntp.aliyun.com iburst' /etc/chrony.conf [root@K8s-node-02 ~]# systemctl restart chronyd.service [root@K8s-node-02 ~]# chronyc sources 210 Number of sources = 1 MS Name/IP address Stratum Poll Reach LastRx Last sample =============================================================================== ^* 203.107.6.88 2 6 7 1 -291us[-4455us] +/- 30ms
1.1.3 禁用Swap交换分区
由于容器设计为尽可能高效地使用资源,Kubernetes通常要求在节点上禁用swap
分区,原因包括:
- 性能问题:如前所述,使用swap会降低系统性能,这可能会影响容器的性能和稳定性。
- 资源隔离:禁用swap可以确保容器之间的资源隔离更加清晰,避免一个容器使用过多swap空间而影响其他容器。
- 调试和监控:禁用swap可以简化系统监控和调试,因为不需要考虑磁盘空间作为内存使用的复杂性。
[root@K8s-master-01 ~]# sed -i 's/.*swap.*/# &/' /etc/fstab [root@K8s-node-01 ~]# sed -i 's/.*swap.*/# &/' /etc/fstab [root@K8s-node-02 ~]# sed -i 's/.*swap.*/# &/' /etc/fstab
1.1.4 升级操作系统内核
注意:三台机器同时做
1.1.4.1 导入elrepo gpg key
[root@K8s-all ~]# rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
1.1.4.2 安装elrepo YUM源仓库
[root@K8s-all ~]# yum install https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm -y
1.1.4.3 安装kernel-ml版本
- ml(mainline)为主线最新版本,lt(long-term)为长期维护版本
[root@K8s-all ~]# yum --enablerepo="elrepo-kernel" install kernel-ml.x86_64 -y [root@K8s-all ~]# uname -r 3.10.0-1160.71.1.el7.x86_64
1.1.4.4 设置grub2默认引导为0
[root@K8s-all ~]# grub2-set-default 0
1.1.4.5 重新生成grub2引导文件
[root@K8s-all ~]# grub2-mkconfig -o /boot/grub2/grub.cfg [root@K8s-all ~]# reboot #更新后,需要重启,使用升级的内核生效 [root@K8s-all ~]# uname -r #重启后,需要验证内核是否为更新对应的版本 6.8.7-1.el7.elrepo.x86_64
1.1.5 开启内核路由转发
[root@K8s-all ~]# sysctl -w net.ipv4.ip_forward=1 net.ipv4.ip_forward = 1 [root@K8s-all ~]# modprobe br_netfilter [root@k8s-all ~]# echo net.ipv4.ip_forward = 1 >> /etc/sysctl.conf [root@k8s-all ~]# sysctl -p net.ipv4.ip_forward = 1
1.1.6 添加网桥过滤及内核转发配置文件
[root@K8s-all ~]# cat > /etc/sysctl.d/k8s.conf << EOF net.bridge.bridge-nf-call-ip6tables = 1 net.bridge.bridge-nf-call-iptables = 1 vm.swappiness = 0 EOF [root@K8s-all ~]# modprobe br_netfilter [root@K8s-all ~]# sysctl -p /etc/sysctl.d/k8s.conf net.bridge.bridge-nf-call-ip6tables = 1 net.bridge.bridge-nf-call-iptables = 1 vm.swappiness = 0
1.1.7 开启IPVS
[root@K8s-all ~]# yum install ipset ipvsadm -y [root@K8s-all ~]# vim /etc/sysconfig/modules/ipvs.modules #!/bin/bash ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_sed ip_vs_ftp nf_conntrack" for kernel_module in $ipvs_modules; do /sbin/modinfo -F filename $kernel_module >/dev/null 2>&1 if [ $? -eq 0 ]; then /sbin/modprobe $kernel_module fi done chmod 755 /etc/sysconfig/modules/ipvs.modules [root@K8s-all ~]# bash /etc/sysconfig/modules/ipvs.modules
1.1.8 配置国内镜像源
[root@K8s-all ~]# cat >> /etc/yum.repos.d/kubernetes.repo <<EOF [kubernetes] name=Kubernetes baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64 enabled=1 gpgcheck=0 repo_gpgcheck=0 gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg EOF
1.1.9 安装软件包
[root@K8s-all ~]# yum install kubeadm kubelet kubectl -y [root@k8s-master-01 ~]# kubeadm version kubeadm version: &version.Info{Major:"1", Minor:"28", GitVersion:"v1.28.2", GitCommit:"89a4ea3e1e4ddd7f7572286090359983e0387b2f", GitTreeState:"clean", BuildDate:"2023-09-13T09:34:32Z", GoVersion:"go1.20.8", Compiler:"gc", Platform:"linux/amd64"} #为了实现docker使用的cgroupdriver与kubelet使用的cgroup的一致性,修改如下文件内容 [root@K8s-all ~]# cat <<EOF > /etc/sysconfig/kubelet KUBELET_EXTRA_ARGS="--cgroup-driver=systemd" KUBE_PROXY_MODE="ipvs" EOF [root@K8s-all ~]# systemctl enable kubelet.service
1.1.10 kubectl命令自动补全
[root@K8s-all ~]# yum install -y bash-completion [root@K8s-all ~]# source /usr/share/bash-completion/bash_completion [root@K8s-all ~]# source <(kubectl completion bash) [root@K8s-all ~]# echo "source <(kubectl completion bash)" >> ~/.bashrc
1.2 Cri-O方式部署K8s集群
1.2.1 所有节点安装配置cri-o
[root@k8s-all ~]# VERSION=1.28 [root@k8s-all ~]# curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/CentOS_7/devel:kubic:libcontainers:stable.repo [root@k8s-all ~]# curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable:cri-o:${VERSION}.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:${VERSION}/CentOS_7/devel:kubic:libcontainers:stable:cri-o:${VERSION}.repo [root@k8s-all ~]# yum install cri-o -y [root@k8s-all ~]# vim /etc/crio/crio.conf 509 pause_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.9" 548 insecure_registries = [ 549 "docker.mirrors.ustc.edu.cn","dockerhub.azk8s.cn","hub-mirror.c.163.com" 550 ] [root@k8s-all ~]# systemctl daemon-reload [root@k8s-all ~]# systemctl enable --now crio
-
修改/etc/sysconfig/kubelet
[root@k8s-all ~]# vim /etc/sysconfig/kubelet KUBELET_EXTRA_ARGS="--container-runtime=remote --cgroup-driver=systemd --container-runtime-endpoint='unix:///var/run/crio/crio.sock' --runtime-request-timeout=5m" [root@k8s-all ~]# systemctl daemon-reload [root@k8s-all ~]# systemctl restart kubelet.service
1.2.2 集群初始化
[root@k8s-master-01 ~]# kubeadm init --kubernetes-version=v1.28.2 --pod-network-cidr=10.244.0.0/16 \ --apiserver-advertise-address=192.168.110.21 \ --service-cidr=10.96.0.0/12 \ --cri-socket unix:///var/run/crio/crio.sock \ --image-repository registry.aliyuncs.com/google_containers To start using your cluster, you need to run the following as a regular user: mkdir -p $HOME/.kube sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config sudo chown $(id -u):$(id -g) $HOME/.kube/config Alternatively, if you are the root user, you can run: export KUBECONFIG=/etc/kubernetes/admin.conf You should now deploy a pod network to the cluster. Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: https://kubernetes.io/docs/concepts/cluster-administration/addons/ Then you can join any number of worker nodes by running the following on each as root: kubeadm join 192.168.110.21:6443 --token tct0yt.1bf5docmg9loxj8m \ --discovery-token-ca-cert-hash sha256:b504dc351e052f7ebe92162c0989088b09c2243467ee510d172187e87caf9b74 [root@k8s-master-01 ~]# mkdir -p $HOME/.kube [root@k8s-master-01 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config [root@k8s-master-01 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
1.2.3 Worker节点加入集群
[root@k8s-node-01 ~]# kubeadm join 192.168.110.21:6443 --token tct0yt.1bf5docmg9loxj8m \ --discovery-token-ca-cert-hash sha256:b504dc351e052f7ebe92162c0989088b09c2243467ee510d172187e87caf9b74 [root@k8s-node-02 ~]# kubeadm join 192.168.110.21:6443 --token tct0yt.1bf5docmg9loxj8m \ --discovery-token-ca-cert-hash sha256:b504dc351e052f7ebe92162c0989088b09c2243467ee510d172187e87caf9b74
1.2.4 安装网络插件
[root@k8s-master-01 ~]# wget -c https://docs.projectcalico.org/v3.25/manifests/calico.yaml - name: CALICO_IPV4POOL_CIDR value: "10.244.0.0/16" - name: CLUSTER_TYPE value: "k8s,bgp" # 下方为新增 - name: IP_AUTODETECTION_METHOD value: "interface=ens33" [root@k8s-all ~]# crictl pull docker.io/calico/cni:v3.25.0 [root@k8s-all ~]# crictl pull docker.io/calico/node:v3.25.0 [root@k8s-all ~]# crictl pull docker.io/calico/kube-controllers:v3.25.0 [root@k8s-master-01 ~]# kubectl apply -f calico.yaml
1.2.5 检测
[root@k8s-master-01 ~]# kubectl get pod -n kube-system NAME READY STATUS RESTARTS AGE calico-kube-controllers-658d97c59c-q6b5z 1/1 Running 0 33s calico-node-88hrv 1/1 Running 0 33s calico-node-ph7sw 1/1 Running 0 33s calico-node-ws2h5 1/1 Running 0 33s coredns-66f779496c-gt4zg 1/1 Running 0 22m coredns-66f779496c-jsqcw 1/1 Running 0 22m etcd-k8s-master-01 1/1 Running 0 22m kube-apiserver-k8s-master-01 1/1 Running 0 22m kube-controller-manager-k8s-master-01 1/1 Running 0 22m kube-proxy-5v4sr 1/1 Running 0 19m kube-proxy-b8xvq 1/1 Running 0 22m kube-proxy-vl5k2 1/1 Running 0 19m kube-scheduler-k8s-master-01 1/1 Running 0 22m