Deploying a Kubernetes v1.28.2 Cluster on CentOS 7.9

IP address      Hostname
10.0.0.109      master
10.0.0.110      node01
10.0.0.119      node02

1. Host environment preparation

1> Set the hostnames

[root@master ~]# hostnamectl set-hostname master
[root@node01 ~]# hostnamectl set-hostname node01
[root@node02 ~]# hostnamectl set-hostname node02

2> Configure time synchronization

[root@master ~]# yum -y install ntpdate
[root@master ~]# ntpdate ntp1.aliyun.com
[root@master ~]# echo "0 1 * * * ntpdate ntp1.aliyun.com" >> /var/spool/cron/root
[root@master ~]# crontab -l

[root@node01 ~]# yum -y install ntpdate
[root@node01 ~]# ntpdate ntp1.aliyun.com
[root@node01 ~]# echo "0 1 * * * ntpdate ntp1.aliyun.com" >> /var/spool/cron/root
[root@node01 ~]# crontab -l

[root@node02 ~]# yum -y install ntpdate
[root@node02 ~]# ntpdate ntp1.aliyun.com
[root@node02 ~]# echo "0 1 * * * ntpdate ntp1.aliyun.com" >> /var/spool/cron/root
[root@node02 ~]# crontab -l
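A quick optional check: right after the initial sync, all three hosts should report roughly the same time.
# optional: compare the clocks (run on each host)
[root@master ~]# date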

3> Hostname resolution

[root@master ~]# vim /etc/hosts
10.0.0.109 master
10.0.0.110 node01
10.0.0.119 node02

[root@node01 ~]# vim /etc/hosts
10.0.0.109 master
10.0.0.110 node01
10.0.0.119 node02

[root@node02 ~]# vim /etc/hosts
10.0.0.109 master
10.0.0.110 node01
10.0.0.119 node02
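Optionally, confirm that the names resolve, for example from the master:
[root@master ~]# ping -c 2 node01
[root@master ~]# ping -c 2 node02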

4> Disable swap

[root@master ~]# swapoff --all
[root@master ~]# sed -i -r '/swap/ s/^/#/' /etc/fstab

[root@node01 ~]# swapoff --all
[root@node01 ~]# sed -i -r '/swap/ s/^/#/' /etc/fstab

[root@node02 ~]# swapoff --all
[root@node02 ~]# sed -i -r '/swap/ s/^/#/' /etc/fstab
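A quick check that swap is really off (the Swap line should show 0 on every host):
[root@master ~]# free -h | grep -i swap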

5> Disable the firewall and SELinux

[root@master ~]# systemctl disable firewalld --now
[root@master ~]# setenforce 0

[root@node01 ~]# systemctl disable firewalld --now
[root@node01 ~]# setenforce 0

[root@node02 ~]# systemctl disable firewalld --now
[root@node02 ~]# setenforce 0
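Note that setenforce 0 only switches SELinux to permissive mode until the next reboot. To make the change persistent as well (an extra step beyond the commands above), update /etc/selinux/config on each host:
# optional: keep SELinux permissive across reboots (run on all three hosts)
[root@master ~]# sed -i 's/^SELINUX=enforcing/SELINUX=permissive/' /etc/selinux/config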

6> Adjust kernel parameters

a. Load the br_netfilter bridge module
[root@master ~]# modprobe br_netfilter
[root@master ~]# lsmod | grep br_netfilter

[root@node01 ~]# modprobe br_netfilter
[root@node01 ~]# lsmod | grep br_netfilter

[root@node02 ~]# modprobe br_netfilter
[root@node02 ~]# lsmod | grep br_netfilter
b. Enable bridged traffic filtering and IP forwarding
[root@master ~]# cat >> /etc/sysctl.d/kubernetes.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
[root@master ~]# sysctl -p /etc/sysctl.d/kubernetes.conf

[root@node01 ~]# cat >> /etc/sysctl.d/kubernetes.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
[root@node01 ~]# sysctl -p /etc/sysctl.d/kubernetes.conf

[root@node02 ~]# cat >> /etc/sysctl.d/kubernetes.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
[root@node02 ~]# sysctl -p /etc/sysctl.d/kubernetes.conf
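modprobe only loads br_netfilter for the current boot. As an optional extra step, register the module with systemd-modules-load so it survives a reboot, and confirm the sysctl values took effect:
# optional: load br_netfilter automatically at boot and verify the settings (run on all three hosts)
[root@master ~]# echo "br_netfilter" > /etc/modules-load.d/br_netfilter.conf
[root@master ~]# sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward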

7> Enable IPVS support

[root@master ~]# yum -y install ipset ipvsadm
[root@master ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4  
EOF
[root@master ~]# chmod +x /etc/sysconfig/modules/ipvs.modules
# Run the script
[root@master ~]# /etc/sysconfig/modules/ipvs.modules
# Verify that the ipvs modules are loaded
[root@master ~]# lsmod | grep -e ip_vs -e nf_conntrack_ipv4

[root@node01 ~]# yum -y install ipset ipvsadm
[root@node01 ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4  
EOF
[root@node01 ~]# chmod +x /etc/sysconfig/modules/ipvs.modules
# Run the script
[root@node01 ~]# /etc/sysconfig/modules/ipvs.modules
# Verify that the ipvs modules are loaded
[root@node01 ~]# lsmod | grep -e ip_vs -e nf_conntrack_ipv4

[root@node02 ~]# yum -y install ipset ipvsadm
[root@node02 ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4  
EOF
[root@node02 ~]# chmod +x /etc/sysconfig/modules/ipvs.modules
# Run the script
[root@node02 ~]# /etc/sysconfig/modules/ipvs.modules
# Verify that the ipvs modules are loaded
[root@node02 ~]# lsmod | grep -e ip_vs -e nf_conntrack_ipv4

2. Install packages

1> Install and start Docker

[root@master ~]# curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
[root@master ~]# yum makecache
# yum-utils provides the yum-config-manager utility
[root@master ~]# yum install -y yum-utils
# Use yum-config-manager to add the Aliyun Docker CE repository
[root@master ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Install Docker 20.10.6 (a pinned version, not the latest)
[root@master ~]# yum install docker-ce-20.10.6 docker-ce-cli-20.10.6 -y

[root@node01 ~]# curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
[root@node01 ~]# yum makecache
# yum-utils provides the yum-config-manager utility
[root@node01 ~]# yum install -y yum-utils
# Use yum-config-manager to add the Aliyun Docker CE repository
[root@node01 ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Install Docker 20.10.6 (a pinned version, not the latest)
[root@node01 ~]# yum install docker-ce-20.10.6 docker-ce-cli-20.10.6 -y

[root@node02 ~]# curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
[root@node02 ~]# yum makecache
# yum-utils provides the yum-config-manager utility
[root@node02 ~]# yum install -y yum-utils
# Use yum-config-manager to add the Aliyun Docker CE repository
[root@node02 ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Install Docker 20.10.6 (a pinned version, not the latest)
[root@node02 ~]# yum install docker-ce-20.10.6 docker-ce-cli-20.10.6 -y

2> Configure a Docker registry mirror

[root@master ~]# mkdir /etc/docker
[root@master ~]# cat <<EOF > /etc/docker/daemon.json
{
  "registry-mirrors": ["https://aoewjvel.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
# Start Docker and enable it at boot
[root@master ~]# systemctl enable docker --now
[root@master ~]# systemctl status docker

[root@node01 ~]# mkdir /etc/docker
[root@node01 ~]# cat <<EOF > /etc/docker/daemon.json
{
  "registry-mirrors": ["https://aoewjvel.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
# Start Docker and enable it at boot
[root@node01 ~]# systemctl enable docker --now
[root@node01 ~]# systemctl status docker

[root@node02 ~]# mkdir /etc/docker
[root@node02 ~]# cat <<EOF > /etc/docker/daemon.json
{
  "registry-mirrors": ["https://aoewjvel.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
# Start Docker and enable it at boot
[root@node02 ~]# systemctl enable docker --now
[root@node02 ~]# systemctl status docker
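Because kubelet will later be configured with the systemd cgroup driver, it is worth confirming Docker picked up the daemon.json change (optional check on each host):
[root@master ~]# docker info | grep -i "cgroup driver"
# should report: Cgroup Driver: systemd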

3> Install the cri-dockerd plugin (run on all three hosts)

a. Install the cri-dockerd RPM
[root@node02 ~]# wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.1/cri-dockerd-0.3.1-3.el7.x86_64.rpm
[root@node02 ~]# rpm -ivh cri-dockerd-0.3.1-3.el7.x86_64.rpm
b. Back up and replace the cri-docker.service unit file
[root@node02 ~]# mv /usr/lib/systemd/system/cri-docker.service{,.default}
[root@node02 ~]# vim /usr/lib/systemd/system/cri-docker.service 
# Recreate the file with the following content
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket
[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.7
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target

4> Start and enable cri-dockerd

[root@master ~]# systemctl daemon-reload
[root@master ~]# systemctl start cri-docker.service 
[root@master ~]# systemctl enable cri-docker.service

[root@node01 ~]# systemctl daemon-reload
[root@node01 ~]# systemctl start cri-docker.service 
[root@node01 ~]# systemctl enable cri-docker.service

[root@node02 ~]# systemctl daemon-reload
[root@node02 ~]# systemctl start cri-docker.service 
[root@node02 ~]# systemctl enable cri-docker.service
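Optionally confirm that cri-dockerd is active and its socket exists before installing kubeadm:
[root@master ~]# systemctl is-active cri-docker.service
[root@master ~]# ls -l /var/run/cri-dockerd.sock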

3. Install kubeadm, kubelet and kubectl

1> Configure the Kubernetes yum repository
[root@master ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF
[root@master ~]# yum install -y kubelet-1.28.2 kubeadm-1.28.2 kubectl-1.28.2

[root@node01 ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF
[root@node01 ~]# yum install -y kubelet-1.28.2 kubeadm-1.28.2 kubectl-1.28.2

[root@node02 ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF
[root@node02 ~]# yum install -y kubelet-1.28.2 kubeadm-1.28.2 kubectl-1.28.2

2> Enable and start the kubelet service
[root@master ~]# systemctl enable kubelet.service --now
[root@node01 ~]# systemctl enable kubelet.service --now
[root@node02 ~]# systemctl enable kubelet.service --now
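At this point kubelet will keep restarting until the cluster is initialized; that is expected. A quick optional version check:
[root@master ~]# kubeadm version -o short
# should print v1.28.2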

4. Initialize the cluster

1> Generate the default init configuration (run on the master)

[root@master ~]# kubeadm config print init-defaults > kubeadm.yaml
[root@master ~]# vim kubeadm.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.0.0.109
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
  imagePullPolicy: IfNotPresent
  name: master
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.28.2
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
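Note: the initialization in the next step uses command-line flags rather than this file. If you would rather drive kubeadm from the edited kubeadm.yaml, a roughly equivalent (untested here) invocation is:
[root@master ~]# kubeadm init --config kubeadm.yaml --upload-certs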

2> Run kubeadm init

[root@master ~]# kubeadm init --control-plane-endpoint="10.0.0.109" \
      --kubernetes-version=v1.28.2 \
      --pod-network-cidr=10.244.0.0/16 \
      --service-cidr=10.96.0.0/12 \
      --token-ttl=0 \
      --cri-socket unix:///run/cri-dockerd.sock \
      --upload-certs \
      --image-repository=registry.aliyuncs.com/google_containers
......
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 10.0.0.109:6443 --token 5yqkhy.pcsndbb6zoxx4kau \
	--discovery-token-ca-cert-hash sha256:c4752fb19abf4f28ad4df0417ed38d708596533de5b6ddff08c5ee87dce49362 \
	--control-plane --certificate-key 374f61d6ee2c249b259b6fd5e3c0fed3fa3552acd87149ff336b0cd58e40e4a2

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 10.0.0.109:6443 --token 5yqkhy.pcsndbb6zoxx4kau \
	--discovery-token-ca-cert-hash sha256:c4752fb19abf4f28ad4df0417ed38d708596533de5b6ddff08c5ee87dce49362 

Troubleshooting

Error 1:

# Edit config.toml and comment out the line that disables containerd's CRI plugin, then restart containerd
[root@master ~]# vim /etc/containerd/config.toml
#disabled_plugins = ["cri"]
[root@master ~]# systemctl restart containerd

Error 2:

# Error message
Found multiple CRI endpoints on the host. Please define which one do you wish to use by setting the 'criSocket' field in the kubeadm configuration file: unix:///var/run/containerd/containerd.sock, unix:///var/run/cri-dockerd.sock
To see the stack trace of this error execute with --v=5 or higher

# Fix
[root@master ~]# kubeadm reset --cri-socket unix:///run/cri-dockerd.sock
[root@master ~]# systemctl restart cri-docker

3> Set up kubectl credentials

[root@master ~]#  mkdir -p $HOME/.kube
[root@master ~]#  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]#  sudo chown $(id -u):$(id -g) $HOME/.kube/config

4> Check cluster status

[root@master ~]# kubectl get nodes
NAME     STATUS     ROLES           AGE     VERSION
master   NotReady   control-plane   3m19s   v1.28.2

The node reports NotReady because no pod network add-on is installed yet; this clears up after Calico is deployed in section 6 below.

5. Join the worker nodes to the cluster

1> Run kubeadm join on the worker nodes

[root@node01 ~/docker-image]#kubeadm join 10.0.0.109:6443 --token 5yqkhy.pcsndbb6zoxx4kau \
> --discovery-token-ca-cert-hash sha256:c4752fb19abf4f28ad4df0417ed38d708596533de5b6ddff08c5ee87dce49362 --cri-socket=unix:///var/run/cri-dockerd.sock


[root@node02 ~]#kubeadm join 10.0.0.109:6443 --token 5yqkhy.pcsndbb6zoxx4kau \
> --discovery-token-ca-cert-hash sha256:c4752fb19abf4f28ad4df0417ed38d708596533de5b6ddff08c5ee87dce49362 --cri-socket=unix:///var/run/cri-dockerd.sock
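If the original token has expired or was lost, a fresh join command can be generated on the master at any time (append the cri-dockerd socket flag when running it on a node):
[root@master ~]# kubeadm token create --print-join-command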

# Check cluster status
[root@master ~]#kubectl get nodes
NAME     STATUS     ROLES           AGE     VERSION
master   NotReady   control-plane   11m     v1.28.2
node01   NotReady   <none>          2m50s   v1.28.2
node02   NotReady   <none>          2m8s    v1.28.2

2> Label the worker nodes (run on the master only)

[root@master ~]#kubectl label nodes node01 node-role.kubernetes.io/work=work
node/node01 labeled
[root@master ~]#kubectl label nodes node02 node-role.kubernetes.io/work=work
node/node02 labeled
[root@master ~]#kubectl get nodes
NAME     STATUS     ROLES           AGE     VERSION
master   NotReady   control-plane   12m     v1.28.2
node01   NotReady   work            4m17s   v1.28.2
node02   NotReady   work            3m35s   v1.28.2

6. Install the Calico network add-on

1> Download calico.yaml and apply it on the master

[root@master ~]#wget https://docs.projectcalico.org/manifests/calico.yaml
[root@master ~]#kubectl apply -f calico.yaml
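It can take a few minutes for the Calico images to pull and the pods to become Ready; progress can be watched with:
[root@master ~]# kubectl get pods -n kube-system | grep calico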

2> Verify cluster status

[root@master ~]#kubectl get node
NAME     STATUS   ROLES           AGE   VERSION
master   Ready    control-plane   21m   v1.28.2
node01   Ready    work            13m   v1.28.2
node02   Ready    work            12m   v1.28.2
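As a final smoke test (not part of the original steps), schedule a simple pod and confirm it reaches Running on one of the worker nodes:
[root@master ~]# kubectl run nginx-test --image=nginx --restart=Never
[root@master ~]# kubectl get pod nginx-test -o wide
[root@master ~]# kubectl delete pod nginx-test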