I. Environment Preparation
CPU | Memory | Disk | Role | IP | Hostname |
32C | 32G | 200G | master | 192.168.20.138 | master01 |
32C | 32G | 200G | worker(node) | 192.168.20.153 | worker02 |
32C | 32G | 200G | worker(node) | 192.168.20.179 | worker03 |
OS: openEuler 20.03 LTS SP4
Linux kernel: 5.4.257-1.el7.elrepo.x86_64
cri-dockerd: cri-dockerd-0.3.9-3.el8.x86_64.rpm (used together with Docker CE)
Kubernetes: 1.28
II. Host Configuration
1. Set the hostname on every host
Three hosts, one master and two workers; set the names as follows:
# On the master01 node
hostnamectl set-hostname master01
# On the worker02 node
hostnamectl set-hostname worker02
# On the worker03 node
hostnamectl set-hostname worker03
2. Check the IP address on every host
ip addr show
3. Map hostnames to IPs in /etc/hosts on every host
# Run on all hosts
vim /etc/hosts
# Append the following entries
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.20.138 master01
192.168.20.153 worker02
192.168.20.179 worker03
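A quick resolution check (optional; assumes ICMP is allowed between the hosts, which it is once the firewall is disabled in the next step):
# Run on any host: all three names should resolve to the IPs above and answer
for h in master01 worker02 worker03; do ping -c 1 -W 1 $h; done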
4. Disable the firewall on every host
# Stop the firewall and disable it at boot
systemctl disable --now firewalld
# Confirm it is inactive
systemctl status firewalld
5. Disable SELinux on every host
# Disable SELinux permanently
sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# Review the resulting config
cat /etc/selinux/config
# Check the current SELinux status
sestatus
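The sed edit above only takes effect after a reboot. To relax SELinux immediately for the current session as well:
# Drops SELinux to permissive right away; the config change makes it permanent after reboot
setenforce 0
getenforce   # prints Permissive now, Disabled after the reboot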
6. Set up time synchronization on every host
# Install ntpdate
yum -y install ntpdate
# Create a cron job
crontab -e
# Sync the clock once per hour
0 */1 * * * /usr/sbin/ntpdate time1.aliyun.com
Save and quit with :wq.
# List the current cron jobs
crontab -l
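To sync the clock once right away instead of waiting for the first cron run:
# Same binary the cron entry calls
/usr/sbin/ntpdate time1.aliyun.com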
7. Enable kernel forwarding and bridge filtering on every host
# Enable IP routing: set net.ipv4.ip_forward=1
vim /etc/sysctl.conf
# Add a dedicated config file for bridge filtering and kernel forwarding
cat /etc/sysctl.d/k8s.conf   # reports that the file does not exist yet
# Create and edit the file
vim /etc/sysctl.d/k8s.conf
# Add the following:
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
# Load the br_netfilter module (without it, applying k8s.conf fails with a load error)
modprobe br_netfilter
# Confirm it is loaded
lsmod | grep br_netfilter
# Apply all sysctl settings
sysctl --system
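modprobe only loads br_netfilter for the current boot. A minimal sketch to load it automatically on every boot (systemd reads module names from /etc/modules-load.d/):
# Persist br_netfilter across reboots so the bridge sysctl keys always resolve
cat > /etc/modules-load.d/br_netfilter.conf << EOF
br_netfilter
EOF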
8. Upgrade the OS kernel on every host
# Check the current kernel version
uname -r
# Import the elrepo GPG key
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
# Install the elrepo YUM repository
yum -y install https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm
# Update the system
sudo yum update
Every host needs the kernel upgrade. master01 was already upgraded separately; the steps below cover worker02 and worker03 only.
# List the installed kernel packages
rpm -qa | grep kernel
# Add a mirror repo for the kernel packages
cat > /etc/yum.repos.d/elrepo.repo << EOF
[elrepo]
name=elrepo
baseurl=https://mirrors.aliyun.com/elrepo/archive/kernel/el7/x86_64
gpgcheck=0
enabled=1
EOF
Then refresh the cache:
yum clean all && yum makecache
# List the kernel packages available in the repo
yum list --showduplicates kernel*
# Install kernel-lt-5.4.257
yum install -y kernel-lt-5.4.257 kernel-lt-devel-5.4.257
# List the GRUB menu entries with their indexes
awk -F\' '$1=="menuentry " {print i++ " : " $2}' /etc/grub2.cfg
# Boot the new kernel (entry 0)
grub2-set-default 0
# Reboot
reboot
# Check the installed kernels
[root@worker02 ~]# rpm -qa | grep kernel
kernel-lt-devel-5.4.257-1.el7.elrepo.x86_64
kernel-lt-5.4.257-1.el7.elrepo.x86_64
kernel-tools-4.19.90-2312.6.0.0258.oe2003sp4.x86_64
kernel-devel-4.19.90-2312.6.0.0258.oe2003sp4.x86_64
kernel-4.19.90-2312.6.0.0258.oe2003sp4.x86_64
kernel-4.19.90-2312.1.0.0255.oe2003sp4.x86_64
# Remove the old kernels (match the package names against your own output above)
[root@worker02 ~]# yum remove -y kernel-4.19.90-2312.6.0.0258.oe2003sp4.x86_64 kernel-4.19.90-2312.1.0.0255.oe2003sp4.x86_64
The kernel is now kernel-lt-5.4.257.
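To confirm the workers actually booted into the new kernel after the reboot:
# Should match the version given in Part I
uname -r
# Expected output: 5.4.257-1.el7.elrepo.x86_64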
9. Install ipset and ipvsadm on every host
# Install ipset and ipvsadm
yum -y install ipset ipvsadm
# Configure the ipvs module list so the required modules get loaded
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
# Make the script executable, run it, and confirm the modules are loaded
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack
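/etc/sysconfig/modules/ is a CentOS-era convention and is not run automatically by systemd-based distributions such as openEuler. A sketch of the systemd-native equivalent, in case the modules do not survive a reboot:
# systemd-modules-load loads these at every boot
cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF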
10. Disable the swap partition on every host
# Temporary only (does not survive a reboot)
swapoff -a
# Confirm swap is off
free -m
There are two ways to disable swap permanently.
Option 1:
# Disabling swap permanently requires a reboot
vim /etc/fstab
......
# Comment out the swap line by prefixing it with #
# /dev/mapper/centos-swap swap swap defaults 0 0
Option 2:
# Prevent swap from being re-enabled after a reboot
sed -i '/ swap / s/^/#/' /etc/fstab
Then reboot the hosts:
# Reboot all hosts
reboot
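After the reboot, verify that swap stays off:
swapon --show   # empty output means no active swap
free -m         # the Swap line should show 0 total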
III. Install Docker
1. Download the official repo on every host
# Change into the yum repo directory
cd /etc/yum.repos.d/
# Download the official Docker repo file
curl -O https://download.docker.com/linux/centos/docker-ce.repo
Pin the repo to the CentOS 8 packages (openEuler's $releasever does not match any path in Docker's repo tree):
sed -i 's/$releasever/8/g' docker-ce.repo
2. Install Docker on every host
yum install -y docker-ce
3. Configure domestic registry mirrors on every host
# Create the config directory
sudo mkdir -p /etc/docker
# Write the mirror list
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": [
    "https://dockerproxy.com",
    "https://hub-mirror.c.163.com",
    "https://mirror.baidubce.com",
    "https://ccr.ccs.tencentyun.com"
  ]
}
EOF
4. Start Docker on every host
# Start docker
systemctl start docker
# Enable it at boot
systemctl enable docker
# Verify the installation
docker version
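To confirm the mirror configuration from daemon.json was picked up:
# "Registry Mirrors" in the output should list the four mirrors configured above
docker info | grep -A 4 "Registry Mirrors"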
IV. Install cri-dockerd
1. Download the latest cri-dockerd rpm on every host
With a good network connection, download it directly with wget; otherwise download the rpm from the GitHub releases page first and upload it to the hosts.
Releases page: https://github.com/Mirantis/cri-dockerd/releases
Direct link: https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.9/cri-dockerd-0.3.9-3.el8.x86_64.rpm
# Download directly (if the network allows it)
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.9/cri-dockerd-0.3.9-3.el8.x86_64.rpm
If the download stalls, cancel it with Ctrl+C and fetch the rpm with a download manager instead.
2. Install cri-dockerd on every host
# Install the rpm matching the version you downloaded
rpm -ivh cri-dockerd-0.3.9-3.el8.x86_64.rpm
3. Start the cri-docker service on every host
# Start cri-docker
systemctl start cri-docker
# Enable cri-docker at boot
systemctl enable cri-docker
4. Point cri-dockerd at a domestic pause image on every host
vim /usr/lib/systemd/system/cri-docker.service
# Find the ExecStart= line (around line 10)
# Change it to: ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9
# Restart the Docker components
systemctl daemon-reload && systemctl restart docker cri-docker.socket cri-docker
# Check their status
systemctl status docker cri-docker.socket cri-docker
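At this point the CRI socket that kubeadm will use should exist:
# Both should report active, and the socket file should be present
systemctl is-active cri-docker
ls -l /var/run/cri-dockerd.sock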
V. Install the Kubernetes Components
1. Configure the Kubernetes repo on every host
# This overwrites any existing config in /etc/yum.repos.d/kubernetes.repo
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.28/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.28/rpm/repodata/repomd.xml.key
#exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
EOF
2. Install kubelet, kubeadm, kubectl, and kubernetes-cni on every host
# Install
yum install -y kubelet kubeadm kubectl kubernetes-cni
# Enable kubelet at boot
systemctl enable kubelet.service
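Optionally verify that the runtime answers over the CRI socket. This assumes crictl is present (the cri-tools package is normally pulled in alongside kubeadm from this repo):
# Should print matching client/runtime version info via cri-dockerd
crictl --runtime-endpoint unix:///var/run/cri-dockerd.sock version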
3. Initialize the cluster on master01
# Initialize the cluster on master01 (apiserver-advertise-address is master01's IP)
kubeadm init --node-name=master01 \
--image-repository=registry.aliyuncs.com/google_containers \
--cri-socket=unix:///var/run/cri-dockerd.sock \
--apiserver-advertise-address=192.168.20.138 \
--pod-network-cidr=10.244.0.0/16 \
--service-cidr=10.96.0.0/12
The cluster initializes successfully:
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.20.138:6443 --token kcgudk.4u0z0glybd9hk066 \
--discovery-token-ca-cert-hash sha256:0d22508991c29a3887447042c4056270e91b7d00122e792d54443767404ef79e
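Bootstrap tokens expire after 24 hours by default. If the join command above no longer works, generate a fresh one on master01:
# Prints a complete 'kubeadm join ...' line with a new token
kubeadm token create --print-join-command
# Append --cri-socket=unix:///var/run/cri-dockerd.sock when running it on a worker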
# Configure the kubeconfig environment on master01
# As a non-root user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# As root:
# Temporary, lost after reboot; not recommended
export KUBECONFIG=/etc/kubernetes/admin.conf
# Permanent; also survives a later kubeadm reset + init
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
source ~/.bash_profile
kubectl get nodes
# If initialization fails and you need to start over, reset the cluster on master01:
kubeadm reset --cri-socket=unix:///var/run/cri-dockerd.sock
4. Install the flannel network add-on on master01
After the steps above, kubectl get nodes still reports the nodes as NotReady; a network plugin must be installed.
Download kube-flannel.yml from the flannel project, then apply it:
kubectl apply -f kube-flannel.yml
Alternatively, create the manifest directly on master01:
cat > kube-flannel.yml << EOF
---
kind: Namespace
apiVersion: v1
metadata:
  name: kube-flannel
  labels:
    k8s-app: flannel
    pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: flannel
  name: flannel
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
- apiGroups:
  - networking.k8s.io
  resources:
  - clustercidrs
  verbs:
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: flannel
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: flannel
  name: flannel
  namespace: kube-flannel
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-flannel
  labels:
    tier: node
    k8s-app: flannel
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
    k8s-app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni-plugin
        image: docker.io/flannel/flannel-cni-plugin:v1.2.0
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
        image: docker.io/flannel/flannel:v0.22.3
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: docker.io/flannel/flannel:v0.22.3
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
        - name: xtables-lock
          mountPath: /run/xtables.lock
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
EOF
# Reset the cluster on master01
kubeadm reset --cri-socket=unix:///var/run/cri-dockerd.sock
# Re-initialize the cluster on master01 (note the extra option "--cri-socket=unix:///var/run/cri-dockerd.sock \")
kubeadm init --node-name=master01 \
--image-repository=registry.aliyuncs.com/google_containers \
--cri-socket=unix:///var/run/cri-dockerd.sock \
--apiserver-advertise-address=192.168.20.138 \
--pod-network-cidr=10.244.0.0/16 \
--service-cidr=10.96.0.0/12
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.20.138:6443 --token lgeka8.7b1lp41hy306vj1l \
--discovery-token-ca-cert-hash sha256:0328f9dd43d94ca625330a449f1e4ed161c9c329abef160795aa93d7fbaa8bfe
# Run on master01
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Permanent; also survives a later kubeadm reset + init (master01)
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
source ~/.bash_profile
kubectl get nodes
Re-join worker02 and worker03 to master01:
# Run on both worker02 and worker03 (note the extra parameter "--cri-socket=unix:///var/run/cri-dockerd.sock \")
kubeadm join 192.168.20.138:6443 --token lgeka8.7b1lp41hy306vj1l \
--cri-socket=unix:///var/run/cri-dockerd.sock \
--discovery-token-ca-cert-hash sha256:0328f9dd43d94ca625330a449f1e4ed161c9c329abef160795aa93d7fbaa8bfe
# Run on master01
kubectl get nodes
If a node reports NotReady, flannel is not yet running on it; see the diagnostic sketch below and the next step.
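To diagnose a NotReady node from master01, check that the flannel DaemonSet pod reached Running on it:
# One kube-flannel-ds pod per node; Init/CrashLoopBackOff states explain NotReady
kubectl get pods -n kube-flannel -o wide
# Node conditions show the exact reason (e.g. "cni plugin not initialized")
kubectl describe node worker02 | grep -A 6 Conditions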
5. Flannel on worker02 and worker03
Creating kube-flannel.yml on the worker nodes is not actually required: flannel runs as a DaemonSet, so the kubectl apply on master01 schedules it onto every node, where its init containers install the CNI binary and config. The manifest is identical to the one in step 4, so there is no need to repeat it on the workers.
(Note: the worker nodes were still NotReady at this point, probably also because the IP-to-hostname mappings had initially been left out of /etc/hosts.)
6. Re-initialize the cluster from master01
# Reset the cluster on every node
kubeadm reset --cri-socket=unix:///var/run/cri-dockerd.sock
# Re-initialize the cluster on master01 (note the extra option "--cri-socket=unix:///var/run/cri-dockerd.sock \")
kubeadm init --node-name=master01 \
--image-repository=registry.aliyuncs.com/google_containers \
--cri-socket=unix:///var/run/cri-dockerd.sock \
--apiserver-advertise-address=192.168.20.138 \
--pod-network-cidr=10.244.0.0/16 \
--service-cidr=10.96.0.0/12
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.20.138:6443 --token 8spo85.g2jh1g47cjgrnf3k \
--discovery-token-ca-cert-hash sha256:db5759929e3593cdf134908ff1f55afadc14b710c585fd92e91661426058f2f9
# Run on master01
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Run the join command on worker02 and worker03
kubeadm join 192.168.20.138:6443 --token 8spo85.g2jh1g47cjgrnf3k \
--cri-socket=unix:///var/run/cri-dockerd.sock \
--discovery-token-ca-cert-hash sha256:db5759929e3593cdf134908ff1f55afadc14b710c585fd92e91661426058f2f9
# Install kubernetes-cni on all nodes
yum install -y kubernetes-cni
# List the installed CNI plugin binaries (informational only)
ls -lh /opt/cni/bin
# Run on all hosts
vim /etc/sysconfig/kubelet
# Set the following content
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
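kubeadm-deployed kubelets default to the systemd cgroup driver, while Docker defaults to cgroupfs; the flag above pins kubelet to systemd, so Docker should match. A sketch that rewrites daemon.json with the mirrors from Part III plus the driver setting (an assumption about your setup; merge by hand if you added other keys):
cat > /etc/docker/daemon.json << EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": [
    "https://dockerproxy.com",
    "https://hub-mirror.c.163.com",
    "https://mirror.baidubce.com",
    "https://ccr.ccs.tencentyun.com"
  ]
}
EOF
systemctl restart docker cri-docker
# Verify the driver actually changed
docker info | grep -i "cgroup driver"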
# Permanent; also survives a later kubeadm reset + init (master01)
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
source ~/.bash_profile
kubectl get nodes
On any node that is still NotReady, run:
# Create the CNI config directory
mkdir -p /etc/cni/net.d
# Write the flannel CNI config (normally the flannel DaemonSet installs this file itself)
cat > /etc/cni/net.d/10-flannel.conflist << EOF
{
  "name": "cbr0",
  "plugins": [
    {
      "type": "flannel",
      "delegate": {
        "hairpinMode": true,
        "isDefaultGateway": true
      }
    },
    {
      "type": "portmap",
      "capabilities": {
        "portMappings": true
      }
    }
  ]
}
EOF
# Run on the master node
kubectl apply -f kube-flannel.yml
# Restart kubelet on the master node
systemctl restart kubelet
# Restart docker on the master node
systemctl daemon-reload && systemctl restart docker
# Run on the master node (give the node status a moment to update first)
kubectl get nodes
# Run on the master node
kubectl get all -n kube-system
[root@master01 ~]# kubectl get all -n kube-system
NAME READY STATUS RESTARTS AGE
pod/coredns-66f779496c-nxp82 1/1 Running 1 (100m ago) 112m
pod/coredns-66f779496c-zgq2l 1/1 Running 1 (100m ago) 112m
pod/etcd-master01 1/1 Running 1 (100m ago) 112m
pod/kube-apiserver-master01 1/1 Running 1 (100m ago) 112m
pod/kube-controller-manager-master01 1/1 Running 1 (100m ago) 112m
pod/kube-proxy-jnrwx 1/1 Running 1 (99m ago) 110m
pod/kube-proxy-qplw8 1/1 Running 1 (100m ago) 112m
pod/kube-proxy-rx6nr 1/1 Running 1 (99m ago) 110m
pod/kube-scheduler-master01 1/1 Running 1 (100m ago) 112m
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kube-dns ClusterIP 10.96.0.10 <none> 53/UDP,53/TCP,9153/TCP 112m
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
daemonset.apps/kube-proxy 3 3 3 3 3 kubernetes.io/os=linux 112m
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/coredns 2/2 2 2 112m
NAME DESIRED CURRENT READY AGE
replicaset.apps/coredns-66f779496c 2 2 2 112m
# Run on the master node
[root@master01 ~]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-66f779496c-nxp82 1/1 Running 1 (102m ago) 114m
coredns-66f779496c-zgq2l 1/1 Running 1 (102m ago) 114m
etcd-master01 1/1 Running 1 (102m ago) 114m
kube-apiserver-master01 1/1 Running 1 (102m ago) 114m
kube-controller-manager-master01 1/1 Running 1 (102m ago) 114m
kube-proxy-jnrwx 1/1 Running 1 (101m ago) 112m
kube-proxy-qplw8 1/1 Running 1 (102m ago) 114m
kube-proxy-rx6nr 1/1 Running 1 (101m ago) 112m
kube-scheduler-master01 1/1 Running 1 (102m ago) 114m
# Run on the master node
[root@master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master01 Ready control-plane 115m v1.28.5
worker02 Ready <none> 113m v1.28.5
worker03 Ready <none> 113m v1.28.5
The Kubernetes cluster deployment is complete.