1 环境准备
1.1 环境要求
本文介绍的kubernetes部署方式不建议用于生产环境,可用于实验环境和测试环境;生产环境建议手动部署。
k8s支持的系统:
Ubuntu 16.04+
Debian 9+
CentOS 7
Red Hat Enterprise Linux (RHEL) 7
Fedora 25+
HypriotOS v1.0.1+
Container Linux (tested with 1800.6.0)
内存:2GB及以上
CPU:2核及以上
[root@k8smaster180 ~]# cat /etc/redhat-release
CentOS Linux release 7.8.2003 (Core)
[root@k8smaster180 ~]# uname -a
Linux k8smaster180 5.6.10-1.el7.elrepo.x86_64 #1 SMP Sat May 2 12:42:34 EDT 2020 x86_64 x86_64 x86_64 GNU/Linux
1.2 基础环境部署
1.2.1 关闭防火墙
systemctl stop firewalld
systemctl disable firewalld
1.2.2 关闭selinux
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0
1.2.3 关闭swap
临时关闭:
swapoff -a
永久关闭:注释掉/etc/fstab文件中的swap行
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
1.2.4 添加hosts
172.16.1.180 k8smaster180
172.16.1.181 k8snode181
1.2.5 内核修改相关
1.2.5.1 加载内核模块
modprobe ip_vs_rr
modprobe br_netfilter
1.2.5.2 优化内核参数
cat > /etc/sysctl.d/kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
#由于tcp_tw_recycle与kubernetes的NAT冲突,必须关闭!否则会导致服务不通。4.1x内核已经废弃这项了
#net.ipv4.tcp_tw_recycle=0
#禁止使用 swap 空间,只有当系统 OOM 时才允许使用它
vm.swappiness=0
#不检查物理内存是否够用
vm.overcommit_memory=1
#开启 OOM
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
#关闭不使用的ipv6协议栈,防止触发docker BUG.
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
sysctl -p /etc/sysctl.d/kubernetes.conf
sysctl --system
#必须关闭 tcp_tw_recycle,否则和 NAT 冲突,会导致服务不通;
#关闭 IPV6,防止触发 docker BUG;
1.2.6 开启IP转发功能
echo "1" > /proc/sys/net/ipv4/ip_forward
1.2.7 开启时间同步
echo "*/3 * * * * /usr/sbin/ntpdate ntp3.aliyun.com &> /dev/null" > /tmp/crontab
crontab /tmp/crontab
1.2.8 创建免密访问 - k8smaster180
ssh-keygen -t rsa
cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys
ssh-copy-id -i /root/.ssh/id_rsa.pub root@k8smaster180
ssh-copy-id -i /root/.ssh/id_rsa.pub root@k8snode181
1.2.9 修改PATH环境变量
echo 'PATH=/opt/k8s/bin:$PATH' >>/root/.bashrc
source /root/.bashrc
1.2.10 安装依赖包
yum install -y epel-release
yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget lsof telnet
1.2.11 安装docker
#安装docker需要的工具
yum install -y yum-utils device-mapper-persistent-data lvm2
#添加docker镜像包
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
#安装并更新docker-ce镜像包
#更新缓存
yum makecache fast
#安装docker默认安装最新,可指定版本安装
yum -y install docker-ce
#查看版本
docker -v
#开启docker服务
systemctl enable docker
systemctl start docker
#附:docker镜像加速方法
#添加指定加速地址,这里指定阿里云的
#vim /etc/docker/daemon.json
{
"registry-mirrors": [
"https://registry.cn-hangzhou.aliyuncs.com"
]
}
#重新加载服务
systemctl daemon-reload
systemctl restart docker
#查看 Registry Mirrors是否变成阿里云加速地址
docker info
1.2.12 添加kubernetes软件包
#这里直接使用阿里云的软件包
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
#更新软件包
yum makecache fast
#安装kubelet、kubeadm、kubectl(由于这套环境后续还有其他用处,因此指定了版本安装)
yum install -y kubelet-1.18.3 kubeadm-1.18.3 kubectl-1.18.3
systemctl enable kubelet
systemctl start kubelet
2 k8s集群部署
2.1 master部署
#只能在master上面部署
[root@k8smaster180 ~]# kubeadm init \
> --apiserver-advertise-address=172.16.1.180 \
> --image-repository registry.aliyuncs.com/google_containers \
> --kubernetes-version v1.18.3 \
> --service-cidr=10.2.0.0/16 \
> --pod-network-cidr=10.244.0.0/16
W0817 17:05:19.873513 13819 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
[init] Using Kubernetes version: v1.18.3
[preflight] Running pre-flight checks
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8smaster180 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.2.0.1 172.16.1.180]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8smaster180 localhost] and IPs [172.16.1.180 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8smaster180 localhost] and IPs [172.16.1.180 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
W0817 17:06:33.259312 13819 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[control-plane] Creating static Pod manifest for "kube-scheduler"
W0817 17:06:33.261214 13819 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[kubelet-check] Initial timeout of 40s passed.
[apiclient] All control plane components are healthy after 43.535755 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.18" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8smaster180 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node k8smaster180 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: juh4xl.bbwnmzevs8u6g061
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 172.16.1.180:6443 --token juh4xl.bbwnmzevs8u6g061 \
--discovery-token-ca-cert-hash sha256:6ab8e6c2d7cb11e274fe740e51d0ccd36c18bee19afde18c31de7a5e9e5399f4
#配置k8stoken相关信息
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
#最后输出的token,是节点加入集群所需的信息,默认24小时内有效(可用 kubeadm token create 重新生成)
kubeadm join 172.16.1.180:6443 --token juh4xl.bbwnmzevs8u6g061 \
--discovery-token-ca-cert-hash sha256:6ab8e6c2d7cb11e274fe740e51d0ccd36c18bee19afde18c31de7a5e9e5399f4
2.2 node部署
#在node节点上操作即可
[root@k8snode181 ~]# kubeadm join 172.16.1.180:6443 --token juh4xl.bbwnmzevs8u6g061 \
> --discovery-token-ca-cert-hash sha256:6ab8e6c2d7cb11e274fe740e51d0ccd36c18bee19afde18c31de7a5e9e5399f4
W0817 17:10:49.192018 3896 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
#在master节点上验证:由于网络插件尚未配置,此时节点STATUS为NotReady;网络配置完成后会变成Ready
[root@k8smaster180 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8smaster180 NotReady master 6m30s v1.18.3
k8snode181 NotReady <none> 2m42s v1.18.3
2.3 配置flannel网络
#Kubernetes v1.7+ 适用以下的flannel配置,需要等几分钟,主要是镜像下载较慢,当然也可以修改镜像源
[root@k8smaster180 k8s]# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
[root@k8smaster180 k8s]# kubectl apply -f kube-flannel.yml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds-amd64 created
daemonset.apps/kube-flannel-ds-arm64 created
daemonset.apps/kube-flannel-ds-arm created
daemonset.apps/kube-flannel-ds-ppc64le created
daemonset.apps/kube-flannel-ds-s390x created
#验证flannel是否运行起来
[root@k8snode181 ~]# ps -ef|grep flannel|grep -v grep
root 20102 20085 0 17:33 ? 00:00:00 /opt/bin/flanneld --ip-masq --kube-subnet-mgr
3 部署nginx验证
#k8s环境检查
[root@k8smaster180 k8s]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8smaster180 Ready master 43m v1.18.3
k8snode181 Ready <none> 39m v1.18.3
[root@k8smaster180 k8s]# kubectl get po,svc -o wide --all-namespaces=true
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kube-system pod/coredns-7ff77c879f-44p5k 1/1 Running 0 42m 10.244.1.3 k8snode181 <none> <none>
kube-system pod/coredns-7ff77c879f-qcnqv 1/1 Running 0 42m 10.244.1.2 k8snode181 <none> <none>
kube-system pod/etcd-k8smaster180 1/1 Running 0 43m 172.16.1.180 k8smaster180 <none> <none>
kube-system pod/kube-apiserver-k8smaster180 1/1 Running 0 43m 172.16.1.180 k8smaster180 <none> <none>
kube-system pod/kube-controller-manager-k8smaster180 1/1 Running 0 43m 172.16.1.180 k8smaster180 <none> <none>
kube-system pod/kube-flannel-ds-amd64-p7fnx 1/1 Running 0 47s 172.16.1.180 k8smaster180 <none> <none>
kube-system pod/kube-flannel-ds-amd64-pfkjq 1/1 Running 0 47s 172.16.1.181 k8snode181 <none> <none>
kube-system pod/kube-proxy-7qvw7 1/1 Running 0 42m 172.16.1.180 k8smaster180 <none> <none>
kube-system pod/kube-proxy-nszm9 1/1 Running 0 39m 172.16.1.181 k8snode181 <none> <none>
kube-system pod/kube-scheduler-k8smaster180 1/1 Running 0 43m 172.16.1.180 k8smaster180 <none> <none>
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
default service/kubernetes ClusterIP 10.2.0.1 <none> 443/TCP 43m <none>
kube-system service/kube-dns ClusterIP 10.2.0.10 <none> 53/UDP,53/TCP,9153/TCP 43m k8s-app=kube-dns
3.1 部署nginx服务
#拉取默认nginx镜像部署服务
[root@k8smaster180 ~]# kubectl create deployment nginx --image=nginx
deployment.apps/nginx created
#查看pod创建情况
[root@k8smaster180 ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
nginx-f89759699-l284m 1/1 Running 0 15s
[root@k8smaster180 ~]# kubectl describe pod/nginx-f89759699-l284m --namespace default
Name: nginx-f89759699-l284m
Namespace: default
Priority: 0
Node: k8snode181/172.16.1.181
Start Time: Mon, 17 Aug 2020 18:05:04 +0800
Labels: app=nginx
pod-template-hash=f89759699
Annotations: <none>
Status: Running
IP: 10.244.1.5
IPs:
IP: 10.244.1.5
Controlled By: ReplicaSet/nginx-f89759699
Containers:
nginx:
Container ID: docker://323bd543864751e9151161a8386e3b4d1a8218649e18bc9184934cdfe8ced239
Image: nginx
Image ID: docker-pullable://nginx@sha256:b0ad43f7ee5edbc0effbc14645ae7055e21bc1973aee5150745632a24a752661
Port: <none>
Host Port: <none>
State: Running
Started: Mon, 17 Aug 2020 18:05:14 +0800
Ready: True
Restart Count: 0
Environment: <none>
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from default-token-7k8gp (ro)
Conditions:
Type Status
Initialized True
Ready True
ContainersReady True
PodScheduled True
Volumes:
default-token-7k8gp:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-7k8gp
Optional: false
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 67s default-scheduler Successfully assigned default/nginx-f89759699-l284m to k8snode181
Normal Pulling 65s kubelet, k8snode181 Pulling image "nginx"
Normal Pulled 57s kubelet, k8snode181 Successfully pulled image "nginx"
Normal Created 57s kubelet, k8snode181 Created container nginx
Normal Started 57s kubelet, k8snode181 Started container nginx
#服务创建完成后,暴露nginx的80端口
[root@k8smaster180 ~]# kubectl get po,svc -o wide --all-namespaces=true |grep nginx
default pod/nginx-f89759699-l284m 1/1 Running 0 2m10s 10.244.1.5 k8snode181 <none> <none>
[root@k8smaster180 ~]# kubectl expose deployment nginx --port=80 --type=NodePort
service/nginx exposed
[root@k8smaster180 ~]# kubectl get po,svc -o wide --all-namespaces=true |grep nginx
default pod/nginx-f89759699-l284m 1/1 Running 0 2m44s 10.244.1.5 k8snode181 <none> <none>
default service/nginx NodePort 10.2.206.178 <none> 80:30527/TCP 9s app=nginx
#通过浏览器访问nginx映射的NodePort端口(80:30527/TCP)进行验证,见到nginx欢迎页面代表部署成功
#http://172.16.1.180:30527
4 Kubernetes Dashboard 部署
4.1 下载并修改dashboard配置
#从git下载dashboard配置
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.3/aio/deploy/recommended.yaml
#修改dashboard配置,特别要注意空格,要按照前面的对齐!
#vim recommended.yaml
#可选项,修改image,自己找对应的国内的包,再根据个人需要进行修改
#增加nodeport配置
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
type: NodePort #增加此行
ports:
- port: 443
targetPort: 8443
nodePort: 30000 #增加此行
selector:
k8s-app: kubernetes-dashboard
#如下图所示,一定要对齐!!!
4.2 应用yaml文件
kubectl apply -f recommended.yaml
4.3 访问dashboard
#直接访问:https://172.16.1.180:30000/
#创建管理员,并赋权,获取token登陆dashboard
[root@k8smaster180]# kubectl create serviceaccount dashboard-admin -n kube-system
serviceaccount/dashboard-admin created
[root@k8smaster180]# kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
clusterrolebinding.rbac.authorization.k8s.io/dashboard-admin created
[root@k8smaster180 k8s]# kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
Name: dashboard-admin-token-wmn6c
Namespace: kube-system
Labels: <none>
Annotations: kubernetes.io/service-account.name: dashboard-admin
kubernetes.io/service-account.uid: e92e3ea6-e704-4a7b-8994-c892abb1b36c
Type: kubernetes.io/service-account-token
Data
====
token: eyJhbGciOiJSUzI1NiIsImtpZCI6IjAzNWlES1g2TVl6cXEzVUdrb0Zuc3Z0czVqbmItU0NGNDkzODRJN2g1UnMifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdG9rZW4td21uNmMiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiZTkyZTNlYTYtZTcwNC00YTdiLTg5OTQtYzg5MmFiYjFiMzZjIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmRhc2hib2FyZC1hZG1pbiJ9.VIiatYgC77AgAZiSrKisRyOwwl2ApbeJkHhIN1by315tpFgK8fehFSDOjy597xoq1Qpo9opGHcFv3tmsu3S8V-zCyqmdB-ceAm7GuhSV9gxKmU7FdYReitGWH_XoV6J8reumpkXDBwScxiTO-z9K0Nbf4BBC7MYqUBZMNH15XLOih3Di5x8T2U6QoVhL2-0X_38EnWa1JncDOo5sjOdF0yJ3MiAnK91CNqR5s35xPOS74RaqOs24dLhpOIla3K5_tCn2HkhIIB0T8RMb0okZyiAxQcl1zI0aMtDIT8Ar4iRho1sx7XjahoCUSBNli6WbxXnlJYd6QVk0g37nx0SieQ
ca.crt: 1025 bytes
namespace: 11 bytes
#使用获取到的token登录dashboard,至此部署完成!
X. 过程遇到的问题
eg1.sysctl: cannot stat /proc/sys/net/ipv4/tcp_tw_recycle: No such file or directory
原因是:内核已经更新至5.x 而这个配置项在4.x以上已经废弃了
eg2.The connection to the server localhost:8080 was refused - did you specify the right host or port?
解决方案:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
eg3.[root@k8smaster180 ~]# kubeadm init phase upload-certs --experimental-upload-certs
unknown flag: --experimental-upload-certs
To see the stack trace of this error execute with --v=5 or higher
原因是:新版本命令变了
解决方案:把命令换成kubeadm init phase upload-certs --upload-certs