# 获取密钥
[root@master01~]# ssh-keygen -t rsa
# 备份公钥
[root@master01~]# cp -p .ssh/id_rsa.pub .ssh/authorized_keys
# 本地密钥同步其它节点
[root@master01~]# for H in master0{2..3}; do ssh-copy-id $H; done
[root@master01~]# for H in node0{1..2}; do ssh-copy-id $H; done
1.4 修改各节点hostname
[root@master01~]# for H in master0{2..3}; do ssh $H hostnamectl set-hostname $H; done
[root@master01~]# for H in node0{1..2}; do ssh $H hostnamectl set-hostname $H; done
# 获取初始化配置文件
[root@master01~]# kubeadm config print init-defaults > kubeadm-init.yaml
# 修改初始化配置
[root@master01~]# vim kubeadm-init.yaml
# 主要修改 localAPIEndpoint: 的 ip 地址(此处为 master01 的 ip 地址)、controlPlaneEndpoint:、imageRepository:、apiVersion: kubeproxy.config.k8s.io/v1alpha1 等。
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  # master01's own IP address
  advertiseAddress: 192.168.12.221
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  imagePullPolicy: IfNotPresent
  name: master01
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
# shared endpoint for all control-plane nodes (used by "kubeadm join" below)
controlPlaneEndpoint: "master:6443"
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
# domestic mirror so images pull without access to k8s.gcr.io
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.22.0
networking:
  dnsDomain: cluster.local
  # pod CIDR; must match the CALICO_IPV4POOL_CIDR value in calico.yaml
  podSubnet: "10.100.0.0/16"
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
# 根据配置文件初始化集群
[root@master01~]# kubeadm init --config kubeadm-init.yaml
......
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:
kubeadm join master:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:503e9566a6b17fe9560d42b0fdc5be01c18671808363891ed130adff2298d01f \
	--control-plane
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join master:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:503e9566a6b17fe9560d42b0fdc5be01c18671808363891ed130adff2298d01f
3.4 部署calico网络(master01节点运行)
# 获取calico配置文件
# 该文件可能下载不下来,文件获取链接在本节末尾
[root@master01~]# curl https://docs.projectcalico.org/manifests/calico.yaml -O
# 修改calico配置
[root@master01~]# vim calico.yaml
# 取消CALICO_IPV4POOL_CIDR注释,修改pod网段地址
# 输入CALICO_IPV4POOL_CIDR搜索定位即可
......
            # The default IPv4 pool to create on startup if none exists. Pod IPs will be
            # chosen from this range. Changing this value after installation will have
            # no effect. This should fall within `--cluster-cidr`.
            - name: CALICO_IPV4POOL_CIDR
              value: "10.100.0.0/16"
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
......
# 配置kubectl环境变量
[root@master01~]# mkdir -p $HOME/.kube
[root@master01~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master01~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
# 部署calico
[root@master01~]# kubectl apply -f calico.yaml