1. 安装篇-K8S安装-kubeadm

错误手册

0. Kubernetes-Error

集群规划

主机规划

服务器名称	主机名	主机IP地址	资源配置	节点用途
-	k8s-master-lb	192.169.1.200	-	keepalived虚拟IP(VIP)
K8S-1	k8s-master-201	192.169.1.201	2核|2G	master节点
K8S-2	k8s-master-202	192.169.1.202	2核|2G	master节点
K8S-3	k8s-master-203	192.169.1.203	2核|2G	master节点
K8S-4	k8s-worker-204	192.169.1.204	2核|2G	worker节点
K8S-5	k8s-worker-205	192.169.1.205	2核|2G	worker节点

软件版本

软件	版本	备注
kubernetes	v1.22.6
docker-ce	20.10.12
etcd	3.5.0-0
coredns	v1.8.4
calico	v3.21.4
harbor	v2.3.2
dashboard	v2.4.1
helm	3.8.0
metallb	v0.11.0
ingress-nginx	v1.1.1
metrics-server	v0.6.0
prometheus	v2.32.1
grafana	8.3.3
istio	1.11.1

基础环境配置

配置hostname

涉及节点:所有节点逐个操作

# Set this node's hostname and echo it back to confirm.
# Run ONLY the one line that matches the node you are logged into —
# running all five on a single host would leave it named by the last line.
hostnamectl set-hostname k8s-master-201 && hostname
hostnamectl set-hostname k8s-master-202 && hostname
hostnamectl set-hostname k8s-master-203 && hostname
hostnamectl set-hostname k8s-worker-204 && hostname
hostnamectl set-hostname k8s-worker-205 && hostname

配置hosts

涉及节点:所有节点

cat >> /etc/hosts << 'EOF'
192.169.1.200 k8s-master-lb
192.169.1.201 k8s-master-201
192.169.1.202 k8s-master-202
192.169.1.203 k8s-master-203
192.169.1.204 k8s-worker-204
192.169.1.205 k8s-worker-205
EOF
ping k8s-master-201 -c 1
ping k8s-master-202 -c 1
ping k8s-master-203 -c 1
ping k8s-worker-204 -c 1
ping k8s-worker-205 -c 1

关闭firewalld

涉及节点:所有节点

systemctl disable --now firewalld
systemctl status firewalld | grep Active

关闭selinux

涉及节点:所有节点

setenforce 0
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config

grep 'SELINUX=disabled' /etc/selinux/config

关闭NetworkManager

涉及节点:所有节点

systemctl stop NetworkManager
systemctl disable NetworkManager
systemctl status NetworkManager | grep Active

关闭dnsmasq

涉及节点:所有节点

systemctl stop dnsmasq
systemctl disable dnsmasq
systemctl status dnsmasq| grep Active

关闭swap

涉及节点:所有节点

swapoff -a && sysctl -w vm.swappiness=0
sed -ri 's/.*swap.*/#&/' /etc/fstab
grep 'swap' /etc/fstab

设置时区

涉及节点:所有节点

timedatectl set-timezone Asia/Shanghai

安装ntpdate

涉及节点:所有节点

安装ntpdate

yum -y install epel-release
yum -y install ntpdate

配置时间同步

手动同步时间

ntpdate time2.aliyun.com

定时同步时间

crontab -e
*/5 * * * * ntpdate time2.aliyun.com

开机同步时间

cat >> /etc/rc.d/rc.local << 'EOF'
ntpdate time2.aliyun.com
EOF
chmod +x /etc/rc.d/rc.local

免密登录

涉及节点:k8s-master-201
配置k8s-master-201节点可以免密登录其他节点,用于安装过程中生成的配置文件和证书均在k8s-master-201上操作,集群管理也在k8s-master-201上操作。

生成秘钥

ssh-keygen -t rsa

发送密钥到其他节点

yum -y install sshpass
# Push k8s-master-201's public key to every node for passwordless ssh.
# SECURITY: the original hard-coded the root password ("cmk521") in the
# script; besides living in the file, a password passed via -p is visible
# to other users through `ps`.  Read it from the environment instead —
# export SSH_ROOT_PASS before running (defaults to the original value so
# existing usage keeps working).
SSH_ROOT_PASS="${SSH_ROOT_PASS:-cmk521}"
for HOST_NAME in k8s-master-201 k8s-master-202 k8s-master-203 k8s-worker-204 k8s-worker-205
do
  # -o "StrictHostKeyChecking no" accepts unseen host keys on first contact;
  # use the absolute key path so the loop works from any working directory.
  sshpass -p "$SSH_ROOT_PASS" ssh-copy-id -o "StrictHostKeyChecking no" -i ~/.ssh/id_rsa.pub "$HOST_NAME"
done

配置源

涉及节点:所有节点

安装epel源

yum -y install epel-release

修改国内源

sed -e 's!^metalink=!#metalink=!g' \
    -e 's!^#baseurl=!baseurl=!g' \
    -e 's!//download\.fedoraproject\.org/pub!//mirrors.tuna.tsinghua.edu.cn!g' \
    -e 's!http://mirrors!https://mirrors!g' \
    -i /etc/yum.repos.d/epel.repo /etc/yum.repos.d/epel-testing.repo

安装常用软件

yum -y install net-tools \
    nmap-ncat \
    sysstat \
    git \
    ipset \
    ipvsadm \
    bash-completion \
    wget \
    unzip \
    lrzsz \
    lsof \
    vim

配置docker源

wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo \
   -O /etc/yum.repos.d/docker-ce.repo

配置kubernetes源

cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name = kubernetes
baseurl = https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled = 1
gpgcheck =1
gpgkey = https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg \
  https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
EOF

内核配置

涉及节点:所有节点

CentOS7

centos7内核需要升级至4.18以上
升级内核

rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -Uvh http://elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm

yum --enablerepo=elrepo-kernel install kernel-ml-devel kernel-ml -y
yum --enablerepo=elrepo-kernel install kernel-lt-devel kernel-lt -y

修改默认启动内容

awk -F\' '$1=="menuentry " {print i++ " : " $2}' /etc/grub2.cfg
grub2-set-default 0
grub2-mkconfig -o /boot/grub2/grub.cfg
reboot

检查内核版本

uname -r

注意:
通过命令 awk -F\' '$1=="menuentry " {print i++ " : " $2}' /etc/grub2.cfg 可以查看到可用内核及内核的序号。
通过命令grub2-set-default 0,设置新内核为默认启动的内核。
通过命令grub2-mkconfig -o /boot/grub2/grub.cfg生成grub文件。
通过reboot启动服务器即可。

CentOS8

centos8按需升级
升级内核

rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
yum install https://www.elrepo.org/elrepo-release-8.el8.elrepo.noarch.rpm

yum --enablerepo=elrepo-kernel install kernel-ml -y
yum --enablerepo=elrepo-kernel install kernel-lt -y

修改默认启动内容

#查看当前默认启动内核
grubby --default-kernel

#以新版本内核启动
grub2-set-default 0

#指定某个内核启动
grubby --set-default /boot/vmlinuz-5.19.2-1.el8.elrepo.x86_64

#重启主机
reboot

检查内核版本

uname -r

参数优化

涉及节点:所有节点

系统调优参数

cat > /etc/sysctl.d/k8s.conf <<EOF
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
net.ipv4.ip_forward = 1
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
# 要求iptables不对bridge的数据进行处理
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1
net.netfilter.nf_conntrack_max = 2310720
fs.inotify.max_user_watches=89100
fs.may_detach_mounts = 1
fs.file-max = 52706963
fs.nr_open = 52706963
vm.overcommit_memory=1
# 开启OOM
vm.panic_on_oom=0
# 禁止使用 swap 空间,只有当系统 OOM 时才允许使用它
vm.swappiness=0
# ipvs优化
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
EOF

sysctl --system

文件最大打开数

cat > /etc/security/limits.d/k8s.conf <<EOF
*       soft    nproc   1048576
*       hard    nproc   1048576
*       soft    nofile  1048576
*       hard    nofile  1048576
root    soft    nproc   1048576
root    hard    nproc   1048576
root    soft    nofile  1048576
root    hard    nofile  1048576
EOF

优化日志处理,减少磁盘IO

sed -ri 's/^\$ModLoad imjournal/#&/' /etc/rsyslog.conf
sed -ri 's/^\$IMJournalStateFile/#&/' /etc/rsyslog.conf

sed -ri 's/^#(DefaultLimitCORE)=/\1=100000/' /etc/systemd/system.conf
sed -ri 's/^#(DefaultLimitNOFILE)=/\1=100000/' /etc/systemd/system.conf

ssh 连接优化

sed -ri 's/^#(UseDNS )yes/\1no/' /etc/ssh/sshd_config

基本组件安装

安装ipvs模块

涉及节点:所有节点

cat > /etc/modules-load.d/k8s-ipvs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF

for mod in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack; do
    modprobe $mod
done

安装docker

涉及节点:所有节点
注意
docker版本选择参考https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/,如果对应k8s版本没有docker版本信息,则去上一个版本查找。
image.png
docker依赖

yum install -y yum-utils
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

docker可用版本(可忽略执行)

# List every version of the Docker packages available in the enabled
# repos (optional step — only needed to pick a pin version).
# Fix: the flag is --showduplicates (plural); the last two commands had
# the typo --showduplicate.
yum list docker-ce --showduplicates
yum list containerd.io --showduplicates
yum list docker-ce-rootless-extras --showduplicates

docker安装

# Install Docker pinned to a specific el7 build, plus containerd.
# NOTE(review): the software-version table at the top of this document
# lists docker-ce 20.10.12, but 20.10.24 is installed here — confirm
# which version is intended before following this step verbatim.
yum -y install docker-ce-20.10.24-3.el7 \
docker-ce-cli-20.10.24-3.el7 \
docker-ce-rootless-extras-20.10.24-3.el7 \
containerd.io

docker配置

mkdir /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
  "registry-mirrors": [
    "https://mciwm180.mirror.aliyuncs.com",
    "https://docker.mirrors.ustc.edu.cn/",
    "https://registry.docker-cn.com"
  ],
  "log-driver": "json-file",
  "log-opts": {
    "max-file": "10",
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF

docker启动

systemctl enable --now docker
systemctl enable --now containerd

docker当前版本

docker version

安装k8s组件

涉及节点:所有节点
k8s组件可用版本(可忽略执行)

yum list kubeadm.x86_64 --showduplicates | sort -r

k8s组件安装

yum -y install kubeadm-1.22.6 \
kubectl-1.22.6 \
kubelet-1.22.6

k8s组件当前版本

kubeadm version
kubectl version
kubelet --version

启动kubelet

systemctl daemon-reload
systemctl enable --now kubelet

注意
kubelet可能启动失败,原因是没有设置集群初始化。

命令补全

yum -y install bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc

高可用组件安装

haproxy安装

涉及节点:所有master节点
安装haproxy

yum -y install haproxy

配置haproxy
所有master节点配置相同

# Back up the stock haproxy.cfg first (suffix = today's date).
cp /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg_`date +%F`
# The three apiserver backends.  The heredoc delimiter below is
# UNQUOTED, so these $VARs are expanded when the config is written.
HOST_NAME_1=k8s-master-201
HOST_IP_1=192.169.1.201
HOST_NAME_2=k8s-master-202
HOST_IP_2=192.169.1.202
HOST_NAME_3=k8s-master-203
HOST_IP_3=192.169.1.203
# Front end listens on :16443 and round-robins TCP to the three
# kube-apiservers on :6443; :33305 is a bare health URI, :8006 the
# stats page.  Identical config on all master nodes.
cat > /etc/haproxy/haproxy.cfg << EOF
global
  maxconn 2000
  ulimit-n 16384
  log 127.0.0.1 local0 err
  stats timeout 30s

defaults
  log global
  mode http
  option httplog
  timeout connect 5000
  timeout client 50000
  timeout server 50000
  timeout http-request 15s
  timeout http-keep-alive 15s

frontend monitor-in
  bind *:33305
  mode http
  option httplog
  monitor-uri /monitor

listen stats
  bind *:8006
  mode http
  stats enable
  stats hide-version
  stats uri /stats
  stats refresh 30s
  stats realm Haproxy\ Statistics
  stats auth admin:admin

frontend k8s-master
  bind 0.0.0.0:16443
  bind 127.0.0.1:16443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-master

backend k8s-master
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server $HOST_NAME_1 $HOST_IP_1:6443 check
  server $HOST_NAME_2 $HOST_IP_2:6443 check
  server $HOST_NAME_3 $HOST_IP_3:6443 check
EOF

启动haproxy

systemctl enable --now haproxy

keepalived安装

涉及节点:所有master节点
安装keepalived

yum -y install keepalived

配置keepalived(k8s-master-201)

# Back up the stock config, then write the MASTER-role keepalived config.
cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf_`date +%F`
mkdir -p /etc/keepalived/script


INTERFACE=ens32
HOST_IP=192.169.1.201
HOST_VIP=192.169.1.200
TYPES=MASTER
# Heredoc delimiter is unquoted, so $INTERFACE / $HOST_IP / $HOST_VIP /
# $TYPES expand here.  Fix: use `state $TYPES` like the 202/203 configs —
# the original hard-coded `state MASTER` and left TYPES unused (expands
# to the same value, so behavior is unchanged).
# NOTE(review): all three nodes use priority 100; the MASTER is normally
# given a higher priority than the BACKUPs (e.g. 150) so it reclaims the
# VIP deterministically — confirm the equal priorities are intentional.
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived

global_defs {
  router_id LVS_DEVEL
}

vrrp_script chk_apiserver {
  script "/etc/keepalived/script/check_apiserver.sh"
  interval 2
  weight -5
  fall 3
  rise 2
}

vrrp_instance VI_1 {
  state $TYPES
  interface $INTERFACE
  mcast_src_ip $HOST_IP
  virtual_router_id 51
  priority 100
  advert_int 2
  authentication {
    auth_type PASS
    auth_pass K8SHA_KA_AUTH
  }
  virtual_ipaddress {
    $HOST_VIP
  }
#  track_script {
#    chk_apiserver
#  }
}
EOF

配置keepalived(k8s-master-202)

cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf_`date +%F`
mkdir -p /etc/keepalived/script


INTERFACE=ens32
HOST_IP=192.169.1.202
HOST_VIP=192.169.1.200
TYPES=BACKUP
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived

global_defs {
  router_id LVS_DEVEL
}

vrrp_script chk_apiserver {
  script "/etc/keepalived/script/check_apiserver.sh"
  interval 2
  weight -5
  fall 3
  rise 2
}

vrrp_instance VI_1 {
  state $TYPES
  interface $INTERFACE
  mcast_src_ip $HOST_IP
  virtual_router_id 51
  priority 100
  advert_int 2
  authentication {
    auth_type PASS
    auth_pass K8SHA_KA_AUTH
  }
  virtual_ipaddress {
    $HOST_VIP
  }
#  track_script {
#    chk_apiserver
#  }
}
EOF

配置keepalived(k8s-master-203)

cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf_`date +%F`
mkdir -p /etc/keepalived/script


INTERFACE=ens32
HOST_IP=192.169.1.203
HOST_VIP=192.169.1.200
TYPES=BACKUP
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived

global_defs {
  router_id LVS_DEVEL
}

vrrp_script chk_apiserver {
  script "/etc/keepalived/script/check_apiserver.sh"
  interval 2
  weight -5
  fall 3
  rise 2
}

vrrp_instance VI_1 {
  state $TYPES
  interface $INTERFACE
  mcast_src_ip $HOST_IP
  virtual_router_id 51
  priority 100
  advert_int 2
  authentication {
    auth_type PASS
    auth_pass K8SHA_KA_AUTH
  }
  virtual_ipaddress {
    $HOST_VIP
  }
#  track_script {
#    chk_apiserver
#  }
}
EOF

配置健康检查(所有master节点)

# Write the health-check script that keepalived's chk_apiserver
# vrrp_script runs.  The delimiter "EOF" is QUOTED, so nothing inside is
# expanded by this shell — the script is written verbatim.
# Fix: the original first line was `!#/bin/bash` (inverted shebang),
# which bash would treat as a failing command instead of an interpreter
# directive; it must be `#!/bin/bash`.
cat > /etc/keepalived/script/check_apiserver.sh << "EOF"
#!/bin/bash
# Poll up to 5 times (5 s apart) for a running kube-apiserver process.
# If it never appears, stop keepalived so the VIP fails over to a
# healthy master.
ERR=0
for k in $(seq 1 5)
do
  CHECK_CODE=$(pgrep kube-apiserver)
  if [[ -z $CHECK_CODE ]]; then
    ERR=$((ERR + 1))
    sleep 5
    continue
  else
    ERR=0
    break
  fi
done

if [[ $ERR != "0" ]]; then
  echo "systemctl stop keepalived"
  /usr/bin/systemctl stop keepalived
  exit 1
else
  exit 0
fi
EOF

启动keepalived

systemctl enable --now keepalived

注意
keepalived健康监控已经注释,等待集群部署完成后再取消注释重启keepalived。

集群初始化

配置kubeadm初始化文件

涉及节点:所有master节点
生成kubeadm文件

kubeadm config print init-defaults --component-configs \
KubeProxyConfiguration,KubeletConfiguration > kubeadm-config.yaml

# 查看所需镜像
kubeadm config images list --config kubeadm-config.yaml

编辑kubeadm文件

# Values substituted into kubeadm-config.yaml below (unquoted heredoc).
HOST_IP="192.169.1.201"
HOST_NAME="k8s-master-201"
REGISTRY="registry.cn-hangzhou.aliyuncs.com/google_containers"
# Fix: match the installed packages (kubeadm/kubelet/kubectl 1.22.6 per
# the install step and the version table); the original said 1.22.0.
VERSION="1.22.6"

# Write the kubeadm configuration (InitConfiguration +
# ClusterConfiguration + KubeProxyConfiguration + KubeletConfiguration).
# The EOF delimiter is unquoted, so $HOST_IP, $HOST_NAME, $REGISTRY and
# $VERSION are expanded into the YAML.
# NOTE(review): controlPlaneEndpoint is $HOST_IP:6443, i.e. master-201
# directly.  For the HA design in this document (haproxy on :16443 plus
# keepalived VIP 192.169.1.200) the endpoint would normally be
# 192.169.1.200:16443 — confirm which is intended, otherwise the cluster
# will not fail over when master-201 is down.
cat > ./kubeadm-config.yaml << EOF
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  #advertiseAddress: 1.2.3.4
  advertiseAddress: $HOST_IP
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  imagePullPolicy: IfNotPresent
  #name: node
  name: $HOST_NAME
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
#imageRepository: k8s.gcr.io
imageRepository: $REGISTRY
kind: ClusterConfiguration
#kubernetesVersion: 1.22.0
kubernetesVersion: $VERSION
controlPlaneEndpoint: $HOST_IP:6443
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
bindAddressHardFail: false
clientConnection:
  acceptContentTypes: ""
  burst: 0
  contentType: ""
  kubeconfig: /var/lib/kube-proxy/kubeconfig.conf
  qps: 0
clusterCIDR: ""
configSyncPeriod: 0s
conntrack:
  maxPerCore: null
  min: null
  tcpCloseWaitTimeout: null
  tcpEstablishedTimeout: null
detectLocalMode: ""
enableProfiling: false
healthzBindAddress: ""
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: null
  minSyncPeriod: 0s
  syncPeriod: 0s
ipvs:
  excludeCIDRs: null
  minSyncPeriod: 0s
  scheduler: ""
  strictARP: false
  syncPeriod: 0s
  tcpFinTimeout: 0s
  tcpTimeout: 0s
  udpTimeout: 0s
kind: KubeProxyConfiguration
metricsBindAddress: ""
mode: ""
nodePortAddresses: null
oomScoreAdj: null
portRange: ""
showHiddenMetricsForVersion: ""
udpIdleTimeout: 0s
winkernel:
  enableDSR: false
  networkName: ""
  sourceVip: ""
---
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 0s
    cacheUnauthorizedTTL: 0s
cgroupDriver: systemd
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
logging: {}
memorySwap: {}
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
rotateCertificates: true
runtimeRequestTimeout: 0s
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s
EOF

kubeadm-config.yaml
检查文件

kubeadm init --config ./kubeadm-config.yaml --dry-run

下载镜像

kubeadm config images pull --config ./kubeadm-config.yaml

初始化k8s集群

涉及节点:所有节点
初始化k8s-master-201节点后,会在/etc/kubernetes目录下生成对应的证书和配置。最后将其他master节点、node节点加入到k8s-master-201即可。
k8s-master-201

#文件初始化命令
kubeadm init --config ./kubeadm-config.yaml --upload-certs

初始化成功以后,会产生token值,用于其他节点加入使用

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:
# 配置管理节点
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:
# 添加master节点
  kubeadm join 192.169.1.201:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:f38f229ac8c52df013958675b5f34dc087cbb05fceabf2812cc79507019b10b4 \
	--control-plane --certificate-key 4fcb65ba01180c346087ca289fbddf445af82e692bac65636b63f2ddf7d06b6c

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:
# 添加node节点
kubeadm join 192.169.1.201:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:f38f229ac8c52df013958675b5f34dc087cbb05fceabf2812cc79507019b10b4 

如果初始化失败,需要重置后再初始化

#初始化重置命令
kubeadm reset

使用命令也可以初始化(与配置文件初始化二选一)

# Command-line alternative to the config-file init (use ONE of the two).
# Fix: the original passed --kubernetes-version v1.27.0, but the
# installed kubeadm is 1.22.6 and kubeadm cannot deploy a control plane
# newer than itself — align with the 1.22.6 used everywhere else.
# NOTE(review): --ignore-preflight-errors=all hides real problems
# (swap on, low resources, port conflicts); drop it unless you know why
# a specific check must be skipped.
kubeadm init \
--apiserver-advertise-address=192.169.1.201  \
--image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \
--kubernetes-version v1.22.6 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16 \
--ignore-preflight-errors=all

k8s集群添加节点

如果key过期可以使用如下命令生成新的key。
添加node节点
在k8s-master-201上生成node的token

kubeadm token create --print-join-command

# token结果
kubeadm join 192.169.1.201:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:f38f229ac8c52df013958675b5f34dc087cbb05fceabf2812cc79507019b10b4

在k8s-worker-204上执行命令,添加k8s-worker-204为node节点

kubeadm join 192.169.1.201:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:f38f229ac8c52df013958675b5f34dc087cbb05fceabf2812cc79507019b10b4

在k8s-worker-205上执行命令,添加k8s-worker-205为node节点

kubeadm join 192.169.1.201:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:f38f229ac8c52df013958675b5f34dc087cbb05fceabf2812cc79507019b10b4

添加master节点
在k8s-master-201上生成master的token

kubeadm token create --print-join-command
# token结果
kubeadm join 192.169.1.201:6443 --token b84592.0w9ies5r5nj44tr7 --discovery-token-ca-cert-hash sha256:7b6738e46023e9bd7149e62977fc64ed40cc45f437f3a15ac790af5bb14cc7ca 

kubeadm init phase upload-certs --upload-certs
# token结果
I0516 17:19:58.320947   12440 version.go:255] remote version is much newer: v1.27.1; falling back to: stable-1.22
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
f4559139e998796ba23246d6c0a3d02b262bffeb405c4ee21d9370712662cf52

在k8s-master-202上执行命令,添加k8s-master-202为master节点

kubeadm join 192.169.1.201:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:f38f229ac8c52df013958675b5f34dc087cbb05fceabf2812cc79507019b10b4 \
--control-plane --certificate-key 4fcb65ba01180c346087ca289fbddf445af82e692bac65636b63f2ddf7d06b6c

在k8s-master-203上执行命令,添加k8s-master-203为master节点

kubeadm join 192.169.1.201:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:f38f229ac8c52df013958675b5f34dc087cbb05fceabf2812cc79507019b10b4 \
--control-plane --certificate-key 4fcb65ba01180c346087ca289fbddf445af82e692bac65636b63f2ddf7d06b6c

k8s集群管理节点

k8s-master-201
执行如下命令可以将k8s-master-201当做管理节点

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

检查集群节点

kubectl get nodes

NAME             STATUS     ROLES                  AGE     VERSION
k8s-master-201   NotReady   control-plane,master   4m24s   v1.22.6
k8s-master-202   NotReady   control-plane,master   3m43s   v1.22.6
k8s-master-203   NotReady   control-plane,master   3m39s   v1.22.6
k8s-worker-204   NotReady   <none>                 94s     v1.22.6
k8s-worker-205   NotReady   <none>                 110s    v1.22.6

注意
各个节点NotReady的原因是没有安装网络插件。网络插件有:flannel、calico、canal、kube-router、weave net等,这里使用calico网络插件。

k8s集群环境变量

涉及节点:所有master节点

cat >> /root/.bashrc << EOF
export KUBECONFIG=/etc/kubernetes/admin.conf
EOF
source /root/.bashrc

核心组件安装

安装calico

涉及节点:管理节点k8s-master-201节点
下载calico

curl https://raw.githubusercontent.com/projectcalico/calico/v3.25.1/manifests/calico.yaml -O

calico.yaml
编辑calico

vim ./calico.yaml

安装calico

kubectl apply -f ./calico.yaml

安装metrics-server

涉及节点:管理节点k8s-master-201节点
下载metrics-server

wget -O /root/metrics-server-v0.6.0.yaml \
https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.6.0/components.yaml

metrics-server-v0.6.0.yaml
修改metrics-server配置

sed -i 's@k8s.gcr.io/metrics-server@k8simage@' /root/metrics-server-v0.6.0.yaml
sed -i '/args:/a\        - --kubelet-insecure-tls' /root/metrics-server-v0.6.0.yaml

安装metrics-server

kubectl apply -f /root/metrics-server-v0.6.0.yaml

验证metrics-server
需要等待 metrics-server 的 pod 就绪之后再等它收集一会再看。
查看node情况

kubectl top nodes

NAME             CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
k8s-master-201   168m         8%     1362Mi          73%       
k8s-master-202   121m         6%     1228Mi          66%       
k8s-master-203   122m         6%     1211Mi          65%       
k8s-worker-204   49m          2%     850Mi           46%       
k8s-worker-205   46m          2%     823Mi           44%  

查看pod情况

kubectl top pods -n kube-system

NAME                                       CPU(cores)   MEMORY(bytes)   
calico-kube-controllers-5b67bc688c-g44xc   1m           24Mi            
calico-node-89t7x                          15m          117Mi           
calico-node-bghvw                          20m          91Mi            
calico-node-cdszv                          15m          120Mi           
calico-node-d4czf                          17m          91Mi            
calico-node-h2rx6                          16m          118Mi           
coredns-7d89d9b6b8-c485g                   1m           21Mi            
coredns-7d89d9b6b8-mxvmh                   1m           16Mi            
etcd-k8s-master-201                        23m          83Mi            
etcd-k8s-master-202                        20m          85Mi            
etcd-k8s-master-203                        20m          83Mi            
kube-apiserver-k8s-master-201              47m          398Mi           
kube-apiserver-k8s-master-202              43m          321Mi           
kube-apiserver-k8s-master-203              40m          301Mi           
kube-controller-manager-k8s-master-201     13m          81Mi            
kube-controller-manager-k8s-master-202     1m           45Mi            
kube-controller-manager-k8s-master-203     1m           41Mi            
kube-proxy-4q47s                           1m           30Mi            
kube-proxy-bbn4c                           1m           17Mi            
kube-proxy-cpqzr                           1m           19Mi            
kube-proxy-wv4qh                           1m           30Mi            
kube-proxy-z8xfk                           1m           16Mi            
kube-scheduler-k8s-master-201              2m           36Mi            
kube-scheduler-k8s-master-202              2m           38Mi            
kube-scheduler-k8s-master-203              2m           31Mi            
metrics-server-f657d6869-mmfns             2m           22Mi

安装dashboard

涉及节点:管理节点k8s-master-201节点
下载dashboard

wget -O dashboard-v2.4.0.yaml https://raw.githubusercontent.com/kubernetes/dashboard/v2.4.0/aio/deploy/recommended.yaml

dashboard-v2.4.0.yaml
修改dashboard配置

# Option A — expose the dashboard Service as NodePort on 30443.
# Pick EITHER Option A or Option B below: running both appends two
# conflicting `type:` lines into the Service manifest.
sed -i '39a\  type: NodePort' dashboard-v2.4.0.yaml
sed -i '43a\      nodePort: 30443' dashboard-v2.4.0.yaml

# Option B — expose the dashboard Service as LoadBalancer.
sed -i '39a\  type: LoadBalancer' dashboard-v2.4.0.yaml

# Only needed when supplying a custom certificate: delete the manifest's
# auto-generated certs section (lines 48-58 of the downloaded file).
sed -i '48,58d' dashboard-v2.4.0.yaml 

# Image pull policy: prefer the locally cached image over Always pulling.
sed -i '/imagePullPolicy/s@Always@IfNotPresent@' dashboard-v2.4.0.yaml

安装dashboard

kubectl create -f dashboard-v2.4.0.yaml

创建kubernetes-dashboard-certs

openssl genrsa -out dashboard.key 2048
openssl req -days 36500 -new -out dashboard.csr -key dashboard.key -subj '/CN=dashboard-cert'
openssl x509 -req -in dashboard.csr -signkey dashboard.key -out dashboard.crt

kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key \
  --from-file=dashboard.crt -n kubernetes-dashboard

安装 dashboard 的 serviceAccount

cat > ./dashboard-admin.yaml<<EOF
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: dashboard-admin
  namespace: kubernetes-dashboard

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dashboard-admin-bind-cluster-role
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: dashboard-admin
  namespace: kubernetes-dashboard
EOF

kubectl create -f ./dashboard-admin.yaml

查看dashboard

kubectl get pod -n kubernetes-dashboard
NAME                                        READY   STATUS              RESTARTS   AGE
dashboard-metrics-scraper-c45b7869d-84qvb   1/1     Running             0          4m46s
kubernetes-dashboard-74979b6cf6-4vcxt       1/1     Running             0          4m46s

查看dashboard端口

kubectl get svc -A |grep kubernetes-dashboard
kubernetes-dashboard   dashboard-metrics-scraper   ClusterIP   10.99.19.162    <none>        8000/TCP                 4m2s
kubernetes-dashboard   kubernetes-dashboard        NodePort    10.99.170.130   <none>        443:30443/TCP            4m2s

登录dashboard
查看token值(token值即dashboard的登录密码)

kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret \
  | grep dashboard-admin | awk '{print $1}') | grep token: | awk '{print $2}'
eyJhbGciOiJSUzI1NiIsImtpZCI6InF0dEdjcFpGRjBVRllkcnM4TEVPVnMxV0Z0QkJwc0lKMlUyWE5RYlRaUnMifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkZWZhdWx0LXRva2VuLXFoMjRwIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImRlZmF1bHQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJlMzFmZGJmMS1mMDgyLTQ5YjQtYTJiNi1hZDQ3NDFmYTk4Y2EiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6ZGVmYXVsdCJ9.Rg-hW9eZcjZd6qGUHUA_mQqaRikE2OMN8_Z6gvLwl7TeKD7jh4R0wQjFpJ1qWGco-KeWrbeNvIE6QYVlS0vGD6SUhwe4ADeEhU2bEHfGjSCMjlm91PMGb2Gc5yKuN4nU_jP6OTWlNFKar3OFUDU4LK2IxpzXQ05KttmzTBwZfYK3vsyaQsMuQ0YaRQ7_0ClUJhf9MSpi3otvBFm4U4gf_eH2S7_HpFml2bCGzDET3kiymIL-ghjzykWgpBvb1_E4pvEXNHEnanGhzDNdEO35tqWSe_I_QE9K5OOcSxpQh7asMfgczr0HFsIhGZNzY98rErEMVgmZpDQS-eP1PHPBsw
eyJhbGciOiJSUzI1NiIsImtpZCI6InF0dEdjcFpGRjBVRllkcnM4TEVPVnMxV0Z0QkJwc0lKMlUyWE5RYlRaUnMifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi12eG5jciIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjVkODZmMWU1LTYwMzctNDczYy1hYjc0LTJjNDg1ZTZmYmRkMiIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.twjKSQh4RX4jFW9zNaPP4KjF8VxD8MwdywFOrEDpiBlOjIH1Y1crL5Z6lr-g8TRFCNJ4N8X_cpfKtWiJxM3TfbEAfpMpgUoQWY7dVTSiSiCQk0ODN3hF4qlsnDxCuq44KEoD9wCByvRb6OhSS7u4BMLndxA3mzPNXZMlx2rzHSPPhusH2MxgMYdE-aVv4ZQfq7MbJLDePWUL1gGDWyHN1T4Yz8R9LkX5efNZ-0JOkzdmT7e6-5aHlCkC9uoADrgNFuUFR5iwYSOgRRnDDlZvknVFeZVlz9szofdKD2Gj7cSyfTcvx86BOkCIVmsoyw4FLaoL4PBEGh4LFAWwz8d5JQ

登录页面 https://192.169.1.201:30443/ (NodePort 30443映射的是Service的443端口,需使用https访问)
image.png

  • 1
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 1
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值