[CentOS 7.9] Kubernetes v1.27.2 Binary Deployment Script

1. Cluster Environment Preparation

1.1. Host Planning

IP              Hostname      Role            OS           Installed Components
192.168.11.71   k8s-master1   master,worker   CentOS 7.9   api-server, controller-manager, scheduler, etcd, kubectl, kubelet, kube-proxy, containerd, runc

1.2. Software Versions

Software     Version
kubernetes   v1.27.2
etcd         v3.5.9
calico       v3.26.0
coredns      v1.8.4
containerd   v1.7.2
runc         v1.1.7
crictl       v1.27.2
cni          v1.3.0
cfssl        v1.6.4

1.3. Network Allocation

Network          CIDR
Node network     192.168.11.0/24
Service network  10.96.0.0/12
Pod network      172.16.0.0/12

2. Cluster Deployment

2.1. Host Preparation

Change each host's IP address and hostname. Since my hosts were all cloned from VMware, they start out with identical IPs. The UUID also needs to be changed: every host must have a unique UUID, and the MAC addresses must differ as well. Pay special attention to VMs created by cloning, since their UUIDs and MAC addresses are very likely identical.
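Before editing anything, it helps to confirm which values actually collide between clones (a quick check; ens32 is the interface name used throughout this guide):

# These two values must differ on every host
cat /sys/class/dmi/id/product_uuid
cat /sys/class/net/ens32/address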

sed -i 's/IPADDR=.*/IPADDR=192.168.11.71/g' /etc/sysconfig/network-scripts/ifcfg-ens32
hostnamectl set-hostname k8s-master1

# Generate a random UUID
UUID=$(cat /proc/sys/kernel/random/uuid)
sed -i "s/UUID=.*/UUID=${UUID}/g" /etc/sysconfig/network-scripts/ifcfg-ens32

cat >> /etc/hosts << EOF
192.168.11.71 k8s-master1
EOF

reboot

After the reboot, check that the ipvs- and containerd-related kernel modules are loaded:

lsmod | grep --color=auto -e ip_vs -e nf_conntrack

lsmod | grep -E 'br_netfilter|overlay'
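If either module is missing from the output, it can be loaded by hand as a stopgap; the modules-load.d configuration in section 2.3 makes this persistent:

modprobe overlay
modprobe br_netfilter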

2.2. Download the etcd, Kubernetes, and cfssl Packages

Available Kubernetes releases are listed on the project's GitHub releases page.

# Create the working directory
mkdir -p /data/work
cd /data/work

# Download the cfssl tools
CFSSL_VERSION=1.6.4
wget -O cfssl_linux-amd64 https://ghproxy.com/https://github.com/cloudflare/cfssl/releases/download/v${CFSSL_VERSION}/cfssl_${CFSSL_VERSION}_linux_amd64
wget -O cfssljson_linux-amd64 https://ghproxy.com/https://github.com/cloudflare/cfssl/releases/download/v${CFSSL_VERSION}/cfssljson_${CFSSL_VERSION}_linux_amd64
wget -O cfssl-certinfo_linux-amd64 https://ghproxy.com/https://github.com/cloudflare/cfssl/releases/download/v${CFSSL_VERSION}/cfssl-certinfo_${CFSSL_VERSION}_linux_amd64

chmod +x cfssl*
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
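
# Quick sanity check that the cfssl tools are on PATH and executable
cfssl version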


ETCD_VERSION=v3.5.9
wget https://ghproxy.com/https://github.com/etcd-io/etcd/releases/download/${ETCD_VERSION}/etcd-${ETCD_VERSION}-linux-amd64.tar.gz
tar -zxvf etcd-${ETCD_VERSION}-linux-amd64.tar.gz
cp -ar etcd-${ETCD_VERSION}-linux-amd64/etcd* /usr/local/bin
chmod +x /usr/local/bin/etcd*
ls -ll /usr/local/bin/etcd*
etcdctl version

CNI_PLUGIN_VERSION=1.3.0
wget https://ghproxy.com/https://github.com/containernetworking/plugins/releases/download/v${CNI_PLUGIN_VERSION}/cni-plugins-linux-amd64-v${CNI_PLUGIN_VERSION}.tgz
mkdir -p /etc/cni/net.d /opt/cni/bin
tar -xvzf cni-plugins-linux-amd64-v*.tgz -C /opt/cni/bin/

CONTAINERD_VERSION=1.7.2
wget https://ghproxy.com/https://github.com/containerd/containerd/releases/download/v${CONTAINERD_VERSION}/cri-containerd-cni-${CONTAINERD_VERSION}-linux-amd64.tar.gz
tar -xvzf cri-containerd-cni-*-linux-amd64.tar.gz -C /

RUNC_VERSION=1.1.7
wget https://ghproxy.com/https://github.com/opencontainers/runc/releases/download/v${RUNC_VERSION}/runc.amd64
mv runc.amd64 runc && chmod +x runc && mv -f runc /usr/local/sbin/


K8S_VERSION=v1.27.2
wget https://files.m.daocloud.io/storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/kubernetes-server-linux-amd64.tar.gz
tar -xvzf kubernetes-server-linux-amd64.tar.gz
cd  kubernetes/server/bin/

cp kube-apiserver kube-controller-manager kube-scheduler kubectl kubelet kube-proxy /usr/local/bin/

# Return to the /data/work working directory; all subsequent steps run from there
cd /data/work/
rm /data/work/* -rf

Verify the result: there should be 21 binaries in total.

[root@k8s-master1 work]# ls /usr/local/bin/ -ll
total 789112
-rwxr-xr-x 1 root   root   12054528 Apr 11 03:07 cfssl
-rwxr-xr-x 1 root   root    9560064 Apr 11 03:08 cfssl-certinfo
-rwxr-xr-x 1 root   root    7643136 Apr 11 03:07 cfssljson
-rwxr-xr-x 1 root   root   38939752 Jun  3 07:07 containerd
-rwxr-xr-x 1 root   root    6590464 Jun  3 07:07 containerd-shim
-rwxr-xr-x 1 root   root    8298496 Jun  3 07:07 containerd-shim-runc-v1
-rwxr-xr-x 1 root   root   12042240 Jun  3 07:07 containerd-shim-runc-v2
-rwxr-xr-x 1 root   root   17637376 Jun  3 07:07 containerd-stress
-rwxr-xr-x 1 root   root   53478227 Jun  3 07:09 crictl
-rwxr-xr-x 1 root   root   55508307 Jun  3 07:09 critest
-rwxr-xr-x 1 root   root   26234016 Jun  3 07:10 ctd-decoder
-rwxr-xr-x 1 root   root   18657280 Jun  3 07:07 ctr
-rwxr-xr-x 1 528287 89939  22474752 May 11 19:40 etcd
-rwxr-xr-x 1 528287 89939  16998400 May 11 19:40 etcdctl
-rwxr-xr-x 1 528287 89939  14118912 May 11 19:40 etcdutl
-rwxr-xr-x 1 root   root  116576256 Jul 12 22:41 kube-apiserver
-rwxr-xr-x 1 root   root  108429312 Jul 12 22:41 kube-controller-manager
-rwxr-xr-x 1 root   root   49258496 Jul 12 22:41 kubectl
-rwxr-xr-x 1 root   root  106151936 Jul 12 22:41 kubelet
-rwxr-xr-x 1 root   root   53067776 Jul 12 22:41 kube-proxy
-rwxr-xr-x 1 root   root   54325248 Jul 12 22:41 kube-scheduler
[root@k8s-master1 work]#
[root@k8s-master1 work]# ls /usr/local/bin/ |wc -l
21
[root@k8s-master1 work]#
[root@k8s-master1 work]# ls /opt/cni/bin/ -ll
total 76636
-rwxr-xr-x 1 root root  4016001 May 10 03:53 bandwidth
-rwxr-xr-x 1 root root  4531309 May 10 03:53 bridge
-rwxr-xr-x 1 root root 10816051 May 10 03:53 dhcp
-rwxr-xr-x 1 root root  4171248 May 10 03:53 dummy
-rwxr-xr-x 1 root root  4649749 May 10 03:53 firewall
-rwxr-xr-x 1 root root  4059321 May 10 03:53 host-device
-rwxr-xr-x 1 root root  3444776 May 10 03:53 host-local
-rwxr-xr-x 1 root root  4193323 May 10 03:53 ipvlan
-rwxr-xr-x 1 root root  3514598 May 10 03:53 loopback
-rwxr-xr-x 1 root root  4227193 May 10 03:53 macvlan
-rwxr-xr-x 1 root root  3955775 May 10 03:53 portmap
-rwxr-xr-x 1 root root  4348835 May 10 03:53 ptp
-rwxr-xr-x 1 root root  3716095 May 10 03:53 sbr
-rwxr-xr-x 1 root root  2984504 May 10 03:53 static
-rwxr-xr-x 1 root root  4258344 May 10 03:53 tap
-rwxr-xr-x 1 root root  3603365 May 10 03:53 tuning
-rwxr-xr-x 1 root root  4187498 May 10 03:53 vlan
-rwxr-xr-x 1 root root  3754911 May 10 03:53 vrf

2.3. Install containerd

# Create the systemd service file
tee /etc/systemd/system/containerd.service << 'EOF'
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target

[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=infinity
TasksMax=infinity
OOMScoreAdjust=-999

[Install]
WantedBy=multi-user.target
EOF

# Configure the kernel modules containerd needs
cat <<EOF | tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF

# Load the overlay and br_netfilter modules
systemctl restart systemd-modules-load.service

# Configure the kernel parameters containerd needs
cat <<EOF | tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables  = 1
net.ipv4.ip_forward                 = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF

# Apply the kernel parameters
sysctl --system

# Generate containerd's default configuration file
mkdir -p /etc/containerd
containerd config default | tee /etc/containerd/config.toml

# Adjust the containerd configuration
sed -i "s#SystemdCgroup\ \=\ false#SystemdCgroup\ \=\ true#g" /etc/containerd/config.toml
cat /etc/containerd/config.toml | grep SystemdCgroup

sed -i "s#registry.k8s.io#m.daocloud.io/registry.k8s.io#g" /etc/containerd/config.toml
cat /etc/containerd/config.toml | grep sandbox_image

sed -i "s#config_path\ \=\ \"\"#config_path\ \=\ \"/etc/containerd/certs.d\"#g" /etc/containerd/config.toml
cat /etc/containerd/config.toml | grep certs.d

mkdir /etc/containerd/certs.d/docker.io -pv

# Configure a registry mirror
cat > /etc/containerd/certs.d/docker.io/hosts.toml << EOF
server = "https://docker.io"
[host."https://hub-mirror.c.163.com"]
  capabilities = ["pull", "resolve"]
EOF

systemctl daemon-reload
systemctl enable --now containerd
systemctl restart  containerd

# Generate the crictl configuration file
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF

systemctl restart  containerd

# Verify that containerd is installed correctly
crictl info

# Verify that images can be pulled
ctr images pull docker.io/library/redis:alpine
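Pulling through crictl exercises the CRI plugin and the crictl.yaml endpoint configured above, so it is a slightly stricter check than ctr (the image name here is just an example):

# Pull and list an image via the CRI
crictl pull docker.io/library/busybox:latest
crictl images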

2.4. Generate the CA Certificate

cat > ca-csr.json   << EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "Kubernetes-manual"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
EOF

cfssl gencert -initca ca-csr.json  | cfssljson -bare ca

cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "876000h"
      }
    }
  }
}
EOF

mkdir -p /etc/kubernetes/pki
cp ca*.pem /etc/kubernetes/pki/
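
To double-check what was just issued, cfssl-certinfo can decode the certificate; the expiry should be roughly 100 years out, matching the 876000h configured above:

# Inspect the CA certificate
cfssl-certinfo -cert ca.pem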

2.5. Deploy etcd

# Generate the etcd CA CSR file
tee etcd-ca-csr.json << 'EOF'
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
EOF

# Create the etcd CSR file
tee etcd-csr.json << 'EOF'
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ]
}
EOF

# Generate the etcd CA certificate
cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare etcd-ca

# Issue the etcd server/peer certificate
cfssl gencert \
   -ca=etcd-ca.pem \
   -ca-key=etcd-ca-key.pem \
   -config=ca-config.json \
   -hostname=127.0.0.1,k8s-master1,192.168.11.71 \
   -profile=kubernetes \
   etcd-csr.json | cfssljson -bare etcd
ls -ll etcd*.pem

# Create the etcd configuration file
tee etcd-config.yml << 'EOF'
name: 'etcd1'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.11.71:2380'
listen-client-urls: 'https://192.168.11.71:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.11.71:2380'
advertise-client-urls: 'https://192.168.11.71:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'etcd1=https://192.168.11.71:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF

# Create the etcd systemd service file
tee etcd.service << 'EOF'
[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target

[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd-config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Alias=etcd3.service
EOF

mkdir -p /etc/etcd/ssl
cp etcd*.pem /etc/etcd/ssl/
cp etcd-config.yml /etc/etcd/
cp etcd.service /usr/lib/systemd/system/

mkdir -p /etc/kubernetes/pki/etcd /var/lib/etcd
ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/

# Start etcd
systemctl daemon-reload && systemctl enable etcd.service && systemctl start etcd.service


# Check the health of the etcd endpoints
# (ETCDCTL_API=3 must prefix the command itself, not stand alone before &&; with etcd v3.5 the v3 API is the default anyway)
ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem --endpoints=https://192.168.11.71:2379 endpoint health

# List the cluster members
ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem --endpoints=https://192.168.11.71:2379 member list

# Show the database size and which node is the leader
ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem --endpoints=https://192.168.11.71:2379 endpoint status

# Benchmark etcd performance
ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem --endpoints=https://192.168.11.71:2379 check perf
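
The same certificate flags also work for taking a snapshot backup, which is worth doing before the control plane starts writing data (a sketch; the target path is arbitrary):

ETCDCTL_API=3 /usr/local/bin/etcdctl --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem --endpoints=https://192.168.11.71:2379 snapshot save /data/work/etcd-backup.db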

Sample output:

[root@k8s-master1 work]# # Check the health of the etcd endpoints
[root@k8s-master1 work]# ETCDCTL_API=3 && /usr/local/bin/etcdctl --write-out=table --cacert=/etc/kubernetes/pki/ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem --endpoints=https://192.168.11.71:2379 endpoint health
+----------------------------+--------+-------------+-------+
|          ENDPOINT          | HEALTH |    TOOK     | ERROR |
+----------------------------+--------+-------------+-------+
| https://192.168.11.71:2379 |   true | 14.286573ms |       |
+----------------------------+--------+-------------+-------+
[root@k8s-master1 work]#
[root@k8s-master1 work]# # List the cluster members
[root@k8s-master1 work]# ETCDCTL_API=3 && /usr/local/bin/etcdctl --write-out=table --cacert=/etc/kubernetes/pki/ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem --endpoints=https://192.168.11.71:2379 member list
+------------------+---------+-------+----------------------------+----------------------------+------------+
|        ID        | STATUS  | NAME  |         PEER ADDRS         |        CLIENT ADDRS        | IS LEARNER |
+------------------+---------+-------+----------------------------+----------------------------+------------+
| 5ce02800cf5f635d | started | etcd1 | https://192.168.11.71:2380 | https://192.168.11.71:2379 |      false |
+------------------+---------+-------+----------------------------+----------------------------+------------+
[root@k8s-master1 work]#
[root@k8s-master1 work]# # Show the database size and which node is the leader
[root@k8s-master1 work]# ETCDCTL_API=3 && /usr/local/bin/etcdctl --write-out=table --cacert=/etc/kubernetes/pki/ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem --endpoints=https://192.168.11.71:2379 endpoint status
+----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|          ENDPOINT          |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| https://192.168.11.71:2379 | 5ce02800cf5f635d |   3.5.4 |   20 kB |      true |      false |         2 |          5 |                  5 |        |
+----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
[root@k8s-master1 work]#
[root@k8s-master1 work]# # Benchmark etcd performance
[root@k8s-master1 work]# ETCDCTL_API=3 && /usr/local/bin/etcdctl --write-out=table --cacert=/etc/kubernetes/pki/ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem --endpoints=https://192.168.11.71:2379 check perf
 59 / 60 Booooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooom  !  98.33%PASS: Throughput is 150 writes/s
PASS: Slowest request took 0.073586s
PASS: Stddev is 0.002288s
PASS
[root@k8s-master1 work]#

2.6. Deploy kube-apiserver

tee kube-apiserver-csr.json << 'EOF'
{
  "CN": "kube-apiserver",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "Kubernetes",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

# Generate the apiserver certificate
cfssl gencert   \
  -ca=ca.pem   \
  -ca-key=ca-key.pem   \
  -config=ca-config.json   \
  -hostname=10.96.0.1,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,192.168.11.71  \
  -profile=kubernetes   kube-apiserver-csr.json | cfssljson -bare kube-apiserver



cat > front-proxy-ca-csr.json  << EOF
{
  "CN": "kubernetes",
  "key": {
     "algo": "rsa",
     "size": 2048
  },
  "ca": {
    "expiry": "876000h"
  }
}
EOF

cat > front-proxy-client-csr.json  << EOF
{
  "CN": "front-proxy-client",
  "key": {
     "algo": "rsa",
     "size": 2048
  }
}
EOF


# Generate the apiserver aggregation-layer (front-proxy) certificates
cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare front-proxy-ca

cfssl gencert  \
  -ca=front-proxy-ca.pem   \
  -ca-key=front-proxy-ca-key.pem   \
  -config=ca-config.json   \
  -profile=kubernetes   front-proxy-client-csr.json | cfssljson -bare front-proxy-client

# Create the ServiceAccount key pair
openssl genrsa -out sa.key 2048
openssl rsa -in sa.key -pubout -out sa.pub


# Create the apiserver configuration file
tee kube-apiserver.conf << 'EOF'
KUBE_APISERVER_OPTS="--v=4  \
      --allow-privileged=true  \
      --bind-address=0.0.0.0  \
      --secure-port=6443  \
      --advertise-address=192.168.11.71 \
      --service-cluster-ip-range=10.96.0.0/12  \
      --service-node-port-range=30000-50000  \
      --etcd-servers=https://192.168.11.71:2379 \
      --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem  \
      --etcd-certfile=/etc/etcd/ssl/etcd.pem  \
      --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem  \
      --client-ca-file=/etc/kubernetes/pki/ca.pem  \
      --tls-cert-file=/etc/kubernetes/pki/kube-apiserver.pem  \
      --tls-private-key-file=/etc/kubernetes/pki/kube-apiserver-key.pem  \
      --kubelet-client-certificate=/etc/kubernetes/pki/kube-apiserver.pem  \
      --kubelet-client-key=/etc/kubernetes/pki/kube-apiserver-key.pem  \
      --service-account-key-file=/etc/kubernetes/pki/sa.pub  \
      --service-account-signing-key-file=/etc/kubernetes/pki/sa.key  \
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \
      --authorization-mode=Node,RBAC  \
      --enable-bootstrap-token-auth=true  \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem  \
      --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem  \
      --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem  \
      --requestheader-allowed-names=aggregator  \
      --requestheader-group-headers=X-Remote-Group  \
      --requestheader-extra-headers-prefix=X-Remote-Extra-  \
      --requestheader-username-headers=X-Remote-User \
      --enable-aggregator-routing=true"
EOF

# Create the apiserver systemd service file
tee kube-apiserver.service << 'EOF'
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=etcd.service
Wants=etcd.service

[Service]
EnvironmentFile=-/etc/kubernetes/kube-apiserver.conf
ExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=10s
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# Copy the apiserver certificates and configs into place
cp kube-apiserver*.pem /etc/kubernetes/pki/
cp front-proxy*.pem /etc/kubernetes/pki/
cp sa.pub sa.key /etc/kubernetes/pki/
cp kube-apiserver.conf /etc/kubernetes/
cp kube-apiserver.service /usr/lib/systemd/system/
# These directories are required on all nodes
mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes

# Start the service
systemctl daemon-reload && systemctl enable kube-apiserver && systemctl start kube-apiserver


systemctl status kube-apiserver

curl --insecure https://192.168.11.71:6443/

Sample output:

[root@k8s-master1 work]#
[root@k8s-master1 work]# systemctl status kube-apiserver
● kube-apiserver.service - Kubernetes API Server
   Loaded: loaded (/usr/lib/systemd/system/kube-apiserver.service; enabled; vendor preset: disabled)
   Active: active (running) since Thu 2023-07-13 23:51:48 CST; 5ms ago
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 2059 (kube-apiserver)
    Tasks: 23
   Memory: 301.5M
   CGroup: /system.slice/kube-apiserver.service
           └─2059 /usr/local/bin/kube-apiserver --v=4 --allow-privileged=true --bind-address=0.0.0.0 --secure-port=6443 --advertise-address=192.168.11.71...

Jul 13 23:51:48 k8s-master1 systemd[1]: Started Kubernetes API Server.
Jul 13 23:51:48 k8s-master1 kube-apiserver[2059]: I0713 23:51:48.745288    2059 reflector.go:323] Listing and watching *v1.ConfigMap from pkg/con...r.go:444
Jul 13 23:51:48 k8s-master1 kube-apiserver[2059]: I0713 23:51:48.745315    2059 dynamic_serving_content.go:132] "Starting controller" name="servi...key.pem"
Jul 13 23:51:48 k8s-master1 kube-apiserver[2059]: I0713 23:51:48.745354    2059 tlsconfig.go:240] "Starting DynamicServingCertificateController"
Jul 13 23:51:48 k8s-master1 kube-apiserver[2059]: I0713 23:51:48.745427    2059 reflector.go:287] Starting reflector *v1.Service (10m0s) from ven...y.go:150
Jul 13 23:51:48 k8s-master1 kube-apiserver[2059]: I0713 23:51:48.745433    2059 reflector.go:323] Listing and watching *v1.Service from vendor/k8...y.go:150
Jul 13 23:51:48 k8s-master1 kube-apiserver[2059]: I0713 23:51:48.745437    2059 apiservice_controller.go:97] Starting APIServiceRegistrationController
Jul 13 23:51:48 k8s-master1 kube-apiserver[2059]: I0713 23:51:48.745441    2059 cache.go:32] Waiting for caches to sync for APIServiceRegistratio...ntroller
Jul 13 23:51:48 k8s-master1 kube-apiserver[2059]: I0713 23:51:48.745442    2059 reflector.go:287] Starting reflector *v1.APIService (30s) from pk...y.go:132
Jul 13 23:51:48 k8s-master1 kube-apiserver[2059]: I0713 23:51:48.745447    2059 reflector.go:323] Listing and watching *v1.APIService from pkg/cl...y.go:132
Jul 13 23:51:48 k8s-master1 kube-apiserver[2059]: I0713 23:51:48.745457    2059 reflector.go:287] Starting reflector *v1.PriorityClass (10m0s) fr...y.go:150
Hint: Some lines were ellipsized, use -l to show in full.
[root@k8s-master1 work]#
[root@k8s-master1 work]# curl --insecure https://192.168.11.71:6443/
{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {},
  "status": "Failure",
  "message": "forbidden: User \"system:anonymous\" cannot get path \"/\"",
  "reason": "Forbidden",
  "details": {},
  "code": 403
}

2.7. Deploy kube-controller-manager

tee kube-controller-manager-csr.json << 'EOF'
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-controller-manager",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

# Generate the controller-manager certificate
cfssl gencert \
   -ca=ca.pem \
   -ca-key=ca-key.pem \
   -hostname=127.0.0.1,192.168.11.71 \
   -config=ca-config.json \
   -profile=kubernetes \
   kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager


# Create the kube-controller-manager kubeconfig
kubectl config set-cluster kubernetes \
     --certificate-authority=ca.pem \
     --embed-certs=true \
     --server=https://192.168.11.71:6443 \
     --kubeconfig=kube-controller-manager.kubeconfig

# Set the context
kubectl config set-context system:kube-controller-manager@kubernetes \
    --cluster=kubernetes \
    --user=system:kube-controller-manager \
    --kubeconfig=kube-controller-manager.kubeconfig

# Set the client credentials
kubectl config set-credentials system:kube-controller-manager \
     --client-certificate=kube-controller-manager.pem \
     --client-key=kube-controller-manager-key.pem \
     --embed-certs=true \
     --kubeconfig=kube-controller-manager.kubeconfig

# Switch to the new context
kubectl config use-context system:kube-controller-manager@kubernetes \
     --kubeconfig=kube-controller-manager.kubeconfig
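
Optionally verify that the kubeconfig was assembled correctly before moving on (the certificate data is embedded, so the file is self-contained):

# The cluster, user, and context entries should all be present
kubectl config view --kubeconfig=kube-controller-manager.kubeconfig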

# Create the controller-manager configuration file
tee kube-controller-manager.conf << 'EOF'
KUBE_CONTROLLER_MANAGER_OPTS="--v=4 \
      --bind-address=0.0.0.0 \
      --root-ca-file=/etc/kubernetes/pki/ca.pem \
      --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
      --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
      --service-account-private-key-file=/etc/kubernetes/pki/sa.key \
      --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
      --leader-elect=true \
      --use-service-account-credentials=true \
      --node-monitor-grace-period=40s \
      --node-monitor-period=5s \
      --controllers=*,bootstrapsigner,tokencleaner \
      --allocate-node-cidrs=true \
      --service-cluster-ip-range=10.96.0.0/12 \
      --cluster-cidr=172.16.0.0/12 \
      --node-cidr-mask-size-ipv4=24 \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem"
EOF

# Create the systemd service file
tee kube-controller-manager.service << 'EOF'
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/kube-controller-manager.conf
ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF


# Copy the files into place
cp kube-controller-manager*.pem /etc/kubernetes/pki/
cp kube-controller-manager.kubeconfig /etc/kubernetes/
cp kube-controller-manager.conf /etc/kubernetes/
cp kube-controller-manager.service /usr/lib/systemd/system/


systemctl daemon-reload && systemctl enable kube-controller-manager && systemctl start kube-controller-manager && systemctl status kube-controller-manager


2.8. Deploy kube-scheduler

tee kube-scheduler-csr.json << 'EOF'
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-scheduler",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

# Generate the certificate
cfssl gencert \
   -ca=ca.pem \
   -ca-key=ca-key.pem \
   -config=ca-config.json \
   -profile=kubernetes \
   kube-scheduler-csr.json | cfssljson -bare kube-scheduler


# Create the kube-scheduler kubeconfig
kubectl config set-cluster kubernetes \
     --certificate-authority=ca.pem \
     --embed-certs=true \
     --server=https://192.168.11.71:6443 \
     --kubeconfig=kube-scheduler.kubeconfig

# Set the client credentials
kubectl config set-credentials system:kube-scheduler \
     --client-certificate=kube-scheduler.pem \
     --client-key=kube-scheduler-key.pem \
     --embed-certs=true \
     --kubeconfig=kube-scheduler.kubeconfig

# Set the context
kubectl config set-context system:kube-scheduler@kubernetes \
     --cluster=kubernetes \
     --user=system:kube-scheduler \
     --kubeconfig=kube-scheduler.kubeconfig

# Switch to the new context
kubectl config use-context system:kube-scheduler@kubernetes \
     --kubeconfig=kube-scheduler.kubeconfig


# Create the kube-scheduler configuration file
tee kube-scheduler.conf << 'EOF'
KUBE_SCHEDULER_OPTS="--v=4 \
      --bind-address=127.0.0.1 \
      --leader-elect=true \
      --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig"
EOF


# Create the kube-scheduler systemd service file
tee kube-scheduler.service << 'EOF'
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/kube-scheduler.conf
ExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF


# Copy the kube-scheduler files into place
cp kube-scheduler*.pem /etc/kubernetes/pki/
cp kube-scheduler.kubeconfig /etc/kubernetes/
cp kube-scheduler.conf /etc/kubernetes/
cp kube-scheduler.service /usr/lib/systemd/system/


systemctl daemon-reload &&  systemctl enable kube-scheduler && systemctl start kube-scheduler && systemctl status kube-scheduler

2.9. Deploy kubectl

tee admin-csr.json << 'EOF'
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:masters",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

# Generate the admin certificate
cfssl gencert \
    -ca=ca.pem \
    -ca-key=ca-key.pem \
    -config=ca-config.json \
    -profile=kubernetes \
    admin-csr.json | cfssljson -bare admin


kubectl config set-cluster kubernetes \
  --certificate-authority=ca.pem \
  --embed-certs=true \
  --server=https://192.168.11.71:6443 \
  --kubeconfig=admin.kubeconfig

# Set the client credentials
kubectl config set-credentials kubernetes-admin \
  --client-certificate=admin.pem \
  --client-key=admin-key.pem \
  --embed-certs=true \
  --kubeconfig=admin.kubeconfig

# Set the context
kubectl config set-context kubernetes-admin@kubernetes \
  --cluster=kubernetes \
  --user=kubernetes-admin \
  --kubeconfig=admin.kubeconfig

# Switch to the new context
kubectl config use-context kubernetes-admin@kubernetes \
  --kubeconfig=admin.kubeconfig



mkdir -p /root/.kube; cp admin.kubeconfig /root/.kube/config
cp admin*.pem /etc/kubernetes/pki

# Enable kubectl bash completion (once is enough; the original steps repeated themselves)
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc

# Check cluster status
kubectl cluster-info
kubectl get componentstatuses
kubectl get all --all-namespaces

2.10. Deploy kubelet

cat > bootstrap.secret.yaml << EOF
apiVersion: v1
kind: Secret
metadata:
  name: bootstrap-token-c8ad9c
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  description: "The default bootstrap token generated by 'kubelet '."
  token-id: c8ad9c
  token-secret: 2e4d610cf3e7426e
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"
  auth-extra-groups:  system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
 
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-certificate-rotation
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kube-apiserver
EOF

kubectl config set-cluster kubernetes     \
  --certificate-authority=ca.pem     \
  --embed-certs=true  \
  --server=https://192.168.11.71:6443     \
  --kubeconfig=bootstrap-kubelet.kubeconfig

## The token is defined in bootstrap.secret.yaml; if you change it there, change it here as well
kubectl config set-credentials tls-bootstrap-token-user  \
  --token=c8ad9c.2e4d610cf3e7426e \
  --kubeconfig=bootstrap-kubelet.kubeconfig

kubectl config set-context tls-bootstrap-token-user@kubernetes     \
  --cluster=kubernetes     \
  --user=tls-bootstrap-token-user     \
  --kubeconfig=bootstrap-kubelet.kubeconfig

kubectl config use-context tls-bootstrap-token-user@kubernetes     \
  --kubeconfig=bootstrap-kubelet.kubeconfig
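
The token-id.token-secret pair must match bootstrap.secret.yaml exactly. If you would rather not use the hard-coded c8ad9c.2e4d610cf3e7426e, a valid pair can be generated like this (a sketch; remember to update both the Secret and the kubeconfig):

# token-id is 6 hex characters, token-secret is 16 hex characters
TOKEN_ID=$(openssl rand -hex 3)
TOKEN_SECRET=$(openssl rand -hex 8)
echo "${TOKEN_ID}.${TOKEN_SECRET}"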


cat > kubelet-conf.yml <<EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
EOF

# Create the kubelet systemd service file
tee kubelet.service << 'EOF'
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
  --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --config=/etc/kubernetes/kubelet-conf.yml \
  --container-runtime-endpoint=unix:///run/containerd/containerd.sock \
  --node-labels=node.kubernetes.io/node= \
  --v=8
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF


mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
cp kubelet-conf.yml /etc/kubernetes/
cp kubelet.service /usr/lib/systemd/system/
cp bootstrap-kubelet.kubeconfig /etc/kubernetes/


kubectl create -f bootstrap.secret.yaml


systemctl daemon-reload && systemctl enable kubelet &&  systemctl start kubelet && systemctl status kubelet

echo "sleep 20s" && sleep 20
kubectl get nodes -owide
kubectl get csr
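
If a CSR stays Pending instead of being auto-approved, first re-check the bootstrap RBAC bindings above; as a stopgap you can approve it by hand (the CSR name below is an example, use the name shown by kubectl get csr):

kubectl certificate approve csr-z56vc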

Sample output. Note that kubelet takes a little while to start; wait a minute or two and re-run these commands. If the output still does not look like this, something in the configuration is most likely wrong.

[root@k8s-master1 work]#
[root@k8s-master1 work]# kubectl get nodes -owide
NAME          STATUS   ROLES    AGE   VERSION    INTERNAL-IP     EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION              CONTAINER-RUNTIME
k8s-master1   Ready    <none>   53s   v1.21.14   192.168.11.71   <none>        CentOS Linux 7 (Core)   6.0.7-1.el7.elrepo.x86_64   containerd://1.6.10
[root@k8s-master1 work]# kubectl get csr
NAME        AGE     SIGNERNAME                                    REQUESTOR           CONDITION
csr-z56vc   2m12s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issued
[root@k8s-master1 work]#

2.11. Deploy kube-proxy

tee kube-proxy-csr.json << 'EOF'
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-proxy",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF


cfssl gencert \
   -ca=ca.pem \
   -ca-key=ca-key.pem \
   -config=ca-config.json \
   -profile=kubernetes \
   kube-proxy-csr.json | cfssljson -bare kube-proxy


kubectl config set-cluster kubernetes     \
  --certificate-authority=ca.pem     \
  --embed-certs=true     \
  --server=https://192.168.11.71:6443     \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy  \
  --client-certificate=kube-proxy.pem     \
  --client-key=kube-proxy-key.pem     \
  --embed-certs=true     \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context kube-proxy@kubernetes  \
  --cluster=kubernetes     \
  --user=kube-proxy     \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context kube-proxy@kubernetes \
  --kubeconfig=kube-proxy.kubeconfig


tee kube-proxy.yaml << 'EOF'
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: 172.16.0.0/12
configSyncPeriod: 15m0s
conntrack:
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
EOF


tee kube-proxy.service << 'EOF'
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \
  --config=/etc/kubernetes/kube-proxy.yaml \
  --v=8
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

EOF

mkdir -p /var/lib/kube-proxy
cp kube-proxy*.pem /etc/kubernetes/pki/
cp kube-proxy.kubeconfig kube-proxy.yaml /etc/kubernetes/
cp kube-proxy.service /usr/lib/systemd/system/


systemctl daemon-reload && systemctl enable kube-proxy && systemctl restart kube-proxy && systemctl status kube-proxy
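
To confirm kube-proxy actually came up in ipvs mode, you can query its metrics endpoint (port 10249 comes from the metricsBindAddress setting above), or list the virtual servers if ipvsadm is installed:

# Should print "ipvs"
curl -s 127.0.0.1:10249/proxyMode

# Optional; requires: yum install -y ipvsadm
ipvsadm -Ln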

2.12. Deploy the Calico Network Plugin

Note: some users have reported that on CentOS 7 libseccomp must be upgraded first, otherwise Calico fails to install.

curl https://ghproxy.com/https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/calico.yaml -O

sed -i 's/# - name: CALICO_IPV4POOL_CIDR/- name: CALICO_IPV4POOL_CIDR/g' calico.yaml
sed -i 's/#   value: "192.168.0.0\/16"/  value: "172.16.0.0\/12"/g' calico.yaml
sed -i 's/"type": "calico-ipam"/"type": "calico-ipam",\n              "assign_ipv4": "true"/g' calico.yaml
sed -i "s#docker.io/calico/#m.daocloud.io/docker.io/calico/#g" calico.yaml 

# Create the Calico components
kubectl apply -f calico.yaml

# Check whether the Calico pods were created successfully
kubectl get pods -n kube-system -owide

Sample output:

[root@k8s-master1 work]#
[root@k8s-master1 work]# kubectl apply -f calico.yaml
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
Warning: policy/v1beta1 PodDisruptionBudget is deprecated in v1.21+, unavailable in v1.25+; use policy/v1 PodDisruptionBudget
poddisruptionbudget.policy/calico-kube-controllers created
[root@k8s-master1 work]#
[root@k8s-master1 work]# kubectl get pods -n kube-system -owide
NAME                                       READY   STATUS    RESTARTS   AGE     IP              NODE        NOMINATED NODE   READINESS GATES
calico-kube-controllers-7cc8dd57d9-dpcqw   1/1     Running   0          3m16s   10.88.0.2       k8s-node1   <none>           <none>
calico-node-hr9f5                          1/1     Running   0          3m15s   192.168.11.74   k8s-node1   <none>           <none>
[root@k8s-master1 work]#

2.13. Install Metrics Server

cat <<EOF | kubectl create -f -
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - nodes/metrics
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
  - name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        - --kubelet-insecure-tls
        - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
        - --requestheader-username-headers=X-Remote-User
        - --requestheader-group-headers=X-Remote-Group
        - --requestheader-extra-headers-prefix=X-Remote-Extra-
        image: m.daocloud.io/registry.k8s.io/metrics-server/metrics-server:v0.6.3
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /livez
            port: https
            scheme: HTTPS
          periodSeconds: 10
        name: metrics-server
        ports:
        - containerPort: 4443
          name: https
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /readyz
            port: https
            scheme: HTTPS
          initialDelaySeconds: 20
          periodSeconds: 10
        resources:
          requests:
            cpu: 100m
            memory: 200Mi
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 1000
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
        - name: ca-ssl
          mountPath: /etc/kubernetes/pki
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
      - emptyDir: {}
        name: tmp-dir
      - name: ca-ssl
        hostPath:
          path: /etc/kubernetes/pki
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100
EOF
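
Once the metrics-server pod is Running, resource metrics should become available within a minute or so. A quick check:

# Verify that the metrics API is serving data
kubectl top nodes
kubectl top pods -n kube-system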

2.14. Cluster Verification

cat <<EOF | kubectl create -f -
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 3
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.25.1
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  selector:
    app: nginx
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
    nodePort: 30080
  type: NodePort
EOF

# Check the results
kubectl get pod
kubectl get svc
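
With the Deployment and Service created, the nginx welcome page should be reachable through the NodePort from any node (a sketch; any node IP works):

# Access the service through the NodePort
curl -s http://192.168.11.71:30080 | head -n 5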

2.15. Resolve the kubernetes Service in the Default Namespace from a Pod

kubectl get svc
#NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
#kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   17h
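
The DNS checks below run inside a busybox pod, which this guide has not created yet. If it does not exist, create one first (a sketch; busybox 1.28 is chosen because nslookup in newer busybox images is known to misbehave):

kubectl run busybox --image=docker.io/library/busybox:1.28 --restart=Never -- sleep 3600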

kubectl exec  busybox -n default -- nslookup kubernetes
#Server:    10.96.0.10
#Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

#Name:      kubernetes
#Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local

2.15.1. Test Cross-Namespace Resolution

kubectl exec  busybox -n default -- nslookup kube-dns.kube-system
#Server:    10.96.0.10
#Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

#Name:      kube-dns.kube-system
#Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

2.15.2. Every Node Must Reach the kubernetes Service on Port 443 and kube-dns on Port 53

telnet 10.96.0.1 443
Trying 10.96.0.1...
Connected to 10.96.0.1.
Escape character is '^]'.

telnet 10.96.0.10 53
Trying 10.96.0.10...
Connected to 10.96.0.10.
Escape character is '^]'.

curl 10.96.0.10:53
curl: (52) Empty reply from server

2.15.3. Pod-to-Pod Connectivity

kubectl get po -owide
NAME      READY   STATUS    RESTARTS   AGE   IP              NODE         NOMINATED NODE   READINESS GATES
busybox   1/1     Running   0          17m   172.27.14.193   k8s-node02   <none>           <none>

 kubectl get po -n kube-system -owide
NAME                                       READY   STATUS    RESTARTS      AGE   IP               NODE           NOMINATED NODE   READINESS GATES
calico-kube-controllers-5dffd5886b-4blh6   1/1     Running   0             77m   172.25.244.193   k8s-master01   <none>           <none>
calico-node-fvbdq                          1/1     Running   1 (75m ago)   77m   192.168.1.61     k8s-master01   <none>           <none>
calico-node-g8nqd                          1/1     Running   0             77m   192.168.1.64     k8s-node01     <none>           <none>
calico-node-mdps8                          1/1     Running   0             77m   192.168.1.65     k8s-node02     <none>           <none>
calico-node-nf4nt                          1/1     Running   0             77m   192.168.1.63     k8s-master03   <none>           <none>
calico-node-sq2ml                          1/1     Running   0             77m   192.168.1.62     k8s-master02   <none>           <none>
calico-typha-8445487f56-mg6p8              1/1     Running   0             77m   192.168.1.65     k8s-node02     <none>           <none>
calico-typha-8445487f56-pxbpj              1/1     Running   0             77m   192.168.1.61     k8s-master01   <none>           <none>
calico-typha-8445487f56-tnssl              1/1     Running   0             77m   192.168.1.64     k8s-node01     <none>           <none>
coredns-5db5696c7-67h79                    1/1     Running   0             63m   172.25.92.65     k8s-master02   <none>           <none>
metrics-server-6bf7dcd649-5fhrw            1/1     Running   0             61m   172.18.195.1     k8s-master03   <none>           <none>

# Exec into busybox and ping a pod on another node

kubectl exec -ti busybox -- sh
/ # ping 192.168.1.64
PING 192.168.1.64 (192.168.1.64): 56 data bytes
64 bytes from 192.168.1.64: seq=0 ttl=63 time=0.358 ms
64 bytes from 192.168.1.64: seq=1 ttl=63 time=0.668 ms
64 bytes from 192.168.1.64: seq=2 ttl=63 time=0.637 ms
64 bytes from 192.168.1.64: seq=3 ttl=63 time=0.624 ms
64 bytes from 192.168.1.64: seq=4 ttl=63 time=0.907 ms

# Successful pings show that this pod can communicate across namespaces and across hosts

2.15.4. Create Three Replicas and Verify They Are Spread Across Different Nodes

cat > deployments.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: docker.io/library/nginx:1.14.2
        ports:
        - containerPort: 80

EOF

kubectl  apply -f deployments.yaml 
deployment.apps/nginx-deployment created

kubectl  get pod 
NAME                               READY   STATUS    RESTARTS   AGE
busybox                            1/1     Running   0          6m25s
nginx-deployment-9456bbbf9-4bmvk   1/1     Running   0          8s
nginx-deployment-9456bbbf9-9rcdk   1/1     Running   0          8s
nginx-deployment-9456bbbf9-dqv8s   1/1     Running   0          8s

# Delete the nginx deployment

kubectl delete -f deployments.yaml