Kubernetes binary installation -- 1.30.3

1. Node planning

10.202.99.34 master01
10.202.99.35 master02
10.202.99.36 master03
10.202.99.37 node01
10.202.99.100 vip

2. Environment preparation

2.1. Disable the firewall, SELinux, swap, and NetworkManager

# Disable SELinux
## Temporarily
setenforce 0
## Permanently
sed -i 's/enforcing/disabled/' /etc/selinux/config

# Disable the firewall
systemctl disable --now firewalld


# Disable the swap partition
sed -ri 's/.*swap.*/#&/' /etc/fstab
swapoff -a

# Disable NetworkManager
systemctl stop NetworkManager
systemctl disable NetworkManager

2.2. Configure passwordless SSH between nodes and time synchronization

# Configure hostname resolution
# vim /etc/hosts
10.202.99.34 master01
10.202.99.35 master02
10.202.99.36 master03
10.202.99.37 node01

# Set up passwordless SSH between the nodes.
# Generate a key pair (accept the defaults)
ssh-keygen -t rsa -q -N ''

# Distribute the public key
for i in master01 master02 master03 node01;do ssh-copy-id -i ~/.ssh/id_rsa.pub $i;done
# Copy the whole key pair so every node can reach the others
for i in master02 master03 node01;do scp -r /root/.ssh/  $i:/root/;done
# Distribute the hosts file
for i in master02 master03 node01;do scp -r /etc/hosts  $i:/etc/;done

# Time synchronization
yum install chrony -y

# vim /etc/chrony.conf
server ntp1.aliyun.com iburst
server ntp2.aliyun.com iburst
server ntp3.aliyun.com iburst

systemctl start chronyd
systemctl enable chronyd
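
# Confirm chrony is syncing against the Aliyun servers; the currently selected
# source is marked with '^*':
chronyc sources -v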

# Configure the Aliyun repo for CentOS 7
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
# Configure the Aliyun EPEL repo
wget -O /etc/yum.repos.d/epel.repo https://mirrors.aliyun.com/repo/epel-7.repo
yum makecache

2.3. Kernel upgrade

# Download the kernel packages
# https://www.elrepo.org no longer serves RHEL 7 (only RHEL 8 and RHEL 9),
# so fetch the RPMs from the archive instead:
http://mirrors.coreix.net/elrepo-archive-archive/kernel/el7/x86_64/RPMS/
# A CentOS kernel upgrade needs three RPMs (download matching versions):
kernel
kernel-devel
kernel-headers

# Install the kernel RPMs
# Kernel package (kernel)
rpm -ivh kernel-lt-5.4.278-1.el7.elrepo.x86_64.rpm
# Kernel development package (kernel-devel)
rpm -ivh kernel-lt-devel-5.4.278-1.el7.elrepo.x86_64.rpm
# Kernel headers package (kernel-headers)
rpm -ivh kernel-lt-headers-5.4.278-1.el7.elrepo.x86_64.rpm

# Verify the boot entries
# List menu entries in boot order
awk -F\' '$1=="menuentry " {print $2}' /etc/grub2.cfg

CentOS Linux (5.4.278-1.el7.elrepo.x86_64) 7 (Core)
CentOS Linux (3.10.0-1160.el7.x86_64) 7 (Core)
CentOS Linux (0-rescue-a66a00f1a66a00f1a66a00f1a66a00f1) 7 (Core)

# Menu entries are indexed from 0, and a newly installed kernel is inserted at
# the top of the list, so 5.4.278 is entry 0 here; make it the default.
grub2-set-default 0
# Reboot the system
reboot
# Verify the running kernel
uname -r
# 5.4.278-1.el7.elrepo.x86_64

2.4. Install IPVS

# Install the IPVS tooling (conntrack-tools provides the conntrack CLI on CentOS 7)
yum install -y conntrack-tools ipvsadm ipset libseccomp

# Load the IPVS modules (note the escaped \$? so it is written literally into
# the script instead of expanding when the heredoc is created)
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
    /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
    if [ \$? -eq 0 ]; then
        /sbin/modprobe \${kernel_module}
    fi
done
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs

# Add the kernel parameters Kubernetes needs and apply them (all nodes).
# Write them to /etc/sysctl.d/k8s.conf as below.
# The bridge-nf-call sysctls require the br_netfilter module to be loaded first:
modprobe br_netfilter

cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory = 1
vm.panic_on_oom = 0
fs.inotify.max_user_watches = 89100
fs.file-max = 52706963
fs.nr_open = 52706963
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.netfilter.nf_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF

# Apply immediately
sysctl --system
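
# Spot-check that the parameters took effect; both keys below should print "= 1".
# If the bridge keys are reported as unknown, br_netfilter is not loaded.
sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables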

2.5. Install Docker and configure containerd

# Install Docker
# Step 1: install the required system tools
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
# Step 2: add the repository
sudo yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Step 3: point the repo at the Aliyun mirror
sudo sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
# Step 4: refresh the cache and install Docker CE
sudo yum makecache fast
sudo yum -y install docker-ce
# Step 5: start Docker and enable it at boot
systemctl start docker && systemctl enable docker

# Step 6: configure the Aliyun registry mirror (Aliyun console -> Container Registry -> mirror tools)
# and also add "exec-opts": ["native.cgroupdriver=systemd"]
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://d6mtathr.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
sudo systemctl daemon-reload && sudo systemctl restart docker && sudo systemctl status docker
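
# Confirm Docker picked up the systemd cgroup driver:
docker info 2>/dev/null | grep -i 'cgroup driver'
# Expected: Cgroup Driver: systemd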


# Modify the containerd configuration
# Apply the same change on the node(s) as well
# Back up the original file
cp /etc/containerd/config.toml /etc/containerd/config.toml.bak

#### containerd deprecated runtime v1 as of 1.4; configure runtime v2 as follows

cat <<EOF > /etc/containerd/config.toml
version = 2
[plugins]
  [plugins."io.containerd.grpc.v1.cri"]
    sandbox_image = "registry.cn-guangzhou.aliyuncs.com/my_aliyund/pause:v3.9"
    [plugins."io.containerd.grpc.v1.cri".cni]
      bin_dir = "/opt/cni/bin"
      conf_dir = "/etc/cni/net.d"
    [plugins."io.containerd.grpc.v1.cri".registry]
       config_path = "/etc/containerd/certs.d"
   [plugins."io.containerd.grpc.v1.cri".containerd]
      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
          runtime_type = "io.containerd.runc.v2"
          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
            SystemdCgroup = true
  [plugins."io.containerd.internal.v1.opt"]
    path = "/var/lib/containerd/opt"
EOF

sudo systemctl restart containerd && sudo systemctl status containerd && sudo systemctl enable containerd
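
# Optionally verify the rendered configuration (the dump shows the defaults
# merged with /etc/containerd/config.toml):
containerd config dump | grep -E 'SystemdCgroup|sandbox_image'
# Expect SystemdCgroup = true and the Aliyun pause image configured above.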


###### The kernel upgrade requires a reboot
# Reboot and check (the reboot can be deferred until after installing IPVS and Docker)
reboot

3. Cluster certificates

# The following commands only need to run on master01
# Install the certificate tooling (cfssl)
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64

# Make them executable
chmod a+x cfssljson_linux-amd64
chmod a+x cfssl_linux-amd64

# Move them to /usr/local/bin
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
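
# Sanity check the tools:
cfssl version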

3.1. Create the CA signing configuration

mkdir -p /opt/cert/ca
cd /opt/cert/ca

cat > /opt/cert/ca/ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
           "expiry": "876000h"
      }
    }
  }
}
EOF

3.2. Create the root certificate signing request

cat > /opt/cert/ca/ca-csr.json << EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names":[{
    "C": "CN",
    "ST": "GuangZhou",
    "L": "GuangZhou"
  }]
}
EOF

3.3. Generate the root CA certificate

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
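
# cfssljson -bare ca writes three files; confirm they exist before continuing:
ls ca.csr ca-key.pem ca.pem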

4. Install the etcd cluster

4.1. Node planning

10.202.99.34 etcd01
10.202.99.35 etcd02
10.202.99.36 etcd03

4.2. Create the etcd cluster certificate request

mkdir -p /opt/cert/etcd
cd /opt/cert/etcd

# hosts: the Kubernetes node IPs plus the VIP

cat > etcd-csr.json << EOF
{
    "CN": "etcd",
    "hosts": [
        "127.0.0.1",
        "10.202.99.34",
        "10.202.99.35",
        "10.202.99.36",
        "10.202.99.37",
        "10.202.99.100"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
          "C": "CN",
          "ST": "GuangZhou",
          "L": "GuangZhou"
        }
    ]
}
EOF

4.3. Generate the etcd certificate

cfssl gencert -ca=../ca/ca.pem -ca-key=../ca/ca-key.pem -config=../ca/ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd

4.4. Distribute the etcd certificates

# The etcd unit in section 4.6 reads its certificates from /etc/etcd/ssl,
# so the files generated above must be installed there on all three etcd nodes.
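
# A minimal distribution sketch, run from /opt/cert/etcd on master01 and assuming
# the /etc/etcd/ssl layout used by the systemd unit in section 4.6:
mkdir -pv /etc/etcd/ssl
cp ../ca/ca.pem etcd.pem etcd-key.pem /etc/etcd/ssl/
for i in master02 master03; do
    ssh root@$i "mkdir -pv /etc/etcd/ssl"
    scp /etc/etcd/ssl/* root@$i:/etc/etcd/ssl/
done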

4.5. Deploy the etcd cluster

# Download the etcd release
cd ~
wget https://mirrors.huaweicloud.com/etcd/v3.5.12/etcd-v3.5.12-linux-amd64.tar.gz

# Extract
tar -xf etcd-v3.5.12-linux-amd64.tar.gz

# Distribute to the other nodes
for i in master01 master02 master03
do
	scp ./etcd-v3.5.12-linux-amd64/etcd* root@$i:/usr/local/bin/
done

# Verify
etcd --version

4.6. Register the etcd service

# Run on all three master nodes; ETCD_NAME and INTERNAL_IP are derived from the
# local host, and INITIAL_CLUSTER lists all peers
mkdir -pv /etc/kubernetes/conf/etcd

ETCD_NAME=`hostname`
INTERNAL_IP=`hostname -i`
INITIAL_CLUSTER=master01=https://10.202.99.34:2380,master02=https://10.202.99.35:2380,master03=https://10.202.99.36:2380

cat << EOF | sudo tee /usr/lib/systemd/system/etcd.service
[Unit]
Description=etcd
Documentation=https://github.com/coreos

[Service]
ExecStart=/usr/local/bin/etcd \\
  --name ${ETCD_NAME} \\
  --cert-file=/etc/etcd/ssl/etcd.pem \\
  --key-file=/etc/etcd/ssl/etcd-key.pem \\
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \\
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \\
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \\
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \\
  --peer-client-cert-auth \\
  --client-cert-auth \\
  --initial-advertise-peer-urls https://${INTERNAL_IP}:2380 \\
  --listen-peer-urls https://${INTERNAL_IP}:2380 \\
  --listen-client-urls https://${INTERNAL_IP}:2379,https://127.0.0.1:2379 \\
  --advertise-client-urls https://${INTERNAL_IP}:2379 \\
  --initial-cluster-token etcd-cluster \\
  --initial-cluster ${INITIAL_CLUSTER} \\
  --initial-cluster-state new \\
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

# Start etcd on all three nodes (the cluster needs a quorum before it reports healthy)
systemctl enable --now etcd && systemctl status etcd

4.7. Test the etcd service

# Option 1: endpoint status (note --cacert must point at the CA certificate)
ETCDCTL_API=3 etcdctl \
--cacert=/etc/etcd/ssl/ca.pem \
--cert=/etc/etcd/ssl/etcd.pem \
--key=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://10.202.99.34:2379,https://10.202.99.35:2379,https://10.202.99.36:2379" \
endpoint status --write-out='table'

# Option 2: member list
ETCDCTL_API=3 etcdctl \
--cacert=/etc/etcd/ssl/ca.pem \
--cert=/etc/etcd/ssl/etcd.pem \
--key=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://10.202.99.34:2379,https://10.202.99.35:2379,https://10.202.99.36:2379" \
member list --write-out='table'
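
# A third quick probe, checking each member's health directly:
ETCDCTL_API=3 etcdctl \
--cacert=/etc/etcd/ssl/ca.pem \
--cert=/etc/etcd/ssl/etcd.pem \
--key=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://10.202.99.34:2379,https://10.202.99.35:2379,https://10.202.99.36:2379" \
endpoint health --write-out='table'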

5. Deploy the master nodes

# Deploy each control-plane component on the master nodes

5.1. Cluster planning

10.202.99.34 master01
10.202.99.35 master02
10.202.99.36 master03

# Components: kube-apiserver, controller-manager, scheduler, calico, etcd, kubelet, kube-proxy, DNS

5.2. Create certificates

5.2.1. Create the cluster CA certificate

# Run on master01 only
mkdir /opt/cert/k8s
cd /opt/cert/k8s

# Create the signing configuration
cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "876000h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

# Create the CA signing request

cat > ca-csr.json << EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "GuangZhou",
            "ST": "GuangZhou"
        }
    ]
}
EOF

# Generate the CA certificate
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

5.2.2. Create the cluster component certificates

5.2.2.1. Create the kube-apiserver certificate

cd /opt/cert/k8s
# api-server:

# hosts: the node IPs plus the VIP

cat > server-csr.json << EOF
{
    "CN": "kubernetes",
    "hosts": [
        "127.0.0.1",
        "10.202.99.34",
        "10.202.99.35",
        "10.202.99.36",
        "10.202.99.37",
        "10.202.99.100",
        "10.96.0.1",
        "kubernetes",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "GuangZhou",
            "ST": "GuangZhou"
        }
    ]
}
EOF


# Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server

5.2.2.2. Create the kube-controller-manager certificate
cat > kube-controller-manager-csr.json << EOF
 {
     "CN": "system:kube-controller-manager",
     "hosts": [
         "127.0.0.1",
         "10.202.99.34",
         "10.202.99.35",
         "10.202.99.36",
         "10.202.99.37",
         "10.202.99.100"
     ],
     "key": {
         "algo": "rsa",
         "size": 2048
     },
     "names": [
         {
             "C": "CN",
             "L": "GuangZhou",
             "ST": "GuangZhou",
             "O": "system:kube-controller-manager",
             "OU": "System"
         }
     ]
 }
EOF

# Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager

5.2.2.3. Create the kube-scheduler certificate
cat > kube-scheduler-csr.json << EOF
 {
     "CN": "system:kube-scheduler",
     "hosts": [
         "127.0.0.1",
         "10.202.99.34",
         "10.202.99.35",
         "10.202.99.36",
         "10.202.99.37",
         "10.202.99.100"
     ],
     "key": {
         "algo": "rsa",
         "size": 2048
     },
     "names": [
         {
             "C": "CN",
             "L": "GuangZhou",
             "ST": "GuangZhou",
             "O": "system:kube-scheduler",
             "OU": "System"
         }
     ]
 }
EOF

# Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler

5.2.2.4. Create the kube-proxy certificate
cat > kube-proxy-csr.json << EOF
 {
     "CN":"system:kube-proxy",
     "hosts":[],
     "key":{
         "algo":"rsa",
         "size":2048
     },
     "names":[
         {
             "C":"CN",
             "L":"GuangZhou",
             "ST":"GuangZhou",
             "O":"system:kube-proxy",
             "OU":"System"
         }
     ]
 }
EOF

# Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

5.2.2.5. Create the cluster administrator certificate
cat > admin-csr.json << EOF
 {
     "CN":"admin",
     "key":{
         "algo":"rsa",
         "size":2048
     },
     "names":[
         {
             "C":"CN",
             "L":"GuangZhou",
             "ST":"GuangZhou",
             "O":"system:masters",
             "OU":"System"
         }
     ]
 }
EOF

# Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

5.3. Distribute the certificates

mkdir -pv /etc/kubernetes/ssl
cp -p ./{ca*pem,server*pem,kube-controller-manager*pem,kube-scheduler*.pem,kube-proxy*pem,admin*.pem} /etc/kubernetes/ssl

for i in master01 master02 master03;do
ssh root@$i "mkdir -pv /etc/kubernetes/ssl"
scp /etc/kubernetes/ssl/* root@$i:/etc/kubernetes/ssl
done

6. Download the packages and create configuration files

6.1. Download and distribute the components

# Download the packages
## Download the server bundle
wget https://dl.k8s.io/v1.30.2/kubernetes-server-linux-amd64.tar.gz

## Distribute the components
tar -xf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin
for i in master01 master02 master03 ;do  scp kube-apiserver kube-controller-manager kube-proxy kubectl kubelet kube-scheduler root@$i:/usr/local/bin; done

6.2. Create the cluster configuration files

6.2.1. Create kube-controller-manager.kubeconfig

# Run on master01 only
mkdir /opt/conf
cd /opt/conf

## Create kube-controller-manager.kubeconfig
# KUBE_APISERVER is the VIP (haproxy frontend, port 8443)
export KUBE_APISERVER="https://10.202.99.100:8443"
 
# Set the cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-controller-manager.kubeconfig

# Set the client authentication parameters
kubectl config set-credentials "kube-controller-manager" \
  --client-certificate=/etc/kubernetes/ssl/kube-controller-manager.pem \
  --client-key=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-controller-manager.kubeconfig
    
# Set the context (binding the cluster and user together)
kubectl config set-context default \
  --cluster=kubernetes \
  --user="kube-controller-manager" \
  --kubeconfig=kube-controller-manager.kubeconfig

# Set the default context
kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig

6.2.2. Create kube-scheduler.kubeconfig

# Create kube-scheduler.kubeconfig
# KUBE_APISERVER is the VIP
export KUBE_APISERVER="https://10.202.99.100:8443"
    
# Set the cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-scheduler.kubeconfig
    
# Set the client authentication parameters
kubectl config set-credentials "kube-scheduler" \
  --client-certificate=/etc/kubernetes/ssl/kube-scheduler.pem \
  --client-key=/etc/kubernetes/ssl/kube-scheduler-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-scheduler.kubeconfig
    
# Set the context (binding the cluster and user together)
kubectl config set-context default \
  --cluster=kubernetes \
  --user="kube-scheduler" \
  --kubeconfig=kube-scheduler.kubeconfig
    
# Set the default context
kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig

6.2.3. Create kube-proxy.kubeconfig

## Create the kube-proxy.kubeconfig cluster configuration file
# KUBE_APISERVER is the VIP
export KUBE_APISERVER="https://10.202.99.100:8443"
    
# Set the cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig
    
# Set the client authentication parameters
kubectl config set-credentials "kube-proxy" \
  --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
  --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig
    
# Set the context (binding the cluster and user together)
kubectl config set-context default \
  --cluster=kubernetes \
  --user="kube-proxy" \
  --kubeconfig=kube-proxy.kubeconfig
    
# Set the default context
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

6.2.4. Create the cluster administrator configuration file

# KUBE_APISERVER is the VIP
export KUBE_APISERVER="https://10.202.99.100:8443"
    
# Set the cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=admin.kubeconfig
    
# Set the client authentication parameters
kubectl config set-credentials "admin" \
  --client-certificate=/etc/kubernetes/ssl/admin.pem \
  --client-key=/etc/kubernetes/ssl/admin-key.pem \
  --embed-certs=true \
  --kubeconfig=admin.kubeconfig
    
# Set the context (binding the cluster and user together)
kubectl config set-context default \
  --cluster=kubernetes \
  --user="admin" \
  --kubeconfig=admin.kubeconfig
    
# Set the default context
kubectl config use-context default --kubeconfig=admin.kubeconfig

6.3. Distribute the cluster configuration files

# Run from /opt/conf/

for i in master01 master02 master03; do
ssh root@$i  "mkdir -pv /etc/kubernetes/cfg"
scp ./*.kubeconfig root@$i:/etc/kubernetes/cfg
done

6.4. Create the cluster token

# Run from /opt/conf/
# Create the token only once
# You must use a token generated on your own machine
TLS_BOOTSTRAPPING_TOKEN=`head -c 16 /dev/urandom | od -An -t x | tr -d ' '`
    
cat > token.csv << EOF
${TLS_BOOTSTRAPPING_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF



# Example content of token.csv:
# 3b91c0489acd7bbb2214df2013f7af70,kubelet-bootstrap,10001,"system:kubelet-bootstrap"

# Distribute the cluster token, used for cluster TLS bootstrapping

for i in master01 master02 master03;do
scp token.csv root@$i:/etc/kubernetes/cfg/
done

7. Deploy the components

7.1. Install kube-apiserver

7.1.1. Create the kube-apiserver configuration file

# Run on all master nodes
KUBE_APISERVER_IP=`hostname -i`

cat > /etc/kubernetes/cfg/kube-apiserver.conf << EOF
KUBE_APISERVER_OPTS="--v=2 \\
--advertise-address=${KUBE_APISERVER_IP} \\
--default-not-ready-toleration-seconds=360 \\
--default-unreachable-toleration-seconds=360 \\
--max-mutating-requests-inflight=2000 \\
--max-requests-inflight=4000 \\
--default-watch-cache-size=200 \\
--delete-collection-workers=2 \\
--bind-address=0.0.0.0 \\
--secure-port=6443 \\
--allow-privileged=true \\
--service-cluster-ip-range=10.96.0.0/16 \\
--service-node-port-range=30000-52767 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/etc/kubernetes/cfg/token.csv \\
--kubelet-client-certificate=/etc/kubernetes/ssl/server.pem \\
--kubelet-client-key=/etc/kubernetes/ssl/server-key.pem \\
--tls-cert-file=/etc/kubernetes/ssl/server.pem  \\
--tls-private-key-file=/etc/kubernetes/ssl/server-key.pem \\
--client-ca-file=/etc/kubernetes/ssl/ca.pem \\
--service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--service-account-key-file=/etc/kubernetes/ssl/ca.pem \\
--service-account-issuer=api \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/var/log/kubernetes/k8s-audit.log \\
--etcd-servers=https://10.202.99.34:2379,https://10.202.99.35:2379,https://10.202.99.36:2379 \\
--etcd-cafile=/etc/etcd/ssl/ca.pem \\
--etcd-certfile=/etc/etcd/ssl/etcd.pem \\
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem"
EOF
7.1.2. Register the kube-apiserver service

# Run on all master nodes

cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-apiserver.conf
ExecStart=/usr/local/bin/kube-apiserver \$KUBE_APISERVER_OPTS
StandardOutput=file:/var/log/kubernetes/kube-apiserver.log
StandardError=file:/var/log/kubernetes/kube-apiserver.err
Restart=on-failure
RestartSec=10
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF



# The unit and the audit log write to /var/log/kubernetes, which must exist before the first start
mkdir -p /var/log/kubernetes

systemctl daemon-reload && systemctl restart kube-apiserver
systemctl status kube-apiserver && systemctl enable kube-apiserver
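
# Before the load balancer exists, each apiserver can be probed locally;
# /healthz should be readable without client credentials via the default
# system:public-info-viewer binding:
curl -k https://127.0.0.1:6443/healthz
# Expected output: ok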
7.1.3. Make kube-apiserver highly available

# Install the HA software (keepalived + haproxy) on all three master nodes
yum install -y keepalived haproxy

# Next, adjust the keepalived and haproxy configuration files as described below.
7.1.3.1. Configure keepalived

# The configuration differs per node: keep state MASTER / priority 100 on one
# node, and set state BACKUP with a lower priority (e.g. 90, 80) on the other two.
# "interface" must match the actual NIC name.

mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf_bak
cd /etc/keepalived
KUBE_APISERVER_IP=`hostname -i`
  
cat > /etc/keepalived/keepalived.conf <<EOF
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_kubernetes {
    script "/etc/keepalived/check_kubernetes.sh"
    interval 2
    weight -5
    fall 3
    rise 2
}
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    mcast_src_ip ${KUBE_APISERVER_IP}
    virtual_router_id 57
    priority 100
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        10.202.99.100
    }
}
EOF
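
# The vrrp_script above references /etc/keepalived/check_kubernetes.sh, which this
# guide does not otherwise create. A minimal sketch (assumption: it just probes the
# local apiserver's secure port; adapt as needed):
cat > /etc/keepalived/check_kubernetes.sh << 'EOF'
#!/bin/bash
# Exit non-zero (triggering the weight -5 priority drop) when the local
# kube-apiserver does not answer on its secure port.
curl -ks --max-time 3 https://127.0.0.1:6443/healthz >/dev/null || exit 1
EOF
chmod +x /etc/keepalived/check_kubernetes.sh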
  
systemctl enable --now keepalived && systemctl status keepalived
7.1.3.2. Configure haproxy

# haproxy load-balances the three apiservers behind port 8443 (same config on all masters)
cat > /etc/haproxy/haproxy.cfg <<EOF
  global
    maxconn  2000
    ulimit-n  16384
    log  127.0.0.1 local0 err
    stats timeout 30s
  
  defaults
    log global
    mode  http
    option  httplog
    timeout connect 5000
    timeout client  50000
    timeout server  50000
    timeout http-request 15s
    timeout http-keep-alive 15s
  
  frontend monitor-in
    bind *:33305
    mode http
    option httplog
    monitor-uri /monitor
  
  listen stats
    bind    *:8006
    mode    http
    stats   enable
    stats   hide-version
    stats   uri       /stats
    stats   refresh   30s
    stats   realm     Haproxy\ Statistics
    stats   auth      admin:admin
  
  frontend k8s-master
    bind 0.0.0.0:8443
    bind 127.0.0.1:8443
    mode tcp
    option tcplog
    tcp-request inspect-delay 5s
    default_backend k8s-master
  
  backend k8s-master
    mode tcp
    option tcplog
    option tcp-check
    balance roundrobin
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    server kubernetes-master-01    10.202.99.34:6443  check inter 2000 fall 2 rise 2 weight 100
    server kubernetes-master-02    10.202.99.35:6443  check inter 2000 fall 2 rise 2 weight 100
    server kubernetes-master-03    10.202.99.36:6443  check inter 2000 fall 2 rise 2 weight 100
EOF
   
systemctl enable --now haproxy.service && systemctl status haproxy
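
# With keepalived holding the VIP and haproxy forwarding 8443, the apiserver
# should now answer through the VIP:
curl -k https://10.202.99.100:8443/healthz
# Expected output: ok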

7.2. Deploy TLS bootstrapping

# Let the apiserver sign and issue kubelet certificates automatically

7.2.1. Create the cluster configuration file

# Run on one node only
export KUBE_APISERVER="https://10.202.99.100:8443"

# Set the cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kubelet-bootstrap.kubeconfig

# Set the client authentication parameters.
# IMPORTANT: the token here must be the one from your own token.csv (section 6.4).
kubectl config set-credentials "kubelet-bootstrap" \
  --token=fe425383cb799d3ff15a2b779c3e306c \
  --kubeconfig=kubelet-bootstrap.kubeconfig

# Set the context (binding the cluster and user together)
kubectl config set-context default \
  --cluster=kubernetes \
  --user="kubelet-bootstrap" \
  --kubeconfig=kubelet-bootstrap.kubeconfig

# Set the default context
kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig
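
# To avoid pasting the token by hand, it can be read from the token.csv that
# section 6.4 distributed to /etc/kubernetes/cfg/ (a sketch; rerun the
# set-credentials step with it):
BOOTSTRAP_TOKEN=$(awk -F, '{print $1}' /etc/kubernetes/cfg/token.csv)
kubectl config set-credentials "kubelet-bootstrap" \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=kubelet-bootstrap.kubeconfig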
7.2.2. Distribute the bootstrap kubeconfig

# Distribute the cluster configuration file
for i in master01 master02 master03; do
scp kubelet-bootstrap.kubeconfig root@$i:/etc/kubernetes/cfg/
done
7.2.3. Create the low-privilege TLS bootstrap user

# Point kubectl at the admin kubeconfig:
export KUBECONFIG=/etc/kubernetes/cfg/admin.kubeconfig
# Create a low-privilege user binding
kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap

7.3. Deploy kube-controller-manager

7.3.1. Edit the configuration file

# Run on all three master nodes
cat > /etc/kubernetes/cfg/kube-controller-manager.conf << EOF
KUBE_CONTROLLER_MANAGER_OPTS="--v=2 \\
--leader-elect=true \\
--cluster-name=kubernetes \\
--bind-address=127.0.0.1 \\
--cluster-cidr=10.244.0.0/16 \\
--service-cluster-ip-range=10.96.0.0/16 \\
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem  \\
--root-ca-file=/etc/kubernetes/ssl/ca.pem \\
--kubeconfig=/etc/kubernetes/cfg/kube-controller-manager.kubeconfig \\
--tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \\
--tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \\
--controllers=*,bootstrapsigner,tokencleaner \\
--use-service-account-credentials=true \\
--node-monitor-grace-period=10s"
EOF
7.3.2. Register the service

# Run on all three master nodes
cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-controller-manager.conf
ExecStart=/usr/local/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

# Start the service
systemctl daemon-reload && systemctl enable --now kube-controller-manager.service && systemctl status kube-controller-manager

7.4. Deploy kube-scheduler

7.4.1. Write the configuration file

# Run on all three master nodes
cat > /etc/kubernetes/cfg/kube-scheduler.conf << EOF
KUBE_SCHEDULER_OPTS="--v=2 \\
--kubeconfig=/etc/kubernetes/cfg/kube-scheduler.kubeconfig \\
--leader-elect=true \\
--bind-address=0.0.0.0"
EOF
7.4.2. Register the service

# Run on all three master nodes
cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-scheduler.conf
ExecStart=/usr/local/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF


## Start the service
systemctl daemon-reload
systemctl enable --now kube-scheduler.service
systemctl status kube-scheduler

7.5. Deploy the kubelet service

7.5.1. Create the kubelet configuration file

# Run on all three master nodes; replace the pause image with one you can pull
KUBE_HOSTNAME=`hostname`

cat > /etc/kubernetes/cfg/kubelet.conf << EOF
KUBELET_OPTS="--v=2 \\
--hostname-override=${KUBE_HOSTNAME} \\
--kubeconfig=/etc/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/etc/kubernetes/cfg/kubelet-bootstrap.kubeconfig \\
--config=/etc/kubernetes/cfg/kubelet-config.yml \\
--cert-dir=/etc/kubernetes/ssl \\
--pod-infra-container-image=registry.cn-guangzhou.aliyuncs.com/my_aliyund/pause:3.9 \\
--container-runtime-endpoint=unix:///run/containerd/containerd.sock"
EOF
7.5.2. Create kubelet-config.yml

# Run on all three master nodes
KUBE_HOSTNAME=`hostname -i`

cat > /etc/kubernetes/cfg/kubelet-config.yml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: ${KUBE_HOSTNAME}
port: 10250
readOnlyPort: 10255
cgroupDriver: systemd
clusterDNS:
- 10.96.0.2
clusterDomain: cluster.local
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/ssl/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF
7.5.3. Register the kubelet service

# Run on all three master nodes
cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
After=containerd.service

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kubelet.conf
ExecStart=/usr/local/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF



## Start the service
systemctl daemon-reload 
systemctl enable --now kubelet.service 
systemctl status kubelet

7.6. Deploy kube-proxy

7.6.1. Create the configuration file

# Run on all three master nodes
KUBE_HOSTNAME=`hostname -i`

cat > /etc/kubernetes/cfg/kube-proxy.conf << EOF
KUBE_PROXY_OPTS="--v=2 \\
--cluster-cidr=10.244.0.0/16 \\
--config=/etc/kubernetes/cfg/kube-proxy-config.yml"
EOF
7.6.2. Create kube-proxy-config.yml

# Run on all three master nodes
KUBE_HOSTNAME=`hostname -i`
HOSTNAME=`hostname`

cat > /etc/kubernetes/cfg/kube-proxy-config.yml << EOF
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: ${KUBE_HOSTNAME}
healthzBindAddress: ${KUBE_HOSTNAME}:10256
metricsBindAddress: ${KUBE_HOSTNAME}:10249
clientConnection:
  burst: 200
  kubeconfig: /etc/kubernetes/cfg/kube-proxy.kubeconfig
  qps: 100
hostnameOverride: ${HOSTNAME}
clusterCIDR: 10.244.0.0/16
enableProfiling: true
mode: "ipvs"
kubeProxyIPTablesConfiguration:
  masqueradeAll: false
kubeProxyIPVSConfiguration:
  scheduler: rr
  excludeCIDRs: []
EOF
7.6.3. Register the service

# Run on all three master nodes
cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-proxy.conf
ExecStart=/usr/local/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

### Start the service
systemctl daemon-reload
systemctl enable --now kube-proxy.service
systemctl status kube-proxy
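
# With kube-proxy running in ipvs mode, Service virtual servers should appear in
# the IPVS table; expect at least the kubernetes Service VIP 10.96.0.1:443:
ipvsadm -Ln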

8. Join the cluster nodes

8.1. View pending node join requests

# Run on one node only
kubectl get csr

# Example output:
NAME                                                   AGE     SIGNERNAME                                    REQUESTOR           REQUESTEDDURATION   CONDITION
node-csr--XQzkuoVY9gDu0_Vtn9ctH4J2-BNldjxawrR5Z2pqFo   2m56s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   <none>              Pending
node-csr-1NtmSLAqnIAD3qdRfGVV1rAiDjhFwx3u4r-sSStvTgo   2m56s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   <none>              Pending
node-csr-76uScdIZ0Nrgu_D4e6TQBcFZ8an6ZCGoaxCPrxtw5xM   2m56s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   <none>              Pending

8.2. Approve the requests

# Run on one node only
kubectl certificate approve `kubectl get csr | grep "Pending" | awk '{print $1}'`

# List the nodes
kubectl get no

9. Install the network plugin

# Install the calico plugin

9.1. Install calico

# Download calico.yaml
curl -L https://projectcalico.docs.tigera.io/manifests/calico.yaml -O

# List the images calico needs
grep image calico.yaml

# Point the images at a reachable mirror (the original sed expressions were
# missing the leading "s" and matched the wrong source for kube-controllers)
sed -i "s#docker.io/calico/kube-controllers:v3.28.1#registry.cn-guangzhou.aliyuncs.com/my_aliyund/calico-kube-controllers:v3.28.1#g" calico.yaml

sed -i "s#docker.io/calico/cni:v3.28.1#registry.cn-guangzhou.aliyuncs.com/my_aliyund/calico-cni:v3.28.1#g" calico.yaml

sed -i "s#docker.io/calico/node:v3.28.1#registry.cn-guangzhou.aliyuncs.com/my_aliyund/calico-node:v3.28.1#g" calico.yaml

# Verify the substitutions
grep image calico.yaml

## Set CALICO_IPV4POOL_CIDR in calico.yaml to the cluster pod CIDR
# (10.244.0.0/16, matching --cluster-cidr configured above)

# Deploy calico
kubectl apply -f calico.yaml

# Watch the calico pods
kubectl get pods -n kube-system

# If crictl prints the following warning:
# WARN[0000] image connect using default endpoints: [unix:///run/containerd/containerd.sock unix:///run/crio/crio.sock unix:///var/run/cri-dockerd.sock]. As the default settings are now deprecated, you should set the endpoint instead.

# Fix: pin the endpoint
export CRICTL_ENDPOINT=unix:///run/containerd/containerd.sock

cat << EOF > /etc/crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
EOF


####### If crictl is not installed
## Download the release (fetch it elsewhere and upload it if the download is slow)
wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.30.1/crictl-v1.30.1-linux-amd64.tar.gz

# Extract
tar -xf crictl-v1.30.1-linux-amd64.tar.gz

# Distribute to the other nodes
for i in master01 master02 master03; do 
    scp crictl root@$i:/usr/local/bin/;  
done

# Verify
crictl version

## If kubectl reports the following error:
# The connection to the server localhost:8080 was refused - did you specify the right host or port?
# Temporary fix: point KUBECONFIG at the admin kubeconfig
export KUBECONFIG=/etc/kubernetes/cfg/admin.kubeconfig
# Permanent fix: append the export to /etc/profile
cat << EOF >> /etc/profile
#### kubernetes
export KUBECONFIG=/etc/kubernetes/cfg/admin.kubeconfig
#### kubernetes
EOF

source /etc/profile

9.2. Install the cluster DNS

# Run on one node only
# Download the CoreDNS deployment bundle (save it under the name unzip expects)
# wget https://github.com/coredns/deployment/archive/refs/heads/master.zip -O deployment-master.zip

unzip deployment-master.zip
cd deployment-master/kubernetes/

# Change the image in coredns.yaml.sed to a registry you can pull from
# Deploy; the -i address must match clusterDNS in kubelet-config.yml
./deploy.sh -i 10.96.0.2 -s | kubectl apply -f -

# Verify the cluster DNS pod
kubectl get pods -A
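
# An end-to-end DNS check (busybox:1.28 is a commonly used test image for this;
# it must be pullable from the nodes):
kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default
# Expect kubernetes.default.svc.cluster.local to resolve to 10.96.0.1 via 10.96.0.2.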

10. Install the node

# Components a node needs: kubelet, kube-proxy, calico

10.1. Distribute the binaries, certificates, and configuration files

# On master01
## Distribute the binaries (run from the directory where the server bundle was extracted)

for i in node01;do scp kubernetes/server/bin/kubelet kubernetes/server/bin/kube-proxy root@$i:/usr/local/bin; done
 


## Distribute the certificates
for i in node01; do ssh root@$i "mkdir -pv /etc/kubernetes/ssl"; scp -pr /etc/kubernetes/ssl/{ca*.pem,admin*pem,kube-proxy*pem} root@$i:/etc/kubernetes/ssl; done



## Distribute the configuration files
# Distribute the etcd certificates
cd /etc/etcd/ssl
for i in node01;do ssh root@$i "mkdir -pv /etc/etcd/ssl"; scp ./*  root@$i:/etc/etcd/ssl; done

10.2. Deploy kubelet

# Distribute the kubelet configuration from master01

for i in node01;do 
    ssh root@$i "mkdir -pv  /etc/kubernetes/cfg";
    scp /etc/kubernetes/cfg/kubelet.conf root@$i:/etc/kubernetes/cfg/kubelet.conf; 
    scp /etc/kubernetes/cfg/kubelet-config.yml root@$i:/etc/kubernetes/cfg/kubelet-config.yml; 
    scp /usr/lib/systemd/system/kubelet.service root@$i:/usr/lib/systemd/system/kubelet.service; 
    scp /etc/kubernetes/cfg/kubelet.kubeconfig root@$i:/etc/kubernetes/cfg/kubelet.kubeconfig; 
    scp /etc/kubernetes/cfg/kubelet-bootstrap.kubeconfig root@$i:/etc/kubernetes/cfg/kubelet-bootstrap.kubeconfig; 
    scp /etc/kubernetes/cfg/token.csv root@$i:/etc/kubernetes/cfg/token.csv;
done

## On node01, adjust the IP and hostname in kubelet-config.yml and kubelet.conf
# Start kubelet
systemctl enable --now kubelet.service && systemctl status kubelet

10.3. Deploy kube-proxy

# Distribute the kube-proxy configuration from master01

for i in node01; do 
    scp /etc/kubernetes/cfg/kube-proxy.conf root@$i:/etc/kubernetes/cfg/kube-proxy.conf;  
    scp /etc/kubernetes/cfg/kube-proxy-config.yml root@$i:/etc/kubernetes/cfg/kube-proxy-config.yml ;  
    scp /usr/lib/systemd/system/kube-proxy.service root@$i:/usr/lib/systemd/system/kube-proxy.service;  
    scp /etc/kubernetes/cfg/kube-proxy.kubeconfig root@$i:/etc/kubernetes/cfg/kube-proxy.kubeconfig;    
done
# IMPORTANT: adjust the IP and hostname in kube-proxy-config.yml on node01

# Start kube-proxy
systemctl enable --now kube-proxy.service && systemctl status kube-proxy

10.4. Join the node

# Check the cluster component status
kubectl get cs

# View pending join requests
kubectl get csr

# Approve them
kubectl certificate approve `kubectl get csr | grep "Pending" | awk '{print $1}'`

# Check the join state
kubectl get csr

# List the joined nodes
kubectl get nodes

# Label the cluster roles
kubectl label nodes master01 node-role.kubernetes.io/master=master01
kubectl label nodes node01 node-role.kubernetes.io/node=node01