Deploying a Highly Available Kubernetes v1.20.6 Cluster from Binaries

All the files required for this installation are collected in one place; download them before you start.

1. Initialization

1.0 Install base packages

yum install -y epel-release yum-utils device-mapper-persistent-data lvm2 wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel vim ncurses-devel autoconf automake zlib-devel python-devel openssh-server socat ipvsadm conntrack ntpdate telnet

1.1 Configure a static IP

# edit /etc/sysconfig/network-scripts/ifcfg-ens33 so it reads as follows:
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static  # change this
IPADDR=192.168.186.128 # add this
NETMASK=255.255.255.0  # add this
GATEWAY=192.168.186.2  # add this
DNS1=192.168.186.2  # add this
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
DEVICE=ens33
ONBOOT=yes

# after editing the file, restart the network service so the change takes effect:
systemctl restart network

1.2 Configure hostnames

hostnamectl set-hostname master1 && bash  # run on master1
hostnamectl set-hostname master2 && bash  # run on master2
hostnamectl set-hostname master3 && bash  # run on master3
hostnamectl set-hostname node1 && bash    # run on node1

1.3 Configure the hosts file

cat >> /etc/hosts << EOF
192.168.186.128   master1
192.168.186.129   master2
192.168.186.130   master3
192.168.186.131   node1
EOF

1.4 Configure passwordless SSH between hosts (run the following on every machine)

ssh-keygen -t rsa    # press Enter through every prompt; do not set a passphrase
### install the local SSH public key into the matching account on each remote host
ssh-copy-id -i .ssh/id_rsa.pub master1
ssh-copy-id -i .ssh/id_rsa.pub master2
ssh-copy-id -i .ssh/id_rsa.pub master3
ssh-copy-id -i .ssh/id_rsa.pub node1
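A quick way to confirm key-based login works from the current host (a convenience check, not part of the original steps):

for h in master1 master2 master3 node1; do ssh -o BatchMode=yes $h hostname; done  # each host should print its own name without prompting for a password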

1.5 Disable the firewall, SELinux, and swap

# disable the firewall
systemctl stop firewalld
systemctl disable firewalld

# disable SELinux
sed -i 's/enforcing/disabled/' /etc/selinux/config # permanent (takes effect after reboot)
setenforce 0 # immediate, non-persistent

# disable swap
swapoff -a # immediate
sed -i 's/.*swap.*/#&/' /etc/fstab # permanent

1.6 Tune kernel parameters

# set the kernel parameters Kubernetes needs
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

modprobe br_netfilter # load the br_netfilter module
lsmod |grep br_netfilter # verify the module loaded
sysctl -p /etc/sysctl.d/k8s.conf  # apply the parameters set above
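br_netfilter is not reloaded automatically after a reboot. A minimal sketch, assuming a systemd-based CentOS 7 host, that loads the module at every boot and verifies the settings:

# load br_netfilter on every boot via systemd-modules-load
cat > /etc/modules-load.d/br_netfilter.conf << 'EOF'
br_netfilter
EOF
# verify: each value should print as 1
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward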

1.7 Configure the Aliyun repos

yum -y install yum-utils  lrzsz  wget  epel-release openssh-clients rsync
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

1.8 Configure time synchronization

yum install ntpdate -y
ntpdate cn.pool.ntp.org # sync once against the public pool
crontab -e  # turn the sync into a cron job
0 */1 * * * /usr/sbin/ntpdate cn.pool.ntp.org
systemctl restart crond # restart crond

1.9 Enable IPVS

cat > /etc/sysconfig/modules/ipvs.modules << 'EOF'
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in ${ipvs_modules}; do
  /sbin/modinfo -F filename ${kernel_module} > /dev/null 2>&1
  if [ $? -eq 0 ]; then
    /sbin/modprobe ${kernel_module}
  fi
done
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs

1.10 Install docker-ce

yum install docker-ce docker-ce-cli containerd.io -y 
systemctl start docker && systemctl enable docker.service && systemctl status docker

cat > /etc/docker/daemon.json << EOF
{
 "registry-mirrors":["https://rsbud4vc.mirror.aliyuncs.com","https://registry.docker-cn.com","https://docker.mirrors.ustc.edu.cn","https://dockerhub.azk8s.cn","http://hub-mirror.c.163.com","http://qtid6917.mirror.aliyuncs.com", "https://rncxm540.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
} 
EOF

systemctl daemon-reload
systemctl restart docker
systemctl enable docker
systemctl status docker
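Since kubelet.json later sets "cgroupDriver": "systemd", it is worth confirming that Docker actually picked up the native.cgroupdriver=systemd option (a quick check, not in the original steps):

docker info 2>/dev/null | grep -i "cgroup driver"  # expect: Cgroup Driver: systemd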

2. Set up the etcd cluster

2.1 Create the etcd working directories

# directories for the configuration files and certificates

# run on all 3 masters
mkdir -p /etc/etcd && mkdir -p /etc/etcd/ssl

2.2 Install the certificate tool cfssl

# run on all 3 master nodes
mkdir /data/work -p && cd /data/work/
## the binaries
[root@master1 work]# pwd
/data/work
[root@master1 work]# ll
total 352648
-rw-r--r--. 1 root root   6595195 May 13 20:37 cfssl-certinfo_linux-amd64
-rw-r--r--. 1 root root   2277873 May 13 20:37 cfssljson_linux-amd64
-rw-r--r--. 1 root root  10376657 May 13 20:37 cfssl_linux-amd64
-rw-r--r--. 1 root root 341851050 Jul 15 12:54 kubernetes-server-linux-amd64.tar.gz
# upload these 4 files to all 3 masters under /data/work:
### cfssl-certinfo_linux-amd64  cfssljson_linux-amd64  cfssl_linux-amd64  kubernetes-server-linux-amd64.tar.gz
chmod +x *
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo

2.3 Create the CA CSR (master1 only)

cd /data/work/
cat > ca-csr.json << EOF
{
  "CN": "kubernetes",
  "key": {
      "algo": "rsa",
      "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Hubei",
      "L": "Wuhan",
      "O": "k8s",
      "OU": "system"
    }
  ],
  "ca": {
          "expiry": "87600h"
  }
}
EOF

cfssl gencert -initca ca-csr.json  | cfssljson -bare ca

Note:
CN (Common Name): kube-apiserver extracts this field from a certificate and uses it as the requesting user name; browsers use it to validate a site. For an SSL certificate it is usually the site's domain name; for a code-signing certificate, the applicant organization; for a client certificate, the applicant's name.

O (Organization): kube-apiserver extracts this field and uses it as the group the requesting user belongs to. For an SSL certificate it is usually the site's domain name; for a code-signing certificate, the applicant organization; for a client certificate, the applicant's organization.

L: city
ST: state or province
C: two-letter country code only, e.g. CN for China
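To see how these fields land in the issued certificate, cfssl ships an inspector; run it against the CA generated above (output is JSON):

cfssl-certinfo -cert ca.pem  # shows the subject (CN, O, OU, ...), validity window, and SANs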

2.3.1 Create the CA signing config (master1 only)

cd /data/work/
cat > ca-config.json << EOF
{
  "signing": {
      "default": {
          "expiry": "87600h"
        },
      "profiles": {
          "kubernetes": {
              "usages": [
                  "signing",
                  "key encipherment",
                  "server auth",
                  "client auth"
              ],
              "expiry": "87600h"
          }
      }
  }
}
EOF

2.4 Generate the etcd certificate (master1 only)

# etcd certificate request; change the hosts entries to the IPs of your own etcd nodes

cd /data/work/
cat > etcd-csr.json << EOF
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.186.128",
    "192.168.186.129",
    "192.168.186.130",
    "192.168.186.199"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [{
    "C": "CN",
    "ST": "Hubei",
    "L": "Wuhan",
    "O": "k8s",
    "OU": "system"
  }]
} 
EOF
 
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson  -bare etcd

[root@master1 work]# ls etcd*.pem # two new files
etcd-key.pem  etcd.pem

The IPs in the hosts field above are the internal peer IPs of all etcd nodes; it is fine to reserve a few spare addresses for future expansion.

2.5 Deploy the etcd cluster

etcd downloads: every release is available from the project's releases page.

Upload etcd-v3.4.13-linux-amd64.tar.gz to /data/work.

# on master1
cd /data/work/
tar -xf etcd-v3.4.13-linux-amd64.tar.gz
cp -p etcd-v3.4.13-linux-amd64/etcd* /usr/local/bin/
# on master1
scp -r  etcd-v3.4.13-linux-amd64/etcd* master2:/usr/local/bin/
scp -r  etcd-v3.4.13-linux-amd64/etcd* master3:/usr/local/bin/

2.5.1 Create the configuration file

# on master1
cat > etcd.conf << EOF 
#[Member]
ETCD_NAME="etcd1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.186.128:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.186.128:2379,http://127.0.0.1:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.186.128:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.186.128:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.186.128:2380,etcd2=https://192.168.186.129:2380,etcd3=https://192.168.186.130:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

# Notes:
ETCD_NAME: node name, unique within the cluster
ETCD_DATA_DIR: data directory
ETCD_LISTEN_PEER_URLS: peer (cluster) listen address
ETCD_LISTEN_CLIENT_URLS: client listen address
ETCD_INITIAL_ADVERTISE_PEER_URLS: advertised peer address
ETCD_ADVERTISE_CLIENT_URLS: advertised client address
ETCD_INITIAL_CLUSTER: cluster member list
ETCD_INITIAL_CLUSTER_TOKEN: cluster token
ETCD_INITIAL_CLUSTER_STATE: "new" bootstraps a new cluster, "existing" joins one that already exists

2.5.2 Create the systemd unit file

# on master1
cat > etcd.service << 'EOF'
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=-/etc/etcd/etcd.conf
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --peer-client-cert-auth \
  --client-cert-auth
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# on master1
cp ca*.pem /etc/etcd/ssl/
cp etcd*.pem /etc/etcd/ssl/
cp etcd.conf /etc/etcd/
cp etcd.service /usr/lib/systemd/system/
# on master1
for i in master2 master3;do rsync -vaz etcd.conf $i:/etc/etcd/;done
for i in master2 master3;do rsync -vaz etcd*.pem ca*.pem $i:/etc/etcd/ssl/;done
for i in master2 master3;do rsync -vaz etcd.service $i:/usr/lib/systemd/system/;done

2.5.3 Create etcd.conf on master2 and master3

# create the data directory (run on all 3 masters)
cd /data/work/ && mkdir -p /var/lib/etcd/default.etcd

# on master2, write its own etcd.conf:

cat >  /etc/etcd/etcd.conf << EOF
#[Member]
ETCD_NAME="etcd2"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.186.129:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.186.129:2379,http://127.0.0.1:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.186.129:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.186.129:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.186.128:2380,etcd2=https://192.168.186.129:2380,etcd3=https://192.168.186.130:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

# on master3, write its own etcd.conf:
cd /data/work/ && mkdir -p /var/lib/etcd/default.etcd

cat >  /etc/etcd/etcd.conf << EOF
#[Member]
ETCD_NAME="etcd3"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.186.130:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.186.130:2379,http://127.0.0.1:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.186.130:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.186.130:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.186.128:2380,etcd2=https://192.168.186.129:2380,etcd3=https://192.168.186.130:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

2.5.4 Start etcd on all three masters

When starting etcd, start the service on master3 first; it will appear stuck in the starting state (it is simply waiting for enough peers to form a quorum). Then start etcd on master1 and master2, and master3's etcd will come up normally.

systemctl daemon-reload
systemctl enable etcd.service
systemctl start etcd.service
systemctl status etcd
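If passwordless SSH (section 1.4) is in place, all three members can be started nearly at once from master1, which avoids the hang described above; a convenience one-liner, not part of the original procedure:

for h in master1 master2 master3; do ssh $h "systemctl daemon-reload && systemctl enable --now etcd" & done; wait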

2.5.5 Check the etcd cluster

ETCDCTL_API=3  /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.186.128:2379,https://192.168.186.129:2379,https://192.168.186.130:2379  endpoint health
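endpoint health only reports reachability; endpoint status and member list (same TLS flags) additionally show the leader and the member set. An optional extra check:

ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.186.128:2379,https://192.168.186.129:2379,https://192.168.186.130:2379 endpoint status
ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.186.128:2379 member list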

3. Install the Kubernetes components

3.1 Download the packages

The binary packages are published on GitHub:
https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/ # pick the release you need; the kubernetes-server-linux-amd64.tar.gz bundle is all that is required
https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.23.md
https://dl.k8s.io/v1.23.6/kubernetes-server-linux-amd64.tar.gz # download link for v1.23.6

# upload kubernetes-server-linux-amd64.tar.gz to /data/work on master1

# on master1
cd /data/work
tar -zxvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin/
cp kube-apiserver kube-controller-manager kube-scheduler kubectl /usr/local/bin/

# on master1
cd /data/work/kubernetes/server/bin/
rsync -vaz kube-apiserver kube-controller-manager kube-scheduler kubectl master2:/usr/local/bin/
rsync -vaz kube-apiserver kube-controller-manager kube-scheduler kubectl master3:/usr/local/bin/
scp kubelet kube-proxy  node1:/usr/local/bin/

cd /data/work/
mkdir -p /etc/kubernetes/ 
mkdir -p /etc/kubernetes/ssl
mkdir /var/log/kubernetes

3.2 Deploy kube-apiserver

3.2.1 Create token.csv

# on master1
cd /data/work/
cat  > token.csv  <<  EOF
$(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

# format: token,user name,UID,group
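The file should contain exactly one line in that shape; a quick look (the 32-hex-character token itself is random):

cat token.csv  # e.g. <32 hex chars>,kubelet-bootstrap,10001,"system:kubelet-bootstrap"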

3.2.2 Create the CSR request file (replace the IPs with your own)

# on master1
cd /data/work/
cat >  kube-apiserver-csr.json << EOF
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.186.128",
    "192.168.186.129",
    "192.168.186.130",
    "192.168.186.131",
    "192.168.186.199",
    "10.255.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Hubei",
      "L": "Wuhan",
      "O": "k8s",
      "OU": "system"
    }
  ]
}
EOF

# Note: if the hosts field is non-empty, it must list every IP or domain name authorized to use the certificate. Since this certificate serves the whole master cluster, include the IP of every master node plus the first IP of the service network (the first address of the --service-cluster-ip-range passed to kube-apiserver, here 10.255.0.1).

3.2.3 Generate the certificate

# on master1
cd /data/work/
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiserver

# create the kube-apiserver config file; substitute your own IPs

cat >  kube-apiserver.conf  << 'EOF'
KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
  --anonymous-auth=false \
  --bind-address=192.168.186.128 \
  --secure-port=6443 \
  --advertise-address=192.168.186.128 \
  --insecure-port=0 \
  --authorization-mode=Node,RBAC \
  --runtime-config=api/all=true \
  --enable-bootstrap-token-auth \
  --service-cluster-ip-range=10.255.0.0/16 \
  --token-auth-file=/etc/kubernetes/token.csv \
  --service-node-port-range=30000-50000 \
  --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem  \
  --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \
  --client-ca-file=/etc/kubernetes/ssl/ca.pem \
  --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \
  --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \
  --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem  \
  --service-account-issuer=https://kubernetes.default.svc.cluster.local \
  --etcd-cafile=/etc/etcd/ssl/ca.pem \
  --etcd-certfile=/etc/etcd/ssl/etcd.pem \
  --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
  --etcd-servers=https://192.168.186.128:2379,https://192.168.186.129:2379,https://192.168.186.130:2379 \
  --enable-swagger-ui=true \
  --allow-privileged=true \
  --apiserver-count=3 \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/var/log/kube-apiserver-audit.log \
  --event-ttl=1h \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=4"
EOF
  

# Notes:
--logtostderr: log to stderr
--v: log verbosity
--log-dir: log directory
--etcd-servers: etcd cluster endpoints
--bind-address: listen address
--secure-port: HTTPS port
--advertise-address: address advertised to the cluster
--allow-privileged: allow privileged containers
--service-cluster-ip-range: virtual IP range for Services
--enable-admission-plugins: admission-control plugins
--authorization-mode: authorization modes; enables RBAC and node self-management
--enable-bootstrap-token-auth: enable the TLS bootstrap mechanism
--token-auth-file: bootstrap token file
--service-node-port-range: port range allocated to NodePort Services
--kubelet-client-xxx: client certificate the apiserver uses to reach kubelets
--tls-xxx-file: apiserver HTTPS certificate
--etcd-xxxfile: certificates for connecting to the etcd cluster
--audit-log-xxx: audit logging

3.2.4 Create the systemd unit file

# on master1
cd /data/work/
cat > kube-apiserver.service << 'EOF'
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=etcd.service
Wants=etcd.service
 
[Service]
EnvironmentFile=-/etc/kubernetes/kube-apiserver.conf
ExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536
 
[Install]
WantedBy=multi-user.target
EOF

# on master1
cp ca*.pem /etc/kubernetes/ssl
cp kube-apiserver*.pem /etc/kubernetes/ssl/
cp token.csv /etc/kubernetes/
cp kube-apiserver.conf /etc/kubernetes/
cp kube-apiserver.service /usr/lib/systemd/system/

# on master1
rsync -vaz token.csv master2:/etc/kubernetes/
rsync -vaz kube-apiserver*.pem master2:/etc/kubernetes/ssl/
rsync -vaz ca*.pem master2:/etc/kubernetes/ssl/
rsync -vaz kube-apiserver.conf master2:/etc/kubernetes/
rsync -vaz kube-apiserver.service master2:/usr/lib/systemd/system/

# on master1
rsync -vaz token.csv master3:/etc/kubernetes/
rsync -vaz kube-apiserver*.pem master3:/etc/kubernetes/ssl/
rsync -vaz kube-apiserver.conf master3:/etc/kubernetes/
rsync -vaz ca*.pem master3:/etc/kubernetes/ssl/
rsync -vaz kube-apiserver.service master3:/usr/lib/systemd/system/

Note: in kube-apiserver.conf on master2 and master3, change the IP addresses to each host's actual local IP.

# on master2
cat > /etc/kubernetes/kube-apiserver.conf << 'EOF'
KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
  --anonymous-auth=false \
  --bind-address=192.168.186.129 \
  --secure-port=6443 \
  --advertise-address=192.168.186.129 \
  --insecure-port=0 \
  --authorization-mode=Node,RBAC \
  --runtime-config=api/all=true \
  --enable-bootstrap-token-auth \
  --service-cluster-ip-range=10.255.0.0/16 \
  --token-auth-file=/etc/kubernetes/token.csv \
  --service-node-port-range=30000-50000 \
  --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem  \
  --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \
  --client-ca-file=/etc/kubernetes/ssl/ca.pem \
  --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \
  --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \
  --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem  \
  --service-account-issuer=https://kubernetes.default.svc.cluster.local \
  --etcd-cafile=/etc/etcd/ssl/ca.pem \
  --etcd-certfile=/etc/etcd/ssl/etcd.pem \
  --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
  --etcd-servers=https://192.168.186.128:2379,https://192.168.186.129:2379,https://192.168.186.130:2379 \
  --enable-swagger-ui=true \
  --allow-privileged=true \
  --apiserver-count=3 \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/var/log/kube-apiserver-audit.log \
  --event-ttl=1h \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=4"
EOF
  
## on master3
cat > /etc/kubernetes/kube-apiserver.conf << 'EOF'
KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
  --anonymous-auth=false \
  --bind-address=192.168.186.130 \
  --secure-port=6443 \
  --advertise-address=192.168.186.130 \
  --insecure-port=0 \
  --authorization-mode=Node,RBAC \
  --runtime-config=api/all=true \
  --enable-bootstrap-token-auth \
  --service-cluster-ip-range=10.255.0.0/16 \
  --token-auth-file=/etc/kubernetes/token.csv \
  --service-node-port-range=30000-50000 \
  --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem  \
  --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \
  --client-ca-file=/etc/kubernetes/ssl/ca.pem \
  --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \
  --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \
  --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem  \
  --service-account-issuer=https://kubernetes.default.svc.cluster.local \
  --etcd-cafile=/etc/etcd/ssl/ca.pem \
  --etcd-certfile=/etc/etcd/ssl/etcd.pem \
  --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
  --etcd-servers=https://192.168.186.128:2379,https://192.168.186.129:2379,https://192.168.186.130:2379 \
  --enable-swagger-ui=true \
  --allow-privileged=true \
  --apiserver-count=3 \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/var/log/kube-apiserver-audit.log \
  --event-ttl=1h \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=4"
EOF

##### run on all 3 masters
chmod +x  /usr/lib/systemd/system/kube-apiserver.service
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver  # if it fails to start, check that /usr/lib/systemd/system/kube-apiserver.service still contains $KUBE_APISERVER_OPTS on the ExecStart line
systemctl status kube-apiserver
curl --insecure https://192.168.186.128:6443/  # a 401 here is the normal, healthy state: nothing is authenticated yet
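With --anonymous-auth=false the apiserver answers the unauthenticated curl with a JSON Status object roughly like the following (exact wording may differ by version):

{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {},
  "status": "Failure",
  "message": "Unauthorized",
  "reason": "Unauthorized",
  "code": 401
}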

3.3 Deploy kubectl

3.3.1 Create the CSR request file

# on master1
cat > admin-csr.json << EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Hubei",
      "L": "Wuhan",
      "O": "system:masters",             
      "OU": "system"
    }
  ]
}
EOF

3.3.2 Generate the certificate

# on master1
cd /data/work/
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

cp admin*.pem /etc/kubernetes/ssl/

3.3.3 Configure the security context

# create the kubeconfig file; this step matters
kubeconfig is kubectl's configuration file and contains everything needed to reach the apiserver: the apiserver address, the CA certificate, and kubectl's own client certificate. (If a later step reports that the kubeconfig path cannot be found, copy the file to the expected path manually; otherwise ignore this.)

3.3.3.1 Set the cluster parameters

# on master1
cd /data/work/
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.186.128:6443 --kubeconfig=kube.config

# set the client authentication parameters
kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=kube.config

# set the context parameters
kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=kube.config


# set the current context
kubectl config use-context kubernetes --kubeconfig=kube.config
mkdir ~/.kube -p && cp kube.config ~/.kube/config

# grant the "kubernetes" certificate user access to the kubelet API
kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes

3.3.4 Check the cluster component status

kubectl cluster-info
kubectl get componentstatuses
kubectl get all --all-namespaces

[root@master1 work]# kubectl cluster-info
Kubernetes control plane is running at https://192.168.186.128:6443

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
[root@master1 work]# kubectl get componentstatuses
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS      MESSAGE                                                                                       ERROR
controller-manager   Unhealthy   Get "http://127.0.0.1:10252/healthz": dial tcp 127.0.0.1:10252: connect: connection refused   
scheduler            Unhealthy   Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused   
etcd-1               Healthy     {"health":"true"}                                                                             
etcd-0               Healthy     {"health":"true"}                                                                             
etcd-2               Healthy     {"health":"true"}                                                                             
[root@master1 work]# kubectl get all --all-namespaces
NAMESPACE   NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
default     service/kubernetes   ClusterIP   10.255.0.1   <none>        443/TCP   8m57s
[root@master1 work]# 

3.3.5 Sync the kubectl config to the other masters

## on master2 and master3
mkdir /root/.kube/
# on master1
rsync -vaz /root/.kube/config master2:/root/.kube/
rsync -vaz /root/.kube/config master3:/root/.kube/

3.3.6 Configure kubectl command completion

# install on all 3 masters
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
kubectl completion bash > ~/.kube/completion.bash.inc
source ~/.kube/completion.bash.inc
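To keep completion across logins, append the source line to root's profile (a small addition following the pattern in the upstream kubectl documentation):

echo "source '$HOME/.kube/completion.bash.inc'" >> $HOME/.bash_profile
source $HOME/.bash_profile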

3.4 Deploy kube-controller-manager

3.4.1 Create the CSR request file

# run on master1
cd /data/work/
cat >  kube-controller-manager-csr.json  << EOF
{
    "CN": "system:kube-controller-manager",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "hosts": [
      "127.0.0.1",
      "192.168.186.128",
      "192.168.186.129",
      "192.168.186.130",
      "192.168.186.199"
    ],
    "names": [
      {
        "C": "CN",
        "ST": "Hubei",
        "L": "Wuhan",
        "O": "system:kube-controller-manager",
        "OU": "system"
      }
    ]
}
EOF

Note: the hosts list contains the IPs of all kube-controller-manager nodes. CN is system:kube-controller-manager and O is system:kube-controller-manager; Kubernetes' built-in ClusterRoleBinding system:kube-controller-manager grants the permissions the controller manager needs.

3.4.2 Generate the certificate

# run on master1
# generate the certificate
cd /data/work/
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager

# create the kubeconfig for kube-controller-manager

3.4.2.1 Set the cluster parameters

# run on master1
# set the cluster parameters
cd /data/work/
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.186.128:6443 --kubeconfig=kube-controller-manager.kubeconfig

# set the client authentication parameters
kubectl config set-credentials system:kube-controller-manager --client-certificate=kube-controller-manager.pem --client-key=kube-controller-manager-key.pem --embed-certs=true --kubeconfig=kube-controller-manager.kubeconfig

# set the context parameters
kubectl config set-context system:kube-controller-manager --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig

# set the current context
kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig

3.4.2.2 Create kube-controller-manager.conf

# run on master1
cd /data/work/
cat > kube-controller-manager.conf  << 'EOF'
KUBE_CONTROLLER_MANAGER_OPTS="--port=0 \
  --secure-port=10252 \
  --bind-address=127.0.0.1 \
  --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
  --service-cluster-ip-range=10.255.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --allocate-node-cidrs=true \
  --cluster-cidr=10.0.0.0/16 \
  --experimental-cluster-signing-duration=87600h \
  --root-ca-file=/etc/kubernetes/ssl/ca.pem \
  --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --leader-elect=true \
  --feature-gates=RotateKubeletServerCertificate=true \
  --controllers=*,bootstrapsigner,tokencleaner \
  --horizontal-pod-autoscaler-use-rest-clients=true \
  --horizontal-pod-autoscaler-sync-period=10s \
  --tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \
  --tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
  --use-service-account-credentials=true \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2"
EOF

3.4.2.3 Create the systemd unit file

# run on master1
cd /data/work/
cat >  kube-controller-manager.service << 'EOF'
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/kube-controller-manager.conf
ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF

3.4.2.4 Distribute the files

# run on master1
cd /data/work/
cp kube-controller-manager*.pem /etc/kubernetes/ssl/
cp kube-controller-manager.kubeconfig /etc/kubernetes/
cp kube-controller-manager.conf /etc/kubernetes/
cp kube-controller-manager.service /usr/lib/systemd/system/
# run on master1
rsync -vaz kube-controller-manager*.pem master2:/etc/kubernetes/ssl/
rsync -vaz kube-controller-manager.kubeconfig kube-controller-manager.conf master2:/etc/kubernetes/
rsync -vaz kube-controller-manager.service master2:/usr/lib/systemd/system/
# run on master1
rsync -vaz kube-controller-manager*.pem master3:/etc/kubernetes/ssl/
rsync -vaz kube-controller-manager.kubeconfig kube-controller-manager.conf master3:/etc/kubernetes/
rsync -vaz kube-controller-manager.service master3:/usr/lib/systemd/system/

3.4.2.5 Start kube-controller-manager on all three masters

# start the service on master1, master2, and master3
systemctl daemon-reload 
systemctl enable kube-controller-manager
systemctl start kube-controller-manager # if it fails to start, check that /usr/lib/systemd/system/kube-controller-manager.service still contains $KUBE_CONTROLLER_MANAGER_OPTS
systemctl status kube-controller-manager

3.5 Deploy kube-scheduler

3.5.1 Create the CSR request

# run on master1
cd /data/work/
cat > kube-scheduler-csr.json << EOF
{
    "CN": "system:kube-scheduler",
    "hosts": [
      "127.0.0.1",
      "192.168.186.128",
      "192.168.186.129",
      "192.168.186.130",
      "192.168.186.131",
      "192.168.186.199"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
      {
        "C": "CN",
        "ST": "Hubei",
        "L": "Wuhan",
        "O": "system:kube-scheduler",
        "OU": "system"
      }
    ]
}
EOF

Note: the hosts list contains the IPs of all kube-scheduler nodes. CN is system:kube-scheduler and O is system:kube-scheduler; Kubernetes' built-in ClusterRoleBinding system:kube-scheduler grants the permissions the scheduler needs.

3.5.2 Generate the certificate

# run on master1
cd /data/work/
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler

# create the kubeconfig for kube-scheduler

3.5.2.1 Set the cluster parameters

# run on master1
cd /data/work/
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.186.128:6443 --kubeconfig=kube-scheduler.kubeconfig

# set the client authentication parameters
kubectl config set-credentials system:kube-scheduler --client-certificate=kube-scheduler.pem --client-key=kube-scheduler-key.pem --embed-certs=true --kubeconfig=kube-scheduler.kubeconfig

# set the context parameters
kubectl config set-context system:kube-scheduler --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig


# set the current context
kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig

3.5.2.2 Create kube-scheduler.conf

# run on master1
cd /data/work/
cat >  kube-scheduler.conf  << 'EOF'
KUBE_SCHEDULER_OPTS="--address=127.0.0.1 \
--kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
--leader-elect=true \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=2"
EOF

3.5.2.3 Create the systemd unit file

# run on master1
cd /data/work/
cat >  kube-scheduler.service  << 'EOF'
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
 
[Service]
EnvironmentFile=-/etc/kubernetes/kube-scheduler.conf
ExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5
 
[Install]
WantedBy=multi-user.target
EOF

3.5.2.4 Distribute the files

# run on master1
cp kube-scheduler*.pem /etc/kubernetes/ssl/
cp kube-scheduler.kubeconfig /etc/kubernetes/
cp kube-scheduler.conf /etc/kubernetes/
cp kube-scheduler.service /usr/lib/systemd/system/
# run on master1
rsync -vaz kube-scheduler*.pem master2:/etc/kubernetes/ssl/
rsync -vaz kube-scheduler.kubeconfig kube-scheduler.conf master2:/etc/kubernetes/
rsync -vaz kube-scheduler.service master2:/usr/lib/systemd/system/
# run on master1
rsync -vaz kube-scheduler*.pem master3:/etc/kubernetes/ssl/
rsync -vaz kube-scheduler.kubeconfig kube-scheduler.conf master3:/etc/kubernetes/
rsync -vaz kube-scheduler.service master3:/usr/lib/systemd/system/

3.5.2.5 Start kube-scheduler on all three masters

# start the service on master1, master2, and master3
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler # if it fails to start, check that /usr/lib/systemd/system/kube-scheduler.service still contains $KUBE_SCHEDULER_OPTS
systemctl status kube-scheduler

3.6 Deploy kubelet

3.6.0 Upload pause-cordns.tar.gz to node1 and load the images

# on node1
docker load -i pause-cordns.tar.gz

3.6.1 Create kubelet-bootstrap.kubeconfig

# run on master1
cd /data/work/
BOOTSTRAP_TOKEN=$(awk -F "," '{print $1}' /etc/kubernetes/token.csv)

rm -f kubelet-bootstrap.kubeconfig  # remove any stale copy from an earlier attempt
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.186.128:6443 --kubeconfig=kubelet-bootstrap.kubeconfig
kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=kubelet-bootstrap.kubeconfig

kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig

kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig

kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
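A quick sanity check that the bootstrap kubeconfig points at the right apiserver and carries the token (kubectl redacts the token in this view):

kubectl config view --kubeconfig=kubelet-bootstrap.kubeconfig  # server should be https://192.168.186.128:6443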

3.6.2 Create the kubelet.json configuration file

"cgroupDriver": "systemd" must match Docker's cgroup driver. Replace address with node1's own IP address.

# run on master1
cd /data/work/
cat >  kubelet.json  << EOF
{
  "kind": "KubeletConfiguration",
  "apiVersion": "kubelet.config.k8s.io/v1beta1",
  "authentication": {
    "x509": {
      "clientCAFile": "/etc/kubernetes/ssl/ca.pem"
    },
    "webhook": {
      "enabled": true,
      "cacheTTL": "2m0s"
    },
    "anonymous": {
      "enabled": false
    }
  },
  "authorization": {
    "mode": "Webhook",
    "webhook": {
      "cacheAuthorizedTTL": "5m0s",
      "cacheUnauthorizedTTL": "30s"
    }
  },
  "address": "192.168.186.131",
  "port": 10250,
  "readOnlyPort": 10255,
  "cgroupDriver": "systemd",
  "hairpinMode": "promiscuous-bridge",
  "serializeImagePulls": false,
  "featureGates": {
    "RotateKubeletClientCertificate": true,
    "RotateKubeletServerCertificate": true
  },
  "clusterDomain": "cluster.local.",
  "clusterDNS": ["10.255.0.2"]
}
EOF
# run on master1
cd /data/work/
cat >  kubelet.service << 'EOF'
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
  --bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \
  --cert-dir=/etc/kubernetes/ssl \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --config=/etc/kubernetes/kubelet.json \
  --network-plugin=cni \
  --pod-infra-container-image=k8s.gcr.io/pause:3.2 \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2
Restart=on-failure
RestartSec=5
 
[Install]
WantedBy=multi-user.target
EOF

# Notes:
--hostname-override: the node's display name, unique within the cluster
--network-plugin: enables CNI
--kubeconfig: empty path; the file is generated automatically and later used to connect to the apiserver
--bootstrap-kubeconfig: used on first start to request a certificate from the apiserver
--config: configuration file
--cert-dir: directory where kubelet certificates are generated
--pod-infra-container-image: image for the pod sandbox (pause) container

Note: set address in kubelet.json to each node's own IP, and start the service on every worker node.

# on node1
mkdir /etc/kubernetes/ssl -p
# on master1
scp  kubelet-bootstrap.kubeconfig kubelet.json  node1:/etc/kubernetes/
scp  ca.pem  node1:/etc/kubernetes/ssl/
scp  kubelet.service  node1:/usr/lib/systemd/system/

3.6.3 Start kubelet

# on node1
mkdir -p /var/lib/kubelet
mkdir -p /var/log/kubernetes
systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
systemctl status kubelet
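Once running, kubelet submits a CSR and waits for approval; a hedged way to watch that from node1:

journalctl -u kubelet --no-pager | tail  # kubelet logs that it is waiting for its client certificate
ls /etc/kubernetes/ssl/                  # kubelet-client*.pem appears only after the CSR is approved below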

3.6.4 Approve the bootstrap CSR

Once kubelet is up, go back to master1 and approve the bootstrap request.
The following command shows the pending CSR submitted by the worker node:

# any one of the 3 masters works
[root@master1 work]# kubectl get csr
NAME                                                   AGE   SIGNERNAME                                    REQUESTOR           CONDITION
node-csr-SY6gROGEmH0qVZhMVhJKKWN3UaWkKKQzV8dopoIO9Uc   87s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending
# any one of the 3 masters works
[root@master1 work]# kubectl certificate approve node-csr-SY6gROGEmH0qVZhMVhJKKWN3UaWkKKQzV8dopoIO9Uc
certificatesigningrequest.certificates.k8s.io/node-csr-SY6gROGEmH0qVZhMVhJKKWN3UaWkKKQzV8dopoIO9Uc approved
# any one of the 3 masters works
[root@master1 work]# kubectl get csr
NAME                                                   AGE     SIGNERNAME                                    REQUESTOR           CONDITION
node-csr-SY6gROGEmH0qVZhMVhJKKWN3UaWkKKQzV8dopoIO9Uc   2m25s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issued
# any one of the 3 masters works
[root@master1 work]# kubectl get nodes
NAME    STATUS     ROLES    AGE   VERSION
node1   NotReady   <none>   30s   v1.20.6

# note: STATUS stays NotReady until a network plugin is installed

3.8 Deploy kube-proxy

3.8.1 Create the CSR request

# run on master1
cd /data/work/
cat >  kube-proxy-csr.json  << EOF
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Hubei",
      "L": "Wuhan",
      "O": "k8s",
      "OU": "system"
    }
  ]
}
EOF

3.8.2 Generate the certificate

# run on master1
cd /data/work/
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

3.8.3 Create the kubeconfig file

# run on master1
cd /data/work/
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.186.128:6443 --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

3.8.4 Create the kube-proxy configuration file

cd /data/work/
cat > kube-proxy.yaml  << EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 192.168.186.131
clientConnection:
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
clusterCIDR: 10.0.0.0/16  # must match the pod CIDR (--cluster-cidr in kube-controller-manager), not the node subnet
healthzBindAddress: 192.168.186.131:10256
kind: KubeProxyConfiguration
metricsBindAddress: 192.168.186.131:10249
mode: "ipvs"
EOF

3.8.5 Create the systemd unit file

cd /data/work/
cat >  kube-proxy.service  << 'EOF'
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
 
[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \
  --config=/etc/kubernetes/kube-proxy.yaml \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
 
[Install]
WantedBy=multi-user.target
EOF

# run on master1
scp  kube-proxy.kubeconfig kube-proxy.yaml node1:/etc/kubernetes/
scp  kube-proxy.service node1:/usr/lib/systemd/system/

3.8.6 Start the service

# on node1
mkdir -p /var/lib/kube-proxy
systemctl daemon-reload
systemctl enable kube-proxy
systemctl  start kube-proxy
systemctl status kube-proxy
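With mode: "ipvs" active, the kernel virtual-server table should now hold the cluster Service IPs; an optional check on node1 (ipvsadm was installed in section 1.0):

ipvsadm -Ln  # expect an entry such as TCP 10.255.0.1:443 pointing at the three apiserver endpoints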

3.9 Deploy Calico

# upload calico.yaml to /data/work on the master

cd /data/work/
kubectl apply -f calico.yaml
kubectl get pods -n kube-system
kubectl get nodes
[root@master ~]# kubectl get nodes
NAME            STATUS   ROLES    AGE   VERSION
node1   Ready    <none>   73m   v1.20.7

3.10 Deploy CoreDNS

cd /data/work/
kubectl apply -f coredns.yaml
kubectl get pods -n kube-system
kubectl get svc -n kube-system

4. Check cluster status

[root@master1 ~]# kubectl get nodes
NAME    STATUS   ROLES    AGE   VERSION
node1   Ready    <none>   38m   v1.20.7

5. Install keepalived + nginx for kube-apiserver high availability

5.1 Install nginx on the masters

Install nginx and keepalived on master1, master2, and master3:

yum install nginx keepalived nginx-mod-stream -y

5.2 Edit the nginx configuration (identical on every node)

cd /data/work/
cp -r /etc/nginx/nginx.conf /etc/nginx/nginx.conf.bak
cat  > /etc/nginx/nginx.conf << 'EOF'
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;

include /usr/share/nginx/modules/*.conf;

events {
    worker_connections 1024;
}

# Layer-4 load balancing across the three master apiserver instances
stream {

    log_format  main  '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';

    access_log  /var/log/nginx/k8s-access.log  main;

    upstream k8s-apiserver {
       server 192.168.186.128:6443;   # master APISERVER IP:PORT
       server 192.168.186.129:6443;   # master2 APISERVER IP:PORT
       server 192.168.186.130:6443;   # master3 APISERVER IP:PORT

    }
    
    server {
       listen 16443; # nginx shares these hosts with the apiservers, so it must not listen on 6443 or the ports would clash
       proxy_pass k8s-apiserver;
    }
}

http {
    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    access_log  /var/log/nginx/access.log  main;

    sendfile            on;
    tcp_nopush          on;
    tcp_nodelay         on;
    keepalive_timeout   65;
    types_hash_max_size 2048;

    include             /etc/nginx/mime.types;
    default_type        application/octet-stream;

    server {
        listen       80 default_server;
        server_name  _;

        location / {
        }
    }
}
EOF
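Before starting nginx, the file can be validated in place with the standard syntax check (the stream{} block requires the nginx-mod-stream module installed above):

nginx -t  # expect: syntax is ok / test is successful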

5.3 Configure keepalived

Primary keepalived

# on master1
cp -r /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak

cat > /etc/keepalived/keepalived.conf << EOF
global_defs { 
   notification_email { 
     acassen@firewall.loc 
     failover@firewall.loc 
     sysadmin@firewall.loc 
   } 
   notification_email_from Alexandre.Cassen@firewall.loc  
   smtp_server 127.0.0.1 
   smtp_connect_timeout 30 
   router_id NGINX_MASTER
} 

vrrp_script check_nginx {
    script "/etc/keepalived/check_nginx.sh"
}

vrrp_instance VI_1 { 
    state MASTER 
    interface ens33   # change to your actual NIC name
    virtual_router_id 51 # VRRP router ID; unique per VRRP instance
    priority 100    # priority; set 90 on the backup
    advert_int 1    # VRRP advertisement interval, default 1 second
    authentication { 
        auth_type PASS      
        auth_pass 1111 
    }  
    # virtual IP
    virtual_ipaddress { 
        192.168.186.199/24
    } 
    track_script {
        check_nginx
    } 
}
EOF

# vrrp_script: health-check script for nginx (keepalived fails over based on nginx's state)
# virtual_ipaddress: the virtual IP (VIP)

cat  > /etc/keepalived/check_nginx.sh << 'EOF'
#!/bin/bash
count=$(ps -ef |grep nginx | grep sbin | egrep -cv "grep|$$")
if [ "$count" -eq 0 ];then
    systemctl stop keepalived
fi
EOF

chmod +x  /etc/keepalived/check_nginx.sh

Backup keepalived

# on master2
cp -r /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak

cat > /etc/keepalived/keepalived.conf  << EOF
global_defs { 
   notification_email { 
     acassen@firewall.loc 
     failover@firewall.loc 
     sysadmin@firewall.loc 
   } 
   notification_email_from Alexandre.Cassen@firewall.loc  
   smtp_server 127.0.0.1 
   smtp_connect_timeout 30 
   router_id NGINX_BACKUP
} 

vrrp_script check_nginx {
    script "/etc/keepalived/check_nginx.sh"
}

vrrp_instance VI_1 { 
    state BACKUP 
    interface ens33  # change to your actual NIC name
    virtual_router_id 51 # VRRP router ID; unique per VRRP instance
    priority 90
    advert_int 1
    authentication { 
        auth_type PASS      
        auth_pass 1111 
    }  
    virtual_ipaddress { 
        192.168.186.199/24
    } 
    track_script {
        check_nginx
    } 
}
EOF

cat > /etc/keepalived/check_nginx.sh << 'EOF'
#!/bin/bash
count=$(ps -ef |grep nginx | grep sbin | egrep -cv "grep|$$")
if [ "$count" -eq 0 ];then
    systemctl stop keepalived
fi
EOF

chmod +x /etc/keepalived/check_nginx.sh

# Note: keepalived decides whether to fail over from the script's exit status (0 = healthy, non-zero = failed).

Backup keepalived

# on master3
cp -r /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak

cat > /etc/keepalived/keepalived.conf  << EOF
global_defs { 
   notification_email { 
     acassen@firewall.loc 
     failover@firewall.loc 
     sysadmin@firewall.loc 
   } 
   notification_email_from Alexandre.Cassen@firewall.loc  
   smtp_server 127.0.0.1 
   smtp_connect_timeout 30 
   router_id NGINX_BACKUP
} 

vrrp_script check_nginx {
    script "/etc/keepalived/check_nginx.sh"
}

vrrp_instance VI_1 { 
    state BACKUP 
    interface ens33  # change to your actual NIC name
    virtual_router_id 51 # VRRP router ID; unique per VRRP instance
    priority 80
    advert_int 1
    authentication { 
        auth_type PASS      
        auth_pass 1111 
    }  
    virtual_ipaddress { 
        192.168.186.199/24
    } 
    track_script {
        check_nginx
    } 
}
EOF

cat > /etc/keepalived/check_nginx.sh << 'EOF'
#!/bin/bash
count=$(ps -ef |grep nginx | grep sbin | egrep -cv "grep|$$")
if [ "$count" -eq 0 ];then
    systemctl stop keepalived
fi
EOF

chmod +x /etc/keepalived/check_nginx.sh

# Note: keepalived decides whether to fail over from the script's exit status (0 = healthy, non-zero = failed).

5.4 Start the services

systemctl daemon-reload
systemctl start nginx
systemctl start keepalived
systemctl enable nginx keepalived

5.5 Verify that the VIP is bound

[root@master1 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:79:9e:36 brd ff:ff:ff:ff:ff:ff
    inet 192.168.186.128/24 brd 192.168.186.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet 192.168.186.199/24 scope global secondary ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::b6ef:8646:1cfc:3e0c/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever

5.6 Test keepalived failover

Stop nginx on master1 and the VIP floats over to master2.

# on master1
systemctl stop keepalived  # stop keepalived on master1; the VIP moves to master2
systemctl restart keepalived  # start it again to take the VIP back

All Worker Node components are still connected directly to master1; unless they are switched to the VIP behind the load balancer, the control plane remains a single point of failure.
So change the configuration of every Worker Node (the nodes listed by kubectl get node) from 192.168.186.128 to 192.168.186.199 (the VIP).

Run on every Worker Node:

# on node1
cd  /etc/kubernetes/
sed -i 's#192.168.186.128:6443#192.168.186.199:16443#' /etc/kubernetes/kubelet-bootstrap.kubeconfig
sed -i 's#192.168.186.128:6443#192.168.186.199:16443#' /etc/kubernetes/kubelet.json
sed -i 's#192.168.186.128:6443#192.168.186.199:16443#' /etc/kubernetes/kubelet.kubeconfig
#sed -i 's#192.168.186.128:6443#192.168.186.199:16443#' kubelet-bootstrap.kubeconfig
sed -i 's#192.168.186.128:6443#192.168.186.199:16443#' /etc/kubernetes/kube-proxy.yaml
sed -i 's#192.168.186.128:6443#192.168.186.199:16443#' /etc/kubernetes/kube-proxy.kubeconfig
systemctl restart kubelet kube-proxy  
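An optional way to confirm the switch took effect: the kubeconfigs should now reference the VIP, and kubelet and kube-proxy should hold established connections to 192.168.186.199:16443:

grep -r "192.168.186.199:16443" /etc/kubernetes/*.kubeconfig
ss -tnp | grep 16443  # established connections from kubelet and kube-proxy to the VIP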

6. Verify that CoreDNS works

kubectl run busybox --image busybox:1.28 --restart=Never --rm -it busybox -- sh
/ # ping www.baidu.com
PING www.baidu.com (39.156.66.18): 56 data bytes
64 bytes from 120.232.145.185: seq=0 ttl=127 time=89.812 ms
64 bytes from 120.232.145.185: seq=1 ttl=127 time=103.208 ms
64 bytes from 120.232.145.185: seq=2 ttl=127 time=113.258 ms
64 bytes from 120.232.145.185: seq=3 ttl=127 time=99.552 ms
64 bytes from 120.232.145.185: seq=4 ttl=127 time=96.677 ms
64 bytes from 120.232.145.185: seq=5 ttl=127 time=96.394 ms
64 bytes from 120.232.145.185: seq=6 ttl=127 time=96.662 ms
64 bytes from 120.232.145.185: seq=7 ttl=127 time=92.870 ms
# the pod can reach the outside network
/ # nslookup kubernetes.default.svc.cluster.local
Server:		10.255.0.2
Address:	10.255.0.2:53
Name:	kubernetes.default.svc.cluster.local
Address: 10.255.0.1

/ # nslookup nginx.default.svc.cluster.local
Server:    10.255.0.2
Address 1: 10.255.0.2 kube-dns.kube-system.svc.cluster.local

Name:      nginx.default.svc.cluster.local
Address 1: 10.255.227.179 tomcat.default.svc.cluster.local

# Note:
Use busybox:1.28 specifically, not the latest image; with the latest busybox, nslookup cannot resolve the DNS name and IP and fails like this:
/ # nslookup kubernetes.default.svc.cluster.local
Server: 10.255.0.2
Address: 10.255.0.2:53
*** Can't find kubernetes.default.svc.cluster.local: No answer
*** Can't find kubernetes.default.svc.cluster.local: No answer

10.255.0.2 is the clusterIP of our CoreDNS Service, so CoreDNS is configured correctly.
Internal Service names are resolved through CoreDNS.

7. Test the cluster by deploying an nginx service

cat > nginx.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx
  name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - image: nginx
        name: nginx
        imagePullPolicy: IfNotPresent
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: nginx
  name: nginx
spec:
  type: NodePort
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: nginx
EOF

kubectl apply -f nginx.yaml 
kubectl get pods,svc
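The NodePort is assigned dynamically; a hedged way to read it back and hit the service from any machine that can reach node1:

NODEPORT=$(kubectl get svc nginx -o jsonpath='{.spec.ports[0].nodePort}')
curl -I http://192.168.186.131:$NODEPORT  # expect HTTP/1.1 200 OK from nginx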

Access the service at node1's IP on port 36464; it cannot be reached via any master-node IP (the masters do not run kube-proxy).


8. Create an httpd deployment with imperative commands

kubectl create deployment httpd --image=httpd
kubectl expose deployment httpd --port=80 --type=NodePort
[root@master1 ~]# kubectl get pods,svc 
NAME                         READY   STATUS    RESTARTS   AGE
pod/demo-pod                 2/2     Running   0          16m
pod/httpd-757fb56c8d-pmwh4   1/1     Running   0          6m40s
pod/nginx-6799fc88d8-t5xwj   1/1     Running   0          14m

NAME                 TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
service/httpd        NodePort    10.255.41.253   <none>        80:42373/TCP     6m38s
service/kubernetes   ClusterIP   10.255.0.1      <none>        443/TCP          4h27m
service/nginx        NodePort    10.255.63.100   <none>        80:36464/TCP     14m
service/tomcat       NodePort    10.255.44.184   <none>        8080:30080/TCP   16m
[root@master1 ~]# 

Access the httpd service at node1's IP on port 42373 (the NodePort shown above); it cannot be reached via any master-node IP.


9. Install the Dashboard

See step 10 of the companion post.

10. Install metrics-server

See step 11 of the companion post.

That completes the highly available cluster. If you run into any problems, leave me a comment and we can work through them together!
