[Ops Notes] Kubernetes 1.18.x Binary Deployment

Server Planning

Role     IP              Components
master   192.168.20.131  kube-apiserver, kube-controller-manager, kube-scheduler, etcd, keepalived, haproxy
master1  192.168.20.138  kube-apiserver, kube-controller-manager, kube-scheduler, etcd, keepalived, haproxy
master2  192.168.20.83   kube-apiserver, kube-controller-manager, kube-scheduler, etcd, keepalived, haproxy
node1    192.168.20.102  kubelet, kube-proxy
node2    192.168.20.107  kubelet, kube-proxy
vip      192.168.20.150

Environment

Software    Version
kubernetes  1.18.12
docker      19.03.11
etcd        3.4.13
kernel      4.4.246

Environment Setup
Configure /etc/hosts

cat >> /etc/hosts << EOF
192.168.20.131 master
192.168.20.138 master1
192.168.20.83  master2
192.168.20.102 node1
192.168.20.107 node2
192.168.20.150 k8s-vip
EOF

Kernel Upgrade

# Enable the ELRepo repository
rpm -Uvh https://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm

# List the kernel versions available for install
yum --disablerepo="*" --enablerepo="elrepo-kernel" list available
yum --enablerepo=elrepo-kernel install -y kernel-lt

# Point the boot loader at the new kernel
sed -i 's/saved/0/g' /etc/default/grub 
cat /etc/default/grub 
# Make the kernel at index 0 the default
grub2-set-default 0
# Regenerate the GRUB configuration
grub2-mkconfig -o /boot/grub2/grub.cfg
reboot
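
# After the reboot, confirm that the new kernel is active
uname -r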

Passwordless SSH

ssh-keygen -t rsa
ssh-copy-id -i ~/.ssh/id_rsa.pub master
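
The distribution scripts later in this guide assume passwordless SSH to every machine; a small loop copying the same key to all the hosts defined in /etc/hosts:

for host in master master1 master2 node1 node2
do
  ssh-copy-id -i ~/.ssh/id_rsa.pub root@$host
done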

Environment configuration script

#!/bin/sh


# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld

# Disable SELinux
setenforce 0
sed -i 's/enforcing/disabled/' /etc/selinux/config
# Disable swap
swapoff -a
sed -i.bak 's/^.*centos-swap/#&/g' /etc/fstab
# Kernel parameters for Kubernetes
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.ipv4.ip_nonlocal_bind = 1
EOF

# Apply immediately
sysctl --system
# Set the time zone
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
# Configure the Aliyun base and EPEL repositories
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
curl -o /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
# Install dnf
yum install dnf -y
dnf makecache
# Install ntpdate
dnf install ntpdate -y
# Sync time from the Aliyun NTP server
ntpdate ntp.aliyun.com
# Install common utilities (including keepalived and haproxy)
yum install vim net-tools lrzsz unzip dos2unix telnet sysstat iotop pciutils lsof tcpdump psmisc bc wget socat keepalived haproxy -y

# Enable IPVS
cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash

modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
# Load the IPVS modules. Note: on kernels 4.19 and above, nf_conntrack replaces nf_conntrack_ipv4
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack

Deploy keepalived + HAProxy
keepalived


! Configuration File for keepalived

global_defs {
   router_id LVS_DEVEL
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.20.150
    }
}
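
The block above is /etc/keepalived/keepalived.conf on master. On master1 and master2 only state and priority change so they act as backups; a minimal sketch of the vrrp_instance (keep the same global_defs; ens33 is taken from the master config; lower the priority further on master2, e.g. 80):

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 51
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.20.150
    }
}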

HAProxy

#---------------------------------------------------------------------
# Example configuration for a possible web application.  See the
# full configuration options online.
#
#   http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
#
#---------------------------------------------------------------------

#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    #
    # 1) configure syslog to accept network log events.  This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    #
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #   file. A line like the following can be added to
    #   /etc/sysconfig/syslog
    #
    #    local2.*                       /var/log/haproxy.log
    #
    log         127.0.0.1 local2

    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
#    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
frontend  kube-apiserver
    mode        tcp
    option      tcplog
    bind        *:16443
    default_backend             kube-apiserver

#---------------------------------------------------------------------
# static backend for serving up images, stylesheets and such
#---------------------------------------------------------------------
#backend static
#    balance     roundrobin
#    server      static 127.0.0.1:4331 check

#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend kube-apiserver
    mode        tcp
    balance     roundrobin
    option      tcplog
    server  master 192.168.20.131:6443 check
    server  master1 192.168.20.138:6443 check
    server  master2 192.168.20.83:6443 check
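
The configuration above goes to /etc/haproxy/haproxy.cfg on each master (keepalived and haproxy were installed by the preparation script); enable and start both services:

systemctl enable --now keepalived
systemctl enable --now haproxy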

# Verify HAProxy is listening on 16443
 netstat -ap | grep haproxy
 tcp        0      0 0.0.0.0:16443           0.0.0.0:*               LISTEN      1667/haproxy

Deploy etcd
Install cfssl
Official site: https://pkg.cfssl.org/
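
If the three binaries are not already on the machine, they can be fetched from the release page first; the R1.2 paths below are an assumption, adjust them to the release actually published there:

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64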

mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl_linux-amd64 /usr/local/bin/cfssl

chmod 755 /usr/local/bin/cfssl*

Create the certificate JSON files

mkdir -pv ~/TLS/etcd && cd ~/TLS/etcd
cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "www": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

cat > ca-csr.json << EOF
{
    "CN": "etcd CA",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing"
        }
    ]
}
EOF
# Generate the CA certificate
 cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

cat > server-csr.json << EOF
{
    "CN": "etcd",
    "hosts": [
    "192.168.20.131",
    "192.168.20.135",
    "192.168.20.136"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing"
        }
    ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server

 ls -l server*.pem
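
The etcd unit file below expects these certificates under /etc/etcd/ssl; a sketch of putting them there on master (the three files were just generated in ~/TLS/etcd):

mkdir -pv /etc/etcd/ssl
cp -p ~/TLS/etcd/{ca.pem,server.pem,server-key.pem} /etc/etcd/ssl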

Deploy the etcd cluster
(1) Extract the binaries

tar xf etcd-v3.4.13-linux-amd64.tar.gz
cp -a etcd-v3.4.13-linux-amd64/{etcd,etcdctl} /usr/local/bin

(2) Create the configuration files

mkdir -pv /etc/etcd/cfg
# On master
cat >/etc/etcd/cfg/etcd.conf <<EOF
#[Member]
ETCD_NAME="etcd-01"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.20.131:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.20.131:2379,http://127.0.0.1:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.20.131:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.20.131:2379"
ETCD_INITIAL_CLUSTER="etcd-01=https://192.168.20.131:2380,etcd-02=https://192.168.20.138:2380,etcd-03=https://192.168.20.83:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

# On master1
cat >/etc/etcd/cfg/etcd.conf <<EOF
#[Member]
ETCD_NAME="etcd-02"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.20.138:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.20.138:2379,http://127.0.0.1:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.20.138:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.20.138:2379"
ETCD_INITIAL_CLUSTER="etcd-01=https://192.168.20.131:2380,etcd-02=https://192.168.20.138:2380,etcd-03=https://192.168.20.83:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

# On master2
cat >/etc/etcd/cfg/etcd.conf <<EOF
#[Member]
ETCD_NAME="etcd-03"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.20.83:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.20.83:2379,http://127.0.0.1:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.20.83:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.20.83:2379"
ETCD_INITIAL_CLUSTER="etcd-01=https://192.168.20.131:2380,etcd-02=https://192.168.20.138:2380,etcd-03=https://192.168.20.83:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
cat > /usr/lib/systemd/system/etcd.service << EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/etc/etcd/cfg/etcd.conf
ExecStart=/usr/local/bin/etcd \
--cert-file=/etc/etcd/ssl/server.pem \
--key-file=/etc/etcd/ssl/server-key.pem \
--peer-cert-file=/etc/etcd/ssl/server.pem \
--peer-key-file=/etc/etcd/ssl/server-key.pem \
--trusted-ca-file=/etc/etcd/ssl/ca.pem \
--peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
--logger=zap
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF

Distribute the etcd binaries and unit file (each master keeps the etcd.conf created for it above)

#! /bin/bash
for ip in master1 master2
do
 ssh root@$ip "mkdir -pv /etc/etcd/cfg"
 scp -p /usr/local/bin/{etcd,etcdctl} root@$ip:/usr/local/bin
 scp -p /usr/lib/systemd/system/etcd.service root@$ip:/usr/lib/systemd/system
done
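
The certificates also have to reach master1 and master2, and etcd can then be started on all three masters; a sketch (run the loop from master, then the systemctl commands on every master):

for ip in master1 master2
do
 ssh root@$ip "mkdir -pv /etc/etcd/ssl"
 scp -p /etc/etcd/ssl/{ca.pem,server.pem,server-key.pem} root@$ip:/etc/etcd/ssl
done

# On master, master1 and master2:
systemctl daemon-reload
systemctl enable --now etcd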

Check that the etcd cluster is healthy

etcdctl --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/server.pem --key=/etc/etcd/ssl/server-key.pem --endpoints="https://192.168.20.131:2379,https://192.168.20.138:2379,https://192.168.20.83:2379" endpoint health

Install Docker

# Run on every node
curl -o /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

 dnf install -y   containerd.io-1.2.13   docker-ce-19.03.11   docker-ce-cli-19.03.11

systemctl enable --now docker

cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://f1bhsuge.mirror.aliyuncs.com"]
}
EOF


systemctl restart docker

Deploy the Master Components

  1. Create the cluster certificates
  2. Deploy kube-apiserver
  3. Deploy kube-controller-manager
  4. Deploy kube-scheduler
# Create the JSON config files for the certificate requests
[root@master ~]# mkdir ~/TLS/k8s && cd ~/TLS/k8s
[root@master k8s]# cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

cat > ca-csr.json << EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

cat > server-csr.json << EOF
{
    "CN": "kubernetes",
    "hosts": [
    
      "10.0.0.1",
      "127.0.0.1",
      "192.168.20.131",
      "192.168.20.138",
      "192.168.20.83",
      "192.168.20.102",
      "192.168.20.107",
      "192.168.20.150",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server

Enable the kube-apiserver aggregation layer

cat > aggregator-ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "aggregator": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

cat > aggregator-ca-csr.json << EOF
{
    "CN": "aggregator",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -initca aggregator-ca-csr.json | cfssljson -bare aggregator-ca

cat > aggregator-csr.json << EOF
{
    "CN": "aggregator",
    "hosts": [
      "10.0.0.1",
      "127.0.0.1",
      "192.168.20.131",
      "192.168.20.138",
      "192.168.20.83",
      "192.168.20.102",
      "192.168.20.107",
      "192.168.20.150",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -ca=aggregator-ca.pem -ca-key=aggregator-ca-key.pem -config=aggregator-ca-config.json -profile=aggregator aggregator-csr.json | cfssljson -bare aggregator

Create directories and extract the binaries


[root@master k8s]# cd && mkdir -pv /etc/kubernetes/{cfg,ssl} /var/log/kubernetes

[root@master ~]# tar xf kubernetes-server-linux-amd64.tar.gz

[root@master ~]# cd kubernetes/server/bin && cp kube-apiserver kube-scheduler kube-controller-manager kubectl /usr/local/bin
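
The kube-apiserver options below reference certificates under /etc/kubernetes/ssl; a sketch of copying the files generated earlier in ~/TLS/k8s into the directory created above (the file names come from the cfssljson -bare prefixes used there):

cp -p ~/TLS/k8s/{ca.pem,ca-key.pem,server.pem,server-key.pem,aggregator-ca.pem,aggregator.pem,aggregator-key.pem} /etc/kubernetes/ssl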

Deploy kube-apiserver

cat > /etc/kubernetes/cfg/kube-apiserver.conf << EOF
KUBE_APISERVER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--advertise-address=192.168.20.131 \\
--default-not-ready-toleration-seconds=360 \\
--default-unreachable-toleration-seconds=360 \\
--max-mutating-requests-inflight=2000 \\
--max-requests-inflight=4000 \\
--default-watch-cache-size=200 \\
--delete-collection-workers=2 \\
--bind-address=192.168.20.131 \\
--secure-port=6443 \\
--allow-privileged=true \\
--service-cluster-ip-range=10.0.0.0/24 \\
--service-node-port-range=30000-32767 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/etc/kubernetes/ssl/token.csv \\
--kubelet-client-certificate=/etc/kubernetes/ssl/server.pem \\
--kubelet-client-key=/etc/kubernetes/ssl/server-key.pem \\
--tls-cert-file=/etc/kubernetes/ssl/server.pem  \\
--tls-private-key-file=/etc/kubernetes/ssl/server-key.pem \\
--client-ca-file=/etc/kubernetes/ssl/ca.pem \\
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/var/log/kubernetes/k8s-audit.log \\
--etcd-servers=https://192.168.20.131:2379,https://192.168.20.138:2379,https://192.168.20.83:2379 \\
--etcd-cafile=/etc/etcd/ssl/ca.pem \\
--etcd-certfile=/etc/etcd/ssl/server.pem \\
--etcd-keyfile=/etc/etcd/ssl/server-key.pem \\
--requestheader-client-ca-file=/etc/kubernetes/ssl/aggregator-ca.pem \\
--requestheader-allowed-names=aggregator \\
--requestheader-extra-headers-prefix=X-Remote-Extra- \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-username-headers=X-Remote-User \\
--proxy-client-cert-file=/etc/kubernetes/ssl/aggregator.pem \\
--proxy-client-key-file=/etc/kubernetes/ssl/aggregator-key.pem"
EOF
Option	Description
--logtostderr=false	Write logs to files (directory set by --log-dir) instead of standard error
--v=2	Log verbosity level
--advertise-address	IP address on which to advertise the apiserver to members of the cluster; it must be reachable by the rest of the cluster. If empty, --bind-address is used; if --bind-address is unspecified, the host's default interface is used
--etcd-servers	List of etcd servers to connect to (scheme://ip:port), comma separated
--etcd-cafile	SSL CA file used for etcd communication
--etcd-certfile	SSL certificate file used for etcd communication
--etcd-keyfile	SSL key file used for etcd communication
--service-cluster-ip-range	CIDR range from which Service cluster IPs are allocated; it must not overlap the IP ranges assigned to nodes or pods
--bind-address	IP address on which to listen for --secure-port; the associated interface must be reachable by the rest of the cluster and by CLI/web clients. If empty, all interfaces are used (0.0.0.0)
--secure-port=6443	Port for HTTPS with authentication and authorization, default 6443
--allow-privileged	Whether to allow privileged containers
--service-node-port-range	Port range reserved for NodePort Services
--default-not-ready-toleration-seconds	Toleration seconds for the notReady condition
--default-unreachable-toleration-seconds	Toleration seconds for the unreachable condition
--max-mutating-requests-inflight=2000	Maximum number of mutating requests in flight at a given time; above this the server rejects requests; 0 means no limit (default 200)
--default-watch-cache-size=200	Default watch cache size; 0 disables the watch cache for resources without a default watch size
--delete-collection-workers=2	Number of workers for DeleteCollection calls, used to speed up namespace cleanup (default 1)
--enable-admission-plugins	Admission plugins to enable (resource limits and related controls)
--authorization-mode	Ordered, comma-separated list of plugins for authorization on the secure port: AlwaysAllow, AlwaysDeny, ABAC, Webhook, RBAC, Node (default "AlwaysAllow")
--enable-bootstrap-token-auth	Allow secrets of type 'bootstrap.kubernetes.io/token' in the 'kube-system' namespace to be used for TLS bootstrap authentication
--token-auth-file	Bootstrap token file
--kubelet-certificate-authority	Path to a certificate authority file
--kubelet-client-certificate	Path to the client certificate file used for TLS
--kubelet-client-key	Path to the client key file used for TLS
--tls-private-key-file	File containing the x509 private key matching --tls-cert-file
--service-account-key-file	File containing PEM-encoded x509 RSA or ECDSA private or public keys used to verify ServiceAccount tokens; if unset, --tls-private-key-file is used; the file may contain multiple keys and the flag may be given multiple times with different files
--audit-log-maxage	Maximum number of days to retain old audit log files, based on the timestamp in the file name
--audit-log-maxbackup	Maximum number of old audit log files to retain
--audit-log-maxsize	Maximum size in megabytes of an audit log file before it is rotated
--audit-log-path	If set, all requests to the apiserver are logged to this file; '-' means standard output

Enable the TLS Bootstrapping mechanism

head -c 16 /dev/urandom | od -An -t x | tr -d ' '
14f2afa0b6ccc53350972c4d2942d452

cat > /etc/kubernetes/ssl/token.csv << EOF
14f2afa0b6ccc53350972c4d2942d452,kubelet-bootstrap,10001,"system:node-bootstrapper"
EOF

Create the kube-apiserver systemd unit

cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-apiserver.conf
ExecStart=/usr/local/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=10
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

Start kube-apiserver and enable it on boot

systemctl daemon-reload
systemctl start kube-apiserver
systemctl enable kube-apiserver
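
A quick local sanity check that the apiserver is serving; in 1.18 the insecure port 8080 on localhost is still enabled by default (it is also what kube-controller-manager and kube-scheduler below connect to):

curl http://127.0.0.1:8080/healthz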

Authorize the kubelet-bootstrap user to request certificates

kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap

Deploy kube-controller-manager

cat > /etc/kubernetes/cfg/kube-controller-manager.conf << EOF
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--leader-elect=true \\
--master=127.0.0.1:8080 \\
--bind-address=127.0.0.1 \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.244.0.0/16 \\
--service-cluster-ip-range=10.0.0.0/24 \\
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem  \\
--root-ca-file=/etc/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--experimental-cluster-signing-duration=87600h0m0s"
EOF

Options

--leader-elect	Enable leader election for high availability; there are three controller-managers here, so election is required
--master	Connect to the apiserver through the local insecure port 8080
--bind-address	Address to listen on
--allocate-node-cidrs	Whether Pod CIDRs should be allocated and set on nodes
--cluster-cidr	When --cluster-cidr is set, the controller manager generates a CIDR for every node whose Spec.PodCIDR is unset and writes it to Spec.PodCIDR, preventing CIDR conflicts between nodes
--service-cluster-ip-range	CIDR range of cluster Services
--cluster-signing-cert-file	Cluster-wide signing certificate (root CA) used to sign cluster certificates
--cluster-signing-key-file	Key of the cluster signing certificate
--root-ca-file	If set, this root CA bundle is included in service account token secrets; it must be a valid PEM-encoded CA bundle
--service-account-private-key-file	File containing the PEM-encoded RSA or ECDSA private key used to sign service account tokens
--experimental-cluster-signing-duration	Validity period of the signed certificates

Create the kube-controller-manager systemd unit

cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-controller-manager.conf
ExecStart=/usr/local/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

Start kube-controller-manager and enable it on boot

systemctl daemon-reload
systemctl start kube-controller-manager
systemctl enable kube-controller-manager

Deploy kube-scheduler

cat > /etc/kubernetes/cfg/kube-scheduler.conf << EOF
KUBE_SCHEDULER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--leader-elect=true \\
--master=http://127.0.0.1:8080 \\
--bind-address=127.0.0.1 \\
--address=127.0.0.1"
EOF

Create the kube-scheduler systemd unit

cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-scheduler.conf
ExecStart=/usr/local/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF

Start kube-scheduler and enable it on boot

[root@master ~]# systemctl daemon-reload
[root@master ~]# systemctl start kube-scheduler
[root@master ~]# systemctl enable kube-scheduler

Check the cluster status

kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-1               Healthy   {"health":"true"}   
etcd-2               Healthy   {"health":"true"}   
etcd-0               Healthy   {"health":"true"}  
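
Everything so far was done on master only. For the HA layout in the server plan, the same binaries, certificates, configuration files and unit files also need to be installed on master1 and master2, with --advertise-address and --bind-address in kube-apiserver.conf changed to each node's own IP. A sketch of the copy step (adjust the two addresses by hand afterwards, then start the three services on each node as above):

for ip in master1 master2
do
  ssh root@$ip "mkdir -pv /etc/kubernetes/{cfg,ssl} /var/log/kubernetes"
  scp -p /usr/local/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl} root@$ip:/usr/local/bin
  scp -p /etc/kubernetes/ssl/* root@$ip:/etc/kubernetes/ssl
  scp -p /etc/kubernetes/cfg/{kube-apiserver.conf,kube-controller-manager.conf,kube-scheduler.conf} root@$ip:/etc/kubernetes/cfg
  scp -p /usr/lib/systemd/system/{kube-apiserver,kube-controller-manager,kube-scheduler}.service root@$ip:/usr/lib/systemd/system
done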

Deploy kubelet and kube-proxy

cd kubernetes/server/bin
# Copy to every node
cp kubelet kube-proxy /usr/local/bin
cd

Deploy the kubelet service

Create the kubelet.conf configuration file

cat > /etc/kubernetes/cfg/kubelet.conf << EOF
KUBELET_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--hostname-override=master \\
--container-runtime=docker \\
--network-plugin=cni \\
--kubeconfig=/etc/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/etc/kubernetes/cfg/bootstrap.kubeconfig \\
--config=/etc/kubernetes/cfg/kubelet-config.yml \\
--cert-dir=/etc/kubernetes/ssl \\
--image-pull-progress-deadline=15m \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1"
EOF

Configuration options

--hostname-override	Node name shown in the cluster; if kubelet sets --hostname-override, kube-proxy must set the same value, otherwise the Node will not be found
--container-runtime	Container runtime engine
--network-plugin	Enable the CNI network plugin
--kubeconfig	kubeconfig file the kubelet uses as a client; it is generated automatically once the bootstrap CSR is approved (the certificate is signed by kube-controller-manager)
--bootstrap-kubeconfig	Bootstrap token kubeconfig file
--config	kubelet configuration file
--cert-dir	Directory where the certificates and keys issued via kube-controller-manager are stored
--image-pull-progress-deadline	Maximum time an image pull may go without progress before it is cancelled, default 1m0s
--pod-infra-container-image	Image used for the network/ipc namespace container of every pod

Note: --hostname-override=master must be changed to the hostname of the node the kubelet runs on.

kubelet-config.yml parameter file

cat > /etc/kubernetes/cfg/kubelet-config.yml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- 10.0.0.2
clusterDomain: cluster.local 
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/ssl/ca.pem 
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF
address	Address the kubelet service listens on
port: 10250	Port of the kubelet service, default 10250
readOnlyPort	Read-only kubelet port without authentication/authorization; 0 disables it, default 10255
clusterDNS	List of DNS server IP addresses
clusterDomain	Cluster domain; the kubelet configures all containers to search this domain in addition to the host's search domains

Generate the bootstrap.kubeconfig file

kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://192.168.20.150:16443 \
  --kubeconfig=bootstrap.kubeconfig

kubectl config set-credentials "kubelet-bootstrap" \
  --token=14f2afa0b6ccc53350972c4d2942d452 \
  --kubeconfig=bootstrap.kubeconfig   # must match the token in token.csv

kubectl config set-context default \
  --cluster=kubernetes \
  --user="kubelet-bootstrap" \
  --kubeconfig=bootstrap.kubeconfig

kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

ls -l *.kubeconfig
-rw-------. 1 root root 2169 Dec  6 17:50 bootstrap.kubeconfig

Copy it to the configuration path /etc/kubernetes/cfg, and push it to every node (see the scp sketch below)

cp -p bootstrap.kubeconfig /etc/kubernetes/cfg
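
The kubelet on the worker nodes needs the binaries, the CA certificate and the three configuration files created above; a sketch of pushing them to node1 and node2 (remember to change --hostname-override in kubelet.conf on each node):

for host in node1 node2
do
  ssh root@$host "mkdir -pv /etc/kubernetes/{cfg,ssl} /var/log/kubernetes"
  scp -p /usr/local/bin/{kubelet,kube-proxy} root@$host:/usr/local/bin
  scp -p /etc/kubernetes/ssl/ca.pem root@$host:/etc/kubernetes/ssl
  scp -p /etc/kubernetes/cfg/{bootstrap.kubeconfig,kubelet.conf,kubelet-config.yml} root@$host:/etc/kubernetes/cfg
done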

Create the kubelet systemd unit

cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
After=docker.service

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kubelet.conf
ExecStart=/usr/local/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl start kubelet
systemctl enable kubelet
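
After the kubelet starts with the bootstrap kubeconfig, it submits a CSR that has to be approved on a master before the node registers; the CSR name below is a placeholder, take it from the kubectl get csr output:

kubectl get csr
kubectl certificate approve node-csr-xxxxxxxx
kubectl get node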

Deploy the CNI container network plugins

wget https://github.com/containernetworking/plugins/releases/download/v0.8.6/cni-plugins-linux-amd64-v0.8.6.tgz

mkdir -pv /opt/cni/bin

tar xf cni-plugins-linux-amd64-v0.8.6.tgz -C /opt/cni/bin

ls /opt/cni/bin
bandwidth  bridge  dhcp  firewall  flannel  host-device  host-local  ipvlan  loopback  macvlan  portmap  ptp  sbr  static  tuning  vlan

Deploy the kube-proxy service

Create the kube-proxy configuration file

cat > /etc/kubernetes/cfg/kube-proxy.conf << EOF
KUBE_PROXY_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--config=/etc/kubernetes/cfg/kube-proxy-config.yml"
EOF

kube-proxy-config.yml parameter file

cat > /etc/kubernetes/cfg/kube-proxy-config.yml << EOF
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
healthzBindAddress: 0.0.0.0:10256
metricsBindAddress: 0.0.0.0:10249
clientConnection:
  burst: 200
  kubeconfig: /etc/kubernetes/cfg/kube-proxy.kubeconfig
  qps: 100
hostnameOverride: node1
clusterCIDR: 10.0.0.0/24
EOF
Option	Description
clientConnection	Settings for talking to kube-apiserver
burst: 200	Allow the request rate to temporarily exceed the qps value
kubeconfig	Path to the kubeconfig kube-proxy uses to connect to kube-apiserver
qps: 100	QPS when talking to kube-apiserver, default 5
bindAddress	Address kube-proxy listens on
healthzBindAddress	IP address and port of the health check server
metricsBindAddress	IP address and port of the metrics server, default 127.0.0.1:10249
clusterCIDR	kube-proxy uses this to tell cluster-internal traffic from external traffic; SNAT for requests to Service IPs is only done when clusterCIDR or --masquerade-all is set
hostnameOverride	Must match the kubelet's value, otherwise kube-proxy will not find the Node after startup and will not create any ipvs rules

Generate the kube-proxy certificate

cd ~/TLS/k8s

cat > kube-proxy-csr.json << EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

ls kube-proxy*pem

Generate the kubeconfig file
kube-proxy is a client of kube-apiserver; since TLS is enabled, access has to be authenticated using the certificate generated above.

[root@master k8s]# kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://192.168.20.150:16443 \
  --kubeconfig=kube-proxy.kubeconfig
[root@master k8s]# kubectl config set-credentials kube-proxy \
  --client-certificate=./kube-proxy.pem \
  --client-key=./kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig
[root@master k8s]# kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig
[root@master k8s]# kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

Copy it to the path referenced in the configuration file, on both node1 and node2:

scp kube-proxy.kubeconfig node1:/etc/kubernetes/cfg/
scp kube-proxy.kubeconfig node2:/etc/kubernetes/cfg/

Create the kube-proxy systemd unit

cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-proxy.conf
ExecStart=/usr/local/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

Start kube-proxy and enable it on boot

[root@master ~]# systemctl daemon-reload
[root@master ~]# systemctl start kube-proxy
[root@master ~]# systemctl enable kube-proxy

Authorize the apiserver to access the kubelet

When running kubectl exec, run, logs and similar commands, the apiserver forwards the request to the kubelet. The RBAC rules below authorize the apiserver to call the kubelet API.

cat > apiserver-to-kubelet-rbac.yaml << EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
      - pods/log
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kubernetes
EOF

Apply the authorization

 kubectl apply -f apiserver-to-kubelet-rbac.yaml

Deploy the Flannel network plugin

kube-flannel.yml

---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.13.1-rc1
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.13.1-rc1
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
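
Apply the manifest and check that the flannel pods start and the nodes become Ready:

kubectl apply -f kube-flannel.yml
kubectl get pods -n kube-system
kubectl get nodes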

Deploy CoreDNS

The manifest is based on the coredns.yaml.base template (1.19 version). Main changes (see the sed sketch below):

Replace __MACHINE_GENERATED_WARNING__ with "This is a file generated from the base underscore template file: coredns.yaml.base".
Replace __PILLAR__DNS__DOMAIN__ with cluster.local. Normally this is left unchanged; if it is changed, it must match clusterDomain in kubelet-config.yml on the nodes, and the hosts field of the apiserver certificate must be adjusted and the certificate regenerated.
Replace __PILLAR__DNS__MEMORY__LIMIT__ with 170Mi; adjust this memory limit to the resources of the environment.
Replace __PILLAR__DNS__SERVER__ with 10.0.0.2; this IP must match the clusterDNS field configured in /etc/kubernetes/cfg/kubelet-config.yml on the nodes.
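
A sketch of performing those substitutions with sed and applying the result (it assumes coredns.yaml.base from the Kubernetes source tree is in the current directory):

sed -e 's/__MACHINE_GENERATED_WARNING__/This is a file generated from the base underscore template file: coredns.yaml.base/' \
    -e 's/__PILLAR__DNS__DOMAIN__/cluster.local/' \
    -e 's/__PILLAR__DNS__MEMORY__LIMIT__/170Mi/' \
    -e 's/__PILLAR__DNS__SERVER__/10.0.0.2/' \
    coredns.yaml.base > coredns.yaml
kubectl apply -f coredns.yaml
kubectl get pods -n kube-system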