kubernetes集群方式部署

1.集群环境准备

1.1 主机规划

在这里插入图片描述

1.2 网段

在这里插入图片描述

1.3软件版本

在这里插入图片描述

2.主机准备

2.1设置主机名

给不同的主机设置对应的主机名

[root@localhost ~]#hostnamectl set-hostname k8s-master01
[root@localhost ~]#hostnamectl set-hostname k8s-master02
[root@localhost ~]#hostnamectl set-hostname k8s-master03
[root@localhost ~]#hostnamectl set-hostname k8s-node01
[root@localhost ~]#hostnamectl set-hostname k8s-node02
[root@localhost ~]#hostnamectl set-hostname k8s-node03
[root@localhost ~]#hostnamectl set-hostname k8s-node04

2.2配置yum源

在所有服务器上配置

#比如欧拉系统,在/etc/yum.repos.d/目录下没有CentOS-Base.repo文件的可以通过下面链接下载
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-vault-8.5.2111.repo
# 对于 CentOS 7
sudo sed -e 's|^mirrorlist=|#mirrorlist=|g' \
         -e 's|^#baseurl=http://mirror.centos.org|baseurl=https://mirrors.tuna.tsinghua.edu.cn|g' \
         -i.bak \
         /etc/yum.repos.d/CentOS-*.repo
# 对于 CentOS 8
sudo sed -e 's|^mirrorlist=|#mirrorlist=|g' \
         -e 's|^#baseurl=http://mirror.centos.org/$contentdir|baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos|g' \
         -i.bak \
         /etc/yum.repos.d/CentOS-*.repo

sed -e 's|^mirrorlist=|#mirrorlist=|g' -e 's|^#baseurl=http://mirror.centos.org/\$contentdir|baseurl=http://10.0.0.123/centos|g' -i.bak  /etc/yum.repos.d/CentOS-*.repo

2.3 安装一些必备工具

在所有服务器上执行

yum -y install wget jq psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git network-scripts tar curl

2.4关闭防火墙

systemctl disable --now firewalld
firewall-cmd --state

2.5关闭SELinux

setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
sestatus

2.6关闭交换分区

sed -ri 's/.*swap.*/#&/' /etc/fstab
swapoff -a && sysctl -w vm.swappiness=0

cat /etc/fstab
# /dev/mapper/centos-swap swap                    swap    defaults        0 0

2.7配置ulimit

ulimit -SHn 65535
# Append per-user resource limits. Note: the hard limit must be >= the soft
# limit (the original set hard nofile 131072 below soft nofile 655360, which
# is invalid), and "seft" in the original was a typo for "soft".
cat >> /etc/security/limits.conf <<EOF
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF

2.8添加启用源(不一定要做)

升级内核用的,如果内核版本满足要求,不需要升级内核,该步骤不需要执行

为 RHEL-8或 CentOS-8配置源
yum install https://www.elrepo.org/elrepo-release-8.el8.elrepo.noarch.rpm

为 RHEL-7 SL-7 或 CentOS-7 安装 ELRepo 
yum install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm

查看可用安装包
yum  --disablerepo="*"  --enablerepo="elrepo-kernel"  list  available

2.9安装ipvsadm

[root@k8s-node02 ~]# yum install ipvsadm ipset sysstat conntrack libseccomp -y

[root@k8s-node02 ~]# cat >> /etc/modules-load.d/ipvs.conf <<EOF 
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF

[root@k8s-node02 ~]# systemctl restart systemd-modules-load.service

[root@k8s-node02 ~]# lsmod | grep -e ip_vs -e nf_conntrack
ip_vs_sh               16384  0
ip_vs_wrr              16384  0
ip_vs_rr               16384  0
ip_vs                 180224  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          176128  1 ip_vs
nf_defrag_ipv6         24576  2 nf_conntrack,ip_vs
nf_defrag_ipv4         16384  1 nf_conntrack
libcrc32c              16384  3 nf_conntrack,xfs,ip_vs

2.10修改内核参数

# Kernel parameters required by Kubernetes networking (bridged traffic through
# iptables, IP forwarding, conntrack table sizing, TCP tuning).
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384

net.ipv6.conf.all.disable_ipv6 = 0
net.ipv6.conf.default.disable_ipv6 = 0
net.ipv6.conf.lo.disable_ipv6 = 0
net.ipv6.conf.all.forwarding = 0

EOF
# NOTE(review): the original file also set net.ipv4.ip_conntrack_max (obsolete
# on modern kernels; superseded by net.netfilter.nf_conntrack_max above) and
# listed net.ipv4.tcp_max_syn_backlog twice — both removed here to avoid
# "unknown key" warnings from sysctl --system.

sysctl --system

2.11所有节点配置hosts本地解析

cat > /etc/hosts <<EOF
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

10.46.137.147 k8s-master01
10.46.137.148 k8s-master02
10.46.137.155 k8s-master03
10.46.137.149 k8s-node01
10.46.137.150 k8s-node02
10.46.137.153 k8s-node03
10.46.137.154 k8s-node04
EOF

2.12 时间同步

通过date 查看如果时间是同步的,该步骤可以忽略

yum -y install ntpdate
crontab -e
0 */1 * * * ntpdate time1.aliyun.com
查看时间是否一样
date

2.13 inotify设置

vi /etc/sysctl.conf     #添加如下内容
fs.inotify.max_queued_events = 32768
fs.inotify.max_user_instances = 65536
fs.inotify.max_user_watches = 1048576
net.ipv4.ip_forward = 1
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.conf.lo.arp_ignore = 1
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_ignore = 1
net.ipv4.conf.all.arp_announce = 2

sysctl -p       #使修改后文件生效

2.14 重启主机

reboot

3.文件节点基本软件安装

3.1 docker 二进制安装

10.46.137.151、10.46.137.152两台文件节点用来存储镜像,需要安装docker
1、获取docker离线二进制包

docker二进制包获取地址:https://download.docker.com/linux/static/stable/x86_64/

2、解压缩

tar xzf docker-20.10.8.tgz
#解压缩完成后将docker目录中的所有文件复制至/usr/bin/目录下
cp docker/* /usr/bin

3、配置docker.service文件

vim /usr/lib/systemd/system/docker.service

[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target

[Service]
Type=notify
ExecStart=/usr/bin/dockerd
ExecReload=/bin/kill -s HUP $MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target

4、确认daemon.json

mkdir /etc/docker
vim /etc/docker/daemon.json
##国内可以申请阿里云等的加速镜像地址
{
    "graph": "/data/docker",
    "storage-driver": "overlay2",
    "registry-mirrors": ["https://q2gr04ke.mirror.aliyuncs.com"],
    "bip": "172.7.21.1/24",
    "log-driver":"json-file",
    "log-opts":{ "max-size" :"100m","max-file":"3"},
    "exec-opts": ["native.cgroupdriver=systemd"]
}
创建目录
mkdir /data/docker

5、启动

systemctl daemon-reload
systemctl enable --now docker

3.2 harbor安装

在两台文件服务器中选择一台安装harbor私有仓库。 如果是云服务器,ip使用外网ip
1、下载安装包后解压

tar -xf harbor-offline-installer-v2.6.0.tgz -C /opt
cd /opt
mv harbor harbor-v2.6.0
ln -s harbor-v2.6.0 harbor

2、生成证书

mkdir /etc/certs && cd  /etc/certs
openssl genrsa -out ca.key 4096

openssl req -x509 -new -nodes -sha512 -days 3650 \
 -subj "/C=CN/ST=BeijingYJHZ/L=BeijingYJHZ/O=example/OU=Personal/CN=10.46.137.152" \
 -key ca.key \
 -out ca.crt
 #生成服务器证书
 openssl genrsa -out 10.46.137.152.key 4096

    
openssl req -sha512 -new \
    -subj "/C=CN/ST=Beijing/L=BeijingYJHZ/O=example/OU=Personal/CN=10.46.137.152" \
    -key 10.46.137.152.key \
    -out 10.46.137.152.csr
cat > v3.ext <<-EOF
authorityKeyIdentifier=keyid,issuer
basicConstraints=CA:FALSE
keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
extendedKeyUsage = serverAuth
subjectAltName =  IP:10.46.137.152
EOF
#使用该v3.ext文件为您的Harbor主机生成证书 
openssl x509 -req -sha512 -days 3650 \
    -extfile v3.ext \
    -CA ca.crt -CAkey ca.key -CAcreateserial \
    -in 10.46.137.152.csr \
    -out 10.46.137.152.crt

转换10.46.137.152.crt为10.46.137.152.cert,供Docker使用
Docker守护程序将.crt文件解释为CA证书,并将.cert文件解释为客户端

openssl x509 -inform PEM -in 10.46.137.152.crt -out 10.46.137.152.cert

3.将服务器证书,密钥和CA文件复制到Harbor主机上的Docker certificate文件夹中。您必须首先创建适当的文件夹,客户端都需要这些文件, 有哪些服务器是要通过docker 去访问harbor的都需要创建下面的目录,并且目录的ip名称不变,都是harbor的ip

mkdir -p /etc/docker/certs.d/10.46.137.152/
cp 10.46.137.152.cert /etc/docker/certs.d/10.46.137.152/
cp 10.46.137.152.key /etc/docker/certs.d/10.46.137.152/
cp ca.crt /etc/docker/certs.d/10.46.137.152/

3、配置harbor.yml

cd /opt/harbor
cp harbor.yml.tmpl harbor.yml

vim /opt/harbor/harbor.yml
hostname: 10.46.137.152
http:
  # port for http, default is 80. If https enabled, this port will redirect to https port
  port: 1800
https:
  # https port for harbor, default is 443
    port: 443
  # The path of cert and key files for nginx
    certificate: /etc/certs/10.46.137.152.crt
    private_key: /etc/certs/10.46.137.152.key
harbor_admin_password: harboradmin
data_volume: /data/harbor/data
log:
 
  local:
   
    location: /data/harbor/log

4、创建目录

mkdir -p /data/harbor/data
mkdir -p /data/harbor/log

5、安装 docker-compose依赖包

yum install docker-compose -y
或者已经下载了docker-compose-linux-x86_64安装文件的
cp docker-compose-linux-x86_64 /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
docker-compose version

6、安装harbor

cd /opt/harbor
./install.sh 
#安装后如果有修改配置,就用下面命令重新加载配置
./prepare

7、查看运行状态

[root@localhost harbor]# docker-compose ps
NAME                COMMAND                  SERVICE             STATUS              PORTS
harbor-core         "/harbor/entrypoint.…"   core                running (healthy)
harbor-db           "/docker-entrypoint.…"   postgresql          running (healthy)
harbor-jobservice   "/harbor/entrypoint.…"   jobservice          running (healthy)
harbor-log          "/bin/sh -c /usr/loc…"   log                 running (healthy)   127.0.0.1:1514->10514/tcp
harbor-portal       "nginx -g 'daemon of…"   portal              running (healthy)
nginx               "nginx -g 'daemon of…"   proxy               running (healthy)   0.0.0.0:1800->8080/tcp, :::1800->8080/tcp, 0.0.0.0:443->8443/tcp, :::443->8443/tcp
redis               "redis-server /etc/r…"   redis               running (healthy)
registry            "/home/harbor/entryp…"   registry            running (healthy)
registryctl         "/home/harbor/start.…"   registryctl         running (healthy)
[root@localhost harbor]#

3.3 docker 添加仓库地址

在两台文件服务器的docker中添加harbor地址

vim /etc/docker/daemon.json
{
        "graph": "/data/docker",
        "storage-driver": "overlay2",
        "insecure-registries": ["http://10.46.137.152:1800"],
        "registry-mirrors": [
                "https://q2gr04k2.mirror.aliyuncs.com"],
        "exec-opts": ["native.cgroupdriver=systemd"],
        "bip": "172.7.21.1/24",
        "live-restore": true
}

systemctl daemon-reload
systemctl restart docker

验证是否能登录harbor,如果最后打印Login Succeeded 表示登录成功

root@localhost harbor]# docker login 10.46.137.152:1800
Username: admin
Password:
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded

访问网站:http://10.46.137.152:1800 或 https://10.46.137.152(harbor.yml 中 http 端口为 1800,https 端口为 443)看是否能正常登录(浏览器用谷歌,ie可能打不开)
常用命令

docker-compose down         ----停止并卸载harbor服务
docker-compose up -d        ----启动harbor服务

3.4验证推送镜像

创建一个com 的项目。
在这里插入图片描述

#下载一个镜像,如
docker pull docker.io/calico/cni:v3.19.4
docker tag docker.io/calico/cni:v3.19.4 10.46.137.152:1800/com/cni:v3.19.4
docker push 10.46.137.152:1800/com/cni:v3.19.4

如果能推送成功,在harbor网站中能看到,表示harbor的安装配置都正常。

4、k8s基本组件安装

4.1所有k8s节点安装Containerd作为Runtime

4.1.1下载解压

wget https://github.com/containerd/containerd/releases/download/v1.6.1/cri-containerd-cni-1.6.1-linux-amd64.tar.gz
#解压
tar -C / -xzf cri-containerd-cni-1.6.1-linux-amd64.tar.gz
#创建服务启动文件
cat > /etc/systemd/system/containerd.service <<EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target

[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=infinity
TasksMax=infinity
OOMScoreAdjust=-999

[Install]
WantedBy=multi-user.target
EOF

4.1.2配置Containerd所需的模块

cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF

4.1.3加载模块

systemctl restart systemd-modules-load.service

4.1.4配置Containerd所需的内核

cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables  = 1
net.ipv4.ip_forward                 = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
# 加载内核

sysctl --system

4.1.5创建Containerd的配置文件

注意修改其中的harbor仓库地址

#创建必要的目录
mkdir -p /etc/containerd
mkdir -p /data/containerd
#生成配置文件
containerd config default | tee /etc/containerd/config.toml

#修改Containerd的配置文件
#修改私有仓库的地址
cat > /etc/containerd/config.toml << EOF
root = "/data/containerd"
state = "/run/containerd"
oom_score = -999
[grpc]
  address = "/run/containerd/containerd.sock"
  uid = 0
  gid = 0
  max_recv_message_size = 16777216
  max_send_message_size = 16777216

[debug]
  address = ""
  uid = 0
  gid = 0
  level = ""

[metrics]
  address = ""
  grpc_histogram = false

[cgroup]
  path = ""

[plugins]
  [plugins.cgroups]
    no_prometheus = false
  [plugins.cri]
    stream_server_address = "127.0.0.1"
    stream_server_port = "0"
    disable_apparmor = true
    enable_selinux = false
    sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6"
    stats_collect_period = 10
    systemd_cgroup = true
    enable_tls_streaming = false
    max_container_log_line_size = 16384
    [plugins.cri.containerd]
      snapshotter = "overlayfs"
      no_pivot = false
      [plugins.cri.containerd.default_runtime]
        runtime_type = "io.containerd.runtime.v1.linux"
        runtime_engine = ""
        runtime_root = ""
      [plugins.cri.containerd.untrusted_workload_runtime]
        runtime_type = ""
        runtime_engine = ""
        runtime_root = ""
    [plugins.cri.cni]
      bin_dir = "/opt/cni/bin"
      conf_dir = "/etc/cni/net.d"
      conf_template = "/etc/cni/net.d/10-default.conf"
    [plugins.cri.registry]
      [plugins.cri.registry.mirrors]
        [plugins.cri.registry.mirrors."docker.io"]
          endpoint = [
            "https://docker.mirrors.ustc.edu.cn",
            "http://hub-mirror.c.163.com"
          ]
        [plugins.cri.registry.mirrors."gcr.io"]
          endpoint = [
            "https://gcr.mirrors.ustc.edu.cn"
          ]
        [plugins.cri.registry.mirrors."k8s.gcr.io"]
          endpoint = [
            "https://gcr.mirrors.ustc.edu.cn/google-containers/"
          ]
        [plugins.cri.registry.mirrors."quay.io"]
          endpoint = [
            "https://quay.mirrors.ustc.edu.cn"
          ]
        [plugins.cri.registry.mirrors."10.46.137.152:1800"]
          endpoint = [
            "http://10.46.137.152:1800"
          ]
      [plugins.cri.registry.configs]
        [plugins.cri.registry.configs."10.46.137.152:1800".tls]
          insecure_skip_verify = true
        [plugins.cri.registry.configs."10.46.137.152:1800".auth]
          username = "admin"
          password = "123456@34344"
    [plugins.cri.x509_key_pair_streaming]
      tls_cert_file = ""
      tls_key_file = ""
  [plugins.diff-service]
    default = ["walking"]
  [plugins.linux]
    shim = "containerd-shim"
    runtime = "runc"
    runtime_root = ""
    no_shim = false
    shim_debug = false
  [plugins.opt]
    path = "/opt/containerd"
  [plugins.restart]
    interval = "10s"
  [plugins.scheduler]
    pause_threshold = 0.02
    deletion_threshold = 0
    mutation_threshold = 100
    schedule_delay = "0s"
    startup_delay = "100ms"
EOF

4.1.6启动并设置为开机启动

systemctl daemon-reload
systemctl enable --now containerd

可能需要用的重启containerd 命令

systemctl daemon-reload
systemctl restart containerd

验证是否可以下载镜像,10.46.137.152:1800/com/cni:v3.19.4镜像得自己提前推送到私有仓库

#containerd 拉取容器
 crictl pull 10.46.137.152:1800/com/cni:v3.19.4

4.2 所有k8s节点安装supervisor

参考链接:https://blog.csdn.net/m0_37322399/article/details/111144507
supervisor是一个用python语言编写的进程管理工具,它可以很方便的监听、启动、停止、重启一个或多个进程。当一个进程意外被杀死,supervisor监听到进程死后,可以很方便的让进程自动恢复,不再需要程序员或系统管理员自己编写代码来控制。
1、准备好安装包,如下:
meld3-1.0.2.tar.gz
setuptools-65.3.0.tar.gz
supervisor-4.2.4.tar.gz
2、解压

tar xzf meld3-1.0.2.tar.gz -C /opt
tar xzf setuptools-65.3.0.tar.gz -C /opt
tar xzf supervisor-4.2.4.tar.gz -C /opt

3、安装

[root@k8s-master01 ~]# cd /opt/
#安装meld3,如果有报错,可以用yum install python-setuptools检查是否已经安装,有的系统默认已经安装的

[root@k8s-master01 opt]# cd meld3-1.0.2/
[root@k8s-master01 meld3-1.0.2]#python3 setup.py install
#安装setuptools
[root@k8s-master01 meld3-1.0.2]# cd ..
[root@k8s-master01 opt]# cd setuptools-65.3.0/
[root@k8s-master01 setuptools-65.3.0]# python3 setup.py install
#安装   supervisor

[root@k8s-master01 setuptools-65.3.0]# cd ..
[root@k8s-master01 opt]# cd supervisor-4.2.4/
[root@k8s-master01 supervisor-4.2.4]# python3 setup.py install

4、检查是否生成以下文件

[root@k8s-master01 supervisor-4.2.4]# ll /usr/local/bin/ |grep super
-rwxr-xr-x 1 root root  419 Oct 19 17:57 echo_supervisord_conf
-rwxr-xr-x 1 root root  403 Oct 19 17:57 supervisorctl
-rwxr-xr-x 1 root root  399 Oct 19 17:57 supervisord

#验证是否安装成功
[root@k8s-master01 supervisor-4.2.4]# supervisorctl --help

5、supervisor配置

#创建 /etc/supervisor 目录
mkdir /etc/supervisor
mkdir /etc/supervisor/conf.d
#echo_supervisord_conf 生成supervisord.conf
echo_supervisord_conf > /etc/supervisor/supervisord.conf

修改/etc/supervisor/supervisord.conf文件内容

vim /etc/supervisor/supervisord.conf
[unix_http_server] 
file=/var/run/supervisor.sock ; (the path to the socket file)

[supervisord] 
logfile=/var/log/supervisord.log ; (main log file;default $CWD/supervisord.log) 
pidfile=/var/run//supervisord.pid ; (supervisord pidfile;default supervisord.pid)
[supervisorctl] 
serverurl=unix:///var/run/supervisor.sock ; use a unix:// URL for a unix socket

[include]
files = conf.d/*.ini

根据上述修改的路径,创建相应的文件和添加权限

touch /var/run/supervisor.sock
touch /var/log/supervisord.log 
touch /var/run//supervisord.pid  
添加权限 
chmod 777 /var/run
chmod 777 /var/log

6、启动supervisor(启动应用,可以不执行)

#unlink一下
unlink /var/run/supervisor.sock
supervisord -c /etc/supervisor/supervisord.conf

7、将supervisor加入到开机启动服务中

cat > /usr/lib/systemd/system/supervisord.service << EOF
[Unit]
Description=Supervisor daemon
 
[Service]
Type=forking
ExecStart=/usr/local/bin/supervisord -c /etc/supervisor/supervisord.conf
ExecStop=/usr/local/bin/supervisorctl shutdown
ExecReload=/usr/local/bin/supervisorctl reload
KillMode=process
Restart=on-failure
RestartSec=42s
[Install]
WantedBy=multi-user.target
EOF

8、使能服务

systemctl enable supervisord

9、验证是否使能成功

systemctl is-enabled supervisord

出现enable说明成功
10、查看状态
如果出现 active (running) 表示启动成功

[root@k8s-node01 supervisor-4.2.4]# service supervisord status
Redirecting to /bin/systemctl status supervisord.service
● supervisord.service - Supervisor daemon
   Loaded: loaded (/usr/lib/systemd/system/supervisord.service; enabled; vendor preset: disabled)
   Active: active (running) since Thu 2023-02-23 10:34:26 CST; 11s ago
  Process: 173903 ExecStart=/usr/local/bin/supervisord -c /etc/supervisor/supervisord.conf (code=>
 Main PID: 173906 (supervisord)
    Tasks: 1
   Memory: 18.0M
   CGroup: /system.slice/supervisord.service
           └─173906 /usr/bin/python3 /usr/local/bin/supervisord -c /etc/supervisor/supervisord.co>

Feb 23 10:34:26 k8s-node01 systemd[1]: Starting Supervisor daemon...
Feb 23 10:34:26 k8s-node01 systemd[1]: Started Supervisor daemon.

11、常用的supervisor命令

#supervisor的服务器端部分启动命令:
sudo unlink /var/run/supervisor.sock
supervisord -c /etc/supervisor/supervisord.conf     #此时默认开启了所有服务
service supervisord start #启动程序
service supervisord stop #停止程序
service supervisord status #查看状态

#supervisor的客户端部分命令:
supervisorctl status 查看进程运行状态
supervisorctl start 进程名 启动进程
supervisorctl stop 进程名 关闭进程
supervisorctl restart 进程名 重启进程
supervisorctl update 重新载入配置文件
supervisorctl shutdown 关闭supervisord
supervisorctl clear 进程名 清空进程日志
supervisorctl 进入到交互模式下。使用help查看所有命令。
start stop restart + all 表示启动,关闭,重启所有进程。

4.3 etcd安装(安装奇数个)

4.3.1 解压etcd的安装文件(如果master节点是奇数个,所有master节点操作)

注意etcd 安装奇数个,不能安装偶数个,容易产生脑裂
https://github.com/etcd-io/etcd/
进入可以选择自己需要的版本,最新版本已经是3.5.7

tar xzf etcd-v3.5.0-linux-amd64.tar.gz -C /opt
cd /opt
mv etcd-v3.5.0-linux-amd64 etcd-v3.5.0
ln -s etcd-v3.5.0 etcd

4.3.2 创建用户(所有master节点操作)

[root@k8s-master01 etcd]#useradd -s /sbin/nologin -M etcd
[root@k8s-master01 etcd]# id etcd
uid=1000(etcd) gid=1000(etcd) groups=1000(etcd)

4.3.3 下载证书生成工具(只有master01操作)

master01节点下载证书生成工具
wget "https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl_1.6.1_linux_amd64" -O /usr/bin/cfssl
wget "https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssljson_1.6.1_linux_amd64" -O /usr/bin/cfssljson
chmod +x /usr/bin/cfssl /usr/bin/cfssljson
#如果cfssl相关文件已经下载下来的,也可以直接拷贝到/usr/bin/

4.3.4 生成etcd相关证书(只有master01操作)

mkdir /data/ssl
cd /data/ssl
cat > ca-csr.json  << EOF 
{
  "CN": "kubernetes",
  "key": {
      "algo": "rsa",
      "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "kubemsb",
      "OU": "CN"
    }
  ],
  "ca": {
          "expiry": "876000h"
  }
}

EOF
cat > ca-config.json << EOF
{
  "signing": {
      "default": {
          "expiry": "876000h"
        },
      "profiles": {
          "kubernetes": {
              "usages": [
                  "signing",
                  "key encipherment",
                  "server auth",
                  "client auth"
              ],
              "expiry": "876000h"
          }
      }
  }
}
EOF
#把所有可能用到的ip都添加进去,包括vip

cat > etcd-peer-csr.json << EOF
{
  "CN": "etcd",
  "hosts": [
   "127.0.0.1",
   "10.46.137.147",
   "10.46.137.148",
   "10.46.137.149",
   "10.46.137.150",
   "10.46.137.151",
   "10.46.137.152",
   "10.46.137.153",
   "10.46.137.154",
   "10.46.137.155",
   "10.46.137.156",
   "10.46.137.157",
   "10.46.137.158"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [{
    "C": "CN",
    "ST": "Beijing",
    "L": "Beijing",
    "O": "kubemsb",
    "OU": "CN"
  }]
}
EOF
[root@k8s-master01 ssl]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca
2023/02/23 10:49:00 [INFO] generating a new CA key and certificate from CSR
2023/02/23 10:49:00 [INFO] generate received request
2023/02/23 10:49:00 [INFO] received CSR
2023/02/23 10:49:00 [INFO] generating key: rsa-2048
2023/02/23 10:49:00 [INFO] encoded CSR
2023/02/23 10:49:00 [INFO] signed certificate with serial number 317728313246672999037024809317163911810082026375

[root@k8s-master01 ssl]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-peer-csr.json | cfssljson  -bare etcd-peer
2023/02/23 10:49:33 [INFO] generate received request
2023/02/23 10:49:33 [INFO] received CSR
2023/02/23 10:49:33 [INFO] generating key: rsa-2048
2023/02/23 10:49:33 [INFO] encoded CSR
2023/02/23 10:49:33 [INFO] signed certificate with serial number 388585762486242927578134841818441011942224171842
2023/02/23 10:49:33 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

经过上述操作,生成的文件如下:
master01生成的证书直接拷贝到其他节点使用

[root@k8s-master01 ssl]# ll
-rw-r--r-- 1 root root  358 Oct 25 14:26 ca-config.json
-rw-r--r-- 1 root root 1001 Oct 25 14:49 ca.csr
-rw-r--r-- 1 root root  257 Oct 25 14:26 ca-csr.json
-rw-r--r-- 1 root root 1.7K Oct 25 14:49 ca-key.pem
-rw-r--r-- 1 root root 1.4K Oct 25 14:49 ca.pem
-rw-r--r-- 1 root root 1.2K Oct 26 14:40 etcd-peer.csr
-rw-r--r-- 1 root root  455 Oct 26 14:34 etcd-peer-csr.json
-rw------- 1 root root 1.7K Oct 26 14:40 etcd-peer-key.pem
-rw-r--r-- 1 root root 1.5K Oct 26 14:40 etcd-peer.pem
# master01生成的证书直接拷贝到其他节点使用
#现在master02、master03创建/data/ssl目录
[root@k8s-master01 ssl]# scp *.pem root@10.46.137.148:/data/ssl
[root@k8s-master01 ssl]# scp *.pem root@10.46.137.155:/data/ssl

4.3.5 创建etcd的启动文件(所有master节点都操作)

#1、创建目录存放证书
[root@k8s-master01 ssl]# cd /opt/etcd
[root@k8s-master01 etcd]# mkdir certs
[root@k8s-master01 etcd]# cp /data/ssl/{ca*.pem,etcd*.pem} certs/
[root@k8s-master01 etcd]# ll certs/
total 16K
-rw-r--r-- 1 etcd etcd 1.7K Oct 26 14:58 ca-key.pem
-rw-r--r-- 1 etcd etcd 1.4K Oct 26 14:58 ca.pem
-rw------- 1 etcd etcd 1.7K Oct 26 14:58 etcd-peer-key.pem
-rw-r--r-- 1 etcd etcd 1.5K Oct 26 14:58 etcd-peer.pem
#创建程序启动文件
#其他master节点只需要改listen-peer-urls、listen-client-urls、initial-advertise-peer-urls、advertise-client-urls此几处的ip地址 和name
[root@k8s-master01 etcd]#vim etcd-server-startup.sh
#!/bin/sh
./etcd --name etcd_server_147 \
       --data-dir /data/etcd/etcd-server \
       --listen-peer-urls https://10.46.137.147:2380 \
       --listen-client-urls https://10.46.137.147:2379,http://127.0.0.1:2379 \
       --quota-backend-bytes 8000000000 \
       --initial-advertise-peer-urls https://10.46.137.147:2380 \
       --advertise-client-urls https://10.46.137.147:2379,http://127.0.0.1:2379 \
       --initial-cluster  etcd_server_147=https://10.46.137.147:2380,etcd_server_148=https://10.46.137.148:2380,etcd_server_155=https://10.46.137.155:2380 \
        --initial-cluster-state=new \
        --cert-file ./certs/etcd-peer.pem \
        --key-file ./certs/etcd-peer-key.pem \
        --peer-cert-file ./certs/etcd-peer.pem \
        --peer-key-file ./certs/etcd-peer-key.pem \
        --trusted-ca-file ./certs/ca.pem \
        --peer-trusted-ca-file ./certs/ca.pem
[root@k8s-master01 etcd]# chmod +x etcd-server-startup.sh       
#创建supervisor的启动文件
#其他master节点只需要改program:etcd_server_147此处的名称
[root@k8s-master01 etcd]# vim /etc/supervisor/conf.d/etcd-server.ini
[program:etcd_server_147]
command=/opt/etcd/etcd-server-startup.sh                        ; the program (relative uses PATH, can take args)
numprocs=1                                                      ; number of processes copies to start (def 1)
directory=/opt/etcd                                            ; directory to cwd to before exec (def no cwd)
autostart=true                                                  ; start at supervisord start (default: true)
autorestart=true                                                ; retstart at unexpected quit (default: true)
startsecs=30                                                    ; number of secs prog must stay running (def. 1)
startretries=3                                                  ; max # of serial start failures (default 3)
exitcodes=0,2                                                   ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                 ; signal used to kill process (default TERM)
stopwaitsecs=10                                                 ; max num secs to wait b4 SIGKILL (default 10)
user=etcd                                                       ; setuid to this UNIX account to run the program
redirect_stderr=true                                            ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/etcd-server/etcd.stdout.log           ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                        ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false
#创建相关的目录
[root@k8s-master01 etcd]# mkdir -p /data/etcd/etcd-server
[root@k8s-master01 etcd]# mkdir -p /data/logs/etcd-server
#为目录授权
[root@k8s-master01 etcd]# chown -R etcd.etcd /data/etcd/etcd-server /data/logs/etcd-server
[root@k8s-master01 etcd]#chmod -R 777 /data/etcd
[root@k8s-master01 etcd]# chown -R etcd.etcd /opt/etcd-v3.5.0
#启动etcd服务并检查 
[root@k8s-master01 etcd]# supervisorctl update
etcd_server_147: added process group
[root@k8s-master01 etcd]# supervisorctl status
etcd_server_147                  RUNNING   pid 74425, uptime 0:00:23

4.3.6在其他master节点上进行安装

注意修改配置的ip地址

[root@k8s-master02 etcd]# supervisorctl status
etcd_server_148                  RUNNING   pid 74425, uptime 0:00:23
[root@k8s-master03 etcd]# supervisorctl status
etcd_server_155                  RUNNING   pid 74425, uptime 0:00:23

4.3.7 检查集群状态

[root@k8s-master01 etcd]# /opt/etcd/etcdctl endpoint health
127.0.0.1:2379 is healthy: successfully committed proposal: took = 1.629704ms
[root@k8s-master01 etcd]# /opt/etcd/etcdctl member list
7723f26ba611bb19, started, etcd_server_147, https://10.46.137.147:2380, http://127.0.0.1:2379,https://10.46.137.147:2379, false
a985aba4a7a84899, started, etcd_server_155, https://10.46.137.155:2380, http://127.0.0.1:2379,https://10.46.137.155:2379, false
ccccfebdaa208dbf, started, etcd_server_148, https://10.46.137.148:2380, http://127.0.0.1:2379,https://10.46.137.148:2379, false

4.4 高可用配置

4.4.1安装keepalived和haproxy服务(在所有master节点操作)

yum -y install keepalived 
yum -y install haproxy

4.4.2修改haproxy配置文件(配置文件一样)

cp /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.bak
cat >/etc/haproxy/haproxy.cfg<<"EOF"
global
 maxconn 2000
 ulimit-n 16384
 log 127.0.0.1 local0 err
 stats timeout 30s

defaults
 log global
 mode http
 option httplog
 timeout connect 5000
 timeout client 50000
 timeout server 50000
 timeout http-request 15s
 timeout http-keep-alive 15s


frontend monitor-in
 bind *:33305
 mode http
 option httplog
 monitor-uri /monitor

frontend k8s-master
 bind 0.0.0.0:8443
 bind 127.0.0.1:8443
 mode tcp
 option tcplog
 tcp-request inspect-delay 5s
 default_backend k8s-master


backend k8s-master
 mode tcp
 option tcplog
 option tcp-check
 balance roundrobin
 default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
 server  k8s-master01  10.46.137.147:6443 check
 server  k8s-master02  10.46.137.148:6443 check
 server  k8s-master03  10.46.137.155:6443 check

EOF

4.4.3配置keepalived

每个master节点都操作,注意修改ip地址,unicast_src_ip 为本机ip,virtual_ipaddress为vip ,interface 为自己的网卡名称

k8s-master01的配置
cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived

global_defs {
    router_id LVS_DEVEL       #同一个小组要不一样
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state MASTER
    interface ens192
    unicast_src_ip 10.46.137.147
    unicast_peer {
    10.46.137.148
    10.46.137.155
    }
    virtual_router_id 51       #这个同一个分组要一样
    priority 100
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        10.46.137.156
    }
    track_script {
      chk_apiserver
} }


EOF
k8s-master02的配置
#cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived

global_defs {
    router_id LVS_DEVEL2
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens192
    unicast_src_ip 10.46.137.148
    unicast_peer {
    10.46.137.147
    10.46.137.155
    }
    virtual_router_id 51
    priority 80
    nopreempt
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        10.46.137.156
    }
    track_script {
      chk_apiserver
} }

EOF
k8s-master03的配置
# k8s-master03: VRRP BACKUP, lowest priority (50), nopreempt — same virtual_router_id (51) and VIP.
#cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived

global_defs {
    router_id LVS_DEVEL3
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens192
    unicast_src_ip 10.46.137.155
    unicast_peer {
    10.46.137.147
    10.46.137.148
   }
    virtual_router_id 51
    priority 50
    nopreempt
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        10.46.137.156
    }
    track_script {
      chk_apiserver
} }

EOF

4.4.4健康检查脚本配置(master节点都要)

# Write the haproxy health-check script invoked by keepalived's vrrp_script.
# The heredoc delimiter MUST be quoted ('EOF'): with an unquoted EOF the outer
# shell expands $(seq 1 3), $(pgrep haproxy), $err and $check_code at
# generation time, embedding stale values and producing a broken script.
cat > /etc/keepalived/check_apiserver.sh << 'EOF'
#!/bin/bash

err=0
for k in $(seq 1 3)
do
    check_code=$(pgrep haproxy)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done

if [[ $err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi
EOF
# Grant execute permission to the health-check script
chmod +x /etc/keepalived/check_apiserver.sh

4.4.5启动服务

systemctl daemon-reload
systemctl enable --now haproxy
systemctl enable --now keepalived

注意: 如果后期有要添加端口,添加后重启haproxy,还要重启keepalived
常用命令

systemctl start haproxy              //启动haproxy
systemctl stop haproxy              //停止haproxy
systemctl restart haproxy            //重启haproxy

systemctl start keepalived         //启动keepalived
systemctl stop keepalived          //停止keepalived
systemctl restart keepalived        //重启keepalived

4.5 apiserver安装(所有master节点都安装)

4.5.1k8s 软件包解压(所有master节点操作)

# Unpack the k8s server binaries under /opt and version the directory via a symlink.
tar xzf kubernetes-server-linux-amd64.tar.gz -C /opt
cd /opt
mv kubernetes kubernetes-v1.22
ln -s kubernetes-v1.22 kubernetes
cd kubernetes/
rm -f kubernetes-src.tar.gz    # source tarball, not needed — delete
cd server/bin/ 
rm -f *.tar      # image tarballs only needed for kubeadm installs — delete
rm -rf *_tag
# Make kubectl available on PATH
ln -s /opt/kubernetes/server/bin/kubectl /usr/bin/kubectl

4.5.2 apiserver相关证书生成(master01操作)

cd /data/ssl/
# List every relevant IP address, including the VIP.
# 'EOF' is quoted: the JSON body is literal, no expansion is wanted here.
cat > kube-apiserver-csr.json << 'EOF'
{
"CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.0.1",
     "10.96.0.1",
    "10.46.137.147",
    "10.46.137.148",
    "10.46.137.149",
    "10.46.137.150",
    "10.46.137.151",
    "10.46.137.152",
    "10.46.137.153",
    "10.46.137.154",
    "10.46.137.155",
    "10.46.137.156",
    "10.46.137.157",
    "10.46.137.158",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "kubemsb",
      "OU": "CN"
    }
  ]
}
EOF
# Generate the apiserver certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiserver
# Create token.csv. EOF is intentionally UNQUOTED here so that
# $(head -c 16 /dev/urandom ...) expands to a random bootstrap token now.
cat > token.csv << EOF
$(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
# Copy certs to /opt/kubernetes/server/bin/certs/; -p makes re-runs idempotent
mkdir -p /opt/kubernetes/server/bin/certs/
cp /data/ssl/{kube-apiserver*.pem,token.csv} /opt/kubernetes/server/bin/certs/
cp /opt/etcd/certs/* /opt/kubernetes/server/bin/certs/

4.5.3 apiserver启动脚本配置

其他master节点注意修改bind-address、advertise-address 的ip,ip为本机ip
[root@k8s-master01 ssl]# cd /opt/kubernetes/server/bin
[root@k8s-master01 bin]# 
# Quote the heredoc delimiter: an unquoted EOF strips the \-newline
# continuations (collapsing the whole command onto one line in the written
# script) and would expand any $ text in the body.
cat > kube-apiserver.sh << 'EOF'
#!/bin/bash
./kube-apiserver \
  --enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
  --anonymous-auth=false \
  --bind-address=10.46.137.147 \
  --secure-port=6443 \
  --advertise-address=10.46.137.147 \
  --insecure-port=0 \
  --authorization-mode=Node,RBAC \
  --runtime-config=api/all=true \
  --enable-bootstrap-token-auth \
  --service-cluster-ip-range=10.96.0.0/16 \
  --token-auth-file=./certs/token.csv \
  --service-node-port-range=30-32767 \
  --tls-cert-file=./certs/kube-apiserver.pem  \
  --tls-private-key-file=./certs/kube-apiserver-key.pem \
  --client-ca-file=./certs/ca.pem \
  --kubelet-client-certificate=./certs/kube-apiserver.pem \
  --kubelet-client-key=./certs/kube-apiserver-key.pem \
  --service-account-key-file=./certs/ca-key.pem \
  --service-account-signing-key-file=./certs/ca-key.pem  \
  --service-account-issuer=api \
  --etcd-cafile=./certs/ca.pem \
  --etcd-certfile=./certs/etcd-peer.pem \
  --etcd-keyfile=./certs/etcd-peer-key.pem \
  --etcd-servers=https://10.46.137.147:2379,https://10.46.137.148:2379,https://10.46.137.155:2379 \
  --enable-swagger-ui=true \
  --allow-privileged=true \
  --apiserver-count=3 \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/data/logs/kubernetes/kube-apiserver/kube-apiserver-audit.log \
  --event-ttl=1h \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/data/logs/kubernetes/kube-apiserver \
  --v=4
EOF

[root@k8s-master01 bin]# chmod +x kube-apiserver.sh
#创建目录
[root@k8s-master01 bin]# mkdir -p /data/logs/kubernetes/kube-apiserver

4.5.4 创建supervisor 配置

[root@k8s-master01 bin]# cat > /etc/supervisor/conf.d/kube-apiserver.ini << EOF
[program:kube-apiserver-147]
command=/opt/kubernetes/server/bin/kube-apiserver.sh            ; the program (relative uses PATH, can take args)
numprocs=1                                                      ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin                            ; directory to cwd to before exec (def no cwd)
autostart=true                                                  ; start at supervisord start (default: true)
autorestart=true                                                ; retstart at unexpected quit (default: true)
startsecs=30                                                    ; number of secs prog must stay running (def. 1)
startretries=3                                                  ; max # of serial start failures (default 3)
exitcodes=0,2                                                   ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                 ; signal used to kill process (default TERM)
stopwaitsecs=10                                                 ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                       ; setuid to this UNIX account to run the program
redirect_stderr=true                                            ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-apiserver/apiserver.stdout.log        ; stderr log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                    ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                        ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                     ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false
EOF

4.5.5 supervisor 启动apiserver

[root@k8s-master01 bin]# supervisorctl update
kube-apiserver-147: added process group
[root@k8s-master01 bin]# supervisorctl status
etcd_server_147                 RUNNING   pid 9232, uptime 1:36:06 
kube-apiserver-147              RUNNING   pid 10804, uptime 0:00:35 

重复上述步骤,证书拷贝到其他节点直接使用,注意修改ip地址和名称

4.5.6 检查是否正常

[root@k8s-master01 bin]# netstat -lntup | grep kube-api
tcp        0      0 10.46.137.147:6443      0.0.0.0:*               LISTEN      74172/./kube-apiser

4.6 kubectl 安装(所有master节点操作)

kubectl 是一个命令工具,可以管理节点

4.6.1 创建kubectl证书请求文件

[root@k8s-master01 bin]# cd /data/ssl
[root@k8s-master01 ssl]# cat > admin-csr.json << EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:masters",
      "OU": "system"
    }
  ]
}
EOF
[root@k8s-master01 ssl]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
[root@k8s-master01 ssl]# cp admin*.pem /opt/kubernetes/server/bin/certs
#以上生成证书的操作在master01操作,证书直接拷贝到其他节点使用
[root@k8s-master01 ssl]# cd /opt/kubernetes/server/bin
#kube.config 为 kubectl 的配置文件,包含访问 apiserver 的所有信息,如 apiserver 地址、CA 证书和自身使用的证书,10.46.137.156为vip地址
[root@k8s-master01 bin]# kubectl config set-cluster kubernetes --certificate-authority=./certs/ca.pem --embed-certs=true --server=https://10.46.137.156:8443 --kubeconfig=kube.config
[root@k8s-master01 bin]# kubectl config set-credentials admin --client-certificate=./certs/admin.pem --client-key=./certs/admin-key.pem --embed-certs=true --kubeconfig=kube.config
[root@k8s-master01 bin]# kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=kube.config
[root@k8s-master01 bin]# kubectl config use-context kubernetes --kubeconfig=kube.config

4.6.2 角色绑定

[root@k8s-master01 bin]# export KUBECONFIG=$HOME/.kube/config
[root@k8s-master01 bin]# mkdir ~/.kube
[root@k8s-master01 bin]# cp kube.config ~/.kube/config
#下面这句在一个节点执行成功后,在其他节点执行就会提示已经添加
[root@k8s-master01 bin]# kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes --kubeconfig=/root/.kube/config
clusterrolebinding.rbac.authorization.k8s.io/kube-apiserver:kubelet-apis created
#查看集群组件状态
[root@k8s-master01 bin]# kubectl get componentstatuses
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS      MESSAGE                                                                                        ERROR
controller-manager   Unhealthy   Get "https://127.0.0.1:10257/healthz": dial tcp 127.0.0.1:10257: connect: connection refused
scheduler            Unhealthy   Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused
etcd-0               Healthy     {"health":"true","reason":""}
etcd-1               Healthy     {"health":"true","reason":""}
etcd-2               Healthy     {"health":"true","reason":""}

4.7 controller-manager安装(所有master节点操作)

4.7.1 controller-manager证书生成

[root@k8s-master01 bin]# cd /data/ssl
#所有ip都写上,包括vip
[root@k8s-master01 ssl]# cat > kube-controller-manager-csr.json << EOF
{
    "CN": "system:kube-controller-manager",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "hosts": [
        "127.0.0.1",
        "192.168.0.1",
         "10.96.0.1",
        "10.46.137.147",
        "10.46.137.148",
        "10.46.137.149",
        "10.46.137.150",
        "10.46.137.151",
        "10.46.137.152",
        "10.46.137.153",
        "10.46.137.154",
        "10.46.137.155",
        "10.46.137.156",
        "10.46.137.157",
        "10.46.137.158"
    ],
    "names": [
      {
        "C": "CN",
        "ST": "Beijing",
        "L": "Beijing",
        "O": "system:kube-controller-manager",
        "OU": "system"
      }
    ]
}
EOF
[root@k8s-master01 ssl]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
[root@k8s-master01 ssl]# cp kube-controller-manager*.pem /opt/kubernetes/server/bin/certs
#以上生成证书的操作在master01操作,证书直接拷贝到其他节点使用

4.7.2 controller-manager启动脚本配置

[root@k8s-master01 ssl]# cd /opt/kubernetes/server/bin
# 'EOF' is quoted so the \-newline continuations (and any $ text) are written
# literally; an unquoted EOF would collapse the command onto one line.
[root@k8s-master01 bin]# cat > kube-controller-manager.sh << 'EOF'
#!/bin/sh
./kube-controller-manager \
  --port=0 \
  --secure-port=10257 \
  --bind-address=127.0.0.1 \
  --kubeconfig=./conf/kube-controller-manager.kubeconfig \
  --service-cluster-ip-range=10.96.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=./certs/ca.pem \
  --cluster-signing-key-file=./certs/ca-key.pem \
  --allocate-node-cidrs=true \
  --cluster-cidr=172.7.0.0/16 \
  --experimental-cluster-signing-duration=876000h \
  --root-ca-file=./certs/ca.pem \
  --service-account-private-key-file=./certs/ca-key.pem \
  --leader-elect=true \
  --feature-gates=RotateKubeletServerCertificate=true \
  --controllers=*,bootstrapsigner,tokencleaner \
  --horizontal-pod-autoscaler-sync-period=10s \
  --tls-cert-file=./certs/kube-controller-manager.pem \
  --tls-private-key-file=./certs/kube-controller-manager-key.pem \
  --use-service-account-credentials=true \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/data/logs/kubernetes/kube-controller-manager \
  --v=2
EOF
[root@k8s-master01 bin]# chmod +x kube-controller-manager.sh
[root@k8s-master01 bin]# mkdir -p /data/logs/kubernetes/kube-controller-manager
[root@k8s-master01 bin]# mkdir conf
#创建kube-controller-manager.kubeconfig文件
[root@k8s-master01 bin]# kubectl config set-cluster kubernetes --certificate-authority=./certs/ca.pem --embed-certs=true --server=https://10.46.137.156:8443 --kubeconfig=kube-controller-manager.kubeconfig
[root@k8s-master01 bin]# kubectl config set-context system:kube-controller-manager@kubernetes --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
[root@k8s-master01 bin]# kubectl config set-credentials system:kube-controller-manager --client-certificate=./certs/kube-controller-manager.pem --client-key=./certs/kube-controller-manager-key.pem --embed-certs=true  \
--kubeconfig=kube-controller-manager.kubeconfig
[root@k8s-master01 bin]# kubectl config use-context system:kube-controller-manager@kubernetes --kubeconfig=kube-controller-manager.kubeconfig
[root@k8s-master01 bin]# mv kube-controller-manager.kubeconfig conf

4.7.3 supervisor 的controller-manager配置

[root@k8s-master01 bin]# cat > /etc/supervisor/conf.d/kube-conntroller-manager.ini << EOF
[program:kube-controller-manager-147]
command=/opt/kubernetes/server/bin/kube-controller-manager.sh                     ; the program (relative uses PATH, can take args)
numprocs=1                                                                        ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin                                              ; directory to cwd to before exec (def no cwd)
autostart=true                                                                    ; start at supervisord start (default: true)
autorestart=true                                                                  ; retstart at unexpected quit (default: true)
startsecs=30                                                                      ; number of secs prog must stay running (def. 1)
startretries=3                                                                    ; max # of serial start failures (default 3)
exitcodes=0,2                                                                     ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                                   ; signal used to kill process (default TERM)
stopwaitsecs=10                                                                   ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                                         ; setuid to this UNIX account to run the program
redirect_stderr=false                                                             ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-controller-manager/controll.stdout.log  ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                                      ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                                          ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                                       ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                                       ; emit events on stdout writes (default false)
stderr_logfile=/data/logs/kubernetes/kube-controller-manager/controll.stderr.log  ; stderr log path, NONE for none; default AUTO
stderr_logfile_maxbytes=64MB                                                      ; max # logfile bytes b4 rotation (default 50MB)
stderr_logfile_backups=4                                                          ; # of stderr logfile backups (default 10)
stderr_capture_maxbytes=1MB                                                       ; number of bytes in 'capturemode' (default 0)
stderr_events_enabled=false
stopasgroup=true
killasgroup=true
EOF

4.7.4 supervisor 启动controller-manager

[root@k8s-master01 bin]# supervisorctl update
kube-apiserver-147: added process group
[root@k8s-master01 bin]# supervisorctl status
etcd_server_147                 RUNNING   pid 9232, uptime 1:36:06 
kube-apiserver-147              RUNNING   pid 10804, uptime 2:00:35 
kube-controller-manager-147     RUNNING   pid 10923, uptime 00:00:35

4.8 kube-scheduler安装(所有master节点操作)

4.8.1 kube-scheduler证书生成

[root@k8s-master01 bin]# cd /data/ssl
#所有ip都加上,包括vip
[root@k8s-master01 ssl]# cat > kube-scheduler-csr.json << EOF
{
    "CN": "system:kube-scheduler",
    "hosts": [
       "127.0.0.1",
       "192.168.0.1",
        "10.96.0.1",
        "10.46.137.147",
        "10.46.137.148",
        "10.46.137.149",
        "10.46.137.150",
        "10.46.137.151",
        "10.46.137.152",
        "10.46.137.153",
        "10.46.137.154",
        "10.46.137.155",
        "10.46.137.156",
        "10.46.137.157",
        "10.46.137.158"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
      {
        "C": "CN",
        "ST": "Beijing",
        "L": "Beijing",
        "O": "system:kube-scheduler",
        "OU": "system"
      }
    ]
}
EOF
[root@k8s-master01 ssl]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
[root@k8s-master01 ssl]# cp kube-scheduler*.pem /opt/kubernetes/server/bin/certs
#以上生成证书的操作在master01操作,证书直接拷贝到其他节点使用

4.8.2kube-scheduler启动脚本配置

[root@k8s-master01 ssl]# cd /opt/kubernetes/server/bin
# 'EOF' is quoted so the \-newline continuations are written literally;
# an unquoted EOF would collapse the command onto one line.
[root@k8s-master01 bin]# cat > kube-scheduler.sh << 'EOF'
#!/bin/sh
./kube-scheduler \
  --address=127.0.0.1 \
  --kubeconfig=./conf/kube-scheduler.kubeconfig \
  --leader-elect=true \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/data/logs/kubernetes/kube-scheduler \
  --v=2
EOF
[root@k8s-master01 bin]# chmod +x kube-scheduler.sh
[root@k8s-master01 bin]# mkdir -p /data/logs/kubernetes/kube-scheduler
#创建kube-scheduler.kubeconfig文件,10.46.137.156是vip地址
[root@k8s-master01 bin]# kubectl config set-cluster kubernetes --certificate-authority=./certs/ca.pem --embed-certs=true --server=https://10.46.137.156:8443 --kubeconfig=kube-scheduler.kubeconfig
[root@k8s-master01 bin]# kubectl config set-credentials system:kube-scheduler --client-certificate=./certs/kube-scheduler.pem --client-key=./certs/kube-scheduler-key.pem --embed-certs=true --kubeconfig=kube-scheduler.kubeconfig
[root@k8s-master01 bin]# kubectl config set-context system:kube-scheduler@kubernetes --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
[root@k8s-master01 bin]# kubectl config use-context system:kube-scheduler@kubernetes --kubeconfig=kube-scheduler.kubeconfig
[root@k8s-master01 bin]# mv kube-scheduler.kubeconfig conf

4.8.3 supervisor启动文件配置

[root@k8s-master01 bin]# cat > /etc/supervisor/conf.d/kube-scheduler.ini << EOF
[program:kube-scheduler-147]
command=/opt/kubernetes/server/bin/kube-scheduler.sh                     ; the program (relative uses PATH, can take args)
numprocs=1                                                               ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin                                     ; directory to cwd to before exec (def no cwd)
autostart=true                                                           ; start at supervisord start (default: true)
autorestart=true                                                         ; retstart at unexpected quit (default: true)
startsecs=30                                                             ; number of secs prog must stay running (def. 1)
startretries=3                                                           ; max # of serial start failures (default 3)
exitcodes=0,2                                                            ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                          ; signal used to kill process (default TERM)
stopwaitsecs=10                                                          ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                                ; setuid to this UNIX account to run the program
redirect_stderr=false                                                    ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-scheduler/scheduler.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                             ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                                 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                              ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                              ; emit events on stdout writes (default false)
stderr_logfile=/data/logs/kubernetes/kube-scheduler/scheduler.stderr.log ; stderr log path, NONE for none; default AUTO
stderr_logfile_maxbytes=64MB                                             ; max # logfile bytes b4 rotation (default 50MB)
stderr_logfile_backups=4                                                 ; # of stderr logfile backups (default 10)
stderr_capture_maxbytes=1MB                                              ; number of bytes in 'capturemode' (default 0)
stderr_events_enabled=false
EOF

4.8.4 supervisor 启动kube-scheduler

[root@k8s-master01 bin]# supervisorctl update
kube-scheduler-147: added process group
[root@k8s-master01 bin]# supervisorctl status

etcd_server_147                  RUNNING   pid 74425, uptime 5 days, 18:20:33
kube-apiserver-147               RUNNING   pid 74169, uptime 5 days, 18:24:35
kube-controller-manager-147      RUNNING   pid 129478, uptime 1 days, 19:27:41
kube-scheduler-147               RUNNING   pid 129669,  00:00:48

4.9 kubelet 安装(所有k8s节点安装)

4.9.1kubelet-bootstrap文件创建(在master01节点操作)

[root@k8s-master01 bin]# BOOTSTRAP_TOKEN=$(awk -F "," '{print $1}' /opt/kubernetes/server/bin/certs/token.csv)
[root@k8s-master01 bin]# kubectl config set-cluster kubernetes --certificate-authority=./certs/ca.pem --embed-certs=true --server=https://10.46.137.156:8443 --kubeconfig=kubelet-bootstrap.kubeconfig
[root@k8s-master01 bin]# kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=kubelet-bootstrap.kubeconfig
[root@k8s-master01 bin]# kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig
[root@k8s-master01 bin]# kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig
#将kubelet-bootstrap用户绑定到相应集群角色
[root@k8s-master01 bin]# kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=kubelet-bootstrap
[root@k8s-master01 bin]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig
[root@k8s-master01 bin]# kubectl describe clusterrolebinding cluster-system-anonymous
[root@k8s-master01 bin]# kubectl describe clusterrolebinding kubelet-bootstrap
#以上操作成功后
[root@k8s-master01 bin]# mv kubelet-bootstrap.kubeconfig conf

4.9.2 kubelet.json文件配置

#其他master和node节点注意修改ip
[root@k8s-master01 bin]# cat > kubelet.json << "EOF"
{
"kind": "KubeletConfiguration",
"apiVersion": "kubelet.config.k8s.io/v1beta1",
"authentication": {
"x509": {
"clientCAFile": "/opt/kubernetes/server/bin/certs/ca.pem"
},
"webhook": {
"enabled": true,
"cacheTTL": "2m0s"
},
"anonymous": {
"enabled": false
}
},
"authorization": {
"mode": "Webhook",
"webhook": {
"cacheAuthorizedTTL": "5m0s",
"cacheUnauthorizedTTL": "30s"
}
},
"address": "10.46.137.147",
"port": 10250,
"readOnlyPort": 10255,
"cgroupDriver": "systemd",
"hairpinMode": "promiscuous-bridge",
"serializeImagePulls": false,
"clusterDomain": "cluster.local.",
"clusterDNS": ["10.96.0.2"]
}
EOF
[root@k8s-master01 bin]# mv kubelet.json conf

4.9.3 创建kubelet启动文件

#kubelet.kubeconfig该文件会自动生成的
# 'EOF' is quoted so the \-newline continuations are written literally;
# an unquoted EOF would collapse the command onto one line.
# NOTE(review): --root-dir=/etc/cni/net.d is unusual — kubelet's data dir is
# normally /var/lib/kubelet; confirm this is intentional before production use.
[root@k8s-master01 bin]# cat > kubelet.sh << 'EOF'
#!/bin/sh
./kubelet \
  --bootstrap-kubeconfig=./conf/kubelet-bootstrap.kubeconfig \
  --cert-dir=./certs \
  --kubeconfig=./conf/kubelet.kubeconfig \
  --config=./conf/kubelet.json \
  --cni-bin-dir=/opt/cni/bin \
  --cni-conf-dir=/etc/cni/net.d \
  --container-runtime=remote \
  --container-runtime-endpoint=unix:///run/containerd/containerd.sock \
  --network-plugin=cni \
  --rotate-certificates \
  --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.2 \
  --root-dir=/etc/cni/net.d \
  --alsologtostderr=true \
  --max-pods=1000 \
  --logtostderr=false \
  --log-dir=/data/logs/kubernetes/kube-kubelet \
  --v=2
EOF
[root@k8s-master01 bin]# chmod +x kubelet.sh
[root@k8s-master01 bin]# mkdir -p /data/logs/kubernetes/kube-kubelet

4.9.5 supervisor 启动文件配置

[root@k8s-master01 bin]# cat > /etc/supervisor/conf.d/kube-kubelet.ini << EOF
[program:kube-kubelet-147]
command=/opt/kubernetes/server/bin/kubelet.sh     ; the program (relative uses PATH, can take args)
numprocs=1                                        ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin              ; directory to cwd to before exec (def no cwd)
autostart=true                                    ; start at supervisord start (default: true)
autorestart=true                                  ; retstart at unexpected quit (default: true)
startsecs=30                                      ; number of secs prog must stay running (def. 1)
startretries=3                                    ; max # of serial start failures (default 3)
exitcodes=0,2                                     ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                   ; signal used to kill process (default TERM)
stopwaitsecs=10                                   ; max num secs to wait b4 SIGKILL (default 10)
user=root                                         ; setuid to this UNIX account to run the program
redirect_stderr=true                              ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-kubelet/kubelet.stdout.log   ; stderr log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                      ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                          ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                       ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false
EOF

4.9.6 supervisor 启动kubelet

[root@k8s-master01 bin]# supervisorctl update
kube-kubelet-147: added process group
[root@k8s-master01 bin]# supervisorctl status

etcd_server_147                  RUNNING   pid 74425, uptime 5 days, 18:20:33
kube-apiserver-147               RUNNING   pid 74169, uptime 5 days, 18:24:35
kube-controller-manager-147      RUNNING   pid 129478, uptime 1 days, 19:27:41
kube-scheduler-147               RUNNING   pid 129478, uptime 1 days, 19:27:41
kube-kubelet-147                 RUNNING   pid 129669,  00:00:48

注意:node节点安装的时候要注意ip的修改,和创建相应的文件夹,/opt/kubernetes/server/bin、/opt/kubernetes/server/bin/conf、/opt/kubernetes/server/bin/certs
把bin目录下的kubectl、kubelet、kube-proxy、conf/ 、 certs/ 拷贝过去,并给755权限

4.10 kube-proxy安装(所有k8s节点都安装)

4.10.1kube-proxy证书生成

[root@k8s-master01 ~]# cd /data/ssl/
[root@k8s-master01 ssl]# cat > kube-proxy-csr.json << EOF
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "kubemsb",
      "OU": "CN"
    }
  ]
}
EOF
[root@k8s-master01 ssl]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
[root@k8s-master01 ssl]# cp kube-proxy*.pem /opt/kubernetes/server/bin/certs/

4.10.2 kube-proxy相关配置文件创建

#其他节点注意修改ip地址,yaml文件注意格式
[root@k8s-master01 bin]# cd /opt/kubernetes/server/bin
[root@k8s-master01 bin]# cat > kube-proxy.yaml << EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 10.46.137.147
clientConnection:
   kubeconfig: ./conf/kube-proxy.kubeconfig
clusterCIDR: 172.7.0.0/16
healthzBindAddress: 10.46.137.147:10256
kind: KubeProxyConfiguration
metricsBindAddress: 10.46.137.147:10249
mode: "ipvs"
EOF
[root@k8s-master01 bin]# mv kube-proxy.yaml conf
[root@k8s-master01 bin]# kubectl config set-cluster kubernetes --certificate-authority=./certs/ca.pem --embed-certs=true --server=https://10.46.137.156:8443 --kubeconfig=kube-proxy.kubeconfig
[root@k8s-master01 bin]# kubectl config set-credentials kube-proxy --client-certificate=./certs/kube-proxy.pem --client-key=./certs/kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig
[root@k8s-master01 bin]# kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig
[root@k8s-master01 bin]# kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
[root@k8s-master01 bin]# mv kube-proxy.kubeconfig conf
#kube-proxy.kubeconfig conf 文件可以直接拷贝到其他节点使用

4.10.3 kube-proxy启动脚本

# 'EOF' is quoted so the \-newline continuations are written literally;
# an unquoted EOF would collapse the command onto one line.
[root@k8s-master01 bin]# cat > kube-proxy.sh << 'EOF'
#!/bin/sh
./kube-proxy \
  --config=./conf/kube-proxy.yaml \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/data/logs/kubernetes/kube-proxy \
  --v=2
EOF
[root@k8s-master01 bin]# chmod +x kube-proxy.sh
[root@k8s-master01 bin]# mkdir -p /data/logs/kubernetes/kube-proxy

4.10.4 supervisor 启动文件配置

[root@k8s-master01 bin]# cat > /etc/supervisor/conf.d/kube-proxy.ini << EOF
[program:kube-proxy-147]
command=/opt/kubernetes/server/bin/kube-proxy.sh                     ; the program (relative uses PATH, can take args)
numprocs=1                                                           ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin                                 ; directory to cwd to before exec (def no cwd)
autostart=true                                                       ; start at supervisord start (default: true)
autorestart=true                                                     ; retstart at unexpected quit (default: true)
startsecs=30                                                         ; number of secs prog must stay running (def. 1)
startretries=3                                                       ; max # of serial start failures (default 3)
exitcodes=0,2                                                        ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                      ; signal used to kill process (default TERM)
stopwaitsecs=10                                                      ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                            ; setuid to this UNIX account to run the program
redirect_stderr=true                                                 ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-proxy/proxy.stdout.log     ; stderr log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                         ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                             ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                          ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false
EOF

4.10.5 supervisor 启动程序

[root@k8s-master01 bin]# supervisorctl update
kube-proxy-147: added process group
[root@k8s-master01 bin]# supervisorctl status

etcd_server_147                  RUNNING   pid 74425, uptime 5 days, 18:20:33
kube-apiserver-147               RUNNING   pid 74169, uptime 5 days, 18:24:35
kube-controller-manager-147      RUNNING   pid 129478, uptime 1 days, 19:27:41
kube-scheduler-147               RUNNING   pid 129478, uptime 1 days, 19:27:41

kube-kubelet-147                 RUNNING   pid 129669,  01:00:48
kube-proxy-147                   RUNNING   pid 129669,  00:00:48

4.10.6查看集群

[root@k8s-master01 bin]# kubectl  get node
NAME           STATUS   ROLES    AGE     VERSION
k8s-master01   Ready    <none>   5d16h   v1.22.1
k8s-master02   Ready    <none>   5d16h   v1.22.1
k8s-master03   Ready    <none>   5d16h   v1.22.1
k8s-node01     Ready    <none>   4d17h   v1.22.1
k8s-node02     Ready    <none>   4d16h   v1.22.1
k8s-node03     Ready    <none>   4d16h   v1.22.1
k8s-node04     Ready    <none>   4d16h   v1.22.1
# List services cluster-wide to confirm the default "kubernetes" ClusterIP
# service (10.96.0.1) has been created by the apiserver.
kubectl get svc -o wide
# Probe the apiserver through the service VIP. -k is required: the apiserver
# presents a cluster-internal CA certificate that curl cannot verify, so
# without -k the command fails with a TLS error instead of producing output.
# An anonymous 401/403 JSON response is expected and proves the VIP works.
curl -k https://10.96.0.1:443

到此集群已搭建好。

4.11runc.amd64安装

runc主要是负责容器生命周期的管理,以及对容器状态的描述

# Download the statically linked runc binary (v1.1.0) from the official
# opencontainers release page.
wget https://github.com/opencontainers/runc/releases/download/v1.1.0/runc.amd64
# Install it under the conventional name "runc" on the sbin path used by
# containerd's default configuration.
mv runc.amd64 /usr/local/sbin/runc
chmod +x /usr/local/sbin/runc
# Verify the installation by printing the runc version.
runc -v

4.12 calico 安装(在master01上操作)

Calico 是一种容器之间互通的网络方案

4.12.1 下载calico文件

wget https://docs.projectcalico.org/v3.19/manifests/calico.yaml

4.12.2 将镜像上传到私有仓库

# Search calico.yaml for the "image" keyword, pull each referenced image
# locally, then push it to the private registry (10.46.137.152:1800).
docker pull docker.io/calico/cni:v3.19.4
docker pull docker.io/calico/pod2daemon-flexvol:v3.19.4
docker pull docker.io/calico/node:v3.19.4
docker pull docker.io/calico/kube-controllers:v3.19.4
# Re-tag every image under the private registry's /com project path,
# keeping the upstream version tag so calico.yaml only needs a host change.
docker tag docker.io/calico/cni:v3.19.4 10.46.137.152:1800/com/cni:v3.19.4
docker tag docker.io/calico/pod2daemon-flexvol:v3.19.4 10.46.137.152:1800/com/pod2daemon-flexvol:v3.19.4
docker tag docker.io/calico/node:v3.19.4 10.46.137.152:1800/com/node:v3.19.4
docker tag docker.io/calico/kube-controllers:v3.19.4 10.46.137.152:1800/com/kube-controllers:v3.19.4
# Push the re-tagged images so cluster nodes can pull from the private registry.
docker push 10.46.137.152:1800/com/cni:v3.19.4
docker push 10.46.137.152:1800/com/pod2daemon-flexvol:v3.19.4
docker push 10.46.137.152:1800/com/node:v3.19.4
docker push 10.46.137.152:1800/com/kube-controllers:v3.19.4
#一般还有下面两个地方需要修改,eth0为自己的网卡名
- name: IP_AUTODETECTION_METHOD
  value: "interface=eth0"
上面这个可以先不加,如果找不到网卡的时候再加
- name: CALICO_IPV4POOL_CIDR
  value: "172.7.0.0/16"

将calico.yaml文件中的镜像地址修改为自己私有仓库的地址

4.12.3 执行安装命令

# Apply the (registry-adjusted) Calico manifest to the cluster.
kubectl apply -f calico.yaml
# Check the pods in kube-system; all calico pods in Running state means
# the network plugin came up correctly.
[root@k8s-master01 setup]# kubectl get pods -n kube-system -o wide
NAME                                       READY   STATUS    RESTARTS   AGE   IP             NODE           NOMINATED NODE   READINESS GATES
calico-kube-controllers-7cc8dd57d9-wrcgh   1/1     Running   0          81s   10.88.0.2      k8s-master01   <none>           <none>
calico-node-9qpvq                          1/1     Running   0          81s   10.46.137.149    k8s-node01     <none>           <none>
calico-node-f8ww5                          1/1     Running   0          81s   10.46.137.147   k8s-master01   <none>           <none>
calico-node-gfrkb                          1/1     Running   0          81s   10.46.137.150   k8s-node02     <none>           <none>

4.13CoreDNS安装

该安装yaml 自行网上下载

# Mirror the CoreDNS image into the private registry, same pattern as Calico.
docker pull coredns/coredns:1.9.3
docker tag coredns/coredns:1.9.3 10.46.137.152:1800/com/coredns:1.9.3
docker push 10.46.137.152:1800/com/coredns:1.9.3
# Change the image address in coredns.yaml to the private registry address,
# then run the install command.
kubectl apply -f coredns.yaml

4.14kuboard 运维平台安装

4.14.1 kuboard yaml下载

上官网:https://kuboard.cn/install/v3/install-in-k8s.html#%E5%AE%89%E8%A3%85

官网提供了在线安装和离线安装,选择一种方式安装即可。
在这里插入图片描述
安装之后界面如上图,在该系统可以发布服务,对服务进行管理。

  • 1
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 2
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

javascript_good

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值