本次测试使用一个节点情况如下:
bastion centos7 api.master.example.com 172.20.42.55
bootstrap Fedora CoreOS 32(RHEL) bootstrap.ocp4.example.com 172.20.42.90
master1 Fedora CoreOS 32(RHEL) master1.ocp4.example.com 172.20.42.91
master2 Fedora CoreOS 32(RHEL) master2.ocp4.example.com 172.20.42.92
master3 Fedora CoreOS 32(RHEL) master3.ocp4.example.com 172.20.42.93
worker1 Fedora CoreOS 32(RHEL) worker1.ocp4.example.com 172.20.42.94
master复用为计算节点
在bastion节点安装
DNS配置(很重要)
安装dns
yum install bind bind-utils -y
创建文件
需要创建两个文件,修改一个文件;ocp4.example.com为正向解析的zone,42.20.172.in-addr.arpa为逆向解析的zone。(注意:下文只给出了正向zone文件db.example.com的内容,逆向zone文件/etc/named/zones/db.172.20.42需自行按PTR记录格式创建,否则named启动时会因找不到该文件而报错。)
以下地址除了明确显示的为bootstrap,master,worker开头的域名,其他的都填bastion的域名,本集群ocp4为集群名(clusterName),example.com为baseDomain
cat /etc/named/named.conf.local
zone "ocp4.example.com" {
type master;
file "/etc/named/zones/db.example.com"; # zone file path
};
zone "42.20.172.in-addr.arpa" {
type master;
file "/etc/named/zones/db.172.20.42"; # 172.20.42.0/24 subnet(逆向zone 42.20.172.in-addr.arpa只覆盖172.20.42.0/24这一个/24网段,原注释的/22有误)
};
cat /etc/named/zones/db.example.com
$TTL 1W
@ IN SOA ns1.example.com. root (
2019070700 ; serial
3H ; refresh (3 hours)
30M ; retry (30 minutes)
2W ; expiry (2 weeks)
1W ) ; minimum (1 week)
IN NS ns1.example.com.
IN MX 10 smtp.example.com.
;
;
ns1.example.com. IN A 172.20.42.55
smtp.example.com. IN A 172.20.42.55
;
helper.example.com. IN A 172.20.42.55
helper.ocp4.example.com. IN A 172.20.42.55
;
api.ocp4.example.com. IN A 172.20.42.55 //haproxy所在主机
api-int.ocp4.example.com. IN A 172.20.42.55 //haproxy所在主机
;
*.apps.ocp4.example.com. IN A 172.20.42.55 //haproxy所在主机
;
bootstrap.ocp4.example.com. IN A 172.20.42.90
;
master1.ocp4.example.com. IN A 172.20.42.91
master2.ocp4.example.com. IN A 172.20.42.92
master3.ocp4.example.com. IN A 172.20.42.93
worker1.ocp4.example.com. IN A 172.20.42.94
;
oauth-openshift.apps.ocp4.example.com. IN A 172.20.42.55 //认证地址,必须加
console-openshift-console.apps.ocp4.example.com. IN A 172.20.42.55 //console地址,不加console起不来
;注意:原文此处写的是apps.master.example.com,但本zone是ocp4.example.com,越区(out-of-zone)记录会被BIND忽略;按install-config中metadata.name=ocp4,正确域名应如上(且已被上面的*.apps.ocp4.example.com通配记录覆盖)
;
;EOF
cat /etc/named.conf
//
// named.conf
//
// Provided by Red Hat bind package to configure the ISC BIND named(8) DNS
// server as a caching only nameserver (as a localhost DNS resolver only).
//
// See /usr/share/doc/bind*/sample/ for example named configuration files.
//
// See the BIND Administrator's Reference Manual (ARM) for details about the
// configuration located in /usr/share/doc/bind-{version}/Bv9ARM.html
options {
listen-on port 53 { 127.0.0.1; 172.20.42.55; }; //需要修改
# listen-on-v6 port 53 { ::1; };
directory "/var/named";
dump-file "/var/named/data/cache_dump.db";
statistics-file "/var/named/data/named_stats.txt";
memstatistics-file "/var/named/data/named_mem_stats.txt";
recursing-file "/var/named/data/named.recursing";
secroots-file "/var/named/data/named.secroots";
allow-query { localhost; 172.20.40.0/22; }; //需要修改(注意:172.20.42.0/22不是合法的/22网络边界,对应255.255.252.0掩码应写为172.20.40.0/22,否则named-checkconf会报address/prefix length mismatch)
/*
- If you are building an AUTHORITATIVE DNS server, do NOT enable recursion.
- If you are building a RECURSIVE (caching) DNS server, you need to enable
recursion.
- If your recursive DNS server has a public IP address, you MUST enable access
control to limit queries to your legitimate users. Failing to do so will
cause your server to become part of large scale DNS amplification
attacks. Implementing BCP38 within your network would greatly
reduce such attack surface
*/
recursion yes;
forwarders {
8.8.8.8;
8.8.4.4;
};
dnssec-enable yes;
dnssec-validation yes;
/* Path to ISC DLV key */
bindkeys-file "/etc/named.root.key";
managed-keys-directory "/var/named/dynamic";
pid-file "/run/named/named.pid";
session-keyfile "/run/named/session.key";
};
logging {
channel default_debug {
file "data/named.run";
severity dynamic;
};
};
zone "." IN {
type hint;
file "named.ca";
};
include "/etc/named.rfc1912.zones";
include "/etc/named.root.key";
include "/etc/named/named.conf.local"; //需要添加
先启动named并设置开机自启(原文遗漏了这一步,不启动named后续解析不会生效)
systemctl enable --now named
将DNS更改为127.0.0.1
nmcli connection modify eth0 ipv4.dns "127.0.0.1"
然后重启NetworkManager
systemctl restart NetworkManager
尝试使用ping或者dig 配置文件中的域名看能否通
安装httpd
用于安装操作系统时下载相关文件
安装包
yum install httpd -y
修改/etc/httpd/conf/httpd.conf(注意:80端口稍后会被haproxy占用,需同时把httpd的"Listen 80"改为"Listen 8081",与后文ignition_url中使用的8081端口保持一致)
<IfModule alias_module>
....
Alias /images "/var/www/install" 添加此行作为安装目录
</IfModule>
<Directory "/var/www/install">
Options Indexes FollowSymLinks
AllowOverride None
Require all granted
</Directory>
重启httpd
systemctl start httpd
haproxy安装
安装包
yum install haproxy -y
cat /etc/haproxy/haproxy.cfg
global
log 127.0.0.1 local2
pidfile /var/run/haproxy.pid
maxconn 4000
daemon
defaults
mode http
log global
option dontlognull
option http-server-close
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 10s
timeout check 10s
maxconn 3000
frontend stats
bind *:1936
mode http
log global
maxconn 10
stats enable
stats hide-version
stats refresh 30s
stats show-node
stats show-desc Stats for ocp4 cluster
stats auth admin:ocp4
stats uri /stats
listen api-server-6443
bind *:6443
mode tcp
server bootstrap bootstrap.ocp4.example.com:6443 check check-ssl verify none inter 1s backup
server master1 master1.ocp4.example.com:6443 check inter 1s
server master2 master2.ocp4.example.com:6443 check inter 1s
server master3 master3.ocp4.example.com:6443 check inter 1s
listen machine-config-server-22623
bind *:22623
mode tcp
server bootstrap bootstrap.ocp4.example.com:22623 check check-ssl verify none inter 1s backup
server master1 master1.ocp4.example.com:22623 check inter 1s
server master2 master2.ocp4.example.com:22623 check inter 1s
server master3 master3.ocp4.example.com:22623 check inter 1s
listen ingress-router-443
bind *:443
mode tcp
balance source
server master1 master1.ocp4.example.com:443 check inter 1s //master复用了计算节点,所以需要加上
server master2 master2.ocp4.example.com:443 check inter 1s
server master3 master3.ocp4.example.com:443 check inter 1s
server worker1 worker1.ocp4.example.com:443 check inter 1s
listen ingress-router-80
bind *:80
mode tcp
balance source
server master1 master1.ocp4.example.com:80 check inter 1s
server master2 master2.ocp4.example.com:80 check inter 1s
server master3 master3.ocp4.example.com:80 check inter 1s
server worker1 worker1.ocp4.example.com:80 check inter 1s
启动haproxy
systemctl start haproxy
harbor安装
安装docker-ce
yum install -y yum-utils
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install docker-ce docker-ce-cli containerd.io -y
systemctl enable docker; systemctl start docker
安装docker-compose
harbor对docker-compose版本有要求,尽量安装较高版本
curl -L "https://get.daocloud.io/docker/compose/releases/download/1.27.3/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
安装harbor
(1)使用离线安装包安装
curl -OL https://github.com/goharbor/harbor/releases/download/v2.2.2/harbor-offline-installer-v2.2.2.tgz
(2)解压
tar -xf harbor-offline-installer-v2.2.2.tgz
(3)harbor配置
进入harbor目录,拷贝一份模板配置文件进行修改:
cd harbor
cp harbor.yml.tmpl harbor.yml
(4)参数说明:
//api.master.example.com为本机(bastion)主机名,需要事先在/etc/hosts配好;注意与后文secret.json、LOCAL_REGISTRY及证书CN中使用的api.master.example.com保持一致(原文此处误写为api.ocp4.example.com)
hostname: api.master.example.com
//如果使用http则把以下行注释:
```
https:
  # https port for harbor, default is 443(此处改用445是为了避开bastion上haproxy已占用的443端口)
  port: 445
  # The path of cert and key files for nginx
  certificate: /etc/harbor/api.master.example.com.crt
  private_key: /etc/harbor/api.master.example.com.key
```
//如果使用https则把以下行注释,同时需要生成https所需文件:
```
http:
  # port for http, default is 80. If https enabled, this port will redirect to https port
  port: 80
```
使用该脚本即可,并将node3.key和node3.crt拷贝到harbor.yml配置文件中配置的路径,注意将以下脚本中的node3改为自己的域名或主机名:
openssl genrsa -out ca.key 4096
openssl req -x509 -new -nodes -sha512 -days 3650 -subj "/C=CN/ST=Beijing/L=Beijing/O=example/OU=Personal/CN=node3" -key ca.key -out ca.crt
openssl genrsa -out node3.key 4096
openssl req -sha512 -new -subj "/C=CN/ST=Beijing/L=Beijing/O=example/OU=Personal/CN=node3" -key node3.key -out node3.csr
cat > v3.ext <<-EOF
authorityKeyIdentifier=keyid,issuer
basicConstraints=CA:FALSE
keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
extendedKeyUsage = serverAuth
subjectAltName = @alt_names
[alt_names]
DNS.1=node3
DNS.2=node3.com
DNS.3=node3
EOF
openssl x509 -req -sha512 -days 3650 -extfile v3.ext -CA ca.crt -CAkey ca.key -CAcreateserial -in node3.csr -out node3.crt
openssl x509 -inform PEM -in node3.crt -out node3.cert
cp node3.crt /etc/pki/ca-trust/source/anchors/node3.crt
update-ca-trust
(5)开始安装
bash install.sh
openshift-client安装
下载oc
curl -OL https://github.com/openshift/okd/releases/download/4.6.0-0.okd-2021-02-14-205305/openshift-client-linux-4.6.0-0.okd-2021-02-14-205305.tar.gz
tar -xf openshift-client-linux-4.6.0-0.okd-2021-02-14-205305.tar.gz
生成的oc拷贝到/usr/local/bin/下,方便使用
同步镜像
创建私有仓库的secret信息
echo -n 'admin:Harbor12345' | base64 -w0
#本次测试harbor的用户名密码为admin Harbor12345
创建secret
cat /root/secret.json  #注意:JSON不支持注释,实际文件中不能包含下面示例里的//说明文字
{
"auths": {
"api.master.example.com:445": { //harbor地址填入此处
"auth": "YWRtaW46SGFyYm9yMTIzNDU=", //上面输出填入此处
"email": ""
}
}
}
export OKD_RELEASE='4.6.0-0.okd-2021-02-14-205305'
#在github okd release上可以看到
export LOCAL_REGISTRY='api.master.example.com:445'
export LOCAL_REPOSITORY='okd/okd4.6'
#需要先在harbor上创建一个okd的project,okd4.6不用创建,会自动创建
export PRODUCT_REPO='openshift'
export LOCAL_SECRET_JSON='/root/secret.json'
export RELEASE_NAME="okd"
开始同步镜像
oc adm release mirror -a ${LOCAL_SECRET_JSON} --from=quay.io/${PRODUCT_REPO}/${RELEASE_NAME}:${OKD_RELEASE} --to=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY} --to-release-image=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY}:${OKD_RELEASE}
#注意:原文命令结尾误用了未定义的${OCP_RELEASE}和${ARCHITECTURE}变量,这里改为前面导出的${OKD_RELEASE}
使用本地镜像生成openshift-install不要直接下载github上的
//后面的sha256可在github okd release Pull From: quay.io/openshift/okd@sha256:6640a4daf0623023b9046fc91858d018bd34433b5c3485c4a61904a33b59a3b9查看到,把quay.io/openshift换成本地仓库地址即可
oc adm release extract -a ${LOCAL_SECRET_JSON} --command=openshift-install "api.master.example.com:445/okd/okd4.6@sha256:6640a4daf0623023b9046fc91858d018bd34433b5c3485c4a61904a33b59a3b9" --skip-verification=true --insecure=true
将生成的openshift-install文件拷贝到/usr/local/bin/下方便使用
生成安装文件
使用httpd服务中/var/www/install目录作为安装目录
mkdir /var/www/install
创建install-config.yaml (必须为这个名字;注意:YAML的注释符是#,实际文件中必须删除下面示例里的//说明文字,否则解析会出错)
cat install-config.yaml
apiVersion: v1
baseDomain: example.com //与dns中配置一致,baseDomain都为example.com
mastersSchedulable: true //master复用为计算节点(注意:install-config.yaml标准字段中并无mastersSchedulable,master是否可调度由manifests中的cluster-scheduler-02-config.yml控制;当compute replicas为0时master默认即可调度——请核实后决定删除或保留此行)
compute:
- hyperthreading: Enabled //超线程,默认开启
name: worker
replicas: 0 //必须为0
controlPlane:
hyperthreading: Enabled
name: master
replicas: 3 //控制节点数
metadata:
name: ocp4 //clusterName
networking:
clusterNetwork:
- cidr: 10.128.0.0/14 //pod网络,不可与宿主机网络相同
hostPrefix: 23
networkType: OpenShiftSDN //networkType:okd4.7才支持OVN-Kubernetes,其他版本用OpenShiftSDN
serviceNetwork:
- 192.168.40.0/24 //不能与主机所在的网段一致
platform:
none: {}
pullSecret: '{"auths":{"api.master.example.com:445":{"auth":"YWRtaW46SGFyYm9yMTIzNDU=","email":""}}}' //secret.json中的内容
sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD5TI1qIlWgbNC3b26TvHzhG3UPUqEQ8QbL+4NkqIjzrotN4Uk0hSB15jIH8rJSXeZxZS9Rtlpbn5VJSjiGiD5Nz7DVI8S94kzCmbZ3cgNDrPJwfJyCKdO47JPnw5uclYH8Ub9Cw9m9K9aKA4URFUQ0YObA6EXf0RdHw/KFtIYKCkyEDAXYm2ZjidVVWQ0LUAhHKmCdnFIsQwiKproQjvfRJwlkbnBwbjMhU667Tr0Ixw36r6gT0M4qHyPYJhwEgNQQRfDrr7kerIeKU3rOVVkLRt6jhua/tXn5KLBOsKKLFFfUGnTjGCBD6oVw7tInvfGcIYFVBW0CXdn94UoOzI3x' //部署节点公钥
//harbor的认证文件api.master.example.com.crt文件的内容,注意前面需要空两格
additionalTrustBundle: |
-----BEGIN CERTIFICATE-----
MIIGGjCCBAKgAwIBAgIJAOJm2zPo8TDYMA0GCSqGSIb3DQEBDQUAMHcxCzAJBgNV
BAYTAkNOMRAwDgYDVQQIDAdCZWlqaW5nMRAwDgYDVQQHDAdCZWlqaW5nMRAwDgYD
VQQKDAdleGFtcGxlMREwDwYDVQQLDAhQZXJzb25hbDEfMB0GA1UEAwwWYXBpLm1h
c3Rlci5leGFtcGxlLmNvbTAeFw0yMTA4MTgwOTQxMjlaFw0zMTA4MTYwOTQxMjla
MHcxCzAJBgNVBAYTAkNOMRAwDgYDVQQIDAdCZWlqaW5nMRAwDgYDVQQHDAdCZWlq
aW5nMRAwDgYDVQQKDAdleGFtcGxlMREwDwYDVQQLDAhQZXJzb25hbDEfMB0GA1UE
AwwWYXBpLm1hc3Rlci5leGFtcGxlLmNvbTCCAiIwDQYJKoZIhvcNAQEBBQADggIP
ADCCAgoCggIBAMFKZNKFkL8KyJ3f08TIsfft9OraV2laqohouTcEo0Hoe1tF7w0F
vOdM/OZyqfXv6geEU6/JBfHJLhrndNX/EBvKuYsLAPrgW8/jjnVfocrZiqBPxkJX
Fm7JRkGCTXLO3cVQ4JuR2xn9wzl9jW0vuBDWYmxYbBJXjsi8dEc0RZYZQOGeLdVa
rmQVaEhwmQC9JNvDYiKI52IGQFEn38LqZNyszInW5DpzYho0VzqvLH1Z63IJIEN4
igHgu5tVDKA6n8sixPzqnEP87N4lfEsBhhwvWlIFC9mXD9WCZEkJpniTmZSNoAyJ
8TyxN57y6I8mCjuxj6QELA1g1t8OZjg9YMilyD/79M5puuc3MqK/7sKyueSHWl3O
TH/xBj7ndbHUGc2qcv0OF74QAmGxwYMP/d9XCToepc/3pcVCW25dDxKMpsvFLsie
6dTy45yHzBrYhPSp309C1OYzzTyEFdeUIaBNyCwEyxbbW+jbp+eDKvLfjgWuxkd+
ZDwW69/JOAdBZcOqgkJ72szKdvOADaaFeYZAw4SEN3Jhtb3iZb/jOHyXc7Xta/CS
ljhwoghTVizfyygeF+prGqOrCNu2mAPcwBln/a/2Kbibo00fK9A0On0eV5m3rhLT
aTAZ84txL+lf9zQqKgQnSixNbwTr9OysTRQ3jXA/v4z78Fv4zG0byL/jAgMBAAGj
gagwgaUwHwYDVR0jBBgwFoAUFBIEezXlyoQwf22SONQH33MFYY0wCQYDVR0TBAIw
ADALBgNVHQ8EBAMCBPAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwVQYDVR0RBE4wTIIW
YXBpLm1hc3Rlci5leGFtcGxlLmNvbYIaYXBpLm1hc3Rlci5leGFtcGxlLmNvbS5j
b22CFmFwaS5tYXN0ZXIuZXhhbXBsZS5jb20wDQYJKoZIhvcNAQENBQADggIBABoR
JPuC1a0OEZkcB0u0ZJJ+Z3e10HElbnj759Xr4oYt4iEso/4+GKRWXrRmDnLHLj35
iObTUfzDwb0VVYfhgcSc/q1BBRQr2wCqM+gfOCsMEUCFivs/FFb/GhA38aRHdEGp
Ul8rVpcTP+aeI3HnMC+ftKTbPnZqN4t41CSroXYWcMzyCVwm0URjQi6gHtPES8+K
x7DkM4M1L3MdCwNzz2k7VaLEBwB7isrtHvZg8mdJPYN5ldiLeNJTZwIsbUunRPls
8oBYXF1Q/ycn2uSfD9VQOgjOyLNuHn+yw0EGTCjjTQH+9Qh0eQrQfR8LYb1QAm83
FfJMQE52U72JaITakZu0o5VxUTrIXkIr873e3PYWk4WVg2h2E4mHsnjprSmkofeu
i3siAfwviGNlqH8r1ewGM2NGtJ6+vPUdKDUU2QcvkIzon0VYalcs/GHapv7ISAY6
qnEga/o61l3nYcSeBag3nccewe9g9A8ub6CVUj5og9qITjXMFMGFmSYOBb7PEtnM
FKeP8yGvVDxsxpphD5dPeXeke69IwHSI7C6569/LYd5DWxGHwMmuCNdlijJLfTSj
+maUuVfhrbiD0Fppzk1yjYCNWSGOa/H/zX2aqtcbAUIMlow3bTQtln+vnRzDoDOj
QDE5rPBmcmsj+dWMxvGIWiGa8ZgRQ/9QBSgWOV5J
-----END CERTIFICATE-----
imageContentSources: //同步镜像时输出的内容
- mirrors:
- api.master.example.com:445/okd/okd4.6
source: quay.io/openshift/okd
- mirrors:
- api.master.example.com:445/okd/okd4.6
source: quay.io/openshift/okd-content
拷贝install-config.yaml到/var/www/install目录,因为生成后续的安装文件时该目录下的install-config.yaml会丢失。便于重复使用
cp install-config.yaml /var/www/install/install-config.yaml
生成文件
openshift-install create manifests --dir=/var/www/install
openshift-install create ignition-configs --dir=/var/www/install
使有权限通过http下载
chmod 777 -R /var/www/install
通过fedora coreos iso启动集群节点
下载iso
4.6版本okd需要使用32版本,详见https://access.redhat.com/articles/4763741
镜像地址:https://builds.coreos.fedoraproject.org/browser
curl -OL https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/32.20201104.3.0/x86_64/fedora-coreos-32.20201104.3.0-live.x86_64.iso
通过iso启动操作系统
启动时修改启动参数,先启动bootstrap节点,在启动参数添加:
rd.neednet=1 ip=172.20.42.90::172.20.43.253:255.255.252.0:bootstrap.ocp4.example.com:ens3:none nameserver=172.20.42.55 coreos.inst.install_dev=/dev/sda coreos.inst.ignition_url=http://172.20.42.55:8081/images/bootstrap.ign
然后启动master、worker节点,其中master节点ign文件为master.ign,worker节点ign文件为worker.ign,可在http服务器查看到
其中针对boostrap节点:
1. 需要将harbor的ssl文件添加到bootstrap节点的/etc/pki/tls/certs/ca-bundle.crt文件,否则拉取镜像时会出现x509错误(按理已经在配置文件中添加了harbor的ssl文件内容,应该不需要才对)
cat api.master.example.com.crt >> /etc/pki/tls/certs/ca-bundle.crt
如果出现等待重启(The connection to the server localhost:6443 was refused - did you specify the right host or port?此错误可忽略)
针对master和worker节点:
1. 需要将harbor的ssl文件添加到master节点的/etc/pki/tls/certs/ca-bundle.crt
2. 需要先将/etc/ignition-machine-config-encapsulated.json中
"extensions":["glusterfs","glusterfs-fuse","qemu-guest-agent","NetworkManager-ovs"],改为"extensions":[],
"osImageURL":"quay.io/openshift/okd-content@sha256:8a06d787f558a5af0da2b34e30c0aeaa3b1675e150e6ea44645f4ed698df77a3"改为harbor的地址,即api.master.example.com:445/okd/okd4.6@sha256:8axxx(与imageContentSources中配置的镜像仓库地址保持一致,原文此处误写为api.ocp4.example.com)
/*可以参考以下,也可手动改
cp /etc/ignition-machine-config-encapsulated.json /etc/ignition-machine-config-encapsulated.json.bak
jq -cM 'del (.spec.extensions[])' /etc/ignition-machine-config-encapsulated.json.bak > /etc/ignition-machine-config-encapsulated.json
*/
然后执行
systemctl start machine-config-daemon-firstboot.service
等待启动完毕自动重启
重启之后会出现ovs-configuration.service服务无法启动,此时
cp /etc/ignition-machine-config-encapsulated.json.bak /etc/ignition-machine-config-encapsulated.json # (.bak文件是系统自动生成的)
修改为"extensions": ["NetworkManager-ovs"]
然后执行
/usr/libexec/machine-config-daemon firstboot-complete-machineconfig
等待执行完毕自动重启
在bastion节点查看bootstrap安装进度
openshift-install --dir=/var/www/install wait-for bootstrap-complete --log-level=debug
输出:
INFO Waiting up to 20m0s for the Kubernetes API at https://api.master.example.com:6443...
INFO API v1.19.2-1049+f173eb4a83e557-dirty up
INFO Waiting up to 30m0s for bootstrapping to complete...
DEBUG Bootstrap status: complete
INFO It is now safe to remove the bootstrap resources
INFO Time elapsed: 0s
此时可以关闭bootstrap节点
查看集群安装进度
openshift-install --dir=/var/www/install wait-for install-complete --log-level=debug
输出:
INFO Install complete!
INFO To access the cluster as the system:admin user when using 'oc', run 'export KUBECONFIG=/var/www/install/auth/kubeconfig'
INFO Access the OpenShift web-console here: https://console-openshift-console.apps.master.example.com
INFO Login to the console with user: "kubeadmin", and password: "hBrw7-SIsRB-5yIeU-iqDYs"
INFO Time elapsed: 0s
安装完成
可通过https://console-openshift-console.apps.master.example.com 登录集群
user: "kubeadmin", and password: "hBrw7-SIsRB-5yIeU-iqDYs"
password也可在安装目录的auth/kubeadmin-password查看到
注意需要先在浏览器所在主机hosts添加如下(域名以实际安装完成后的输出为准;若按本文install-config中metadata.name=ocp4安装,则应为oauth-openshift.apps.ocp4.example.com和console-openshift-console.apps.ocp4.example.com):
172.20.42.55 oauth-openshift.apps.master.example.com console-openshift-console.apps.master.example.com
官方文档
https://docs.okd.io/latest/installing/installing_bare_metal/installing-bare-metal.html
中文参考
https://www.cnblogs.com/ooops/p/14389844.html
https://blog.csdn.net/weixin_42507440/article/details/117513347
DNS配置
https://docs.okd.io/latest/installing/installing_bare_metal/installing-bare-metal.html#installation-dns-user-infra_installing-bare-metal
https://blog.csdn.net/weixin_26717681/article/details/108935059
https://github.com/cragr/okd4_files
版本匹配
https://access.redhat.com/articles/4763741