OKD4.5裸机安装 2021-06-03

1 篇文章 0 订阅

机器规划配置

机器节点 | 系统 | cpu/核 | 内存/GB | 磁盘/GB
bastion | centos7 | 4 | 8 | 50
bootstrap | Fedora CoreOS 32(RHEL) | 4 | 16 | 120
master | Fedora CoreOS 32(RHEL) | 6 | 20 | 120
worker | Fedora CoreOS 32(RHEL) | 4 | 16 | 120
机器节点 | hostname | ip | 服务
bastion | bastion.okd.example.com | 192.168.2.134 | CoreDNS、etcd、HaProxy、http、Container Image Registry
bootstrap | bootstrap.okd.example.com | 192.168.2.149 | bootstrap
master | master.okd.example.com | 192.168.2.130 | openshift-master
worker | worker.okd.example.com | 192.168.2.131 | openshift-worker

bastion节点操作

hostname配置

 hostnamectl set-hostname bastion.okd.example.com

外网访问配置

vi /etc/resolv.conf
nameserver 8.8.8.8
nameserver 192.168.2.134

# 给resolv.conf增加不可变属性,防止被篡改
chattr +i /etc/resolv.conf

防火墙设置

# Selinux 配置
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce 0

# 关闭防火墙
systemctl disable firewalld
systemctl stop firewalld

SSH登陆设置

# 创建 SSH 密钥
ssh-keygen -t rsa -b 4096 -N '' -f ~/.ssh/id_rsa

# 启动 ssh-agent 进程为后台任务
eval "$(ssh-agent -s)"

# 将 SSH 私钥添加到 ssh-agent
ssh-add ~/.ssh/id_rsa

openshift-client安装

#yum安装一些包
yum install -y wget
 
wget https://github.com/openshift/okd/releases/download/4.5.0-0.okd-2020-10-15-235428/openshift-client-linux-4.5.0-0.okd-2020-10-15-235428.tar.gz

tar -zxvf openshift-client-linux-4.5.0-0.okd-2020-10-15-235428.tar.gz

cp oc /usr/local/bin/

# 检查版本
oc version 

Client Version: 4.5.0-0.okd-2020-10-15-235428

openshift-install安装

wget https://github.com/openshift/okd/releases/download/4.5.0-0.okd-2020-10-15-235428/openshift-install-linux-4.5.0-0.okd-2020-10-15-235428.tar.gz

tar -zxvf openshift-install-linux-4.5.0-0.okd-2020-10-15-235428.tar.gz

cp openshift-install /usr/local/bin/

# 检查版本
openshift-install version

openshift-install 4.5.0-0.okd-2020-10-15-235428
built from commit 63200c80c431b8dbaa06c0cc13282d819bd7e5f8
release image quay.io/openshift/okd@sha256:67cc7cb47d56237adcf0ecc2ee76446785add5fa236cd08746b55f578b9200a5

安装coredns

wget https://github.com/coredns/coredns/releases/download/v1.6.9/coredns_1.6.9_linux_amd64.tgz

tar zxvf coredns_1.6.9_linux_amd64.tgz

mv coredns /usr/local/bin

useradd coredns -s /sbin/nologin

配置coredns.server

vi /etc/systemd/system/coredns.service

配置文件

[Unit]
Description=CoreDNS DNS server
Documentation=https://coredns.io
After=network.target
 
[Service]
PermissionsStartOnly=true
LimitNOFILE=1048576
LimitNPROC=512
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
AmbientCapabilities=CAP_NET_BIND_SERVICE
NoNewPrivileges=true
User=coredns
WorkingDirectory=~
ExecStart=/usr/local/bin/coredns -conf=/etc/coredns/Corefile
ExecReload=/bin/kill -SIGUSR1 $MAINPID
Restart=on-failure
 
[Install]
WantedBy=multi-user.target

配置Corefile

mkdir -p /etc/coredns

vi /etc/coredns/Corefile

配置文件

注意:answer部分配置为自己的解析机器

.:53 {                                              # 监听 53 端口
    template IN A apps.okd.example.com {
        match .*apps\.okd\.example\.com               # 匹配 DNS Query Request Domain Name 的正则表达式
        answer "{{ .Name }} 60 IN A 192.168.2.134"  # 配置 Domain Name 应答的 IP 地址(bastion 节点 IP)
        fallthrough
    }
    etcd {                              # 配置启用 etcd 插件,后面可以指定域名,例如:etcd test.com
        path /skydns                    # 默认路径为 /skydns,后续所有的 DNS 记录都存储在该路径下
        endpoint http://localhost:2379  # etcd 访问地址,使用空格分隔多个 endpoint
        fallthrough
    }
    prometheus                          # 监控插件
    cache 160
    loadbalance                         # 负载均衡,开启 DNS 轮训查询测试
    forward . 114.114.114.114
    log                                 # 打印日志
}

设置开机自启并验证

systemctl enable coredns --now

# 验证
yum install bind-utils

dig +short apps.okd.example.com @127.0.0.1

安装etcd

yum install -y etcd

systemctl enable etcd --now

systemctl status etcd
查看域名解析
cat /etc/resolv.conf
# Generated by NetworkManager
search okd.example.com
nameserver 192.168.2.134
nameserver 8.8.8.8
etcd添加域名解析
export ETCDCTL_API=3

# Bastion API Server HA
etcdctl put /skydns/com/example/okd/api '{"host":"192.168.2.134", "ttl":60}'
etcdctl put /skydns/com/example/okd/api-int '{"host":"192.168.2.134", "ttl":60}'

# Bastion Container Images Registry
etcdctl put /skydns/com/example/okd/registry '{"host":"192.168.2.134", "ttl":60}'

# Master ETCD
etcdctl put /skydns/com/example/okd/etcd-0 '{"host":"192.168.2.130", "ttl":60}'

etcdctl put /skydns/com/example/okd/_tcp/_etcd-server-ssl/x1 '{"host":"etcd-0.okd.example.com", "ttl":60, "priority":0, "weight":10, "port":2380}'

etcdctl put /skydns/com/example/okd/bastion '{"host":"192.168.2.134", "ttl":60}'
etcdctl put /skydns/com/example/okd/bootstrap '{"host":"192.168.2.149", "ttl":60}'
etcdctl put /skydns/com/example/okd/master '{"host":"192.168.2.130", "ttl":60}'
etcdctl put /skydns/com/example/okd/worker '{"host":"192.168.2.131", "ttl":60}'

# etcd查看域名添加
etcdctl get --prefix /skydns

etcd域名删除命令

# etcd删除命令
etcdctl del <key>
etcdctl del /skydns/com/example/okd/api

域名验证

dig +short api.okd.example.com @127.0.0.1
192.168.2.134

dig +short api-int.okd.example.com @127.0.0.1
192.168.2.134

dig +short registry.okd.example.com @127.0.0.1
192.168.2.134

dig +short etcd-0.okd.example.com @127.0.0.1
192.168.2.130

dig +short -t SRV _etcd-server-ssl._tcp.okd.example.com @127.0.0.1
10 100 2380 etcd-0.okd.example.com.

dig +short bootstrap.okd.example.com @127.0.0.1
192.168.2.149

dig +short master.okd.example.com @127.0.0.1
192.168.2.130

dig +short worker.okd.example.com @127.0.0.1
192.168.2.131

安装HaProxy

yum install haproxy -y

vi /etc/haproxy/haproxy.cfg

# 修改某个配置端口
frontend  main *:5001
    acl url_static       path_beg       -i /static /images /javascript /stylesheets
    acl url_static       path_end       -i .jpg .gif .png .css .js

    use_backend static          if url_static
    default_backend             app
# 将5000端口修改为其他端口,不然会和后面的镜像仓库端口冲突

配置

#---------------------------------------------------------------------
# Example configuration for a possible web application.  See the
# full configuration options online.
#
#   http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
#
#---------------------------------------------------------------------

#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    #
    # 1) configure syslog to accept network log events.  This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    #
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #   file. A line like the following can be added to
    #   /etc/sysconfig/syslog
    #
    #    local2.*                       /var/log/haproxy.log
    #
    log         127.0.0.1 local2

    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
frontend  main *:5001
    acl url_static       path_beg       -i /static /images /javascript /stylesheets
    acl url_static       path_end       -i .jpg .gif .png .css .js

    use_backend static          if url_static
    default_backend             app

#---------------------------------------------------------------------
# static backend for serving up images, stylesheets and such
#---------------------------------------------------------------------
backend static
    balance     roundrobin
    server      static 127.0.0.1:4331 check

#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend app
    balance     roundrobin
    server  app1 127.0.0.1:5001 check
    server  app2 127.0.0.1:5002 check
    server  app3 127.0.0.1:5003 check
    server  app4 127.0.0.1:5004 check

#-----------------------------增加----------------------------------------------
listen stats
    bind :9000
    mode http
    stats enable
    stats uri /
    monitor-uri /healthz

frontend openshift-api-server                    # OpenShift API Server (HA)
    bind *:6443
    default_backend openshift-api-server
    mode tcp
    option tcplog

backend openshift-api-server
    balance source
    mode tcp
    server bootstrap 192.168.2.149:6443 check  # API Server in Bootstrap Node
    server master 192.168.2.130:6443 check     # API Server in Master Node

frontend machine-config-server                   # OpenShift Machine Config Server (HA)
    bind *:22623
    default_backend machine-config-server
    mode tcp
    option tcplog

backend machine-config-server
    balance source
    mode tcp
    server bootstrap 192.168.2.149:22623 check # Machine Config Server in Bootstrap Node
    server master 192.168.2.130:22623 check    # Machine Config Server in Master Node

frontend ingress-http
    bind *:80
    default_backend ingress-http
    mode tcp
    option tcplog

backend ingress-http
    balance source
    mode tcp
    server worker1 192.168.2.131:80 check

frontend ingress-https
    bind *:443
    default_backend ingress-https
    mode tcp
    option tcplog

backend ingress-https
    balance source
    mode tcp
    server worker1 192.168.2.131:443 check

启动

systemctl enable haproxy && systemctl restart haproxy

安装httpd

yum -y install podman httpd-tools httpd

vi /etc/httpd/conf/httpd.conf
# 为避免端口冲突,将80端口修改为82端口

systemctl enable httpd && systemctl restart httpd

# 创建必要的目录
mkdir -p /var/www/html/os     # 存放镜像文件
mkdir -p /var/www/html/ign    # 存放点火文件

# 测试是否可以下载
http://192.168.2.134:82/os/a.txt

安装Registry

证书准备
# 自建 CA 中心
mkdir -p /opt/registry/{auth,certs,data}
cd /opt/registry/certs

# 自签发证书,域名为 registry.okd.example.com
openssl req -subj '/CN=registry.okd.example.com/O=My Company Name LTD./C=US' -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout domain.key -out domain.crt

# 将自签名的证书复制到默认信任证书路径
cp /opt/registry/certs/domain.crt /etc/pki/ca-trust/source/anchors/

update-ca-trust extract
生成镜像仓库密钥
# 为镜像仓库生成密钥
htpasswd -bBc /opt/registry/auth/htpasswd admin admin
用户:admin 密码:admin

echo -n 'admin:admin' | base64 -w0    # YWRtaW46YWRtaW4=

vi /root/pull-secret.json

{
    "auths":{
        "registry.okd.example.com:5000":{
            "auth":"YWRtaW46YWRtaW4=",
            "email":"847422774@qq.com"
        }
    }
}

OCP 需要从 Redhat 官网下载密钥,地址:https://cloud.redhat.com/openshift/install/pull-secret;而 OKD 则可以自己生成

下载官方镜像

OKD_RELEASE环境变量和openshift的版本一致

设置变量

export OKD_RELEASE="4.5.0-0.okd-2020-10-15-235428"
export LOCAL_REGISTRY='registry.okd.example.com:5000'
export LOCAL_REPOSITORY='openshift/okd'
export PRODUCT_REPO='openshift'
export LOCAL_SECRET_JSON='/root/pull-secret.json'
export RELEASE_NAME="okd"

启动仓库,我的仓库镜像是提前准备好的

echo "192.168.2.134 registry.okd.example.com" >>/etc/hosts

podman load -i docker-io-registry-2.tar 

podman run --name example-registry -p 5000:5000 \
     -v /opt/registry/data:/var/lib/registry:z \
     -v /opt/registry/auth:/auth:z \
     -e "REGISTRY_AUTH=htpasswd" \
     -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" \
     -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd \
     -v /opt/registry/certs:/certs:z \
     -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
     -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
     -d docker.io/library/registry:2
     

拉取镜像

 # 拉取镜像
 oc adm -a ${LOCAL_SECRET_JSON} release mirror \
     --from=quay.io/${PRODUCT_REPO}/${RELEASE_NAME}:${OKD_RELEASE} \
     --to=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY} \
     --to-release-image=${LOCAL_REGISTRY}/${LOCAL_REPOSITORY}:${OKD_RELEASE}
 # 此命令尝试过不行,使用下面命令
 
oc adm -a /root/pull-secret.json release mirror \
     --from=quay.io/openshift/okd:4.5.0-0.okd-2020-10-15-235428 \
     --to=registry.okd.example.com:5000/openshift/okd \
     --to-release-image=registry.okd.example.com:5000/openshift/okd:4.5.0-0.okd-2020-10-15-235428
验证仓库是否连通
curl -u admin:admin -k https://registry.okd.example.com:5000/v2/_catalog
# 没有拉取镜像是空的,拉取完后有内容
{"repositories":["openshift/okd"]}

# 查看所有镜像tag
curl -u admin:admin -k https://registry.okd.example.com:5000/v2/openshift/okd/tags/list

openshift部署

镜像准备使用的是32

fedora-coreos版本有问题,34版本登仓库会报x509,这里使用32

镜像下载网址
https://getfedora.org/coreos/download?tab=metal_virtualized&stream=stable

将镜像文件放在/var/www/html/os路径下
cd /var/www/html/os

wget https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/32.20200715.3.0/x86_64/fedora-coreos-32.20200715.3.0-metal.x86_64.raw.xz.sig

wget https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/32.20200715.3.0/x86_64/fedora-coreos-32.20200715.3.0-metal.x86_64.raw.xz

wget https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/32.20200715.3.0/x86_64/fedora-coreos-32.20200715.3.0-live.x86_64.iso

准备好iso放在服务器上,并将下载的32版本镜像文件重命名
mv ./fedora-coreos-32.20200715.3.0-metal.x86_64.raw.xz coreos.raw.xz
mv ./fedora-coreos-32.20200715.3.0-metal.x86_64.raw.xz.sig coreos.raw.xz.sig

yaml文件准备

mkdir /root/okdinstall

id_rsa.pub文件

cat /root/.ssh/id_rsa.pub

ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDJgpOx93dFHFGoYblfCsjuZlUsDG6l6BZ79mAQCHJpXnGLhOXlYCTd6N4T4MxT88XyCchPAFZIFEhUGDnRU2sP7Dkt09jn11XGXwNGIzdYi77wyuBSrNzAYfb2XYId8eHj2T3M1f+QMgRW5szs2NUYIvTFIXsitZcB83+1oayM3OU9M8C6Vt+BvA8v3yIMc1vKs3lKsHzV/MJ/6j7g1aT4R3i24hDF6KSDo9WIXyNhZEiYFUjADtlG0UcIlm+fNecAPDW8EsdMdzT25rGvdvSP0Xtt3NTDoFKwbZgl925WFlDRrgw09SZR/A2tQzAbPV+ZLJX26RyinBg+N7OCB/7uWhUH8JMNFhs7jOuXFrwBtUdE/n6HdW78bXrpbLOPGiTCxirjrOmi7cbUoCddMtPf86z5/giRDy5XiosVMMLpgbzj3MCG0N7woTyx+nAtWYeti9BEOurvAjjv/aQoQyEjOcmEE3rNHpmWcVJaeBshWJlkuOawirUgNep0aFQcxyOkMTg5E10aEo8CQAdv/r0pQvmBpscA+4Q9pgRMl65Mrgz76DlkHrrp9ZnUiu8WucDl+MaDK19EeH6PwHql2jpzysiLpxahjvg4pejW9v9nY3qyEx8vP+k58DXOUKIGJzfVO9Je1lgVsfm2eNB6aaVOT78d82tamZal//L5+dfy2Q== root@bastion.okd.example.com

domain.crt文件

cat /opt/registry/certs/domain.crt

  -----BEGIN CERTIFICATE-----
  MIIDcTCCAlmgAwIBAgIJAMetw3HLW00xMA0GCSqGSIb3DQEBCwUAME8xITAfBgNV
  BAMMGHJlZ2lzdHJ5Lm9rZC5leGFtcGxlLmNvbTEdMBsGA1UECgwUTXkgQ29tcGFu
  eSBOYW1lIExURC4xCzAJBgNVBAYTAlVTMB4XDTIxMDYwMjA1MzgxOVoXDTIyMDYw
  MjA1MzgxOVowTzEhMB8GA1UEAwwYcmVnaXN0cnkub2tkLmV4YW1wbGUuY29tMR0w
  GwYDVQQKDBRNeSBDb21wYW55IE5hbWUgTFRELjELMAkGA1UEBhMCVVMwggEiMA0G
  CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDDIELjaifWbSxa7kLcMI6UHPbx+mMD
  f5P6Ep1qJ0OoM7uqJRZQvS58ZIMzEDoYDvdrvUQ7SchQqUhQVWPQOptaNV/+UOvD
  KOfz5kFAzJM+HpEFVxMYND9yvEKhn7V968hUecbFCnGuldhHekTkZprvruWnfdgy
  AchL6B29qI0DsR+VDMQ6me6DbL+tN65wq4lkXU/IUN/nSzD0kIoU2hAfOfqzaMku
  O9gEdeeZX0EzCRhex9QFL/jZjC+jOD4owiriyVkYqf0LCta3WvSdGdkU3SZfFBWg
  5JIYrURfIigKa5kmv1R0lyF2aXbWi2oQWdKAPpCq2/kGCvgZ01xjl2OJAgMBAAGj
  UDBOMB0GA1UdDgQWBBQI1Y1atjl+rCREmDgsgd3oN4+puTAfBgNVHSMEGDAWgBQI
  1Y1atjl+rCREmDgsgd3oN4+puTAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBCwUA
  A4IBAQBS+FEEREiZK3W58i4tmCY+3Imy+gdECSWpIgXhSkEX8rTq8Mz6vpBvJkPt
  fJKjtcAT0tnG43vZ2mBxQrbogap0OTEYnmpDuQHeW2dYKYvRJwJr0I2g2+x023yn
  VzP16nET5L5IbSmA8afaLBwwIVzAxG3SEAmu0MpGkf8RXQEXVcr5sX5mMtTP9Z12
  mXIZCea1XRDj1N5HRFZ9ZbqMtUsTCBoGrnlQvhlzstXTwApa8IF69npI+bMOdDz7
  A0uqt9AGNX9WFh62+79EBcMSfNVvugsvr97YH3MGsYnUaSSJZJtgR6C4mNtCVRWe
  1Y/2YTRguccnVJq7MuNMhPy02Utq
  -----END CERTIFICATE-----

Registry 的密钥信息

pullSecret: '{"auths":{"registry.okd.example.com:5000": {"auth": "YWRtaW46YWRtaW4=","email": "847422774@qq.com"}}}'

vi /root/okdinstall/install-config.yaml

注意集群名字,他会对应你的域名

注意这里有个坑,networkType:okd4.7才支持OVN-Kubernetes,其他版本用OpenShiftSDN

apiVersion: v1
baseDomain: example.com
compute:
- hyperthreading: Enabled
  name: worker
  replicas: 0
controlPlane:
  hyperthreading: Enabled
  name: master
  replicas: 1
metadata:
  name: okd
networking:
  clusterNetwork:
  - cidr: 10.128.0.0/14
    hostPrefix: 23
  networkType: OpenShiftSDN
  serviceNetwork:
  - 172.30.0.0/16
platform:
  none: {}
fips: false
pullSecret: '{"auths":{"registry.okd.example.com:5000": {"auth": "YWRtaW46YWRtaW4=","email": "847422774@qq.com"}}}'
sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDJgpOx93dFHFGoYblfCsjuZlUsDG6l6BZ79mAQCHJpXnGLhOXlYCTd6N4T4MxT88XyCchPAFZIFEhUGDnRU2sP7Dkt09jn11XGXwNGIzdYi77wyuBSrNzAYfb2XYId8eHj2T3M1f+QMgRW5szs2NUYIvTFIXsitZcB83+1oayM3OU9M8C6Vt+BvA8v3yIMc1vKs3lKsHzV/MJ/6j7g1aT4R3i24hDF6KSDo9WIXyNhZEiYFUjADtlG0UcIlm+fNecAPDW8EsdMdzT25rGvdvSP0Xtt3NTDoFKwbZgl925WFlDRrgw09SZR/A2tQzAbPV+ZLJX26RyinBg+N7OCB/7uWhUH8JMNFhs7jOuXFrwBtUdE/n6HdW78bXrpbLOPGiTCxirjrOmi7cbUoCddMtPf86z5/giRDy5XiosVMMLpgbzj3MCG0N7woTyx+nAtWYeti9BEOurvAjjv/aQoQyEjOcmEE3rNHpmWcVJaeBshWJlkuOawirUgNep0aFQcxyOkMTg5E10aEo8CQAdv/r0pQvmBpscA+4Q9pgRMl65Mrgz76DlkHrrp9ZnUiu8WucDl+MaDK19EeH6PwHql2jpzysiLpxahjvg4pejW9v9nY3qyEx8vP+k58DXOUKIGJzfVO9Je1lgVsfm2eNB6aaVOT78d82tamZal//L5+dfy2Q== root@bastion.okd.example.com'
additionalTrustBundle: |
  -----BEGIN CERTIFICATE-----
  MIIDcTCCAlmgAwIBAgIJAMetw3HLW00xMA0GCSqGSIb3DQEBCwUAME8xITAfBgNV
  BAMMGHJlZ2lzdHJ5Lm9rZC5leGFtcGxlLmNvbTEdMBsGA1UECgwUTXkgQ29tcGFu
  eSBOYW1lIExURC4xCzAJBgNVBAYTAlVTMB4XDTIxMDYwMjA1MzgxOVoXDTIyMDYw
  MjA1MzgxOVowTzEhMB8GA1UEAwwYcmVnaXN0cnkub2tkLmV4YW1wbGUuY29tMR0w
  GwYDVQQKDBRNeSBDb21wYW55IE5hbWUgTFRELjELMAkGA1UEBhMCVVMwggEiMA0G
  CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDDIELjaifWbSxa7kLcMI6UHPbx+mMD
  f5P6Ep1qJ0OoM7uqJRZQvS58ZIMzEDoYDvdrvUQ7SchQqUhQVWPQOptaNV/+UOvD
  KOfz5kFAzJM+HpEFVxMYND9yvEKhn7V968hUecbFCnGuldhHekTkZprvruWnfdgy
  AchL6B29qI0DsR+VDMQ6me6DbL+tN65wq4lkXU/IUN/nSzD0kIoU2hAfOfqzaMku
  O9gEdeeZX0EzCRhex9QFL/jZjC+jOD4owiriyVkYqf0LCta3WvSdGdkU3SZfFBWg
  5JIYrURfIigKa5kmv1R0lyF2aXbWi2oQWdKAPpCq2/kGCvgZ01xjl2OJAgMBAAGj
  UDBOMB0GA1UdDgQWBBQI1Y1atjl+rCREmDgsgd3oN4+puTAfBgNVHSMEGDAWgBQI
  1Y1atjl+rCREmDgsgd3oN4+puTAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBCwUA
  A4IBAQBS+FEEREiZK3W58i4tmCY+3Imy+gdECSWpIgXhSkEX8rTq8Mz6vpBvJkPt
  fJKjtcAT0tnG43vZ2mBxQrbogap0OTEYnmpDuQHeW2dYKYvRJwJr0I2g2+x023yn
  VzP16nET5L5IbSmA8afaLBwwIVzAxG3SEAmu0MpGkf8RXQEXVcr5sX5mMtTP9Z12
  mXIZCea1XRDj1N5HRFZ9ZbqMtUsTCBoGrnlQvhlzstXTwApa8IF69npI+bMOdDz7
  A0uqt9AGNX9WFh62+79EBcMSfNVvugsvr97YH3MGsYnUaSSJZJtgR6C4mNtCVRWe
  1Y/2YTRguccnVJq7MuNMhPy02Utq
  -----END CERTIFICATE-----
imageContentSources:
- mirrors:
  - registry.okd.example.com:5000/openshift/okd
  source: quay.io/openshift/okd
- mirrors:
  - registry.okd.example.com:5000/openshift/okd
  source: quay.io/openshift/okd-content

配置说明

# 根据需要修改 OKD 部署配置

apiVersion: v1
baseDomain: example.com         # 配置基础域名,OpenShift 内部所有的 DNS 记录必须是此基础域名的子域,并包含集群名称。
compute:
- hyperthreading: Enabled 
  name: worker
  replicas: 0                   # 配置 Worker Node 数量,因为我们要手动创建 Worker Node,所以这里设置为 0。
controlPlane:
  hyperthreading: Enabled
  name: master
  replicas: 1                   # 配置 Master Node 数量,我们部署单 Master,所以写 1,注意:Master Node 数量必须和 etcd 节点数量一致。
metadata:
  name: okd                     # 集群名称。
networking:
  clusterNetwork:
  - cidr: 10.128.0.0/14         # 配置 Pod IP Pool。注意:Pod IP Pool 不能与物理网络冲突。
    hostPrefix: 23              # 分配给每个节点的子网前缀长度。
  networkType: OpenShiftSDN     # 配置集群网络类型。注意:OKD 4.7 才支持 OVNKubernetes,4.5 版本需使用 OpenShiftSDN
  serviceNetwork: 
  - 172.30.0.0/16               # 配置 Service IP Pool。
platform:
  none: {}                      # 平台类型,因为我们使用裸金属安装类别,所有不填。
fips: false

# 配置 Bastion SSH pub key
sshKey: ''

# 配置 Image Registry 地址
imageContentSources:
- mirrors:
  - registry.okd.example.com:5000/openshift/okd
  source: quay.io/openshift/okd
- mirrors:
  - registry.okd.example.com:5000/openshift/okd
  source: quay.io/openshift/okd-content

# 配置 Image Registry 的密钥信息
pullSecret: '{"auths":{"registry.okd.example.com:5000": {"auth": "YWRtaW46YWRtaW4=","email": ""}}}'

# 配置 Image Registry 的信任证书。
# cat /opt/registry/certs/domain.crt
# 注意:前面要保持两个空格作为缩进。
additionalTrustBundle: |
  -----BEGIN CERTIFICATE-----
  -----END CERTIFICATE-----
生成必要的点火文件
cp /root/okdinstall/install-config.yaml /tmp/install-config.yaml.bak 

openshift-install create manifests --dir=/root/okdinstall

cp /tmp/install-config.yaml.bak /root/okdinstall/install-config.yaml

openshift-install create ignition-configs --dir=/root/okdinstall

cp /root/okdinstall/*.ign /var/www/html/ign/

chmod 755 /var/www/html/ign/*

在bastion节点使用oc和kubectl命令

mkdir -p /root/.kube

cp /root/okdinstall/auth/kubeconfig ~/.kube/config

bootstrap节点操作

系统使用RHEL7,镜像文件如图fedora-coreos-32-xxx.iso

在这里插入图片描述在这里插入图片描述在这里插入图片描述

启动虚拟机,在界面按Tab键进入配置,按下方配置进行手动配置

cpu,内存,网卡,盘需要提前看好

配置

ip=192.168.2.149::192.168.2.1:255.255.255.0:bootstrap.okd.example.com:ens192:none nameserver=192.168.2.134 coreos.inst.install_dev=sda coreos.inst.image_url=http://192.168.2.134:82/os/coreos.raw.xz coreos.inst.ignition_url=http://192.168.2.134:82/ign/bootstrap.ign

在部署机 bastion 可以通过 ssh 命令进入bootstrap

ssh -i ~/.ssh/id_rsa core@192.168.2.149

# 检查方法:
# 查看网络信息
hostname -I
ip route show
cat /etc/resolv.conf
验证方式:在 bootstrap 主机上 curl -k https://localhost:22623/config/master 如果有返回就标明 OK。

# 查看运行容器,sudo podman ps, 应该有运行中容器
# 如果没有在运行的容器, ps -ef 看下是否有podman pull 的进程
# 通过curl命令检查跟镜像仓库的连通性,我遇到过连不上仓库,在部署机上重启了下仓库后好了
sudo podman ps 
sudo ps -ef 

# 查看进程端口
netstat -an 查看 6443、22623

# 也可以通过haproxy的页面,就是192.168.2.10:9000 可以看到bootstrap的状态变成了绿色
# 说明这个时候bootstrap 已经部署成功

# 查看服务状态的命令,ssh进去的时候就会提示这条命令
journalctl -b -f -u bootkube.service

# 查看系统日志
journalctl -xef

sudo podman ps -a --no-trunc --sort created --format "{{.Command}}"
sudo crictl pods

openshift-install --dir=/root/okdinstall/ wait-for bootstrap-complete --log-level debug

当出现如下图7个ready的pod就可以引导master
在这里插入图片描述

宏观日志bastion节点

openshift-install wait-for install-complete --log-level=debug  --dir=/root/okdinstall

master节点操作

ip=192.168.2.130::192.168.2.1:255.255.255.0:master.okd.example.com:ens192:none nameserver=192.168.2.134 coreos.inst.install_dev=sda coreos.inst.image_url=http://192.168.2.134:82/os/coreos.raw.xz coreos.inst.ignition_url=http://192.168.2.134:82/ign/master.ign
ssh -i ~/.ssh/id_rsa core@192.168.2.130

# 查看系统日志
journalctl -xef

hostname -I
ip route show
cat /etc/resolv.conf

sudo crictl pods

使用oc命令

mkdir .kube
scp -rp root@192.168.2.134:/root/.kube/config ./.kube

oc get node

当日志出现如图所示内容时,可以剔除bootstrap节点
在这里插入图片描述

vim /etc/haproxy/haproxy.cfg

systemctl restart haproxy
systemctl status haproxy

在这里插入图片描述

单master操作,多master不需要

# 编辑文件,写入内容。必须打这个patch,不然直接改副本数,还会恢复回去。

vi /opt/etcd_quorum_guard.yaml

- op: add
  path: /spec/overrides
  value:
  - kind: Deployment
    group: apps/v1
    name: etcd-quorum-guard
    namespace: openshift-machine-config-operator
    unmanaged: true


oc patch clusterversion version --type json -p "$(cat /opt/etcd_quorum_guard.yaml)"

oc scale --replicas=1 deployment/etcd-quorum-guard -n openshift-machine-config-operator

最后查看日志,会显示登录名和密码

openshift-install wait-for install-complete --log-level=debug  --dir=/root/okdinstall

在这里插入图片描述


worker节点操作

ip=192.168.2.131::192.168.2.1:255.255.255.0:worker.okd.example.com:ens192:none nameserver=192.168.2.134 coreos.inst.install_dev=sda coreos.inst.image_url=http://192.168.2.134:82/os/coreos.raw.xz coreos.inst.ignition_url=http://192.168.2.134:82/ign/worker.ign
ssh -i ~/.ssh/id_rsa core@192.168.2.131

# 查看系统日志
journalctl -xef

hostname -I
ip route show
cat /etc/resolv.conf

部署完后在 Bastion Node 上批准 Worker Node 的加入:将 Worker Node 添加到 Cluster 时,会为添加的每台节点生成两个待处理证书签名请求(CSR)。必须确认这些 CSR 已获得批准,或者在必要时自行批准。

# 查看挂起的证书签名请求(CSR),并确保添加到集群的每台节点都能看到具有 Pending 或 Approved 状态的客户端和服务端请求。针对 Pending 状态的 CSR 批准请求执行批准:$ oc adm certificate approve xxx
oc get csr

# 或者执行以下命令批准所有 CSR:
oc get csr -o json | jq -r '.items[] | select(.status == {} ) | .metadata.name' | xargs oc adm certificate approve

单worker方式部署调整副本数

# 修改下面这些服务副本数为 1。

oc scale --replicas=1 ingresscontroller/default -n openshift-ingress-operator
oc scale --replicas=1 deployment.apps/console -n openshift-console
oc scale --replicas=1 deployment.apps/downloads -n openshift-console
oc scale --replicas=1 deployment.apps/oauth-openshift -n openshift-authentication
oc scale --replicas=1 deployment.apps/packageserver -n openshift-operator-lifecycle-manager

oc scale --replicas=1 deployment.apps/prometheus-adapter -n openshift-monitoring
oc scale --replicas=1 deployment.apps/thanos-querier -n openshift-monitoring
oc scale --replicas=1 statefulset.apps/prometheus-k8s -n openshift-monitoring
oc scale --replicas=1 statefulset.apps/alertmanager-main -n openshift-monitoring

登陆方式

[root@bastion ~]# oc get route -A
NAMESPACE                  NAME                HOST/PORT                                                   PATH   SERVICES            PORT    TERMINATION            WILDCARD
openshift-authentication   oauth-openshift     oauth-openshift.apps.okd45.qyos.v2                                 oauth-openshift     6443    passthrough/Redirect   None
openshift-console          console             console-openshift-console.apps.okd45.qyos.v2                       console             https   reencrypt/Redirect     None
openshift-console          downloads           downloads-openshift-console.apps.okd45.qyos.v2                     downloads           http    edge/Redirect          None
openshift-monitoring       alertmanager-main   alertmanager-main-openshift-monitoring.apps.okd45.qyos.v2          alertmanager-main   web     reencrypt/Redirect     None
openshift-monitoring       grafana             grafana-openshift-monitoring.apps.okd45.qyos.v2                    grafana             https   reencrypt/Redirect     None
openshift-monitoring       prometheus-k8s      prometheus-k8s-openshift-monitoring.apps.okd45.qyos.v2             prometheus-k8s      web     reencrypt/Redirect     None
openshift-monitoring       thanos-querier      thanos-querier-openshift-monitoring.apps.okd45.qyos.v2             thanos-querier      web     reencrypt/Redirect     None
[root@bastion ~]# 
===========================================================================
[root@bastion ~]# openshift-install wait-for install-complete --log-level=debug  --dir=/root/install_dir/
DEBUG OpenShift Installer 4.5.0-0.okd-2020-10-15-235428 
DEBUG Built from commit 63200c80c431b8dbaa06c0cc13282d819bd7e5f8 
DEBUG Fetching Install Config...                   
DEBUG Loading Install Config...                    
DEBUG   Loading SSH Key...                         
DEBUG   Loading Base Domain...                     
DEBUG     Loading Platform...                      
DEBUG   Loading Cluster Name...                    
DEBUG     Loading Base Domain...                   
DEBUG     Loading Platform...                      
DEBUG   Loading Pull Secret...                     
DEBUG   Loading Platform...                        
DEBUG Using Install Config loaded from state file  
DEBUG Reusing previously-fetched Install Config    
INFO Waiting up to 30m0s for the cluster at https://api.okd45.qyos.v2:6443 to initialize... 
DEBUG Cluster is initialized                       
INFO Waiting up to 10m0s for the openshift-console route to be created... 
DEBUG Route found in openshift-console namespace: console 
DEBUG Route found in openshift-console namespace: downloads 
DEBUG OpenShift console route is created           
INFO Install complete!                            
INFO To access the cluster as the system:admin user when using 'oc', run 'export KUBECONFIG=/root/install_dir/auth/kubeconfig' 
INFO Access the OpenShift web-console here: https://console-openshift-console.apps.okd45.qyos.v2 
INFO Login to the console with user: "kubeadmin", and password: "iGAee-weXaS-BAWkU-CKANt" 
INFO Time elapsed: 0s                             
[root@bastion ~]# 
名称 | 值
url | https://console-openshift-console.apps.okd45.qyos.v2
user | kubeadmin
password | iGAee-weXaS-BAWkU-CKANt
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 2
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值