OpenShift Enterprise Container Cloud in Practice: Installing an OpenShift 3.11 Cluster

Node Plan

192.168.6.39 master
192.168.6.42 node1
192.168.6.43 node2
192.168.6.47 node3
192.168.6.51 etcd
192.168.6.62 dns
Disable the firewall on all nodes, or open the required ports:
systemctl stop firewalld.service
systemctl disable firewalld.service
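If you would rather keep firewalld running, here is a sketch of the core ports an OpenShift 3.11 cluster uses (assuming the default openshift-sdn and this single-master topology; verify against your own setup):
firewall-cmd --permanent --add-port=8443/tcp      # master API / web console (master)
firewall-cmd --permanent --add-port=10250/tcp     # kubelet (all nodes)
firewall-cmd --permanent --add-port=4789/udp      # VXLAN, openshift-sdn (all nodes)
firewall-cmd --permanent --add-port=2379-2380/tcp # etcd client/peer (etcd host)
firewall-cmd --permanent --add-port=80/tcp --add-port=443/tcp # router (infra node)
firewall-cmd --permanent --add-service=dns        # named (DNS host)
firewall-cmd --reload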

DNS

DNS 192.168.6.62

#yum install bind
#vi /etc/named.conf
options {
	listen-on port 53 { any; };
	listen-on-v6 port 53 { any; };
	directory "/var/named";
	dump-file "/var/named/data/cache_dump.db";
	statistics-file "/var/named/data/named_stats.txt";
	memstatistics-file "/var/named/data/named_mem_stats.txt";
	recursing-file "/var/named/data/named.recursing";
	secroots-file "/var/named/data/named.secroots";
	allow-query { any; };
};

#vi /etc/named.rfc1912.zones
zone "okd.com" IN {
	type master;
	file "okd.com.hosts";
	allow-update { none; };
};

zone "6.168.192.in-addr.arpa" IN {
	type master;
	file "192.168.6.rev";
	allow-update { none; };
};
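The forward zone file okd.com.hosts is referenced above but not shown; a minimal version consistent with the node plan (an assumption, adjust as needed) would be:
$TTL 1D
@	IN SOA	@ rname.invalid. (
				0	; serial
				1D	; refresh
				1H	; retry
				1W	; expire
				3H )	; minimum
	NS	@
master	IN A	192.168.6.39
node1	IN A	192.168.6.42
node2	IN A	192.168.6.43
node3	IN A	192.168.6.47
etcd	IN A	192.168.6.51
dns	IN A	192.168.6.62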

[root@dns ~]# cat /var/named/192.168.6.rev
$TTL 1D
@	IN SOA	@ rname.invalid. (
				0	; serial
				1D	; refresh
				1H	; retry
				1W	; expire
				3H )	; minimum
	NS	@
	A	127.0.0.1
	AAAA	::1
39	IN PTR	master.okd.com.
42	IN PTR	node1.okd.com.
43	IN PTR	node2.okd.com.
47	IN PTR	node3.okd.com.
51	IN PTR	etcd.okd.com.
Note the trailing dots on the PTR targets; without them named appends the zone origin to each name.

[root@dns ~]# systemctl restart named
[root@dns ~]# systemctl enable named

Edit /etc/resolv.conf on every host and point it at the DNS server 192.168.6.62, then test with nslookup.
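For example, both forward and reverse lookups should answer from 192.168.6.62:
nslookup master.okd.com 192.168.6.62
nslookup 192.168.6.39 192.168.6.62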

Setup

On all nodes:

#yum install -y lrzsz git wget net-tools bind-utils iptables-services bridge-utils bash-completion
#yum install -y docker

Power off each node and add a 500 GB disk as Docker storage.
fdisk -l shows the new data disk at /dev/sdb.
Configure the Docker storage location:
#pvcreate /dev/sdb
Physical volume "/dev/sdb" successfully created.
#vgcreate vg1 /dev/sdb
Volume group "vg1" successfully created
#echo VG=vg1 >> /etc/sysconfig/docker-storage-setup
systemctl restart docker
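To confirm docker-storage-setup picked up the new volume group, check for the thin pool and the storage driver (a quick sanity check; exact names can vary by driver):
lvs vg1
docker info | grep -i -A2 'storage driver'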

Use node3 as the Docker image registry; install docker-distribution:
yum install -y docker-distribution;systemctl start docker-distribution;systemctl enable docker-distribution
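docker-distribution listens on port 5000 by default; a quick check against the Docker Registry v2 API confirms it is serving (the repository list stays empty until images are pushed):
curl http://192.168.6.47:5000/v2/_catalog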

Configure Docker via /etc/containers/registries.conf, changing the registry addresses. Distribute the file to all nodes and restart Docker:
[registries.search]
registries = ['192.168.6.47:5000']

#If you need to access insecure registries, add the registry's fully-qualified name.
#An insecure registry is one that does not have a valid SSL certificate or only does HTTP.
[registries.insecure]
registries = ['192.168.6.47:5000']
#If you need to block pull access from a registry, uncomment the section below
#and add the registry's fully-qualified name.

#Docker only
[registries.block]
registries = ['registry.access.redhat.com']

Distribute it:
[root@node3 ~]# scp /etc/containers/registries.conf root@master:/etc/containers/registries.conf
[root@node3 ~]# scp /etc/containers/registries.conf root@node1:/etc/containers/registries.conf
[root@node3 ~]# scp /etc/containers/registries.conf root@node2:/etc/containers/registries.conf
[root@node3 ~]# scp /etc/containers/registries.conf root@etcd:/etc/containers/registries.conf

[root@node3 ~]# cat /etc/docker/daemon.json
{"insecure-registries":["192.168.6.47:5000"]}
[root@node3 ~]# scp /etc/docker/daemon.json root@master:/etc/docker/daemon.json
root@master's password:
daemon.json 100% 46 1.0KB/s 00:00
[root@node3 ~]# scp /etc/docker/daemon.json root@node1:/etc/docker/daemon.json
root@node1's password:
daemon.json 100% 46 0.7KB/s 00:00
[root@node3 ~]# scp /etc/docker/daemon.json root@node2:/etc/docker/daemon.json
root@node2's password:
daemon.json 100% 46 1.0KB/s 00:00
[root@node3 ~]# scp /etc/docker/daemon.json root@etcd:/etc/docker/daemon.json
root@etcd's password:
daemon.json 100% 46 0.1KB/s 00:00
Restart Docker on all nodes:
systemctl restart docker

Enable Docker at boot: systemctl enable docker
Start Docker: systemctl start docker
Check the service status: systemctl status docker

Configure the etcd node:

[root@etcd ~]# yum install -y etcd
[root@etcd ~]# systemctl enable etcd
[root@etcd ~]# systemctl start etcd
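With the stock configuration etcd listens on http://localhost:2379, so a quick pre-install health check (the installer later reconfigures etcd with TLS) is:
[root@etcd ~]# etcdctl cluster-health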

Configure the master node

1. Install EPEL on the master:

[root@master ~]# rpm -Uvh http://mirrors.kernel.org/fedora-epel/epel-release-latest-7.noarch.rpm

2. Install ansible and pyOpenSSL:

[root@master ~]# yum install -y --enablerepo=epel ansible pyOpenSSL

3. Set up passwordless SSH from the master to every node:

[root@master ~]# ssh-keygen -f /root/.ssh/id_rsa -N ''
[root@master ~]# vi /etc/ssh/ssh_config
StrictHostKeyChecking no
UserKnownHostsFile /dev/null
[root@master ~]# ssh-copy-id -i ~/.ssh/id_rsa.pub master.okd.com
[root@master ~]# ssh-copy-id -i ~/.ssh/id_rsa.pub etcd.okd.com
[root@master ~]# ssh-copy-id -i ~/.ssh/id_rsa.pub node1.okd.com
[root@master ~]# ssh-copy-id -i ~/.ssh/id_rsa.pub node2.okd.com
[root@master ~]# ssh-copy-id -i ~/.ssh/id_rsa.pub node3.okd.com
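A quick loop to confirm passwordless SSH works against every host:
[root@master ~]# for h in master etcd node1 node2 node3; do ssh $h.okd.com hostname; done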

4. Download the openshift-ansible source. The playbooks must match the target release, so check out the release-3.11 branch:

[root@master ~]# git clone -b release-3.11 https://github.com/openshift/openshift-ansible
[root@master ~]# ls
anaconda-ks.cfg Documents initial-setup-ks.cfg openshift-ansible Public Videos
Desktop Downloads Music Pictures Templates

5. Configure Ansible

Back up the hosts file:
[root@master ~]# cd /etc/ansible/
[root@master ansible]# ls
ansible.cfg hosts roles
[root@master ansible]# cp hosts hosts.bak

Edit the hosts file and add the following:
#Create an OSEv3 group that contains the masters and nodes groups
[OSEv3:children]
masters
nodes
etcd

#Set variables common for all OSEv3 hosts
[OSEv3:vars]
#SSH user, this user should allow ssh based auth without requiring a password
ansible_ssh_user=root

#If ansible_ssh_user is not root, ansible_become must be set to true
#ansible_become=true
openshift_deployment_type=origin
openshift_release="3.11"
openshift_image_tag=v3.11
openshift_pkg_version=-3.11.0
openshift_use_openshift_sdn=true
#uncomment the following to enable htpasswd authentication; defaults to DenyAllPasswordIdentityProvider
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]

#openshift_master_default_subdomain=ai.com
openshift_disable_check=memory_availability,disk_availability,docker_image_availability
openshift_master_cluster_method=native
openshift_master_cluster_hostname=master
openshift_master_cluster_public_hostname=master
#false
ansible_service_broker_install=false
openshift_enable_service_catalog=false
template_service_broker_install=false
openshift_logging_install_logging=false
enable_excluders=false

#registry passwd
#oreg_url=192.168.6.47:5000/openshift3/ose-${component}:${version}
oreg_url=192.168.6.47:5000/openshift/origin-${component}:${version}
openshift_examples_modify_imagestreams=true

#docker config
openshift_docker_additional_registries=192.168.6.47:5000
openshift_docker_insecure_registries=192.168.6.47:5000
#openshift_docker_blocked_registries
openshift_docker_options="--log-driver json-file --log-opt max-size=1M --log-opt max-file=3"

#host group for masters
[masters]
master.okd.com

#host group for etcd
[etcd]
etcd.okd.com

#host group for nodes, includes region info
[nodes]
master.okd.com openshift_node_group_name='node-config-master'
node1.okd.com openshift_node_group_name='node-config-compute'
node2.okd.com openshift_node_group_name='node-config-infra'
node3.okd.com openshift_node_group_name='node-config-compute'
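Before launching the installer, it is worth confirming that Ansible can reach every host in the inventory (a simple ad-hoc ping):
[root@master ~]# ansible -i /etc/ansible/hosts OSEv3 -m ping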

6. Pull the following Docker images and push them to the private registry (used for offline installation):

[root@node3 ~]# docker pull docker.io/openshift/origin-node:v3.11
[root@node3 ~]# docker pull docker.io/openshift/origin-control-plane:v3.11
[root@node3 ~]# docker pull docker.io/openshift/origin-deployer:v3.11.0
[root@node3 ~]# docker pull docker.io/openshift/origin-haproxy-router:v3.11
[root@node3 ~]# docker pull docker.io/openshift/origin-pod:v3.11.0
[root@node3 ~]# docker pull docker.io/openshift/origin-web-console:v3.11
[root@node3 ~]# docker pull docker.io/openshift/origin-docker-registry:v3.11
[root@node3 ~]# docker pull docker.io/openshift/origin-metrics-server:v3.11
[root@node3 ~]# docker pull docker.io/openshift/origin-console:v3.11
[root@node3 ~]# docker pull docker.io/openshift/origin-metrics-heapster:v3.11
[root@node3 ~]# docker pull docker.io/openshift/origin-metrics-hawkular-metrics:v3.11
[root@node3 ~]# docker pull docker.io/openshift/origin-metrics-schema-installer:v3.11
[root@node3 ~]# docker pull docker.io/openshift/origin-metrics-cassandra:v3.11
[root@node3 ~]# docker pull docker.io/cockpit/kubernetes:latest
[root@node3 ~]# docker pull quay.io/coreos/cluster-monitoring-operator:v0.1.1
[root@node3 ~]# docker pull quay.io/coreos/prometheus-config-reloader:v0.23.2
[root@node3 ~]# docker pull quay.io/coreos/prometheus-operator:v0.23.2
[root@node3 ~]# docker pull docker.io/openshift/prometheus-alertmanager:v0.15.2
[root@node3 ~]# docker pull docker.io/openshift/prometheus-node-exporter:v0.16.0
[root@node3 ~]# docker pull docker.io/openshift/prometheus:v2.3.2
[root@node3 ~]# docker pull docker.io/grafana/grafana:5.2.1
[root@node3 ~]# docker pull quay.io/coreos/kube-rbac-proxy:v0.3.1
[root@node3 ~]# docker pull quay.io/coreos/etcd:v3.2.22
[root@node3 ~]# docker pull quay.io/coreos/kube-state-metrics:v1.3.1
[root@node3 ~]# docker pull docker.io/openshift/oauth-proxy:v1.1.0
[root@node3 ~]# docker pull quay.io/coreos/configmap-reload:v0.0.1

Tag the images for the private registry; the following loop strips the source registry prefix and retags each pulled image against 192.168.6.47:5000:
for img in \
    docker.io/openshift/origin-node:v3.11 \
    docker.io/openshift/origin-control-plane:v3.11 \
    docker.io/openshift/origin-deployer:v3.11.0 \
    docker.io/openshift/origin-haproxy-router:v3.11 \
    docker.io/openshift/origin-pod:v3.11.0 \
    docker.io/openshift/origin-web-console:v3.11 \
    docker.io/openshift/origin-docker-registry:v3.11 \
    docker.io/openshift/origin-metrics-server:v3.11 \
    docker.io/openshift/origin-console:v3.11 \
    docker.io/openshift/origin-metrics-heapster:v3.11 \
    docker.io/openshift/origin-metrics-hawkular-metrics:v3.11 \
    docker.io/openshift/origin-metrics-schema-installer:v3.11 \
    docker.io/openshift/origin-metrics-cassandra:v3.11 \
    docker.io/cockpit/kubernetes:latest \
    quay.io/coreos/cluster-monitoring-operator:v0.1.1 \
    quay.io/coreos/prometheus-config-reloader:v0.23.2 \
    quay.io/coreos/prometheus-operator:v0.23.2 \
    docker.io/openshift/prometheus-alertmanager:v0.15.2 \
    docker.io/openshift/prometheus-node-exporter:v0.16.0 \
    docker.io/openshift/prometheus:v2.3.2 \
    docker.io/grafana/grafana:5.2.1 \
    quay.io/coreos/kube-rbac-proxy:v0.3.1 \
    quay.io/coreos/etcd:v3.2.22 \
    quay.io/coreos/kube-state-metrics:v1.3.1 \
    docker.io/openshift/oauth-proxy:v1.1.0 \
    quay.io/coreos/configmap-reload:v0.0.1
do
    # keep org/name:tag, replace the source registry with the private one
    docker tag "$img" "192.168.6.47:5000/${img#*/}"
done

Push:
#docker images | grep 192.168.6.47:5000 | awk '{print "docker push "$1":"$2}' | sh
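To verify a pushed image, query its tag list via the Registry v2 API:
curl http://192.168.6.47:5000/v2/openshift/origin-control-plane/tags/list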

7. Install the cluster

Problem:
During this process, Docker on the nodes failed to start after {"insecure-registries":["192.168.6.47:5000"]} was added to /etc/docker/daemon.json (likely because the CentOS docker package already passes registry options as daemon flags, which conflicts with the same settings in daemon.json). The entry was removed, the installation switched to online mode, and the inventory hosts file was adjusted accordingly.
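Note: on the release-3.11 branch the documented entry points are playbooks/prerequisites.yml followed by playbooks/deploy_cluster.yml; playbooks/byo/config.yml is kept as a compatibility wrapper for the same installation:
[root@master ansible]# ansible-playbook openshift-ansible/playbooks/prerequisites.yml
[root@master ansible]# ansible-playbook openshift-ansible/playbooks/deploy_cluster.yml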

[root@master ansible]# ansible-playbook openshift-ansible/playbooks/byo/config.yml
......
PLAY RECAP ******************************************************************************************
etcd.okd.com : ok=71 changed=25 unreachable=0 failed=0
localhost : ok=11 changed=0 unreachable=0 failed=0
master.okd.com : ok=548 changed=224 unreachable=0 failed=0
node1.okd.com : ok=109 changed=51 unreachable=0 failed=0
node2.okd.com : ok=109 changed=51 unreachable=0 failed=0
node3.okd.com : ok=107 changed=49 unreachable=0 failed=0

INSTALLER STATUS ************************************************************************************
Initialization : Complete (0:00:59)
Health Check : Complete (0:00:30)
Node Bootstrap Preparation : Complete (0:09:46)
etcd Install : Complete (0:01:19)
Master Install : Complete (0:09:09)
Master Additional Install : Complete (0:01:22)
Node Join : Complete (0:00:31)
Hosted Install : Complete (0:01:44)
Cluster Monitoring Operator : Complete (0:03:06)
Web Console Install : Complete (0:01:21)
Console Install : Complete (0:00:55)
metrics-server Install : Complete (0:00:01)

8. Create a user once the installation finishes

[root@master ~]# htpasswd -cb /etc/origin/master/htpasswd admin mypwd
Adding password for user admin
[root@master ~]# oc adm policy add-cluster-role-to-user cluster-admin admin
Warning: User 'admin' not found
cluster role "cluster-admin" added: "admin"
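Log in as the new user to confirm the htpasswd identity provider works:
[root@master ~]# oc login -u admin -p mypwd https://master:8443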

Verify:
[root@master ~]# oc get nodes
NAME STATUS ROLES AGE VERSION
master Ready master 47m v1.11.0+d4cacc0
node1 Ready compute 41m v1.11.0+d4cacc0
node2 Ready infra 41m v1.11.0+d4cacc0
node3 Ready compute 41m v1.11.0+d4cacc0

[root@etcd ~]# etcdctl --ca-file=/etc/etcd/ca.crt --cert-file=/etc/etcd/peer.crt --key-file=/etc/etcd/peer.key --endpoints=https://etcd:2379 cluster-health
member 38a07c9ba2da50a7 is healthy: got healthy result from https://192.168.6.51:2379
cluster is healthy
[root@etcd ~]# etcdctl --ca-file=/etc/etcd/ca.crt --cert-file=/etc/etcd/peer.crt --key-file=/etc/etcd/peer.key --endpoints=https://etcd:2379 member list
38a07c9ba2da50a7: name=etcd peerURLs=https://192.168.6.51:2380 clientURLs=https://192.168.6.51:2379 isLeader=true

[root@master ~]# oc projects
You have access to the following projects and can switch between them with 'oc project <projectname>':

  * default
    kube-public
    kube-system
    management-infra
    openshift
    openshift-console
    openshift-infra
    openshift-logging
    openshift-monitoring
    openshift-node
    openshift-sdn
    openshift-web-console

Using project "default" on server "https://master:8443".

Log in to https://master:8443 as admin with password mypwd.
