OpenStack HA

OpenStack High Availability Guide — openstackhaguide 0.0.1.dev826 documentation

1. MariaDB cluster

# Run on all controller nodes

yum install mariadb mariadb-server python2-PyMySQL  mariadb-server-galera mariadb-galera-common galera xinetd rsync -y

systemctl restart mariadb.service

systemctl enable mariadb.service

# Initialize (secure the installation)

mysql_secure_installation

# Edit the configuration file on any one node

 cat /etc/my.cnf.d/openstack.cnf

[server]

[mysqld]

bind-address = 10.10.0.11

max_connections = 4096

datadir=/var/lib/mysql

socket=/var/lib/mysql/mysql.sock

log-error=/var/log/mariadb/mariadb.log

pid-file=/run/mariadb/mariadb.pid

[galera]

wsrep_on=ON

wsrep_provider=/usr/lib64/galera/libgalera_smm.so

wsrep_cluster_name="mariadb_galera_cluster"

wsrep_cluster_address="gcomm://openstack1,openstack2,openstack3"

wsrep_node_name="openstack1"

wsrep_node_address="10.10.0.11"

binlog_format=ROW

default_storage_engine=InnoDB

innodb_autoinc_lock_mode=2

wsrep_slave_threads=4

innodb_flush_log_at_trx_commit=2

innodb_buffer_pool_size=1024M

wsrep_sst_method=rsync

[embedded]

[mariadb]

[mariadb-10.3]

# Distribute the file to the other two controller nodes and adjust the per-node values: wsrep_node_name, wsrep_node_address, bind-address

# Stop mariadb on all controller nodes

systemctl stop mariadb

# Bootstrap the cluster on the openstack1 node

 /usr/libexec/mysqld --wsrep-new-cluster --user=root &

# Join the remaining nodes to the cluster

systemctl start mariadb.service

# Back on the bootstrap node, kill the manually started mysqld, remove its data directory, and bring the service back up under systemd

pkill -9 mysqld

rm -rf /var/lib/mysql/*

chown mysql:mysql /var/run/mariadb/mariadb.pid

systemctl start mariadb.service

# Check the cluster status

MariaDB [(none)]>  show status like "wsrep_cluster_size";

+--------------------+-------+

| Variable_name      | Value |

+--------------------+-------+

| wsrep_cluster_size | 3     |

+--------------------+-------+

1 row in set (0.001 sec)

MariaDB [(none)]>  SHOW status LIKE 'wsrep_ready';

+---------------+-------+

| Variable_name | Value |

+---------------+-------+

| wsrep_ready   | ON    |

+---------------+-------+

1 row in set (0.001 sec)
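As an additional sanity check, the incoming-address list should contain all three controller nodes (a quick query; the addresses will match your own environment):

MariaDB [(none)]> SHOW STATUS LIKE 'wsrep_incoming_addresses';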

# On any node, create the health-check (clustercheck) database user

GRANT PROCESS ON *.* TO 'clustercheckuser'@'localhost' IDENTIFIED BY 'clustercheckpassword!';

flush privileges;

# On all controller nodes, update the clustercheck credentials

vim /usr/bin/clustercheck

MYSQL_USERNAME="${MYSQL_USERNAME-clustercheckuser}"

MYSQL_PASSWORD="${MYSQL_PASSWORD-clustercheckpassword!}"

MYSQL_HOST="${MYSQL_HOST:-localhost}"

MYSQL_PORT="${MYSQL_PORT:-3306}"

# On all controller nodes, create the xinetd health-check service file

touch /etc/xinetd.d/galera-monitor

 cat >/etc/xinetd.d/galera-monitor <<EOF

# default:on

# description: galera-monitor

service galera-monitor

{

port = 9200

disable = no

socket_type = stream

protocol = tcp

wait = no

user = root

group = root

groups = yes

server = /usr/bin/clustercheck

type = UNLISTED

per_source = UNLIMITED

log_on_success =

log_on_failure = HOST

flags = REUSE

}

EOF

# On all controller nodes, edit /etc/services to repurpose TCP port 9200 (example shown for the first controller node)

vim /etc/services

...

#wap-wsp        9200/tcp                # WAP connectionless session service

galera-monitor  9200/tcp                # galera-monitor

# Start the xinetd service on all controller nodes

systemctl daemon-reload

systemctl enable xinetd

systemctl start xinetd

# Test the health-check script

/usr/bin/clustercheck
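Once xinetd is running, the same health check can be exercised over TCP port 9200 from any controller (a quick sketch using one of the node IPs above); a healthy node should answer with an HTTP 200 response:

curl -i http://10.10.0.11:9200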

2. RabbitMQ Cluster

# Install on all controller nodes

yum install -y erlang   rabbitmq-server

systemctl enable rabbitmq-server.service

# Start on any one controller node first and check the cluster status

systemctl start rabbitmq-server.service

 rabbitmqctl cluster_status

# On openstack1:

 scp /var/lib/rabbitmq/.erlang.cookie openstack2:/var/lib/rabbitmq/.erlang.cookie

  scp /var/lib/rabbitmq/.erlang.cookie openstack3:/var/lib/rabbitmq/.erlang.cookie

# On openstack2 and openstack3:

chown rabbitmq:rabbitmq /var/lib/rabbitmq/.erlang.cookie

 systemctl start rabbitmq-server

 rabbitmqctl stop_app

 rabbitmqctl join_cluster --ram rabbit@openstack1

 rabbitmqctl start_app

# Check the cluster status

rabbitmqctl cluster_status

# Create the openstack user with password Bl666666

rabbitmqctl add_user openstack  Bl666666

rabbitmqctl set_permissions openstack ".*" ".*" ".*"

# The openstack user should now be visible from any node

rabbitmqctl list_users

# Change the password (if needed)

 rabbitmqctl change_password openstack Bl666666
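To confirm that users and permissions replicate across the cluster, the following standard rabbitmqctl checks can be run on any member (a sketch; the default vhost / is assumed):

rabbitmqctl list_vhosts

rabbitmqctl list_permissions -p /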

3. Memcached and etcd cluster

# Install on all three controller nodes

yum install memcached python-memcached -y

Configure /etc/sysconfig/memcached on all three controller nodes

PORT="11211"

USER="memcached"

MAXCONN="8192"

CACHESIZE="4096"

OPTIONS="-l 0.0.0.0"

systemctl enable memcached.service

systemctl start memcached.service

systemctl status memcached.service
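A quick way to confirm each memcached instance is reachable is to pull its stats with the python-memcached client installed above (a sketch; repeat for 10.10.0.12 and 10.10.0.13):

python -c "import memcache; print(memcache.Client(['10.10.0.11:11211']).get_stats())"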

# Install etcd on all three nodes

yum install -y etcd

# In the configuration file, change the IP addresses and name to each node's own values

cat > /etc/etcd/etcd.conf <<EOF

#[Member]

ETCD_DATA_DIR="/var/lib/etcd/default.etcd"

ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"

ETCD_LISTEN_CLIENT_URLS="http://10.10.0.11:2379,http://127.0.0.1:2379"

ETCD_NAME="openstack1"

#[Clustering]

ETCD_INITIAL_ADVERTISE_PEER_URLS="http://10.10.0.11:2380"

ETCD_ADVERTISE_CLIENT_URLS="http://10.10.0.11:2379"

ETCD_INITIAL_CLUSTER="openstack1=http://10.10.0.11:2380,openstack2=http://10.10.0.12:2380,openstack3=http://10.10.0.13:2380"

ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"

ETCD_INITIAL_CLUSTER_STATE="new"

EOF

# The unit file's [Service] section needs to be changed; keep it identical on all controller nodes

vim /usr/lib/systemd/system/etcd.service

[Service]

Type=notify

WorkingDirectory=/var/lib/etcd/

EnvironmentFile=-/etc/etcd/etcd.conf

User=etcd

# set GOMAXPROCS to number of processors

ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /usr/bin/etcd \

--name=\"${ETCD_NAME}\" \

--data-dir=\"${ETCD_DATA_DIR}\" \

--listen-peer-urls=\"${ETCD_LISTEN_PEER_URLS}\" \

--listen-client-urls=\"${ETCD_LISTEN_CLIENT_URLS}\" \

--initial-advertise-peer-urls=\"${ETCD_INITIAL_ADVERTISE_PEER_URLS}\" \

--advertise-client-urls=\"${ETCD_ADVERTISE_CLIENT_URLS}\" \

--initial-cluster=\"${ETCD_INITIAL_CLUSTER}\"  \

--initial-cluster-token=\"${ETCD_INITIAL_CLUSTER_TOKEN}\" \

--initial-cluster-state=\"${ETCD_INITIAL_CLUSTER_STATE}\""

Restart=on-failure

LimitNOFILE=65536

# Start

systemctl enable etcd

systemctl restart etcd

systemctl status etcd

# Verify

etcdctl cluster-health

etcdctl member list
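A simple write/read through the v2 API (the same API used by etcdctl cluster-health above) confirms the cluster accepts and replicates keys; the key name here is arbitrary:

etcdctl set /ha-test ok

etcdctl get /ha-test   # run this on a different node to confirm replication

etcdctl rm /ha-test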

4. Configure the Pacemaker cluster

 # Install and run on all three controller nodes; passwordless SSH between the three nodes is assumed

 yum install -y pacemaker pcs corosync fence-agents resource-agents   psmisc policycoreutils-python

 systemctl start pcsd.service

 systemctl enable pcsd.service

 echo Bl666666 | passwd hacluster --stdin  # Bl666666 is the hacluster password

 # Authenticate the cluster nodes from any one node (the node names must match those used in pcs cluster setup below)

 pcs cluster auth openstack1 openstack2 openstack3 -u hacluster -p Bl666666

# Create and start the cluster named openstack-cluster, with openstack1, openstack2 and openstack3 as members:

 pcs cluster setup --name openstack-cluster openstack1 openstack2 openstack3

# Start the cluster and enable it at boot

 pcs cluster start  --all

 pcs cluster enable --all

# Check the cluster status

pcs cluster status

# View the node configuration

cibadmin --query --scope nodes

# Check corosync status

 pcs status corosync

# List the members

 corosync-cmapctl | grep members

# List resources

pcs resource

# Delete a resource

pcs resource delete <resource>

#Access Pacemaker through the pcsd web UI

#Browse to any controller node: https://10.10.0.11:2224

#Account/password (the hacluster password set above): hacluster/Bl666666

#Although the cluster was created on the command line, the web UI does not show it by default; add the existing cluster manually by pointing the UI at any one of its member nodes

# Set some cluster properties

pcs property set pe-warn-series-max=1000 pe-input-series-max=1000  pe-error-series-max=1000

pcs property set cluster-recheck-interval=5

pcs property set stonith-enabled=false

pcs property set no-quorum-policy=ignore

# View the updated property values

pcs property list

# Create the VIP and haproxy resources and add constraints so that the VIP runs on a node where the haproxy service is healthy (the haproxy resource and its constraints are created in section 5)

 pcs resource create vip ocf:heartbeat:IPaddr2 ip=10.10.0.10 cidr_netmask=24 op monitor interval=2s
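After the resource is created, a quick check shows which node currently holds the VIP (the interface name in the ip output depends on your environment):

pcs status resources

ip addr show | grep 10.10.0.10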

5. Deploy HAProxy

# Run on all controller nodes

yum install haproxy -y

# The VIP is 10.10.0.10; the three node IPs are 10.10.0.11, 10.10.0.12 and 10.10.0.13

vim /etc/haproxy/haproxy.cfg  # paste in the contents of the template below

# Replace 10.10.0.10 with your VIP and 10.10.0.11-13 with your node IPs

Configure /etc/sysctl.conf on all nodes

net.ipv6.conf.all.disable_ipv6 = 1

net.ipv6.conf.default.disable_ipv6 = 1

fs.file-max = 655360

net.ipv4.ip_forward = 1

net.ipv4.ip_nonlocal_bind = 1

net.ipv4.conf.all.rp_filter = 0

net.ipv4.conf.default.rp_filter = 0

# Disable the sysrq feature

kernel.sysrq = 0

# Local (ephemeral) port range the system may use

net.ipv4.ip_local_port_range = 10240 65000

vm.swappiness = 0

sysctl -p

systemctl enable haproxy

systemctl restart haproxy

systemctl status haproxy

ss -antlp | grep haproxy

# Stats page: http://10.10.0.10:1080  username/password: admin/admin

# Create the pcs resources

pcs resource create lb-haproxy systemd:haproxy clone

pcs constraint order start vip then lb-haproxy-clone kind=Optional

pcs constraint colocation add lb-haproxy-clone with vip
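The resulting resources and constraints can be reviewed with (a quick check):

pcs resource

pcs constraint show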

Configuration file template (/etc/haproxy/haproxy.cfg)

global

  log      127.0.0.1     local0

  chroot   /var/lib/haproxy

  daemon

  group    haproxy

  user     haproxy

  maxconn  4000

  pidfile  /var/run/haproxy.pid

  stats    socket /var/lib/haproxy/stats

defaults

    mode                    http

    log                     global

    maxconn                 4000  # maximum connections

    option                  httplog

    option                  redispatch

    retries                 3

    timeout http-request    10s

    timeout queue           1m

    timeout connect         10s

    timeout client          1m

    timeout server          1m

    timeout check           10s

# HAProxy stats page

listen stats

  bind 0.0.0.0:1080

  mode http

  stats enable

  stats uri /

  stats realm OpenStack\ Haproxy

  stats auth admin:admin

  stats  refresh 30s

  stats  show-node

  stats  show-legends

  stats  hide-version

# Horizon service

listen dashboard_cluster

  bind  10.10.0.10:80

  balance  source

  option  tcpka

  option  httpchk

  option  tcplog

  server openstack1 10.10.0.11:80 check inter 2000 rise 2 fall 5

  server openstack2 10.10.0.12:80 check inter 2000 rise 2 fall 5

  server openstack3 10.10.0.13:80 check inter 2000 rise 2 fall 5

# MariaDB service;

#openstack1 is the master and openstack2/3 are backups; a single-writer (one master, multiple backups) layout avoids data inconsistency;

#The official example health-checks the 9200 (clustercheck) port, but in testing, when the mariadb service was down the xinetd-managed port 9200 still responded even though the /usr/bin/clustercheck script could no longer reach the service, so haproxy kept forwarding requests to the failed node; for now the check is done directly against port 3306

listen galera_cluster

  bind 10.10.0.10:3306

  balance  source

  mode    tcp

  server openstack1 10.10.0.11:3306 check inter 2000 rise 2 fall 5

  server openstack2 10.10.0.12:3306 backup check inter 2000 rise 2 fall 5

  server openstack3 10.10.0.13:3306 backup check inter 2000 rise 2 fall 5

#Provides an HA access port for the rabbitmq cluster for the OpenStack services to use;

#if the OpenStack services connect to the rabbitmq cluster nodes directly, this rabbitmq load-balancing block can be omitted

 listen rabbitmq_cluster

   bind 10.10.0.10:5673

   mode tcp

   option tcpka

   balance roundrobin

   timeout client  3h

   timeout server  3h

   option  clitcpka

   server openstack1 10.10.0.11:5672 check inter 10s rise 2 fall 5

   server openstack2 10.10.0.12:5672 check inter 10s rise 2 fall 5

   server openstack3 10.10.0.13:5672 check inter 10s rise 2 fall 5

# glance_api service

 listen glance_api_cluster

  bind  10.10.0.10:9292

  balance  source

  option  tcpka

  option  httpchk

  option  tcplog

  server openstack1 10.10.0.11:9292 check inter 2000 rise 2 fall 5

  server openstack2 10.10.0.12:9292 check inter 2000 rise 2 fall 5

  server openstack3 10.10.0.13:9292 check inter 2000 rise 2 fall 5

# keystone_public_api service

 listen keystone_public_cluster

  bind 10.10.0.10:5000

  balance  source

  option  tcpka

  option  httpchk

  option  tcplog

  server openstack1 10.10.0.11:5000 check inter 2000 rise 2 fall 5

  server openstack2 10.10.0.12:5000 check inter 2000 rise 2 fall 5

  server openstack3 10.10.0.13:5000 check inter 2000 rise 2 fall 5

 listen nova_compute_api_cluster

  bind 10.10.0.10:8774

  balance  source

  option  tcpka

  option  httpchk

  option  tcplog

  server openstack1 10.10.0.11:8774 check inter 2000 rise 2 fall 5

  server openstack2 10.10.0.12:8774 check inter 2000 rise 2 fall 5

  server openstack3 10.10.0.13:8774 check inter 2000 rise 2 fall 5

 listen nova_placement_cluster

  bind 10.10.0.10:8778

  balance  source

  option  tcpka

  option  tcplog

  server openstack1 10.10.0.11:8778 check inter 2000 rise 2 fall 5

  server openstack2 10.10.0.12:8778 check inter 2000 rise 2 fall 5

  server openstack3 10.10.0.13:8778 check inter 2000 rise 2 fall 5

 listen nova_metadata_api_cluster

  bind 10.10.0.10:8775

  balance  source

  option  tcpka

  option  tcplog

  server openstack1 10.10.0.11:8775 check inter 2000 rise 2 fall 5

  server openstack2 10.10.0.12:8775 check inter 2000 rise 2 fall 5

  server openstack3 10.10.0.13:8775 check inter 2000 rise 2 fall 5

 listen nova_vncproxy_cluster

  bind 10.10.0.10:6080

  balance  source

  option  tcpka

  option  tcplog

  server openstack1 10.10.0.11:6080 check inter 2000 rise 2 fall 5

  server openstack2 10.10.0.12:6080 check inter 2000 rise 2 fall 5

  server openstack3 10.10.0.13:6080 check inter 2000 rise 2 fall 5

 listen neutron_api_cluster

  bind 10.10.0.10:9696

  balance  source

  option  tcpka

  option  httpchk

  option  tcplog

  server openstack1 10.10.0.11:9696 check inter 2000 rise 2 fall 5

  server openstack2 10.10.0.12:9696 check inter 2000 rise 2 fall 5

  server openstack3 10.10.0.13:9696 check inter 2000 rise 2 fall 5
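Before restarting HAProxy with the template above, the configuration can be syntax-checked on each node with HAProxy's built-in check mode:

haproxy -c -f /etc/haproxy/haproxy.cfg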

6. Keystone installation

OpenStack Docs: Install and configure

# Create the database on any controller node

mysql -u root -p

MariaDB [(none)]> CREATE DATABASE keystone;

MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' \

IDENTIFIED BY 'KEYSTONE_DBPASS';

MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \

IDENTIFIED BY 'KEYSTONE_DBPASS';

# Install on all controller nodes

yum install  openstack-keystone httpd mod_wsgi  --enablerepo=centos-openstack-stein --disablerepo=epel -y

# Keep the keystone configuration file identical on all three nodes

#Note: add name resolution for the controller hostname (in this guide it should resolve to the VIP 10.10.0.10)

vi /etc/keystone/keystone.conf

scp -rp /etc/keystone/keystone.conf openstack02:/etc/keystone/keystone.conf

scp -rp /etc/keystone/keystone.conf openstack03:/etc/keystone/keystone.conf

# Populate the database from any controller node

su -s /bin/sh -c "keystone-manage db_sync" keystone

# Verify the tables were created

mysql -uroot -pBl666666  keystone  -e "show  tables";

# Initialize the Fernet key repositories on any one node:

keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone

keystone-manage credential_setup --keystone-user keystone --keystone-group keystone

# Copy the keys to the other nodes

scp -rp /etc/keystone/fernet-keys /etc/keystone/credential-keys openstack02:/etc/keystone/

scp -rp /etc/keystone/fernet-keys /etc/keystone/credential-keys openstack03:/etc/keystone/

# Fix ownership of the copied files (on the receiving nodes)

chown -R keystone:keystone /etc/keystone/credential-keys/

chown -R keystone:keystone /etc/keystone/fernet-keys/

# Bootstrap the admin user (ADMIN_PASS must match the OS_PASSWORD used in admin-openrc below, i.e. Bl666666)

keystone-manage bootstrap --bootstrap-password ADMIN_PASS \

  --bootstrap-admin-url http://controller:5000/v3/ \

  --bootstrap-internal-url http://controller:5000/v3/ \

  --bootstrap-public-url http://controller:5000/v3/ \

  --bootstrap-region-id RegionOne

  

# Configure httpd on all nodes

cp /etc/httpd/conf/httpd.conf{,.bak}

sed -i "s/#ServerName www.example.com:80/ServerName ${HOSTNAME}/" /etc/httpd/conf/httpd.conf

# Use each node's own IP address

##openstack01

sed -i "s/Listen\ 80/Listen\ 10.10.0.11:80/g" /etc/httpd/conf/httpd.conf

##openstack02

sed -i "s/Listen\ 80/Listen\ 10.10.0.12:80/g" /etc/httpd/conf/httpd.conf

##openstack03

sed -i "s/Listen\ 80/Listen\ 10.10.0.13:80/g" /etc/httpd/conf/httpd.conf

ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/

# Use each node's own IP address

##openstack01

sed -i "s/Listen\ 5000/Listen\ 10.10.0.11:5000/g" /etc/httpd/conf.d/wsgi-keystone.conf

sed -i "s#*:5000#10.10.0.11:5000#g" /etc/httpd/conf.d/wsgi-keystone.conf

##openstack02

sed -i "s/Listen\ 5000/Listen\ 10.10.0.12:5000/g" /etc/httpd/conf.d/wsgi-keystone.conf

sed -i "s#*:5000#10.10.0.12:5000#g" /etc/httpd/conf.d/wsgi-keystone.conf

##openstack03

sed -i "s/Listen\ 5000/Listen\ 10.10.0.13:5000/g" /etc/httpd/conf.d/wsgi-keystone.conf

sed -i "s#*:5000#10.10.0.13:5000#g" /etc/httpd/conf.d/wsgi-keystone.conf

# Start httpd on all nodes

systemctl enable httpd.service &&  systemctl start httpd.service

# On all controller nodes (or create on one node and copy it to the others)

cat >> ~/admin-openrc << EOF

#admin-openrc

export OS_USERNAME=admin

export OS_PASSWORD=Bl666666

export OS_PROJECT_NAME=admin

export OS_USER_DOMAIN_NAME=Default

export OS_PROJECT_DOMAIN_NAME=Default

export OS_AUTH_URL=http://10.10.0.10:5000/v3

export OS_IDENTITY_API_VERSION=3

export OS_IMAGE_API_VERSION=2

EOF

source  ~/admin-openrc

# Verify on all controller nodes

openstack domain list

# On any node, create the service project

openstack project create --domain default \

  --description "Service Project" service

openstack token issue

openstack project list
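As an end-to-end check of HAProxy plus Keystone, a scoped token can also be requested directly through the VIP (a sketch using the admin credentials from admin-openrc above; the X-Subject-Token response header carries the token):

curl -si -H "Content-Type: application/json" \
  -d '{"auth": {"identity": {"methods": ["password"], "password": {"user": {"name": "admin", "domain": {"name": "Default"}, "password": "Bl666666"}}}, "scope": {"project": {"name": "admin", "domain": {"name": "Default"}}}}' \
  http://10.10.0.10:5000/v3/auth/tokens | grep -i x-subject-token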

# Create the pcs resource

 pcs resource create openstack-keystone systemd:httpd clone interleave=true

Configuration file template (/etc/keystone/keystone.conf)

[database]

# ...

connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone

[token]

# ...

provider = fernet

7. Glance

OpenStack Docs: Install and configure (Red Hat)

# Create the database on any controller node

mysql -u root -p

MariaDB [(none)]> CREATE DATABASE glance;

MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \

  IDENTIFIED BY 'GLANCE_DBPASS';

MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \

  IDENTIFIED BY 'GLANCE_DBPASS';

source admin-openrc

# Create the keystone user on any controller node; enter GLANCE_PASS when prompted

openstack user create --domain default --password-prompt glance

# Grant the user the admin role

openstack role add --project service --user glance admin

# Register the service and endpoints

openstack service create --name glance --description "OpenStack Image" image

openstack endpoint create --region RegionOne image public http://controller:9292

openstack endpoint create --region RegionOne image internal http://controller:9292

openstack endpoint create --region RegionOne image admin http://controller:9292

# Verify on all controller nodes

openstack endpoint list

# Install on all controller nodes

yum install  openstack-glance  --enablerepo=centos-openstack-stein --disablerepo=epel -y

# Configuration templates are shown below; change the bind_host IP to each node's own IP

vi /etc/glance/glance-api.conf

vi /etc/glance/glance-registry.conf

# Populate the database from any node

su -s /bin/sh -c "glance-manage db_sync" glance

# Verify the import succeeded

mysql -uglance -pBl666666 -e "use glance;show tables;"

# Start on all controller nodes

systemctl enable openstack-glance-api.service \

  openstack-glance-registry.service

systemctl start openstack-glance-api.service \

  openstack-glance-registry.service

# Create the pcs resource on any node

pcs resource create openstack-glance-api systemd:openstack-glance-api clone interleave=true

pcs resource

# Verify on any node

source admin-openrc

wget -c http://download.cirros-cloud.net/0.5.1/cirros-0.5.1-x86_64-disk.img

openstack image create --file ~/cirros-0.5.1-x86_64-disk.img --disk-format qcow2 --container-format bare --public cirros-qcow2

openstack image list
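Because the file store configured below is local to each controller, it is worth confirming that the image is active and present in the local image directory of the node that handled the upload (a quick check):

openstack image show cirros-qcow2 -c status -f value

ls -lh /var/lib/glance/images/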

Configuration file templates (/etc/glance/glance-api.conf, then /etc/glance/glance-registry.conf)

[DEFAULT]

bind_host = 10.10.0.11

[database]

# ...

connection = mysql+pymysql://glance:Bl666666@controller/glance

[keystone_authtoken]

# ...

www_authenticate_uri  = http://controller:5000

auth_url = http://controller:5000

memcached_servers = openstack01:11211,openstack02:11211,openstack03:11211

auth_type = password

project_domain_name = Default

user_domain_name = Default

project_name = service

username = glance

password = Bl666666

[paste_deploy]

# ...

flavor = keystone

[glance_store]

# ...

stores = file,http

default_store = file

filesystem_store_datadir = /var/lib/glance/images/

[DEFAULT]

bind_host = 10.10.0.11

[database]

# ...

connection = mysql+pymysql://glance:Bl666666@controller/glance

[keystone_authtoken]

# ...

www_authenticate_uri = http://controller:5000

auth_url = http://controller:5000

memcached_servers = openstack01:11211,openstack02:11211,openstack03:11211

auth_type = password

project_domain_name = Default

user_domain_name = Default

project_name = service

username = glance

password = Bl666666

[paste_deploy]

# ...

flavor = keystone

8. Placement

OpenStack Docs: Placement Service

# Create the database

mysql -u root -p

MariaDB [(none)]> CREATE DATABASE placement;

MariaDB [(none)]> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' \

  IDENTIFIED BY 'PLACEMENT_DBPASS';

MariaDB [(none)]> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' \

  IDENTIFIED BY 'PLACEMENT_DBPASS';

source admin-openrc

# Create the keystone user; enter PLACEMENT_PASS when prompted

openstack user create --domain default --password-prompt placement

# Grant the user the admin role

openstack role add --project service --user placement admin

# Register the service and endpoints

openstack service create --name placement --description "Placement API" placement

openstack endpoint create --region RegionOne placement public http://controller:8778

openstack endpoint create --region RegionOne placement internal http://controller:8778

openstack endpoint create --region RegionOne placement admin http://controller:8778

# Install

 yum install openstack-placement-api -y

# Configuration file

vi /etc/placement/placement.conf

# Populate the database

su -s /bin/sh -c "placement-manage db sync" placement

# Verify the data was imported

 mysql -uroot -pBl666666 placement -e " show tables;"

# On each of the three nodes, edit /etc/httpd/conf.d/00-placement-api.conf, replacing 10.10.0.11 below with the node's own IP

sed -i "s/Listen\ 8778/Listen\ 10.10.0.11:8778/g" /etc/httpd/conf.d/00-placement-api.conf

sed -i "s/*:8778/10.10.0.11:8778/g" /etc/httpd/conf.d/00-placement-api.conf

# Start

systemctl restart httpd

# Verify the installation

placement-status upgrade check
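Version discovery on the Placement root URL does not require a token, so a plain request through the VIP is a quick end-to-end check of HAProxy plus the API (a sketch):

curl -s http://10.10.0.10:8778 | python -m json.tool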

Configuration file template (/etc/placement/placement.conf)

[placement_database]

# ...

connection = mysql+pymysql://placement:Bl666666@controller/placement

[api]

# ...

auth_strategy = keystone

[keystone_authtoken]

# ...

auth_url = http://controller:5000/v3

memcached_servers = openstack01:11211,openstack02:11211,openstack03:11211

auth_type = password

project_domain_name = Default

user_domain_name = Default

project_name = service

username = placement

password = Bl666666

Add the following to /etc/httpd/conf.d/00-placement-api.conf, inside the <VirtualHost> block (before </VirtualHost>)

  <Directory /usr/bin>

     <IfVersion >= 2.4>

        Require all granted

     </IfVersion>

     <IfVersion < 2.4>

        Order allow,deny

        Allow from all

     </IfVersion>

  </Directory>

9. Nova

9.1 Controller

OpenStack Docs: Install and configure controller node for Red Hat Enterprise Linux and CentOS

# Create the databases on any node

mysql -u root -p

MariaDB [(none)]> CREATE DATABASE nova_api;

MariaDB [(none)]> CREATE DATABASE nova;

MariaDB [(none)]> CREATE DATABASE nova_cell0;

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' \

  IDENTIFIED BY 'NOVA_DBPASS';

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' \

  IDENTIFIED BY 'NOVA_DBPASS';

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \

  IDENTIFIED BY 'NOVA_DBPASS';

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \

  IDENTIFIED BY 'NOVA_DBPASS';

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' \

  IDENTIFIED BY 'NOVA_DBPASS';

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' \

  IDENTIFIED BY 'NOVA_DBPASS';

  

source admin-openrc

# Create the keystone user on any controller node; enter NOVA_PASS when prompted

openstack user create --domain default --password-prompt nova

# Grant the user the admin role (any controller node)

openstack role add --project service --user nova admin

# Register the service and endpoints (any controller node)

openstack service create --name nova --description "OpenStack Compute" compute

openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1

openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1

openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1

# Install on all controller nodes

yum install --disablerepo="epel" --disablerepo=centos-ceph-nautilus openstack-nova-api \

openstack-nova-conductor   openstack-nova-novncproxy openstack-nova-scheduler

# Configuration file; see the template below and set my_ip to each node's own IP

vi /etc/nova/nova.conf

# Populate the databases from any controller node

su -s /bin/sh -c "nova-manage api_db sync" nova

su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova

su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova

su -s /bin/sh -c "nova-manage db sync" nova

su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova

# Start on all controller nodes

systemctl enable openstack-nova-api.service \

  openstack-nova-scheduler.service \

  openstack-nova-conductor.service \

  openstack-nova-novncproxy.service

systemctl start openstack-nova-api.service \

  openstack-nova-scheduler.service \

  openstack-nova-conductor.service \

  openstack-nova-novncproxy.service

# Verify on any controller node

openstack compute service list

openstack catalog list

# Create the pcs resources on any controller node

pcs resource create openstack-nova-api systemd:openstack-nova-api clone interleave=true

pcs resource create openstack-nova-scheduler systemd:openstack-nova-scheduler clone interleave=true

pcs resource create openstack-nova-conductor systemd:openstack-nova-conductor clone interleave=true

pcs resource create openstack-nova-novncproxy systemd:openstack-nova-novncproxy clone interleave=true

# List the pcs resources

pcs resource

Configuration file template (/etc/nova/nova.conf)

[DEFAULT]

# ...

enabled_apis = osapi_compute,metadata

transport_url = rabbit://openstack:Bl666666@openstack01:5672,openstack:Bl666666@openstack02:5672,openstack:Bl666666@openstack03:5672

my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS

use_neutron = true

firewall_driver = nova.virt.firewall.NoopFirewallDriver

[api_database]

# ...

connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api

[database]

# ...

connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova

[api]

# ...

auth_strategy = keystone

[keystone_authtoken]

# ...

auth_url = http://controller:5000/v3

memcached_servers = openstack01:11211,openstack02:11211,openstack03:11211

auth_type = password

project_domain_name = Default

user_domain_name = Default

project_name = service

username = nova

password = NOVA_PASS

[vnc]

enabled = true

# ...

server_listen = $my_ip

server_proxyclient_address = $my_ip

[glance]

# ...

api_servers = http://controller:9292

[oslo_concurrency]

# ...

lock_path = /var/lib/nova/tmp

[placement]

# ...

region_name = RegionOne

project_domain_name = Default

project_name = service

auth_type = password

user_domain_name = Default

auth_url = http://controller:5000/v3

username = placement

password = PLACEMENT_PASS

9.2 Compute

OpenStack Docs: Install and configure a compute node for Red Hat Enterprise Linux and CentOS

# Install

yum install --disablerepo="epel" openstack-nova-compute -y
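Before editing nova.conf, the upstream install guide suggests checking whether the compute node supports hardware acceleration; if the command prints 0, set virt_type = qemu in the [libvirt] section instead of the default kvm:

egrep -c '(vmx|svm)' /proc/cpuinfo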

# Configuration file; see the template below and set my_ip to each node's own IP

vi /etc/nova/nova.conf

# Start

systemctl enable libvirtd.service openstack-nova-compute.service

systemctl start  libvirtd.service openstack-nova-compute.service

# Verify after the node has been registered (see 9.3 for registration)

openstack compute service list

openstack catalog list

nova-status upgrade check

Configuration file template (/etc/nova/nova.conf on the compute node)

[DEFAULT]

# ...

enabled_apis = osapi_compute,metadata

transport_url = rabbit://openstack:Bl666666@openstack01:5672,openstack:Bl666666@openstack02:5672,openstack:Bl666666@openstack03:5672

my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS

use_neutron = true

firewall_driver = nova.virt.firewall.NoopFirewallDriver

[api_database]

# ...

connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api

[database]

# ...

connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova

[api]

# ...

auth_strategy = keystone

[keystone_authtoken]

# ...

auth_url = http://controller:5000/v3

memcached_servers = openstack01:11211,openstack02:11211,openstack03:11211

auth_type = password

project_domain_name = Default

user_domain_name = Default

project_name = service

username = nova

password = NOVA_PASS

[vnc]

enabled = true

# ...

server_listen = $my_ip

server_proxyclient_address = $my_ip

novncproxy_base_url = http://10.10.0.10:6080/vnc_auto.html

[glance]

# ...

api_servers = http://controller:9292

[oslo_concurrency]

# ...

lock_path = /var/lib/nova/tmp

[placement]

# ...

region_name = RegionOne

project_domain_name = Default

project_name = service

auth_type = password

user_domain_name = Default

auth_url = http://controller:5000/v3

username = placement

password = PLACEMENT_PASS

9.3 Compute node registration

# Run on a controller node

openstack compute service list --service nova-compute

su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova

# Configure automatic host discovery (see the snippet below)

vi /etc/nova/nova.conf
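The discovery option itself is not shown above; the setting used by the upstream guide is the following (300 seconds is the documented example interval):

[scheduler]
discover_hosts_in_cells_interval = 300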

# Restart the services

systemctl restart openstack-nova-api.service \

  openstack-nova-scheduler.service

10. Dashboard

OpenStack Docs: Install and configure for Red Hat Enterprise Linux and CentOS (Horizon)

# Install

yum install --disablerepo="epel"  --disablerepo=extras   openstack-dashboard -y

# Configuration files on all controller nodes

sed -i '3a WSGIApplicationGroup %{GLOBAL}' /etc/httpd/conf.d/openstack-dashboard.conf

vi /etc/openstack-dashboard/local_settings

scp -rp /etc/openstack-dashboard/local_settings  openstack02:/etc/openstack-dashboard/

scp -rp /etc/openstack-dashboard/local_settings  openstack03:/etc/openstack-dashboard/

# Restart

systemctl restart httpd.service memcached.service

Configuration file template (/etc/openstack-dashboard/local_settings)

# ...

OPENSTACK_HOST = "10.10.0.10"

ALLOWED_HOSTS = ['*', 'localhost']

SESSION_ENGINE = 'django.contrib.sessions.backends.cache'

CACHES = {

    'default': {

         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',

         'LOCATION': 'openstack01:11211,openstack02:11211,openstack03:11211',

    }

}

OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST

OPENSTACK_API_VERSIONS = {

    "identity": 3,

    "image": 2,

    "volume": 3,

}

OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"

OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"

OPENSTACK_NEUTRON_NETWORK = {

    ...

    'enable_ipv6': False,

}

TIME_ZONE = "Asia/Shanghai"
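With the RDO packages, Horizon is served under /dashboard, so a quick check through the VIP and the dashboard_cluster frontend above looks like this (a sketch; expect a redirect to the login page):

curl -IL http://10.10.0.10/dashboard/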

11. Network

OpenStack Docs: Open vSwitch mechanism driver

11.1 Controller node

# Create the database on any controller node

mysql -u root -p

MariaDB [(none)]> CREATE DATABASE neutron;

MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' \

  IDENTIFIED BY 'NEUTRON_DBPASS';

MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \

  IDENTIFIED BY 'NEUTRON_DBPASS';

source admin-openrc

# Create the keystone user on any controller node; enter NEUTRON_PASS when prompted

openstack user create --domain default --password-prompt neutron

# Grant the user the admin role (any controller node)

openstack role add --project service --user neutron admin

# Register the service and endpoints (any controller node)

openstack service create --name neutron --description "OpenStack Networking" network

openstack endpoint create --region RegionOne network public http://controller:9696

openstack endpoint create --region RegionOne network internal http://controller:9696

openstack endpoint create --region RegionOne network admin http://controller:9696

11.2 Network node

# Install on all controller nodes

yum install openstack-neutron openstack-neutron-ml2 \

openstack-neutron-openvswitch ebtables --disablerepo=epel \

--disablerepo=centos-ceph-nautilus -y

  

# Edit the configuration files on all controller nodes

vi /etc/neutron/neutron.conf

vi /etc/neutron/plugins/ml2/ml2_conf.ini

# Configure the OVS bridge on all controller nodes (PROVIDER_INTERFACE is the physical provider NIC; see the example after these commands)

yum install -y libibverbs

systemctl enable neutron-openvswitch-agent.service

systemctl start neutron-openvswitch-agent.service

ovs-vsctl add-br br-provider

ovs-vsctl add-port br-provider PROVIDER_INTERFACE
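PROVIDER_INTERFACE is a placeholder for the physical NIC attached to the provider network; for example, if that NIC were eth1 (a hypothetical name), the port add and a quick verification would look like:

ovs-vsctl add-port br-provider eth1   # eth1 is an assumed interface name

ovs-vsctl show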

# Edit the agent configuration files on all controller nodes

vi /etc/neutron/plugins/ml2/openvswitch_agent.ini

vi /etc/neutron/l3_agent.ini

ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

# Populate the database from any controller node

su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \

  --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

# Start on all controller nodes

systemctl enable neutron-server.service neutron-l3-agent.service

systemctl start neutron-server.service neutron-l3-agent.service

systemctl restart neutron-openvswitch-agent.service

# Verify

openstack network agent list

# Create the pcs resources

pcs resource create neutron-openvswitch-agent systemd:neutron-openvswitch-agent clone interleave=true

pcs resource create neutron-l3-agent systemd:neutron-l3-agent clone interleave=true

pcs resource create neutron-dhcp-agent systemd:neutron-dhcp-agent clone interleave=true

pcs resource create neutron-metadata-agent systemd:neutron-metadata-agent clone interleave=true

pcs resource create neutron-server systemd:neutron-server clone interleave=true

pcs resource

Enable Neutron in Nova

vi /etc/nova/nova.conf

[neutron]

# ...

auth_url = http://controller:5000

auth_type = password

project_domain_name = default

user_domain_name = default

region_name = RegionOne

project_name = service

username = neutron

password = NEUTRON_PASS

service_metadata_proxy = true

metadata_proxy_shared_secret = METADATA_SECRET

systemctl restart openstack-nova-api.service
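METADATA_SECRET is an arbitrary shared secret and must be identical here and in metadata_agent.ini on the nodes running neutron-metadata-agent; one way to generate a value (a sketch):

openssl rand -hex 16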

Configuration file templates (neutron.conf, ml2_conf.ini, openvswitch_agent.ini, l3_agent.ini)

[DEFAULT]

core_plugin = ml2

service_plugins = router

allow_overlapping_ips = true

transport_url = rabbit://openstack:Bl666666@openstack01:5672,openstack:Bl666666@openstack02:5672,openstack:Bl666666@openstack03:5672

auth_strategy = keystone

notify_nova_on_port_status_changes = true

notify_nova_on_port_data_changes = true

dhcp_agents_per_network = 2

bind_host = 10.10.0.11

[database]

connection = mysql+pymysql://neutron:Bl666666@controller/neutron

[keystone_authtoken]

www_authenticate_uri = http://controller:5000

auth_url = http://controller:5000

memcached_servers = openstack01:11211,openstack02:11211,openstack03:11211

auth_type = password

project_domain_name = default

user_domain_name = default

project_name = service

username = neutron

password = Bl666666

[nova]

auth_url = http://controller:5000

auth_type = password

project_domain_name = default

user_domain_name = default

region_name = RegionOne

project_name = service

username = nova

password = Bl666666

[oslo_concurrency]

lock_path = /var/lib/neutron/tmp

[ml2]

# ...

type_drivers = flat,vlan,vxlan

tenant_network_types = vxlan

mechanism_drivers = openvswitch,l2population

extension_drivers = port_security

[ml2_type_flat]

# ...

flat_networks = provider

[ml2_type_vxlan]

# ...

vni_ranges = 1:1000

[securitygroup]

# ...

enable_ipset = true

[ovs]

bridge_mappings = provider:br-provider

local_ip = 10.10.0.11

[agent]

tunnel_types = vxlan

l2_population = True

[securitygroup]

firewall_driver = iptables_hybrid

[DEFAULT]

interface_driver = openvswitch

11.3 Compute node networking

# Install

yum -y install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch ebtables

# Edit the configuration files

vi /etc/neutron/neutron.conf

vi /etc/neutron/plugins/ml2/openvswitch_agent.ini

vi /etc/neutron/dhcp_agent.ini

vi /etc/neutron/metadata_agent.ini

# Configure the OVS bridge

yum install -y libibverbs

systemctl enable neutron-openvswitch-agent.service

systemctl start neutron-openvswitch-agent.service

ovs-vsctl add-br br-provider

ovs-vsctl add-port br-provider PROVIDER_INTERFACE

# Start

systemctl enable neutron-dhcp-agent.service neutron-metadata-agent.service

systemctl start neutron-dhcp-agent.service neutron-metadata-agent.service

systemctl restart neutron-openvswitch-agent.service

Enable Neutron in Nova

vi /etc/nova/nova.conf

[neutron]

# ...

auth_url = http://controller:5000

auth_type = password

project_domain_name = default

user_domain_name = default

region_name = RegionOne

project_name = service

username = neutron

password = NEUTRON_PASS

systemctl restart openstack-nova-compute.service

Configuration file templates (neutron.conf, openvswitch_agent.ini, dhcp_agent.ini, metadata_agent.ini)

[DEFAULT]

transport_url = rabbit://openstack:Bl666666@openstack01:5672,openstack:Bl666666@openstack02:5672,openstack:Bl666666@openstack03:5672

core_plugin = ml2

auth_strategy = keystone

[keystone_authtoken]

# ...

www_authenticate_uri = http://controller:5000

auth_url = http://controller:5000

memcached_servers = openstack01:11211,openstack02:11211,openstack03:11211

auth_type = password

project_domain_name = default

user_domain_name = default

project_name = service

username = neutron

password = NEUTRON_PASS

[oslo_concurrency]

# ...

lock_path = /var/lib/neutron/tmp

[ovs]

bridge_mappings = provider:br-provider

local_ip = OVERLAY_INTERFACE_IP_ADDRESS

[securitygroup]

firewall_driver = iptables_hybrid

[agent]

tunnel_types = vxlan

l2_population = True

[DEFAULT]

interface_driver = openvswitch

enable_isolated_metadata = True

force_metadata = True

[DEFAULT]

nova_metadata_host = controller

metadata_proxy_shared_secret = METADATA_SECRET
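Once the compute-node agents are up, a final check from any controller node confirms that all Neutron agents and Nova services have registered through the VIP (using the admin credentials created earlier):

source ~/admin-openrc

openstack network agent list

openstack compute service list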
