Deploying OpenStack on Linux

Planning overview:

Hostname     Management network   NIC 1   NIC 2
controller   192.168.162.76       ens32   ens33
compute      192.168.162.77       ens32   ens33

Gateway: 192.168.162.254

I. Environment configuration

1. Environment configuration

1.1 Set the hostname (set the matching name on each node).

#Run on controller 

[root@controller ~]# hostnamectl set-hostname controller 

#Run on compute 

[root@compute ~]# hostnamectl set-hostname compute 
#Run bash afterwards to refresh the prompt

##Or reconnect the terminal after the change 

##In MobaXterm: press Ctrl+D to exit, then R to log back in 

##In CRT: press Ctrl+D to exit, then Enter to log back in

1.2 Configure the network.

#Controller node
[root@controller ~]# vi /etc/sysconfig/network-scripts/ifcfg-ens32                #ifcfg-<name of the NIC to configure>
BOOTPROTO=dhcp            #change dhcp (dynamic addressing) to static
change to: BOOTPROTO=static

ONBOOT=no                 #change to yes so the NIC comes up at boot
change to: ONBOOT=yes

#Append at the end of the file
IPADDR=192.168.162.76            #IP address, chosen from the subnet of the virtual network editor
NETMASK=255.255.255.0            #subnet mask; PREFIX=24 works as well
GATEWAY=192.168.162.254            #gateway
DNS1=114.114.114.114             #DNS resolver
DNS2=8.8.8.8                     #backup DNS resolver
:wq (save and quit)
[root@controller ~]# systemctl restart network            #restart the network service


#Compute node
[root@compute ~]# vi /etc/sysconfig/network-scripts/ifcfg-ens32                 #ifcfg-<name of the NIC to configure>
BOOTPROTO=dhcp            #change dhcp (dynamic addressing) to static
change to: BOOTPROTO=static

ONBOOT=no                 #change to yes so the NIC comes up at boot
change to: ONBOOT=yes

#Append at the end of the file
IPADDR=192.168.162.77            #IP address, chosen from the subnet of the virtual network editor
NETMASK=255.255.255.0            #subnet mask; PREFIX=24 works as well
GATEWAY=192.168.162.254            #gateway
DNS1=114.114.114.114             #DNS resolver
DNS2=8.8.8.8                     #backup DNS resolver
:wq (save and quit)
[root@compute ~]# systemctl restart network            #restart the network service
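
A quick sanity check after the restart (a minimal sketch; run it on each node, substituting that node's own address):

ip addr show ens32 | grep inet          #confirm the static address was applied
ping -c 2 192.168.162.254               #confirm the gateway is reachable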

1.3 SSH configuration (every node)

Remote access may require adjusting sshd:

vi /etc/ssh/sshd_config
PermitRootLogin yes   #allow root to log in remotely.

UseDNS no  #disable this when needed; otherwise a remote login may hang while sshd tries to verify the client address.
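
For the changes to take effect, sshd has to be restarted (a small sketch; run on each node after editing the file):

systemctl restart sshd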

1.4 Disable SELinux (every node)

vi /etc/selinux/config

# This file controls the state of SELinux on the system.

# SELINUX= can take one of these three values:

#     enforcing - SELinux security policy is enforced.

#     permissive - SELinux prints warnings instead of enforcing.

#     disabled - No SELinux policy is loaded.

SELINUX=disabled #change this value from enforcing to disabled

# SELINUXTYPE= can take one of three values:

#     targeted - Targeted processes are protected,

#     minimum - Modification of targeted policy. Only selected processes are protected.

#     mls - Multi Level Security protection.

SELINUXTYPE=targeted

After editing, check the SELinux state with getenforce.

If it still shows as enabled, turn it off with setenforce 0 and then confirm the state again.
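
The corresponding commands (setenforce 0 only lasts until the next reboot; the change in /etc/selinux/config makes it permanent):

getenforce       #shows Enforcing / Permissive / Disabled
setenforce 0     #switch off enforcement for the current boot
getenforce       #confirm the new state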

1.5 Disable the firewall (every node)

#Controller node; perform the same steps on the compute node
[root@controller ~]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
   Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
   Active: active (running) since 三 2023-08-30 20:49:29 CST; 1s ago
     Docs: man:firewalld(1)
 Main PID: 2424 (firewalld)
   CGroup: /system.slice/firewalld.service
           └─2424 /usr/bin/python2 -Es /usr/sbin/firewalld --nofork --nopid

8月 30 20:49:29 controller systemd[1]: Starting firewalld - dynamic firewall daemon...
8月 30 20:49:29 controller systemd[1]: Started firewalld - dynamic firewall daemon.
8月 30 20:49:29 controller firewalld[2424]: WARNING: AllowZoneDrifting is enabled. This is considered an insecure configuration option. It wi... it now.
Hint: Some lines were ellipsized, use -l to show in full.
#Active is running

[root@controller ~]# systemctl stop firewalld && systemctl disable firewalld 
[root@controller ~]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
   Loaded: loaded (/usr/lib/systemd/system/firewalld.service; enabled; vendor preset: enabled)
   Active: inactive (dead) since 三 2023-08-30 20:54:10 CST; 966ms ago
     Docs: man:firewalld(1)
  Process: 905 ExecStart=/usr/sbin/firewalld --nofork --nopid $FIREWALLD_ARGS (code=exited, status=0/SUCCESS)
 Main PID: 905 (code=exited, status=0/SUCCESS)

8月 30 20:53:50 controller systemd[1]: Starting firewalld - dynamic firewall daemon...
8月 30 20:53:50 controller systemd[1]: Started firewalld - dynamic firewall daemon.
8月 30 20:53:50 controller firewalld[905]: WARNING: AllowZoneDrifting is enabled. This is considered an insecure configuration option. It wil... it now.
8月 30 20:54:08 controller systemd[1]: Stopping firewalld - dynamic firewall daemon...
8月 30 20:54:10 controller systemd[1]: Stopped firewalld - dynamic firewall daemon.
Hint: Some lines were ellipsized, use -l to show in full.
#Active is inactive (dead)

#Option reference
start    start the service
stop     stop the service
disable  do not start at boot
enable   start at boot

2. Name resolution (adjust to your own environment)

#Controller node:
[root@controller ~]# vi /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.162.76 controller
192.168.162.77 compute					 		#append these two lines at the end of the file

[root@controller ~]# scp /etc/hosts compute:/etc/hosts
#Copy the hosts file from controller to compute and replace it there; this saves switching back and forth between the two nodes, a convenience that becomes even clearer later on

#From here on, the hostnames (controller, compute) can be used instead of the IP addresses


#Verify connectivity by running the following on both the controller and the compute node:
ping -c 4 controller
ping -c 4 compute


#Output like the following shows the two nodes can reach each other:

[root@controller ~]# ping -c 4 controller
PING controller (192.168.162.76) 56(84) bytes of data.
64 bytes from controller (192.168.162.76): icmp_seq=1 ttl=64 time=0.035 ms
64 bytes from controller (192.168.162.76): icmp_seq=2 ttl=64 time=0.193 ms
64 bytes from controller (192.168.162.76): icmp_seq=3 ttl=64 time=0.097 ms
64 bytes from controller (192.168.162.76): icmp_seq=4 ttl=64 time=0.078 ms

--- controller ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 3000ms
rtt min/avg/max/mdev = 0.035/0.100/0.193/0.059 ms


[root@controller ~]# ping -c 4 compute
PING compute (192.168.162.77) 56(84) bytes of data.
64 bytes from compute (192.168.162.77): icmp_seq=1 ttl=64 time=0.706 ms
64 bytes from compute (192.168.162.77): icmp_seq=2 ttl=64 time=1.82 ms
64 bytes from compute (192.168.162.77): icmp_seq=3 ttl=64 time=1.60 ms
64 bytes from compute (192.168.162.77): icmp_seq=4 ttl=64 time=1.15 ms

--- compute ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 3005ms
rtt min/avg/max/mdev = 0.706/1.324/1.827/0.433 ms

3. Basic system packages

3.1 Linux tools (every node)

[root@controller ~]# yum install -y lsof vim net-tools wget git
[root@compute ~]# yum install -y lsof vim net-tools wget git

3.2 NTP time synchronization: install the chrony service (every node)

[root@controller ~]# yum  -y install chrony
[root@compute ~]# yum  -y install chrony

Edit the chrony.conf file as follows.

# Configuration file - controller node
[root@controller ~]# vim /etc/chrony.conf
server 0.centos.pool.ntp.org iburst							#delete these 4 lines
server 1.centos.pool.ntp.org iburst
server 2.centos.pool.ntp.org iburst
server 3.centos.pool.ntp.org iburst

server ntp3.aliyun.com iburst    # add this line
allow all												 # remove the leading #
local stratum 10								 # remove the leading #

#Compute node
[root@compute ~]# vim /etc/chrony.conf
server 0.centos.pool.ntp.org iburst							#delete these 4 lines
server 1.centos.pool.ntp.org iburst
server 2.centos.pool.ntp.org iburst
server 3.centos.pool.ntp.org iburst

server controller iburst					#add this line


# Restart the service on all nodes
systemctl restart chronyd
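
If the time service should also survive a reboot, it can be enabled at boot as well (optional sketch, run on all nodes):

systemctl enable chronyd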

#Output like the following on the controller shows time synchronization is working
[root@controller ~]# chronyc sources -v
210 Number of sources = 1

  .-- Source mode  '^' = server, '=' = peer, '#' = local clock.
 / .- Source state '*' = current synced, '+' = combined , '-' = not combined,
| /   '?' = unreachable, 'x' = time may be in error, '~' = time too variable.
||                                                 .- xxxx [ yyyy ] +/- zzzz
||      Reachability register (octal) -.           |  xxxx = adjusted offset,
||      Log2(Polling interval) --.      |          |  yyyy = measured offset,
||                                \     |          |  zzzz = estimated error.
||                                 |    |           \
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^* 203.107.6.88                  2   6    17    41  +1407us[+5089us] +/-   20ms   		 
[root@controller ~]# date
2023年 08月 30日 星期三 21:55:02 CST

#Output like the following on the compute node shows synchronization is working
[root@compute ~]# chronyc sources -v
210 Number of sources = 1

  .-- Source mode  '^' = server, '=' = peer, '#' = local clock.
 / .- Source state '*' = current synced, '+' = combined , '-' = not combined,
| /   '?' = unreachable, 'x' = time may be in error, '~' = time too variable.
||                                                 .- xxxx [ yyyy ] +/- zzzz
||      Reachability register (octal) -.           |  xxxx = adjusted offset,
||      Log2(Polling interval) --.      |          |  yyyy = measured offset,
||                                \     |          |  zzzz = estimated error.
||                                 |    |           \
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^* controller                    3   6   377    34  -6695us[-6726us] +/-   25ms
[root@compute ~]# date
2023年 08月 30日 星期三 21:59:28 CST

4. OpenStack base packages (all nodes)

Install the OpenStack repository package.

#List the repository packages that are available
[root@controller ~]# yum list |grep openstack*
centos-release-openstack-queens.noarch      1-2.el7.centos             extras   
centos-release-openstack-rocky.noarch       1-1.el7.centos             extras   
centos-release-openstack-stein.noarch       1-1.el7.centos             extras   
centos-release-openstack-train.noarch       1-1.el7.centos             extras   

# Install on both nodes
[root@controller ~]# yum install centos-release-openstack-train -y
[root@controller ~]# yum install python-openstackclient openstack-selinux -y
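
A quick way to confirm the client installed correctly on each node (optional check):

openstack --version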

5. SQL database (controller node)

(1) Install the MariaDB packages


[root@controller ~]# yum install mariadb mariadb-server python2-PyMySQL -y

(2) Create the OpenStack database configuration file /etc/my.cnf.d/openstack.cnf

[root@controller ~]# vim /etc/my.cnf.d/openstack.cnf

Add the following under the [mysqld] section:

[mysqld]
bind-address = 192.168.162.76   	  	 	 #set this to the machine's own IP address

default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8

#Start the database service and configure it to start at boot
[root@controller ~]# systemctl enable mariadb.service --now   or:  systemctl enable mariadb.service && systemctl start mariadb.service 
Created symlink from /etc/systemd/system/mysql.service to /usr/lib/systemd/system/mariadb.service.
Created symlink from /etc/systemd/system/mysqld.service to /usr/lib/systemd/system/mariadb.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/mariadb.service to /usr/lib/systemd/system/mariadb.service.
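
To confirm MariaDB is up and bound to the management address configured above (optional check):

ss -ntl | grep 3306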

(3) Initialize (secure) the database

Set the database root password; the password used throughout this guide is 000000.

[root@controller ~]# mysql_secure_installation

NOTE: RUNNING ALL PARTS OF THIS SCRIPT IS RECOMMENDED FOR ALL MariaDB
      SERVERS IN PRODUCTION USE!  PLEASE READ EACH STEP CAREFULLY!

In order to log into MariaDB to secure it, we'll need the current
password for the root user.  If you've just installed MariaDB, and
you haven't set the root password yet, the password will be blank,
so you should just press enter here.

Enter current password for root (enter for none):     				  #this asks for the current MariaDB root password; none was set at install time, so just press Enter
OK, successfully used password, moving on...

Setting the root password ensures that nobody can log into the MariaDB
root user without the proper authorisation.

Set root password? [Y/n] y                                #set the database root user's password here
New password:   000000                                  #the password is 000000; the input is not echoed in the terminal      
Re-enter new password:    000000    	 									#confirm the password
Password updated successfully!
Reloading privilege tables..
 ... Success!


By default, a MariaDB installation has an anonymous user, allowing anyone
to log into MariaDB without having to have a user account created for
them.  This is intended only for testing, and to make the installation
go a bit smoother.  You should remove them before moving into a
production environment.

Remove anonymous users? [Y/n] y                 			#remove MariaDB's default anonymous users
 ... Success!

Normally, root should only be allowed to connect from 'localhost'.  This
ensures that someone cannot guess at the root password from the network.

Disallow root login remotely? [Y/n] n                   #this would block remote root logins; answer no here
 ... skipping.

By default, MariaDB comes with a database named 'test' that anyone can
access.  This is also intended only for testing, and should be removed
before moving into a production environment.

Remove test database and access to it? [Y/n] y        		#remove MariaDB's built-in test database
 - Dropping test database...
 ... Success!
 - Removing privileges on test database...
 ... Success!

Reloading the privilege tables will ensure that all changes made so far
will take effect immediately.

Reload privilege tables now? [Y/n] y     						 	 #apply the changes
 ... Success! 

Cleaning up...

All done!  If you've completed all of the above steps, your MariaDB
installation should now be secure.

Thanks for using MariaDB!

#The database installation is now complete

(4) Log in to the database

[root@controller ~]# mysql -uroot -p000000
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 18
Server version: 10.3.20-MariaDB MariaDB Server

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> 


#Use the exit or quit command to leave the database prompt
[root@controller ~]# mysql -uroot -p000000
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 20
Server version: 10.3.20-MariaDB MariaDB Server

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> exit
Bye

6. Message queue (controller node)

A message queue (MQ) is an application-to-application communication method. Applications communicate by reading and writing messages (data meant for applications) to and from queues, without a dedicated connection linking them.
Message passing means programs communicate by sending data in messages rather than by calling each other directly, as happens with technologies such as remote procedure calls. Queuing means applications communicate through queues.
Using queues removes the requirement that the sending and receiving applications run at the same time.
RabbitMQ is a complete, reusable enterprise messaging system built on AMQP, released under the Mozilla Public License.
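
In practice, each OpenStack service later points at this broker through a transport_url option in its configuration file. A sketch of what that looks like with the user and password created below (shown only for orientation; the actual lines are added when each component is configured):

[DEFAULT]
transport_url = rabbit://openstack:openstack123@controller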

(1) Install rabbitmq-server

[root@controller ~]# yum install rabbitmq-server -y

(2) Start rabbitmq and enable it at boot
Ports 5672 and 15672; the latter is useful for troubleshooting.

[root@controller ~]# systemctl start rabbitmq-server.service
[root@controller ~]# systemctl enable rabbitmq-server.service
Created symlink from /etc/systemd/system/multi-user.target.wants/rabbitmq-server.service to /usr/lib/systemd/system/rabbitmq-server.service.
[root@controller ~]# systemctl status rabbitmq-server.service
● rabbitmq-server.service - RabbitMQ broker
   Loaded: loaded (/usr/lib/systemd/system/rabbitmq-server.service; enabled; vendor preset: disabled)
   Active: active (running) since 四 2023-08-31 09:15:05 CST; 11s ago
 Main PID: 3574 (beam.smp)
   Status: "Initialized"
   CGroup: /system.slice/rabbitmq-server.service
           ├─3574 /usr/lib64/erlang/erts-8.3.5.3/bin/beam.smp -W w -A 256 -P 1048576 -t 5000000 -stbt db -zdbbl 128000 -K true -- -root /usr/lib64/erl...
           ├─3987 erl_child_setup 1024
           ├─4013 inet_gethost 4
           └─4014 inet_gethost 4

8月 31 09:15:04 controller systemd[1]: Starting RabbitMQ broker...
8月 31 09:15:04 controller rabbitmq-server[3574]: RabbitMQ 3.6.16. Copyright (C) 2007-2018 Pivotal Software, Inc.
8月 31 09:15:04 controller rabbitmq-server[3574]: ##  ##      Licensed under the MPL.  See http://www.rabbitmq.com/
8月 31 09:15:04 controller rabbitmq-server[3574]: ##  ##
8月 31 09:15:04 controller rabbitmq-server[3574]: ##########  Logs: /var/log/rabbitmq/rabbit@controller.log
8月 31 09:15:04 controller rabbitmq-server[3574]: ######  ##        /var/log/rabbitmq/rabbit@controller-sasl.log
8月 31 09:15:04 controller rabbitmq-server[3574]: ##########
8月 31 09:15:04 controller rabbitmq-server[3574]: Starting broker...
8月 31 09:15:05 controller systemd[1]: Started RabbitMQ broker.
8月 31 09:15:05 controller rabbitmq-server[3574]: completed with 0 plugins.

(3) Create the openstack account and password in the message queue
Add the openstack user and its password:

[root@controller ~]# rabbitmqctl add_user openstack openstack123   						#add the openstack user with password openstack123
Creating user "openstack"

Allow the openstack user configure, write and read access:

[root@controller ~]# rabbitmqctl set_permissions openstack ".*" ".*" ".*"      #grant all permissions
Setting permissions for user "openstack" in vhost "/"

#List the users
[root@controller ~]# rabbitmqctl list_users
Listing users 
openstack	[]    						 # the newly created openstack user
guest	[administrator]        # the built-in superuser

(4) Enable the rabbitmq_management plugin for web management (optional)

#List the plugins that rabbitmq can enable
[root@controller ~]# rabbitmq-plugins list
 Configured: E = explicitly enabled; e = implicitly enabled
 | Status:   * = running on rabbit@controller
 |/
[  ] amqp_client                       3.6.16
[  ] cowboy                            1.0.4
[  ] cowlib                            1.0.2
[  ] rabbitmq_amqp1_0                  3.6.16
[  ] rabbitmq_auth_backend_ldap        3.6.16
[  ] rabbitmq_auth_mechanism_ssl       3.6.16
[  ] rabbitmq_consistent_hash_exchange 3.6.16
[  ] rabbitmq_event_exchange           3.6.16
[  ] rabbitmq_federation               3.6.16
[  ] rabbitmq_federation_management    3.6.16
[  ] rabbitmq_jms_topic_exchange       3.6.16
[  ] rabbitmq_management               3.6.16
[  ] rabbitmq_management_agent         3.6.16
[  ] rabbitmq_management_visualiser    3.6.16
[  ] rabbitmq_mqtt                     3.6.16
[  ] rabbitmq_random_exchange          3.6.16
[  ] rabbitmq_recent_history_exchange  3.6.16
[  ] rabbitmq_sharding                 3.6.16
[  ] rabbitmq_shovel                   3.6.16
[  ] rabbitmq_shovel_management        3.6.16
[  ] rabbitmq_stomp                    3.6.16
[  ] rabbitmq_top                      3.6.16
[  ] rabbitmq_tracing                  3.6.16
[  ] rabbitmq_trust_store              3.6.16
[  ] rabbitmq_web_dispatch             3.6.16
[  ] rabbitmq_web_mqtt                 3.6.16
[  ] rabbitmq_web_mqtt_examples        3.6.16
[  ] rabbitmq_web_stomp                3.6.16
[  ] rabbitmq_web_stomp_examples       3.6.16
[  ] sockjs                            0.3.4

#Enable the web management UI
[root@controller ~]# rabbitmq-plugins enable rabbitmq_management rabbitmq_management_agent
The following plugins have been enabled:
  amqp_client
  cowlib
  cowboy
  rabbitmq_web_dispatch
  rabbitmq_management_agent
  rabbitmq_management

Applying plugin configuration to rabbit@controller... started 6 plugins.

#Check the listening ports
[root@controller ~]# ss -ntl
State       Recv-Q Send-Q                               Local Address:Port                                              Peer Address:Port              
LISTEN      0      128                                              *:25672                                                        *:*                  
LISTEN      0      128                                 192.168.162.76:3306                                                         *:*                  
LISTEN      0      128                                              *:4369                                                         *:*                  
LISTEN      0      128                                              *:22                                                           *:*                  
LISTEN      0      128                                              *:15672                                                        *:*                  
LISTEN      0      100                                      127.0.0.1:25                                                           *:*                  
LISTEN      0      128                                           [::]:5672                                                      [::]:*                  
LISTEN      0      128                                           [::]:22                                                        [::]:*                  
LISTEN      0      100                                          [::1]:25                                                        [::]:*                  

#Check the IP address
[root@controller ~]# hostname -i
192.168.162.76

(5) Test RabbitMQ from a browser (skip this if the previous step was not configured)
URL: http://192.168.162.76:15672/
The default username and password are both guest. Users and permissions can be managed and created from the web UI; if the page cannot be reached, check the firewall state on the controller node.
Go to the Admin tab, click the openstack user, set its password and permissions, click "Update user", then log out of guest and log back in as openstack with password openstack123.
RabbitMQ is now configured.

7. Caching service: Memcached

The Identity service uses Memcached to cache tokens. The memcached service runs on the controller node. For production deployments, securing it with a combination of firewalling, authentication and encryption is recommended.

(1) Install Memcached for token caching

[root@controller ~]# yum install memcached python-memcached -y

(2) Edit the memcached configuration file

[root@controller ~]# vim /etc/sysconfig/memcached

PORT="11211"
USER="memcached"
MAXCONN="1024"
CACHESIZE="1024"    					  #originally 64, raised to 1024
OPTIONS="-l 127.0.0.1,::1,controller"     #append ,controller at the end


--------------------------------------
OPTIONS="-l 127.0.0.1,::1,controller"
--------------------------------------

# memcached option reference:
-d  run as a daemon in the background
-m  amount of memory (in MB) allocated to memcached
-u  user that memcached runs as
-l  IP address(es) the server listens on
-p  TCP port memcached listens on (11211 here); preferably a port above 1024
-c  maximum number of concurrent connections (default 1024); size it to the server's load
-P  path of the memcached pid file
-vv start in very verbose mode, printing debug information and errors to the console

(3) Start memcached and enable it at boot

[root@controller ~]# systemctl start memcached.service
[root@controller ~]# systemctl enable memcached.service
Created symlink from /etc/systemd/system/multi-user.target.wants/memcached.service to /usr/lib/systemd/system/memcached.service.
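
A quick check that memcached is listening on the expected addresses (optional sketch):

ss -ntl | grep 11211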

Memcached is now configured.

8. Service discovery and registration: etcd (optional)

The etcd service is a newer addition, used for automated configuration.

(1) Install the etcd service

[root@controller ~]# yum install etcd -y

(2) Edit the etcd configuration file

# Note: the IP addresses here cannot be replaced with the hostname controller; it will not resolve  
[root@controller ~]# vim /etc/etcd/etcd.conf
-----------------------------------
#[Member]
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://192.168.162.76:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.162.76:2379"
ETCD_NAME="controller"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.162.76:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.162.76:2379"
ETCD_INITIAL_CLUSTER="controller=http://192.168.162.76:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
ETCD_INITIAL_CLUSTER_STATE="new"
------------------------------------

(3) Start etcd and enable it at boot

[root@controller ~]# systemctl start etcd.service
[root@controller ~]# systemctl enable etcd.service
Created symlink from /etc/systemd/system/multi-user.target.wants/etcd.service to /usr/lib/systemd/system/etcd.service.
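
A basic health check once etcd is running (a sketch using the v2 etcdctl shipped with the CentOS package; the endpoint matches the client URL configured above):

etcdctl --endpoints=http://192.168.162.76:2379 cluster-health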

This completes the base environment configuration; the OpenStack components can be installed next.
If you are running VMware virtual machines, this is a good point to shut down and take a snapshot.

II. Controller node: Keystone identity service

(I). Create the keystone database and grant privileges

1. Log in to the database

Log in with the MySQL root account, using the password set when the database was initialized on the controller node (000000).

[root@controller ~]# mysql -uroot -p000000

2. Create the keystone database and grant privileges

[root@controller ~]# mysql -uroot -p000000
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 8
Server version: 10.3.20-MariaDB MariaDB Server

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> CREATE DATABASE keystone;
Query OK, 1 row affected (0.001 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'keystone123';       # the keystone database password is keystone123
Query OK, 0 rows affected (0.002 sec)

MariaDB [(none)]> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| keystone           |
| mysql              |
| performance_schema |
+--------------------+
4 rows in set (0.005 sec)

MariaDB [(none)]> exit
Bye
[root@controller ~]# 

(II). Install and configure keystone

1. Install the keystone packages

Keystone is served by the Apache HTTP server with mod_wsgi, which answers identity service requests on ports 5000 and 35357; by default the Keystone service still listens on these ports.

[root@controller ~]# yum install openstack-keystone httpd mod_wsgi  -y

2. Edit the keystone configuration

[root@controller ~]# vim /etc/keystone/keystone.conf 

# In the [database] section, configure database access:
#In vim command mode, type /\[database] to jump to the section, then press o to open a new line below it for editing. 
[database]
connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone    	#replace KEYSTONE_DBPASS with the password set above, keystone123

After the change: connection = mysql+pymysql://keystone:keystone123@controller/keystone

# In the [token] section, configure the Fernet token provider:
[token]
provider = fernet         

When done, save and quit with :wq
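
A quick way to double-check the edits before moving on (optional sketch):

grep -Ev "^#|^$" /etc/keystone/keystone.conf     #show only the non-comment, non-blank lines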

3. Synchronize the keystone database

Populate the keystone database:

[root@controller ~]# su -s /bin/sh -c "keystone-manage db_sync" keystone

4. Verify the synchronization

#Log in to the database to check whether the sync succeeded
[root@controller ~]# mysql -uroot -p000000
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 10
Server version: 10.3.20-MariaDB MariaDB Server

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| keystone           |
| mysql              |
| performance_schema |
+--------------------+
4 rows in set (0.001 sec)

MariaDB [(none)]> use keystone;
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A

Database changed
MariaDB [keystone]> show tables;   				 #output like the following shows the database sync succeeded
+------------------------------------+
| Tables_in_keystone                 |
+------------------------------------+
| access_rule                        |
| access_token                       |
| application_credential             |
| application_credential_access_rule |
| application_credential_role        |
| assignment                         |
| config_register                    |
| consumer                           |
| credential                         |
| endpoint                           |
| endpoint_group                     |
| federated_user                     |
| federation_protocol                |
| group                              |
| id_mapping                         |
| identity_provider                  |
| idp_remote_ids                     |
| implied_role                       |
| limit                              |
| local_user                         |
| mapping                            |
| migrate_version                    |
| nonlocal_user                      |
| password                           |
| policy                             |
| policy_association                 |
| project                            |
| project_endpoint                   |
| project_endpoint_group             |
| project_option                     |
| project_tag                        |
| region                             |
| registered_limit                   |
| request_token                      |
| revocation_event                   |
| role                               |
| role_option                        |
| sensitive_config                   |
| service                            |
| service_provider                   |
| system_assignment                  |
| token                              |
| trust                              |
| trust_role                         |
| user                               |
| user_group_membership              |
| user_option                        |
| whitelisted_config                 |
+------------------------------------+
48 rows in set (0.002 sec)

5. Initialize the Fernet key repositories

Initialize the Fernet key repositories (for background, see the Keystone Fernet tokens documentation).

# The following commands produce no output
[root@controller ~]# keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
[root@controller ~]# keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
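
The two commands create key repositories under /etc/keystone/; a quick look confirms they exist (optional check):

ls /etc/keystone/fernet-keys/ /etc/keystone/credential-keys/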

6. Bootstrap the Identity service:

#Replace ADMIN_PASS with a suitable password for the admin user; here it is set to admin
[root@controller ~]# keystone-manage bootstrap --bootstrap-password ADMIN_PASS --bootstrap-admin-url http://controller:5000/v3/ --bootstrap-internal-url http://controller:5000/v3/ --bootstrap-public-url http://controller:5000/v3/ --bootstrap-region-id RegionOne

After substitution:
[root@controller ~]# keystone-manage bootstrap --bootstrap-password admin --bootstrap-admin-url http://controller:5000/v3/ --bootstrap-internal-url http://controller:5000/v3/ --bootstrap-public-url http://controller:5000/v3/ --bootstrap-region-id RegionOne

7. Configure and start Apache (httpd)

(1) Edit the main httpd configuration file

Edit /etc/httpd/conf/httpd.conf and set the ServerName option to reference the controller node:

[root@controller ~]# vim /etc/httpd/conf/httpd.conf 
#ServerName www.example.com:80       # change www.example.com to controller

ServerName controller:80

(2) Configure the virtual host

Create a link to the /usr/share/keystone/wsgi-keystone.conf file:

[root@controller ~]# ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
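
Before starting the service, the Apache configuration can be syntax-checked (optional sketch):

httpd -t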

(3) Start httpd and enable it at boot

Start the Apache HTTP service and configure it to start at system boot.
# If httpd fails to start, disable SELinux or install openstack-selinux (yum install openstack-selinux)

[root@controller ~]# systemctl start httpd.service
[root@controller ~]# systemctl enable httpd.service
Created symlink from /etc/systemd/system/multi-user.target.wants/httpd.service to /usr/lib/systemd/system/httpd.service.

The httpd service is now configured.

8. Bootstrapping the keystone identity service

The bootstrap creates the keystone admin user, the initial service entity and the API endpoints.
Create the user: a password (ADMIN_PASS) is needed for the admin user that will log in to OpenStack; it was set to admin in step 6 above.
It creates the keystone service entity and identity service endpoints of the three types: public, internal and admin.

It adds the 3 service API endpoints to the endpoint table
It creates the admin user in the local_user table
It creates the admin project and the Default domain in the project table
It creates 3 roles in the role table: admin, member and reader
It creates the identity service entry in the service table

Configure the OpenStack authentication environment variables:
[root@controller ~]# cd /etc/keystone/
[root@controller keystone]# vim admin-openrc.sh
Add the following:
#OS_PASSWORD must be the ADMIN_PASS configured above
#!/bin/bash
export OS_USERNAME=admin
export OS_PASSWORD=admin  #this must be the ADMIN_PASS configured above
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
#press Esc, then :wq to save and quit

Verification:
[root@controller keystone]# source admin-openrc.sh
[root@controller keystone]# openstack catalog list
+----------+----------+----------------------------------------+
| Name     | Type     | Endpoints                              |
+----------+----------+----------------------------------------+
| keystone | identity | RegionOne                              |
|          |          |   admin: http://controller:5000/v3/    |
|          |          | RegionOne                              |
|          |          |   public: http://controller:5000/v3/   |
|          |          | RegionOne                              |
|          |          |   internal: http://controller:5000/v3/ |
|          |          |                                        |
+----------+----------+----------------------------------------+

Points to consider:
① Could controller here be replaced with the management-network IP address so that other hosts can call the API more easily, or alternatively, could the other hosts map controller to the right IP in their hosts files?
② In earlier releases (before Queens), the bootstrap served two ports (5000 for users and 35357 for administration); in this release both are served from a single port.
③ On the endpoint table: see the Keystone introduction and installation notes.
An Endpoint is a network-accessible address, usually a URL. A Service exposes its API through its Endpoints, and Keystone manages and maintains the Endpoints of every Service. They can be listed with the command below.

Appendix: common openstack management commands; they require the admin environment variables to be loaded

# Inspect the keystone endpoints
[root@controller keystone]# openstack endpoint list
+----------------------------------+-----------+--------------+--------------+---------+-----------+----------------------------+
| ID                               | Region    | Service Name | Service Type | Enabled | Interface | URL                        |
+----------------------------------+-----------+--------------+--------------+---------+-----------+----------------------------+
| 03ab533d85ed4d81b511945d33545982 | RegionOne | keystone     | identity     | True    | admin     | http://controller:5000/v3/ |
| b70c096688d947f696c19fc300abce8e | RegionOne | keystone     | identity     | True    | public    | http://controller:5000/v3/ |
| eea90f0f15fe4fb6b8421e1ce349e1ca | RegionOne | keystone     | identity     | True    | internal  | http://controller:5000/v3/ |
+----------------------------------+-----------+--------------+--------------+---------+-----------+----------------------------+
[root@controller ~]# openstack project list
+----------------------------------+-------+
| ID                               | Name  |
+----------------------------------+-------+
| bde16102d6ac43d7a385efffc09281f3 | admin |
+----------------------------------+-------+
[root@controller ~]# openstack user list
+----------------------------------+-------+
| ID                               | Name  |
+----------------------------------+-------+
| 3b0edff458ae423fbe92f1e8d8e99b91 | admin |
+----------------------------------+-------+

# Delete an endpoint
openstack endpoint delete [ID]

(III). Create domains, projects, users and roles

1. Create a domain and projects

#Create a domain named example
[root@controller ~]# openstack domain create --description "An Example Domain" example
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | An Example Domain                |
| enabled     | True                             |
| id          | 356025ddb32f47c7b38eddf8fc4bcc62 |
| name        | example                          |
| options     | {}                               |
| tags        | []                               |
+-------------+----------------------------------+

#Create a project named service
[root@controller ~]# openstack project create --domain default --description "Service Project" service
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | Service Project                  |
| domain_id   | default                          |
| enabled     | True                             |
| id          | fbf8238bdc734b63b2cf4025baa1e4a7 |
| is_domain   | False                            |
| name        | service                          |
| options     | {}                               |
| parent_id   | default                          |
| tags        | []                               |
+-------------+----------------------------------+

2. Create the (non-admin) myproject project, myuser user and myrole role

#Create the myproject project
[root@controller ~]# openstack project create --domain default --description "Demo Project" myproject
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | Demo Project                     |
| domain_id   | default                          |
| enabled     | True                             |
| id          | 53ebdfb014eb4c849437a8138ef002a0 |
| is_domain   | False                            |
| name        | myproject                        |
| options     | {}                               |
| parent_id   | default                          |
| tags        | []                               |
+-------------+----------------------------------+

#Create the myuser user
[root@controller ~]# openstack user create --domain default --password-prompt myuser
User Password:   							#the password used here is myuser
Repeat User Password:
+---------------------+----------------------------------+
| Field               | Value                            |
+---------------------+----------------------------------+
| domain_id           | default                          |
| enabled             | True                             |
| id                  | 1e66a51c9853415fb70a0af195c10ed1 |
| name                | myuser                           |
| options             | {}                               |
| password_expires_at | None                             |
+---------------------+----------------------------------+


#Create the myrole role
[root@controller ~]# openstack role create myrole
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | None                             |
| domain_id   | None                             |
| id          | b38f25973d9a4825ab7d45088db6fc5d |
| name        | myrole                           |
| options     | {}                               |
+-------------+----------------------------------+


#Add the myrole role to the myproject project and myuser user
[root@controller ~]# openstack role add --project myproject --user myuser myrole

3. Verify the operations

1. Unset the temporary OS_AUTH_URL and OS_PASSWORD environment variables
[root@controller ~]# unset OS_AUTH_URL OS_PASSWORD
2. As the admin user, request an authentication token:

This tests whether the admin account can authenticate by requesting a token.

[root@controller ~]# openstack --os-auth-url http://controller:5000/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name admin --os-username admin token issue
Password:  
Password:    			#the password is admin
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field      | Value                                                                                                                                                                                   |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| expires    | 2023-08-31T07:10:49+0000                                                                                                                                                                |
| id         | gAAAAABk8C7pday04-lVrHY3ODFywjOP5lfJ1p9ZqdqAI-6gnSmolibNnoWwUtfeB2By1ofBJEPHCnggFYTpBzEE-0383OAqx8yK1Q7DnBYb1Tp88tvGShNSSMaj3fp87sMLceID9ik9X4IQAtfgre8rkEiCDIbR-Mg3IblbBOtYFo0dWMhqbZg |
| project_id | bde16102d6ac43d7a385efffc09281f3                                                                                                                                                        |
| user_id    | 3b0edff458ae423fbe92f1e8d8e99b91                                                                                                                                                        |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+

3. As the newly created myuser user, request an authentication token:

The following command uses the myuser user's password and API port 5000, which only allows regular (non-admin) access to the Identity service API.

[root@controller ~]# openstack --os-auth-url http://controller:5000/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name myproject --os-username myuser token issue
Password: 
Password:     			#the password is myuser
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field      | Value                                                                                                                                                                                   |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| expires    | 2023-08-31T07:13:06+0000                                                                                                                                                                |
| id         | gAAAAABk8C9yKO0PhiKhCQhhmDldORMEb1MpqkRmUsKYLODQzpRHHGOw8CVFk99yTs1pXiGoVEKHR4c2YeH9KLH-tkOgk13YouS8c9y7P17gbTc25_rN9QLsF5LqaLOYXSeySBiVa-QlDX8oGXZLtIkFrLZc6WUVOp6rpTgV0yUYZvkqjSLr1MY |
| project_id | 53ebdfb014eb4c849437a8138ef002a0                                                                                                                                                        |
| user_id    | 1e66a51c9853415fb70a0af195c10ed1                                                                                                                                                        |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+

(IV). Create OpenStack client environment scripts

So far the "openstack" client has interacted with the Identity service through a combination of environment variables and command-line options.
To make client operations more efficient, OpenStack supports simple client environment scripts, also known as OpenRC files; custom file names are used here.

1. Create the admin user's environment script

[root@controller ~]# cd
[root@controller ~]# vim admin-openrc.sh
# File contents
#!/bin/bash
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2

2. Create the client environment script for the regular user myuser

[root@controller ~]# vim myuser-openrc.sh
# File contents
#!/bin/bash
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=myproject
export OS_USERNAME=myuser
export OS_PASSWORD=myuser
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2

3. Test the environment scripts

[root@controller ~]# source admin-openrc.sh
[root@controller ~]# openstack token issue
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field      | Value                                                                                                                                                                                   |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| expires    | 2023-08-31T07:19:19+0000                                                                                                                                                                |
| id         | gAAAAABk8DDnlhvalH4Ke-p4paqAtLBQTyb0lQhC3DzOu1lckW-_0jGC3d-WHJnd8DlazzEgtzus4A1A2AJz4GK0TDLf1wAnvZCk7IuBAykJoTkZNVo0b3U-aJ_UTrRJ36raDDC16A4E2effRflXXJwkCs73PUIs5TYvVU-cvqLqjxyVyJgpPDs |
| project_id | bde16102d6ac43d7a385efffc09281f3                                                                                                                                                        |
| user_id    | 3b0edff458ae423fbe92f1e8d8e99b91                                                                                                                                                        |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+


[root@controller ~]# source myuser-openrc.sh 
[root@controller ~]# openstack token issue
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field      | Value                                                                                                                                                                                   |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| expires    | 2023-08-31T07:19:56+0000                                                                                                                                                                |
| id         | gAAAAABk8DEM06dSx5hZfTLLsdXbopOoug61WPrV1J8jMcKWQN4AOnW2gEmYEgC2uJVZO9_7LmsZF0wi6cGkFgdZUQsFJKxNksJBkk7TtIFO7zzht4vCu-KkXxA6oFle58-yX5r6VRV1h1jwc4xLDZI6DSRNZ79j3rqFfluFmEa3sk2dICIjtuw |
| project_id | 53ebdfb014eb4c849437a8138ef002a0                                                                                                                                                        |
| user_id    | 1e66a51c9853415fb70a0af195c10ed1                                                                                                                                                        |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+

The user_id values match the ones obtained with the long commands above, which confirms the scripts work.

The Keystone identity service on the controller node is now installed and configured. If you are running VMware virtual machines, this is a good point to shut down and take a snapshot.

III. Controller node: Glance image service

(I). Create the glance database, credentials and API endpoints

1. Create the glance database and grant privileges

[root@controller ~]# mysql -uroot -p000000
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 19
Server version: 10.3.20-MariaDB MariaDB Server

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> CREATE DATABASE glance;
Query OK, 1 row affected (0.001 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'glance123';    		 #the password is set to glance123
Query OK, 0 rows affected (0.001 sec)

MariaDB [(none)]> quit
Bye
[root@controller ~]#

2. Create the service credentials

(1) Create the glance user in keystone
The following command creates the glance user in the local_user table.

[root@controller ~]# source /etc/keystone/admin-openrc.sh 
[root@controller ~]# openstack user create --domain default --password-prompt glance
User Password:      			 	 	 	#the password used here is glance
Repeat User Password:     		
No password was supplied, authentication will fail when a user does not have a password.
+---------------------+----------------------------------+
| Field               | Value                            |
+---------------------+----------------------------------+
| domain_id           | default                          |
| enabled             | True                             |
| id                  | c5b2bd3cb7ca40818e9cbbb2b6e073d2 |
| name                | glance                           |
| options             | {}                               |
| password_expires_at | None                             |
+---------------------+----------------------------------+

(2) In keystone, give the glance user the admin role on the service project
The following command produces no output.

[root@controller ~]# openstack role add --project service --user glance admin

(3) Create the glance image service entity
The following command adds a glance entry to the service table.

[root@controller ~]# openstack service create --name glance --description "OpenStack Image" image
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Image                  |
| enabled     | True                             |
| id          | 7d46e25c824742bfa091c8a2249edfe8 |
| name        | glance                           |
| type        | image                            |
+-------------+----------------------------------+
[root@controller ~]# openstack service list
+----------------------------------+----------+----------+
| ID                               | Name     | Type     |
+----------------------------------+----------+----------+
| 4d8366e6b68a4953a6bd1df9cebf6b49 | keystone | identity |
| 7d46e25c824742bfa091c8a2249edfe8 | glance   | image    |
+----------------------------------+----------+----------+

3. Create the image service API endpoints

[These endpoints determine how the API is reached]
The following commands add 3 entries to the endpoint table.

[root@controller ~]# openstack endpoint create --region RegionOne image public http://controller:9292
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 3664621381b74f79a833029762f879b8 |
| interface    | public                           |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 7d46e25c824742bfa091c8a2249edfe8 |
| service_name | glance                           |
| service_type | image                            |
| url          | http://controller:9292           |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne image internal http://controller:9292
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 68e0f4afa6464d9ea43652b0248d1d2e |
| interface    | internal                         |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 7d46e25c824742bfa091c8a2249edfe8 |
| service_name | glance                           |
| service_type | image                            |
| url          | http://controller:9292           |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne image admin http://controller:9292
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 6c0cc1dfd0584251b34c95516a46130d |
| interface    | admin                            |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 7d46e25c824742bfa091c8a2249edfe8 |
| service_name | glance                           |
| service_type | image                            |
| url          | http://controller:9292           |
+--------------+----------------------------------+

#List the API endpoints
[root@controller ~]# openstack endpoint list
+----------------------------------+-----------+--------------+--------------+---------+-----------+----------------------------+
| ID                               | Region    | Service Name | Service Type | Enabled | Interface | URL                        |
+----------------------------------+-----------+--------------+--------------+---------+-----------+----------------------------+
| 03ab533d85ed4d81b511945d33545982 | RegionOne | keystone     | identity     | True    | admin     | http://controller:5000/v3/ |
| 3664621381b74f79a833029762f879b8 | RegionOne | glance       | image        | True    | public    | http://controller:9292     |
| 68e0f4afa6464d9ea43652b0248d1d2e | RegionOne | glance       | image        | True    | internal  | http://controller:9292     |
| 6c0cc1dfd0584251b34c95516a46130d | RegionOne | glance       | image        | True    | admin     | http://controller:9292     |
| b70c096688d947f696c19fc300abce8e | RegionOne | keystone     | identity     | True    | public    | http://controller:5000/v3/ |
| eea90f0f15fe4fb6b8421e1ce349e1ca | RegionOne | keystone     | identity     | True    | internal  | http://controller:5000/v3/ |
+----------------------------------+-----------+--------------+--------------+---------+-----------+----------------------------+

Glance is now registered in keystone and can be installed.

(II). Install and configure glance

1. Install the glance package

[root@controller ~]# yum install openstack-glance

2. Edit the configuration file

[root@controller ~]# vim  /etc/glance/glance-api.conf

[database]
connection = mysql+pymysql://glance:glance123@controller/glance

[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = glance


[paste_deploy]
flavor = keystone

[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/

3. Synchronize the glance database

(1) Populate the image service database
[root@controller ~]# su -s /bin/sh -c "glance-manage db_sync" glance

INFO  [alembic.runtime.migration] Context impl MySQLImpl.
INFO  [alembic.runtime.migration] Will assume non-transactional DDL.
/usr/lib/python2.7/site-packages/pymysql/cursors.py:170: Warning: (1280, u"Name 'alembic_version_pkc' ignored for PRIMARY key.")
  result = self._query(query)
INFO  [alembic.runtime.migration] Running upgrade  -> liberty, liberty initial
INFO  [alembic.runtime.migration] Running upgrade liberty -> mitaka01, add index on created_at and updated_at columns of 'images' table
INFO  [alembic.runtime.migration] Running upgrade mitaka01 -> mitaka02, update metadef os_nova_server
INFO  [alembic.runtime.migration] Running upgrade mitaka02 -> ocata_expand01, add visibility to images
INFO  [alembic.runtime.migration] Running upgrade ocata_expand01 -> pike_expand01, empty expand for symmetry with pike_contract01
INFO  [alembic.runtime.migration] Running upgrade pike_expand01 -> queens_expand01
INFO  [alembic.runtime.migration] Running upgrade queens_expand01 -> rocky_expand01, add os_hidden column to images table
INFO  [alembic.runtime.migration] Running upgrade rocky_expand01 -> rocky_expand02, add os_hash_algo and os_hash_value columns to images table
INFO  [alembic.runtime.migration] Running upgrade rocky_expand02 -> train_expand01, empty expand for symmetry with train_contract01
INFO  [alembic.runtime.migration] Context impl MySQLImpl.
INFO  [alembic.runtime.migration] Will assume non-transactional DDL.
Upgraded database to: train_expand01, current revision(s): train_expand01
INFO  [alembic.runtime.migration] Context impl MySQLImpl.
INFO  [alembic.runtime.migration] Will assume non-transactional DDL.
INFO  [alembic.runtime.migration] Context impl MySQLImpl.
INFO  [alembic.runtime.migration] Will assume non-transactional DDL.
Database migration is up to date. No migration needed.
INFO  [alembic.runtime.migration] Context impl MySQLImpl.
INFO  [alembic.runtime.migration] Will assume non-transactional DDL.
INFO  [alembic.runtime.migration] Context impl MySQLImpl.
INFO  [alembic.runtime.migration] Will assume non-transactional DDL.
INFO  [alembic.runtime.migration] Running upgrade mitaka02 -> ocata_contract01, remove is_public from images
INFO  [alembic.runtime.migration] Running upgrade ocata_contract01 -> pike_contract01, drop glare artifacts tables
INFO  [alembic.runtime.migration] Running upgrade pike_contract01 -> queens_contract01
INFO  [alembic.runtime.migration] Running upgrade queens_contract01 -> rocky_contract01
INFO  [alembic.runtime.migration] Running upgrade rocky_contract01 -> rocky_contract02
INFO  [alembic.runtime.migration] Running upgrade rocky_contract02 -> train_contract01
INFO  [alembic.runtime.migration] Context impl MySQLImpl.
INFO  [alembic.runtime.migration] Will assume non-transactional DDL.
Upgraded database to: train_contract01, current revision(s): train_contract01
INFO  [alembic.runtime.migration] Context impl MySQLImpl.
INFO  [alembic.runtime.migration] Will assume non-transactional DDL.
Database is synced successfully.

Most of the output can be ignored; the final line should read: Database is synced successfully.

(2) Verify the synchronization


Make sure all the required tables have been created, otherwise the later steps may fail.

[root@controller ~]# mysql -uroot -p000000
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 23
Server version: 10.3.20-MariaDB MariaDB Server

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> use glance
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A

Database changed
MariaDB [glance]> show tables;
+----------------------------------+
| Tables_in_glance                 |
+----------------------------------+
| alembic_version                  |
| image_locations                  |
| image_members                    |
| image_properties                 |
| image_tags                       |
| images                           |
| metadef_namespace_resource_types |
| metadef_namespaces               |
| metadef_objects                  |
| metadef_properties               |
| metadef_resource_types           |
| metadef_tags                     |
| migrate_version                  |
| task_info                        |
| tasks                            |
+----------------------------------+
15 rows in set (0.001 sec)

[root@controller ~]# 

4. Start the glance image service and enable it at boot

[root@controller ~]# systemctl enable openstack-glance-api.service
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-glance-api.service to /usr/lib/systemd/system/openstack-glance-api.service.
[root@controller ~]# systemctl start openstack-glance-api.service
[root@controller ~]# systemctl status openstack-glance-api.service
● openstack-glance-api.service - OpenStack Image Service (code-named Glance) API server
   Loaded: loaded (/usr/lib/systemd/system/openstack-glance-api.service; enabled; vendor preset: disabled)
   Active: active (running) since 四 2023-08-31 14:48:53 CST; 21s ago
 Main PID: 7696 (glance-api)
   CGroup: /system.slice/openstack-glance-api.service
           ├─7696 /usr/bin/python2 /usr/bin/glance-api
           ├─7708 /usr/bin/python2 /usr/bin/glance-api
           ├─7709 /usr/bin/python2 /usr/bin/glance-api
           ├─7710 /usr/bin/python2 /usr/bin/glance-api
           ├─7711 /usr/bin/python2 /usr/bin/glance-api
           ├─7712 /usr/bin/python2 /usr/bin/glance-api
           ├─7713 /usr/bin/python2 /usr/bin/glance-api
           ├─7714 /usr/bin/python2 /usr/bin/glance-api
           └─7715 /usr/bin/python2 /usr/bin/glance-api

8月 31 14:48:53 controller systemd[1]: Started OpenStack Image Service (code-named Glance) API server.
8月 31 14:48:53 controller glance-api[7696]: /usr/lib/python2.7/site-packages/paste/deploy/loadwsgi.py:22: PkgResourcesDeprecationWarning: Pa...arately.
8月 31 14:48:53 controller glance-api[7696]: return pkg_resources.EntryPoint.parse("x=" + s).load(False)
Hint: Some lines were ellipsized, use -l to show in full.

(三)、Verify that glance is installed correctly

1. Upload an image

Upload an image to the image service using the qcow2 disk format and the bare container format. To make it public (visible to every project), --visibility public would also need to be passed; without it, as in the command below, the image ends up with shared visibility (a follow-up command after the image listing shows how to change that).

[root@controller ~]# glance image-create --name cirros --disk-format qcow2 --container-format bare --file cirros-0.3.3-x86_64-disk.img
+------------------+----------------------------------------------------------------------------------+
| Property         | Value                                                                            |
+------------------+----------------------------------------------------------------------------------+
| checksum         | ee1eca47dc88f4879d8a229cc70a07c6                                                 |
| container_format | bare                                                                             |
| created_at       | 2023-08-31T13:52:04Z                                                             |
| disk_format      | qcow2                                                                            |
| id               | 9ce079b3-4224-4447-9ce0-766ac59cf185                                             |
| min_disk         | 0                                                                                |
| min_ram          | 0                                                                                |
| name             | cirros                                                                           |
| os_hash_algo     | sha512                                                                           |
| os_hash_value    | 1b03ca1bc3fafe448b90583c12f367949f8b0e665685979d95b004e48574b953316799e23240f4f7 |
|                  | 39d1b5eb4c4ca24d38fdc6f4f9d8247a2bc64db25d6bbdb2                                 |
| os_hidden        | False                                                                            |
| owner            | bde16102d6ac43d7a385efffc09281f3                                                 |
| protected        | False                                                                            |
| size             | 13287936                                                                         |
| status           | active                                                                           |
| tags             | []                                                                               |
| updated_at       | 2023-08-31T13:52:04Z                                                             |
| virtual_size     | Not available                                                                    |
| visibility       | shared                                                                           |
+------------------+----------------------------------------------------------------------------------+

2. List the images

[root@controller ~]# glance image-list
+--------------------------------------+--------+
| ID                                   | Name   |
+--------------------------------------+--------+
| 9ce079b3-4224-4447-9ce0-766ac59cf185 | cirros |
+--------------------------------------+--------+
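Because --visibility public was not passed, the image above came out with shared visibility. If it really should be visible to every project, it can be changed afterwards; a small sketch (cirros is the image name used above):

[root@controller ~]# openstack image set --public cirros
[root@controller ~]# openstack image show cirros -c visibility -f value        # should now print: public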

At this point the glance image service has been installed and started successfully.

四、Controller node - Placement service

(一)、Create the placement database, credentials, and API endpoints

1. Create the placement database and grant privileges

Log in to MariaDB as root; the password is 000000, the one set when the database was initialized during controller environment preparation.

[root@controller ~]# mysql -uroot -p000000
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 19
Server version: 10.3.20-MariaDB MariaDB Server

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> CREATE DATABASE placement;
Query OK, 1 row affected (0.001 sec)

MariaDB [(none)]>  GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' \
    ->   IDENTIFIED BY 'placement123';
Query OK, 0 rows affected (0.001 sec)

MariaDB [(none)]> quit
Bye
[root@controller ~]#
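Before moving on, the new grant can be checked by logging in over the network as the placement user (password placement123, as granted above); a quick sketch:

[root@controller ~]# mysql -h controller -uplacement -pplacement123 -e "SHOW DATABASES LIKE 'placement';"        # the placement database should be listed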

2. Create the service credentials

As with the previous services, create and register the service credentials for this project. The commands are summarized below; step 3 shows the same steps run interactively with their output, so run one version or the other. The password used for the placement user throughout this guide is placement.

cd 
source admin-openrc.sh
# Create the service user
openstack user create --domain default --password=placement placement
# Add the placement user to the service project with the admin role
openstack role add --project service --user placement admin
# Create the service entity
openstack service create --name placement --description "Placement API" placement

3. Create the user, assign the role, and register the service and API endpoints (with output)

[root@controller ~]# openstack user create --domain default --password-prompt placement 
User Password:   					 	 	 	#use the password placement throughout
Repeat User Password:
+---------------------+----------------------------------+
| Field               | Value                            |
+---------------------+----------------------------------+
| domain_id           | default                          |
| enabled             | True                             |
| id                  | ad0e4fcb427a492a919726adfc371a51 |
| name                | placement                        |
| options             | {}                               |
| password_expires_at | None                             |
+---------------------+----------------------------------+
#Add the placement user to the service project with the admin role
[root@controller ~]# openstack role add --project service --user placement admin
#This command produces no output
#Create the Placement API entry in the service catalog
[root@controller ~]# openstack service create --name placement --description "Placement API" placement
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | Placement API                    |
| enabled     | True                             |
| id          | 4f3b43c0ca0e4709bee8756fc1ca88f5 |
| name        | placement                        |
| type        | placement                        |
+-------------+----------------------------------+

#Create the Placement API service endpoints:
[root@controller ~]# openstack endpoint create --region RegionOne placement public http://controller:8778
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | d68e31b5203d4a5485aad82f741655a8 |
| interface    | public                           |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 4f3b43c0ca0e4709bee8756fc1ca88f5 |
| service_name | placement                        |
| service_type | placement                        |
| url          | http://controller:8778           |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne placement internal http://controller:8778
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | fe09667060b84d068a81c47209fa3dbd |
| interface    | internal                         |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 4f3b43c0ca0e4709bee8756fc1ca88f5 |
| service_name | placement                        |
| service_type | placement                        |
| url          | http://controller:8778           |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne placement admin http://controller:8778
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 2790883b25e94e6f8e2dd62196ca7f3b |
| interface    | admin                            |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 4f3b43c0ca0e4709bee8756fc1ca88f5 |
| service_name | placement                        |
| service_type | placement                        |
| url          | http://controller:8778           |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint list
+----------------------------------+-----------+--------------+--------------+---------+-----------+-----------------------------+
| ID                               | Region    | Service Name | Service Type | Enabled | Interface | URL                         |
+----------------------------------+-----------+--------------+--------------+---------+-----------+-----------------------------+
| 03ab533d85ed4d81b511945d33545982 | RegionOne | keystone     | identity     | True    | admin     | http://controller:5000/v3/  |
| 2790883b25e94e6f8e2dd62196ca7f3b | RegionOne | placement    | placement    | True    | admin     | http://controller:8778      |
| 3664621381b74f79a833029762f879b8 | RegionOne | glance       | image        | True    | public    | http://controller:9292      |
| 68e0f4afa6464d9ea43652b0248d1d2e | RegionOne | glance       | image        | True    | internal  | http://controller:9292      |
| 6c0cc1dfd0584251b34c95516a46130d | RegionOne | glance       | image        | True    | admin     | http://controller:9292      |
| b70c096688d947f696c19fc300abce8e | RegionOne | keystone     | identity     | True    | public    | http://controller:5000/v3/  |
| d68e31b5203d4a5485aad82f741655a8 | RegionOne | placement    | placement    | True    | public    | http://controller:8778      |
| eea90f0f15fe4fb6b8421e1ce349e1ca | RegionOne | keystone     | identity     | True    | internal  | http://controller:5000/v3/  |
| fe09667060b84d068a81c47209fa3dbd | RegionOne | placement    | placement    | True    | internal  | http://controller:8778      |
+----------------------------------+-----------+--------------+--------------+---------+-----------+-----------------------------+
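When the endpoint table gets long, the output can be filtered to a single service; a small sketch:

[root@controller ~]# openstack endpoint list --service placement        # should show exactly the three endpoints created above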

(二)、Install and configure the placement software

1. Install the placement package

[root@controller ~]#  yum install openstack-placement-api -y

2. Edit the configuration file

[root@controller ~]# vim /etc/placement/placement.conf 
[placement_database]
connection = mysql+pymysql://placement:placement123@controller/placement

[api]
auth_strategy = keystone

[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
password = placement

Alternatively, the same settings can be applied with openstack-config (provided by the openstack-utils package). Put the following commands into a script and then run it:

touch placement.conf.sh
vim placement.conf.sh

Contents of placement.conf.sh:

#!/bin/bash
#placement.conf.sh
openstack-config --set  /etc/placement/placement.conf api auth_strategy  keystone 
openstack-config --set  /etc/placement/placement.conf keystone_authtoken auth_url  http://controller:5000/v3
openstack-config --set  /etc/placement/placement.conf keystone_authtoken memcached_servers  controller:11211
openstack-config --set  /etc/placement/placement.conf keystone_authtoken auth_type  password
openstack-config --set  /etc/placement/placement.conf keystone_authtoken project_domain_name  default 
openstack-config --set  /etc/placement/placement.conf keystone_authtoken user_domain_name  default
openstack-config --set  /etc/placement/placement.conf keystone_authtoken project_name  service 
openstack-config --set  /etc/placement/placement.conf keystone_authtoken username  placement 
openstack-config --set  /etc/placement/placement.conf keystone_authtoken password  placement  
openstack-config --set  /etc/placement/placement.conf placement_database connection  mysql+pymysql://placement:placement123@controller/placement

echo "Result of Configuration"
grep '^[a-z]' /etc/placement/placement.conf

bash placement.conf.sh

(2) The Placement API virtual host configuration file
/etc/httpd/conf.d/00-placement-api.conf
In the Train release this file is generated automatically when openstack-placement-api is installed.
Because of a packaging bug, this virtual host file (it belongs to placement, not nova) needs an extra <Directory> block, otherwise requests to the API may be rejected. Back up the file, then edit it:

cp /etc/httpd/conf.d/00-placement-api.conf{,.bak}
vim /etc/httpd/conf.d/00-placement-api.conf

Append the following at the end of the file:

<Directory /usr/bin>
   <IfVersion >= 2.4>
      Require all granted
   </IfVersion>
   <IfVersion < 2.4>
      Order allow,deny
      Allow from all
   </IfVersion>
   
</Directory>
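The Placement API runs inside httpd, so the change above only takes effect after httpd is restarted. A quick sanity check is sketched below (any HTTP status line on port 8778 shows that httpd is serving the Placement API):

[root@controller ~]# systemctl restart httpd
[root@controller ~]# curl -si http://controller:8778 | head -n 1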

3. Sync the placement database

(1) Sync and initialize

[root@controller ~]# su -s /bin/sh -c "placement-manage db sync" placement
/usr/lib/python2.7/site-packages/pymysql/cursors.py:170: Warning: (1280, u"Name 'alembic_version_pkc' ignored for PRIMARY key.")
  result = self._query(query)
#This warning can be ignored

(2) After the sync completes, connect to the database and verify
Make sure all of the required tables have been created; otherwise the following steps may fail.

[root@controller ~]# mysql -p000000
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 23
Server version: 10.3.20-MariaDB MariaDB Server

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> use placement
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A

Database changed
MariaDB [placement]> show tables;
+------------------------------+
| Tables_in_placement          |
+------------------------------+
| alembic_version              |
| allocations                  |
| consumers                    |
| inventories                  |
| placement_aggregates         |
| projects                     |
| resource_classes             |
| resource_provider_aggregates |
| resource_provider_traits     |
| resource_providers           |
| traits                       |
| users                        |
+------------------------------+
12 rows in set (0.001 sec)

4. Verify

[root@controller ~]# placement-status upgrade check
+----------------------------------+
| Upgrade Check Results            |
+----------------------------------+
| Check: Missing Root Provider IDs |
| Result: Success                  |
| Details: None                    |
+----------------------------------+
| Check: Incomplete Consumers      |
| Result: Success                  |
| Details: None                    |
+----------------------------------+

The placement service installation is complete.

五、Nova compute service

5.1 Controller node - Nova compute service

Note: this subsection is done on the controller node.

(一)、Create the nova databases, credentials, and API endpoints

1. Create the databases and grant privileges

Three databases are needed: nova_api, nova, and nova_cell0.

[root@controller ~]# mysql -p000000
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 25
Server version: 10.3.20-MariaDB MariaDB Server

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> CREATE DATABASE nova_api;
Query OK, 1 row affected (0.001 sec)

MariaDB [(none)]> CREATE DATABASE nova;
Query OK, 1 row affected (0.001 sec)

MariaDB [(none)]> CREATE DATABASE nova_cell0;
Query OK, 1 row affected (0.001 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'nova123';
Query OK, 0 rows affected (0.001 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'nova123';
Query OK, 0 rows affected (0.001 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'nova123';
Query OK, 0 rows affected (0.001 sec)

MariaDB [(none)]> quit
Bye
[root@controller ~]# 
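Equivalently, the three databases and their grants can be created in one non-interactive step. A sketch reusing the root password 000000 and the nova database password nova123 from this section:

[root@controller ~]# mysql -uroot -p000000 <<'EOF'
CREATE DATABASE IF NOT EXISTS nova_api;
CREATE DATABASE IF NOT EXISTS nova;
CREATE DATABASE IF NOT EXISTS nova_cell0;
GRANT ALL PRIVILEGES ON nova_api.*   TO 'nova'@'%' IDENTIFIED BY 'nova123';
GRANT ALL PRIVILEGES ON nova.*       TO 'nova'@'%' IDENTIFIED BY 'nova123';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'nova123';
EOF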

2. Create the compute service credentials

(1) Create the nova user in keystone

[root@controller ~]# source /etc/keystone/admin-openrc.sh 
[root@controller ~]# openstack user create --domain default --password-prompt nova
User Password:  					 		 #use the password nova throughout
Repeat User Password:   
+---------------------+----------------------------------+
| Field               | Value                            |
+---------------------+----------------------------------+
| domain_id           | default                          |
| enabled             | True                             |
| id                  | 211fe006e8e44ee69599ec8656397e8e |
| name                | nova                             |
| options             | {}                               |
| password_expires_at | None                             |
+---------------------+----------------------------------+
[root@controller ~]#

(2) In keystone, add the nova user to the service project with the admin role
The following command produces no output:

[root@controller ~]# openstack role add --project service --user nova admin
[root@controller ~]# 

(3) Create the nova compute service entity

[root@controller ~]# openstack service create --name nova --description "OpenStack Compute" compute
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Compute                |
| enabled     | True                             |
| id          | 104e1e8e3bec4d2eae8953438fd994a9 |
| name        | nova                             |
| type        | compute                          |
+-------------+----------------------------------+
[root@controller ~]#

3. Create the compute API service endpoints

[root@controller ~]# openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | b915aca153fd4b8ab11ce6effaa21c37 |
| interface    | public                           |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 104e1e8e3bec4d2eae8953438fd994a9 |
| service_name | nova                             |
| service_type | compute                          |
| url          | http://controller:8774/v2.1      |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | 0e7a452323d9465b8e4f43b1216beef5 |
| interface    | internal                         |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 104e1e8e3bec4d2eae8953438fd994a9 |
| service_name | nova                             |
| service_type | compute                          |
| url          | http://controller:8774/v2.1      |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | be06e99962e14c479b3afb7d33f2c237 |
| interface    | admin                            |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 104e1e8e3bec4d2eae8953438fd994a9 |
| service_name | nova                             |
| service_type | compute                          |
| url          | http://controller:8774/v2.1      |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint list
+----------------------------------+-----------+--------------+--------------+---------+-----------+-----------------------------+
| ID                               | Region    | Service Name | Service Type | Enabled | Interface | URL                         |
+----------------------------------+-----------+--------------+--------------+---------+-----------+-----------------------------+
| 03ab533d85ed4d81b511945d33545982 | RegionOne | keystone     | identity     | True    | admin     | http://controller:5000/v3/  |
| 0e7a452323d9465b8e4f43b1216beef5 | RegionOne | nova         | compute      | True    | internal  | http://controller:8774/v2.1 |
| 2790883b25e94e6f8e2dd62196ca7f3b | RegionOne | placement    | placement    | True    | admin     | http://controller:8778      |
| 3664621381b74f79a833029762f879b8 | RegionOne | glance       | image        | True    | public    | http://controller:9292      |
| 68e0f4afa6464d9ea43652b0248d1d2e | RegionOne | glance       | image        | True    | internal  | http://controller:9292      |
| 6c0cc1dfd0584251b34c95516a46130d | RegionOne | glance       | image        | True    | admin     | http://controller:9292      |
| b70c096688d947f696c19fc300abce8e | RegionOne | keystone     | identity     | True    | public    | http://controller:5000/v3/  |
| b915aca153fd4b8ab11ce6effaa21c37 | RegionOne | nova         | compute      | True    | public    | http://controller:8774/v2.1 |
| be06e99962e14c479b3afb7d33f2c237 | RegionOne | nova         | compute      | True    | admin     | http://controller:8774/v2.1 |
| d68e31b5203d4a5485aad82f741655a8 | RegionOne | placement    | placement    | True    | public    | http://controller:8778      |
| eea90f0f15fe4fb6b8421e1ce349e1ca | RegionOne | keystone     | identity     | True    | internal  | http://controller:5000/v3/  |
| fe09667060b84d068a81c47209fa3dbd | RegionOne | placement    | placement    | True    | internal  | http://controller:8778      |
+----------------------------------+-----------+--------------+--------------+---------+-----------+-----------------------------+

(二)、Install and configure the nova software

1. Install the nova packages

[root@controller ~]# yum install openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler -y

openstack-nova-conductor   handles database access for the compute nodes
openstack-nova-novncproxy  provides the VNC console proxy for instances
openstack-nova-scheduler   handles scheduling

2. Edit the configuration file

[root@controller ~]# vim /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata

[api_database]
connection = mysql+pymysql://nova:nova123@controller/nova_api

[database]
connection = mysql+pymysql://nova:nova123@controller/nova

[DEFAULT]
transport_url = rabbit://openstack:openstack123@controller:5672/

[api]
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova

[DEFAULT]
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver

[DEFAULT]
my_ip = 192.168.162.76

[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip

[glance]
api_servers = http://controller:9292

[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement

[root@controller ~]#
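As with placement.conf, the same nova.conf settings can be applied with an openstack-config script instead of editing by hand. A sketch built from the values above (nova.conf.sh is just a name chosen here; openstack-config comes from the openstack-utils package):

#!/bin/bash
#nova.conf.sh
openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata
openstack-config --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:openstack123@controller:5672/
openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron true
openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 192.168.162.76
openstack-config --set /etc/nova/nova.conf api_database connection mysql+pymysql://nova:nova123@controller/nova_api
openstack-config --set /etc/nova/nova.conf database connection mysql+pymysql://nova:nova123@controller/nova
openstack-config --set /etc/nova/nova.conf api auth_strategy keystone
openstack-config --set /etc/nova/nova.conf keystone_authtoken www_authenticate_uri http://controller:5000/
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://controller:5000/
openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name Default
openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name Default
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
openstack-config --set /etc/nova/nova.conf keystone_authtoken password nova
openstack-config --set /etc/nova/nova.conf vnc enabled true
openstack-config --set /etc/nova/nova.conf vnc server_listen '$my_ip'
openstack-config --set /etc/nova/nova.conf vnc server_proxyclient_address '$my_ip'
openstack-config --set /etc/nova/nova.conf glance api_servers http://controller:9292
openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp
openstack-config --set /etc/nova/nova.conf placement region_name RegionOne
openstack-config --set /etc/nova/nova.conf placement project_domain_name Default
openstack-config --set /etc/nova/nova.conf placement user_domain_name Default
openstack-config --set /etc/nova/nova.conf placement project_name service
openstack-config --set /etc/nova/nova.conf placement auth_type password
openstack-config --set /etc/nova/nova.conf placement auth_url http://controller:5000/v3
openstack-config --set /etc/nova/nova.conf placement username placement
openstack-config --set /etc/nova/nova.conf placement password placement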

3. Sync and create the related databases (note the order)

(1) Populate the nova-api database

[root@controller ~]# su -s /bin/sh -c "nova-manage api_db sync" nova

Verify the databases (note: the nova database listed below is only fully populated after the "nova-manage db sync" run in step (3); immediately after api_db sync you would check nova_api instead)

[root@controller ~]# mysql -p000000
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 41
Server version: 10.3.20-MariaDB MariaDB Server

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> show databases;
+--------------------+
| Database           |
+--------------------+
| glance             |
| information_schema |
| keystone           |
| mysql              |
| nova               |
| nova_api           |
| nova_cell0         |
| performance_schema |
| placement          |
+--------------------+
9 rows in set (0.001 sec)

MariaDB [(none)]> use nova;
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A

Database changed
MariaDB [nova]> show tables;
+--------------------------------------------+
| Tables_in_nova                             |
+--------------------------------------------+
| agent_builds                               |
| aggregate_hosts                            |
| aggregate_metadata                         |
| aggregates                                 |
| allocations                                |
| block_device_mapping                       |
| bw_usage_cache                             |
| cells                                      |
| certificates                               |
| compute_nodes                              |
| console_auth_tokens                        |
| console_pools                              |
| consoles                                   |
| dns_domains                                |
| fixed_ips                                  |
| floating_ips                               |
| instance_actions                           |
| instance_actions_events                    |
| instance_extra                             |
| instance_faults                            |
| instance_group_member                      |
| instance_group_policy                      |
| instance_groups                            |
| instance_id_mappings                       |
| instance_info_caches                       |
| instance_metadata                          |
| instance_system_metadata                   |
| instance_type_extra_specs                  |
| instance_type_projects                     |
| instance_types                             |
| instances                                  |
| inventories                                |
| key_pairs                                  |
| migrate_version                            |
| migrations                                 |
| networks                                   |
| pci_devices                                |
| project_user_quotas                        |
| provider_fw_rules                          |
| quota_classes                              |
| quota_usages                               |
| quotas                                     |
| reservations                               |
| resource_provider_aggregates               |
| resource_providers                         |
| s3_images                                  |
| security_group_default_rules               |
| security_group_instance_association        |
| security_group_rules                       |
| security_groups                            |
| services                                   |
| shadow_agent_builds                        |
| shadow_aggregate_hosts                     |
| shadow_aggregate_metadata                  |
| shadow_aggregates                          |
| shadow_block_device_mapping                |
| shadow_bw_usage_cache                      |
| shadow_cells                               |
| shadow_certificates                        |
| shadow_compute_nodes                       |
| shadow_console_pools                       |
| shadow_consoles                            |
| shadow_dns_domains                         |
| shadow_fixed_ips                           |
| shadow_floating_ips                        |
| shadow_instance_actions                    |
| shadow_instance_actions_events             |
| shadow_instance_extra                      |
| shadow_instance_faults                     |
| shadow_instance_group_member               |
| shadow_instance_group_policy               |
| shadow_instance_groups                     |
| shadow_instance_id_mappings                |
| shadow_instance_info_caches                |
| shadow_instance_metadata                   |
| shadow_instance_system_metadata            |
| shadow_instance_type_extra_specs           |
| shadow_instance_type_projects              |
| shadow_instance_types                      |
| shadow_instances                           |
| shadow_key_pairs                           |
| shadow_migrate_version                     |
| shadow_migrations                          |
| shadow_networks                            |
| shadow_pci_devices                         |
| shadow_project_user_quotas                 |
| shadow_provider_fw_rules                   |
| shadow_quota_classes                       |
| shadow_quota_usages                        |
| shadow_quotas                              |
| shadow_reservations                        |
| shadow_s3_images                           |
| shadow_security_group_default_rules        |
| shadow_security_group_instance_association |
| shadow_security_group_rules                |
| shadow_security_groups                     |
| shadow_services                            |
| shadow_snapshot_id_mappings                |
| shadow_snapshots                           |
| shadow_task_log                            |
| shadow_virtual_interfaces                  |
| shadow_volume_id_mappings                  |
| shadow_volume_usage_cache                  |
| snapshot_id_mappings                       |
| snapshots                                  |
| tags                                       |
| task_log                                   |
| virtual_interfaces                         |
| volume_id_mappings                         |
| volume_usage_cache                         |
+--------------------------------------------+
110 rows in set (0.002 sec)

(2) Register the cell0 database

[root@controller ~]# su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
[root@controller ~]#

(3) Create the cell1 cell and sync the nova database

[root@controller ~]# su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
d23ddc0c-3592-4d1f-ab8e-86ae968ea073
[root@controller ~]# su -s /bin/sh -c "nova-manage db sync" nova

(4) Verify the databases

[root@controller ~]# mysql -p000000
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 41
Server version: 10.3.20-MariaDB MariaDB Server

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> show databases;
+--------------------+
| Database           |
+--------------------+
| glance             |
| information_schema |
| keystone           |
| mysql              |
| nova               |
| nova_api           |
| nova_cell0         |
| performance_schema |
| placement          |
+--------------------+
9 rows in set (0.001 sec)

MariaDB [nova]> use nova_api
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A

Database changed
MariaDB [nova_api]> show tables;
+------------------------------+
| Tables_in_nova_api           |
+------------------------------+
| aggregate_hosts              |
| aggregate_metadata           |
| aggregates                   |
| allocations                  |
| build_requests               |
| cell_mappings                |
| consumers                    |
| flavor_extra_specs           |
| flavor_projects              |
| flavors                      |
| host_mappings                |
| instance_group_member        |
| instance_group_policy        |
| instance_groups              |
| instance_mappings            |
| inventories                  |
| key_pairs                    |
| migrate_version              |
| placement_aggregates         |
| project_user_quotas          |
| projects                     |
| quota_classes                |
| quota_usages                 |
| quotas                       |
| request_specs                |
| reservations                 |
| resource_classes             |
| resource_provider_aggregates |
| resource_provider_traits     |
| resource_providers           |
| traits                       |
| users                        |
+------------------------------+
32 rows in set (0.001 sec)

MariaDB [nova_api]> use nova_cell0
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A

Database changed
MariaDB [nova_cell0]> show tables;
+--------------------------------------------+
| Tables_in_nova_cell0                       |
+--------------------------------------------+
| agent_builds                               |
| aggregate_hosts                            |
| aggregate_metadata                         |
| aggregates                                 |
| allocations                                |
| block_device_mapping                       |
| bw_usage_cache                             |
| cells                                      |
| certificates                               |
| compute_nodes                              |
| console_auth_tokens                        |
| console_pools                              |
| consoles                                   |
| dns_domains                                |
| fixed_ips                                  |
| floating_ips                               |
| instance_actions                           |
| instance_actions_events                    |
| instance_extra                             |
| instance_faults                            |
| instance_group_member                      |
| instance_group_policy                      |
| instance_groups                            |
| instance_id_mappings                       |
| instance_info_caches                       |
| instance_metadata                          |
| instance_system_metadata                   |
| instance_type_extra_specs                  |
| instance_type_projects                     |
| instance_types                             |
| instances                                  |
| inventories                                |
| key_pairs                                  |
| migrate_version                            |
| migrations                                 |
| networks                                   |
| pci_devices                                |
| project_user_quotas                        |
| provider_fw_rules                          |
| quota_classes                              |
| quota_usages                               |
| quotas                                     |
| reservations                               |
| resource_provider_aggregates               |
| resource_providers                         |
| s3_images                                  |
| security_group_default_rules               |
| security_group_instance_association        |
| security_group_rules                       |
| security_groups                            |
| services                                   |
| shadow_agent_builds                        |
| shadow_aggregate_hosts                     |
| shadow_aggregate_metadata                  |
| shadow_aggregates                          |
| shadow_block_device_mapping                |
| shadow_bw_usage_cache                      |
| shadow_cells                               |
| shadow_certificates                        |
| shadow_compute_nodes                       |
| shadow_console_pools                       |
| shadow_consoles                            |
| shadow_dns_domains                         |
| shadow_fixed_ips                           |
| shadow_floating_ips                        |
| shadow_instance_actions                    |
| shadow_instance_actions_events             |
| shadow_instance_extra                      |
| shadow_instance_faults                     |
| shadow_instance_group_member               |
| shadow_instance_group_policy               |
| shadow_instance_groups                     |
| shadow_instance_id_mappings                |
| shadow_instance_info_caches                |
| shadow_instance_metadata                   |
| shadow_instance_system_metadata            |
| shadow_instance_type_extra_specs           |
| shadow_instance_type_projects              |
| shadow_instance_types                      |
| shadow_instances                           |
| shadow_key_pairs                           |
| shadow_migrate_version                     |
| shadow_migrations                          |
| shadow_networks                            |
| shadow_pci_devices                         |
| shadow_project_user_quotas                 |
| shadow_provider_fw_rules                   |
| shadow_quota_classes                       |
| shadow_quota_usages                        |
| shadow_quotas                              |
| shadow_reservations                        |
| shadow_s3_images                           |
| shadow_security_group_default_rules        |
| shadow_security_group_instance_association |
| shadow_security_group_rules                |
| shadow_security_groups                     |
| shadow_services                            |
| shadow_snapshot_id_mappings                |
| shadow_snapshots                           |
| shadow_task_log                            |
| shadow_virtual_interfaces                  |
| shadow_volume_id_mappings                  |
| shadow_volume_usage_cache                  |
| snapshot_id_mappings                       |
| snapshots                                  |
| tags                                       |
| task_log                                   |
| virtual_interfaces                         |
| volume_id_mappings                         |
| volume_usage_cache                         |
+--------------------------------------------+
110 rows in set (0.002 sec)

4. Start the compute services and enable them at boot

[root@controller ~]# systemctl enable openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-api.service to /usr/lib/systemd/system/openstack-nova-api.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-scheduler.service to /usr/lib/systemd/system/openstack-nova-scheduler.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-conductor.service to /usr/lib/systemd/system/openstack-nova-conductor.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-novncproxy.service to /usr/lib/systemd/system/openstack-nova-novncproxy.service.
[root@controller ~]#  systemctl start openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
[root@controller ~]# 

5. Verify

[root@controller ~]# su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
+-------+--------------------------------------+------------------------------------------+-------------------------------------------------+----------+
|  名称 |                 UUID                 |              Transport URL               |                    数据库连接                   | Disabled |
+-------+--------------------------------------+------------------------------------------+-------------------------------------------------+----------+
| cell0 | 00000000-0000-0000-0000-000000000000 |                  none:/                  | mysql+pymysql://nova:****@controller/nova_cell0 |  False   |
| cell1 | d23ddc0c-3592-4d1f-ab8e-86ae968ea073 | rabbit://openstack:****@controller:5672/ |    mysql+pymysql://nova:****@controller/nova    |  False   |
+-------+--------------------------------------+------------------------------------------+-------------------------------------------------+----------+
[root@controller ~]# 
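The controller-side services can also be checked through the API; a quick sketch (with the admin credentials sourced). At this point only nova-conductor and nova-scheduler are expected to be listed; nova-compute is added in the next subsection:

[root@controller ~]# openstack compute service list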

(三)、Controller-node Nova installation complete

At this point the nova compute service is installed on the controller node; continue with the next subsection.

5.2 Compute node - Nova compute service

Note: this subsection is done on every compute node.

Install and configure the nova software

1. Install the nova package

[root@compoute ~]# yum install openstack-nova-compute -y

2. Edit the configuration file

[root@compoute ~]# vim /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata

[DEFAULT]
transport_url = rabbit://openstack:openstack123@controller

[api]
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova

[DEFAULT]
my_ip = 192.168.162.77

[DEFAULT]
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver


[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://192.168.162.76:6080/vnc_auto.html       #point this at an address of the controller that clients can actually reach; see the note below

[glance]
api_servers = http://controller:9292

[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement


# Check whether the CPU supports hardware virtualization
egrep -c '(vmx|svm)' /proc/cpuinfo
# If the result is 0, configure libvirt to use qemu (details in step 3 below)
vim /etc/nova/nova.conf
[libvirt]
virt_type = qemu

Note: the [neutron] section of /etc/nova/nova.conf is configured later; refer to the Networking service section for more details.
The VNC server component listens on all of the node's IP addresses, while the proxy component listens only on the compute node's management-network IP address.
After installation, remember to set novncproxy_base_url to an address that clients can reach (the provider/external address); if the node has several addresses besides the management network, choose the appropriate one for external access.

3. Check whether the compute node supports hardware acceleration for virtual machines

First, determine whether your compute node supports hardware acceleration:

[root@compoute ~]# egrep -c '(vmx|svm)' /proc/cpuinfo
0

① If the value returned is 0, the compute node does not support hardware acceleration and libvirt must be configured to use QEMU with the following change (a non-interactive alternative is sketched after ②):

[root@compoute ~]# vim /etc/nova/nova.conf

[libvirt]
virt_type = qemu

Also, when OpenStack itself is built inside VMware, libvirt must be set to qemu as well.
② If any other value is returned, the compute node supports hardware acceleration and normally no extra configuration is needed.
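For case ① above, the same change can be applied non-interactively in the style of the earlier scripts; a small sketch (assuming openstack-utils is installed on the compute node):

[root@compoute ~]# openstack-config --set /etc/nova/nova.conf libvirt virt_type qemu
[root@compoute ~]# grep '^virt_type' /etc/nova/nova.conf        # confirm virt_type = qemu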

4. Start the compute service and enable it at boot

Two services need to be started:

[root@compoute ~]# systemctl enable libvirtd.service openstack-nova-compute.service
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-compute.service to /usr/lib/systemd/system/openstack-nova-compute.service.
[root@compoute ~]# systemctl start libvirtd.service openstack-nova-compute.service

Note:
(1) If the nova-compute service fails to start, check /var/log/nova/nova-compute.log.
The error may indicate that the firewall on the controller node is blocking access to port 5672. Open port 5672 in the controller's firewall and restart the service on the compute node.
(2) Back on the controller node

# Verify

[root@controller ~]# openstack compute service list --service nova-compute
+----+--------------+----------+------+---------+-------+----------------------------+
| ID | Binary       | Host     | Zone | Status  | State | Updated At                 |
+----+--------------+----------+------+---------+-------+----------------------------+
|  9 | nova-compute | compoute | nova | enabled | up    | 2023-08-31T09:41:27.000000 |
+----+--------------+----------+------+---------+-------+----------------------------+

# Host discovery

[root@controller ~]# su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
Found 2 cell mappings.
Skipping cell0 since it does not contain hosts.
Getting computes from cell 'cell1': d23ddc0c-3592-4d1f-ab8e-86ae968ea073
Checking host mapping for compute host 'compoute': 62d20431-c196-4d53-9794-6c4e3cf99f87
Creating host mapping for compute host 'compoute': 62d20431-c196-4d53-9794-6c4e3cf99f87
Found 1 unmapped computes in cell: d23ddc0c-3592-4d1f-ab8e-86ae968ea073
[root@controller ~]# 
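Host discovery has to be re-run every time a new compute node is added. Optionally it can be made periodic instead; a sketch (300 seconds is just an example interval):

[root@controller ~]# openstack-config --set /etc/nova/nova.conf scheduler discover_hosts_in_cells_interval 300
[root@controller ~]# systemctl restart openstack-nova-scheduler.service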

六、Configure the Neutron networking service

6.1 Controller node configuration

  • Create the neutron database and grant privileges
[root@controller ~]# mysql -p000000
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 166
Server version: 10.3.20-MariaDB MariaDB Server

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> CREATE DATABASE neutron;
Query OK, 1 row affected (0.001 sec)

MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron123';      	#the password is neutron123
Query OK, 0 rows affected (0.004 sec)

MariaDB [(none)]> quit
Bye
[root@controller ~]#

Create the neutron user

[root@controller ~]# openstack user create --domain default --password-prompt neutron          # use the password neutron throughout
User Password:
Repeat User Password:
+---------------------+----------------------------------+
| Field               | Value                            |
+---------------------+----------------------------------+
| domain_id           | default                          |
| enabled             | True                             |
| id                  | 7f4a741449254aaba4215bdb7db9a72f |
| name                | neutron                          |
| options             | {}                               |
| password_expires_at | None                             |
+---------------------+----------------------------------+

Add the admin role to the neutron user

[root@controller ~]# openstack role add --project service --user neutron admin

Create the neutron service entity

[root@controller ~]# openstack service create --name neutron --description "OpenStack Networking" network
+-------------+----------------------------------+
| Field       | Value                            |
+-------------+----------------------------------+
| description | OpenStack Networking             |
| enabled     | True                             |
| id          | 87f8b705519f4d1baceb9a0b256b22f2 |
| name        | neutron                          |
| type        | network                          |
+-------------+----------------------------------+

Create the network service API endpoints

[root@controller ~]# openstack endpoint create --region RegionOne network public http://controller:9696
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | bd312690b34b475386224bc94b781cf7 |
| interface    | public                           |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 87f8b705519f4d1baceb9a0b256b22f2 |
| service_name | neutron                          |
| service_type | network                          |
| url          | http://controller:9696           |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne network internal http://controller:9696
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | a62e420848cc4727bee4f62d36024906 |
| interface    | internal                         |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 87f8b705519f4d1baceb9a0b256b22f2 |
| service_name | neutron                          |
| service_type | network                          |
| url          | http://controller:9696           |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne network admin http://controller:9696
+--------------+----------------------------------+
| Field        | Value                            |
+--------------+----------------------------------+
| enabled      | True                             |
| id           | c97931acdbc04e7491dd49cde78c28e0 |
| interface    | admin                            |
| region       | RegionOne                        |
| region_id    | RegionOne                        |
| service_id   | 87f8b705519f4d1baceb9a0b256b22f2 |
| service_name | neutron                          |
| service_type | network                          |
| url          | http://controller:9696           |
+--------------+----------------------------------+

Install the packages

[root@controller ~]# yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables -y

Configure the neutron files

[root@controller ~]# vim /etc/neutron/neutron.conf
[database]
connection = mysql+pymysql://neutron:neutron123@controller/neutron

[DEFAULT]
core_plugin = ml2
service_plugins =

[DEFAULT]
transport_url = rabbit://openstack:openstack123@controller

[DEFAULT]
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron

[DEFAULT]
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true

#Add the [nova] section at the end of the file
[nova]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova


[oslo_concurrency]
lock_path = /var/lib/neutron/tmp




[root@controller ~]# vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,vlan

[ml2]
tenant_network_types =

[ml2]
mechanism_drivers = linuxbridge

[ml2]
extension_drivers = port_security

[ml2_type_flat]
flat_networks = extnet

[securitygroup]
enable_ipset = true



[root@controller ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = extnet:ens33       	#extnet is a name you define yourself; it maps to the provider NIC ens33

[vxlan]
enable_vxlan = false

[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver


# Configure kernel parameters
[root@controller ~]# vim /etc/sysctl.conf 
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
#Apply the configuration
[root@controller ~]# sysctl -p
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: 没有那个文件或目录
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: 没有那个文件或目录
#This happens because the br_netfilter kernel module has not been loaded yet
[root@controller ~]# modprobe br_netfilter        	#load the module with modprobe
[root@controller ~]# sysctl -p
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
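modprobe only loads the module for the current boot. To make br_netfilter load automatically after a reboot, a small sketch using systemd's modules-load.d:

[root@controller ~]# echo br_netfilter > /etc/modules-load.d/br_netfilter.conf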


[root@controller ~]# vim /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true



[root@controller ~]# vim /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_host = controller
metadata_proxy_shared_secret = xier123      #choose a shared secret of your own



[root@controller ~]# vim /etc/nova/nova.conf
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
service_metadata_proxy = true
metadata_proxy_shared_secret = xier123       #must be identical to the secret set in metadata_agent.ini
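Since these two secrets must match, a quick check before restarting the services; a small sketch:

[root@controller ~]# grep metadata_proxy_shared_secret /etc/neutron/metadata_agent.ini /etc/nova/nova.conf        # both lines should show the same value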

Create the ML2 plugin symlink

[root@controller ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

Sync the database

[root@controller ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
INFO  [alembic.runtime.migration] Context impl MySQLImpl.
INFO  [alembic.runtime.migration] Will assume non-transactional DDL.
/usr/lib/python2.7/site-packages/pymysql/cursors.py:170: Warning: (1280, u"Name 'alembic_version_pkc' ignored for PRIMARY key.")
  result = self._query(query)
  正在对 neutron 运行 upgrade...
INFO  [alembic.runtime.migration] Context impl MySQLImpl.
INFO  [alembic.runtime.migration] Will assume non-transactional DDL.
INFO  [alembic.runtime.migration] Running upgrade  -> kilo
INFO  [alembic.runtime.migration] Running upgrade kilo -> 354db87e3225
INFO  [alembic.runtime.migration] Running upgrade 354db87e3225 -> 599c6a226151
INFO  [alembic.runtime.migration] Running upgrade 599c6a226151 -> 52c5312f6baf
INFO  [alembic.runtime.migration] Running upgrade 52c5312f6baf -> 313373c0ffee
INFO  [alembic.runtime.migration] Running upgrade 313373c0ffee -> 8675309a5c4f
INFO  [alembic.runtime.migration] Running upgrade 8675309a5c4f -> 45f955889773
INFO  [alembic.runtime.migration] Running upgrade 45f955889773 -> 26c371498592
INFO  [alembic.runtime.migration] Running upgrade 26c371498592 -> 1c844d1677f7
INFO  [alembic.runtime.migration] Running upgrade 1c844d1677f7 -> 1b4c6e320f79
INFO  [alembic.runtime.migration] Running upgrade 1b4c6e320f79 -> 48153cb5f051
INFO  [alembic.runtime.migration] Running upgrade 48153cb5f051 -> 9859ac9c136
INFO  [alembic.runtime.migration] Running upgrade 9859ac9c136 -> 34af2b5c5a59
INFO  [alembic.runtime.migration] Running upgrade 34af2b5c5a59 -> 59cb5b6cf4d
INFO  [alembic.runtime.migration] Running upgrade 59cb5b6cf4d -> 13cfb89f881a
INFO  [alembic.runtime.migration] Running upgrade 13cfb89f881a -> 32e5974ada25
INFO  [alembic.runtime.migration] Running upgrade 32e5974ada25 -> ec7fcfbf72ee
INFO  [alembic.runtime.migration] Running upgrade ec7fcfbf72ee -> dce3ec7a25c9
INFO  [alembic.runtime.migration] Running upgrade dce3ec7a25c9 -> c3a73f615e4
INFO  [alembic.runtime.migration] Running upgrade c3a73f615e4 -> 659bf3d90664
INFO  [alembic.runtime.migration] Running upgrade 659bf3d90664 -> 1df244e556f5
INFO  [alembic.runtime.migration] Running upgrade 1df244e556f5 -> 19f26505c74f
INFO  [alembic.runtime.migration] Running upgrade 19f26505c74f -> 15be73214821
INFO  [alembic.runtime.migration] Running upgrade 15be73214821 -> b4caf27aae4
INFO  [alembic.runtime.migration] Running upgrade b4caf27aae4 -> 15e43b934f81
INFO  [alembic.runtime.migration] Running upgrade 15e43b934f81 -> 31ed664953e6
INFO  [alembic.runtime.migration] Running upgrade 31ed664953e6 -> 2f9e956e7532
INFO  [alembic.runtime.migration] Running upgrade 2f9e956e7532 -> 3894bccad37f
INFO  [alembic.runtime.migration] Running upgrade 3894bccad37f -> 0e66c5227a8a
INFO  [alembic.runtime.migration] Running upgrade 0e66c5227a8a -> 45f8dd33480b
INFO  [alembic.runtime.migration] Running upgrade 45f8dd33480b -> 5abc0278ca73
INFO  [alembic.runtime.migration] Running upgrade 5abc0278ca73 -> d3435b514502
INFO  [alembic.runtime.migration] Running upgrade d3435b514502 -> 30107ab6a3ee
INFO  [alembic.runtime.migration] Running upgrade 30107ab6a3ee -> c415aab1c048
INFO  [alembic.runtime.migration] Running upgrade c415aab1c048 -> a963b38d82f4
INFO  [alembic.runtime.migration] Running upgrade kilo -> 30018084ec99
INFO  [alembic.runtime.migration] Running upgrade 30018084ec99 -> 4ffceebfada
INFO  [alembic.runtime.migration] Running upgrade 4ffceebfada -> 5498d17be016
INFO  [alembic.runtime.migration] Running upgrade 5498d17be016 -> 2a16083502f3
INFO  [alembic.runtime.migration] Running upgrade 2a16083502f3 -> 2e5352a0ad4d
INFO  [alembic.runtime.migration] Running upgrade 2e5352a0ad4d -> 11926bcfe72d
INFO  [alembic.runtime.migration] Running upgrade 11926bcfe72d -> 4af11ca47297
INFO  [alembic.runtime.migration] Running upgrade 4af11ca47297 -> 1b294093239c
INFO  [alembic.runtime.migration] Running upgrade 1b294093239c -> 8a6d8bdae39
INFO  [alembic.runtime.migration] Running upgrade 8a6d8bdae39 -> 2b4c2465d44b
INFO  [alembic.runtime.migration] Running upgrade 2b4c2465d44b -> e3278ee65050
INFO  [alembic.runtime.migration] Running upgrade e3278ee65050 -> c6c112992c9
INFO  [alembic.runtime.migration] Running upgrade c6c112992c9 -> 5ffceebfada
INFO  [alembic.runtime.migration] Running upgrade 5ffceebfada -> 4ffceebfcdc
INFO  [alembic.runtime.migration] Running upgrade 4ffceebfcdc -> 7bbb25278f53
INFO  [alembic.runtime.migration] Running upgrade 7bbb25278f53 -> 89ab9a816d70
INFO  [alembic.runtime.migration] Running upgrade a963b38d82f4 -> 3d0e74aa7d37
INFO  [alembic.runtime.migration] Running upgrade 3d0e74aa7d37 -> 030a959ceafa
INFO  [alembic.runtime.migration] Running upgrade 030a959ceafa -> a5648cfeeadf
INFO  [alembic.runtime.migration] Running upgrade a5648cfeeadf -> 0f5bef0f87d4
INFO  [alembic.runtime.migration] Running upgrade 0f5bef0f87d4 -> 67daae611b6e
INFO  [alembic.runtime.migration] Running upgrade 89ab9a816d70 -> c879c5e1ee90
INFO  [alembic.runtime.migration] Running upgrade c879c5e1ee90 -> 8fd3918ef6f4
INFO  [alembic.runtime.migration] Running upgrade 8fd3918ef6f4 -> 4bcd4df1f426
INFO  [alembic.runtime.migration] Running upgrade 4bcd4df1f426 -> b67e765a3524
INFO  [alembic.runtime.migration] Running upgrade 67daae611b6e -> 6b461a21bcfc
INFO  [alembic.runtime.migration] Running upgrade 6b461a21bcfc -> 5cd92597d11d
INFO  [alembic.runtime.migration] Running upgrade 5cd92597d11d -> 929c968efe70
INFO  [alembic.runtime.migration] Running upgrade 929c968efe70 -> a9c43481023c
INFO  [alembic.runtime.migration] Running upgrade a9c43481023c -> 804a3c76314c
INFO  [alembic.runtime.migration] Running upgrade 804a3c76314c -> 2b42d90729da
INFO  [alembic.runtime.migration] Running upgrade 2b42d90729da -> 62c781cb6192
INFO  [alembic.runtime.migration] Running upgrade 62c781cb6192 -> c8c222d42aa9
INFO  [alembic.runtime.migration] Running upgrade c8c222d42aa9 -> 349b6fd605a6
INFO  [alembic.runtime.migration] Running upgrade 349b6fd605a6 -> 7d32f979895f
INFO  [alembic.runtime.migration] Running upgrade 7d32f979895f -> 594422d373ee
INFO  [alembic.runtime.migration] Running upgrade 594422d373ee -> 61663558142c
INFO  [alembic.runtime.migration] Running upgrade 61663558142c -> 867d39095bf4, port forwarding
INFO  [alembic.runtime.migration] Running upgrade 867d39095bf4 -> d72db3e25539, modify uniq port forwarding
INFO  [alembic.runtime.migration] Running upgrade d72db3e25539 -> cada2437bf41
INFO  [alembic.runtime.migration] Running upgrade cada2437bf41 -> 195176fb410d, router gateway IP QoS
INFO  [alembic.runtime.migration] Running upgrade 195176fb410d -> fb0167bd9639
INFO  [alembic.runtime.migration] Running upgrade fb0167bd9639 -> 0ff9e3881597
INFO  [alembic.runtime.migration] Running upgrade 0ff9e3881597 -> 9bfad3f1e780
INFO  [alembic.runtime.migration] Running upgrade 9bfad3f1e780 -> 63fd95af7dcd
INFO  [alembic.runtime.migration] Running upgrade 63fd95af7dcd -> c613d0b82681
INFO  [alembic.runtime.migration] Running upgrade b67e765a3524 -> a84ccf28f06a
INFO  [alembic.runtime.migration] Running upgrade a84ccf28f06a -> 7d9d8eeec6ad
INFO  [alembic.runtime.migration] Running upgrade 7d9d8eeec6ad -> a8b517cff8ab
INFO  [alembic.runtime.migration] Running upgrade a8b517cff8ab -> 3b935b28e7a0
INFO  [alembic.runtime.migration] Running upgrade 3b935b28e7a0 -> b12a3ef66e62
INFO  [alembic.runtime.migration] Running upgrade b12a3ef66e62 -> 97c25b0d2353
INFO  [alembic.runtime.migration] Running upgrade 97c25b0d2353 -> 2e0d7a8a1586
INFO  [alembic.runtime.migration] Running upgrade 2e0d7a8a1586 -> 5c85685d616d
  OK
[root@controller ~]# 
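(Optional) A quick sanity check that the database sync populated the neutron schema is to list a few of the newly created tables. This is a sketch rather than a step of the original procedure; it assumes the MariaDB root credentials configured earlier in this guide.

# List the first few tables in the neutron database (enter the MariaDB root password when prompted)
[root@controller ~]# mysql -u root -p -e "SHOW TABLES FROM neutron;" | head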

Start the Neutron services

# Restart the Compute API service
[root@controller ~]# systemctl restart openstack-nova-api.service
# Enable the Networking services to start at boot, then start them
[root@controller ~]# systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
Created symlink from /etc/systemd/system/multi-user.target.wants/neutron-server.service to /usr/lib/systemd/system/neutron-server.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/neutron-linuxbridge-agent.service to /usr/lib/systemd/system/neutron-linuxbridge-agent.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/neutron-dhcp-agent.service to /usr/lib/systemd/system/neutron-dhcp-agent.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/neutron-metadata-agent.service to /usr/lib/systemd/system/neutron-metadata-agent.service.
[root@controller ~]# systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
[root@controller ~]# 
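A minimal way to confirm that the four Networking services actually came up, offered here as an optional check rather than part of the original steps:

# Each of the four services should report "active"
[root@controller ~]# systemctl is-active neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service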

1.2 Compute node

Install the packages and configure the Neutron files

[root@compute ~]# yum install openstack-neutron-linuxbridge ebtables ipset -y


# Edit the Neutron configuration file
[root@compute ~]# vim /etc/neutron/neutron.conf 
[DEFAULT]
transport_url = rabbit://openstack:openstack123@controller
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron

[oslo_concurrency]
lock_path = /var/lib/neutron/tmp


[root@compute ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = extnet:ens33       # extnet is the provider network name defined earlier on the controller, mapped here to the second NIC ens33
[vxlan]
enable_vxlan = false

[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
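The iptables firewall driver above relies on the kernel's bridge netfilter support. The upstream install guide recommends verifying it; the check below is an optional sketch, not an extra requirement of this deployment:

# Both values should print 1; if the keys are missing, load the br_netfilter module first
[root@compute ~]# modprobe br_netfilter
[root@compute ~]# sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables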



[root@compute ~]# vim /etc/nova/nova.conf 
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron

# Restart the Compute service, then enable the Linux bridge agent to start at boot and start it
[root@compute ~]# systemctl restart openstack-nova-compute.service
[root@compute ~]# systemctl enable neutron-linuxbridge-agent.service
Created symlink from /etc/systemd/system/multi-user.target.wants/neutron-linuxbridge-agent.service to /usr/lib/systemd/system/neutron-linuxbridge-agent.service.
[root@compute ~]# systemctl start neutron-linuxbridge-agent.service
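Optionally confirm on the compute node that both services are running before verifying from the controller:

# Both should report "active"
[root@compute ~]# systemctl is-active openstack-nova-compute.service neutron-linuxbridge-agent.service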

Verify Neutron

[root@controller ~]# openstack network agent list
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| ID                                   | Agent Type         | Host       | Availability Zone | Alive | State | Binary                    |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| 0856a37f-1b0b-45cb-b7cf-69b87350c875 | DHCP agent         | controller | nova              | :-)   | UP    | neutron-dhcp-agent        |
| 27903376-6cfa-45ed-ba3b-9cee7a74dc8f | Metadata agent     | controller | None              | :-)   | UP    | neutron-metadata-agent    |
| 74dca0d2-3315-482e-9fec-70c4023f827b | Linux bridge agent | compute    | None              | :-)   | UP    | neutron-linuxbridge-agent |
| c313688b-bc8f-4bce-9267-2f1cd2ec08a6 | Linux bridge agent | controller | None              | :-)   | UP    | neutron-linuxbridge-agent |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+

7. Configure the Dashboard (Horizon) service

Install the package:

[root@controller ~]# yum install openstack-dashboard -y

Configure the local_settings file

[root@controller ~]# vim /etc/openstack-dashboard/local_settings
# Configure the dashboard to use OpenStack services on the controller node
OPENSTACK_HOST = "controller"

# In the Dashboard configuration section, allow all hosts to access the dashboard
ALLOWED_HOSTS = ["*"]

# Configure the memcached session storage service
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'

CACHES = {
    'default': {
         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
         'LOCATION': 'controller:11211',
    }
}

# Enable the Identity API version 3
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST

# Enable support for domains
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True

# Configure the time zone
TIME_ZONE = "Asia/Shanghai"


# Configure API versions
OPENSTACK_API_VERSIONS = {
    "identity": 3,
    "image": 2,
    "volume": 3,
}

# Configure Default as the default domain for users created through the dashboard
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"

# Configure "user" as the default role for users created through the dashboard
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"

# Serve the dashboard under the /dashboard path
WEBROOT = '/dashboard'

# Enable volume backups
OPENSTACK_CINDER_FEATURES = {
    'enable_backup': True,
}


[root@controller ~]# systemctl restart httpd.service memcached.service
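The dashboard should now be reachable from a browser at http://controller/dashboard (or the controller's management IP), logging in with the Default domain and an existing Keystone user such as admin. If Apache returns an error page instead, one common fix from the upstream install guide, noted here as a suggestion rather than a step of this walkthrough, is to make sure the dashboard's Apache configuration contains the WSGIApplicationGroup directive and then restart httpd again:

# /etc/httpd/conf.d/openstack-dashboard.conf should contain the following line; add it if it is missing
WSGIApplicationGroup %{GLOBAL}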