Nova具体功能如下:
1 实例生命周期管理
2 管理计算资源
3 网络和认证管理
4 REST风格的API
5 异步的最终一致性通信
6 Hypervisor透明:支持Xen、XenServer/XCP、KVM、UML、VMware vSphere 和 Hyper-V
7.1 配置Nova数据库
#在任意控制节点创建数据库,数据库自动同步,以controller160节点为例;
#使用root登陆数据库:
mysql -u root -p
#创建nova数据库:
CREATE DATABASE nova_api;
CREATE DATABASE nova;
CREATE DATABASE nova_cell0;
#授予对nova数据库的访问权限,刷新退出数据库:
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' \
IDENTIFIED BY 'nova.123';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' \
IDENTIFIED BY 'nova.123';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \
IDENTIFIED BY 'nova.123';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \
IDENTIFIED BY 'nova.123';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' \
IDENTIFIED BY 'nova.123';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' \
IDENTIFIED BY 'nova.123';
flush privileges;
exit
7.2 加载管理凭证
source adminrc.sh
7.3 创建nova相关服务凭证
#创建nova服务用户,并设置密码为nova.123
openstack user create --domain default --password-prompt nova
#输出
User Password:
Repeat User Password:
+---------------------+----------------------------------+
| Field | Value |
+---------------------+----------------------------------+
| domain_id | default |
| enabled | True |
| id | b058dad78e26482da8c42698a5844a39 |
| name | nova |
| options | {} |
| password_expires_at | None |
+---------------------+----------------------------------+
#将nova用户加入service项目并赋予admin角色,无输出
openstack role add --project service --user nova admin
#创建nova服务
openstack service create --name nova --description "OpenStack Compute" compute
#输出
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | OpenStack Compute |
| enabled | True |
| id | c1dd935a10df41bcbb1313365d772578 |
| name | nova |
| type | compute |
+-------------+----------------------------------+
7.4 创建compute API endpoints
#public
openstack endpoint create --region RegionOne compute public http://controller168:8774/v2.1
#输出
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 4a828f59e7534049bf9b459a20c11714 |
| interface | public |
| region | RegionOne |
| region_id | RegionOne |
| service_id | c1dd935a10df41bcbb1313365d772578 |
| service_name | nova |
| service_type | compute |
| url | http://controller168:8774/v2.1 |
+--------------+----------------------------------+
#internal
openstack endpoint create --region RegionOne compute internal http://controller168:8774/v2.1
#输出
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | fdf500b5acfa41dc8870312d9b2e8c26 |
| interface | internal |
| region | RegionOne |
| region_id | RegionOne |
| service_id | c1dd935a10df41bcbb1313365d772578 |
| service_name | nova |
| service_type | compute |
| url | http://controller168:8774/v2.1 |
+--------------+----------------------------------+
#admin
openstack endpoint create --region RegionOne compute admin http://controller168:8774/v2.1
#输出
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | bfead8cc8232443f826b6b6b8631e265 |
| interface | admin |
| region | RegionOne |
| region_id | RegionOne |
| service_id | c1dd935a10df41bcbb1313365d772578 |
| service_name | nova |
| service_type | compute |
| url | http://controller168:8774/v2.1 |
+--------------+----------------------------------+
7.5 部署与配置nova - ALL Controller
#安装包
yum install openstack-nova-api openstack-nova-conductor \
openstack-nova-novncproxy openstack-nova-scheduler -y
#备份nova配置
cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
egrep -v "^$|^#" /etc/nova/nova.conf.bak >/etc/nova/nova.conf
#配置nova配置文件,在对应项底下增加以下字段
#vim /etc/nova/nova.conf
[DEFAULT]
# ...
my_ip = 172.16.1.160
transport_url = rabbit://rabbitmq:rabbitmq.123@controller160:5672,rabbitmq:rabbitmq.123@controller161:5672,rabbitmq:rabbitmq.123@controller162:5672
osapi_compute_listen=$my_ip
osapi_compute_listen_port=8774
metadata_listen=$my_ip
metadata_listen_port=8775
[api_database]
# ...
connection = mysql+pymysql://nova:nova.123@controller168/nova_api
[database]
# ...
connection = mysql+pymysql://nova:nova.123@controller168/nova
[api]
# ...
auth_strategy = keystone
[keystone_authtoken]
# ...
www_authenticate_uri = http://controller168:5000
auth_url = http://controller168:5000
memcached_servers = controller160:11211,controller161:11211,controller162:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova.123
[vnc]
enabled = true
# ...
server_listen = $my_ip
server_proxyclient_address = $my_ip
novncproxy_host=$my_ip
novncproxy_port=6080
[glance]
# ...
api_servers = http://controller168:9292
[oslo_concurrency]
# ...
lock_path = /var/lib/nova/tmp
[placement]
# ...
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller168:5000/v3
username = placement
password = placement.123
#填充nova-api数据库,无输出
su -s /bin/sh -c "nova-manage api_db sync" nova
#填充cell0数据库,无输出
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
#创建cell1表
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
#输出,可忽略warning
--transport-url not provided in the command line, using the value [DEFAULT]/transport_url from the configuration file
--database_connection not provided in the command line, using the value [database]/connection from the configuration file
ec2bd311-33de-4c9f-ae88-6d351c8abbb2
#填充nova数据库
su -s /bin/sh -c "nova-manage db sync" nova
#输出,可忽略
/usr/lib/python3/dist-packages/pymysql/cursors.py:165: Warning: (1831, 'Duplicate index `block_device_mapping_instance_uuid_virtual_name_device_name_idx`. This is deprecated and will be disallowed in a future release.')
result = self._query(query)
/usr/lib/python3/dist-packages/pymysql/cursors.py:165: Warning: (1831, 'Duplicate index `uniq_instances0uuid`. This is deprecated and will be disallowed in a future release.')
result = self._query(query)
#验证nova的cell0及cell1是否已成功创建
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
#输出
+-------+--------------------------------------+---------------------------------------------------------------------------------+----------------------------------------------------+----------+
| Name | UUID | Transport URL | Database Connection | Disabled |
+-------+--------------------------------------+---------------------------------------------------------------------------------+----------------------------------------------------+----------+
| cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@controller168/nova_cell0 | False |
| cell1 | 3c43b8cf-140d-401f-8b1c-4140ddad20a3 | rabbit://rabbitmq:****@controller160:5672,controller161:5672,controller162:5672 | mysql+pymysql://nova:****@controller168/nova | False |
+-------+--------------------------------------+---------------------------------------------------------------------------------+----------------------------------------------------+----------+
#验证nova数据库是否正常写入:
mysql -h controller168 -unova -pnova.123 -e "use nova;show tables;"
#重启nova服务,并配置开机启动:
systemctl enable \
openstack-nova-api.service \
openstack-nova-scheduler.service \
openstack-nova-conductor.service \
openstack-nova-novncproxy.service
systemctl start \
openstack-nova-api.service \
openstack-nova-scheduler.service \
openstack-nova-conductor.service \
openstack-nova-novncproxy.service
systemctl status \
openstack-nova-api.service \
openstack-nova-scheduler.service \
openstack-nova-conductor.service \
openstack-nova-novncproxy.service
#查看端口使用情况
[root@controller161 ~]# netstat -tunlp | egrep '8774|8775|8778|6080'
tcp 0 0 172.16.1.161:6080 0.0.0.0:* LISTEN 3228/python3
tcp 0 0 172.16.1.168:6080 0.0.0.0:* LISTEN 727/haproxy
tcp 0 0 172.16.1.161:8774 0.0.0.0:* LISTEN 697/python3
tcp 0 0 172.16.1.168:8774 0.0.0.0:* LISTEN 727/haproxy
tcp 0 0 172.16.1.161:8775 0.0.0.0:* LISTEN 697/python3
tcp 0 0 172.16.1.168:8775 0.0.0.0:* LISTEN 727/haproxy
tcp 0 0 172.16.1.161:8778 0.0.0.0:* LISTEN 696/httpd
tcp 0 0 172.16.1.168:8778 0.0.0.0:* LISTEN 727/haproxy
7.6 添加pcs资源
#在任意控制节点操作;
#添加资源openstack-nova-api,openstack-nova-scheduler,openstack-nova-conductor与openstack-nova-novncproxy
pcs resource create openstack-nova-api systemd:openstack-nova-api clone interleave=true
pcs resource create openstack-nova-scheduler systemd:openstack-nova-scheduler clone interleave=true
pcs resource create openstack-nova-conductor systemd:openstack-nova-conductor clone interleave=true
pcs resource create openstack-nova-novncproxy systemd:openstack-nova-novncproxy clone interleave=true
#经验证,建议openstack-nova-api,openstack-nova-conductor与openstack-nova-novncproxy等无状态服务以active/active模式运行;
#注意:上述命令将openstack-nova-scheduler也创建为clone资源(即active/active);若希望scheduler以active/passive模式运行,应去掉clone参数单独创建该资源
#查看pcs资源;
[root@controller161 ~]# pcs status
Cluster name: openstack-u-cluster
Cluster Summary:
* Stack: corosync
* Current DC: controller160 (version 2.0.3-5.el8_2.1-4b1f869f0f) - partition with quorum
* Last updated: Fri Jun 19 15:30:35 2020
* Last change: Fri Jun 19 15:30:31 2020 by root via cibadmin on controller161
* 3 nodes configured
* 22 resource instances configured
Node List:
* Online: [ controller160 controller161 controller162 ]
Full List of Resources:
* vip (ocf::heartbeat:IPaddr2): Started controller161
* Clone Set: lb-haproxy-clone [lb-haproxy]:
* Started: [ controller161 ]
* Stopped: [ controller160 controller162 ]
* Clone Set: openstack-keystone-clone [openstack-keystone]:
* Started: [ controller160 controller161 controller162 ]
* Clone Set: openstack-glance-api-clone [openstack-glance-api]:
* Started: [ controller160 controller161 controller162 ]
* Clone Set: openstack-nova-api-clone [openstack-nova-api]:
* Started: [ controller160 controller161 controller162 ]
* Clone Set: openstack-nova-scheduler-clone [openstack-nova-scheduler]:
* Started: [ controller160 controller161 controller162 ]
* Clone Set: openstack-nova-conductor-clone [openstack-nova-conductor]:
* Started: [ controller160 controller161 controller162 ]
* Clone Set: openstack-nova-novncproxy-clone [openstack-nova-novncproxy]:
* Started: [ controller160 controller161 controller162 ]
Daemon Status:
corosync: active/enabled
pacemaker: active/enabled
pcsd: active/enabled
7.7 nova服务验证
#加载管理凭证
source adminrc.sh
#执行状态检查,所有检查项结果均为Success即为正常
nova-status upgrade check
#输出
+--------------------------------------------------------------------+
| Upgrade Check Results |
+--------------------------------------------------------------------+
| Check: Cells v2 |
| Result: Success |
| Details: No host mappings or compute nodes were found. Remember to |
| run command 'nova-manage cell_v2 discover_hosts' when new |
| compute hosts are deployed. |
+--------------------------------------------------------------------+
| Check: Placement API |
| Result: Success |
| Details: None |
+--------------------------------------------------------------------+
| Check: Ironic Flavor Migration |
| Result: Success |
| Details: None |
+--------------------------------------------------------------------+
| Check: Cinder API |
| Result: Success |
| Details: None |
+--------------------------------------------------------------------+
| Check: Policy Scope-based Defaults |
| Result: Success |
| Details: None |
+--------------------------------------------------------------------+
至此,nova服务已部署完毕,如有问题请联系我改正,感激不尽!
7.x 部署过程遇到的问题汇总
eg1.ERROR oslo_service.service amqp.exceptions.AccessRefused: (0, 0): (403) ACCESS_REFUSED - Login was refused using authentication mechanism AMQPLAIN. For details see the broker logfile.
解决方案:
1 查看nova配置里的rabbitmq配置是否正常
2 查看rabbitmq是否有创建对应的用户及权限
3 如果是账号错误需要操作如下:
root@controller160:/etc/apt# su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
+-------+--------------------------------------+---------------------------------------------+----------------------------------------------------+----------+
| Name | UUID | Transport URL | Database Connection | Disabled |
+-------+--------------------------------------+---------------------------------------------+----------------------------------------------------+----------+
| cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@controller160/nova_cell0 | False |
| cell1 | a3527e1f-85dc-46a6-bd17-af5f19b3a725 | rabbit://openstack:****@controller160:5672/ | mysql+pymysql://nova:****@controller160/nova | False |
+-------+--------------------------------------+---------------------------------------------+----------------------------------------------------+----------+
#停止服务
root@controller160:/etc/apt# systemctl stop nova-api nova-scheduler nova-conductor nova-novncproxy
#修改nova.conf配置里的rabbitmq_url为正确的账号
#删除使用错误账户的cell
root@controller160:/etc/apt# su -s /bin/sh -c "nova-manage cell_v2 delete_cell --cell_uuid a3527e1f-85dc-46a6-bd17-af5f19b3a725" nova
#重新创建cell1
root@controller160:/etc/apt# su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
--transport-url not provided in the command line, using the value [DEFAULT]/transport_url from the configuration file
--database_connection not provided in the command line, using the value [database]/connection from the configuration file
74d4b65d-4d9c-4699-885e-4a773e295ef8
#验证是否正确
root@controller160:/etc/apt# su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
+-------+--------------------------------------+--------------------------------------------+----------------------------------------------------+----------+
| Name | UUID | Transport URL | Database Connection | Disabled |
+-------+--------------------------------------+--------------------------------------------+----------------------------------------------------+----------+
| cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@controller160/nova_cell0 | False |
| cell1 | 74d4b65d-4d9c-4699-885e-4a773e295ef8 | rabbit://rabbitmq:****@controller160:5672/ | mysql+pymysql://nova:****@controller160/nova | False |
+-------+--------------------------------------+--------------------------------------------+----------------------------------------------------+----------+
#重新启动服务
systemctl start nova-api nova-scheduler nova-conductor nova-novncproxy
#查看对应日志是否还有报错
eg2.All hosts must be set with username/password or not at the same time. Hosts with credentials are: ['controller160']. Hosts without credentials are ['controller161', 'controller162'].
解决方案:该错误表示transport_url中的各host必须统一——要么全部携带用户名密码,要么全部不携带;官方文档说明未指定用户名密码的host会被自动忽略
现配置:transport_url = rabbit://rabbitmq:rabbitmq.123@controller160:5672,controller161:5672,controller162:5672
改成:transport_url = rabbit://rabbitmq:rabbitmq.123@controller160:5672,rabbitmq:rabbitmq.123@controller161:5672,rabbitmq:rabbitmq.123@controller162:5672
可解决
eg3.ERROR nova.wsgi [-] Could not bind to 0.0.0.0:8774: OSError: [Errno 98] Address already in use
解决方案:vim /etc/nova/nova.conf 在default底下添加
bind_host = 172.16.1.160
osapi_compute_listen=$my_ip
osapi_compute_listen_port=8774
metadata_listen=$my_ip
metadata_listen_port=8775
eg4.[root@controller160 ~]# nova-status upgrade check
Error:
Traceback (most recent call last):
File "/usr/lib/python3.6/site-packages/nova/cmd/status.py", line 465, in main
ret = fn(*fn_args, **fn_kwargs)
File "/usr/lib/python3.6/site-packages/oslo_upgradecheck/upgradecheck.py", line 102, in check
result = func(self)
File "/usr/lib/python3.6/site-packages/nova/cmd/status.py", line 165, in _check_placement
versions = self._placement_get("/")
File "/usr/lib/python3.6/site-packages/nova/cmd/status.py", line 155, in _placement_get
return client.get(path, raise_exc=True).json()
File "/usr/lib/python3.6/site-packages/keystoneauth1/adapter.py", line 386, in get
return self.request(url, 'GET', **kwargs)
File "/usr/lib/python3.6/site-packages/keystoneauth1/adapter.py", line 248, in request
return self.session.request(url, method, **kwargs)
File "/usr/lib/python3.6/site-packages/keystoneauth1/session.py", line 968, in request
raise exceptions.from_response(resp, method, url)
原因是:placement API的httpd配置中缺少对/usr/bin目录的访问授权(Require all granted),导致请求被拒绝
解决方案:vim /etc/httpd/conf.d/00-placement-api.conf 在#SSLCertificateKeyFile ...底下添加
#Placement API
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
#保存退出
systemctl restart httpd