Reference: OpenStack Docs: Installation (https://docs.openstack.org/placement/train/install/)
Placement provides a WSGI script for running the service under Apache, nginx, or any other web server that supports WSGI.
Within OpenStack it is mainly used to track and monitor resource inventories and usage, which the scheduler relies on when placing instances.
We install Placement first, on the controller node.
Create the database
1. Create the placement database:
mysql -u root -p
MariaDB [(none)]> CREATE DATABASE placement;
2. Grant privileges on the database:
MariaDB [(none)]> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'placement123';
# I use placement123 as the password here
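If you want to confirm the new account before moving on, you can log in as it and list its grants (a quick check, assuming the placement123 password above and that the controller hostname resolves; note that the official guide also issues the same GRANT for 'placement'@'localhost', which some MariaDB setups require for local logins):
# mysql -u placement -pplacement123 -h controller -e "SHOW GRANTS FOR CURRENT_USER();"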
Configure the user and endpoints
1. Source the admin credentials to gain access to admin-only CLI commands:
. admin-openrc
2. Create the placement user:
openstack user create --domain default --password-prompt placement
# I set the password to 123 here
3. Add the placement user to the service project with the admin role:
openstack role add --project service --user placement admin
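To double-check that the role was assigned, you can list the assignment with the openstack client:
# openstack role assignment list --user placement --project service --names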
4. Create the Placement API entry in the service catalog:
[root@localhost ~]# openstack service create --name placement --description "Placement API" placement
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | Placement API |
| enabled | True |
| id | a5a24a194c624065baf138396d926bae |
| name | placement |
| type | placement |
+-------------+----------------------------------+
5. Create the Placement API service endpoints (public, internal, and admin):
#openstack endpoint create --region RegionOne placement public http://controller:8778
#openstack endpoint create --region RegionOne placement internal http://controller:8778
#openstack endpoint create --region RegionOne placement admin http://controller:8778
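If you want to confirm all three endpoints were registered, list them filtered by service; each interface (public, internal, admin) should show http://controller:8778:
# openstack endpoint list --service placement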
Install and configure components
1. Install the package:
yum install openstack-placement-api -y
2. Edit /etc/placement/placement.conf and complete the following actions:
#vi /etc/placement/placement.conf
# copy the following into the file
# use the password you actually set; adjust to your environment
[DEFAULT]
[api]
auth_strategy = keystone
[cors]
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = placement
password = 123
[oslo_policy]
[placement]
[placement_database]
connection = mysql+pymysql://placement:placement123@controller/placement
[profiler]
Edit /etc/httpd/conf.d/00-placement-api.conf:
Add the following below the line ErrorLog /var/log/placement/placement-api.log (without this block, the packaged Apache configuration denies access to the Placement WSGI script under Apache 2.4 and the API returns 403 errors):
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
3. Sync the placement database (the PyMySQL warning about 'alembic_version_pkc' shown below is harmless and can be ignored):
[root@localhost placement]# su -s /bin/sh -c "placement-manage db sync" placement
/usr/lib/python2.7/site-packages/pymysql/cursors.py:170: Warning: (1280, u"Name 'alembic_version_pkc' ignored for PRIMARY key.")
result = self._query(query)
4. Restart the httpd service:
systemctl restart httpd
Check httpd's status afterwards; if the restart fails, reinstalling the httpd service is one way to recover.
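As an extra smoke test (assuming the controller hostname resolves), request the API root; a healthy deployment returns a small JSON version document rather than a 403 error:
# curl http://controller:8778
# expected output similar to: {"versions": [{"id": "v1.0", ...}]}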
5. Check Placement's status:
[root@localhost placement]# placement-status upgrade check
+----------------------------------+
| Upgrade Check Results |
+----------------------------------+
| Check: Missing Root Provider IDs |
| Result: Success |
| Details: None |
+----------------------------------+
| Check: Incomplete Consumers |
| Result: Success |
| Details: None |
+----------------------------------+
Placement is up and running!
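Optionally, you can also exercise the API itself with the osc-placement CLI plugin, as the official verification chapter suggests (the plugin is not installed by the steps above, so install it first):
# pip install osc-placement
# openstack --os-placement-api-version 1.2 resource class list --sort-column name
# openstack --os-placement-api-version 1.6 trait list --sort-column name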
Install the Nova service (on the controller node)
1. Create the databases:
#mysql -u root -p   # enter the root password
MariaDB [(none)]> CREATE DATABASE nova_api;
Query OK, 1 row affected (0.012 sec)
MariaDB [(none)]> CREATE DATABASE nova;
Query OK, 1 row affected (0.000 sec)
MariaDB [(none)]> CREATE DATABASE nova_cell0;
Query OK, 1 row affected (0.001 sec)
2. Grant access to the databases:
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'nova123';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'nova123';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'nova123';
3. Create the service credentials
Create the nova user (I set the password to 123):
#openstack user create --domain default --password-prompt nova
Add the admin role to the nova user:
#openstack role add --project service --user nova admin
Create the nova service entity:
#openstack service create --name nova --description "OpenStack Compute" compute
4. Create the Compute API service endpoints:
#openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
#openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
#openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
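As with Placement, you can quickly confirm that all three Compute endpoints were registered:
# openstack endpoint list --service compute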
Install and configure components
1. Install the packages:
#yum install openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler
2. Edit /etc/nova/nova.conf and complete the following actions:
#vi /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:openstack123@controller:5672/
# set this to the controller node's management IP (adjust to your environment)
my_ip = 192.168.134.134
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver

[api_database]
connection = mysql+pymysql://nova:nova123@controller/nova_api

[database]
connection = mysql+pymysql://nova:nova123@controller/nova

[api]
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = 123

[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip

[glance]
api_servers = http://controller:9292

[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = 123
3. Sync the databases:
#su -s /bin/sh -c "nova-manage api_db sync" nova
#su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
#su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
#su -s /bin/sh -c "nova-manage db sync" nova #执行这条命令时会报warning,我们不用管他
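As a quick sanity check (not part of the official guide), you can ask nova-manage which schema version each database is now at; both commands should print a version number without errors:
# su -s /bin/sh -c "nova-manage api_db version" nova
# su -s /bin/sh -c "nova-manage db version" nova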
Verification
Log into the database:
# mysql -u root -p
MariaDB [(none)]> show databases;
+--------------------+
| Database |
+--------------------+
| glance |
| information_schema |
| keystone |
| mysql |
| nova |
| nova_api |
| nova_cell0 |
| performance_schema |
| placement |
+--------------------+
9 rows in set (0.003 sec)
MariaDB [(none)]> use nova; show tables;
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A
Database changed
+--------------------------------------------+
| Tables_in_nova |
+--------------------------------------------+
| agent_builds |
| aggregate_hosts |
| aggregate_metadata |
| aggregates |
| allocations |
| block_device_mapping |
| bw_usage_cache |
| cells |
| certificates |
| compute_nodes |
| console_auth_tokens |
| console_pools |
| consoles |
| dns_domains |
| fixed_ips |
| floating_ips |
| instance_actions |
| instance_actions_events |
| instance_extra |
| instance_faults |
| instance_group_member |
| instance_group_policy |
| instance_groups |
| instance_id_mappings |
| instance_info_caches |
| instance_metadata |
| instance_system_metadata |
| instance_type_extra_specs |
| instance_type_projects |
| instance_types |
| instances |
| inventories |
| key_pairs |
| migrate_version |
| migrations |
| networks |
| pci_devices |
| project_user_quotas |
| provider_fw_rules |
| quota_classes |
| quota_usages |
| quotas |
| reservations |
| resource_provider_aggregates |
| resource_providers |
| s3_images |
| security_group_default_rules |
| security_group_instance_association |
| security_group_rules |
| security_groups |
| services |
| shadow_agent_builds |
| shadow_aggregate_hosts |
| shadow_aggregate_metadata |
| shadow_aggregates |
| shadow_block_device_mapping |
| shadow_bw_usage_cache |
| shadow_cells |
| shadow_certificates |
| shadow_compute_nodes |
| shadow_console_pools |
| shadow_consoles |
| shadow_dns_domains |
| shadow_fixed_ips |
| shadow_floating_ips |
| shadow_instance_actions |
| shadow_instance_actions_events |
| shadow_instance_extra |
| shadow_instance_faults |
| shadow_instance_group_member |
| shadow_instance_group_policy |
| shadow_instance_groups |
| shadow_instance_id_mappings |
| shadow_instance_info_caches |
| shadow_instance_metadata |
| shadow_instance_system_metadata |
| shadow_instance_type_extra_specs |
| shadow_instance_type_projects |
| shadow_instance_types |
| shadow_instances |
| shadow_key_pairs |
| shadow_migrate_version |
| shadow_migrations |
| shadow_networks |
| shadow_pci_devices |
| shadow_project_user_quotas |
| shadow_provider_fw_rules |
| shadow_quota_classes |
| shadow_quota_usages |
| shadow_quotas |
| shadow_reservations |
| shadow_s3_images |
| shadow_security_group_default_rules |
| shadow_security_group_instance_association |
| shadow_security_group_rules |
| shadow_security_groups |
| shadow_services |
| shadow_snapshot_id_mappings |
| shadow_snapshots |
| shadow_task_log |
| shadow_virtual_interfaces |
| shadow_volume_id_mappings |
| shadow_volume_usage_cache |
| snapshot_id_mappings |
| snapshots |
| tags |
| task_log |
| virtual_interfaces |
| volume_id_mappings |
| volume_usage_cache |
+--------------------------------------------+
110 rows in set (0.001 sec)
MariaDB [nova]> use nova_api; show tables;
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A
Database changed
+------------------------------+
| Tables_in_nova_api |
+------------------------------+
| aggregate_hosts |
| aggregate_metadata |
| aggregates |
| allocations |
| build_requests |
| cell_mappings |
| consumers |
| flavor_extra_specs |
| flavor_projects |
| flavors |
| host_mappings |
| instance_group_member |
| instance_group_policy |
| instance_groups |
| instance_mappings |
| inventories |
| key_pairs |
| migrate_version |
| placement_aggregates |
| project_user_quotas |
| projects |
| quota_classes |
| quota_usages |
| quotas |
| request_specs |
| reservations |
| resource_classes |
| resource_provider_aggregates |
| resource_provider_traits |
| resource_providers |
| traits |
| users |
+------------------------------+
32 rows in set (0.001 sec)
MariaDB [nova_api]> use nova_cell0; show tables;
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A
Database changed
+--------------------------------------------+
| Tables_in_nova_cell0 |
+--------------------------------------------+
| agent_builds |
| aggregate_hosts |
| aggregate_metadata |
| aggregates |
| allocations |
| block_device_mapping |
| bw_usage_cache |
| cells |
| certificates |
| compute_nodes |
| console_auth_tokens |
| console_pools |
| consoles |
| dns_domains |
| fixed_ips |
| floating_ips |
| instance_actions |
| instance_actions_events |
| instance_extra |
| instance_faults |
| instance_group_member |
| instance_group_policy |
| instance_groups |
| instance_id_mappings |
| instance_info_caches |
| instance_metadata |
| instance_system_metadata |
| instance_type_extra_specs |
| instance_type_projects |
| instance_types |
| instances |
| inventories |
| key_pairs |
| migrate_version |
| migrations |
| networks |
| pci_devices |
| project_user_quotas |
| provider_fw_rules |
| quota_classes |
| quota_usages |
| quotas |
| reservations |
| resource_provider_aggregates |
| resource_providers |
| s3_images |
| security_group_default_rules |
| security_group_instance_association |
| security_group_rules |
| security_groups |
| services |
| shadow_agent_builds |
| shadow_aggregate_hosts |
| shadow_aggregate_metadata |
| shadow_aggregates |
| shadow_block_device_mapping |
| shadow_bw_usage_cache |
| shadow_cells |
| shadow_certificates |
| shadow_compute_nodes |
| shadow_console_pools |
| shadow_consoles |
| shadow_dns_domains |
| shadow_fixed_ips |
| shadow_floating_ips |
| shadow_instance_actions |
| shadow_instance_actions_events |
| shadow_instance_extra |
| shadow_instance_faults |
| shadow_instance_group_member |
| shadow_instance_group_policy |
| shadow_instance_groups |
| shadow_instance_id_mappings |
| shadow_instance_info_caches |
| shadow_instance_metadata |
| shadow_instance_system_metadata |
| shadow_instance_type_extra_specs |
| shadow_instance_type_projects |
| shadow_instance_types |
| shadow_instances |
| shadow_key_pairs |
| shadow_migrate_version |
| shadow_migrations |
| shadow_networks |
| shadow_pci_devices |
| shadow_project_user_quotas |
| shadow_provider_fw_rules |
| shadow_quota_classes |
| shadow_quota_usages |
| shadow_quotas |
| shadow_reservations |
| shadow_s3_images |
| shadow_security_group_default_rules |
| shadow_security_group_instance_association |
| shadow_security_group_rules |
| shadow_security_groups |
| shadow_services |
| shadow_snapshot_id_mappings |
| shadow_snapshots |
| shadow_task_log |
| shadow_virtual_interfaces |
| shadow_volume_id_mappings |
| shadow_volume_usage_cache |
| snapshot_id_mappings |
| snapshots |
| tags |
| task_log |
| virtual_interfaces |
| volume_id_mappings |
| volume_usage_cache |
+--------------------------------------------+
110 rows in set (0.001 sec)
You can see that the nova, nova_api, and nova_cell0 databases have all been populated.
Verify that the cells were created successfully:
# su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
+-------+--------------------------------------+------------------------------------------+-------------------------------------------------+----------+
| Name  |                 UUID                 |              Transport URL               |               Database Connection               | Disabled |
+-------+--------------------------------------+------------------------------------------+-------------------------------------------------+----------+
| cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@controller/nova_cell0 | False |
| cell1 | fe52b1d2-af75-4212-892b-8d2e0a52851d | rabbit://openstack:****@controller:5672/ | mysql+pymysql://nova:****@controller/nova | False |
+-------+--------------------------------------+------------------------------------------+-------------------------------------------------+----------+
Enable the services to start at boot:
# systemctl enable openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
Start the services:
# systemctl start openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
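To confirm all four services actually came up, a quick shell loop over systemctl helps (a sketch using the service names above; each line of output should read active):
# for svc in openstack-nova-api openstack-nova-scheduler openstack-nova-conductor openstack-nova-novncproxy; do systemctl is-active $svc; done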
Installing Nova on the compute node
(Note: every compute node needs Nova installed; repeat these steps on each one.)
Install and configure components
1. Install the package:
# yum install openstack-nova-compute -y
2. Edit /etc/nova/nova.conf and complete the following actions:
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:openstack123@controller
# set this to the compute node's management IP (hostname -I shows it; adjust to your environment)
my_ip = 192.168.134.135
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver

[api]
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = 123

[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html

[glance]
api_servers = http://controller:9292

[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = 123
3. Check whether the CPU supports hardware virtualization:
# egrep -c '(vmx|svm)' /proc/cpuinfo   # a non-zero result means it is supported
If the command returns 0, your hardware does not support hardware-accelerated virtualization; configure /etc/nova/nova.conf to use QEMU instead of KVM:
[libvirt]
# ...
virt_type = qemu
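If the libvirt client tools are installed, virt-host-validate offers a more thorough hardware check than grepping cpuinfo (an optional extra, not part of the official guide):
# virt-host-validate qemu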
4. Start the services:
# systemctl enable libvirtd.service openstack-nova-compute.service;systemctl start libvirtd.service openstack-nova-compute.service
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-compute.service to /usr/lib/systemd/system/openstack-nova-compute.service.
Verification
Run the following command on the controller node:
[root@localhost ~]# openstack compute service list --service nova-compute
+----+--------------+----------+------+---------+-------+----------------------------+
| ID | Binary | Host | Zone | Status | State | Updated At |
+----+--------------+----------+------+---------+-------+----------------------------+
| 7 | nova-compute | compute1 | nova | enabled | up | 2022-04-20T06:33:11.000000 |
+----+--------------+----------+------+---------+-------+----------------------------+
Host discovery (this must be run every time a new compute host is added):
# su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
Found 2 cell mappings.
Skipping cell0 since it does not contain hosts.
Getting computes from cell 'cell1': fe52b1d2-af75-4212-892b-8d2e0a52851d
Checking host mapping for compute host 'compute1': c9d89385-7a3e-4619-8d09-50b0e1321e6d
Creating host mapping for compute host 'compute1': c9d89385-7a3e-4619-8d09-50b0e1321e6d
Found 1 unmapped computes in cell: fe52b1d2-af75-4212-892b-8d2e0a52851d
Optional: each time you add new compute nodes, you must run the discovery command above on the controller node to register them. Alternatively, you can set an appropriate discovery interval in /etc/nova/nova.conf:
#vi /etc/nova/nova.conf
[scheduler]
discover_hosts_in_cells_interval = 300
Restart the Nova services and components:
systemctl restart openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
You can watch the logs with tail -f /var/log/nova/*.log (on the controller node) and tail -f /var/log/nova/nova-compute.log (on the compute node); as long as no errors appear, everything is working!
That completes the Nova configuration!
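As a final sanity check, the official guide also recommends running nova-status on the controller node; every check should report Success:
# nova-status upgrade check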