Environment
Host networking
Provision a virtual machine with 8 GB RAM, 2 CPU cores, and 2 NICs
Rename the network interfaces to the ethX scheme (append net.ifnames=0 to the kernel command line; the change takes effect after a reboot)
[root@controller ~]# vim /boot/grub2/grub.cfg
linux16 /vmlinuz-3.10.0-957.el7.x86_64 root=/dev/mapper/rhel-root ro crashkernel=auto rd.lvm.lv=rhel/root rd.lvm.lv=rhel/swap rhgb quiet LANG=en_US.UTF-8 net.ifnames=0
[root@controller ~]# cd /etc/sysconfig/network-scripts/
[root@controller network-scripts]# mv ifcfg-ens33 ifcfg-eth0
[root@controller network-scripts]# vim ifcfg-eth0
NAME=eth0
DEVICE=eth0
TYPE=Ethernet
[root@controller network-scripts]# cp ifcfg-eth0 ifcfg-eth1
[root@controller network-scripts]# vim ifcfg-eth1
NAME=eth1
DEVICE=eth1
ONBOOT=yes
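For reference, a complete static configuration for the two interfaces might look like the sketch below. The addressing is an assumption based on the management IP (192.168.81.11), gateway (192.168.81.2), and DNS server used later in this guide; eth1 is left without an address because it serves as the provider interface:
# ifcfg-eth0 - management network (assumed addressing)
TYPE=Ethernet
NAME=eth0
DEVICE=eth0
ONBOOT=yes
BOOTPROTO=none
IPADDR=192.168.81.11
PREFIX=24
GATEWAY=192.168.81.2
DNS1=114.114.114.114
# ifcfg-eth1 - provider interface, no IP address assigned
TYPE=Ethernet
NAME=eth1
DEVICE=eth1
ONBOOT=yes
BOOTPROTO=none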
Name resolution
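The hosts entries are left implicit here; a minimal sketch, assuming the management addresses used throughout this guide (192.168.81.11 for controller, 192.168.81.12 for compute1), applied on every node:
[root@controller ~]# vim /etc/hosts
192.168.81.11   controller
192.168.81.12   compute1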
Network Time Protocol (NTP)
Time synchronization
[root@controller network-scripts]# vim /etc/chrony.conf
server ntp1.aliyun.com iburst
[root@controller network-scripts]# systemctl enable --now chronyd
[root@controller network-scripts]# systemctl restart chronyd
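To confirm synchronization against the configured server (a check not shown in the original steps), query chrony; the ^* marker denotes the currently selected source:
[root@controller network-scripts]# chronyc sources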
OpenStack packages
First enable the OpenStack repository (see the OpenStack documentation on OpenStack packages)
[root@controller network-scripts]# cat /etc/yum.repos.d/mitaka.repo
[mitaka]
name=mitaka
baseurl=file:///opt/mitaka/
gpgcheck=0
Upgrade the packages
[root@controller mitaka]# yum upgrade
Install the OpenStack client
[root@controller mitaka]# yum install python-openstackclient
SQL database
Install and configure components
Install the packages
[root@controller mitaka]# yum install mariadb mariadb-server python2-PyMySQL
Create and edit /etc/my.cnf.d/openstack.cnf
In the [mysqld] section, set bind-address to the management IP address of the controller node so that other nodes can reach the database over the management network
In the [mysqld] section, set the following keys to enable some useful options and the UTF-8 character set
[root@controller mitaka]# vim /etc/my.cnf.d/openstack.cnf
[mysqld]
bind-address = 192.168.81.11
default-storage-engine = innodb
innodb_file_per_table
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
Start the service and run the initial security setup
[root@controller mitaka]# systemctl enable --now mariadb.service
[root@controller mitaka]# mysql_secure_installation
Message queue
Install the package
[root@controller mitaka]# yum install rabbitmq-server
[root@controller mitaka]# systemctl enable --now rabbitmq-server.service
Add the openstack user
[root@controller mitaka]# rabbitmqctl add_user openstack openstack   # the second "openstack" is the password
Permit configuration, write, and read access for the openstack user
[root@controller mitaka]# rabbitmqctl set_permissions openstack ".*" ".*" ".*"
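As an optional check (not part of the original steps), the granted permissions can be listed:
[root@controller mitaka]# rabbitmqctl list_user_permissions openstack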
Enable the RabbitMQ management plugin
[root@controller mitaka]# rabbitmq-plugins enable rabbitmq_management
Browse to http://192.168.81.11:15672 and log in as guest/guest
Memcached
Install and configure components
[root@controller ~]# yum install memcached python-memcached
[root@controller ~]# vim /etc/sysconfig/memcached
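The edit itself is not shown above; a minimal sketch of /etc/sysconfig/memcached, assuming the service should also listen on the controller's management address so it is reachable by the controller name:
PORT="11211"
USER="memcached"
MAXCONN="1024"
CACHESIZE="64"
OPTIONS="-l 127.0.0.1,::1,controller"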
[root@controller ~]# systemctl enable --now memcached.service
Identity service
Install and configure
Before you configure the OpenStack Identity service, you must create a database and an administration token
Create the database
[root@controller ~]# mysql -u root -pshg12345
CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' \
-> IDENTIFIED BY 'keystone';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \
-> IDENTIFIED BY 'keystone';
Generate a random value to use as the administration token during the initial configuration
[root@controller ~]# openssl rand -hex 10
fb4ca0491adb3e7e66d5
Install and configure components
[root@controller ~]# yum install openstack-keystone httpd mod_wsgi
[root@controller ~]# vim /etc/keystone/keystone.conf
[DEFAULT]
admin_token = fb4ca0491adb3e7e66d5
[database]
connection = mysql+pymysql://keystone:keystone@controller/keystone
[token]
provider = fernet
Populate the Identity service database
Initialize the Fernet keys
[root@controller ~]# su -s /bin/sh -c "keystone-manage db_sync" keystone
[root@controller ~]# keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
Configure the Apache HTTP server
Set the ServerName option to reference the controller node
[root@controller ~]# vim /etc/httpd/conf/httpd.conf
ServerName controller
Create the file /etc/httpd/conf.d/wsgi-keystone.conf with the following content
[root@controller ~]# vim /etc/httpd/conf.d/wsgi-keystone.conf
Listen 5000
Listen 35357
<VirtualHost *:5000>
WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
WSGIProcessGroup keystone-public
WSGIScriptAlias / /usr/bin/keystone-wsgi-public
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
ErrorLogFormat "%{cu}t %M"
ErrorLog /var/log/httpd/keystone-error.log
CustomLog /var/log/httpd/keystone-access.log combined
<Directory /usr/bin>
Require all granted
</Directory>
</VirtualHost>
<VirtualHost *:35357>
WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
WSGIProcessGroup keystone-admin
WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
ErrorLogFormat "%{cu}t %M"
ErrorLog /var/log/httpd/keystone-error.log
CustomLog /var/log/httpd/keystone-access.log combined
<Directory /usr/bin>
Require all granted
</Directory>
</VirtualHost>
[root@controller ~]# systemctl enable --now httpd.service
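As an optional sanity check (not part of the original steps), each Identity endpoint should now answer with a JSON version document:
[root@controller ~]# curl http://controller:5000/v3
[root@controller ~]# curl http://controller:35357/v3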
Create the service entity and API endpoints
Configure the authentication token, the endpoint URL, and the Identity API version
[root@controller ~]# head /etc/keystone/keystone.conf
[root@controller ~]# export OS_TOKEN=fb4ca0491adb3e7e66d5
[root@controller ~]# export OS_URL=http://controller:35357/v3
[root@controller ~]# export OS_IDENTITY_API_VERSION=3
Create the service entity for the Identity service
[root@controller ~]# openstack service create \
> --name keystone --description "OpenStack Identity" identity
Create the Identity service API endpoints
[root@controller ~]# openstack endpoint create --region RegionOne \
> identity public http://controller:5000/v3
[root@controller ~]# openstack endpoint create --region RegionOne \
> identity internal http://controller:5000/v3
[root@controller ~]# openstack endpoint create --region RegionOne \
> identity admin http://controller:35357/v3
[root@controller ~]# openstack endpoint list
Create a domain, projects, users, and roles
Create the default domain
[root@controller ~]# openstack domain create --description "Default Domain" default
Create an administrative project, user, and role for administrative operations in your environment
Create the admin project, user, and role, and add the admin role to the admin project and user
[root@controller ~]# openstack project create --domain default \
> --description "Admin Project" admin
[root@controller ~]# openstack user create --domain default \
> --password admin admin
[root@controller ~]# openstack role create admin
[root@controller ~]# openstack role add --project admin --user admin admin
Create the service project
[root@controller ~]# openstack project create --domain default \
> --description "Service Project" service
Create the demo project, user, and role, and add the user role to the demo project and user
[root@controller ~]# openstack project create --domain default \
> --description "Demo Project" demo
[root@controller ~]# openstack user create --domain default \
> --password demo demo
[root@controller ~]# openstack role create user
[root@controller ~]# openstack role add --project demo --user demo user
Verify operation
Unset the temporary OS_TOKEN and OS_URL environment variables
[root@controller ~]# unset OS_TOKEN OS_URL
As the admin user, request an authentication token
[root@controller ~]# openstack --os-auth-url http://controller:35357/v3 \
> --os-project-domain-name default --os-user-domain-name default \
> --os-project-name admin --os-username admin token issue
Password:
As the demo user, request an authentication token
[root@controller ~]# openstack --os-auth-url http://controller:5000/v3 \
> --os-project-domain-name default --os-user-domain-name default \
> --os-project-name demo --os-username demo token issue
Create OpenStack client environment scripts
Create the scripts
[root@controller ~]# vim admin-openrc
export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
[root@controller ~]# vim demo-openrc
export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=demo
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
Use the scripts
[root@controller ~]# source admin-openrc
[root@controller ~]# openstack user list
[root@controller ~]# source demo-openrc
[root@controller ~]# openstack user list
Image service
Install and configure
Create the database
[root@controller ~]# mysql -u root -p
MariaDB [(none)]> CREATE DATABASE glance;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \
-> IDENTIFIED BY 'glance';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \
-> IDENTIFIED BY 'glance';
Source the admin credentials to gain access to admin-only CLI commands
[root@controller ~]# . admin-openrc
Create the service credentials
Create the glance user and add the admin role to the glance user and service project
[root@controller ~]# openstack user create --domain default --password glance glance
[root@controller ~]# openstack role add --project service --user glance admin
Create the glance service entity
[root@controller ~]# openstack service create --name glance \
> --description "OpenStack Image" image
Create the Image service API endpoints
[root@controller ~]# openstack endpoint create --region RegionOne \
> image public http://controller:9292
[root@controller ~]# openstack endpoint create --region RegionOne \
> image internal http://controller:9292
[root@controller ~]# openstack endpoint create --region RegionOne \
> image admin http://controller:9292
Install and configure components
[root@controller ~]# yum install openstack-glance
Edit the file /etc/glance/glance-api.conf and complete the following actions
[root@controller ~]# vim /etc/glance/glance-api.conf
[database]
connection = mysql+pymysql://glance:glance@controller/glance
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance
[paste_deploy]
flavor = keystone
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
Edit the file /etc/glance/glance-registry.conf
[database]
connection = mysql+pymysql://glance:glance@controller/glance
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = glance
[paste_deploy]
flavor = keystone
Populate the Image service database
[root@controller ~]# su -s /bin/sh -c "glance-manage db_sync" glance
Finalize installation
[root@controller ~]# systemctl enable openstack-glance-api.service \
> openstack-glance-registry.service
[root@controller ~]# systemctl start openstack-glance-api.service \
> openstack-glance-registry.service
Verify operation
Source the admin credentials and download the source image
[root@controller ~]# . admin-openrc
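The download step itself is not shown above; assuming internet access, the CirrOS image referenced below can be fetched from the official mirror:
[root@controller ~]# wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img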
Upload the image to the Image service using the QCOW2 disk format, bare container format, and public visibility so that all projects can access it
[root@controller ~]# openstack image create "cirros" \
> --file cirros-0.3.4-x86_64-disk.img \
> --disk-format qcow2 --container-format bare \
> --public
Confirm the upload of the image and validate its attributes
[root@controller ~]# openstack image list
Compute service
Install and configure the controller node
Create the databases
[root@controller ~]# mysql -u root -pshg12345
MariaDB [(none)]> CREATE DATABASE nova_api;
MariaDB [(none)]> CREATE DATABASE nova;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' \
-> IDENTIFIED BY 'nova';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' \
-> IDENTIFIED BY 'nova';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \
-> IDENTIFIED BY 'nova';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \
-> IDENTIFIED BY 'nova';
Source the admin credentials
[root@controller ~]# . admin-openrc
Create the service credentials
Create the nova user and add the admin role to it
[root@controller ~]# openstack user create --domain default --password nova nova
[root@controller ~]# openstack role add --project service --user nova admin
Create the nova service entity
[root@controller ~]# openstack service create --name nova \
> --description "OpenStack Compute" compute
Create the Compute service API endpoints
[root@controller ~]# openstack endpoint create --region RegionOne \
> compute public http://controller:8774/v2.1/%\(tenant_id\)s
[root@controller ~]# openstack endpoint create --region RegionOne \
> compute internal http://controller:8774/v2.1/%\(tenant_id\)s
[root@controller ~]# openstack endpoint create --region RegionOne \
> compute admin http://controller:8774/v2.1/%\(tenant_id\)s
Install and configure components
[root@controller ~]# yum install openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler
[root@controller ~]# vim /etc/nova/nova.conf
Enable only the compute and metadata APIs
[DEFAULT]
enabled_apis = osapi_compute,metadata
Configure database access
[api_database]
connection = mysql+pymysql://nova:nova@controller/nova_api
[database]
connection = mysql+pymysql://nova:nova@controller/nova
Configure RabbitMQ message queue access
[DEFAULT]
rpc_backend = rabbit
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = openstack
Configure Identity service access
[DEFAULT]
auth_strategy = keystone
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova
Set my_ip to the management interface IP address of the controller node and enable support for the Networking service
[DEFAULT]
my_ip = 192.168.81.11
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
Configure the VNC proxy to use the management interface IP address of the controller node
[vnc]
vncserver_listen = $my_ip
vncserver_proxyclient_address = $my_ip
Configure the location of the Image service API
[glance]
api_servers = http://controller:9292
Configure the lock path
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
Populate the Compute databases
[root@controller ~]# su -s /bin/sh -c "nova-manage api_db sync" nova
[root@controller ~]# su -s /bin/sh -c "nova-manage db sync" nova
[root@controller ~]# systemctl enable --now openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
Verify operation
[root@controller ~]# openstack compute service list
Install and configure a compute node
Create a new virtual machine
Copy the yum repository configuration from the controller and adjust the network interfaces as before
Install and configure components
[root@compute1 tmp]# yum install openstack-nova-compute
[root@compute1 tmp]# vim /etc/nova/nova.conf
Configure RabbitMQ message queue access
[DEFAULT]
rpc_backend = rabbit
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = openstack
Configure Identity service access
[DEFAULT]
auth_strategy = keystone
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = nova
Set the my_ip option and enable support for the Networking service
[DEFAULT]
my_ip = 192.168.81.12
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
Enable and configure remote console access
[vnc]
enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html
Configure the location of the Image service API
[glance]
api_servers = http://controller:9292
Configure the lock path
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
Determine whether your compute node supports hardware acceleration for virtual machines
[root@compute1 tmp]# egrep -c '(vmx|svm)' /proc/cpuinfo
2
If this command returns a value of one or greater, your compute node supports hardware acceleration and no additional configuration is needed.
If this command returns a value of zero, your compute node does not support hardware acceleration and you must configure libvirt to use QEMU instead of KVM.
In that case, edit the [libvirt] section of /etc/nova/nova.conf as follows
[libvirt]
...
virt_type = qemu
Start the Compute service, including its dependencies
[root@compute1 tmp]# systemctl enable --now libvirtd.service openstack-nova-compute.service
Verify operation
[root@controller ~]# openstack compute service list
Networking service
Install and configure the controller node
Create the database
[root@controller ~]# mysql -u root -pshg12345
MariaDB [(none)]> CREATE DATABASE neutron;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' \
-> IDENTIFIED BY 'neutron';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \
-> IDENTIFIED BY 'neutron';
Create the service credentials
Create the neutron user and add the admin role to it
[root@controller ~]# openstack user create --domain default --password neutron neutron
[root@controller ~]# openstack role add --project service --user neutron admin
Create the neutron service entity
[root@controller ~]# openstack service create --name neutron \
> --description "OpenStack Networking" network
Create the Networking service API endpoints
[root@controller ~]# openstack endpoint create --region RegionOne \
> network public http://controller:9696
[root@controller ~]# openstack endpoint create --region RegionOne \
> network internal http://controller:9696
[root@controller ~]# openstack endpoint create --region RegionOne \
> network admin http://controller:9696
Provider (public) networks
Install the components
[root@controller ~]# yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables
[root@controller ~]# vim /etc/neutron/neutron.conf
Configure database access
[database]
connection = mysql+pymysql://neutron:neutron@controller/neutron
Enable the ML2 plug-in and disable other plug-ins; configure RabbitMQ message queue access; configure Identity service access
[DEFAULT]
core_plugin = ml2
service_plugins =
rpc_backend = rabbit
auth_strategy = keystone
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = openstack
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
Configure Networking to notify Compute of network topology changes
[DEFAULT]
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
[nova]
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova
Configure the lock path
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
Configure the Modular Layer 2 (ML2) plug-in
[root@controller ~]# vim /etc/neutron/plugins/ml2/ml2_conf.ini
Enable flat and VLAN networks, disable self-service (private) networks, enable the Linux bridge mechanism, and enable the port security extension driver
[ml2]
type_drivers = flat,vlan
tenant_network_types =
mechanism_drivers = linuxbridge
extension_drivers = port_security
Configure the provider virtual network as a flat network
[ml2_type_flat]
flat_networks = provider
Enable ipset to increase the efficiency of security group rules
[securitygroup]
enable_ipset = True
Configure the Linux bridge agent
[root@controller ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
Map the provider virtual network to the provider physical network interface
[linux_bridge]
physical_interface_mappings = provider:eth1
Disable VXLAN overlay networks
[vxlan]
enable_vxlan = False
Enable security groups and configure the Linux bridge iptables firewall driver
[securitygroup]
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
Configure the DHCP agent
[root@controller ~]# vim /etc/neutron/dhcp_agent.ini
Configure the Linux bridge interface driver and the Dnsmasq DHCP driver, and enable isolated metadata so instances on provider networks can access metadata over the network
[DEFAULT]
interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = True
Configure the metadata agent
Configure the metadata host and the shared secret
[root@controller ~]# vim /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_ip = controller
metadata_proxy_shared_secret = westos
Configure the Compute service to use the Networking service
Configure the access parameters, enable the metadata proxy, and set the shared secret
[root@controller ~]# vim /etc/nova/nova.conf
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
service_metadata_proxy = True
metadata_proxy_shared_secret = westos
Finalize installation
The Networking service initialization scripts expect a symbolic link /etc/neutron/plugin.ini pointing to the ML2 plug-in configuration file /etc/neutron/plugins/ml2/ml2_conf.ini
[root@controller ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
Populate the database
[root@controller ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
> --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
Restart the Compute API service
[root@controller ~]# systemctl restart openstack-nova-api.service
Start the Networking services
[root@controller ~]# systemctl enable --now neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
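A quick check at this point (not shown in the original): listing the loaded extensions confirms that the neutron-server process started successfully.
[root@controller ~]# neutron ext-list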
Self-service (private) networks
Perform these steps after the provider network setup; they build on the completed provider network configuration
Enable the Modular Layer 2 (ML2) plug-in, the router service, and overlapping IP addresses
[root@controller ~]# vim /etc/neutron/neutron.conf
[DEFAULT]
service_plugins = router
allow_overlapping_ips = True
[root@controller ~]# vim /etc/neutron/plugins/ml2/ml2_conf.ini
Enable flat, VLAN, and VXLAN networks; enable VXLAN self-service networks; and enable the Linux bridge and layer-2 population mechanisms
[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = linuxbridge,l2population
extension_drivers = port_security
Configure the VXLAN network identifier range for self-service networks
[ml2_type_vxlan]
vni_ranges = 1:1000
[root@controller ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
Enable VXLAN overlay networks, configure the IP address of the physical network interface that handles overlay traffic, and enable layer-2 population
[vxlan]
enable_vxlan = True
local_ip = 192.168.81.11
l2_population = True
Configure the Linux bridge interface driver and the external network bridge
[root@controller ~]# vim /etc/neutron/l3_agent.ini
[DEFAULT]
interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
external_network_bridge =
[root@controller ~]# systemctl restart neutron-server.service neutron-linuxbridge-agent.service
[root@controller ~]# systemctl enable --now neutron-l3-agent.service
Install and configure the compute node
Install the components
[root@compute1 ~]# yum install openstack-neutron-linuxbridge ebtables ipset
Configure the common components
[root@compute1 ~]# vim /etc/neutron/neutron.conf
Configure RabbitMQ message queue access
[DEFAULT]
rpc_backend = rabbit
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = openstack
Configure Identity service access
[DEFAULT]
auth_strategy = keystone
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
Configure the lock path
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
Configure networking options
Provider (public) networks
Configure the Linux bridge agent
[root@compute1 ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
Map the provider virtual network to the provider physical network interface
[linux_bridge]
physical_interface_mappings = provider:eth1
Disable VXLAN overlay networks
[vxlan]
enable_vxlan = False
Enable security groups and configure the Linux bridge iptables firewall driver
[securitygroup]
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
Configure the Compute service to use the Networking service
[root@compute1 ~]# vim /etc/nova/nova.conf
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
Finalize installation
Restart the Compute service and start the Linux bridge agent
[root@compute1 ~]# systemctl restart openstack-nova-compute.service
[root@compute1 ~]# systemctl enable --now neutron-linuxbridge-agent.service
Self-service (private) networks
Enable VXLAN overlay networks, configure the IP address of the physical network interface that handles overlay traffic, and enable layer-2 population
[root@compute1 ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[vxlan]
enable_vxlan = True
local_ip = 192.168.81.12
l2_population = True
[root@compute1 ~]# systemctl restart neutron-linuxbridge-agent.service
Back on the controller, enable the layer-3 networking features in the dashboard configuration
[root@controller ~]# vim /etc/openstack-dashboard/local_settings
OPENSTACK_NEUTRON_NETWORK = {
'enable_router': True,
'enable_quotas': True,
'enable_ipv6': True,
'enable_distributed_router': True,
'enable_ha_router': True,
'enable_lb': True,
'enable_firewall': True,
'enable_vpn': True,
'enable_fip_topology_check': True,
}
[root@controller ~]# systemctl restart httpd.service memcached.service
Verify operation
Provider networks
[root@controller ~]# neutron agent-list
Self-service networks (run the same neutron agent-list check and confirm that the L3 agent is also listed)
Launch an instance
Create virtual networks
Provider network
Create the network
[root@controller ~]# . admin-openrc
[root@controller ~]# neutron net-create --shared --provider:physical_network provider \
> --provider:network_type flat provider
Create a subnet on the network
[root@controller ~]# neutron subnet-create --name provider --allocation-pool start=192.168.81.150,end=192.168.81.200 --dns-nameserver 114.114.114.114 --gateway 192.168.81.2 provider 192.168.81.0/24
Create the m1.nano flavor
[root@controller ~]# openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano
Generate a key pair
Generate the key pair and add the public key
[root@controller ~]# . demo-openrc
[root@controller ~]# ssh-keygen -q -N ""
[root@controller ~]# openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey
Verify the addition of the key pair
[root@controller ~]# openstack keypair list
Add security group rules
Add rules to the default security group to permit ICMP (ping) and secure shell (SSH) access
[root@controller ~]# openstack security group rule create --proto icmp default
[root@controller ~]# openstack security group rule create --proto tcp --dst-port 22 default
Launch an instance
Launch an instance on the provider network
Determine instance options
List the available flavors, images, networks, and security groups
[root@controller ~]# openstack flavor list
[root@controller ~]# openstack image list
[root@controller ~]# openstack network list
[root@controller ~]# openstack security group list
Launch the instance (the net-id value passed below comes from the openstack network list output and will differ in your environment)
[root@controller ~]# openstack server create --flavor m1.nano --image cirros --nic net-id=f035d40f-9d02-43d5-a78f-769d05699780 --security-group default --key-name mykey provider-instance
Check the status of the instance
[root@controller ~]# openstack server list
Access the instance using the virtual console
Obtain a Virtual Network Computing (VNC) session URL for the instance and open it in a web browser
[root@controller ~]# openstack console url show provider-instance
Browse to http://192.168.81.11:6080/vnc_auto.html?token=3c14b440-7c04-47bb-afd2-56fff5ad2327
Verify remote access
[root@controller ~]# ping 192.168.81.151
[root@controller ~]# ssh cirros@192.168.81.151
Launch an instance on the self-service network
Create the network
Create the router
Add interfaces to the router
Launch the instance vm2
Associate a floating IP address (see the command sketch after this list)
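The steps above are only named, not shown; the following is a minimal sketch of the corresponding Mitaka-era commands. The network name selfservice, the router name router, the 172.16.1.0/24 subnet, and the DNS server are assumptions chosen for illustration, SELFSERVICE_NET_ID and FLOATING_IP_ADDRESS are placeholders, and newer clients rename openstack ip floating create to openstack floating ip create.
# assumed names and addressing, for illustration only
[root@controller ~]# . admin-openrc
[root@controller ~]# neutron net-update provider --router:external
[root@controller ~]# . demo-openrc
[root@controller ~]# neutron net-create selfservice
[root@controller ~]# neutron subnet-create --name selfservice \
> --dns-nameserver 114.114.114.114 --gateway 172.16.1.1 selfservice 172.16.1.0/24
[root@controller ~]# neutron router-create router
[root@controller ~]# neutron router-interface-add router selfservice
[root@controller ~]# neutron router-gateway-set router provider
[root@controller ~]# openstack server create --flavor m1.nano --image cirros \
> --nic net-id=SELFSERVICE_NET_ID --security-group default --key-name mykey vm2
[root@controller ~]# openstack ip floating create provider
[root@controller ~]# openstack ip floating add FLOATING_IP_ADDRESS vm2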
Dashboard
Install and configure
[root@controller ~]# yum install openstack-dashboard
[root@controller ~]# vim /etc/openstack-dashboard/local_settings
Configure the dashboard on the controller node to use the OpenStack services
Enable the Identity API version 3
Configure user as the default role for users created via the dashboard
OPENSTACK_HOST = "controller"
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
Allow all hosts to access the dashboard
ALLOWED_HOSTS = ['*', ]
Configure the memcached session storage service
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'controller:11211',
}
}
Enable support for domains
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
Configure the API versions
OPENSTACK_API_VERSIONS = {
"identity": 3,
"image": 2,
"volume": 2,
}
Configure default as the default domain for users created via the dashboard
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "default"
If you chose the provider networks option, disable support for layer-3 networking services
OPENSTACK_NEUTRON_NETWORK = {
'enable_router': False,
'enable_quotas': False,
'enable_ipv6': False,
'enable_distributed_router': False,
'enable_ha_router': False,
'enable_lb': False,
'enable_firewall': False,
'enable_vpn': False,
'enable_fip_topology_check': False,
}
Optionally, configure the time zone
TIME_ZONE = "Asia/Shanghai"
[root@controller ~]# vim /etc/httpd/conf.d/openstack.conf
WSGIApplicationGroup %{GLOBAL}
Restart the web server and the session storage service
[root@controller ~]# systemctl restart httpd.service memcached.service
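To verify the dashboard, browse to it from a host that can resolve controller; the path below assumes the default openstack-dashboard packaging.
Browse to http://controller/dashboard and log in with the default domain and the admin or demo credentials created earlier.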