OpenStack Installation and Configuration (Mitaka Release)


 


IP              Hostname     Role
192.168.117.14  controller   control node
192.168.117.15  compute1     compute node 1

Control node: 4 GB RAM, 2 CPU cores, VT virtualization enabled
Compute node 1: 1 GB RAM, 2 CPU cores, VT virtualization enabled
 

Basic environment preparation (all nodes)

  1. Configure /etc/hosts name resolution
[root@controller ~]# vim /etc/hosts
192.168.117.14  controller
192.168.117.15  compute1
  1. Mount the ISO image
[root@controller ~]# mount /dev/cdrom /mnt
[root@controller ~]# echo 'mount /dev/cdrom /mnt' >> /etc/rc.local 
[root@controller ~]# chmod +x /etc/rc.d/rc.local
  1. Upload openstack_rpm.tar.gz to /opt/ and extract it
[root@controller opt]# tar zxvf openstack_rpm.tar.gz
  1. Configure the yum repositories
[root@controller ~]# vim /etc/yum.repos.d/local.repo 

[local]
name=local
baseurl=file:///mnt
gpgcheck=0

[openstack]
name=openstack
baseurl=file:///opt/repo
gpgcheck=0
  1. Configure time synchronization on each node
[root@controller ~]# vim /etc/chrony.conf
server ntp6.aliyun.com iburst
allow 192.168.117.0/24
[root@controller ~]# systemctl restart chronyd


[root@compute1 ~]# vim /etc/chrony.conf
server 192.168.117.14 iburst
[root@compute1 ~]# systemctl restart chronyd
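A quick way to confirm synchronization on either node (a sketch; output varies by environment):
[root@compute1 ~]# chronyc sources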
  1. Install the OpenStack client and openstack-selinux
[root@controller ~]# yum install -y python-openstackclient openstack-selinux

Configure the keystone identity service (control node)

  1. Install the database and the Python MySQL module
[root@controller ~]# yum install -y mariadb mariadb-server python2-PyMySQL
  1. Create the database configuration file
[root@controller ~]# vim /etc/my.cnf.d/openstack.cnf
[mysqld]
bind-address = 192.168.117.14        // control node IP
default-storage-engine = innodb     // default storage engine
innodb_file_per_table                     // one tablespace file per table
max_connections = 4096
collation-server = utf8_general_ci     
character-set-server = utf8
  1. Start mariadb and enable it at boot
[root@controller ~]# systemctl enable --now mariadb
  1. Run the mariadb secure installation wizard; for convenience, no root password is set
[root@controller ~]# mysql_secure_installation
Enter current password for root (enter for none): 
Set root password? [Y/n] n
Remove anonymous users? [Y/n] y
Disallow root login remotely? [Y/n] y
Remove test database and access to it? [Y/n] y
Reload privilege tables now? [Y/n] y
  1. Install the rabbitmq message queue, start the service, and enable it at boot
[root@controller ~]# yum install -y rabbitmq-server
[root@controller ~]# systemctl enable --now rabbitmq-server
  1. Create a user and grant permissions
[root@controller ~]# rabbitmqctl add_user openstack RABBIT_PASS
[root@controller ~]# rabbitmqctl set_permissions openstack ".*" ".*" ".*"
  1. Enable the management web UI plugin
[root@controller ~]# rabbitmq-plugins enable rabbitmq_management
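The management UI listens on port 15672 by default. A reachability sketch (expect an HTTP 200):
[root@controller ~]# curl -s -o /dev/null -w '%{http_code}\n' http://controller:15672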
  1. Install memcached
[root@controller ~]# yum install -y memcached python-memcached
  1. Edit the memcached configuration, restart the service, and enable it at boot
[root@controller ~]# sed -i 's/127.0.0.1/192.168.117.14/g' /etc/sysconfig/memcached
[root@controller ~]# systemctl enable --now memcached
  1. Create the keystone database and grant privileges
[root@controller ~]# mysql
MariaDB [(none)]> create database keystone;
MariaDB [(none)]> grant all privileges on keystone.* to 'keystone'@'localhost' identified by 'KEYSTONE_DBPASS';
MariaDB [(none)]> grant all privileges on keystone.* to 'keystone'@'%' identified by 'KEYSTONE_DBPASS';
  1. Install the keystone packages
[root@controller ~]# yum install -y openstack-keystone httpd mod_wsgi
  1. Edit the keystone configuration file
[root@controller ~]# sed -i.bak -e '/^$/d' -e '/^#.*/d' /etc/keystone/keystone.conf
[root@controller ~]# yum install -y openstack-utils
[root@controller ~]# openstack-config --set /etc/keystone/keystone.conf DEFAULT admin_token ADMIN_TOKEN       // define the initial admin token
[root@controller ~]# openstack-config --set /etc/keystone/keystone.conf database connection mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone      // configure database access
[root@controller ~]# openstack-config --set /etc/keystone/keystone.conf token provider fernet        // configure the Fernet token provider
  1. Sync the database
[root@controller ~]# su -s /bin/sh -c "keystone-manage db_sync" keystone
  1. Initialize the fernet keys
[root@controller ~]# keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
  1. Edit the httpd configuration file and create the virtual hosts
[root@controller ~]# vim /etc/httpd/conf/httpd.conf 
ServerName controller

[root@controller ~]# vim /etc/httpd/conf.d/wsgi-keystone.conf
Listen 5000
Listen 35357

<VirtualHost *:5000>
    WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
    WSGIProcessGroup keystone-public
    WSGIScriptAlias / /usr/bin/keystone-wsgi-public
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    ErrorLogFormat "%{cu}t %M"
    ErrorLog /var/log/httpd/keystone-error.log
    CustomLog /var/log/httpd/keystone-access.log combined

    <Directory /usr/bin>
        Require all granted
    </Directory>
</VirtualHost>

<VirtualHost *:35357>
    WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
    WSGIProcessGroup keystone-admin
    WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    ErrorLogFormat "%{cu}t %M"
    ErrorLog /var/log/httpd/keystone-error.log
    CustomLog /var/log/httpd/keystone-access.log combined

    <Directory /usr/bin>
        Require all granted
    </Directory>
  </VirtualHost>
  1. Start the httpd service and enable it at boot
[root@controller ~]# systemctl enable --now httpd
  1. Configure the authentication token, endpoint URL, and identity API version
[root@controller ~]# export OS_TOKEN=ADMIN_TOKEN
[root@controller ~]# export OS_URL=http://controller:35357/v3
[root@controller ~]# export OS_IDENTITY_API_VERSION=3
  1. Create the identity service
[root@controller ~]# openstack service create --name keystone --description "OpenStack Identity" identity
  1. Create the three API endpoints: admin, internal, and public
[root@controller ~]# openstack endpoint create  --region RegionOne identity public http://controller:5000/v3
[root@controller ~]# openstack endpoint create  --region RegionOne identity internal http://controller:5000/v3
[root@controller ~]# openstack endpoint create  --region RegionOne identity admin http://controller:35357/v3
  1. Create the domain, project (tenant), user, and role
[root@controller ~]# openstack domain create --description "Default Domain" default
[root@controller ~]# openstack project create --domain default --description "Admin Project" admin
[root@controller ~]# openstack user create --domain default --password ADMIN_PASS admin
[root@controller ~]# openstack role create admin
  1. Associate the project, user, and role
[root@controller ~]# openstack role add --project admin --user admin admin
  1. Create the service project
[root@controller ~]# openstack project create --domain default --description "Service Project" service
  1. Create the admin-openrc environment file and source it automatically at login
[root@controller ~]# vim admin-openrc
export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=ADMIN_PASS
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2

[root@controller ~]# vim .bashrc
source admin-openrc
  1. Log back in and run the openstack token issue command to check that an authentication token can be requested
    (screenshot)
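A minimal check, assuming admin-openrc has been sourced; the command should print a table with expires, id, project_id, and user_id fields:
[root@controller ~]# openstack token issue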

Configure the glance image service (control node)

  1. Create the database and grant privileges
[root@controller ~]# mysql
MariaDB [(none)]> create database glance;
MariaDB [(none)]> grant all privileges on glance.* to 'glance'@'localhost' identified by 'GLANCE_DBPASS';
MariaDB [(none)]> grant all privileges on glance.* to 'glance'@'%' identified by 'GLANCE_DBPASS';
MariaDB [(none)]> quit
  1. Create the glance user in keystone and associate the role
[root@controller ~]# openstack user create --domain default --password GLANCE_PASS glance
[root@controller ~]# openstack role add --project service --user glance admin
  1. Create the service and register the API endpoints in keystone
[root@controller ~]# openstack service create --name glance --description "OpenStack Image" image
[root@controller ~]# openstack endpoint create --region RegionOne image public http://controller:9292
[root@controller ~]# openstack endpoint create --region RegionOne image internal http://controller:9292
[root@controller ~]# openstack endpoint create --region RegionOne image admin http://controller:9292
  1. Install the packages
[root@controller ~]# yum install -y openstack-glance
  1. Edit the glance-api configuration file
[root@controller ~]# sed -i.bak -e '/^$/d' -e '/^#.*/d' /etc/glance/glance-api.conf
[root@controller ~]# openstack-config --set /etc/glance/glance-api.conf database connection mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
[root@controller ~]# openstack-config --set /etc/glance/glance-api.conf glance_store stores file,http
[root@controller ~]# openstack-config --set /etc/glance/glance-api.conf glance_store default_store file
[root@controller ~]# openstack-config --set /etc/glance/glance-api.conf glance_store filesystem_store_datadir /var/lib/glance/images/
[root@controller ~]# openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_uri http://controller:5000
[root@controller ~]# openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_url http://controller:35357
[root@controller ~]# openstack-config --set /etc/glance/glance-api.conf keystone_authtoken memcached_servers controller:11211
[root@controller ~]# openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_type password
[root@controller ~]# openstack-config --set /etc/glance/glance-api.conf keystone_authtoken project_domain_name default
[root@controller ~]# openstack-config --set /etc/glance/glance-api.conf keystone_authtoken user_domain_name default
[root@controller ~]# openstack-config --set /etc/glance/glance-api.conf keystone_authtoken project_name service
[root@controller ~]# openstack-config --set /etc/glance/glance-api.conf keystone_authtoken username glance
[root@controller ~]# openstack-config --set /etc/glance/glance-api.conf keystone_authtoken password GLANCE_PASS
[root@controller ~]# openstack-config --set /etc/glance/glance-api.conf paste_deploy flavor keystone
  1. Edit the glance-registry configuration file
[root@controller ~]# sed -i.bak -e '/^$/d' -e '/^#.*/d' /etc/glance/glance-registry.conf
[root@controller ~]# openstack-config --set /etc/glance/glance-registry.conf database connection mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
[root@controller ~]# openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_uri http://controller:5000
[root@controller ~]# openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_url http://controller:35357
[root@controller ~]# openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken memcached_servers controller:11211
[root@controller ~]# openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_type password
[root@controller ~]# openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken project_domain_name default
[root@controller ~]# openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken user_domain_name default
[root@controller ~]# openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken project_name service
[root@controller ~]# openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken username glance
[root@controller ~]# openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken password GLANCE_PASS
[root@controller ~]# openstack-config --set /etc/glance/glance-registry.conf paste_deploy flavor keystone
  1. Sync the database
[root@controller ~]# su -s /bin/sh -c "glance-manage db_sync" glance
  1. Start the glance services and enable them at boot
[root@controller ~]# systemctl enable --now openstack-glance-api.service
[root@controller ~]# systemctl enable --now openstack-glance-registry.service
  1. Verify the glance service
    (screenshot)
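A CLI verification sketch, assuming a cirros-0.3.4-x86_64-disk.img file is present in the current directory (the image name is an example):
[root@controller ~]# openstack image create "cirros" --file cirros-0.3.4-x86_64-disk.img --disk-format qcow2 --container-format bare --public
[root@controller ~]# openstack image list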

Configure the nova compute service (control node)

  1. Create the databases and grant privileges
[root@controller ~]# mysql
MariaDB [(none)]> create database nova_api;
MariaDB [(none)]> create database nova;
MariaDB [(none)]> grant all privileges on nova_api.* to 'nova'@'localhost' identified by 'NOVA_DBPASS';
MariaDB [(none)]> grant all privileges on nova_api.* to 'nova'@'%' identified by 'NOVA_DBPASS';
MariaDB [(none)]> grant all privileges on nova.* to 'nova'@'localhost' identified by 'NOVA_DBPASS';
MariaDB [(none)]> grant all privileges on nova.* to 'nova'@'%' identified by 'NOVA_DBPASS';
MariaDB [(none)]> quit
  1. Create the nova system user in keystone and associate the role
[root@controller ~]# openstack user create --domain default --password NOVA_PASS nova
[root@controller ~]# openstack role add --project service --user nova admin
  1. Create the service and register the API endpoints in keystone
[root@controller ~]# openstack service create --name nova --description "OpenStack Compute" compute
[root@controller ~]# openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1/%\(tenant_id\)s
[root@controller ~]# openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1/%\(tenant_id\)s
[root@controller ~]# openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1/%\(tenant_id\)s
  1. Install the service packages
[root@controller ~]# yum install -y openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler
  1. Edit the nova configuration file
[root@controller ~]# sed -i.bak -e '/^$/d' -e '/^#.*/d' /etc/nova/nova.conf
[root@controller ~]# openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata
[root@controller ~]# openstack-config --set /etc/nova/nova.conf api_database connection mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api
[root@controller ~]# openstack-config --set /etc/nova/nova.conf database connection mysql+pymysql://nova:NOVA_DBPASS@controller/nova
[root@controller ~]# openstack-config --set /etc/nova/nova.conf DEFAULT rpc_backend rabbit
[root@controller ~]# openstack-config --set /etc/nova/nova.conf oslo_messaging_rabbit rabbit_host controller
[root@controller ~]# openstack-config --set /etc/nova/nova.conf oslo_messaging_rabbit rabbit_userid openstack
[root@controller ~]# openstack-config --set /etc/nova/nova.conf oslo_messaging_rabbit rabbit_password RABBIT_PASS
[root@controller ~]# openstack-config --set /etc/nova/nova.conf DEFAULT auth_strategy keystone
[root@controller ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_uri http://controller:5000
[root@controller ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://controller:35357
[root@controller ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers controller:11211
[root@controller ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
[root@controller ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name default
[root@controller ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name default
[root@controller ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
[root@controller ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
[root@controller ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken password NOVA_PASS
[root@controller ~]# openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 192.168.117.14
[root@controller ~]# openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron True
[root@controller ~]# openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
[root@controller ~]# openstack-config --set /etc/nova/nova.conf vnc vncserver_listen '$my_ip'
[root@controller ~]# openstack-config --set /etc/nova/nova.conf vnc vncserver_proxyclient_address '$my_ip'
[root@controller ~]# openstack-config --set /etc/nova/nova.conf glance api_servers http://controller:9292
[root@controller ~]# openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp
  1. Sync the compute databases
[root@controller ~]# su -s /bin/sh -c "nova-manage api_db sync" nova
[root@controller ~]# su -s /bin/sh -c "nova-manage db sync" nova
  1. Start the services and enable them at boot
[root@controller ~]# systemctl enable --now openstack-nova-api.service  openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
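To confirm the controller services registered (a sketch; nova-consoleauth, nova-scheduler, and nova-conductor should all report state "up"):
[root@controller ~]# nova service-list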

Configure the nova compute service (compute node 1)

  1. Install the packages
[root@compute1 ~]# yum install -y openstack-utils openstack-nova-compute
  1. Edit the nova configuration file
[root@compute1 ~]# sed -i.bak -e '/^$/d' -e '/^#.*/d' /etc/nova/nova.conf
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT rpc_backend rabbit
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf oslo_messaging_rabbit rabbit_host controller
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf oslo_messaging_rabbit rabbit_userid openstack
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf oslo_messaging_rabbit rabbit_password RABBIT_PASS
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT auth_strategy keystone
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_uri http://controller:5000
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://controller:35357
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers controller:11211
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name default
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name default
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken password NOVA_PASS
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 192.168.117.15
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron True
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf vnc enabled True
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf vnc vncserver_listen 0.0.0.0
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf vnc vncserver_proxyclient_address '$my_ip'
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf vnc novncproxy_base_url http://controller:6080/vnc_auto.html
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf glance api_servers http://controller:9292
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp
  1. Start the services and enable them at boot
[root@compute1 ~]# systemctl enable --now libvirtd.service openstack-nova-compute.service
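From the control node, the new compute node should now be listed (a sketch):
[root@controller ~]# openstack compute service list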

Configure the neutron networking service (control node)

  1. Create the database and grant privileges
[root@controller ~]# mysql
MariaDB [(none)]> create database neutron;
MariaDB [(none)]> grant all privileges on neutron.* to 'neutron'@'localhost' identified by 'NEUTRON_DBPASS';
MariaDB [(none)]> grant all privileges on neutron.* to 'neutron'@'%' identified by 'NEUTRON_DBPASS';
MariaDB [(none)]> quit
  1. Create the neutron system user in keystone and associate the role
[root@controller ~]# openstack user create --domain default --password NEUTRON_PASS neutron
[root@controller ~]# openstack role add --project service --user neutron admin
  1. Create the service and register the API endpoints in keystone
[root@controller ~]# openstack service create --name neutron --description "OpenStack Networking" network
[root@controller ~]# openstack endpoint create --region RegionOne network public http://controller:9696
[root@controller ~]# openstack endpoint create --region RegionOne network internal http://controller:9696
[root@controller ~]# openstack endpoint create --region RegionOne network admin http://controller:9696
  1. Install the networking components
[root@controller ~]# yum install -y openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables
  1. Edit the neutron configuration file
[root@controller ~]# sed -i.bak -e '/^$/d' -e '/^#.*/d' /etc/neutron/neutron.conf
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf database connection mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT core_plugin ml2
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT service_plugins
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT rpc_backend rabbit
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf oslo_messaging_rabbit rabbit_host controller
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf oslo_messaging_rabbit rabbit_userid openstack
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf oslo_messaging_rabbit rabbit_password RABBIT_PASS
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_uri http://controller:5000
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://controller:35357
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers controller:11211
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_type password
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name default
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name default
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_name service
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken username neutron
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken password NEUTRON_PASS
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT  notify_nova_on_port_status_changes True
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_data_changes True
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf nova auth_url http://controller:35357
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf nova auth_type password
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf nova project_domain_name default
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf nova user_domain_name default
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf nova region_name RegionOne
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf nova project_name service
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf nova username nova
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf nova password NOVA_PASS
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp
  1. Edit the ML2 plugin configuration file
[root@controller ~]# sed -i.bak -e '/^$/d' -e '/^#.*/d' /etc/neutron/plugins/ml2/ml2_conf.ini 
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers flat,vlan
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types 
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers linuxbridge
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers port_security
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_flat flat_networks provider
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup enable_ipset True
  1. Edit the Linux bridge agent configuration file
[root@controller ~]# sed -i.bak -e '/^$/d' -e '/^#.*/d' /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings provider:ens33
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan False
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group True
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
  1. Configure the DHCP agent
[root@controller ~]# sed -i.bak -e '/^$/d' -e '/^#.*/d' /etc/neutron/dhcp_agent.ini 
[root@controller ~]# openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT interface_driver neutron.agent.linux.interface.BridgeInterfaceDriver
[root@controller ~]# openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT dhcp_driver neutron.agent.linux.dhcp.Dnsmasq
[root@controller ~]# openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT enable_isolated_metadata True
  1. Configure the metadata agent
[root@controller ~]# sed -i.bak -e '/^$/d' -e '/^#.*/d' /etc/neutron/metadata_agent.ini
[root@controller ~]# openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT nova_metadata_ip controller
[root@controller ~]# openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT metadata_proxy_shared_secret METADATA_SECRET
  1. Configure nova to use the networking service
[root@controller ~]# sed -i.bak -e '/^$/d' -e '/^#.*/d' /etc/nova/nova.conf
[root@controller ~]# openstack-config --set /etc/nova/nova.conf neutron url http://controller:9696
[root@controller ~]# openstack-config --set /etc/nova/nova.conf neutron auth_url http://controller:35357
[root@controller ~]# openstack-config --set /etc/nova/nova.conf neutron auth_type password
[root@controller ~]# openstack-config --set /etc/nova/nova.conf neutron project_domain_name default
[root@controller ~]# openstack-config --set /etc/nova/nova.conf neutron user_domain_name default
[root@controller ~]# openstack-config --set /etc/nova/nova.conf neutron region_name RegionOne
[root@controller ~]# openstack-config --set /etc/nova/nova.conf neutron project_name service
[root@controller ~]# openstack-config --set /etc/nova/nova.conf neutron username neutron
[root@controller ~]# openstack-config --set /etc/nova/nova.conf neutron password NEUTRON_PASS
[root@controller ~]# openstack-config --set /etc/nova/nova.conf neutron service_metadata_proxy True
[root@controller ~]# openstack-config --set /etc/nova/nova.conf neutron metadata_proxy_shared_secret METADATA_SECRET
  1. Create a symlink pointing to the ML2 plugin configuration file /etc/neutron/plugins/ml2/ml2_conf.ini
[root@controller ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
  1. Sync the database
[root@controller ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
  1. Restart the compute API service
[root@controller ~]# systemctl restart openstack-nova-api.service

  1. Start the Networking services and enable them at boot
[root@controller ~]# systemctl enable --now neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
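A quick check that the agents registered (a sketch; every agent should report alive):
[root@controller ~]# neutron agent-list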

Configure the neutron networking service (compute node 1)

  1. Install the components
[root@compute1 ~]# yum install -y openstack-neutron-linuxbridge ebtables ipset
  1. Edit the neutron configuration file
[root@compute1 ~]# sed -i.bak -e '/^$/d' -e '/^#.*/d' /etc/neutron/neutron.conf
[root@compute1 ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT rpc_backend rabbit
[root@compute1 ~]# openstack-config --set /etc/neutron/neutron.conf oslo_messaging_rabbit rabbit_host controller
[root@compute1 ~]# openstack-config --set /etc/neutron/neutron.conf oslo_messaging_rabbit rabbit_userid openstack
[root@compute1 ~]# openstack-config --set /etc/neutron/neutron.conf oslo_messaging_rabbit rabbit_password RABBIT_PASS
[root@compute1 ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
[root@compute1 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_uri http://controller:5000
[root@compute1 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://controller:35357
[root@compute1 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers controller:11211
[root@compute1 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_type password
[root@compute1 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name default
[root@compute1 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name default
[root@compute1 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_name service
[root@compute1 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken username neutron
[root@compute1 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken password NEUTRON_PASS
[root@compute1 ~]# openstack-config --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp
  1. Configure the Linux bridge agent
[root@compute1 ~]# sed -i.bak -e '/^$/d' -e '/^#.*/d' /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[root@compute1 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings provider:ens33
[root@compute1 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan False
[root@compute1 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group True
[root@compute1 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
  1. Configure nova to use the networking service
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf neutron url http://controller:9696
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf neutron auth_url http://controller:35357
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf neutron auth_type password
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf neutron project_domain_name default
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf neutron user_domain_name default
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf neutron region_name RegionOne
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf neutron project_name service
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf neutron username neutron
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf neutron password NEUTRON_PASS
  1. Restart the compute service
[root@compute1 ~]# systemctl restart openstack-nova-compute.service
  1. Start the Linux bridge agent and enable it at boot
[root@compute1 ~]# systemctl enable --now neutron-linuxbridge-agent.service

Configure the web dashboard (compute node 1)

  1. Install the package
[root@compute1 ~]# yum install -y openstack-dashboard
  1. Edit the dashboard configuration file
[root@compute1 ~]# cp /etc/openstack-dashboard/local_settings{,.bak}
[root@compute1 ~]# vim /etc/openstack-dashboard/local_settings
OPENSTACK_HOST = "controller"
ALLOWED_HOSTS = ['*', ]
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
     'default': {         
         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
         'LOCATION': 'controller:11211',    
     }
 }
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_API_VERSIONS = {   
    "identity": 3,    
    "image": 2,    
    "volume": 2,
}
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "default"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
OPENSTACK_NEUTRON_NETWORK = {   
    ...   
    'enable_router': False,    
    'enable_quotas': False,    
    'enable_distributed_router': False,    
    'enable_ha_router': False,    
    'enable_lb': False,    
    'enable_firewall': False,    
    'enable_vpn': False,    
    'enable_fip_topology_check': False,
}
TIME_ZONE = "Asia/Shanghai"
  1. If the dashboard page is unreachable after restarting httpd, add the following setting
[root@compute1 ~]# vim /etc/httpd/conf.d/openstack-dashboard.conf
WSGIApplicationGroup %{GLOBAL}
  1. Restart the web service
[root@compute1 ~]# systemctl restart httpd.service
  1. Access http://192.168.117.15/dashboard in a browser
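A reachability sketch from the command line (expect an HTTP 200):
[root@compute1 ~]# curl -s -o /dev/null -w '%{http_code}\n' http://192.168.117.15/dashboard/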

Launch an instance (control node)

  1. Create a network and subnet
[root@controller ~]# neutron net-create --shared --provider:physical_network provider --provider:network_type flat provider
[root@controller ~]# neutron subnet-create --name sub1_provider --allocation-pool start=192.168.117.101,end=192.168.117.250 --dns-nameserver 223.5.5.5 --gateway 192.168.117.2 provider 192.168.117.0/24
  1. Create an m1.nano flavor
[root@controller ~]# openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano
  1. Create a key pair and add the public key to openstack
[root@controller ~]# ssh-keygen -q -N ""
[root@controller ~]# openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey
  1. Add security group rules
[root@controller ~]# openstack security group rule create --proto icmp default
[root@controller ~]# openstack security group rule create --proto tcp --dst-port 22 default
  1. Launch an instance (the net-id is the UUID of the provider network created above)
[root@controller ~]# openstack server create --flavor m1.nano --image cirros --nic net-id=c1788266-be0c-4b36-bbb0-58e249281b9f --security-group default --key-name mykey provider-instance
  1. Check the instance status
    (screenshot)
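The same check from the CLI (a sketch; the instance should reach ACTIVE status):
[root@controller ~]# openstack server list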

  2. On compute node 1, add the following two settings to avoid a bug that prevents entering the guest system

[root@compute1 ~]# vim /etc/nova/nova.conf  // add under the [libvirt] section
cpu_mode = none
virt_type = qemu
[root@compute1 ~]# systemctl restart openstack-nova-compute
  1. Verify that the instance console is reachable
    (screenshot)
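The console URL can also be fetched from the CLI (a sketch):
[root@controller ~]# openstack console url show provider-instance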

Quickly add another compute node (compute node 2)

IP              Hostname   Role
192.168.117.16  compute2   compute node 2

Add compute node 2: 1 GB RAM, 2 CPU cores, VT virtualization enabled

  1. Configure /etc/hosts name resolution
[root@compute2 ~]# vim  /etc/hosts
192.168.117.14 controller
  1. Copy the rpm bundle and yum repo file from compute node 1 to compute node 2
[root@compute2 ~]# scp -rp 192.168.117.15:/opt/openstack_rpm.tar.gz /opt/
[root@compute2 ~]# cd /opt
[root@compute2 opt]# tar zxvf openstack_rpm.tar.gz
[root@compute2 opt]# scp -rp 192.168.117.15:/etc/yum.repos.d/openstack.repo /etc/yum.repos.d/
  1. Mount the CD-ROM
[root@compute2 ~]# mount /dev/cdrom /mnt
[root@compute2 ~]# echo 'mount /dev/cdrom /mnt' >> /etc/rc.local 
[root@compute2 ~]# chmod +x /etc/rc.local
  1. Configure time synchronization
[root@compute2 ~]# vim /etc/chrony.conf
server 192.168.117.14 iburst
[root@compute2 ~]# systemctl restart chronyd
  1. Install the OpenStack client and openstack-selinux
[root@compute2 ~]# yum install -y python-openstackclient.noarch openstack-selinux.noarch
  1. Install nova-compute
[root@compute2 ~]# yum install -y openstack-nova-compute openstack-utils.noarch
  1. Edit the nova configuration file (only my_ip differs from compute node 1)
[root@compute2 ~]# sed -i.bak -e '/^$/d' -e '/^#.*/d' /etc/nova/nova.conf
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT rpc_backend rabbit
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf oslo_messaging_rabbit rabbit_host controller
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf oslo_messaging_rabbit rabbit_userid openstack
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf oslo_messaging_rabbit rabbit_password RABBIT_PASS
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT auth_strategy keystone
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_uri http://controller:5000
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://controller:35357
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers controller:11211
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name default
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name default
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf keystone_authtoken password NOVA_PASS
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 192.168.117.16
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron True
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf vnc enabled True
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf vnc vncserver_listen 0.0.0.0
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf vnc vncserver_proxyclient_address '$my_ip'
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf vnc novncproxy_base_url http://controller:6080/vnc_auto.html
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf glance api_servers http://controller:9292
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf neutron url http://controller:9696
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf neutron auth_url http://controller:35357
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf neutron auth_type password
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf neutron project_domain_name default
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf neutron user_domain_name default
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf neutron region_name RegionOne
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf neutron project_name service
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf neutron username neutron
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf neutron password NEUTRON_PASS
  1. Install neutron-linuxbridge-agent
[root@compute2 ~]# yum install -y openstack-neutron-linuxbridge ebtables ipset
  1. Edit the neutron configuration file
[root@compute2 ~]# sed -i.bak -e '/^$/d' -e '/^#.*/d' /etc/neutron/neutron.conf
[root@compute2 ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT rpc_backend rabbit
[root@compute2 ~]# openstack-config --set /etc/neutron/neutron.conf oslo_messaging_rabbit rabbit_host controller
[root@compute2 ~]# openstack-config --set /etc/neutron/neutron.conf oslo_messaging_rabbit rabbit_userid openstack
[root@compute2 ~]# openstack-config --set /etc/neutron/neutron.conf oslo_messaging_rabbit rabbit_password RABBIT_PASS
[root@compute2 ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
[root@compute2 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_uri http://controller:5000
[root@compute2 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://controller:35357
[root@compute2 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers controller:11211
[root@compute2 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_type password
[root@compute2 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name default
[root@compute2 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name default
[root@compute2 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_name service
[root@compute2 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken username neutron
[root@compute2 ~]# openstack-config --set /etc/neutron/neutron.conf keystone_authtoken password NEUTRON_PASS
[root@compute2 ~]# openstack-config --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp
  1. Configure the Linux bridge agent
[root@compute2 ~]# sed -i.bak -e '/^$/d' -e '/^#.*/d' /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[root@compute2 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings provider:ens33
[root@compute2 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan False
[root@compute2 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group True
[root@compute2 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
  1. Start the services and enable them at boot
[root@compute2 ~]# systemctl enable --now libvirtd.service openstack-nova-compute.service neutron-linuxbridge-agent.service
  1. On the control node, check that neutron agent-list shows the new agent
    (screenshot)
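A sketch of the check; a new Linux bridge agent row for compute2 should appear:
[root@controller ~]# neutron agent-list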

Migrate the glance image service (control node -> compute node 2)

  1. Stop the glance services on the control node
[root@controller ~]# systemctl disable --now openstack-glance-api openstack-glance-registry
  1. Install and start the database on compute node 2
[root@compute2 ~]# yum install -y mariadb mariadb-server python2-PyMySQL
[root@compute2 ~]# systemctl enable --now mariadb
  1. Run the secure installation wizard
[root@compute2 ~]# mysql_secure_installation
Enter current password for root (enter for none): 
Set root password? [Y/n] n
Remove anonymous users? [Y/n] y
Disallow root login remotely? [Y/n] y
Remove test database and access to it? [Y/n] y
Reload privilege tables now? [Y/n] y
  1. Back up the glance database on the control node, push it to compute node 2, and import it there
[root@controller ~]# mysqldump -B glance > glance.sql
[root@controller ~]# scp glance.sql 192.168.117.16:/root
[root@compute2 ~]# mysql < glance.sql
  1. Grant database privileges
[root@compute2 ~]# mysql
MariaDB [(none)]> grant all privileges on glance.* to 'glance'@'localhost' identified by 'GLANCE_DBPASS';
MariaDB [(none)]> grant all privileges on glance.* to 'glance'@'%' identified by 'GLANCE_DBPASS';
MariaDB [(none)]> quit
  1. Install the glance service and copy the configuration files from the control node
[root@compute2 ~]# yum install -y openstack-glance
[root@compute2 ~]# scp -rp 192.168.117.14:/etc/glance/glance-api.conf /etc/glance/glance-api.conf
[root@compute2 ~]# scp -rp 192.168.117.14:/etc/glance/glance-registry.conf /etc/glance/glance-registry.conf
  1. Update the database connection settings
[root@compute2 ~]# openstack-config --set /etc/glance/glance-api.conf database connection mysql+pymysql://glance:GLANCE_DBPASS@192.168.117.16/glance
[root@compute2 ~]# openstack-config --set /etc/glance/glance-registry.conf database connection mysql+pymysql://glance:GLANCE_DBPASS@192.168.117.16/glance
  1. Start the services and enable them at boot
[root@compute2 ~]# systemctl enable --now openstack-glance-api openstack-glance-registry
  1. Migrate the glance image files
[root@compute2 ~]# scp -rp 192.168.117.14:/var/lib/glance/images/* /var/lib/glance/images/
[root@compute2 ~]# chown glance:glance /var/lib/glance/images/*
  1. On the control node, update the glance API addresses in the keystone service catalog
[root@controller ~]# mysqldump keystone endpoint > endpoint.sql
[root@controller ~]# vim endpoint.sql    // run the substitution in command mode
:%s@http://controller:9292@http://192.168.117.16:9292@gc
[root@controller ~]# mysql keystone < endpoint.sql
  1. Update the nova configuration file on all nodes
[root@controller ~]# sed -i 's@http://controller:9292@http://192.168.117.16:9292@g' /etc/nova/nova.conf
[root@compute1 ~]# sed -i 's@http://controller:9292@http://192.168.117.16:9292@g' /etc/nova/nova.conf
[root@compute2 ~]# sed -i 's@http://controller:9292@http://192.168.117.16:9292@g' /etc/nova/nova.conf
  1. Restart the nova services on all nodes
[root@controller ~]# systemctl restart openstack-nova-api
[root@compute1 ~]# systemctl restart openstack-nova-compute
[root@compute2 ~]# systemctl restart openstack-nova-compute
  1. Test by uploading a new image and launching an instance
    (screenshots)
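An upload sketch, assuming a local cirros image file (the image name is an example); the new image file should land under /var/lib/glance/images/ on compute2:
[root@controller ~]# openstack image create "cirros-new" --file cirros-0.3.4-x86_64-disk.img --disk-format qcow2 --container-format bare --public
[root@compute2 ~]# ls /var/lib/glance/images/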

Configure the cinder block storage service (control node)

  1. Create the database and grant privileges
[root@controller ~]# mysql
MariaDB [(none)]> create database cinder;
MariaDB [(none)]> grant all privileges on cinder.* to 'cinder'@'localhost' identified by 'CINDER_DBPASS';
MariaDB [(none)]> grant all privileges on cinder.* to 'cinder'@'%' identified by 'CINDER_DBPASS';
MariaDB [(none)]> quit
  1. Create the cinder system user in keystone and associate the role
[root@controller ~]# openstack user create --domain default --password CINDER_PASS cinder
[root@controller ~]# openstack role add --project service --user cinder admin
  1. Create the services in keystone
[root@controller ~]# openstack service create --name cinder --description "OpenStack Block Storage" volume
[root@controller ~]# openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
  1. Create the API endpoints for the block storage service
[root@controller ~]# openstack endpoint create --region RegionOne volume public http://controller:8776/v1/%\(tenant_id\)s
[root@controller ~]# openstack endpoint create --region RegionOne volume internal http://controller:8776/v1/%\(tenant_id\)s
[root@controller ~]# openstack endpoint create --region RegionOne volume admin http://controller:8776/v1/%\(tenant_id\)s
[root@controller ~]# openstack endpoint create --region RegionOne volumev2 public http://controller:8776/v2/%\(tenant_id\)s
[root@controller ~]# openstack endpoint create --region RegionOne volumev2 internal http://controller:8776/v2/%\(tenant_id\)s
[root@controller ~]# openstack endpoint create --region RegionOne volumev2 admin http://controller:8776/v2/%\(tenant_id\)s
  1. Install the package
[root@controller ~]# yum install -y openstack-cinder
  1. Edit the cinder configuration file
[root@controller ~]# sed -i.bak -e '/^$/d' -e '/^#.*/d' /etc/cinder/cinder.conf
[root@controller ~]# openstack-config --set /etc/cinder/cinder.conf database connection mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
[root@controller ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT rpc_backend rabbit
[root@controller ~]# openstack-config --set /etc/cinder/cinder.conf oslo_messaging_rabbit rabbit_host controller
[root@controller ~]# openstack-config --set /etc/cinder/cinder.conf oslo_messaging_rabbit rabbit_userid openstack
[root@controller ~]# openstack-config --set /etc/cinder/cinder.conf oslo_messaging_rabbit rabbit_password RABBIT_PASS
[root@controller ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT auth_strategy keystone
[root@controller ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_uri http://controller:5000
[root@controller ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_url http://controller:35357
[root@controller ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken memcached_servers controller:11211
[root@controller ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_type password
[root@controller ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_domain_name default
[root@controller ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken user_domain_name default
[root@controller ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_name service
[root@controller ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken username cinder
[root@controller ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken password CINDER_PASS
[root@controller ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT my_ip 192.168.117.14
[root@controller ~]# openstack-config --set /etc/cinder/cinder.conf oslo_concurrency lock_path /var/lib/cinder/tmp
  1. Sync the database
[root@controller ~]# su -s /bin/sh -c "cinder-manage db sync" cinder
  1. Configure nova to use block storage
[root@controller ~]# openstack-config --set /etc/nova/nova.conf cinder os_region_name RegionOne
  1. Start the services and enable them at boot
[root@controller ~]# systemctl enable --now openstack-cinder-api.service openstack-cinder-scheduler.service
  1. Check the cinder service status
    (screenshot)
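A sketch of the check; cinder-scheduler on the controller should report state "up":
[root@controller ~]# cinder service-list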

Configure the cinder block storage service (compute node 1)

  1. Install the packages and start the services
[root@compute1 ~]# yum install -y lvm2
[root@compute1 ~]# systemctl restart openstack-nova-compute
[root@compute1 ~]# systemctl enable --now lvm2-lvmetad.service
  1. Add one 30 GB disk and one 10 GB disk, then create the physical volumes and volume groups
[root@compute1 ~]# pvcreate /dev/sdb
[root@compute1 ~]# pvcreate /dev/sdc
[root@compute1 ~]# vgcreate cinder-ssd /dev/sdb
[root@compute1 ~]# vgcreate cinder-sata /dev/sdc
  1. Add a filter that rejects all other devices
[root@compute1 ~]# vim /etc/lvm/lvm.conf    // edit around line 130
filter = [ "a/sdb/", "a/sdc/", "r/.*/" ]
  1. Install the packages
[root@compute1 ~]# yum install -y openstack-cinder targetcli python-keystone
  1. Edit the cinder configuration file
[root@compute1 ~]# sed -i.bak -e '/^$/d' -e '/^#.*/d' /etc/cinder/cinder.conf
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf database connection mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT rpc_backend rabbit
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf oslo_messaging_rabbit rabbit_host controller
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf oslo_messaging_rabbit rabbit_userid openstack
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf oslo_messaging_rabbit rabbit_password RABBIT_PASS
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT auth_strategy keystone
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_uri http://controller:5000
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_url http://controller:35357
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken memcached_servers controller:11211
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_type password
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_domain_name default
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken user_domain_name default
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_name service
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken username cinder
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf keystone_authtoken password CINDER_PASS
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT my_ip 192.168.117.15
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT enabled_backends ssd,sata
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT glance_api_servers http://192.168.117.16:9292
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf oslo_concurrency lock_path /var/lib/cinder/tmp
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf ssd volume_driver cinder.volume.drivers.lvm.LVMVolumeDriver
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf ssd volume_group cinder-ssd
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf ssd iscsi_protocol iscsi
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf ssd iscsi_helper lioadm
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf ssd volume_backend_name ssd
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf sata volume_driver cinder.volume.drivers.lvm.LVMVolumeDriver
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf sata volume_group cinder-sata
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf sata iscsi_protocol iscsi
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf sata iscsi_helper lioadm
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf sata volume_backend_name sata
  1. Start the services and enable them at boot
[root@compute1 ~]# systemctl enable --now openstack-cinder-volume.service target.service
  1. On the control node, check that the cinder services are healthy
    (screenshot)

  2. In the web UI, test whether a volume can be created
    (screenshot)
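The same test from the CLI (a sketch; the volume name is an example):
[root@controller ~]# openstack volume create --size 1 test-volume
[root@controller ~]# openstack volume list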


Add a flat network segment

  1. Add a second NIC to each of the three hosts on LAN segment 172.16.0.0/24, and configure ens37 on each.
[root@controller ~]# vim /etc/sysconfig/network-scripts/ifcfg-ens37
TYPE=Ethernet
BOOTPROTO=static
NAME=ens37
DEVICE=ens37
ONBOOT=yes
IPADDR=172.16.0.14
PREFIX=24

[root@compute1 ~]# vim /etc/sysconfig/network-scripts/ifcfg-ens37
TYPE=Ethernet
BOOTPROTO=none
NAME=ens37
DEVICE=ens37
ONBOOT=yes
IPADDR=172.16.0.15
PREFIX=24

[root@compute2 ~]# vim /etc/sysconfig/network-scripts/ifcfg-ens37
TYPE=Ethernet
BOOTPROTO=none
NAME=ens37
DEVICE=ens37
ONBOOT=yes
IPADDR=172.16.0.16
PREFIX=24
  1. On the control node, add the net172_16_0 network to the configuration files
[root@controller ~]# vim /etc/neutron/plugins/ml2/ml2_conf.ini
flat_networks = provider,net172_16_0
[root@controller ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
physical_interface_mappings = provider:ens33,net172_16_0:ens37
[root@controller ~]# systemctl restart neutron-server neutron-linuxbridge-agent
  1. On compute nodes 1 and 2, add the net172_16_0 network to the configuration files
[root@compute1 ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
physical_interface_mappings = provider:ens33,net172_16_0:ens37
[root@compute1 ~]# systemctl restart neutron-linuxbridge-agent

[root@compute2 ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
physical_interface_mappings = provider:ens33,net172_16_0:ens37
[root@compute2 ~]# systemctl restart neutron-linuxbridge-agent
  1. Create the network on the control node
[root@controller ~]# neutron net-create --shared --provider:physical_network net172_16_0 --provider:network_type flat net172_16_0
[root@controller ~]# neutron subnet-create --name sub1_net172_16_0 --allocation-pool start=172.16.0.101,end=172.16.0.250 --dns-nameserver 223.5.5.5 --gateway 172.16.0.2 net172_16_0 172.16.0.0/24
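A sketch to confirm both flat networks now exist:
[root@controller ~]# neutron net-list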

Attach NFS storage to cinder

  1. On compute node 2, install and configure NFS, exporting the /data directory
[root@compute2 ~]# yum install -y nfs-utils
[root@compute2 ~]# vim /etc/exports
/data 192.168.117.0/24(rw,async,no_root_squash,no_all_squash)
[root@compute2 ~]# mkdir /data/
[root@compute2 ~]# systemctl enable --now rpcbind nfs
  1. On compute node 1, configure cinder to support NFS
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT enabled_backends ssd,sata,nfs
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf nfs volume_driver cinder.volume.drivers.nfs.NfsDriver
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf nfs nfs_shares_config /etc/cinder/nfs_shares
[root@compute1 ~]# openstack-config --set /etc/cinder/cinder.conf nfs volume_backend_name nfs
  1. Configure the shared directory to mount
[root@compute1 ~]# vim /etc/cinder/nfs_shares
192.168.117.16:/data
[root@compute1 ~]# systemctl restart openstack-cinder-volume
  1. On the control node, check whether a new cinder service has been added
    (screenshot)
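A sketch of the check; a compute1@nfs service row should now appear:
[root@controller ~]# cinder service-list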

  2. Test by creating an nfs volume type and an nfs volume
    (screenshots)


Make the control node double as a compute node

  1. Install the package
[root@controller ~]# yum install -y openstack-nova-compute
  1. On the control node, edit the nova configuration file
[root@controller ~]# openstack-config --set /etc/nova/nova.conf vnc enabled True
[root@controller ~]# openstack-config --set /etc/nova/nova.conf vnc vncserver_listen 0.0.0.0
[root@controller ~]# openstack-config --set /etc/nova/nova.conf vnc vncserver_proxyclient_address '$my_ip'
[root@controller ~]# openstack-config --set /etc/nova/nova.conf vnc novncproxy_base_url http://controller:6080/vnc_auto.html
  1. Start the services
[root@controller ~]# systemctl enable --now libvirtd openstack-nova-compute
  1. Check whether a new nova service has been added
    (screenshot)
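A sketch of the check; a nova-compute row for the controller host should now appear:
[root@controller ~]# nova service-list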

Cold migration of instances

  1. To avoid failures caused by insufficient resources, add 1 GB of RAM to both compute nodes, and change the nova user's shell to allow login
[root@compute1 ~]# usermod -s /bin/bash nova

[root@compute2 ~]# usermod -s /bin/bash nova
  1. Log in as the nova user and copy the skeleton profile files to fix the -bash-4.2$ prompt
[root@compute1 ~]# su - nova
-bash-4.2$ cp /etc/skel/.bash* .
-bash-4.2$ exit
[root@compute1 ~]# su - nova

[root@compute2 ~]# su - nova
-bash-4.2$ cp /etc/skel/.bash* .
-bash-4.2$ exit
  1. As the nova user, set up passwordless SSH login
[nova@compute1 ~]$ ssh-keygen -t rsa -q -N ''
Enter file in which to save the key (/var/lib/nova/.ssh/id_rsa):
[nova@compute1 ~]$ cd .ssh/
[nova@compute1 .ssh]$ cp -af id_rsa.pub authorized_keys
  1. On compute node 1, push the nova user's .ssh directory to the same location on compute node 2
[nova@compute1 ~]$ scp -rp /var/lib/nova/.ssh root@192.168.117.16:/var/lib/nova/.ssh
  1. On compute node 2, fix the ownership of .ssh
[root@compute2 ~]# chown -R nova:nova /var/lib/nova/.ssh/
  1. As the nova user, test passwordless login in both directions
    (screenshots)
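A sketch of the test, using IPs since the hosts files may not contain each other's names (the first connection prompts to accept the host key):
[nova@compute1 ~]$ ssh nova@192.168.117.16 hostname
[nova@compute2 ~]$ ssh nova@192.168.117.15 hostname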

  2. On the control node, add the scheduler filters to the nova configuration file and restart the service

[root@controller ~]# openstack-config --set /etc/nova/nova.conf DEFAULT scheduler_default_filters RetryFilter,AvailabilityZoneFilter,RamFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
[root@controller ~]# systemctl restart openstack-nova-scheduler
  1. On both compute nodes, allow resizing to the same host and restart the service
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT allow_resize_to_same_host True
[root@compute1 ~]# systemctl restart openstack-nova-compute

[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf DEFAULT allow_resize_to_same_host True
[root@compute2 ~]# systemctl restart openstack-nova-compute
  1. Make sure both compute nodes have the boot-failure bug fix applied, or the migration will fail
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf libvirt cpu_mode none
[root@compute1 ~]# openstack-config --set /etc/nova/nova.conf libvirt virt_type qemu
[root@compute1 ~]# systemctl restart openstack-nova-compute

[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf libvirt cpu_mode none
[root@compute2 ~]# openstack-config --set /etc/nova/nova.conf libvirt virt_type qemu
[root@compute2 ~]# systemctl restart openstack-nova-compute
  1. Pre-existing instances cannot be migrated; create a new instance to test the migration
    (screenshots)
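The same test from the CLI (a sketch; the instance name is an example). nova migrate performs the cold migration; once the instance reaches VERIFY_RESIZE, confirm it:
[root@controller ~]# nova migrate test-instance
[root@controller ~]# nova resize-confirm test-instance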

Configure layer-3 VXLAN networking

  1. Delete all instances

  2. Add a third NIC to each of the three hosts on LAN segment 172.16.1.0/24 and configure ens38 on each

[root@controller ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens38
TYPE=Ethernet
BOOTPROTO=static
NAME=ens38
DEVICE=ens38
ONBOOT=yes
IPADDR=172.16.1.14
PREFIX=24

[root@compute1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens38
TYPE=Ethernet
BOOTPROTO=none
NAME=ens38
DEVICE=ens38
ONBOOT=yes
IPADDR=172.16.1.15
PREFIX=24

[root@compute2 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens38
TYPE=Ethernet
BOOTPROTO=none
NAME=ens38
DEVICE=ens38
ONBOOT=yes
IPADDR=172.16.1.16
PREFIX=24
  1. On the control node, edit the neutron configuration file
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT core_plugin ml2
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT service_plugins router
[root@controller ~]# openstack-config --set /etc/neutron/neutron.conf DEFAULT allow_overlapping_ips True
  1. Edit the ML2 plugin configuration
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers flat,vlan,vxlan
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers linuxbridge,l2population
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_vxlan vni_ranges '1:100000'
  1. Edit the Linux bridge agent configuration
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan True
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan local_ip 172.16.1.14
[root@controller ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan l2_population True
  1. Edit the layer-3 agent configuration
[root@controller ~]# sed -i.bak -e '/^$/d' -e '/^#.*/d' /etc/neutron/l3_agent.ini 
[root@controller ~]# openstack-config --set /etc/neutron/l3_agent.ini DEFAULT interface_driver neutron.agent.linux.interface.BridgeInterfaceDriver
[root@controller ~]# openstack-config --set /etc/neutron/l3_agent.ini DEFAULT external_network_bridge
  1. Restart the services and enable them at boot
[root@controller ~]# systemctl restart neutron-server neutron-linuxbridge-agent neutron-dhcp-agent neutron-metadata-agent
[root@controller ~]# systemctl enable --now neutron-l3-agent
  1. On both compute nodes, edit the Linux bridge agent configuration
[root@compute1 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan True
[root@compute1 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan local_ip 172.16.1.15
[root@compute1 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan l2_population True
[root@compute1 ~]# systemctl restart neutron-linuxbridge-agent

[root@compute2 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan True
[root@compute2 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan local_ip 172.16.1.16
[root@compute2 ~]# openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan l2_population True
[root@compute2 ~]# systemctl restart neutron-linuxbridge-agent
  1. Check the 'External Network' option on the provider network.
    (screenshot)

  2. Add a network named vpc-1
    (screenshot)

  3. On compute node 1, edit the dashboard configuration to enable routing
[root@compute1 ~]# vim /etc/openstack-dashboard/local_settings
OPENSTACK_NEUTRON_NETWORK = {
    'enable_router': True,
    ...
}
  1. Restart the service and refresh the web UI
[root@compute1 ~]# systemctl restart httpd
  1. Create a router named vpc
    (screenshot)

  2. In the network topology view, add an interface to the vpc router
    (screenshot)

  3. Create two instances on the vpc network
    (screenshot)

  4. Associate a floating IP with the vpc-2 instance
    (screenshots)

  5. Test connecting to vpc-2 via Xshell
    (screenshot)
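An equivalent test from any host on the 192.168.117.0/24 segment (a sketch; <floating-ip> is the address associated above, and cirros is the default user of the cirros image):
$ ssh cirros@<floating-ip>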
