Installing OpenStack (Stein) on Ubuntu: A Detailed Walkthrough

Host Resolution and IP Setup

# Set the hostname
hostnamectl set-hostname controller
# Host name resolution
vi /etc/hosts
# Add the following entries
10.0.0.11 controller
10.0.0.22 compute01
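
A quick sanity check (assuming the entries above are in place on both nodes): confirm that the names resolve and the nodes can reach each other.

getent hosts controller compute01
ping -c 2 compute01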

NTP (Network Time Protocol)

# Install the chrony package
apt install chrony
vi /etc/chrony/chrony.conf
# Add the following (controller node)
server ntp1.aliyun.com iburst
allow 10.0.0.0/24
# Add the following (compute node)
server 10.0.0.11 iburst

# Restart the service
service chrony restart
# Verify
netstat -lntup	# ports 123 and 323 should be open; a compute node opens only 323
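
If netstat is inconclusive, chronyc gives a more direct answer (a minimal check; the source shown should match the server configured above).

chronyc sources	# the configured NTP source should be listed and reachable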

OpenStack Packages

# Enable the OpenStack Stein package archive
add-apt-repository cloud-archive:stein
apt update && apt dist-upgrade
# Install the OpenStack client
apt install python3-openstackclient
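
A trivial smoke test of the client install; it should print a version string without errors.

openstack --version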

SQL Database Installation

# Install the database server and the Python MySQL module
apt install mariadb-server python-pymysql
# Create and edit the OpenStack database configuration file
vi /etc/mysql/mariadb.conf.d/99-openstack.cnf
# Add the following
[mysqld]
bind-address = 10.0.0.11

default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8

# Restart the service
service mysql restart
# Secure the installation
mysql_secure_installation
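
Before moving on, it is worth confirming the bind address and root login (a minimal check; the root password is the one set during mysql_secure_installation).

ss -lnt | grep 3306	# MariaDB should be listening on 10.0.0.11
mysql -u root -p -e "SHOW DATABASES;"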

Message Queue (RabbitMQ)

# Install the message queue service
apt install rabbitmq-server
# Add the openstack user (replace RABBIT_PASS with a suitable password)
rabbitmqctl add_user openstack RABBIT_PASS
# Grant the openstack user configure, write, and read access
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
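
A quick check that the user and its permissions were applied (standard rabbitmqctl subcommands).

rabbitmqctl list_users	# the openstack user should be listed
rabbitmqctl list_permissions	# openstack should have ".*" for configure, write, and read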

Cache (memcached)

# Install the token-caching service
apt install memcached python-memcache
vi /etc/memcached.conf
# Change the listen address to the management IP
-l 10.0.0.11

# Restart the service
service memcached restart
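
To confirm the new listen address took effect (a minimal check):

ss -lnt | grep 11211	# memcached should now be bound to 10.0.0.11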

Keystone Installation

# Create the database and grant privileges
mysql -u root -p
CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' \
IDENTIFIED BY 'KEYSTONE_DBPASS';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \
IDENTIFIED BY 'KEYSTONE_DBPASS';

# Install the keystone package
apt install keystone
# Strip the comments from the configuration file to make it easier to edit
cp /etc/keystone/keystone.conf /etc/keystone/keystone.conf.bak
grep -Ev '^$|#' /etc/keystone/keystone.conf.bak > /etc/keystone/keystone.conf
vi /etc/keystone/keystone.conf
# Configure the following
[database]
# ...
connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone
[token]
# ...
provider = fernet

# Populate the Identity service database
su -s /bin/sh -c "keystone-manage db_sync" keystone
# Initialize the Fernet key repositories
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
# Bootstrap the Identity service
keystone-manage bootstrap --bootstrap-password ADMIN_PASS \
  --bootstrap-admin-url http://controller:5000/v3/ \
  --bootstrap-internal-url http://controller:5000/v3/ \
  --bootstrap-public-url http://controller:5000/v3/ \
  --bootstrap-region-id RegionOne
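
As a quick check that the Fernet repository was initialized, the key directory should now hold keys 0 and 1.

ls /etc/keystone/fernet-keys/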

Configure the Apache HTTP Server

vi /etc/apache2/apache2.conf
# Add the following
ServerName controller

# Restart the service
service apache2 restart
# Set up temporary admin credentials
export OS_USERNAME=admin
export OS_PASSWORD=ADMIN_PASS
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3

Create Domains, Projects, Users, and Roles

# Create a new domain
openstack domain create --description "An Example Domain" example
# Create the service project
openstack project create --domain default --description "Service Project" service
# Create the myproject project and the myuser user
openstack project create --domain default --description "Demo Project" myproject
openstack user create --domain default --password-prompt myuser
# Create the myrole role
openstack role create myrole
# Add the myrole role to the myproject project and myuser user
openstack role add --project myproject --user myuser myrole

Verification

# Unset the temporary OS_AUTH_URL and OS_PASSWORD environment variables
unset OS_AUTH_URL OS_PASSWORD
# Request an authentication token as the admin user
openstack --os-auth-url http://controller:5000/v3 \
  --os-project-domain-name Default --os-user-domain-name Default \
  --os-project-name admin --os-username admin token issue
# Request an authentication token as the myuser user created earlier
openstack --os-auth-url http://controller:5000/v3 \
  --os-project-domain-name Default --os-user-domain-name Default \
  --os-project-name myproject --os-username myuser token issue

Create OpenStack Client Environment Scripts

# Create and edit the admin-openrc file with the following content
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=ADMIN_PASS
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2

# Create and edit the demo-openrc file with the following content
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=myproject
export OS_USERNAME=myuser
export OS_PASSWORD=MYUSER_PASS
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2

# Source admin-openrc to populate the environment with the Identity service location and the admin project and user credentials
. admin-openrc
# Request an authentication token
openstack token issue

Configure the Glance Service

# Create the database and grant privileges
mysql -u root -p
CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \
  IDENTIFIED BY 'GLANCE_DBPASS';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \
  IDENTIFIED BY 'GLANCE_DBPASS';

# Load the admin credentials
. admin-openrc

# Create the glance user
openstack user create --domain default --password-prompt glance
# Add the admin role to the glance user and the service project
openstack role add --project service --user glance admin
# Create the glance service entity
openstack service create --name glance --description "OpenStack Image" image
# Create the Image service API endpoints
openstack endpoint create --region RegionOne image public http://controller:9292
openstack endpoint create --region RegionOne image internal http://controller:9292
openstack endpoint create --region RegionOne image admin http://controller:9292

# Install glance
apt install glance
# Edit /etc/glance/glance-api.conf
cp /etc/glance/glance-api.conf /etc/glance/glance-api.conf.bak
grep -Ev '^$|#' /etc/glance/glance-api.conf.bak > /etc/glance/glance-api.conf
vi /etc/glance/glance-api.conf
# Add the following
[database]
# ...
connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance

# Configure Identity service access
[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = GLANCE_PASS

[paste_deploy]
# ...
flavor = keystone

# Configure local file system storage and the image file location
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/


# Edit /etc/glance/glance-registry.conf
cp /etc/glance/glance-registry.conf /etc/glance/glance-registry.conf.bak
grep -Ev '^$|#' /etc/glance/glance-registry.conf.bak > /etc/glance/glance-registry.conf
vi /etc/glance/glance-registry.conf

# Add the following
[database]
# ...
connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance

[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = GLANCE_PASS

[paste_deploy]
# ...
flavor = keystone

# Populate the Image service database
su -s /bin/sh -c "glance-manage db_sync" glance

# Restart the services
service glance-registry restart		# deprecated in releases after Stein
service glance-api restart

Verification

# Verify operation of the Image service using CirrOS, a small Linux image that helps test OpenStack deployments

. admin-openrc
# Download the source image
wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
# Upload the image to the Image service using the QCOW2 disk format, bare container format, and public visibility
openstack image create "cirros" \
  --file cirros-0.4.0-x86_64-disk.img \
  --disk-format qcow2 --container-format bare \
  --public
# Confirm the upload and verify the image attributes
openstack image list
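
For a closer look at the image uploaded above, openstack image show reports its full attributes; the status should be "active".

openstack image show cirros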

Install Placement

# Create the database and grant privileges (run in the mysql -u root -p shell)
CREATE DATABASE placement;
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' \
  IDENTIFIED BY 'PLACEMENT_DBPASS';
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' \
  IDENTIFIED BY 'PLACEMENT_DBPASS';
# Load the admin credentials
. admin-openrc
# Create the placement user
openstack user create --domain default --password-prompt placement
# Add the placement user to the service project with the admin role
openstack role add --project service --user placement admin
# Create the Placement API service entity
openstack service create --name placement \
  --description "Placement API" placement
# Create the Placement API service endpoints
openstack endpoint create --region RegionOne \
  placement public http://controller:8778
openstack endpoint create --region RegionOne \
  placement internal http://controller:8778
openstack endpoint create --region RegionOne \
  placement admin http://controller:8778
# Install the package
apt install placement-api
# Edit /etc/placement/placement.conf
cp /etc/placement/placement.conf /etc/placement/placement.conf.bak
grep -Ev '^$|#' /etc/placement/placement.conf.bak > /etc/placement/placement.conf
vi /etc/placement/placement.conf
# Add the following
[placement_database]
# ...
connection = mysql+pymysql://placement:PLACEMENT_DBPASS@controller/placement

[api]
# ...
auth_strategy = keystone

[keystone_authtoken]
# ...
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
password = PLACEMENT_PASS

# Populate the placement database
su -s /bin/sh -c "placement-manage db sync" placement
# Restart the web server
service apache2 restart

Verification

. admin-openrc
# Run the status checks
placement-status upgrade check
# Install the osc-placement plugin
pip install osc-placement
# List available resource classes and traits
openstack --os-placement-api-version 1.2 resource class list --sort-column name
openstack --os-placement-api-version 1.6 trait list --sort-column name

Install Nova

# Controller node
# Create the databases and grant privileges (run in the mysql -u root -p shell)
CREATE DATABASE nova_api;
CREATE DATABASE nova;
CREATE DATABASE nova_cell0;
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' \
  IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' \
  IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \
  IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \
  IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' \
  IDENTIFIED BY 'NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' \
  IDENTIFIED BY 'NOVA_DBPASS';
  
# Load the admin credentials
. admin-openrc
# Create the nova user
openstack user create --domain default --password-prompt nova
# Add the admin role to the nova user
openstack role add --project service --user nova admin
# Create the nova service entity
openstack service create --name nova \
  --description "OpenStack Compute" compute
# Create the Compute API service endpoints
openstack endpoint create --region RegionOne \
  compute public http://controller:8774/v2.1
openstack endpoint create --region RegionOne \
  compute internal http://controller:8774/v2.1
openstack endpoint create --region RegionOne \
  compute admin http://controller:8774/v2.1
# Install the packages
apt install nova-api nova-conductor nova-consoleauth \
  nova-novncproxy nova-scheduler
# Edit /etc/nova/nova.conf
cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
grep -Ev '^$|#' /etc/nova/nova.conf.bak > /etc/nova/nova.conf
vi /etc/nova/nova.conf
# Add the following
[api_database]
# ...
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api

[database]
# ...
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova

[DEFAULT]
# ...
transport_url = rabbit://openstack:RABBIT_PASS@controller

[api]
# ...
auth_strategy = keystone

[keystone_authtoken]
# ...
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = NOVA_PASS

[DEFAULT]
# ...
my_ip = 10.0.0.11

[DEFAULT]
# ...
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver

# Configure the [neutron] section
[neutron]
# ...
url = http://controller:9696
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = NEUTRON_PASS
service_metadata_proxy = true
metadata_proxy_shared_secret = METADATA_SECRET

[vnc]
# ...
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip

[glance]
# ...
api_servers = http://controller:9292

[oslo_concurrency]
# ...
lock_path = /var/lib/nova/tmp

[placement]
# ...
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = PLACEMENT_PASS

# Populate the nova-api database
su -s /bin/sh -c "nova-manage api_db sync" nova
# Register the cell0 database
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
# Create the cell1 cell
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
# Populate the nova database
su -s /bin/sh -c "nova-manage db sync" nova
# Verify that cell0 and cell1 are registered correctly
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova

# Restart the services
service nova-api restart
service nova-consoleauth restart
service nova-scheduler restart
service nova-conductor restart
service nova-novncproxy restart

# Compute node
apt install nova-compute
# Edit /etc/nova/nova.conf
cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
grep -Ev '^$|#' /etc/nova/nova.conf.bak > /etc/nova/nova.conf
vi /etc/nova/nova.conf
# Add the following
[DEFAULT]
# ...
transport_url = rabbit://openstack:RABBIT_PASS@controller

[api]
# ...
auth_strategy = keystone

[keystone_authtoken]
# ...
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = NOVA_PASS

[DEFAULT]
# ...
my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS

[DEFAULT]
# ...
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver

[vnc]
# ...
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html

[glance]
# ...
api_servers = http://controller:9292

[oslo_concurrency]
# ...
lock_path = /var/lib/nova/tmp

[placement]
# ...
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = PLACEMENT_PASS

# Determine whether the compute node supports hardware acceleration for virtual machines
egrep -c '(vmx|svm)' /proc/cpuinfo
# If the command returns zero, the node does not support hardware acceleration;
# in that case, edit the [libvirt] section of /etc/nova/nova-compute.conf to use QEMU instead of KVM
[libvirt]
# ...
virt_type = qemu
# Restart the Compute service
service nova-compute restart

# Add the compute node to the cell database (run on the controller node)
openstack compute service list --service nova-compute
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
# Rather than running nova-manage cell_v2 discover_hosts manually whenever a compute node
# is added, you can set a discovery interval in /etc/nova/nova.conf on the controller
[scheduler]
discover_hosts_in_cells_interval = 300
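
For the new interval to take effect, the scheduler service presumably needs a restart:

service nova-scheduler restart
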
# Verification
# List the service components to verify that each process launched and registered successfully
openstack compute service list
# List the API endpoints in the Identity service to verify connectivity to it
openstack catalog list
# List images in the Image service to verify connectivity to it
openstack image list
# Check that the cells and the Placement API are working correctly
nova-status upgrade check

Install Neutron

# Create the database and grant privileges (run in the mysql -u root -p shell)
CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' \
  IDENTIFIED BY 'NEUTRON_DBPASS';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \
  IDENTIFIED BY 'NEUTRON_DBPASS';
# Load the admin credentials
. admin-openrc
# Create the neutron user
openstack user create --domain default --password-prompt neutron
# Add the admin role to the neutron user
openstack role add --project service --user neutron admin
# Create the neutron service entity
openstack service create --name neutron \
  --description "OpenStack Networking" network
# Create the Networking service API endpoints
openstack endpoint create --region RegionOne \
  network public http://controller:9696
openstack endpoint create --region RegionOne \
  network internal http://controller:9696
openstack endpoint create --region RegionOne \
  network admin http://controller:9696
  
# Provider network configuration (controller node)
apt install neutron-server neutron-plugin-ml2 \
  neutron-linuxbridge-agent neutron-dhcp-agent \
  neutron-metadata-agent
# Edit /etc/neutron/neutron.conf
cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
grep -Ev '^$|#' /etc/neutron/neutron.conf.bak > /etc/neutron/neutron.conf
vi /etc/neutron/neutron.conf
# Add the following
[database]
# ...
connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron

[DEFAULT]
# ...
core_plugin = ml2
service_plugins =

[DEFAULT]
# ...
transport_url = rabbit://openstack:RABBIT_PASS@controller

[DEFAULT]
# ...
auth_strategy = keystone

[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = NEUTRON_PASS

[DEFAULT]
# ...
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true

[nova]
# ...
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = NOVA_PASS

[oslo_concurrency]
# ...
lock_path = /var/lib/neutron/tmp

# Edit /etc/neutron/plugins/ml2/ml2_conf.ini
cp /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini.bak
grep -Ev '^$|#' /etc/neutron/plugins/ml2/ml2_conf.ini.bak > /etc/neutron/plugins/ml2/ml2_conf.ini
vi /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
# ...
type_drivers = flat,vlan
tenant_network_types =
mechanism_drivers = linuxbridge
extension_drivers = port_security

[ml2_type_flat]
# ...
flat_networks = provider

[securitygroup]
# ...
enable_ipset = true

# Edit /etc/neutron/plugins/ml2/linuxbridge_agent.ini
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak
grep -Ev '^$|#' /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak > /etc/neutron/plugins/ml2/linuxbridge_agent.ini
vi /etc/neutron/plugins/ml2/linuxbridge_agent.ini

[linux_bridge]
physical_interface_mappings = provider:PROVIDER_INTERFACE_NAME
[vxlan]
enable_vxlan = false
[securitygroup]
# ...
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
# Verify that the following sysctl values are set to 1 (this requires the br_netfilter kernel module)
sysctl net.bridge.bridge-nf-call-iptables
sysctl net.bridge.bridge-nf-call-ip6tables
# Edit /etc/neutron/dhcp_agent.ini
[DEFAULT]
# ...
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true

# Configure the metadata agent
# Edit /etc/neutron/metadata_agent.ini
cp /etc/neutron/metadata_agent.ini /etc/neutron/metadata_agent.ini.bak
grep -Ev '^$|#' /etc/neutron/metadata_agent.ini.bak > /etc/neutron/metadata_agent.ini
vi /etc/neutron/metadata_agent.ini

[DEFAULT]
# ...
nova_metadata_host = controller
metadata_proxy_shared_secret = METADATA_SECRET

# Populate the database
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
  --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

# Restart the Compute API service
service nova-api restart

# Restart the Networking services
service neutron-server restart
service neutron-linuxbridge-agent restart
service neutron-dhcp-agent restart
service neutron-metadata-agent restart


# Compute node
apt install neutron-linuxbridge-agent
# Edit /etc/neutron/neutron.conf
cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
grep -Ev '^$|#' /etc/neutron/neutron.conf.bak > /etc/neutron/neutron.conf
vi /etc/neutron/neutron.conf

[DEFAULT]
# ...
transport_url = rabbit://openstack:RABBIT_PASS@controller
[DEFAULT]
# ...
auth_strategy = keystone

[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = NEUTRON_PASS

[oslo_concurrency]
# ...
lock_path = /var/lib/neutron/tmp

# Configure the Linux bridge agent
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak
grep -Ev '^$|#' /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak > /etc/neutron/plugins/ml2/linuxbridge_agent.ini
vi /etc/neutron/plugins/ml2/linuxbridge_agent.ini
# Add the following (ens33 is the provider interface name on this compute node)
[linux_bridge]
physical_interface_mappings = provider:ens33
[vxlan]
enable_vxlan = false
[securitygroup]
# ...
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
# Verify that the following sysctl values are set to 1 (this requires the br_netfilter kernel module)
sysctl net.bridge.bridge-nf-call-iptables
sysctl net.bridge.bridge-nf-call-ip6tables
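
After these configuration changes, restart the services on the compute node so they take effect:

service nova-compute restart
service neutron-linuxbridge-agent restart
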
# List the loaded extensions to verify that the neutron-server process launched successfully (run on the controller)
openstack extension list --network
# List the agents to verify they launched successfully
openstack network agent list

Install the Dashboard (Horizon)

apt install openstack-dashboard
# Edit /etc/openstack-dashboard/local_settings.py
OPENSTACK_HOST = "controller"
# Hosts allowed to access the dashboard; use ['*'] to accept all
ALLOWED_HOSTS = ['one.example.com', 'two.example.com']
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'

CACHES = {
    'default': {
         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
         'LOCATION': 'controller:11211',
    }
}
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_API_VERSIONS = {
    "identity": 3,
    "image": 2,
    "volume": 3,
}
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
OPENSTACK_NEUTRON_NETWORK = {
    ...
    'enable_router': False,
    'enable_quotas': False,
    'enable_ipv6': False,
    'enable_distributed_router': False,
    'enable_ha_router': False,
    'enable_lb': False,
    'enable_firewall': False,
    'enable_vpn': False,
    'enable_fip_topology_check': False,
}
TIME_ZONE = "Asia/Shanghai"
# Reload the web server configuration
service apache2 reload
# Verify: the dashboard should respond at http://controller/horizon
curl http://controller/horizon

Install Cinder

# Create the database and grant privileges (run in the mysql -u root -p shell)
CREATE DATABASE cinder;
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \
  IDENTIFIED BY 'CINDER_DBPASS';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \
  IDENTIFIED BY 'CINDER_DBPASS';
# Load the admin credentials
. admin-openrc
# Create a cinder user
openstack user create --domain default --password-prompt cinder
# Add the admin role to the cinder user
openstack role add --project service --user cinder admin
# Create the cinderv2 and cinderv3 service entities
openstack service create --name cinderv2 \
  --description "OpenStack Block Storage" volumev2
openstack service create --name cinderv3 \
  --description "OpenStack Block Storage" volumev3
# Create the Block Storage service API endpoints
openstack endpoint create --region RegionOne \
  volumev2 public http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne \
  volumev2 internal http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne \
  volumev2 admin http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne \
  volumev3 public http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne \
  volumev3 internal http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne \
  volumev3 admin http://controller:8776/v3/%\(project_id\)s
# Install the packages
apt install cinder-api cinder-scheduler
# Edit /etc/cinder/cinder.conf
cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
grep -Ev '^$|#' /etc/cinder/cinder.conf.bak > /etc/cinder/cinder.conf
vi /etc/cinder/cinder.conf
# Add the following
[database]
# ...
connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
[DEFAULT]
# ...
transport_url = rabbit://openstack:RABBIT_PASS@controller
[DEFAULT]
# ...
auth_strategy = keystone
[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = CINDER_PASS
[DEFAULT]
# ...
my_ip = 10.0.0.11
[oslo_concurrency]
# ...
lock_path = /var/lib/cinder/tmp
# Populate the database
su -s /bin/sh -c "cinder-manage db sync" cinder
# Configure the Compute service to use Block Storage:
# edit /etc/nova/nova.conf and add
[cinder]
os_region_name = RegionOne
# Restart the services
service nova-api restart
service cinder-scheduler restart
service apache2 restart

Install and Configure the Storage Node

apt install lvm2 thin-provisioning-tools
# Add two disks to the node, then rescan the SCSI bus so they are detected
echo "- - -" > /sys/class/scsi_host/host0/scan
# Create the LVM physical volumes /dev/sdb and /dev/sdc
pvcreate /dev/sdb
pvcreate /dev/sdc
# Create the LVM volume groups cinder-volumes and cinder-ssd
vgcreate cinder-volumes /dev/sdb
vgcreate cinder-ssd /dev/sdc
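
A quick check that the physical volumes and volume groups were created (standard LVM reporting commands):

pvs	# /dev/sdb and /dev/sdc should appear
vgs	# cinder-volumes and cinder-ssd should be listed
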
# Edit /etc/lvm/lvm.conf
cp /etc/lvm/lvm.conf /etc/lvm/lvm.conf.bak
grep -Ev '^$|#' /etc/lvm/lvm.conf.bak > /etc/lvm/lvm.conf
vi /etc/lvm/lvm.conf
devices {
    # Accept the cinder devices created above and reject everything else
    filter = [ "a/sdb/", "a/sdc/", "r/.*/" ]
}
# If the storage node uses LVM on the operating system disk, that device must also be added
# to the filter. For example, if /dev/sda contains the operating system:
# filter = [ "a/sda/", "a/sdb/", "a/sdc/", "r/.*/" ]
# If your compute nodes use LVM on the operating system disk, modify the filter in
# /etc/lvm/lvm.conf on those nodes as well, so it includes only the operating system disk:
# filter = [ "a/sda/", "r/.*/" ]
# Install and configure the components
apt install cinder-volume
# Edit /etc/cinder/cinder.conf
[database]
# ...
connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
[DEFAULT]
# ...
transport_url = rabbit://openstack:RABBIT_PASS@controller
[DEFAULT]
# ...
auth_strategy = keystone

[keystone_authtoken]
# ...
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = CINDER_PASS
[DEFAULT]
# ...
my_ip = 10.0.0.22
[lvm]
# ...
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
target_protocol = iscsi
target_helper = tgtadm
[DEFAULT]
# ...
enabled_backends = lvm
[DEFAULT]
# ...
glance_api_servers = http://controller:9292
[oslo_concurrency]
# ...
lock_path = /var/lib/cinder/tmp
# Restart the Block Storage volume services
service tgt restart
service cinder-volume restart
# Verify
openstack volume service list
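
As an end-to-end check, try creating a small test volume (the name test-volume is illustrative); it should reach the "available" status within a few seconds.

. admin-openrc
openstack volume create --size 1 test-volume
openstack volume list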