环境说明
192.168.3.241 controller = 控制节点
192.168.3.170 compute = 计算节点/块节点/对象节点
192.168.3.171 block = 计算节点/块节点/对象节点
准备工作:
1.根据"环境说明"配置hosts和hostname
2.修订yum源(阿里,163的都可以)
3.保证可以访问互联网
部署工作:
A.所有节点需要操作
1.关闭该服务,否则yum clean会卡死,属于系统bug(centos7.2版本左右的)
systemctl stop initial-setup-text
2.防火墙操作
systemctl stop firewalld.service
systemctl disable firewalld.service
systemctl status firewalld.service
3.关闭selinux安全服务
setenforce 0
getenforce
# 注意:/etc/sysconfig/selinux 是指向 /etc/selinux/config 的软链接,sed -i 会用临时文件重命名覆盖,
# 把软链接替换成普通文件而真实配置未变,因此直接修改真实文件 /etc/selinux/config
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
grep SELINUX=disabled /etc/selinux/config
4.安装时间同步NTP服务
yum install chrony -y
vim /etc/chrony.conf
--参考网络配置
systemctl restart chronyd.service
systemctl status chronyd.service
systemctl enable chronyd.service
systemctl list-unit-files |grep chronyd.service
5.修订时区
timedatectl set-timezone Asia/Shanghai
chronyc sources
6.安装openstack-更新yum
yum install centos-release-openstack-rocky -y
yum clean all
yum makecache
7.安装客户端软件
yum install python-openstackclient openstack-selinux -y
B.控制节点安装服务
10.安装数据库
yum install mariadb mariadb-server python2-PyMySQL -y
10.1创建并且编辑文件
vi /etc/my.cnf.d/openstack.cnf
内容:
1.绑定地址
2.默认存储引擎
3.使用独享表空间模式,每一个表都会建一个表空间,都会有索引文件,
查索引快,共享表空间,共用一个表空间和索引,如果有损坏很难修复,比如说zabbix用到的数据库如果不使用的独享表空间,很难进行优化
4.
[mysqld]
bind-address = 192.168.3.241
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
10.2增加开机启动并启动服务
systemctl enable mariadb.service
systemctl start mariadb.service
systemctl list-unit-files |grep mariadb.service
10.3启动数据库服务的初始安全设置(root/wtoe@123456)
mysql_secure_installation
11.安装消息队列服务rabbitMQ
11.1 命令
yum install rabbitmq-server -y
11.2 添加开机启动和服务
systemctl enable rabbitmq-server.service
systemctl start rabbitmq-server.service
11.3 增加访问rabbitMQ的用户和权限
rabbitmqctl add_user openstack wtoe@123456
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
# 说明:上一条命令默认作用于"/"虚拟主机,下面显式指定 -p "/" 的命令与其等效,二者执行其一即可
rabbitmqctl set_permissions -p "/" openstack ".*" ".*" ".*"
11.4 启动web管理
rabbitmq-plugins list
rabbitmq-plugins enable rabbitmq_management
systemctl restart rabbitmq-server.service
rabbitmq-plugins list
访问地址:http://192.168.3.241:15672
#默认用户名密码都是guest
12.安装缓存/NoSql服务
yum install memcached python-memcached -y
编辑配置文件
vi /etc/sysconfig/memcached
修订最后一行
OPTIONS="-l 127.0.0.1,::1" 为
OPTIONS="-l 127.0.0.1,::1,controller"
# 如果没有启用IPv6地址需要删掉::1的地址绑定
增加开机启动和服务启动
systemctl enable memcached.service
systemctl start memcached.service
13.安装etcd--服务发现系统
yum install etcd -y
编辑配置文件
vi /etc/etcd/etcd.conf
修订内容
# 注意:以下配置中的IP地址不能用controller主机名替代,etcd无法解析
#[Member]
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://192.168.3.241:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.3.241:2379"
ETCD_NAME="controller"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.3.241:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.3.241:2379"
ETCD_INITIAL_CLUSTER="controller=http://192.168.3.241:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
ETCD_INITIAL_CLUSTER_STATE="new"
启动服务
systemctl enable etcd
systemctl start etcd
######################################keystone认证服务安装####################
14.在控制节点上安装keystone认证服务
先增加root针对所有数据库的权限(需先 mysql -u root -p 进入数据库后执行)
GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY 'wtoe@123456';
14.1 进入数据库
mysql -u root -p
#创建数据库
CREATE DATABASE keystone;
#增加用户配置权限
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'wtoe@123456';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'wtoe@123456';
flush privileges;
show databases;
select user,host from mysql.user;
exit
14.2 在控制节点安装keystone相关软件包
yum install openstack-keystone httpd mod_wsgi -y
yum install openstack-keystone python-keystoneclient openstack-utils -y
14.3 快速修改keystone配置--非官网,需要openstack-utils支持
# 注意(审查意见):密码 wtoe@123456 中包含"@",嵌入连接URL后可能被部分解析器当作用户信息分隔符,
# 稳妥做法是把密码中的"@"百分号编码为"%40"(即 wtoe%40123456),或改用不含"@"的密码——请在环境中验证
openstack-config --set /etc/keystone/keystone.conf database connection mysql+pymysql://keystone:wtoe@123456@controller/keystone
openstack-config --set /etc/keystone/keystone.conf token provider fernet
# 注意:keystone不需要连接rabbitmq
# 查看生效的配置
egrep -v "^#|^$" /etc/keystone/keystone.conf
grep '^[a-z]' /etc/keystone/keystone.conf
# keystone不需要启动,通过http服务进行调用
14.4 初始化同步keystone数据库(44张)
su -s /bin/sh -c "keystone-manage db_sync" keystone
# 同步完成进行连接测试 保证所有需要的表已经建立,否则后面可能无法进行下去
mysql -h192.168.3.241 -ukeystone -pwtoe@123456 -e "use keystone;show tables;"
15.初始化Fernet令牌库
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
16.配置启动Apache(httpd)
16.1 修改httpd主配置文件
vim /etc/httpd/conf/httpd.conf +95
#修订如下
ServerName controller
#检查
cat /etc/httpd/conf/httpd.conf |grep ServerName
#配置虚拟主机
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
#启动服务并增加启动项
systemctl enable httpd.service
systemctl start httpd.service
systemctl list-unit-files |grep httpd.service
#检查
netstat -anptl|grep httpd
# 如果http起不来,需要关闭 selinux 或者安装 yum install openstack-selinux
17.创建 keystone 用户,初始化的服务实体和API端点
# 创建keystone服务实体和身份认证服务,以下三种类型分别为公共的、内部的、管理的。
keystone-manage bootstrap --bootstrap-password wtoe@123456 \
--bootstrap-admin-url http://controller:5000/v3/ \
--bootstrap-internal-url http://controller:5000/v3/ \
--bootstrap-public-url http://controller:5000/v3/ \
--bootstrap-region-id RegionOne
# 说明:官方文档此处密码示例为ADMIN_PASS,首次引导后可重新执行以下命令把密码修订为新值,经验证是可行的
keystone-manage bootstrap --bootstrap-password wtoe@123456
# 运行这条命令,会在keystone数据库中增加以下内容,之前的版本需要手动创建:
1)在endpoint表增加3个服务实体的API端点
2)在local_user表中创建admin用户
3)在project表中创建admin和Default项目(默认域)
4)在role表创建3种角色,admin,member和reader
5)在service表中创建identity服务
配置admin的系统环境变量
export OS_USERNAME=admin
export OS_PASSWORD=wtoe@123456
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
#查看配置
env |grep OS_
# 删除endpoint
# 以前的版本单独创建endpoint可能会出错需要删除,新版本已经优化好,只要系统配置没问题,会自动生成一般也不会出错
openstack endpoint delete [ID]
18.创建keystone的一般实例
# 以下命令会在project表中创建名为example的项目
openstack domain create --description "An Example Domain" example
# 为keystone系统环境创建名为service的项目提供服务
# 用于常规(非管理)任务,需要使用无特权用户
# 以下命令会在project表中创建名为service的项目
openstack project create --domain default --description "Service Project" service
#创建myproject项目和对应的用户及角色
# 作为一般用户(非管理员)的项目,为普通用户提供服务
# 以下命令会在project表中创建名为myproject项目
openstack project create --domain default --description "Demo Project" myproject
#在默认域创建myuser用户
# 使用--password选项为直接配置明文密码,使用--password-prompt选项为交互式输入密码
# 以下命令会在local_user表增加myuser用户
openstack user create --domain default --password-prompt myuser
#密码 wtoe@123456
## openstack user create --domain default --password=wtoe@123456 myuser
#在role表创建myrole角色
openstack role create myrole
#将myrole角色添加到myproject项目中和myuser用户组中
openstack role add --project myproject --user myuser myrole
#验证操作keystone是否安装成功
1)去除环境变量
# 关闭临时认证令牌机制,获取 token,验证keystone配置成功
2)作为管理员用户去请求一个认证的token
# 测试是否可以使用admin账户进行登陆认证,请求认证令牌
openstack --os-auth-url http://controller:5000/v3 \
--os-project-domain-name Default --os-user-domain-name Default \
--os-project-name admin --os-username admin token issue
3)使用普通用户获取认证token
# 以下命令使用“myuser”用户的密码和API端口5000,只允许对身份认证服务API的常规(非管理)访问。
#创建OpenStack客户端环境脚本
# 上面使用环境变量和命令选项的组合通过“openstack”客户端与身份认证服务交互。
# 为了提升客户端操作的效率,OpenStack支持简单的客户端环境变量脚本即OpenRC 文件,我这里使用自定义的文件名
# admin的系统环境变量 vi admin-openrc
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=wtoe@123456
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
#myuser的 vi myuser-openrc
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=myproject
export OS_USERNAME=myuser
export OS_PASSWORD=wtoe@123456
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
#验证
source admin-openrc
openstack token issue
source myuser-openrc
openstack token issue
#####################安装镜像服务##############################
1.创建库
CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'wtoe@123456';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'wtoe@123456';
flush privileges;
exit
2.在keystone上面注册glance
1)在keystone上创建glance用户
# 以下命令在local_user表创建glance用户
openstack user create --domain default --password=wtoe@123456 glance
openstack user list
2)在keystone上将glance用户添加为service项目的admin角色(权限)
openstack role add --project service --user glance admin
3)创建glance镜像服务的实体
# 以下命令在service表中增加glance项目
openstack service create --name glance --description "OpenStack Image" image
openstack service list
4)创建镜像服务的 API 端点(endpoint)
openstack endpoint create --region RegionOne image public http://controller:9292
openstack endpoint create --region RegionOne image internal http://controller:9292
openstack endpoint create --region RegionOne image admin http://controller:9292
# 至此,glance在keystone上面注册完成,可以进行安装
5.安装glance相关软件
2) 安装glance软件
yum install openstack-glance python-glance python-glanceclient -y
3) 执行以下命令可以快速配置glance-api.conf
openstack-config --set /etc/glance/glance-api.conf database connection mysql+pymysql://glance:wtoe@123456@controller/glance
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken www_authenticate_uri http://controller:5000
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_url http://controller:5000
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_type password
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken project_domain_name Default
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken user_domain_name Default
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken project_name service
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken username glance
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken password wtoe@123456
openstack-config --set /etc/glance/glance-api.conf paste_deploy flavor keystone
openstack-config --set /etc/glance/glance-api.conf glance_store stores file,http
openstack-config --set /etc/glance/glance-api.conf glance_store default_store file
openstack-config --set /etc/glance/glance-api.conf glance_store filesystem_store_datadir /var/lib/glance/images/
4)执行以下命令可以快速配置glance-registry.conf
openstack-config --set /etc/glance/glance-registry.conf database connection mysql+pymysql://glance:wtoe@123456@controller/glance
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken www_authenticate_uri http://controller:5000
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_url http://controller:5000
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_type password
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken project_domain_name Default
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken user_domain_name Default
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken project_name service
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken username glance
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken password wtoe@123456
openstack-config --set /etc/glance/glance-registry.conf paste_deploy flavor keystone
# 至此,glance服务安装完毕,该服务需要启动
6.同步glance数据库
1)为glance镜像服务初始化同步数据库
# 生成的相关表(15张表)
su -s /bin/sh -c "glance-manage db_sync" glance
2)同步完成进行连接测试
# 保证所有需要的表已经建立,否则后面可能无法进行下去
mysql -h192.168.3.241 -uglance -pwtoe@123456 -e "use glance;show tables;"
7.启动glance镜像服务
1)启动glance镜像服务、并配置开机自启动
systemctl start openstack-glance-api.service openstack-glance-registry.service
systemctl status openstack-glance-api.service openstack-glance-registry.service
systemctl enable openstack-glance-api.service openstack-glance-registry.service
systemctl list-unit-files |grep openstack-glance*
2)其他命令:重启,停止
systemctl restart openstack-glance-api.service openstack-glance-registry.service
8.检查确认glance安装正确
1)下载镜像
http://download.cirros-cloud.net/ #可手动
cd /home
wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
2)获取管理员权限
. admin-openrc
3)上传镜像到glance
openstack image create "cirros" --file cirros-0.3.5-x86_64-disk.img --disk-format qcow2 --container-format bare --public
openstack image create "CentOS7" --file CentOS-7-x86_64-GenericCloud-1907.qcow2 --disk-format qcow2 --container-format bare --public
4)查看镜像是否上传成功
openstack image list
###############################nova计算服务#############################
1.创建数据和用户
CREATE DATABASE nova_api;
CREATE DATABASE nova;
CREATE DATABASE nova_cell0;
CREATE DATABASE placement;
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'wtoe@123456';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'wtoe@123456';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'wtoe@123456';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'wtoe@123456';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'wtoe@123456';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'wtoe@123456';
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' IDENTIFIED BY 'wtoe@123456';
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'wtoe@123456';
flush privileges;
show databases;
select user,host from mysql.user;
exit
2.在keystone上面注册nova服务
# 创建服务证书
1)在keystone上创建nova用户
. admin-openrc
openstack user create --domain default --password=wtoe@123456 nova
2)在keystone上将nova用户配置为admin角色并添加进service项目
openstack role add --project service --user nova admin
3)创建nova计算服务的实体
openstack service create --name nova --description "OpenStack Compute" compute
4)创建计算服务的API端点(endpoint)
openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
openstack endpoint list
5)这个版本的nova增加了placement项目
# 同样,创建并注册该项目的服务证书
openstack user create --domain default --password=wtoe@123456 placement
openstack role add --project service --user placement admin
openstack service create --name placement --description "Placement API" placement
# 创建placement项目的endpoint(API端口)
openstack endpoint create --region RegionOne placement public http://controller:8778
openstack endpoint create --region RegionOne placement internal http://controller:8778
openstack endpoint create --region RegionOne placement admin http://controller:8778
openstack endpoint list
# 完毕
4.3.在控制节点安装nova相关服务
1)安装nova相关软件包
yum install openstack-nova-api openstack-nova-conductor \
openstack-nova-console openstack-nova-novncproxy \
openstack-nova-scheduler openstack-nova-placement-api -y
2)快速修改nova配置
openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata
openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 192.168.3.241
openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron true
openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
openstack-config --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:wtoe@123456@controller
openstack-config --set /etc/nova/nova.conf api_database connection mysql+pymysql://nova:wtoe@123456@controller/nova_api
openstack-config --set /etc/nova/nova.conf database connection mysql+pymysql://nova:wtoe@123456@controller/nova
openstack-config --set /etc/nova/nova.conf placement_database connection mysql+pymysql://placement:wtoe@123456@controller/placement
openstack-config --set /etc/nova/nova.conf api auth_strategy keystone
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://controller:5000/v3
openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
openstack-config --set /etc/nova/nova.conf keystone_authtoken password wtoe@123456
openstack-config --set /etc/nova/nova.conf vnc enabled true
openstack-config --set /etc/nova/nova.conf vnc server_listen '$my_ip'
openstack-config --set /etc/nova/nova.conf vnc server_proxyclient_address '$my_ip'
openstack-config --set /etc/nova/nova.conf glance api_servers http://controller:9292
openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp
openstack-config --set /etc/nova/nova.conf placement region_name RegionOne
openstack-config --set /etc/nova/nova.conf placement project_domain_name Default
openstack-config --set /etc/nova/nova.conf placement project_name service
openstack-config --set /etc/nova/nova.conf placement auth_type password
openstack-config --set /etc/nova/nova.conf placement user_domain_name Default
openstack-config --set /etc/nova/nova.conf placement auth_url http://controller:5000/v3
openstack-config --set /etc/nova/nova.conf placement username placement
openstack-config --set /etc/nova/nova.conf placement password wtoe@123456
openstack-config --set /etc/nova/nova.conf scheduler discover_hosts_in_cells_interval 300
# 服务端的计算节点多久去检查一次新加入的host主机信息,可以自动将安装好的计算节点主机加入集群 300
#查看配置
grep '^[a-z]' /etc/nova/nova.conf
egrep -v "^#|^$" /etc/nova/nova.conf
# 默认情况下,计算服务使用内置的防火墙服务。由于网络服务包含了防火墙服务,必须使用``nova.virt.firewall.NoopFirewallDriver``防火墙服务来禁用掉计算服务内置的防火墙服务
3)修改nova的虚拟主机配置文件
# 由于有个包的bug需要配置修改文件,需要修改nova虚拟主机配置文件,增加内容,完整的文件内容如下:
vim /etc/httpd/conf.d/00-nova-placement-api.conf
最后增加内容
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
# 修改完毕重启httpd服务
systemctl restart httpd
systemctl status httpd
systemctl status httpd
# 至此,nova计算服务的软件包安装完成
4.4.同步nova数据(注意同步顺序)
# nova_api有32张表,placement有32张表,nova_cell0有110张表,nova也有110张表
1)初始化nova-api和placement数据库
su -s /bin/sh -c "nova-manage api_db sync" nova
mysql -h192.168.3.241 -unova -pwtoe@123456 -e "use nova_api;show tables;"
mysql -h192.168.3.241 -uplacement -pwtoe@123456 -e "use placement;show tables;"
# 通过对比可知,nova_api和placement都有32张表,区别在于nova_api数据库的cell_mappings表多两条数据,存放的是nova和rabbitmq等的配置信息
2)初始化nova_cell0和nova数据库
# 注册cell0数据库
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
# 创建cell1单元
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
# 初始化nova数据库
su -s /bin/sh -c "nova-manage db sync" nova
# 检查确认cell0和cell1注册成功
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
#验证数据库
mysql -h192.168.3.241 -unova -pwtoe@123456 -e "use nova_cell0;show tables;"
mysql -h192.168.3.241 -unova -pwtoe@123456 -e "use nova;show tables;"
# 通过对比可知,这两个数据库的表目前完全一样,区别在于nova数据库的service表中有4条数据,存放的是当前版本nova相关服务的注册信息
5)检查确认cell0和cell1注册成功
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
# 返回的数据存储在nova_api数据库的cell_mappings表中
4.5.启动nova服务
1)启动nova服务并设置为开机自启动
# 需要启动5个服务
systemctl start openstack-nova-api.service openstack-nova-consoleauth.service \
openstack-nova-scheduler.service openstack-nova-conductor.service \
openstack-nova-novncproxy.service
systemctl status openstack-nova-api.service openstack-nova-consoleauth.service \
openstack-nova-scheduler.service openstack-nova-conductor.service \
openstack-nova-novncproxy.service
systemctl enable openstack-nova-api.service openstack-nova-consoleauth.service \
openstack-nova-scheduler.service openstack-nova-conductor.service \
openstack-nova-novncproxy.service
systemctl list-unit-files |grep openstack-nova* |grep enabled
#控制节点配置完成
##################################配置计算节点######################################
5.1.配置域名解析
1)配置主机名
# 主机名设置好就不能修改,否则会出问题,控制节点和计算节点配置相同,且都需要配置
echo "compute"> /etc/hostname
cat /etc/hostname
2)配置主机名解析
vim /etc/hosts
5.2.关闭防火墙和selinux
1)关闭iptables
systemctl stop firewalld.service
systemctl disable firewalld.service
systemctl status firewalld.service
2)关闭 selinux
setenforce 0
getenforce
# 注意:/etc/sysconfig/selinux 是软链接,sed -i 会把它替换成普通文件,应直接修改真实文件 /etc/selinux/config
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
grep SELINUX=disabled /etc/selinux/config
5.3.配置时间同步
1)在计算节点配置时间同步服务
# 安装时间同步的软件包
yum install chrony -y
2)编辑配置文件确认有以下配置
vim /etc/chrony.conf
# 修改引用控制节点openstack01的IP
server 192.168.3.241 iburst
3)重启chronyd服务,并配置开机自启动
systemctl restart chronyd.service
systemctl status chronyd.service
systemctl enable chronyd.service
systemctl list-unit-files |grep chronyd.service
4)设置时区,首次同步时间
timedatectl set-timezone Asia/Shanghai
chronyc sources
timedatectl status
# 至此,时间同步配置完成
5.4 安装基础环境
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
mv /etc/yum.repos.d/epel.repo /etc/yum.repos.d/epel.repo.backup
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
yum install centos-release-openstack-rocky -y
yum clean all
yum makecache
5.5.安装nova计算节点相关软件包
1)计算节点安装nova软件包
yum install openstack-nova-compute python-openstackclient openstack-utils -y
2)快速修改配置文件(/etc/nova/nova.conf)
openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 192.168.3.170
openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron True
openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata
openstack-config --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:wtoe@123456@controller
openstack-config --set /etc/nova/nova.conf api auth_strategy keystone
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://controller:5000/v3
openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
openstack-config --set /etc/nova/nova.conf keystone_authtoken password wtoe@123456
openstack-config --set /etc/nova/nova.conf vnc enabled True
openstack-config --set /etc/nova/nova.conf vnc server_listen 0.0.0.0
openstack-config --set /etc/nova/nova.conf vnc server_proxyclient_address '$my_ip'
openstack-config --set /etc/nova/nova.conf vnc novncproxy_base_url http://controller:6080/vnc_auto.html
openstack-config --set /etc/nova/nova.conf glance api_servers http://controller:9292
openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp
openstack-config --set /etc/nova/nova.conf placement region_name RegionOne
openstack-config --set /etc/nova/nova.conf placement project_domain_name Default
openstack-config --set /etc/nova/nova.conf placement project_name service
openstack-config --set /etc/nova/nova.conf placement auth_type password
openstack-config --set /etc/nova/nova.conf placement user_domain_name Default
openstack-config --set /etc/nova/nova.conf placement auth_url http://controller:5000/v3
openstack-config --set /etc/nova/nova.conf placement username placement
openstack-config --set /etc/nova/nova.conf placement password wtoe@123456
#检查修订有效性
egrep -v "^#|^$" /etc/nova/nova.conf
3)配置虚拟机的硬件加速
# 首先确定您的计算节点是否支持虚拟机的硬件加速。
egrep -c '(vmx|svm)' /proc/cpuinfo
# 如果返回为0,表示计算节点不支持硬件加速,需要配置libvirt使用QEMU方式管理虚拟机,使用以下命令:
openstack-config --set /etc/nova/nova.conf libvirt virt_type qemu
egrep -v "^#|^$" /etc/nova/nova.conf|grep 'virt_type'
# 如果返回为其他值,表示计算节点支持硬件加速且不需要额外的配置,使用以下命令:
openstack-config --set /etc/nova/nova.conf libvirt virt_type kvm
4)启动nova相关服务,并配置为开机自启动
# 需要启动2个服务
systemctl start libvirtd.service openstack-nova-compute.service
systemctl status libvirtd.service openstack-nova-compute.service
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl list-unit-files |grep libvirtd.service
systemctl list-unit-files |grep openstack-nova-compute.service
5)将计算节点增加到cell数据库
# 以下命令在控制节点操作:
. admin-openrc
# 检查确认数据库有新的计算节点
openstack compute service list --service nova-compute
#如果没有,则需要手动增加 --- 手动将新的计算节点添加到openstack集群(控制节点已配置 discover_hosts_in_cells_interval 300,约每300秒自动发现一次新节点)
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
# 设置新创建节点自动注册的任务(已经添加到配置文件中)
[scheduler]
discover_hosts_in_cells_interval = 300
# 至此,计算节点安装完毕,接下来需要进行测试,检查nova节点的状态
5.6.在控制节点进行验证
1)应用管理员环境变量脚本
. admin-openrc
2)列表查看安装的nova服务组件
# 验证是否成功注册并启动了每个进程
openstack compute service list
3)在身份认证服务中列出API端点以验证其连接性
openstack catalog list
4)在镜像服务中列出已有镜像已检查镜像服务的连接性
openstack image list
5)检查nova各组件的状态
# 检查placement API和cell服务是否正常工作
nova-status upgrade check
# 至此,nova计算节点,安装完毕并添加到openstack集群中
################################Neutron安装#######################################
6.1.主机网络配置及测试
1)控制节点配置
vim /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.3.241 controller
192.168.3.170 compute block object
2)计算节点配置
vim /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.3.241 controller
192.168.3.170 compute block object
3)块存储节点配置
vim /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.3.241 controller
192.168.3.170 compute block object
# 注意(审查意见):按开头"环境说明",block节点IP应为192.168.3.171;若块存储确实部署在171,请相应增加 "192.168.3.171 block" 的解析记录
4)检测各节点到控制节点和公网的联通性
ping -c 4 controller
6.2.在keystone数据库中注册neutron相关服务
1)创建neutron数据库,授予合适的访问权限
mysql -u root -p
CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'wtoe@123456';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'wtoe@123456';
exit
2)在keystone上创建neutron用户
openstack user create --domain default --password=wtoe@123456 neutron
openstack user list
# ok
3)将neutron添加到service项目并授予admin角色
# 以下命令无输出
openstack role add --project service --user neutron admin
4)创建neutron服务实体
openstack service create --name neutron --description "OpenStack Networking" network
openstack service list
# ok
5)创建neutron网络服务的API端点(endpoint)
openstack endpoint create --region RegionOne network public http://controller:9696
openstack endpoint create --region RegionOne network internal http://controller:9696
openstack endpoint create --region RegionOne network admin http://controller:9696
openstack endpoint list
6.3.在控制节点安装neutron网络组件
# 关于neutron的网络提供了两种方式:
以下为第一种Networking Option 1: Provider networks
1)安装neutron软件包
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables -y
2)快速配置/etc/neutron/neutron.conf
openstack-config --set /etc/neutron/neutron.conf database connection mysql+pymysql://neutron:wtoe@123456@controller/neutron
openstack-config --set /etc/neutron/neutron.conf DEFAULT core_plugin ml2
openstack-config --set /etc/neutron/neutron.conf DEFAULT service_plugins
openstack-config --set /etc/neutron/neutron.conf DEFAULT transport_url rabbit://openstack:wtoe@123456@controller
openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken www_authenticate_uri http://controller:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://controller:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_type password
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_name service
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken username neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken password wtoe@123456
openstack-config --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_status_changes True
openstack-config --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_data_changes True
openstack-config --set /etc/neutron/neutron.conf nova auth_url http://controller:5000
openstack-config --set /etc/neutron/neutron.conf nova auth_type password
openstack-config --set /etc/neutron/neutron.conf nova project_domain_name default
openstack-config --set /etc/neutron/neutron.conf nova user_domain_name default
openstack-config --set /etc/neutron/neutron.conf nova region_name RegionOne
openstack-config --set /etc/neutron/neutron.conf nova project_name service
openstack-config --set /etc/neutron/neutron.conf nova username nova
openstack-config --set /etc/neutron/neutron.conf nova password wtoe@123456
openstack-config --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp
#检查修订有效性
egrep -v "^#|^$" /etc/neutron/neutron.conf
3)快速配置/etc/neutron/plugins/ml2/ml2_conf.ini
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers flat,vlan
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers linuxbridge
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers port_security
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_flat flat_networks provider
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup enable_ipset True
#检查修订有效性
egrep -v "^#|^$" /etc/neutron/plugins/ml2/ml2_conf.ini
4)快速配置/etc/neutron/plugins/ml2/linuxbridge_agent.ini
#第一条命令的网卡名称要根据特定进行修订
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings provider:em1
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan False
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group True
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
#检查修订有效性
egrep -v "^#|^$" /etc/neutron/plugins/ml2/linuxbridge_agent.ini
# 以下两个内核参数必须为1;在br_netfilter模块加载后,启动neutron-linuxbridge-agent.service时通常会自动设置,用下面的sysctl命令确认即可
sysctl net.bridge.bridge-nf-call-iptables
sysctl net.bridge.bridge-nf-call-ip6tables
5)快速配置/etc/neutron/dhcp_agent.ini
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT interface_driver linuxbridge
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT dhcp_driver neutron.agent.linux.dhcp.Dnsmasq
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT enable_isolated_metadata True
# 查看生效的配置
egrep -v '(^$|^#)' /etc/neutron/dhcp_agent.ini
# 至此,方式1的配置文件修改完毕
############################以下内容是两种模式都需要配置的#####################
6)快速配置/etc/neutron/metadata_agent.ini
openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT nova_metadata_host controller
openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT metadata_proxy_shared_secret wtoe@123456
# 查看生效的配置
egrep -v '(^$|^#)' /etc/neutron/metadata_agent.ini
# metadata_proxy_shared_secret选项是元数据代理,需要设置一个合适的密码这里设置为wtoe@123456
7)配置计算服务使用网络服务
# 快速配置/etc/nova/nova.conf,将neutron添加到计算节点中
openstack-config --set /etc/nova/nova.conf neutron url http://controller:9696
openstack-config --set /etc/nova/nova.conf neutron auth_url http://controller:5000
openstack-config --set /etc/nova/nova.conf neutron auth_type password
openstack-config --set /etc/nova/nova.conf neutron project_domain_name default
openstack-config --set /etc/nova/nova.conf neutron user_domain_name default
openstack-config --set /etc/nova/nova.conf neutron region_name RegionOne
openstack-config --set /etc/nova/nova.conf neutron project_name service
openstack-config --set /etc/nova/nova.conf neutron username neutron
openstack-config --set /etc/nova/nova.conf neutron password wtoe@123456
openstack-config --set /etc/nova/nova.conf neutron service_metadata_proxy true
openstack-config --set /etc/nova/nova.conf neutron metadata_proxy_shared_secret wtoe@123456
# 查看生效的配置
egrep -v '(^$|^#)' /etc/nova/nova.conf
8)初始化安装网络插件
# 创建网络插件的链接,初始化网络的脚本插件会用到/etc/neutron/plugin.ini,需要使用ML2的插件进行提供
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
9)同步数据库
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
--config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
# ok
10)重启nova_api服务
systemctl restart openstack-nova-api.service
11)启动neutron服务并设置开机启动
systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl status neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl list-unit-files |grep neutron |grep enabled
# 至此,控制端的neutron网络服务就安装完成,之后需要在计算节点安装网络服务组件,使计算节点可以连接到openstack集群
###########################在计算节点安装neutron网络组件###############################
1)安装neutron组件
yum install openstack-neutron-linuxbridge ebtables ipset -y
2)快速配置/etc/neutron/neutron.conf
# 密码中的@在URL中有歧义,需百分号编码为%40(实际密码仍为wtoe@123456)
openstack-config --set /etc/neutron/neutron.conf DEFAULT transport_url rabbit://openstack:wtoe%40123456@controller
openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken www_authenticate_uri http://controller:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://controller:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_type password
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_name service
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken username neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken password wtoe@123456
openstack-config --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp
# 查看生效的配置
egrep -v '(^$|^#)' /etc/neutron/neutron.conf
3)快速配置/etc/neutron/plugins/ml2/linuxbridge_agent.ini
#第一行配置要参照主机当前配置的网卡名称(p2p2)
# 注意:第一个选项physical_interface_mappings选项要配置计算节点自身的网卡名称provider:p2p2
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings provider:p2p2
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan false
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group true
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
# 查看生效的配置
egrep -v '(^$|^#)' /etc/neutron/plugins/ml2/linuxbridge_agent.ini
4)配置nova计算服务与neutron网络服务协同工作
# 快速配置/etc/nova/nova.conf
openstack-config --set /etc/nova/nova.conf neutron url http://controller:9696
openstack-config --set /etc/nova/nova.conf neutron auth_url http://controller:5000
openstack-config --set /etc/nova/nova.conf neutron auth_type password
openstack-config --set /etc/nova/nova.conf neutron project_domain_name default
openstack-config --set /etc/nova/nova.conf neutron user_domain_name default
openstack-config --set /etc/nova/nova.conf neutron region_name RegionOne
openstack-config --set /etc/nova/nova.conf neutron project_name service
openstack-config --set /etc/nova/nova.conf neutron username neutron
openstack-config --set /etc/nova/nova.conf neutron password wtoe@123456
# 查看生效的配置
egrep -v '(^$|^#)' /etc/nova/nova.conf
5)重启计算节点
systemctl restart openstack-nova-compute.service
systemctl status openstack-nova-compute.service
6)启动neutron网络组件,并配置开机自启动
# 需要启动1个服务,网桥代理
systemctl restart neutron-linuxbridge-agent.service
systemctl status neutron-linuxbridge-agent.service
systemctl enable neutron-linuxbridge-agent.service
systemctl list-unit-files |grep neutron |grep enabled
# 至此,计算节点的网络配置完成,转回到控制节点进行验证操作
6.5.在控制节点检查确认neutron服务安装成功
# 以下命令在控制节点执行
1)获取管理权限
. admin-openrc
2)列表查看加载的网络插件
openstack extension list --network
# 或者使用另一种方法:显示简版信息
neutron ext-list
3)查看网络代理列表
openstack network agent list
# 正常情况下:控制节点有3个服务,计算节点有1个服务,如果不是,需要检查计算节点配置:网卡名称,IP地址,端口,密码等要素
###########################安装openvswitch网络组件###############################
1.yum install openstack-neutron openstack-neutron-openvswitch ebtables ipset -y (控制节点)
1.yum install openstack-neutron-openvswitch ebtables ipset -y (计算节点)
2.参考官网配置即可
systemctl enable neutron-server.service neutron-openvswitch-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
######################################## horizon(dashboard)web界面管理服务######################################
7.1.安装dashboard WEB控制台
1)安装dashboard软件包
yum install openstack-dashboard -y
2)修改配置文件/etc/openstack-dashboard/local_settings
# 检查确认有以下配置
vim /etc/openstack-dashboard/local_settings
ALLOWED_HOSTS = ['*', ]
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
OPENSTACK_API_VERSIONS = {
"identity": 3,
"image": 2,
"volume": 2,
}
OPENSTACK_HOST = "controller"
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "default"
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'controller:11211',
}
}
OPENSTACK_NEUTRON_NETWORK = {
'enable_router': False,
'enable_quotas': False,
'enable_distributed_router': False,
'enable_ha_router': False,
'enable_fip_topology_check': False,
'enable_lb': False,
'enable_firewall': False,
'enable_vpn': False,
}
TIME_ZONE = "Asia/Shanghai"
配置dashboard运行在192.168.3.241上(192.168.3.241为OS主机系统)
配置允许登陆dashboard的主机
配置memcached存储服务
启用第3版认证API
启用对域的支持
配置API版本
通过仪表盘创建用户时的默认域配置为 default
通过仪表盘创建的用户默认角色配置为 user
如果您选择网络参数1,禁用支持3层网络服务:
可以选择性地配置时区,不能用CST否则无法启动httpd服务
3)修改/etc/httpd/conf.d/openstack-dashboard.conf
# 增加以下内容
vim /etc/httpd/conf.d/openstack-dashboard.conf
WSGIApplicationGroup %{GLOBAL}
4)重启web服务器以及会话存储服务
systemctl restart httpd.service memcached.service
systemctl status httpd.service memcached.service
5)检查dashboard是否可用
# 在浏览器中输入下面的地址:域名用default
http://controller:80/dashboard
用户1:admin/wtoe@123456
用户2: myuser/wtoe@123456
#########################08.启动一个虚拟机实例###########################
8.1.创建provider提供者网络
1)在控制节点上,创建网络接口
# 加载 admin 凭证来获取管理员能执行的命令访问权限
source admin-openrc
openstack network create --share --external --provider-physical-network provider --provider-network-type flat provider
openstack network list
2)检查网络配置
# 确认ml2_conf.ini以下配置选项
# 上面的命令--provider-network-type flat网络名称provider与此对应
vim /etc/neutron/plugins/ml2/ml2_conf.ini
# 确认linuxbridge_agent.ini以下配置选项
# 上面的命令--provider-physical-network provider与此对应,网卡名称注意要与此对应,使用控制节点的网卡名称
vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
3)创建provider子网[两个子网]
openstack subnet create --network provider --no-dhcp --allocation-pool start=192.168.1.2,end=192.168.1.254 --dns-nameserver 8.8.8.8 --gateway 192.168.1.1 --subnet-range 192.168.1.0/24 provider-subnet01
openstack subnet create --network provider --dhcp --subnet-range 192.168.2.0/24 provider-subnet02
openstack subnet list
=========================安装Cinder存储服务组件(控制节点)=============================
9.1.在控制节点安装cinder存储服务
1)创建cinder数据库
CREATE DATABASE cinder;
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'wtoe@123456';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'wtoe@123456';
flush privileges;
show databases;
select user,host from mysql.user;
exit
2)在keystone上面注册cinder服务(创建服务证书)
# 在keystone上创建cinder用户
openstack user create --domain default --password=wtoe@123456 cinder
openstack user list
# 在keystone上将cinder用户配置为admin角色并添加进service项目,以下命令无输出
openstack role add --project service --user cinder admin
# 创建cinder服务的实体
openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
openstack service list
# 创建cinder服务的API端点(endpoint)
openstack endpoint create --region RegionOne volumev2 public http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 internal http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 admin http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 public http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 internal http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 admin http://controller:8776/v3/%\(project_id\)s
openstack endpoint list
3)安装cinder相关软件包
yum install openstack-cinder -y
4)快速修改cinder配置
# 密码中的@在URL中有歧义,需百分号编码为%40(实际密码仍为wtoe@123456)
openstack-config --set /etc/cinder/cinder.conf database connection mysql+pymysql://cinder:wtoe%40123456@controller/cinder
openstack-config --set /etc/cinder/cinder.conf DEFAULT transport_url rabbit://openstack:wtoe%40123456@controller
openstack-config --set /etc/cinder/cinder.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken www_authenticate_uri http://controller:5000
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_url http://controller:5000
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_type password
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_name service
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken username cinder
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken password wtoe@123456
openstack-config --set /etc/cinder/cinder.conf DEFAULT my_ip 192.168.3.241
openstack-config --set /etc/cinder/cinder.conf oslo_concurrency lock_path /var/lib/cinder/tmp
# 检查生效的cinder配置
egrep -v "^#|^$" /etc/cinder/cinder.conf
grep '^[a-z]' /etc/cinder/cinder.conf
5)同步cinder数据库
su -s /bin/sh -c "cinder-manage db sync" cinder
# 验证数据库
mysql -h192.168.3.241 -ucinder -pwtoe@123456 -e "use cinder;show tables;"
6)修改nova配置文件
# 配置nova调用cinder服务
openstack-config --set /etc/nova/nova.conf cinder os_region_name RegionOne
# 检查生效的nova配置
grep '^[a-z]' /etc/nova/nova.conf |grep os_region_name
7)重启nova-api服务
systemctl restart openstack-nova-api.service
systemctl status openstack-nova-api.service
8)启动cinder存储服务
# 需要启动2个服务
systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl status openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl list-unit-files |grep openstack-cinder |grep enabled
# 至此,控制端的cinder服务安装完毕,在dashboard上面可以看到项目目录中多了一个卷服务
# 接下来安装块存储节点服务器storage node
9.2.在存储节点服务器安装cinder存储服务
# 存储节点建议单独部署服务器(最好是物理机),测试时也可以部署在控制节点或者计算节点
# 在本文,存储节点使用LVM逻辑卷提供服务,需要提供一块空的磁盘用以创建LVM逻辑卷
# 我这里在VMware虚拟机增加一块100GB的磁盘
1)安装LVM相关软件包
yum install lvm2 device-mapper-persistent-data -y
2)启动LVM的metadata服务并配置开机自启动
systemctl start lvm2-lvmetad.service
systemctl status lvm2-lvmetad.service
systemctl enable lvm2-lvmetad.service
systemctl list-unit-files |grep lvm2-lvmetad |grep enabled
3)创建LVM逻辑卷
# 检查磁盘状态
fdisk -l
# 创建LVM 物理卷 /dev/sdb
pvcreate /dev/sdb
# 创建 LVM 卷组 cinder-volumes,块存储服务会在这个卷组中创建逻辑卷
vgcreate cinder-volumes /dev/sdb
4)配置过滤器,防止系统出错
# 编辑/etc/lvm/lvm.conf,在devices段配置过滤器:只接受cinder使用的/dev/sdb,拒绝其余设备,
# 防止操作系统扫描LVM卷内部的分区,例如:
# devices {
#     filter = [ "a/sdb/", "r/.*/" ]
# }
5)在存储节点安装配置cinder组件
yum install openstack-cinder targetcli python-keystone openstack-utils -y
# 密码中的@在URL中有歧义,需百分号编码为%40(实际密码仍为wtoe@123456)
openstack-config --set /etc/cinder/cinder.conf database connection mysql+pymysql://cinder:wtoe%40123456@controller/cinder
openstack-config --set /etc/cinder/cinder.conf DEFAULT transport_url rabbit://openstack:wtoe%40123456@controller
openstack-config --set /etc/cinder/cinder.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken www_authenticate_uri http://controller:5000
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_url http://controller:5000
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_type password
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_name service
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken username cinder
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken password wtoe@123456
openstack-config --set /etc/cinder/cinder.conf DEFAULT my_ip 192.168.3.171
openstack-config --set /etc/cinder/cinder.conf lvm volume_driver cinder.volume.drivers.lvm.LVMVolumeDriver
openstack-config --set /etc/cinder/cinder.conf lvm volume_group cinder-volumes
openstack-config --set /etc/cinder/cinder.conf lvm iscsi_protocol iscsi
openstack-config --set /etc/cinder/cinder.conf lvm iscsi_helper lioadm
openstack-config --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm
openstack-config --set /etc/cinder/cinder.conf DEFAULT glance_api_servers http://controller:9292
openstack-config --set /etc/cinder/cinder.conf oslo_concurrency lock_path /var/lib/cinder/tmp
# 如果存储节点是双网卡,选项my_ip需要配置存储节点的管理IP,否则配置本机IP
# 检查生效的cinder配置
egrep -v "^#|^$" /etc/cinder/cinder.conf
grep '^[a-z]' /etc/cinder/cinder.conf
6)在存储节点启动cinder服务并配置开机自启动
# 需要启动2个服务
systemctl start openstack-cinder-volume.service target.service
systemctl status openstack-cinder-volume.service target.service
systemctl enable openstack-cinder-volume.service target.service
systemctl list-unit-files |grep openstack-cinder |grep enabled
systemctl list-unit-files |grep target.service |grep enabled
# 至此,在存储节点安装cinder服务就完成了
9.3.在控制节点进行验证
1)获取管理员变量
source admin-openrc
2)查看存储卷列表
openstack volume service list
# 返回以上信息,表示cinder相关节点安装完成