openstack 部署详情请参考:https://blog.csdn.net/qq_41786090/article/details/131301296?spm=1001.2014.3001.5501
ceph 部署详情请参考:https://blog.csdn.net/qq_41786090/article/details/131570549?spm=1001.2014.3001.5501
一.对接前基础环境构建
1.配置openstack为ceph客户端
在控制节点安装python-rbd和ceph软件包:
[root@controller01 ~]# yum install -y python-rbd ceph
在计算节点安装ceph:
[root@computer01 ~]# yum install -y ceph
2.创建osd存储池添加认证(ceph01)
[root@ceph01 ~]# ceph osd pool create volumes 128
[root@ceph01 ~]# ceph osd pool create vms 128
[root@ceph01 ~]# ceph osd pool create images 128
[root@ceph01 ~]# ceph osd pool application enable volumes rbd
[root@ceph01 ~]# ceph osd pool application enable images rbd
[root@ceph01 ~]# ceph osd pool application enable vms rbd
注意:volumes/images/vms 这三个池由 cinder、glance、nova 通过 RBD 块设备访问,application 类型应为 rbd 而不是 rgw(rgw 仅用于对象网关的池)。
[root@ceph01 ~]# ceph osd pool set volumes pg_autoscale_mode on
[root@ceph01 ~]# ceph osd pool set images pg_autoscale_mode on
[root@ceph01 ~]# ceph osd pool set vms pg_autoscale_mode on
3.在ceph01上,生成相应的 key 文件
[root@ceph01 ~]# cd /etc/ceph    # 进入 /etc/ceph 目录,在该目录下生成 key 文件
[root@ceph01 ceph]# pwd
/etc/ceph
[root@ceph01 ceph]# ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images' -o /etc/ceph/ceph.client.cinder.keyring
[root@ceph01 ceph]# ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images' -o /etc/ceph/ceph.client.glance.keyring
4.把生成key文件发送到openstack的所有节点
[root@ceph01 ceph]# scp ceph.client.glance.keyring controller01:/etc/ceph/
[root@ceph01 ceph]# scp ceph.client.cinder.keyring controller01:/etc/ceph/
[root@ceph01 ceph]# scp ceph.client.cinder.keyring computer01:/etc/ceph/
注意:cinder-volume 运行在控制节点(见下文第5步对 controller01 上 keyring 的 chown 及 cinder.conf 配置),因此 controller01 也需要 ceph.client.cinder.keyring;原文有两条重复的 scp 命令,此处已去重并补全。
5.在控制节点修改文件权限(controller01)
[root@controller01 ~]# chown glance:glance /etc/ceph/ceph.client.glance.keyring
[root@controller01 ~]# chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
6.在计算节点修改文件权限
[root@computer01 ~]# chown nova:nova /etc/ceph/ceph.client.cinder.keyring
7.在计算节点上配置libvirt
[root@computer01 ceph]# ceph auth get-key client.cinder | tee client.cinder.key
AQCmPppg9++EKxAAeFb5gd5GpabgThKcGwtP8w==
[root@computer01 ceph]# uuidgen    # 注意:uuidgen 每次执行生成的 UUID 都不同;请记下此 UUID,后续 secret.xml、virsh secret-set-value、cinder.conf 和 nova.conf 中必须使用同一个 UUID
3ede2ed5-a8fb-48f6-92ea-fb32db1dde1e
--------------
cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
<uuid>3ede2ed5-a8fb-48f6-92ea-fb32db1dde1e</uuid>
<usage type='ceph'>
<name>client.cinder secret</name>
</usage>
</secret>
EOF
[root@computer01 ceph]# virsh secret-define --file secret.xml
[root@computer01 ceph]# virsh secret-set-value --secret 3ede2ed5-a8fb-48f6-92ea-fb32db1dde1e --base64 $(cat client.cinder.key) && rm client.cinder.key secret.xml
rm: remove regular file ‘client.cinder.key’? y
rm: remove regular file ‘secret.xml’? y
二.对接
1.对接glance服务(控制节点controller01)
[root@controller01 ~]# vim /etc/glance/glance-api.conf
[DEFAULT]
......
default_store = rbd
[glance_store]
stores = rbd
rbd_store_chunk_size = 8
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
重启glance服务 [root@controller01 ~]# systemctl restart openstack-glance*
2.对接cinder服务(控制节点controller01)
[root@controller01 ~]# vim /etc/cinder/cinder.conf
[DEFAULT]
......
enabled_backends = ceph
[ceph]
rbd_pool = volumes
rbd_user = cinder
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_secret_uuid = 3ede2ed5-a8fb-48f6-92ea-fb32db1dde1e
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = ceph
重启cinder服务 [root@controller01 ~]# systemctl restart openstack-cinder*
3.对接nova服务(计算节点computer01)
[root@computer01 ~]# vim /etc/nova/nova.conf
[libvirt]
virt_type=kvm
inject_password=false
inject_key=false
inject_partition=-2
disk_cachemodes = "network=writeback"
images_type=rbd
images_rbd_pool=vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
hw_disk_discard=unmap
rbd_user=cinder
rbd_secret_uuid=3ede2ed5-a8fb-48f6-92ea-fb32db1dde1e
重启服务[root@computer01 ~]# systemctl restart openstack-nova*