参考OpenStack 对接 Ceph_openstack 访问ceph界面_ball-4444的博客-CSDN博客
目录
对接前基础环境构建
创建资源池
[root@controller ~]# ceph osd pool create volumes 64
pool 'volumes' created
[root@controller ~]# ceph osd pool create vms 64
pool 'vms' created
[root@controller ~]# ceph osd pool create images 64
pool 'images' created
在控制节点创建 Ceph 客户端及存储池的鉴权,生成相应的 key 文件
[root@controller ceph]# pwd 进入这个目录
/etc/ceph
[root@controller ceph]# ceph auth get-or-create client.cinder \
mon 'allow r' \
osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images' \
-o /etc/ceph/ceph.client.cinder.keyring
[root@controller ceph]# ceph auth get-or-create client.glance \
mon 'allow r' \
osd 'allow class-read object_prefix rbd_children, allow rwx pool=images' \
-o /etc/ceph/ceph.client.glance.keyring
将生成的 key 文件拷贝到其他所有 OpenStack节点
[root@controller ceph]# scp ceph.client.glance.keyring compute1:/etc/ceph/
[root@controller ceph]# scp ceph.client.cinder.keyring compute1:/etc/ceph/
[root@controller ceph]# scp ceph.client.glance.keyring compute2:/etc/ceph/
[root@controller ceph]# scp ceph.client.cinder.keyring compute2:/etc/ceph/
[root@controller ceph]# scp ceph.client.admin.keyring compute1:/etc/ceph/
[root@controller ceph]# scp ceph.client.admin.keyring compute2:/etc/ceph/
[root@controller ceph]# scp /etc/ceph/ceph.conf compute1:/etc/ceph/ceph.conf
[root@controller ceph]# scp /etc/ceph/ceph.conf compute2:/etc/ceph/ceph.conf
然后进入控制节点修改以下文件的属主权限
[root@controller ceph]# chown glance:glance /etc/ceph/ceph.client.glance.keyring
[root@controller ceph]# chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
在compute1、compute2上进行如下操作
[root@compute1 ceph]# ceph auth get-key client.cinder | tee client.cinder.key
AQCmPppg9++EKxAAeFb5gd5GpabgThKcGwtP8w==
[root@compute1 ceph]# uuidgen 注意:每次执行 uuidgen 生成的值都不相同
58de3642-3bc2-49a2-913b-678795339875
--------------
cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
<uuid>58de3642-3bc2-49a2-913b-678795339875</uuid>
<usage type='ceph'>
<name>client.cinder secret</name>
</usage>
</secret>
EOF
[root@compute1 ceph]# virsh secret-define --file secret.xml
Secret 58de3642-3bc2-49a2-913b-678795339875 created
[root@compute1 ceph]# virsh secret-set-value --secret 58de3642-3bc2-49a2-913b-678795339875 --base64 $(cat client.cinder.key) && rm client.cinder.key secret.xml
Secret value set
rm: remove regular file ‘client.cinder.key’? y
rm: remove regular file ‘secret.xml’? y
注:两台计算节点的操作步骤一致,只需注意区分并改写各自 secret.xml 与 virsh 命令中使用的 uuid
对接Glance镜像服务
控制节点修改如下文件
[root@controller ~]# vi /etc/glance/glance-api.conf
show_image_direct_url = false
stores=glance.store.filesystem.Store, glance.store.http.Store, glance.store.rbd.Store
default_store=rbd
rbd_store_user = glance
rbd_store_pool = images
flavor=keystone
重启glance服务
[root@controller ~]# systemctl restart openstack-glance*
验证是否对接成功
[root@controller ~]# source keystonerc_admin
[root@node6 ceph(keystone_admin)]# glance image-create --name cirros --disk-format qcow2 --container-format bare < /tmp/cirros-0.4.0-x86_64-disk.img
+------------------+----------------------------------------------------------------------------------+
| Property | Value |
+------------------+----------------------------------------------------------------------------------+
| checksum | 443b7623e27ecf03dc9e01ee93f67afe |
| container_format | bare |
| created_at | 2023-11-10T02:28:24Z |
| disk_format | qcow2 |
| id | c98f4ec6-9243-488b-92d4-2c47fad5f43c |
| min_disk | 0 |
| min_ram | 0 |
| name | cirros |
| os_hash_algo | sha512 |
| os_hash_value | 6513f21e44aa3da349f248188a44bc304a3653a04122d8fb4535423c8e1d14cd6a153f735bb0982e |
| | 2161b5b5186106570c17a9e58b64dd39390617cd5a350f78 |
| os_hidden | False |
| owner | ef1bac9acd6142d1ba2f1a41473f58d8 |
| protected | False |
| size | 12716032 |
| status | active |
| tags | [] |
| updated_at | 2023-11-10T02:28:26Z |
| virtual_size | Not available |
| visibility | shared |
+------------------+----------------------------------------------------------------------------------+
[root@node6 ceph(keystone_admin)]#
[root@ceph1 ~]# ceph df
GLOBAL:
SIZE AVAIL RAW USED %RAW USED
60 GiB 57 GiB 3.1 GiB 5.14
POOLS:
NAME ID USED %USED MAX AVAIL OBJECTS
volumes 12 709 B 0 18 GiB 5
vms 13 0 B 0 18 GiB 0
images 14 12 MiB 0.07 18 GiB 8
[root@ceph1 ~]#
对接Cinder卷服务
控制节点修改如下文件
[root@controller ~]# vi /etc/cinder/cinder.conf
enabled_backends=ceph,lvm
glance_api_version = 2 自行添加
在文件最后添加如下内容即可
[ceph]
glance_api_version = 2
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = ceph
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
rbd_user = cinder
rbd_secret_uuid = 58de3642-3bc2-49a2-913b-678795339875
uuid填compute1的uuid。注意:rbd_secret_uuid 只能配置一个值,因此建议所有计算节点的 libvirt secret 使用同一个 uuid(Ceph 官方文档也推荐如此),否则其他计算节点挂载卷时会因 secret 不匹配而失败
重启cinder服务
[root@controller ~]# systemctl restart openstack-cinder*
验证是否对接成功
[root@controller ~]# source keystonerc_admin
[root@node7 ceph(keystone_admin)]# openstack volume type create ceph
+-------------+--------------------------------------+
| Field | Value |
+-------------+--------------------------------------+
| description | None |
| id | dfc4ebaf-4825-4a84-b3bc-73dcd5714fff |
| is_public | True |
| name | ceph |
+-------------+--------------------------------------+
[root@node7 ceph(keystone_admin)]# openstack volume create --type ceph --size 10 test
+---------------------+--------------------------------------+
| Field | Value |
+---------------------+--------------------------------------+
| attachments | [] |
| availability_zone | nova |
| bootable | false |
| consistencygroup_id | None |
| created_at | 2023-10-27T06:48:16.000000 |
| description | None |
| encrypted | False |
| id | ae200d07-f2d2-408b-b3aa-8ec6913a9230 |
| migration_status | None |
| multiattach | False |
| name | test |
| properties | |
| replication_status | None |
| size | 10 |
| snapshot_id | None |
| source_volid | None |
| status | creating |
| type | ceph |
| updated_at | None |
| user_id | cbecce0ba0444faab1d5825f496f01da |
+---------------------+--------------------------------------+
[root@controller ~(keystone_admin)]# ceph df
GLOBAL:
SIZE AVAIL RAW USED %RAW USED
220 GiB 217 GiB 3.0 GiB 1.39
POOLS:
NAME ID USED %USED MAX AVAIL OBJECTS
volumes 1 709 B 0 66 GiB 5
vms 2 0 B 0 66 GiB 0
images 3 13 MiB 0.02 66 GiB 8
[root@ceph1 ~]# rbd list -p volumes
volume-ae200d07-f2d2-408b-b3aa-8ec6913a9230
对接Nova计算服务
在compute1、compute2节点修改如下文件
[root@compute1 ~]# vi /etc/nova/nova.conf
inject_password=False
inject_key=False
inject_partition=-2
disk_cachemodes = "network=writeback"
images_type=rbd
images_rbd_pool=vms
images_rbd_ceph_conf =/etc/ceph/ceph.conf
hw_disk_discard=unmap
rbd_user=cinder
rbd_secret_uuid=58de3642-3bc2-49a2-913b-678795339875
live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"
修改完成以后保存退出重启nova-compute服务即可
[root@compute1 nova]# systemctl restart openstack-nova*
然后将配置文件发送到compute2节点上即可
[root@compute1 nova]# scp /etc/nova/nova.conf compute2:/etc/nova/nova.conf
进入compute2节点进行如下修改
[root@compute2 ~]# vi /etc/nova/nova.conf
vncserver_proxyclient_address=compute2 将该项修改为compute2即可
[root@compute2 ~]# systemctl restart openstack-nova*
创建云主机验证
ceph集群查看
[root@ceph1 ~]# ceph df
GLOBAL:
SIZE AVAIL RAW USED %RAW USED
60 GiB 57 GiB 3.3 GiB 5.47
POOLS:
NAME ID USED %USED MAX AVAIL OBJECTS
volumes 12 709 B 0 18 GiB 5
vms 13 196 MiB 1.06 18 GiB 138
images 14 12 MiB 0.07 18 GiB 8
[root@ceph1 ~]#