Environment
Official references:
https://docs.ceph.com/docs/master/rbd/rbd-openstack/
https://docs.openstack.org/kolla-ansible/train/reference/storage/external-ceph-guide.html
Prerequisites:
- An all-in-one OpenStack node deployed with kolla-ansible
- An external Ceph cluster (here a 3-node cluster)
- The 192.168.93.30 node is the kolla node
Configure Ceph
Run the following steps on a Ceph node.
Create the storage pools on the Ceph node
# Create the pools
ceph osd pool create volumes 64 64
ceph osd pool create images 64 64
ceph osd pool create backups 64 64
ceph osd pool create vms 64 64
# Initialize the pools
rbd pool init volumes
rbd pool init images
rbd pool init backups
rbd pool init vms
# List the pools
# ceph osd pool ls
volumes
images
backups
vms
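Optionally, confirm that rbd pool init tagged each pool with the rbd application (a quick check using standard Ceph commands):
# Each pool should report the "rbd" application after rbd pool init
for p in volumes images backups vms; do ceph osd pool application get $p; done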
Configure authentication on the Ceph node
If cephx authentication is enabled, create dedicated users for Nova, Cinder, and Glance:
# Create the glance user and its capabilities
ceph auth get-or-create client.glance mon 'profile rbd' osd 'profile rbd pool=images' mgr 'profile rbd pool=images' -o /etc/ceph/ceph.client.glance.keyring
# Create the cinder user and its capabilities
ceph auth get-or-create client.cinder mon 'profile rbd' osd 'profile rbd pool=volumes, profile rbd pool=vms, profile rbd-read-only pool=images' mgr 'profile rbd pool=volumes, profile rbd pool=vms' -o /etc/ceph/ceph.client.cinder.keyring
# Create the cinder-backup user and its capabilities
ceph auth get-or-create client.cinder-backup mon 'profile rbd' osd 'profile rbd pool=backups' mgr 'profile rbd pool=backups' -o /etc/ceph/ceph.client.cinder-backup.keyring
# Create the nova user and its capabilities
ceph auth get-or-create client.nova mon 'profile rbd' osd 'profile rbd pool=volumes, profile rbd pool=vms, profile rbd-read-only pool=images,profile rbd pool=backups' mgr 'profile rbd pool=volumes, profile rbd pool=vms' -o /etc/ceph/ceph.client.nova.keyring
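You can verify each user and its capabilities with ceph auth get, for example:
# Show the glance user's key and caps
ceph auth get client.glance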
Install the Ceph client
Run the following steps on the OpenStack node.
Install the Ceph client tools on every node that needs to access the Ceph cluster; here there is only the single OpenStack node:
yum install -y python-rbd
yum install -y ceph-common
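A quick sanity check that the client tools landed on the node:
ceph --version
rbd --version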
Create the configuration directories on the kolla deploy node
mkdir -p /etc/kolla/config/{glance,cinder,nova}
mkdir -p /etc/kolla/config/cinder/{cinder-volume,cinder-backup}
Modify the Glance configuration
Run the following steps on the kolla deploy node.
Copy the Ceph keyring:
scp 192.168.93.30:/etc/ceph/ceph.client.glance.keyring /etc/kolla/config/glance/
Copy the Ceph configuration file:
scp 192.168.93.30:/etc/ceph/ceph.conf /etc/kolla/config/glance/
To integrate the Glance service, create the following override:
cat > /etc/kolla/config/glance/glance-api.conf <<EOF
[glance_store]
stores = rbd
default_store = rbd
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
EOF
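The upstream rbd-openstack guide additionally recommends exposing image locations so that RBD-backed Cinder and Nova can create copy-on-write clones of Glance images instead of downloading them. An optional, hedged addition; verify it against your Glance version:
# Optional: let RBD-backed services clone images copy-on-write (per the Ceph docs)
cat >> /etc/kolla/config/glance/glance-api.conf <<EOF
[DEFAULT]
show_image_direct_url = True
EOF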
Modify the Cinder configuration
Copy the keys. Note that cinder-backup needs two keyrings so it can access both the volumes pool and the backups pool.
scp 192.168.93.30:/etc/ceph/ceph.client.cinder.keyring /etc/kolla/config/cinder/cinder-volume/
scp 192.168.93.30:/etc/ceph/ceph.client.cinder.keyring /etc/kolla/config/cinder/cinder-backup/
scp 192.168.93.30:/etc/ceph/ceph.client.cinder-backup.keyring /etc/kolla/config/cinder/cinder-backup/
Copy ceph.conf:
scp 192.168.93.30:/etc/ceph/ceph.conf /etc/kolla/config/cinder/
Create the cinder-volume.conf file:
cat > /etc/kolla/config/cinder/cinder-volume.conf <<EOF
[DEFAULT]
enabled_backends=rbd-1
[rbd-1]
rbd_ceph_conf=/etc/ceph/ceph.conf
rbd_user=cinder
backend_host=rbd:volumes
rbd_pool=volumes
volume_backend_name=rbd-1
volume_driver=cinder.volume.drivers.rbd.RBDDriver
rbd_secret_uuid = a0a12844-3910-452c-9808-5c82f0d6f67d
EOF
The rbd_secret_uuid value can be found in /etc/kolla/passwords.yml; note that cinder-volume uses the cinder_rbd_secret_uuid entry.
# cat /etc/kolla/passwords.yml | grep rbd_secret_uuid
cinder_rbd_secret_uuid: a0a12844-3910-452c-9808-5c82f0d6f67d
rbd_secret_uuid: bf77feeb-3d95-437c-a05e-71ba93676770
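Instead of pasting the UUID by hand, you could substitute it with a small shell sketch (this assumes the simple "key: value" layout of passwords.yml shown above):
# Extract cinder_rbd_secret_uuid and patch it into cinder-volume.conf
uuid=$(awk '/^cinder_rbd_secret_uuid:/{print $2}' /etc/kolla/passwords.yml)
sed -i "s/^rbd_secret_uuid = .*/rbd_secret_uuid = ${uuid}/" /etc/kolla/config/cinder/cinder-volume.conf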
Create the cinder-backup.conf file:
cat > /etc/kolla/config/cinder/cinder-backup.conf <<EOF
[DEFAULT]
backup_ceph_conf=/etc/ceph/ceph.conf
backup_ceph_user=cinder-backup
backup_ceph_chunk_size = 134217728
backup_ceph_pool=backups
backup_driver = cinder.backup.drivers.ceph.CephBackupDriver
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
restore_discard_excess_bytes = true
EOF
Modify the Nova configuration
Copy the keys:
scp 192.168.93.30:/etc/ceph/ceph.client.cinder.keyring /etc/kolla/config/nova/
scp 192.168.93.30:/etc/ceph/ceph.client.nova.keyring /etc/kolla/config/nova/
Copy the Ceph configuration file:
scp 192.168.93.30:/etc/ceph/ceph.conf /etc/kolla/config/nova/
Create the nova-compute.conf file to configure the RBD backend for Nova:
cat > /etc/kolla/config/nova/nova-compute.conf <<EOF
[libvirt]
images_rbd_pool=vms
images_type=rbd
images_rbd_ceph_conf=/etc/ceph/ceph.conf
rbd_user=nova
rbd_secret_uuid=bf77feeb-3d95-437c-a05e-71ba93676770
EOF
The rbd_secret_uuid value can be found in /etc/kolla/passwords.yml; note that Nova uses the rbd_secret_uuid entry, not cinder_rbd_secret_uuid.
# cat /etc/kolla/passwords.yml | grep rbd_secret_uuid
cinder_rbd_secret_uuid: a0a12844-3910-452c-9808-5c82f0d6f67d
rbd_secret_uuid: bf77feeb-3d95-437c-a05e-71ba93676770
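The rbd-openstack guide also suggests enabling writeback caching for RBD-backed disks. An optional addition, sketched below; since [libvirt] is the only section in the file, a plain append lands in the right place. Verify it suits your workload:
# Optional: writeback caching for RBD disks (appends into the [libvirt] section)
cat >> /etc/kolla/config/nova/nova-compute.conf <<EOF
disk_cachemodes="network=writeback"
EOF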
The final layout under /etc/kolla/config/:
[root@kolla ~]# tree /etc/kolla/config/
/etc/kolla/config/
├── cinder
│   ├── ceph.conf
│   ├── cinder-backup
│   │   ├── ceph.client.cinder-backup.keyring
│   │   └── ceph.client.cinder.keyring
│   ├── cinder-backup.conf
│   ├── cinder-volume
│   │   └── ceph.client.cinder.keyring
│   └── cinder-volume.conf
├── glance
│   ├── ceph.client.glance.keyring
│   ├── ceph.conf
│   └── glance-api.conf
└── nova
    ├── ceph.client.cinder.keyring
    ├── ceph.client.nova.keyring
    ├── ceph.conf
    └── nova-compute.conf
Modify globals.yml
Edit kolla's globals.yml. Since we are integrating an external Ceph cluster, kolla's own Ceph deployment stays disabled; enable the Cinder service and turn on the Ceph backends for Glance, Cinder, and Nova:
cat >> /etc/kolla/globals.yml <<EOF
#version
kolla_base_distro: "centos"
kolla_install_type: "binary"
openstack_release: "train"
#vip
kolla_internal_vip_address: "192.168.93.100"
#docker registry
docker_registry: "registry.cn-shenzhen.aliyuncs.com"
docker_namespace: "kollaimage"
#network
network_interface: "ens33"
neutron_external_interface: "ens37"
neutron_plugin_agent: "openvswitch"
enable_neutron_provider_networks: "yes"
#storage
enable_cinder: "yes"
#virt_type
nova_compute_virt_type: "qemu"
#ceph
enable_ceph: "no"
glance_backend_ceph: "yes"
cinder_backend_ceph: "yes"
nova_backend_ceph: "yes"
gnocchi_backend_storage: "ceph"
enable_manila_backend_cephfs_native: "yes"
EOF
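Before reconfiguring, you can optionally run kolla-ansible's prechecks to catch configuration problems early:
kolla-ansible -i all-in-one prechecks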
Reconfigure OpenStack
kolla-ansible -i all-in-one reconfigure
Verify the Ceph integration
Check the state of the deployed volume services:
$ openstack volume service list
+------------------+-------------------+------+---------+-------+----------------------------+
| Binary | Host | Zone | Status | State | Updated At |
+------------------+-------------------+------+---------+-------+----------------------------+
| cinder-scheduler | kolla | nova | enabled | up | 2020-06-17T09:42:03.000000 |
| cinder-backup | kolla | nova | enabled | up | 2020-06-17T09:41:57.000000 |
| cinder-volume | rbd:volumes@rbd-1 | nova | enabled | up | 2020-06-17T09:42:01.000000 |
+------------------+-------------------+------+---------+-------+----------------------------+
Check the initial state of the RBD pools; they are all empty:
rbd -p images ls
rbd -p volumes ls
rbd -p vms ls
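A small loop to check all four pools in one pass:
for p in images volumes vms backups; do echo "== $p"; rbd -p "$p" ls; done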
Create an image
Upload an image (the upload command is sketched below), then view the newly added image:
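A minimal sketch of the upload, assuming a locally downloaded cirros qcow2 image (the filename is illustrative):
openstack image create cirros01 \
  --disk-format qcow2 --container-format bare \
  --file cirros-0.5.1-x86_64-disk.img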
# openstack image list
+--------------------------------------+----------------+--------+
| ID | Name | Status |
+--------------------------------------+----------------+--------+
| 0fec116d-ec19-4bca-ba71-bd0c40e4630c | cirros01 | active |
+--------------------------------------+----------------+--------+
View the image details; the locations entry under properties shows the image is stored via rbd:
$ openstack image show 0fec116d-ec19-4bca-ba71-bd0c40e4630c
+------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field | Value |
+------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| checksum | 1d3062cd89af34e419f7100277f38b2b |
| container_format | bare |
| created_at | 2020-06-17T09:44:35Z |
| disk_format | qcow2 |
| file | /v2/images/0fec116d-ec19-4bca-ba71-bd0c40e4630c/file |
| id | 0fec116d-ec19-4bca-ba71-bd0c40e4630c |
| min_disk | 0 |
| min_ram | 0 |
| name | cirros01 |
| owner | 65850af146fe478ab13f59f7edf838ec |
| properties | locations='[{u'url': u'rbd://c64af733-b16a-4962-b613-d37faaab60fe/images/0fec116d-ec19-4bca-ba71-bd0c40e4630c/snap', u'metadata': {}}]', os_hash_algo='sha512', os_hash_value='553d220ed58cfee7dafe003c446a9f197ab5edf8ffc09396c74187cf83873c877e7ae041cb80f3b91489acf687183adcd689b53b38e3ddd22e627e7f98a09c46', os_hidden='False' |
| protected | False |
| schema | /v2/schemas/image |
| size | 16338944 |
| status | active |
| tags | |
| updated_at | 2020-06-17T09:44:36Z |
| virtual_size | None |
| visibility | shared |
+------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
Check the RBD pools again; the image is stored in the images pool:
[root@ceph01 ~]# rbd -p images ls
0fec116d-ec19-4bca-ba71-bd0c40e4630c
View the image's details in the pool; note the image also has one snapshot:
[root@ceph01 ~]# rbd -p images info 0fec116d-ec19-4bca-ba71-bd0c40e4630c
rbd image '0fec116d-ec19-4bca-ba71-bd0c40e4630c':
size 16 MiB in 2 objects
order 23 (8 MiB objects)
snapshot_count: 1
id: d9976b58fc3d
block_name_prefix: rbd_data.d9976b58fc3d
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Wed Jun 17 17:44:35 2020
access_timestamp: Wed Jun 17 17:44:35 2020
modify_timestamp: Wed Jun 17 17:44:35 2020
[root@ceph01 ~]# rbd -p images snap list 0fec116d-ec19-4bca-ba71-bd0c40e4630c
SNAPID NAME SIZE PROTECTED TIMESTAMP
4 snap 16 MiB yes Wed Jun 17 17:44:36 2020
Create an instance
# openstack server list
+--------------------------------------+-------+--------+--------------------+--------+---------+
| ID | Name | Status | Networks | Image | Flavor |
+--------------------------------------+-------+--------+--------------------+--------+---------+
| 42dc6abb-b6b6-4f56-a8fd-d29d71c15057 | demo1 | ACTIVE | demo-net=10.0.0.12 | cirros | m1.tiny |
+--------------------------------------+-------+--------+--------------------+--------+---------+
Note: when creating the instance, choose not to create a new volume, i.e. boot directly from the image; a sketch of the command follows.
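A sketch of the boot command, using the names that appear in the outputs above (the image, flavor, and network must already exist):
openstack server create demo1 \
  --image cirros01 --flavor m1.tiny --network demo-net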
You can see the VM created a disk image in the vms pool:
[root@ceph01 ceph]# rbd -p vms ls
42dc6abb-b6b6-4f56-a8fd-d29d71c15057_disk
Log in to the node hosting the VM; the VM's system disk is the image created in vms:
[root@kolla ~]# docker exec -it nova_libvirt virsh list
Id Name State
----------------------------------------------------
1 instance-00000001 running
Inspect the domain XML:
[root@kolla ~]# docker exec -it nova_libvirt virsh dumpxml 1 | grep "disk type" -A14
<disk type='network' device='disk'>
<driver name='qemu' type='raw' cache='none'/>
<auth username='nova'>
<secret type='ceph' uuid='bf77feeb-3d95-437c-a05e-71ba93676770'/>
</auth>
<source protocol='rbd' name='vms/42dc6abb-b6b6-4f56-a8fd-d29d71c15057_disk'>
<host name='192.168.93.60' port='6789'/>
<host name='192.168.93.61' port='6789'/>
<host name='192.168.93.62' port='6789'/>
</source>
<target dev='vda' bus='virtio'/>
<alias name='virtio-disk0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
</disk>
From the qemu process and its linked libraries, you can see that qemu accesses the RBD block device directly through Ceph's librbd library:
[root@kolla ~]# ps aux | grep qemu
[root@kolla ~]# ldd /usr/libexec/qemu-kvm | grep -e ceph -e rbd
librbd.so.1 => /lib64/librbd.so.1 (0x00007f7cfaa3f000)
Create a volume
$ openstack volume create --size 1 volume01
# openstack volume list
+--------------------------------------+----------+--------+------+--------------------------------+
| ID | Name | Status | Size | Attached to |
+--------------------------------------+----------+--------+------+--------------------------------+
| 5e662290-f806-477a-8de1-d564904b7231 | volume01 | in-use | 1 | Attached to demo1 on /dev/vdb |
+--------------------------------------+----------+--------+------+--------------------------------+
Check the pools; the new volume lands in the volumes pool:
[root@ceph01 ~]# rbd -p volumes ls
volume-5e662290-f806-477a-8de1-d564904b7231
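You can also inspect the RBD image backing the volume:
rbd info volumes/volume-5e662290-f806-477a-8de1-d564904b7231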
Create a volume backup
Create a backup of the volume:
# openstack volume backup create volume01
+-------+--------------------------------------+
| Field | Value |
+-------+--------------------------------------+
| id | c4eca260-81a6-4763-9234-0e193c157b7f |
| name | None |
+-------+--------------------------------------+
# openstack volume backup list
You can see it was created in the backups pool:
[root@ceph01 ~]# rbd -p backups ls
volume-5e662290-f806-477a-8de1-d564904b7231.backup.c4eca260-81a6-4763-9234-0e193c157b7f
[root@ceph01 ceph]# rbd -p backups snap list volume-5e662290-f806-477a-8de1-d564904b7231.backup.c4eca260-81a6-4763-9234-0e193c157b7f
SNAPID NAME SIZE PROTECTED TIMESTAMP
6 backup.c4eca260-81a6-4763-9234-0e193c157b7f.snap.1592556822.77 1 GiB Fri Jun 19 16:53:44 2020
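For completeness, a restore would look along these lines (restoring onto an existing volume overwrites its contents):
openstack volume backup restore c4eca260-81a6-4763-9234-0e193c157b7f volume01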
Attach the volume: attach the newly created volume to the VM created earlier:
# openstack server add volume demo1 volume01
On the node hosting the VM, inspect the libvirt domain XML again; a new RBD disk has been added:
[root@kolla ~]# docker exec -it nova_libvirt virsh dumpxml 1 | grep "disk type" -A14
<disk type='network' device='disk'>
<driver name='qemu' type='raw' cache='none'/>
<auth username='nova'>
<secret type='ceph' uuid='bf77feeb-3d95-437c-a05e-71ba93676770'/>
</auth>
<source protocol='rbd' name='vms/42dc6abb-b6b6-4f56-a8fd-d29d71c15057_disk'>
<host name='192.168.93.60' port='6789'/>
<host name='192.168.93.61' port='6789'/>
<host name='192.168.93.62' port='6789'/>
</source>
<target dev='vda' bus='virtio'/>
<alias name='virtio-disk0'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
</disk>
<disk type='network' device='disk'>
<driver name='qemu' type='raw' cache='none' discard='unmap'/>
<auth username='cinder'>
<secret type='ceph' uuid='a0a12844-3910-452c-9808-5c82f0d6f67d'/>
</auth>
<source protocol='rbd' name='volumes/volume-5e662290-f806-477a-8de1-d564904b7231'>
<host name='192.168.93.60' port='6789'/>
<host name='192.168.93.61' port='6789'/>
<host name='192.168.93.62' port='6789'/>
</source>
<target dev='vdb' bus='virtio'/>
<serial>5e662290-f806-477a-8de1-d564904b7231</serial>
<alias name='virtio-disk1'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
</disk>
Create a floating IP for the VM and log in over SSH:
# openstack floating ip create public1
# openstack server add floating ip demo1 192.168.1.205
# openstack server list
+--------------------------------------+-------+--------+-----------------------------------+--------+---------+
| ID | Name | Status | Networks | Image | Flavor |
+--------------------------------------+-------+--------+-----------------------------------+--------+---------+
| 42dc6abb-b6b6-4f56-a8fd-d29d71c15057 | demo1 | ACTIVE | demo-net=10.0.0.12, 192.168.1.205 | cirros | m1.tiny |
+--------------------------------------+-------+--------+-----------------------------------+--------+---------+
# username "cirros", password "gocubsgo"
$ ssh cirros@192.168.1.205
$ sudo passwd root
Changing password for root
New password:
Bad password: too weak
Retype password:
Password for root changed by root
$ su -
Password:
Create a filesystem on the new volume, write a test file, and finally unmount it (the write and unmount steps are sketched after the df output):
# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
vda 253:0 0 1G 0 disk
|-vda1 253:1 0 1015M 0 part /
`-vda15 253:15 0 8M 0 part
vdb 253:16 0 1G 0 disk
# mkfs.ext4 /dev/vdb
mke2fs 1.42.12 (29-Aug-2014)
Creating filesystem with 262144 4k blocks and 65536 inodes
Filesystem UUID: bb0e8ce5-ef15-49be-b8f8-97131e072a5e
Superblock backups stored on blocks:
32768, 98304, 163840, 229376
Allocating group tables: done
Writing inode tables: done
Creating journal (8192 blocks): done
Writing superblocks and filesystem accounting information: done
# mount /dev/vdb /mnt
# df -h
Filesystem Size Used Available Use% Mounted on
/dev 240.2M 0 240.2M 0% /dev
/dev/vda1 978.9M 24.0M 914.1M 3% /
tmpfs 244.2M 0 244.2M 0% /dev/shm
tmpfs 244.2M 92.0K 244.1M 0% /run
/dev/vdb 975.9M 1.3M 907.4M 0% /mnt
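The test-file write and final unmount mentioned above, sketched out:
# echo "hello ceph" > /mnt/testfile
# sync
# umount /mnt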
Detach the volume from the instance
$ openstack server remove volume demo1 volume01
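Afterwards the volume status should return to available:
$ openstack volume list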