1 nova与glusterfs结合
在所有计算节点上,把创建成功的、glusterfs共享出来的volume,挂载到/var/lib/nova/instances目录:
mount -t glusterfs <本机IP>:/instances /var/lib/nova/instances
chown -R nova:nova /var/lib/nova/instances
2 glance与glusterfs结合
在控制节点上,把创建成功的、glusterfs共享出来的volume,挂载到/var/lib/glance/images目录:
mount -t glusterfs <本机IP>:/images /var/lib/glance/images
chown -R glance:glance /var/lib/glance/images
添加images、instances到fstab自动挂载
192.168.4.131:/images /var/lib/glance/images glusterfs defaults,_netdev,backup-volfile-servers=controller2:compute01 0 0
192.168.4.131:/instances /var/lib/nova/instances glusterfs defaults,_netdev,backup-volfile-servers=controller2:compute01 0 0
使用backup-volfile-servers指定备用卷文件服务器,实现了高可用性,避免单点故障
3 cinder与glusterfs结合
3.1 cinder常用的三种后端
(1)本地创建逻辑卷lvm后端 (2)glusterfs后端
(3)使用openstack中第三方驱动的IP-SAN,型号为IBM Storwize系列
cinder.conf配置如下:
[DEFAULT]
enabled_backends = lvm,glusterfs,ibm
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_backend_name=LVM
volume_group = cinder-volumes
iscsi_protocol = iscsi
iscsi_helper = lioadm
[glusterfs]
volume_driver=cinder.volume.drivers.glusterfs.GlusterfsDriver
volume_backend_name=GlusterFS
glusterfs_shares_config=/etc/cinder/shares.conf
glusterfs_mount_point_base=/var/lib/cinder/glusterfs
[ibm]
volume_driver = cinder.volume.drivers.ibm.storwize_svc.StorwizeSVCDriver
san_ip = 172.28.21.10
san_login = superuser
san_password = 123456
storwize_svc_volpool_name = vtt1
storwize_svc_connection_protocol = iSCSI
volume_backend_name=IBM
3.2 GlusterFS
块存储服务中,GlusterFS Driver官方文档:
https://docs.openstack.org/liberty/config-reference/content/GlusterFS-driver.html
https://docs.openstack.org/ocata/config-reference/block-storage/drivers/glusterfs-driver.html
0 安装glusterfs服务器
在cinder节点192.168.4.130上,安装glusterfs客户端软件:
# apt-get install glusterfs-client
在存储节点network:192.168.4.131和存储节点compute:192.168.4.132上,创建volume:
root@network:~# gluster peer status
Number of Peers: 1

Hostname: compute
Uuid: 007468f2-9bb8-4b92-aff4-57bbc87dad57
State: Peer in Cluster (Connected)

root@network:~# gluster volume create volumes replica 2 network:/glusterfs/volumes1 compute:/glusterfs/volumes1 network:/glusterfs/volumes2 compute:/glusterfs/volumes2
root@network:~# gluster volume info volumes

Volume Name: volumes
Type: Distributed-Replicate
Volume ID: 54e699ca-6e46-4648-8548-d7d843adc1aa
Status: Started
Snapshot Count: 0
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: network:/glusterfs/volumes1
Brick2: compute:/glusterfs/volumes1
Brick3: network:/glusterfs/volumes2
Brick4: compute:/glusterfs/volumes2
Options Reconfigured:
transport.address-family: inet
performance.readdir-ahead: on
nfs.disable: on
1 在cinder节点,cinder-volume端配置内容如下
[DEFAULT]
enabled_backends = glusterfs
[glusterfs] #最后添加
volume_driver = cinder.volume.drivers.glusterfs.GlusterfsDriver #驱动
glusterfs_shares_config = /etc/cinder/shares.conf #glusterfs存储
glusterfs_mount_point_base = /var/lib/cinder/volumes #挂载点
Configuration option = Default value | Description |
---|---|
[DEFAULT] | |
glusterfs_backup_mount_point = $state_path/backup_mount | (StrOpt) Base dir containing mount point for gluster share. |
glusterfs_backup_share = None | (StrOpt) GlusterFS share in <hostname|ipv4addr|ipv6addr>:<gluster_vol_name> format. Eg: 1.2.3.4:backup_vol |
glusterfs_mount_point_base = $state_path/mnt | (StrOpt) Base dir containing mount points for gluster shares. |
glusterfs_shares_config = /etc/cinder/glusterfs_shares | (StrOpt) File with the list of available gluster shares |
nas_volume_prov_type = thin | (StrOpt) Provisioning type that will be used when creating volumes. |
2. 配置glusterfs存储配置
在/etc/cinder/shares.conf文件中配置上卷信息:
# cat /etc/cinder/shares.conf
192.168.4.131:/openstack_cinder
文件中添加glusterfs卷信息,注意该文件的权限,所属组
root@controller:~# chown -R cinder:cinder /etc/cinder/shares.conf
root@controller:~# ll /etc/cinder/shares.conf
-rw-r--r-- 1 cinder cinder 34 May 17 16:49 /etc/cinder/shares.conf
root@controller:~# chown -R cinder:cinder /var/lib/cinder/*
root@controller:~# ll /var/lib/cinder/volumes/
3. 重启cinder-volume服务
# service cinder-volume restart
检查日志文件/var/log/cinder/volume.log,看是否有错误信息
重启服务后,使用mount查看信息:
192.168.4.131:/openstack_cinder on /var/lib/cinder/volumes/16b81d8d542fdbf4d70330bb672e9714 type fuse.glusterfs (rw,default_permissions,allow_other,max_read=131072)
4. controller节点检查服务状态
controller节点的/etc/cinder/cinder.conf文件中添加内容
[glusterfs]
volume_driver=cinder.volume.drivers.glusterfs.GlusterfsDriver
查看服务状态:
# cinder service-list
+------------------+----------------------+------+---------+-------+----------------------------+-----------------+
| Binary | Host | Zone | Status | State | Updated_at | Disabled Reason |
+------------------+----------------------+------+---------+-------+----------------------------+-----------------+
| cinder-scheduler | controller | nova | enabled | up | 2017-05-17T09:35:38.000000 | - |
| cinder-volume | controller@glusterfs | nova | enabled | up | 2017-05-17T09:35:43.000000 | - |
+------------------+----------------------+------+---------+-------+----------------------------+-----------------+
5. controller建立type
root@controller:~# cinder type-create glusterfs
+--------------------------------------+-----------+-------------+-----------+
| ID | Name | Description | Is_Public |
+--------------------------------------+-----------+-------------+-----------+
| ffd4caf8-2b0f-48d8-aaea-488339922914 | glusterfs | - | True |
+--------------------------------------+-----------+-------------+-----------+
6. controller配置cinder-type和volume_backend_name联动
root@controller:~# cinder type-key glusterfs set volume_backend_name=glusterfs
#查看type的设置情况
root@controller:~# cinder extra-specs-list
+--------------------------------------+-----------+----------------------------------------+
| ID | Name | extra_specs |
+--------------------------------------+-----------+----------------------------------------+
| ffd4caf8-2b0f-48d8-aaea-488339922914 | glusterfs | {u'volume_backend_name': u'glusterfs'} |
+--------------------------------------+-----------+----------------------------------------+
7. 重启controller的cinder服务
root@controller:~# service cinder-scheduler restart
cinder-scheduler stop/waiting
cinder-scheduler start/running, process 27121
root@controller:~# service cinder-api restart
cinder-api stop/waiting
cinder-api start/running, process 27157
8 创建cinder volume
root@controller:~# cinder create --display-name "test1" --volume-type glusterfs 10 #指定cinder type的类型,10为卷大小(GB)
root@controller:~# cinder show 59e2e560-6633-45f4-9d73-6f7ea62c06ef
+---------------------------------------+--------------------------------------+
| Property | Value |
+---------------------------------------+--------------------------------------+
| attachments | [] |
| availability_zone | nova |
| bootable | false |
| consistencygroup_id | None |
| created_at | 2017-05-17T09:19:47.000000 |
| description | None |
| encrypted | False |
| id | 59e2e560-6633-45f4-9d73-6f7ea62c06ef |
| metadata | {} |
| migration_status | None |
| multiattach | False |
| name | test1 |
| os-vol-host-attr:host | controller@glusterfs#GlusterFS |
| os-vol-mig-status-attr:migstat | None |
| os-vol-mig-status-attr:name_id | None |
| os-vol-tenant-attr:tenant_id | 27a967778eb84f5296258809de65f15e |
| os-volume-replication:driver_data | None |
| os-volume-replication:extended_status | None |
| replication_status | disabled |
| size | 10 |
| snapshot_id | None |
| source_volid | None |
| status | available |
| user_id | 73b742285a6049d5a806d34c2020a1e1 |
| volume_type | glusterfs |
+---------------------------------------+--------------------------------------+
9 查看两个集群节点的存储内容
root@network:~# ls /glusterfs/*
/glusterfs/brick1:
volume-59e2e560-6633-45f4-9d73-6f7ea62c06ef
/glusterfs/brick2:
参考:
1 GlusterFS作为OpenStack后端存储: http://linuxnote.blog.51cto.com/9876511/1826501
2 http://blog.csdn.net/xuanlangjian/article/details/16881431