本文不单独部署存储节点，直接使用计算节点充当存储节点：cinder-volume 服务安装并运行在计算节点上，cinder-volumes 卷组也建立在计算节点上（cinder-api 与 cinder-scheduler 仍部署在控制节点，见后文）
在kvm 环境中生成一个数据盘
[root@kvm images]# qemu-img create -f raw storage.raw 100G
Formatting 'storage.raw', fmt=raw size=107374182400
把数据盘添加到计算节点
[root@kvm images]# virsh attach-disk Compute6.12 /home/images/storage.raw vdb --config
Disk attached successfully
[root@kvm images]# virsh attach-disk Compute6.12 /home/images/storage.raw vdb --current
Disk attached successfully
[root@kvm images]# virsh domblklist 103
Target Source
------------------------------------------------
vda /home/images/Compute6.12.raw
vdb /home/images/storage.raw
hda -
登录计算节点查看盘数
[root@controller ~]# ssh 10.10.6.12
Last login: Tue Mar 27 11:26:43 2018 from controller
[root@compute12 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sr0 11:0 1 1024M 0 rom
vda 253:0 0 100G 0 disk
├─vda1 253:1 0 10G 0 part [SWAP]
└─vda2 253:2 0 90G 0 part /
vdb 253:16 0 100G 0 disk
LVM 设置
[root@compute12 ~]# yum install lvm2 device-mapper-persistent-data
启动服务
[root@compute12 ~]# systemctl enable lvm2-lvmetad.service
Created symlink from /etc/systemd/system/sysinit.target.wants/lvm2-lvmetad.service to /usr/lib/systemd/system/lvm2-lvmetad.service.
[root@compute12 ~]# systemctl start lvm2-lvmetad.service
[root@compute12 ~]# pvcreate /dev/vdb
Physical volume "/dev/vdb" successfully created.
[root@compute12 ~]# vgcreate cinder-volumes /dev/vdb
Volume group "cinder-volumes" successfully created
[root@compute12 ~]# vgdisplay
--- Volume group ---
VG Name cinder-volumes
System ID
Format lvm2
Metadata Areas 1
Metadata Sequence No 4
VG Access read/write
VG Status resizable
MAX LV 0
Cur LV 1
Open LV 0
Max PV 0
Cur PV 1
Act PV 1
VG Size <100.00 GiB
PE Size 4.00 MiB
Total PE 25599
Alloc PE / Size 24368 / <95.19 GiB
Free PE / Size 1231 / <4.81 GiB
VG UUID foRIcK-xf8B-ni2u-yXU0-vyv7-yiIF-ZAU3AE
设置lvm.conf
[root@compute12 ~]# cp /etc/lvm/lvm.conf{,.bk}
[root@compute12 ~]# egrep -v '#|^$' /etc/lvm/lvm.conf
config {
checks = 1
abort_on_errors = 0
profile_dir = "/etc/lvm/profile"
}
devices {
dir = "/dev"
scan = [ "/dev" ]
obtain_device_list_from_udev = 1
external_device_info_source = "none"
preferred_names = [ "^/dev/mpath/", "^/dev/mapper/mpath", "^/dev/[hs]d" ]
filter = [ "a/vdb/", "r/.*/"]
cache_dir = "/etc/lvm/cache"
cache_file_prefix = ""
write_cache_state = 1
sysfs_scan = 1
multipath_component_detection = 1
md_component_detection = 1
fw_raid_component_detection = 0
md_chunk_alignment = 1
data_alignment_detection = 1
data_alignment = 0
data_alignment_offset_detection = 1
ignore_suspended_devices = 0
ignore_lvm_mirrors = 1
disable_after_error_count = 0
require_restorefile_with_uuid = 1
pv_min_size = 2048
issue_discards = 0
allow_changes_with_duplicate_pvs = 1
}
allocation {
maximise_cling = 1
use_blkid_wiping = 1
wipe_signatures_when_zeroing_new_lvs = 1
mirror_logs_require_separate_pvs = 0
cache_pool_metadata_require_separate_pvs = 0
thin_pool_metadata_require_separate_pvs = 0
}
log {
verbose = 0
silent = 0
syslog = 1
overwrite = 0
level = 0
indent = 1
command_names = 0
prefix = " "
activation = 0
debug_classes = [ "memory", "devices", "activation", "allocation", "lvmetad", "metadata", "cache", "locking", "lvmpolld", "dbus" ]
}
backup {
backup = 1
backup_dir = "/etc/lvm/backup"
archive = 1
archive_dir = "/etc/lvm/archive"
retain_min = 10
retain_days = 30
}
shell {
history_size = 100
}
global {
umask = 077
test = 0
units = "r"
si_unit_consistency = 1
suffix = 1
activation = 1
proc = "/proc"
etc = "/etc"
locking_type = 1
wait_for_locks = 1
fallback_to_clustered_locking = 1
fallback_to_local_locking = 1
locking_dir = "/run/lock/lvm"
prioritise_write_locks = 1
abort_on_internal_errors = 0
detect_internal_vg_cache_corruption = 0
metadata_read_only = 0
mirror_segtype_default = "raid1"
raid10_segtype_default = "raid10"
sparse_segtype_default = "thin"
use_lvmetad = 1
use_lvmlockd = 0
system_id_source = "none"
use_lvmpolld = 1
notify_dbus = 1
}
activation {
checks = 0
udev_sync = 1
udev_rules = 1
verify_udev_operations = 0
retry_deactivation = 1
missing_stripe_filler = "error"
use_linear_target = 1
reserved_stack = 64
reserved_memory = 8192
process_priority = -18
raid_region_size = 2048
readahead = "auto"
raid_fault_policy = "warn"
mirror_image_fault_policy = "remove"
mirror_log_fault_policy = "allocate"
snapshot_autoextend_threshold = 100
snapshot_autoextend_percent = 20
thin_pool_autoextend_threshold = 100
thin_pool_autoextend_percent = 20
use_mlockall = 0
monitoring = 1
polling_interval = 15
activation_mode = "degraded"
}
dmeventd {
mirror_library = "libdevmapper-event-lvm2mirror.so"
snapshot_library = "libdevmapper-event-lvm2snapshot.so"
thin_library = "libdevmapper-event-lvm2thin.so"
}
这里把Cinder 组件安装到计算节点(Cinder-volume)
[root@compute12 ~]# yum install openstack-cinder targetcli python-keystone -y
[root@compute12 ~]# cp /etc/cinder/cinder.conf{,.bak}
Cinder 最终配置如下
[root@compute12 ~]# egrep -v '#|^$' /etc/cinder/cinder.conf
[DEFAULT]
transport_url = rabbit://openstack:RABBIT_PASS@controller
auth_strategy = keystone
my_ip = 10.10.6.12
enabled_backends = lvm,nfs
log_dir = /var/log/cinder
state_path = /var/lib/cinder
glance_api_servers = http://controller:9292
（注：原文在 [DEFAULT] 中重复定义了两次 enabled_backends，此处已合并为一行；另外启用的 nfs 后端还需要对应的 [nfs] 配置段，本文的配置清单中未展示，请自行补充）
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
iscsi_protocol = iscsi
iscsi_helper = lioadm
iscsi_ip_address = 10.10.6.12
volumes_dir = $state_path/volumes
volume_backend_name = lvm01
[backend]
[backend_defaults]
[barbican]
[brcd_fabric_example]
[cisco_fabric_example]
[coordination]
[cors]
[database]
connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
[fc-zone-manager]
[healthcheck]
[key_manager]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = CINDER_PASS
[matchmaker_redis]
[nova]
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[oslo_versionedobjects]
[profiler]
[service_user]
[ssl]
[vault]
启动服务
[root@compute12 ~]# systemctl enable openstack-cinder-volume.service target.service
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-cinder-volume.service to /usr/lib/systemd/system/openstack-cinder-volume.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/target.service to /usr/lib/systemd/system/target.service.
[root@compute12 ~]# systemctl start openstack-cinder-volume.service target.service
在controller 节点操作
创建Cinder数据库
[root@controller ~]# mysql -u root -psursen@2015
Welcome to the MariaDB monitor. Commands end with ; or \g.
Your MariaDB connection id is 145
Server version: 10.1.20-MariaDB MariaDB Server
Copyright (c) 2000, 2016, Oracle, MariaDB Corporation Ab and others.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
MariaDB [(none)]> CREATE DATABASE cinder;
Query OK, 1 row affected (0.00 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \
-> IDENTIFIED BY 'CINDER_DBPASS';
Query OK, 0 rows affected (0.00 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \
-> IDENTIFIED BY 'CINDER_DBPASS';
Query OK, 0 rows affected (0.00 sec)
MariaDB [(none)]> quit
Bye
设置认证
[root@controller ~]# . admin-openstack.sh
[root@controller ~]# openstack user create --domain default --password-prompt cinder
User Password:
Repeat User Password:
+---------------------+----------------------------------+
| Field | Value |
+---------------------+----------------------------------+
| domain_id | default |
| enabled | True |
| id | e321cf085600488da10630102134aeea |
| name | cinder |
| options | {} |
| password_expires_at | None |
+---------------------+----------------------------------+
[root@controller ~]# openstack role add --project service --user cinder admin
[root@controller ~]# openstack service create --name cinderv2 \
> --description "OpenStack Block Storage" volumev2
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | OpenStack Block Storage |
| enabled | True |
| id | f8a033962f74428bb40f13bdd9d7e1dd |
| name | cinderv2 |
| type | volumev2 |
+-------------+----------------------------------+
[root@controller ~]# openstack service create --name cinderv3 \
> --description "OpenStack Block Storage" volumev3
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | OpenStack Block Storage |
| enabled | True |
| id | 334c323d58a4450cbc501e7dc2a89a12 |
| name | cinderv3 |
| type | volumev3 |
+-------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne \
> volumev2 public http://controller:8776/v2/%\(project_id\)s
+--------------+------------------------------------------+
| Field | Value |
+--------------+------------------------------------------+
| enabled | True |
| id | 86010fe9a9d54ac3ba505c0b1e7ff462 |
| interface | public |
| region | RegionOne |
| region_id | RegionOne |
| service_id | f8a033962f74428bb40f13bdd9d7e1dd |
| service_name | cinderv2 |
| service_type | volumev2 |
| url | http://controller:8776/v2/%(project_id)s |
+--------------+------------------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne \
> volumev2 internal http://controller:8776/v2/%\(project_id\)s
+--------------+------------------------------------------+
| Field | Value |
+--------------+------------------------------------------+
| enabled | True |
| id | c2aaad6ee4ee41ec9b567c7bc9929a90 |
| interface | internal |
| region | RegionOne |
| region_id | RegionOne |
| service_id | f8a033962f74428bb40f13bdd9d7e1dd |
| service_name | cinderv2 |
| service_type | volumev2 |
| url | http://controller:8776/v2/%(project_id)s |
+--------------+------------------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne \
> volumev2 admin http://controller:8776/v2/%\(project_id\)s
+--------------+------------------------------------------+
| Field | Value |
+--------------+------------------------------------------+
| enabled | True |
| id | 9569a3884992488d9e8693fc8727d93d |
| interface | admin |
| region | RegionOne |
| region_id | RegionOne |
| service_id | f8a033962f74428bb40f13bdd9d7e1dd |
| service_name | cinderv2 |
| service_type | volumev2 |
| url | http://controller:8776/v2/%(project_id)s |
+--------------+------------------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne \
> volumev3 public http://controller:8776/v3/%\(project_id\)s
+--------------+------------------------------------------+
| Field | Value |
+--------------+------------------------------------------+
| enabled | True |
| id | 5c580f3873534428836860db62eacc98 |
| interface | public |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 334c323d58a4450cbc501e7dc2a89a12 |
| service_name | cinderv3 |
| service_type | volumev3 |
| url | http://controller:8776/v3/%(project_id)s |
+--------------+------------------------------------------+
[root@controller ~]#
[root@controller ~]# openstack endpoint create --region RegionOne \
> volumev3 internal http://controller:8776/v3/%\(project_id\)s
+--------------+------------------------------------------+
| Field | Value |
+--------------+------------------------------------------+
| enabled | True |
| id | f3577a6d61a94af8b2b7b997dac0ab96 |
| interface | internal |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 334c323d58a4450cbc501e7dc2a89a12 |
| service_name | cinderv3 |
| service_type | volumev3 |
| url | http://controller:8776/v3/%(project_id)s |
+--------------+------------------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne \
> volumev3 admin http://controller:8776/v3/%\(project_id\)s
+--------------+------------------------------------------+
| Field | Value |
+--------------+------------------------------------------+
| enabled | True |
| id | d315120850524790b7ea56ca9990fc08 |
| interface | admin |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 334c323d58a4450cbc501e7dc2a89a12 |
| service_name | cinderv3 |
| service_type | volumev3 |
| url | http://controller:8776/v3/%(project_id)s |
+--------------+------------------------------------------+
安装Cinder 组件
[root@controller ~]# yum install openstack-cinder -y
[root@controller ~]# cp /etc/cinder/cinder.conf{,.bak}
[root@controller ~]# vi /etc/cinder/cinder.conf
[root@controller ~]#
[root@controller ~]# egrep -v '#|^$' /etc/cinder/cinder.conf
[DEFAULT]
transport_url = rabbit://openstack:RABBIT_PASS@controller
auth_strategy = keystone
my_ip = 10.10.6.11
[backend]
[backend_defaults]
[barbican]
[brcd_fabric_example]
[cisco_fabric_example]
[coordination]
[cors]
[database]
connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
[fc-zone-manager]
[healthcheck]
[key_manager]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = CINDER_PASS
[matchmaker_redis]
[nova]
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[oslo_versionedobjects]
[profiler]
[service_user]
[ssl]
[vault]
同步数据库
[root@controller ~]# su -s /bin/sh -c "cinder-manage db sync" cinder
Option "logdir" from group "DEFAULT" is deprecated. Use option "log-dir" from group "DEFAULT"
设置 nova：相关配置在 nova 篇中已经全部完成，这里予以省略
启动服务
[root@controller ~]# systemctl restart openstack-nova-api.service
[root@controller ~]# systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-cinder-api.service to /usr/lib/systemd/system/openstack-cinder-api.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-cinder-scheduler.service to /usr/lib/systemd/system/openstack-cinder-scheduler.service.
[root@controller ~]# systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
备份服务（cinder-backup）需登录计算节点进行安装设置
cinder-backup 需要对象存储作为后端（官方推荐 Swift），本文未安装对象存储，故省略此步骤
如需配置请参考官方文档 https://docs.openstack.org/cinder/pike/install/cinder-backup-install-rdo.html
在控制节点确认安装结果
[root@controller ~]# . admin-openstack.sh
[root@controller ~]# openstack catalog show object-store
service object-store not found
[root@controller ~]# cinder service-list
+------------------+---------------+------+---------+-------+----------------------------+-----------------+
| Binary | Host | Zone | Status | State | Updated_at | Disabled Reason |
+------------------+---------------+------+---------+-------+----------------------------+-----------------+
| cinder-scheduler | controller | nova | enabled | up | 2018-03-27T04:53:29.000000 | - |
| cinder-volume | compute12@lvm | nova | enabled | up | 2018-03-27T04:53:24.000000 | - |
| cinder-volume | compute12@nfs | nova | enabled | up | 2018-03-27T04:53:20.000000 | - |
+------------------+---------------+------+---------+-------+----------------------------+-----------------+
[root@controller ~]# cinder extra-specs-list
+----+------+-------------+
| ID | Name | extra_specs |
+----+------+-------------+
+----+------+-------------+
创建块设备
[root@controller ~]# openstack volume create --size 1 disk01
+---------------------+--------------------------------------+
| Field | Value |
+---------------------+--------------------------------------+
| attachments | [] |
| availability_zone | nova |
| bootable | false |
| consistencygroup_id | None |
| created_at | 2018-03-27T04:55:37.000000 |
| description | None |
| encrypted | False |
| id | 64a61c0e-3bf9-4edd-9086-01ac87e70f69 |
| migration_status | None |
| multiattach | False |
| name | disk01 |
| properties | |
| replication_status | None |
| size | 1 |
| snapshot_id | None |
| source_volid | None |
| status | creating |
| type | None |
| updated_at | None |
| user_id | 9eaf7fdcfc4446c58bca578611ebce9f |
+---------------------+--------------------------------------+
[root@controller ~]# openstack volume list
+--------------------------------------+--------+-----------+------+-------------+
| ID | Name | Status | Size | Attached to |
+--------------------------------------+--------+-----------+------+-------------+
| 64a61c0e-3bf9-4edd-9086-01ac87e70f69 | disk01 | available | 1 | |
+--------------------------------------+--------+-----------+------+-------------+