上文讲过创建volume最终是使用taskflow创建几个任务:
// 构建了如下几个任务,依次是:
// ExtractVolumeRequestTask 构造请求
// QuotaReserveTask 预留配额
// EntryCreateTask 创建volume数据库记录
// QuotaCommitTask 提交配额
// VolumeCastTask 创建volume
# Excerpt from cinder/volume/flows/api/create_volume.py:get_flow() --
# assembles the API-side linear taskflow for volume creation.
# NOTE(review): indentation was lost when this snippet was pasted; see the
# original file for the real layout.
api_flow.add(ExtractVolumeRequestTask(
image_service_api,
availability_zones,
rebind={'size': 'raw_size',
'availability_zone': 'raw_availability_zone',
'volume_type': 'raw_volume_type'}))
api_flow.add(QuotaReserveTask(),
EntryCreateTask(),
QuotaCommitTask())
if scheduler_rpcapi and volume_rpcapi:
# This will cast it out to either the scheduler or volume manager via
# the rpc apis provided.
api_flow.add(VolumeCastTask(scheduler_rpcapi, volume_rpcapi, db_api))
ExtractVolumeRequestTask:
该任务主要是对参数进行基本的校验和重新封装,所以没有revert函数(也不需要)
# Excerpt: ExtractVolumeRequestTask.execute().
# Validates and normalizes the raw API request; it has no side effects,
# which is why the task defines no revert().  The '....' marks elided
# validation/repackaging code from the original source.
def execute(self, context, size, snapshot, image_id, source_volume,
availability_zone, volume_type, metadata, key_manager,
consistencygroup, cgsnapshot, group, group_snapshot, backup):
.... # validation and repackaging elided; values handed to the next tasks:
return {
'size': size,
'snapshot_id': snapshot_id,
'source_volid': source_volid,
'availability_zone': availability_zone,
'volume_type': volume_type,
'volume_type_id': volume_type_id,
'encryption_key_id': encryption_key_id,
'qos_specs': specs,
'consistencygroup_id': consistencygroup_id,
'cgsnapshot_id': cgsnapshot_id,
'group_id': group_id,
'replication_status': replication_status,
'refresh_az': refresh_az,
'backup_id': backup_id,
}
QuotaReserveTask
该任务是检查当前的配额是否足够,并为即将创建的volume预留配额;
当该任务之后的流程失败时,该任务的revert则回滚(释放)预留的配额
def execute(self, context, size, volume_type_id, group_snapshot,
            optional_args):
    """Check quota limits and reserve capacity for the new volume.

    :param size: requested volume size in GiB
    :param volume_type_id: volume type used to expand the reserve options
    :param group_snapshot: truthy when creating from a group snapshot, in
        which case only the volume count (not gigabytes) is reserved
    :param optional_args: shared dict; 'update_size' means only the size
        changes, so no extra volume slot is reserved
    :returns: {'reservations': <reservation ids>} consumed by
        QuotaCommitTask (and by revert() on failure)
    :raises: VolumeSizeExceedsLimit when the per-volume size cap is hit
    """
    try:
        # Per-volume size cap: a pure limit check, nothing is reserved.
        values = {'per_volume_gigabytes': size}
        QUOTAS.limit_check(context, project_id=context.project_id,
                           **values)
    except exception.OverQuota as e:
        quotas = e.kwargs['quotas']
        raise exception.VolumeSizeExceedsLimit(
            size=size, limit=quotas['per_volume_gigabytes'])
    try:
        if group_snapshot:
            reserve_opts = {'volumes': 1}
        else:
            reserve_opts = {'volumes': 1, 'gigabytes': size}
        if optional_args.get('update_size'):
            # Only the size is changing; don't reserve another volume slot.
            reserve_opts.pop('volumes', None)
        QUOTAS.add_volume_type_opts(context, reserve_opts, volume_type_id)
        # The reservation covers the volume count and its gigabytes.
        reservations = QUOTAS.reserve(context, **reserve_opts)
        return {
            'reservations': reservations,
        }
    except exception.OverQuota as e:
        quota_utils.process_reserve_over_quota(context, e,
                                               resource='volumes',
                                               size=size)
def revert(self, context, result, optional_args, **kwargs):
    """Release the reservations taken by execute(), when possible.

    There is nothing to undo when execute() never produced a result, or
    when QuotaCommitTask already committed the reservations (a commit
    cannot be rolled back from here).
    """
    nothing_reserved = (isinstance(result, ft.Failure)
                        or optional_args['is_quota_committed'])
    if nothing_reserved:
        return
    # We produced reservations, so try to hand them back.
    held = result['reservations']
    try:
        QUOTAS.rollback(context, held)
    except exception.CinderException:
        # We are mid-revert already; log instead of raising so a second
        # active exception does not mask the original failure.
        LOG.exception("Failed rolling back quota for"
                      " %s reservations", held)
QuotaCommitTask
当配额检查通过后,则提交预留的配额;
当提交之后流程仍然失败时,revert会通过反向预留并提交来扣回已提交的配额
def execute(self, context, reservations, volume_properties,
            optional_args):
    """Commit the quota reservations made by QuotaReserveTask.

    Also flips 'is_quota_committed' in the shared optional_args dict so
    QuotaReserveTask.revert() knows the reservations can no longer be
    rolled back.
    """
    QUOTAS.commit(context, reservations)
    # Shared-state flag read by the earlier task's revert().
    optional_args['is_quota_committed'] = True
    produced = {'volume_properties': volume_properties}
    return produced
def revert(self, context, result, **kwargs):
    """Undo the committed quota by reserving and committing the negation.

    A committed reservation cannot be rolled back directly, so this
    reserves -1 volume / -size gigabytes and commits that delta instead.
    Failures here are logged, not raised, so the revert chain continues.
    """
    # We never produced a result and therefore can't destroy anything.
    if isinstance(result, ft.Failure):
        return
    volume = result['volume_properties']
    try:
        # Subtract the volume count and its size from the quota usage.
        reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']}
        QUOTAS.add_volume_type_opts(context,
                                    reserve_opts,
                                    volume['volume_type_id'])
        reservations = QUOTAS.reserve(context,
                                      project_id=context.project_id,
                                      **reserve_opts)
        if reservations:
            QUOTAS.commit(context, reservations,
                          project_id=context.project_id)
    except Exception:
        LOG.exception("Failed to update quota for deleting "
                      "volume: %s", volume['id'])
VolumeCastTask
该任务则是调用cinder-scheduler模块,开始volume的调度工作
# Excerpt: VolumeCastTask._cast_create_volume().
# NOTE(review): the backend-selection code is elided ('....'); the local
# names volume/snapshot_id/image_id/backup_id are produced by that elided
# section.  The method ends by casting the request to cinder-scheduler.
def _cast_create_volume(self, context, request_spec, filter_properties):
.... # backend selection elided
self.scheduler_rpcapi.create_volume(
context,
volume,
snapshot_id=snapshot_id,
image_id=image_id,
request_spec=request_spec,
filter_properties=filter_properties,
backup_id=backup_id)
def execute(self, context, **kwargs):
    """Build a RequestSpec from the flow results and cast the creation.

    Pops 'scheduler_hints' and the db-layer 'volume_type' out of kwargs,
    converts the latter into a VolumeType object, then hands everything
    to _cast_create_volume().
    """
    hints = kwargs.pop('scheduler_hints', None)
    db_vt = kwargs.pop('volume_type')
    vt_obj = None
    if db_vt:
        # Hydrate an object-layer VolumeType from the db-layer record.
        vt_obj = objects.VolumeType()
        objects.VolumeType()._from_db_object(context, vt_obj, db_vt)
    kwargs['volume_type'] = vt_obj
    request_spec = objects.RequestSpec(**kwargs)
    filter_properties = {'scheduler_hints': hints} if hints else {}
    self._cast_create_volume(context, request_spec, filter_properties)
self.scheduler_rpcapi.create_volume对应的入口是:cinder/scheduler/rpcapi.py
def create_volume(self, ctxt, volume, snapshot_id=None, image_id=None,
                  request_spec=None, filter_properties=None,
                  backup_id=None):
    """Cast an asynchronous create_volume RPC to cinder-scheduler.

    Drops 'backup_id' from the message when the peer cannot speak RPC
    API version 3.10 or newer, since older schedulers would fail to
    deserialize the unknown argument.
    """
    volume.create_worker()
    cctxt = self._get_cctxt()
    msg_args = {'snapshot_id': snapshot_id, 'image_id': image_id,
                'request_spec': request_spec,
                'filter_properties': filter_properties,
                'volume': volume, 'backup_id': backup_id}
    if not self.client.can_send_version('3.10'):
        msg_args.pop('backup_id')
    # Fire-and-forget RPC: cast() returns without waiting for a reply.
    return cctxt.cast(ctxt, 'create_volume', **msg_args)
RPC请求被cinder/scheduler/manager.py中的create_volume函数接收:
cinder-scheduler一样使用taskflow对任务进行管理
@objects.Volume.set_workers
def create_volume(self, context, volume, snapshot_id=None, image_id=None,
                  request_spec=None, filter_properties=None,
                  backup_id=None):
    """Handle the create_volume RPC on the scheduler side.

    Like the API service, cinder-scheduler manages its work with a
    taskflow flow (built in cinder/scheduler/flows/create_volume.py),
    which is loaded here and then run under a logging listener.
    """
    self._wait_for_scheduler()
    try:
        # The scheduler likewise uses taskflow to manage its own tasks.
        flow_engine = create_volume.get_flow(context,
                                             self.driver,
                                             request_spec,
                                             filter_properties,
                                             volume,
                                             snapshot_id,
                                             image_id,
                                             backup_id)
    except Exception:
        msg = _("Failed to create scheduler manager volume flow")
        LOG.exception(msg)
        raise exception.CinderException(msg)
    with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
        flow_engine.run()
create_volume.get_flow函数入口:cinder/scheduler/flows/create_volume.py
从代码可以看到该flow主要包含2个任务,具体的任务分析参考下一章
def get_flow(context, driver_api, request_spec=None,
             filter_properties=None,
             volume=None, snapshot_id=None, image_id=None, backup_id=None):
    """Constructs and returns the scheduler entrypoint flow.

    This flow will do the following:

    1. Inject keys & values for dependent tasks.
    2. Extract a scheduler specification from the provided inputs.
    3. Use provided scheduler driver to select host and pass volume
       creation request further.
    """
    # Initial store injected into the flow; tasks pull their declared
    # inputs from these keys by name.
    create_what = {
        'context': context,
        'raw_request_spec': request_spec,
        'filter_properties': filter_properties,
        'volume': volume,
        'snapshot_id': snapshot_id,
        'image_id': image_id,
        'backup_id': backup_id,
    }
    flow_name = ACTION.replace(":", "_") + "_scheduler"
    scheduler_flow = linear_flow.Flow(flow_name)
    # This will extract and clean the spec from the starting values
    # (rebind maps the task's 'request_spec' input to 'raw_request_spec').
    scheduler_flow.add(ExtractSchedulerSpecTask(
        rebind={'request_spec': 'raw_request_spec'}))
    # This will activate the desired scheduler driver (and handle any
    # driver related failures appropriately) -- the real scheduling work.
    scheduler_flow.add(ScheduleCreateVolumeTask(driver_api))
    # Now load (but do not run) the flow using the provided initial data.
    return taskflow.engines.load(scheduler_flow, store=create_what)