openstack ice resize explained in detail (Part 2)

Thanks to everyone who supports this blog; discussion and exchange are welcome. Given limited ability and time, mistakes are inevitable, and corrections are appreciated!

If you repost this article, please keep the original author's blog information.

Better Me's blog: blog.csdn.net/tantexian

If you would like to discuss, feel free to leave a comment on the blog.


Since this write-up is long, it continues from the previous post: openstack ice resize explained in detail (Part 1).

9. /nova/compute/manager.py
     @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def prep_resize(self, context, image, instance, instance_type,
                    reservations, request_spec, filter_properties, node):
        """Initiates the process of moving a running instance to another host.
        Possibly changes the RAM and disk size in the process.
        """
        if node is None:  # if the scheduler did not pass a node, pick one again here
            node = self.driver.get_available_nodes(refresh=True)[0]
            LOG.debug(_("No node specified, defaulting to %s"), node,
                      instance=instance)
        with self._error_out_instance_on_exception(context, instance['uuid'],
                                                   reservations):
            self.conductor_api.notify_usage_exists(
                    context, instance, current_period=True)
            self._notify_about_instance_usage(  # notify that resize prep work for this instance is starting
                    context, instance, "resize.prep.start")
            try:
                self._prep_resize(context, image, instance,  # follow into step 10
                                  instance_type, reservations,
                                  request_spec, filter_properties,
                                  node)
            except Exception:
                # try to re-schedule the resize elsewhere:
                # if _prep_resize raises, reschedule the resize onto another host
                exc_info = sys.exc_info()
                self._reschedule_resize_or_reraise(context, image, instance,
                        exc_info, instance_type, reservations, request_spec,
                        filter_properties)
            finally:
                extra_usage_info = dict(
                        new_instance_type=instance_type['name'],
                        new_instance_type_id=instance_type['id'])
                self._notify_about_instance_usage(  # notify that resize prep work has finished
                    context, instance, "resize.prep.end",
                    extra_usage_info=extra_usage_info)


10. /nova/compute/manager.py
    def _prep_resize(self, context, image, instance, instance_type,
            reservations, request_spec, filter_properties, node):
        if not filter_properties:
            filter_properties = {}
        if not instance['host']:
            self._set_instance_error_state(context, instance['uuid'])
            msg = _('Instance has no source host')
            raise exception.MigrationError(msg)
        same_host = instance['host'] == self.host  # is the instance's current host this host?
        # if the source host is this host and the config disallows resizing to the
        # same host, put the instance into error state
        if same_host and not CONF.allow_resize_to_same_host:
            self._set_instance_error_state(context, instance['uuid'])
            msg = _('destination same as source!')
            raise exception.MigrationError(msg)
        # NOTE(danms): Stash the new instance_type to avoid having to
        # look it up in the database later
        sys_meta = instance.system_metadata
        flavors.save_flavor_info(sys_meta, instance_type, prefix='new_')
        # NOTE(mriedem): Stash the old vm_state so we can set the
        # resized/reverted instance back to the same state later.
        # stash the information needed for the migration
        vm_state = instance['vm_state']
        LOG.debug(_('Stashing vm_state: %s'), vm_state, instance=instance)
        sys_meta['old_vm_state'] = vm_state
        instance.save()
        limits = filter_properties.get('limits', {})
        rt = self._get_resource_tracker(node)
        # build the resize claim (migration data), follow into step 10-1
        with rt.resize_claim(context, instance, instance_type,
                             limits=limits) as claim:
            LOG.audit(_('Migrating'), context=context, instance=instance)
            # actually start the migration via resize_instance, follow into step 11
            self.compute_rpcapi.resize_instance(context, instance,
                    claim.migration, image, instance_type, reservations)

Note: the claim.migration argument passed above is produced in step 10-1.
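
Note also that allow_resize_to_same_host defaults to False, so the check in _prep_resize above will refuse a resize on a single-node test environment. To experiment on one host you would enable it in nova.conf, roughly like this (an example setting, not part of the code above):

    [DEFAULT]
    allow_resize_to_same_host = True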
    
10-1. /nova/compute/resource_tracker.py
     @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
    def resize_claim(self, context, instance, instance_type, limits=None):
        """Indicate that resources are needed for a resize operation to this
        compute host.
        :param context: security context
        :param instance: instance object to reserve resources for
        :param instance_type: new instance_type being resized to
        :param limits: Dict of oversubscription limits for memory, disk,
                       and CPUs.
        :returns: A Claim ticket representing the reserved resources.  This
                  should be turned into finalize  a resource claim or free
                  resources after the compute operation is finished.
        """
        if self.disabled:
            # compute_driver doesn't support resource tracking, just
            # generate the migration record and continue the resize:
            migration = self._create_migration(context, instance,
                                               instance_type)
            return claims.NopClaim(migration=migration)
        # get memory overhead required to build this instance:
        overhead = self.driver.estimate_instance_overhead(instance_type)
        LOG.debug(_("Memory overhead for %(flavor)d MB instance; %(overhead)d "
                    "MB"), {'flavor': instance_type['memory_mb'],
                            'overhead': overhead['memory_mb']})
        instance_ref = obj_base.obj_to_primitive(instance)
        claim = claims.ResizeClaim(instance_ref, instance_type, self,
                                   self.compute_node, overhead=overhead,
                                   limits=limits)
        migration = self._create_migration(context, instance_ref,
                                           instance_type)  # follow into step 10-2
        claim.migration = migration
        # Mark the resources in-use for the resize landing on this
        # compute host:
        self._update_usage_from_migration(context, instance_ref,
                                              self.compute_node, migration)
        elevated = context.elevated()
        self._update(elevated, self.compute_node)
        return claim
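
The reason resize_claim can be used in a `with` block back in step 10 is that claim objects are context managers: if the body raises, the claim is aborted and the reserved resources are released. A minimal sketch of that pattern, not the actual nova/compute/claims.py code (the tracker.release() call is hypothetical):

    class SimpleClaim(object):
        """Toy stand-in for a Nova resource claim: release resources on error."""

        def __init__(self, tracker):
            self.tracker = tracker

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            if exc_type is not None:
                # the with-body raised, so give the reserved resources back
                self.abort()

        def abort(self):
            self.tracker.release(self)  # hypothetical tracker API, not Nova's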


10-2. /nova/compute/resource_tracker.py
Builds the migration record:
     def _create_migration(self, context, instance, instance_type):
        """Create a migration record for the upcoming resize.  This should
        be done while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource
        claim will not be lost if the audit process starts.
        """
        old_instance_type = flavors.extract_flavor(instance)
        migration = migration_obj.Migration()
        migration.dest_compute = self.host
        migration.dest_node = self.nodename
        migration.dest_host = self.driver.get_host_ip_addr()
        migration.old_instance_type_id = old_instance_type['id']
        migration.new_instance_type_id = instance_type['id']
        migration.status = 'pre-migrating'
        migration.instance_uuid = instance['uuid']
        migration.source_compute = instance['host']
        migration.source_node = instance['node']
        migration.create(context.elevated())
        return migration
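
For illustration, the migration record created here (just before the disk copy starts) looks roughly like the following; all concrete values are made up:

    migration = {
        'source_compute': 'compute-01',    # instance['host']
        'source_node':    'compute-01',    # instance['node']
        'dest_compute':   'compute-02',    # self.host (the chosen destination)
        'dest_node':      'compute-02',    # self.nodename
        'dest_host':      '192.168.0.12',  # self.driver.get_host_ip_addr()
        'old_instance_type_id': 2,         # current flavor id
        'new_instance_type_id': 3,         # target flavor id
        'instance_uuid': 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
        'status': 'pre-migrating',
    }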

11. /nova/compute/rpcapi.py
     def resize_instance(self, ctxt, instance, migration, image, instance_type,
                        reservations=None):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.45')
        instance_type_p = jsonutils.to_primitive(instance_type)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'resize_instance',  # asynchronous RPC cast to resize_instance, follow into step 12
                   instance=instance, migration=migration,
                   image=image, reservations=reservations,
                   instance_type=instance_type_p)

12. /nova/compute/manager.py
     @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @errors_out_migration
    @wrap_instance_fault
    def resize_instance(self, context, instance, image,
                        reservations, migration, instance_type):
        """Starts the migration of a running instance to another host."""
        with self._error_out_instance_on_exception(context, instance.uuid,
                                                   reservations):
            if not instance_type:
                instance_type = flavor_obj.Flavor.get_by_id(
                    context, migration['new_instance_type_id'])
            # get the instance's network info
            network_info = self._get_instance_nw_info(context, instance)
            # update the migration record (source host, destination host, status)
            # in the database; the status becomes 'migrating' here
            migration.status = 'migrating'
            migration.save(context.elevated())
            instance.task_state = task_states.RESIZE_MIGRATING
            # save, expecting the previous task_state to be RESIZE_PREP
            instance.save(expected_task_state=task_states.RESIZE_PREP)
            self._notify_about_instance_usage(  # notify that resize is starting
                context, instance, "resize.start", network_info=network_info)
            # get the instance's block device mappings
            bdms = (block_device_obj.BlockDeviceMappingList.
                    get_by_instance_uuid(context, instance.uuid))
            # get disk block device info (volumes, snapshots, image-backed disks), follow into step 12-1
            block_device_info = self._get_instance_volume_block_device_info(
                                context, instance, bdms=bdms)
            # migrate the disks and power off the instance (key logic), follow into step 12-2
            # note: self.driver is the compute driver selected by CONF.compute_driver
            disk_info = self.driver.migrate_disk_and_power_off(
                    context, instance, migration.dest_host,
                    instance_type, network_info,
                    block_device_info)
            self._terminate_volume_connections(context, instance, bdms)
            migration_p = obj_base.obj_to_primitive(migration)
            instance_p = obj_base.obj_to_primitive(instance)
            # start the network side of the migration
            self.conductor_api.network_migrate_instance_start(context,
                                                              instance_p,
                                                              migration_p)
            # set the migration record's status to 'post-migrating'
            migration.status = 'post-migrating'
            migration.save(context.elevated())
            instance.host = migration.dest_compute
            instance.node = migration.dest_node
            # set instance.task_state to RESIZE_MIGRATED in the instances table
            instance.task_state = task_states.RESIZE_MIGRATED
            instance.save(expected_task_state=task_states.RESIZE_MIGRATING)
            self.compute_rpcapi.finish_resize(context, instance,  # follow into step 13
                    migration, image, disk_info,
                    migration.dest_compute, reservations=reservations)
            self._notify_about_instance_usage(context, instance, "resize.end",
                                              network_info=network_info)
            self.instance_events.clear_events_for_instance(instance)
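
Putting together the statuses set in step 10-2, this step, and the finish_resize path below (steps 13-15), a successful resize walks through roughly the following sequence (string values as stored by Nova's migration status and task_states constants):

    # Rough order of states during a successful resize, assembled from the code in this post:
    MIGRATION_STATUSES = ['pre-migrating', 'migrating', 'post-migrating', 'finished']
    INSTANCE_TASK_STATES = ['resize_prep', 'resize_migrating', 'resize_migrated',
                            'resize_finish', None]
    # instance.vm_state ends up as 'resized' until confirm_resize or revert_resize runs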

12-1. /nova/compute/manager.py
     def _get_instance_volume_block_device_info(self, context, instance,
                                               refresh_conn_info=False,
                                               bdms=None):
        """Transform volumes to the driver block_device format."""
        if not bdms:
            bdms = (block_device_obj.BlockDeviceMappingList.
                    get_by_instance_uuid(context, instance['uuid']))
        block_device_mapping = (  # collect volume, snapshot and image-backed disk devices
            driver_block_device.convert_volumes(bdms) +
            driver_block_device.convert_snapshots(bdms) +
            driver_block_device.convert_images(bdms))
        if not refresh_conn_info:
            # if the block_device_mapping has no value in connection_info
            # (returned as None), don't include in the mapping
            block_device_mapping = [
                bdm for bdm in block_device_mapping
                if bdm.get('connection_info')]
        else:
            block_device_mapping = driver_block_device.refresh_conn_infos(
                block_device_mapping, context, instance, self.volume_api,
                self.driver)
        if self.use_legacy_block_device_info:
            block_device_mapping = driver_block_device.legacy_block_devices(
                block_device_mapping)
        return {'block_device_mapping': block_device_mapping}
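
The connection_info filter above simply drops devices that have never been attached. A tiny illustration with made-up driver-format BDM dicts:

    block_device_mapping = [
        {'mount_device': '/dev/vdb',
         'connection_info': {'driver_volume_type': 'iscsi'}},  # attached volume
        {'mount_device': '/dev/vdc',
         'connection_info': None},                             # never attached
    ]
    block_device_mapping = [bdm for bdm in block_device_mapping
                            if bdm.get('connection_info')]
    # only the /dev/vdb entry survives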


12-2. /nova/virt/libvirt/driver.py
     def migrate_disk_and_power_off(self, context, instance, dest,
                                   flavor, network_info,
                                   block_device_info=None):
        LOG.debug(_("Starting migrate_disk_and_power_off"),
                   instance=instance)
        # Checks if the migration needs a disk resize down.
        for kind in ('root_gb', 'ephemeral_gb'):  # resize may only grow disks, never shrink them
            if flavor[kind] < instance[kind]:
                reason = _("Unable to resize disk down.")
                raise exception.InstanceFaultRollback(
                    exception.ResizeError(reason=reason))
        # get disk info from the instance's libvirt XML
        disk_info_text = self.get_instance_disk_info(instance['name'],
                block_device_info=block_device_info)
        disk_info = jsonutils.loads(disk_info_text)
        # copy the disk files over to the destination host
        # copy disks to destination
        # rename instance dir to +_resize at first for using
        # shared storage for instance dir (eg. NFS).
        # get the instance's base directory path
        inst_base = libvirt_utils.get_instance_path(instance)
        inst_base_resize = inst_base + "_resize"
        # check whether the instance directory is on storage shared with the destination, details in step 12-2-1
        shared_storage = self._is_storage_shared_with(dest, inst_base)
        # try to create the directory on the remote compute node
        # if this fails we pass the exception up the stack so we can catch
        # failures here earlier
        if not shared_storage:  # if not on shared storage, create the directory on the destination
            utils.execute('ssh', dest, 'mkdir', '-p', inst_base)
        # power off the instance
        self.power_off(instance)
        # get all of this instance's attached volume devices and disconnect them
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        for vol in block_device_mapping:
            connection_info = vol['connection_info']
            disk_dev = vol['mount_device'].rpartition("/")[2]
            self.volume_driver_method('disconnect_volume',
                                      connection_info,
                                      disk_dev)
        try:
            # move the instance base directory to inst_base_resize
            utils.execute('mv', inst_base, inst_base_resize)
            # if we are migrating the instance with shared storage then
            # create the directory.  If it is a remote node the directory
            # has already been created
            if shared_storage:  # on shared storage, recreate the inst_base directory locally
                dest = None
                utils.execute('mkdir', '-p', inst_base)
            for info in disk_info:
                # assume inst_base == dirname(info['path'])
                img_path = info['path']
                fname = os.path.basename(img_path)
                from_path = os.path.join(inst_base_resize, fname)
                # qcow2 disk with a backing file (i.e. backed by an image in the _base directory)
                if info['type'] == 'qcow2' and info['backing_file']:
                    tmp_path = from_path + "_rbase"
                    # merge backing file
                    # merge the qcow2 delta with its base image into tmp_path (a flattened copy)
                    utils.execute('qemu-img', 'convert', '-f', 'qcow2',
                                  '-O', 'qcow2', from_path, tmp_path)
                    # on shared storage, just move the flattened file into place at img_path
                    if shared_storage:
                        utils.execute('mv', tmp_path, img_path)
                    else:  # otherwise copy the flattened image to the destination via scp or rsync
                        libvirt_utils.copy_image(tmp_path, img_path, host=dest)
                        utils.execute('rm', '-f', tmp_path)  # delete the local temporary file once copied
                else:  # raw, or qcow2 with no backing file
                    # copy the instance's own disk file to the destination via scp or rsync
                    libvirt_utils.copy_image(from_path, img_path, host=dest)
        except Exception:  # on failure, clean up files already copied to the remote host
            with excutils.save_and_reraise_exception():
                self._cleanup_remote_migration(dest, inst_base,
                                               inst_base_resize,
                                               shared_storage)
        return disk_info_text
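
For reference, disk_info_text here is a JSON list with one dict per disk file; the keys this method relies on are 'type', 'path' and 'backing_file' (the real output contains more fields). A made-up example for a qcow2-backed instance:

    disk_info_text = '''[
        {"type": "qcow2",
         "path": "/var/lib/nova/instances/<uuid>/disk",
         "backing_file": "/var/lib/nova/instances/_base/<image-hash>"},
        {"type": "qcow2",
         "path": "/var/lib/nova/instances/<uuid>/disk.local",
         "backing_file": "/var/lib/nova/instances/_base/ephemeral_20_default"}
    ]'''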

    

12-2-1. /nova/virt/libvirt/driver.py
     def _is_storage_shared_with(self, dest, inst_base):
        # NOTE (rmk): There are two methods of determining whether we are
        #             on the same filesystem: the source and dest IP are the
        #             same, or we create a file on the dest system via SSH
        #             and check whether the source system can also see it.
        # if the source and destination IPs are the same, treat the storage as shared
        shared_storage = (dest == self.get_host_ip_addr())
        if not shared_storage:
            tmp_file = uuid.uuid4().hex + '.tmp'
            tmp_path = os.path.join(inst_base, tmp_file)
            try:
                # create a temporary file on the destination host via SSH
                utils.execute('ssh', dest, 'touch', tmp_path)
                # if the file is visible locally the storage is shared; otherwise it is not
                if os.path.exists(tmp_path):
                    shared_storage = True
                    os.unlink(tmp_path)
                else:
                    utils.execute('ssh', dest, 'rm', tmp_path)
            except Exception:
                pass
        return shared_storage
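
The same idea can be written as a standalone helper outside Nova: create a marker file on the remote path over SSH and check whether it shows up locally. A minimal sketch (the function name and error handling are mine, not Nova's):

    import os
    import subprocess
    import uuid

    def is_shared_storage(dest_host, path):
        """Return True if `path` appears to live on storage shared with dest_host."""
        marker = os.path.join(path, uuid.uuid4().hex + '.tmp')
        try:
            # create the marker on the remote side ...
            subprocess.check_call(['ssh', dest_host, 'touch', marker])
            # ... if it is visible locally, the directory is shared
            if os.path.exists(marker):
                os.unlink(marker)
                return True
            subprocess.check_call(['ssh', dest_host, 'rm', marker])
            return False
        except subprocess.CalledProcessError:
            return False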


13. /nova/compute/rpcapi.py
     def finish_resize(self, ctxt, instance, migration, image, disk_info,
            host, reservations=None):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.46')
        cctxt = self.client.prepare(server=host, version=version)
        cctxt.cast(ctxt, 'finish_resize',  # asynchronous RPC cast, follow into step 14
                   instance=instance, migration=migration,
                   image=image, disk_info=disk_info, reservations=reservations)

14. /nova/compute/manager.py
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @errors_out_migration
    @wrap_instance_fault
    def finish_resize(self, context, disk_info, image, instance,
                      reservations, migration):
        """Completes the migration process.
        Sets up the newly transferred disk and turns on the instance at its
        new host machine.
        """
        try:
            self._finish_resize(context, instance, migration,  # follow into step 15
                                disk_info, image)
            self._quota_commit(context, reservations)
        except Exception as error:
            LOG.exception(_('Setting instance vm_state to ERROR'),
                          instance=instance)
            with excutils.save_and_reraise_exception():
                try:
                    self._quota_rollback(context, reservations)
                except Exception as qr_error:
                    LOG.exception(_("Failed to rollback quota for failed "
                                    "finish_resize: %s"),
                                  qr_error, instance=instance)
                self._set_instance_error_state(context, instance['uuid'])
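
The excutils.save_and_reraise_exception() context manager used in this handler lets cleanup code (quota rollback, error state) run and then re-raises the original exception with its traceback intact. A minimal, generic usage sketch (do_work/cleanup are placeholders; in the Icehouse tree excutils lives under nova.openstack.common):

    from nova.openstack.common import excutils

    def do_work():
        raise RuntimeError('boom')    # stand-in for the failing operation

    def cleanup():
        print('rolling back')         # stand-in for the quota rollback

    try:
        do_work()
    except Exception:
        with excutils.save_and_reraise_exception():
            # cleanup() runs first; when this block exits normally the
            # original RuntimeError is re-raised
            cleanup()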


15. /nova/compute/manager.py
     def _finish_resize(self, context, instance, migration, disk_info,
                       image):
        resize_instance = False
        old_instance_type_id = migration['old_instance_type_id']
        new_instance_type_id = migration['new_instance_type_id']
        old_instance_type = flavors.extract_flavor(instance)
        sys_meta = instance.system_metadata
        # NOTE(mriedem): Get the old_vm_state so we know if we should
        # power on the instance. If old_vm_state is not set we need to default
        # to ACTIVE for backwards compatibility
        # get the previously stashed vm_state
        old_vm_state = sys_meta.get('old_vm_state', vm_states.ACTIVE)
        flavors.save_flavor_info(sys_meta,
                                 old_instance_type,
                                 prefix='old_')
        # apply the new flavor (memory, disks, etc.) if the flavor actually changed
        if old_instance_type_id != new_instance_type_id:
            instance_type = flavors.extract_flavor(instance, prefix='new_')
            flavors.save_flavor_info(sys_meta, instance_type)
            instance.instance_type_id = instance_type['id']
            instance.memory_mb = instance_type['memory_mb']
            instance.vcpus = instance_type['vcpus']
            instance.root_gb = instance_type['root_gb']
            instance.ephemeral_gb = instance_type['ephemeral_gb']
            instance.system_metadata = sys_meta
            instance.save()
            resize_instance = True
        # NOTE(tr3buchet): setup networks on destination host
        # set up networking on the destination host
        self.network_api.setup_networks_on_host(context, instance,
                                                migration['dest_compute'])
        instance_p = obj_base.obj_to_primitive(instance)
        migration_p = obj_base.obj_to_primitive(migration)
        # have conductor finish the network side of the migration
        self.conductor_api.network_migrate_instance_finish(context,
                                                           instance_p,
                                                           migration_p)
        # refresh the instance's network info
        network_info = self._get_instance_nw_info(context, instance)
        # update the instance's task_state in the database to RESIZE_FINISH
        instance.task_state = task_states.RESIZE_FINISH
        instance.system_metadata = sys_meta
        instance.save(expected_task_state=task_states.RESIZE_MIGRATED)
        self._notify_about_instance_usage(  # notify that finish_resize is starting
            context, instance, "finish_resize.start",
            network_info=network_info)
        # get block device info (volumes, snapshots, images), refreshing connection info
        block_device_info = self._get_instance_volume_block_device_info(
                            context, instance, refresh_conn_info=True)
        # NOTE(mriedem): If the original vm_state was STOPPED, we don't
        # automatically power on the instance after it's migrated
        # decide whether to power the instance back on once the migration finishes
        power_on = old_vm_state != vm_states.STOPPED
        self.driver.finish_migration(context, migration, instance,  # complete the migration, follow into step 16
                                     disk_info,
                                     network_info,
                                     image, resize_instance,
                                     block_device_info, power_on)
        # update the migration and instance state now that the migration is done
        migration.status = 'finished'
        migration.save(context.elevated())
        instance.vm_state = vm_states.RESIZED
        instance.task_state = None
        instance.launched_at = timeutils.utcnow()
        instance.save(expected_task_state=task_states.RESIZE_FINISH)
        self._notify_about_instance_usage(
            context, instance, "finish_resize.end",
            network_info=network_info)
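
The 'new_'/'old_' prefixes used with save_flavor_info/extract_flavor (here and in step 10) simply namespace flavor fields inside instance.system_metadata, which is how confirm/revert can find both flavors later. Roughly (key names follow Nova's instance_type_* convention; the concrete values are made up):

    system_metadata = {
        'instance_type_memory_mb':     '2048',  # flavor currently applied
        'new_instance_type_memory_mb': '4096',  # flavor being resized to (stashed in step 10)
        'old_instance_type_memory_mb': '2048',  # flavor to restore on revert (stashed here)
        'old_vm_state': 'active',               # stashed in _prep_resize (step 10)
        # ... plus vcpus, root_gb, ephemeral_gb, flavorid, id in the same pattern
    }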



16. /nova/virt/libvirt/driver.py
     def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance,
                         block_device_info=None, power_on=True):
        LOG.debug(_("Starting finish_migration"), instance=instance)
        # resize disks. only "disk" and "disk.local" are necessary.
        # resize the disks; only "disk" (root_gb) and "disk.local" (ephemeral_gb) matter
        disk_info = jsonutils.loads(disk_info)
        for info in disk_info:
            # determine the target size for this disk file
            fname = os.path.basename(info['path'])
            if fname == 'disk':
                size = instance['root_gb']
            elif fname == 'disk.local':
                size = instance['ephemeral_gb']
            else:
                size = 0
            size *= units.Gi
            # If we have a non partitioned image that we can extend
            # then ensure we're in 'raw' format so we can extend file system.
            fmt = info['type']  # current disk file format
            if (size and fmt == 'qcow2' and
                    disk.can_resize_image(info['path'], size) and
                    disk.is_image_partitionless(info['path'], use_cow=True)):
                path_raw = info['path'] + '_raw'
                # convert the disk to raw so the file system can be extended
                utils.execute('qemu-img', 'convert', '-f', 'qcow2',
                              '-O', 'raw', info['path'], path_raw)
                utils.execute('mv', path_raw, info['path'])
                fmt = 'raw'
            if size:
                use_cow = fmt == 'qcow2'  # for qcow2, disk.extend resizes via qemu-img
                disk.extend(info['path'], size, use_cow=use_cow)
            # if the file is now raw but use_cow_images is set, convert it back to qcow2
            if fmt == 'raw' and CONF.use_cow_images:
                # back to qcow2 (no backing_file though) so that snapshot
                # will be available
                path_qcow = info['path'] + '_qcow'
                utils.execute('qemu-img', 'convert', '-f', 'raw',
                              '-O', 'qcow2', info['path'], path_qcow)
                utils.execute('mv', path_qcow, info['path'])
        # build the disk mapping info (e.g. virtio bus, disk.local, disk.swap)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            block_device_info,
                                            image_meta)
        # assume _create_image do nothing if a target file exists.
        self._create_image(context, instance,  # create the image files, follow into step 16-1
                           disk_mapping=disk_info['mapping'],
                           network_info=network_info,
                           block_device_info=None, inject_files=False)
        # generate the libvirt domain XML
        xml = self.to_xml(context, instance, network_info, disk_info,
                          block_device_info=block_device_info,
                          write_to_disk=True)
        # create the domain and its network from the XML and configuration
        self._create_domain_and_network(context, xml, instance, network_info,
                                        block_device_info, power_on)
        if power_on:  # if the instance was running before, power it on (the migration is now complete)
            timer = loopingcall.FixedIntervalLoopingCall(
                                                    self._wait_for_running,
                                                    instance)
            timer.start(interval=0.5).wait()
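
The per-disk size passed to disk.extend is just the flavor value scaled to bytes (units.Gi == 1024**3), and remember from step 12-2 that shrinking is rejected. A tiny worked example with a hypothetical target flavor:

    Gi = 1024 ** 3                      # same value as nova's units.Gi

    root_gb, ephemeral_gb = 20, 10      # hypothetical new flavor
    sizes = {'disk':       root_gb * Gi,        # 21474836480 bytes
             'disk.local': ephemeral_gb * Gi}   # 10737418240 bytes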


16-1. /nova/virt/libvirt/driver.py
     def _create_image(self, context, instance,
                      disk_mapping, suffix='',
                      disk_images=None, network_info=None,
                      block_device_info=None, files=None,
                      admin_pass=None, inject_files=True):
        if not suffix:
            suffix = ''
        booted_from_volume = self._is_booted_from_volume(
            instance, disk_mapping)
        def image(fname, image_type=CONF.libvirt.images_type):
            return self.image_backend.image(instance,
                                            fname + suffix, image_type)
        def raw(fname):
            return image(fname, image_type='raw')
        # ensure directories exist and are writable
        fileutils.ensure_tree(libvirt_utils.get_instance_path(instance))
        LOG.info(_('Creating image'), instance=instance)
        # NOTE(dprince): for rescue console.log may already exist... chown it.
        self._chown_console_log_for_instance(instance)  # chown the console.log file
        # NOTE(yaguang): For evacuate disk.config already exist in shared
        # storage, chown the disk.config file.
        self._chown_disk_config_for_instance(instance)
        # NOTE(vish): No need add the suffix to console.log
        libvirt_utils.write_to_file(
            self._get_console_log_path(instance), '', 7)
        if not disk_images:
            disk_images = {'image_id': instance['image_ref'],
                           'kernel_id': instance['kernel_id'],
                           'ramdisk_id': instance['ramdisk_id']}
        if disk_images['kernel_id']:
            fname = imagecache.get_cache_fname(disk_images, 'kernel_id')
            raw('kernel').cache(fetch_func=libvirt_utils.fetch_image,  # follow into step 16-1-1
                                context=context,
                                filename=fname,
                                image_id=disk_images['kernel_id'],
                                user_id=instance['user_id'],
                                project_id=instance['project_id'])
            if disk_images['ramdisk_id']:
                fname = imagecache.get_cache_fname(disk_images, 'ramdisk_id')
                raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_image,  # follow into step 16-1-1
                                     context=context,
                                     filename=fname,
                                     image_id=disk_images['ramdisk_id'],
                                     user_id=instance['user_id'],
                                     project_id=instance['project_id'])
        inst_type = flavors.extract_flavor(instance)
        # NOTE(ndipanov): Even if disk_mapping was passed in, which
        # currently happens only on rescue - we still don't want to
        # create a base image.
        if not booted_from_volume:
            root_fname = imagecache.get_cache_fname(disk_images, 'image_id')
            size = instance['root_gb'] * units.Gi
            if size == 0 or suffix == '.rescue':
                size = None
            image('disk').cache(fetch_func=libvirt_utils.fetch_image,  # follow into step 16-1-1
                                context=context,
                                filename=root_fname,
                                size=size,
                                image_id=disk_images['image_id'],
                                user_id=instance['user_id'],
                                project_id=instance['project_id'])
        # Lookup the filesystem type if required
        os_type_with_default = disk.get_fs_type_for_os_type(
                                                          instance['os_type'])
        ephemeral_gb = instance['ephemeral_gb']
        if 'disk.local' in disk_mapping:
            disk_image = image('disk.local')
            fn = functools.partial(self._create_ephemeral,
                                   fs_label='ephemeral0',
                                   os_type=instance["os_type"],
                                   is_block_dev=disk_image.is_block_dev)
            fname = "ephemeral_%s_%s" % (ephemeral_gb, os_type_with_default)
            size = ephemeral_gb * units.Gi
            disk_image.cache(fetch_func=fn,
                             filename=fname,
                             size=size,
                             ephemeral_size=ephemeral_gb)
        for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
                block_device_info)):
            disk_image = image(blockinfo.get_eph_disk(idx))
            fn = functools.partial(self._create_ephemeral,
                                   fs_label='ephemeral%d' % idx,
                                   os_type=instance["os_type"],
                                   is_block_dev=disk_image.is_block_dev)
            size = eph['size'] * units.Gi
            fname = "ephemeral_%s_%s" % (eph['size'], os_type_with_default)
            disk_image.cache(
                             fetch_func=fn,
                             filename=fname,
                             size=size,
                             ephemeral_size=eph['size'])
        if 'disk.swap' in disk_mapping:
            mapping = disk_mapping['disk.swap']
            swap_mb = 0
            swap = driver.block_device_info_get_swap(block_device_info)
            if driver.swap_is_usable(swap):
                swap_mb = swap['swap_size']
            elif (inst_type['swap'] > 0 and
                  not block_device.volume_in_mapping(
                    mapping['dev'], block_device_info)):
                swap_mb = inst_type['swap']
            if swap_mb > 0:
                size = swap_mb * units.Mi
                image('disk.swap').cache(fetch_func=self._create_swap,
                                         filename="swap_%s" % swap_mb,
                                         size=size,
                                         swap_mb=swap_mb)
        # Config drive
        if configdrive.required_by(instance):
            LOG.info(_('Using config drive'), instance=instance)
            extra_md = {}
            if admin_pass:
                extra_md['admin_pass'] = admin_pass
            inst_md = instance_metadata.InstanceMetadata(instance,
                content=files, extra_md=extra_md, network_info=network_info)
            with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
                configdrive_path = self._get_disk_config_path(instance)
                LOG.info(_('Creating config drive at %(path)s'),
                         {'path': configdrive_path}, instance=instance)
                try:
                    cdb.make_drive(configdrive_path)
                except processutils.ProcessExecutionError as e:
                    with excutils.save_and_reraise_exception():
                        LOG.error(_('Creating config drive failed '
                                  'with error: %s'),
                                  e, instance=instance)
        # File injection only if needed
        elif inject_files and CONF.libvirt.inject_partition != -2:
            if booted_from_volume:
                LOG.warn(_('File injection into a boot from volume '
                           'instance is not supported'), instance=instance)
            self._inject_data(
                instance, network_info, admin_pass, files, suffix)
        if CONF.libvirt.virt_type == 'uml':
            libvirt_utils.chown(image('disk').path, 'root')


16-1-1. /nova/virt/libvirt/imagebackend.py
    def cache(self, fetch_func, filename, size=None, *args, **kwargs):
        """Creates image from template.
        Ensures that template and image not already exists.
        Ensures that base directory exists.
        Synchronizes on template fetching.
        :fetch_func: Function that creates the base image
                     Should accept `target` argument.
        :filename: Name of the file in the image directory
        :size: Size of created image in bytes (optional)
        """
        @utils.synchronized(filename, external=True, lock_path=self.lock_path)
        def fetch_func_sync(target, *args, **kwargs):
            fetch_func(target=target, *args, **kwargs)
        base_dir = os.path.join(CONF.instances_path,
                                CONF.image_cache_subdirectory_name)
        if not os.path.exists(base_dir):
            fileutils.ensure_tree(base_dir)
        base = os.path.join(base_dir, filename)
        if not self.check_image_exists() or not os.path.exists(base):
            self.create_image(fetch_func_sync, base, size,
                              *args, **kwargs)
        if (size and self.preallocate and self._can_fallocate() and
                os.access(self.path, os.W_OK)):
            utils.execute('fallocate', '-n', '-l', size, self.path)
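
With the usual defaults the base image therefore ends up under /var/lib/nova/instances/_base/. A small illustration of how the path is assembled (the paths and the cache file name are examples; both options are configurable in nova.conf):

    import os

    instances_path = '/var/lib/nova/instances'       # CONF.instances_path (typical default)
    image_cache_subdirectory_name = '_base'           # CONF.image_cache_subdirectory_name default

    filename = 'f5000a5afee0bba07e1a1a7de1c7a2fe2ff1f3d5'   # hypothetical cache file name
    base = os.path.join(instances_path, image_cache_subdirectory_name, filename)
    # -> /var/lib/nova/instances/_base/f5000a5afee0bba07e1a1a7de1c7a2fe2ff1f3d5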

This completes the resize operation itself. Next, we will analyze the confirm_resize (confirmation) and revert_resize (rollback) operations that follow a resize.

Since CSDN limits the article length, please see the follow-up post for the remaining steps: openstack ice resize explained in detail (Part 3)
