openstack-nova-虚机创建流程以及源码分析(二)

基于openstack stein

前面完成主机调度后,接下来由 conductor 通过 RPC 通知计算节点开始创建虚机。

#nova/conductor/manager.py
def build_instances(self, context, instances, image, filter_properties,
            admin_password, injected_files, requested_networks,
            security_groups, block_device_mapping=None, legacy_bdm=True,
            request_spec=None, host_lists=None):
            # Excerpt: after the scheduler has selected hosts, the conductor
            # sends one build request per instance to the chosen compute
            # service over RPC.
            .......................
            # Asynchronous RPC to the selected compute node:
            # host.service_host is the destination compute service,
            # host.nodename the hypervisor node on that host.
            self.compute_rpcapi.build_and_run_instance(context,
                                instance=instance, host=host.service_host, image=image,
                                request_spec=local_reqspec,
                                filter_properties=local_filter_props,
                                admin_password=admin_password,
                                injected_files=injected_files,
                                requested_networks=requested_networks,
                                security_groups=security_groups,
                                block_device_mapping=bdms, node=host.nodename,
                                limits=host.limits, host_list=host_list)
def build_and_run_instance(self, ctxt, instance, host, image, request_spec,
            filter_properties, admin_password=None, injected_files=None,
            requested_networks=None, security_groups=None,
            block_device_mapping=None, node=None, limits=None,
            host_list=None):
            # Excerpt: RPC client side (nova/compute/rpcapi.py).
            .........
            # 'cast' is fire-and-forget: the conductor does not wait for the
            # compute node to finish (or even acknowledge) the build.
            cctxt.cast(ctxt, 'build_and_run_instance', **kwargs)

自此,消息通过 cast(异步、不等待返回)传给了 compute 节点。

#nova/compute/manager.py
def build_and_run_instance(self, context, instance, image, request_spec,
                     filter_properties, admin_password=None,
                     injected_files=None, requested_networks=None,
                     security_groups=None, block_device_mapping=None,
                     node=None, limits=None, host_list=None):
        # RPC entry point on the compute manager: wraps the real build in a
        # per-instance-uuid lock plus a host-wide build semaphore, then runs
        # it on a separate greenthread so the RPC worker is not blocked.

        @utils.synchronized(instance.uuid)
        def _locked_do_build_and_run_instance(*args, **kwargs):
            # NOTE(danms): We grab the semaphore with the instance uuid
            # locked because we could wait in line to build this instance
            # for a while and we want to make sure that nothing else tries
            # to do anything with this instance while we wait.
            with self._build_semaphore:
                try:
                    result = self._do_build_and_run_instance(*args, **kwargs)
                .............................
        # Run the locked build asynchronously (eventlet greenthread);
        # this method returns to the RPC layer immediately.
        utils.spawn_n(_locked_do_build_and_run_instance,
                      context, instance, image, request_spec,
                      filter_properties, admin_password, injected_files,
                      requested_networks, security_groups,
                      block_device_mapping, node, limits, host_list)
    def _do_build_and_run_instance(self, context, instance, image,
            request_spec, filter_properties, admin_password, injected_files,
            requested_networks, security_groups, block_device_mapping,
            node=None, limits=None, host_list=None):
        # Excerpt: move the instance into BUILDING state, decode injected
        # files, then time and run the actual build.

        try:
            LOG.debug('Starting instance...', instance=instance)
            instance.vm_state = vm_states.BUILDING
            instance.task_state = None
            # expected_task_state guards against races: save() raises
            # UnexpectedTaskStateError unless the instance was still in
            # SCHEDULING (or no) task state.
            instance.save(expected_task_state=
                    (task_states.SCHEDULING, None))
        except exception.InstanceNotFound:
            # Instance was deleted while we waited for the build lock.
            msg = 'Instance disappeared before build.'
            LOG.debug(msg, instance=instance)
            return build_results.FAILED
        except exception.UnexpectedTaskStateError as e:
            # Someone else changed the task state first; abandon this build.
            LOG.debug(e.format_message(), instance=instance)
            return build_results.FAILED

        # b64 decode the files to inject:
        decoded_files = self._decode_files(injected_files)

        if limits is None:
            limits = {}

        if node is None:
            node = self._get_nodename(instance, refresh=True)

        try:
            # StopWatch only measures elapsed time for the log line below;
            # it does not enforce a timeout.
            with timeutils.StopWatch() as timer:
                self._build_and_run_instance(context, instance, image,
                        decoded_files, admin_password, requested_networks,
                        security_groups, block_device_mapping, node, limits,
                        filter_properties, request_spec)
            LOG.info('Took %0.2f seconds to build instance.',
                     timer.elapsed(), instance=instance)
            return build_results.ACTIVE
 def _build_and_run_instance(self, context, instance, image, injected_files,
            admin_password, requested_networks, security_groups,
            block_device_mapping, node, limits, filter_properties,
            request_spec=None):
 	# Excerpt: claim resources on this node, build networking/volumes,
 	# then spawn the guest on the hypervisor.
 	..............................
 	try:
            scheduler_hints = self._get_scheduler_hints(filter_properties,
                                                        request_spec)
            # instance_claim reserves CPU/RAM/disk with the resource tracker
            # and sets instance.host; leaving the context manager on error
            # releases the claim.
            with self.rt.instance_claim(context, instance, node, limits):
                # NOTE(russellb) It's important that this validation be done
                # *after* the resource tracker instance claim, as that is where
                # the host is set on the instance.
                self._validate_instance_group_policy(context, instance,
                                                     scheduler_hints)
                image_meta = objects.ImageMeta.from_dict(image)

                LOG.debug('image_meta:%s, image:%s' % (image_meta, image))
                # verify image checksum
                # NOTE(review): enable_verify_image_md5sum /
                # verify_image_md5sum look like a downstream/vendor patch,
                # not upstream Stein -- confirm against the deployed tree.
                if CONF.enable_verify_image_md5sum:
                    self.driver.verify_image_md5sum(context, instance, image_meta.id)

                request_group_resource_providers_mapping = \
                    self._get_request_group_mapping(request_spec)

                # _build_resources is a context manager: it allocates
                # networks and block devices and tears them down on failure.
                with self._build_resources(context, instance,
                        requested_networks, security_groups, image_meta,
                        block_device_mapping,
                        request_group_resource_providers_mapping) as resources:
                    instance.vm_state = vm_states.BUILDING
                    instance.task_state = task_states.SPAWNING
                    # NOTE(JoshNang) This also saves the changes to the
                    # instance from _allocate_network_async, as they aren't
                    # saved in that function to prevent races.
                    instance.save(expected_task_state=
                            task_states.BLOCK_DEVICE_MAPPING)
                    block_device_info = resources['block_device_info']
                    network_info = resources['network_info']
                    allocs = resources['allocations']
                    LOG.debug('Start spawning the instance on the hypervisor.',
                              instance=instance)
                    with timeutils.StopWatch() as timer:
                        LOG.debug("_build_and_run_instance self.driver.spawn "
                                  "image_meta:%s, block_device_info:%s"
                                  % (image_meta, block_device_info))
                        # Hand off to the virt driver (libvirt) to actually
                        # create and start the guest.
                        self.driver.spawn(context, instance, image_meta,
                                          injected_files, admin_password,
                                          allocs, network_info=network_info,
                                          block_device_info=block_device_info,
                                          destroy_disks_on_failure=True)
                    LOG.info('Took %0.2f seconds to spawn the instance on '
                             'the hypervisor.', timer.elapsed(),
                             instance=instance)
def _build_resources(self, context, instance, requested_networks,
                         security_groups, image_meta, block_device_mapping,
                         resource_provider_mapping):
     # Excerpt: context manager that prepares networks and block devices
     # before spawn (the yield and cleanup paths are elided here).
     try:
            LOG.debug('Start building networks asynchronously for instance.',
                      instance=instance)
            # Network allocation is kicked off asynchronously; the result is
            # awaited later, when it is actually needed.
            network_info = self._build_networks_for_instance(context, instance,
                    requested_networks, security_groups,
                    resource_provider_mapping)
           ..................
    try:
            # Perform any driver preparation work for the driver.
            self.driver.prepare_for_spawn(instance)

            # Depending on a virt driver, some network configuration is
            # necessary before preparing block devices.
            self.driver.prepare_networks_before_block_device_mapping(
                instance, network_info)

            # Verify that all the BDMs have a device_name set and assign a
            # default to the ones missing it with the help of the driver.
            self._default_block_device_names(instance, image_meta,
                                             block_device_mapping)

            LOG.debug('Start building block device mappings for instance.',
                      instance=instance)
            instance.vm_state = vm_states.BUILDING
            instance.task_state = task_states.BLOCK_DEVICE_MAPPING
            instance.save()

            # _prep_block_device drives the volume create/attach flow
            # (cinder API) for each block device mapping.
            block_device_info = self._prep_block_device(context, instance,
                    block_device_mapping)   
class DriverVolImageBlockDevice(DriverVolumeBlockDevice):
    """Volume block device that is created from a Glance image on demand."""

    _valid_source = 'image'
    _proxy_as_attr_inherited = set(['image_id'])

    def attach(self, context, instance, volume_api,
               virt_driver, wait_func=None):
        """Ensure a volume exists for this image-backed BDM, then attach it.

        When no volume has been created yet, one is built from the image via
        the volume API (in the availability zone derived from the instance),
        optionally waited on via *wait_func*, and its id recorded on the BDM
        before delegating the actual attach to the parent class.
        """
        if not self.volume_id:
            zone = _get_volume_create_az_value(instance)
            new_vol = volume_api.create(context, self.volume_size,
                                        '', '', image_id=self.image_id,
                                        volume_type=self.volume_type,
                                        availability_zone=zone)
            if wait_func:
                self._call_wait_func(context, wait_func, volume_api,
                                     new_vol['id'])

            self.volume_id = new_vol['id']

            # TODO(mriedem): Create an attachment to reserve the volume and
            # make us go down the new-style attach flow.

        super(DriverVolImageBlockDevice, self).attach(
            context, instance, volume_api, virt_driver)

先将 instance 状态保存为 BUILDING,然后在计时器(StopWatch,仅计时、非超时控制)内调用 self._build_and_run_instance。build 虚机时先对镜像等进行检查,再通过 self._build_resources 调用 self._build_networks_for_instance 异步准备分配网络,并将 instance 的 task_state 更新为 networking;随后将 task_state 更新为 block_device_mapping,调用 self._prep_block_device,通过 cinder API 创建 volume 并 attach,再向 placement 上报虚机资源信息。至此准备工作基本完成,将 task_state 变更为 spawning,最后调用 libvirt driver 的 spawn 创建虚机。

#nova/virt/libvirt/driver.py
def spawn(self, context, instance, image_meta, injected_files,
              admin_password, allocations, network_info=None,
              block_device_info=None, destroy_disks_on_failure=False):
   # Excerpt: libvirt driver entry point for creating a new guest.
   .............
   		# Create the backing disk images for the guest.
   		self._create_image(context, instance, disk_info['mapping'],
                           injection_info=injection_info,
                           block_device_info=block_device_info)
    	# Render the libvirt domain XML from instance, disk and network info.
    	xml = self._get_guest_xml(context, instance, network_info,
                                  disk_info, image_meta,
                                  block_device_info=block_device_info,
                                  mdevs=mdevs)
        # Define and start the domain, plugging VIFs and setting up
        # firewall filtering along the way.
        self._create_domain_and_network(
            context, xml, instance, network_info,
            block_device_info=block_device_info,
            post_xml_callback=gen_confdrive,
            destroy_disks_on_failure=destroy_disks_on_failure)

先创建 image 镜像并准备 domain XML 信息,然后调用 _create_domain_and_network 创建 libvirt guest 并处理网络 vif 等信息:执行 plug_vifs,根据所选的防火墙 driver 配置基础 filter(MAC、IP、ARP 防欺骗保护),并准备虚机的 basic 防火墙 filter(放行 DHCP UDP 67/68 端口等),最后通过 libvirt driver 启动虚机。

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

robin5911

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值