Two ways Nova injects keys

Nova injects keys in two ways:
1. metadata
2. file injection (config drive)

These correspond to the following nova boot options (see the example below):
--meta       sets instance metadata; for example, the admin_password key can be used to change the root password
--file       injects a file (typically a script) into the instance
--user-data  passes user data, which is usually also script content
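For example, a boot command combining these options might look like the following (the image, flavor, file paths, and password value are all placeholders):

    nova boot --image cirros --flavor m1.small \
        --meta admin_password=MySecret123 \
        --file /root/.ssh/authorized_keys=./id_rsa.pub \
        --user-data ./setup.sh \
        --config-drive true \
        test-vm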
Once metadata is set, it is written to the database; when the instance boots, cloud-init fetches it from the metadata service, e.g. via curl http://169.254.169.254/latest/user-data. The alternative is to write the same information to an extra disk attached to the instance (the config drive); cloud-init then reads the script from that disk to change the instance's password.
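Inside the guest, both the EC2-style and the OpenStack-native metadata paths can be inspected directly (assuming the metadata service is reachable):

    # EC2-compatible endpoints
    curl http://169.254.169.254/latest/meta-data/
    curl http://169.254.169.254/latest/user-data

    # OpenStack-native endpoints
    curl http://169.254.169.254/openstack/latest/meta_data.json
    curl http://169.254.169.254/openstack/latest/user_data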
The API entry point is the create() method in nova/api/openstack/compute/servers.py:532:
def create(self, req, body):
    """Creates a new server for a given user."""

    context = req.environ['nova.context']
    server_dict = body['server']
    password = self._get_server_admin_password(server_dict)
    name = common.normalize_name(server_dict['name'])

    if api_version_request.is_supported(req, min_version='2.19'):
        if 'description' in server_dict:
            # This is allowed to be None
            description = server_dict['description']
        else:
            # No default description
            description = None
    else:
        description = name
        ....
        ....
    try:
        inst_type = flavors.get_flavor_by_flavor_id(
                flavor_id, ctxt=context, read_deleted="no")

        (instances, resv_id) = self.compute_api.create(context,
                        inst_type,
                        image_uuid,
                        display_name=name,
                        display_description=description,
                        availability_zone=availability_zone,
                        forced_host=host, forced_node=node,
                        metadata=server_dict.get('metadata', {}),
                        admin_password=password,
                        requested_networks=requested_networks,
                        check_server_group_quota=True,
                        **create_kwargs)

The corresponding user-data and injected files are passed along inside **create_kwargs.
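For illustration, a minimal sketch of how the relevant request-body fields could map into these keyword arguments (a simplification based on the compute API request schema, not Nova's actual extension code):

    # hypothetical simplification: how user_data and personality from the
    # POST /servers request body end up in create_kwargs
    create_kwargs = {}
    if 'user_data' in server_dict:
        # user_data arrives base64-encoded in the API request
        create_kwargs['user_data'] = server_dict['user_data']
    if 'personality' in server_dict:
        # each personality item carries a target path plus base64 contents
        create_kwargs['injected_files'] = [
            (item['path'], item['contents'])
            for item in server_dict['personality']
        ]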
compute_api.create() eventually reaches _create_instance in nova/compute/api.py:1134:

def _create_instance(self, context, instance_type,
           image_href, kernel_id, ramdisk_id,
           min_count, max_count,
           display_name, display_description,
           key_name, key_data, security_groups,
           availability_zone, user_data, metadata, injected_files,
           admin_password, access_ip_v4, access_ip_v6,
           requested_networks, config_drive,
           block_device_mapping, auto_disk_config, filter_properties,
           reservation_id=None, legacy_bdm=True, shutdown_terminate=False,
           check_server_group_quota=False):
    """Verify all the input parameters regardless of the provisioning
    strategy being performed and schedule the instance(s) for
    creation.
    """
    . . .
    base_options, max_net_count, key_pair, security_groups = \
            self._validate_and_build_base_options(
                context, instance_type, boot_meta, image_href, image_id,
                kernel_id, ramdisk_id, display_name, display_description,
                key_name, key_data, security_groups, availability_zone,
                user_data, metadata, access_ip_v4, access_ip_v6,
                requested_networks, config_drive, auto_disk_config,
After this packaging step, the parameters live in the base_options variable.
. . .
    instances_to_build = self._provision_instances(context, instance_type,
            min_count, max_count, base_options, boot_meta, security_groups,
            block_device_mapping, shutdown_terminate,
            instance_group, check_server_group_quota, filter_properties,
            key_pair)

    instances = []
    request_specs = []
    build_requests = []
    for rs, build_request, im in instances_to_build:
        build_requests.append(build_request)
        instance = build_request.get_new_instance(context)
        instances.append(instance)
        request_specs.append(rs)

The build request is then handed off (via the conductor and the scheduler) to a compute node, where it arrives at _build_and_run_instance in nova/compute/manager.py:1883:

def _build_and_run_instance(self, context, instance, image, injected_files,
        admin_password, requested_networks, security_groups,
        block_device_mapping, node, limits, filter_properties):
    image_name = image.get('name')
    self._notify_about_instance_usage(context, instance, 'create.start',
            extra_usage_info={'image_name': image_name})
    compute_utils.notify_about_instance_action(
        context, instance, self.host,
        action=fields.NotificationAction.CREATE,
        phase=fields.NotificationPhase.START)
        . . .
    try:
        rt = self._get_resource_tracker()
        with rt.instance_claim(context, instance, node, limits):
            # NOTE(russellb) It's important that this validation be done
            # *after* the resource tracker instance claim, as that is where
            # the host is set on the instance.
            self._validate_instance_group_policy(context, instance,
                    filter_properties)
            image_meta = objects.ImageMeta.from_dict(image)
            with self._build_resources(context, instance,
                    requested_networks, security_groups, image_meta,
                    block_device_mapping) as resources:
                instance.vm_state = vm_states.BUILDING
                instance.task_state = task_states.SPAWNING
                # NOTE(JoshNang) This also saves the changes to the
                # instance from _allocate_network_async, as they aren't
                # saved in that function to prevent races.
                instance.save(expected_task_state=
                        task_states.BLOCK_DEVICE_MAPPING)
                block_device_info = resources['block_device_info']
                network_info = resources['network_info']
                LOG.debug('Start spawning the instance on the hypervisor.',
                          instance=instance)
                with timeutils.StopWatch() as timer:
                    self.driver.spawn(context, instance, image_meta,
                                      injected_files, admin_password,
                                      network_info=network_info,
                                      block_device_info=block_device_info)
                LOG.info(_LI('Took %0.2f seconds to spawn the instance on '
                             'the hypervisor.'), timer.elapsed(),
                         instance=instance)

Next we move into the libvirt driver, nova/virt/libvirt/driver.py:2657:

def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info=None, block_device_info=None):
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        image_meta,
                                        block_device_info)
    injection_info = InjectionInfo(network_info=network_info,
                                   files=injected_files,
                                   admin_pass=admin_password)
    gen_confdrive = functools.partial(self._create_configdrive,
                                      context, instance,
                                      injection_info)
    # gen_confdrive above creates the config drive (the disk.config we
    # normally see) and writes the corresponding injection info into it
    self._create_image(context, instance, disk_info['mapping'],
                       injection_info=injection_info,
                       block_device_info=block_device_info)
File injection itself uses the guestfs.GuestFS class: it attaches to the disk (for example an rbd volume) and modifies the volume in place. SSH key injection, for instance, is performed by the following function:
def _inject_key_into_fs(key, fs):
    """Add the given public ssh key to root's authorized_keys.

    key is an ssh key string.
    fs is the path to the base of the filesystem into which to inject the key.
    """
    LOG.debug("Inject key fs=%(fs)s key=%(key)s", {'fs': fs, 'key': key})
    sshdir = os.path.join('root', '.ssh')
    fs.make_path(sshdir)
    fs.set_ownership(sshdir, "root", "root")
    fs.set_permissions(sshdir, 0o700)

    keyfile = os.path.join(sshdir, 'authorized_keys')

    key_data = ''.join([
        '\n',
        '# The following ssh key was injected by Nova',
        '\n',
        key.strip(),
        '\n',
    ])

    _inject_file_into_fs(fs, keyfile, key_data, append=True)
    fs.set_permissions(keyfile, 0o600)

    _setup_selinux_for_keys(fs, sshdir)
Here fs is a guestfs.GuestFS-backed object that can operate on the rbd volume, which is what makes it possible to modify files inside the guest's filesystem.
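To make that concrete, here is a minimal standalone sketch using the libguestfs Python bindings directly (the image path and key are placeholders; Nova wraps guestfs in its own VFS abstraction rather than calling it like this):

    import guestfs

    g = guestfs.GuestFS(python_return_dict=True)
    # attach the guest disk; an rbd volume can be attached by passing extra
    # protocol/server arguments to add_drive_opts
    g.add_drive_opts('/var/lib/nova/instances/<uuid>/disk', readonly=0)
    g.launch()

    # mount the guest's root filesystem
    root = g.inspect_os()[0]
    g.mount(root, '/')

    # create /root/.ssh and append the public key, mirroring _inject_key_into_fs
    g.mkdir_p('/root/.ssh')
    g.chmod(0o700, '/root/.ssh')
    g.write_append('/root/.ssh/authorized_keys',
                   '\n# injected for testing\nssh-rsa AAAA... user@host\n')
    g.chmod(0o600, '/root/.ssh/authorized_keys')

    g.shutdown()
    g.close()

Returning to spawn, the remaining steps generate the guest XML and start the domain: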

    # Required by Quobyte CI
    self._ensure_console_log_for_instance(instance)

    xml = self._get_guest_xml(context, instance, network_info,
                              disk_info, image_meta,
                              block_device_info=block_device_info)
    self._create_domain_and_network(
        context, xml, instance, network_info, disk_info,
        block_device_info=block_device_info,
        post_xml_callback=gen_confdrive,
        destroy_disks_on_failure=True)
    LOG.debug("Instance is running", instance=instance)
Here post_xml_callback is how the config drive gets created at the moment the libvirt domain is created.
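The pattern is: define the domain from its XML first, then invoke the callback so the config drive exists before power-on. A minimal self-contained illustration of that callback flow (all names here are hypothetical, not Nova's actual code):

    import functools

    def create_domain_and_network(xml, post_xml_callback=None):
        # (1) define the domain from its XML (stubbed out here)
        print('defining domain from xml: %s' % xml)
        # (2) the domain XML now exists, so build the config drive
        if post_xml_callback is not None:
            post_xml_callback()
        # (3) power on the domain
        print('powering on domain')

    def create_configdrive(instance, injection_info):
        print('building disk.config for %s with %s' % (instance, injection_info))

    gen_confdrive = functools.partial(create_configdrive,
                                      'instance-0001', {'admin_pass': 'secret'})
    create_domain_and_network('<domain/>', post_xml_callback=gen_confdrive)

Back in spawn, the driver then polls until the domain is running: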
    def _wait_for_boot():
        """Called at an interval until the VM is running."""
        state = self.get_info(instance).state

        if state == power_state.RUNNING:
            LOG.info(_LI("Instance spawned successfully."),
                     instance=instance)
            raise loopingcall.LoopingCallDone()

    timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
    timer.start(interval=0.5).wait()
After that, Nova simply waits for the domain's state in libvirt to become running, and the instance state in MySQL is then updated to active.
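The polling here uses oslo.service's loopingcall: the function runs every interval seconds until it raises LoopingCallDone, whose value becomes the result of wait(). A minimal standalone sketch:

    import time
    from oslo_service import loopingcall

    start = time.time()

    def _poll():
        # stand-in for get_info(instance).state == power_state.RUNNING:
        # pretend the VM reaches RUNNING after about two seconds
        if time.time() - start > 2:
            raise loopingcall.LoopingCallDone('running')

    timer = loopingcall.FixedIntervalLoopingCall(_poll)
    state = timer.start(interval=0.5).wait()
    print(state)  # 'running'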

Note: because the config drive is generated at instance-creation time and the metadata has already been injected by then, you cannot change the instance's password afterwards by modifying the metadata through horizon or the nova API.
Also:
/var/log/cloud-init.log shows which source cloud-init is currently fetching its data from.
When a config drive is used, a /dev/vdb device shows up inside the instance; mount it with mount /dev/vdb /mnt and inspect the contents:

ls -l /mnt/openstack/latest/
-rwxr-xr-x. 1 root root 1885 Mar  3 13:35 meta_data.json
-rwxr-xr-x. 1 root root  348 Mar  3 13:35 network_data.json
-rwxr-xr-x. 1 root root   60 Mar 22 12:47 user_data
-rwxr-xr-x. 1 root root    2 Mar  3 13:35 vendor_data.json
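Two quick checks inside the guest confirm which path was used (the grep pattern is just a convenient filter over cloud-init's log):

    # which datasource did cloud-init pick up?
    grep -i datasource /var/log/cloud-init.log

    # what user_data was delivered via the config drive?
    cat /mnt/openstack/latest/user_data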