# After the request is sent via RPC to the designated nova-compute node, execution enters nova/compute/manager.py:
@object_compat
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def attach_volume(self, context, volume_id, mountpoint,
                  instance, bdm=None):
    """Attach a volume to an instance.

    RPC entry point on the nova-compute node.  Looks up the
    BlockDeviceMapping for ``volume_id`` when the caller did not supply
    one, wraps it in a ``DriverVolumeBlockDevice``, and performs the
    attach while holding a per-instance lock so concurrent block-device
    operations on the same instance are serialized.

    :param context: the request context
    :param volume_id: id of the Cinder volume to attach
    :param mountpoint: requested device path (may be empty; the actual
        device name comes from the block device mapping)
    :param instance: the Instance object to attach the volume to
    :param bdm: optional BlockDeviceMapping; fetched by volume id when
        not supplied
    """
    if not bdm:
        bdm = objects.BlockDeviceMapping.get_by_volume_id(
            context, volume_id)
    driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm)

    # Serialize on the instance uuid so only one block-device operation
    # runs against this instance at a time.
    @utils.synchronized(instance.uuid)
    def do_attach_volume(context, instance, driver_bdm):
        try:
            return self._attach_volume(context, instance, driver_bdm)
        except Exception:
            # On failure, remove the block device mapping record so the
            # database does not keep an entry for a volume that never
            # attached; the original exception is re-raised afterwards.
            with excutils.save_and_reraise_exception():
                bdm.destroy(context)

    do_attach_volume(context, instance, driver_bdm)
def _attach_volume(self, context, instance, bdm):
    """Do the actual volume attach via the driver block device object.

    Elevates the context, emits an audit log entry, and delegates the
    real work to ``bdm.attach`` (which initializes the Cinder connection
    and calls into the virt driver).  On failure the volume reservation
    is released before the exception is re-raised; on success a
    "volume.attach" usage notification is emitted.
    """
    context = context.elevated()
    LOG.audit(_('Attaching volume %(volume_id)s to %(mountpoint)s'),
              {'volume_id': bdm.volume_id,
               'mountpoint': bdm['mount_device']},
              context=context, instance=instance)
    try:
        # do_check_attach=False: the volume was already checked/reserved
        # by the API layer; do_driver_attach=True: perform the hypervisor
        # attach here rather than at instance boot.
        bdm.attach(context, instance, self.volume_api, self.driver,
                   do_check_attach=False, do_driver_attach=True)
    except Exception:  # pylint: disable=W0702
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Failed to attach %(volume_id)s "
                              "at %(mountpoint)s"),
                          {'volume_id': bdm.volume_id,
                           'mountpoint': bdm['mount_device']},
                          context=context, instance=instance)
            # Release the Cinder reservation so the volume does not stay
            # stuck in the "attaching" state.
            self.volume_api.unreserve_volume(context, bdm.volume_id)

    info = {'volume_id': bdm.volume_id}
    self._notify_about_instance_usage(
        context, instance, "volume.attach", extra_usage_info=info)
# pdb debugging session output:
> /usr/lib/python2.7/site-packages/nova/compute/manager.py(4855)attach_volume()
-> if not bdm:
(Pdb) l
4850 @wrap_instance_fault
4851 def attach_volume(self, context, volume_id, mountpoint,
4852 instance, bdm=None):
4853 """Attach a volume to an instance."""
4854 import pdb; pdb.set_trace()
4855 if not bdm:
4856 bdm = objects.BlockDeviceMapping.get_by_volume_id(
4857 context, volume_id)
4858 driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm)
4859
4860 @utils.synchronized(instance.uuid)
(Pdb) pp volume_id
u'c6129426-c4ae-45fe-9052-b9248eacad73'
(Pdb) pp mountpoint
u''
(Pdb) pp instance
Instance(access_ip_v4=None,access_ip_v6=None,architecture=None,auto_disk_config=True,availability_zone='nova',cell_name=None,cleaned=False,config_drive='',cpu_pinning=<?>,created_at=2015-07-15T08:49:29Z,default_ephemeral_device=None,default_swap_device=None,deleted=False,deleted_at=None,disable_terminate=False,display_description='instance01',display_name='instance01',ephemeral_gb=0,ephemeral_key_uuid=None,fault=<?>,host='compute1',hostname='instance01',id=33,image_ref='',info_cache=InstanceInfoCache,instance_type_id=2,kernel_id='',key_data=None,key_name=None,launch_index=0,launched_at=2015-07-15T08:49:36Z,launched_on='compute1',locked=False,locked_by=None,memory_mb=512,metadata={},node='compute1',numa_topology=<?>,os_type=None,pci_devices=<?>,power_state=1,progress=0,project_id='0ea8ceb2f23a4814976b878fe61b1b55',ramdisk_id='',reservation_id='r-jajb10ay',root_device_name='/dev/vda',root_gb=1,scheduled_at=None,security_groups=SecurityGroupList,shutdown_terminate=False,system_metadata={image_base_image_ref='',image_min_disk='1',instance_type_ephemeral_gb='0',instance_type_flavorid='1',instance_type_id='2',instance_type_memory_mb='512',instance_type_name='m1.tiny',instance_type_root_gb='1',instance_type_rxtx_factor='1.0',instance_type_swap='0',instance_type_vcpu_weight=None,instance_type_vcpus='1'},task_state=None,terminated_at=None,updated_at=2015-07-16T03:17:59Z,user_data=None,user_id='8a3073f7423d46bea51a3ab1398da61c',uuid=4cfde5a8-58eb-42e7-ace6-1e2de41b5c3a,vcpus=1,vm_mode=None,vm_state='active')
(Pdb) pp bdm
BlockDeviceMapping(boot_index=None,connection_info=None,created_at=2015-07-16T03:24:46Z,delete_on_termination=False,deleted=False,deleted_at=None,destination_type='volume',device_name='/dev/vdb',device_type=None,disk_bus=None,guest_format=None,id=41,image_id=None,instance=<?>,instance_uuid=4cfde5a8-58eb-42e7-ace6-1e2de41b5c3a,no_device=False,snapshot_id=None,source_type='volume',updated_at=None,volume_id='c6129426-c4ae-45fe-9052-b9248eacad73',volume_size=None)
4870 def _attach_volume(self, context, instance, bdm):
4871 context = context.elevated()
4872 LOG.audit(_('Attaching volume %(volume_id)s to %(mountpoint)s'),
4873 {'volume_id': bdm.volume_id,
4874 'mountpoint': bdm['mount_device']},
4875 context=context, instance=instance)
(Pdb) l
4876 try:
4877 bdm.attach(context, instance, self.volume_api, self.driver,
4878 do_check_attach=False, do_driver_attach=True)
4879 except Exception: # pylint: disable=W0702
4880 with excutils.save_and_reraise_exception():
4881 LOG.exception(_LE("Failed to attach %(volume_id)s "
4882 "at %(mountpoint)s"),
4883 {'volume_id': bdm.volume_id,
4884 'mountpoint': bdm['mount_device']},
4885 context=context, instance=instance)
4886 self.volume_api.unreserve_volume(context, bdm.volume_id)
(Pdb) l
4887
4888 info = {'volume_id': bdm.volume_id}
4889 self._notify_about_instance_usage(
4890 context, instance, "volume.attach", extra_usage_info=info)
4891
4892 def _detach_volume(self, context, instance, bdm):
4893 """Do the actual driver detach using block device mapping."""
4894 mp = bdm.device_name
4895 volume_id = bdm.volume_id
4896
4897 LOG.audit(_('Detach volume %(volume_id)s from mountpoint %(mp)s'),
(Pdb) pp bdm
{'boot_index': None,
'connection_info': None,
'delete_on_termination': False,
'device_type': None,
'disk_bus': None,
'guest_format': None,
'mount_device': u'/dev/vdb'}
(Pdb) pp type(bdm)
<class 'nova.virt.block_device.DriverVolumeBlockDevice'>
# This in turn calls nova/virt/block_device.py, class DriverVolumeBlockDevice(DriverBlockDevice):
@update_db
def attach(self, context, instance, volume_api, virt_driver,
           do_check_attach=True, do_driver_attach=False):
    """Attach this block device's volume to *instance*.

    Fetches the volume from Cinder, optionally validates that it may be
    attached, asks Cinder to initialize the connection for this host's
    connector, and — when ``do_driver_attach`` is True — performs the
    actual hypervisor attach via ``virt_driver``.  The resulting
    connection info is stored on the mapping (persisted by the
    ``@update_db`` decorator) and the volume is marked attached in
    Cinder if it was still detached.
    """
    volume = volume_api.get(context, self.volume_id)
    if do_check_attach:
        volume_api.check_attach(context, volume, instance=instance)

    volume_id = volume['id']
    context = context.elevated()

    # Build the host-side connector (host, initiator, ip — see the pdb
    # dump below) and ask Cinder for the driver-specific connection
    # details (e.g. an rbd descriptor for a Ceph-backed volume).
    connector = virt_driver.get_volume_connector(instance)
    connection_info = volume_api.initialize_connection(context,
                                                       volume_id,
                                                       connector)
    if 'serial' not in connection_info:
        connection_info['serial'] = self.volume_id
    self._preserve_multipath_id(connection_info)

    # If do_driver_attach is False, we will attach a volume to an instance
    # at boot time. So actual attach is done by instance creation code.
    if do_driver_attach:
        encryption = encryptors.get_encryption_metadata(
            context, volume_api, volume_id, connection_info)

        try:
            virt_driver.attach_volume(
                context, connection_info, instance,
                self['mount_device'], disk_bus=self['disk_bus'],
                device_type=self['device_type'], encryption=encryption)
        except Exception:  # pylint: disable=W0702
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Driver failed to attach volume "
                                "%(volume_id)s at %(mountpoint)s"),
                              {'volume_id': volume_id,
                               'mountpoint': self['mount_device']},
                              context=context, instance=instance)
                # Undo the Cinder-side export before re-raising.
                volume_api.terminate_connection(context, volume_id,
                                                connector)
    self['connection_info'] = connection_info

    mode = 'rw'
    if 'data' in connection_info:
        mode = connection_info['data'].get('access_mode', 'rw')
    if volume['attach_status'] == "detached":
        volume_api.attach(context, volume_id, instance['uuid'],
                          self['mount_device'], mode=mode)
# pdb debugging session output:
331 @update_db
332 def attach(self, context, instance, volume_api, virt_driver,
333 do_check_attach=True, do_driver_attach=False):
334 volume = volume_api.get(context, self.volume_id)
335 if do_check_attach:
336 volume_api.check_attach(context, volume, instance=instance)
337
338 volume_id = volume['id']
339 context = context.elevated()
(Pdb) pp volume_api
<nova.volume.cinder.API object at 0x444c990>
(Pdb) pp virt_driver
<nova.virt.libvirt.driver.LibvirtDriver object at 0x449d8d0>
(Pdb) pp do_check_attach
False
(Pdb) pp do_driver_attach
True
# pdb output showing the values of connector and connection_info:
> /usr/lib/python2.7/site-packages/nova/virt/block_device.py(345)attach()
-> if 'serial' not in connection_info:
(Pdb) l
340
341 connector = virt_driver.get_volume_connector(instance)
342 connection_info = volume_api.initialize_connection(context,
343 volume_id,
344 connector)
345 -> if 'serial' not in connection_info:
346 connection_info['serial'] = self.volume_id
347 self._preserve_multipath_id(connection_info)
348
349 # If do_driver_attach is False, we will attach a volume to an instance
350 # at boot time. So actual attach is done by instance creation code.
(Pdb) pp connector
{'host': 'compute1',
'initiator': 'iqn.1994-05.com.redhat:ac5f49ab23a1',
'ip': '192.168.56.106'}
(Pdb) pp connection_info
{u'data': {u'access_mode': u'rw',
u'auth_enabled': True,
u'auth_username': u'cinder',
u'hosts': [u'192.168.56.102'],
u'name': u'volumes/volume-c6129426-c4ae-45fe-9052-b9248eacad73',
u'ports': [u'6789'],
u'qos_specs': None,
u'secret_type': u'ceph',
u'secret_uuid': u'691a9b3f-b839-4233-b050-7d061fecea5a'},
u'driver_volume_type': u'rbd'}
# This shows a Ceph (rbd) volume is in use, so the following method is called next:
/usr/lib/python2.7/site-packages/nova/virt/libvirt/driver.py(1540)attach_volume()
def attach_volume(self, context, connection_info, instance, mountpoint,
                  disk_bus=None, device_type=None, encryption=None,
                  update_device=False):
    """Attach the volume described by *connection_info* to the guest.

    Translates the connection info into a libvirt disk config, connects
    the backing volume on the host, and hot-plugs (or, with
    ``update_device=True``, updates) the disk on the libvirt domain.
    On failure the host-side connection is torn down before re-raising;
    a libvirt ``VIR_ERR_OPERATION_FAILED`` is translated into
    ``exception.DeviceIsBusy``.
    """
    instance_name = instance['name']
    virt_dom = self._lookup_by_name(instance_name)
    # Only the final path component is used as the guest device name,
    # e.g. '/dev/vdb' -> 'vdb'.
    disk_dev = mountpoint.rpartition("/")[2]
    bdm = {
        'device_name': disk_dev,
        'disk_bus': disk_bus,
        'device_type': device_type}

    # Note(cfb): If the volume has a custom block size, check that
    #            we are using QEMU/KVM and libvirt >= 0.10.2. The
    #            presence of a block size is considered mandatory by
    #            cinder so we fail if we can't honor the request.
    data = {}
    if ('data' in connection_info):
        data = connection_info['data']
    if ('logical_block_size' in data or 'physical_block_size' in data):
        if ((CONF.libvirt.virt_type != "kvm" and
             CONF.libvirt.virt_type != "qemu")):
            msg = _("Volume sets block size, but the current "
                    "libvirt hypervisor '%s' does not support custom "
                    "block size") % CONF.libvirt.virt_type
            raise exception.InvalidHypervisorType(msg)

        if not self._has_min_version(MIN_LIBVIRT_BLOCKIO_VERSION):
            ver = ".".join([str(x) for x in MIN_LIBVIRT_BLOCKIO_VERSION])
            msg = _("Volume sets block size, but libvirt '%s' or later is "
                    "required.") % ver
            raise exception.Invalid(msg)

    disk_info = blockinfo.get_info_from_bdm(CONF.libvirt.virt_type, bdm)
    # Connect the volume on the host and build the libvirt disk XML
    # config for it.
    conf = self._connect_volume(connection_info, disk_info)
    self._set_cache_mode(conf)

    try:
        # NOTE(vish): We can always affect config because our
        #             domains are persistent, but we should only
        #             affect live if the domain is running.
        flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
        state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
        if not update_device and \
           state in (power_state.RUNNING, power_state.PAUSED):
            flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE

        # cache device_path in connection_info -- required by encryptors
        if 'data' in connection_info:
            connection_info['data']['device_path'] = conf.source_path

        if encryption:
            encryptor = self._get_volume_encryptor(connection_info,
                                                   encryption)
            encryptor.attach_volume(context, **encryption)

        if update_device:
            virt_dom.updateDeviceFlags(conf.to_xml(), flags)
        else:
            virt_dom.attachDeviceFlags(conf.to_xml(), flags)
    except Exception as ex:
        LOG.exception(_('Failed to attach volume at mountpoint: %s'),
                      mountpoint, instance=instance)
        if isinstance(ex, libvirt.libvirtError):
            errcode = ex.get_error_code()
            if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
                # Device busy in the guest: disconnect on the host and
                # raise a specific, caller-catchable exception.
                self._disconnect_volume(connection_info, disk_dev)
                raise exception.DeviceIsBusy(device=disk_dev)

        # Any other failure: tear down the host-side connection and
        # re-raise the original exception.
        with excutils.save_and_reraise_exception():
            self._disconnect_volume(connection_info, disk_dev)
# At this point the disk attach on the nova side is complete; next, cinder updates the volume's metadata, i.e. the attachment info in the volume table of the cinder database.