在 filter_scheduler.py 的 _schedule 中选出需要的 host 后,会调用 chosen_host.obj.consume_from_request(spec_obj) 来更新该 host 的资源信息.
consume_from_request 是在host_manager.py中实现
def consume_from_request(self, spec_obj):
    """Incrementally update host state from a RequestSpec object.

    The actual accounting is delegated to _locked_consume_from_request;
    this wrapper only serializes access to the shared host state.
    """
    @utils.synchronized(self._lock_name)
    @set_update_time_on_success
    def _locked(self, spec_obj):
        # Scheduler API is inherently multi-threaded as every incoming RPC
        # message will be dispatched in its own green thread. So the
        # shared host state should be consumed in a consistent way to make
        # sure its data is valid under concurrent write operations.
        self._locked_consume_from_request(spec_obj)
    return _locked(self, spec_obj)
consume_from_request 通过内部函数 _locked 调用 _locked_consume_from_request
def _locked_consume_from_request(self, spec_obj):
    """Subtract the resources requested by *spec_obj* from this host state.

    Must be called with the host state lock held (see
    consume_from_request).
    """
    # Compute the disk, RAM and vCPU amounts requested by the spec.
    disk_mb = (spec_obj.root_gb +
               spec_obj.ephemeral_gb) * 1024
    ram_mb = spec_obj.memory_mb
    vcpus = spec_obj.vcpus
    # Subtract ram/disk/vcpu from the resources held by this HostState.
    self.free_ram_mb -= ram_mb
    self.free_disk_mb -= disk_mb
    self.vcpus_used += vcpus

    # Track number of instances on host
    self.num_instances += 1

    # Only keep the raw PCI request list when this host actually tracks
    # PCI device stats; otherwise there is nothing to account against.
    pci_requests = spec_obj.pci_requests
    if pci_requests and self.pci_stats:
        pci_requests = pci_requests.requests
    else:
        pci_requests = None

    # Calculate the NUMA usage: fit the instance topology onto the host
    # topology and write the resulting fitted topology back to the spec.
    host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
        self)
    instance_numa_topology = spec_obj.numa_topology
    spec_obj.numa_topology = hardware.numa_fit_instance_to_host(
        host_numa_topology, instance_numa_topology,
        limits=self.limits.get('numa_topology'),
        pci_requests=pci_requests, pci_stats=self.pci_stats)
    if pci_requests:
        instance_cells = None
        if spec_obj.numa_topology:
            instance_cells = spec_obj.numa_topology.cells
        self.pci_stats.apply_requests(pci_requests, instance_cells)

    # NOTE(sbauza): Yeah, that's crap. We should get rid of all of those
    # NUMA helpers because now we're 100% sure that spec_obj.numa_topology
    # is an InstanceNUMATopology object. Unfortunately, since
    # HostState.host_numa_topology is still limbo between an NUMATopology
    # object (when updated by consume_from_request), a ComputeNode object
    # (when updated by update_from_compute_node), we need to keep the call
    # to get_host_numa_usage_from_instance until it's fixed (and use a
    # temporary orphaned Instance object as a proxy)
    instance = objects.Instance(numa_topology=spec_obj.numa_topology)
    self.numa_topology = hardware.get_host_numa_usage_from_instance(
        self, instance)

    # NOTE(sbauza): By considering all cases when the scheduler is called
    # and when consume_from_request() is run, we can safely say that there
    # is always an IO operation because we want to move the instance
    self.num_io_ops += 1
consume_from_request 是在host_manager.py中实现
def consume_from_request(self, spec_obj):
    """Incrementally update host state from a RequestSpec object.

    The actual accounting is delegated to _locked_consume_from_request;
    this wrapper only serializes access to the shared host state.
    """
    @utils.synchronized(self._lock_name)
    @set_update_time_on_success
    def _locked(self, spec_obj):
        # Scheduler API is inherently multi-threaded as every incoming RPC
        # message will be dispatched in its own green thread. So the
        # shared host state should be consumed in a consistent way to make
        # sure its data is valid under concurrent write operations.
        self._locked_consume_from_request(spec_obj)
    return _locked(self, spec_obj)
consume_from_request 通过内部函数 _locked 调用 _locked_consume_from_request
def _locked_consume_from_request(self, spec_obj):
    """Subtract the resources requested by *spec_obj* from this host state.

    Must be called with the host state lock held (see
    consume_from_request).
    """
    # Compute the disk, RAM and vCPU amounts requested by the spec.
    disk_mb = (spec_obj.root_gb +
               spec_obj.ephemeral_gb) * 1024
    ram_mb = spec_obj.memory_mb
    vcpus = spec_obj.vcpus
    # Subtract ram/disk/vcpu from the resources held by this HostState.
    self.free_ram_mb -= ram_mb
    self.free_disk_mb -= disk_mb
    self.vcpus_used += vcpus

    # Track number of instances on host
    self.num_instances += 1

    # Only keep the raw PCI request list when this host actually tracks
    # PCI device stats; otherwise there is nothing to account against.
    pci_requests = spec_obj.pci_requests
    if pci_requests and self.pci_stats:
        pci_requests = pci_requests.requests
    else:
        pci_requests = None

    # Calculate the NUMA usage: fit the instance topology onto the host
    # topology and write the resulting fitted topology back to the spec.
    host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
        self)
    instance_numa_topology = spec_obj.numa_topology
    spec_obj.numa_topology = hardware.numa_fit_instance_to_host(
        host_numa_topology, instance_numa_topology,
        limits=self.limits.get('numa_topology'),
        pci_requests=pci_requests, pci_stats=self.pci_stats)
    if pci_requests:
        instance_cells = None
        if spec_obj.numa_topology:
            instance_cells = spec_obj.numa_topology.cells
        self.pci_stats.apply_requests(pci_requests, instance_cells)

    # NOTE(sbauza): Yeah, that's crap. We should get rid of all of those
    # NUMA helpers because now we're 100% sure that spec_obj.numa_topology
    # is an InstanceNUMATopology object. Unfortunately, since
    # HostState.host_numa_topology is still limbo between an NUMATopology
    # object (when updated by consume_from_request), a ComputeNode object
    # (when updated by update_from_compute_node), we need to keep the call
    # to get_host_numa_usage_from_instance until it's fixed (and use a
    # temporary orphaned Instance object as a proxy)
    instance = objects.Instance(numa_topology=spec_obj.numa_topology)
    self.numa_topology = hardware.get_host_numa_usage_from_instance(
        self, instance)

    # NOTE(sbauza): By considering all cases when the scheduler is called
    # and when consume_from_request() is run, we can safely say that there
    # is always an IO operation because we want to move the instance
    self.num_io_ops += 1