以i386为例
/* Create the two global address spaces at startup: a 2^64-byte pure
 * container "system" (root of address_space_memory) and a 64 KiB leaf
 * "io" region (root of address_space_io) backed by unassigned_io_ops. */
static void memory_map_init(void)
{
/* Pure container MemoryRegion: no ops of its own, just subregions. */
system_memory = g_malloc(sizeof(*system_memory));
memory_region_init(system_memory, NULL, "system", UINT64_MAX);
address_space_init(&address_space_memory, system_memory, "memory");
/* Leaf region for port I/O; reads/writes hit unassigned_io_ops. */
system_io = g_malloc(sizeof(*system_io));
memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
65536);
address_space_init(&address_space_io, system_io, "I/O");
}
创建纯容器 system_memory , 作为address_space_memory的root MemoryRegion, 也就是这段AddressSpace作为cpu可见的内存地址空间,大小为2^64(注意i386是没有这种寻址能力的,那么做只是方便32位和64位的代码统一)
另外创建io容器,对应cpu可见的io地址空间的AddressSpace ,大小为65536个字节(64KB). 新式设备很少使用io空间,所以这个空间还是比较小的, 使用 memory_region_init_io 初始化, 对应读写回调函数为unassigned_io_ops,terminates 为true表示一个叶子节点. 叶子节点有什么特性呢?虽然叶子节点可以作为容器,但是它有对应的内存地址,不会在内存地址空间上留下空洞,这和纯容器是不同的。具体参考qemu 内存模型(1)—文档
MemoryRegion的qom构造函数如下(关于qom参考qemu2的qom系统分析)
/* QOM instance_init for TYPE_MEMORY_REGION: install safe defaults and
 * register the read-only QOM properties "container", "addr", "priority"
 * and "size".  All setters are NULL, so the properties can be inspected
 * but not modified through QOM. */
static void memory_region_initfn(Object *obj)
{
MemoryRegion *mr = MEMORY_REGION(obj);
ObjectProperty *op;
/* Default callbacks: accesses to a region with no backing storage
 * fall through to unassigned_mem_ops. */
mr->ops = &unassigned_mem_ops;
mr->enabled = true;
mr->romd_mode = true;
mr->global_locking = true;
mr->destructor = memory_region_destructor_none;
QTAILQ_INIT(&mr->subregions);
QTAILQ_INIT(&mr->coalesced);
/* "container" is a link property; resolving it walks to the parent
 * region via memory_region_resolve_container. */
op = object_property_add(OBJECT(mr), "container",
"link<" TYPE_MEMORY_REGION ">",
memory_region_get_container,
NULL, /* memory_region_set_container */
NULL, NULL, &error_abort);
op->resolve = memory_region_resolve_container;
object_property_add(OBJECT(mr), "addr", "uint64",
memory_region_get_addr,
NULL, /* memory_region_set_addr */
NULL, NULL, &error_abort);
object_property_add(OBJECT(mr), "priority", "uint32",
memory_region_get_priority,
NULL, /* memory_region_set_priority */
NULL, NULL, &error_abort);
object_property_add(OBJECT(mr), "size", "uint64",
memory_region_get_size,
NULL, /* memory_region_set_size, */
NULL, NULL, &error_abort);
}
system_memory是一个纯容器,所以读写的回调参数为unassigned_mem_ops 表示这段地址空间没有分配。
cpu_address_space_init用于初始化cpu的地址空间
/* x86 CPU realize (excerpt, system-mode + TCG only): give the CPU two
 * address spaces — asidx 0 ("cpu-memory") is the normal system memory
 * view, asidx 1 ("cpu-smm") is rooted at cpu_as_root, into which SMRAM
 * is linked later by the machine-done notifier. */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
...
#ifndef CONFIG_USER_ONLY
if (tcg_enabled()) {
cpu->cpu_as_mem = g_new(MemoryRegion, 1);
cpu->cpu_as_root = g_new(MemoryRegion, 1);
/* Outer container... */
memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
memory_region_set_enabled(cpu->cpu_as_root, true);
/* ... with two regions inside: normal system memory with low
 * priority, and...
 */
/* cpu_as_mem aliases the whole of get_system_memory() at offset 0. */
memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
get_system_memory(), 0, ~0ull);
memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
memory_region_set_enabled(cpu->cpu_as_mem, true);
cs->num_ases = 2;
cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
/* ... SMRAM with higher priority, linked from /machine/smram. */
cpu->machine_done.notify = x86_cpu_machine_done;
qemu_add_machine_init_done_notifier(&cpu->machine_done);
}
#endif
...
}
创建了一个纯容器 cpu_as_root, 又创建了 system_memory 的别名 cpu_as_mem 并将其添加为 cpu_as_root 的子region. 因此cpu对该地址空间的访问最终都会转到 system_memory 对应的MemoryRegion.
cpu_address_space_init(cs, 0, “cpu-memory”, cs->memory) 用于初始化该cpu的地址空间
这里的cs->memory 其实就是system_memory这个MemoryRegion
/* Create AddressSpace number asidx for @cpu, rooted at @mr, named
 * "<prefix>-<cpu_index>".  The AddressSpace is stored in
 * cpu->cpu_ases[asidx]; asidx 0 additionally gets the cpu->as alias.
 * Under TCG a listener is registered so TLB state tracks the topology. */
void cpu_address_space_init(CPUState *cpu, int asidx,
const char *prefix, MemoryRegion *mr)
{
CPUAddressSpace *newas;
AddressSpace *as = g_new0(AddressSpace, 1);
char *as_name;
assert(mr);
as_name = g_strdup_printf("%s-%d", prefix, cpu->cpu_index);
address_space_init(as, mr, as_name);
g_free(as_name);
/* Target code should have set num_ases before calling us */
assert(asidx < cpu->num_ases);
if (asidx == 0) {
/* address space 0 gets the convenience alias */
cpu->as = as;
}
/* KVM cannot currently support multiple address spaces. */
assert(asidx == 0 || !kvm_enabled());
/* Lazily allocate the per-CPU array on first call. */
if (!cpu->cpu_ases) {
cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
}
newas = &cpu->cpu_ases[asidx];
newas->cpu = cpu;
newas->as = as;
if (tcg_enabled()) {
/* tcg_commit flushes the TLB when the memory topology changes. */
newas->tcg_as_listener.commit = tcg_commit;
memory_listener_register(&newas->tcg_as_listener, as);
}
}
这里把system_memory作为该cpu的地址空间的root MemoryRegion. 并且将该AddressSpace 保存到了cpu->cpu_ases[asidx] 中方便索引。
接下来就是真正创建pc的物理内存布局的时候
在pc_init1函数里面 ,执行完 cpu地址空间的初始化后
/* PC hardware initialisation */
/* (excerpt) Machine init for the i440FX/PIIX PC: after CPU setup,
 * choose where ROMs live (a dedicated "pci" container when PCI is
 * enabled, system_memory otherwise) and build guest RAM via
 * pc_memory_init unless Xen manages memory itself. */
static void pc_init1(MachineState *machine,
const char *host_type, const char *pci_type)
{
......
pc_cpus_init(pcms);
if (pcmc->pci_enabled) {
/* PCI machines get a 2^64 "pci" container; ROMs map into it. */
pci_memory = g_new(MemoryRegion, 1);
memory_region_init(pci_memory, NULL, "pci", UINT64_MAX);
rom_memory = pci_memory;
} else {
pci_memory = NULL;
rom_memory = system_memory;
}
pc_guest_info_init(pcms);
if (kvm_enabled() && pcmc->kvmclock_enabled) {
kvmclock_create();
}
if (pcmc->smbios_defaults) {
MachineClass *mc = MACHINE_GET_CLASS(machine);
/* These values are guest ABI, do not change */
smbios_set_defaults("QEMU", "Standard PC (i440FX + PIIX, 1996)",
mc->name, pcmc->smbios_legacy_mode,
pcmc->smbios_uuid_encoded,
SMBIOS_ENTRY_POINT_21);
}
/* allocate ram and load rom/bios */
if (!xen_enabled()) {
pc_memory_init(pcms, system_memory,
rom_memory, &ram_memory);
} else if (machine->kernel_filename != NULL) {
/* For xen HVM direct kernel boot, load linux here */
xen_load_linux(pcms);
}
......
}
如果要模拟的机器支持pci则创建名为"pci"的 MemoryRegion 容器作为rom_memory, 否则 rom放在system_memory中.
我们只关心使用pci的情况, 最后都是使用pc_memory_init来初始化cpu看到内存布局
/* Build the PC guest RAM layout.
 *
 * Allocates the single "pc.ram" backing block, maps it into
 * @system_memory through the ram-below-4g / ram-above-4g aliases,
 * reserves the hotplug-memory window above 4G, initialises system
 * firmware and the "pc.rom" option-ROM region inside @rom_memory,
 * sets up fw_cfg, and optionally loads a Linux kernel.
 * *@ram_memory returns the backing "pc.ram" region to the caller.
 * Errors in user-supplied sizes/slots terminate QEMU via exit(). */
void pc_memory_init(PCMachineState *pcms,
MemoryRegion *system_memory,
MemoryRegion *rom_memory,
MemoryRegion **ram_memory)
{
int linux_boot, i;
MemoryRegion *ram, *option_rom_mr;
MemoryRegion *ram_below_4g, *ram_above_4g;
FWCfgState *fw_cfg;
MachineState *machine = MACHINE(pcms);
PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms);
assert(machine->ram_size == pcms->below_4g_mem_size +
pcms->above_4g_mem_size);
linux_boot = (machine->kernel_filename != NULL);
/* Allocate RAM. We allocate it as a single memory region and use
 * aliases to address portions of it, mostly for backwards compatibility
 * with older qemus that used qemu_ram_alloc().
 */
ram = g_malloc(sizeof(*ram));
memory_region_allocate_system_memory(ram, NULL, "pc.ram",
machine->ram_size);
*ram_memory = ram;
/* Low RAM: alias of pc.ram[0 .. below_4g) mapped at GPA 0. */
ram_below_4g = g_malloc(sizeof(*ram_below_4g));
memory_region_init_alias(ram_below_4g, NULL, "ram-below-4g", ram,
0, pcms->below_4g_mem_size);
memory_region_add_subregion(system_memory, 0, ram_below_4g);
e820_add_entry(0, pcms->below_4g_mem_size, E820_RAM);
if (pcms->above_4g_mem_size > 0) {
/* High RAM: remainder of pc.ram mapped at GPA 4G. */
ram_above_4g = g_malloc(sizeof(*ram_above_4g));
memory_region_init_alias(ram_above_4g, NULL, "ram-above-4g", ram,
pcms->below_4g_mem_size,
pcms->above_4g_mem_size);
memory_region_add_subregion(system_memory, 0x100000000ULL,
ram_above_4g);
e820_add_entry(0x100000000ULL, pcms->above_4g_mem_size, E820_RAM);
}
if (!pcmc->has_reserved_memory &&
(machine->ram_slots ||
(machine->maxram_size > machine->ram_size))) {
MachineClass *mc = MACHINE_GET_CLASS(machine);
error_report("\"-memory 'slots|maxmem'\" is not supported by: %s",
mc->name);
exit(EXIT_FAILURE);
}
/* initialize hotplug memory address space */
if (pcmc->has_reserved_memory &&
(machine->ram_size < machine->maxram_size)) {
ram_addr_t hotplug_mem_size =
machine->maxram_size - machine->ram_size;
if (machine->ram_slots > ACPI_MAX_RAM_SLOTS) {
error_report("unsupported amount of memory slots: %"PRIu64,
machine->ram_slots);
exit(EXIT_FAILURE);
}
if (QEMU_ALIGN_UP(machine->maxram_size,
TARGET_PAGE_SIZE) != machine->maxram_size) {
error_report("maximum memory size must by aligned to multiple of "
"%d bytes", TARGET_PAGE_SIZE);
exit(EXIT_FAILURE);
}
/* Hotplug window starts 1 GiB aligned, just above high RAM. */
pcms->hotplug_memory.base =
ROUND_UP(0x100000000ULL + pcms->above_4g_mem_size, 1ULL << 30);
if (pcmc->enforce_aligned_dimm) {
/* size hotplug region assuming 1G page max alignment per slot */
hotplug_mem_size += (1ULL << 30) * machine->ram_slots;
}
/* Reject configurations whose window wraps past the end of the
 * address space (unsigned overflow check). */
if ((pcms->hotplug_memory.base + hotplug_mem_size) <
hotplug_mem_size) {
error_report("unsupported amount of maximum memory: " RAM_ADDR_FMT,
machine->maxram_size);
exit(EXIT_FAILURE);
}
memory_region_init(&pcms->hotplug_memory.mr, OBJECT(pcms),
"hotplug-memory", hotplug_mem_size);
memory_region_add_subregion(system_memory, pcms->hotplug_memory.base,
&pcms->hotplug_memory.mr);
}
/* Initialize PC system firmware */
pc_system_firmware_init(rom_memory, !pcmc->pci_enabled);
option_rom_mr = g_malloc(sizeof(*option_rom_mr));
memory_region_init_ram(option_rom_mr, NULL, "pc.rom", PC_ROM_SIZE,
&error_fatal);
if (pcmc->pci_enabled) {
memory_region_set_readonly(option_rom_mr, true);
}
/* FIX: memory_region_add_subregion_overlap() takes
 * (mr, offset, subregion, priority); the source passed option_rom_mr
 * twice, which is a transcription error and would not compile. */
memory_region_add_subregion_overlap(rom_memory,
PC_ROM_MIN_VGA,
option_rom_mr,
1);
fw_cfg = bochs_bios_init(&address_space_memory, pcms);
rom_set_fw(fw_cfg);
if (pcmc->has_reserved_memory && pcms->hotplug_memory.base) {
/* Advertise end of reserved memory to the firmware via fw_cfg. */
uint64_t *val = g_malloc(sizeof(*val));
PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms);
uint64_t res_mem_end = pcms->hotplug_memory.base;
if (!pcmc->broken_reserved_end) {
res_mem_end += memory_region_size(&pcms->hotplug_memory.mr);
}
*val = cpu_to_le64(ROUND_UP(res_mem_end, 0x1ULL << 30));
fw_cfg_add_file(fw_cfg, "etc/reserved-memory-end", val, sizeof(*val));
}
if (linux_boot) {
load_linux(pcms, fw_cfg);
}
for (i = 0; i < nb_option_roms; i++) {
rom_add_option(option_rom[i].name, option_rom[i].bootindex);
}
pcms->fw_cfg = fw_cfg;
/* Init default IOAPIC address space */
pcms->ioapic_as = &address_space_memory;
}
memory_region_allocate_system_memory函数用于初始化内存条,也就是-m 指定的内存大小,名字叫做pc.ram
/* Allocate the guest's main RAM (the -m size) into @mr.
 * Without NUMA memdevs the whole region is allocated in one piece;
 * with NUMA memdevs @mr becomes a container and each node's
 * host-memory backend is mapped consecutively inside it. */
void memory_region_allocate_system_memory(MemoryRegion *mr, Object *owner,
const char *name,
uint64_t ram_size)
{
uint64_t addr = 0;
int i;
/* Fast path: no NUMA nodes, or nodes without explicit memdevs. */
if (nb_numa_nodes == 0 || !have_memdevs) {
allocate_system_memory_nonnuma(mr, owner, name, ram_size);
return;
}
/* NUMA path: @mr is a pure container of per-node backends. */
memory_region_init(mr, owner, name, ram_size);
for (i = 0; i < nb_numa_nodes; i++) {
uint64_t size = numa_info[i].node_mem;
HostMemoryBackend *backend = numa_info[i].node_memdev;
if (!backend) {
continue;
}
MemoryRegion *seg = host_memory_backend_get_memory(backend,
&error_fatal);
/* Each memdev may back at most one -numa node. */
if (memory_region_is_mapped(seg)) {
char *path = object_get_canonical_path_component(OBJECT(backend));
error_report("memory backend %s is used multiple times. Each "
"-numa option must use a different memdev value.",
path);
exit(1);
}
host_memory_backend_set_mapped(backend, true);
memory_region_add_subregion(mr, addr, seg);
vmstate_register_ram_global(seg);
addr += size;
}
}
对于numa和非numa两种配置采用不同的分配方式,这里分析
allocate_system_memory_nonnuma 函数,这个函数是真正分配hva的函数
/* (excerpt) Non-NUMA RAM allocation: back @mr either by a file mapping
 * (-mem-path, Linux only, falling back to anonymous memory on error)
 * or by plain anonymous host memory, then register it for migration. */
static void allocate_system_memory_nonnuma(MemoryRegion *mr, Object *owner,
const char *name,
uint64_t ram_size)
{
if (mem_path) {
#ifdef __linux__
....
memory_region_init_ram_from_file(mr, owner, name, ram_size, 0, false,
mem_path, &err);
if (err) {
....
/* File-backed setup failed: fall back to anonymous RAM. */
memory_region_init_ram_nomigrate(mr, owner, name, ram_size, &error_fatal);
}
#else
...
#endif
} else {
memory_region_init_ram_nomigrate(mr, owner, name, ram_size, &error_fatal);
}
vmstate_register_ram_global(mr);
}
这个函数可以从文件恢复hva内存,也可以直接分配hva内存,memory_region_init_ram_from_file函数用于从文件恢复,而memory_region_init_ram_nomigrate用于分配内存,我们分析memory_region_init_ram_nomigrate
/* Convenience wrapper: non-shared RAM region (share = false).
 * "nomigrate" means the caller is responsible for migration setup. */
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
Object *owner,
const char *name,
uint64_t size,
Error **errp)
{
memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
}
/* Initialise @mr as a RAM leaf region of @size bytes and allocate its
 * backing RAMBlock (host virtual memory) via qemu_ram_alloc. */
void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
Object *owner,
const char *name,
uint64_t size,
bool share,
Error **errp)
{
memory_region_init(mr, owner, name, size);
mr->ram = true;
/* Leaf node: this region has backing storage of its own. */
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
mr->ram_block = qemu_ram_alloc(size, share, mr, errp);
/* Under TCG, track writes so translated code can be invalidated. */
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
/* Allocate a fixed-size RAMBlock: no preallocated host pointer, no
 * resize callback, not resizeable (max_size == size). */
RAMBlock *qemu_ram_alloc(ram_addr_t size, bool share,
MemoryRegion *mr, Error **errp)
{
return qemu_ram_alloc_internal(size, size, NULL, NULL, false,
share, mr, errp);
}
/* (excerpt) Common RAMBlock construction: record flags for
 * preallocated-host and resizeable blocks, then hand off to
 * ram_block_add, which performs the actual host-memory allocation. */
static
RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
void (*resized)(const char*,
uint64_t length,
void *host),
void *host, bool resizeable, bool share,
MemoryRegion *mr, Error **errp)
{
RAMBlock *new_block;
Error *local_err = NULL;
...
new_block = g_malloc0(sizeof(*new_block));
...
/* Caller supplied the host memory itself (e.g. -mem-path mapping). */
if (host) {
new_block->flags |= RAM_PREALLOC;
}
if (resizeable) {
new_block->flags |= RAM_RESIZEABLE;
}
ram_block_add(new_block, &local_err, share);
...
return new_block;
}
/* (excerpt) Insert @new_block into the global RAM block list and,
 * when no host pointer was preallocated, allocate host virtual memory
 * through phys_mem_alloc (mmap on Linux hosts).
 * NOTE(review): this excerpt is heavily elided and its brace balance /
 * branch placement do not match upstream QEMU (there phys_mem_alloc
 * runs when new_block->host is NULL) — confirm against the original. */
static void ram_block_add(RAMBlock *new_block, Error **errp, bool shared)
{
RAMBlock *block;
...
if (!new_block->host) {
...
} else {
new_block->host = phys_mem_alloc(new_block->max_length,
&new_block->mr->align, shared);
....
}
...
}
}
....
}
这里经过层层调用主要的目的就是创建RAMBlock和分配hva内存,hva内存的分配在phys_mem_alloc 函数中,对于x86的linux平台宿主机其实就是mmap了一段虚拟内存。
内存条创建完了,接下来就是pc的rom空间初始化。
pc_system_firmware_init 函数正是完成这部分工作。
展开看下系统固件内存的初始化
/* Initialise PC firmware: use legacy ROM loading when no pflash drive
 * is configured (or on isapc), otherwise map the firmware as flash.
 * Flash with KVM requires read-only memory slot support. */
void pc_system_firmware_init(MemoryRegion *rom_memory, bool isapc_ram_fw)
{
DriveInfo *pflash_drv;
pflash_drv = drive_get(IF_PFLASH, 0, 0);
if (isapc_ram_fw || pflash_drv == NULL) {
/* When a pflash drive is not found, use rom-mode */
old_pc_system_rom_init(rom_memory, isapc_ram_fw);
return;
}
if (kvm_enabled() && !kvm_readonly_mem_enabled()) {
/* Older KVM cannot execute from device memory. So, flash memory
 * cannot be used unless the readonly memory kvm capability is present. */
fprintf(stderr, "qemu: pflash with kvm requires KVM readonly memory support\n");
exit(1);
}
pc_system_flash_init(rom_memory);
}
/* Legacy BIOS loading: create the "pc.bios" RAM region from the BIOS
 * image, map the whole BIOS just below 4G, and alias its last (up to)
 * 128 KiB into ISA space just below 1 MiB as "isa-bios".
 * Exits QEMU if the image is missing or not a multiple of 64 KiB. */
static void old_pc_system_rom_init(MemoryRegion *rom_memory, bool isapc_ram_fw)
{
char *filename;
MemoryRegion *bios, *isa_bios;
int bios_size, isa_bios_size;
int ret;
/* BIOS load */
if (bios_name == NULL) {
bios_name = BIOS_FILENAME;
}
filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
if (filename) {
bios_size = get_image_size(filename);
} else {
bios_size = -1;
}
/* BIOS images must be a whole number of 64 KiB segments. */
if (bios_size <= 0 ||
(bios_size % 65536) != 0) {
goto bios_error;
}
bios = g_malloc(sizeof(*bios));
memory_region_init_ram(bios, NULL, "pc.bios", bios_size, &error_fatal);
if (!isapc_ram_fw) {
memory_region_set_readonly(bios, true);
}
/* Load the image at address 4G - bios_size (top of 32-bit space). */
ret = rom_add_file_fixed(bios_name, (uint32_t)(-bios_size), -1);
if (ret != 0) {
bios_error:
fprintf(stderr, "qemu: could not load PC BIOS '%s'\n", bios_name);
exit(1);
}
g_free(filename);
/* map the last 128KB of the BIOS in ISA space */
isa_bios_size = bios_size;
if (isa_bios_size > (128 * 1024)) {
isa_bios_size = 128 * 1024;
}
isa_bios = g_malloc(sizeof(*isa_bios));
memory_region_init_alias(isa_bios, NULL, "isa-bios", bios,
bios_size - isa_bios_size, isa_bios_size);
memory_region_add_subregion_overlap(rom_memory,
0x100000 - isa_bios_size,
isa_bios,
1);
if (!isapc_ram_fw) {
memory_region_set_readonly(isa_bios, true);
}
/* map all the bios at the top of memory */
memory_region_add_subregion(rom_memory,
(uint32_t)(-bios_size),
bios);
}
这里创建bios的region, 并创建了一个别名isa-bios, 放在rom的(1M-128K)~1M的位置; 另外bios作为一个整体放在了(4G-bios_size)~4G的位置
/* setup pci memory address space mapping into system address space */
/* Map the whole PCI address space at GPA 0 with priority -1, so RAM
 * (priority 0) wins wherever they overlap and PCI fills the holes. */
void pc_pci_as_mapping_init(Object *owner, MemoryRegion *system_memory,
MemoryRegion *pci_address_space)
{
/* Set to lower priority than RAM */
memory_region_add_subregion_overlap(system_memory, 0x0,
pci_address_space, -1);
}
pc_pci_as_mapping_init 的时候将pci地址空间(pci_address_space)放入 system_memory, 作为一个优先级比较低(-1)的子region
/* Set up one PAM (Programmable Attribute Map) window [start, start+size):
 * four mutually exclusive aliases over the same GPA range —
 * alias[0] -> PCI space, alias[1] -> RAM read-only ("ROM" mode),
 * alias[2] -> RAM (via the second "pam-pci" alias), alias[3] -> RAM RW.
 * All start disabled; mem->current selects which one the chipset
 * enables later. */
void init_pam(DeviceState *dev, MemoryRegion *ram_memory,
MemoryRegion *system_memory, MemoryRegion *pci_address_space,
PAMMemoryRegion *mem, uint32_t start, uint32_t size)
{
int i;
/* RAM */
memory_region_init_alias(&mem->alias[3], OBJECT(dev), "pam-ram", ram_memory,
start, size);
/* ROM (XXX: not quite correct) */
memory_region_init_alias(&mem->alias[1], OBJECT(dev), "pam-rom", ram_memory,
start, size);
memory_region_set_readonly(&mem->alias[1], true);
/* XXX: should distinguish read/write cases */
memory_region_init_alias(&mem->alias[0], OBJECT(dev), "pam-pci", pci_address_space,
start, size);
memory_region_init_alias(&mem->alias[2], OBJECT(dev), "pam-pci", ram_memory,
start, size);
/* All aliases mapped at the same offset with priority 1; only the
 * currently-selected one is enabled at a time. */
for (i = 0; i < 4; ++i) {
memory_region_set_enabled(&mem->alias[i], false);
memory_region_add_subregion_overlap(system_memory, start,
&mem->alias[i], 1);
}
mem->current = 0;
}
init_pam 的时候把这些PAM别名作为子region加入到 system_memory
最终内存布局如下
memory
0000000000000000-ffffffffffffffff (prio 0, RW): system
0000000000000000-000000003fffffff (prio 0, RW): alias ram-below-4g @pc.ram 0000000000000000-000000003fffffff
0000000000000000-ffffffffffffffff (prio -1, RW): pci
00000000000a0000-00000000000bffff (prio 1, RW): cirrus-lowmem-container
00000000000a0000-00000000000bffff (prio 0, RW): cirrus-low-memory
00000000000c0000-00000000000dffff (prio 1, RW): pc.rom
00000000000e0000-00000000000fffff (prio 1, R-): alias isa-bios @pc.bios 0000000000020000-000000000003ffff
00000000fc000000-00000000fdffffff (prio 1, RW): cirrus-pci-bar0
00000000fc000000-00000000fc7fffff (prio 1, RW): vga.vram
00000000fc000000-00000000fc7fffff (prio 0, RW): cirrus-linear-io
00000000fd000000-00000000fd3fffff (prio 0, RW): cirrus-bitblt-mmio
00000000febc0000-00000000febdffff (prio 1, RW): e1000-mmio
00000000febf0000-00000000febf0fff (prio 1, RW): cirrus-mmio
00000000fffc0000-00000000ffffffff (prio 0, R-): pc.bios
00000000000a0000-00000000000bffff (prio 1, RW): alias smram-region @pci 00000000000a0000-00000000000bffff
00000000000c0000-00000000000c3fff (prio 1, R-): alias pam-rom @pc.ram 00000000000c0000-00000000000c3fff
00000000000c4000-00000000000c7fff (prio 1, R-): alias pam-rom @pc.ram 00000000000c4000-00000000000c7fff
00000000000c8000-00000000000cbfff (prio 1, R-): alias pam-rom @pc.ram 00000000000c8000-00000000000cbfff
00000000000ca000-00000000000ccfff (prio 1000, RW): alias kvmvapic-rom @pc.ram 00000000000ca000-00000000000ccfff
00000000000cc000-00000000000cffff (prio 1, R-): alias pam-rom @pc.ram 00000000000cc000-00000000000cffff
00000000000d0000-00000000000d3fff (prio 1, R-): alias pam-rom @pc.ram 00000000000d0000-00000000000d3fff
00000000000d4000-00000000000d7fff (prio 1, R-): alias pam-rom @pc.ram 00000000000d4000-00000000000d7fff
00000000000d8000-00000000000dbfff (prio 1, R-): alias pam-rom @pc.ram 00000000000d8000-00000000000dbfff
00000000000dc000-00000000000dffff (prio 1, R-): alias pam-rom @pc.ram 00000000000dc000-00000000000dffff
00000000000e0000-00000000000e3fff (prio 1, R-): alias pam-rom @pc.ram 00000000000e0000-00000000000e3fff
00000000000e4000-00000000000e7fff (prio 1, R-): alias pam-rom @pc.ram 00000000000e4000-00000000000e7fff
00000000000e8000-00000000000ebfff (prio 1, R-): alias pam-rom @pc.ram 00000000000e8000-00000000000ebfff
00000000000ec000-00000000000effff (prio 1, RW): alias pam-ram @pc.ram 00000000000ec000-00000000000effff
00000000000f0000-00000000000fffff (prio 1, R-): alias pam-rom @pc.ram 00000000000f0000-00000000000fffff
00000000fec00000-00000000fec00fff (prio 0, RW): ioapic
00000000fed00000-00000000fed003ff (prio 0, RW): hpet
00000000fee00000-00000000feefffff (prio 4096, RW): icc-apic-container
00000000fee00000-00000000feefffff (prio 0, RW): apic-msi
I/O
0000000000000000-000000000000ffff (prio 0, RW): io
0000000000000000-0000000000000007 (prio 0, RW): dma-chan
0000000000000008-000000000000000f (prio 0, RW): dma-cont
0000000000000020-0000000000000021 (prio 0, RW): pic
0000000000000040-0000000000000043 (prio 0, RW): pit
0000000000000060-0000000000000060 (prio 0, RW): i8042-data
0000000000000061-0000000000000061 (prio 0, RW): elcr
0000000000000064-0000000000000064 (prio 0, RW): i8042-cmd
0000000000000070-0000000000000071 (prio 0, RW): rtc
000000000000007e-000000000000007f (prio 0, RW): kvmvapic
0000000000000080-0000000000000080 (prio 0, RW): ioport80
0000000000000081-0000000000000083 (prio 0, RW): dma-page
0000000000000087-0000000000000087 (prio 0, RW): dma-page
0000000000000089-000000000000008b (prio 0, RW): dma-page
000000000000008f-000000000000008f (prio 0, RW): dma-page
0000000000000092-0000000000000092 (prio 0, RW): port92
00000000000000a0-00000000000000a1 (prio 0, RW): pic
00000000000000b2-00000000000000b3 (prio 0, RW): apm-io
00000000000000c0-00000000000000cf (prio 0, RW): dma-chan
00000000000000d0-00000000000000df (prio 0, RW): dma-cont
00000000000000f0-00000000000000f0 (prio 0, RW): ioportF0
0000000000000170-0000000000000177 (prio 0, RW): ide
00000000000001f0-00000000000001f7 (prio 0, RW): ide
0000000000000376-0000000000000376 (prio 0, RW): ide
0000000000000378-000000000000037f (prio 0, RW): parallel
00000000000003b0-00000000000003df (prio 0, RW): cirrus-io
00000000000003f1-00000000000003f5 (prio 0, RW): fdc
00000000000003f6-00000000000003f6 (prio 0, RW): ide
00000000000003f7-00000000000003f7 (prio 0, RW): fdc
00000000000003f8-00000000000003ff (prio 0, RW): serial
00000000000004d0-00000000000004d0 (prio 0, RW): elcr
00000000000004d1-00000000000004d1 (prio 0, RW): elcr
0000000000000510-0000000000000511 (prio 0, RW): fwcfg
0000000000000cf8-0000000000000cfb (prio 0, RW): pci-conf-idx
0000000000000cf9-0000000000000cf9 (prio 1, RW): piix3-reset-control
0000000000000cfc-0000000000000cff (prio 0, RW): pci-conf-data
0000000000005658-0000000000005658 (prio 0, RW): vmport
000000000000ae00-000000000000ae13 (prio 0, RW): acpi-pci-hotplug
000000000000af00-000000000000af1f (prio 0, RW): acpi-cpu-hotplug
000000000000afe0-000000000000afe3 (prio 0, RW): acpi-gpe0
000000000000b000-000000000000b03f (prio 0, RW): piix4-pm
000000000000b000-000000000000b003 (prio 0, RW): acpi-evt
000000000000b004-000000000000b005 (prio 0, RW): acpi-cnt
000000000000b008-000000000000b00b (prio 0, RW): acpi-tmr
000000000000b100-000000000000b13f (prio 0, RW): pm-smbus
000000000000c000-000000000000c03f (prio 1, RW): e1000-io
000000000000c040-000000000000c04f (prio 1, RW): piix-bmdma-container
000000000000c040-000000000000c043 (prio 0, RW): piix-bmdma
000000000000c044-000000000000c047 (prio 0, RW): bmdma
000000000000c048-000000000000c04b (prio 0, RW): piix-bmdma
000000000000c04c-000000000000c04f (prio 0, RW): bmdma
怎么看最终的内存布局呢,其实qemu提供了这样的方法
启动qemu的时候加上-monitor stdio 参数, 就可以在标准输出中执行监控命令了
info mtree
mtree命令就是用于打印内存布局的