跳过bootloader,从start.S开始分析
FUNCTION(_start)
...
// Secondary CPUs (cpuid != 0) skip saving bootinfo; only the primary CPU records it.
cbnz cpuid, .Lno_save_bootinfo
// Primary CPU falls through and executes the code below.
/* step 1: save the physical address of the _start code into kernel_entry_paddr */
adrp tmp, kernel_entry_paddr
adr tmp2, _start
str tmp2, [tmp, #:lo12:kernel_entry_paddr]
// Record the exception level we were booted at (CurrentEL) into arch_boot_el.
adrp tmp2, arch_boot_el
mrs x2, CurrentEL
str x2, [tmp2, #:lo12:arch_boot_el]
接着开始保存 bootinfo(引导信息)
.Lno_save_bootinfo:
/* if we entered at a higher EL than 1, drop to EL1 */
bl arm64_elX_to_el1
/* enable caches so atomics and spinlocks work */
mrs tmp, sctlr_el1
orr tmp, tmp, #(1<<12) /* Enable icache */
orr tmp, tmp, #(1<<2) /* Enable dcache/ucache */
msr sctlr_el1, tmp
// This can be any arbitrary (page-aligned) address >= KERNEL_ASPACE_BASE.
// Load the address of kernel_relocated_base: the base virtual address the
// kernel is relocated to, i.e. where the kernel image begins in virtual memory.
adr_global tmp, kernel_relocated_base
// Keep that virtual base in the kernel_vaddr register alias for later use.
ldr kernel_vaddr, [tmp]
// Load the base of the translation tables.
adr_global page_table0, tt_trampoline
adr_global page_table1, arm64_kernel_translation_table
// Send secondary cpus over to a waiting spot for the primary to finish.
cbnz cpuid, .Lmmu_enable_secondary
// The fixup code appears right after the kernel image (at __data_end in
// our view). Note this code overlaps with the kernel's bss! It
// expects x0 to contain the actual runtime address of __code_start.
mov x0, kernel_vaddr
// __data_end points at the apply_fixups routine provided by image.S.
bl __data_end
然后跳转到image.S执行apply_fixups方法
然后清空bss段
// Zero the .bss section, 16 bytes per iteration.
// NOTE(review): assumes (_end - __bss_start) is a multiple of 16 — confirm linker script.
.Ldo_bss:
// tmp = start of .bss, tmp2 = number of bytes to clear.
adr_global tmp, __bss_start
adr_global tmp2, _end
sub tmp2, tmp2, tmp
// Skip the loop entirely when .bss is empty.
cbz tmp2, .Lbss_loop_done
.Lbss_loop:
sub tmp2, tmp2, #16
// Store a pair of zero registers: clears 16 bytes, post-incrementing tmp.
stp xzr, xzr, [tmp], #16
cbnz tmp2, .Lbss_loop
获取 bss 段首和段尾地址,两者差值即待清零的字节数;若不为零,则循环将其清空
然后设置内核栈
.Lbss_loop_done:
/* set up a functional stack pointer */
// Point sp at the boot CPU's kernel stack so we can start calling C code.
adr_global tmp, boot_cpu_kstack_end
mov sp, tmp
/* make sure the boot allocator is given a chance to figure out where
 * we are loaded in physical memory. */
bl boot_alloc_init
/* save the physical address the kernel is loaded at */
// Store the kernel's physical load address into the kernel_base_phys global.
adr_global x0, __code_start
adr_global x1, kernel_base_phys
str x0, [x1]
/* Save a copy of the physical address of the kernel page table */
adr_global tmp, arm64_kernel_translation_table_phys
str page_table1, [tmp]
/* set up the mmu according to mmu_initial_mappings */
/* clear out the kernel translation table */
mov tmp, #0
下一步:映射物理内存
/* map a large run of physical memory at the base of the kernel's address space
 * TODO(fxbug.dev/47856): Only map the arenas. */
// arm64_boot_map(page_table = x0, vaddr = x1, paddr = x2, size = x3, flags = x4):
// physmap covers [KERNEL_ASPACE_BASE, +ARCH_PHYSMAP_SIZE) -> PA 0 with data flags.
mov x0, page_table1
mov x1, KERNEL_ASPACE_BASE
mov x2, 0
mov x3, ARCH_PHYSMAP_SIZE
movlit x4, MMU_PTE_KERNEL_DATA_FLAGS
bl arm64_boot_map
/* map the kernel to a fixed address */
/* note: mapping the kernel here with full rwx, this will get locked down later in vm initialization; */
mov x0, page_table1
mov x1, kernel_vaddr
adr_global x2, __code_start
adr_global x3, _end
// x3 = kernel image size in bytes (_end - __code_start).
sub x3, x3, x2
movlit x4, MMU_PTE_KERNEL_RWX_FLAGS
bl arm64_boot_map
重新设置mmu
/* set up the mmu */
/* Invalidate the entire TLB */
tlbi vmalle1is
// Ensure the TLB invalidation completes and the pipeline is flushed before
// touching the MMU control registers.
dsb sy
isb
/* Initialize Memory Attribute Indirection Register */
movlit tmp, MMU_MAIR_VAL
msr mair_el1, tmp
/* Initialize TCR_EL1 */
/* set cacheable attributes on translation walk */
/* (SMP extensions) non-shareable, inner write-back write-allocate */
/* both aspaces active, current ASID in TTBR1 */
movlit tmp, MMU_TCR_FLAGS_IDENT
msr tcr_el1, tmp
isb
/* Write the ttbrs with phys addr of the translation table */
// ttbr0 gets the identity/trampoline table; ttbr1 gets the kernel table.
msr ttbr0_el1, page_table0
/* Or in 0x1 (GLOBAL_ASID) bits. Keep in sync with mmu.h */
orr tmp, page_table1, #(0x1 << 48)
msr ttbr1_el1, tmp
isb
/* Read SCTLR */
mrs tmp, sctlr_el1
/* Turn on the MMU */
orr tmp, tmp, #(1<<0)
/* Write back SCTLR */
msr sctlr_el1, tmp
Tips:SCTLR_EL1 即系统控制寄存器(System Control Register),bit 0 为 MMU 使能位。
MMU 开启后,重新设置栈指针(此时使用的是虚拟地址)
// set up the boot stack for real
// (re-load boot_cpu_kstack_end now that the MMU is on)
adr_global tmp, boot_cpu_kstack_end
mov sp, tmp
设置栈保护(Stack Guard)
配置 Stack Guard:其实是向线程指针指向的 TLS 区域写入一个金丝雀(canary)值,编译器插入的检查代码在函数返回时比对该值,若被改写即视为栈溢出并触发异常。
这样即使编译期间未启用其他栈保护机制,也能尽早检测到栈破坏。
// Set up the stack guard: install a canary value in the TLS area reached via
// tpidr_el1; compiler-inserted checks compare against it to detect overflow.
adr_global tmp, boot_cpu_fake_thread_pointer_location
msr tpidr_el1, tmp
// set the per cpu pointer for cpu 0
adr_global x18, arm64_percpu_array
// Choose a good (ideally random) stack-guard value as early as possible.
bl choose_stack_guard
mrs tmp, tpidr_el1
// Store the guard value at its fixed TLS offset.
str x0, [tmp, #ZX_TLS_STACK_GUARD_OFFSET]
// Don't leak the value to other code.
mov x0, xzr
接下来是次级(secondary)CPU 的栈设置与初始化入口
.Lsecondary_boot:
// Fetch this secondary CPU's kernel stack pointer (x0) and thread pointer (x1).
bl arm64_get_secondary_sp
// A null stack means this CPU is unsupported/unconfigured — park it.
cbz x0, .Lunsupported_cpu_trap
mov sp, x0
msr tpidr_el1, x1
// Enter the C-level secondary-CPU initialization path; does not return.
bl arm64_secondary_entry
.Lunsupported_cpu_trap:
// Unsupported CPUs spin here forever in a low-power wait.
wfe
b .Lunsupported_cpu_trap
最后,主 CPU 进入 C 代码执行内核初始化
// Pass the physical address of the handoff data as the first argument and
// enter the C kernel entry point; lk_main should never return.
mov x0, handoff_paddr
bl lk_main
// Spin forever if it somehow does return.
b .