/*
* Setup the initial page tables. We only setup the barest
* amount which are required to get the kernel running, which
* generally means mapping in the kernel code.
*
* r8 = phys_offset, r9 = cpuid, r10 = procinfo
*
* Returns:
* r0, r3, r5-r7 corrupted
* r4 = page table (see ARCH_PGD_SHIFT in asm/memory.h)
*/
@ __create_page_tables sets up a minimal initial page table: only the mappings
@ needed to get the kernel running. The full, fine-grained page tables are
@ built later (in paging_init() and friends) once the kernel is up.
__create_page_tables:
	pgtbl	r4, r8				@ page table address:
						@ r4 = phys_offset + TEXT_OFFSET - PG_DIR_SIZE,
						@ e.g. 0x20000000 + 0x8000 - 0x4000 = 0x20004000,
						@ i.e. the 16KB immediately below the kernel entry.
	@ Step 1: zero the whole 16KB swapper page table.
	/*
	 * Clear the swapper page table
	 */
	mov	r0, r4				@ r0 = table start
	mov	r3, #0
	add	r6, r0, #PG_DIR_SIZE		@ r6 = table end (start + PG_DIR_SIZE)
1:	str	r3, [r0], #4			@ clear 16 bytes per iteration
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	teq	r0, r6
	bne	1b
#ifdef CONFIG_ARM_LPAE
	/*
	 * Build the PGD (Page Global Directory) table (first level) to point
	 * to the PMD (Page Middle Directory) tables. A PGD entry is 64-bit
	 * wide, so each entry is written as two 32-bit words.
	 */
	mov	r0, r4
	add	r3, r4, #0x1000			@ first PMD table address
	orr	r3, r3, #3			@ PGD block type
	mov	r6, #4				@ PTRS_PER_PGD
	mov	r7, #1 << (55 - 32)		@ L_PGD_SWAPPER
1:
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4			@ set top PGD entry bits
	str	r3, [r0], #4			@ set bottom PGD entry bits
#else
	str	r3, [r0], #4			@ set bottom PGD entry bits
	str	r7, [r0], #4			@ set top PGD entry bits
#endif
	add	r3, r3, #0x1000			@ next PMD table
	subs	r6, r6, #1
	bne	1b
	add	r4, r4, #0x1000			@ point to the PMD tables
#ifdef CONFIG_CPU_ENDIAN_BE8
	add	r4, r4, #4			@ we only write the bottom word
#endif
#endif
	@ Step 2: identity-map the MMU-enabling code.
	/*
	 * After __enable_mmu turns the MMU on, the CPU issues virtual
	 * addresses, so the code performing the switch must be mapped 1:1
	 * (virtual == physical) from __turn_mmu_on to __turn_mmu_on_end,
	 * otherwise the instruction fetch after enabling the MMU would fault.
	 * Once the MMU is running this identity window is no longer needed
	 * and is removed later by paging_init().
	 */
	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags (section attrs from procinfo)
	/*
	 * Create identity mapping to cater for __enable_mmu.
	 * This identity mapping will be removed by paging_init().
	 */
	adr	r0, __turn_mmu_on_loc
	ldmia	r0, {r3, r5, r6}		@ r3 = link-time addr of the table itself,
						@ r5/r6 = link-time __turn_mmu_on/.._end
	sub	r0, r0, r3			@ virt->phys offset
	add	r5, r5, r0			@ phys __turn_mmu_on
	add	r6, r6, r0			@ phys __turn_mmu_on_end
	mov	r5, r5, lsr #SECTION_SHIFT	@ first 1MB section index (>> 20)
	mov	r6, r6, lsr #SECTION_SHIFT	@ last 1MB section index
1:	orr	r3, r7, r5, lsl #SECTION_SHIFT	@ flags + kernel base
	str	r3, [r4, r5, lsl #PMD_ORDER]	@ identity mapping: descriptor at
						@ r4 + (section << 2); each 1st-level
						@ descriptor occupies 4 bytes
	cmp	r5, r6
	addlo	r5, r5, #1			@ next section
	blo	1b
	@ Step 3: map the kernel image at its virtual address. By default the
	@ kernel lives in the top 1GB, so PAGE_OFFSET is typically 0xc0000000.
	/*
	 * Map our RAM from the start to the end of the kernel .bss section.
	 */
	add	r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
						@ r0 = first descriptor for kernel space:
						@ table base + (PAGE_OFFSET >> 20) << 2.
						@ The section table holds 4K entries of
						@ 4 bytes each, indexed by the top 12
						@ address bits, hence the combined
						@ >>20 then <<2 (= >>18) shift.
	ldr	r6, =(_end - 1)			@ last byte of .bss
	orr	r3, r8, r7			@ descriptor = phys_offset | mm_mmuflags
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
						@ r6 = last descriptor to fill; the loop
						@ writes ((r6 - r0) >> 2) + 1 entries
1:	str	r3, [r0], #1 << PMD_ORDER	@ store one 4-byte section descriptor,
						@ advance to the next table entry
	add	r3, r3, #1 << SECTION_SHIFT	@ next 1MB physical section
	cmp	r0, r6
	bls	1b
#ifdef CONFIG_XIP_KERNEL
	/*
	 * Map the kernel image separately as it is not located in RAM.
	 * (Execute-in-place: the image runs from flash, so remap its
	 * storage range on top of the mapping established above.)
	 */
#define XIP_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
	mov	r3, pc
	mov	r3, r3, lsr #SECTION_SHIFT
	orr	r3, r7, r3, lsl #SECTION_SHIFT	@ descriptor for the section we run from
	add	r0, r4, #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
	str	r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
						@ write first descriptor; r0 updated to
						@ point at the entry just written
	ldr	r6, =(_edata_loc - 1)
	add	r0, r0, #1 << PMD_ORDER
	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:	cmp	r0, r6
	add	r3, r3, #1 << SECTION_SHIFT
	strls	r3, [r0], #1 << PMD_ORDER
	bls	1b
#endif
	/*
	 * Then map boot params address in r2 if specified.
	 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
	 */
	@ Step 4: map the section(s) holding the bootloader-provided parameters
	@ (ATAGs or DTB, physical address in r2). If r2 is 0, the movs below
	@ sets Z and the whole conditional sequence is skipped.
	mov	r0, r2, lsr #SECTION_SHIFT	@ r0 = r2 >> 20
	movs	r0, r0, lsl #SECTION_SHIFT	@ round r2 down to a 1MB boundary;
						@ Z set => no boot params supplied
	subne	r3, r0, r8			@ offset from phys_offset
	addne	r3, r3, #PAGE_OFFSET		@ corresponding virtual address
	addne	r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
						@ r3 = table entry for that address
	orrne	r6, r7, r0			@ section descriptor
	strne	r6, [r3], #1 << PMD_ORDER	@ map first section, advance entry
	addne	r6, r6, #1 << SECTION_SHIFT
	strne	r6, [r3]			@ map the following section as well
#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
	sub	r4, r4, #4			@ Fixup page table pointer
						@ for 64-bit descriptors
#endif
#ifdef CONFIG_DEBUG_LL
#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
	/*
	 * Map in IO space for serial debugging.
	 * This allows debug messages to be output
	 * via a serial console before paging_init.
	 */
	addruart r7, r3, r0			@ r7 = UART phys, r3 = UART virt
	mov	r3, r3, lsr #SECTION_SHIFT
	mov	r3, r3, lsl #PMD_ORDER
	add	r0, r4, r3			@ r0 = table entry for the UART virt addr
	mov	r3, r7, lsr #SECTION_SHIFT
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
	orr	r3, r7, r3, lsl #SECTION_SHIFT	@ descriptor = UART phys section | io flags
#ifdef CONFIG_ARM_LPAE
	mov	r7, #1 << (54 - 32)		@ XN (execute-never, upper word)
#ifdef CONFIG_CPU_ENDIAN_BE8
	str	r7, [r0], #4
	str	r3, [r0], #4
#else
	str	r3, [r0], #4
	str	r7, [r0], #4
#endif
#else
	orr	r3, r3, #PMD_SECT_XN		@ device mapping must be execute-never
	str	r3, [r0], #4
#endif
#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
	/* we don't need any serial debugging mappings */
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
#endif
#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
	/*
	 * If we're using the NetWinder or CATS, we also need to map
	 * in the 16550-type serial port for the debug messages
	 */
	add	r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ORDER)
	orr	r3, r7, #0x7c000000
	str	r3, [r0]
#endif
#ifdef CONFIG_ARCH_RPC
	/*
	 * Map in screen at 0x02000000 & SCREEN2_BASE
	 * Similar reasons here - for debug. This is
	 * only for Acorn RiscPC architectures.
	 */
	add	r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ORDER)
	orr	r3, r7, #0x02000000
	str	r3, [r0]
	add	r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER)
	str	r3, [r0]
#endif
#endif
#ifdef CONFIG_ARM_LPAE
	sub	r4, r4, #0x1000			@ point to the PGD table
	mov	r4, r4, lsr #ARCH_PGD_SHIFT
#endif
	mov	pc, lr				@ return; r4 = page table (see header)
ENDPROC(__create_page_tables)
.ltorg	@ emit the literal pool here for the "ldr rX, =..." pseudo-loads above
.align
@ Three link-time (virtual) addresses, loaded with ldmia in
@ __create_page_tables: subtracting the first from its runtime (physical)
@ address yields the virt->phys offset, which converts the following two
@ labels into the physical bounds of the code to be identity-mapped.
__turn_mmu_on_loc:
.long . @ virtual address of this very word
.long __turn_mmu_on
.long __turn_mmu_on_end