如果一个系统有ATF(ARM Trusted Firmware)(ATF的部分,请参考ATF的相关文档),OP-TEE OS属于bl32阶段被启动,通过在bl31阶段调用opteed_enter_sp函数跳转到OP-TEE OS中执行启动操作。而在使用qemu+OP-TEE的环境运行时,OP-TEE OS如何被启动请参考《7.OP-TEE+qemu的启动过程分析--加载optee_os和rootfs》。
以qemu+OP-TEE的方式运行TEE时,OP-TEE的image的链接文件是:optee_os/core/arch/arm/kernel/kern.ld.S,在该文件中可以得知TEE image启动时的入口函数是_start函数,代码如下:
_start函数被定义在core/arch/arm目录中的不同文件中,根据不同的板级拥有不同的.S文件。本例子中使用QEMU+OP-TEE运行TEE时使用的是ARM32,所以入口函数被定义在core/arch/arm/kernel/generic_entry_a32.S文件中。整个启动的大致流程图如下(只考虑主CPU的启动):
该文件的主要内容如下,整个流程从_start函数开始,然后跳转到reset操作,最重要的是执行reset_primary函数:
.section .data
.balign 4
#ifdef CFG_BOOT_SYNC_CPU
.equ SEM_CPU_READY, 1
#endif
#ifdef CFG_PL310
.section .rodata.init
panic_boot_file:
.asciz __FILE__
/*
* void assert_flat_mapped_range(uint32_t vaddr, uint32_t line)
*/
.section .text.init
/* __assert_flat_mapped_range 函数,判定MMU中虚拟地址和物理地址的映射关系是否正常 */
LOCAL_FUNC __assert_flat_mapped_range , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
push { r4-r6, lr }
mov r4, r0
mov r5, r1
bl cpu_mmu_enabled
cmp r0, #0
beq 1f
mov r0, r4
bl virt_to_phys
cmp r0, r4
beq 1f
/*
* this must be compliant with the panic generic routine:
* __do_panic(__FILE__, __LINE__, __func__, str)
*/
ldr r0, =panic_boot_file
mov r1, r5
mov r2, #0
mov r3, #0
bl __do_panic
b . /* should NOT return */
1: pop { r4-r6, pc }
UNWIND( .fnend)
END_FUNC __assert_flat_mapped_range
/* panic if mmu is enable and vaddr != paddr (scratch lr) */
/* assert_flat_mapped_range宏,如果MMU使能但虚拟地址和物理地址不匹配则引起panic */
.macro assert_flat_mapped_range va, line
ldr r0, =(\va)
ldr r1, =\line
bl __assert_flat_mapped_range
.endm
#endif /* CFG_PL310 */
.weak plat_cpu_reset_early
/* CPU重置 */
FUNC plat_cpu_reset_early , :
UNWIND( .fnstart)
bx lr
UNWIND( .fnend)
END_FUNC plat_cpu_reset_early
KEEP_PAGER plat_cpu_reset_early
.section .text.boot
/* _start函数,跳转到reset执行 */
FUNC _start , :
b reset
b . /* Undef */
b . /* Syscall */
b . /* Prefetch abort */
b . /* Data abort */
b . /* Reserved */
b . /* IRQ */
b . /* FIQ */
END_FUNC _start
/* 准备非主CPU进入ready */
.macro cpu_is_ready
#ifdef CFG_BOOT_SYNC_CPU
bl get_core_pos
lsl r0, r0, #2
ldr r1,=sem_cpu_sync
ldr r2, =SEM_CPU_READY
str r2, [r1, r0]
dsb
sev
#endif
.endm
/* 等待主CPU */
.macro wait_primary
#ifdef CFG_BOOT_SYNC_CPU
ldr r0, =sem_cpu_sync
mov r2, #SEM_CPU_READY
sev
1:
ldr r1, [r0]
cmp r1, r2
wfene
bne 1b
#endif
.endm
/* 等待从CPU */
.macro wait_secondary
#ifdef CFG_BOOT_SYNC_CPU
ldr r0, =sem_cpu_sync
mov r3, #CFG_TEE_CORE_NB_CORE
mov r2, #SEM_CPU_READY
sev
1:
subs r3, r3, #1
beq 3f
add r0, r0, #4
2:
ldr r1, [r0]
cmp r1, r2
wfene
bne 2b
b 1b
3:
#endif
.endm
/*
* Save boot arguments
* entry r0, saved r4: pagestore
* entry r1, saved r7: (ARMv7 standard bootarg #1)
* entry r2, saved r6: device tree address, (ARMv7 standard bootarg #2)
* entry lr, saved r5: non-secure entry address (ARMv7 bootarg #0)
*/
/* 保存启动的时候传入的参数:page table, dta, linux kernel entry address, bootarg */
.macro bootargs_entry
#if defined(CFG_NS_ENTRY_ADDR)
ldr r5, =CFG_NS_ENTRY_ADDR
#else
mov r5, lr
#endif
#if defined(CFG_PAGEABLE_ADDR)
ldr r4, =CFG_PAGEABLE_ADDR
#else
mov r4, r0
#endif
#if defined(CFG_DT_ADDR)
ldr r6, =CFG_DT_ADDR
#else
mov r6, r2
#endif
mov r7, r1
.endm
/* reset函数定义,执行主要启动操作 */
LOCAL_FUNC reset , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
bootargs_entry //保存传入的启动参数
/* Enable alignment checks and disable data and instruction cache. */
/* 使能对齐检查并禁用cache */
read_sctlr r0
orr r0, r0, #SCTLR_A
bic r0, r0, #SCTLR_C
bic r0, r0, #SCTLR_I
write_sctlr r0
isb
/* Early ARM secure MP specific configuration */
/* 重置CPU的特殊配置 */
bl plat_cpu_reset_early
ldr r0, =_start
write_vbar r0
#if defined(CFG_WITH_ARM_TRUSTED_FW)
b reset_primary //当支持ATF的时候只执行reset_primary完成OP-TEE的启动
#else
bl get_core_pos //获取当前cpu的ID,用来判定当前执行的cortex是否是主CPU
cmp r0, #0
beq reset_primary //如果当前CPU是主CPU,则执行reset_primary来完成OP-TEE的启动
b reset_secondary //执行其他CPU的初始化操作,主CPU不会执行到这里
#endif
UNWIND( .fnend)
END_FUNC reset
/*
* Setup sp to point to the top of the tmp stack for the current CPU:
* sp is assigned stack_tmp + (cpu_id + 1) * stack_tmp_stride -
* stack_tmp_offset
*/
/* 设置SP的值为栈顶 */
.macro set_sp
bl get_core_pos
cmp r0, #CFG_TEE_CORE_NB_CORE
/* Unsupported CPU, park it before it breaks something */
bge unhandled_cpu
add r0, r0, #1
ldr r2, =stack_tmp_stride
ldr r1, [r2]
mul r2, r0, r1
ldr r1, =stack_tmp
add r1, r1, r2
ldr r2, =stack_tmp_offset
ldr r2, [r2]
sub sp, r1, r2
.endm
/*
* Cache maintenance during entry: handle outer cache.
* End address is exclusive: first byte not to be changed.
* Note however arm_clX_inv/cleanbyva operate on full cache lines.
*
* Use ANSI #define to trap source file line number for PL310 assertion
*/
.macro __inval_cache_vrange vbase, vend, line
#ifdef CFG_PL310
assert_flat_mapped_range (\vbase), (\line)
bl pl310_base
ldr r1, =(\vbase)
ldr r2, =(\vend)
bl arm_cl2_invbypa
#endif
ldr r0, =(\vbase)
ldr r1, =(\vend)
bl arm_cl1_d_invbyva
.endm
.macro __flush_cache_vrange vbase, vend, line
#ifdef CFG_PL310
assert_flat_mapped_range (\vbase), (\line)
ldr r0, =(\vbase)
ldr r1, =(\vend)
bl arm_cl1_d_cleanbyva
bl pl310_base
ldr r1, =(\vbase)
ldr r2, =(\vend)
bl arm_cl2_cleaninvbypa
#endif
ldr r0, =(\vbase)
ldr r1, =(\vend)
bl arm_cl1_d_cleaninvbyva
.endm
#define inval_cache_vrange(vbase, vend) \
__inval_cache_vrange (vbase), ((vend) - 1), __LINE__
#define flush_cache_vrange(vbase, vend) \
__flush_cache_vrange (vbase), ((vend) - 1), __LINE__
#ifdef CFG_BOOT_SYNC_CPU
#define flush_cpu_semaphores \
flush_cache_vrange(sem_cpu_sync, \
(sem_cpu_sync + (CFG_TEE_CORE_NB_CORE << 2)))
#else
#define flush_cpu_semaphores
#endif
/* reset_primary函数,执行主CPU的主要初始化操作 */
LOCAL_FUNC reset_primary , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
/* preserve r4-r7: bootargs */
#ifdef CFG_WITH_PAGER
/*
* Move init code into correct location and move hashes to a
* temporary safe location until the heap is initialized.
*
* The binary is built as:
* [Pager code, rodata and data] : In correct location
* [Init code and rodata] : Should be copied to __text_init_start
* [Hashes] : Should be saved before initializing pager
*
*/
/* 拷贝init section 到RAM中 */
ldr r0, =__text_init_start /* dst */
ldr r1, =__data_end /* src */
ldr r2, =__tmp_hashes_end /* dst limit */
/* Copy backwards (as memmove) in case we're overlapping */
sub r2, r2, r0 /* len */
add r0, r0, r2
add r1, r1, r2
ldr r2, =__text_init_start
copy_init:
ldmdb r1!, {r3, r8-r12, sp}
stmdb r0!, {r3, r8-r12, sp}
cmp r0, r2
bgt copy_init
#endif
#ifdef CFG_CORE_SANITIZE_KADDRESS
/* First initialize the entire shadow area with no access */
ldr r0, =__asan_shadow_start /* start */
ldr r1, =__asan_shadow_end /* limit */
mov r2, #ASAN_DATA_RED_ZONE
shadow_no_access:
str r2, [r0], #4
cmp r0, r1
bls shadow_no_access
/* Mark the entire stack area as OK */
ldr r2, =CFG_ASAN_SHADOW_OFFSET
ldr r0, =__nozi_stack_start /* start */
lsr r0, r0, #ASAN_BLOCK_SHIFT
add r0, r0, r2
ldr r1, =__nozi_stack_end /* limit */
lsr r1, r1, #ASAN_BLOCK_SHIFT
add r1, r1, r2
mov r2, #0
shadow_stack_access_ok:
strb r2, [r0], #1
cmp r0, r1
bls shadow_stack_access_ok
#endif
set_sp
/* complete ARM secure MP common configuration */
bl plat_cpu_reset_late //完成ARM安全内存保护的基本配置
/* Enable Console */
bl console_init //初始化串口
#ifdef CFG_PL310
bl pl310_base
bl arm_cl2_config
#endif
/*
* Invalidate dcache for all memory used during initialization to
* avoid nasty surprices when the cache is turned on. We must not
* invalidate memory not used by OP-TEE since we may invalidate
* entries used by for instance ARM Trusted Firmware.
*/
/* 无效cache操作 */
#ifdef CFG_WITH_PAGER
inval_cache_vrange(__text_start, __tmp_hashes_end)
#else
inval_cache_vrange(__text_start, __end)
#endif
#ifdef CFG_PL310
/* Enable PL310 if not yet enabled */
bl pl310_base
bl arm_cl2_enable
#endif
bl core_init_mmu_map //初始化MMU的内存映射表
bl core_init_mmu_regs //初始化MMU中的寄存器映射
bl cpu_mmu_enable //使能MMU
bl cpu_mmu_enable_icache //使能指令cache
bl cpu_mmu_enable_dcache //使能data cache
mov r0, r4 /* pageable part address */
mov r1, r5 /* ns-entry address */
mov r2, r6 /* DT address */
/* 执行主要初始化操作,带入page part 地址,linux kernel入口地址, DT地址 */
bl generic_boot_init_primary
mov r4, r0 /* save entry test vector */
/*
* In case we've touched memory that secondary CPUs will use before
* they have turned on their D-cache, clean and invalidate the
* D-cache before exiting to normal world.
*/
/* 清空cache */
#ifdef CFG_WITH_PAGER
flush_cache_vrange(__text_start, __init_end)
#else
flush_cache_vrange(__text_start, __end)
#endif
/* release secondary boot cores and sync with them */
/* 同步从核的启动 */
cpu_is_ready
flush_cpu_semaphores
wait_secondary
#ifdef CFG_PL310_LOCKED
/* lock/invalidate all lines: pl310 behaves as if disable */
bl pl310_base
bl arm_cl2_lockallways
bl pl310_base
bl arm_cl2_cleaninvbyway
#endif
/*
* Clear current thread id now to allow the thread to be reused on
* next entry. Matches the thread_init_boot_thread() in
* generic_boot.c.
*/
bl thread_clr_boot_thread //清空当前线程的ID值,以备被重新使用
#if defined(CFG_WITH_ARM_TRUSTED_FW)
/* Pass the vector address returned from main_init */
mov r1, r4
#else
/* realy standard bootarg #1 and #2 to non secure entry */
mov r4, #0
mov r3, r6 /* std bootarg #2 for register R2 */
mov r2, r7 /* std bootarg #1 for register R1 */
mov r1, #0
#endif /* CFG_WITH_ARM_TRUSTED_FW */
/* 触发smc调用,如果支持ATF,则返回TEESMC_OPTEED_RETURN_ENTRY_DONE的值到EL3中,通知ATF OPTEE已经启动完成。如果不支持ATF,则同样触发smc调用,切换到no-secure world状态进行linux kernel的启动 */
mov r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
smc #0
b . /* SMC should not return */
UNWIND( .fnend)
END_FUNC reset_primary
LOCAL_FUNC unhandled_cpu , :
UNWIND( .fnstart)
wfi
b unhandled_cpu
UNWIND( .fnend)
END_FUNC unhandled_cpu
#if defined(CFG_WITH_ARM_TRUSTED_FW)
FUNC cpu_on_handler , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
mov r4, r0
mov r5, r1
mov r6, lr
read_sctlr r0
orr r0, r0, #SCTLR_A
write_sctlr r0
ldr r0, =_start
write_vbar r0
mov r4, lr
set_sp
bl core_init_mmu_regs
bl cpu_mmu_enable
bl cpu_mmu_enable_icache
bl cpu_mmu_enable_dcache
mov r0, r4
mov r1, r5
bl generic_boot_cpu_on_handler
bx r6
UNWIND( .fnend)
END_FUNC cpu_on_handler
#else /* defined(CFG_WITH_ARM_TRUSTED_FW) */
/* 从核的启动函数 */
LOCAL_FUNC reset_secondary , :
UNWIND( .fnstart)
UNWIND( .cantunwind)
wait_primary
set_sp
bl plat_cpu_reset_late
#if defined (CFG_BOOT_SECONDARY_REQUEST)
/* if L1 is not invalidated before, do it here */
bl arm_cl1_d_invbysetway
#endif
bl core_init_mmu_regs
bl cpu_mmu_enable
bl cpu_mmu_enable_icache
bl cpu_mmu_enable_dcache
cpu_is_ready
#if defined (CFG_BOOT_SECONDARY_REQUEST)
/* generic_boot_core_hpen return value (r0) is ns entry point */
bl generic_boot_core_hpen
#else
mov r0, r5 /* ns-entry address */
#endif
bl generic_boot_init_secondary
mov r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
mov r1, #0
mov r2, #0
mov r3, #0
mov r4, #0
smc #0
b . /* SMC should not return */
UNWIND( .fnend)
END_FUNC reset_secondary
#endif /* defined(CFG_WITH_ARM_TRUSTED_FW) */