Detailed analysis of the runtime exception vector table and handlers in TF-A


1.Entries in TF-A

bl31/aarch64/runtime_exceptions.S

| Exception taken from | Synchronous | IRQ or vIRQ | FIQ or vFIQ | SError or vSError | Tips |
| --- | --- | --- | --- | --- | --- |
| Current Exception level with SP_EL0 | sync_exception_sp_el0 | irq_sp_el0 | fiq_sp_el0 | serror_sp_el0 | We don't expect any synchronous exceptions from EL3. EL3 code is non-reentrant, so any asynchronous exception is a serious error. |
| Current Exception level with SP_EL3 | sync_exception_sp_elx | irq_sp_elx | fiq_sp_elx | serror_sp_elx | These vectors trigger if anything went wrong during a previous exception entry or exit, or while handling an earlier unexpected exception. There is a high probability that SP_EL3 is corrupted. |
| Lower Exception level, where the implemented level immediately lower than the target level is using AArch64 | sync_exception_aarch64 | irq_aarch64 | fiq_aarch64 | serror_aarch64 | |
| Lower Exception level, where the implemented level immediately lower than the target level is using AArch32 | sync_exception_aarch32 | irq_aarch32 | fiq_aarch32 | serror_aarch32 | |

2.struct cpu_context

When an exception occurs, the context is preserved in struct cpu_context, defined in include/lib/el3_runtime/aarch64/context.h:

typedef struct cpu_context {
        gp_regs_t gpregs_ctx;
        el3_state_t el3state_ctx;
        el1_sysregs_t el1_sysregs_ctx;
#if CTX_INCLUDE_EL2_REGS
        el2_sysregs_t el2_sysregs_ctx;
#endif
#if CTX_INCLUDE_FPREGS
        fp_regs_t fpregs_ctx;
#endif
        cve_2018_3639_t cve_2018_3639_ctx;
#if CTX_INCLUDE_PAUTH_REGS
        pauth_t pauth_ctx;
#endif
} cpu_context_t;
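The exception vectors save registers at fixed offsets from SP_EL3 (for example [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29] in the code below), so the assembly-visible CTX_*_OFFSET constants have to match the C layout of cpu_context_t; TF-A enforces this with compile-time assertions (CASSERT) in the context code. A minimal sketch of that kind of check in plain C11, assuming context.h (which defines these constants and cpu_context_t) is included:

#include <stddef.h>

/* Sketch: the offsets the assembly uses relative to SP_EL3 must equal the
 * member offsets of cpu_context_t. TF-A uses CASSERT() for this; the plain
 * C11 equivalent is _Static_assert(). */
_Static_assert(offsetof(cpu_context_t, gpregs_ctx) == CTX_GPREGS_OFFSET,
               "CTX_GPREGS_OFFSET does not match the cpu_context_t layout");
_Static_assert(offsetof(cpu_context_t, el3state_ctx) == CTX_EL3STATE_OFFSET,
               "CTX_EL3STATE_OFFSET does not match the cpu_context_t layout");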

Each register group is defined with the DEFINE_REG_STRUCT macro:

#define DEFINE_REG_STRUCT(name, num_regs)       \
        typedef struct name {                   \
                uint64_t ctx_regs[num_regs];    \
        }  __aligned(16) name##_t

For example, gp_regs_t is defined as below:

DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL);
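Expanding the macro for gp_regs shows what both the C and assembly sides see: a 16-byte-aligned array of 64-bit slots, which the vector code addresses as [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_Xn]:

/* Expansion of DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL) */
typedef struct gp_regs {
        uint64_t ctx_regs[CTX_GPREG_ALL];
}  __aligned(16) gp_regs_t;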

3.sync_exception_aarch64

vector_entry sync_exception_aarch64
    /*
     * This exception vector will be the entry point for SMCs and traps
     * that are unhandled at lower ELs most commonly. SP_EL3 should point
     * to a valid cpu context where the general purpose and system register
     * state can be saved.
     */
    apply_at_speculative_wa
    check_and_unmask_ea
    handle_sync_exception
end_vector_entry sync_exception_aarch64

3.1 apply_at_speculative_wa

    .macro  apply_at_speculative_wa
#if ERRATA_SPECULATIVE_AT
    /*
     * Explicitly save x30 so as to free up a register and to enable
     * branching and also, save x29 which will be used in the called
     * function
     */
    stp x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
    bl  save_and_update_ptw_el1_sys_regs
    ldp x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif
    .endm

The called function save_and_update_ptw_el1_sys_regs is documented as follows:

/*
 * In case of ERRATA_SPECULATIVE_AT, save SCTLR_EL1 and TCR_EL1
 * registers and update EL1 registers to disable stage1 and stage2
 * page table walk
 */
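The function itself is implemented in assembly in the EL3 runtime library. As a rough C sketch of the idea (an assumption-laden illustration: the workaround saves SCTLR_EL1/TCR_EL1 into the el1_sysregs area of the context and then sets TCR_EL1.EPD0/EPD1 so that a speculative AT instruction at EL1 cannot start a page table walk; the read_*/write_* helpers below are hypothetical stand-ins for mrs/msr):

#include <stdint.h>

#define TCR_EPD0_BIT    (1ULL << 7)     /* disable TTBR0_EL1 table walks */
#define TCR_EPD1_BIT    (1ULL << 23)    /* disable TTBR1_EL1 table walks */

/* Hypothetical system register accessors standing in for mrs/msr. */
extern uint64_t read_sctlr_el1(void);
extern uint64_t read_tcr_el1(void);
extern void write_tcr_el1(uint64_t val);
extern void isb(void);

/* In the real code these values are stored in the el1_sysregs_ctx area of
 * the cpu_context on SP_EL3, not in globals. */
static uint64_t saved_sctlr_el1, saved_tcr_el1;

static void save_and_disable_el1_ptw(void)
{
        saved_sctlr_el1 = read_sctlr_el1();
        saved_tcr_el1   = read_tcr_el1();

        /* Park the EL1 translation regime while EL3 code runs. */
        write_tcr_el1(saved_tcr_el1 | TCR_EPD0_BIT | TCR_EPD1_BIT);
        isb();
}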

3.2 check_and_unmask_ea

If the esb instruction synchronizes a pending SError (DISR_EL1.A becomes set), the macro branches to handle_lower_el_ea_esb.

	/*
     * Macro that prepares entry to EL3 upon taking an exception.
     *
     * With RAS_EXTENSION, this macro synchronizes pending errors with an ESB
     * instruction. When an error is thus synchronized, the handling is
     * delegated to platform EA handler.
     *
     * Without RAS_EXTENSION, this macro synchronizes pending errors using
     * a DSB, unmasks Asynchronous External Aborts and saves X30 before
     * setting the flag CTX_IS_IN_EL3.
     */
    .macro check_and_unmask_ea
#if RAS_EXTENSION
    /* Synchronize pending External Aborts */
    esb


    /* Unmask the SError interrupt */
    msr daifclr, #DAIF_ABT_BIT

    /*
     * Explicitly save x30 so as to free up a register and to enable
     * branching
     */
    str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

    /* Check for SErrors synchronized by the ESB instruction */
    mrs x30, DISR_EL1
    tbz x30, #DISR_A_BIT, 1f

    /*
     * Save general purpose and ARMv8.3-PAuth registers (if enabled).
     * If Secure Cycle Counter is not disabled in MDCR_EL3 when
     * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable Cycle Counter.
     */
    bl  save_gp_pmcr_pauth_regs

    bl  handle_lower_el_ea_esb

    /* Restore general purpose, PMCR_EL0 and ARMv8.3-PAuth registers */
    bl  restore_gp_pmcr_pauth_regs
1:
#else
    /*
     * For SoCs which do not implement RAS, use DSB as a barrier to
     * synchronize pending external aborts.
     */
    dsb sy

    /* Unmask the SError interrupt */
    msr daifclr, #DAIF_ABT_BIT

    /* Use ISB for the above unmask operation to take effect immediately */
    isb

    /*
     * Refer Note 1. No need to restore X30 as both handle_sync_exception
     * and handle_interrupt_exception macro which follow this macro modify
     * X30 anyway.
     */
    str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
    mov     x30, #1
    str x30, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
    dmb sy
#endif
    .endm

3.3 handle_sync_exception

Three handler entries:

① smc_handler32
② smc_handler64
③ enter_lower_el_sync_ea

    .macro  handle_sync_exception
#if ENABLE_RUNTIME_INSTRUMENTATION
    /*
     * Read the timestamp value and store it in per-cpu data. The value
     * will be extracted from per-cpu data by the C level SMC handler and
     * saved to the PMF timestamp region.
     */
    mrs x30, cntpct_el0
    str x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
    mrs x29, tpidr_el3
    str x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
    ldr x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif


    mrs x30, esr_el3
    ubfx    x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

    /* Handle SMC exceptions separately from other synchronous exceptions */
    cmp x30, #EC_AARCH32_SMC
    b.eq    smc_handler32

    cmp x30, #EC_AARCH64_SMC
    b.eq    smc_handler64

    /* Synchronous exceptions other than the above are assumed to be EA */
    ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
    b   enter_lower_el_sync_ea
    .endm
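In C-like terms, handle_sync_exception decodes the exception class from ESR_EL3 and fans out to one of the three entries. A sketch of that dispatch (the handler functions below are placeholders for the assembly labels of the same names):

#include <stdint.h>

#define ESR_EC_SHIFT        26U
#define ESR_EC_LENGTH       6U
#define EC_AARCH32_SMC      0x13U   /* SMC executed in AArch32 state */
#define EC_AARCH64_SMC      0x17U   /* SMC executed in AArch64 state */

/* Placeholders for the assembly labels of the same names. */
static void smc_handler32(void) {}
static void smc_handler64(void) {}
static void enter_lower_el_sync_ea(void) {}

static void dispatch_sync_exception(uint64_t esr_el3)
{
        uint64_t ec = (esr_el3 >> ESR_EC_SHIFT) & ((1UL << ESR_EC_LENGTH) - 1UL);

        if (ec == EC_AARCH64_SMC)
                smc_handler64();
        else if (ec == EC_AARCH32_SMC)
                smc_handler32();
        else
                /* Everything else is treated as a lower-EL external abort. */
                enter_lower_el_sync_ea();
}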

Q: Why can x30 be used as a temporary register in handle_sync_exception (mrs x30, cntpct_el0 and mrs x30, esr_el3)?

A: See .macro check_and_unmask_ea: x30 has already been saved into the cpu_context there (str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]), so it is free to be clobbered here.

3.3.1 smc_handler64

① save_gp_pmcr_pauth_regs: Save the general purpose and ARMv8.3-PAuth registers (if enabled).

② Populate the parameters passed to the runtime service SMC handler function.

③ Branch to the runtime service SMC handler function.

④ Exit from EL3 via el3_exit.

smc_handler64:
    /* NOTE: The code below must preserve x0-x4 */


    /*
     * Save general purpose and ARMv8.3-PAuth registers (if enabled).
     * If Secure Cycle Counter is not disabled in MDCR_EL3 when
     * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable Cycle Counter.
     */
    bl  save_gp_pmcr_pauth_regs

#if ENABLE_PAUTH
    /* Load and program APIAKey firmware key */
    bl  pauth_load_bl31_apiakey
#endif

    /*
     * Populate the parameters for the SMC handler.
     * We already have x0-x4 in place. x5 will point to a cookie (not used
     * now). x6 will point to the context structure (SP_EL3) and x7 will
     * contain flags we need to pass to the handler.
     */
    mov x5, xzr
    mov x6, sp

    /*
     * Restore the saved C runtime stack value which will become the new
     * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
     * structure prior to the last ERET from EL3.
     */
    ldr x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

    /* Switch to SP_EL0 */
    msr spsel, #MODE_SP_EL0

    /*
     * Save the SPSR_EL3, ELR_EL3, & SCR_EL3 in case there is a world
     * switch during SMC handling.
     * TODO: Revisit if all system registers can be saved later.
     */
    mrs x16, spsr_el3
    mrs x17, elr_el3
    mrs x18, scr_el3
    stp x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
    str x18, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

    /* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
    bfi x7, x18, #0, #1

    mov sp, x12

    /* Get the unique owning entity number */
    ubfx    x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
    ubfx    x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
    orr x16, x16, x15, lsl #FUNCID_OEN_WIDTH

    /* Load descriptor index from array of indices */
    adrp    x14, rt_svc_descs_indices
    add x14, x14, :lo12:rt_svc_descs_indices
    ldrb    w15, [x14, x16]

    /* Any index greater than 127 is invalid. Check bit 7. */
    tbnz    w15, 7, smc_unknown

    /*
     * Get the descriptor using the index
     * x11 = (base + off), w15 = index
     *
     * handler = (base + off) + (index << log2(size))
     */
    adr x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
    lsl w10, w15, #RT_SVC_SIZE_LOG2
    ldr x15, [x11, w10, uxtw]

    /*
     * Call the Secure Monitor Call handler and then drop directly into
     * el3_exit() which will program any remaining architectural state
     * prior to issuing the ERET to the desired lower EL.
     */
#if DEBUG
    cbz x15, rt_svc_fw_critical_error
#endif
    blr x15

    b   el3_exit

smc_unknown:
    /*
     * Unknown SMC call. Populate return value with SMC_UNK and call
     * el3_exit() which will restore the remaining architectural state
     * i.e., SYS, GP and PAuth registers (if any) prior to issuing the ERET
     * to the desired lower EL.
     */
    mov x0, #SMC_UNK
    str x0, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
    b   el3_exit

smc_prohibited:
    restore_ptw_el1_sys_regs
    ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
    ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
    mov x0, #SMC_UNK
    exception_return

#if DEBUG
rt_svc_fw_critical_error:
    /* Switch to SP_ELx */
    msr spsel, #MODE_SP_ELX
    no_ret  report_unhandled_exception
#endif
endfunc smc_handler

(1) Prototype of the runtime service SMC handler function and how to register one

/* Prototype for runtime service initializing function */
typedef int32_t (*rt_svc_init_t)(void);

/*
 * Prototype for runtime service SMC handler function. x0 (SMC Function ID) to
 * x4 are as passed by the caller. Rest of the arguments to SMC and the context
 * can be accessed using the handle pointer. The cookie parameter is reserved
 * for future use
 */
typedef uintptr_t (*rt_svc_handle_t)(uint32_t smc_fid,
				  u_register_t x1,
				  u_register_t x2,
				  u_register_t x3,
				  u_register_t x4,
				  void *cookie,
				  void *handle,
				  u_register_t flags);
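
A hypothetical minimal handler matching this prototype might look as follows (a sketch only: the function ID, return values and local typedef are made up for illustration, and real services normally use TF-A's return helpers rather than poking the context directly):

#include <stdint.h>

typedef uint64_t u_register_t;           /* assumption: TF-A defines this in its own headers */

#define DEMO_SVC_GET_VERSION  0x8400ff00U /* made-up function ID for illustration */
#define SMC_UNK               ((uint64_t)-1)

static uintptr_t demo_svc_smc_handler(uint32_t smc_fid,
                                      u_register_t x1, u_register_t x2,
                                      u_register_t x3, u_register_t x4,
                                      void *cookie, void *handle,
                                      u_register_t flags)
{
        cpu_context_t *ctx = handle;     /* SP_EL3, i.e. the saved caller context */

        /* Bit 0 of 'flags' was filled from SCR_EL3.NS by smc_handler64, so it
         * tells the handler whether the caller came from the non-secure world. */
        uint64_t retval = (smc_fid == DEMO_SVC_GET_VERSION) ? 0x10000U : SMC_UNK;

        /* Saved X0 is the first slot of gpregs_ctx (CTX_GPREG_X0). */
        ctx->gpregs_ctx.ctx_regs[0] = retval;

        return (uintptr_t)handle;
}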

typedef struct rt_svc_desc {
	uint8_t start_oen;
	uint8_t end_oen;
	uint8_t call_type;
	const char *name;
	rt_svc_init_t init;
	rt_svc_handle_t handle;
} rt_svc_desc_t;

#define DECLARE_RT_SVC(_name, _start, _end, _type, _setup, _smch)	\
	static const rt_svc_desc_t __svc_desc_ ## _name			\
		__section("rt_svc_descs") __used = {			\
			.start_oen = (_start),				\
			.end_oen = (_end),				\
			.call_type = (_type),				\
			.name = #_name,					\
			.init = (_setup),				\
			.handle = (_smch)				\
		}

For example, an SMC service can be registered as follows:

DECLARE_RT_SVC(
	std_svc,

	OEN_STD_START,
	OEN_STD_END,
	SMC_TYPE_FAST,
	std_svc_setup,
	std_svc_smc_handler
);
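
The descriptors created by DECLARE_RT_SVC land in the rt_svc_descs linker section, between __RT_SVC_DESCS_START__ and __RT_SVC_DESCS_END__. During BL31 initialization, runtime_svc_init() validates each descriptor and fills rt_svc_descs_indices, so every unique owning entity number maps to a descriptor index. The lookup that smc_handler64 performs (steps ② and ③ above) then looks roughly like this in C (a sketch with error handling trimmed):

#include <stddef.h>
#include <stdint.h>

/* SMC function ID field layout (SMC Calling Convention). */
#define FUNCID_TYPE_SHIFT   31U
#define FUNCID_TYPE_WIDTH   1U
#define FUNCID_OEN_SHIFT    24U
#define FUNCID_OEN_WIDTH    6U

#define MAX_RT_SVCS         128U    /* 2^(OEN width + type width) possible entries */

/* Filled at boot from the DECLARE_RT_SVC() descriptors. */
extern uint8_t rt_svc_descs_indices[MAX_RT_SVCS];
extern rt_svc_desc_t __RT_SVC_DESCS_START__[];

static rt_svc_handle_t lookup_smc_handler(uint32_t smc_fid)
{
        uint32_t oen  = (smc_fid >> FUNCID_OEN_SHIFT) & ((1U << FUNCID_OEN_WIDTH) - 1U);
        uint32_t type = (smc_fid >> FUNCID_TYPE_SHIFT) & ((1U << FUNCID_TYPE_WIDTH) - 1U);
        uint32_t unique_oen = oen | (type << FUNCID_OEN_WIDTH);

        uint8_t idx = rt_svc_descs_indices[unique_oen];
        if ((idx & 0x80U) != 0U)        /* bit 7 set marks an unbound OEN */
                return NULL;            /* smc_handler64 jumps to smc_unknown */

        return __RT_SVC_DESCS_START__[idx].handle;
}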

(2) el3_exit

① restore_gp_pmcr_pauth_regs

② exception_return (eret)

/* ------------------------------------------------------------------
 * This routine assumes that the SP_EL3 is pointing to a valid
 * context structure from where the gp regs and other special
 * registers can be retrieved.
 * ------------------------------------------------------------------
 */
func el3_exit
#if ENABLE_ASSERTIONS
    /* el3_exit assumes SP_EL0 on entry */
    mrs x17, spsel
    cmp x17, #MODE_SP_EL0
    ASM_ASSERT(eq)
#endif


    /* ----------------------------------------------------------
     * Save the current SP_EL0 i.e. the EL3 runtime stack which
     * will be used for handling the next SMC.
     * Then switch to SP_EL3.
     * ----------------------------------------------------------
     */
    mov x17, sp
    msr spsel, #MODE_SP_ELX
    str x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

    /* ----------------------------------------------------------
     * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
     * ----------------------------------------------------------
     */
    ldr x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
    ldp x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
    msr scr_el3, x18
    msr spsr_el3, x16
    msr elr_el3, x17

#if IMAGE_BL31
    /* ----------------------------------------------------------
     * Restore CPTR_EL3.
     * ZCR is only restored if SVE is supported and enabled.
     * Synchronization is required before zcr_el3 is addressed.
     * ----------------------------------------------------------
     */
    ldp x19, x20, [sp, #CTX_EL3STATE_OFFSET + CTX_CPTR_EL3]
    msr cptr_el3, x19

    ands    x19, x19, #CPTR_EZ_BIT
    beq sve_not_enabled

    isb
    msr S3_6_C1_C2_0, x20 /* zcr_el3 */
sve_not_enabled:
#endif

#if IMAGE_BL31 && DYNAMIC_WORKAROUND_CVE_2018_3639
    /* ----------------------------------------------------------
     * Restore mitigation state as it was on entry to EL3
     * ----------------------------------------------------------
     */
    ldr x17, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]
    cbz x17, 1f
    blr x17
1:
#endif
    restore_ptw_el1_sys_regs

    /* ----------------------------------------------------------
     * Restore general purpose (including x30), PMCR_EL0 and
     * ARMv8.3-PAuth registers.
     * Exit EL3 via ERET to a lower exception level.
     * ----------------------------------------------------------
     */
    bl  restore_gp_pmcr_pauth_regs
    ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

#if IMAGE_BL31 && RAS_EXTENSION
    /* ----------------------------------------------------------
     * Issue Error Synchronization Barrier to synchronize SErrors
     * before exiting EL3. We're running with EAs unmasked, so
     * any synchronized errors would be taken immediately;
     * therefore no need to inspect DISR_EL1 register.
     * ----------------------------------------------------------
     */
    esb
#else
    dsb sy
#endif
#ifdef IMAGE_BL31
    str xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
#endif
    exception_return

endfunc el3_exit