OpenSBI Boot Flow Analysis (Part 2)

SBI Initialization

In the previous part, OpenSBI finished the assembly stage of the boot. Execution then reaches the sbi_init function, which initializes the OpenSBI library for the current HART and jumps to the next boot stage.

  • First check whether the current HART ID is valid; if not, the HART hangs
  • Next check that the next privilege mode is M, S, or U; any other mode causes the HART to hang
  • Then check whether the current HART is allowed to perform the cold boot of the next stage; if so, atomically exchange coldboot_lottery from 0 to 1 and set coldboot to true. Only the first HART to reach this point wins the lottery and becomes the coldboot HART
  • Then call sbi_platform_nascent_init to perform platform-specific very early (nascent) initialization
  • Finally, if coldboot is true, run init_coldboot for cold-boot initialization; otherwise run init_warmboot for warm-boot initialization
/**
 * Initialize OpenSBI library for current HART and jump to next
 * booting stage.
 *
 * The function expects following:
 * 1. The 'mscratch' CSR is pointing to sbi_scratch of current HART
 * 2. Stack pointer (SP) is setup for current HART
 * 3. Interrupts are disabled in MSTATUS CSR
 * 4. All interrupts are disabled in MIE CSR
 *
 * @param scratch pointer to sbi_scratch of current HART
 */
void __noreturn sbi_init(struct sbi_scratch *scratch)
{
	u32 i, h;
	bool hartid_valid		= false;
	bool next_mode_supported	= false;
	bool coldboot			= false;
	u32 hartid			= current_hartid();
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);

	for (i = 0; i < plat->hart_count; i++) {
		h = (plat->hart_index2id) ? plat->hart_index2id[i] : i;
		if (h == hartid)
			hartid_valid = true;
	}
	if (!hartid_valid)
		sbi_hart_hang();

	switch (scratch->next_mode) {
	case PRV_M:
		next_mode_supported = true;
		break;
	case PRV_S:
		if (misa_extension('S'))
			next_mode_supported = true;
		break;
	case PRV_U:
		if (misa_extension('U'))
			next_mode_supported = true;
		break;
	default:
		sbi_hart_hang();
	}

	/*
	 * Only the HART supporting privilege mode specified in the
	 * scratch->next_mode should be allowed to become the coldboot
	 * HART because the coldboot HART will be directly jumping to
	 * the next booting stage.
	 *
	 * We use a lottery mechanism to select coldboot HART among
	 * HARTs which satisfy above condition.
	 */

	if (sbi_platform_cold_boot_allowed(plat, hartid)) {
		if (next_mode_supported &&
		    atomic_xchg(&coldboot_lottery, 1) == 0)
			coldboot = true;
	}

	/*
	 * Do platform specific nascent (very early) initialization so
	 * that platform can initialize platform specific per-HART CSRs
	 * or per-HART devices.
	 */
	if (sbi_platform_nascent_init(plat))
		sbi_hart_hang();

	if (coldboot)
		init_coldboot(scratch, hartid);
	else
		init_warmboot(scratch, hartid);
}
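The lottery above works because atomic_xchg returns the previous value of coldboot_lottery: the first HART to perform the exchange reads back 0 and wins, every later HART reads back 1. A standalone sketch of the same idea, written with C11 atomics rather than OpenSBI's own atomic helpers, purely for illustration:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int lottery = 0;

/* Each HART calls this once; only the first caller observes the old value 0
 * and therefore becomes the coldboot HART. */
static bool try_win_coldboot(void)
{
	return atomic_exchange(&lottery, 1) == 0;
}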

Nascent (Early) Initialization

The very early initialization of the current HART is performed by calling sbi_platform_nascent_init, shown below:

/**
 * Nascent (very early) initialization for current HART
 *
 * NOTE: This function can be used to do very early initialization of
 * platform specific per-HART CSRs and devices.
 *
 * @param plat pointer to struct sbi_platform
 *
 * @return 0 on success and negative error code on failure
 */
static inline int sbi_platform_nascent_init(const struct sbi_platform *plat)
{
	if (plat && sbi_platform_ops(plat)->nascent_init)
		return sbi_platform_ops(plat)->nascent_init();
	return 0;
}

For the generic platform, the platform-specific operation callbacks are defined in platform/generic/platform.c; all subsequent platform-specific initialization steps go through these callbacks:

const struct sbi_platform_operations platform_ops = {
	.cold_boot_allowed	= generic_cold_boot_allowed,
	.nascent_init		= generic_nascent_init,
	.early_init		= generic_early_init,
	.final_init		= generic_final_init,
	.early_exit		= generic_early_exit,
	.final_exit		= generic_final_exit,
	.extensions_init	= generic_extensions_init,
	.domains_init		= generic_domains_init,
	.console_init		= generic_console_init,
	.irqchip_init		= fdt_irqchip_init,
	.irqchip_exit		= fdt_irqchip_exit,
	.ipi_init		= fdt_ipi_init,
	.ipi_exit		= fdt_ipi_exit,
	.pmu_init		= generic_pmu_init,
	.pmu_xlate_to_mhpmevent = generic_pmu_xlate_to_mhpmevent,
	.get_tlbr_flush_limit	= generic_tlbr_flush_limit,
	.get_tlb_num_entries	= generic_tlb_num_entries,
	.timer_init		= fdt_timer_init,
	.timer_exit		= fdt_timer_exit,
	.vendor_ext_check	= generic_vendor_ext_check,
	.vendor_ext_provider	= generic_vendor_ext_provider,
};

struct sbi_platform platform = {
	.opensbi_version	= OPENSBI_VERSION,
	.platform_version	=
		SBI_PLATFORM_VERSION(CONFIG_PLATFORM_GENERIC_MAJOR_VER,
				     CONFIG_PLATFORM_GENERIC_MINOR_VER),
	.name			= CONFIG_PLATFORM_GENERIC_NAME,
	.features		= SBI_PLATFORM_DEFAULT_FEATURES,
	.hart_count		= SBI_HARTMASK_MAX_BITS,
	.hart_index2id		= generic_hart_index2id,
	.hart_stack_size	= SBI_PLATFORM_DEFAULT_HART_STACK_SIZE,
	.heap_size		= SBI_PLATFORM_DEFAULT_HEAP_SIZE(0),
	.platform_ops_addr	= (unsigned long)&platform_ops
};

generic_nascent_init only checks whether an M-level IMSIC (incoming MSI controller) is present; if so, it initializes the IMSIC-related local interrupt handling:

static int generic_nascent_init(void)
{
	if (platform_has_mlevel_imsic)
		imsic_local_irqchip_init();
	return 0;
}

Cold-Boot Initialization

The cold-boot path init_coldboot does quite a lot; the walkthrough below covers what it accomplishes at a high level without digging into every implementation detail. Its warm-boot counterpart init_warm_startup is listed right after it for comparison:

static void __noreturn init_coldboot(struct sbi_scratch *scratch, u32 hartid)
{
	int rc;
	unsigned long *count;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);

	/* Note: This has to be first thing in coldboot init sequence */
	rc = sbi_scratch_init(scratch);
	if (rc)
		sbi_hart_hang();

	/* Note: This has to be second thing in coldboot init sequence */
	rc = sbi_heap_init(scratch);
	if (rc)
		sbi_hart_hang();

	/* Note: This has to be the third thing in coldboot init sequence */
	rc = sbi_domain_init(scratch, hartid);
	if (rc)
		sbi_hart_hang();

	entry_count_offset = sbi_scratch_alloc_offset(__SIZEOF_POINTER__);
	if (!entry_count_offset)
		sbi_hart_hang();

	init_count_offset = sbi_scratch_alloc_offset(__SIZEOF_POINTER__);
	if (!init_count_offset)
		sbi_hart_hang();

	count = sbi_scratch_offset_ptr(scratch, entry_count_offset);
	(*count)++;

	rc = sbi_hsm_init(scratch, hartid, true);
	if (rc)
		sbi_hart_hang();

	/*
	 * All non-coldboot HARTs do HSM initialization (i.e. enter HSM state
	 * machine) at the start of the warmboot path so it is wasteful to
	 * have these HARTs busy spin in wait_for_coldboot() until coldboot
	 * path is completed.
	 */
	wake_coldboot_harts(scratch, hartid);

	rc = sbi_platform_early_init(plat, true);
	if (rc)
		sbi_hart_hang();

	rc = sbi_hart_init(scratch, true);
	if (rc)
		sbi_hart_hang();

	rc = sbi_console_init(scratch);
	if (rc)
		sbi_hart_hang();

	rc = sbi_sse_init(scratch, true);
	if (rc) {
		sbi_printf("%s: sse init failed (error %d)\n", __func__, rc);
		sbi_hart_hang();
	}

	rc = sbi_pmu_init(scratch, true);
	if (rc) {
		sbi_printf("%s: pmu init failed (error %d)\n",
			   __func__, rc);
		sbi_hart_hang();
	}

	rc = sbi_dbtr_init(scratch, true);
	if (rc)
		sbi_hart_hang();

	sbi_boot_print_banner(scratch);

	rc = sbi_irqchip_init(scratch, true);
	if (rc) {
		sbi_printf("%s: irqchip init failed (error %d)\n",
			   __func__, rc);
		sbi_hart_hang();
	}

	rc = sbi_ipi_init(scratch, true);
	if (rc) {
		sbi_printf("%s: ipi init failed (error %d)\n", __func__, rc);
		sbi_hart_hang();
	}

	rc = sbi_tlb_init(scratch, true);
	if (rc) {
		sbi_printf("%s: tlb init failed (error %d)\n", __func__, rc);
		sbi_hart_hang();
	}

	rc = sbi_timer_init(scratch, true);
	if (rc) {
		sbi_printf("%s: timer init failed (error %d)\n", __func__, rc);
		sbi_hart_hang();
	}

	rc = sbi_fwft_init(scratch, true);
	if (rc) {
		sbi_printf("%s: fwft init failed (error %d)\n", __func__, rc);
		sbi_hart_hang();
	}

	/*
	 * Note: Finalize domains after HSM initialization so that we
	 * can startup non-root domains.
	 * Note: Finalize domains before HART PMP configuration so
	 * that we use correct domain for configuring PMP.
	 */
	rc = sbi_domain_finalize(scratch, hartid);
	if (rc) {
		sbi_printf("%s: domain finalize failed (error %d)\n",
			   __func__, rc);
		sbi_hart_hang();
	}

	/*
	 * Note: Platform final initialization should be after finalizing
	 * domains so that it sees correct domain assignment and PMP
	 * configuration for FDT fixups.
	 */
	rc = sbi_platform_final_init(plat, true);
	if (rc) {
		sbi_printf("%s: platform final init failed (error %d)\n",
			   __func__, rc);
		sbi_hart_hang();
	}

	/*
	 * Note: Ecall initialization should be after platform final
	 * initialization so that all available platform devices are
	 * already registered.
	 */
	rc = sbi_ecall_init();
	if (rc) {
		sbi_printf("%s: ecall init failed (error %d)\n", __func__, rc);
		sbi_hart_hang();
	}

	sbi_boot_print_general(scratch);

	sbi_boot_print_domains(scratch);

	sbi_boot_print_hart(scratch, hartid);

	run_all_tests();

	/*
	 * Configure PMP at last because if SMEPMP is detected,
	 * M-mode access to the S/U space will be rescinded.
	 */
	rc = sbi_hart_pmp_configure(scratch);
	if (rc) {
		sbi_printf("%s: PMP configure failed (error %d)\n",
			   __func__, rc);
		sbi_hart_hang();
	}

	count = sbi_scratch_offset_ptr(scratch, init_count_offset);
	(*count)++;

	sbi_hsm_hart_start_finish(scratch, hartid);
}

static void __noreturn init_warm_startup(struct sbi_scratch *scratch,
					 u32 hartid)
{
	int rc;
	unsigned long *count;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);

	if (!entry_count_offset || !init_count_offset)
		sbi_hart_hang();

	count = sbi_scratch_offset_ptr(scratch, entry_count_offset);
	(*count)++;

	/* Note: This has to be first thing in warmboot init sequence */
	rc = sbi_hsm_init(scratch, hartid, false);
	if (rc)
		sbi_hart_hang();

	rc = sbi_platform_early_init(plat, false);
	if (rc)
		sbi_hart_hang();

	rc = sbi_hart_init(scratch, false);
	if (rc)
		sbi_hart_hang();

	rc = sbi_sse_init(scratch, false);
	if (rc)
		sbi_hart_hang();

	rc = sbi_pmu_init(scratch, false);
	if (rc)
		sbi_hart_hang();

	rc = sbi_dbtr_init(scratch, false);
	if (rc)
		sbi_hart_hang();

	rc = sbi_irqchip_init(scratch, false);
	if (rc)
		sbi_hart_hang();

	rc = sbi_ipi_init(scratch, false);
	if (rc)
		sbi_hart_hang();

	rc = sbi_tlb_init(scratch, false);
	if (rc)
		sbi_hart_hang();

	rc = sbi_timer_init(scratch, false);
	if (rc)
		sbi_hart_hang();

	rc = sbi_fwft_init(scratch, false);
	if (rc)
		sbi_hart_hang();

	rc = sbi_platform_final_init(plat, false);
	if (rc)
		sbi_hart_hang();

	/*
	 * Configure PMP at last because if SMEPMP is detected,
	 * M-mode access to the S/U space will be rescinded.
	 */
	rc = sbi_hart_pmp_configure(scratch);
	if (rc)
		sbi_hart_hang();

	count = sbi_scratch_offset_ptr(scratch, init_count_offset);
	(*count)++;

	sbi_hsm_hart_start_finish(scratch, hartid);
}

sbi_scratch_init

sbi_scratch_init initializes the scratch bookkeeping: it computes the base address of each HART's scratch area and stores it in the hartindex_to_scratch_table array, so the scratch base of any HART can later be looked up by HART index.

int sbi_scratch_init(struct sbi_scratch *scratch)
{
	u32 i, h;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);

	for (i = 0; i < plat->hart_count; i++) {
		h = (plat->hart_index2id) ? plat->hart_index2id[i] : i;
		hartindex_to_hartid_table[i] = h;
		hartindex_to_scratch_table[i] =
			((hartid2scratch)scratch->hartid_to_scratch)(h, i);
	}

	last_hartindex_having_scratch = plat->hart_count - 1;

	return 0;
}

scratch->hartid_to_scratch is implemented in firmware/fw_base.S as _hartid_to_scratch; it computes a HART's scratch base address from its HART index:

	.section .entry, "ax", %progbits
	.align 3
	.globl _hartid_to_scratch
_hartid_to_scratch:
	/*
	 * a0 -> HART ID (passed by caller)
	 * a1 -> HART Index (passed by caller)
	 * t0 -> HART Stack Size
	 * t1 -> HART Stack End
	 * t2 -> Temporary
	 */
	lla	t2, platform
#if __riscv_xlen > 32
	lwu	t0, SBI_PLATFORM_HART_STACK_SIZE_OFFSET(t2)
	lwu	t2, SBI_PLATFORM_HART_COUNT_OFFSET(t2)
#else
	lw	t0, SBI_PLATFORM_HART_STACK_SIZE_OFFSET(t2)
	lw	t2, SBI_PLATFORM_HART_COUNT_OFFSET(t2)
#endif
	sub	t2, t2, a1
	mul	t2, t2, t0
	lla	t1, _fw_end
	add	t1, t1, t2
	li	t2, SBI_SCRATCH_SIZE
	sub	a0, t1, t2
	ret
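Expressed in C, the address computation done by this assembly is roughly the following (a sketch only; the parameters stand for the values the assembly loads from the platform structure and the _fw_end symbol):

/* Sketch of the math in _hartid_to_scratch: the per-HART stacks are laid
 * out back-to-back above _fw_end, and each HART's scratch area occupies
 * the top SBI_SCRATCH_SIZE bytes of its own stack. */
unsigned long hartindex_to_scratch_addr(unsigned long fw_end,
					unsigned long hart_count,
					unsigned long hart_index,
					unsigned long hart_stack_size,
					unsigned long scratch_size)
{
	unsigned long stack_end;

	stack_end = fw_end + (hart_count - hart_index) * hart_stack_size;
	return stack_end - scratch_size;	/* scratch sits at the stack top */
}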

sbi_heap_init

sbi_heap_init initializes the heap region; OpenSBI implements a simple heap allocator on top of it.

int sbi_heap_init(struct sbi_scratch *scratch)
{
	unsigned long i;
	struct heap_node *n;

	/* Sanity checks on heap offset and size */
	if (!scratch->fw_heap_size ||
	    (scratch->fw_heap_size & (HEAP_BASE_ALIGN - 1)) ||
	    (scratch->fw_heap_offset < scratch->fw_rw_offset) ||
	    (scratch->fw_size < (scratch->fw_heap_offset + scratch->fw_heap_size)) ||
	    (scratch->fw_heap_offset & (HEAP_BASE_ALIGN - 1)))
		return SBI_EINVAL;

	/* Initialize heap control */
	SPIN_LOCK_INIT(hpctrl.lock);
	hpctrl.base = scratch->fw_start + scratch->fw_heap_offset;
	hpctrl.size = scratch->fw_heap_size;
	hpctrl.hkbase = hpctrl.base;
	hpctrl.hksize = hpctrl.size / HEAP_HOUSEKEEPING_FACTOR;
	hpctrl.hksize &= ~((unsigned long)HEAP_BASE_ALIGN - 1);
	SBI_INIT_LIST_HEAD(&hpctrl.free_node_list);
	SBI_INIT_LIST_HEAD(&hpctrl.free_space_list);
	SBI_INIT_LIST_HEAD(&hpctrl.used_space_list);

	/* Prepare free node list */
	for (i = 0; i < (hpctrl.hksize / sizeof(*n)); i++) {
		n = (struct heap_node *)(hpctrl.hkbase + (sizeof(*n) * i));
		SBI_INIT_LIST_HEAD(&n->head);
		n->addr = n->size = 0;
		sbi_list_add_tail(&n->head, &hpctrl.free_node_list);
	}

	/* Prepare free space list */
	n = sbi_list_first_entry(&hpctrl.free_node_list,
				 struct heap_node, head);
	sbi_list_del(&n->head);
	n->addr = hpctrl.hkbase + hpctrl.hksize;
	n->size = hpctrl.size - hpctrl.hksize;
	sbi_list_add_tail(&n->head, &hpctrl.free_space_list);

	return 0;
}
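As a worked example of the split computed above (the numbers are assumptions chosen for illustration, not values read from a real platform):

#include <stdio.h>

/* Illustrative heap carve-up, assuming a 32 KiB heap, a housekeeping
 * factor of 16 and a base alignment of 1024 bytes. */
int main(void)
{
	unsigned long heap_size = 0x8000;
	unsigned long hksize;

	hksize = heap_size / 16;	/* assumed HEAP_HOUSEKEEPING_FACTOR */
	hksize &= ~(1024UL - 1);	/* assumed HEAP_BASE_ALIGN */

	printf("housekeeping area: 0x%lx bytes\n", hksize);		/* 0x800  */
	printf("allocatable space: 0x%lx bytes\n", heap_size - hksize);	/* 0x7800 */
	return 0;
}

The housekeeping area is then carved into struct heap_node descriptors for free_node_list, while the remaining space becomes the single initial entry on free_space_list.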

sbi_domain_init

sbi_domain_init initializes domain support. During boot OpenSBI always builds a ROOT domain; the main work here is setting up the root domain's three memory regions:

  • Firmware code: readable and executable in M-mode
  • Firmware read/write data: readable and writable in M-mode
  • All remaining memory: readable, writable, and executable in S/U-mode
int sbi_domain_init(struct sbi_scratch *scratch, u32 cold_hartid)
{
	u32 i;
	int rc;
	struct sbi_hartmask *root_hmask;
	struct sbi_domain_memregion *root_memregs;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);

	if (scratch->fw_rw_offset == 0 ||
	    (scratch->fw_rw_offset & (scratch->fw_rw_offset - 1)) != 0) {
		sbi_printf("%s: fw_rw_offset is not a power of 2 (0x%lx)\n",
			   __func__, scratch->fw_rw_offset);
		return SBI_EINVAL;
	}

	if ((scratch->fw_start & (scratch->fw_rw_offset - 1)) != 0) {
		sbi_printf("%s: fw_start and fw_rw_offset not aligned\n",
			   __func__);
		return SBI_EINVAL;
	}

	domain_hart_ptr_offset = sbi_scratch_alloc_type_offset(void *);
	if (!domain_hart_ptr_offset)
		return SBI_ENOMEM;

	root_memregs = sbi_calloc(sizeof(*root_memregs), ROOT_REGION_MAX + 1);
	if (!root_memregs) {
		sbi_printf("%s: no memory for root regions\n", __func__);
		rc = SBI_ENOMEM;
		goto fail_free_domain_hart_ptr_offset;
	}
	root.regions = root_memregs;

	root_hmask = sbi_zalloc(sizeof(*root_hmask));
	if (!root_hmask) {
		sbi_printf("%s: no memory for root hartmask\n", __func__);
		rc = SBI_ENOMEM;
		goto fail_free_root_memregs;
	}
	root.possible_harts = root_hmask;

	/* Root domain firmware memory region */
	sbi_domain_memregion_init(scratch->fw_start, scratch->fw_rw_offset,
				  (SBI_DOMAIN_MEMREGION_M_READABLE |
				   SBI_DOMAIN_MEMREGION_M_EXECUTABLE),
				  &root_memregs[root_memregs_count++]);

	sbi_domain_memregion_init((scratch->fw_start + scratch->fw_rw_offset),
				  (scratch->fw_size - scratch->fw_rw_offset),
				  (SBI_DOMAIN_MEMREGION_M_READABLE |
				   SBI_DOMAIN_MEMREGION_M_WRITABLE),
				  &root_memregs[root_memregs_count++]);

	root.fw_region_inited = true;

	/*
	 * Allow SU RWX on rest of the memory region. Since pmp entries
	 * have implicit priority on index, previous entries will
	 * deny access to SU on M-mode region. Also, M-mode will not
	 * have access to SU region while previous entries will allow
	 * access to M-mode regions.
	 */
	sbi_domain_memregion_init(0, ~0UL,
				  (SBI_DOMAIN_MEMREGION_SU_READABLE |
				   SBI_DOMAIN_MEMREGION_SU_WRITABLE |
				   SBI_DOMAIN_MEMREGION_SU_EXECUTABLE),
				  &root_memregs[root_memregs_count++]);

	/* Root domain memory region end */
	root_memregs[root_memregs_count].order = 0;

	/* Root domain boot HART id is same as coldboot HART id */
	root.boot_hartid = cold_hartid;

	/* Root domain next booting stage details */
	root.next_arg1 = scratch->next_arg1;
	root.next_addr = scratch->next_addr;
	root.next_mode = scratch->next_mode;

	/* Root domain possible and assigned HARTs */
	for (i = 0; i < plat->hart_count; i++)
		sbi_hartmask_set_hartindex(i, root_hmask);

	/* Finally register the root domain */
	rc = sbi_domain_register(&root, root_hmask);
	if (rc)
		goto fail_free_root_hmask;

	return 0;

fail_free_root_hmask:
	sbi_free(root_hmask);
fail_free_root_memregs:
	sbi_free(root_memregs);
fail_free_domain_hart_ptr_offset:
	sbi_scratch_free_offset(domain_hart_ptr_offset);
	return rc;
}
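To make the three regions concrete, assume a hypothetical firmware at fw_start = 0x80000000 with fw_rw_offset = 0x100000 and fw_size = 0x200000 (purely illustrative values that satisfy the power-of-two and alignment checks above). The ranges handed to sbi_domain_memregion_init are then:

/* Illustrative root-domain regions (hypothetical layout, not a real board):
 *
 *   region 0: base 0x80000000, size 0x100000, M-mode R+X   (firmware text)
 *   region 1: base 0x80100000, size 0x100000, M-mode R+W   (firmware data/heap)
 *   region 2: base 0x00000000, size ~0UL,     S/U-mode RWX (everything else)
 *
 * As the source comment above notes, PMP entries are matched in index
 * order, so the two firmware regions take priority over the catch-all
 * region 2: S/U-mode is kept out of the firmware, and M-mode is kept out
 * of the S/U region.
 */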

sbi_hsm_init

Since v0.7, OpenSBI has supported the HSM (HART State Management) SBI extension, which lets S-mode software bring HARTs up in a well-defined order instead of relying on a randomly selected boot HART.

On the cold-boot path, the boot HART is placed in the start-pending state (SBI_HSM_STATE_START_PENDING) and every other HART in the stopped state (SBI_HSM_STATE_STOPPED); when called from the warm-boot path, the HART instead waits in a low-power wfi loop (sbi_hsm_hart_wait) until it is told to start.

int sbi_hsm_init(struct sbi_scratch *scratch, u32 hartid, bool cold_boot)
{
	u32 i;
	struct sbi_scratch *rscratch;
	struct sbi_hsm_data *hdata;

	if (cold_boot) {
		hart_data_offset = sbi_scratch_alloc_offset(sizeof(*hdata));
		if (!hart_data_offset)
			return SBI_ENOMEM;

		/* Initialize hart state data for every hart */
		for (i = 0; i <= sbi_scratch_last_hartindex(); i++) {
			rscratch = sbi_hartindex_to_scratch(i);
			if (!rscratch)
				continue;

			hdata = sbi_scratch_offset_ptr(rscratch,
						       hart_data_offset);
			ATOMIC_INIT(&hdata->state,
				    (sbi_hartindex_to_hartid(i) == hartid) ?
				    SBI_HSM_STATE_START_PENDING :
				    SBI_HSM_STATE_STOPPED);
			ATOMIC_INIT(&hdata->start_ticket, 0);
		}
	} else {
		sbi_hsm_hart_wait(scratch, hartid);
	}

	return 0;
}
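The state values set up here are later changed with compare-and-swap so that competing HARTs cannot race each other through the HSM state machine; a simplified model using C11 atomics (OpenSBI uses its own atomic_cmpxchg helper):

#include <stdatomic.h>
#include <stdbool.h>

/* Succeed only if the HART is currently in 'oldstate'; otherwise leave the
 * state untouched and report failure. */
static bool hsm_change_state(atomic_long *state, long oldstate, long newstate)
{
	/* on failure, 'oldstate' is overwritten with the value actually
	 * found, which this sketch simply discards */
	return atomic_compare_exchange_strong(state, &oldstate, newstate);
}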

wake_coldboot_harts

Since the non-coldboot HARTs perform their HSM initialization at the start of the warm-boot path anyway, there is no need to keep them spinning in wait_for_coldboot() until the whole cold-boot path finishes, so the coldboot HART marks coldboot as done right after HSM initialization.

static void wake_coldboot_harts(struct sbi_scratch *scratch, u32 hartid)
{
	/* Mark coldboot done */
	__smp_store_release(&coldboot_done, 1);
}
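The HARTs that lost the coldboot lottery sit in wait_for_coldboot() on the warm-boot path until this flag is published; a simplified sketch of that counterpart, assuming an acquire-side helper that mirrors the release used above:

/* Sketch only: spin until the coldboot HART publishes coldboot_done.
 * The real wait_for_coldboot() also relaxes the CPU while waiting. */
static void wait_for_coldboot_sketch(void)
{
	while (!__smp_load_acquire(&coldboot_done))
		;	/* busy-wait; power saving elided in this sketch */
}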

sbi_platform_early_init

sbi_platform_early_init performs platform-specific early initialization. For the generic platform, generic_early_init initializes the FDT-based reset drivers on cold boot and then forwards to the matched board's early_init override, if one exists.

/**
 * Early initialization for current HART
 *
 * @param plat pointer to struct sbi_platform
 * @param cold_boot whether cold boot (true) or warm_boot (false)
 *
 * @return 0 on success and negative error code on failure
 */
static inline int sbi_platform_early_init(const struct sbi_platform *plat,
					  bool cold_boot)
{
	if (plat && sbi_platform_ops(plat)->early_init)
		return sbi_platform_ops(plat)->early_init(cold_boot);
	return 0;
}

static int generic_early_init(bool cold_boot)
{
	if (cold_boot)
		fdt_reset_init();

	if (!generic_plat || !generic_plat->early_init)
		return 0;

	return generic_plat->early_init(cold_boot, generic_plat_match);
}

sbi_hart_init

sbi_hart_init initializes the current HART:

  • First clear the mip CSR to avoid any spurious external interrupts being seen by S-mode;
  • Then, on cold boot, allocate scratch space to hold the HART feature information;
  • Next call hart_detect_features to probe which extensions the HART supports, such as Sstc and Sscofpmf;
  • Finally call sbi_hart_reinit to initialize mstatus, configure the detected extensions and performance counters, initialize the floating-point unit, and set up interrupt and exception delegation.
int sbi_hart_init(struct sbi_scratch *scratch, bool cold_boot)
{
	int rc;

	/*
	 * Clear mip CSR before proceeding with init to avoid any spurious
	 * external interrupts in S-mode.
	 */
	csr_write(CSR_MIP, 0);

	if (cold_boot) {
		if (misa_extension('H'))
			sbi_hart_expected_trap = &__sbi_expected_trap_hext;

		hart_features_offset = sbi_scratch_alloc_offset(
					sizeof(struct sbi_hart_features));
		if (!hart_features_offset)
			return SBI_ENOMEM;
	}

	rc = hart_detect_features(scratch);
	if (rc)
		return rc;

	return sbi_hart_reinit(scratch);
}
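The misa_extension() checks seen in sbi_init() and in the cold-boot branch above boil down to testing a single bit of the misa CSR (bit 0 for 'A', bit 18 for 'S', and so on); a minimal sketch of the idea, not the real helper:

/* Sketch: does the misa CSR advertise single-letter extension 'ext'?
 * Bit 0 corresponds to 'A', bit 1 to 'B', ..., bit 25 to 'Z'. */
static int misa_has_extension(char ext)
{
	unsigned long misa = csr_read(CSR_MISA);

	return (misa >> (ext - 'A')) & 1UL;
}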

sbi_console_init

sbi_console_init initializes the boot console, which is a platform-defined device:

int sbi_console_init(struct sbi_scratch *scratch)
{
	int rc = sbi_platform_console_init(sbi_platform_ptr(scratch));

	/* console is not a necessary device */
	if (rc == SBI_ENODEV)
		return 0;

	return rc;
}

sbi_sse_init

sbi_sse_init initializes SSE (Supervisor Software Events). The SSE extension provides a mechanism for injecting software events: the SBI implementation can deliver events to S-mode software, and these events can preempt other traps and interrupts:

int sbi_sse_init(struct sbi_scratch *scratch, bool cold_boot)
{
	int ret;
	void *sse_inject_mem;
	struct sse_hart_state *shs;
	struct sbi_fifo *sse_inject_q;

	if (cold_boot) {
		sse_event_count_init();

		ret = sse_global_init();
		if (ret)
			return ret;

		shs_ptr_off = sbi_scratch_alloc_offset(sizeof(void *));
		if (!shs_ptr_off)
			return SBI_ENOMEM;

		sse_inject_fifo_off =
			sbi_scratch_alloc_offset(sizeof(*sse_inject_q));
		if (!sse_inject_fifo_off) {
			sbi_scratch_free_offset(shs_ptr_off);
			return SBI_ENOMEM;
		}

		sse_inject_fifo_mem_off = sbi_scratch_alloc_offset(
			EVENT_COUNT * sizeof(struct sse_ipi_inject_data));
		if (!sse_inject_fifo_mem_off) {
			sbi_scratch_free_offset(sse_inject_fifo_off);
			sbi_scratch_free_offset(shs_ptr_off);
			return SBI_ENOMEM;
		}

		ret = sbi_ipi_event_create(&sse_ipi_inject_ops);
		if (ret < 0) {
			sbi_scratch_free_offset(shs_ptr_off);
			return ret;
		}
		sse_ipi_inject_event = ret;
	}

	shs = sse_get_hart_state_ptr(scratch);
	if (!shs) {
		/* Allocate per hart state and local events at once */
		shs = sbi_zalloc(sizeof(*shs) + sizeof(struct sbi_sse_event) *
							local_event_count);
		if (!shs)
			return SBI_ENOMEM;

		shs->local_events = (struct sbi_sse_event *)(shs + 1);

		sse_set_hart_state_ptr(scratch, shs);
	}

	sse_local_init(shs);

	sse_inject_q = sbi_scratch_offset_ptr(scratch, sse_inject_fifo_off);
	sse_inject_mem =
		sbi_scratch_offset_ptr(scratch, sse_inject_fifo_mem_off);

	sbi_fifo_init(sse_inject_q, sse_inject_mem, EVENT_COUNT,
		      sizeof(struct sse_ipi_inject_data));

	return 0;
}

sbi_pmu_init

sbi_pmu_init initializes the PMU; it mainly allocates memory for the hardware event map and sets up the related counters:

int sbi_pmu_init(struct sbi_scratch *scratch, bool cold_boot)
{
	int hpm_count = sbi_fls(sbi_hart_mhpm_mask(scratch));
	struct sbi_pmu_hart_state *phs;
	const struct sbi_platform *plat;
	int rc;

	if (cold_boot) {
		hw_event_map = sbi_calloc(sizeof(*hw_event_map),
					  SBI_PMU_HW_EVENT_MAX);
		if (!hw_event_map)
			return SBI_ENOMEM;

		phs_ptr_offset = sbi_scratch_alloc_type_offset(void *);
		if (!phs_ptr_offset) {
			sbi_free(hw_event_map);
			return SBI_ENOMEM;
		}

		plat = sbi_platform_ptr(scratch);
		/* Initialize hw pmu events */
		rc = sbi_platform_pmu_init(plat);
		if (rc)
			sbi_dprintf("%s: platform pmu init failed "
				    "(error %d)\n", __func__, rc);

		/* mcycle & minstret is available always */
		if (!hpm_count)
			/* Only CY, TM & IR are implemented in the hw */
			num_hw_ctrs = 3;
		else
			num_hw_ctrs = hpm_count + 1;

		if (num_hw_ctrs > SBI_PMU_HW_CTR_MAX)
			return SBI_EINVAL;

		total_ctrs = num_hw_ctrs + SBI_PMU_FW_CTR_MAX;
	}

	sbi_sse_set_cb_ops(SBI_SSE_EVENT_LOCAL_PMU, &pmu_sse_cb_ops);

	phs = pmu_get_hart_state_ptr(scratch);
	if (!phs) {
		phs = sbi_zalloc(sizeof(*phs));
		if (!phs)
			return SBI_ENOMEM;
		phs->hartid = current_hartid();
		pmu_set_hart_state_ptr(scratch, phs);
	}

	pmu_reset_event_map(phs);

	/* First three counters are fixed by the priv spec and we enable it by default */
	phs->active_events[0] = (SBI_PMU_EVENT_TYPE_HW << SBI_PMU_EVENT_IDX_TYPE_OFFSET) |
				SBI_PMU_HW_CPU_CYCLES;
	phs->active_events[1] = SBI_PMU_EVENT_IDX_INVALID;
	phs->active_events[2] = (SBI_PMU_EVENT_TYPE_HW << SBI_PMU_EVENT_IDX_TYPE_OFFSET) |
				SBI_PMU_HW_INSTRUCTIONS;

	return 0;
}

sbi_dbtr_init

sbi_dbtr_init initializes debug trigger (DBTR) support, which relies on the Sdtrig extension.

int sbi_dbtr_init(struct sbi_scratch *scratch, bool coldboot)
{
	struct sbi_trap_info trap = {0};
	unsigned long tdata1;
	unsigned long val;
	int i;
	struct sbi_dbtr_hart_triggers_state *hart_state = NULL;

	if (!sbi_hart_has_extension(scratch, SBI_HART_EXT_SDTRIG))
		return SBI_SUCCESS;

	if (coldboot) {
		hart_state_ptr_offset = sbi_scratch_alloc_type_offset(void *);
		if (!hart_state_ptr_offset)
			return SBI_ENOMEM;
	}

	hart_state = dbtr_get_hart_state_ptr(scratch);
	if (!hart_state) {
		hart_state = sbi_zalloc(sizeof(*hart_state));
		if (!hart_state)
			return SBI_ENOMEM;
		hart_state->hartid = current_hartid();
		dbtr_set_hart_state_ptr(scratch, hart_state);
	}

	/* disable the shared memory */
	sbi_dbtr_disable_shmem(hart_state);

	/* Skip probing triggers if already probed */
	if (hart_state->probed)
		goto _probed;

	for (i = 0; i < RV_MAX_TRIGGERS; i++) {
		csr_write_allowed(CSR_TSELECT, (ulong)&trap, i);
		if (trap.cause)
			break;

		val = csr_read_allowed(CSR_TSELECT, (ulong)&trap);
		if (trap.cause)
			break;

		/*
		 * Read back tselect and check that it contains the
		 * written value
		 */
		if (val != i)
			break;

		val = csr_read_allowed(CSR_TINFO, (ulong)&trap);
		if (trap.cause) {
			/*
			 * If reading tinfo caused an exception, the
			 * debugger must read tdata1 to discover the
			 * type.
			 */
			tdata1 = csr_read_allowed(CSR_TDATA1,
						  (ulong)&trap);
			if (trap.cause)
				break;

			if (TDATA1_GET_TYPE(tdata1) == 0)
				break;

			sbi_trigger_init(INDEX_TO_TRIGGER(i),
					 BIT(TDATA1_GET_TYPE(tdata1)),
					 i);
			hart_state->total_trigs++;
		} else {
			if (val == 1)
				break;

			sbi_trigger_init(INDEX_TO_TRIGGER(i), val, i);
			hart_state->total_trigs++;
		}
	}

	hart_state->probed = 1;

 _probed:
	hart_state->available_trigs = hart_state->total_trigs;

	return SBI_SUCCESS;
}

sbi_boot_print_banner

sbi_boot_print_banner prints the OpenSBI banner:

#define BANNER                                              \
	"   ____                    _____ ____ _____\n"     \
	"  / __ \\                  / ____|  _ \\_   _|\n"  \
	" | |  | |_ __   ___ _ __ | (___ | |_) || |\n"      \
	" | |  | | '_ \\ / _ \\ '_ \\ \\___ \\|  _ < | |\n" \
	" | |__| | |_) |  __/ | | |____) | |_) || |_\n"     \
	"  \\____/| .__/ \\___|_| |_|_____/|____/_____|\n"  \
	"        | |\n"                                     \
	"        |_|\n\n"

static void sbi_boot_print_banner(struct sbi_scratch *scratch)
{
	if (scratch->options & SBI_SCRATCH_NO_BOOT_PRINTS)
		return;

#ifdef OPENSBI_VERSION_GIT
	sbi_printf("\nOpenSBI %s\n", OPENSBI_VERSION_GIT);
#else
	sbi_printf("\nOpenSBI v%d.%d\n", OPENSBI_VERSION_MAJOR,
		   OPENSBI_VERSION_MINOR);
#endif

#ifdef OPENSBI_BUILD_TIME_STAMP
	sbi_printf("Build time: %s\n", OPENSBI_BUILD_TIME_STAMP);
#endif

#ifdef OPENSBI_BUILD_COMPILER_VERSION
	sbi_printf("Build compiler: %s\n", OPENSBI_BUILD_COMPILER_VERSION);
#endif

	sbi_printf(BANNER);
}

sbi_irqchip_init

sbi_irqchip_init initializes the platform interrupt controller, and if a non-default external interrupt handler (ext_irqfn) has been installed, it enables machine external interrupts by setting the corresponding bit in the mie CSR:

int sbi_irqchip_init(struct sbi_scratch *scratch, bool cold_boot)
{
	int rc;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);

	rc = sbi_platform_irqchip_init(plat, cold_boot);
	if (rc)
		return rc;

	if (ext_irqfn != default_irqfn)
		csr_set(CSR_MIE, MIP_MEIP);

	return 0;
}

sbi_ipi_init

sbi_ipi_init initializes inter-processor interrupts (IPIs):

int sbi_ipi_init(struct sbi_scratch *scratch, bool cold_boot)
{
	int ret;
	struct sbi_ipi_data *ipi_data;

	if (cold_boot) {
		ipi_data_off = sbi_scratch_alloc_offset(sizeof(*ipi_data));
		if (!ipi_data_off)
			return SBI_ENOMEM;
		ret = sbi_ipi_event_create(&ipi_smode_ops);
		if (ret < 0)
			return ret;
		ipi_smode_event = ret;
		ret = sbi_ipi_event_create(&ipi_halt_ops);
		if (ret < 0)
			return ret;
		ipi_halt_event = ret;
	} else {
		if (!ipi_data_off)
			return SBI_ENOMEM;
		if (SBI_IPI_EVENT_MAX <= ipi_smode_event ||
		    SBI_IPI_EVENT_MAX <= ipi_halt_event)
			return SBI_ENOSPC;
	}

	ipi_data = sbi_scratch_offset_ptr(scratch, ipi_data_off);
	ipi_data->ipi_type = 0x00;

	/*
	 * Initialize platform IPI support. This will also clear any
	 * pending IPIs for current/calling HART.
	 */
	ret = sbi_platform_ipi_init(sbi_platform_ptr(scratch), cold_boot);
	if (ret)
		return ret;

	/* Enable software interrupts */
	csr_set(CSR_MIE, MIP_MSIP);

	return 0;
}
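For context on how a registered event is actually delivered: the sender sets the event's bit in the target HART's ipi_data->ipi_type and asks the platform IPI device to raise the machine software interrupt; the receiver then drains those bits and calls each event's .process callback. A rough sketch of the receiving side (helper names other than those appearing in the code above are approximations):

static void ipi_process_sketch(struct sbi_scratch *scratch, u32 hartindex)
{
	struct sbi_ipi_data *ipi_data =
		sbi_scratch_offset_ptr(scratch, ipi_data_off);
	unsigned long pending;
	unsigned int event = 0;

	sbi_ipi_raw_clear(hartindex);		/* ack MSIP at the IPI device */

	/* atomically claim all pending event bits for this HART */
	pending = atomic_raw_xchg_ulong(&ipi_data->ipi_type, 0);

	for (; pending; pending >>= 1, event++)
		if (pending & 1UL)
			dispatch_event(event);	/* illustrative: calls .process() */
}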

sbi_tlb_init

sbi_tlb_init performs TLB-related initialization, mainly setting up the data structures and configuration used for remote TLB flush requests:

int sbi_tlb_init(struct sbi_scratch *scratch, bool cold_boot)
{
	int ret;
	void *tlb_mem;
	atomic_t *tlb_sync;
	struct sbi_fifo *tlb_q;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);

	if (cold_boot) {
		tlb_sync_off = sbi_scratch_alloc_offset(sizeof(*tlb_sync));
		if (!tlb_sync_off)
			return SBI_ENOMEM;
		tlb_fifo_off = sbi_scratch_alloc_offset(sizeof(*tlb_q));
		if (!tlb_fifo_off) {
			sbi_scratch_free_offset(tlb_sync_off);
			return SBI_ENOMEM;
		}
		tlb_fifo_mem_off = sbi_scratch_alloc_offset(sizeof(tlb_mem));
		if (!tlb_fifo_mem_off) {
			sbi_scratch_free_offset(tlb_fifo_off);
			sbi_scratch_free_offset(tlb_sync_off);
			return SBI_ENOMEM;
		}
		ret = sbi_ipi_event_create(&tlb_ops);
		if (ret < 0) {
			sbi_scratch_free_offset(tlb_fifo_mem_off);
			sbi_scratch_free_offset(tlb_fifo_off);
			sbi_scratch_free_offset(tlb_sync_off);
			return ret;
		}
		tlb_event = ret;
		tlb_range_flush_limit = sbi_platform_tlbr_flush_limit(plat);
	} else {
		if (!tlb_sync_off ||
		    !tlb_fifo_off ||
		    !tlb_fifo_mem_off)
			return SBI_ENOMEM;
		if (SBI_IPI_EVENT_MAX <= tlb_event)
			return SBI_ENOSPC;
	}

	tlb_sync = sbi_scratch_offset_ptr(scratch, tlb_sync_off);
	tlb_q = sbi_scratch_offset_ptr(scratch, tlb_fifo_off);
	tlb_mem = sbi_scratch_read_type(scratch, void *, tlb_fifo_mem_off);
	if (!tlb_mem) {
		tlb_mem = sbi_malloc(
				sbi_platform_tlb_fifo_num_entries(plat) * SBI_TLB_INFO_SIZE);
		if (!tlb_mem)
			return SBI_ENOMEM;
		sbi_scratch_write_type(scratch, void *, tlb_fifo_mem_off, tlb_mem);
	}

	ATOMIC_INIT(tlb_sync, 0);

	sbi_fifo_init(tlb_q, tlb_mem,
		      sbi_platform_tlb_fifo_num_entries(plat), SBI_TLB_INFO_SIZE);

	return 0;
}

sbi_timer_init

sbi_timer_init initializes the timer. On cold boot it allocates scratch space to store the per-HART time delta, then calls the platform-defined sbi_platform_timer_init:

int sbi_timer_init(struct sbi_scratch *scratch, bool cold_boot)
{
	u64 *time_delta;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);

	if (cold_boot) {
		time_delta_off = sbi_scratch_alloc_offset(sizeof(*time_delta));
		if (!time_delta_off)
			return SBI_ENOMEM;

		if (sbi_hart_has_extension(scratch, SBI_HART_EXT_ZICNTR))
			get_time_val = get_ticks;
	} else {
		if (!time_delta_off)
			return SBI_ENOMEM;
	}

	time_delta = sbi_scratch_offset_ptr(scratch, time_delta_off);
	*time_delta = 0;

	return sbi_platform_timer_init(plat, cold_boot);
}

sbi_fwft_init

sbi_fwft_init initializes the Firmware Features (FWFT) extension state, which covers features such as delegating misaligned access exceptions:

int sbi_fwft_init(struct sbi_scratch *scratch, bool cold_boot)
{
	int i;
	struct fwft_hart_state *fhs;

	if (cold_boot) {
		fwft_ptr_offset = sbi_scratch_alloc_type_offset(void *);
		if (!fwft_ptr_offset)
			return SBI_ENOMEM;
	}

	fhs = fwft_get_hart_state_ptr(scratch);
	if (!fhs) {
		fhs = sbi_zalloc(sizeof(fhs) + array_size(features) * sizeof(struct fwft_config));
		if (!fhs)
			return SBI_ENOMEM;

		fhs->config_count = array_size(features);
		for (i = 0; i < array_size(features); i++)
			fhs->configs[i].feature = &features[i];

		fwft_set_hart_state_ptr(scratch, fhs);
	}

	return 0;
}

sbi_domain_finalize

sbi_domain_finalize initializes and starts all domains:

  • Call sbi_platform_domains_init to initialize and populate the domains for the platform
  • Then iterate over every domain with sbi_domain_for_each and start each domain's boot HART
  • Finally mark domain finalization as complete, after which the root domain's regions can no longer be changed
int sbi_domain_finalize(struct sbi_scratch *scratch, u32 cold_hartid)
{
	int rc;
	u32 i, dhart;
	struct sbi_domain *dom;
	const struct sbi_platform *plat = sbi_platform_ptr(scratch);

	/* Initialize and populate domains for the platform */
	rc = sbi_platform_domains_init(plat);
	if (rc) {
		sbi_printf("%s: platform domains_init() failed (error %d)\n",
			   __func__, rc);
		return rc;
	}

	/* Startup boot HART of domains */
	sbi_domain_for_each(i, dom) {
		/* Domain boot HART index */
		dhart = sbi_hartid_to_hartindex(dom->boot_hartid);

		/* Ignore of boot HART is off limits */
		if (!sbi_hartindex_valid(dhart))
			continue;

		/* Ignore if boot HART not possible for this domain */
		if (!sbi_hartmask_test_hartindex(dhart, dom->possible_harts))
			continue;

		/* Ignore if boot HART assigned different domain */
		if (sbi_hartindex_to_domain(dhart) != dom)
			continue;

		/* Ignore if boot HART is not part of the assigned HARTs */
		spin_lock(&dom->assigned_harts_lock);
		rc = sbi_hartmask_test_hartindex(dhart, &dom->assigned_harts);
		spin_unlock(&dom->assigned_harts_lock);
		if (!rc)
			continue;

		/* Startup boot HART of domain */
		if (dom->boot_hartid == cold_hartid) {
			scratch->next_addr = dom->next_addr;
			scratch->next_mode = dom->next_mode;
			scratch->next_arg1 = dom->next_arg1;
		} else {
			rc = sbi_hsm_hart_start(scratch, NULL,
						dom->boot_hartid,
						dom->next_addr,
						dom->next_mode,
						dom->next_arg1);
			if (rc) {
				sbi_printf("%s: failed to start boot HART %d"
					   " for %s (error %d)\n", __func__,
					   dom->boot_hartid, dom->name, rc);
				return rc;
			}
		}
	}

	/*
	 * Set the finalized flag so that the root domain
	 * regions can't be changed.
	 */
	domain_finalized = true;

	return 0;
}

sbi_platform_final_init

sbi_platform_final_init invokes the platform-defined final initialization:

/**
 * Final initialization for current HART
 *
 * @param plat pointer to struct sbi_platform
 * @param cold_boot whether cold boot (true) or warm_boot (false)
 *
 * @return 0 on success and negative error code on failure
 */
static inline int sbi_platform_final_init(const struct sbi_platform *plat,
					  bool cold_boot)
{
	if (plat && sbi_platform_ops(plat)->final_init)
		return sbi_platform_ops(plat)->final_init(cold_boot);
	return 0;
}

sbi_ecall_init

sbi_ecall_init registers the SBI extensions so that S-mode software can later invoke their functionality through the ecall instruction:

int sbi_ecall_init(void)
{
	int ret;
	struct sbi_ecall_extension *ext;
	unsigned long i;

	for (i = 0; i < sbi_ecall_exts_size; i++) {
		ext = sbi_ecall_exts[i];
		ret = SBI_ENODEV;

		if (ext->register_extensions)
			ret = ext->register_extensions();
		if (ret)
			return ret;
	}

	return 0;
}
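The extensions registered here are reached from S-mode through the standard SBI calling convention: the extension ID goes in a7, the function ID in a6, arguments in a0..a5, and the error/value pair comes back in a0/a1. A minimal S-mode caller sketch (independent example code, not part of OpenSBI; the EID/FID shown are the SBI TIME extension and its set_timer function):

struct sbiret_demo {
	long error;
	long value;
};

static struct sbiret_demo sbi_call_demo(unsigned long eid, unsigned long fid,
					unsigned long arg0)
{
	register unsigned long a0 asm("a0") = arg0;
	register unsigned long a6 asm("a6") = fid;
	register unsigned long a7 asm("a7") = eid;
	register unsigned long a1 asm("a1");

	asm volatile("ecall"
		     : "+r"(a0), "=r"(a1)
		     : "r"(a6), "r"(a7)
		     : "memory");

	return (struct sbiret_demo){ .error = (long)a0, .value = (long)a1 };
}

/* Example: program the next timer event via the TIME extension
 * (EID 0x54494D45, FID 0):
 *   sbi_call_demo(0x54494D45, 0, next_event_ticks);
 */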

sbi_hart_pmp_configure

sbi_hart_pmp_configure programs the PMP. There are two paths depending on whether the Smepmp extension is present: plain PMP uses sbi_hart_oldpmp_configure, while Smepmp uses sbi_hart_smepmp_configure:

int sbi_hart_pmp_configure(struct sbi_scratch *scratch)
{
	int rc;
	unsigned int pmp_bits, pmp_log2gran;
	unsigned int pmp_count = sbi_hart_pmp_count(scratch);
	unsigned long pmp_addr_max;

	if (!pmp_count)
		return 0;

	pmp_log2gran = sbi_hart_pmp_log2gran(scratch);
	pmp_bits = sbi_hart_pmp_addrbits(scratch) - 1;
	pmp_addr_max = (1UL << pmp_bits) | ((1UL << pmp_bits) - 1);

	if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP))
		rc = sbi_hart_smepmp_configure(scratch, pmp_count,
						pmp_log2gran, pmp_addr_max);
	else
		rc = sbi_hart_oldpmp_configure(scratch, pmp_count,
						pmp_log2gran, pmp_addr_max);

	/*
	 * As per section 3.7.2 of privileged specification v1.12,
	 * virtual address translations can be speculatively performed
	 * (even before actual access). These, along with PMP traslations,
	 * can be cached. This can pose a problem with CPU hotplug
	 * and non-retentive suspend scenario because PMP states are
	 * not preserved.
	 * It is advisable to flush the caching structures under such
	 * conditions.
	 */
	if (misa_extension('S')) {
		__asm__ __volatile__("sfence.vma");

		/*
		 * If hypervisor mode is supported, flush caching
		 * structures in guest mode too.
		 */
		if (misa_extension('H'))
			__sbi_hfence_gvma_all();
	}

	return rc;
}
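Whichever path is taken, the regions end up encoded into pmpaddrN/pmpcfgN pairs. For a naturally aligned power-of-two (NAPOT) region the pmpaddr encoding is fixed by the privileged spec: the base is shifted right by two and the low bits carry a run of ones describing the size. A small sketch of just that encoding (access flags such as R/W/X live in pmpcfg and are omitted here):

/* NAPOT pmpaddr encoding sketch: 'base' must be aligned to 'size' and
 * 'size' must be a power of two of at least 8 bytes. */
static unsigned long pmp_napot_addr(unsigned long base, unsigned long size)
{
	return (base >> 2) | ((size >> 3) - 1);
}

/* Example: a 2 MiB region at 0x80000000 (illustrative values):
 *   pmp_napot_addr(0x80000000, 0x200000) == 0x2003ffff
 */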

sbi_hsm_hart_start_finish

sbi_hsm_hart_start_finish completes the HART's startup and jumps to the next-stage image:

  • First transition the HART's state from SBI_HSM_STATE_START_PENDING to SBI_HSM_STATE_STARTED
  • Then release the start ticket, indicating that this HART has finished starting
  • Finally switch privilege mode, i.e. jump into the next boot stage
void __noreturn sbi_hsm_hart_start_finish(struct sbi_scratch *scratch,
					  u32 hartid)
{
	unsigned long next_arg1;
	unsigned long next_addr;
	unsigned long next_mode;
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	if (!__sbi_hsm_hart_change_state(hdata, SBI_HSM_STATE_START_PENDING,
					 SBI_HSM_STATE_STARTED))
		sbi_hart_hang();

	next_arg1 = scratch->next_arg1;
	next_addr = scratch->next_addr;
	next_mode = scratch->next_mode;
	hsm_start_ticket_release(hdata);

	sbi_hart_switch_mode(hartid, next_arg1, next_addr, next_mode, false);
}
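The final step, sbi_hart_switch_mode(), is what actually leaves M-mode: in essence it programs mstatus.MPP with the target privilege, points mepc at the next image, passes hartid and arg1 in a0/a1, and executes mret. A greatly simplified sketch of that idea (the real function also sanitizes S-mode CSRs such as stvec and satp and handles the hypervisor/virtualization case):

static void __attribute__((noreturn))
switch_mode_sketch(unsigned long hartid, unsigned long arg1,
		   unsigned long next_addr, unsigned long next_mode)
{
	unsigned long val = csr_read(CSR_MSTATUS);

	val = INSERT_FIELD(val, MSTATUS_MPP, next_mode);  /* privilege after mret */
	val = INSERT_FIELD(val, MSTATUS_MPIE, 0);         /* interrupts off on entry */
	csr_write(CSR_MSTATUS, val);
	csr_write(CSR_MEPC, next_addr);                   /* where mret will land */

	register unsigned long a0 asm("a0") = hartid;
	register unsigned long a1 asm("a1") = arg1;
	asm volatile("mret" : : "r"(a0), "r"(a1));
	__builtin_unreachable();
}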

At this point the OpenSBI cold boot is complete and execution jumps to the next-stage image, such as U-Boot or Linux.

Warm-Boot Initialization

The warm-boot path mainly does the following:

  • First wait for the cold boot to complete; as described above, the cold-boot path marks it done in wake_coldboot_harts;
  • Then read the HSM state; if it is SBI_HSM_STATE_SUSPENDED, run the resume path (init_warm_resume);
  • For any other state, clear any pending IPI first and then run init_warm_startup, whose initialization calls largely mirror the cold-boot path (its code was listed earlier) and are not analyzed again here.
static void __noreturn init_warmboot(struct sbi_scratch *scratch, u32 hartid)
{
	int hstate;

	wait_for_coldboot(scratch, hartid);

	hstate = sbi_hsm_hart_get_state(sbi_domain_thishart_ptr(), hartid);
	if (hstate < 0)
		sbi_hart_hang();

	if (hstate == SBI_HSM_STATE_SUSPENDED) {
		init_warm_resume(scratch, hartid);
	} else {
		sbi_ipi_raw_clear(sbi_hartid_to_hartindex(hartid));
		init_warm_startup(scratch, hartid);
	}
}
