Linux内核启动

kernel版本4.20

linux/arch/x86/boot/header.S

_start -> start_of_setup -> main

	.globl	_start
_start:
		# Explicitly enter this as bytes, or the assembler
		# tries to generate a 3-byte jump here, which causes
		# everything else to push off to the wrong offset.
		.byte	0xeb		# short (2-byte) jump
		.byte	start_of_setup-1f	# rel8 displacement, counted from label 1: just below
1:

start_of_setup:
# Jump to C code (should not return)
	calll	main		# 'l' suffix forces a 32-bit-operand call to boot/main.c:main()

0xeb是short JMP(2字节短跳转)的操作码,其后一字节是相对偏移量start_of_setup-1f,即目标地址start_of_setup相对于跳转指令结束位置(标号1:)的距离,其中start_of_setup和1f都是符号
Nf(forwards)表示当前指令后距离最近的N,Nb(backwards)表示当前指令前距离最近的N

linux/arch/x86/boot/main.c

main -> go_to_protected_mode

void main(void)
{
	/* Excerpt of the real-mode setup entry: final step is to hand
	 * control to the protected-mode transition (does not return). */
	go_to_protected_mode();
}

linux/arch/x86/boot/pm.c

go_to_protected_mode -> protected_mode_jump

void go_to_protected_mode(void)
{
	/*
	 * Excerpt: jump to the 32-bit entry point stored in
	 * hdr.code32_start.  The second argument is the linear address
	 * of boot_params: the real-mode segment base (ds() << 4) plus
	 * the structure's offset within the segment.
	 */
	protected_mode_jump(boot_params.hdr.code32_start,
			    (u32)&boot_params + (ds() << 4));
}

linux/arch/x86/boot/pmjump.S

protected_mode_jump -> in_pm32 -> jmpl *%eax

/*
 * void protected_mode_jump(u32 entrypoint, u32 bootparams);
 */
GLOBAL(protected_mode_jump)
	# Transition to 32-bit mode
	# Hand-assembled far jump: 0x66 is the operand-size prefix (makes
	# the offset 32 bits), 0xea is the direct far-jump (ljmp) opcode;
	# loading CS with __BOOT_CS completes the switch to 32-bit mode.
	.byte	0x66, 0xea		# ljmpl opcode
2:	.long	in_pm32			# offset
	.word	__BOOT_CS		# segment
ENDPROC(protected_mode_jump)



GLOBAL(in_pm32)
	# With -mregparm=3 the first C argument arrives in %eax, so %eax
	# holds protected_mode_jump()'s 'entrypoint' (code32_start).
	jmpl	*%eax			# Jump to the 32-bit entrypoint
ENDPROC(in_pm32)

由于内核以-mregparm=3编译,前三个参数依次通过eax、edx、ecx传递,因此eax保存函数的第一个参数entrypoint,即boot_params.hdr.code32_start

函数调用

linux/arch/x86/entry/calling.h

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

For 32-bit we have the following conventions - kernel is built with
-mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

GLOBAL/ENTRY/END/ENDPROC

linux/arch/x86/include/asm/linkage.h

/* GLOBAL(name): declare 'name' as a global symbol and define its label. */
#define GLOBAL(name)	\
	.globl name;	\
	name:

linux/include/linux/linkage.h

/* ASM_NL: statement separator used to join directives inside these macros. */
#define ASM_NL		 ;

/* __ALIGN: align to 4 bytes, padding with 0x90 (x86 NOP). */
#define __ALIGN		.align 4,0x90

#define ALIGN __ALIGN

/* ENTRY(name): global, aligned entry label for an assembly routine. */
#define ENTRY(name) \
	.globl name ASM_NL \
	ALIGN ASM_NL \
	name:

/* END(name): record the symbol's size as (current address - name). */
#define END(name) \
	.size name, .-name

/* ENDPROC(name): mark 'name' as a function, then emit its size. */
#define ENDPROC(name) \
	.type name, @function ASM_NL \
	END(name)

.size设置name的长度,.表示当前指令的地址,.-name表示name的长度
.type设置name的类型,@function表示是函数

linux/arch/x86/boot/header.S

code32_start:				# here loaders can put a different
					# start address for 32-bit code.
		.long	0x100000	# 0x100000 = default for big kernel

linux/arch/x86/boot/compressed/head_64.S

startup_32 -> startup_64 -> jmp *%rax

ENTRY(startup_32)
	pushl	$__KERNEL_CS		# push target code-segment selector
	leal	startup_64(%ebp), %eax	# %ebp = load base (set earlier, not shown here)
	pushl	%eax			# push target offset
	/* Jump from 32bit compatibility mode into 64bit mode. */
	lret				# far return pops eip then cs, landing in startup_64
ENDPROC(startup_32)



ENTRY(startup_64)
/*
 * Jump to the relocated address.
 */
	leaq	relocated(%rbx), %rax	# %rbx = relocation base (computed earlier, not shown)
	jmp	*%rax

relocated:
/*
 * Do the extraction, and jump to the new kernel..
 */
	call	extract_kernel		/* returns kernel location in %rax */

/*
 * Jump to the decompressed kernel.
 */
	jmp	*%rax

linux/arch/x86/kernel/head_64.S

startup_64 -> secondary_startup_64 -> initial_code -> x86_64_start_kernel

	.globl startup_64
startup_64:
	jmp 1f			# boot CPU joins the common path below
ENTRY(secondary_startup_64)
1:
/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax	# $1f is the link-time (virtual) address of the next label
	jmp	*%rax		# indirect jump moves execution onto the virtual mapping
1:

1:
.Ljump_to_C_code:
	/*
	 * Jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address, this is only possible as indirect
	 * jump.  In addition we need to ensure %cs is set so we make this
	 * a far return.
	 */
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
END(secondary_startup_64)



GLOBAL(initial_code)
.quad	x86_64_start_kernel	# target of the lretq in secondary_startup_64

linux/arch/x86/kernel/head64.c

x86_64_start_kernel -> x86_64_start_reservations -> start_kernel

asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
{
	/* Excerpt: forward the real-mode boot data pointer onward. */
	x86_64_start_reservations(real_mode_data);
}

void __init x86_64_start_reservations(char *real_mode_data)
{
	/* Excerpt: enter the architecture-independent kernel entry point. */
	start_kernel();
}

linux/init/main.c

start_kernel

asmlinkage __visible void __init start_kernel(void)
{
	/* Generic kernel initialization; body elided in this excerpt. */
	...
}

参考资料

https://blog.csdn.net/richardysteven/article/details/52629731

  • 0
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值