Setting up page tables at Linux kernel boot — Linux memory management: the kernel's initial 4MB page tables

#if (PHYS_OFFSET & 0x001fffff)

#error "PHYS_OFFSET must be at an even 2MiB boundary!"

#endif

#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)    /* KERNEL_RAM_VADDR = 0xc0008000: virtual address of the kernel in RAM;
                                                           PAGE_OFFSET = 0xc0000000: start virtual address of kernel space */

#define KERNEL_RAM_PADDR (PHYS_OFFSET + TEXT_OFFSET)    /* KERNEL_RAM_PADDR = 0x30008000: physical address of the kernel in RAM;
                                                           PHYS_OFFSET = 0x30000000: start physical address of RAM;
                                                           TEXT_OFFSET = 0x00008000: offset of the kernel image within RAM */

/*

* swapper_pg_dir is the virtual address of the initial page table.

* We place the page tables 16K below KERNEL_RAM_VADDR.  Therefore, we must

* make sure that KERNEL_RAM_VADDR is correctly set.  Currently, we expect

* the least significant 16 bits to be 0x8000, but we could probably

* relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.

*/

#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000

#error KERNEL_RAM_VADDR must start at 0xXXXX8000

#endif

.globl swapper_pg_dir /* Virtual base address of the initial (first-level/section) page table covering the whole
                         4GB virtual address space. It must be 16KB aligned. The slot in swapper_pg_dir that holds
                         the (first-level/section) descriptor for a given virtual address is fixed: it is determined
                         by the section base of that address. */

.equ swapper_pg_dir, KERNEL_RAM_VADDR - 0x4000    /* swapper_pg_dir = 0xc0008000 - 16K = 0xc0004000 (physical 0x30004000) */

.macro pgtbl, rd
ldr \rd, =(KERNEL_RAM_PADDR - 0x4000)   /* KERNEL_RAM_PADDR - 0x4000 = 0x30004000 */
.endm

#ifdef CONFIG_XIP_KERNEL

#define KERNEL_START  XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
#define KERNEL_END    _edata_loc

#else

#define KERNEL_START  KERNEL_RAM_VADDR    /* 0xc0008000 */
#define KERNEL_END    _end                /* end address of the kernel, defined in vmlinux.lds */

#endif

/*

* Kernel startup entry point.

* ---------------------------

*

* This is normally called from the decompressor code.  The requirements

* are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,

* r1 = machine nr, r2 = atags pointer.

*

* This code is mostly position independent, so if you link the kernel at

* 0xc0008000, you call this at __pa(0xc0008000).

*

* See linux/arch/arm/tools/mach-types for the complete list of machine

* numbers for r1.

*

* We're trying to keep crap to a minimum; DO NOT add any machine specific

* crap here - that's what the boot loader (or in extreme, well justified

* circumstances, zImage) is for.

*/

.section ".text.head", "ax"

ENTRY(stext)                       /* the kernel entry point, referenced in vmlinux.lds */

msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode /* enter supervisor mode with interrupts disabled */
                                              @ and irqs disabled
mrc p15, 0, r9, c0, c0    @ get processor id   /* read the CPU ID from coprocessor register c0, to check whether
                                                  this CPU is supported */
bl __lookup_processor_type @ r5=procinfo r9=cpuid /* call __lookup_processor_type to decide whether the kernel
                                                     supports this CPU; if so, r5 = address of the matching
                                                     proc_info_list descriptor, otherwise r5 = 0 */
movs r10, r5              @ invalid processor (r5=0)?
beq __error_p             @ yes, error 'p'     /* the kernel does not support this processor: take the error path */
bl __lookup_machine_type  @ r5=machinfo        /* is this board supported? after __lookup_machine_type, r5 = address
                                                  of the matching struct machine_desc if the board is supported,
                                                  otherwise r5 = 0 */
movs r8, r5               @ invalid machine (r5=0)?
beq __error_a             @ yes, error 'a'     /* the board failed the check: take the error path */
bl __vet_atags
bl __create_page_tables   /* create the initial page tables */

/*

* The following calls CPU specific code in a position independent

* manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of

* xxx_proc_info structure selected by __lookup_machine_type

* above.  On return, the CPU will be ready for the MMU to be

* turned on, and r0 will hold the CPU control register value.

*/

ldr r13, __switch_data    @ address to jump to after   /* __switch_data is defined in head-common.S; once the page
                                                          tables are set up and the MMU is on, control passes through
                                                          it and on to the first C function, start_kernel() in
                                                          init/main.c */
                          @ mmu has been enabled
adr lr, __enable_mmu      @ return (PIC) address
add pc, r10, #PROCINFO_INITFUNC

ENDPROC(stext)

#if defined(CONFIG_SMP)

ENTRY(secondary_startup)

/*

* Common entry point for secondary CPUs.

*

* Ensure that we're in SVC mode, and IRQs are disabled.  Lookup

* the processor type - there is no need to check the machine type

* as it has already been validated by the primary processor.

*/

msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
mrc p15, 0, r9, c0, c0    @ get processor id
bl __lookup_processor_type
movs r10, r5              @ invalid processor?
moveq r0, #'p'            @ yes, error 'p'
beq __error
/*
 * Use the page tables supplied from  __cpu_up.
 */
adr r4, __secondary_data
ldmia r4, {r5, r7, r13}   @ address to jump to after
sub r4, r4, r5            @ mmu has been enabled
ldr r4, [r7, r4]          @ get secondary_data.pgdir
adr lr, __enable_mmu      @ return address
add pc, r10, #PROCINFO_INITFUNC @ initialise processor
                                @ (return control reg)

ENDPROC(secondary_startup)

/*

* r6  = &secondary_data

*/

ENTRY(__secondary_switched)
ldr sp, [r7, #4]          @ get secondary_data.stack
mov fp, #0
b secondary_start_kernel
ENDPROC(__secondary_switched)

.type __secondary_data, %object
__secondary_data:
.long .
.long secondary_data
.long __secondary_switched

#endif /* defined(CONFIG_SMP) */

/*

* Setup common bits before finally enabling the MMU.  Essentially

* this is just loading the page table pointer and domain access

* registers.

*/

__enable_mmu:

#ifdef CONFIG_ALIGNMENT_TRAP
orr r0, r0, #CR_A
#else
bic r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
bic r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
bic r0, r0, #CR_I
#endif
mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
          domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
          domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
          domain_val(DOMAIN_IO, DOMAIN_CLIENT))
mcr p15, 0, r5, c3, c0, 0 @ load domain access register
mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
b __turn_mmu_on

ENDPROC(__enable_mmu)

/*

* Enable the MMU.  This completely changes the structure of the visible

* memory space.  You will not be able to trace execution through this.

* If you have an enquiry about this, *please* check the linux-arm-kernel

* mailing list archives BEFORE sending another post to the list.

*

*  r0  = cp#15 control register

*  r13 = *virtual* address to jump to upon completion

*

* other registers depend on the function called upon completion

*/

.align 5
__turn_mmu_on:
mov r0, r0
mcr p15, 0, r0, c1, c0, 0 @ write control reg
mrc p15, 0, r3, c0, c0, 0 @ read id reg
mov r3, r3
mov r3, r3
mov pc, r13

ENDPROC(__turn_mmu_on)

/*

* Setup the initial page tables.  We only setup the barest

* amount which are required to get the kernel running, which

* generally means mapping in the kernel code.

*

 * r8  = machinfo     base address of struct machine_desc
 * r9  = cpuid        processor ID read via coprocessor CP15
 * r10 = procinfo     base address of struct proc_info_list

*

* Returns:

*  r0, r3, r6, r7 corrupted

*  r4 = physical page table address

*/

__create_page_tables:     /* create the page tables: build first-level section mappings for the first 4MB of RAM */

/* .macro pgtbl, rd
   ldr \rd, =(KERNEL_RAM_PADDR - 0x4000)
   .endm */

pgtbl r4                  @ page table address   /* equivalent to ldr r4, =(KERNEL_RAM_PADDR - 0x4000) ===> r4 = 0x30004000 */

/*

* Clear the 16K level 1 swapper page table

*/

mov r0, r4                /* r0 = 0x30004000 */
mov r3, #0
add r6, r0, #0x4000       /* r6 = 0x30008000 */
/* zero the 16K (4096 entries * 4 bytes) of page table memory from 0x30004000 to 0x30008000 */
1:  str r3, [r0], #4
    str r3, [r0], #4
    str r3, [r0], #4
    str r3, [r0], #4
    teq r0, r6
    bne 1b

ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags   /* r7 = the __cpu_mm_mmu_flags member of struct proc_info_list,
                                                        i.e. 0xc1e = 1100 0001 1110 */

/*

* Create identity mapping for first MB of kernel to

* cater for the MMU enable.  This identity mapping

* will be removed by paging_init().  We use our current program

* counter to determine corresponding section base address.

*/

/* The section-mapping formula is:
     *(mmu_tlb_base + (virtualaddr >> 20)) = phyaddr_and_permission;
   where mmu_tlb_base is the page table base address and phyaddr_and_permission is the physical section
   base address OR'ed with the access permissions. */

mov r6, pc, lsr #20       @ start of kernel section   /* r6 = 0x300 (pgd_idx) */
orr r3, r7, r6, lsl #20   @ flags + kernel base       /* 0x30000000 + 0xc1e = 0x30000c1e: r6 = 0x300 << 20 =
                                                         0x30000000 is the section base and 0xc1e the permissions
                                                         (read/write, write-back). Note that here the mapped virtual
                                                         address equals the physical address. */
str r3, [r4, r6, lsl #2]  @ identity mapping          /* store r3 at r4 + (r6 << 2) (each descriptor is 4 bytes):
                                                         0x30004000 + 0x300*4 gets 0x30000c1e, i.e.
                                                         ((unsigned int *)r4)[pgd_idx] = 0x30000c1e */
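The descriptor arithmetic above can also be modeled in C. This is only an illustrative sketch (the function name and types are mine, not the kernel's); it computes and stores a 1MB section descriptor exactly the way the str above does.

#include <stdint.h>

/* Model of the ARM first-level section mapping used above:
 *   table[va >> 20] = (pa & 0xfff00000) | flags
 * The table has 4096 entries; each maps one 1MB section. */
static void map_section(uint32_t *table, uint32_t va, uint32_t pa, uint32_t flags)
{
    table[va >> 20] = (pa & 0xfff00000) | flags;   /* section base | permissions */
}

/* The identity mapping above is then:
 *   map_section(pgtbl, 0x30000000, 0x30000000, 0xc1e);
 * which writes 0x30000c1e into entry 0x300. */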

/*

* Now setup the pagetables for our kernel direct

* mapped region.

*/

add r0, r4, #(KERNEL_START & 0xff000000) >> 18   /* KERNEL_START = 0xc0008000, so r0 = 0x30004000 + 0xc00 */
str r3, [r0, #(KERNEL_START & 0x00f00000) >> 18]!
ldr r6, =(KERNEL_END - 1)
add r0, r0, #4
add r6, r4, r6, lsr #18
1:  cmp r0, r6
    add r3, r3, #1 << 20
    strls r3, [r0], #4
    bls 1b
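In C terms, this loop amounts to the following sketch (the names are mine): one descriptor per 1MB section from KERNEL_START up to KERNEL_END, continuing from the descriptor value that r3 still holds after the identity mapping.

#include <stdint.h>

/* Sketch of the kernel direct-mapping loop: map virtual
 * [kernel_start, kernel_end) onto consecutive physical sections,
 * starting from first_desc = (physical section base | flags),
 * e.g. 0x30000c1e for a kernel loaded at physical 0x30008000. */
static void map_kernel(uint32_t *table, uint32_t kernel_start,
                       uint32_t kernel_end, uint32_t first_desc)
{
    uint32_t desc = first_desc;
    for (uint32_t va = kernel_start & 0xfff00000; va < kernel_end; va += 1 << 20) {
        table[va >> 20] = desc;      /* one entry per 1MB section */
        desc += 1 << 20;             /* next physical section */
    }
}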

#ifdef CONFIG_XIP_KERNEL
/*
 * Map some ram to cover our .data and .bss areas.
 */
orr r3, r7, #(KERNEL_RAM_PADDR & 0xff000000)
.if (KERNEL_RAM_PADDR & 0x00f00000)
orr r3, r3, #(KERNEL_RAM_PADDR & 0x00f00000)
.endif
add r0, r4, #(KERNEL_RAM_VADDR & 0xff000000) >> 18
str r3, [r0, #(KERNEL_RAM_VADDR & 0x00f00000) >> 18]!
ldr r6, =(_end - 1)
add r0, r0, #4
add r6, r4, r6, lsr #18
1:  cmp r0, r6
    add r3, r3, #1 << 20
    strls r3, [r0], #4
    bls 1b
#endif

/*
 * Then map first 1MB of ram in case it contains our boot params.
 */
add r0, r4, #PAGE_OFFSET >> 18
orr r6, r7, #(PHYS_OFFSET & 0xff000000)
.if (PHYS_OFFSET & 0x00f00000)
orr r6, r6, #(PHYS_OFFSET & 0x00f00000)
.endif
str r6, [r0]

#ifdef CONFIG_DEBUG_LL
ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
/*
 * Map in IO space for serial debugging.
 * This allows debug messages to be output
 * via a serial console before paging_init.
 */
ldr r3, [r8, #MACHINFO_PGOFFIO]
add r0, r4, r3
rsb r3, r3, #0x4000       @ PTRS_PER_PGD*sizeof(long)
cmp r3, #0x0800           @ limit to 512MB
movhi r3, #0x0800
add r6, r0, r3
ldr r3, [r8, #MACHINFO_PHYSIO]
orr r3, r3, r7
1:  str r3, [r0], #4
    add r3, r3, #1 << 20
    teq r0, r6
    bne 1b

#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
/*
 * If we're using the NetWinder or CATS, we also need to map
 * in the 16550-type serial port for the debug messages
 */
add r0, r4, #0xff000000 >> 18
orr r3, r7, #0x7c000000
str r3, [r0]
#endif

#ifdef CONFIG_ARCH_RPC
/*
 * Map in screen at 0x02000000 & SCREEN2_BASE
 * Similar reasons here - for debug.  This is
 * only for Acorn RiscPC architectures.
 */
add r0, r4, #0x02000000 >> 18
orr r3, r7, #0x02000000
str r3, [r0]
add r0, r4, #0xd8000000 >> 18
str r3, [r0]
#endif
#endif
mov pc, lr

ENDPROC(__create_page_tables)

.ltorg

#include "head-common.S"

/*****************************************************************************************************************************************/

/* head-common.S */

/*

*  linux/arch/arm/kernel/head-common.S

*

*  Copyright (C) 1994-2002 Russell King

*  Copyright (c) 2003 ARM Limited

*  All Rights Reserved

*

* This program is free software; you can redistribute it and/or modify

* it under the terms of the GNU General Public License version 2 as

* published by the Free Software Foundation.

*

*/

#define ATAG_CORE 0x54410001

#define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2)

.type __switch_data, %object
__switch_data:
.long __mmap_switched
.long __data_loc                          @ r4
.long _data                               @ r5
.long __bss_start                         @ r6
.long _end                                @ r7
.long processor_id                        @ r4
.long __machine_arch_type                 @ r5
.long __atags_pointer                     @ r6
.long cr_alignment                        @ r7
.long init_thread_union + THREAD_START_SP @ sp

/*

* The following fragment of code is executed with the MMU on in MMU mode,

* and uses absolute addresses; this is not position independent.

*

*  r0  = cp#15 control register

*  r1  = machine ID

*  r2  = atags pointer

*  r9  = processor ID

*/

__mmap_switched:
adr r3, __switch_data + 4
ldmia r3!, {r4, r5, r6, r7}
cmp r4, r5                @ Copy data segment if needed
1:  cmpne r5, r6
    ldrne fp, [r4], #4
    strne fp, [r5], #4
    bne 1b
mov fp, #0                @ Clear BSS (and zero fp)
1:  cmp r6, r7
    strcc fp, [r6], #4
    bcc 1b
ldmia r3, {r4, r5, r6, r7, sp}
str r9, [r4]              @ Save processor ID
str r1, [r5]              @ Save machine type
str r2, [r6]              @ Save atags pointer
bic r4, r0, #CR_A         @ Clear 'A' bit
stmia r7, {r0, r4}        @ Save control register values
b start_kernel            /* the first C function: start the kernel */

ENDPROC(__mmap_switched)

/*

* Exception handling.  Something went wrong and we can't proceed.  We

* ought to tell the user, but since we don't have any guarantee that

* we're even running on the right architecture, we do virtually nothing.

*

* If CONFIG_DEBUG_LL is set we try to print out something about the error

* and hope for the best (useful if bootloader fails to pass a proper

* machine ID for example).

*/

__error_p:   /* error path: the CPU ID read from CP15 matches none of the IDs the kernel was built to support */
#ifdef CONFIG_DEBUG_LL
adr r0, str_p1       /* r0 points to the string "\nError: unrecognized/unsupported processor variant (0x" */
bl printascii        /* print the string */
mov r0, r9
bl printhex8
adr r0, str_p2
bl printascii
b __error
str_p1: .asciz "\nError: unrecognized/unsupported processor variant (0x"
str_p2: .asciz ").\n"
.align
#endif

ENDPROC(__error_p)

__error_a:      /* error path: the board failed the machine-type check */
#ifdef CONFIG_DEBUG_LL
mov r4, r1                @ preserve machine ID
adr r0, str_a1
bl printascii
mov r0, r4
bl printhex8
adr r0, str_a2
bl printascii
adr r3, 3f
ldmia r3, {r4, r5, r6}    @ get machine desc list
sub r4, r3, r4            @ get offset between virt&phys
add r5, r5, r4            @ convert virt addresses to
add r6, r6, r4            @ physical address space
1:  ldr r0, [r5, #MACHINFO_TYPE]     @ get machine type
    bl printhex8
    mov r0, #'\t'
    bl printch
    ldr r0, [r5, #MACHINFO_NAME]     @ get machine name
    add r0, r0, r4
    bl printascii
    mov r0, #'\n'
    bl printch
    add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
    cmp r5, r6
    blo 1b
adr r0, str_a3
bl printascii
b __error
ENDPROC(__error_a)

str_a1: .asciz "\nError: unrecognized/unsupported machine ID (r1 = 0x"
str_a2: .asciz ").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
str_a3: .asciz "\nPlease check your kernel config and/or bootloader.\n"
.align
#endif

__error:
#ifdef CONFIG_ARCH_RPC
/*
 * Turn the screen red on a error - RiscPC only.
 */
mov r0, #0x02000000
mov r3, #0x11
orr r3, r3, r3, lsl #8
orr r3, r3, r3, lsl #16
str r3, [r0], #4
str r3, [r0], #4
str r3, [r0], #4
str r3, [r0], #4
#endif
1:  mov r0, r0
    b 1b
ENDPROC(__error)

/*

* Read processor ID register (CP#15, CR0), and look up in the linker-built

* supported processor list.  Note that we can't use the absolute addresses

* for the __proc_info lists since we aren't running with the MMU on

* (and therefore, we are not in the correct address space).  We have to

* calculate the offset.

*

 * r9 = cpuid
 * Returns:
 * r3, r4, r6 corrupted
 * r5 = proc_info pointer in physical address space
 * r9 = cpuid (preserved)

*/

__lookup_processor_type:   /* look up the CPU type, i.e. check whether the CPU ID matches a supported processor */
adr r3, 3f                 /* r3 = address of label 3 below; a physical address */
ldmda r3, {r5 - r7}        /* ldmda loads descending (the address decrements per transfer); afterwards
                              r5 = __proc_info_begin, r6 = __proc_info_end, r7 = . (the address of label 3);
                              all three are link-time virtual addresses */
sub r3, r3, r7             @ get offset between virt&phys   /* offset between virtual and physical; negative here */
add r5, r5, r3             @ convert virt addresses to      /* r5 = physical address of __proc_info_begin */
add r6, r6, r3             @ physical address space         /* r6 = physical address of __proc_info_end */
1:  ldmia r5, {r3, r4}     @ value, mask                    /* defined in arch/arm/mm/proc-arm920.S:
                                                               r3 = cpu_val = 0x41009200, r4 = cpu_mask = 0xff00fff0 */
    and r4, r4, r9         @ mask wanted bits               /* r4 = cpu_mask & cpuid = 0xff00fff0 & 0x41129200 = 0x41009200 */
    teq r3, r4
    beq 2f                 /* masked CPU ID equals cpu_val: match found, branch to label 2 */
    add r5, r5, #PROC_INFO_SZ @ sizeof(proc_info_list)      /* advance r5 to the next proc_info_list */
    cmp r5, r6             /* have we walked every proc_info_list entry? */
    blo 1b                 /* not yet: keep comparing */
    mov r5, #0             @ unknown processor
2:  mov pc, lr             /* return; r5 = 0 if no proc_info_list matched */

ENDPROC(__lookup_processor_type)
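A minimal C model of the loop above may make it easier to follow (a sketch; the kernel's real C entry point is the lookup_processor_type() wrapper shown next, and only the two fields the loop reads are modeled here):

#include <stdint.h>
#include <stddef.h>

struct proc_info_entry {
    uint32_t cpu_val;   /* expected ID after masking, e.g. 0x41009200 */
    uint32_t cpu_mask;  /* significant ID bits, e.g. 0xff00fff0 */
};

static struct proc_info_entry *
find_proc_info(struct proc_info_entry *begin, struct proc_info_entry *end,
               uint32_t cpuid)
{
    for (struct proc_info_entry *p = begin; p < end; p++)
        if ((cpuid & p->cpu_mask) == p->cpu_val)
            return p;   /* match: 0x41129200 & 0xff00fff0 == 0x41009200 */
    return NULL;        /* corresponds to r5 = 0 in the assembly */
}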

/*

* This provides a C-API version of the above function.

*/

ENTRY(lookup_processor_type)    /* wrapper callable from C */
stmfd sp!, {r4 - r7, r9, lr}
mov r9, r0
bl __lookup_processor_type
mov r0, r5
ldmfd sp!, {r4 - r7, r9, pc}

ENDPROC(lookup_processor_type)

/*
 * Look in <asm/procinfo.h> and arch/arm/kernel/arch.[ch] for
 * more information about the __proc_info and __arch_info structures.
 */
.long __proc_info_begin
.long __proc_info_end
3:  .long .
.long __arch_info_begin
.long __arch_info_end

/*
In the linker script:

__proc_info_begin = .;
*(.proc.info.init)
__proc_info_end = .;

Defined in include/asm-arm/procinfo.h:

struct proc_info_list {
    unsigned int         cpu_val;
    unsigned int         cpu_mask;
    unsigned long        __cpu_mm_mmu_flags;  // used by head.S
    unsigned long        __cpu_io_mmu_flags;  // used by head.S
    unsigned long        __cpu_flush;         // used by head.S
    const char           *arch_name;
    const char           *elf_name;
    unsigned int         elf_hwcap;
    const char           *cpu_name;
    struct processor     *proc;
    struct cpu_tlb_fns   *tlb;
    struct cpu_user_fns  *user;
    struct cpu_cache_fns *cache;
};

Defined in arch/arm/mm/proc-arm920.S:

.section ".proc.info.init", #alloc, #execinstr
.type __arm920_proc_info,#object
__arm920_proc_info:
.long 0x41009200      cpu_val   (the CPU ID of both the s3c2410 and the s3c2440 is 0x41129200)
.long 0xff00fff0      cpu_mask
......

In the linker script:

__arch_info_begin = .;
*(.arch.info.init)
__arch_info_end = .;

Defined in include/asm-arm/mach/arch.h:

#define MACHINE_START(_type,_name)    \
static const struct machine_desc __mach_desc_##_type    \
 __used    \
 __attribute__((__section__(".arch.info.init"))) = {    \
    .nr   = MACH_TYPE_##_type,    \
    .name = _name,

#define MACHINE_END    \
};

Defined in arch/arm/mach-s3c2410/mach-smdk2410.c:

MACHINE_START(SMDK2410, "SMDK2410") // @TODO: request a new identifier and switch * to SMDK2410
    // Maintainer: Jonas Dietsche
    .phys_io      = S3C2410_PA_UART,
    .io_pg_offst  = (((u32)S3C24XX_VA_UART) >> 18) & 0xfffc,
    .boot_params  = S3C2410_SDRAM_PA + 0x100,
    .map_io       = smdk2410_map_io,
    .init_irq     = s3c24xx_init_irq,
    .init_machine = smdk2410_init,
    .timer        = &s3c24xx_timer,
MACHINE_END

Expanding the macros gives:

static const struct machine_desc __mach_desc_SMDK2410
 __used
 __attribute__((__section__(".arch.info.init"))) = {    // force the entry into the .arch.info.init section
    .nr           = MACH_TYPE_SMDK2410,   // MACH_TYPE_SMDK2410 is 193; u-boot defines the same value
    .name         = "SMDK2410",
    .phys_io      = S3C2410_PA_UART,
    .io_pg_offst  = (((u32)S3C24XX_VA_UART) >> 18) & 0xfffc,
    .boot_params  = S3C2410_SDRAM_PA + 0x100,
    .map_io       = smdk2410_map_io,
    .init_irq     = s3c24xx_init_irq,
    .init_machine = smdk2410_init,
    .timer        = &s3c24xx_timer,
};
*/

/*

* Lookup machine architecture in the linker-build list of architectures.

* Note that we can't use the absolute addresses for the __arch_info

* lists since we aren't running with the MMU on (and therefore, we are

* not in the correct address space).  We have to calculate the offset.

*

*  r1 = machine architecture number

* Returns:

*  r3, r4, r6 corrupted

*  r5 = mach_info pointer in physical address space

*/

__lookup_machine_type:
adr r3, 3b                 /* r3 = the physical address of label 3 above */
ldmia r3, {r4, r5, r6}     /* r4 = . (the link-time virtual address of label 3), r5 = __arch_info_begin,
                              r6 = __arch_info_end; both symbols are defined in the linker script */
sub r3, r3, r4             @ get offset between virt&phys   /* offset between virtual and physical addresses */
add r5, r5, r3             @ convert virt addresses to      /* r5 = physical address of __arch_info_begin */
add r6, r6, r3             @ physical address space         /* r6 = physical address of __arch_info_end */
1:  ldr r3, [r5, #MACHINFO_TYPE] @ get machine type         /* MACHINFO_TYPE = 0, so this loads the machine_desc
                                                               member .nr, e.g. MACH_TYPE_SMDK2410 (see mach-types.h) */
    teq r3, r1             @ matches loader number?         /* r1 is the machine ID passed in by the bootloader (362 here) */
    beq 2f                 @ found                          /* if the bootloader's ID matches .nr, we are done */
    add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc    /* advance r5 to the next machine_desc */
    cmp r5, r6             /* have we walked every machine_desc? */
    blo 1b                 /* not yet: keep comparing */
    mov r5, #0  @ unknown machine    /* no machine_desc matched: r5 = 0 */
2:  mov pc, lr             /* return */

ENDPROC(__lookup_machine_type)

/*

* This provides a C-API version of the above function.

*/

ENTRY(lookup_machine_type)
stmfd sp!, {r4 - r6, lr}
mov r1, r0
bl __lookup_machine_type
mov r0, r5
ldmfd sp!, {r4 - r6, pc}

ENDPROC(lookup_machine_type)

/* Determine validity of the r2 atags pointer.  The heuristic requires

* that the pointer be aligned, in the first 16k of physical RAM and

* that the ATAG_CORE marker is first and present.  Future revisions

* of this function may be more lenient with the physical address and

* may also be able to move the ATAGS block if necessary.

*

* r8  = machinfo

*

* Returns:

*  r2 either valid atags pointer, or zero

*  r5, r6 corrupted

*/

__vet_atags:
tst r2, #0x3               @ aligned?
bne 1f
ldr r5, [r2, #0]           @ is first tag ATAG_CORE?
subs r5, r5, #ATAG_CORE_SIZE
bne 1f
ldr r5, [r2, #4]
ldr r6, =ATAG_CORE
cmp r5, r6
bne 1f
mov pc, lr                 @ atag pointer is ok
1:  mov r2, #0
    mov pc, lr

ENDPROC(__vet_atags)
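The heuristic is compact enough to restate in C (a sketch; the function name is mine). The pointer must be word-aligned, the first tag must have the size of an ATAG_CORE tag, and its tag field must be ATAG_CORE:

#include <stdint.h>

#define ATAG_CORE      0x54410001
#define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2)   /* tag size in words: 5 */

static uint32_t *vet_atags(uint32_t *atags)
{
    if ((uintptr_t)atags & 0x3)        /* not 4-byte aligned */
        return 0;
    if (atags[0] != ATAG_CORE_SIZE)    /* first word: tag size in words */
        return 0;
    if (atags[1] != ATAG_CORE)         /* second word: tag identifier */
        return 0;
    return atags;                      /* valid: keep r2 as-is */
}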

/*****************************************************************************************************************************************/

/* proc-arm920.S */

/*

*  linux/arch/arm/mm/proc-arm920.S: MMU functions for ARM920

*

*  Copyright (C) 1999,2000 ARM Limited

*  Copyright (C) 2000 Deep Blue Solutions Ltd.

*  hacked for non-paged-MM by Hyok S. Choi, 2003.

*

* This program is free software; you can redistribute it and/or modify

* it under the terms of the GNU General Public License as published by

* the Free Software Foundation; either version 2 of the License, or

* (at your option) any later version.

*

* This program is distributed in the hope that it will be useful,

* but WITHOUT ANY WARRANTY; without even the implied warranty of

* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the

* GNU General Public License for more details.

*

* You should have received a copy of the GNU General Public License

* along with this program; if not, write to the Free Software

* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

*

*

* These are the low level assembler for performing cache and TLB

* functions on the arm920.

*

*  CONFIG_CPU_ARM920_CPU_IDLE -> nohlt

*/

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*

* The size of one data cache line.

*/

#define CACHE_DLINESIZE 32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS 8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES 64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintainence instructions.
 */
#define CACHE_DLIMIT 65536

.text

/*

* cpu_arm920_proc_init()

*/

ENTRY(cpu_arm920_proc_init)
mov pc, lr

/*

* cpu_arm920_proc_fin()

*/

ENTRY(cpu_arm920_proc_fin)
stmfd sp!, {lr}
mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, ip
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
bl arm920_flush_kern_cache_all
#else
bl v4wt_flush_kern_cache_all
#endif
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000       @ ...i............
bic r0, r0, #0x000e       @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ldmfd sp!, {pc}

/*

* cpu_arm920_reset(loc)

*

* Perform a soft reset of the system.  Put the CPU into the

* same state as it would be if it had been reset, and branch

* to what would be the reset vector.

*

* loc: location to jump to for soft reset

*/

.align 5
ENTRY(cpu_arm920_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
#ifdef CONFIG_MMU
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
#endif
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f       @ ............wcam
bic ip, ip, #0x1100       @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0

/*

* cpu_arm920_do_idle()

*/

.align 5
ENTRY(cpu_arm920_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
mov pc, lr

#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(arm920_flush_user_cache_all)
/* FALLTHROUGH */
/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(arm920_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 8 segments
1:  orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:  mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index
    subs r3, r3, #1 << 26
    bcs 2b                @ entries 63 to 0
    subs r1, r1, #1 << 5
    bcs 1b                @ segments 7 to 0
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (inclusive)
 * - end   - end address (exclusive)
 * - flags - vm_flags for address space
 */
ENTRY(arm920_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0            @ calculate total size
cmp r3, #CACHE_DLIMIT
bhs __flush_whole_cache
1:  mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
    tst r2, #VM_EXEC
    mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
    add r0, r0, #CACHE_DLINESIZE
    cmp r0, r1
    blo 1b
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(arm920_coherent_kern_range)
/* FALLTHROUGH */
/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(arm920_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1:  mcr p15, 0, r0, c7, c10, 1 @ clean D entry
    mcr p15, 0, r0, c7, c5, 1  @ invalidate I entry
    add r0, r0, #CACHE_DLINESIZE
    cmp r0, r1
    blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr

/*
 * flush_kern_dcache_page(void *page)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr - page aligned address
 */
ENTRY(arm920_flush_kern_dcache_page)
add r1, r0, #PAGE_SZ
1:  mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
    add r0, r0, #CACHE_DLINESIZE
    cmp r0, r1
    blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c5, 0  @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end   - virtual end address
 *
 * (same as v4wb)
 */
ENTRY(arm920_dma_inv_range)
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHE_DLINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
1:  mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
    add r0, r0, #CACHE_DLINESIZE
    cmp r0, r1
    blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 *
 * (same as v4wb)
 */
ENTRY(arm920_dma_clean_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1:  mcr p15, 0, r0, c7, c10, 1 @ clean D entry
    add r0, r0, #CACHE_DLINESIZE
    cmp r0, r1
    blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(arm920_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1:  mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
    add r0, r0, #CACHE_DLINESIZE
    cmp r0, r1
    blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr

ENTRY(arm920_cache_fns)
.long arm920_flush_kern_cache_all
.long arm920_flush_user_cache_all
.long arm920_flush_user_cache_range
.long arm920_coherent_kern_range
.long arm920_coherent_user_range
.long arm920_flush_kern_dcache_page
.long arm920_dma_inv_range
.long arm920_dma_clean_range
.long arm920_dma_flush_range

#endif

ENTRY(cpu_arm920_dcache_clean_area)
1:  mcr p15, 0, r0, c7, c10, 1 @ clean D entry
    add r0, r0, #CACHE_DLINESIZE
    subs r1, r1, #CACHE_DLINESIZE
    bhi 1b
mov pc, lr

/* =============================== PageTable ============================== */

/*

* cpu_arm920_switch_mm(pgd)

*

* Set the translation base pointer to be as described by pgd.

*

* pgd: new page tables

*/

.align 5
ENTRY(cpu_arm920_switch_mm)
#ifdef CONFIG_MMU
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
#else
@ && 'Clean & Invalidate whole DCache'
@ && Re-written to use Index Ops.
@ && Uses registers r1, r3 and ip
mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 8 segments
1:  orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:  mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index
    subs r3, r3, #1 << 26
    bcs 2b                @ entries 63 to 0
    subs r1, r1, #1 << 5
    bcs 1b                @ segments 7 to 0
#endif
mcr p15, 0, ip, c7, c5, 0  @ invalidate I cache
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0  @ load page table pointer
mcr p15, 0, ip, c8, c7, 0  @ invalidate I & D TLBs
#endif
mov pc, lr

/*

* cpu_arm920_set_pte(ptep, pte, ext)

*

* Set a PTE and flush it out

*/

.align 5
ENTRY(cpu_arm920_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif
mov pc, lr

__INIT
.type __arm920_setup, #function
__arm920_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7     @ invalidate I,D caches on v4   /* invalidate the instruction and data caches */
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4      /* drain the write buffer */
#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7     @ invalidate I,D TLBs on v4     /* invalidate the entire TLB */
#endif
adr r5, arm920_crval
ldmia r5, {r5, r6}         /* r5 = 0x00003f3f, r6 = 0x00003135 */
mrc p15, 0, r0, c1, c0     @ get control register v4       /* read CP15 register c1 (the control register) */
bic r0, r0, r5             /* clear bits [13:8] and [5:0] */
orr r0, r0, r6             /* set bits [13:12], bit [8], bits [5:4], bit [2] and bit [0] */
mov pc, lr
.size __arm920_setup, . - __arm920_setup

/*
 *  R
 * .RVI ZFRS BLDP WCAM
 * ..11 0001 ..11 0101
 *
 */
.type arm920_crval, #object
arm920_crval:
crval clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130   /* crval is a macro expanding to .word \clear and
                                                                 .word \mmuset, so arm920_crval is laid out as
                                                                 .word 0x00003f3f, .word 0x00003135 */
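In C, the control-register update that __arm920_setup performs with these constants is a simple clear-then-set (a sketch; the function name is mine):

#include <stdint.h>

/* new_ctrl = (old_ctrl & ~clear) | mmuset, with the arm920_crval constants */
static uint32_t arm920_ctrl_update(uint32_t old_ctrl)
{
    const uint32_t clear  = 0x00003f3f;  /* clears bits [13:8] and [5:0] */
    const uint32_t mmuset = 0x00003135;  /* sets V,I (13:12), S (8), D,P (5:4), C (2), M (0),
                                            per the .RVI ZFRS BLDP WCAM legend above */
    return (old_ctrl & ~clear) | mmuset;
}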

__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *     come through these
 */
.type arm920_processor_functions, #object
arm920_processor_functions:
.word v4t_early_abort
.word pabort_noifar
.word cpu_arm920_proc_init
.word cpu_arm920_proc_fin
.word cpu_arm920_reset
.word cpu_arm920_do_idle
.word cpu_arm920_dcache_clean_area
.word cpu_arm920_switch_mm
.word cpu_arm920_set_pte_ext
.size arm920_processor_functions, . - arm920_processor_functions

.section ".rodata"

.typecpu_arch_name, #object

cpu_arch_name:

.asciz"armv4t"

.sizecpu_arch_name, . - cpu_arch_name

.typecpu_elf_name, #object

cpu_elf_name:

.asciz"v4"

.sizecpu_elf_name, . - cpu_elf_name

.typecpu_arm920_name, #object

cpu_arm920_name:

.asciz"ARM920T"

.sizecpu_arm920_name, . - cpu_arm920_name

.align

.section ".proc.info.init", #alloc, #execinstr    /* 设置段的属性为.proc.info.init,在连接脚本中会将所有有此

段属性的段全部组织在一起*/

.type__arm920_proc_info,#object

__arm920_proc_info:   /* 2440架构的(proc_info_list结构体的定义 */

.long0x41009200     /* cpu_val */

.long0xff00fff0     /* cpu_mask */

.long   PMD_TYPE_SECT | \

PMD_SECT_BUFFERABLE | \

PMD_SECT_CACHEABLE | \

PMD_BIT4 | \

PMD_SECT_AP_WRITE | \

PMD_SECT_AP_READ         /* =0xc1e=1100 0001 1110 ===>设置访问内存的权限--->可读写,写回模式*/

.long   PMD_TYPE_SECT | \

PMD_BIT4 | \

PMD_SECT_AP_WRITE | \

PMD_SECT_AP_READ         /* =0xc12=1100 0001 0010 ====>设置访问内存权限--->可读写,no Dcache,no buffer*/

b__arm920_setup                /*  */

.longcpu_arch_name          /* ="armv4t" */

.longcpu_elf_name             /* ="v4" */

.longHWCAP_SWP | HWCAP_HALF | HWCAP_THUMB   /* =7 */

.longcpu_arm920_name        /* ="ARM920T" */

.longarm920_processor_functions       /*  */

.longv4wbi_tlb_fns         /*  */

.longv4wb_user_fns       /*  */

#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH

.longarm920_cache_fns  /*  */

#else

.longv4wt_cache_fns       /*  */

#endif

.size__arm920_proc_info, . - __arm920_proc_info
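The two MMU flag values can be cross-checked by OR-ing the PMD bit definitions; the numeric values below are the standard ones from asm/pgtable-hwdef.h, restated here for this illustration:

#include <stdio.h>

/* PMD bit values as defined in asm/pgtable-hwdef.h (restated) */
#define PMD_TYPE_SECT        (2 << 0)
#define PMD_SECT_BUFFERABLE  (1 << 2)
#define PMD_SECT_CACHEABLE   (1 << 3)
#define PMD_BIT4             (1 << 4)
#define PMD_SECT_AP_WRITE    (1 << 10)
#define PMD_SECT_AP_READ     (1 << 11)

int main(void)
{
    unsigned mm = PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | PMD_SECT_CACHEABLE |
                  PMD_BIT4 | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ;
    unsigned io = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ;
    printf("mm_mmuflags = 0x%x\n", mm);  /* prints 0xc1e */
    printf("io_mmuflags = 0x%x\n", io);  /* prints 0xc12 */
    return 0;
}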

/*****************************************************************************************************************************************/

/* domain.h: access permission (domain) definitions */

/*

*  arch/arm/include/asm/domain.h

*

*  Copyright (C) 1999 Russell King.

*

* This program is free software; you can redistribute it and/or modify

* it under the terms of the GNU General Public License version 2 as

* published by the Free Software Foundation.

*/

#ifndef __ASM_PROC_DOMAIN_H

#define __ASM_PROC_DOMAIN_H

/*

* Domain numbers

*

*  DOMAIN_IO     - domain 2 includes all IO only

*  DOMAIN_USER   - domain 1 includes all user memory only

*  DOMAIN_KERNEL - domain 0 includes all kernel memory only

*

* The domain numbering depends on whether we support 36 physical

* address for I/O or not.  Addresses above the 32 bit boundary can

* only be mapped using supersections and supersections can only

* be set for domain 0.  We could just default to DOMAIN_IO as zero,

* but there may be systems with supersection support and no 36-bit

* addressing.  In such cases, we want to map system memory with

* supersections to reduce TLB misses and footprint.

*

* 36-bit addressing and supersections are only available on

* CPUs based on ARMv6+ or the Intel XSC3 core.

*/

#ifndef CONFIG_IO_36

#define DOMAIN_KERNEL 0
#define DOMAIN_TABLE  0
#define DOMAIN_USER   1
#define DOMAIN_IO     2

#else

#define DOMAIN_KERNEL 2   /* domain for kernel space: domain 2, manager permission (internal high-speed SRAM,
                             internal mini-cache, RAM, and ROM/flash spaces) */
#define DOMAIN_TABLE  2   /* domain for page table space: domain 2, manager permission */
#define DOMAIN_USER   1   /* domain for user space: domain 1, client permission (low and high vector spaces) */
#define DOMAIN_IO     0   /* domain for I/O space: domain 0, client permission (device space) */

#endif

/*
 * Domain types
 */
#define DOMAIN_NOACCESS 0  /* no access allowed */
#define DOMAIN_CLIENT   1  /* client permission: accesses are checked against the R and S bits in CP15 control
                              register c1 and the AP access-control bits in the page table entries */
#define DOMAIN_MANAGER  3  /* manager permission: the R and S bits in CP15 c1 and the AP bits in the first-level
                              page table entries are ignored */

#define domain_val(dom,type)    ((type) << (2*(dom)))
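For example, the value head.S loads into the domain access control register in __enable_mmu can be computed with domain_val; the small illustrative program below uses the non-CONFIG_IO_36 domain numbers:

#include <stdio.h>

#define DOMAIN_KERNEL  0
#define DOMAIN_TABLE   0
#define DOMAIN_USER    1
#define DOMAIN_IO      2
#define DOMAIN_CLIENT  1
#define DOMAIN_MANAGER 3

#define domain_val(dom,type) ((type) << (2*(dom)))

int main(void)
{
    /* the DACR value __enable_mmu writes into CP15 c3 */
    unsigned dacr = domain_val(DOMAIN_USER,   DOMAIN_MANAGER) |
                    domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |
                    domain_val(DOMAIN_TABLE,  DOMAIN_MANAGER) |
                    domain_val(DOMAIN_IO,     DOMAIN_CLIENT);
    printf("DACR = 0x%x\n", dacr);   /* 0x3 | 0xc | 0x10 = 0x1f */
    return 0;
}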

#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU

#define set_domain(x)\
do {\
    __asm__ __volatile__(\
    "mcr p15, 0, %0, c3, c0  @ set domain"\
      : : "r" (x));\
    isb();\
} while (0)

#define modify_domain(dom,type)\
do {\
    struct thread_info *thread = current_thread_info();\
    unsigned int domain = thread->cpu_domain;\
    domain &= ~domain_val(dom, DOMAIN_MANAGER);\
    thread->cpu_domain = domain | domain_val(dom, type);\
    set_domain(thread->cpu_domain);\
} while (0)

#else
#define set_domain(x)            do { } while (0)
#define modify_domain(dom,type)  do { } while (0)
#endif

#endif /* !__ASSEMBLY__ */
#endif /* !__ASM_PROC_DOMAIN_H */

/*****************************************************************************************************************************************/

/* page.h */

/*

*  arch/arm/include/asm/page.h

*

*  Copyright (C) 1995-2003 Russell King

*

* This program is free software; you can redistribute it and/or modify

* it under the terms of the GNU General Public License version 2 as

* published by the Free Software Foundation.

*/

#ifndef _ASMARM_PAGE_H

#define _ASMARM_PAGE_H

/* PAGE_SHIFT determines the page size */

#define PAGE_SHIFT 12      /* Linux uses 4K pages, so this is 12: the number of bits of the in-page offset within
                              a virtual address */
#define PAGE_SIZE (1UL << PAGE_SHIFT)  /* page size = 1 << 12 = 4K (physical memory is divided into discrete units
                                          called pages; many internal memory operations work on single pages) */
#define PAGE_MASK (~(PAGE_SIZE-1))  /* page mask = 0xfffff000 */
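A quick standalone illustration of how these three macros decompose a virtual address (the example address is arbitrary):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long addr = 0xc0008123UL;                       /* arbitrary example address */
    printf("page base   = 0x%lx\n", addr & PAGE_MASK);       /* 0xc0008000 */
    printf("page offset = 0x%lx\n", addr & ~PAGE_MASK);      /* 0x123 */
    printf("page frame  = 0x%lx\n", addr >> PAGE_SHIFT);     /* 0xc0008 */
    return 0;
}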

#ifndef __ASSEMBLY__

#ifndef CONFIG_MMU
#include "page-nommu.h"
#else
#include <asm/glue.h>

/*
 * User Space Model
 * ================
 *
 * This section selects the correct set of functions for dealing with
 * page-based copying and clearing for user space for the particular
 * processor(s) we're building for.
 *
 * We have the following to choose from:
 *   v3      - ARMv3
 *   v4wt    - ARMv4 with writethrough cache, without minicache
 *   v4wb    - ARMv4 with writeback cache, without minicache
 *   v4_mc   - ARMv4 with minicache
 *   xscale  - Xscale
 *   xsc3    - XScalev3
 */

#undef _USER

#undef MULTI_USER

#ifdef CONFIG_CPU_COPY_V3

#ifdef _USER

#define MULTI_USER 1

#else

#define _USER v3

#endif

#endif

#ifdef CONFIG_CPU_COPY_V4WT

#ifdef _USER

#define MULTI_USER 1

#else

#define _USER v4wt

#endif

#endif

#ifdef CONFIG_CPU_COPY_V4WB

#ifdef _USER

#define MULTI_USER 1

#else

#define _USER v4wb

#endif

#endif

#ifdef CONFIG_CPU_COPY_FEROCEON

#ifdef _USER

#define MULTI_USER 1

#else

#define _USER feroceon

#endif

#endif

#ifdef CONFIG_CPU_COPY_FA

#ifdef _USER

#define MULTI_USER 1

#else

#define _USER fa

#endif

#endif

#ifdef CONFIG_CPU_SA1100

#ifdef _USER

#define MULTI_USER 1

#else

#define _USER v4_mc

#endif

#endif

#ifdef CONFIG_CPU_XSCALE

#ifdef _USER

#define MULTI_USER 1

#else

#define _USER xscale_mc

#endif

#endif

#ifdef CONFIG_CPU_XSC3

#ifdef _USER

#define MULTI_USER 1

#else

#define _USER xsc3_mc

#endif

#endif

#ifdef CONFIG_CPU_COPY_V6

#define MULTI_USER 1

#endif

#if !defined(_USER) && !defined(MULTI_USER)

#error Unknown user operations model

#endif

struct page;

struct cpu_user_fns {

void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);

void (*cpu_copy_user_highpage)(struct page *to, struct page *from,

unsigned long vaddr);

};

#ifdef MULTI_USER
extern struct cpu_user_fns cpu_user;

#define __cpu_clear_user_highpage  cpu_user.cpu_clear_user_highpage
#define __cpu_copy_user_highpage   cpu_user.cpu_copy_user_highpage

#else

#define __cpu_clear_user_highpage  __glue(_USER,_clear_user_highpage)
#define __cpu_copy_user_highpage   __glue(_USER,_copy_user_highpage)

extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
    unsigned long vaddr);
#endif

#define clear_user_highpage(page,vaddr)\

__cpu_clear_user_highpage(page, vaddr)

#define __HAVE_ARCH_COPY_USER_HIGHPAGE

#define copy_user_highpage(to,from,vaddr,vma)\

__cpu_copy_user_highpage(to, from, vaddr)

#define clear_page(page)    memset((void *)(page), 0, PAGE_SIZE)

extern void copy_page(void *to, const void *from);

#undef STRICT_MM_TYPECHECKS

#ifdef STRICT_MM_TYPECHECKS

/*

* These are used to make use of C type-checking..

*/

typedef struct { unsigned long pte; } pte_t;  /* second-level page table entry */
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd[2]; } pgd_t;  /* first-level page table entry; note pgd_t is 8 bytes, so one pgd
                                                    covers two first-level descriptors and thus two second-level
                                                    page tables, each of which exists in two versions: one defined
                                                    by Linux and one defined by the hardware */
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x)      ((x).pte)

#define pmd_val(x)      ((x).pmd)

#define pgd_val(x)      ((x).pgd[0])

#define pgprot_val(x)   ((x).pgprot)

#define __pte(x)        ((pte_t) { (x) } )

#define __pmd(x)        ((pmd_t) { (x) } )

#define __pgprot(x)     ((pgprot_t) { (x) } )

#else

/*

* .. while these make it easier on the compiler

*/

typedef unsigned long pte_t;

typedef unsigned long pmd_t;

typedef unsigned long pgd_t[2];

typedef unsigned long pgprot_t;

#define pte_val(x)      (x)

#define pmd_val(x)      (x)

#define pgd_val(x)      ((x)[0])

#define pgprot_val(x)   (x)

#define __pte(x)        (x)

#define __pmd(x)        (x)

#define __pgprot(x)     (x)

#endif /* STRICT_MM_TYPECHECKS */

#endif /* CONFIG_MMU */

typedef struct page *pgtable_t;

#include <asm/memory.h>

#endif /* !__ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS \

(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \

VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/page.h>

#endif

/*****************************************************************************************************************************************/

/* pgtable.h */

/*

*  arch/arm/include/asm/pgtable.h

*

*  Copyright (C) 1995-2002 Russell King

*

* This program is free software; you can redistribute it and/or modify

* it under the terms of the GNU General Public License version 2 as

* published by the Free Software Foundation.

*/

#ifndef _ASMARM_PGTABLE_H

#define _ASMARM_PGTABLE_H

#include <asm-generic/4level-fixup.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include "pgtable-nommu.h"

#else

#include <asm/memory.h>
#include <mach/vmalloc.h>
#include <asm/pgtable-hwdef.h>

/*

* Just any arbitrary offset to the start of the vmalloc VM area: the

* current 8MB value just means that there will be a 8MB "hole" after the

* physical memory until the kernel virtual memory starts.  That means that

* any out-of-bounds memory accesses will hopefully be caught.

* The vmalloc() routines leaves a hole of 4kB between each vmalloced

* area for the same reason. ;)

*

* Note that platforms may override VMALLOC_START, but they must provide

* VMALLOC_END.  VMALLOC_END defines the (exclusive) limit of this space,

* which may not overlap IO space.

*/

#ifndef VMALLOC_START

#define VMALLOC_OFFSET (8*1024*1024)  /* offset inserted before the start of the vmalloc area */
#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) /* start of the address
                                          range that vmalloc allocates from */

#endif

/*

* Hardware-wise, we have a two level page table structure, where the first

* level has 4096 entries, and the second level has 256 entries.  Each entry

* is one 32-bit word.  Most of the bits in the second level entry are used

* by hardware, and there aren't any "accessed" and "dirty" bits.

*

* Linux on the other hand has a three level page table structure, which can

* be wrapped to fit a two level page table structure easily - using the PGD

* and PTE only.  However, Linux also expects one "PTE" table per page, and

* at least a "dirty" bit.

*

* Therefore, we tweak the implementation slightly - we tell Linux that we

* have 2048 entries in the first level, each of which is 8 bytes (iow, two

* hardware pointers to the second level.)  The second level contains two

* hardware PTE tables arranged contiguously, followed by Linux versions

* which contain the state information Linux needs.  We, therefore, end up

* with 512 entries in the "PTE" level.

*

* This leads to the page tables having the following layout:

*

*    pgd             pte

* |        |

* +--------+ +0

* |        |-----> +------------+ +0

* +- - - - + +4    |  h/w pt 0  |

* |        |-----> +------------+ +1024

* +--------+ +8    |  h/w pt 1  |

* |        |       +------------+ +2048

* +- - - - +       | Linux pt 0 |

* |        |       +------------+ +3072

* +--------+       | Linux pt 1 |

* |        |       +------------+ +4096

*

* See L_PTE_xxx below for definitions of bits in the "Linux pt", and

* PTE_xxx for definitions of bits appearing in the "h/w pt".

*

* PMD_xxx definitions refer to bits in the first level page table.

*

* The "dirty" bit is emulated by only granting hardware write permission

* iff the page is marked "writable" and "dirty" in the Linux PTE.  This

* means that a write to a clean page will cause a permission fault, and

* the Linux MM layer will mark the page dirty via handle_pte_fault().

* For the hardware to notice the permission change, the TLB entry must

* be flushed, and ptep_set_access_flags() does that for us.

*

* The "accessed" or "young" bit is emulated by a similar method; we only

* allow accesses to the page if the "young" bit is set.  Accesses to the

* page will cause a fault, and handle_pte_fault() will set the young bit

* for us as long as the page is marked present in the corresponding Linux

* PTE entry.  Again, ptep_set_access_flags() will ensure that the TLB is

* up to date.

*

* However, when the "young" bit is cleared, we deny access to the page

* by clearing the hardware PTE.  Currently Linux does not flush the TLB

* for us in this case, which means the TLB will retain the translation

* until either the TLB entry is evicted under pressure, or a context

* switch which changes the user space mapping occurs.

*/

#define PTRS_PER_PTE 512    /* number of entries in one page table (PTE = page table entry) */
#define PTRS_PER_PMD 1      /* number of entries in one page middle directory (PMD) */
#define PTRS_PER_PGD 2048   /* number of entries in one page global directory (PGD) */

/*

* PMD_SHIFT determines the size of the area a second-level page table can map

* PGDIR_SHIFT determines what a third-level page table entry can map

*/

#define PMD_SHIFT    21
#define PGDIR_SHIFT  21

#define LIBRARY_TEXT_START 0x0c000000

#ifndef __ASSEMBLY__

extern void __pte_error(const char *file, int line, unsigned long val);

extern void __pmd_error(const char *file, int line, unsigned long val);

extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte)    __pte_error(__FILE__, __LINE__, pte_val(pte))
#define pmd_ERROR(pmd)    __pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#define pgd_ERROR(pgd)    __pgd_error(__FILE__, __LINE__, pgd_val(pgd))

#endif /* !__ASSEMBLY__ */

#define PMD_SIZE    (1UL << PMD_SHIFT)
#define PMD_MASK    (~(PMD_SIZE-1))
#define PGDIR_SIZE  (1UL << PGDIR_SHIFT)
#define PGDIR_MASK  (~(PGDIR_SIZE-1))

/*

* This is the lowest virtual address we can permit any user space

* mapping to be mapped at.  This is particularly important for

* non-high vector CPUs.

*/

#define FIRST_USER_ADDRESS  PAGE_SIZE
#define FIRST_USER_PGD_NR   1
#define USER_PTRS_PER_PGD   ((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)

/*

* section address mask and size definitions.

*/

#define SECTION_SHIFT  20
#define SECTION_SIZE   (1UL << SECTION_SHIFT)
#define SECTION_MASK   (~(SECTION_SIZE-1))

/*
 * ARMv6 supersection address mask and size definitions.
 */
#define SUPERSECTION_SHIFT  24
#define SUPERSECTION_SIZE   (1UL << SUPERSECTION_SHIFT)
#define SUPERSECTION_MASK   (~(SUPERSECTION_SIZE-1))

/*

* "Linux" PTE definitions.

*

* We keep two sets of PTEs - the hardware and the linux version.

* This allows greater flexibility in the way we map the Linux bits

* onto the hardware tables, and allows us to have YOUNG and DIRTY

* bits.

*

* The PTE table pointer refers to the hardware entries; the "Linux"

* entries are stored 1024 bytes below.

*/

#define L_PTE_PRESENT     (1 << 0)
#define L_PTE_FILE        (1 << 1)    /* only when !PRESENT */
#define L_PTE_YOUNG       (1 << 1)
#define L_PTE_BUFFERABLE  (1 << 2)    /* obsolete, matches PTE */
#define L_PTE_CACHEABLE   (1 << 3)    /* obsolete, matches PTE */
#define L_PTE_DIRTY       (1 << 6)
#define L_PTE_WRITE       (1 << 7)
#define L_PTE_USER        (1 << 8)
#define L_PTE_EXEC        (1 << 9)
#define L_PTE_SHARED      (1 << 10)   /* shared(v6), coherent(xsc3) */

/*

* These are the memory types, defined to be compatible with

* pre-ARMv6 CPUs cacheable and bufferable bits:   XXCB

*/

#define L_PTE_MT_UNCACHED      (0x00 << 2)    /* 0000 */
#define L_PTE_MT_BUFFERABLE    (0x01 << 2)    /* 0001 */
#define L_PTE_MT_WRITETHROUGH  (0x02 << 2)    /* 0010 */
#define L_PTE_MT_WRITEBACK     (0x03 << 2)    /* 0011 */
#define L_PTE_MT_MINICACHE     (0x06 << 2)    /* 0110 (sa1100, xscale) */
#define L_PTE_MT_WRITEALLOC    (0x07 << 2)    /* 0111 */
#define L_PTE_MT_DEV_SHARED    (0x04 << 2)    /* 0100 */
#define L_PTE_MT_DEV_NONSHARED (0x0c << 2)    /* 1100 */
#define L_PTE_MT_DEV_WC        (0x09 << 2)    /* 1001 */
#define L_PTE_MT_DEV_CACHED    (0x0b << 2)    /* 1011 */
#define L_PTE_MT_MASK          (0x0f << 2)

#ifndef __ASSEMBLY__

/*

* The pgprot_* and protection_map entries will be fixed up in runtime

* to include the cachable and bufferable bits based on memory policy,

* as well as any architecture dependent bits like global/ASID and SMP

* shared mapping bits.

*/

#define _L_PTE_DEFAULT    L_PTE_PRESENT | L_PTE_YOUNG

extern pgprot_t    pgprot_user;
extern pgprot_t    pgprot_kernel;

#define _MOD_PROT(p, b)    __pgprot(pgprot_val(p) | (b))

#define PAGE_NONE           pgprot_user
#define PAGE_SHARED         _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE)
#define PAGE_SHARED_EXEC    _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
#define PAGE_COPY           _MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY_EXEC      _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
#define PAGE_READONLY       _MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_READONLY_EXEC  _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
#define PAGE_KERNEL         pgprot_kernel
#define PAGE_KERNEL_EXEC    _MOD_PROT(pgprot_kernel, L_PTE_EXEC)

#define __PAGE_NONE           __pgprot(_L_PTE_DEFAULT)
#define __PAGE_SHARED         __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE)
#define __PAGE_SHARED_EXEC    __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
#define __PAGE_COPY           __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY_EXEC      __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)
#define __PAGE_READONLY       __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_READONLY_EXEC  __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)

#endif /* __ASSEMBLY__ */

/*

* The table below defines the page protection levels that we insert into our

* Linux page table version.  These get translated into the best that the

* architecture can perform.  Note that on most ARM hardware:

*  1) We cannot do execute protection

*  2) If we could do execute protection, then read is implied

*  3) write implies read permissions

*/

#define __P000  __PAGE_NONE

#define __P001  __PAGE_READONLY

#define __P010  __PAGE_COPY

#define __P011  __PAGE_COPY

#define __P100  __PAGE_READONLY_EXEC

#define __P101  __PAGE_READONLY_EXEC

#define __P110  __PAGE_COPY_EXEC

#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE

#define __S001  __PAGE_READONLY

#define __S010  __PAGE_SHARED

#define __S011  __PAGE_SHARED

#define __S100  __PAGE_READONLY_EXEC

#define __S101  __PAGE_READONLY_EXEC

#define __S110  __PAGE_SHARED_EXEC

#define __S111  __PAGE_SHARED_EXEC

#ifndef __ASSEMBLY__

/*

* ZERO_PAGE is a global shared page that is always zero: used

* for zero-mapped memory areas etc..

*/

extern struct page *empty_zero_page;

#define ZERO_PAGE(vaddr)    (empty_zero_page)

#define pte_pfn(pte)        (pte_val(pte) >> PAGE_SHIFT)   /* extract the page frame number from a pte */
#define pfn_pte(pfn,prot)   (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))  /* build a pte from a page frame
                                                                                  number and page protection bits */

#define pte_none(pte)           (!pte_val(pte))  /* is the pte empty (zero)? */
#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)  /* clear a pte */
#define pte_page(pte)           (pfn_to_page(pte_pfn(pte)))     /* take the frame number from a pte and return the
                                                                   corresponding page frame */
#define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr))  /* locate, in the master kernel
                                         page tables, the virtual address of the pte for a kernel address */
#define pte_offset_map(dir,addr)    (pmd_page_vaddr(*(dir)) + __pte_index(addr))  /* locate the pte for a linear
                                         address in a process page table; if the page table lives in high memory,
                                         a temporary kernel mapping is also set up for it */
#define pte_offset_map_nested(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr))
#define pte_unmap(pte)          do { } while (0)    /* would tear down the temporary kernel mapping created by
                                                       pte_offset_map; a no-op here */
#define pte_unmap_nested(pte)   do { } while (0)

#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)  /* write a given value into a pte */

#define set_pte_at(mm,addr,ptep,pteval) do { \
    set_pte_ext(ptep, pteval, (addr) >= TASK_SIZE ? 0 : PTE_EXT_NG); \
} while (0)

/*

* The following only work if pte_present() is true.

* Undefined behaviour if not..

*/

#define pte_present(pte)  (pte_val(pte) & L_PTE_PRESENT)  /* is the pte usable? set while the page is in memory;
                                                             a page that is present but not read/writable is typical
                                                             of copy-on-write */
#define pte_write(pte)    (pte_val(pte) & L_PTE_WRITE)    /* does the pte carry the writable flag? */
#define pte_dirty(pte)    (pte_val(pte) & L_PTE_DIRTY)    /* is the pte dirty? */
#define pte_young(pte)    (pte_val(pte) & L_PTE_YOUNG)    /* has the page been accessed recently? */
#define pte_special(pte)  (0)

/*

* The following only works if pte_present() is not true.

*/

#define pte_file(pte)     (pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x)   (pte_val(x) >> 2)     /* when a pte maps a file page that is not resident in memory,
                                                   extract the file page number from it */
#define pgoff_to_pte(x)   __pte(((x) << 2) | L_PTE_FILE)  /* store a file page number into a pte */

#define PTE_FILE_MAX_BITS 30

#define PTE_BIT_FUNC(fn,op) \

static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE);

PTE_BIT_FUNC(mkwrite,   |= L_PTE_WRITE);

PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);

PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);

PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);

PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

/*

* Mark the prot value as uncacheable and unbufferable.

*/

#define pgprot_noncached(prot) \

__pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \

__pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_BUFFERABLE)

#define pmd_none(pmd)     (!pmd_val(pmd))
#define pmd_present(pmd)  (pmd_val(pmd))
#define pmd_bad(pmd)      (pmd_val(pmd) & 2)  /* does the pmd entry point to an unusable page table? */

#define copy_pmd(pmdpd,pmdps)\

do {\

pmdpd[0] = pmdps[0];\

pmdpd[1] = pmdps[1];\

flush_pmd_entry(pmdpd);\

} while (0)

#define pmd_clear(pmdp)\

do {\

pmdp[0] = __pmd(0);\

pmdp[1] = __pmd(0);\

clean_pmd_entry(pmdp);\

} while (0)

static inline pte_t *pmd_page_vaddr(pmd_t pmd)

{

unsigned long ptr;

ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1);

ptr += PTRS_PER_PTE * sizeof(void *);

return __va(ptr);

}

#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))  /* get the page holding the page table this pmd points to */

/*

* Conversion functions: convert a page and protection to a page entry,

* and a page entry and page directory to the page they refer to.

*/

#define mk_pte(page,prot)    pfn_pte(page_to_pfn(page),prot)  /* build a pte from a page frame and its protection bits */

/*

* The "pgd_xxx()" functions here are trivial for a folded two-level

* setup: the pgd is never bad, and a pmd always exists (as it's folded

* into the pgd entry)

*/

#define pgd_none(pgd)      (0)
#define pgd_bad(pgd)       (0)
#define pgd_present(pgd)   (1)
#define pgd_clear(pgdp)    do { } while (0)
#define set_pgd(pgd,pgdp)  do { } while (0)

/* to find an entry in a page-table-directory */

#define pgd_index(addr)       ((addr) >> PGDIR_SHIFT)      /* index of a virtual address in the page global directory */
#define pgd_offset(mm, addr)  ((mm)->pgd+pgd_index(addr))  /* address of the pgd entry for a user address of a process */

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)    pgd_offset(&init_mm, addr)   /* address of the pgd entry for a kernel address */

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, addr) ((pmd_t *)(dir))

/* Find an entry in the third-level page table.. */
#define __pte_index(addr)     (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))  /* index, within its page table, of the
                                                                                pte for a linear address */
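Combining pgd_index and __pte_index, a virtual address splits as follows under this layout (a standalone illustration; the example address is arbitrary):

#include <stdio.h>

#define PAGE_SHIFT   12
#define PGDIR_SHIFT  21
#define PTRS_PER_PTE 512

int main(void)
{
    unsigned long addr = 0xc0008123UL;                               /* arbitrary example address */
    unsigned long pgd  = addr >> PGDIR_SHIFT;                        /* pgd_index: which 2MB slot */
    unsigned long pte  = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);  /* __pte_index: which 4K page in the slot */
    printf("pgd index = %lu, pte index = %lu, offset = 0x%lx\n",
           pgd, pte, addr & ((1UL << PAGE_SHIFT) - 1));
    return 0;
}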

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)

{

const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER;

pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);

return pte;

}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];  /* The first-level page table lives at virtual address swapper_pg_dir:
                                               the virtual base of the initial (first-level/section) page table for
                                               the whole 4GB virtual space. It must be 16KB aligned and occupies
                                               16KB. The slot in swapper_pg_dir for the descriptor of any given
                                               virtual address is fixed, determined by that address's section base.
                                               (The first-level table has 4096 descriptors, each mapping 1MB of
                                               physical space, while PTRS_PER_PGD is 2048 because pgd_t is 8 bytes.) */

/* Encode and decode a swap entry.
 *
 * We support up to 32GB of swap on 4k machines
 */
#define __swp_type(x)            (((x).val >> 2) & 0x7f)
#define __swp_offset(x)          ((x).val >> 9)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 9) })
#define __pte_to_swp_entry(pte)  ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)  ((pte_t) { (swp).val })
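The encoding places the swap type in bits [8:2] and the offset in the bits above; for example (the values are arbitrary):

#include <stdio.h>

int main(void)
{
    unsigned long type = 3, offset = 0x1234;            /* arbitrary example values */
    unsigned long val  = (type << 2) | (offset << 9);   /* __swp_entry */
    printf("swp val = 0x%lx\n", val);                   /* 0x24680c */
    printf("type    = %lu\n", (val >> 2) & 0x7f);       /* __swp_type: 3 */
    printf("offset  = 0x%lx\n", val >> 9);              /* __swp_offset: 0x1234 */
    return 0;
}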

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */

/* FIXME: this is not correct */

#define kern_addr_valid(addr)(1)

#include <asm-generic/pgtable.h>

/*

* We provide our own arch_get_unmapped_area to cope with VIPT caches.

*/

#define HAVE_ARCH_UNMAPPED_AREA

/*

* remap a physical page `pfn' of size `size' with page protection `prot'

* into virtual address `from'

*/

/*为了支持mmap操作,驱动程序需要为它的地址范围建立合适的页表

参数vma是内核根据用户空间传递过来的映射参数即虚拟内存区域,在一定范围内的页将被映射到区域内。

参数addr表示目标用户开始地址

参数pfn为内核物理地址,确切的说应该是虚拟地址应该映射到的物理地址的页面号,实际上就是物理地址

右移PAGE_SHIFT位,在多数请求下,vma结构中的vm_pgoff成员包含了用户需要的值

参数size为映射大小

参数prot为新页所要求的保护属性,驱动程序能够使用vma->vm_page_prot(如果想要把kmalloc申请的内存映射到用户空间,

通常要把相应的内存配置为保留)**/

#define io_remap_pfn_range(vma,from,pfn,size,prot) \

remap_pfn_range(vma, from, pfn, size, prot)

#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */
