ARMv7 Linux 的启动过程 (The ARMv7 Linux boot process)

VMLINUX

arch/arm/kernel/head.S

59 /*

60 * Kernel startup

entry point.

61 *

---------------------------

62 *

63 * This is normally

called from the decompressor code. The requirements

64 * are: MMU = off,

D-cache = off, I-cache = dont care, r0 = 0,

65 * r1 = machine nr, r2

= atags pointer.

66 *

67 * This code is mostly

position independent, so if you link the kernel at

68 * 0xc0008000, you

call this at __pa(0xc0008000).

69 *

70 * See

linux/arch/arm/tools/mach-types for the complete list of machine

71 * numbers for r1.

72 *

73 * We're trying to

keep crap to a minimum; DO NOT add any machine specific

74 * crap here - that's

what the boot loader (or in extreme, well justified

75 * circumstances,

zImage) is for.

76 */

77 __HEAD

78 ENTRY(stext)

79 setmode

PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode

80

@ and irqs disabled

arch/arm/include/asm/assembler.h

170

#ifdef CONFIG_THUMB2_KERNEL

171

.macro setmode, mode, reg

172

mov \reg, #\mode

173

msr cpsr_c, \reg

174

.endm

175

#else

176

.macro setmode, mode, reg

177

msr cpsr_c, #\mode

178

.endm

179

#endif

81 mrc p15,

0, r9, c0, c0 @ get processor id

82 bl

__lookup_processor_type @ r5=procinfo r9=cpuid

arch/arm/kernel/head-common.S

147

/*

148

* Read processor ID register (CP#15, CR0), and look up in the

linker-built

149

* supported processor list. Note that we can't use the absolute

addresses

150

* for the __proc_info lists since we aren't running with the MMU on

151

* (and therefore, we are not in the correct address space). We have

to

152

* calculate the offset.

153

*

154

* r9 = cpuid

155

* Returns:

156

* r3, r4, r6 corrupted

157

* r5 = proc_info pointer in physical address space

158

* r9 = cpuid (preserved)

159

*/

160

__lookup_processor_type:

161

adr r3, 3f

162

ldmia r3, {r5 - r7}

163

add r3, r3, #8

164

sub r3, r3, r7 @ get offset between

virt&phys

165

add r5, r5, r3 @ convert virt

addresses to

166

add r6, r6, r3 @ physical address

space

167

1: ldmia r5, {r3, r4} @ value, mask

168

and r4, r4, r9 @ mask wanted bits

169

teq r3, r4

170

beq 2f

171

add r5, r5, #PROC_INFO_SZ @

sizeof(proc_info_list)

172

cmp r5, r6

173

blo 1b

174

mov r5, #0 @ unknown processor

175

2: mov pc, lr

176

ENDPROC(__lookup_processor_type)

189

/*

190

* Look in and arch/arm/kernel/arch.[ch] for

191

* more information about the __proc_info and __arch_info structures.

192

*/

193

.align 2

194

3: .long __proc_info_begin

195

.long __proc_info_end

196

4: .long .

arch/arm/kernel/vmlinux.lds.S

38

__proc_info_begin = .;

39

*(.proc.info.init)

40

__proc_info_end = .;

arch/arm/mm/proc-v7.S

327

.section ".proc.info.init", #alloc, #execinstr

328

329

/*

330

* Match any ARMv7 processor core.

331

*/

332

.type __v7_proc_info, #object

333

__v7_proc_info:

334

.long 0x000f0000 @ Required ID value

335

.long 0x000f0000 @ Mask for ID

336

.long PMD_TYPE_SECT | \

337

PMD_SECT_AP_WRITE | \

338

PMD_SECT_AP_READ | \

339

PMD_FLAGS

340

.long PMD_TYPE_SECT | \

341

PMD_SECT_XN | \

342

PMD_SECT_AP_WRITE | \

343

PMD_SECT_AP_READ

344

b __v7_setup

345

.long cpu_arch_name

346

.long cpu_elf_name

347

.long

HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP

348

.long cpu_v7_name

349

.long v7_processor_functions

350

.long v7wbi_tlb_fns

351

.long v6_user_fns

352

.long v7_cache_fns

353

.size __v7_proc_info, . - __v7_proc_info

arch/arm/include/asm/procinfo.h

29

struct proc_info_list {

30

unsigned int cpu_val;

31

unsigned int cpu_mask;

32

unsigned long __cpu_mm_mmu_flags; /* used by

head.S */

33

unsigned long __cpu_io_mmu_flags; /* used by

head.S */

34

unsigned long __cpu_flush; /* used by

head.S */

35

const char *arch_name;

36

const char *elf_name;

37

unsigned int elf_hwcap;

38

const char *cpu_name;

39

struct processor *proc;

40

struct cpu_tlb_fns *tlb;

41

struct cpu_user_fns *user;

42

struct cpu_cache_fns *cache;

43

};

83 movs r10,

r5 @ invalid processor (r5=0)?

84 beq

__error_p @ yes, error 'p'

85 bl

__lookup_machine_type @ r5=machinfo

arch/arm/kernel/head-common.S

196

4: .long .

197

.long __arch_info_begin

198

.long __arch_info_end

arch/arm/kernel/vmlinux.lds.S

41

__arch_info_begin = .;

42

*(.arch.info.init)

43

__arch_info_end = .;

arch/arm/include/asm/mach/arch.h

17

struct machine_desc {

18

/*

19

* Note! The first four elements are used

20

* by assembler code in head.S, head-common.S

21

*/

22

unsigned int nr; /* architecture number

*/

23

unsigned int phys_io; /* start of physical io

*/

24

unsigned int io_pg_offst; /* byte offset for io

25

* page tabe entry

*/

26

27

const char *name; /* architecture name

*/

28

unsigned long boot_params; /* tagged list

*/

29

30

unsigned int video_start; /* start of video RAM

*/

31

unsigned int video_end; /* end of video RAM

*/

32

33

unsigned int reserve_lp0 :1; /* never has lp0

*/

34

unsigned int reserve_lp1 :1; /* never has lp1

*/

35

unsigned int reserve_lp2 :1; /* never has lp2

*/

36

unsigned int soft_reboot :1; /* soft reboot

*/

37

void (*fixup)(struct machine_desc *,

38

struct tag *, char **,

39

struct meminfo *);

40

void (*map_io)(void);/* IO mapping function

*/

41

void (*init_irq)(void);

42

struct sys_timer *timer; /* system tick timer

*/

43

void (*init_machine)(void);

44 };

45

46 /*

47 *

Set of macros to define architecture features. This is built into

48 *

a table by the linker.

49

*/

50

#define MACHINE_START(_type,_name) \

51

static const struct machine_desc __mach_desc_##_type \

52

__used \

53

__attribute__((__section__(".arch.info.init"))) = { \

54

.nr = MACH_TYPE_##_type, \

55

.name = _name,

56

57

#define MACHINE_END

58 };

arch/arm/include/asm/assembler.h

749

MACHINE_START(OMAP3517EVM, "OMAP3517/AM3517 EVM")

750

.phys_io = 0x48000000,

751

.io_pg_offst = ((0xd8000000) >> 18) & 0xfffc,

752

.boot_params = 0x80000100,

753

.map_io = am3517_evm_map_io,

754

.init_irq = am3517_evm_init_irq,

755

.init_machine = am3517_evm_init,

756

.timer = &omap_timer,

757

MACHINE_END

200

/*

201

* Lookup machine architecture in the linker-build list of

architectures.

202

* Note that we can't use the absolute addresses for the __arch_info

203

* lists since we aren't running with the MMU on (and therefore, we

are

204

* not in the correct address space). We have to calculate the

offset.

205

*

206

* r1 = machine architecture number

207

* Returns:

208

* r3, r4, r6 corrupted

209

* r5 = mach_info pointer in physical address space

210

*/

211

__lookup_machine_type:

212

adr r3, 4b

213

ldmia r3, {r4, r5, r6}

214

sub r3, r3, r4 @ get offset between

virt&phys

215

add r5, r5, r3 @ convert virt

addresses to

216

add r6, r6, r3 @ physical address

space

217

1: ldr r3, [r5, #MACHINFO_TYPE] @ get machine type

arch/arm/kernel/asm-offsets.c

99

DEFINE(SIZEOF_MACHINE_DESC, sizeof(struct machine_desc));

100

DEFINE(MACHINFO_TYPE, offsetof(struct machine_desc, nr));

101

DEFINE(MACHINFO_NAME, offsetof(struct machine_desc, name));

218

teq r3, r1 @ matches loader

number?

219

beq 2f @ found

220

add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc

221

cmp r5, r6

222

blo 1b

223

mov r5, #0 @ unknown machine

224

2: mov pc, lr

225

ENDPROC(__lookup_machine_type)

86 movs r8, r5

@ invalid machine (r5=0)?

87 beq

__error_a @ yes, error 'a'

88 bl

__vet_atags

89 bl

__create_page_tables

arch/arm/kernel/head.S

206

/*

207

* Setup the initial page tables. We only setup the barest

208

* amount which are required to get the kernel running, which

209

* generally means mapping in the kernel code.

210

*

211

* r8 = machinfo

212

* r9 = cpuid

213

* r10 = procinfo

214

*

215

* Returns:

216

* r0, r3, r6, r7 corrupted

217

* r4 = physical page table address

218

*/

219

__create_page_tables:

220

pgtbl r4 @ page table address

arch/arm/kernel/head.S

33 /*

34 *

swapper_pg_dir is the virtual address of the initial page table.

35 *

We place the page tables 16K below KERNEL_RAM_VADDR. Therefore, we

must

36 *

make sure that KERNEL_RAM_VADDR is correctly set. Currently, we

expect

37 *

the least significant 16 bits to be 0x8000, but we could probably

38 *

relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET +

0x4000.

39

*/

40

#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000

41

#error KERNEL_RAM_VADDR must start at 0xXXXX8000

42

#endif

43

44

.globl swapper_pg_dir

45

.equ swapper_pg_dir, KERNEL_RAM_VADDR - 0x4000

46

47

.macro pgtbl, rd

48

ldr \rd, =(KERNEL_RAM_PADDR - 0x4000)

49

.endm

29

#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)

30

#define KERNEL_RAM_PADDR (PHYS_OFFSET + TEXT_OFFSET)

arch/arm/plat-omap/include/plat/memory.h

36 /*

37 *

Physical DRAM offset.

38

*/

39

#if defined(CONFIG_ARCH_OMAP1)

40

#define PHYS_OFFSET UL(0x10000000)

41

#else

42

#define PHYS_OFFSET UL(0x80000000)

43

#endif

arch/arm/Makefile

204 #

The byte offset of the kernel image in RAM from the start of RAM.

205

TEXT_OFFSET := $(textofs-y)

112

textofs-y := 0x00008000

222

/*

223

* Clear the 16K level 1 swapper page table

224

*/

225

mov r0, r4

226

mov r3, #0

227

add r6, r0, #0x4000

228

1: str r3, [r0], #4

229

str r3, [r0], #4

230

str r3, [r0], #4

231

str r3, [r0], #4

232

teq r0, r6

233

bne 1b

234

235

ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags

236

237

/*

238

* Create identity mapping for first MB of kernel to

239

* cater for the MMU enable. This identity mapping

240

* will be removed by paging_init(). We use our current

program

241

* counter to determine corresponding section base address.

242

*/

243

mov r6, pc

244

mov r6, r6, lsr #20 @ start of kernel

section

245

orr r3, r7, r6, lsl #20 @ flags + kernel base

246

str r3, [r4, r6, lsl #2] @ identity mapping

247

248

/*

249

* Now setup the pagetables for our kernel direct

250

* mapped region.

251

*/

252

add r0, r4, #(KERNEL_START & 0xff000000) >> 18

253

str r3, [r0, #(KERNEL_START & 0x00f00000) >> 18]!

55

#define KERNEL_START KERNEL_RAM_VADDR

56

#define KERNEL_END _end

254

ldr r6, =(KERNEL_END - 1)

255

add r0, r0, #4

256

add r6, r4, r6, lsr #18

257

1: cmp r0, r6

258

add r3, r3, #1 << 20

259

strls r3, [r0], #4

260

bls 1b

281

/*

282

* Then map first 1MB of ram in case it contains our boot

params.

283

*/

284

add r0, r4, #PAGE_OFFSET >> 18

285

orr r6, r7, #(PHYS_OFFSET & 0xff000000)

286

.if (PHYS_OFFSET & 0x00f00000)

287

orr r6, r6, #(PHYS_OFFSET & 0x00f00000)

288

.endif

289

str r6, [r0]

334

mov pc, lr

335

ENDPROC(__create_page_tables)

91 /*

92 * The

following calls CPU specific code in a position independent

93 * manner.

See arch/arm/mm/proc-*.S for details. r10 = base of

94 *

xxx_proc_info structure selected by __lookup_machine_type

95 * above. On

return, the CPU will be ready for the MMU to be

96 * turned on,

and r0 will hold the CPU control register value.

97 */

98 ldr r13,

__switch_data @ address to jump to after

99

@ mmu has been enabled

100 adr lr,

BSYM(__enable_mmu) @ return (PIC) address

101 ARM( add pc,

r10, #PROCINFO_INITFUNC )

arch/arm/kernel/asm-offsets.c

106

DEFINE(PROCINFO_INITFUNC, offsetof(struct proc_info_list,

__cpu_flush));

102 THUMB( add r12,

r10, #PROCINFO_INITFUNC )

103 THUMB( mov pc,

r12 )

104 ENDPROC(stext)

First, the function at offset PROCINFO_INITFUNC (the __cpu_flush entry of struct proc_info_list) is called. Take ARMv7 as an example.

arch/arm/mm/proc-v7.S

177 /*

178 * __v7_setup

179 *

180 * Initialise

TLB, Caches, and MMU state ready to switch the MMU

181 * on. Return in

r0 the new CP15 C1 control register setting.

182 *

183 * We

automatically detect if we have a Harvard cache, and use the

184 * Harvard cache

control instructions insead of the unified cache

185 * control

instructions.

186 *

187 * This should be

able to cover all ARMv7 cores.

188 *

189 * It is assumed

that:

190 * - cache type

register is implemented

191 */

192 __v7_setup:

193 #ifdef CONFIG_SMP

194 mrc p15,

0, r0, c1, c0, 1

195 tst r0,

#(1 << 6) @ SMP/nAMP mode enabled?

196 orreq r0,

r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and

197 mcreq p15,

0, r0, c1, c0, 1 @ TLB ops broadcasting

198 #endif

199 adr r12,

__v7_setup_stack @ the local stack

300

__v7_setup_stack:

301

.space 4 * 11 @ 11 registers

200 stmia r12,

{r0-r5, r7, r9, r11, lr}

201 bl

v7_flush_dcache_all

arch/arm/mm/cache-v7.S

20

/*

21

* v7_flush_dcache_all()

22

*

23

* Flush the whole D-cache.

24

*

25

* Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)

26

*

27

* - mm - mm_struct describing address space

28

*/

29

ENTRY(v7_flush_dcache_all)

30

dmb @ ensure ordering with

previous memory accesses

31

mrc p15, 1, r0, c0, c0, 1 @ read clidr

The

Cache Level ID Register, which is introduced in ARMv7, identifies the

type of cache, or caches, implemented at each level, up to a maximum

of eight levels, and the Level of Coherency and Level of Unification

for the cache hierarchy. See page B3-92 in armv7_arm.

32

ands r3, r0, #0x7000000 @ extract loc from

clidr

LoC, bits [26:24]: Level of Coherency for the cache hierarchy. See *Clean, Invalidate, and Clean and Invalidate* on page B2-11 in armv7_arm.

33

mov r3, r3, lsr #23 @ left align loc bit

field

34

beq finished @ if loc is 0, then no

need to clean

35

mov r10, #0 @ start clean at cache

level 0

36

loop1:

37

add r2, r10, r10, lsr #1 @ work out 3x current

cache level

38

mov r1, r0, lsr r2 @ extract cache type

bits from clidr

39

and r1, r1, #7 @ mask of the bits for

current cache only

40

cmp r1, #2 @ see what cache we

have at this level

CtypeX

value | Meaning, cache implemented at this level

000

No cache

001

Instruction cache only

010

Data cache only

011

Separate instruction and data caches

100

Unified cache

101,

11X Reserved

41

blt skip @ skip if no cache, or

just i-cache

42

mcr p15, 2, r10, c0, c0, 0 @ select current cache

level in cssr

Cache

Size Selection Register, CSSELR. See page B3-95 in armv7_arm.

43

isb @ isb to sych the new

cssr&csidr

An ISB instruction flushes the pipeline in the processor, so that all instructions that come after the ISB instruction in program order are fetched from cache or memory only after the ISB instruction has completed. Using an ISB ensures that the effects of context-altering operations executed before the ISB are visible to the instructions fetched after the ISB instruction. See A3-49 in armv7_arm.

44

mrc p15, 1, r1, c0, c0, 0 @ read the new csidr

The

Cache Size ID Registers, CCSIDR, provide information about the

architecture of the caches. See page B3-91 in armv7_arm.

45

and r2, r1, #7 @ extract the length of

the cache lines

LineSize, bits [2:0]: (Log2(Number of words in cache line)) - 2. For example:

- For a line length of 4 words: Log2(4) = 2, so the LineSize entry = 0. This is the minimum line length.

- For a line length of 8 words: Log2(8) = 3, so the LineSize entry = 1.

46

add r2, r2, #4 @ add 4 (line length

offset)

47

ldr r4, =0x3ff

48

ands r4, r4, r1, lsr #3 @ find maximum number

on the way size

Associativity, bits [12:3]: (Associativity of cache) - 1, therefore a value of 0 indicates an associativity of 1. The associativity does not have to be a power of 2.

49

clz r5, r4 @ find bit position of

way size increment

Count

Leading Zeros returns the number of binary zero bits before the first

binary one bit in a value. See page A8-72 in armv7_arm.

50

ldr r7, =0x7fff

51

ands r7, r7, r1, lsr #13 @ extract max number of

the index size

NumSets,

bits [27:13],(Number

of sets in cache) - 1, therefore a value of 0 indicates 1 set in the

cache. The number of sets does not have to be a power of 2.

52

loop2:

53

mov r9, r4 @ create working copy

of max way size

54

loop3:

55

ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache

number into r11

r10,

the cache level. From 0 to 7.

r9,

Associativity

of cache.

r5,

The number of binary zero bits before the first binary one bit in r9.

56

THUMB( lsl r6, r9, r5 )

57

THUMB( orr r11, r10, r6 ) @ factor way and cache

number into r11

58

ARM( orr r11, r11, r7, lsl r2 ) @ factor index number

into r11

r7,

Number

of sets in cache.

r2,

Log2(Number

of words in cache line)

r11,

Associativity of cache &the

cache level

59

THUMB( lsl r6, r7, r2 )

60

THUMB( orr r11, r11, r6 ) @ factor index number

into r11

61

mcr p15, 0, r11, c7, c14, 2 @ clean &

invalidate by set/way

62

subs r9, r9, #1 @ decrement the way

63

bge loop3

64

subs r7, r7, #1 @ decrement the index

65

bge loop2

66

skip:

67

add r10, r10, #2 @ increment cache

number

68

cmp r3, r10

69

bgt loop1

70

finished:

71

mov r10, #0 @ swith back to cache

level 0

72

mcr p15, 2, r10, c0, c0, 0 @ select current cache

level in cssr

73

dsb

74

isb

75

mov pc, lr

76

ENDPROC(v7_flush_dcache_all)

202 ldmia r12,

{r0-r5, r7, r9, r11, lr}

203

204 mrc p15,

0, r0, c0, c0, 0 @ read main ID register

The

Main ID Register, MIDR, provides identification information for the

processor, including an implementer code for the device and a device

ID number. See page B3-81 in armv7_arm.

205 and r10,

r0, #0xff000000 @ ARM?

206 teq r10,

#0x41000000

Bits [31:24] hold an ASCII character identifying the implementer:

- 0x41 ('A') — ARM Limited
- 0x44 ('D') — Digital Equipment Corporation
- 0x4D ('M') — Motorola, Freescale Semiconductor Inc.
- 0x51 ('Q') — QUALCOMM Inc.
- 0x56 ('V') — Marvell Semiconductor Inc.
- 0x69 ('i') — Intel Corporation

207 bne 2f

208 and r5,

r0, #0x00f00000 @ variant

Bits

[23:20] Major revision number

209 and r6,

r0, #0x0000000f @ revision

Bits

[3:0] Minor revision number,

210 orr r0,

r6, r5, lsr #20-4 @ combine variant and revision

233 2: mov r10,

#0

234 #ifdef HARVARD_CACHE

235 mcr p15,

0, r10, c7, c5, 0 @ I+BTB cache invalidate

236 #endif

237 dsb

Data

Synchronization Barrier. See page A3-49 in armv7_arm.

238 #ifdef CONFIG_MMU

239 mcr p15,

0, r10, c8, c7, 0 @ invalidate I + D TLBs

240 mcr p15,

0, r10, c2, c0, 2 @ TTB control register

241 orr r4,

r4, #TTB_FLAGS

r4

is page table address.

arch/arm/mm/proc-v7.S

33

#ifndef CONFIG_SMP

34

/* PTWs cacheable, inner WB not shareable, outer WB not shareable */

35

#define TTB_FLAGS TTB_IRGN_WB|TTB_RGN_OC_WB

36

#define PMD_FLAGS PMD_SECT_WB

37

#else

38

/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */

39

#define TTB_FLAGS TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA

40

#define PMD_FLAGS PMD_SECT_WBWA|PMD_SECT_S

41

#endif

242 mcr p15,

0, r4, c2, c0, 1 @ load TTB1

Translation

Table Base Register 1. See page B3-116 in armv7_arm.

243 mov r10,

#0x1f @ domains 0, 1 = manager

244 mcr p15,

0, r10, c3, c0, 0 @ load domain access register

The

Domain Access Control Register, DACR, defines the access permission

for each of the sixteen memory domains. See page B3-119 in armv7_arm.

245 /*

246 * Memory

region attributes with SCTLR.TRE=1

247 *

248 * n =

TEX[0],C,B

249 * TR =

PRRR[2n+1:2n] - memory type

250 * IR =

NMRR[2n+1:2n] - inner cacheable property

251 * OR =

NMRR[2n+17:2n+16] - outer cacheable property

252 *

253 *

n TR IR OR

254 * UNCACHED

000 00

255 *

BUFFERABLE 001 10 00 00

256 *

WRITETHROUGH 010 10 10 10

257 * WRITEBACK

011 10 11 11

258 * reserved

110

259 *

WRITEALLOC 111 10 01 01

260 *

DEV_SHARED 100 01

261 *

DEV_NONSHARED 100 01

262 * DEV_WC

001 10

263 *

DEV_CACHED 011 10

264 *

265 * Other

attributes:

266 *

267 * DS0 =

PRRR[16] = 0 - device shareable property

268 * DS1 =

PRRR[17] = 1 - device shareable property

269 * NS0 =

PRRR[18] = 0 - normal shareable property

270 * NS1 =

PRRR[19] = 1 - normal shareable property

271 * NOS =

PRRR[24+n] = 1 - not outer shareable

272 */

273 ldr r5,

=0xff0a81a8 @ PRRR

274 ldr r6,

=0x40e040e0 @ NMRR

275 mcr p15,

0, r5, c10, c2, 0 @ write PRRR

276 mcr p15,

0, r6, c10, c2, 1 @ write NMRR

277 #endif

278 adr r5,

v7_crval

290

/* AT

291

* TFR EV X F I D LR S

292

* .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM

293

* rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced

294

* 1 0 110 0011 1100 .111 1101 < we want

295

*/

296

.type v7_crval, #object

297

v7_crval:

298

crval clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c

279 ldmia r5,

{r5, r6}

280 #ifdef

CONFIG_CPU_ENDIAN_BE8

281 orr r6,

r6, #1 << 25 @ big-endian page tables

282 #endif

EE,

bit [25] Exception Endianness

bit

283 mrc p15,

0, r0, c1, c0, 0 @ read control register

System

Control Register, See page B3-96 in armv7_arm.

284 bic r0,

r0, r5 @ clear bits them

285 orr r0,

r0, r6 @ set them

286 THUMB( orr r0,

r0, #1 << 30 ) @ Thumb exceptions

287 mov pc, lr

@ return to head.S:__ret

288 ENDPROC(__v7_setup)

__enable_mmu was stored in

the register lr.

arch/arm/kernel/head.S

155 /*

156 * Setup common bits

before finally enabling the MMU. Essentially

157 * this is just

loading the page table pointer and domain access

158 * registers.

159 */

160 __enable_mmu:

161 #ifdef

CONFIG_ALIGNMENT_TRAP

162 orr r0,

r0, #CR_A

163 #else

164 bic r0,

r0, #CR_A

165 #endif

166 #ifdef

CONFIG_CPU_DCACHE_DISABLE

167 bic r0,

r0, #CR_C

168 #endif

Cache

enable bit: This is a global enable bit for data and unified caches.

169 #ifdef

CONFIG_CPU_BPREDICT_DISABLE

170 bic r0,

r0, #CR_Z

171 #endif

Branch

prediction enable bit.

172 #ifdef

CONFIG_CPU_ICACHE_DISABLE

173 bic r0,

r0, #CR_I

174 #endif

Instruction

cache enable bit: This is a global enable bit for instruction caches.

175 mov r5,

#(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \

176

domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \

177

domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \

178

domain_val(DOMAIN_IO, DOMAIN_CLIENT))

arch/arm/include/asm/domain.h

13

/*

14

* Domain numbers

15

*

16

* DOMAIN_IO - domain 2 includes all IO only

17

* DOMAIN_USER - domain 1 includes all user memory only

18

* DOMAIN_KERNEL - domain 0 includes all kernel memory only

19

*

20

* The domain numbering depends on whether we support 36 physical

21

* address for I/O or not. Addresses above the 32 bit boundary can

22

* only be mapped using supersections and supersections can only

23

* be set for domain 0. We could just default to DOMAIN_IO as zero,

24

* but there may be systems with supersection support and no 36-bit

25

* addressing. In such cases, we want to map system memory with

26

* supersections to reduce TLB misses and footprint.

27

*

28

* 36-bit addressing and supersections are only available on

29

* CPUs based on ARMv6+ or the Intel XSC3 core.

30

*/

31

#ifndef CONFIG_IO_36

32

#define DOMAIN_KERNEL 0

33

#define DOMAIN_TABLE 0

34

#define DOMAIN_USER 1

35

#define DOMAIN_IO 2

36

#else

37

#define DOMAIN_KERNEL 2

38

#define DOMAIN_TABLE 2

39

#define DOMAIN_USER 1

40

#define DOMAIN_IO 0

41

#endif

42

43

/*

44

* Domain types

45

*/

46

#define DOMAIN_NOACCESS 0

47

#define DOMAIN_CLIENT 1

48

#define DOMAIN_MANAGER 3

49

50

#define domain_val(dom,type) ((type) << (2*(dom)))

179 mcr p15,

0, r5, c3, c0, 0 @ load domain access register

180 mcr p15,

0, r4, c2, c0, 0 @ load page table pointer

Translation

Table Base Register 0. See page B3-113 in armv7_arm.

181 b

__turn_mmu_on

182 ENDPROC(__enable_mmu)

183

184 /*

185 * Enable the MMU.

This completely changes the structure of the visible

186 * memory space. You

will not be able to trace execution through this.

187 * If you have an

enquiry about this, *please* check the linux-arm-kernel

188 * mailing list

archives BEFORE sending another post to the list.

189 *

190 * r0 = cp#15

control register

191 * r13 = *virtual*

address to jump to upon completion

192 *

193 * other registers

depend on the function called upon completion

194 */

195 .align 5

196 __turn_mmu_on:

197 mov r0, r0

198 mcr p15,

0, r0, c1, c0, 0 @ write control reg

199 mrc p15,

0, r3, c0, c0, 0 @ read id reg

200 mov r3, r3

201 mov r3,

r13

202 mov pc, r3

203 ENDPROC(__turn_mmu_on)

__switch_data was stored

in the register r13.

arch/arm/kernel/head-common.S

18 .align 2

19 .type

__switch_data, %object

20 __switch_data:

21 .long

__mmap_switched

22 .long

__data_loc @ r4

23 .long _data

@ r5

24 .long

__bss_start @ r6

25 .long _end

@ r7

26 .long

processor_id @ r4

27 .long

__machine_arch_type @ r5

28 .long

__atags_pointer @ r6

29 .long

cr_alignment @ r7

30 .long

init_thread_union + THREAD_START_SP @ sp

arch/arm/kernel/init_task.c

17

/*

18

* Initial thread structure.

19

*

20

* We need to make sure that this is 8192-byte aligned due to the

21

* way process stacks are handled. This is done by making sure

22

* the linker maps this in the .text segment right after head.S,

23

* and making head.S ensure the proper alignment.

24

*

25

* The things we do for performance..

26

*/

27

union thread_union init_thread_union __init_task_data =

28

{ INIT_THREAD_INFO(init_task) };

We

created the first thread manually!

arch/arm/include/asm/thread_info.h

71

#define INIT_THREAD_INFO(tsk)

\

72 {

\

73

.task = &tsk,

\

74

.exec_domain = &default_exec_domain,

\

75

.flags = 0,

\

76

.preempt_count = INIT_PREEMPT_COUNT,

\

77

.addr_limit = KERNEL_DS,

\

78

.cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) |

\

79

domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |

\

80

domain_val(DOMAIN_IO, DOMAIN_CLIENT),

\

81

.restart_block = {

\

82

.fn = do_no_restart_syscall,

\

83

},

\

84 }

include/linux/init_task.h

106

/*

107

* INIT_TASK is used to set up the first task table, touch at

108

* your own risk!. Base=0, limit=0x1fffff (=2MB)

109

*/

110

#define INIT_TASK(tsk) \

111 {

\

112

.state = 0,

\

113

.stack = &init_thread_info,

\

114

.usage = ATOMIC_INIT(2),

\

115

.flags = PF_KTHREAD,

\

116

.lock_depth = -1,

\

117

.prio = MAX_PRIO-20,

\

118

.static_prio = MAX_PRIO-20,

\

119

.normal_prio = MAX_PRIO-20,

\

120

.policy = SCHED_NORMAL,

\

121

.cpus_allowed = CPU_MASK_ALL,

\

122

.mm = NULL,

\

123

.active_mm = &init_mm,

\

124

.se = {

\

125

.group_node = LIST_HEAD_INIT(tsk.se.group_node),

\

126

},

\

127

.rt = {

\

128

.run_list = LIST_HEAD_INIT(tsk.rt.run_list),

\

129

.time_slice = HZ,

\

130

.nr_cpus_allowed = NR_CPUS,

\

131

},

\

132

.tasks = LIST_HEAD_INIT(tsk.tasks),

\

133

.pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks,

MAX_PRIO), \

134

.ptraced = LIST_HEAD_INIT(tsk.ptraced),

\

135

.ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry),

\

136

.real_parent = &tsk,

\

137

.parent = &tsk,

\

138

.children = LIST_HEAD_INIT(tsk.children),

\

139

.sibling = LIST_HEAD_INIT(tsk.sibling),

\

140

.group_leader = &tsk,

\

141

.real_cred = &init_cred,

\

142

.cred = &init_cred,

\

143

.cred_guard_mutex =

\

144

__MUTEX_INITIALIZER(tsk.cred_guard_mutex),

\

145

.comm = "swapper",

\

146

.thread = INIT_THREAD,

\

147

.fs = &init_fs,

\

148

.files = &init_files,

\

149

.signal = &init_signals,

\

150

.sighand = &init_sighand,

\

151

.nsproxy = &init_nsproxy,

\

152

.pending = {

\

153

.list = LIST_HEAD_INIT(tsk.pending.list),

\

154

.signal = {{0}}},

\

155

.blocked = {{0}},

\

156

.alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock),

\

157

.journal_info = NULL,

\

158

.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers),

\

159

.fs_excl = ATOMIC_INIT(0),

\

160

.pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock),

\

161

.timer_slack_ns = 50000, /* 50 usec default slack */

\

162

.pids = {

\

163

[PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID),

\

164

[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID),

\

165

[PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID),

\

166

},

\

167

.dirties = INIT_PROP_LOCAL_SINGLE(dirties),

\

168

INIT_IDS

\

169

INIT_PERF_EVENTS(tsk)

\

170

INIT_TRACE_IRQFLAGS

\

171

INIT_LOCKDEP

\

172

INIT_FTRACE_GRAPH

\

173

INIT_TRACE_RECURSION

\

174

INIT_TASK_RCU_PREEMPT(tsk)

\

175 }

31

32 /*

33 * The following

fragment of code is executed with the MMU on in MMU mode,

34 * and uses absolute

addresses; this is not position independent.

35 *

36 * r0 = cp#15

control register

37 * r1 = machine ID

38 * r2 = atags

pointer

39 * r9 = processor ID

40 */

41 __mmap_switched:

42 adr r3,

__switch_data + 4

43

44 ldmia r3!,

{r4, r5, r6, r7}

45 cmp r4, r5

@ Copy data segment if needed

46 1: cmpne r5, r6

47 ldrne fp,

[r4], #4

48 strne fp,

[r5], #4

49 bne 1b

50

51 mov fp, #0

@ Clear BSS (and zero fp)

52 1: cmp r6, r7

53 strcc fp,

[r6],#4

54 bcc 1b

55

56 ARM( ldmia r3,

{r4, r5, r6, r7, sp})

We have now loaded sp. This means we are running as thread 0, because the macro
“current” derives the current task from sp and therefore returns task 0.

arch/arm/include/asm/thread_info.h

94

static inline struct thread_info *current_thread_info(void)

95 {

96

register unsigned long sp asm ("sp");

97

return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));

98 }

arch/arm/include/asm/current.h

8

static inline struct task_struct *get_current(void)

9 {

10

return current_thread_info()->task;

11 }

12

13

#define current (get_current())

57 THUMB( ldmia r3,

{r4, r5, r6, r7} )

58 THUMB( ldr sp,

[r3, #16] )

59 str r9,

[r4] @ Save processor ID

60 str r1,

[r5] @ Save machine type

61 str r2,

[r6] @ Save atags pointer

62 bic r4,

r0, #CR_A @ Clear 'A' bit

63 stmia r7,

{r0, r4} @ Save control register values

64 b

start_kernel

65

ENDPROC(__mmap_switched)

Now we finally enter the C code: start_kernel.

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值