root/bsp/hal/arm/arch/locore.S
/*-
 * Copyright (c) 2005-2007, Kohsuke Ohtani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * locore.S - low level platform support
 */

#include <conf/config.h>
#include <machine/asm.h>
#include <machine/syspage.h>
#include <machine/memory.h>
#include <sys/errno.h>
#include <context.h>
#include <trap.h>
#include <cpu.h>

	.section ".text","ax"
	.code 32

/*
 * Kernel start point
 */
ENTRY(kernel_start)
#ifdef CONFIG_MMU
	b	reset_entry		/* Relative jump */
#endif
vector_start:
	/*
	 * Exception vector
	 *
	 * This table will be copied to an appropriate location.
	 * (the location is platform specific.)
	 */
	ldr	pc, reset_target	/* 0x00 mode: svc */
	ldr	pc, undefined_target	/* 0x04 mode: und */
	ldr	pc, swi_target		/* 0x08 mode: svc */
	ldr	pc, prefetch_target	/* 0x0c mode: abort */
	ldr	pc, abort_target	/* 0x10 mode: abort */
	nop				/* 0x14 reserved */
	ldr	pc, irq_target		/* 0x18 mode: irq */
	ldr	pc, fiq_target		/* 0x1c mode: fiq */

reset_target:		.word	reset_entry
undefined_target:	.word	undefined_entry
swi_target:		.word	syscall_entry
prefetch_target:	.word	prefetch_entry
abort_target:		.word	abort_entry
irq_target:		.word	interrupt_entry
fiq_target:		.word	fiq_entry

vector_end:

	.global	bootinfo
bootinfo:		.word	BOOTINFO
boot_stack:		.word	BOOTSTKTOP
int_stack:		.word	INTSTKTOP - 0x100
irq_mode_stack:		.word	INTSTKTOP
sys_mode_stack:		.word	SYSSTKTOP
abort_mode_stack:	.word	ABTSTKTOP
irq_nesting:		.word	irq_nesting_value
curspl:			.word	curspl_value
init_done:		.word	init_done_value
#ifdef CONFIG_MMU
reload_pc_target:	.word	reload_pc
#endif
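/*
 * Note that the literal words (reset_target ... fiq_target) sit between
 * vector_start and vector_end, so vector_copy() below relocates them
 * together with the eight vector instructions. Since "ldr pc, <label>"
 * is a pc-relative load, the table keeps working at the copy
 * destination, and the absolute handler addresses stored in the words
 * still point at the handlers in this file.
 */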
ENTRY(reset_entry)
	/*
	 * Setup the stack pointer for each processor mode
	 */
	mov	r0, #(PSR_IRQ_MODE|PSR_FIQ_DIS|PSR_IRQ_DIS)
	msr	cpsr, r0
	ldr	sp, irq_mode_stack	/* Set IRQ mode stack */

	mov	r0, #(PSR_UND_MODE|PSR_FIQ_DIS|PSR_IRQ_DIS)
	msr	cpsr, r0
	ldr	sp, abort_mode_stack	/* Undefined mode shares the Abort mode stack */

	mov	r0, #(PSR_ABT_MODE|PSR_FIQ_DIS|PSR_IRQ_DIS)
	msr	cpsr, r0
	ldr	sp, abort_mode_stack	/* Set Abort mode stack */

	mov	r0, #(PSR_SYS_MODE|PSR_FIQ_DIS|PSR_IRQ_DIS)
	msr	cpsr, r0
	ldr	sp, sys_mode_stack	/* Set SYS mode stack */

	mov	r0, #(PSR_SVC_MODE|PSR_FIQ_DIS|PSR_IRQ_DIS)
	msr	cpsr, r0
	ldr	sp, boot_stack		/* Set SVC mode stack */

	/* It's svc mode now. */

#ifdef CONFIG_MMU
	/*
	 * Setup the control register
	 */
	mov	r0, #CTL_DEFAULT
	mcr	p15, 0, r0, c1, c0, 0

	/*
	 * Initialize the page table.
	 * The physical address range 0-4M is mapped at virtual address 2G.
	 */
	mov	r1, #BOOT_PGD_PHYS		/* Clear page directory */
	mov	r2, #(BOOT_PGD_PHYS + 0x4000)	/* +16k */
	mov	r0, #0
1:
	str	r0, [r1], #4
	teq	r1, r2
	bne	1b

	mov	r1, #(BOOT_PGD_PHYS + 0x2000)	/* Set PTE0 address in pgd */
	mov	r0, #BOOT_PTE0_PHYS		/* WBUF/CACHE/SYSTEM attribute */
	orr	r0, r0, #0x03
	str	r0, [r1]

	mov	r1, #BOOT_PTE0_PHYS		/* Fill boot page table entry */
	add	r2, r1, #0x1000
	mov	r0, #0x1e
1:
	str	r0, [r1], #4
	add	r0, r0, #0x1000
	teq	r1, r2
	bne	1b

	/*
	 * Enable paging.
	 * The physical address range 0-4M is temporarily mapped to virtual
	 * address 0-4M. This is needed to enable paging.
	 */
	mov	r1, #BOOT_PGD_PHYS		/* Set PTE0 address in pgd */
	mov	r0, #BOOT_PTE0_PHYS		/* WBUF/CACHE/SYSTEM attribute */
	orr	r0, r0, #0x03
	str	r0, [r1]

	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		/* drain write buffer */
	mcr	p15, 0, r0, c8, c7, 0		/* flush I,D TLBs */
	mov	r1, #BOOT_PGD_PHYS
	mcr	p15, 0, r1, c2, c0, 0		/* load page table pointer */
	mov	r0, #-1
	mcr	p15, 0, r0, c3, c0		/* load domain access register */
	mrc	p15, 0, r0, c1, c0, 0
	orr	r0, r0, #0x1000			/* I-cache enable */
	orr	r0, r0, #0x003d			/* Write buffer, mmu */
	mcr	p15, 0, r0, c1, c0, 0

	/*
	 * Reload the pc register with the virtual address.
	 */
	ldr	pc, reload_pc_target		/* Reset pc here */
reload_pc:

	/*
	 * Unmap 0-4M.
	 * Since the first page must be accessible for the exception
	 * vector, we have to map it later.
	 */
	mov	r1, #BOOT_PGD_PHYS		/* Set PTE0 address in pgd */
	add	r1, r1, #KERNBASE
	mov	r0, #0
	str	r0, [r1]
	mcr	p15, 0, r0, c8, c7, 0		/* flush I,D TLBs */

#endif /* CONFIG_MMU */
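/*
 * A note on the magic numbers above, assuming the standard ARMv4/v5
 * descriptor encodings: the page directory is 16KB (4096 word entries,
 * one per 1MB of virtual space); "orr ... #0x03" turns a table address
 * into a first-level coarse page table descriptor; and each
 * second-level value derived from 0x1e (type 10 = small page, with the
 * B and C bits set) maps one bufferable, cacheable 4KB page. The 4KB
 * region filled at BOOT_PTE0_PHYS therefore holds second-level entries
 * for a linear 4MB mapping of physical memory, 4KB at a time.
 */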
	/*
	 * Clear the kernel BSS
	 */
	ldr	r1, =__bss
	ldr	r2, =__end
	mov	r0, #0
	cmp	r1, r2
	beq	2f
1:
	str	r0, [r1], #4
	cmp	r1, r2
	blo	1b			/* Clear [__bss, __end) */
2:

	/*
	 * Initialize spl.
	 */
	ldr	r1, curspl		/* curspl = 15 */
	mov	r2, #15
	str	r2, [r1]

	/*
	 * Jump to the kernel main routine
	 */
	b	main

/*
 * Relocate the exception vector
 *
 * void vector_copy(vaddr_t dest);
 */
ENTRY(vector_copy)
	ldr	r1, =vector_start
	ldr	r2, =vector_end
1:
	ldmia	r1!, {r3}
	stmia	r0!, {r3}
	teq	r1, r2
	bne	1b
	mov	pc, lr
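/*
 * Usage sketch for vector_copy() above: the destination is platform
 * specific -- typically 0x00000000, or the high vector page at
 * 0xffff0000 when the V bit of the control register is set. A caller
 * in platform init code might look like this (CONFIG_ARM_VECTORS is an
 * assumed name, for illustration only):
 *
 *	vector_copy((vaddr_t)CONFIG_ARM_VECTORS);
 */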
#ifdef CONFIG_CACHE
/*
 * Enable cache
 */
ENTRY(cache_init)
	mov	pc, lr
#endif

/*
 * Interrupt entry point
 */
/*
 * Memo: GBA BIOS interrupt handler.
 *
 *	stmfd	sp!, {r0-r3,r12,lr}
 *	mov	r0, #0x4000000
 *	adr	lr, IntRet
 *	ldr	pc, [r0,#-4]	@ pc = [0x3007ffc]
 *IntRet:
 *	ldmfd	sp!, {r0-r3,r12,lr}
 *	subs	pc, lr, #4
 */
ENTRY(interrupt_entry)
#ifdef __gba__
	ldmfd	sp!, {r0-r3,r12,lr}	/* Discard GBA BIOS's stack */
#endif
	stmfd	sp, {r0-r4}		/* Save work registers */
	sub	r4, sp, #(4*5)		/* r4: Pointer to saved registers */
	mrs	r0, spsr		/* r0: cpsr */
	sub	r3, lr, #4		/* r3: original pc */

	mrs	r1, cpsr		/* Set processor to SVC mode */
	bic	r1, r1, #PSR_MODE
	orr	r1, r1, #PSR_SVC_MODE
	msr	cpsr_c, r1

	mov	r1, sp			/* r1: svc_sp */
	mov	r2, lr			/* r2: svc_lr */
	stmfd	sp!, {r0-r3}		/* Push cpsr, svc_sp, svc_lr, pc */
	ldmfd	r4, {r0-r4}		/* Restore work registers */
	sub	sp, sp, #(4*15)
	stmia	sp, {r0-r14}^		/* Push r0-r14 */
	nop				/* Instruction gap for stm^ */

	ldr	r4, irq_nesting		/* Increment IRQ nesting level */
	ldr	r5, [r4]		/* r5: Previous nesting level */
	add	r0, r5, #1
	str	r0, [r4]

	mov	r7, sp			/* Save stack */
	ldr	r3, int_stack		/* Adjust stack for IRQ */
	cmp	r5, #0			/* Outermost interrupt? */
	moveq	sp, r3			/* If outermost, switch stack */
	bleq	sched_lock		/* If outermost, lock scheduler */
	bl	interrupt_handler	/* Call main interrupt handler */

	mov	sp, r7			/* Restore stack */
	str	r5, [r4]		/* Restore IRQ nesting level */
	cmp	r5, #0			/* Outermost interrupt? */
	bne	interrupt_ret
	bl	sched_unlock		/* Try to preempt */

	ldr	r0, [sp, #REG_CPSR]	/* Get previous mode */
	and	r0, r0, #PSR_MODE
	cmp	r0, #PSR_APP_MODE	/* Return to application mode? */
	bne	interrupt_ret

	mrs	r5, cpsr		/* Enable IRQ */
	bic	r4, r5, #0xc0
	msr	cpsr_c, r4
	bl	exception_deliver	/* Check exception */
	msr	cpsr_c, r5		/* Restore IRQ */
interrupt_ret:
	mov	r0, sp
	ldr	r1, [r0, #REG_CPSR]	/* Restore spsr */
	msr	spsr_all, r1
	ldmfd	r0, {r0-r14}^		/* Restore user mode registers */
	nop				/* Instruction gap for ldm^ */
	add	sp, sp, #(4*16)
	ldmia	sp, {sp, lr, pc}^	/* Restore lr and exit */
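/*
 * interrupt_entry above builds a full context frame on the SVC stack:
 * r0-r14 first, followed by cpsr, svc_sp, svc_lr and pc. The REG_CPSR,
 * REG_SVCSP, REG_SVCLR and REG_PC offsets from context.h must match
 * this layout. The "stm/ldm {...}^" forms transfer the user mode
 * banked registers even though the CPU is running in SVC mode; the nop
 * after each one provides the required instruction gap before banked
 * registers may be touched again.
 */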
/*
 * System call entry
 */
	.global	syscall_ret
ENTRY(syscall_entry)
#ifdef __gba__
	mov	r5, lr			/* Syscall stub already saved r5 */
	mrs	r12, cpsr		/* Set processor to SVC mode */
	bic	r12, r12, #PSR_MODE
	orr	r12, r12, #PSR_SVC_MODE
	msr	cpsr_c, r12
	mov	lr, r5
#endif
	sub	sp, sp, #CTXREGS	/* Adjust stack */
	stmia	sp, {r0-r14}^		/* Push r0-r14 */
	nop				/* Instruction gap for stm^ */
	mrs	r5, spsr		/* Push cpsr */
	str	r5, [sp, #REG_CPSR]
	add	r5, sp, #CTXREGS
	str	r5, [sp, #REG_SVCSP]	/* Push svc_sp */
	str	lr, [sp, #REG_SVCLR]	/* Push svc_lr */
	str	lr, [sp, #REG_PC]	/* Push pc */
#ifndef __gba__
	ldr	r4, [lr, #-4]		/* Get SWI number */
	bic	r4, r4, #0xff000000	/* SWI encodes its number in bits 0-23 */

	mrs	r5, cpsr		/* Enable IRQ */
	bic	r5, r5, #0xc0
	msr	cpsr_c, r5
#endif

	stmfd	sp!, {r4}
	bl	syscall_handler		/* System call dispatcher */
	ldmfd	sp!, {r4}

	cmp	r4, #0			/* Skip storing error if exception_return */
	strne	r0, [sp]		/* Set return value to r0 */
	bl	exception_deliver	/* Check exception */
syscall_ret:
	mrs	r5, cpsr		/* Disable IRQ */
	orr	r5, r5, #0xc0
	msr	cpsr_c, r5
	mov	r5, sp
	ldr	r1, [r5, #REG_CPSR]	/* Restore spsr */
	msr	spsr_all, r1
	ldmfd	r5, {r0-r14}^		/* Restore user mode registers */
	nop				/* Instruction gap for ldm^ */
	add	sp, sp, #REG_SVCSP
	ldmia	sp, {sp, lr, pc}^	/* Restore lr and exit */

/*
 * Undefined instruction
 */
ENTRY(undefined_entry)
	sub	sp, sp, #CTXREGS	/* Adjust stack */
	stmia	sp, {r0-r14}^		/* Push r0-r14 */
	nop				/* Instruction gap for stm^ */
	mov	r0, #TRAP_UNDEFINED
	b	trap_common

/*
 * Prefetch abort
 */
ENTRY(prefetch_entry)
	sub	lr, lr, #8		/* Adjust the lr */
	sub	sp, sp, #CTXREGS	/* Adjust stack */
	stmia	sp, {r0-r14}^		/* Push r0-r14 */
	nop				/* Instruction gap for stm^ */
	mov	r0, #TRAP_PREFETCH_ABORT
	b	trap_common

/*
 * Data abort
 */
ENTRY(abort_entry)
	sub	lr, lr, #4		/* Adjust the lr */
	sub	sp, sp, #CTXREGS	/* Adjust stack */
	stmia	sp, {r0-r14}^		/* Push r0-r14 */
	nop				/* Instruction gap for stm^ */
	mov	r0, #TRAP_DATA_ABORT
	b	trap_common

/*
 * Common entry for all traps
 * r0 - trap type
 */
ENTRY(trap_common)
	add	r5, sp, #CTXREGS
	str	r5, [sp, #REG_SVCSP]	/* Push svc_sp */
	str	lr, [sp, #REG_PC]	/* Push pc */
	mrs	r5, spsr		/* Push cpsr */
	str	r5, [sp, #REG_CPSR]

	str	r0, [sp, #REG_R0]	/* Set trap type to R0 */
	mov	r0, sp
	bl	trap_handler

	mov	r5, sp
	ldr	r1, [r5, #REG_CPSR]	/* Restore cpsr */
	msr	spsr_all, r1
	ldr	lr, [r5, #REG_PC]	/* Restore pc (lr) */
	ldr	sp, [r5, #REG_SVCSP]	/* Restore svc_sp */
	ldmfd	r5, {r0-r14}^		/* Restore user mode registers */
	nop				/* Instruction gap for ldm^ */
	movs	pc, lr			/* Exit, restoring cpsr */

ENTRY(fiq_entry)
	b	fiq_entry		/* FIQ is not supported */

/*
 * Switch register context.
 * r0 = previous kern_regs, r1 = next kern_regs
 * Interrupts must be disabled by the caller.
 *
 * syntax - void cpu_switch(kern_regs *prev, kern_regs *next)
 *
 * Note: GCC uses r0-r3,r12 as scratch registers
 */
ENTRY(cpu_switch)
	stmia	r0, {r4-r11, sp, lr}	/* Save previous register context */
	ldmia	r1, {r4-r11, sp, pc}	/* Restore next register context */

/*
 * Entry point for kernel thread
 */
ENTRY(kernel_thread_entry)
	mov	r0, r5			/* Set argument */
	mov	pc, r4			/* Jump to kernel thread */
1:
	b	1b
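/*
 * How cpu_switch() resumes a thread: the ldmia above reloads the saved
 * sp and pc of the next thread, so control continues wherever that
 * thread last called cpu_switch(). For a newly created kernel thread
 * the saved pc is kernel_thread_entry, with r4/r5 preloaded to the
 * entry function and its argument; that setup is done by the machine
 * dependent context code, not in this file.
 */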
/*
 * Copy data from user to kernel space.
 * Returns 0 on success, or EFAULT on page fault.
 *
 * syntax - int copyin(const void *uaddr, void *kaddr, size_t len)
 */
	.global	known_fault1
ENTRY(copyin)
	mov	r12, sp
	stmdb	sp!, {r4, r11, r12, lr, pc}
	sub	r11, r12, #4
	cmp	r0, #(USERLIMIT)
	bhi	copy_fault
	mov	r12, #0
	b	2f
1:
	ldrb	r3, [r12, r0]
known_fault1:				/* May fault here */
	strb	r3, [r12, r1]
	add	r12, r12, #1
2:
	subs	r2, r2, #1
	bcs	1b
	mov	r0, #0			/* Set no error */
	ldmia	sp, {r4, r11, sp, pc}

/*
 * Copy data to user from kernel space.
 * Returns 0 on success, or EFAULT on page fault.
 *
 * syntax - int copyout(const void *kaddr, void *uaddr, size_t len)
 */
	.global	known_fault2
ENTRY(copyout)
	mov	r12, sp
	stmdb	sp!, {r4, r11, r12, lr, pc}
	sub	r11, r12, #4
	cmp	r1, #(USERLIMIT)
	bhi	copy_fault
	mov	r12, #0
	b	2f
1:
	ldrb	r3, [r12, r0]
known_fault2:				/* May fault here */
	strb	r3, [r12, r1]
	add	r12, r12, #1
2:
	subs	r2, r2, #1
	bcs	1b
	mov	r0, #0			/* Set no error */
	ldmia	sp, {r4, r11, sp, pc}

/*
 * copyinstr - Copy a string from user space.
 * Returns 0 on success, EFAULT on page fault, or ENAMETOOLONG if the
 * string does not fit in len bytes.
 *
 * syntax - int copyinstr(const char *uaddr, void *kaddr, size_t len);
 */
	.global	known_fault3
ENTRY(copyinstr)
	mov	r12, sp
	stmdb	sp!, {r4, r11, r12, lr, pc}
	sub	r11, r12, #4
	cmp	r0, #(USERLIMIT)
	bhi	copy_fault
	mov	r12, #0
	b	2f
1:
	ldrb	r3, [r12, r0]
known_fault3:				/* May fault here */
	strb	r3, [r12, r1]
	cmp	r3, #0
	beq	3f
	add	r12, r12, #1
2:
	subs	r2, r2, #1
	bcs	1b
	mov	r0, #(ENAMETOOLONG)
	b	4f
3:
	mov	r0, #0			/* Set no error */
4:
	ldmia	sp, {r4, r11, sp, pc}

/*
 * Fault entry for user access
 */
ENTRY(copy_fault)
	mov	r0, #(EFAULT)
	ldmia	sp, {r4, r11, sp, pc}
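/*
 * The known_fault1..known_fault3 labels exported above let the data
 * abort handler recognize faults raised inside these user access
 * loops: when the faulting pc matches one of the labels, the handler
 * can resume execution at copy_fault, so the routine returns EFAULT
 * instead of panicking the kernel. The matching logic itself lives in
 * the machine dependent trap code, not here.
 */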
/*
 * spl (interrupt priority level) primitives. Only two levels are
 * distinguished on this port: 0 (interrupts enabled) and 15
 * (interrupts disabled); curspl records the current level.
 */

/*
 * int spl0(void);
 */
ENTRY(spl0)
	ldr	r1, curspl		/* curspl = 0 */
	ldr	r0, [r1]
	mov	r2, #0
	str	r2, [r1]

	mrs	r1, cpsr		/* Enable interrupts */
	bic	r1, r1, #0xc0
	msr	cpsr_c, r1
	mov	pc, lr

/*
 * int splhigh(void);
 */
ENTRY(splhigh)
	mrs	r1, cpsr		/* Disable interrupts */
	orr	r1, r1, #0xc0
	msr	cpsr_c, r1

	ldr	r1, curspl		/* curspl = 15 */
	ldr	r0, [r1]
	mov	r2, #15
	str	r2, [r1]
	mov	pc, lr

/*
 * void splx(int s);
 */
ENTRY(splx)
	mov	r3, r0			/* r3: new spl */
	ldr	r1, curspl
	ldr	r0, [r1]
	cmp	r3, r0
	moveq	pc, lr			/* Return if equal */
	str	r3, [r1]

	cmp	r3, #0
	mrs	r1, cpsr
	bic	r1, r1, #0xc0
	orrne	r1, r1, #0xc0		/* Disable interrupts if curspl != 0 */
	msr	cpsr_c, r1
	mov	pc, lr

/*
 * void sploff(void);
 */
ENTRY(sploff)
	mrs	r0, cpsr
	orr	r0, r0, #0xc0
	msr	cpsr_c, r0
	mov	pc, lr

/*
 * void splon(void);
 */
ENTRY(splon)
	mrs	r0, cpsr
	bic	r0, r0, #0xc0
	msr	cpsr_c, r0
	mov	pc, lr

/*
 * Interrupt nest counter.
 *
 * This counter is incremented on entry to the interrupt handler and
 * decides when to switch to the interrupt stack. Since all interrupt
 * handlers share a single interrupt stack, each handler must pay
 * attention to stack overflow.
 */
	.section ".bss"
irq_nesting_value:
	.long	0

/*
 * Current spl
 */
curspl_value:
	.long	0

/*
 * Init flag for debug
 */
init_done_value:
	.long	0

	.end