 * linux/arch/arm/kernel/entry-armv.S
 *
 * Copyright (C) 1996,1997,1998 Russell King.
 * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Low-level vector interface routines
 *
 * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 * that causes it to save wrong values... Be aware!
#include <linux/config.h>

#include <asm/vfpmacros.h>
#include <asm/hardware.h>		/* should be moved into entry-macro.S */
#include <asm/arch/irqs.h>		/* should be moved into entry-macro.S */
#include <asm/arch/entry-macro.S>

#include "entry-header.S"
 * Interrupt handling.  Preserves r7, r8, r9

1:	get_irqnr_and_base r0, r6, r5, lr

	@ routine called with r0 = irq number, r1 = struct pt_regs *
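	@ (in this tree that routine is asm_do_IRQ(unsigned int irq,
	@ struct pt_regs *regs) from arch/arm/kernel/irq.c)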
 * Invalid mode handlers

	.macro	inv_entry, sym, reason
	sub	sp, sp, #S_FRAME_SIZE		@ Allocate frame size in one go
	stmia	sp, {r0 - lr}			@ Save r0 - lr
	ldr	r4, .LC\sym
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry abt, BAD_PREFETCH
__dabt_invalid:
	inv_entry abt, BAD_DATA
__irq_invalid:
	inv_entry irq, BAD_IRQ
__und_invalid:
	inv_entry und, BAD_UNDEFINSTR

	ldmia	r4, {r5 - r7}			@ Get pc, cpsr, old_r0
	add	r4, sp, #S_PC
	stmia	r4, {r5 - r7}			@ Save pc, cpsr, old_r0
	and	r2, r6, #31			@ int mode
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ save r0 - r12
	add	r0, sp, #S_FRAME_SIZE
	ldmia	r2, {r2 - r4}			@ get pc, cpsr
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
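	@ (r2-r4 end up in the S_PC, S_PSR and S_OLD_R0 slots at the top of
	@ the pt_regs frame; S_PSR is read back below on the return path)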
	@ get ready to re-enable interrupts if appropriate
	biceq	r9, r9, #PSR_I_BIT
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
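	@ (r0 and r1 are then passed straight through as the addr and fsr
	@ arguments of do_DataAbort(addr, fsr, regs) in arch/arm/mm/fault.c)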
	@ set desired IRQ state, then call main handler

	@ IRQs off again before pulling preserved data off the stack

	@ restore SPSR and restart the instruction

	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#ifdef CONFIG_PREEMPT
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#endif

	irq_handler
#ifdef CONFIG_PREEMPT
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
preempt_return:
	ldr	r0, [tsk, #TI_PREEMPT]		@ read preempt value
	str	r8, [tsk, #TI_PREEMPT]		@ restore preempt count
	teq	r0, r7
	strne	r0, [r0, -r0]			@ bug()
#endif
	ldr	r0, [sp, #S_PSR]		@ irqs are already disabled
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#ifdef CONFIG_PREEMPT
svc_preempt:
	teq	r8, #0				@ was preempt count = 0
	ldreq	r6, .LCirq_stat
	ldr	r0, [r6, #4]			@ local_irq_count
	ldr	r1, [r6, #8]			@ local_bh_count
	mov	r7, #0				@ preempt_schedule_irq
	str	r7, [tsk, #TI_PREEMPT]		@ expects preempt_count == 0
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	beq	preempt_return			@ go again
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction

	mov	r0, sp				@ struct pt_regs *regs
	@ IRQs off again before pulling preserved data off the stack

	@ restore SPSR and restart the instruction

	ldr	lr, [sp, #S_PSR]		@ Get SVC cpsr
	ldmia	sp, {r0 - pc}^			@ Restore SVC registers
	@ re-enable interrupts if appropriate
	biceq	r9, r9, #PSR_I_BIT

	@ set args, then call main handler
	@
	@  r0 - address of faulting instruction
	@  r1 - pointer to registers on stack

	mov	r0, r2				@ address (pc)
	bl	do_PrefetchAbort		@ call abort handler

	@ IRQs off again before pulling preserved data off the stack

	@ restore SPSR and restart the instruction

	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#ifdef CONFIG_PREEMPT
	.macro	usr_entry, sym
	sub	sp, sp, #S_FRAME_SIZE		@ Allocate frame size in one go
	stmia	sp, {r0 - r12}			@ save r0 - r12
	ldmia	r7, {r2 - r4}			@ Get USR pc, cpsr

#if __LINUX_ARM_ARCH__ < 6
	@ make sure our user space atomic helper is aborted
	cmp	r2, #VIRT_OFFSET
	bichs	r3, r3, #PSR_Z_BIT
#endif
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr

	@ Enable the alignment trap while in kernel mode
	alignment_trap r7, r0, __temp_\sym

	@ Clear FP to mark the first stack frame
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.
	@ IRQs on, then call the main handler
	adr	lr, ret_from_exception
#ifdef CONFIG_PREEMPT
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#endif

	irq_handler
#ifdef CONFIG_PREEMPT
	ldr	r0, [tsk, #TI_PREEMPT]
	str	r8, [tsk, #TI_PREEMPT]
	teq	r0, r7
	strne	r0, [r0, -r0]
#endif
	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	fpundefinstr			@ ignore FP

	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction

	adr	r9, ret_from_exception
	@ fallthrough to call_fpe
 * The out of line fixup for the ldrt above.

	.section .fixup, "ax"
	.section __ex_table, "a"
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r10 = this thread's thread_info structure.
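 *
 * For illustration, the coprocessor number an emulator looks at is
 * bits 11:8 of the opcode, i.e. what the "and r8, r0, #0x00000f00"
 * below extracts (hypothetical C helper, not part of this file):
 *
 *	static inline unsigned int insn_cp_num(unsigned long insn)
 *	{
 *		return (insn >> 8) & 0xf;
 *	}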
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000		@ mask out op-code bits
	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
#endif
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
	strb	r7, [r6, r8, lsr #8]		@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
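	@ The "add pc, pc, r8, lsr #6" below is a computed branch: r8 holds
	@ CP# << 8, so "r8, lsr #6" is CP# * 4, one word per table entry,
	@ and pc reads as the address of the add plus 8, which is exactly
	@ where the jump table starts.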
	add	pc, pc, r8, lsr #6
	mov	r0, r0

	mov	pc, lr				@ CP#0
	b	do_fpe				@ CP#1 (FPE)
	b	do_fpe				@ CP#2 (FPE)
#ifdef CONFIG_VFP
	b	do_vfp				@ CP#10 (VFP)
	b	do_vfp				@ CP#11 (VFP)
#else
	mov	pc, lr				@ CP#10 (VFP)
	mov	pc, lr				@ CP#11 (VFP)
#endif
	mov	pc, lr				@ CP#14 (Debug)
	mov	pc, lr				@ CP#15 (Control)

do_fpe:
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
	adr	lr, ret_from_exception

	enable_irq				@ Enable interrupts
	mov	r0, r2				@ address (pc)
	mov	r1, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler
 * This is the return code to user mode for abort handlers

ENTRY(ret_from_exception)
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
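 *
 * (seen from C this is struct task_struct *__switch_to(struct task_struct *,
 *  struct thread_info *, struct thread_info *) as declared in
 *  include/asm-arm/system.h; the previous task comes back in r0)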
ENTRY(__switch_to)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
	stmia	ip!, {r4 - sl, fp, sp, lr}	@ Store most regs on stack
	ldr	r6, [r2, #TI_CPU_DOMAIN]!
#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
	mra	r4, r5, acc0
	stmia	ip, {r4, r5}
#endif
#if defined(CONFIG_HAS_TLS_REG)
	mcr	p15, 0, r3, c13, c0, 3		@ set TLS register
#elif !defined(CONFIG_TLS_REG_EMUL)
	mov	r4, #0xffff0fff
	str	r3, [r4, #-15]			@ TLS val at 0xffff0ff0
#endif
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#ifdef CONFIG_VFP
	@ Always disable VFP so we can lazily save/restore the old
	@ state. This occurs in the context of the previous thread.
	VFPFMRX	r4, FPEXC
	bic	r4, r4, #FPEXC_ENABLE
	VFPFMXR	FPEXC, r4
#endif
#if defined(CONFIG_IWMMXT)
	bl	iwmmxt_task_switch
#elif defined(CONFIG_CPU_XSCALE)
	add	r4, r2, #40			@ cpu_context_save->extra
#endif
	ldmib	r2, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
 * These are segments of kernel-provided user code reachable from user space
 * at a fixed address in kernel memory.  This is used to provide user space
 * with some operations which require kernel help because of unimplemented
 * native features and/or instructions in many ARM CPUs. The idea is for
 * this code to be executed directly in user mode for best efficiency, but
 * it is too intimate with the kernel counterpart to be left to user
 * libraries. In fact this code might even differ from one CPU to another
 * depending on the available instruction set and restrictions like on
 * SMP systems. In other words, the kernel reserves the right to change
 * this code as needed without warning. Only the entry points and their
 * results are guaranteed to be stable.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
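 *
 * (With this layout the entry point of helper number n sits at
 * 0xffff0fe0 - n * 32: __kuser_get_tls below is helper #0 at 0xffff0fe0,
 * __kuser_cmpxchg is helper #1 at 0xffff0fc0, and any future addition
 * would go in front of them.)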
 *
 * User space is expected to implement those things inline when optimizing
 * for a processor that has the necessary native support, but only if the
 * resulting binaries are already going to be incompatible with earlier ARM
 * processors due to the use of unsupported instructions other than what
 * is provided here. In other words, don't make binaries unable to run on
 * earlier processors just for the sake of not using these kernel helpers
 * if your compiled code is not going to use the new instructions for other
 * purposes.
	.globl	__kuser_helper_start
__kuser_helper_start:
 * Reference prototype:
 *
 *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	r0 = returned value (zero or non-zero)
 *	C flag = set if r0 == 0, clear if r0 != 0
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 * For example, a user space atomic_add implementation could look like this:
 *
 *	#define atomic_add(ptr, val) \
 *	({ register unsigned int *__ptr asm("r2") = (ptr); \
 *	   register unsigned int __result asm("r1"); \
 *	   asm volatile ( \
 *	       "1: @ atomic_add\n\t" \
 *	       "ldr	r0, [r2]\n\t" \
 *	       "mov	r3, #0xffff0fff\n\t" \
 *	       "add	lr, pc, #4\n\t" \
 *	       "add	r1, r0, %2\n\t" \
 *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
 *	       "bcc	1b" \
 *	       : "=&r" (__result) \
 *	       : "r" (__ptr), "rIL" (val) \
 *	       : "r0","r3","ip","lr","cc","memory" ); \
 *	   __result; })
__kuser_cmpxchg:				@ 0xffff0fc0

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_SMP  /* sanity check */
#error "CONFIG_SMP on a machine supporting pre-ARMv6 processors?"
#endif
 * Theory of operation:
 *
 * We set the Z flag before loading oldval. If ever an exception
 * occurs we cannot be sure the loaded value will still be the same
 * when the exception returns, therefore the user exception handler
 * will clear the Z flag whenever the interrupted user code was
 * actually from the kernel address space (see the usr_entry macro).
 *
 * The post-increment on the str is used to prevent a race with an
 * exception happening just after the str instruction which would
 * clear the Z flag although the exchange was done.
	teq	ip, ip				@ set Z flag
	ldr	ip, [r2]			@ load current val
	add	r3, r2, #1			@ prepare store ptr
	teqeq	ip, r0				@ compare with oldval if still allowed
	streq	r1, [r3, #-1]!			@ store newval if still allowed
	subs	r0, r2, r3			@ if r2 == r3 the str occurred
 * Reference prototype:
 *
 *	int __kernel_get_tls(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Clobbered:
 *
 *	the Z flag might be lost
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_get_tls_t)(void);
 *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
 * This could be used as follows:
 *
 *	#define __kernel_get_tls() \
 *	({ register unsigned int __val asm("r0"); \
 *	   asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
 *		: "=r" (__val) : : "lr","cc" ); \
 *	   __val; })
__kuser_get_tls:				@ 0xffff0fe0

#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)
	ldr	r0, [pc, #(16 - 8)]		@ TLS stored at 0xffff0ff0
	mov	pc, lr
#else
	mrc	p15, 0, r0, c13, c0, 3		@ read TLS register
	mov	pc, lr
#endif

	.word	0				@ pad up to __kuser_helper_version
 * Reference declaration:
 *
 *	extern unsigned int __kernel_helper_version;
 *
 * Definition and user space usage example:
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 * User space may read this to determine the current number of helpers
 * available.
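 *
 * For example, a binary wanting __kernel_cmpxchg (helper #1, so two
 * helpers must be present) could guard its use like this (illustrative
 * check only, use_some_fallback() is hypothetical):
 *
 *	if (__kernel_helper_version >= 2)
 *		__kernel_cmpxchg(oldval, newval, ptr);
 *	else
 *		use_some_fallback();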
__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
	.macro	vector_stub, name, sym, correction=0

vector_\name:
	ldr	r13, .LCs\sym
	.if \correction
	sub	lr, lr, #\correction
	.endif
	str	lr, [r13]			@ save lr_IRQ
	mrs	lr, spsr
	str	lr, [r13, #4]			@ save spsr_IRQ

	@ now branch to the relevant MODE handling routine
	mrs	r13, cpsr
	bic	r13, r13, #MODE_MASK
	orr	r13, r13, #SVC_MODE
	msr	spsr_cxsf, r13			@ switch to SVC_32 mode

	and	lr, lr, #15
	ldr	lr, [pc, lr, lsl #2]
	movs	pc, lr				@ Changes mode and branches
	.endm
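	@ Each vector_stub below is followed by a 16-entry table of handler
	@ addresses indexed by the low 4 mode bits of the saved spsr
	@ (USR=0, FIQ=1, IRQ=2, SVC=3); the "ldr lr, [pc, lr, lsl #2]" above
	@ selects the entry for the mode the exception came from.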
 * Interrupt dispatcher

	vector_stub	irq, irq, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC

	vector_stub	dabt, abt, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC

	vector_stub	pabt, abt, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC

	vector_stub	und, und

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f
/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register... brain
 * damage alert! I don't think that we can execute any code in here in any
 * other mode than FIQ... Ok, you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
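	@ The branches below are assembled at __vectors_start but execute at
	@ 0xffff0000, while their targets are copied to 0xffff0200.  Branches
	@ are PC-relative, so naming "vector_x + stubs_offset" as the
	@ link-time target, i.e.
	@	(vector_x + stubs_offset) - (__vectors_start + n)
	@		= (vector_x - __stubs_start) + 0x200 - n
	@ makes the encoded offset equal the run-time distance from the
	@ vector at 0xffff0000 + n to the copied stub.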
	.globl	__vectors_start
__vectors_start:
	swi	SYS_ERROR0
	b	vector_und + stubs_offset
	ldr	pc, .LCvswi + stubs_offset
	b	vector_pabt + stubs_offset
	b	vector_dabt + stubs_offset
	b	vector_addrexcptn + stubs_offset
	b	vector_irq + stubs_offset
	b	vector_fiq + stubs_offset
 * Do not reorder these, and do not insert extra data between...

.LCsirq:
	.word	0				@ saved lr_irq
	.word	0				@ saved spsr_irq
.LCsund:
	.word	0				@ saved lr_und
	.word	0				@ saved spsr_und
.LCsabt:
	.word	0				@ saved lr_abt
	.word	0				@ saved spsr_abt

	.globl	cr_no_alignment