Remove obsolete #include <linux/config.h>

diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 65c58b3..6f5e7c5 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -7,10 +7,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <linux/config.h>
 
-#include <asm/thread_info.h>
-#include <asm/ptrace.h>
 #include <asm/unistd.h>
 
 #include "entry-header.S"
@@ -27,7 +24,15 @@ ret_fast_syscall:
        ldr     r1, [tsk, #TI_FLAGS]
        tst     r1, #_TIF_WORK_MASK
        bne     fast_work_pending
-       fast_restore_user_regs
+
+       @ fast_restore_user_regs
+       ldr     r1, [sp, #S_OFF + S_PSR]        @ get calling cpsr
+       ldr     lr, [sp, #S_OFF + S_PC]!        @ get pc
+       msr     spsr_cxsf, r1                   @ save in spsr_svc
+       ldmdb   sp, {r1 - lr}^                  @ get calling r1 - lr
+       mov     r0, r0
+       add     sp, sp, #S_FRAME_SIZE - S_PC
+       movs    pc, lr                          @ return & move spsr_svc into cpsr
 
 /*
  * Ok, we need to do extra processing, enter the slow path.
@@ -42,8 +47,7 @@ work_pending:
        mov     r0, sp                          @ 'regs'
        mov     r2, why                         @ 'syscall'
        bl      do_notify_resume
-       disable_irq                             @ disable interrupts
-       b       no_work_pending
+       b       ret_slow_syscall                @ Check work again
 
 work_resched:
        bl      schedule
@@ -57,7 +61,14 @@ ret_slow_syscall:
        tst     r1, #_TIF_WORK_MASK
        bne     work_pending
 no_work_pending:
-       slow_restore_user_regs
+       @ slow_restore_user_regs
+       ldr     r1, [sp, #S_PSR]                @ get calling cpsr
+       ldr     lr, [sp, #S_PC]!                @ get pc
+       msr     spsr_cxsf, r1                   @ save in spsr_svc
+       ldmdb   sp, {r0 - lr}^                  @ get calling r0 - lr
+       mov     r0, r0
+       add     sp, sp, #S_FRAME_SIZE - S_PC
+       movs    pc, lr                          @ return & move spsr_svc into cpsr
 
 /*
  * This is how we return from a fork.
@@ -75,7 +86,11 @@ ENTRY(ret_from_fork)
        b       ret_slow_syscall
        
 
+       .equ NR_syscalls,0
+#define CALL(x) .equ NR_syscalls,NR_syscalls+1
 #include "calls.S"
+#undef CALL
+#define CALL(x) .long x
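
This double inclusion of calls.S first counts the entries; the second inclusion
at sys_call_table further down emits the actual pointers.  A sketch of the
expansion, using a hypothetical two-entry calls.S for illustration:

	@ calls.S containing just:
	@	CALL(sys_restart_syscall)
	@	CALL(sys_exit)
	@
	@ first pass, CALL(x) defined as a counter:
	@	.equ NR_syscalls,NR_syscalls+1
	@	.equ NR_syscalls,NR_syscalls+1	@ NR_syscalls ends up as 2
	@
	@ second pass, CALL(x) defined as ".long x":
	@	.long sys_restart_syscall
	@	.long sys_exit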
 
 /*=============================================================================
  * SWI handler
@@ -86,43 +101,75 @@ ENTRY(ret_from_fork)
           run on an ARM7 and we can save a couple of instructions.  
                                                                --pb */
 #ifdef CONFIG_CPU_ARM710
-       .macro  arm710_bug_check, instr, temp
-       and     \temp, \instr, #0x0f000000      @ check for SWI
-       teq     \temp, #0x0f000000
-       bne     .Larm700bug
-       .endm
-
-.Larm700bug:
-       ldr     r0, [sp, #S_PSR]                @ Get calling cpsr
-       sub     lr, lr, #4
-       str     lr, [r8]
-       msr     spsr_cxsf, r0
+#define A710(code...) code
+.Larm710bug:
        ldmia   sp, {r0 - lr}^                  @ Get calling r0 - lr
        mov     r0, r0
-       ldr     lr, [sp, #S_PC]                 @ Get PC
        add     sp, sp, #S_FRAME_SIZE
-       movs    pc, lr
+       subs    pc, lr, #4
 #else
-       .macro  arm710_bug_check, instr, temp
-       .endm
+#define A710(code...)
 #endif
 
        .align  5
 ENTRY(vector_swi)
-       save_user_regs
+       sub     sp, sp, #S_FRAME_SIZE
+       stmia   sp, {r0 - r12}                  @ Calling r0 - r12
+       add     r8, sp, #S_PC
+       stmdb   r8, {sp, lr}^                   @ Calling sp, lr
+       mrs     r8, spsr                        @ called from non-FIQ mode, so ok.
+       str     lr, [sp, #S_PC]                 @ Save calling PC
+       str     r8, [sp, #S_PSR]                @ Save CPSR
+       str     r0, [sp, #S_OLD_R0]             @ Save OLD_R0
        zero_fp
 
        /*
         * Get the system call number.
         */
+
+#if defined(CONFIG_OABI_COMPAT)
+
+       /*
+        * If we have CONFIG_OABI_COMPAT then we need to look at the swi
+        * value to determine if it is an EABI or an old ABI call.
+        */
 #ifdef CONFIG_ARM_THUMB
+       tst     r8, #PSR_T_BIT
+       movne   r10, #0                         @ no thumb OABI emulation
+       ldreq   r10, [lr, #-4]                  @ get SWI instruction
+#else
+       ldr     r10, [lr, #-4]                  @ get SWI instruction
+  A710(        and     ip, r10, #0x0f000000            @ check for SWI         )
+  A710(        teq     ip, #0x0f000000                                         )
+  A710(        bne     .Larm710bug                                             )
+#endif
+
+#elif defined(CONFIG_AEABI)
+
+       /*
+        * Pure EABI user space always put syscall number into scno (r7).
+        */
+  A710(        ldr     ip, [lr, #-4]                   @ get SWI instruction   )
+  A710(        and     ip, ip, #0x0f000000             @ check for SWI         )
+  A710(        teq     ip, #0x0f000000                                         )
+  A710(        bne     .Larm710bug                                             )
+
+#elif defined(CONFIG_ARM_THUMB)
+
+       /* Legacy ABI only, possibly thumb mode. */
        tst     r8, #PSR_T_BIT                  @ this is SPSR from save_user_regs
        addne   scno, r7, #__NR_SYSCALL_BASE    @ put OS number in
        ldreq   scno, [lr, #-4]
+
 #else
+
+       /* Legacy ABI only. */
        ldr     scno, [lr, #-4]                 @ get SWI instruction
+  A710(        and     ip, scno, #0x0f000000           @ check for SWI         )
+  A710(        teq     ip, #0x0f000000                                         )
+  A710(        bne     .Larm710bug                                             )
+
 #endif
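
For context on the instruction fetch above, the two user-space conventions
being told apart look like this, taking getpid (syscall 20, with
__NR_OABI_SYSCALL_BASE = 0x900000) as an assumed example:

	@ EABI caller: number goes in r7, the swi immediate is zero
	mov	r7, #20			@ __NR_getpid
	swi	#0

	@ old-ABI caller: number is encoded in the swi instruction itself,
	@ which is why it is read back here with "ldr ..., [lr, #-4]"
	swi	#0x900014		@ 0x900000 + 20

In Thumb mode the swi immediate is only 8 bits wide and cannot hold such a
value, hence the "no thumb OABI emulation" fallback to r7 above.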
-       arm710_bug_check scno, ip
 
 #ifdef CONFIG_ALIGNMENT_TRAP
        ldr     ip, __cr_alignment
@@ -131,18 +178,31 @@ ENTRY(vector_swi)
 #endif
        enable_irq
 
-       str     r4, [sp, #-S_OFF]!              @ push fifth arg
-
        get_thread_info tsk
+       adr     tbl, sys_call_table             @ load syscall table pointer
        ldr     ip, [tsk, #TI_FLAGS]            @ check for syscall tracing
+
+#if defined(CONFIG_OABI_COMPAT)
+       /*
+        * If the swi argument is zero, this is an EABI call and we do nothing.
+        *
+        * If this is an old ABI call, get the syscall number into scno and
+        * get the old ABI syscall table address.
+        */
+       bics    r10, r10, #0xff000000
+       eorne   scno, r10, #__NR_OABI_SYSCALL_BASE
+       ldrne   tbl, =sys_oabi_call_table
+#elif !defined(CONFIG_AEABI)
        bic     scno, scno, #0xff000000         @ mask off SWI op-code
        eor     scno, scno, #__NR_SYSCALL_BASE  @ check OS number
-       adr     tbl, sys_call_table             @ load syscall table pointer
+#endif
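
A worked example of the bics/eorne sequence above, for an old-ABI
"swi 0x900005" versus an EABI "swi 0x0":

	0xef900005	instruction word fetched from [lr, #-4]  (old ABI)
	0x00900005	after bics ..., #0xff000000   -> non-zero, old-ABI path
	0x00000005	after eorne ..., #__NR_OABI_SYSCALL_BASE  -> scno = 5

	0xef000000	instruction word for an EABI "swi 0x0"
	0x00000000	after bics -> zero: scno keeps the value user space put
			in r7 and the native sys_call_table is used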
+
+       stmdb   sp!, {r4, r5}                   @ push fifth and sixth args
        tst     ip, #_TIF_SYSCALL_TRACE         @ are we tracing syscalls?
        bne     __sys_trace
 
-       adr     lr, ret_fast_syscall            @ return address
        cmp     scno, #NR_syscalls              @ check upper syscall limit
+       adr     lr, ret_fast_syscall            @ return address
        ldrcc   pc, [tbl, scno, lsl #2]         @ call sys_* routine
 
        add     r1, sp, #S_OFF
@@ -157,11 +217,13 @@ ENTRY(vector_swi)
         * context switches, and waiting for our parent to respond.
         */
 __sys_trace:
+       mov     r2, scno
        add     r1, sp, #S_OFF
        mov     r0, #0                          @ trace entry [IP = 0]
        bl      syscall_trace
 
        adr     lr, __sys_trace_return          @ return address
+       mov     scno, r0                        @ syscall number (possibly new)
        add     r1, sp, #S_R0 + S_OFF           @ pointer to regs
        cmp     scno, #NR_syscalls              @ check upper syscall limit
        ldmccia r1, {r0 - r3}                   @ have to reload r0 - r3
@@ -170,6 +232,7 @@ __sys_trace:
 
 __sys_trace_return:
        str     r0, [sp, #S_R0 + S_OFF]!        @ save returned r0
+       mov     r2, scno
        mov     r1, sp
        mov     r0, #1                          @ trace exit [IP = 1]
        bl      syscall_trace
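
For reference, the registers set up around the two syscall_trace calls map
onto its arguments as used here:

	@ r0 = why   (0 = syscall entry, 1 = syscall exit)
	@ r1 = regs  (pointer to the saved register frame on the stack)
	@ r2 = scno  (syscall number)
	@
	@ on the entry call, the value returned in r0 is copied back into
	@ scno, so the tracer may rewrite the syscall number.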
@@ -181,19 +244,33 @@ __sys_trace_return:
 __cr_alignment:
        .word   cr_alignment
 #endif
+       .ltorg
+
+/*
+ * This is the syscall table declaration for native ABI syscalls.
+ * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
+ */
+#define ABI(native, compat) native
+#ifdef CONFIG_AEABI
+#define OBSOLETE(syscall) sys_ni_syscall
+#else
+#define OBSOLETE(syscall) syscall
+#endif
 
        .type   sys_call_table, #object
 ENTRY(sys_call_table)
 #include "calls.S"
+#undef ABI
+#undef OBSOLETE
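
The ABI() and OBSOLETE() markers are consumed by the entries in calls.S; a
sketch of how two assumed entries expand under the definitions above:

	@ CALL(ABI(sys_fcntl64, sys_oabi_fcntl64))
	@	-> .long sys_fcntl64		in this native table
	@	-> .long sys_oabi_fcntl64	in sys_oabi_call_table below
	@
	@ CALL(OBSOLETE(sys_time))
	@	-> .long sys_time		on a legacy-ABI kernel
	@	-> .long sys_ni_syscall		when CONFIG_AEABI is set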
 
 /*============================================================================
  * Special system call wrappers
  */
 @ r0 = syscall number
-@ r5 = syscall table
+@ r8 = syscall table
                .type   sys_syscall, #function
 sys_syscall:
-               eor     scno, r0, #__NR_SYSCALL_BASE
+               bic     scno, r0, #__NR_OABI_SYSCALL_BASE
                cmp     scno, #__NR_syscall - __NR_SYSCALL_BASE
                cmpne   scno, #NR_syscalls      @ check range
                stmloia sp, {r5, r6}            @ shuffle args
@@ -241,9 +318,15 @@ sys_sigaltstack_wrapper:
                ldr     r2, [sp, #S_OFF + S_SP]
                b       do_sigaltstack
 
-sys_futex_wrapper:
-               str     r5, [sp, #4]            @ push sixth arg
-               b       sys_futex
+sys_statfs64_wrapper:
+               teq     r1, #88
+               moveq   r1, #84
+               b       sys_statfs64
+
+sys_fstatfs64_wrapper:
+               teq     r1, #88
+               moveq   r1, #84
+               b       sys_fstatfs64
 
 /*
  * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
@@ -256,8 +339,54 @@ sys_mmap2:
                streq   r5, [sp, #4]
                beq     do_mmap2
                mov     r0, #-EINVAL
-               RETINSTR(mov,pc, lr)
+               mov     pc, lr
 #else
                str     r5, [sp, #4]
                b       do_mmap2
 #endif
+
+#ifdef CONFIG_OABI_COMPAT
+
+/*
+ * These are syscalls with argument register differences
+ */
+
+sys_oabi_pread64:
+               stmia   sp, {r3, r4}
+               b       sys_pread64
+
+sys_oabi_pwrite64:
+               stmia   sp, {r3, r4}
+               b       sys_pwrite64
+
+sys_oabi_truncate64:
+               mov     r3, r2
+               mov     r2, r1
+               b       sys_truncate64
+
+sys_oabi_ftruncate64:
+               mov     r3, r2
+               mov     r2, r1
+               b       sys_ftruncate64
+
+sys_oabi_readahead:
+               str     r3, [sp]
+               mov     r3, r2
+               mov     r2, r1
+               b       sys_readahead
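
The register shuffling in these wrappers comes from the EABI rule that 64-bit
arguments occupy an even/odd register pair.  Taking pread64(fd, buf, count,
pos) as an example (register layouts below are assumptions based on the two
argument-passing conventions, for illustration):

	@ old-ABI caller:  r0 = fd, r1 = buf, r2 = count, pos in r3/r4
	@ EABI caller:     r0 = fd, r1 = buf, r2 = count, r3 unused,
	@                  pos in r4/r5
	@
	@ the in-kernel sys_pread64 picks up pos from the two stack words
	@ that vector_swi filled with r4/r5 ("stmdb sp!, {r4, r5}" above),
	@ so sys_oabi_pread64 just overwrites those words with the old-ABI
	@ caller's r3/r4 before branching to sys_pread64.

The truncate64/ftruncate64 wrappers do the analogous shift of the pair into
r2/r3, and readahead additionally spills its count argument to the stack.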
+
+/*
+ * Let's declare a second syscall table for old ABI binaries
+ * using the compatibility syscall entries.
+ */
+#define ABI(native, compat) compat
+#define OBSOLETE(syscall) syscall
+
+       .type   sys_oabi_call_table, #object
+ENTRY(sys_oabi_call_table)
+#include "calls.S"
+#undef ABI
+#undef OBSOLETE
+
+#endif
+