powerpc: Add VSX context save/restore, ptrace and signal support
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 8fed953..ab2d62f 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -19,7 +19,6 @@
  *
  */
 
-#include <linux/config.h>
 #include <linux/errno.h>
 #include <linux/sys.h>
 #include <linux/threads.h>
 #endif
 
 #ifdef CONFIG_BOOKE
-#include "head_booke.h"
-#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level)       \
-       mtspr   exc_level##_SPRG,r8;                    \
-       BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);          \
-       lwz     r0,GPR10-INT_FRAME_SIZE(r8);            \
-       stw     r0,GPR10(r11);                          \
-       lwz     r0,GPR11-INT_FRAME_SIZE(r8);            \
-       stw     r0,GPR11(r11);                          \
-       mfspr   r8,exc_level##_SPRG
-
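+/*
+ * Each exception level saves the save/restore registers of the levels
+ * below it, since a nested exception taken while this handler runs
+ * would overwrite them, then falls through to the next level's setup.
+ */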
        .globl  mcheck_transfer_to_handler
 mcheck_transfer_to_handler:
-       TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
-       b       transfer_to_handler_full
+       mfspr   r0,SPRN_DSRR0
+       stw     r0,_DSRR0(r11)
+       mfspr   r0,SPRN_DSRR1
+       stw     r0,_DSRR1(r11)
+       /* fall through */
 
        .globl  debug_transfer_to_handler
 debug_transfer_to_handler:
-       TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
-       b       transfer_to_handler_full
+       mfspr   r0,SPRN_CSRR0
+       stw     r0,_CSRR0(r11)
+       mfspr   r0,SPRN_CSRR1
+       stw     r0,_CSRR1(r11)
+       /* fall through */
 
        .globl  crit_transfer_to_handler
 crit_transfer_to_handler:
-       TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
+#ifdef CONFIG_FSL_BOOKE
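+       /* we may have interrupted a TLB miss: preserve the MAS registers */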
+       mfspr   r0,SPRN_MAS0
+       stw     r0,MAS0(r11)
+       mfspr   r0,SPRN_MAS1
+       stw     r0,MAS1(r11)
+       mfspr   r0,SPRN_MAS2
+       stw     r0,MAS2(r11)
+       mfspr   r0,SPRN_MAS3
+       stw     r0,MAS3(r11)
+       mfspr   r0,SPRN_MAS6
+       stw     r0,MAS6(r11)
+#ifdef CONFIG_PHYS_64BIT
+       mfspr   r0,SPRN_MAS7
+       stw     r0,MAS7(r11)
+#endif /* CONFIG_PHYS_64BIT */
+#endif /* CONFIG_FSL_BOOKE */
+#ifdef CONFIG_44x
+       mfspr   r0,SPRN_MMUCR
+       stw     r0,MMUCR(r11)
+#endif
+       mfspr   r0,SPRN_SRR0
+       stw     r0,_SRR0(r11)
+       mfspr   r0,SPRN_SRR1
+       stw     r0,_SRR1(r11)
+
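+       /* stash ksp_limit and rebase it onto the current exception stack */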
+       mfspr   r8,SPRN_SPRG3
+       lwz     r0,KSP_LIMIT(r8)
+       stw     r0,SAVED_KSP_LIMIT(r11)
+       rlwimi  r0,r1,0,0,(31-THREAD_SHIFT)
+       stw     r0,KSP_LIMIT(r8)
        /* fall through */
 #endif
 
@@ -78,6 +102,16 @@ crit_transfer_to_handler:
        stw     r0,GPR10(r11)
        lwz     r0,crit_r11@l(0)
        stw     r0,GPR11(r11)
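+       /* stash SRR0/1 and ksp_limit in statics, like crit_r10/crit_r11 above */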
+       mfspr   r0,SPRN_SRR0
+       stw     r0,crit_srr0@l(0)
+       mfspr   r0,SPRN_SRR1
+       stw     r0,crit_srr1@l(0)
+
+       mfspr   r8,SPRN_SPRG3
+       lwz     r0,KSP_LIMIT(r8)
+       stw     r0,saved_ksp_limit@l(0)
+       rlwimi  r0,r1,0,0,(31-THREAD_SHIFT)
+       stw     r0,KSP_LIMIT(r8)
        /* fall through */
 #endif
 
@@ -111,9 +145,9 @@ transfer_to_handler:
        stw     r11,PT_REGS(r12)
 #if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
        /* Check to see if the dbcr0 register is set up to debug.  Use the
-          single-step bit to do this. */
+          internal debug mode bit to do this. */
        lwz     r12,THREAD_DBCR0(r12)
-       andis.  r12,r12,DBCR0_IC@h
+       andis.  r12,r12,DBCR0_IDM@h
        beq+    3f
        /* From user and task is ptraced - load up global dbcr0 */
        li      r12,-1                  /* clear all pending debug events */
@@ -121,6 +155,12 @@ transfer_to_handler:
        lis     r11,global_dbcr0@ha
        tophys(r11,r11)
        addi    r11,r11,global_dbcr0@l
+#ifdef CONFIG_SMP
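+       /* global_dbcr0 is per CPU: offset by 8 bytes times the CPU number */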
+       rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
+       lwz     r9,TI_CPU(r9)
+       slwi    r9,r9,3
+       add     r11,r11,r9
+#endif
        lwz     r12,0(r11)
        mtspr   SPRN_DBCR0,r12
        lwz     r12,4(r11)
@@ -128,45 +168,57 @@ transfer_to_handler:
        stw     r12,4(r11)
 #endif
        b       3f
+
 2:     /* if from kernel, check interrupted DOZE/NAP mode and
          * check for stack overflow
          */
-#ifdef CONFIG_6xx
-       mfspr   r11,SPRN_HID0
-       mtcr    r11
-BEGIN_FTR_SECTION
-       bt-     8,power_save_6xx_restore        /* Check DOZE */
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
-BEGIN_FTR_SECTION
-       bt-     9,power_save_6xx_restore        /* Check NAP */
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
-#endif /* CONFIG_6xx */
+       lwz     r9,KSP_LIMIT(r12)
+       cmplw   r1,r9                   /* if r1 <= ksp_limit */
+       ble-    stack_ovf               /* then the kernel stack overflowed */
+5:
+#if defined(CONFIG_6xx) || defined(CONFIG_E500)
+       rlwinm  r9,r1,0,0,31-THREAD_SHIFT
+       tophys(r9,r9)                   /* check local flags */
+       lwz     r12,TI_LOCAL_FLAGS(r9)
+       mtcrf   0x01,r12
+       bt-     31-TLF_NAPPING,4f
+       bt-     31-TLF_SLEEPING,7f
+#endif /* CONFIG_6xx || CONFIG_E500 */
        .globl transfer_to_handler_cont
 transfer_to_handler_cont:
-       lwz     r11,THREAD_INFO-THREAD(r12)
-       cmplw   r1,r11                  /* if r1 <= current->thread_info */
-       ble-    stack_ovf               /* then the kernel stack overflowed */
 3:
        mflr    r9
        lwz     r11,0(r9)               /* virtual address of handler */
        lwz     r9,4(r9)                /* where to go when done */
-       FIX_SRR1(r10,r12)
        mtspr   SPRN_SRR0,r11
        mtspr   SPRN_SRR1,r10
        mtlr    r9
        SYNC
        RFI                             /* jump to handler, enable MMU */
 
+#if defined (CONFIG_6xx) || defined(CONFIG_E500)
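+       /* the interrupt woke us from nap (4:) or sleep (7:): clear the flag and resume */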
+4:     rlwinm  r12,r12,0,~_TLF_NAPPING
+       stw     r12,TI_LOCAL_FLAGS(r9)
+       b       power_save_ppc32_restore
+
+7:     rlwinm  r12,r12,0,~_TLF_SLEEPING
+       stw     r12,TI_LOCAL_FLAGS(r9)
+       lwz     r9,_MSR(r11)            /* if sleeping, clear MSR.EE */
+       rlwinm  r9,r9,0,~MSR_EE
+       lwz     r12,_LINK(r11)          /* and return to address in LR */
+       b       fast_exception_return
+#endif
+
 /*
  * On kernel stack overflow, load up an initial stack pointer
  * and call StackOverflow(regs), which should not return.
  */
 stack_ovf:
        /* sometimes we use a statically-allocated stack, which is OK. */
-       lis     r11,_end@h
-       ori     r11,r11,_end@l
-       cmplw   r1,r11
-       ble     3b                      /* r1 <= &_end is OK */
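+       /* use r12 here: r11 still addresses the exception frame for SAVE_NVGPRS below */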
+       lis     r12,_end@h
+       ori     r12,r12,_end@l
+       cmplw   r1,r12
+       ble     5b                      /* r1 <= &_end is OK */
        SAVE_NVGPRS(r11)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        lis     r1,init_thread_union@ha
@@ -189,7 +241,6 @@ stack_ovf:
 0:
 
 _GLOBAL(DoSyscall)
-       stw     r0,THREAD+LAST_SYSCALL(r2)
        stw     r3,ORIG_GPR3(r1)
        li      r12,0
        stw     r12,RESULT(r1)
@@ -227,7 +278,7 @@ ret_from_syscall:
        MTMSRD(r10)
        lwz     r9,TI_FLAGS(r12)
        li      r8,-_LAST_ERRNO
-       andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL)
+       andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
        bne-    syscall_exit_work
        cmplw   0,r3,r8
        blt+    syscall_exit_cont
@@ -237,12 +288,22 @@ ret_from_syscall:
        stw     r11,_CCR(r1)
 syscall_exit_cont:
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
-       /* If the process has its own DBCR0 value, load it up.  The single
-          step bit tells us that dbcr0 should be loaded. */
+       /* If the process has its own DBCR0 value, load it up.  The internal
+          debug mode bit tells us that dbcr0 should be loaded. */
        lwz     r0,THREAD+THREAD_DBCR0(r2)
-       andis.  r10,r0,DBCR0_IC@h
+       andis.  r10,r0,DBCR0_IDM@h
        bnel-   load_dbcr0
 #endif
+#ifdef CONFIG_44x
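+       /* 440 may need its icache invalidated before returning to user (2: below) */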
+       lis     r4,icache_44x_need_flush@ha
+       lwz     r5,icache_44x_need_flush@l(r4)
+       cmplwi  cr0,r5,0
+       bne-    2f
+1:
+#endif /* CONFIG_44x */
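+       /* some cores require stwcx. to be paired with a preceding lwarx */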
+BEGIN_FTR_SECTION
+       lwarx   r7,0,r1
+END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
        stwcx.  r0,0,r1                 /* to clear the reservation */
        lwz     r4,_LINK(r1)
        lwz     r5,_CCR(r1)
@@ -257,6 +318,12 @@ syscall_exit_cont:
        mtspr   SPRN_SRR1,r8
        SYNC
        RFI
+#ifdef CONFIG_44x
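+       /* invalidate the entire icache, clear the flag and rejoin the exit path */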
+2:     li      r7,0
+       iccci   r0,r0
+       stw     r7,icache_44x_need_flush@l(r4)
+       b       1b
+#endif  /* CONFIG_44x */
 
 66:    li      r3,-ENOSYS
        b       ret_from_syscall
@@ -287,8 +354,10 @@ syscall_dotrace:
 
 syscall_exit_work:
        andi.   r0,r9,_TIF_RESTOREALL
-       bne-    2f
-       cmplw   0,r3,r8
+       beq+    0f
+       REST_NVGPRS(r1)
+       b       2f
+0:     cmplw   0,r3,r8
        blt+    1f
        andi.   r0,r9,_TIF_NOERROR
        bne-    1f
@@ -302,9 +371,7 @@ syscall_exit_work:
 2:     andi.   r0,r9,(_TIF_PERSYSCALL_MASK)
        beq     4f
 
-       /* Clear per-syscall TIF flags if any are set, but _leave_
-       _TIF_SAVE_NVGPRS set in r9 since we haven't dealt with that
-       yet.  */
+       /* Clear per-syscall TIF flags if any are set.  */
 
        li      r11,_TIF_PERSYSCALL_MASK
        addi    r12,r12,TI_FLAGS
@@ -318,8 +385,13 @@ syscall_exit_work:
        subi    r12,r12,TI_FLAGS
        
 4:     /* Anything which requires enabling interrupts? */
-       andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SAVE_NVGPRS)
-       beq     7f
+       andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
+       beq     ret_from_except
+
+       /* Re-enable interrupts */
+       ori     r10,r10,MSR_EE
+       SYNC
+       MTMSRD(r10)
 
        /* Save NVGPRS if they're not saved already */
        lwz     r4,_TRAP(r1)
@@ -328,71 +400,11 @@ syscall_exit_work:
        SAVE_NVGPRS(r1)
        li      r4,0xc00
        stw     r4,_TRAP(r1)
-
-       /* Re-enable interrupts */
-5:     ori     r10,r10,MSR_EE
-       SYNC
-       MTMSRD(r10)
-
-       andi.   r0,r9,_TIF_SAVE_NVGPRS
-       bne     save_user_nvgprs
-
-save_user_nvgprs_cont:
-       andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
-       beq     7f
-
+5:
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_syscall_trace_leave
-       REST_NVGPRS(r1)
-
-6:     lwz     r3,GPR3(r1)
-       LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
-       SYNC
-       MTMSRD(r10)             /* disable interrupts again */
-       rlwinm  r12,r1,0,0,(31-THREAD_SHIFT)    /* current_thread_info() */
-       lwz     r9,TI_FLAGS(r12)
-7:
-       andi.   r0,r9,_TIF_NEED_RESCHED
-       bne     8f
-       lwz     r5,_MSR(r1)
-       andi.   r5,r5,MSR_PR
-       beq     ret_from_except
-       andi.   r0,r9,_TIF_SIGPENDING
-       beq     ret_from_except
-       b       do_user_signal
-8:
-       ori     r10,r10,MSR_EE
-       SYNC
-       MTMSRD(r10)             /* re-enable interrupts */
-       bl      schedule
-       b       6b
-
-save_user_nvgprs:
-       ld      r8,TI_SIGFRAME(r12)
-
-.macro savewords start, end
-  1:   stw \start,4*(\start)(r8)
-       .section __ex_table,"a"
-       .align  2
-       .long   1b,save_user_nvgprs_fault
-       .previous
-       .if \end - \start
-       savewords "(\start+1)",\end
-       .endif
-.endm  
-       savewords 14,31
-       b       save_user_nvgprs_cont
-
-       
-save_user_nvgprs_fault:
-       li      r3,11           /* SIGSEGV */
-       ld      r4,TI_TASK(r12)
-       bl      force_sigsegv
+       b       ret_from_except_full
 
-       rlwinm  r12,r1,0,0,(31-THREAD_SHIFT)    /* current_thread_info() */
-       ld      r9,TI_FLAGS(r12)
-       b       save_user_nvgprs_cont
-       
 #ifdef SHOW_SYSCALLS
 do_show_syscall:
 #ifdef SHOW_SYSCALLS_TASK
@@ -490,6 +502,14 @@ ppc_clone:
        stw     r0,_TRAP(r1)            /* register set saved */
        b       sys_clone
 
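+       /* swapcontext works on the full register set, so save the non-volatile GPRs */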
+       .globl  ppc_swapcontext
+ppc_swapcontext:
+       SAVE_NVGPRS(r1)
+       lwz     r0,_TRAP(r1)
+       rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
+       stw     r0,_TRAP(r1)            /* register set saved */
+       b       sys_swapcontext
+
 /*
  * Top-level page fault handling.
  * This is in assembler because if do_page_fault tells us that
@@ -550,9 +570,11 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_SPE
+BEGIN_FTR_SECTION
        oris    r0,r0,MSR_SPE@h  /* Disable SPE */
        mfspr   r12,SPRN_SPEFSCR /* save spefscr register value */
        stw     r12,THREAD+THREAD_SPEFSCR(r2)
+END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 #endif /* CONFIG_SPE */
        and.    r0,r0,r11       /* FP or altivec or SPE enabled? */
        beq+    1f
@@ -588,8 +610,10 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_SPE
+BEGIN_FTR_SECTION
        lwz     r0,THREAD+THREAD_SPEFSCR(r2)
        mtspr   SPRN_SPEFSCR,r0         /* restore SPEFSCR reg */
+END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 #endif /* CONFIG_SPE */
 
        lwz     r0,_CCR(r1)
@@ -642,7 +666,11 @@ fast_exception_return:
        mr      r12,r4          /* restart at exc_exit_restart */
        b       2b
 
-       .comm   fee_restarts,4
+       .section .bss
+       .align  2
+fee_restarts:
+       .space  4
+       .previous
 
 /* aargh, a nonrecoverable interrupt, panic */
 /* aargh, we don't know which trap this is */
@@ -683,15 +711,15 @@ user_exc_return:          /* r10 contains MSR_KERNEL here */
        /* Check current_thread_info()->flags */
        rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
        lwz     r9,TI_FLAGS(r9)
-       andi.   r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL)
+       andi.   r0,r9,_TIF_USER_WORK_MASK
        bne     do_work
 
 restore_user:
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
-       /* Check whether this process has its own DBCR0 value.  The single
-          step bit tells us that dbcr0 should be loaded. */
+       /* Check whether this process has its own DBCR0 value.  The internal
+          debug mode bit tells us that dbcr0 should be loaded. */
        lwz     r0,THREAD+THREAD_DBCR0(r2)
-       andis.  r10,r0,DBCR0_IC@h
+       andis.  r10,r0,DBCR0_IDM@h
        bnel-   load_dbcr0
 #endif
 
@@ -721,6 +749,16 @@ resume_kernel:
 
        /* interrupts are hard-disabled at this point */
 restore:
+#ifdef CONFIG_44x
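+       /* same 440 icache invalidation check as on the syscall exit path */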
+       lis     r4,icache_44x_need_flush@ha
+       lwz     r5,icache_44x_need_flush@l(r4)
+       cmplwi  cr0,r5,0
+       beq+    1f
+       li      r6,0
+       iccci   r0,r0
+       stw     r6,icache_44x_need_flush@l(r4)
+1:
+#endif  /* CONFIG_44x */
        lwz     r0,GPR0(r1)
        lwz     r2,GPR2(r1)
        REST_4GPRS(3, r1)
@@ -732,6 +770,9 @@ restore:
        mtctr   r11
 
        PPC405_ERR77(0,r1)
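+       /* pair the reservation-clearing stwcx. with a lwarx on cores that need it */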
+BEGIN_FTR_SECTION
+       lwarx   r11,0,r1
+END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
        stwcx.  r0,0,r1                 /* to clear the reservation */
 
 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
@@ -861,17 +902,90 @@ exc_exit_restart_end:
        exc_lvl_rfi;                                                    \
        b       .;              /* prevent prefetch past exc_lvl_rfi */
 
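+/* restore an exception level's SRR pair saved by the entry code above */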
+#define        RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)                        \
+       lwz     r9,_##exc_lvl_srr0(r1);                                 \
+       lwz     r10,_##exc_lvl_srr1(r1);                                \
+       mtspr   SPRN_##exc_lvl_srr0,r9;                                 \
+       mtspr   SPRN_##exc_lvl_srr1,r10;
+
+#if defined(CONFIG_FSL_BOOKE)
+#ifdef CONFIG_PHYS_64BIT
+#define        RESTORE_MAS7                                                    \
+       lwz     r11,MAS7(r1);                                           \
+       mtspr   SPRN_MAS7,r11;
+#else
+#define        RESTORE_MAS7
+#endif /* CONFIG_PHYS_64BIT */
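+/* reload the MMU state (MAS registers, or MMUCR on 44x) saved at entry */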
+#define RESTORE_MMU_REGS                                               \
+       lwz     r9,MAS0(r1);                                            \
+       lwz     r10,MAS1(r1);                                           \
+       lwz     r11,MAS2(r1);                                           \
+       mtspr   SPRN_MAS0,r9;                                           \
+       lwz     r9,MAS3(r1);                                            \
+       mtspr   SPRN_MAS1,r10;                                          \
+       lwz     r10,MAS6(r1);                                           \
+       mtspr   SPRN_MAS2,r11;                                          \
+       mtspr   SPRN_MAS3,r9;                                           \
+       mtspr   SPRN_MAS6,r10;                                          \
+       RESTORE_MAS7;
+#elif defined(CONFIG_44x)
+#define RESTORE_MMU_REGS                                               \
+       lwz     r9,MMUCR(r1);                                           \
+       mtspr   SPRN_MMUCR,r9;
+#else
+#define RESTORE_MMU_REGS
+#endif
+
+#ifdef CONFIG_40x
        .globl  ret_from_crit_exc
 ret_from_crit_exc:
+       mfspr   r9,SPRN_SPRG3
+       lis     r10,saved_ksp_limit@ha;
+       lwz     r10,saved_ksp_limit@l(r10);
+       tovirt(r9,r9);
+       stw     r10,KSP_LIMIT(r9)
+       lis     r9,crit_srr0@ha;
+       lwz     r9,crit_srr0@l(r9);
+       lis     r10,crit_srr1@ha;
+       lwz     r10,crit_srr1@l(r10);
+       mtspr   SPRN_SRR0,r9;
+       mtspr   SPRN_SRR1,r10;
        RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
+#endif /* CONFIG_40x */
 
 #ifdef CONFIG_BOOKE
+       .globl  ret_from_crit_exc
+ret_from_crit_exc:
+       mfspr   r9,SPRN_SPRG3
+       lwz     r10,SAVED_KSP_LIMIT(r1)
+       stw     r10,KSP_LIMIT(r9)
+       RESTORE_xSRR(SRR0,SRR1);
+       RESTORE_MMU_REGS;
+       RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
+
        .globl  ret_from_debug_exc
 ret_from_debug_exc:
+       mfspr   r9,SPRN_SPRG3
+       lwz     r10,SAVED_KSP_LIMIT(r1)
+       stw     r10,KSP_LIMIT(r9)
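+       /* copy the exception stack's preempt count back to the task's thread_info */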
+       lwz     r9,THREAD_INFO-THREAD(r9)
+       rlwinm  r10,r1,0,0,(31-THREAD_SHIFT)
+       lwz     r10,TI_PREEMPT(r10)
+       stw     r10,TI_PREEMPT(r9)
+       RESTORE_xSRR(SRR0,SRR1);
+       RESTORE_xSRR(CSRR0,CSRR1);
+       RESTORE_MMU_REGS;
        RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)
 
        .globl  ret_from_mcheck_exc
 ret_from_mcheck_exc:
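+       /* machine check ranks above crit and debug: restore all three SRR pairs */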
+       mfspr   r9,SPRN_SPRG3
+       lwz     r10,SAVED_KSP_LIMIT(r1)
+       stw     r10,KSP_LIMIT(r9)
+       RESTORE_xSRR(SRR0,SRR1);
+       RESTORE_xSRR(CSRR0,CSRR1);
+       RESTORE_xSRR(DSRR0,DSRR1);
+       RESTORE_MMU_REGS;
        RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
 #endif /* CONFIG_BOOKE */
 
@@ -888,6 +1002,12 @@ load_dbcr0:
        mfspr   r10,SPRN_DBCR0
        lis     r11,global_dbcr0@ha
        addi    r11,r11,global_dbcr0@l
+#ifdef CONFIG_SMP
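+       /* select this CPU's slot in global_dbcr0 */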
+       rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
+       lwz     r9,TI_CPU(r9)
+       slwi    r9,r9,3
+       add     r11,r11,r9
+#endif
        stw     r10,0(r11)
        mtspr   SPRN_DBCR0,r0
        lwz     r10,4(r11)
@@ -897,7 +1017,11 @@ load_dbcr0:
        mtspr   SPRN_DBSR,r11   /* clear all pending debug events */
        blr
 
-       .comm   global_dbcr0,8
+       .section .bss
+       .align  4
+global_dbcr0:
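+       /* two words per CPU: the saved DBCR0 value and a use count */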
+       .space  8*NR_CPUS
+       .previous
 #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
 
 do_work:                       /* r10 contains MSR_KERNEL here */
@@ -917,7 +1041,7 @@ recheck:
        lwz     r9,TI_FLAGS(r9)
        andi.   r0,r9,_TIF_NEED_RESCHED
        bne-    do_resched
-       andi.   r0,r9,_TIF_SIGPENDING
+       andi.   r0,r9,_TIF_USER_WORK_MASK
        beq     restore_user
 do_user_signal:                        /* r10 contains MSR_KERNEL here */
        ori     r10,r10,MSR_EE
@@ -972,7 +1096,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_601)
        /* shouldn't return */
        b       4b
 
-       .comm   ee_restarts,4
+       .section .bss
+       .align  2
+ee_restarts:
+       .space  4
+       .previous
 
 /*
  * PROM code for specific machines follows.  Put it
@@ -988,7 +1116,7 @@ _GLOBAL(enter_rtas)
        stwu    r1,-INT_FRAME_SIZE(r1)
        mflr    r0
        stw     r0,INT_FRAME_SIZE+4(r1)
-       LOADADDR(r4, rtas)
+       LOAD_REG_ADDR(r4, rtas)
        lis     r6,1f@ha        /* physical return address for rtas */
        addi    r6,r6,1f@l
        tophys(r6,r6)