.text
+#ifdef CONFIG_IRQSTACKS
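+/*
+ * Switch the stack pointer to the top of the irq stack whose base is
+ * passed in r3, call __do_softirq, then restore the original stack
+ * pointer and return.
+ */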
+_GLOBAL(call_do_softirq)
+ mflr r0
+ stw r0,4(r1)
+ stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
+ mr r1,r3
+ bl __do_softirq
+ lwz r1,0(r1)
+ lwz r0,4(r1)
+ mtlr r0
+ blr
+
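+/*
+ * Switch to the irq stack whose base is passed in r5 and call the handler
+ * whose address is passed in r6 (via CTR).  r3 and r4 are passed through
+ * to the handler unchanged; the original stack pointer is restored before
+ * returning.
+ */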
+_GLOBAL(call_handle_irq)
+ mflr r0
+ stw r0,4(r1)
+ mtctr r6
+ stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
+ mr r1,r5
+ bctrl
+ lwz r1,0(r1)
+ lwz r0,4(r1)
+ mtlr r0
+ blr
+#endif /* CONFIG_IRQSTACKS */
+
/*
* This returns the high 64 bits of the product of two 64-bit numbers.
*/
mtspr SPRN_HID1,r4
/* Store new HID1 image */
- rlwinm r6,r1,0,0,18
+ rlwinm r6,r1,0,0,(31-THREAD_SHIFT)
lwz r6,TI_CPU(r6)
slwi r6,r6,2
addis r6,r6,nap_save_hid1@ha
isync
blr /* Done */
+#ifdef CONFIG_40x
+
+/*
+ * Do an IO read access in real mode
+ */
+_GLOBAL(real_readb)
+ mfmsr r7
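+ /* clear MSR_DR so the access is done with data translation off */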
+ ori r0,r7,MSR_DR
+ xori r0,r0,MSR_DR
+ sync
+ mtmsr r0
+ sync
+ isync
+ lbz r3,0(r3)
+ sync
+ mtmsr r7
+ sync
+ isync
+ blr
+
+/*
+ * Do an IO write access in real mode
+ */
+_GLOBAL(real_writeb)
+ mfmsr r7
+ ori r0,r7,MSR_DR
+ xori r0,r0,MSR_DR
+ sync
+ mtmsr r0
+ sync
+ isync
+ stb r3,0(r4)
+ sync
+ mtmsr r7
+ sync
+ isync
+ blr
+
+#endif /* CONFIG_40x */
/*
* Flush MMU TLB
*/
+#ifndef CONFIG_FSL_BOOKE
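+/*
+ * On non-FSL_BOOKE cores these local-invalidate entry points simply fall
+ * through to the full TLB flush below.
+ */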
+_GLOBAL(_tlbil_all)
+_GLOBAL(_tlbil_pid)
+#endif
_GLOBAL(_tlbia)
#if defined(CONFIG_40x)
sync /* Flush to memory before changing mapping */
/* Invalidate all entries in TLB1 */
li r3, 0x0c
tlbivax 0,3
- /* Invalidate all entries in TLB2 */
- li r3, 0x14
- tlbivax 0,3
- /* Invalidate all entries in TLB3 */
- li r3, 0x1c
- tlbivax 0,3
msync
#ifdef CONFIG_SMP
tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
- rlwinm r8,r1,0,0,18
+ rlwinm r8,r1,0,0,(31-THREAD_SHIFT)
lwz r8,TI_CPU(r8)
oris r8,r8,10
mfmsr r10
/*
* Flush MMU TLB for a particular address
*/
+#ifndef CONFIG_FSL_BOOKE
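+/* On non-FSL_BOOKE cores _tlbil_va is just an alias for _tlbie below. */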
+_GLOBAL(_tlbil_va)
+#endif
_GLOBAL(_tlbie)
#if defined(CONFIG_40x)
+ /* We run the search with interrupts disabled because we have to change
+ * the PID, and we must not be preempted while that happens.
+ */
+ mfmsr r5
+ mfspr r6,SPRN_PID
+ wrteei 0
+ mtspr SPRN_PID,r4
tlbsx. r3, 0, r3
+ mtspr SPRN_PID,r6
+ wrtee r5
bne 10f
sync
/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
tlbwe r3, r3, TLB_TAG
isync
10:
-#elif defined(CONFIG_44x)
- mfspr r4,SPRN_MMUCR
- mfspr r5,SPRN_PID /* Get PID */
- rlwimi r4,r5,0,24,31 /* Set TID */
- mtspr SPRN_MMUCR,r4
+#elif defined(CONFIG_44x)
+ mfspr r5,SPRN_MMUCR
+ rlwimi r5,r4,0,24,31 /* Set TID */
+
+ /* We have to run the search with interrupts disabled, even critical
+ * and debug interrupts (in fact the only critical exceptions we have
+ * are debug and machine check). Otherwise an interrupt which causes
+ * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
+ mfmsr r4
+ lis r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
+ addi r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
+ andc r6,r4,r6
+ mtmsr r6
+ mtspr SPRN_MMUCR,r5
tlbsx. r3, 0, r3
+ mtmsr r4
bne 10f
sync
/* There are only 64 TLB entries, so r3 < 64,
#elif defined(CONFIG_FSL_BOOKE)
rlwinm r4, r3, 0, 0, 19
ori r5, r4, 0x08 /* TLBSEL = 1 */
- ori r6, r4, 0x10 /* TLBSEL = 2 */
- ori r7, r4, 0x18 /* TLBSEL = 3 */
tlbivax 0, r4
tlbivax 0, r5
- tlbivax 0, r6
- tlbivax 0, r7
msync
#if defined(CONFIG_SMP)
tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
- rlwinm r8,r1,0,0,18
+ rlwinm r8,r1,0,0,(31-THREAD_SHIFT)
lwz r8,TI_CPU(r8)
oris r8,r8,11
mfmsr r10
#endif /* ! CONFIG_40x */
blr
+#if defined(CONFIG_FSL_BOOKE)
+/*
+ * Flush MMU TLB, but only on the local processor (no broadcast)
+ */
+_GLOBAL(_tlbil_all)
+#define MMUCSR0_TLBFI (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
+ MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
+ li r3,(MMUCSR0_TLBFI)@l
+ mtspr SPRN_MMUCSR0, r3
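+ /* poll until the hardware clears the flash-invalidate bits */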
+1:
+ mfspr r3,SPRN_MMUCSR0
+ andi. r3,r3,MMUCSR0_TLBFI@l
+ bne 1b
+ blr
+
+/*
+ * Flush MMU TLB for a particular process id, but only on the local processor
+ * (no broadcast)
+ */
+_GLOBAL(_tlbil_pid)
+/* We currently do an invalidate-all, since there is no per-PID invalidate. */
+ li r3,(MMUCSR0_TLBFI)@l
+ mtspr SPRN_MMUCSR0, r3
+1:
+ mfspr r3,SPRN_MMUCSR0
+ andi. r3,r3,MMUCSR0_TLBFI@l
+ bne 1b
+ msync
+ isync
+ blr
+
+/*
+ * Flush MMU TLB for a particular address, but only on the local processor
+ * (no broadcast)
+ */
+_GLOBAL(_tlbil_va)
+ mfmsr r10
+ wrteei 0
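+ /* move the PID into the MAS6 SPID field used by the tlbsx search */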
+ slwi r4,r4,16
+ mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
+ tlbsx 0,r3
+ mfspr r4,SPRN_MAS1 /* check valid */
+ andis. r3,r4,MAS1_VALID@h
+ beq 1f
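+ /* clear the valid bit in MAS1 and rewrite the entry to invalidate it */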
+ rlwinm r4,r4,0,1,31
+ mtspr SPRN_MAS1,r4
+ tlbwe
+ msync
+ isync
+1: wrtee r10
+ blr
+#endif /* CONFIG_FSL_BOOKE */
+
+
/*
* Flush instruction cache.
* This is a no-op on the 601.
mtspr SPRN_L1CSR0,r3
isync
blr
-END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
mfspr r3,SPRN_L1CSR1
ori r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
mtspr SPRN_L1CSR1,r3
*
* flush_icache_range(unsigned long start, unsigned long stop)
*/
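+/*
+ * Placed in .kprobes.text (_KPROBE) because kprobes relies on this routine
+ * when patching instructions, so it must never be probed itself.
+ */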
-_GLOBAL(__flush_icache_range)
+_KPROBE(__flush_icache_range)
BEGIN_FTR_SECTION
blr /* for 601, do nothing */
-END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
li r5,L1_CACHE_BYTES-1
andc r3,r3,r5
subf r4,r3,r4
*/
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
- blr /* for 601, do nothing */
-END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+ blr
+END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
rlwinm r3,r3,0,0,19 /* Get page base address */
li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
mtctr r4
addi r3,r3,L1_CACHE_BYTES
bdnz 0b
sync
+#ifndef CONFIG_44x
+ /* We don't flush the icache on 44x. Those have a virtual icache
+ * and we don't have access to the virtual address here (it's
+ * not the page vaddr but where it's mapped in user space). The
+ * flushing of the icache on these is handled elsewhere, when
+ * a change in the address space occurs, before returning to
+ * user space
+ */
mtctr r4
1: icbi 0,r6
addi r6,r6,L1_CACHE_BYTES
bdnz 1b
sync
isync
+#endif /* CONFIG_44x */
blr
/*
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
blr /* for 601, do nothing */
-END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
mfmsr r10
rlwinm r0,r10,0,28,26 /* clear DR */
mtmsr r0
or r4,r4,r7 # LSW |= t2
blr
+/*
+ * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
+ * Returns 0 if a < b, 1 if a == b, 2 if a > b.
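+ * The high words are passed in r3 and r5, the low words in r4 and r6.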
+ */
+_GLOBAL(__ucmpdi2)
+ cmplw r3,r5
+ li r3,1
+ bne 1f
+ cmplw r4,r6
+ beqlr
+1: li r3,0
+ bltlr
+ li r3,2
+ blr
+
_GLOBAL(abs)
srawi r4,r3,31
xor r3,r3,r4
sub r3,r3,r4
blr
-_GLOBAL(_get_SP)
- mr r3,r1 /* Close enough */
- blr
-
/*
* Create a kernel thread
* kernel_thread(fn, arg, flags)
li r4,0 /* new sp (unused) */
li r0,__NR_clone
sc
- cmpwi 0,r3,0 /* parent or child? */
- bne 1f /* return if parent */
+ bns+ 1f /* did system call indicate error? */
+ neg r3,r3 /* if so, make return code negative */
+1: cmpwi 0,r3,0 /* parent or child? */
+ bne 2f /* return if parent */
li r0,0 /* make top-level stack frame */
stwu r0,-16(r1)
mtlr r30 /* fn addr in lr */
li r0,__NR_exit /* exit if function returns */
li r3,0
sc
-1: lwz r30,8(r1)
+2: lwz r30,8(r1)
lwz r31,12(r1)
addi r1,r1,16
blr
-_GLOBAL(kernel_execve)
- li r0,__NR_execve
- sc
- bnslr
- neg r3,r3
- blr
-
/*
* This routine is just here to keep GCC happy - sigh...
*/
/* set a new stack at the bottom of our page... */
/* (not really needed now) */
- addi r1, r4, KEXEC_CONTROL_CODE_SIZE - 8 /* for LR Save+Back Chain */
+ addi r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8 /* for LR Save+Back Chain */
stw r0, 0(r1)
/* Do the copies */