[MIPS] tlb-r4k.c: Replace BARRIER nops with explicit hazard macros; add SMTC critical sections, Loongson2 ITLB flushing and an ntlb= boot parameter
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 0870220..9619f66 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -8,7 +8,6 @@
  * Carsten Langgaard, carstenl@mips.com
  * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
  */
-#include <linux/config.h>
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
 
 extern void build_tlb_refill_handler(void);
 
-/* CP0 hazard avoidance. */
-#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
-                                    "nop; nop; nop; nop; nop; nop;\n\t" \
-                                    ".set reorder\n\t")
+/*
+ * Make sure all entries differ.  If they're not different
+ * MIPS32 will take revenge ...
+ */
+#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
+
+/* Atomicity and interruptibility */
+#ifdef CONFIG_MIPS_MT_SMTC
+
+#include <asm/smtc.h>
+#include <asm/mipsmtregs.h>
+
+#define ENTER_CRITICAL(flags) \
+       { \
+       unsigned int mvpflags; \
+       local_irq_save(flags);\
+       mvpflags = dvpe()
+#define EXIT_CRITICAL(flags) \
+       evpe(mvpflags); \
+       local_irq_restore(flags); \
+       }
+#else
+
+#define ENTER_CRITICAL(flags) local_irq_save(flags)
+#define EXIT_CRITICAL(flags) local_irq_restore(flags)
+
+#endif /* CONFIG_MIPS_MT_SMTC */
+
+#if defined(CONFIG_CPU_LOONGSON2)
+/*
+ * The LOONGSON2 has a 4-entry ITLB which is a subset of the DTLB;
+ * unfortunately, the ITLB is not totally transparent to software.
+ */
+#define FLUSH_ITLB write_c0_diag(4);
+
+#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC) write_c0_diag(4); }
+
+#else
+
+#define FLUSH_ITLB
+#define FLUSH_ITLB_VM(vma)
+
+#endif
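For reference: the SMTC flavours of ENTER_CRITICAL()/EXIT_CRITICAL() above deliberately open and close a brace block so that mvpflags can be declared by ENTER and still be in scope at EXIT; the two macros must therefore always appear as a matched pair in the same scope. A minimal sketch of how a caller expands under CONFIG_MIPS_MT_SMTC (illustrative only, not part of the patch):

	unsigned long flags;

	ENTER_CRITICAL(flags);	/* { unsigned int mvpflags;                */
				/*   local_irq_save(flags);                */
				/*   mvpflags = dvpe();  -- halt other VPEs */
	/* ... TLB registers manipulated safely here ... */
	EXIT_CRITICAL(flags);	/* evpe(mvpflags);  -- restart VPEs        */
				/*   local_irq_restore(flags); }           */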
 
 void local_flush_tlb_all(void)
 {
@@ -32,7 +70,7 @@ void local_flush_tlb_all(void)
        unsigned long old_ctx;
        int entry;
 
-       local_irq_save(flags);
+       ENTER_CRITICAL(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        write_c0_entrylo0(0);
@@ -42,11 +80,8 @@ void local_flush_tlb_all(void)
 
        /* Blast 'em all away. */
        while (entry < current_cpu_data.tlbsize) {
-               /*
-                * Make sure all entries differ.  If they're not different
-                * MIPS32 will take revenge ...
-                */
-               write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
+               /* Make sure all entries differ. */
+               write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_write_indexed();
@@ -54,15 +89,25 @@ void local_flush_tlb_all(void)
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
-       local_irq_restore(flags);
+       FLUSH_ITLB;
+       EXIT_CRITICAL(flags);
 }
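A quick worked example of UNIQUE_ENTRYHI(): every TLB entry maps a pair of pages, so the dummy VPN2 values must step by two pages (hence PAGE_SHIFT + 1), and basing them in CKSEG0 (the unmapped cached segment, which never goes through the TLB) guarantees they can never match a real lookup. With 4 KB pages (PAGE_SHIFT == 12):

	UNIQUE_ENTRYHI(0) == CKSEG0 + 0x0000	/* entry 0                     */
	UNIQUE_ENTRYHI(1) == CKSEG0 + 0x2000	/* entry 1, two pages further  */
	UNIQUE_ENTRYHI(2) == CKSEG0 + 0x4000	/* entry 2, all VPN2s distinct */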
 
+/*
+ * All entries common to a mm share an asid.  To effectively flush
+ * these entries, we just bump the asid.
+ */
 void local_flush_tlb_mm(struct mm_struct *mm)
 {
-       int cpu = smp_processor_id();
+       int cpu;
+
+       preempt_disable();
 
-       if (cpu_context(cpu, mm) != 0)
-               drop_mmu_context(mm,cpu);
+       cpu = smp_processor_id();
+
+       if (cpu_context(cpu, mm) != 0)
+               drop_mmu_context(mm, cpu);
+
+       preempt_enable();
 }
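The preempt_disable()/preempt_enable() pair is what makes smp_processor_id() safe on a preemptible kernel; without it the task could migrate between reading the CPU number and bumping the ASID. A sketch of the race being closed (hypothetical, for illustration):

	cpu = smp_processor_id();	/* read on CPU 0 ...                */
					/* <-- preempted, migrated to CPU 1 */
	drop_mmu_context(mm, cpu);	/* ... still bumps CPU 0's context
					   while running on CPU 1          */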
 
 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
@@ -75,7 +120,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                unsigned long flags;
                int size;
 
-               local_irq_save(flags);
+               ENTER_CRITICAL(flags);
                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                size = (size + 1) >> 1;
                if (size <= current_cpu_data.tlbsize/2) {
@@ -92,15 +137,14 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                                start += (PAGE_SIZE << 1);
                                mtc0_tlbw_hazard();
                                tlb_probe();
-                               BARRIER;
+                               tlb_probe_hazard();
                                idx = read_c0_index();
                                write_c0_entrylo0(0);
                                write_c0_entrylo1(0);
                                if (idx < 0)
                                        continue;
                                /* Make sure all entries differ. */
-                               write_c0_entryhi(CKSEG0 +
-                                                (idx << (PAGE_SHIFT + 1)));
+                               write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                                mtc0_tlbw_hazard();
                                tlb_write_indexed();
                        }
@@ -109,7 +153,8 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                } else {
                        drop_mmu_context(mm, cpu);
                }
-               local_irq_restore(flags);
+               FLUSH_ITLB;
+               EXIT_CRITICAL(flags);
        }
 }
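The two shifts above size the job in TLB entries rather than bytes: round the byte length up to whole pages, then halve it rounding up, since each entry maps a doubled page. For example, with 4 KB pages a 20 KB range works out as below; only when the result fits in half the TLB is per-entry probing cheaper than dropping the whole ASID:

	size = (20480 + 4095) >> 12;	/* = 5 pages   */
	size = (5 + 1) >> 1;		/* = 3 entries */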
 
@@ -118,7 +163,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
        unsigned long flags;
        int size;
 
-       local_irq_save(flags);
+       ENTER_CRITICAL(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;
        if (size <= current_cpu_data.tlbsize / 2) {
@@ -135,14 +180,14 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
                        start += (PAGE_SIZE << 1);
                        mtc0_tlbw_hazard();
                        tlb_probe();
-                       BARRIER;
+                       tlb_probe_hazard();
                        idx = read_c0_index();
                        write_c0_entrylo0(0);
                        write_c0_entrylo1(0);
                        if (idx < 0)
                                continue;
                        /* Make sure all entries differ. */
-                       write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
+                       write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                        mtc0_tlbw_hazard();
                        tlb_write_indexed();
                }
@@ -151,7 +196,8 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
        } else {
                local_flush_tlb_all();
        }
-       local_irq_restore(flags);
+       FLUSH_ITLB;
+       EXIT_CRITICAL(flags);
 }
 
 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
@@ -164,26 +210,27 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 
                newpid = cpu_asid(cpu, vma->vm_mm);
                page &= (PAGE_MASK << 1);
-               local_irq_save(flags);
+               ENTER_CRITICAL(flags);
                oldpid = read_c0_entryhi();
                write_c0_entryhi(page | newpid);
                mtc0_tlbw_hazard();
                tlb_probe();
-               BARRIER;
+               tlb_probe_hazard();
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                if (idx < 0)
                        goto finish;
                /* Make sure all entries differ. */
-               write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
+               write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();
 
        finish:
                write_c0_entryhi(oldpid);
-               local_irq_restore(flags);
+               FLUSH_ITLB_VM(vma);
+               EXIT_CRITICAL(flags);
        }
 }
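Note the use of FLUSH_ITLB_VM(vma) rather than the unconditional FLUSH_ITLB here: on LOONGSON2 the ITLB caches only instruction translations, so it needs flushing only when the mapping being shot down is executable:

	FLUSH_ITLB_VM(vma);	/* LOONGSON2: write_c0_diag(4) only if
				   vma->vm_flags & VM_EXEC; no-op elsewhere */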
 
@@ -196,26 +243,26 @@ void local_flush_tlb_one(unsigned long page)
        unsigned long flags;
        int oldpid, idx;
 
-       local_irq_save(flags);
-       page &= (PAGE_MASK << 1);
+       ENTER_CRITICAL(flags);
        oldpid = read_c0_entryhi();
+       page &= (PAGE_MASK << 1);
        write_c0_entryhi(page);
        mtc0_tlbw_hazard();
        tlb_probe();
-       BARRIER;
+       tlb_probe_hazard();
        idx = read_c0_index();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        if (idx >= 0) {
                /* Make sure all entries differ. */
-               write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
+               write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();
        }
        write_c0_entryhi(oldpid);
-
-       local_irq_restore(flags);
+       FLUSH_ITLB;
+       EXIT_CRITICAL(flags);
 }
 
 /*
@@ -238,15 +285,15 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
        if (current->active_mm != vma->vm_mm)
                return;
 
-       pid = read_c0_entryhi() & ASID_MASK;
+       ENTER_CRITICAL(flags);
 
-       local_irq_save(flags);
+       pid = read_c0_entryhi() & ASID_MASK;
        address &= (PAGE_MASK << 1);
        write_c0_entryhi(address | pid);
        pgdp = pgd_offset(vma->vm_mm, address);
        mtc0_tlbw_hazard();
        tlb_probe();
-       BARRIER;
+       tlb_probe_hazard();
        pudp = pud_offset(pgdp, address);
        pmdp = pmd_offset(pudp, address);
        idx = read_c0_index();
@@ -260,15 +307,14 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
        write_c0_entrylo0(pte_val(*ptep++) >> 6);
        write_c0_entrylo1(pte_val(*ptep) >> 6);
 #endif
-       write_c0_entryhi(address | pid);
        mtc0_tlbw_hazard();
        if (idx < 0)
                tlb_write_random();
        else
                tlb_write_indexed();
        tlbw_use_hazard();
-       write_c0_entryhi(pid);
-       local_irq_restore(flags);
+       FLUSH_ITLB_VM(vma);
+       EXIT_CRITICAL(flags);
 }
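The probe result decides the write strategy: a miss (idx < 0) means no slot currently holds this VPN2|ASID, so any random slot may be allocated; a hit must be refreshed in place, since writing the same VPN2 into a second slot is exactly the duplicate-entry situation MIPS32 punishes:

	if (idx < 0)			/* probe missed                    */
		tlb_write_random();	/* allocate any slot               */
	else
		tlb_write_indexed();	/* update the slot the probe found */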
 
 #if 0
@@ -282,14 +328,14 @@ static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
        pte_t *ptep;
        int idx;
 
-       local_irq_save(flags);
+       ENTER_CRITICAL(flags);
        address &= (PAGE_MASK << 1);
        asid = read_c0_entryhi() & ASID_MASK;
        write_c0_entryhi(address | asid);
        pgdp = pgd_offset(vma->vm_mm, address);
        mtc0_tlbw_hazard();
        tlb_probe();
-       BARRIER;
+       tlb_probe_hazard();
        pmdp = pmd_offset(pgdp, address);
        idx = read_c0_index();
        ptep = pte_offset_map(pmdp, address);
@@ -301,7 +347,7 @@ static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
        else
                tlb_write_indexed();
        tlbw_use_hazard();
-       local_irq_restore(flags);
+       EXIT_CRITICAL(flags);
 }
 #endif
 
@@ -313,14 +359,14 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
        unsigned long old_pagemask;
        unsigned long old_ctx;
 
-       local_irq_save(flags);
+       ENTER_CRITICAL(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        wired = read_c0_wired();
        write_c0_wired(wired + 1);
        write_c0_index(wired);
-       BARRIER;
+       tlbw_use_hazard();      /* What is the hazard here? */
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
@@ -330,10 +376,10 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
        tlbw_use_hazard();
 
        write_c0_entryhi(old_ctx);
-       BARRIER;
+       tlbw_use_hazard();      /* What is the hazard here? */
        write_c0_pagemask(old_pagemask);
        local_flush_tlb_all();
-       local_irq_restore(flags);
+       EXIT_CRITICAL(flags);
 }
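For context, add_wired_entry() is how platform code pins a translation below the wired limit so it can never be randomly replaced, e.g. for a fixed device window. A hypothetical call (all values are placeholders, not from this patch; PM_16M is assumed to be one of the standard MIPS pagemask constants):

	/* Wire one 16 MB page pair; entrylo0/entrylo1/entryhi are
	   precomputed elsewhere -- illustrative only. */
	add_wired_entry(entrylo0, entrylo1, entryhi, PM_16M);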
 
 /*
@@ -342,7 +388,7 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
  * lifetime of the system
  */
 
-static int temp_tlb_entry __initdata;
+static int temp_tlb_entry __cpuinitdata;
 
 __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
                               unsigned long entryhi, unsigned long pagemask)
@@ -353,7 +399,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
        unsigned long old_pagemask;
        unsigned long old_ctx;
 
-       local_irq_save(flags);
+       ENTER_CRITICAL(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
@@ -377,11 +423,11 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
        write_c0_entryhi(old_ctx);
        write_c0_pagemask(old_pagemask);
 out:
-       local_irq_restore(flags);
+       EXIT_CRITICAL(flags);
        return ret;
 }
 
-static void __init probe_tlb(unsigned long config)
+static void __cpuinit probe_tlb(unsigned long config)
 {
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int reg;
@@ -393,6 +439,14 @@ static void __init probe_tlb(unsigned long config)
         */
        if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
                return;
+#ifdef CONFIG_MIPS_MT_SMTC
+       /*
+        * If the TLB is shared in an SMTC system, the total size has
+        * already been calculated and written into cpu_data's tlbsize.
+        */
+       if ((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED)
+               return;
+#endif /* CONFIG_MIPS_MT_SMTC */
 
        reg = read_c0_config1();
        if (!((config >> 7) & 3))
@@ -401,7 +455,16 @@ static void __init probe_tlb(unsigned long config)
        c->tlbsize = ((reg >> 25) & 0x3f) + 1;
 }
 
-void __init tlb_init(void)
+static int __cpuinitdata ntlb;
+static int __init set_ntlb(char *str)
+{
+       get_option(&str, &ntlb);
+       return 1;
+}
+
+__setup("ntlb=", set_ntlb);
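The new ntlb= parameter, handled at the end of tlb_init() below, restricts the randomly-replaceable TLB to the last n entries by marking the remainder wired -- mainly a debugging aid. On the kernel command line, for example:

	ntlb=16		/* leave 16 replaceable entries, wire off the rest */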
+
+void __cpuinit tlb_init(void)
 {
        unsigned int config = read_c0_config();
 
@@ -410,13 +473,31 @@ void __init tlb_init(void)
         *   - On R4600 1.7 the tlbp never hits for pages smaller than
         *     the value in the c0_pagemask register.
         *   - The entire mm handling assumes the c0_pagemask register to
-        *     be set for 4kb pages.
+        *     be set to fixed-size pages.
         */
        probe_tlb(config);
        write_c0_pagemask(PM_DEFAULT_MASK);
        write_c0_wired(0);
+       if (current_cpu_type() == CPU_R10000 ||
+           current_cpu_type() == CPU_R12000 ||
+           current_cpu_type() == CPU_R14000)
+               write_c0_framemask(0);
        temp_tlb_entry = current_cpu_data.tlbsize - 1;
+
+       /* From this point on the ARC firmware is dead.  */
        local_flush_tlb_all();
 
+       /* Did I tell you that ARC SUCKS?  */
+
+       if (ntlb) {
+               if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
+                       int wired = current_cpu_data.tlbsize - ntlb;
+
+                       write_c0_wired(wired);
+                       write_c0_index(wired - 1);
+                       printk("Restricting TLB to %d entries\n", ntlb);
+               } else
+                       printk("Ignoring invalid argument ntlb=%d\n", ntlb);
+       }
+
        build_tlb_refill_handler();
 }