smp_call_function: get rid of the unused nonatomic/retry argument
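
What follows is the arch/mips/mm/c-r4k.c part of the change, shown as a blobdiff between two trees; it also picks up unrelated churn that landed between the two blobs (__init -> __cpuinit annotations, CONFIG_MIPS_CMP support, the has_valid_asid() helper, and the cca=/coherentio boot options).

The interface change itself is mechanical: the nonatomic/retry argument of smp_call_function() and friends was never used, so it is removed and every caller drops one argument. A minimal sketch of a call site before and after (do_flush() and flush_everywhere() are hypothetical names, for illustration only):

	#include <linux/smp.h>

	static void do_flush(void *info)
	{
		/* per-CPU cache work */
	}

	static void flush_everywhere(void)
	{
		/*
		 * Before: smp_call_function(do_flush, NULL, 1, 1);
		 *                                           ^ retry, unused
		 * After: only func, info and wait remain.
		 */
		smp_call_function(do_flush, NULL, 1);
	}
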
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 971f6c0..71df339 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -7,12 +7,14 @@
  * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  */
+#include <linux/hardirq.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
 #include <linux/kernel.h>
 #include <linux/linkage.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 #include <linux/bitops.h>
 
 #include <asm/bcache.h>
@@ -39,17 +41,23 @@
  *    primary cache.
  */
 static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
-                                   int retry, int wait)
+                                   int wait)
 {
        preempt_disable();
 
 #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
-       smp_call_function(func, info, retry, wait);
+       smp_call_function(func, info, wait);
 #endif
        func(info);
        preempt_enable();
 }
 
+#if defined(CONFIG_MIPS_CMP)
+#define cpu_has_safe_index_cacheops 0
+#else
+#define cpu_has_safe_index_cacheops 1
+#endif
+
 /*
  * Must die.
  */
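
Two separate things happen in the hunk above. r4k_on_each_cpu() simply loses the retry argument; it still collapses to a plain local call when every core shares the primary cache. New is cpu_has_safe_index_cacheops: on CONFIG_MIPS_CMP systems, index-type cache operations are treated as unsafe (an indexed op acts on whatever line happens to sit at that index in the local cache, bypassing the coherence protocol), so the DMA hunks further down use the macro to avoid whole-cache index blasts. The gating pattern, distilled (a sketch mirroring the code below, not new logic):

	/*
	 * Prefer per-address ("hit") ops when indexed ops are unsafe,
	 * even for ranges that span the entire d-cache.
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size)
		r4k_blast_dcache();			/* indexed */
	else
		blast_dcache_range(addr, addr + size);	/* hit-based */
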
@@ -92,7 +100,7 @@ static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
        blast_dcache32_page(addr);
 }
 
-static void __init r4k_blast_dcache_page_setup(void)
+static void __cpuinit r4k_blast_dcache_page_setup(void)
 {
        unsigned long  dc_lsize = cpu_dcache_line_size();
 
@@ -106,7 +114,7 @@ static void __init r4k_blast_dcache_page_setup(void)
 
 static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
 
-static void __init r4k_blast_dcache_page_indexed_setup(void)
+static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
 {
        unsigned long dc_lsize = cpu_dcache_line_size();
 
@@ -120,7 +128,7 @@ static void __init r4k_blast_dcache_page_indexed_setup(void)
 
 static void (* r4k_blast_dcache)(void);
 
-static void __init r4k_blast_dcache_setup(void)
+static void __cpuinit r4k_blast_dcache_setup(void)
 {
        unsigned long dc_lsize = cpu_dcache_line_size();
 
@@ -205,7 +213,7 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)
 
 static void (* r4k_blast_icache_page)(unsigned long addr);
 
-static void __init r4k_blast_icache_page_setup(void)
+static void __cpuinit r4k_blast_icache_page_setup(void)
 {
        unsigned long ic_lsize = cpu_icache_line_size();
 
@@ -222,7 +230,7 @@ static void __init r4k_blast_icache_page_setup(void)
 
 static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
 
-static void __init r4k_blast_icache_page_indexed_setup(void)
+static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
 {
        unsigned long ic_lsize = cpu_icache_line_size();
 
@@ -246,7 +254,7 @@ static void __init r4k_blast_icache_page_indexed_setup(void)
 
 static void (* r4k_blast_icache)(void);
 
-static void __init r4k_blast_icache_setup(void)
+static void __cpuinit r4k_blast_icache_setup(void)
 {
        unsigned long ic_lsize = cpu_icache_line_size();
 
@@ -267,7 +275,7 @@ static void __init r4k_blast_icache_setup(void)
 
 static void (* r4k_blast_scache_page)(unsigned long addr);
 
-static void __init r4k_blast_scache_page_setup(void)
+static void __cpuinit r4k_blast_scache_page_setup(void)
 {
        unsigned long sc_lsize = cpu_scache_line_size();
 
@@ -285,7 +293,7 @@ static void __init r4k_blast_scache_page_setup(void)
 
 static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
 
-static void __init r4k_blast_scache_page_indexed_setup(void)
+static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
 {
        unsigned long sc_lsize = cpu_scache_line_size();
 
@@ -303,7 +311,7 @@ static void __init r4k_blast_scache_page_indexed_setup(void)
 
 static void (* r4k_blast_scache)(void);
 
-static void __init r4k_blast_scache_setup(void)
+static void __cpuinit r4k_blast_scache_setup(void)
 {
        unsigned long sc_lsize = cpu_scache_line_size();
 
@@ -342,33 +350,61 @@ static inline void local_r4k___flush_cache_all(void * args)
 
 static void r4k___flush_cache_all(void)
 {
-       r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
+       r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1);
+}
+
+static inline int has_valid_asid(const struct mm_struct *mm)
+{
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+       int i;
+
+       for_each_online_cpu(i)
+               if (cpu_context(i, mm))
+                       return 1;
+
+       return 0;
+#else
+       return cpu_context(smp_processor_id(), mm);
+#endif
+}
+
+static void r4k__flush_cache_vmap(void)
+{
+       r4k_blast_dcache();
+}
+
+static void r4k__flush_cache_vunmap(void)
+{
+       r4k_blast_dcache();
 }
 
 static inline void local_r4k_flush_cache_range(void * args)
 {
        struct vm_area_struct *vma = args;
+       int exec = vma->vm_flags & VM_EXEC;
 
-       if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
+       if (!(has_valid_asid(vma->vm_mm)))
                return;
 
        r4k_blast_dcache();
+       if (exec)
+               r4k_blast_icache();
 }
 
 static void r4k_flush_cache_range(struct vm_area_struct *vma,
        unsigned long start, unsigned long end)
 {
-       if (!cpu_has_dc_aliases)
-               return;
+       int exec = vma->vm_flags & VM_EXEC;
 
-       r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
+       if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
+               r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1);
 }
 
 static inline void local_r4k_flush_cache_mm(void * args)
 {
        struct mm_struct *mm = args;
 
-       if (!cpu_context(smp_processor_id(), mm))
+       if (!has_valid_asid(mm))
                return;
 
        /*
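
The new has_valid_asid() helper replaces the bare cpu_context(smp_processor_id(), mm) tests: with MIPS_MT_SMP/SMTC, several virtual processors share one set of caches, so an mm may hold a live ASID on a CPU other than the one running the flush, and checking only the local context could skip a flush that is still needed. The new r4k__flush_cache_vmap()/r4k__flush_cache_vunmap() hooks just blast the d-cache for vmalloc mappings. r4k_flush_cache_range() also starts caring about executable mappings: where the i-cache does not snoop d-cache fills, a VM_EXEC range must blast the i-cache too. The decision, condensed (sketch):

	int exec = vma->vm_flags & VM_EXEC;

	/*
	 * d-cache work: only if the d-cache is virtually aliased;
	 * i-cache work: only for executable ranges on cores whose
	 * i-cache does not see d-cache fills.
	 */
	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
		r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1);
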
@@ -393,7 +429,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
        if (!cpu_has_dc_aliases)
                return;
 
-       r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
+       r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1);
 }
 
 struct flush_cache_page_args {
@@ -410,6 +446,7 @@ static inline void local_r4k_flush_cache_page(void *args)
        struct page *page = pfn_to_page(fcp_args->pfn);
        int exec = vma->vm_flags & VM_EXEC;
        struct mm_struct *mm = vma->vm_mm;
+       int map_coherent = 0;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
@@ -420,7 +457,7 @@ static inline void local_r4k_flush_cache_page(void *args)
         * If ownes no valid ASID yet, cannot possibly have gotten
         * this page into the cache.
         */
-       if (cpu_context(smp_processor_id(), mm) == 0)
+       if (!has_valid_asid(mm))
                return;
 
        addr &= PAGE_MASK;
@@ -433,7 +470,7 @@ static inline void local_r4k_flush_cache_page(void *args)
         * If the page isn't marked valid, the page cannot possibly be
         * in the cache.
         */
-       if (!(pte_val(*ptep) & _PAGE_PRESENT))
+       if (!(pte_present(*ptep)))
                return;
 
        if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
@@ -443,7 +480,9 @@ static inline void local_r4k_flush_cache_page(void *args)
                 * Use kmap_coherent or kmap_atomic to do flushes for
                 * another ASID than the current one.
                 */
-               if (cpu_has_dc_aliases)
+               map_coherent = (cpu_has_dc_aliases &&
+                               page_mapped(page) && !Page_dcache_dirty(page));
+               if (map_coherent)
                        vaddr = kmap_coherent(page, addr);
                else
                        vaddr = kmap_atomic(page, KM_USER0);
@@ -466,7 +505,7 @@ static inline void local_r4k_flush_cache_page(void *args)
        }
 
        if (vaddr) {
-               if (cpu_has_dc_aliases)
+               if (map_coherent)
                        kunmap_coherent();
                else
                        kunmap_atomic(vaddr, KM_USER0);
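
local_r4k_flush_cache_page() used to key both the map and the unmap on cpu_has_dc_aliases alone; kmap_coherent() is now used only when the page is also mapped and not marked d-cache dirty, since only then does a mapping at the user colour do the right thing. Recording the choice in map_coherent guarantees the unmap mirrors the map even though the predicate depends on page state that can change. The pattern, distilled (sketch):

	int map_coherent = cpu_has_dc_aliases &&
			   page_mapped(page) && !Page_dcache_dirty(page);
	void *vaddr = map_coherent ? kmap_coherent(page, addr)
				   : kmap_atomic(page, KM_USER0);

	/* ... flush through vaddr ... */

	if (map_coherent)			/* must match the map */
		kunmap_coherent();
	else
		kunmap_atomic(vaddr, KM_USER0);
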
@@ -482,7 +521,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
        args.addr = addr;
        args.pfn = pfn;
 
-       r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
+       r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1);
 }
 
 static inline void local_r4k_flush_data_cache_page(void * addr)
@@ -492,7 +531,11 @@ static inline void local_r4k_flush_data_cache_page(void * addr)
 
 static void r4k_flush_data_cache_page(unsigned long addr)
 {
-       r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
+       if (in_atomic())
+               local_r4k_flush_data_cache_page((void *)addr);
+       else
+               r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
+                               1);
 }
 
 struct flush_icache_range_args {
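
r4k_flush_data_cache_page() can be reached from atomic context, where smp_call_function() must not be used (with wait=1 it spins until the other CPUs respond, which can deadlock with interrupts off). The new in_atomic() check - this is what the <linux/hardirq.h> include at the top of the diff is for - falls back to a local-only flush in that case:

	#include <linux/hardirq.h>	/* in_atomic() */

	if (in_atomic())		/* no cross-CPU calls here */
		local_r4k_flush_data_cache_page((void *)addr);
	else
		r4k_on_each_cpu(local_r4k_flush_data_cache_page,
				(void *)addr, 1);
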
@@ -513,13 +556,6 @@ static inline void local_r4k_flush_icache_range(void *args)
                        R4600_HIT_CACHEOP_WAR_IMPL;
                        protected_blast_dcache_range(start, end);
                }
-
-               if (!cpu_icache_snoops_remote_store && scache_size) {
-                       if (end - start > scache_size)
-                               r4k_blast_scache();
-                       else
-                               protected_blast_scache_range(start, end);
-               }
        }
 
        if (end - start > icache_size)
@@ -535,7 +571,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
        args.start = start;
        args.end = end;
 
-       r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
+       r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1);
        instruction_hazard();
 }
 
@@ -559,7 +595,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
         * subset property so we have to flush the primary caches
         * explicitly
         */
-       if (size >= dcache_size) {
+       if (cpu_has_safe_index_cacheops && size >= dcache_size) {
                r4k_blast_dcache();
        } else {
                R4600_HIT_CACHEOP_WAR_IMPL;
@@ -578,15 +614,15 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
                if (size >= scache_size)
                        r4k_blast_scache();
                else
-                       blast_scache_range(addr, addr + size);
+                       blast_inv_scache_range(addr, addr + size);
                return;
        }
 
-       if (size >= dcache_size) {
+       if (cpu_has_safe_index_cacheops && size >= dcache_size) {
                r4k_blast_dcache();
        } else {
                R4600_HIT_CACHEOP_WAR_IMPL;
-               blast_dcache_range(addr, addr + size);
+               blast_inv_dcache_range(addr, addr + size);
        }
 
        bc_inv(addr, size);
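
r4k_dma_cache_inv() handles buffers a device writes to (DMA_FROM_DEVICE). Two fixes land here: the whole-cache shortcut is additionally gated on cpu_has_safe_index_cacheops, and the range ops switch from blast_*_range() (writeback + invalidate) to blast_inv_*_range() (invalidate only). Invalidate-only is the right semantic for such buffers: any dirty CPU lines covering them are stale by definition, and writing them back could overwrite data the device has already placed in memory. Contrast of the two primitives (sketch, not a complete function):

	blast_dcache_range(addr, addr + size);		/* wback + inv */
	blast_inv_dcache_range(addr, addr + size);	/* inv only    */
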
@@ -636,7 +672,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
 
 static void r4k_flush_cache_sigtramp(unsigned long addr)
 {
-       r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
+       r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1);
 }
 
 static void r4k_flush_icache_all(void)
@@ -677,11 +713,11 @@ static inline void rm7k_erratum31(void)
        }
 }
 
-static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
+static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
        "3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
 };
 
-static void __init probe_pcache(void)
+static void __cpuinit probe_pcache(void)
 {
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int config = read_c0_config();
@@ -944,6 +980,7 @@ static void __init probe_pcache(void)
        case CPU_24K:
        case CPU_34K:
        case CPU_74K:
+       case CPU_1004K:
                if ((read_c0_config7() & (1 << 16))) {
                        /* effectively physically indexed dcache,
                           thus no virtual aliases. */
@@ -969,6 +1006,8 @@ static void __init probe_pcache(void)
        case CPU_AU1100:
        case CPU_AU1550:
        case CPU_AU1200:
+       case CPU_AU1210:
+       case CPU_AU1250:
                c->icache.flags |= MIPS_CACHE_IC_F_DC;
                break;
        }
@@ -983,11 +1022,15 @@ static void __init probe_pcache(void)
 
        printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
               icache_size >> 10,
-              cpu_has_vtag_icache ? "virtually tagged" : "physically tagged",
+              cpu_has_vtag_icache ? "VIVT" : "VIPT",
               way_string[c->icache.ways], c->icache.linesz);
 
-       printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
-              dcache_size >> 10, way_string[c->dcache.ways], c->dcache.linesz);
+       printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
+              dcache_size >> 10, way_string[c->dcache.ways],
+              (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
+              (c->dcache.flags & MIPS_CACHE_ALIASES) ?
+                       "cache aliases" : "no aliases",
+              c->dcache.linesz);
 }
 
 /*
@@ -996,7 +1039,7 @@ static void __init probe_pcache(void)
  * executes in KSEG1 space or else you will crash and burn badly.  You have
  * been warned.
  */
-static int __init probe_scache(void)
+static int __cpuinit probe_scache(void)
 {
        unsigned long flags, addr, begin, end, pow2;
        unsigned int config = read_c0_config();
@@ -1075,7 +1118,7 @@ extern int r5k_sc_init(void);
 extern int rm7k_sc_init(void);
 extern int mips_sc_init(void);
 
-static void __init setup_scache(void)
+static void __cpuinit setup_scache(void)
 {
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int config = read_c0_config();
@@ -1084,7 +1127,7 @@ static void __init setup_scache(void)
        /*
         * Do the probing thing on R4000SC and R4400SC processors.  Other
         * processors don't have a S-cache that would be relevant to the
-        * Linux memory managment.
+        * Linux memory management.
         */
        switch (c->cputype) {
        case CPU_R4000SC:
@@ -1186,9 +1229,47 @@ void au1x00_fixup_config_od(void)
        }
 }
 
-static void __init coherency_setup(void)
+/* CP0 hazard avoidance. */
+#define NXP_BARRIER()                                                  \
+        __asm__ __volatile__(                                          \
+       ".set noreorder\n\t"                                            \
+       "nop; nop; nop; nop; nop; nop;\n\t"                             \
+       ".set reorder\n\t")
+
+static void nxp_pr4450_fixup_config(void)
 {
-       change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);
+       unsigned long config0;
+
+       config0 = read_c0_config();
+
+       /* clear all three cache coherency fields */
+       config0 &= ~(0x7 | (7 << 25) | (7 << 28));
+       config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
+                   ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
+                   ((_page_cachable_default >> _CACHE_SHIFT) << 28));
+       write_c0_config(config0);
+       NXP_BARRIER();
+}
+
+static int __cpuinitdata cca = -1;
+
+static int __init cca_setup(char *str)
+{
+       get_option(&str, &cca);
+
+       return 1;
+}
+
+__setup("cca=", cca_setup);
+
+static void __cpuinit coherency_setup(void)
+{
+       if (cca < 0 || cca > 7)
+               cca = read_c0_config() & CONF_CM_CMASK;
+       _page_cachable_default = cca << _CACHE_SHIFT;
+
+       pr_debug("Using cache attribute %d\n", cca);
+       change_c0_config(CONF_CM_CMASK, cca);
 
        /*
         * c0_status.cu=0 specifies that updates by the sc instruction use
@@ -1215,10 +1296,28 @@ static void __init coherency_setup(void)
        case CPU_AU1500: /* rev. AB */
                au1x00_fixup_config_od();
                break;
+
+       case PRID_IMP_PR4450:
+               nxp_pr4450_fixup_config();
+               break;
        }
 }
 
-void __init r4k_cache_init(void)
+#if defined(CONFIG_DMA_NONCOHERENT)
+
+static int __cpuinitdata coherentio;
+
+static int __init setcoherentio(char *str)
+{
+       coherentio = 1;
+
+       return 1;
+}
+
+__setup("coherentio", setcoherentio);
+#endif
+
+void __cpuinit r4k_cache_init(void)
 {
        extern void build_clear_page(void);
        extern void build_copy_page(void);
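
Two boot-time parameters are introduced above. cca= overrides the cache coherency attribute used for _page_cachable_default (valid values 0-7; anything else falls back to the CCA currently programmed in c0_config), and coherentio - only present with CONFIG_DMA_NONCOHERENT - declares DMA to be I/O-coherent so the final hunk can stub out the _dma_cache_* hooks entirely. Hypothetical usage on the kernel command line:

	# hypothetical example; values depend on the platform
	cca=3 coherentio
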
@@ -1261,6 +1360,10 @@ void __init r4k_cache_init(void)
                                        PAGE_SIZE - 1);
        else
                shm_align_mask = PAGE_SIZE-1;
+
+       __flush_cache_vmap      = r4k__flush_cache_vmap;
+       __flush_cache_vunmap    = r4k__flush_cache_vunmap;
+
        flush_cache_all         = cache_noop;
        __flush_cache_all       = r4k___flush_cache_all;
        flush_cache_mm          = r4k_flush_cache_mm;
@@ -1273,14 +1376,22 @@ void __init r4k_cache_init(void)
        flush_data_cache_page   = r4k_flush_data_cache_page;
        flush_icache_range      = r4k_flush_icache_range;
 
-#ifdef CONFIG_DMA_NONCOHERENT
-       _dma_cache_wback_inv    = r4k_dma_cache_wback_inv;
-       _dma_cache_wback        = r4k_dma_cache_wback_inv;
-       _dma_cache_inv          = r4k_dma_cache_inv;
+#if defined(CONFIG_DMA_NONCOHERENT)
+       if (coherentio) {
+               _dma_cache_wback_inv    = (void *)cache_noop;
+               _dma_cache_wback        = (void *)cache_noop;
+               _dma_cache_inv          = (void *)cache_noop;
+       } else {
+               _dma_cache_wback_inv    = r4k_dma_cache_wback_inv;
+               _dma_cache_wback        = r4k_dma_cache_wback_inv;
+               _dma_cache_inv          = r4k_dma_cache_inv;
+       }
 #endif
 
        build_clear_page();
        build_copy_page();
+#if !defined(CONFIG_MIPS_CMP)
        local_r4k___flush_cache_all(NULL);
+#endif
        coherency_setup();
 }