diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 3d3e536..c43f4b2 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
  *    primary cache.
  */
 static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
-                                   int retry, int wait)
+                                   int wait)
 {
        preempt_disable();
 
 #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
-       smp_call_function(func, info, retry, wait);
+       smp_call_function(func, info, wait);
 #endif
        func(info);
        preempt_enable();
 }
 
+#if defined(CONFIG_MIPS_CMP)
+#define cpu_has_safe_index_cacheops 0
+#else
+#define cpu_has_safe_index_cacheops 1
+#endif
+
 /*
  * Must die.
  */
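
The r4k_on_each_cpu() hunk above tracks the tree-wide removal of smp_call_function()'s retry argument; the signature is now (func, info, wait). Note that smp_call_function() only runs func on the *other* online CPUs, which is why the helper still calls func(info) itself, with preemption disabled so the executing CPU cannot change in between. A minimal sketch of the resulting pattern (a generic SMP kernel is assumed, without the MT_SMP/SMTC special case; the helper name is hypothetical). cpu_has_safe_index_cacheops, introduced in the same hunk, is explained at its first use further down.

    /*
     * Sketch only: run func on every CPU and wait for completion.
     * smp_call_function() covers the remote CPUs; the local CPU is
     * handled by the direct call.
     */
    static void run_on_all_cpus(void (*func)(void *), void *info)
    {
            preempt_disable();
            smp_call_function(func, info, 1);       /* remote CPUs, wait=1 */
            func(info);                             /* this CPU */
            preempt_enable();
    }
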
@@ -344,7 +350,7 @@ static inline void local_r4k___flush_cache_all(void * args)
 
 static void r4k___flush_cache_all(void)
 {
-       r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
+       r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1);
 }
 
 static inline int has_valid_asid(const struct mm_struct *mm)
@@ -391,7 +397,7 @@ static void r4k_flush_cache_range(struct vm_area_struct *vma,
        int exec = vma->vm_flags & VM_EXEC;
 
        if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
-               r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
+               r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1);
 }
 
 static inline void local_r4k_flush_cache_mm(void * args)
@@ -423,7 +429,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
        if (!cpu_has_dc_aliases)
                return;
 
-       r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
+       r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1);
 }
 
 struct flush_cache_page_args {
@@ -440,6 +446,7 @@ static inline void local_r4k_flush_cache_page(void *args)
        struct page *page = pfn_to_page(fcp_args->pfn);
        int exec = vma->vm_flags & VM_EXEC;
        struct mm_struct *mm = vma->vm_mm;
+       int map_coherent = 0;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
@@ -473,7 +480,9 @@ static inline void local_r4k_flush_cache_page(void *args)
                 * Use kmap_coherent or kmap_atomic to do flushes for
                 * another ASID than the current one.
                 */
-               if (cpu_has_dc_aliases)
+               map_coherent = (cpu_has_dc_aliases &&
+                               page_mapped(page) && !Page_dcache_dirty(page));
+               if (map_coherent)
                        vaddr = kmap_coherent(page, addr);
                else
                        vaddr = kmap_atomic(page, KM_USER0);
@@ -482,6 +491,8 @@ static inline void local_r4k_flush_cache_page(void *args)
 
        if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
                r4k_blast_dcache_page(addr);
+               if (exec && !cpu_icache_snoops_remote_store)
+                       r4k_blast_scache_page(addr);
        }
        if (exec) {
                if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
@@ -494,7 +505,7 @@ static inline void local_r4k_flush_cache_page(void *args)
        }
 
        if (vaddr) {
-               if (cpu_has_dc_aliases)
+               if (map_coherent)
                        kunmap_coherent();
                else
                        kunmap_atomic(vaddr, KM_USER0);
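
The point of the new map_coherent flag: kmap_coherent() sets up the temporary mapping at the user address's cache colour, which is only valid while the page is actually mapped and its dcache is not marked dirty; in every other case the flush has to go through a plain kmap_atomic() mapping. Remembering the decision also guarantees the unmap side releases with the matching primitive. Condensed to its core (a sketch, not the full function):

    /* Decide once, remember, and tear down with the matching call. */
    int map_coherent = cpu_has_dc_aliases &&
                       page_mapped(page) && !Page_dcache_dirty(page);
    void *vaddr = map_coherent ? kmap_coherent(page, addr)
                               : kmap_atomic(page, KM_USER0);
    /* ... cache ops on vaddr ... */
    if (map_coherent)
            kunmap_coherent();
    else
            kunmap_atomic(vaddr, KM_USER0);
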
@@ -510,7 +521,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
        args.addr = addr;
        args.pfn = pfn;
 
-       r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
+       r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1);
 }
 
 static inline void local_r4k_flush_data_cache_page(void * addr)
@@ -524,7 +535,7 @@ static void r4k_flush_data_cache_page(unsigned long addr)
                local_r4k_flush_data_cache_page((void *)addr);
        else
                r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
-                               1, 1);
+                               1);
 }
 
 struct flush_icache_range_args {
@@ -532,12 +543,8 @@ struct flush_icache_range_args {
        unsigned long end;
 };
 
-static inline void local_r4k_flush_icache_range(void *args)
+static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
 {
-       struct flush_icache_range_args *fir_args = args;
-       unsigned long start = fir_args->start;
-       unsigned long end = fir_args->end;
-
        if (!cpu_has_ic_fills_f_dc) {
                if (end - start >= dcache_size) {
                        r4k_blast_dcache();
@@ -553,6 +560,15 @@ static inline void local_r4k_flush_icache_range(void *args)
                protected_blast_icache_range(start, end);
 }
 
+static inline void local_r4k_flush_icache_range_ipi(void *args)
+{
+       struct flush_icache_range_args *fir_args = args;
+       unsigned long start = fir_args->start;
+       unsigned long end = fir_args->end;
+
+       local_r4k_flush_icache_range(start, end);
+}
+
 static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 {
        struct flush_icache_range_args args;
@@ -560,7 +576,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
        args.start = start;
        args.end = end;
 
-       r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
+       r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args, 1);
        instruction_hazard();
 }
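
Splitting the worker from its IPI wrapper lets local_r4k_flush_icache_range() be called directly with (start, end); it is wired up to the new local_flush_icache_range hook in r4k_cache_init() below, while the cross-CPU path keeps marshalling the range through the args struct. Sketched side by side:

    /* Local CPU only: no argument packing, no IPI. */
    local_r4k_flush_icache_range(start, end);

    /* All CPUs: pack the range, let each CPU unpack it. */
    struct flush_icache_range_args args = { .start = start, .end = end };
    r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args, 1);
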
 
@@ -584,7 +600,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
         * subset property so we have to flush the primary caches
         * explicitly
         */
-       if (size >= dcache_size) {
+       if (cpu_has_safe_index_cacheops && size >= dcache_size) {
                r4k_blast_dcache();
        } else {
                R4600_HIT_CACHEOP_WAR_IMPL;
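
This is where cpu_has_safe_index_cacheops (defined at the top of the patch) earns its keep: index-type cache ops act on whatever line happens to sit at a given cache index, and on a CONFIG_MIPS_CMP system that line may belong to another core. CMP kernels therefore skip the whole-cache index shortcut and always use hit-type (address-based) ops, however large the range. The policy, as a sketch using this file's helpers:

    if (cpu_has_safe_index_cacheops && size >= dcache_size)
            r4k_blast_dcache();                     /* index ops, whole cache */
    else
            blast_dcache_range(addr, addr + size);  /* hit ops, exact range */
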
@@ -602,15 +618,35 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
        if (cpu_has_inclusive_pcaches) {
                if (size >= scache_size)
                        r4k_blast_scache();
-               else
+               else {
+                       unsigned long lsize = cpu_scache_line_size();
+                       unsigned long almask = ~(lsize - 1);
+
+                       /*
+                        * There is no clearly documented alignment
+                        * requirement for the cache instruction on MIPS
+                        * processors, and some of them, such as the QED
+                        * RM5200 and RM7000, throw an address error for
+                        * cache hit ops with insufficient alignment.
+                        * This is solved by aligning the address to the
+                        * cache line size.
+                        */
+                       cache_op(Hit_Writeback_Inv_SD, addr & almask);
+                       cache_op(Hit_Writeback_Inv_SD,
+                                (addr + size - 1) & almask);
                        blast_inv_scache_range(addr, addr + size);
+               }
                return;
        }
 
-       if (size >= dcache_size) {
+       if (cpu_has_safe_index_cacheops && size >= dcache_size) {
                r4k_blast_dcache();
        } else {
+               unsigned long lsize = cpu_dcache_line_size();
+               unsigned long almask = ~(lsize - 1);
+
                R4600_HIT_CACHEOP_WAR_IMPL;
+               cache_op(Hit_Writeback_Inv_D, addr & almask);
+               cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask);
                blast_inv_dcache_range(addr, addr + size);
        }
 
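
Two separate problems are fixed at the range boundaries before the plain invalidate runs: aligning the operand avoids the address error the QED parts raise for misaligned hit ops, and using Hit_Writeback_Inv (rather than a pure invalidate) on the first and last lines writes back any bytes that share those cache lines with data outside [addr, addr + size), so the invalidation cannot throw away a neighbour's dirty data. Condensed:

    unsigned long lsize  = cpu_dcache_line_size();
    unsigned long almask = ~(lsize - 1);

    /* Partial first/last lines: write back before anything is dropped. */
    cache_op(Hit_Writeback_Inv_D, addr & almask);
    cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask);
    /* Interior lines belong wholly to the buffer: invalidation is safe. */
    blast_inv_dcache_range(addr, addr + size);
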
@@ -661,7 +697,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
 
 static void r4k_flush_cache_sigtramp(unsigned long addr)
 {
-       r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
+       r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1);
 }
 
 static void r4k_flush_icache_all(void)
@@ -969,6 +1005,7 @@ static void __cpuinit probe_pcache(void)
        case CPU_24K:
        case CPU_34K:
        case CPU_74K:
+       case CPU_1004K:
                if ((read_c0_config7() & (1 << 16))) {
                        /* effectively physically indexed dcache,
                           thus no virtual aliases. */
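
CPU_1004K simply joins the existing 24K/34K/74K test. Bit 16 of Config7 on these MIPS32 cores (presumably the AR, "alias removed", bit; worth confirming against the core's Software User's Manual) reports that the dcache behaves as physically indexed, so no virtual-alias handling is needed:

    #define MIPS_CONF7_AR   (1 << 16)       /* assumed field name */

    if (read_c0_config7() & MIPS_CONF7_AR)
            c->dcache.flags |= MIPS_CACHE_PINDEX;   /* physically indexed */
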
@@ -1217,6 +1254,28 @@ void au1x00_fixup_config_od(void)
        }
 }
 
+/* CP0 hazard avoidance. */
+#define NXP_BARRIER()                                                  \
+        __asm__ __volatile__(                                          \
+       ".set noreorder\n\t"                                            \
+       "nop; nop; nop; nop; nop; nop;\n\t"                             \
+       ".set reorder\n\t")
+
+static void nxp_pr4450_fixup_config(void)
+{
+       unsigned long config0;
+
+       config0 = read_c0_config();
+
+       /* clear all three cache coherency fields */
+       config0 &= ~(0x7 | (7 << 25) | (7 << 28));
+       config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
+                   ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
+                   ((_page_cachable_default >> _CACHE_SHIFT) << 28));
+       write_c0_config(config0);
+       NXP_BARRIER();
+}
+
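
For the PR4450 fixup, the three 3-bit fields being rewritten are cache coherency attributes in the MIPS32 Config register: K0 (bits 2:0, kseg0), K23 (bits 27:25) and KU (bits 30:28), the latter two meaningful on fixed-mapping MMUs. All three are pointed at the kernel's default CCA, and the nop run in NXP_BARRIER() covers the hazard between the CP0 write and later uses of the register. The same computation, slightly more explicit:

    unsigned int cca = _page_cachable_default >> _CACHE_SHIFT;

    config0 &= ~(7 << 0 | 7 << 25 | 7 << 28);   /* clear K0, K23, KU */
    config0 |= cca << 0 | cca << 25 | cca << 28;
    write_c0_config(config0);
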
 static int __cpuinitdata cca = -1;
 
 static int __init cca_setup(char *str)
@@ -1262,9 +1321,27 @@ static void __cpuinit coherency_setup(void)
        case CPU_AU1500: /* rev. AB */
                au1x00_fixup_config_od();
                break;
+
+       case PRID_IMP_PR4450:
+               nxp_pr4450_fixup_config();
+               break;
        }
 }
 
+#if defined(CONFIG_DMA_NONCOHERENT)
+
+static int __cpuinitdata coherentio;
+
+static int __init setcoherentio(char *str)
+{
+       coherentio = 1;
+
+       return 1;
+}
+
+__setup("coherentio", setcoherentio);
+#endif
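
coherentio gives CONFIG_DMA_NONCOHERENT kernels a runtime escape hatch: on hardware that is in fact I/O-coherent, passing the bare word on the kernel command line (via the __setup() hook above) makes r4k_cache_init() below wire the DMA cache-maintenance hooks to no-ops instead of paying for flushes the hardware does not need. For example:

    console=ttyS0 root=/dev/nfs coherentio
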
+
 void __cpuinit r4k_cache_init(void)
 {
        extern void build_clear_page(void);
@@ -1323,15 +1400,24 @@ void __cpuinit r4k_cache_init(void)
        local_flush_data_cache_page     = local_r4k_flush_data_cache_page;
        flush_data_cache_page   = r4k_flush_data_cache_page;
        flush_icache_range      = r4k_flush_icache_range;
+       local_flush_icache_range        = local_r4k_flush_icache_range;
 
-#ifdef CONFIG_DMA_NONCOHERENT
-       _dma_cache_wback_inv    = r4k_dma_cache_wback_inv;
-       _dma_cache_wback        = r4k_dma_cache_wback_inv;
-       _dma_cache_inv          = r4k_dma_cache_inv;
+#if defined(CONFIG_DMA_NONCOHERENT)
+       if (coherentio) {
+               _dma_cache_wback_inv    = (void *)cache_noop;
+               _dma_cache_wback        = (void *)cache_noop;
+               _dma_cache_inv          = (void *)cache_noop;
+       } else {
+               _dma_cache_wback_inv    = r4k_dma_cache_wback_inv;
+               _dma_cache_wback        = r4k_dma_cache_wback_inv;
+               _dma_cache_inv          = r4k_dma_cache_inv;
+       }
 #endif
 
        build_clear_page();
        build_copy_page();
+#if !defined(CONFIG_MIPS_CMP)
        local_r4k___flush_cache_all(NULL);
+#endif
        coherency_setup();
 }
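
Two details in the final hunk are easy to miss. First, cache_noop, a static inline empty function believed to live in asm/r4kcache.h, takes no arguments, hence the (void *) casts when it is assigned to hooks declared with (unsigned long, unsigned long); the callee never reads its argument registers, which is the long-standing (if formally type-punned) kernel idiom:

    static inline void cache_noop(void) { }     /* sketch of the helper */

    if (coherentio)
            _dma_cache_wback_inv = (void *)cache_noop;  /* coherent: skip */
    else
            _dma_cache_wback_inv = r4k_dma_cache_wback_inv;

Second, the initial local_r4k___flush_cache_all() is an index-based whole-cache flush, so it is skipped under CONFIG_MIPS_CMP for exactly the reason cpu_has_safe_index_cacheops is 0 there.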