MIPS: r4kcache.h: SMTC cache-flush protection, "R" cache_op constraint, protected_cache_op() and blast_xxx_range() helpers
[safe/jmp/linux-2.6] include/asm-mips/r4kcache.h
index cc53196..3c8e3c8 100644
@@ -14,6 +14,8 @@
 
 #include <asm/asm.h>
 #include <asm/cacheops.h>
+#include <asm/cpu-features.h>
+#include <asm/mipsmtregs.h>
 
 /*
  * This macro returns a properly sign-extended address suitable as base address
        "       cache   %0, %1                                  \n"     \
        "       .set    pop                                     \n"     \
        :                                                               \
-       : "i" (op), "m" (*(unsigned char *)(addr)))
+       : "i" (op), "R" (*(unsigned char *)(addr)))
+
+#ifdef CONFIG_MIPS_MT
+/*
+ * Temporary hacks for SMTC debug. Optionally force single-threaded
+ * execution during I-cache flushes.
+ */
+
+#define PROTECT_CACHE_FLUSHES 1
+
+#ifdef PROTECT_CACHE_FLUSHES
+
+extern int mt_protiflush;
+extern int mt_protdflush;
+extern void mt_cflush_lockdown(void);
+extern void mt_cflush_release(void);
+
+#define BEGIN_MT_IPROT \
+       unsigned long flags = 0;                        \
+       unsigned long mtflags = 0;                      \
+       if (mt_protiflush) {                            \
+               local_irq_save(flags);                  \
+               ehb();                                  \
+               mtflags = dvpe();                       \
+               mt_cflush_lockdown();                   \
+       }
+
+#define END_MT_IPROT \
+       if (mt_protiflush) {                            \
+               mt_cflush_release();                    \
+               evpe(mtflags);                          \
+               local_irq_restore(flags);               \
+       }
+
+#define BEGIN_MT_DPROT \
+       unsigned long flags = 0;                        \
+       unsigned long mtflags = 0;                      \
+       if (mt_protdflush) {                            \
+               local_irq_save(flags);                  \
+               ehb();                                  \
+               mtflags = dvpe();                       \
+               mt_cflush_lockdown();                   \
+       }
+
+#define END_MT_DPROT \
+       if (mt_protdflush) {                            \
+               mt_cflush_release();                    \
+               evpe(mtflags);                          \
+               local_irq_restore(flags);               \
+       }
+
+#else
+
+#define BEGIN_MT_IPROT
+#define BEGIN_MT_DPROT
+#define END_MT_IPROT
+#define END_MT_DPROT
+
+#endif /* PROTECT_CACHE_FLUSHES */
+
+#define __iflush_prologue                                              \
+       unsigned long redundance;                                       \
+       extern int mt_n_iflushes;                                       \
+       BEGIN_MT_IPROT                                                  \
+       for (redundance = 0; redundance < mt_n_iflushes; redundance++) {
+
+#define __iflush_epilogue                                              \
+       END_MT_IPROT                                                    \
+       }
+
+#define __dflush_prologue                                              \
+       unsigned long redundance;                                       \
+       extern int mt_n_dflushes;                                       \
+       BEGIN_MT_DPROT                                                  \
+       for (redundance = 0; redundance < mt_n_dflushes; redundance++) {
+
+#define __dflush_epilogue \
+       END_MT_DPROT     \
+       }
+
+#define __inv_dflush_prologue __dflush_prologue
+#define __inv_dflush_epilogue __dflush_epilogue
+#define __sflush_prologue {
+#define __sflush_epilogue }
+#define __inv_sflush_prologue __sflush_prologue
+#define __inv_sflush_epilogue __sflush_epilogue
+
+#else /* CONFIG_MIPS_MT */
+
+#define __iflush_prologue {
+#define __iflush_epilogue }
+#define __dflush_prologue {
+#define __dflush_epilogue }
+#define __inv_dflush_prologue {
+#define __inv_dflush_epilogue }
+#define __sflush_prologue {
+#define __sflush_epilogue }
+#define __inv_sflush_prologue {
+#define __inv_sflush_epilogue }
+
+#endif /* CONFIG_MIPS_MT */
 
 static inline void flush_icache_line_indexed(unsigned long addr)
 {
+       __iflush_prologue
        cache_op(Index_Invalidate_I, addr);
+       __iflush_epilogue
 }
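
To make the macro plumbing easier to follow, here is flush_icache_line_indexed() hand-expanded under CONFIG_MIPS_MT with PROTECT_CACHE_FLUSHES (a sketch, not compiler output). The lockdown is taken once before the loop, the flush is repeated mt_n_iflushes times (a debug knob defined elsewhere, normally one), and note that the epilogue's release test sits inside the repetition loop:

    static inline void flush_icache_line_indexed(unsigned long addr)
    {
            unsigned long redundance;
            extern int mt_n_iflushes;
            unsigned long flags = 0;        /* BEGIN_MT_IPROT */
            unsigned long mtflags = 0;

            if (mt_protiflush) {
                    local_irq_save(flags);  /* mask local interrupts */
                    ehb();                  /* clear execution hazards */
                    mtflags = dvpe();       /* halt the other VPEs */
                    mt_cflush_lockdown();
            }
            for (redundance = 0; redundance < mt_n_iflushes; redundance++) {
                    cache_op(Index_Invalidate_I, addr);
                    if (mt_protiflush) {    /* END_MT_IPROT */
                            mt_cflush_release();
                            evpe(mtflags);  /* restart the other VPEs */
                            local_irq_restore(flags);
                    }
            }
    }
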
 
 static inline void flush_dcache_line_indexed(unsigned long addr)
 {
+       __dflush_prologue
        cache_op(Index_Writeback_Inv_D, addr);
+       __dflush_epilogue
 }
 
 static inline void flush_scache_line_indexed(unsigned long addr)
@@ -55,17 +161,23 @@ static inline void flush_scache_line_indexed(unsigned long addr)
 
 static inline void flush_icache_line(unsigned long addr)
 {
+       __iflush_prologue
        cache_op(Hit_Invalidate_I, addr);
+       __iflush_epilogue
 }
 
 static inline void flush_dcache_line(unsigned long addr)
 {
+       __dflush_prologue
        cache_op(Hit_Writeback_Inv_D, addr);
+       __dflush_epilogue
 }
 
 static inline void invalidate_dcache_line(unsigned long addr)
 {
+       __dflush_prologue
        cache_op(Hit_Invalidate_D, addr);
+       __dflush_epilogue
 }
 
 static inline void invalidate_scache_line(unsigned long addr)
@@ -78,22 +190,25 @@ static inline void flush_scache_line(unsigned long addr)
        cache_op(Hit_Writeback_Inv_SD, addr);
 }
 
+#define protected_cache_op(op,addr)                            \
+       __asm__ __volatile__(                                   \
+       "       .set    push                    \n"             \
+       "       .set    noreorder               \n"             \
+       "       .set    mips3                   \n"             \
+       "1:     cache   %0, (%1)                \n"             \
+       "2:     .set    pop                     \n"             \
+       "       .section __ex_table,\"a\"       \n"             \
+       "       "STR(PTR)" 1b, 2b               \n"             \
+       "       .previous"                                      \
+       :                                                       \
+       : "i" (op), "r" (addr))
+
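
The protected_cache_op() macro above preserves the behaviour of the three open-coded versions it replaces: label 1 marks the cache op that may fault on an unmapped address, label 2 marks where to resume, and the __ex_table entry records the pair. Conceptually, the MIPS fault handler then does something like the following (a sketch; the two-field entry layout matches the MIPS struct exception_table_entry of this era, the helper name is illustrative, and pt_regs/cp0_epc come from <asm/ptrace.h>):

    struct exception_table_entry {
            unsigned long insn;     /* address of label 1: (the cache op) */
            unsigned long nextinsn; /* address of label 2: (the fixup)    */
    };

    /* If the faulting PC matches an entry, resume at its fixup
     * address, so the flush quietly becomes a no-op. */
    static int fixup_cache_op_fault(const struct exception_table_entry *e,
                                    struct pt_regs *regs)
    {
            if (e && e->insn == regs->cp0_epc) {
                    regs->cp0_epc = e->nextinsn;
                    return 1;               /* fault handled */
            }
            return 0;                       /* genuine fault */
    }
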
 /*
  * The next two are for badland addresses like signal trampolines.
  */
 static inline void protected_flush_icache_line(unsigned long addr)
 {
-       __asm__ __volatile__(
-               "       .set    push                    \n"
-               "       .set    noreorder               \n"
-               "       .set    mips3                   \n"
-               "1:     cache   %0, (%1)                \n"
-               "2:     .set    pop                     \n"
-               "       .section __ex_table,\"a\"       \n"
-               "       "STR(PTR)" 1b, 2b               \n"
-               "       .previous"
-               :
-               : "i" (Hit_Invalidate_I), "r" (addr));
+       protected_cache_op(Hit_Invalidate_I, addr);
 }
 
 /*
@@ -104,32 +219,12 @@ static inline void protected_flush_icache_line(unsigned long addr)
  */
 static inline void protected_writeback_dcache_line(unsigned long addr)
 {
-       __asm__ __volatile__(
-               "       .set    push                    \n"
-               "       .set    noreorder               \n"
-               "       .set    mips3                   \n"
-               "1:     cache   %0, (%1)                \n"
-               "2:     .set    pop                     \n"
-               "       .section __ex_table,\"a\"       \n"
-               "       "STR(PTR)" 1b, 2b               \n"
-               "       .previous"
-               :
-               : "i" (Hit_Writeback_Inv_D), "r" (addr));
+       protected_cache_op(Hit_Writeback_Inv_D, addr);
 }
 
 static inline void protected_writeback_scache_line(unsigned long addr)
 {
-       __asm__ __volatile__(
-               "       .set    push                    \n"
-               "       .set    noreorder               \n"
-               "       .set    mips3                   \n"
-               "1:     cache   %0, (%1)                \n"
-               "2:     .set    pop                     \n"
-               "       .section __ex_table,\"a\"       \n"
-               "       "STR(PTR)" 1b, 2b               \n"
-               "       .previous"
-               :
-               : "i" (Hit_Writeback_Inv_SD), "r" (addr));
+       protected_cache_op(Hit_Writeback_Inv_SD, addr);
 }
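
A typical caller of these wrappers is the signal-trampoline flush, where the target line lives on a possibly unmapped user stack. A sketch modeled on the r4k flush_cache_sigtramp path (the function name and simplified line-size handling are illustrative; the cpu_*_line_size() helpers are the same ones the range builders below use):

    /* Push the freshly written trampoline out of the D-cache and
     * invalidate the stale I-cache line; a fault on either op is
     * absorbed by the __ex_table fixups above. */
    static void flush_sigtramp_line(unsigned long addr)
    {
            unsigned long dl = cpu_dcache_line_size();
            unsigned long il = cpu_icache_line_size();

            protected_writeback_dcache_line(addr & ~(dl - 1));
            protected_flush_icache_line(addr & ~(il - 1));
    }
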
 
 /*
@@ -255,9 +350,13 @@ static inline void blast_##pfx##cache##lsize(void)                 \
                               current_cpu_data.desc.waybit;            \
        unsigned long ws, addr;                                         \
                                                                        \
+       __##pfx##flush_prologue                                         \
+                                                                       \
        for (ws = 0; ws < ws_end; ws += ws_inc)                         \
                for (addr = start; addr < end; addr += lsize * 32)      \
                        cache##lsize##_unroll32(addr|ws,indexop);       \
+                                                                       \
+       __##pfx##flush_epilogue                                         \
 }                                                                      \
                                                                        \
 static inline void blast_##pfx##cache##lsize##_page(unsigned long page)        \
@@ -265,24 +364,33 @@ static inline void blast_##pfx##cache##lsize##_page(unsigned long page)   \
        unsigned long start = page;                                     \
        unsigned long end = page + PAGE_SIZE;                           \
                                                                        \
+       __##pfx##flush_prologue                                         \
+                                                                       \
        do {                                                            \
                cache##lsize##_unroll32(start,hitop);                   \
                start += lsize * 32;                                    \
        } while (start < end);                                          \
+                                                                       \
+       __##pfx##flush_epilogue                                         \
 }                                                                      \
                                                                        \
 static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
 {                                                                      \
-       unsigned long start = page;                                     \
+       unsigned long indexmask = current_cpu_data.desc.waysize - 1;    \
+       unsigned long start = INDEX_BASE + (page & indexmask);          \
        unsigned long end = start + PAGE_SIZE;                          \
        unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;     \
        unsigned long ws_end = current_cpu_data.desc.ways <<            \
                               current_cpu_data.desc.waybit;            \
        unsigned long ws, addr;                                         \
                                                                        \
+       __##pfx##flush_prologue                                         \
+                                                                       \
        for (ws = 0; ws < ws_end; ws += ws_inc)                         \
                for (addr = start; addr < end; addr += lsize * 32)      \
                        cache##lsize##_unroll32(addr|ws,indexop);       \
+                                                                       \
+       __##pfx##flush_epilogue                                         \
 }
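
The start-address change in blast_*_page_indexed above is worth a worked example: index-type cache ops select a line by way and index, so only the index bits within a single way may come from the page address; the way bits are supplied by the ws loop, and INDEX_BASE (CKSEG0 on these kernels) provides a safe base. Assuming a 4-way cache with an 8 KB waysize (waybit 13):

    #include <stdio.h>

    int main(void)
    {
            /* Assumed geometry, not from the patch: 4 ways, 8 KB each. */
            const unsigned long INDEX_BASE = 0x80000000UL;  /* CKSEG0 */
            const unsigned long waysize = 8192, waybit = 13, ways = 4;
            const unsigned long page = 0x1234b000UL;

            unsigned long indexmask = waysize - 1;          /* 0x1fff */
            unsigned long start = INDEX_BASE + (page & indexmask);
            unsigned long ws_inc = 1UL << waybit;           /* 0x2000 */
            unsigned long ws_end = ways << waybit;          /* 0x8000 */

            /* The old code indexed from 'page' itself; any page above
             * waysize then carried stray bits into the index field.  */
            printf("start=%#lx, ws: 0..%#lx step %#lx\n",
                   start, ws_end - ws_inc, ws_inc);  /* start=0x80001000 */
            return 0;
    }
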
 
 __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
@@ -295,4 +403,34 @@ __BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
 __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
 __BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
 
+/* build blast_xxx_range, protected_blast_xxx_range */
+#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \
+static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
+                                                   unsigned long end)  \
+{                                                                      \
+       unsigned long lsize = cpu_##desc##_line_size();                 \
+       unsigned long addr = start & ~(lsize - 1);                      \
+       unsigned long aend = (end - 1) & ~(lsize - 1);                  \
+                                                                       \
+       __##pfx##flush_prologue                                         \
+                                                                       \
+       while (1) {                                                     \
+               prot##cache_op(hitop, addr);                            \
+               if (addr == aend)                                       \
+                       break;                                          \
+               addr += lsize;                                          \
+       }                                                               \
+                                                                       \
+       __##pfx##flush_epilogue                                         \
+}
+
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_)
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_)
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
+/* blast_inv_dcache_range, blast_inv_scache_range */
+__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
+__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, )
+
 #endif /* _ASM_R4KCACHE_H */
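
For completeness, here is one instantiation of the range builder hand-expanded: protected_blast_dcache_range() without CONFIG_MIPS_MT, where the prologue and epilogue collapse to bare braces. Every line from the rounded-down start through the line containing end - 1 receives a protected writeback-invalidate:

    static inline void protected_blast_dcache_range(unsigned long start,
                                                    unsigned long end)
    {
            unsigned long lsize = cpu_dcache_line_size();
            unsigned long addr = start & ~(lsize - 1);      /* first line */
            unsigned long aend = (end - 1) & ~(lsize - 1);  /* last line  */

            {                               /* __dflush_prologue */
            while (1) {
                    protected_cache_op(Hit_Writeback_Inv_D, addr);
                    if (addr == aend)
                            break;
                    addr += lsize;
            }
            }                               /* __dflush_epilogue */
    }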