[PATCH] FRV: Use the correct preemption primitives in kmap_atomic() and co
author     David Howells <dhowells@redhat.com>
           Mon, 16 Oct 2006 13:10:49 +0000 (14:10 +0100)
committer  Linus Torvalds <torvalds@g5.osdl.org>
           Mon, 16 Oct 2006 15:32:29 +0000 (08:32 -0700)
Use inc_preempt_count()/dec_preempt_count() rather than preempt_disable()/
preempt_enable(), and manually add the compiler barriers that the latter
provided.  This makes FRV consistent with other archs.

Furthermore, the compiler barrier effects are now there unconditionally, at
least as far as preemption is concerned.  We don't want the compiler moving
memory accesses out of the section of code in which the mapping is in force:
in effect, kmap_atomic() must imply a LOCK-class barrier and kunmap_atomic()
must imply an UNLOCK-class barrier to the compiler.
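
For reference, the generic primitives this patch trades between expanded
roughly as follows at the time (a paraphrased sketch, not the verbatim
include/linux/preempt.h): preempt_disable() bundled inc_preempt_count() with
barrier(), and preempt_enable() bundled barrier(), dec_preempt_count() and
preempt_check_resched().  Dropping to the bare count operations therefore
loses the barrier(), which is why the "memory" clobbers below go onto the
mapping asm itself:

    /* Paraphrased sketch of the generic primitives of that era (not verbatim): */
    #define barrier()		asm volatile("" ::: "memory")	/* compiler barrier */

    #define preempt_disable() \
    do { \
    	inc_preempt_count();	/* bump current_thread_info()->preempt_count */ \
    	barrier();		/* keep accesses inside the no-preempt region */ \
    } while (0)

    #define preempt_enable() \
    do { \
    	barrier();		/* keep accesses inside the no-preempt region */ \
    	dec_preempt_count(); \
    	preempt_check_resched(); /* schedule() if preemption became pending */ \
    } while (0)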

Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/include/asm-frv/highmem.h b/include/asm-frv/highmem.h
index e2247c2..0f390f4 100644
--- a/include/asm-frv/highmem.h
+++ b/include/asm-frv/highmem.h
@@ -82,11 +82,11 @@ extern struct page *kmap_atomic_to_page(void *ptr);
        dampr = paddr | xAMPRx_L | xAMPRx_M | xAMPRx_S | xAMPRx_SS_16Kb | xAMPRx_V;             \
                                                                                                \
        if (type != __KM_CACHE)                                                                 \
-               asm volatile("movgs %0,dampr"#ampr :: "r"(dampr));                              \
+               asm volatile("movgs %0,dampr"#ampr :: "r"(dampr) : "memory");                   \
        else                                                                                    \
                asm volatile("movgs %0,iampr"#ampr"\n"                                          \
                             "movgs %0,dampr"#ampr"\n"                                          \
-                            :: "r"(dampr)                                                      \
+                            :: "r"(dampr) : "memory"                                           \
                             );                                                                 \
                                                                                                \
        asm("movsg damlr"#ampr",%0" : "=r"(damlr));                                             \
@@ -104,7 +104,7 @@ extern struct page *kmap_atomic_to_page(void *ptr);
        asm volatile("movgs %0,tplr \n"                                                           \
                     "movgs %1,tppr \n"                                                           \
                     "tlbpr %0,gr0,#2,#1"                                                         \
-                    : : "r"(damlr), "r"(dampr));                                                 \
+                    : : "r"(damlr), "r"(dampr) : "memory");                                      \
                                                                                                  \
        /*printk("TLB: SECN sl=%d L=%08lx P=%08lx\n", slot, damlr, dampr);*/                      \
                                                                                                  \
@@ -115,7 +115,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 {
        unsigned long paddr;
 
-       preempt_disable();
+       inc_preempt_count();
        paddr = page_to_phys(page);
 
        switch (type) {
@@ -138,16 +138,16 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
        }
 }
 
-#define __kunmap_atomic_primary(type, ampr)                    \
-do {                                                           \
-       asm volatile("movgs gr0,dampr"#ampr"\n");               \
-       if (type == __KM_CACHE)                                 \
-               asm volatile("movgs gr0,iampr"#ampr"\n");       \
+#define __kunmap_atomic_primary(type, ampr)                            \
+do {                                                                   \
+       asm volatile("movgs gr0,dampr"#ampr"\n" ::: "memory");          \
+       if (type == __KM_CACHE)                                         \
+               asm volatile("movgs gr0,iampr"#ampr"\n" ::: "memory");  \
 } while(0)
 
-#define __kunmap_atomic_secondary(slot, vaddr)                 \
-do {                                                           \
-       asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr));      \
+#define __kunmap_atomic_secondary(slot, vaddr)                         \
+do {                                                                   \
+       asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory");   \
 } while(0)
 
 static inline void kunmap_atomic(void *kvaddr, enum km_type type)
@@ -170,7 +170,8 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
        default:
                BUG();
        }
-       preempt_enable();
+       dec_preempt_count();
+       preempt_check_resched();
 }
 
 #endif /* !__ASSEMBLY__ */
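
To see the contract the clobbers preserve, consider a hypothetical caller (a
sketch using the km_type API of the day; copy_to_highpage() is an
illustrative name, not an existing helper):

    /* Sketch: the "memory" clobbers pin the memcpy() between map and unmap,
     * which is the LOCK/UNLOCK-class guarantee described above. */
    static void copy_to_highpage(struct page *page, const void *src, size_t len)
    {
    	void *vaddr = kmap_atomic(page, KM_USER0);	/* LOCK-class: no hoisting above */
    	memcpy(vaddr, src, len);			/* must stay inside the mapping */
    	kunmap_atomic(vaddr, KM_USER0);			/* UNLOCK-class: no sinking below */
    }

Without the clobbers, nothing would stop the compiler from moving the
memcpy() across the movgs/tlbpr instructions, since those asms previously
declared no memory side effects.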