include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index a130b22..0c99ec2 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -14,6 +14,7 @@
  */
 #include <linux/vmalloc.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/pci.h>
 #include <linux/io.h>
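
Background on the include change above: the tree-wide cleanup this commit is part of stops relying on <linux/slab.h> being dragged in indirectly through other headers, so any file that calls the slab allocators has to include it explicitly. A minimal sketch of the pattern, with a made-up helper name:

        #include <linux/gfp.h>  /* GFP_KERNEL */
        #include <linux/slab.h> /* kmalloc()/kfree() now need the explicit include */

        /* made-up example, not from this file */
        static void *alloc_scratch(size_t len)
        {
                return kmalloc(len, GFP_KERNEL);
        }
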
  * caller shouldn't need to know that small detail.
  */
 void __iomem * __init_refok
-__ioremap_caller(unsigned long phys_addr, unsigned long size,
-                unsigned long flags, void *caller)
+__ioremap_caller(phys_addr_t phys_addr, unsigned long size,
+                pgprot_t pgprot, void *caller)
 {
        struct vm_struct *area;
        unsigned long offset, last_addr, addr, orig_addr;
-       pgprot_t pgprot;
+       void __iomem *mapped;
 
        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
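
The hunk above changes the prototype: the physical address becomes a phys_addr_t and the raw flags word becomes a fully formed pgprot_t, so callers now hand in a page protection value directly. A hedged sketch of what a caller might look like after this change; the wrapper name is illustrative, and PAGE_KERNEL_NOCACHE is assumed as the uncached protection:

        /* illustrative wrapper: map a device window uncached */
        static void __iomem *map_window_nocache(phys_addr_t base, unsigned long len)
        {
                return __ioremap_caller(base, len, PAGE_KERNEL_NOCACHE,
                                        __builtin_return_address(0));
        }
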
@@ -47,16 +48,18 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
                return NULL;
 
        /*
-        * If we're in the fixed PCI memory range, mapping through page
-        * tables is not only pointless, but also fundamentally broken.
-        * Just return the physical address instead.
-        *
-        * For boards that map a small PCI memory aperture somewhere in
-        * P1/P2 space, ioremap() will already do the right thing,
-        * and we'll never get this far.
+        * If we can't yet use the regular approach, go the fixmap route.
+        */
+       if (!mem_init_done)
+               return ioremap_fixed(phys_addr, size, pgprot);
+
+       /*
+        * First try to remap through the PMB.
+        * PMB entries are all pre-faulted.
         */
-       if (is_pci_memory_fixed_range(phys_addr, size))
-               return (void __iomem *)phys_addr;
+       mapped = pmb_remap_caller(phys_addr, size, pgprot, caller);
+       if (mapped && !IS_ERR(mapped))
+               return mapped;
 
        /*
         * Mappings have to be page-aligned
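
The PMB attempt added above follows the kernel's error-pointer convention: pmb_remap_caller() evidently returns a usable mapping on success and NULL or an ERR_PTR() value otherwise, and only a real pointer short-circuits the page-table path. A small, generic reminder of that convention (the helper name is hypothetical):

        #include <linux/err.h>
        #include <linux/printk.h>

        static void __iomem *try_helper_first(void)
        {
                void __iomem *p = some_mapping_helper();        /* hypothetical */

                if (p && !IS_ERR(p))
                        return p;               /* real mapping, use it */
                if (IS_ERR(p))
                        pr_debug("helper failed: %ld\n", PTR_ERR(p));
                return NULL;                    /* fall back to the next strategy */
        }
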
@@ -66,12 +69,6 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
        size = PAGE_ALIGN(last_addr+1) - phys_addr;
 
        /*
-        * If we can't yet use the regular approach, go the fixmap route.
-        */
-       if (!mem_init_done)
-               return ioremap_fixed(phys_addr, size, __pgprot(flags));
-
-       /*
         * Ok, go for it..
         */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
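
As before, the requested physical address is rounded down to a page boundary for mapping and the sub-page offset is added back onto the virtual address handed to the caller (see the final return statement in the next hunk). A worked example with made-up numbers, assuming 4 KiB pages:

        phys_addr_t phys  = 0x1f000404;         /* requested address, not page aligned */
        unsigned long off = phys & ~PAGE_MASK;  /* 0x404 */
        phys_addr_t base  = phys &  PAGE_MASK;  /* 0x1f000000, what actually gets mapped */
        /* caller ultimately receives: mapped_base + off */
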
@@ -80,33 +77,10 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
        area->phys_addr = phys_addr;
        orig_addr = addr = (unsigned long)area->addr;
 
-#ifdef CONFIG_PMB
-       /*
-        * First try to remap through the PMB once a valid VMA has been
-        * established. Smaller allocations (or the rest of the size
-        * remaining after a PMB mapping due to the size not being
-        * perfectly aligned on a PMB size boundary) are then mapped
-        * through the UTLB using conventional page tables.
-        *
-        * PMB entries are all pre-faulted.
-        */
-       if (unlikely(phys_addr >= P1SEG)) {
-               unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);
-
-               if (likely(mapped)) {
-                       addr            += mapped;
-                       phys_addr       += mapped;
-                       size            -= mapped;
-               }
+       if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
+               vunmap((void *)orig_addr);
+               return NULL;
        }
-#endif
-
-       pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
-       if (likely(size))
-               if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
-                       vunmap((void *)orig_addr);
-                       return NULL;
-               }
 
        return (void __iomem *)(offset + (char *)orig_addr);
 }
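
With the fixmap and PMB cases handled up front, the page-table path collapses to a single unconditional ioremap_page_range() call, with vunmap() releasing the vm area on failure. Driver-visible behaviour is unchanged; a routine use of the result might look like this (the window address and register offset are made up):

        #include <linux/io.h>

        static int probe_example(void)
        {
                void __iomem *regs = ioremap(0xfe200000, 0x100);        /* made-up window */

                if (!regs)
                        return -ENOMEM;

                writel(0x1, regs + 0x04);       /* poke a made-up register */
                iounmap(regs);
                return 0;
        }
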
@@ -126,9 +100,6 @@ static inline int iomapping_nontranslatable(unsigned long offset)
                return 1;
 #endif
 
-       if (is_pci_memory_fixed_range(offset, 0))
-               return 1;
-
        return 0;
 }
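
Dropping the PCI fixed-range test leaves iomapping_nontranslatable() with only the 29-bit segment check guarded by the #ifdef above. The predicate is presumably still consumed the same way, e.g. by __iounmap() to skip teardown for identity-mapped P1/P2 addresses; a rough sketch of that kind of caller, not taken from this hunk:

        static void example_iounmap(void __iomem *addr)
        {
                unsigned long vaddr = (unsigned long __force)addr;

                if (iomapping_nontranslatable(vaddr))
                        return;         /* identity-mapped segment: nothing to tear down */

                /* ... otherwise fall through to fixmap/PMB/vmalloc teardown ... */
        }
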
 
@@ -149,23 +120,11 @@ void __iounmap(void __iomem *addr)
        if (iounmap_fixed(addr) == 0)
                return;
 
-#ifdef CONFIG_PMB
        /*
-        * Purge any PMB entries that may have been established for this
-        * mapping, then proceed with conventional VMA teardown.
-        *
-        * XXX: Note that due to the way that remove_vm_area() does
-        * matching of the resultant VMA, we aren't able to fast-forward
-        * the address past the PMB space until the end of the VMA where
-        * the page tables reside. As such, unmap_vm_area() will be
-        * forced to linearly scan over the area until it finds the page
-        * tables where PTEs that need to be unmapped actually reside,
-        * which is far from optimal. Perhaps we need to use a separate
-        * VMA for the PMB mappings?
-        *                                      -- PFM.
+        * If the PMB handled it, there's nothing else to do.
         */
-       pmb_unmap(vaddr);
-#endif
+       if (pmb_unmap(addr) == 0)
+               return;
 
        p = remove_vm_area((void *)(vaddr & PAGE_MASK));
        if (!p) {
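
After the added early return, __iounmap() tries each backend in turn and stops at the first that claims the address: iounmap_fixed(), then pmb_unmap(), and finally the conventional vm-area teardown that the trailing context lines begin. The hunk cuts off at the NULL check; the remainder is presumably the usual pattern (a sketch, not a quote of this revision):

        if (!p) {
                printk(KERN_ERR "%s: bad address %p\n", __func__, addr);
                return;
        }

        kfree(p);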