Merge branch 'topic/core-cleanup' into for-linus
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index e92aa46..12e4d2d 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
 #include <asm/pgalloc.h>
 #include <asm/pat.h>
 
-#ifdef CONFIG_X86_64
-
-unsigned long __phys_addr(unsigned long x)
-{
-       if (x >= __START_KERNEL_map)
-               return x - __START_KERNEL_map + phys_base;
-       return x - PAGE_OFFSET;
-}
-EXPORT_SYMBOL(__phys_addr);
-
-static inline int phys_addr_valid(unsigned long addr)
-{
-       return addr < (1UL << boot_cpu_data.x86_phys_bits);
-}
-
-#else
-
-static inline int phys_addr_valid(unsigned long addr)
-{
-       return 1;
-}
-
-#endif
-
-int page_is_ram(unsigned long pagenr)
-{
-       resource_size_t addr, end;
-       int i;
-
-       /*
-        * A special case is the first 4Kb of memory;
-        * This is a BIOS owned area, not kernel ram, but generally
-        * not listed as such in the E820 table.
-        */
-       if (pagenr == 0)
-               return 0;
-
-       /*
-        * Second special case: Some BIOSen report the PC BIOS
-        * area (640->1Mb) as ram even though it is not.
-        */
-       if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
-                   pagenr < (BIOS_END >> PAGE_SHIFT))
-               return 0;
-
-       for (i = 0; i < e820.nr_map; i++) {
-               /*
-                * Not usable memory:
-                */
-               if (e820.map[i].type != E820_RAM)
-                       continue;
-               addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
-               end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;
-
-
-               if ((pagenr >= addr) && (pagenr < end))
-                       return 1;
-       }
-       return 0;
-}
+#include "physaddr.h"
 
 /*
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
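The deleted __phys_addr()/phys_addr_valid()/page_is_ram() block moves behind
the new "physaddr.h" include rather than disappearing. As a reminder of what
the relocated 64-bit helpers do, here is a sketch mirroring the lines removed
above (the live definitions now sit outside ioremap.c):

    /* Sketch of the relocated helpers, mirroring the removed lines. */
    unsigned long __phys_addr(unsigned long x)
    {
            if (x >= __START_KERNEL_map)    /* kernel text mapping */
                    return x - __START_KERNEL_map + phys_base;
            return x - PAGE_OFFSET;         /* direct (linear) mapping */
    }

    static inline int phys_addr_valid(unsigned long addr)
    {
            /* valid iff addr fits the CPU's physical address width */
            return addr < (1UL << boot_cpu_data.x86_phys_bits);
    }
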
@@ -146,10 +87,17 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
-       if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
+       if (is_ISA_range(phys_addr, last_addr))
                return (__force void __iomem *)phys_to_virt(phys_addr);
 
        /*
+        * Check if the request spans more than any BAR in the iomem resource
+        * tree.
+        */
+       WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
+                 KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");
+
+       /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
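The open-coded ISA window comparison becomes the is_ISA_range() helper. A
plausible expansion, matching the test it replaces (assumption: the helper is
a straightforward range check; its exact definition lives in a header, not in
this file):

    /* Sketch: the low PC/ISA hole is permanently mapped, so ioremap()
     * can hand back the existing virtual address for it. */
    #define is_ISA_range(s, e) \
            ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
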
@@ -170,33 +118,22 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;
 
-       retval = reserve_memtype(phys_addr, phys_addr + size,
+       retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
                                                prot_val, &new_prot_val);
        if (retval) {
-               pr_debug("Warning: reserve_memtype returned %d\n", retval);
+               printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
                return NULL;
        }
 
        if (prot_val != new_prot_val) {
-               /*
-                * Do not fallback to certain memory types with certain
-                * requested type:
-                * - request is uc-, return cannot be write-back
-                * - request is uc-, return cannot be write-combine
-                * - request is write-combine, return cannot be write-back
-                */
-               if ((prot_val == _PAGE_CACHE_UC_MINUS &&
-                    (new_prot_val == _PAGE_CACHE_WB ||
-                     new_prot_val == _PAGE_CACHE_WC)) ||
-                   (prot_val == _PAGE_CACHE_WC &&
-                    new_prot_val == _PAGE_CACHE_WB)) {
-                       pr_debug(
+               if (!is_new_memtype_allowed(phys_addr, size,
+                                           prot_val, new_prot_val)) {
+                       printk(KERN_ERR
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
-                       free_memtype(phys_addr, phys_addr + size);
-                       return NULL;
+                       goto err_free_memtype;
                }
                prot_val = new_prot_val;
        }
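is_new_memtype_allowed() centralizes the fallback policy that the deleted
comment spelled out. A sketch of the rules it encodes, lifted directly from
the removed open-coded test (the helper's real body and signature details are
an assumption here; the call site above fixes only the four arguments):

    /* Sketch: reject any weakening of the requested cache attribute.
     *  - UC- must not become WB or WC
     *  - WC  must not become WB
     * Strengthening (e.g. UC- -> UC) remains allowed. */
    static inline int is_new_memtype_allowed(resource_size_t paddr,
                                             unsigned long size,
                                             unsigned long req,
                                             unsigned long got)
    {
            /* paddr/size unused in this sketch */
            if ((req == _PAGE_CACHE_UC_MINUS &&
                 (got == _PAGE_CACHE_WB || got == _PAGE_CACHE_WC)) ||
                (req == _PAGE_CACHE_WC && got == _PAGE_CACHE_WB))
                    return 0;
            return 1;
    }
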
@@ -204,16 +141,16 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
-               prot = PAGE_KERNEL_NOCACHE;
+               prot = PAGE_KERNEL_IO_NOCACHE;
                break;
        case _PAGE_CACHE_UC_MINUS:
-               prot = PAGE_KERNEL_UC_MINUS;
+               prot = PAGE_KERNEL_IO_UC_MINUS;
                break;
        case _PAGE_CACHE_WC:
-               prot = PAGE_KERNEL_WC;
+               prot = PAGE_KERNEL_IO_WC;
                break;
        case _PAGE_CACHE_WB:
-               prot = PAGE_KERNEL;
+               prot = PAGE_KERNEL_IO;
                break;
        }
 
@@ -222,25 +159,25 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
         */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
-               return NULL;
+               goto err_free_memtype;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
-       if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
-               free_memtype(phys_addr, phys_addr + size);
-               free_vm_area(area);
-               return NULL;
-       }
 
-       if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
-               free_memtype(phys_addr, phys_addr + size);
-               vunmap(area->addr);
-               return NULL;
-       }
+       if (kernel_map_sync_memtype(phys_addr, size, prot_val))
+               goto err_free_area;
+
+       if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
+               goto err_free_area;
 
        ret_addr = (void __iomem *) (vaddr + offset);
        mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);
 
        return ret_addr;
+err_free_area:
+       free_vm_area(area);
+err_free_memtype:
+       free_memtype(phys_addr, phys_addr + size);
+       return NULL;
 }
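
The two duplicated failure paths collapse into ordered goto labels, the
standard kernel unwind idiom: labels appear in reverse order of acquisition,
so a failure at any step releases exactly what was set up before it. A
minimal illustration of the shape (all names hypothetical):

    /* Hypothetical sketch of the unwind idiom used above. */
    extern int acquire_first(void), acquire_second(void);
    extern void release_first(void);

    int setup_pair(void)
    {
            if (acquire_first())
                    return -ENOMEM;
            if (acquire_second())
                    goto err_release_first; /* undo only the first */
            return 0;

    err_release_first:
            release_first();
            return -ENOMEM;
    }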
 
 /**
@@ -268,7 +205,7 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
        /*
         * Ideally, this should be:
-        *      pat_wc_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
+        *      pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
         *
         * Till we fix all X drivers to use ioremap_wc(), we will use
         * UC MINUS.
@@ -290,9 +227,9 @@ EXPORT_SYMBOL(ioremap_nocache);
  *
  * Must be freed with iounmap.
  */
-void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
 {
-       if (pat_wc_enabled)
+       if (pat_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
                                        __builtin_return_address(0));
        else
@@ -307,6 +244,14 @@ void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 }
 EXPORT_SYMBOL(ioremap_cache);
 
+void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
+                               unsigned long prot_val)
+{
+       return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
+                               __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_prot);
+
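ioremap_prot() accepts an explicit protection value but honours only its
cache-attribute bits (prot_val & _PAGE_CACHE_MASK). A hypothetical caller,
equivalent to asking for a write-combined mapping (identifiers illustrative):

    /* Hypothetical caller: map a frame-buffer BAR write-combined. */
    static void __iomem *map_fb(resource_size_t bar_phys,
                                unsigned long bar_len)
    {
            return ioremap_prot(bar_phys, bar_len, _PAGE_CACHE_WC);
    }
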
 /**
  * iounmap - Free a IO remapping
  * @addr: virtual address from ioremap_*
@@ -325,8 +270,8 @@ void iounmap(volatile void __iomem *addr)
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space.   So handle that here.
         */
-       if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
-           addr < phys_to_virt(ISA_END_ADDRESS))
+       if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
+           (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
                return;
 
        addr = (volatile void __iomem *)
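The new (void __force *) casts are sparse annotations, not behaviour changes:
__iomem tags a pointer with a separate address space, and comparing it against
the plain kernel pointer returned by phys_to_virt() would otherwise warn.
__force marks the crossing as intentional. Roughly how the checker sees it,
assuming sparse's usual attribute definitions:

    /* Under sparse (__CHECKER__), approximately:
     *   #define __iomem __attribute__((noderef, address_space(2)))
     *   #define __force __attribute__((force))
     * so each mixed use needs an explicit, greppable cast: */
    static void sparse_example(resource_size_t phys, unsigned long len)
    {
            void __iomem *io = ioremap(phys, len);
            void *cpu = (void __force *)io; /* deliberate crossing */
            (void)cpu;
    }
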
@@ -341,7 +286,7 @@ void iounmap(volatile void __iomem *addr)
           cpa takes care of the direct mappings. */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
-               if (p->addr == addr)
+               if (p->addr == (void __force *)addr)
                        break;
        }
        read_unlock(&vmlist_lock);
@@ -355,7 +300,7 @@ void iounmap(volatile void __iomem *addr)
        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));
 
        /* Finally remove it */
-       o = remove_vm_area((void *)addr);
+       o = remove_vm_area((void __force *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
 }
@@ -374,7 +319,7 @@ void *xlate_dev_mem_ptr(unsigned long phys)
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);
 
-       addr = (void *)ioremap(start, PAGE_SIZE);
+       addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
        if (addr)
                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
 
@@ -390,9 +335,7 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
        return;
 }
 
-#ifdef CONFIG_X86_32
-
-int __initdata early_ioremap_debug;
+static int __initdata early_ioremap_debug;
 
 static int __init early_ioremap_debug_setup(char *str)
 {
@@ -403,8 +346,7 @@ static int __init early_ioremap_debug_setup(char *str)
 early_param("early_ioremap_debug", early_ioremap_debug_setup);
 
 static __initdata int after_paging_init;
-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
-               __section(.bss.page_aligned);
+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
 
 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
 {
@@ -422,13 +364,19 @@ static inline pte_t * __init early_ioremap_pte(unsigned long addr)
        return &bm_pte[pte_index(addr)];
 }
 
+static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;
+
 void __init early_ioremap_init(void)
 {
        pmd_t *pmd;
+       int i;
 
        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");
 
+       for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
+               slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
+
        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);
@@ -437,6 +385,10 @@ void __init early_ioremap_init(void)
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
+#define __FIXADDR_TOP (-PAGE_SIZE)
+       BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
+                    != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
+#undef __FIXADDR_TOP
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
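The temporary redefinition of __FIXADDR_TOP is what lets BUILD_BUG_ON() fire
at compile time: with the top pinned to a constant, __fix_to_virt() folds to
a constant expression, so the "both BTMAP ends share one pmd" invariant is
checked statically, while the runtime WARN_ON path above stays as a fallback.
Sketch of the mechanism, per the usual kernel definitions:

    /* BUILD_BUG_ON(e) needs a compile-time constant; it expands to
     * roughly a negative-size array when e is non-zero:
     *   #define BUILD_BUG_ON(e) ((void)sizeof(char[1 - 2 * !!(e)]))
     * and __fix_to_virt(x) is (FIXADDR_TOP - ((x) << PAGE_SHIFT)),
     * which becomes constant once __FIXADDR_TOP is pinned. */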
@@ -452,38 +404,13 @@ void __init early_ioremap_init(void)
        }
 }
 
-void __init early_ioremap_clear(void)
-{
-       pmd_t *pmd;
-
-       if (early_ioremap_debug)
-               printk(KERN_INFO "early_ioremap_clear()\n");
-
-       pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
-       pmd_clear(pmd);
-       paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
-       __flush_tlb_all();
-}
-
 void __init early_ioremap_reset(void)
 {
-       enum fixed_addresses idx;
-       unsigned long addr, phys;
-       pte_t *pte;
-
        after_paging_init = 1;
-       for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
-               addr = fix_to_virt(idx);
-               pte = early_ioremap_pte(addr);
-               if (pte_present(*pte)) {
-                       phys = pte_val(*pte) & PAGE_MASK;
-                       set_fixmap(idx, phys);
-               }
-       }
 }
 
 static void __init __early_set_fixmap(enum fixed_addresses idx,
-                                  unsigned long phys, pgprot_t flags)
+                                     phys_addr_t phys, pgprot_t flags)
 {
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;
@@ -493,20 +420,21 @@ static void __init __early_set_fixmap(enum fixed_addresses idx,
                return;
        }
        pte = early_ioremap_pte(addr);
+
        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
-               pte_clear(NULL, addr, pte);
+               pte_clear(&init_mm, addr, pte);
        __flush_tlb_one(addr);
 }
 
 static inline void __init early_set_fixmap(enum fixed_addresses idx,
-                                       unsigned long phys)
+                                          phys_addr_t phys, pgprot_t prot)
 {
        if (after_paging_init)
-               set_fixmap(idx, phys);
+               __set_fixmap(idx, phys, prot);
        else
-               __early_set_fixmap(idx, phys, PAGE_KERNEL);
+               __early_set_fixmap(idx, phys, prot);
 }
 
 static inline void __init early_clear_fixmap(enum fixed_addresses idx)
@@ -517,37 +445,73 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx)
                __early_set_fixmap(idx, 0, __pgprot(0));
 }
 
+static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
+static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
 
-int __initdata early_ioremap_nested;
+void __init fixup_early_ioremap(void)
+{
+       int i;
+
+       for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
+               if (prev_map[i]) {
+                       WARN_ON(1);
+                       break;
+               }
+       }
+
+       early_ioremap_init();
+}
 
 static int __init check_early_ioremap_leak(void)
 {
-       if (!early_ioremap_nested)
-               return 0;
+       int count = 0;
+       int i;
 
-       printk(KERN_WARNING
+       for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
+               if (prev_map[i])
+                       count++;
+
+       if (!count)
+               return 0;
+       WARN(1, KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
-              early_ioremap_nested);
+               count);
        printk(KERN_WARNING
-              "please boot with early_ioremap_debug and report the dmesg.\n");
-       WARN_ON(1);
+               "please boot with early_ioremap_debug and report the dmesg.\n");
 
        return 1;
 }
 late_initcall(check_early_ioremap_leak);
 
-void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
+static void __init __iomem *
+__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
 {
-       unsigned long offset, last_addr;
-       unsigned int nrpages, nesting;
+       unsigned long offset;
+       resource_size_t last_addr;
+       unsigned int nrpages;
        enum fixed_addresses idx0, idx;
+       int i, slot;
 
        WARN_ON(system_state != SYSTEM_BOOTING);
 
-       nesting = early_ioremap_nested;
+       slot = -1;
+       for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
+               if (!prev_map[i]) {
+                       slot = i;
+                       break;
+               }
+       }
+
+       if (slot < 0) {
+               printk(KERN_INFO "early_iomap(%08llx, %08lx) not found slot\n",
+                        (u64)phys_addr, size);
+               WARN_ON(1);
+               return NULL;
+       }
+
        if (early_ioremap_debug) {
-               printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
-                      phys_addr, size, nesting);
+               printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
+                      (u64)phys_addr, size, slot);
                dump_stack();
        }
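The single nesting counter becomes a small slot allocator: FIX_BTMAPS_SLOTS
fixed windows, tracked by prev_map[]/prev_size[], so early mappings no longer
have to be torn down in strict LIFO order. The search is a plain first-fit
scan, as in this standalone sketch:

    /* Standalone sketch of the first-fit slot scan used above. */
    #define NSLOTS 4                    /* stands in for FIX_BTMAPS_SLOTS */
    static void *slots[NSLOTS];

    static int find_free_slot(void)
    {
            int i;

            for (i = 0; i < NSLOTS; i++)
                    if (!slots[i])
                            return i;   /* first unused window */
            return -1;                  /* all busy: caller WARNs */
    }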
 
@@ -558,17 +522,13 @@ void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
                return NULL;
        }
 
-       if (nesting >= FIX_BTMAPS_NESTING) {
-               WARN_ON(1);
-               return NULL;
-       }
-       early_ioremap_nested++;
+       prev_size[slot] = size;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
-       size = PAGE_ALIGN(last_addr) - phys_addr;
+       size = PAGE_ALIGN(last_addr + 1) - phys_addr;
 
        /*
         * Mappings have to fit in the FIX_BTMAP area.
@@ -582,35 +542,68 @@ void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
        /*
         * Ok, go for it..
         */
-       idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
+       idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
        idx = idx0;
        while (nrpages > 0) {
-               early_set_fixmap(idx, phys_addr);
+               early_set_fixmap(idx, phys_addr, prot);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
-               printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));
+               printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);
 
-       return (void *) (offset + fix_to_virt(idx0));
+       prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
+       return prev_map[slot];
 }
 
-void __init early_iounmap(void *addr, unsigned long size)
+/* Remap an IO device */
+void __init __iomem *
+early_ioremap(resource_size_t phys_addr, unsigned long size)
+{
+       return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
+}
+
+/* Remap memory */
+void __init __iomem *
+early_memremap(resource_size_t phys_addr, unsigned long size)
+{
+       return __early_ioremap(phys_addr, size, PAGE_KERNEL);
+}
+
+void __init early_iounmap(void __iomem *addr, unsigned long size)
 {
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
-       int nesting;
+       int i, slot;
 
-       nesting = --early_ioremap_nested;
-       if (WARN_ON(nesting < 0))
+       slot = -1;
+       for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
+               if (prev_map[i] == addr) {
+                       slot = i;
+                       break;
+               }
+       }
+
+       if (slot < 0) {
+               printk(KERN_INFO "early_iounmap(%p, %08lx) not found slot\n",
+                        addr, size);
+               WARN_ON(1);
                return;
+       }
+
+       if (prev_size[slot] != size) {
+               printk(KERN_INFO "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
+                        addr, size, slot, prev_size[slot]);
+               WARN_ON(1);
+               return;
+       }
 
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
-                      size, nesting);
+                      size, slot);
                dump_stack();
        }
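early_ioremap() and early_memremap() are now thin wrappers differing only in
pgprot: device memory gets PAGE_KERNEL_IO, RAM gets PAGE_KERNEL. A
hypothetical boot-time use of the memory flavour (structure and helper names
illustrative only, not real kernel API):

    /* Hypothetical: inspect a firmware table in RAM during boot. */
    static void __init peek_fw_table(resource_size_t tbl_phys)
    {
            struct fw_table *tbl;

            tbl = (struct fw_table __force *)early_memremap(tbl_phys,
                                                            sizeof(*tbl));
            if (!tbl)
                    return;
            parse_fw_table(tbl);                /* illustrative consumer */
            early_iounmap((void __force __iomem *)tbl, sizeof(*tbl));
    }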
 
@@ -622,17 +615,11 @@ void __init early_iounmap(void *addr, unsigned long size)
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
 
-       idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
+       idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
+       prev_map[slot] = NULL;
 }
-
-void __this_fixmap_does_not_exist(void)
-{
-       WARN_ON(1);
-}
-
-#endif /* CONFIG_X86_32 */
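
A final note on the index arithmetic the map and unmap loops share: on x86 a
higher fixmap index maps a lower virtual address, roughly
virt(idx) = FIXADDR_TOP - (idx << PAGE_SHIFT). Slot i therefore begins at
index FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i, and stepping --idx while the
physical address advances yields ascending contiguous virtuals, which is why
slot_virt[] can be precomputed once in early_ioremap_init(). Worked example
under those assumptions (the constants are illustrative):

    /* With NR_FIX_BTMAPS == 64 and 4K pages:
     *   slot 0: idx FIX_BTMAP_BEGIN      .. FIX_BTMAP_BEGIN - 63
     *   slot 1: idx FIX_BTMAP_BEGIN - 64 .. FIX_BTMAP_BEGIN - 127
     * Each slot covers a disjoint 256K virtual window. */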