x86-32: make sure virt_addr_valid() returns false for fixmap addresses
arch/x86/mm/ioremap.c
index 0561fde..62def57 100644
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/mmiotrace.h>
 
 #include <asm/cacheflush.h>
 #include <asm/e820.h>
 
 #ifdef CONFIG_X86_64
 
+static inline int phys_addr_valid(unsigned long addr)
+{
+       return addr < (1UL << boot_cpu_data.x86_phys_bits);
+}
+
 unsigned long __phys_addr(unsigned long x)
 {
-       if (x >= __START_KERNEL_map)
-               return x - __START_KERNEL_map + phys_base;
-       return x - PAGE_OFFSET;
+       if (x >= __START_KERNEL_map) {
+               x -= __START_KERNEL_map;
+               VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
+               x += phys_base;
+       } else {
+               VIRTUAL_BUG_ON(x < PAGE_OFFSET);
+               x -= PAGE_OFFSET;
+               VIRTUAL_BUG_ON(!phys_addr_valid(x));
+       }
+       return x;
 }
 EXPORT_SYMBOL(__phys_addr);
 
-static inline int phys_addr_valid(unsigned long addr)
+bool __virt_addr_valid(unsigned long x)
 {
-       return addr < (1UL << boot_cpu_data.x86_phys_bits);
+       if (x >= __START_KERNEL_map) {
+               x -= __START_KERNEL_map;
+               if (x >= KERNEL_IMAGE_SIZE)
+                       return false;
+               x += phys_base;
+       } else {
+               if (x < PAGE_OFFSET)
+                       return false;
+               x -= PAGE_OFFSET;
+               if (!phys_addr_valid(x))
+                       return false;
+       }
+
+       return pfn_valid(x >> PAGE_SHIFT);
 }
+EXPORT_SYMBOL(__virt_addr_valid);
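
On the 64-bit side, __phys_addr() now validates which region a virtual address falls in before translating: kernel-text addresses (at or above __START_KERNEL_map) must lie within KERNEL_IMAGE_SIZE, and direct-map addresses (at or above PAGE_OFFSET) must yield a physical address accepted by phys_addr_valid(). __virt_addr_valid() applies the same two-region logic but returns false where __phys_addr() would hit a VIRTUAL_BUG_ON(). A minimal userspace sketch of the check order follows; every constant is an assumed stand-in, not the kernel's real layout (build 64-bit):

    /* Hypothetical model of the 64-bit address checks; all constants
     * below are illustrative assumptions. */
    #include <stdbool.h>
    #include <stdio.h>

    #define START_KERNEL_MAP 0xffffffff80000000UL /* assumed */
    #define DIRECT_MAP_BASE  0xffff880000000000UL /* assumed */
    #define KERNEL_IMAGE_SZ  (512UL << 20)        /* assumed */
    #define PHYS_BITS        36                   /* assumed x86_phys_bits */

    static unsigned long phys_base; /* 0: kernel not relocated */

    static bool model_virt_addr_ok(unsigned long x, unsigned long *phys)
    {
            if (x >= START_KERNEL_MAP) {
                    x -= START_KERNEL_MAP;
                    if (x >= KERNEL_IMAGE_SZ)
                            return false;   /* beyond the mapped image */
                    x += phys_base;
            } else {
                    if (x < DIRECT_MAP_BASE)
                            return false;   /* below the direct map */
                    x -= DIRECT_MAP_BASE;
                    if (x >= (1UL << PHYS_BITS))
                            return false;   /* impossible physical addr */
            }
            *phys = x;
            return true;    /* the kernel then also tests pfn_valid() */
    }

    int main(void)
    {
            unsigned long p;
            printf("%d\n", model_virt_addr_ok(START_KERNEL_MAP + 0x100000, &p)); /* 1 */
            printf("%d\n", model_virt_addr_ok(DIRECT_MAP_BASE - 1, &p));         /* 0 */
            return 0;
    }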
 
 #else
 
@@ -43,6 +70,29 @@ static inline int phys_addr_valid(unsigned long addr)
        return 1;
 }
 
+#ifdef CONFIG_DEBUG_VIRTUAL
+unsigned long __phys_addr(unsigned long x)
+{
+       /* VMALLOC_* aren't constants */
+       VIRTUAL_BUG_ON(x < PAGE_OFFSET);
+       VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));
+       return x - PAGE_OFFSET;
+}
+EXPORT_SYMBOL(__phys_addr);
+#endif
+
+bool __virt_addr_valid(unsigned long x)
+{
+       if (x < PAGE_OFFSET)
+               return false;
+       if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
+               return false;
+       if (x >= FIXADDR_START)
+               return false;
+       return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL(__virt_addr_valid);
+
 #endif
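
The 32-bit branch carries the change named in the title: virt_addr_valid() used to accept fixmap addresses even though fixmap pages are mapped to arbitrary physical addresses, so the linear x - PAGE_OFFSET transform is meaningless for them. __virt_addr_valid() now rejects everything at or above FIXADDR_START, plus the vmalloc range once __vmalloc_start_set indicates its bounds are initialized, before consulting pfn_valid(). A userspace sketch of the check order, with addresses assumed for a default 3G/1G split:

    /* Hypothetical model of the 32-bit check; the addresses are assumed
     * values for illustration, not the kernel's real layout. */
    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_OFFSET_32 0xc0000000UL /* assumed lowmem base */
    #define VMALLOC_LO     0xe0000000UL /* assumed VMALLOC_START */
    #define VMALLOC_HI     0xff7fe000UL /* assumed VMALLOC_END */
    #define FIXADDR_BASE   0xfff15000UL /* assumed FIXADDR_START */

    static bool model_virt_addr_valid(unsigned long x)
    {
            if (x < PAGE_OFFSET_32)
                    return false;           /* user range */
            if (x >= VMALLOC_LO && x < VMALLOC_HI)
                    return false;           /* vmalloc space */
            if (x >= FIXADDR_BASE)
                    return false;           /* fixmap: the new rejection */
            return true;    /* the kernel then also tests pfn_valid() */
    }

    int main(void)
    {
            printf("fixmap: %d\n", model_virt_addr_valid(0xfff16000UL)); /* 0 */
            printf("lowmem: %d\n", model_virt_addr_valid(0xc0100000UL)); /* 1 */
            return 0;
    }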
 
 int page_is_ram(unsigned long pagenr)
@@ -122,10 +172,13 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 {
        unsigned long pfn, offset, vaddr;
        resource_size_t last_addr;
+       const resource_size_t unaligned_phys_addr = phys_addr;
+       const unsigned long unaligned_size = size;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;
+       void __iomem *ret_addr;
 
        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
@@ -146,6 +199,13 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                return (__force void __iomem *)phys_to_virt(phys_addr);
 
        /*
+        * Check if the request spans more than one BAR in the iomem
+        * resource tree.
+        */
+       WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
+                 KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");
+
+       /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
@@ -166,7 +226,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;
 
-       retval = reserve_memtype(phys_addr, phys_addr + size,
+       retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
                                                prot_val, &new_prot_val);
        if (retval) {
                pr_debug("Warning: reserve_memtype returned %d\n", retval);
@@ -200,16 +260,16 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
-               prot = PAGE_KERNEL_NOCACHE;
+               prot = PAGE_KERNEL_IO_NOCACHE;
                break;
        case _PAGE_CACHE_UC_MINUS:
-               prot = PAGE_KERNEL_UC_MINUS;
+               prot = PAGE_KERNEL_IO_UC_MINUS;
                break;
        case _PAGE_CACHE_WC:
-               prot = PAGE_KERNEL_WC;
+               prot = PAGE_KERNEL_IO_WC;
                break;
        case _PAGE_CACHE_WB:
-               prot = PAGE_KERNEL;
+               prot = PAGE_KERNEL_IO;
                break;
        }
 
@@ -233,7 +293,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                return NULL;
        }
 
-       return (void __iomem *) (vaddr + offset);
+       ret_addr = (void __iomem *) (vaddr + offset);
+       mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);
+
+       return ret_addr;
 }
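
__ioremap_caller() maps on page-aligned boundaries but returns a pointer adjusted by the sub-page offset, and mmiotrace wants the caller's original view of the region; hence the unaligned address and size are saved before alignment and handed to mmiotrace_ioremap() along with the final pointer. A userspace sketch of just the alignment bookkeeping (the BAR address is made up):

    /* Models the offset/alignment arithmetic around the mapping call;
     * a 4 KiB PAGE_SIZE is assumed. */
    #include <stdio.h>

    #define PAGE_SIZE       4096UL
    #define PAGE_MASK       (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
            unsigned long phys_addr = 0xfebf0010UL; /* hypothetical BAR + 0x10 */
            unsigned long size = 0x30;
            unsigned long last_addr = phys_addr + size - 1;
            const unsigned long unaligned_phys_addr = phys_addr; /* for the tracer */

            unsigned long offset = phys_addr & ~PAGE_MASK;
            phys_addr &= PAGE_MASK;
            size = PAGE_ALIGN(last_addr + 1) - phys_addr;

            printf("map %#lx+%#lx, return vaddr+%#lx, trace %#lx\n",
                   phys_addr, size, offset, unaligned_phys_addr);
            return 0;
    }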
 
 /**
@@ -283,7 +346,7 @@ EXPORT_SYMBOL(ioremap_nocache);
  *
  * Must be freed with iounmap.
  */
-void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
 {
        if (pat_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
@@ -300,6 +363,37 @@ void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 }
 EXPORT_SYMBOL(ioremap_cache);
 
+static void __iomem *ioremap_default(resource_size_t phys_addr,
+                                       unsigned long size)
+{
+       unsigned long flags;
+       void __iomem *ret;
+       int err;
+
+       /*
+        * - WB for WB-able memory and no other conflicting mappings
+        * - UC_MINUS for non-WB-able memory with no other conflicting mappings
+        * - Inherit from conflicting mappings otherwise
+        */
+       err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
+       if (err < 0)
+               return NULL;
+
+       ret = __ioremap_caller(phys_addr, size, flags,
+                              __builtin_return_address(0));
+
+       free_memtype(phys_addr, phys_addr + size);
+       return ret;
+}
+
+void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
+                               unsigned long prot_val)
+{
+       return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
+                               __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_prot);
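
ioremap_prot() gives callers direct control over the cache-mode bits instead of picking one of the fixed ioremap_* flavors; only the _PAGE_CACHE_MASK bits of prot_val reach __ioremap_caller(). A hypothetical driver fragment (not part of this patch) might use it like so:

    /* Hypothetical: map BAR 0 of a PCI device, passing explicit
     * protection bits; only the cache-mode bits are honored. */
    static void __iomem *map_bar0_uncached(struct pci_dev *pdev)
    {
            return ioremap_prot(pci_resource_start(pdev, 0),
                                pci_resource_len(pdev, 0),
                                pgprot_val(PAGE_KERNEL_IO_NOCACHE));
    }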
+
 /**
  * iounmap - Free an IO remapping
  * @addr: virtual address from ioremap_*
@@ -325,6 +419,8 @@ void iounmap(volatile void __iomem *addr)
        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);
 
+       mmiotrace_iounmap(addr);
+
        /* Use the vm area unlocked, assuming the caller
           ensures there isn't another iounmap for the same address
           in parallel. Reuse of the virtual address is prevented by
@@ -365,7 +461,7 @@ void *xlate_dev_mem_ptr(unsigned long phys)
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);
 
-       addr = (void __force *)ioremap(start, PAGE_SIZE);
+       addr = (void __force *)ioremap_default(start, PAGE_SIZE);
        if (addr)
                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
 
@@ -381,9 +477,7 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
        return;
 }
 
-#ifdef CONFIG_X86_32
-
-int __initdata early_ioremap_debug;
+static int __initdata early_ioremap_debug;
 
 static int __init early_ioremap_debug_setup(char *str)
 {
@@ -442,34 +536,9 @@ void __init early_ioremap_init(void)
        }
 }
 
-void __init early_ioremap_clear(void)
-{
-       pmd_t *pmd;
-
-       if (early_ioremap_debug)
-               printk(KERN_INFO "early_ioremap_clear()\n");
-
-       pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
-       pmd_clear(pmd);
-       paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
-       __flush_tlb_all();
-}
-
 void __init early_ioremap_reset(void)
 {
-       enum fixed_addresses idx;
-       unsigned long addr, phys;
-       pte_t *pte;
-
        after_paging_init = 1;
-       for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
-               addr = fix_to_virt(idx);
-               pte = early_ioremap_pte(addr);
-               if (pte_present(*pte)) {
-                       phys = pte_val(*pte) & PAGE_MASK;
-                       set_fixmap(idx, phys);
-               }
-       }
 }
 
 static void __init __early_set_fixmap(enum fixed_addresses idx,
@@ -483,20 +552,21 @@ static void __init __early_set_fixmap(enum fixed_addresses idx,
                return;
        }
        pte = early_ioremap_pte(addr);
+
        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
-               pte_clear(NULL, addr, pte);
+               pte_clear(&init_mm, addr, pte);
        __flush_tlb_one(addr);
 }
 
 static inline void __init early_set_fixmap(enum fixed_addresses idx,
-                                       unsigned long phys)
+                                          unsigned long phys, pgprot_t prot)
 {
        if (after_paging_init)
-               set_fixmap(idx, phys);
+               __set_fixmap(idx, phys, prot);
        else
-               __early_set_fixmap(idx, phys, PAGE_KERNEL);
+               __early_set_fixmap(idx, phys, prot);
 }
 
 static inline void __init early_clear_fixmap(enum fixed_addresses idx)
@@ -507,37 +577,56 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx)
                __early_set_fixmap(idx, 0, __pgprot(0));
 }
 
-
-int __initdata early_ioremap_nested;
-
+static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
+static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
 static int __init check_early_ioremap_leak(void)
 {
-       if (!early_ioremap_nested)
-               return 0;
+       int count = 0;
+       int i;
 
-       printk(KERN_WARNING
+       for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
+               if (prev_map[i])
+                       count++;
+
+       if (!count)
+               return 0;
+       WARN(1, KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
-              early_ioremap_nested);
+               count);
        printk(KERN_WARNING
-              "please boot with early_ioremap_debug and report the dmesg.\n");
-       WARN_ON(1);
+               "please boot with early_ioremap_debug and report the dmesg.\n");
 
        return 1;
 }
 late_initcall(check_early_ioremap_leak);
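
The slot table replaces the old strictly-nested counter: each live early mapping occupies one prev_map[] entry, so mappings can be torn down in any order, and the leak check simply counts slots still occupied at late_initcall time. A userspace sketch of that allocation discipline (the slot count is an assumed value):

    /* Models the prev_map[] slot scheme; FIX_BTMAPS_SLOTS is assumed. */
    #include <stdio.h>

    #define FIX_BTMAPS_SLOTS 4

    static void *prev_map[FIX_BTMAPS_SLOTS];

    static int grab_slot(void *cookie)
    {
            for (int i = 0; i < FIX_BTMAPS_SLOTS; i++)
                    if (!prev_map[i]) {
                            prev_map[i] = cookie;
                            return i;
                    }
            return -1;              /* no free slot */
    }

    static int release_slot(void *cookie)
    {
            for (int i = 0; i < FIX_BTMAPS_SLOTS; i++)
                    if (prev_map[i] == cookie) {
                            prev_map[i] = NULL;
                            return i;
                    }
            return -1;              /* unknown mapping */
    }

    int main(void)
    {
            int a = grab_slot((void *)0x1);
            int b = grab_slot((void *)0x2);
            release_slot((void *)0x1);      /* out-of-order free now works */
            printf("a=%d b=%d c=%d\n", a, b, grab_slot((void *)0x3));
            return 0;       /* prints a=0 b=1 c=0: slot 0 was reused */
    }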
 
-void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
+static void __init __iomem *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
 {
        unsigned long offset, last_addr;
-       unsigned int nrpages, nesting;
+       unsigned int nrpages;
        enum fixed_addresses idx0, idx;
+       int i, slot;
 
        WARN_ON(system_state != SYSTEM_BOOTING);
 
-       nesting = early_ioremap_nested;
+       slot = -1;
+       for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
+               if (!prev_map[i]) {
+                       slot = i;
+                       break;
+               }
+       }
+
+       if (slot < 0) {
+               printk(KERN_INFO "early_iomap(%08lx, %08lx) not found slot\n",
+                        phys_addr, size);
+               WARN_ON(1);
+               return NULL;
+       }
+
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
-                      phys_addr, size, nesting);
+                      phys_addr, size, slot);
                dump_stack();
        }
 
@@ -548,17 +637,13 @@ void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
                return NULL;
        }
 
-       if (nesting >= FIX_BTMAPS_NESTING) {
-               WARN_ON(1);
-               return NULL;
-       }
-       early_ioremap_nested++;
+       prev_size[slot] = size;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
-       size = PAGE_ALIGN(last_addr) - phys_addr;
+       size = PAGE_ALIGN(last_addr + 1) - phys_addr;
 
        /*
         * Mappings have to fit in the FIX_BTMAP area.
@@ -572,10 +657,10 @@ void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
        /*
         * Ok, go for it..
         */
-       idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
+       idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
        idx = idx0;
        while (nrpages > 0) {
-               early_set_fixmap(idx, phys_addr);
+               early_set_fixmap(idx, phys_addr, prot);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
@@ -583,24 +668,55 @@ void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));
 
-       return (void *) (offset + fix_to_virt(idx0));
+       prev_map[slot] = (void __iomem *)(offset + fix_to_virt(idx0));
+       return prev_map[slot];
+}
+
+/* Remap an IO device */
+void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size)
+{
+       return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
+}
+
+/* Remap memory */
+void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size)
+{
+       return __early_ioremap(phys_addr, size, PAGE_KERNEL);
 }
 
-void __init early_iounmap(void *addr, unsigned long size)
+void __init early_iounmap(void __iomem *addr, unsigned long size)
 {
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
-       int nesting;
+       int i, slot;
 
-       nesting = --early_ioremap_nested;
-       if (WARN_ON(nesting < 0))
+       slot = -1;
+       for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
+               if (prev_map[i] == addr) {
+                       slot = i;
+                       break;
+               }
+       }
+
+       if (slot < 0) {
+               printk(KERN_INFO "early_iounmap(%p, %08lx) not found slot\n",
+                        addr, size);
+               WARN_ON(1);
                return;
+       }
+
+       if (prev_size[slot] != size) {
+               printk(KERN_INFO "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
+                        addr, size, slot, prev_size[slot]);
+               WARN_ON(1);
+               return;
+       }
 
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
-                      size, nesting);
+                      size, slot);
                dump_stack();
        }
 
@@ -612,17 +728,16 @@ void __init early_iounmap(void *addr, unsigned long size)
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
 
-       idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
+       idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
+       prev_map[slot] = NULL;
 }
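
Callers must now unmap through the same pointer with the same size, since early_iounmap() looks the address up in prev_map[] and compares against the recorded prev_size[]. early_memremap() is the PAGE_KERNEL twin of early_ioremap() for plain RAM such as boot data. A hypothetical boot-time fragment (not from this patch; the physical address is made up):

    /* Hypothetical: map a table during boot, read one word, and unmap
     * with exactly the size that was mapped. */
    static u32 __init read_table_sig(void)
    {
            void __iomem *p;
            u32 sig = 0;

            p = early_ioremap(0xf0000UL, 0x1000);   /* address illustrative */
            if (p) {
                    sig = readl(p);
                    early_iounmap(p, 0x1000);       /* must match mapped size */
            }
            return sig;
    }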
 
 void __this_fixmap_does_not_exist(void)
 {
        WARN_ON(1);
 }
-
-#endif /* CONFIG_X86_32 */