x86: remove now unused clear_kernel_mapping
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index f4a2082..ee6648f 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
 #include <asm/fixmap.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+
+enum ioremap_mode {
+       IOR_MODE_UNCACHED,
+       IOR_MODE_CACHED,
+};
 
 #ifdef CONFIG_X86_64
 
@@ -31,39 +37,54 @@ EXPORT_SYMBOL(__phys_addr);
 
 #endif
 
+int page_is_ram(unsigned long pagenr)
+{
+       unsigned long addr, end;
+       int i;
+
+       for (i = 0; i < e820.nr_map; i++) {
+               /*
+                * Not usable memory:
+                */
+               if (e820.map[i].type != E820_RAM)
+                       continue;
+               addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
+               end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;
+
+               /*
+                * Sanity check: Some BIOSen report areas as RAM that
+                * are not. Notably the 640 KB -> 1 MB area, which is the
+                * PCI BIOS area.
+                */
+               if (addr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
+                   end < (BIOS_END >> PAGE_SHIFT))
+                       continue;
+
+               if ((pagenr >= addr) && (pagenr < end))
+                       return 1;
+       }
+       return 0;
+}
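
The page_is_ram() helper added above walks the e820 map and answers, per pfn, whether the firmware lists that page as usable RAM. A minimal sketch of a hypothetical caller, assuming the usual kernel environment (range_is_ram() below is illustrative and not part of this patch):

    /* Hypothetical helper: nonzero if any page in
     * [start_pfn, start_pfn + nr_pages) is e820 RAM. */
    static int range_is_ram(unsigned long start_pfn, unsigned long nr_pages)
    {
            unsigned long pfn;

            for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
                    if (page_is_ram(pfn))
                            return 1;
            return 0;
    }
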
+
 /*
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.
  */
-static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
-                              pgprot_t prot)
+static int ioremap_change_attr(unsigned long vaddr, unsigned long size,
+                              enum ioremap_mode mode)
 {
-       unsigned long npages, vaddr, last_addr = phys_addr + size - 1;
-       int err, level;
-
-       /* No change for pages after the last mapping */
-       if (last_addr >= (max_pfn_mapped << PAGE_SHIFT))
-               return 0;
-
-       npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       vaddr = (unsigned long) __va(phys_addr);
-
-       /*
-        * If there is no identity map for this address,
-        * change_page_attr_addr is unnecessary
-        */
-       if (!lookup_address(vaddr, &level))
-               return 0;
-
-       /*
-        * Must use an address here and not struct page because the
-        * phys addr can be a in hole between nodes and not have a
-        * memmap entry.
-        */
-       err = change_page_attr_addr(vaddr, npages, prot);
-
-       if (!err)
-               global_flush_tlb();
+       unsigned long nrpages = size >> PAGE_SHIFT;
+       int err;
+
+       switch (mode) {
+       case IOR_MODE_UNCACHED:
+       default:
+               err = set_memory_uc(vaddr, nrpages);
+               break;
+       case IOR_MODE_CACHED:
+               err = set_memory_wb(vaddr, nrpages);
+               break;
+       }
 
        return err;
 }
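
With this rewrite, ioremap_change_attr() no longer drives change_page_attr_addr() and global_flush_tlb() by hand; it delegates to the CPA interface, where set_memory_uc() and set_memory_wb() take a kernel virtual address plus a page count and perform the required TLB/cache flushing internally. A minimal sketch of that contract, assuming vaddr is page aligned and already mapped:

    /* Illustrative only: flip one mapped kernel page to uncached,
     * then restore write-back. Both calls return 0 on success. */
    int make_page_uncached(unsigned long vaddr)
    {
            return set_memory_uc(vaddr, 1);
    }

    int make_page_cached(unsigned long vaddr)
    {
            return set_memory_wb(vaddr, 1);
    }
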
@@ -77,13 +98,12 @@ static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
-                       unsigned long flags)
+static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+                              enum ioremap_mode mode)
 {
-       void __iomem *addr;
+       unsigned long pfn, offset, last_addr, vaddr;
        struct vm_struct *area;
-       unsigned long offset, last_addr;
-       pgprot_t pgprot;
+       pgprot_t prot;
 
        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
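
As the comment above the function says, callers may pass a physical address that is not page aligned; __ioremap() maps whole pages and hands back the caller's byte offset into the first one. A worked sketch of that arithmetic with made-up numbers (the actual alignment code sits a few lines below this hunk):

    /* Sketch only: remapping 0x10 bytes at phys 0x000fe123. */
    unsigned long phys = 0x000fe123, len = 0x10;
    unsigned long last = phys + len - 1;            /* 0x000fe132 */
    unsigned long off  = phys & ~PAGE_MASK;         /* 0x123 */

    phys &= PAGE_MASK;                              /* 0x000fe000 */
    len = PAGE_ALIGN(last + 1) - phys;              /* one page: 0x1000 */
    /* the caller receives mapping_base + 0x123 */
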
@@ -96,25 +116,25 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (__force void __iomem *)phys_to_virt(phys_addr);
 
-#ifdef CONFIG_X86_32
        /*
         * Don't allow anybody to remap normal RAM that we're using.
         */
-       if (phys_addr <= virt_to_phys(high_memory - 1)) {
-               char *t_addr, *t_end;
-               struct page *page;
-
-               t_addr = __va(phys_addr);
-               t_end = t_addr + (size - 1);
-
-               for (page = virt_to_page(t_addr);
-                    page <= virt_to_page(t_end); page++)
-                       if (!PageReserved(page))
-                               return NULL;
+       for (pfn = phys_addr >> PAGE_SHIFT; pfn < max_pfn_mapped &&
+            (pfn << PAGE_SHIFT) < last_addr; pfn++) {
+               if (page_is_ram(pfn) && pfn_valid(pfn) &&
+                   !PageReserved(pfn_to_page(pfn)))
+                       return NULL;
        }
-#endif
 
-       pgprot = MAKE_GLOBAL(__PAGE_KERNEL | flags);
+       switch (mode) {
+       case IOR_MODE_UNCACHED:
+       default:
+               prot = PAGE_KERNEL_NOCACHE;
+               break;
+       case IOR_MODE_CACHED:
+               prot = PAGE_KERNEL;
+               break;
+       }
 
        /*
         * Mappings have to be page-aligned
@@ -130,21 +150,19 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
-       addr = (void __iomem *) area->addr;
-       if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
-                              phys_addr, pgprot)) {
-               remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
+       vaddr = (unsigned long) area->addr;
+       if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
+               remove_vm_area((void *)(vaddr & PAGE_MASK));
                return NULL;
        }
 
-       if (ioremap_change_attr(phys_addr, size, pgprot) < 0) {
-               vunmap(addr);
+       if (ioremap_change_attr(vaddr, size, mode) < 0) {
+               vunmap(area->addr);
                return NULL;
        }
 
-       return (void __iomem *) (offset + (char __iomem *)addr);
+       return (void __iomem *) (vaddr + offset);
 }
-EXPORT_SYMBOL(__ioremap);
 
 /**
  * ioremap_nocache     -   map bus memory into CPU space
@@ -169,10 +187,16 @@ EXPORT_SYMBOL(__ioremap);
  */
 void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
 {
-       return __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
+       return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
 }
 EXPORT_SYMBOL(ioremap_nocache);
 
+void __iomem *ioremap_cache(unsigned long phys_addr, unsigned long size)
+{
+       return __ioremap(phys_addr, size, IOR_MODE_CACHED);
+}
+EXPORT_SYMBOL(ioremap_cache);
+
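
With both flavors exported, a driver uses ioremap_nocache() for device registers and the new ioremap_cache() for RAM-like regions where caching is known to be safe. A hypothetical usage sketch (the physical address and register offset are invented for illustration):

    /* Hypothetical driver snippet, not from this patch. */
    void __iomem *regs = ioremap_nocache(0xfed00000, PAGE_SIZE);

    if (regs) {
            u32 status = readl(regs + 0x10);  /* uncached MMIO read */

            pr_info("status %08x\n", status);
            iounmap(regs);
    }
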
 /**
  * iounmap - Free an IO remapping
  * @addr: virtual address from ioremap_*
@@ -216,9 +240,6 @@ void iounmap(volatile void __iomem *addr)
                return;
        }
 
-       /* Reset the direct mapping. Can block */
-       ioremap_change_attr(p->phys_addr, p->size, PAGE_KERNEL);
-
        /* Finally remove it */
        o = remove_vm_area((void *)addr);
        BUG_ON(p != o || o == NULL);
@@ -257,7 +278,7 @@ void __init early_ioremap_init(void)
        unsigned long *pgd;
 
        if (early_ioremap_debug)
-               printk(KERN_DEBUG "early_ioremap_init()\n");
+               printk(KERN_INFO "early_ioremap_init()\n");
 
        pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
        *pgd = __pa(bm_pte) | _PAGE_TABLE;
@@ -286,10 +307,11 @@ void __init early_ioremap_clear(void)
        unsigned long *pgd;
 
        if (early_ioremap_debug)
-               printk(KERN_DEBUG "early_ioremap_clear()\n");
+               printk(KERN_INFO "early_ioremap_clear()\n");
 
        pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
        *pgd = 0;
+       paravirt_release_pt(__pa(pgd) >> PAGE_SHIFT);
        __flush_tlb_all();
 }
 
@@ -302,7 +324,7 @@ void __init early_ioremap_reset(void)
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
-               if (!*pte & _PAGE_PRESENT) {
+               if (*pte & _PAGE_PRESENT) {
                        phys = *pte & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
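
The one-character change above fixes an operator precedence bug: ! binds tighter than &, so the old test evaluated (!*pte) & _PAGE_PRESENT, which is nonzero only for a zero pte, the exact opposite of "is this pte present". A standalone sketch of the difference:

    #include <stdio.h>

    #define _PAGE_PRESENT 0x001UL

    int main(void)
    {
            unsigned long pte = 0xfe000UL | _PAGE_PRESENT; /* present pte */

            /* old test: parsed as (!pte) & _PAGE_PRESENT */
            printf("old: %lu\n", !pte & _PAGE_PRESENT);    /* prints 0 */
            /* fixed test: masks the present bit directly */
            printf("new: %lu\n", pte & _PAGE_PRESENT);     /* prints 1 */
            return 0;
    }
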
@@ -372,7 +394,7 @@ void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
 
        nesting = early_ioremap_nested;
        if (early_ioremap_debug) {
-               printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
+               printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, nesting);
                dump_stack();
        }
@@ -434,7 +456,7 @@ void __init early_iounmap(void *addr, unsigned long size)
        WARN_ON(nesting < 0);
 
        if (early_ioremap_debug) {
-               printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
+               printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, nesting);
                dump_stack();
        }