AMD IOMMU: implement lazy IO/TLB flushing
diff --git a/arch/x86/kernel/efi_64.c b/arch/x86/kernel/efi_64.c
index 1f8bbd9..652c528 100644
--- a/arch/x86/kernel/efi_64.c
+++ b/arch/x86/kernel/efi_64.c
@@ -33,9 +33,9 @@
 #include <asm/e820.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
-#include <asm/cacheflush.h>
 #include <asm/proto.h>
 #include <asm/efi.h>
+#include <asm/cacheflush.h>
 
 static pgd_t save_pgd __initdata;
 static unsigned long efi_flags __initdata;
@@ -44,22 +44,15 @@ static void __init early_mapping_set_exec(unsigned long start,
                                          unsigned long end,
                                          int executable)
 {
-       pte_t *kpte;
-       int level;
-
-       while (start < end) {
-               kpte = lookup_address((unsigned long)__va(start), &level);
-               BUG_ON(!kpte);
-               if (executable)
-                       set_pte(kpte, pte_mkexec(*kpte));
-               else
-                       set_pte(kpte, __pte((pte_val(*kpte) | _PAGE_NX) & \
-                                           __supported_pte_mask));
-               if (pte_huge(*kpte))
-                       start = (start + PMD_SIZE) & PMD_MASK;
-               else
-                       start = (start + PAGE_SIZE) & PAGE_MASK;
-       }
+       unsigned long num_pages;
+
+       start &= PMD_MASK;
+       end = (end + PMD_SIZE - 1) & PMD_MASK;
+       num_pages = (end - start) >> PAGE_SHIFT;
+       if (executable)
+               set_memory_x((unsigned long)__va(start), num_pages);
+       else
+               set_memory_nx((unsigned long)__va(start), num_pages);
 }
 
 static void __init early_runtime_code_mapping_set_exec(int executable)
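
The hunk above replaces the manual PTE walk with the set_memory_x()/set_memory_nx() API, after widening [start, end) outward to whole 2 MiB PMD boundaries so the attribute change covers complete large-page mappings instead of forcing them to be split. A minimal userspace sketch (not kernel code) of that rounding arithmetic, using the x86-64 constants and a made-up EFI region:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PMD_SIZE   (1UL << 21)          /* 2 MiB large-page size */
    #define PMD_MASK   (~(PMD_SIZE - 1))

    int main(void)
    {
        unsigned long start = 0x3f8e5000UL; /* hypothetical region start */
        unsigned long end   = 0x3f8e9000UL; /* hypothetical region end */

        start &= PMD_MASK;                      /* round down to 2 MiB */
        end = (end + PMD_SIZE - 1) & PMD_MASK;  /* round up to 2 MiB */

        /* 0x3f800000..0x3fa00000 covers 512 4 KiB pages */
        printf("num_pages = %lu\n", (end - start) >> PAGE_SHIFT);
        return 0;
    }

The trade-off visible in the code is that rounding outward can change attributes on neighbouring pages that share the same 2 MiB mapping; in exchange, the large-page mappings stay intact.
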
@@ -67,12 +60,15 @@ static void __init early_runtime_code_mapping_set_exec(int executable)
        efi_memory_desc_t *md;
        void *p;
 
+       if (!(__supported_pte_mask & _PAGE_NX))
+               return;
+
        /* Make EFI runtime service code area executable */
        for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                md = p;
                if (md->type == EFI_RUNTIME_SERVICES_CODE) {
                        unsigned long end;
-                       end = md->phys_addr + (md->num_pages << PAGE_SHIFT);
+                       end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
                        early_mapping_set_exec(md->phys_addr, end, executable);
                }
        }
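
EFI memory descriptors count pages in fixed 4 KiB EFI pages (EFI_PAGE_SHIFT is 12 by specification), not in units of the kernel's PAGE_SIZE, so switching from PAGE_SHIFT to EFI_PAGE_SHIFT here is a genuine correctness fix rather than a rename. On x86-64 the two shifts happen to be equal; the sketch below, with hypothetical descriptor values, shows how the old arithmetic would misbehave on a configuration with larger kernel pages:

    #include <stdio.h>

    #define EFI_PAGE_SHIFT 12   /* fixed by the EFI spec: 4 KiB units */

    int main(void)
    {
        unsigned long phys_addr = 0x7f000000UL; /* hypothetical */
        unsigned long num_pages = 16;           /* 16 EFI pages = 64 KiB */

        /* correct end: 64 KiB past the start */
        printf("end = %#lx\n", phys_addr + (num_pages << EFI_PAGE_SHIFT));

        /*
         * With 16 KiB kernel pages (PAGE_SHIFT == 14) the old code would
         * have computed a region four times too large.
         */
        printf("bogus end = %#lx\n", phys_addr + (num_pages << 14));
        return 0;
    }
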
@@ -82,8 +78,8 @@ void __init efi_call_phys_prelog(void)
 {
        unsigned long vaddress;
 
-       local_irq_save(efi_flags);
        early_runtime_code_mapping_set_exec(1);
+       local_irq_save(efi_flags);
        vaddress = (unsigned long)__va(0x0UL);
        save_pgd = *pgd_offset_k(0x0UL);
        set_pgd(pgd_offset_k(0x0UL), *pgd_offset_k(vaddress));
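
Moving local_irq_save() after early_runtime_code_mapping_set_exec(1) keeps the page-attribute update outside the IRQs-off window; the epilog hunk below mirrors this by calling early_runtime_code_mapping_set_exec(0) only after local_irq_restore(). The set_memory_x()/set_memory_nx() path may allocate memory and trigger cross-CPU TLB flushes, neither of which is safe to attempt with interrupts disabled.
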
@@ -96,62 +92,31 @@ void __init efi_call_phys_epilog(void)
         * After the lock is released, the original page table is restored.
         */
        set_pgd(pgd_offset_k(0x0UL), save_pgd);
-       early_runtime_code_mapping_set_exec(0);
        __flush_tlb_all();
        local_irq_restore(efi_flags);
+       early_runtime_code_mapping_set_exec(0);
 }
 
-/*
- * We need to map the EFI memory map again after init_memory_mapping().
- */
-void __init efi_map_memmap(void)
-{
-       memmap.map = __va(memmap.phys_map);
-       memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size);
-}
-
-void __init efi_reserve_bootmem(void)
-{
-       reserve_bootmem_generic((unsigned long)memmap.phys_map,
-                               memmap.nr_map * memmap.desc_size);
-}
-
-void __init runtime_code_page_mkexec(void)
-{
-       efi_memory_desc_t *md;
-       void *p;
-
-       /* Make EFI runtime service code area executable */
-       for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-               md = p;
-               if (md->type == EFI_RUNTIME_SERVICES_CODE)
-                       change_page_attr_addr(md->virt_addr,
-                                             md->num_pages,
-                                             PAGE_KERNEL_EXEC);
-       }
-       __flush_tlb_all();
-}
-
-void __iomem * __init efi_ioremap(unsigned long offset,
-                                 unsigned long size)
+void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size)
 {
-       static unsigned pages_mapped;
-       unsigned long last_addr;
+       static unsigned pages_mapped __initdata;
        unsigned i, pages;
+       unsigned long offset;
+
+       pages = PFN_UP(phys_addr + size) - PFN_DOWN(phys_addr);
+       offset = phys_addr & ~PAGE_MASK;
+       phys_addr &= PAGE_MASK;
 
-       last_addr = offset + size - 1;
-       offset &= PAGE_MASK;
-       pages = (PAGE_ALIGN(last_addr) - offset) >> PAGE_SHIFT;
        if (pages_mapped + pages > MAX_EFI_IO_PAGES)
                return NULL;
 
        for (i = 0; i < pages; i++) {
-               set_fixmap_nocache(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped,
-                                  offset);
-               offset += PAGE_SIZE;
+               __set_fixmap(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped,
+                            phys_addr, PAGE_KERNEL);
+               phys_addr += PAGE_SIZE;
                pages_mapped++;
        }
 
        return (void __iomem *)__fix_to_virt(FIX_EFI_IO_MAP_FIRST_PAGE - \
-                                            (pages_mapped - pages));
+                                            (pages_mapped - pages)) + offset;
 }
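
Two behavioural details of the rewritten efi_ioremap() are worth spelling out: the page count now comes from PFN_UP()/PFN_DOWN() over the physical range, and the sub-page offset, which the old version dropped from the returned pointer, is added back onto the fixmap virtual address. A standalone sketch of the arithmetic, with userspace stand-ins for the PFN helpers and hypothetical addresses:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))
    #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

    int main(void)
    {
        unsigned long phys_addr = 0xfed1c123UL; /* not page aligned */
        unsigned long size      = 0x2000UL;     /* 8 KiB */

        /* pages needed to cover [phys_addr, phys_addr + size) */
        unsigned long pages  = PFN_UP(phys_addr + size) - PFN_DOWN(phys_addr);
        unsigned long offset = phys_addr & ~PAGE_MASK; /* sub-page offset */

        phys_addr &= PAGE_MASK;

        /* prints: map 3 pages at 0xfed1c000, add back offset 0x123 */
        printf("map %lu pages at %#lx, add back offset %#lx\n",
               pages, phys_addr, offset);
        return 0;
    }

Note also that __set_fixmap(..., PAGE_KERNEL) establishes a cacheable mapping, whereas the old set_fixmap_nocache() mapped the range uncached.
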