x86: rename LARGE_PAGE_SIZE to PMD_PAGE_SIZE
author Andi Kleen <ak@suse.de>
Mon, 4 Feb 2008 15:48:08 +0000 (16:48 +0100)
committer Ingo Molnar <mingo@elte.hu>
Mon, 4 Feb 2008 15:48:08 +0000 (16:48 +0100)
Fix up all users.

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/x86/boot/compressed/head_64.S
arch/x86/kernel/head_64.S
arch/x86/kernel/pci-gart_64.c
arch/x86/mm/init_64.c
arch/x86/mm/pageattr.c
include/asm-x86/page.h

diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 1ccb38a..e8657b9 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -80,8 +80,8 @@ startup_32:
 
 #ifdef CONFIG_RELOCATABLE
        movl    %ebp, %ebx
-       addl    $(LARGE_PAGE_SIZE -1), %ebx
-       andl    $LARGE_PAGE_MASK, %ebx
+       addl    $(PMD_PAGE_SIZE -1), %ebx
+       andl    $PMD_PAGE_MASK, %ebx
 #else
        movl    $CONFIG_PHYSICAL_START, %ebx
 #endif
@@ -220,8 +220,8 @@ ENTRY(startup_64)
        /* Start with the delta to where the kernel will run at. */
 #ifdef CONFIG_RELOCATABLE
        leaq    startup_32(%rip) /* - $startup_32 */, %rbp
-       addq    $(LARGE_PAGE_SIZE - 1), %rbp
-       andq    $LARGE_PAGE_MASK, %rbp
+       addq    $(PMD_PAGE_SIZE - 1), %rbp
+       andq    $PMD_PAGE_MASK, %rbp
        movq    %rbp, %rbx
 #else
        movq    $CONFIG_PHYSICAL_START, %rbp
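
Both hunks above perform the same operation: round the runtime load address up to the next PMD-page (2 MiB) boundary so a relocated kernel stays large-page aligned. A minimal C sketch of the add-then-mask idiom, assuming the x86-64 value PMD_SHIFT == 21 (the shift itself is outside this patch; pmd_round_up() is a hypothetical name for illustration):

    #define PMD_PAGE_SIZE (1UL << 21)            /* assumes PMD_SHIFT == 21 */
    #define PMD_PAGE_MASK (~(PMD_PAGE_SIZE - 1))

    /* C equivalent of the addl/andl (and addq/andq) pairs above. */
    static unsigned long pmd_round_up(unsigned long addr)
    {
        return (addr + PMD_PAGE_SIZE - 1) & PMD_PAGE_MASK;
    }
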
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 1d5a7a3..4f283ad 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -63,7 +63,7 @@ startup_64:
 
        /* Is the address not 2M aligned? */
        movq    %rbp, %rax
-       andl    $~LARGE_PAGE_MASK, %eax
+       andl    $~PMD_PAGE_MASK, %eax
        testl   %eax, %eax
        jnz     bad_address
 
@@ -88,7 +88,7 @@ startup_64:
 
        /* Add an Identity mapping if I am above 1G */
        leaq    _text(%rip), %rdi
-       andq    $LARGE_PAGE_MASK, %rdi
+       andq    $PMD_PAGE_MASK, %rdi
 
        movq    %rdi, %rax
        shrq    $PUD_SHIFT, %rax
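
The first hunk rejects a load address with any bit set below the 2 MiB boundary; the second rounds _text down to the base of its 2 MiB page before computing the extra identity-mapping entries. Both uses of the mask, in a C sketch (helper names hypothetical; assumes PMD_SHIFT == 21 as above):

    #define PMD_PAGE_SIZE (1UL << 21)
    #define PMD_PAGE_MASK (~(PMD_PAGE_SIZE - 1))

    /* andl $~PMD_PAGE_MASK, %eax: nonzero iff addr is not 2 MiB aligned */
    static int pmd_misaligned(unsigned long addr)
    {
        return (addr & ~PMD_PAGE_MASK) != 0;
    }

    /* andq $PMD_PAGE_MASK, %rdi: round down to the 2 MiB page base */
    static unsigned long pmd_round_down(unsigned long addr)
    {
        return addr & PMD_PAGE_MASK;
    }
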
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 4d5cc71..ae1d3d8 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -501,7 +501,7 @@ static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
        }
 
        a = aper + iommu_size;
-       iommu_size -= round_up(a, LARGE_PAGE_SIZE) - a;
+       iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;
 
        if (iommu_size < 64*1024*1024) {
                printk(KERN_WARNING
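
Here round_up(a, PMD_PAGE_SIZE) - a is the distance from the end of the aperture-plus-IOMMU region to the next 2 MiB boundary, and iommu_size is shrunk by that amount. A hedged sketch of round_up() for a power-of-two alignment (equivalent to the kernel's definition for this case), with a worked example:

    #define PMD_PAGE_SIZE (1UL << 21)            /* assumes PMD_SHIFT == 21 */
    #define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))

    /* e.g. a = 0x4100000: round_up(a, PMD_PAGE_SIZE) == 0x4200000,
     * so iommu_size is reduced by 0x100000 (1 MiB). */
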
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index eabcaed..b7a7992 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -444,10 +444,10 @@ void __init clear_kernel_mapping(unsigned long address, unsigned long size)
 {
        unsigned long end = address + size;
 
-       BUG_ON(address & ~LARGE_PAGE_MASK);
-       BUG_ON(size & ~LARGE_PAGE_MASK);
+       BUG_ON(address & ~PMD_PAGE_MASK);
+       BUG_ON(size & ~PMD_PAGE_MASK);
 
-       for (; address < end; address += LARGE_PAGE_SIZE) {
+       for (; address < end; address += PMD_PAGE_SIZE) {
                pgd_t *pgd = pgd_offset_k(address);
                pud_t *pud;
                pmd_t *pmd;
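
clear_kernel_mapping() thus operates only on whole 2 MiB mappings: the BUG_ON()s require both the start address and the length to be PMD-page aligned, and the loop tears down one large page per iteration. A worked example of the check, assuming PMD_SHIFT == 21 (the values are hypothetical):

    #define PMD_PAGE_SIZE (1UL << 21)
    #define PMD_PAGE_MASK (~(PMD_PAGE_SIZE - 1))

    /* address = 0x40000000, size = 0x400000 (4 MiB): neither value has
     * bits set below bit 21, so (x & ~PMD_PAGE_MASK) == 0, the BUG_ON()s
     * pass, and the loop covers exactly two 2 MiB pages. */
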
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 7d21cd6..74446ea 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -273,8 +273,8 @@ static int try_preserve_large_page(pte_t *kpte, unsigned long address,
 
        switch (level) {
        case PG_LEVEL_2M:
-               psize = LARGE_PAGE_SIZE;
-               pmask = LARGE_PAGE_MASK;
+               psize = PMD_PAGE_SIZE;
+               pmask = PMD_PAGE_MASK;
                break;
        case PG_LEVEL_1G:
        default:
@@ -363,7 +363,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
        }
 
        address = __pa(address);
-       addr = address & LARGE_PAGE_MASK;
+       addr = address & PMD_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
 #ifdef CONFIG_X86_32
        paravirt_alloc_pt(&init_mm, page_to_pfn(base));
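
For the 2 MiB case, psize/pmask select the large-page geometry, and split_large_page() masks the physical address down to the base of the large page about to be split into smaller entries. A worked example of that masking, assuming PMD_SHIFT == 21 (the address is hypothetical):

    #define PMD_PAGE_SIZE (1UL << 21)
    #define PMD_PAGE_MASK (~(PMD_PAGE_SIZE - 1))

    /* address = 0x1234567 -> addr = address & PMD_PAGE_MASK = 0x1200000,
     * the start of the 2 MiB page containing 0x1234567. */
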
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h
index c8b30ef..1cb7c51 100644
--- a/include/asm-x86/page.h
+++ b/include/asm-x86/page.h
@@ -13,8 +13,8 @@
 #define PHYSICAL_PAGE_MASK     (PAGE_MASK & __PHYSICAL_MASK)
 #define PTE_MASK               (_AT(long, PHYSICAL_PAGE_MASK))
 
-#define LARGE_PAGE_SIZE                (_AC(1,UL) << PMD_SHIFT)
-#define LARGE_PAGE_MASK                (~(LARGE_PAGE_SIZE-1))
+#define PMD_PAGE_SIZE          (_AC(1, UL) << PMD_SHIFT)
+#define PMD_PAGE_MASK          (~(PMD_PAGE_SIZE-1))
 
 #define HPAGE_SHIFT            PMD_SHIFT
 #define HPAGE_SIZE             (_AC(1,UL) << HPAGE_SHIFT)
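
On x86-64 with 4 KiB base pages, PMD_SHIFT is 21, so the renamed macros keep their old values, and HPAGE_SIZE (defined from the same PMD_SHIFT in the context lines above) keeps the 2 MiB huge-page geometry. A self-contained sketch that prints the resulting constants (PMD_SHIFT is hard-coded here as an assumption; the kernel derives it from the page-table layout):

    #include <stdio.h>

    #define PMD_SHIFT     21                     /* x86-64 value; assumption */
    #define PMD_PAGE_SIZE (1UL << PMD_SHIFT)
    #define PMD_PAGE_MASK (~(PMD_PAGE_SIZE - 1))

    int main(void)
    {
        printf("PMD_PAGE_SIZE = %#lx\n", PMD_PAGE_SIZE);  /* 0x200000 (2 MiB) */
        printf("PMD_PAGE_MASK = %#lx\n", PMD_PAGE_MASK);  /* 0xffffffffffe00000 */
        return 0;
    }
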