Merge branch 'master' into for-linus
arch/arm/mm/mmu.c
index e5d61ee..4426ee6 100644
 #include <linux/mman.h>
 #include <linux/nodemask.h>
 
+#include <asm/cputype.h>
 #include <asm/mach-types.h>
+#include <asm/sections.h>
+#include <asm/cachetype.h>
 #include <asm/setup.h>
 #include <asm/sizes.h>
 #include <asm/tlb.h>
+#include <asm/highmem.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-extern void _stext, _etext, __data_start, _end;
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-
 /*
  * empty_zero_page is a special page that is used for
  * zero-initialized data and COW.
  */
 struct page *empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
 
 /*
  * The pmd table for the upper-most set of pages.
@@ -67,27 +69,27 @@ static struct cachepolicy cache_policies[] __initdata = {
                .policy         = "uncached",
                .cr_mask        = CR_W|CR_C,
                .pmd            = PMD_SECT_UNCACHED,
-               .pte            = 0,
+               .pte            = L_PTE_MT_UNCACHED,
        }, {
                .policy         = "buffered",
                .cr_mask        = CR_C,
                .pmd            = PMD_SECT_BUFFERED,
-               .pte            = PTE_BUFFERABLE,
+               .pte            = L_PTE_MT_BUFFERABLE,
        }, {
                .policy         = "writethrough",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WT,
-               .pte            = PTE_CACHEABLE,
+               .pte            = L_PTE_MT_WRITETHROUGH,
        }, {
                .policy         = "writeback",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WB,
-               .pte            = PTE_BUFFERABLE|PTE_CACHEABLE,
+               .pte            = L_PTE_MT_WRITEBACK,
        }, {
                .policy         = "writealloc",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WBWA,
-               .pte            = PTE_BUFFERABLE|PTE_CACHEABLE,
+               .pte            = L_PTE_MT_WRITEALLOC,
        }
 };
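
The .pte column now carries symbolic L_PTE_MT_* memory-type indices instead of raw PTE_BUFFERABLE/PTE_CACHEABLE combinations, which lets "writeback" and "writealloc" get distinct values. Elsewhere in this file (not shown in this hunk) the table is selected by name from the cachepolicy= boot parameter; a minimal sketch of that lookup, assuming the cachepolicy index and the cr_alignment/cr_no_alignment globals declared earlier in mmu.c (the helper name here is hypothetical):

	static void __init pick_cache_policy(const char *arg)	/* hypothetical */
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
			int len = strlen(cache_policies[i].policy);

			if (memcmp(arg, cache_policies[i].policy, len) == 0) {
				cachepolicy = i;	/* consumed by build_mem_type_table() */
				cr_alignment &= ~cache_policies[i].cr_mask;
				cr_no_alignment &= ~cache_policies[i].cr_mask;
				return;
			}
		}
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	}
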
 
@@ -181,33 +183,38 @@ void adjust_cr(unsigned long mask, unsigned long set)
 #endif
 
 #define PROT_PTE_DEVICE                L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
-#define PROT_SECT_DEVICE       PMD_TYPE_SECT|PMD_SECT_XN|PMD_SECT_AP_WRITE
+#define PROT_SECT_DEVICE       PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
 static struct mem_type mem_types[] = {
        [MT_DEVICE] = {           /* Strongly ordered / ARMv6 shared device */
-               .prot_pte       = PROT_PTE_DEVICE,
+               .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
+                                 L_PTE_SHARED,
                .prot_l1        = PMD_TYPE_TABLE,
-               .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
+               .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_S,
                .domain         = DOMAIN_IO,
        },
        [MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
-               .prot_pte       = PROT_PTE_DEVICE,
-               .prot_pte_ext   = PTE_EXT_TEX(2),
+               .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
                .prot_l1        = PMD_TYPE_TABLE,
-               .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_TEX(2),
+               .prot_sect      = PROT_SECT_DEVICE,
                .domain         = DOMAIN_IO,
        },
        [MT_DEVICE_CACHED] = {    /* ioremap_cached */
-               .prot_pte       = PROT_PTE_DEVICE | L_PTE_CACHEABLE | L_PTE_BUFFERABLE,
+               .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
                .prot_l1        = PMD_TYPE_TABLE,
                .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_WB,
                .domain         = DOMAIN_IO,
        },
-       [MT_DEVICE_IXP2000] = {   /* IXP2400 requires XCB=101 for on-chip I/O */
+       [MT_DEVICE_WC] = {      /* ioremap_wc */
+               .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
+               .prot_l1        = PMD_TYPE_TABLE,
+               .prot_sect      = PROT_SECT_DEVICE,
+               .domain         = DOMAIN_IO,
+       },
+       [MT_UNCACHED] = {
                .prot_pte       = PROT_PTE_DEVICE,
                .prot_l1        = PMD_TYPE_TABLE,
-               .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE |
-                                 PMD_SECT_TEX(1),
+               .prot_sect      = PMD_TYPE_SECT | PMD_SECT_XN,
                .domain         = DOMAIN_IO,
        },
        [MT_CACHECLEAN] = {
@@ -238,12 +245,17 @@ static struct mem_type mem_types[] = {
                .prot_sect = PMD_TYPE_SECT,
                .domain    = DOMAIN_KERNEL,
        },
+       [MT_MEMORY_NONCACHED] = {
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+               .domain    = DOMAIN_KERNEL,
+       },
 };
 
 const struct mem_type *get_mem_type(unsigned int type)
 {
        return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
 }
+EXPORT_SYMBOL(get_mem_type);
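
get_mem_type() is now exported so that modules can look up the architecture-correct attributes for a mapping instead of hard-coding PTE bits. A hedged usage sketch (the helper name is illustrative, not from this patch):

	/* Illustrative: fetch the pgprot a module could use for a
	 * write-combining device mapping on this CPU/architecture. */
	static pgprot_t example_wc_prot(void)	/* hypothetical helper */
	{
		const struct mem_type *mt = get_mem_type(MT_DEVICE_WC);

		/* get_mem_type() returns NULL for an out-of-range index */
		return mt ? __pgprot(mt->prot_pte) : __pgprot(0);
	}
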
 
 /*
  * Adjust the PMD section entries according to the CPU in use.
@@ -252,7 +264,7 @@ static void __init build_mem_type_table(void)
 {
        struct cachepolicy *cp;
        unsigned int cr = get_cr();
-       unsigned int user_pgprot, kern_pgprot;
+       unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
        int cpu_arch = cpu_architecture();
        int i;
 
@@ -270,13 +282,28 @@ static void __init build_mem_type_table(void)
                        cachepolicy = CPOLICY_WRITEBACK;
                ecc_mask = 0;
        }
+#ifdef CONFIG_SMP
+       cachepolicy = CPOLICY_WRITEALLOC;
+#endif
+
+       /*
+        * Strip out features not present on earlier architectures.
+        * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
+        * without extended page tables don't have the 'Shared' bit.
+        */
+       if (cpu_arch < CPU_ARCH_ARMv5)
+               for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+                       mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
+       if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
+               for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+                       mem_types[i].prot_sect &= ~PMD_SECT_S;
 
        /*
-        * ARMv5 and lower, bit 4 must be set for page tables.
-        * (was: cache "update-able on write" bit on ARM610)
-        * However, Xscale cores require this bit to be cleared.
+        * ARMv5 and lower, bit 4 must be set for page tables (was: cache
+        * "update-able on write" bit on ARM610).  However, Xscale and
+        * Xscale3 require this bit to be cleared.
         */
-       if (cpu_is_xscale()) {
+       if (cpu_is_xscale() || cpu_is_xsc3()) {
                for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                        mem_types[i].prot_sect &= ~PMD_BIT4;
                        mem_types[i].prot_l1 &= ~PMD_BIT4;
@@ -290,19 +317,81 @@ static void __init build_mem_type_table(void)
                }
        }
 
+       /*
+        * Mark the device areas according to the CPU/architecture.
+        */
+       if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
+               if (!cpu_is_xsc3()) {
+                       /*
+                        * Mark device regions on ARMv6+ as execute-never
+                        * to prevent speculative instruction fetches.
+                        */
+                       mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
+                       mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
+                       mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
+                       mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
+               }
+               if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+                       /*
+                        * For ARMv7 with TEX remapping,
+                        * - shared device is SXCB=1100
+                        * - nonshared device is SXCB=0100
+                        * - write combine device mem is SXCB=0001
+                        * (Uncached Normal memory)
+                        */
+                       mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
+                       mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
+                       mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+               } else if (cpu_is_xsc3()) {
+                       /*
+                        * For Xscale3,
+                        * - shared device is TEXCB=00101
+                        * - nonshared device is TEXCB=01000
+                        * - write combine device mem is TEXCB=00100
+                        * (Inner/Outer Uncacheable in xsc3 parlance)
+                        */
+                       mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
+                       mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
+                       mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+               } else {
+                       /*
+                        * For ARMv6 and ARMv7 without TEX remapping,
+                        * - shared device is TEXCB=00001
+                        * - nonshared device is TEXCB=01000
+                        * - write combine device mem is TEXCB=00100
+                        * (Uncached Normal in ARMv6 parlance).
+                        */
+                       mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
+                       mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
+                       mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+               }
+       } else {
+               /*
+                * On others, write combining is "Uncached/Buffered"
+                */
+               mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+       }
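
The SXCB/TEXCB encodings named in the comments above sit at fixed positions in a first-level section descriptor: TEX in bits [14:12] (PMD_SECT_TEX()), C in bit 3 and B in bit 2, with S in bit 16. A hypothetical debug helper that decodes a prot_sect value accordingly:

	static void __init print_texcb(const char *name, unsigned int prot_sect)
	{
		unsigned int tex = (prot_sect >> 12) & 7;	/* PMD_SECT_TEX(x) */
		unsigned int c   = (prot_sect >> 3) & 1;	/* PMD_SECT_CACHEABLE */
		unsigned int b   = (prot_sect >> 2) & 1;	/* PMD_SECT_BUFFERABLE */

		/* e.g. the xsc3 MT_DEVICE setting above prints TEXCB=00101 */
		printk(KERN_DEBUG "%s: TEXCB=%u%u%u%u%u\n", name,
		       (tex >> 2) & 1, (tex >> 1) & 1, tex & 1, c, b);
	}
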
+
+       /*
+        * Now deal with the memory-type mappings
+        */
        cp = &cache_policies[cachepolicy];
-       kern_pgprot = user_pgprot = cp->pte;
+       vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
+
+#ifndef CONFIG_SMP
+       /*
+        * Only use write-through for non-SMP systems
+        */
+       if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
+               vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
+#endif
 
        /*
         * Enable CPU-specific coherency if supported.
         * (Only available on XSC3 at the moment.)
         */
-       if (arch_is_coherent()) {
-               if (cpu_is_xsc3()) {
-                       mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-                       mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
-               }
-       }
+       if (arch_is_coherent() && cpu_is_xsc3())
+               mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 
        /*
         * ARMv6 and above have extended page tables.
@@ -316,42 +405,43 @@ static void __init build_mem_type_table(void)
                mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 
-               /*
-                * Mark the device area as "shared device"
-                */
-               mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
-               mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
-
 #ifdef CONFIG_SMP
                /*
                 * Mark memory with the "shared" attribute for SMP systems
                 */
                user_pgprot |= L_PTE_SHARED;
                kern_pgprot |= L_PTE_SHARED;
+               vecs_pgprot |= L_PTE_SHARED;
                mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+               mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 #endif
        }
 
+       /*
+        * Non-cacheable Normal - intended for memory areas that must
+        * not cause dirty cache line writebacks when used
+        */
+       if (cpu_arch >= CPU_ARCH_ARMv6) {
+               if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+                       /* Non-cacheable Normal is XCB = 001 */
+                       mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+                               PMD_SECT_BUFFERED;
+               } else {
+                       /* For both ARMv6 and non-TEX-remapping ARMv7 */
+                       mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+                               PMD_SECT_TEX(1);
+               }
+       } else {
+               mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+       }
+
        for (i = 0; i < 16; i++) {
                unsigned long v = pgprot_val(protection_map[i]);
-               v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
-               protection_map[i] = __pgprot(v);
+               protection_map[i] = __pgprot(v | user_pgprot);
        }
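
protection_map[] holds the sixteen precomputed pgprots indexed by the low vm_flags bits; since the base entries no longer carry cacheability bits under the L_PTE_MT_* scheme, the loop above can OR user_pgprot straight in instead of masking C/B bits out first. Illustrative lookup (standard core-mm behaviour, not part of this patch; the helper name is hypothetical):

	/* vm_get_page_prot()-style lookup: the low four vm_flags bits
	 * pick the entry, which now already carries user_pgprot. */
	static pgprot_t example_prot_for(unsigned long vm_flags)	/* hypothetical */
	{
		return protection_map[vm_flags &
				(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
	}
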
 
-       mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
-       mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;
-
-       if (cpu_arch >= CPU_ARCH_ARMv5) {
-#ifndef CONFIG_SMP
-               /*
-                * Only use write-through for non-SMP systems
-                */
-               mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
-               mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
-#endif
-       } else {
-               mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
-       }
+       mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
+       mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
 
        pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
        pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
@@ -399,8 +489,7 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 
        pte = pte_offset_kernel(pmd, addr);
        do {
-               set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
-                           type->prot_pte_ext);
+               set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
 }
@@ -567,19 +656,120 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
                create_mapping(io_desc + i);
 }
 
-static inline void prepare_page_table(struct meminfo *mi)
+static unsigned long __initdata vmalloc_reserve = SZ_128M;
+
+/*
+ * vmalloc=size forces the vmalloc area to be exactly 'size'
+ * bytes. This can be used to increase (or decrease) the vmalloc
+ * area - the default is 128MB.
+ */
+static void __init early_vmalloc(char **arg)
+{
+       vmalloc_reserve = memparse(*arg, arg);
+
+       if (vmalloc_reserve < SZ_16M) {
+               vmalloc_reserve = SZ_16M;
+               printk(KERN_WARNING
+                       "vmalloc area too small, limiting to %luMB\n",
+                       vmalloc_reserve >> 20);
+       }
+
+       if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
+               vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
+               printk(KERN_WARNING
+                       "vmalloc area is too big, limiting to %luMB\n",
+                       vmalloc_reserve >> 20);
+       }
+}
+__early_param("vmalloc=", early_vmalloc);
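
memparse() accepts the usual K/M/G suffixes, and the two checks clamp the result so that at least 16MB of vmalloc space and at least 32MB of lowmem above PAGE_OFFSET remain. A worked example under assumed, platform-dependent constants:

	/*
	 * Worked example, assuming VMALLOC_END = 0xff000000 and
	 * PAGE_OFFSET = 0xc0000000 (typical values, set per platform):
	 *
	 *   vmalloc=8M    -> raised to 16MB by the first check
	 *   vmalloc=256M  -> accepted; VMALLOC_MIN = 0xff000000 - 0x10000000
	 *                                          = 0xef000000
	 *   vmalloc=2G    -> clamped to 0xff000000 - (0xc0000000 + SZ_32M)
	 *                                          = 0x3d000000 (~976MB)
	 */
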
+
+#define VMALLOC_MIN    (void *)(VMALLOC_END - vmalloc_reserve)
+
+static void __init sanity_check_meminfo(void)
+{
+       int i, j, highmem = 0;
+
+       for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
+               struct membank *bank = &meminfo.bank[j];
+               *bank = meminfo.bank[i];
+
+#ifdef CONFIG_HIGHMEM
+               if (__va(bank->start) > VMALLOC_MIN ||
+                   __va(bank->start) < (void *)PAGE_OFFSET)
+                       highmem = 1;
+
+               bank->highmem = highmem;
+
+               /*
+                * Split those memory banks which are partially overlapping
+                * the vmalloc area, greatly simplifying things later.
+                */
+               if (__va(bank->start) < VMALLOC_MIN &&
+                   bank->size > VMALLOC_MIN - __va(bank->start)) {
+                       if (meminfo.nr_banks >= NR_BANKS) {
+                               printk(KERN_CRIT "NR_BANKS too low, "
+                                                "ignoring high memory\n");
+                       } else if (cache_is_vipt_aliasing()) {
+                               printk(KERN_CRIT "HIGHMEM is not yet supported "
+                                                "with VIPT aliasing cache, "
+                                                "ignoring high memory\n");
+                       } else {
+                               memmove(bank + 1, bank,
+                                       (meminfo.nr_banks - i) * sizeof(*bank));
+                               meminfo.nr_banks++;
+                               i++;
+                               bank[1].size -= VMALLOC_MIN - __va(bank->start);
+                               bank[1].start = __pa(VMALLOC_MIN - 1) + 1;
+                               bank[1].highmem = highmem = 1;
+                               j++;
+                       }
+                       bank->size = VMALLOC_MIN - __va(bank->start);
+               }
+#else
+               /*
+                * Check whether this memory bank would entirely overlap
+                * the vmalloc area.
+                */
+               if (__va(bank->start) >= VMALLOC_MIN ||
+                   __va(bank->start) < (void *)PAGE_OFFSET) {
+                       printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
+                              "(vmalloc region overlap).\n",
+                              bank->start, bank->start + bank->size - 1);
+                       continue;
+               }
+
+               /*
+                * Check whether this memory bank would partially overlap
+                * the vmalloc area.
+                */
+               if (__va(bank->start + bank->size) > VMALLOC_MIN ||
+                   __va(bank->start + bank->size) < __va(bank->start)) {
+                       unsigned long newsize = VMALLOC_MIN - __va(bank->start);
+                       printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
+                              "to -%.8lx (vmalloc region overlap).\n",
+                              bank->start, bank->start + bank->size - 1,
+                              bank->start + newsize - 1);
+                       bank->size = newsize;
+               }
+#endif
+               j++;
+       }
+       meminfo.nr_banks = j;
+}
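
With CONFIG_HIGHMEM, a bank straddling VMALLOC_MIN is split in place (the memmove() opens a slot, and both i and j step past it); without it, the bank is simply truncated. A worked example under assumed constants:

	/*
	 * Worked example (assumed values: PHYS_OFFSET = 0, PAGE_OFFSET =
	 * 0xc0000000, default vmalloc_reserve, VMALLOC_END = 0xff000000,
	 * so VMALLOC_MIN = 0xf7000000): a single 1GB bank straddles
	 * VMALLOC_MIN, so with CONFIG_HIGHMEM it is split in place:
	 *
	 *   before:  bank0  phys 0x00000000  size 0x40000000
	 *   after:   bank0  phys 0x00000000  size 0x37000000  (880MB lowmem)
	 *            bank1  phys 0x37000000  size 0x09000000  (144MB highmem)
	 *
	 * Without CONFIG_HIGHMEM the same bank is instead truncated to 880MB.
	 */
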
+
+static inline void prepare_page_table(void)
 {
        unsigned long addr;
 
        /*
         * Clear out all the mappings below the kernel image.
         */
-       for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)
+       for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));
 
 #ifdef CONFIG_XIP_KERNEL
        /* The XIP kernel is mapped in the module area -- skip over it */
-       addr = ((unsigned long)&_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
+       addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
 #endif
        for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));
@@ -588,7 +778,7 @@ static inline void prepare_page_table(struct meminfo *mi)
         * Clear out all the kernel space mappings, except for the first
         * memory bank, up to the end of the vmalloc region.
         */
-       for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size);
+       for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
             addr < VMALLOC_END; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));
 }
@@ -605,9 +795,11 @@ void __init reserve_node_zero(pg_data_t *pgdat)
         * Note that this can only be in node 0.
         */
 #ifdef CONFIG_XIP_KERNEL
-       reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
+       reserve_bootmem_node(pgdat, __pa(_data), _end - _data,
+                       BOOTMEM_DEFAULT);
 #else
-       reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
+       reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext,
+                       BOOTMEM_DEFAULT);
 #endif
 
        /*
@@ -615,7 +807,7 @@ void __init reserve_node_zero(pg_data_t *pgdat)
         * and can only be in node 0.
         */
        reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
-                            PTRS_PER_PGD * sizeof(pgd_t));
+                            PTRS_PER_PGD * sizeof(pgd_t), BOOTMEM_DEFAULT);
 
        /*
         * Hmm... This should go elsewhere, but we really really need to
@@ -638,8 +830,42 @@ void __init reserve_node_zero(pg_data_t *pgdat)
        /* H1940 and RX3715 need to reserve this for suspend */
 
        if (machine_is_h1940() || machine_is_rx3715()) {
-               reserve_bootmem_node(pgdat, 0x30003000, 0x1000);
-               reserve_bootmem_node(pgdat, 0x30081000, 0x1000);
+               reserve_bootmem_node(pgdat, 0x30003000, 0x1000,
+                               BOOTMEM_DEFAULT);
+               reserve_bootmem_node(pgdat, 0x30081000, 0x1000,
+                               BOOTMEM_DEFAULT);
+       }
+
+       if (machine_is_palmld() || machine_is_palmtx()) {
+               reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
+                               BOOTMEM_EXCLUSIVE);
+               reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
+                               BOOTMEM_EXCLUSIVE);
+       }
+
+       if (machine_is_treo680()) {
+               reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
+                               BOOTMEM_EXCLUSIVE);
+               reserve_bootmem_node(pgdat, 0xa2000000, 0x1000,
+                               BOOTMEM_EXCLUSIVE);
+       }
+
+       if (machine_is_palmt5())
+               reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
+                               BOOTMEM_EXCLUSIVE);
+
+       /*
+        * U300 - This platform family can share physical memory
+        * between two ARM CPUs, one running Linux and the other
+        * running another OS.
+        */
+       if (machine_is_u300()) {
+#ifdef CONFIG_MACH_U300_SINGLE_RAM
+#if ((CONFIG_MACH_U300_ACCESS_MEM_SIZE & 1) == 1) &&   \
+       CONFIG_MACH_U300_2MB_ALIGNMENT_FIX
+               res_size = 0x00100000;
+#endif
+#endif
        }
 
 #ifdef CONFIG_SA1111
@@ -650,7 +876,8 @@ void __init reserve_node_zero(pg_data_t *pgdat)
        res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
 #endif
        if (res_size)
-               reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size);
+               reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size,
+                               BOOTMEM_DEFAULT);
 }
 
 /*
@@ -670,7 +897,6 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
         * Allocate the vector page early.
         */
        vectors = alloc_bootmem_low_pages(PAGE_SIZE);
-       BUG_ON(!vectors);
 
        for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));
@@ -681,8 +907,8 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
         */
 #ifdef CONFIG_XIP_KERNEL
        map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
-       map.virtual = MODULE_START;
-       map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
+       map.virtual = MODULES_VADDR;
+       map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
        map.type = MT_ROM;
        create_mapping(&map);
 #endif
@@ -738,26 +964,39 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
        flush_cache_all();
 }
 
+static void __init kmap_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+       pmd_t *pmd = pmd_off_k(PKMAP_BASE);
+       pte_t *pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
+       BUG_ON(!pmd_none(*pmd) || !pte);
+       __pmd_populate(pmd, __pa(pte) | _PAGE_KERNEL_TABLE);
+       pkmap_page_table = pte + PTRS_PER_PTE;
+#endif
+}
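
kmap_init() wires up the single pmd covering the PKMAP window. A hedged sketch of the resulting layout (PKMAP_BASE and the half-table split come from the ARM highmem and pgtable headers, not this hunk):

	/*
	 * With 4KB pages and PTRS_PER_PTE = 512, the pkmap window spans
	 * 512 * 4KB = 2MB starting at PKMAP_BASE, one slot per pte:
	 *
	 *   vaddr = PKMAP_BASE + (nr << PAGE_SHIFT);   /* nr = pkmap slot */
	 *
	 * The 2 * PTRS_PER_PTE allocation holds the hardware page tables
	 * and the Linux shadow copies side by side; pkmap_page_table points
	 * at the half that kmap_high() walks when (un)mapping highmem pages.
	 */
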
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
  */
-void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
+void __init paging_init(struct machine_desc *mdesc)
 {
        void *zero_page;
 
        build_mem_type_table();
-       prepare_page_table(mi);
-       bootmem_init(mi);
+       sanity_check_meminfo();
+       prepare_page_table();
+       bootmem_init();
        devicemaps_init(mdesc);
+       kmap_init();
 
        top_pmd = pmd_off_k(0xffff0000);
 
        /*
-        * allocate the zero page.  Note that we count on this going ok.
+        * allocate the zero page.  Note that this always succeeds and
+        * returns a zeroed result.
         */
        zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
-       memzero(zero_page, PAGE_SIZE);
        empty_zero_page = virt_to_page(zero_page);
        flush_dcache_page(empty_zero_page);
 }