#include <asm/cputype.h>
#include <asm/mach-types.h>
#include <asm/sections.h>
+#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/sizes.h>
+#include <asm/smp_plat.h>
#include <asm/tlb.h>
+#include <asm/highmem.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
}
if (i == ARRAY_SIZE(cache_policies))
printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
+	/*
+	 * This restriction is partly to do with the way we boot; it is
+	 * unpredictable to have memory mapped using two different sets of
+	 * memory attributes (shared, type, and cache attribs). We cannot
+	 * change these attributes once the initial assembly has set up the
+	 * page tables.
+	 */
if (cpu_architecture() >= CPU_ARCH_ARMv6) {
printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
cachepolicy = CPOLICY_WRITEBACK;
.prot_sect = PMD_TYPE_SECT,
.domain = DOMAIN_KERNEL,
},
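+	/* Normal memory; cacheability is fixed up in build_mem_type_table() */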
+ [MT_MEMORY_NONCACHED] = {
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+ .domain = DOMAIN_KERNEL,
+ },
};
const struct mem_type *get_mem_type(unsigned int type)
{
return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
+EXPORT_SYMBOL(get_mem_type);
/*
* Adjust the PMD section entries according to the CPU in use.
kern_pgprot |= L_PTE_SHARED;
vecs_pgprot |= L_PTE_SHARED;
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
#endif
}
+ /*
+ * Non-cacheable Normal - intended for memory areas that must
+ * not cause dirty cache line writebacks when used
+ */
+ if (cpu_arch >= CPU_ARCH_ARMv6) {
+ if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+ /* Non-cacheable Normal is XCB = 001 */
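+			/*
+			 * With TEX remap enabled, {TEX[0],C,B} index the
+			 * PRRR/NMRR attribute registers rather than encoding
+			 * the memory type directly; Linux programs entry 001
+			 * as Normal Non-cacheable.
+			 */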
+ mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+ PMD_SECT_BUFFERED;
+ } else {
+ /* For both ARMv6 and non-TEX-remapping ARMv7 */
+ mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+ PMD_SECT_TEX(1);
+ }
+ } else {
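+		/* pre-ARMv6: uncached, bufferable (C=0, B=1) is the closest match */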
+ mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+ }
+
for (i = 0; i < 16; i++) {
unsigned long v = pgprot_val(protection_map[i]);
protection_map[i] = __pgprot(v | user_pgprot);
static void __init sanity_check_meminfo(void)
{
- int i, j;
+ int i, j, highmem = 0;
for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
struct membank *bank = &meminfo.bank[j];
*bank = meminfo.bank[i];
#ifdef CONFIG_HIGHMEM
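+		/*
+		 * Banks that cannot live in the direct mapping (their
+		 * virtual address would be above VMALLOC_MIN or wrap below
+		 * PAGE_OFFSET) are highmem, as is every bank after the
+		 * first such one.
+		 */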
+ if (__va(bank->start) > VMALLOC_MIN ||
+ __va(bank->start) < (void *)PAGE_OFFSET)
+ highmem = 1;
+
+ bank->highmem = highmem;
+
/*
* Split those memory banks which are partially overlapping
		 * the vmalloc area, greatly simplifying things later.
i++;
bank[1].size -= VMALLOC_MIN - __va(bank->start);
bank[1].start = __pa(VMALLOC_MIN - 1) + 1;
+ bank[1].highmem = highmem = 1;
j++;
}
bank->size = VMALLOC_MIN - __va(bank->start);
}
#else
+ bank->highmem = highmem;
+
/*
* Check whether this memory bank would entirely overlap
* the vmalloc area.
*/
- if (__va(bank->start) >= VMALLOC_MIN) {
+ if (__va(bank->start) >= VMALLOC_MIN ||
+ __va(bank->start) < (void *)PAGE_OFFSET) {
printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
"(vmalloc region overlap).\n",
bank->start, bank->start + bank->size - 1);
#endif
j++;
}
+#ifdef CONFIG_HIGHMEM
+ if (highmem) {
+ const char *reason = NULL;
+
+ if (cache_is_vipt_aliasing()) {
+ /*
+ * Interactions between kmap and other mappings
+ * make highmem support with aliasing VIPT caches
+ * rather difficult.
+ */
+ reason = "with VIPT aliasing cache";
+#ifdef CONFIG_SMP
+ } else if (tlb_ops_need_broadcast()) {
+ /*
+			 * kmap_high needs to occasionally flush TLB entries;
+			 * however, if the TLB flushes need to be broadcast
+			 * we may deadlock:
+ * kmap_high(irqs off)->flush_all_zero_pkmaps->
+ * flush_tlb_kernel_range->smp_call_function_many
+ * (must not be called with irqs off)
+ */
+ reason = "without hardware TLB ops broadcasting";
+#endif
+ }
+ if (reason) {
+ printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
+ reason);
+ while (j > 0 && meminfo.bank[j - 1].highmem)
+ j--;
+ }
+ }
+#endif
meminfo.nr_banks = j;
}
BOOTMEM_DEFAULT);
}
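+	/*
+	 * Some Palm PXA machines need individual pages of RAM kept out
+	 * of the kernel's hands, so claim them exclusively here.
+	 */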
+ if (machine_is_palmld() || machine_is_palmtx()) {
+ reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
+ BOOTMEM_EXCLUSIVE);
+ reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
+ BOOTMEM_EXCLUSIVE);
+ }
+
+ if (machine_is_treo680()) {
+ reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
+ BOOTMEM_EXCLUSIVE);
+ reserve_bootmem_node(pgdat, 0xa2000000, 0x1000,
+ BOOTMEM_EXCLUSIVE);
+ }
+
+ if (machine_is_palmt5())
+ reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
+ BOOTMEM_EXCLUSIVE);
+
+ /*
+ * U300 - This platform family can share physical memory
+	 * between two ARM CPUs, one running Linux and the other
+ * running another OS.
+ */
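+	/*
+	 * If the single shared RAM bank is an odd number of MB and the
+	 * 2MB alignment fix is in use, set an extra 1 MB apart.
+	 */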
+ if (machine_is_u300()) {
+#ifdef CONFIG_MACH_U300_SINGLE_RAM
+#if ((CONFIG_MACH_U300_ACCESS_MEM_SIZE & 1) == 1) && \
+ CONFIG_MACH_U300_2MB_ALIGNMENT_FIX
+ res_size = 0x00100000;
+#endif
+#endif
+ }
+
#ifdef CONFIG_SA1111
/*
* Because of the SA1111 DMA bug, we want to preserve our
flush_cache_all();
}
+static void __init kmap_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+ pmd_t *pmd = pmd_off_k(PKMAP_BASE);
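+	/*
+	 * One page of PTEs covers the PKMAP area: the two hardware
+	 * page tables fill the first half, their Linux counterparts
+	 * the second, hence 2 * PTRS_PER_PTE entries.
+	 */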
+ pte_t *pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
+ BUG_ON(!pmd_none(*pmd) || !pte);
+ __pmd_populate(pmd, __pa(pte) | _PAGE_KERNEL_TABLE);
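+	/* kmap operates on the Linux view in the second half */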
+ pkmap_page_table = pte + PTRS_PER_PTE;
+#endif
+}
+
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
prepare_page_table();
bootmem_init();
devicemaps_init(mdesc);
+ kmap_init();
top_pmd = pmd_off_k(0xffff0000);