*
*/
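+/* toggle to "#define DEBUG" to enable the pr_debug() calls in this file */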
+#undef DEBUG
+
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/lmb.h>
+#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include "mmu_decl.h"
+#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif
#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
+#endif /* CONFIG_PPC_STD_MMU_64 */
-/* max amount of RAM to use */
-unsigned long __max_memory;
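+/* Physical start of memory and of the kernel image; memstart_addr is
+ * initialised to ~0 as a "not known yet" sentinel, and both are filled
+ * in during early boot */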
+phys_addr_t memstart_addr = ~0;
+phys_addr_t kernstart_addr;
void free_initmem(void)
{
}
#endif
-#ifdef CONFIG_PROC_KCORE
-static struct kcore_list kcore_vmem;
-
-static int __init setup_kcore(void)
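+/* kmem_cache constructors for the pagetable caches below: freshly
+ * allocated tables must start out zeroed, i.e. with no valid entries */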
+static void pgd_ctor(void *addr)
{
- int i;
-
- for (i=0; i < lmb.memory.cnt; i++) {
- unsigned long base, size;
- struct kcore_list *kcore_mem;
-
- base = lmb.memory.region[i].base;
- size = lmb.memory.region[i].size;
-
- /* GFP_ATOMIC to avoid might_sleep warnings during boot */
- kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
- if (!kcore_mem)
- panic("%s: kmalloc failed\n", __FUNCTION__);
-
- kclist_add(kcore_mem, __va(base), size);
- }
-
- kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
+ memset(addr, 0, PGD_TABLE_SIZE);
+}
- return 0;
+static void pmd_ctor(void *addr)
+{
+ memset(addr, 0, PMD_TABLE_SIZE);
}
-module_init(setup_kcore);
-#endif
-static void zero_ctor(struct kmem_cache *cache, void *addr)
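+/* One cache per pagetable index size, accessed via PGT_CACHE(shift) */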
+struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
+
+/*
+ * Create a kmem_cache for pagetables. This is not used for PTE
+ * pages - they're linked to struct page, come from the normal free
+ * pages pool and have a different entry size (see real_pte_t) to
+ * everything else. Caches created by this function are used for all
+ * the higher level pagetables, and for hugepage pagetables.
+ */
+void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
- memset(addr, 0, kmem_cache_size(cache));
+ char *name;
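+ /* a table with index size "shift" holds 2^shift pointer-sized entries */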
+ unsigned long table_size = sizeof(void *) << shift;
+ unsigned long align = table_size;
+
+ /* When batching pgtable pointers for RCU freeing, we store
+ * the index size in the low bits. Table alignment must be
+ * big enough to fit it.
+ *
+ * Likewise, hugepage pagetable pointers contain a (different)
+ * shift value in the low bits. All tables must be aligned so
+ * as to leave enough 0 bits in the address to contain it. */
+ unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
+ HUGEPD_SHIFT_MASK + 1);
+ struct kmem_cache *new;
+
+ /* It would be nice if this was a BUILD_BUG_ON(), but at the
+ * moment, gcc doesn't seem to recognize is_power_of_2 as a
+ * constant expression; so much for that. */
+ BUG_ON(!is_power_of_2(minalign));
+ BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));
+
+ if (PGT_CACHE(shift))
+ return; /* Already have a cache of this size */
+
+ align = max_t(unsigned long, align, minalign);
+ name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
+ new = kmem_cache_create(name, table_size, align, 0, ctor);
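+ /* Slab constructors run only when a slab page is first populated,
+ * so tables must be returned to the cache already cleared */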
+ PGT_CACHE(shift) = new;
+
+ pr_debug("Allocated pgtable cache for order %d\n", shift);
}
-static const unsigned int pgtable_cache_size[2] = {
- PGD_TABLE_SIZE, PMD_TABLE_SIZE
-};
-static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
-#ifdef CONFIG_PPC_64K_PAGES
- "pgd_cache", "pmd_cache",
-#else
- "pgd_cache", "pud_pmd_cache",
-#endif /* CONFIG_PPC_64K_PAGES */
-};
-
-#ifdef CONFIG_HUGETLB_PAGE
-/* Hugepages need one extra cache, initialized in hugetlbpage.c. We
- * can't put into the tables above, because HPAGE_SHIFT is not compile
- * time constant. */
-struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
-#else
-struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
-#endif
void pgtable_cache_init(void)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
- int size = pgtable_cache_size[i];
- const char *name = pgtable_cache_name[i];
-
- pr_debug("Allocating page table cache %s (#%d) "
- "for size: %08x...\n", name, i, size);
- pgtable_cache[i] = kmem_cache_create(name,
- size, size,
- SLAB_PANIC,
- zero_ctor);
- }
+ pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
+ pgtable_cache_add(PMD_INDEX_SIZE, pmd_ctor);
+ if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_INDEX_SIZE))
+ panic("Couldn't allocate pgtable caches");
+
+ /* In all current configs, when the PUD index exists it's the
+ * same size as either the pgd or pmd index. Verify that the
+ * initialization above has also created a PUD cache. This
+ * will need re-examination if we add new possibilities for
+ * the pagetable layout. */
+ BUG_ON(PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE));
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
* do this by hand as the proffered address may not be correctly aligned.
* Subtraction of non-aligned pointers produces undefined results.
*/
-unsigned long __meminit vmemmap_section_start(unsigned long page)
+static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
unsigned long offset = page - ((unsigned long)(vmemmap));
* which overlaps this vmemmap page is initialised then this page is
* initialised already.
*/
-int __meminit vmemmap_populated(unsigned long start, int page_size)
+static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
unsigned long end = start + page_size;
return 0;
}
+/* On hash-based CPUs, the vmemmap is bolted in the hash table.
+ *
+ * On Book3E CPUs, the vmemmap is currently mapped in the top half of
+ * the vmalloc space using normal page tables, though the size of
+ * pages encoded in the PTEs can be different.
+ */
+
+#ifdef CONFIG_PPC_BOOK3E
+static void __meminit vmemmap_create_mapping(unsigned long start,
+ unsigned long page_size,
+ unsigned long phys)
+{
+ /* Create a PTE encoding without page size */
+ unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
+ _PAGE_KERNEL_RW;
+
+ /* PTEs only contain page size encodings up to 32M */
+ BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);
+
+ /* Encode the size in the PTE */
+ flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;
+
+ /* Insert one PTE for each PAGE_SIZE chunk of the area. Note that
+ * we don't increment phys: every PTE carries the large page size,
+ * so its physical address must have the low bits clear, and they
+ * all encode the same large page
+ */
+ for (i = 0; i < page_size; i += PAGE_SIZE)
+ BUG_ON(map_kernel_page(start + i, phys, flags));
+}
+#else /* CONFIG_PPC_BOOK3E */
+static void __meminit vmemmap_create_mapping(unsigned long start,
+ unsigned long page_size,
+ unsigned long phys)
+{
+ int mapped = htab_bolt_mapping(start, start + page_size, phys,
+ PAGE_KERNEL, mmu_vmemmap_psize,
+ mmu_kernel_ssize);
+ BUG_ON(mapped < 0);
+}
+#endif /* CONFIG_PPC_BOOK3E */
+
int __meminit vmemmap_populate(struct page *start_page,
- unsigned long nr_pages, int node)
+ unsigned long nr_pages, int node)
{
- unsigned long mode_rw;
unsigned long start = (unsigned long)start_page;
unsigned long end = (unsigned long)(start_page + nr_pages);
- unsigned long page_size = 1 << mmu_psize_defs[mmu_linear_psize].shift;
-
- mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
+ unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
- /* Align to the page size of the linear mapping. */
+ /* Align to the vmemmap page size */
start = _ALIGN_DOWN(start, page_size);
+ pr_debug("vmemmap_populate page %p, %ld pages, node %d\n",
+ start_page, nr_pages, node);
+ pr_debug(" -> map %lx..%lx\n", start, end);
+
for (; start < end; start += page_size) {
- int mapped;
void *p;
if (vmemmap_populated(start, page_size))
if (!p)
return -ENOMEM;
- pr_debug("vmemmap %08lx allocated at %p, physical %08lx.\n",
- start, p, __pa(p));
+ pr_debug(" * %016lx..%016lx allocated at %p\n",
+ start, start + page_size, p);
- mapped = htab_bolt_mapping(start, start + page_size,
- __pa(p), mode_rw, mmu_linear_psize,
- mmu_kernel_ssize);
- BUG_ON(mapped < 0);
+ vmemmap_create_mapping(start, page_size, __pa(p));
}
return 0;
}
-#endif
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */