sh: enable LMB region setup via machvec.
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 1589466..9c5400b 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -2,7 +2,7 @@
  * linux/arch/sh/mm/init.c
  *
  *  Copyright (C) 1999  Niibe Yutaka
- *  Copyright (C) 2002 - 2007  Paul Mundt
+ *  Copyright (C) 2002 - 2010  Paul Mundt
  *
  *  Based on linux/arch/i386/mm/init.c:
  *   Copyright (C) 1995  Linus Torvalds
 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/init.h>
+#include <linux/gfp.h>
 #include <linux/bootmem.h>
 #include <linux/proc_fs.h>
 #include <linux/pagemap.h>
 #include <linux/percpu.h>
 #include <linux/io.h>
+#include <linux/lmb.h>
+#include <linux/dma-mapping.h>
 #include <asm/mmu_context.h>
 #include <asm/tlb.h>
 #include <asm/cacheflush.h>
+#include <asm/sections.h>
 #include <asm/cache.h>
+#include <asm/sizes.h>
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
-void (*copy_page)(void *from, void *to);
-void (*clear_page)(void *to);
-
-void show_mem(void)
+void __init generic_mem_init(void)
 {
-       int total = 0, reserved = 0, free = 0;
-       int shared = 0, cached = 0, slab = 0;
-       pg_data_t *pgdat;
-
-       printk("Mem-info:\n");
-       show_free_areas();
-
-       for_each_online_pgdat(pgdat) {
-               unsigned long flags, i;
-
-               pgdat_resize_lock(pgdat, &flags);
-               for (i = 0; i < pgdat->node_spanned_pages; i++) {
-                       struct page *page = pgdat_page_nr(pgdat, i);
-                       total++;
-                       if (PageReserved(page))
-                               reserved++;
-                       else if (PageSwapCache(page))
-                               cached++;
-                       else if (PageSlab(page))
-                               slab++;
-                       else if (!page_count(page))
-                               free++;
-                       else
-                               shared += page_count(page) - 1;
-               }
-               pgdat_resize_unlock(pgdat, &flags);
-       }
-
-       printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
-       printk("%d pages of RAM\n", total);
-       printk("%d free pages\n", free);
-       printk("%d reserved pages\n", reserved);
-       printk("%d slab pages\n", slab);
-       printk("%d pages shared\n", shared);
-       printk("%d pages swap cached\n", cached);
-       printk(KERN_INFO "Total of %ld pages in page table cache\n",
-              quicklist_total_size());
+       lmb_add(__MEMORY_START, __MEMORY_SIZE);
 }
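
This is the heart of the change: the default memory setup is reduced to a single lmb_add() of the board's main window, and because the call is now routed through the machine vector, a platform with a more complicated map can register its banks and carve-outs itself. A minimal sketch of such an override, assuming a board-side hook (the hook name, bank layout, and sizes are all illustrative, not part of this patch):

	/*
	 * Hypothetical board override -- not from this patch.  The hook
	 * name, bank layout and sizes are assumptions for illustration.
	 */
	static void __init myboard_mem_init(void)
	{
		/* Primary bank: 64 MiB at the standard SH memory base. */
		lmb_add(__MEMORY_START, SZ_64M);

		/* A second, discontiguous bank on this imaginary board. */
		lmb_add(__MEMORY_START + SZ_128M, SZ_32M);

		/* Keep the bootloader's framebuffer out of the allocator. */
		lmb_reserve(__MEMORY_START + SZ_128M, SZ_1M);
	}

The SZ_* constants come from asm/sizes.h, which the patch adds to the include list above for exactly this kind of use.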
 
 #ifdef CONFIG_MMU
-static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
+static pte_t *__get_pte_phys(unsigned long addr)
 {
        pgd_t *pgd;
        pud_t *pud;
@@ -78,47 +44,55 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd)) {
                pgd_ERROR(*pgd);
-               return;
+               return NULL;
        }
 
        pud = pud_alloc(NULL, pgd, addr);
        if (unlikely(!pud)) {
                pud_ERROR(*pud);
-               return;
+               return NULL;
        }
 
        pmd = pmd_alloc(NULL, pud, addr);
        if (unlikely(!pmd)) {
                pmd_ERROR(*pmd);
-               return;
+               return NULL;
        }
 
        pte = pte_offset_kernel(pmd, addr);
+       return pte;
+}
+
+static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
+{
+       pte_t *pte;
+
+       pte = __get_pte_phys(addr);
        if (!pte_none(*pte)) {
                pte_ERROR(*pte);
                return;
        }
 
        set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
+       local_flush_tlb_one(get_asid(), addr);
 
-       flush_tlb_one(get_asid(), addr);
+       if (pgprot_val(prot) & _PAGE_WIRED)
+               tlb_wire_entry(NULL, addr, *pte);
+}
+
+static void clear_pte_phys(unsigned long addr, pgprot_t prot)
+{
+       pte_t *pte;
+
+       pte = __get_pte_phys(addr);
+
+       if (pgprot_val(prot) & _PAGE_WIRED)
+               tlb_unwire_entry();
+
+       set_pte(pte, pfn_pte(0, __pgprot(0)));
+       local_flush_tlb_one(get_asid(), addr);
 }
 
-/*
- * As a performance optimization, other platforms preserve the fixmap mapping
- * across a context switch, we don't presently do this, but this could be done
- * in a similar fashion as to the wired TLB interface that sh64 uses (by way
- * of the memory mapped UTLB configuration) -- this unfortunately forces us to
- * give up a TLB entry for each mapping we want to preserve. While this may be
- * viable for a small number of fixmaps, it's not particularly useful for
- * everything and needs to be carefully evaluated. (ie, we may want this for
- * the vsyscall page).
- *
- * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass
- * in at __set_fixmap() time to determine the appropriate behavior to follow.
- *
- *                                      -- PFM.
- */
 void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 {
        unsigned long address = __fix_to_virt(idx);
@@ -130,18 +104,67 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 
        set_pte_phys(address, phys, prot);
 }
-#endif /* CONFIG_MMU */
 
-/* References to section boundaries */
+void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
+{
+       unsigned long address = __fix_to_virt(idx);
+
+       if (idx >= __end_of_fixed_addresses) {
+               BUG();
+               return;
+       }
 
-extern char _text, _etext, _edata, __bss_start, _end;
-extern char __init_begin, __init_end;
+       clear_pte_phys(address, prot);
+}
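
The removed XXX comment above the old __set_fixmap() had proposed exactly this design: a _PAGE_WIRED flag supplied at __set_fixmap() time. set_pte_phys() now wires the translation into the TLB via tlb_wire_entry() when the flag is set, and the new clear_pte_phys()/__clear_fixmap() path unwires it again before tearing the PTE down. Note that the caller must pass the same prot to __clear_fixmap() so the unwire check sees the flag. A sketch of the pairing (the fixmap index and phys value are made up, and this assumes the CPU family implements the tlb_wire_entry()/tlb_unwire_entry() pair):

	pgprot_t prot = __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_WIRED);

	__set_fixmap(FIX_EXAMPLE, phys, prot);	/* map and wire the TLB entry */
	/* ... the translation cannot be evicted while wired ... */
	__clear_fixmap(FIX_EXAMPLE, prot);	/* unwire, then tear down the PTE */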
+
+void __init page_table_range_init(unsigned long start, unsigned long end,
+                                        pgd_t *pgd_base)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+       int i, j, k;
+       unsigned long vaddr;
+
+       vaddr = start;
+       i = __pgd_offset(vaddr);
+       j = __pud_offset(vaddr);
+       k = __pmd_offset(vaddr);
+       pgd = pgd_base + i;
+
+       for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
+               pud = (pud_t *)pgd;
+               for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+#ifdef __PAGETABLE_PMD_FOLDED
+                       pmd = (pmd_t *)pud;
+#else
+                       pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+                       pud_populate(&init_mm, pud, pmd);
+                       pmd += k;
+#endif
+                       for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
+                               if (pmd_none(*pmd)) {
+                                       pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+                                       pmd_populate_kernel(&init_mm, pmd, pte);
+                                       BUG_ON(pte != pte_offset_kernel(pmd, 0));
+                               }
+                               vaddr += PMD_SIZE;
+                       }
+                       k = 0;
+               }
+               j = 0;
+       }
+}
+#endif /* CONFIG_MMU */
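
page_table_range_init() walks [start, end) in PMD_SIZE steps and bootmem-allocates any missing intermediate tables up front, so that later __set_fixmap() calls only ever write a PTE and never allocate. The saved offsets j and k are consumed once and then reset to zero, so only the first iteration starts mid-table; on a folded configuration the pud/pmd casts collapse the walk to a single level. A worked example under illustrative assumptions:

	/*
	 * Worked example, all values illustrative: 4 KiB pages with the
	 * PUD and PMD levels folded, so PMD_SIZE == 4 MiB and one PTE
	 * page (1024 entries) backs exactly one PMD span.
	 *
	 *   start = 0xbfc00000, end = 0xc0000000  ->  a single span
	 *   i = __pgd_offset(0xbfc00000) = 0xbfc00000 >> 22 = 767
	 *
	 * The loop finds pmd_none(*pmd) true once, pulls one zeroed page
	 * from alloc_bootmem_low_pages(), and hooks it in with
	 * pmd_populate_kernel(); __set_fixmap() later only fills PTEs.
	 */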
 
 /*
  * paging_init() sets up the page tables
  */
 void __init paging_init(void)
 {
+       unsigned long max_zone_pfns[MAX_NR_ZONES];
+       unsigned long vaddr, end;
        int nid;
 
        /* We don't need to map the kernel through the TLB, as
@@ -153,32 +176,56 @@ void __init paging_init(void)
         * check for a null value. */
        set_TTB(swapper_pg_dir);
 
+       /*
+        * Populate the relevant portions of swapper_pg_dir so that
+        * we can use the fixmap entries without calling kmalloc.
+        * pte's will be filled in by __set_fixmap().
+        */
+       vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+       end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
+       page_table_range_init(vaddr, end, swapper_pg_dir);
+
+       kmap_coherent_init();
+
+       memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+
        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
-               unsigned long max_zone_pfns[MAX_NR_ZONES];
                unsigned long low, start_pfn;
 
-               memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-
-               start_pfn = pgdat->bdata->node_boot_start >> PAGE_SHIFT;
+               start_pfn = pgdat->bdata->node_min_pfn;
                low = pgdat->bdata->node_low_pfn;
 
-               max_zone_pfns[ZONE_NORMAL] = low;
+               if (max_zone_pfns[ZONE_NORMAL] < low)
+                       max_zone_pfns[ZONE_NORMAL] = low;
 
                printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
                       nid, start_pfn, low);
-
-               free_area_init_nodes(max_zone_pfns);
        }
+
+       free_area_init_nodes(max_zone_pfns);
+}
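
Besides pre-populating the fixmap portion of swapper_pg_dir (rounded out to whole PMD spans) before any fixmap user runs, this hunk fixes a real bug: the old loop called free_area_init_nodes() once per node, re-initializing the zone lists on every iteration with only that node's limits. The zone ceiling is now accumulated across all online nodes first. Condensed, the corrected shape is:

	/* Condensed sketch of the accumulate-then-init pattern. */
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		unsigned long low = NODE_DATA(nid)->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;
	}

	free_area_init_nodes(max_zone_pfns);	/* once, with the global ceiling */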
+
+/*
+ * Early initialization for any I/O MMUs we might have.
+ */
+static void __init iommu_init(void)
+{
+       no_iommu_init();
 }
 
-static struct kcore_list kcore_mem, kcore_vmalloc;
+unsigned int mem_init_done = 0;
 
 void __init mem_init(void)
 {
        int codesize, datasize, initsize;
        int nid;
 
+       iommu_init();
+
+       num_physpages = 0;
+       high_memory = NULL;
+
        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
                unsigned long node_pages = 0;
@@ -191,49 +238,76 @@ void __init mem_init(void)
 
                totalram_pages += node_pages;
 
-               node_high_memory = (void *)((pgdat->node_start_pfn +
-                                            pgdat->node_spanned_pages) <<
-                                               PAGE_SHIFT);
+               node_high_memory = (void *)__va((pgdat->node_start_pfn +
+                                                pgdat->node_spanned_pages) <<
+                                                PAGE_SHIFT);
                if (node_high_memory > high_memory)
                        high_memory = node_high_memory;
        }
 
+       /* Set this up early, so we can take care of the zero page */
+       cpu_cache_init();
+
        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);
        __flush_wback_region(empty_zero_page, PAGE_SIZE);
 
-       /*
-        * Setup wrappers for copy/clear_page(), these will get overridden
-        * later in the boot process if a better method is available.
-        */
-#ifdef CONFIG_MMU
-       copy_page = copy_page_slow;
-       clear_page = clear_page_slow;
-#else
-       copy_page = copy_page_nommu;
-       clear_page = clear_page_nommu;
-#endif
+       vsyscall_init();
 
        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
        initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
 
-       kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
-       kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
-                  VMALLOC_END - VMALLOC_START);
-
        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
               "%dk data, %dk init)\n",
-               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-               totalram_pages << (PAGE_SHIFT-10),
+               nr_free_pages() << (PAGE_SHIFT-10),
+               num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                datasize >> 10,
                initsize >> 10);
 
-       p3_cache_init();
+       printk(KERN_INFO "virtual kernel memory layout:\n"
+               "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#ifdef CONFIG_HIGHMEM
+               "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#endif
+               "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+               "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
+#ifdef CONFIG_UNCACHED_MAPPING
+               "            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
+#endif
+               "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+               "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+               "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
+               FIXADDR_START, FIXADDR_TOP,
+               (FIXADDR_TOP - FIXADDR_START) >> 10,
+
+#ifdef CONFIG_HIGHMEM
+               PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
+               (LAST_PKMAP*PAGE_SIZE) >> 10,
+#endif
 
-       /* Initialize the vDSO */
-       vsyscall_init();
+               (unsigned long)VMALLOC_START, VMALLOC_END,
+               (VMALLOC_END - VMALLOC_START) >> 20,
+
+               (unsigned long)memory_start, (unsigned long)high_memory,
+               ((unsigned long)high_memory - (unsigned long)memory_start) >> 20,
+
+#ifdef CONFIG_UNCACHED_MAPPING
+               uncached_start, uncached_end, uncached_size >> 20,
+#endif
+
+               (unsigned long)&__init_begin, (unsigned long)&__init_end,
+               ((unsigned long)&__init_end -
+                (unsigned long)&__init_begin) >> 10,
+
+               (unsigned long)&_etext, (unsigned long)&_edata,
+               ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
+
+               (unsigned long)&_text, (unsigned long)&_etext,
+               ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
+
+       mem_init_done = 1;
 }
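
mem_init() now runs iommu_init() first, brings the CPU caches up before touching the zero page, prints a full virtual-memory layout banner in place of the old kcore bookkeeping, and finally raises mem_init_done. The flag gives later code a cheap test for whether the page allocator is live or bootmem must still be used; a hypothetical consumer of the sort seen on other architectures (the helper name is illustrative, not part of this patch):

	/*
	 * Hypothetical helper, sketching the mem_init_done idiom; not
	 * code from this patch.
	 */
	static void *early_or_late_page(void)
	{
		if (mem_init_done)	/* buddy allocator is live */
			return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

		/* still early boot: fall back to the bootmem allocator */
		return alloc_bootmem_pages(PAGE_SIZE);
	}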
 
 void free_initmem(void)
@@ -247,7 +321,9 @@ void free_initmem(void)
                free_page(addr);
                totalram_pages++;
        }
-       printk ("Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
+       printk("Freeing unused kernel memory: %ldk freed\n",
+              ((unsigned long)&__init_end -
+               (unsigned long)&__init_begin) >> 10);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -260,6 +336,37 @@ void free_initrd_mem(unsigned long start, unsigned long end)
                free_page(p);
                totalram_pages++;
        }
-       printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+       printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
 }
 #endif
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int arch_add_memory(int nid, u64 start, u64 size)
+{
+       pg_data_t *pgdat;
+       unsigned long start_pfn = start >> PAGE_SHIFT;
+       unsigned long nr_pages = size >> PAGE_SHIFT;
+       int ret;
+
+       pgdat = NODE_DATA(nid);
+
+       /* We only have ZONE_NORMAL, so this is easy.. */
+       ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
+                               start_pfn, nr_pages);
+       if (unlikely(ret))
+               printk("%s: Failed, __add_pages() == %d\n", __func__, ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(arch_add_memory);
+
+#ifdef CONFIG_NUMA
+int memory_add_physaddr_to_nid(u64 addr)
+{
+       /* Node 0 for now.. */
+       return 0;
+}
+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+#endif
+
+#endif /* CONFIG_MEMORY_HOTPLUG */
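
The new hotplug hooks are what let the generic memory hotplug core online extra RAM on sh: every hot-added span lands in ZONE_NORMAL, and the NUMA stub pins it to node 0 for now. Simplified, the generic call path into the hook looks roughly like this (illustrative values, not code from this patch):

	/*
	 * Illustrative span; add_memory() is the generic entry point
	 * that ends up in arch_add_memory() above.
	 */
	u64 start = 0x10000000, size = SZ_64M;
	int nid = memory_add_physaddr_to_nid(start);	/* node 0 on sh, for now */
	int ret = add_memory(nid, start, size);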