Merge branch 'core/xen' into x86/xen
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 8c2b50e..a41df7b 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1,4 +1,4 @@
-/*  $Id: init.c,v 1.209 2002/02/09 19:49:31 davem Exp $
+/*
  *  arch/sparc64/mm/init.c
  *
  *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
@@ -49,6 +49,7 @@
 #include <asm/sstate.h>
 #include <asm/mdesc.h>
 #include <asm/cpudata.h>
+#include <asm/irq.h>
 
 #define MAX_PHYS_ADDRESS       (1UL << 42UL)
 #define KPTE_BITMAP_CHUNK_SZ   (256UL * 1024UL * 1024UL)
@@ -160,6 +161,7 @@ extern unsigned int sparc_ramdisk_image;
 extern unsigned int sparc_ramdisk_size;
 
 struct page *mem_map_zero __read_mostly;
+EXPORT_SYMBOL(mem_map_zero);
 
 unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
 
@@ -391,51 +393,6 @@ void __kprobes flush_icache_range(unsigned long start, unsigned long end)
        }
 }
 
-void show_mem(void)
-{
-       unsigned long total = 0, reserved = 0;
-       unsigned long shared = 0, cached = 0;
-       pg_data_t *pgdat;
-
-       printk(KERN_INFO "Mem-info:\n");
-       show_free_areas();
-       printk(KERN_INFO "Free swap:       %6ldkB\n",
-              nr_swap_pages << (PAGE_SHIFT-10));
-       for_each_online_pgdat(pgdat) {
-               unsigned long i, flags;
-
-               pgdat_resize_lock(pgdat, &flags);
-               for (i = 0; i < pgdat->node_spanned_pages; i++) {
-                       struct page *page = pgdat_page_nr(pgdat, i);
-                       total++;
-                       if (PageReserved(page))
-                               reserved++;
-                       else if (PageSwapCache(page))
-                               cached++;
-                       else if (page_count(page))
-                               shared += page_count(page) - 1;
-               }
-               pgdat_resize_unlock(pgdat, &flags);
-       }
-
-       printk(KERN_INFO "%lu pages of RAM\n", total);
-       printk(KERN_INFO "%lu reserved pages\n", reserved);
-       printk(KERN_INFO "%lu pages shared\n", shared);
-       printk(KERN_INFO "%lu pages swap cached\n", cached);
-
-       printk(KERN_INFO "%lu pages dirty\n",
-              global_page_state(NR_FILE_DIRTY));
-       printk(KERN_INFO "%lu pages writeback\n",
-              global_page_state(NR_WRITEBACK));
-       printk(KERN_INFO "%lu pages mapped\n",
-              global_page_state(NR_FILE_MAPPED));
-       printk(KERN_INFO "%lu pages slab\n",
-               global_page_state(NR_SLAB_RECLAIMABLE) +
-               global_page_state(NR_SLAB_UNRECLAIMABLE));
-       printk(KERN_INFO "%lu pages pagetables\n",
-              global_page_state(NR_PAGETABLE));
-}
-
 void mmu_info(struct seq_file *m)
 {
        if (tlb_type == cheetah)
@@ -609,8 +566,6 @@ static void __init remap_kernel(void)
 
 static void __init inherit_prom_mappings(void)
 {
-       read_obp_translations();
-
        /* Now fixup OBP's idea about where we really are mapped. */
        printk("Remapping the kernel... ");
        remap_kernel();
@@ -769,7 +724,10 @@ static void __init find_ramdisk(unsigned long phys_base)
                initrd_start = ramdisk_image;
                initrd_end = ramdisk_image + sparc_ramdisk_size;
 
-               lmb_reserve(initrd_start, initrd_end);
+               lmb_reserve(initrd_start, sparc_ramdisk_size);
+
+               initrd_start += PAGE_OFFSET;
+               initrd_end += PAGE_OFFSET;
        }
 #endif
 }
@@ -786,7 +744,6 @@ int numa_cpu_lookup_table[NR_CPUS];
 cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
-static bootmem_data_t plat_node_bdata[MAX_NUMNODES];
 
 struct mdesc_mblock {
        u64     base;
@@ -839,6 +796,9 @@ static unsigned long nid_range(unsigned long start, unsigned long end,
                start += PAGE_SIZE;
        }
 
+       if (start > end)
+               start = end;
+
        return start;
 }
 #else
@@ -869,7 +829,7 @@ static void __init allocate_node_data(int nid)
        NODE_DATA(nid) = __va(paddr);
        memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
 
-       NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
+       NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
 #endif
 
        p = NODE_DATA(nid);
@@ -1743,7 +1703,17 @@ void __init paging_init(void)
 
        lmb_init();
 
-       /* Find available physical memory... */
+       /* Find available physical memory...
+        *
+        * Read it twice in order to work around a bug in openfirmware.
+        * The call to grab this table itself can cause openfirmware to
+        * allocate memory, which in turn can take away some space from
+        * the list of available memory.  Reading it twice makes sure
+        * we really do get the final value.
+        */
+       read_obp_translations();
+       read_obp_memory("reg", &pall[0], &pall_ents);
+       read_obp_memory("available", &pavail[0], &pavail_ents);
        read_obp_memory("available", &pavail[0], &pavail_ents);
 
        phys_base = 0xffffffffffffffffUL;
@@ -1756,8 +1726,7 @@ void __init paging_init(void)
 
        find_ramdisk(phys_base);
 
-       if (cmdline_memory_size)
-               lmb_enforce_memory_limit(phys_base + cmdline_memory_size);
+       lmb_enforce_memory_limit(cmdline_memory_size);
 
        lmb_analyze();
        lmb_dump_all();
@@ -1784,8 +1753,6 @@ void __init paging_init(void)
        
        inherit_prom_mappings();
        
-       read_obp_memory("reg", &pall[0], &pall_ents);
-
        init_kpte_bitmap();
 
        /* Ok, we can use our TLB miss and window trap handlers safely.  */
@@ -1807,6 +1774,16 @@ void __init paging_init(void)
        if (tlb_type == hypervisor)
                sun4v_mdesc_init();
 
+       /* Once the OF device tree and MDESC have been setup, we know
+        * the list of possible cpus.  Therefore we can allocate the
+        * IRQ stacks.
+        */
+       for_each_possible_cpu(i) {
+               /* XXX Use node local allocations... XXX */
+               softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+               hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+       }
+
        /* Setup bootmem... */
        last_valid_pfn = end_pfn = bootmem_init(phys_base);
 
@@ -1866,7 +1843,7 @@ static int pavail_rescan_ents __initdata;
  * memory list again, and make sure it provides at least as much
  * memory as 'pavail' does.
  */
-static void setup_valid_addr_bitmap_from_pavail(void)
+static void __init setup_valid_addr_bitmap_from_pavail(void)
 {
        int i;
 
@@ -1986,6 +1963,15 @@ void __init mem_init(void)
 void free_initmem(void)
 {
        unsigned long addr, initend;
+       int do_free = 1;
+
+       /* If the physical memory maps were trimmed by kernel command
+        * line options, don't even try freeing this initmem stuff up.
+        * The kernel image could have been in the trimmed out region
+        * and if so the freeing below will free invalid page structs.
+        */
+       if (cmdline_memory_size)
+               do_free = 0;
 
        /*
         * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
@@ -2000,13 +1986,16 @@ void free_initmem(void)
                        ((unsigned long) __va(kern_base)) -
                        ((unsigned long) KERNBASE));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
-               p = virt_to_page(page);
 
-               ClearPageReserved(p);
-               init_page_count(p);
-               __free_page(p);
-               num_physpages++;
-               totalram_pages++;
+               if (do_free) {
+                       p = virt_to_page(page);
+
+                       ClearPageReserved(p);
+                       init_page_count(p);
+                       __free_page(p);
+                       num_physpages++;
+                       totalram_pages++;
+               }
        }
 }
 
@@ -2361,16 +2350,3 @@ void __flush_tlb_all(void)
        __asm__ __volatile__("wrpr      %0, 0, %%pstate"
                             : : "r" (pstate));
 }
-
-#ifdef CONFIG_MEMORY_HOTPLUG
-
-void online_page(struct page *page)
-{
-       ClearPageReserved(page);
-       init_page_count(page);
-       __free_page(page);
-       totalram_pages++;
-       num_physpages++;
-}
-
-#endif /* CONFIG_MEMORY_HOTPLUG */