diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 65cf839..7c0d481 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -4,7 +4,6 @@
  * Copyright (C) 1998-2003 Hewlett-Packard Co
  *     David Mosberger-Tang <davidm@hpl.hp.com>
  */
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 
@@ -20,8 +19,8 @@
 #include <linux/swap.h>
 #include <linux/proc_fs.h>
 #include <linux/bitops.h>
+#include <linux/kexec.h>
 
-#include <asm/a.out.h>
 #include <asm/dma.h>
 #include <asm/ia32.h>
 #include <asm/io.h>
@@ -36,6 +35,7 @@
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 #include <asm/mca.h>
+#include <asm/paravirt.h>
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
@@ -44,64 +44,58 @@ extern void ia64_tlb_init (void);
 unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-unsigned long vmalloc_end = VMALLOC_END_INIT;
-EXPORT_SYMBOL(vmalloc_end);
+unsigned long VMALLOC_END = VMALLOC_END_INIT;
+EXPORT_SYMBOL(VMALLOC_END);
 struct page *vmem_map;
 EXPORT_SYMBOL(vmem_map);
 #endif
 
-static int pgt_cache_water[2] = { 25, 50 };
-
-struct page *zero_page_memmap_ptr;             /* map entry for zero page */
+struct page *zero_page_memmap_ptr;     /* map entry for zero page */
 EXPORT_SYMBOL(zero_page_memmap_ptr);
 
 void
-check_pgt_cache (void)
-{
-       int low, high;
-
-       low = pgt_cache_water[0];
-       high = pgt_cache_water[1];
-
-       preempt_disable();
-       if (pgtable_cache_size > (u64) high) {
-               do {
-                       if (pgd_quicklist)
-                               free_page((unsigned long)pgd_alloc_one_fast(NULL));
-                       if (pmd_quicklist)
-                               free_page((unsigned long)pmd_alloc_one_fast(NULL, 0));
-               } while (pgtable_cache_size > (u64) low);
-       }
-       preempt_enable();
-}
-
-void
-lazy_mmu_prot_update (pte_t pte)
+__ia64_sync_icache_dcache (pte_t pte)
 {
        unsigned long addr;
        struct page *page;
 
-       if (!pte_exec(pte))
-               return;                         /* not an executable page... */
-
        page = pte_page(pte);
        addr = (unsigned long) page_address(page);
 
        if (test_bit(PG_arch_1, &page->flags))
                return;                         /* i-cache is already coherent with d-cache */
 
-       flush_icache_range(addr, addr + PAGE_SIZE);
+       flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
        set_bit(PG_arch_1, &page->flags);       /* mark page as clean */
 }
 
+/*
+ * Since DMA is i-cache coherent, any (complete) pages that were written via
+ * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
+ * flush them when they get mapped into an executable vm-area.
+ */
+void
+dma_mark_clean(void *addr, size_t size)
+{
+       unsigned long pg_addr, end;
+
+       pg_addr = PAGE_ALIGN((unsigned long) addr);
+       end = (unsigned long) addr + size;
+       while (pg_addr + PAGE_SIZE <= end) {
+               struct page *page = virt_to_page(pg_addr);
+               set_bit(PG_arch_1, &page->flags);
+               pg_addr += PAGE_SIZE;
+       }
+}
+
 inline void
 ia64_set_rbs_bot (void)
 {
-       unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;
+       unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;
 
        if (stack_size > MAX_USER_STACK_SIZE)
                stack_size = MAX_USER_STACK_SIZE;
-       current->thread.rbs_bot = STACK_TOP - stack_size;
+       current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
 }
 
 /*
@@ -122,14 +116,13 @@ ia64_init_addr_space (void)
         * the problem.  When the process attempts to write to the register backing store
         * for the first time, it will get a SEGFAULT in this case.
         */
-       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (vma) {
-               memset(vma, 0, sizeof(*vma));
                vma->vm_mm = current->mm;
                vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
-               vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
-               vma->vm_flags = VM_DATA_DEFAULT_FLAGS | VM_GROWSUP;
+               vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
+               vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
                down_write(&current->mm->mmap_sem);
                if (insert_vm_struct(current->mm, vma)) {
                        up_write(&current->mm->mmap_sem);
@@ -141,9 +134,8 @@ ia64_init_addr_space (void)
 
        /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
        if (!(current->personality & MMAP_PAGE_ZERO)) {
-               vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+               vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
                if (vma) {
-                       memset(vma, 0, sizeof(*vma));
                        vma->vm_mm = current->mm;
                        vma->vm_end = PAGE_SIZE;
                        vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
@@ -168,7 +160,7 @@ free_initmem (void)
        eaddr = (unsigned long) ia64_imva(__init_end);
        while (addr < eaddr) {
                ClearPageReserved(virt_to_page(addr));
-               set_page_count(virt_to_page(addr), 1);
+               init_page_count(virt_to_page(addr));
                free_page(addr);
                ++totalram_pages;
                addr += PAGE_SIZE;
@@ -177,7 +169,7 @@ free_initmem (void)
               (__init_end - __init_begin) >> 10);
 }
 
-void
+void __init
 free_initrd_mem (unsigned long start, unsigned long end)
 {
        struct page *page;
@@ -223,7 +215,7 @@ free_initrd_mem (unsigned long start, unsigned long end)
                        continue;
                page = virt_to_page(start);
                ClearPageReserved(page);
-               set_page_count(page, 1);
+               init_page_count(page);
                free_page(start);
                ++totalram_pages;
        }
@@ -232,7 +224,7 @@ free_initrd_mem (unsigned long start, unsigned long end)
 /*
  * This installs a clean page in the kernel's page table.
  */
-struct page *
+static struct page * __init
 put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
 {
        pgd_t *pgd;
@@ -246,46 +238,58 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
 
        pgd = pgd_offset_k(address);            /* note: this is NOT pgd_offset()! */
 
-       spin_lock(&init_mm.page_table_lock);
        {
                pud = pud_alloc(&init_mm, pgd, address);
                if (!pud)
                        goto out;
-
                pmd = pmd_alloc(&init_mm, pud, address);
                if (!pmd)
                        goto out;
-               pte = pte_alloc_map(&init_mm, pmd, address);
+               pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        goto out;
-               if (!pte_none(*pte)) {
-                       pte_unmap(pte);
+               if (!pte_none(*pte))
                        goto out;
-               }
                set_pte(pte, mk_pte(page, pgprot));
-               pte_unmap(pte);
        }
-  out: spin_unlock(&init_mm.page_table_lock);
+  out:
        /* no need for flush_tlb */
        return page;
 }
 
-static void
+static void __init
 setup_gate (void)
 {
+       void *gate_section;
        struct page *page;
 
        /*
-        * Map the gate page twice: once read-only to export the ELF headers etc. and once
-        * execute-only page to enable privilege-promotion via "epc":
+        * Map the gate page twice: once read-only to export the ELF
+        * headers etc. and once execute-only page to enable
+        * privilege-promotion via "epc":
         */
-       page = virt_to_page(ia64_imva(__start_gate_section));
+       gate_section = paravirt_get_gate_section();
+       page = virt_to_page(ia64_imva(gate_section));
        put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
 #ifdef HAVE_BUGGY_SEGREL
-       page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
+       page = virt_to_page(ia64_imva(gate_section + PAGE_SIZE));
        put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
 #else
        put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
+       /* Fill in the holes (if any) with read-only zero pages: */
+       {
+               unsigned long addr;
+
+               for (addr = GATE_ADDR + PAGE_SIZE;
+                    addr < GATE_ADDR + PERCPU_PAGE_SIZE;
+                    addr += PAGE_SIZE)
+               {
+                       put_kernel_page(ZERO_PAGE(0), addr,
+                                       PAGE_READONLY);
+                       put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
+                                       PAGE_READONLY);
+               }
+       }
 #endif
        ia64_patch_gate();
 }
@@ -293,7 +297,7 @@ setup_gate (void)
 void __devinit
 ia64_mmu_init (void *my_cpu_data)
 {
-       unsigned long psr, pta, impl_va_bits;
+       unsigned long pta, impl_va_bits;
        extern void __devinit tlb_init (void);
 
 #ifdef CONFIG_DISABLE_VHPT
@@ -302,15 +306,6 @@ ia64_mmu_init (void *my_cpu_data)
 #      define VHPT_ENABLE_BIT  1
 #endif
 
-       /* Pin mapping for percpu area into TLB */
-       psr = ia64_clear_ic();
-       ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
-                pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
-                PERCPU_PAGE_SHIFT);
-
-       ia64_set_psr(psr);
-       ia64_srlz_i();
-
        /*
         * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
         * address space.  The IA-64 architecture guarantees that at least 50 bits of
@@ -338,13 +333,22 @@ ia64_mmu_init (void *my_cpu_data)
 
        if (impl_va_bits < 51 || impl_va_bits > 61)
                panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
+       /*
+        * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
+        * which must fit into "vmlpt_bits - pte_bits" slots. Second half of
+        * the test makes sure that our mapped space doesn't overlap the
+        * unimplemented hole in the middle of the region.
+        */
+       if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
+           (mapped_space_bits > impl_va_bits - 1))
+               panic("Cannot build a big enough virtual-linear page table"
+                     " to cover mapped address space.\n"
+                     " Try using a smaller page size.\n");
+
 
        /* place the VMLPT at the end of each page-table mapped region: */
        pta = POW2(61) - POW2(vmlpt_bits);
 
-       if (POW2(mapped_space_bits) >= pta)
-               panic("mm/init: overlap between virtually mapped linear page table and "
-                     "mapped kernel space!");
        /*
         * Set the (virtually mapped linear) page table address.  Bit
         * 8 selects between the short and long format, bits 2-7 the
@@ -362,9 +366,63 @@ ia64_mmu_init (void *my_cpu_data)
 }
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
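+/*
+ * Starting from the vmem_map entry for pfn (node_start_pfn + i), walk the
+ * kernel page tables that back the virtual mem_map and skip over any
+ * unmapped pgd/pud/pmd/pte ranges.  Returns the offset, relative to the
+ * node's first pfn, of the next pfn whose struct page is actually mapped.
+ */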
+int vmemmap_find_next_valid_pfn(int node, int i)
+{
+       unsigned long end_address, hole_next_pfn;
+       unsigned long stop_address;
+       pg_data_t *pgdat = NODE_DATA(node);
 
-int
-create_mem_map_page_table (u64 start, u64 end, void *arg)
+       end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
+       end_address = PAGE_ALIGN(end_address);
+
+       stop_address = (unsigned long) &vmem_map[
+               pgdat->node_start_pfn + pgdat->node_spanned_pages];
+
+       do {
+               pgd_t *pgd;
+               pud_t *pud;
+               pmd_t *pmd;
+               pte_t *pte;
+
+               pgd = pgd_offset_k(end_address);
+               if (pgd_none(*pgd)) {
+                       end_address += PGDIR_SIZE;
+                       continue;
+               }
+
+               pud = pud_offset(pgd, end_address);
+               if (pud_none(*pud)) {
+                       end_address += PUD_SIZE;
+                       continue;
+               }
+
+               pmd = pmd_offset(pud, end_address);
+               if (pmd_none(*pmd)) {
+                       end_address += PMD_SIZE;
+                       continue;
+               }
+
+               pte = pte_offset_kernel(pmd, end_address);
+retry_pte:
+               if (pte_none(*pte)) {
+                       end_address += PAGE_SIZE;
+                       pte++;
+                       if ((end_address < stop_address) &&
+                           (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
+                               goto retry_pte;
+                       continue;
+               }
+               /* Found next valid vmem_map page */
+               break;
+       } while (end_address < stop_address);
+
+       end_address = min(end_address, stop_address);
+       end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
+       hole_next_pfn = end_address / sizeof(struct page);
+       return hole_next_pfn - pgdat->node_start_pfn;
+}
+
+int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
 {
        unsigned long address, start_page, end_page;
        struct page *map_start, *map_end;
@@ -409,8 +467,8 @@ struct memmap_init_callback_data {
        unsigned long zone;
 };
 
-static int
-virtual_memmap_init (u64 start, u64 end, void *arg)
+static int __meminit
+virtual_memmap_init(u64 start, u64 end, void *arg)
 {
        struct memmap_init_callback_data *args;
        struct page *map_start, *map_end;
@@ -435,16 +493,17 @@ virtual_memmap_init (u64 start, u64 end, void *arg)
 
        if (map_start < map_end)
                memmap_init_zone((unsigned long)(map_end - map_start),
-                                args->nid, args->zone, page_to_pfn(map_start));
+                                args->nid, args->zone, page_to_pfn(map_start),
+                                MEMMAP_EARLY);
        return 0;
 }
 
-void
+void __meminit
 memmap_init (unsigned long size, int nid, unsigned long zone,
             unsigned long start_pfn)
 {
        if (!vmem_map)
-               memmap_init_zone(size, nid, zone, start_pfn);
+               memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
        else {
                struct page *start;
                struct memmap_init_callback_data args;
@@ -471,8 +530,7 @@ ia64_pfn_valid (unsigned long pfn)
 }
 EXPORT_SYMBOL(ia64_pfn_valid);
 
-int
-find_largest_hole (u64 start, u64 end, void *arg)
+int __init find_largest_hole(u64 start, u64 end, void *arg)
 {
        u64 *max_gap = arg;
 
@@ -485,10 +543,28 @@ find_largest_hole (u64 start, u64 end, void *arg)
        last_end = end;
        return 0;
 }
+
 #endif /* CONFIG_VIRTUAL_MEM_MAP */
 
-static int
-count_reserved_pages (u64 start, u64 end, void *arg)
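+/*
+ * Record a usable physical memory range for node @nid with the early
+ * active-range bookkeeping, carving out the crash-kernel reservation
+ * when CONFIG_KEXEC is enabled.
+ */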
+int __init register_active_ranges(u64 start, u64 len, int nid)
+{
+       u64 end = start + len;
+
+#ifdef CONFIG_KEXEC
+       if (start > crashk_res.start && start < crashk_res.end)
+               start = crashk_res.end;
+       if (end > crashk_res.start && end < crashk_res.end)
+               end = crashk_res.start;
+#endif
+
+       if (start < end)
+               add_active_range(nid, __pa(start) >> PAGE_SHIFT,
+                       __pa(end) >> PAGE_SHIFT);
+       return 0;
+}
+
+static int __init
+count_reserved_pages(u64 start, u64 end, void *arg)
 {
        unsigned long num_reserved = 0;
        unsigned long *count = arg;
@@ -500,6 +576,22 @@ count_reserved_pages (u64 start, u64 end, void *arg)
        return 0;
 }
 
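+/*
+ * Widen the global min_low_pfn/max_low_pfn bounds so that they cover the
+ * physical range [start, end); the range is rounded to page granularity
+ * on FLATMEM and to granule boundaries otherwise.
+ */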
+int
+find_max_min_low_pfn (u64 start, u64 end, void *arg)
+{
+       unsigned long pfn_start, pfn_end;
+#ifdef CONFIG_FLATMEM
+       pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
+       pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
+#else
+       pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
+       pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
+#endif
+       min_low_pfn = min(min_low_pfn, pfn_start);
+       max_low_pfn = max(max_low_pfn, pfn_end);
+       return 0;
+}
+
 /*
  * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
  * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
@@ -508,7 +600,7 @@ count_reserved_pages (u64 start, u64 end, void *arg)
  * purposes.
  */
 
-static int nolwsys;
+static int nolwsys __initdata;
 
 static int __init
 nolwsys_setup (char *s)
@@ -519,14 +611,16 @@ nolwsys_setup (char *s)
 
 __setup("nolwsys", nolwsys_setup);
 
-void
+void __init
 mem_init (void)
 {
        long reserved_pages, codesize, datasize, initsize;
-       unsigned long num_pgt_pages;
        pg_data_t *pgdat;
        int i;
-       static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;
+
+       BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
+       BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
+       BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);
 
 #ifdef CONFIG_PCI
        /*
@@ -537,20 +631,16 @@ mem_init (void)
        platform_dma_init();
 #endif
 
-#ifndef CONFIG_DISCONTIGMEM
-       if (!mem_map)
-               BUG();
+#ifdef CONFIG_FLATMEM
+       BUG_ON(!mem_map);
        max_mapnr = max_low_pfn;
 #endif
 
        high_memory = __va(max_low_pfn * PAGE_SIZE);
 
-       kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
-       kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
-       kclist_add(&kcore_kernel, _stext, _end - _stext);
-
-       for_each_pgdat(pgdat)
-               totalram_pages += free_all_bootmem_node(pgdat);
+       for_each_online_pgdat(pgdat)
+               if (pgdat->bdata->node_bootmem_map)
+                       totalram_pages += free_all_bootmem_node(pgdat);
 
        reserved_pages = 0;
        efi_memmap_walk(count_reserved_pages, &reserved_pages);
@@ -560,22 +650,10 @@ mem_init (void)
        initsize =  (unsigned long) __init_end - (unsigned long) __init_begin;
 
        printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
-              "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
+              "%luk data, %luk init)\n", nr_free_pages() << (PAGE_SHIFT - 10),
               num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
               reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);
 
-       /*
-        * Allow for enough (cached) page table pages so that we can map the entire memory
-        * at least once.  Each task also needs a couple of page tables pages, so add in a
-        * fudge factor for that (don't use "threads-max" here; that would be wrong!).
-        * Don't allow the cache to be more than 10% of total memory, though.
-        */
-#      define NUM_TASKS        500     /* typical number of tasks */
-       num_pgt_pages = nr_free_pages() / PTRS_PER_PGD + NUM_TASKS;
-       if (num_pgt_pages > nr_free_pages() / 10)
-               num_pgt_pages = nr_free_pages() / 10;
-       if (num_pgt_pages > (u64) pgt_cache_water[1])
-               pgt_cache_water[1] = num_pgt_pages;
 
        /*
         * For fsyscall entry points with no light-weight handler, use the ordinary
@@ -583,8 +661,8 @@ mem_init (void)
         * code can tell them apart.
         */
        for (i = 0; i < NR_syscalls; ++i) {
-               extern unsigned long fsyscall_table[NR_syscalls];
                extern unsigned long sys_call_table[NR_syscalls];
+               unsigned long *fsyscall_table = paravirt_get_fsyscall_table();
 
                if (!fsyscall_table[i] || nolwsys)
                        fsyscall_table[i] = sys_call_table[i] | 1;
@@ -595,3 +673,50 @@ mem_init (void)
        ia32_mem_init();
 #endif
 }
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int arch_add_memory(int nid, u64 start, u64 size)
+{
+       pg_data_t *pgdat;
+       struct zone *zone;
+       unsigned long start_pfn = start >> PAGE_SHIFT;
+       unsigned long nr_pages = size >> PAGE_SHIFT;
+       int ret;
+
+       pgdat = NODE_DATA(nid);
+
+       zone = pgdat->node_zones + ZONE_NORMAL;
+       ret = __add_pages(nid, zone, start_pfn, nr_pages);
+
+       if (ret)
+               printk("%s: Problem encountered in __add_pages() as ret=%d\n",
+                      __func__,  ret);
+
+       return ret;
+}
+#endif
+
+/*
+ * Even when CONFIG_IA32_SUPPORT is not enabled it is
+ * useful to have the Linux/x86 domain registered to
+ * avoid an attempted module load when emulators call
+ * personality(PER_LINUX32). This saves several milliseconds
+ * on each such call.
+ */
+static struct exec_domain ia32_exec_domain;
+
+static int __init
+per_linux32_init(void)
+{
+       ia32_exec_domain.name = "Linux/x86";
+       ia32_exec_domain.handler = NULL;
+       ia32_exec_domain.pers_low = PER_LINUX32;
+       ia32_exec_domain.pers_high = PER_LINUX32;
+       ia32_exec_domain.signal_map = default_exec_domain.signal_map;
+       ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
+       register_exec_domain(&ia32_exec_domain);
+
+       return 0;
+}
+
+__initcall(per_linux32_init);
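
The per_linux32_init() hunk above registers a "Linux/x86" exec domain so that
personality(PER_LINUX32) is satisfied in-kernel rather than triggering an
attempted module load.  A minimal userspace sketch of the call this optimizes
(illustrative only, not part of the patch; assumes the glibc personality()
wrapper):

#include <stdio.h>
#include <sys/personality.h>

int main(void)
{
	/* Switch this process to the 32-bit x86 personality; with the exec
	 * domain registered above, the kernel resolves this directly. */
	if (personality(PER_LINUX32) == -1)
		perror("personality(PER_LINUX32)");

	/* Query the current personality without changing it. */
	printf("personality: 0x%x\n", personality(0xffffffff));
	return 0;
}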