Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
arch/x86/kernel/setup_percpu.c
index efa615f..de3b63a 100644
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
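
The pr_fmt() override prepends KBUILD_MODNAME to every pr_*() call in this file, replacing the ad-hoc "PERCPU:" prefixes the removed allocators carried in their format strings. A rough sketch of the effect, assuming KBUILD_MODNAME expands to "setup_percpu" for this object file:

    /* pr_info(fmt, ...) expands (roughly) to
     * printk(KERN_INFO pr_fmt(fmt), ...), so with the override: */
    pr_info("NR_CPUS:%d\n", NR_CPUS);
    /* -> "setup_percpu: NR_CPUS:64" in the kernel log */
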
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <asm/cpu.h>
 #include <asm/stackprotector.h>
 
-#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-# define DBG(x...) printk(KERN_DEBUG x)
-#else
-# define DBG(x...)
-#endif
-
 DEFINE_PER_CPU(int, cpu_number);
 EXPORT_PER_CPU_SYMBOL(cpu_number);
 
@@ -55,6 +51,7 @@ EXPORT_SYMBOL(__per_cpu_offset);
 #define PERCPU_FIRST_CHUNK_RESERVE     0
 #endif
 
+#ifdef CONFIG_X86_32
 /**
 * pcpu_need_numa - determine whether percpu allocation needs to consider NUMA
  *
@@ -83,6 +80,7 @@ static bool __init pcpu_need_numa(void)
 #endif
        return false;
 }
+#endif
 
 /**
  * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
@@ -114,8 +112,8 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
        } else {
                ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
                                                   size, align, goal);
-               pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
-                        "%016lx\n", cpu, size, node, __pa(ptr));
+               pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
+                        cpu, size, node, __pa(ptr));
        }
        return ptr;
 #else
@@ -124,270 +122,41 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
 }
 
 /*
- * Remap allocator
- *
- * This allocator uses PMD page as unit.  A PMD page is allocated for
- * each cpu and each is remapped into vmalloc area using PMD mapping.
- * As PMD page is quite large, only part of it is used for the first
- * chunk.  Unused part is returned to the bootmem allocator.
- *
- * So, the PMD pages are mapped twice - once to the physical mapping
- * and to the vmalloc area for the first percpu chunk.  The double
- * mapping does add one more PMD TLB entry pressure but still is much
- * better than only using 4k mappings while still being NUMA friendly.
+ * Helpers for first chunk memory allocation
  */
-#ifdef CONFIG_NEED_MULTIPLE_NODES
-static size_t pcpur_size __initdata;
-static void **pcpur_ptrs __initdata;
-
-static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
+static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 {
-       size_t off = (size_t)pageno << PAGE_SHIFT;
-
-       if (off >= pcpur_size)
-               return NULL;
-
-       return virt_to_page(pcpur_ptrs[cpu] + off);
+       return pcpu_alloc_bootmem(cpu, size, align);
 }
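
pcpu_fc_alloc() and pcpu_fc_free() exist purely to match the callback signatures that the generic first-chunk setup in mm/percpu.c expects. A sketch of those typedefs, roughly as declared in include/linux/percpu.h in this era (exact spelling may differ between versions):

    /* NUMA-aware bootmem allocation for @cpu's unit */
    typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
                                         size_t align);
    /* return unused bootmem to the allocator */
    typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
    /* map one page at @addr for the page-based first chunk */
    typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
    /* NUMA distance between two cpus, used for unit grouping */
    typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);
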
 
-static ssize_t __init setup_pcpu_remap(size_t static_size)
+static void __init pcpu_fc_free(void *ptr, size_t size)
 {
-       static struct vm_struct vm;
-       pg_data_t *last;
-       size_t ptrs_size, dyn_size;
-       unsigned int cpu;
-       ssize_t ret;
-
-       /*
-        * If large page isn't supported, there's no benefit in doing
-        * this.  Also, on non-NUMA, embedding is better.
-        */
-       if (!cpu_has_pse || pcpu_need_numa())
-               return -EINVAL;
-
-       last = NULL;
-       for_each_possible_cpu(cpu) {
-               int node = early_cpu_to_node(cpu);
-
-               if (node_online(node) && NODE_DATA(node) &&
-                   last && last != NODE_DATA(node))
-                       goto proceed;
-
-               last = NODE_DATA(node);
-       }
-       return -EINVAL;
-
-proceed:
-       /*
-        * Currently supports only single page.  Supporting multiple
-        * pages won't be too difficult if it ever becomes necessary.
-        */
-       pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
-                              PERCPU_DYNAMIC_RESERVE);
-       if (pcpur_size > PMD_SIZE) {
-               pr_warning("PERCPU: static data is larger than large page, "
-                          "can't use large page\n");
-               return -EINVAL;
-       }
-       dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
-
-       /* allocate pointer array and alloc large pages */
-       ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
-       pcpur_ptrs = alloc_bootmem(ptrs_size);
-
-       for_each_possible_cpu(cpu) {
-               pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
-               if (!pcpur_ptrs[cpu])
-                       goto enomem;
-
-               /*
-                * Only use pcpur_size bytes and give back the rest.
-                *
-                * Ingo: The 2MB up-rounding bootmem is needed to make
-                * sure the partial 2MB page is still fully RAM - it's
-                * not well-specified to have a PAT-incompatible area
-                * (unmapped RAM, device memory, etc.) in that hole.
-                */
-               free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
-                            PMD_SIZE - pcpur_size);
-
-               memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
-       }
-
-       /* allocate address and map */
-       vm.flags = VM_ALLOC;
-       vm.size = num_possible_cpus() * PMD_SIZE;
-       vm_area_register_early(&vm, PMD_SIZE);
-
-       for_each_possible_cpu(cpu) {
-               pmd_t *pmd;
-
-               pmd = populate_extra_pmd((unsigned long)vm.addr
-                                        + cpu * PMD_SIZE);
-               set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])),
-                                    PAGE_KERNEL_LARGE));
-       }
-
-       /* we're ready, commit */
-       pr_info("PERCPU: Remapped at %p with large pages, static data "
-               "%zu bytes\n", vm.addr, static_size);
-
-       ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
-                                    PERCPU_FIRST_CHUNK_RESERVE,
-                                    PMD_SIZE, dyn_size, vm.addr, NULL);
-       goto out_free_ar;
-
-enomem:
-       for_each_possible_cpu(cpu)
-               if (pcpur_ptrs[cpu])
-                       free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE);
-       ret = -ENOMEM;
-out_free_ar:
-       free_bootmem(__pa(pcpur_ptrs), ptrs_size);
-       return ret;
-}
+#ifdef CONFIG_NO_BOOTMEM
+       u64 start = __pa(ptr);
+       u64 end = start + size;
+       free_early_partial(start, end);
 #else
-static ssize_t __init setup_pcpu_remap(size_t static_size)
-{
-       return -EINVAL;
-}
+       free_bootmem(__pa(ptr), size);
 #endif
-
-/*
- * Embedding allocator
- *
- * The first chunk is sized to just contain the static area plus
- * module and dynamic reserves, and allocated as a contiguous area
- * using bootmem allocator and used as-is without being mapped into
- * vmalloc area.  This enables the first chunk to piggy back on the
- * linear physical PMD mapping and doesn't add any additional pressure
- * to TLB.  Note that if the needed size is smaller than the minimum
- * unit size, the leftover is returned to the bootmem allocator.
- */
-static void *pcpue_ptr __initdata;
-static size_t pcpue_size __initdata;
-static size_t pcpue_unit_size __initdata;
-
-static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
-{
-       size_t off = (size_t)pageno << PAGE_SHIFT;
-
-       if (off >= pcpue_size)
-               return NULL;
-
-       return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off);
 }
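
With CONFIG_NO_BOOTMEM, the bootmem interface is emulated on top of the early_res reservations, and, as I understand it, freeing only a sub-range of an existing reservation needs free_early_partial() because the plain free primitive expects to drop a whole reservation. A toy model of the split a partial free implies (hypothetical code, not kernel/early_res.c):

    /* Toy model: a reservation [start, end) loses the sub-range
     * [fs, fe); the surviving head and tail must be kept as two
     * separate reservations. */
    struct early_res { unsigned long long start, end; };

    static int partial_free(struct early_res *res,
                            unsigned long long fs, unsigned long long fe,
                            struct early_res *tail)
    {
            if (fs < res->start || fe > res->end || fs > fe)
                    return -1;      /* not contained in this reservation */
            tail->start = fe;       /* piece after the freed hole */
            tail->end = res->end;
            res->end = fs;          /* piece before the freed hole */
            return 0;
    }
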
 
-static ssize_t __init setup_pcpu_embed(size_t static_size)
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
 {
-       unsigned int cpu;
-       size_t dyn_size;
-
-       /*
-        * If large page isn't supported, there's no benefit in doing
-        * this.  Also, embedding allocation doesn't play well with
-        * NUMA.
-        */
-       if (!cpu_has_pse || pcpu_need_numa())
-               return -EINVAL;
-
-       /* allocate and copy */
-       pcpue_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
-                              PERCPU_DYNAMIC_RESERVE);
-       pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
-       dyn_size = pcpue_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
-
-       pcpue_ptr = pcpu_alloc_bootmem(0, num_possible_cpus() * pcpue_unit_size,
-                                      PAGE_SIZE);
-       if (!pcpue_ptr)
-               return -ENOMEM;
-
-       for_each_possible_cpu(cpu) {
-               void *ptr = pcpue_ptr + cpu * pcpue_unit_size;
-
-               free_bootmem(__pa(ptr + pcpue_size),
-                            pcpue_unit_size - pcpue_size);
-               memcpy(ptr, __per_cpu_load, static_size);
-       }
-
-       /* we're ready, commit */
-       pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
-               pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size);
-
-       return pcpu_setup_first_chunk(pcpue_get_page, static_size,
-                                     PERCPU_FIRST_CHUNK_RESERVE,
-                                     pcpue_unit_size, dyn_size,
-                                     pcpue_ptr, NULL);
-}
-
-/*
- * 4k page allocator
- *
- * This is the basic allocator.  Static percpu area is allocated
- * page-by-page and most of initialization is done by the generic
- * setup function.
- */
-static struct page **pcpu4k_pages __initdata;
-static int pcpu4k_nr_static_pages __initdata;
-
-static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
-{
-       if (pageno < pcpu4k_nr_static_pages)
-               return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
-       return NULL;
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+       if (early_cpu_to_node(from) == early_cpu_to_node(to))
+               return LOCAL_DISTANCE;
+       else
+               return REMOTE_DISTANCE;
+#else
+       return LOCAL_DISTANCE;
+#endif
 }
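
pcpu_cpu_distance() feeds the unit grouping done by pcpu_embed_first_chunk(): CPUs that report LOCAL_DISTANCE to each other can share an allocation group, so each group's memory stays node-local. A standalone sketch of that grouping idea (simplified; the real algorithm in mm/percpu.c differs in detail), using a hypothetical four-CPU, two-node topology:

    #include <stdio.h>

    #define LOCAL_DISTANCE  10      /* same values the kernel uses */
    #define REMOTE_DISTANCE 20

    /* hypothetical topology: cpus 0-1 on node 0, cpus 2-3 on node 1 */
    static const int cpu_to_node_tbl[4] = { 0, 0, 1, 1 };

    static int cpu_distance(unsigned int from, unsigned int to)
    {
            return cpu_to_node_tbl[from] == cpu_to_node_tbl[to]
                    ? LOCAL_DISTANCE : REMOTE_DISTANCE;
    }

    int main(void)
    {
            /* Greedy grouping: a cpu joins the first group whose leader
             * it is LOCAL_DISTANCE from; otherwise it starts a new
             * group.  This is the effect the embed allocator is after:
             * same-node cpus end up sharing one contiguous group. */
            int leader[4];
            int nr_groups = 0;
            unsigned int cpu;

            for (cpu = 0; cpu < 4; cpu++) {
                    int g;

                    for (g = 0; g < nr_groups; g++)
                            if (cpu_distance((unsigned int)leader[g], cpu) ==
                                LOCAL_DISTANCE)
                                    break;
                    if (g == nr_groups)
                            leader[nr_groups++] = (int)cpu;
                    printf("cpu%u -> group %d\n", cpu, g);
            }
            return 0;
    }
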
 
-static void __init pcpu4k_populate_pte(unsigned long addr)
+static void __init pcpup_populate_pte(unsigned long addr)
 {
        populate_extra_pte(addr);
 }
 
-static ssize_t __init setup_pcpu_4k(size_t static_size)
-{
-       size_t pages_size;
-       unsigned int cpu;
-       int i, j;
-       ssize_t ret;
-
-       pcpu4k_nr_static_pages = PFN_UP(static_size);
-
-       /* unaligned allocations can't be freed, round up to page size */
-       pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
-                              * sizeof(pcpu4k_pages[0]));
-       pcpu4k_pages = alloc_bootmem(pages_size);
-
-       /* allocate and copy */
-       j = 0;
-       for_each_possible_cpu(cpu)
-               for (i = 0; i < pcpu4k_nr_static_pages; i++) {
-                       void *ptr;
-
-                       ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
-                       if (!ptr)
-                               goto enomem;
-
-                       memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
-                       pcpu4k_pages[j++] = virt_to_page(ptr);
-               }
-
-       /* we're ready, commit */
-       pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
-               pcpu4k_nr_static_pages, static_size);
-
-       ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
-                                    PERCPU_FIRST_CHUNK_RESERVE, -1, -1, NULL,
-                                    pcpu4k_populate_pte);
-       goto out_free_ar;
-
-enomem:
-       while (--j >= 0)
-               free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
-       ret = -ENOMEM;
-out_free_ar:
-       free_bootmem(__pa(pcpu4k_pages), pages_size);
-       return ret;
-}
-
 static inline void setup_percpu_segment(int cpu)
 {
 #ifdef CONFIG_X86_32
@@ -401,42 +170,50 @@ static inline void setup_percpu_segment(int cpu)
 #endif
 }
 
-/*
- * Great future plan:
- * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
- * Always point %gs to its beginning
- */
 void __init setup_per_cpu_areas(void)
 {
-       size_t static_size = __per_cpu_end - __per_cpu_start;
        unsigned int cpu;
        unsigned long delta;
-       size_t pcpu_unit_size;
-       ssize_t ret;
+       int rc;
 
        pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
                NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
 
        /*
-        * Allocate percpu area.  If PSE is supported, try to make use
-        * of large page mappings.  Please read comments on top of
-        * each allocator for details.
+        * Allocate the percpu area.  The embedding allocator is
+        * preferred; however, on NUMA configurations it can produce a
+        * very sparse unit mapping, and on 32-bit the vmalloc area is
+        * not spacious enough to hold it.  Fall back to the page
+        * allocator in that case.
         */
-       ret = setup_pcpu_remap(static_size);
-       if (ret < 0)
-               ret = setup_pcpu_embed(static_size);
-       if (ret < 0)
-               ret = setup_pcpu_4k(static_size);
-       if (ret < 0)
-               panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
-                     static_size, ret);
-
-       pcpu_unit_size = ret;
+#ifdef CONFIG_X86_32
+       if (pcpu_chosen_fc == PCPU_FC_AUTO && pcpu_need_numa())
+               pcpu_chosen_fc = PCPU_FC_PAGE;
+#endif
+       rc = -EINVAL;
+       if (pcpu_chosen_fc != PCPU_FC_PAGE) {
+               const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE;
+               const size_t dyn_size = PERCPU_MODULE_RESERVE +
+                       PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
+
+               rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
+                                           dyn_size, atom_size,
+                                           pcpu_cpu_distance,
+                                           pcpu_fc_alloc, pcpu_fc_free);
+               if (rc < 0)
+                       pr_warning("%s allocator failed (%d), falling back to page size\n",
+                                  pcpu_fc_names[pcpu_chosen_fc], rc);
+       }
+       if (rc < 0)
+               rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
+                                          pcpu_fc_alloc, pcpu_fc_free,
+                                          pcpup_populate_pte);
+       if (rc < 0)
+               panic("cannot initialize percpu area (err=%d)", rc);
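
For what it's worth, pcpu_chosen_fc and pcpu_fc_names come from the generic percpu code, and if memory serves the choice can be forced from the kernel command line via the percpu_alloc= early parameter, which makes the fallback path above easy to exercise:

    percpu_alloc=embed     # insist on pcpu_embed_first_chunk()
    percpu_alloc=page      # skip embedding, go straight to the page allocator
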
 
        /* alrighty, percpu areas up and running */
        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
        for_each_possible_cpu(cpu) {
-               per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
+               per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
                per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
                per_cpu(cpu_number, cpu) = cpu;
                setup_percpu_segment(cpu);
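
The delta fixup above is what turns a per-cpu variable's link-time address into each CPU's run-time copy: per_cpu_offset(cpu) ends up as (pcpu_base_addr - __per_cpu_start) + pcpu_unit_offsets[cpu], so the accessors only need to add it to the variable's address. A self-contained userspace toy illustrating the same arithmetic (hypothetical names; the cross-object pointer math mirrors the kernel trick and is for illustration only):

    #include <stdio.h>
    #include <string.h>

    #define NR_CPUS 2

    static char template_area[64];        /* stands in for __per_cpu_start.. */
    static char first_chunk[NR_CPUS][64]; /* stands in for pcpu_base_addr    */
    static long cpu_offset[NR_CPUS];      /* stands in for per_cpu_offset()  */

    int main(void)
    {
            int *var = (int *)&template_area[16];   /* a "per-cpu variable" */
            long delta = (char *)first_chunk - template_area;
            unsigned int cpu;

            for (cpu = 0; cpu < NR_CPUS; cpu++) {
                    memcpy(first_chunk[cpu], template_area,
                           sizeof(template_area));
                    /* delta + per-unit offset, as in the loop above */
                    cpu_offset[cpu] = delta +
                            (long)(cpu * sizeof(first_chunk[0]));
                    /* per_cpu(var, cpu): link-time address + offset */
                    *(int *)((char *)var + cpu_offset[cpu]) = (int)(cpu + 100);
            }
            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    printf("cpu%u copy = %d\n", cpu,
                           *(int *)((char *)var + cpu_offset[cpu]));
            return 0;
    }
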
@@ -464,7 +241,7 @@ void __init setup_per_cpu_areas(void)
 #endif
 #endif
                /*
-                * Up to this point, the boot CPU has been using .data.init
+                * Up to this point, the boot CPU has been using .init.data
                 * area.  Reload any changed state for the boot CPU.
                 */
                if (cpu == boot_cpu_id)
@@ -480,6 +257,14 @@ void __init setup_per_cpu_areas(void)
        early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
 #endif
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
+       /*
+        * Make sure the boot CPU's numa_node is correct, in case the
+        * boot CPU sits on a node without memory installed.
+        */
+       set_cpu_numa_node(boot_cpu_id, early_cpu_to_node(boot_cpu_id));
+#endif
+
        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();