Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 0d1e7ac..de3b63a 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
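
The pr_fmt define added at the very top of the file is what gives every pr_info()/pr_debug() in this file an automatic "setup_percpu: " style prefix, which is why the messages added below no longer carry a hand-written "PERCPU: " tag.  A minimal sketch of the mechanism, not the exact printk.h internals:

    /* Sketch only: the define must appear before the first include that
     * pulls in the pr_*() helpers, because printk.h only supplies a
     * default pr_fmt when none has been defined yet. */
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
    #include <linux/kernel.h>

    static void demo(void)
    {
            pr_info("hello\n");
            /* expands to roughly:
             * printk(KERN_INFO "setup_percpu" ": " "hello\n");
             */
    }
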
@@ -7,6 +9,7 @@
 #include <linux/crash_dump.h>
 #include <linux/smp.h>
 #include <linux/topology.h>
+#include <linux/pfn.h>
 #include <asm/sections.h>
 #include <asm/processor.h>
 #include <asm/setup.h>
 #include <asm/proto.h>
 #include <asm/cpumask.h>
 #include <asm/cpu.h>
-
-#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-# define DBG(x...) printk(KERN_DEBUG x)
-#else
-# define DBG(x...)
-#endif
+#include <asm/stackprotector.h>
 
 DEFINE_PER_CPU(int, cpu_number);
 EXPORT_PER_CPU_SYMBOL(cpu_number);
@@ -40,6 +38,125 @@ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
 };
 EXPORT_SYMBOL(__per_cpu_offset);
 
+/*
+ * On x86_64 symbols referenced from code should be reachable using
+ * 32bit relocations.  Reserve space for static percpu variables in
+ * modules so that they are always served from the first chunk which
+ * is located at the percpu segment base.  On x86_32, anything can
+ * address anywhere.  No need to reserve space in the first chunk.
+ */
+#ifdef CONFIG_X86_64
+#define PERCPU_FIRST_CHUNK_RESERVE     PERCPU_MODULE_RESERVE
+#else
+#define PERCPU_FIRST_CHUNK_RESERVE     0
+#endif
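
The reservation above exists for code like the following hedged sketch (my_module_hits and the surrounding module are hypothetical, not part of this patch): a module-local static percpu variable, which on x86_64 is reached through a 32-bit %gs-relative displacement and therefore must sit in the reserved region of the first chunk at the percpu segment base.

    /* Hypothetical module code, for illustration only. */
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, my_module_hits);

    static void my_module_count_hit(void)
    {
            get_cpu_var(my_module_hits)++;      /* disables preemption */
            put_cpu_var(my_module_hits);        /* re-enables it */
    }
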
+
+#ifdef CONFIG_X86_32
+/**
+ * pcpu_need_numa - determine percpu allocation needs to consider NUMA
+ *
+ * If NUMA is not configured or there is only one NUMA node available,
+ * there is no reason to consider NUMA.  This function determines
+ * whether percpu allocation should consider NUMA or not.
+ *
+ * RETURNS:
+ * true if NUMA should be considered; otherwise, false.
+ */
+static bool __init pcpu_need_numa(void)
+{
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+       pg_data_t *last = NULL;
+       unsigned int cpu;
+
+       for_each_possible_cpu(cpu) {
+               int node = early_cpu_to_node(cpu);
+
+               if (node_online(node) && NODE_DATA(node) &&
+                   last && last != NODE_DATA(node))
+                       return true;
+
+               last = NODE_DATA(node);
+       }
+#endif
+       return false;
+}
+#endif
+
+/**
+ * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
+ * @cpu: cpu to allocate for
+ * @size: size allocation in bytes
+ * @align: alignment
+ *
+ * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
+ * does the right thing for NUMA regardless of the current
+ * configuration.
+ *
+ * RETURNS:
+ * Pointer to the allocated area on success, NULL on failure.
+ */
+static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
+                                       unsigned long align)
+{
+       const unsigned long goal = __pa(MAX_DMA_ADDRESS);
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+       int node = early_cpu_to_node(cpu);
+       void *ptr;
+
+       if (!node_online(node) || !NODE_DATA(node)) {
+               ptr = __alloc_bootmem_nopanic(size, align, goal);
+               pr_info("cpu %d has no node %d or node-local memory\n",
+                       cpu, node);
+               pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
+                        cpu, size, __pa(ptr));
+       } else {
+               ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
+                                                  size, align, goal);
+               pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
+                        cpu, size, node, __pa(ptr));
+       }
+       return ptr;
+#else
+       return __alloc_bootmem_nopanic(size, align, goal);
+#endif
+}
+
+/*
+ * Helpers for first chunk memory allocation
+ */
+static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
+{
+       return pcpu_alloc_bootmem(cpu, size, align);
+}
+
+static void __init pcpu_fc_free(void *ptr, size_t size)
+{
+#ifdef CONFIG_NO_BOOTMEM
+       u64 start = __pa(ptr);
+       u64 end = start + size;
+       free_early_partial(start, end);
+#else
+       free_bootmem(__pa(ptr), size);
+#endif
+}
+
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
+{
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+       if (early_cpu_to_node(from) == early_cpu_to_node(to))
+               return LOCAL_DISTANCE;
+       else
+               return REMOTE_DISTANCE;
+#else
+       return LOCAL_DISTANCE;
+#endif
+}
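
pcpu_cpu_distance() is the hook the generic percpu allocator uses when deciding how to group CPUs into allocation units; LOCAL_DISTANCE and REMOTE_DISTANCE come from include/linux/topology.h (conventionally 10 and 20, mirroring ACPI SLIT values).  A rough sketch of how a caller might consume such a callback -- cpus_share_group() is a hypothetical helper, not the allocator's real grouping code:

    /* Illustrative only: treat two CPUs as groupable when the distance
     * callback reports them node-local to each other in both directions. */
    static bool __init cpus_share_group(unsigned int a, unsigned int b,
                                        int (*dist)(unsigned int, unsigned int))
    {
            return dist(a, b) == LOCAL_DISTANCE &&
                   dist(b, a) == LOCAL_DISTANCE;
    }
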
+
+static void __init pcpup_populate_pte(unsigned long addr)
+{
+       populate_extra_pte(addr);
+}
+
 static inline void setup_percpu_segment(int cpu)
 {
 #ifdef CONFIG_X86_32
@@ -53,48 +170,54 @@ static inline void setup_percpu_segment(int cpu)
 #endif
 }
 
-/*
- * Great future plan:
- * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
- * Always point %gs to its beginning
- */
 void __init setup_per_cpu_areas(void)
 {
-       ssize_t size;
-       char *ptr;
-       int cpu;
-
-       /* Copy section for each CPU (we discard the original) */
-       size = roundup(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
+       unsigned int cpu;
+       unsigned long delta;
+       int rc;
 
        pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
                NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
 
-       pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);
-
-       for_each_possible_cpu(cpu) {
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-               ptr = alloc_bootmem_pages(size);
-#else
-               int node = early_cpu_to_node(cpu);
-               if (!node_online(node) || !NODE_DATA(node)) {
-                       ptr = alloc_bootmem_pages(size);
-                       pr_info("cpu %d has no node %d or node-local memory\n",
-                               cpu, node);
-                       pr_debug("per cpu data for cpu%d at %016lx\n",
-                                cpu, __pa(ptr));
-               } else {
-                       ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
-                       pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
-                               cpu, node, __pa(ptr));
-               }
+       /*
+        * Allocate percpu area.  Embedding allocator is our favorite;
+        * however, on NUMA configurations, it can result in very
+        * sparse unit mapping and vmalloc area isn't spacious enough
+        * on 32bit.  Use page in that case.
+        */
+#ifdef CONFIG_X86_32
+       if (pcpu_chosen_fc == PCPU_FC_AUTO && pcpu_need_numa())
+               pcpu_chosen_fc = PCPU_FC_PAGE;
 #endif
+       rc = -EINVAL;
+       if (pcpu_chosen_fc != PCPU_FC_PAGE) {
+               const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE;
+               const size_t dyn_size = PERCPU_MODULE_RESERVE +
+                       PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
+
+               rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
+                                           dyn_size, atom_size,
+                                           pcpu_cpu_distance,
+                                           pcpu_fc_alloc, pcpu_fc_free);
+               if (rc < 0)
+                       pr_warning("%s allocator failed (%d), falling back to page size\n",
+                                  pcpu_fc_names[pcpu_chosen_fc], rc);
+       }
+       if (rc < 0)
+               rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
+                                          pcpu_fc_alloc, pcpu_fc_free,
+                                          pcpup_populate_pte);
+       if (rc < 0)
+               panic("cannot initialize percpu area (err=%d)", rc);
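
For a sense of scale, assuming PERCPU_MODULE_RESERVE of 8K and treating a 20K PERCPU_DYNAMIC_RESERVE as an illustrative figure (include/linux/percpu.h has the authoritative values), the sizing above works out roughly as follows:

    /*
     * x86_64: PERCPU_FIRST_CHUNK_RESERVE = PERCPU_MODULE_RESERVE = 8K
     *         dyn_size  = 8K + 20K - 8K  = 20K of dynamic percpu space
     *         atom_size = PMD_SIZE (2MB) when the CPU has PSE, else 4K
     *
     * x86_32: PERCPU_FIRST_CHUNK_RESERVE = 0
     *         dyn_size  = 8K + 20K - 0   = 28K, i.e. module space simply
     *         becomes part of the dynamic area, since 32-bit code can
     *         address the percpu area from anywhere
     */
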
 
-               memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
-               per_cpu_offset(cpu) = ptr - __per_cpu_start;
+       /* alrighty, percpu areas up and running */
+       delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+       for_each_possible_cpu(cpu) {
+               per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
                per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
                per_cpu(cpu_number, cpu) = cpu;
                setup_percpu_segment(cpu);
+               setup_stack_canary_segment(cpu);
                /*
                 * Copy data used in early init routines from the
                 * initial arrays to the per cpu data areas.  These
@@ -118,13 +241,11 @@ void __init setup_per_cpu_areas(void)
 #endif
 #endif
                /*
-                * Up to this point, the boot CPU has been using .data.init
+                * Up to this point, the boot CPU has been using .init.data
                 * area.  Reload any changed state for the boot CPU.
                 */
                if (cpu == boot_cpu_id)
-                       switch_to_new_gdt();
-
-               DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
+                       switch_to_new_gdt(cpu);
        }
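
The delta + pcpu_unit_offsets[] arithmetic above is what makes the ordinary percpu accessors work: a static percpu symbol's link-time address is really an offset into the percpu section, and adding per_cpu_offset(cpu) relocates it into that CPU's unit of the first chunk.  A minimal sketch (demo_val is a hypothetical variable, not part of this file):

    #include <linux/percpu.h>

    DEFINE_PER_CPU(int, demo_val);

    static int read_demo_val(unsigned int cpu)
    {
            /* per_cpu() adds per_cpu_offset(cpu) -- the
             * delta + pcpu_unit_offsets[cpu] computed above -- to the
             * symbol's section offset. */
            return per_cpu(demo_val, cpu);
    }

    static int read_demo_val_here(void)
    {
            /* the running CPU's copy is reached through the per-cpu
             * segment base (%gs on 64-bit, %fs on 32-bit) loaded during
             * CPU bringup */
            return this_cpu_read(demo_val);
    }
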
 
        /* indicate the early static arrays will soon be gone */
@@ -136,6 +257,14 @@ void __init setup_per_cpu_areas(void)
        early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
 #endif
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
+       /*
+        * make sure boot cpu numa_node is right, when boot cpu is on the
+        * node that doesn't have mem installed
+        */
+       set_cpu_numa_node(boot_cpu_id, early_cpu_to_node(boot_cpu_id));
+#endif
+
        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();