include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 045fbb5..4c53345 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
 #include <linux/cache.h>
 #include <linux/jiffies.h>
 #include <linux/profile.h>
-#include <linux/lmb.h>
+#include <linux/bootmem.h>
+#include <linux/vmalloc.h>
 #include <linux/cpu.h>
+#include <linux/slab.h>
 
 #include <asm/head.h>
 #include <asm/ptrace.h>
@@ -47,6 +49,8 @@
 #include <asm/ldc.h>
 #include <asm/hypervisor.h>
 
+#include "cpumap.h"
+
 int sparc64_multi_core __read_mostly;
 
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
@@ -367,7 +371,7 @@ static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
        } else {
                struct device_node *dp = of_find_node_by_cpuid(cpu);
 
-               prom_startcpu(dp->node, entry, cookie);
+               prom_startcpu(dp->phandle, entry, cookie);
        }
 
        for (timeout = 0; timeout < 50000; timeout++) {
@@ -1313,6 +1317,8 @@ int __cpu_disable(void)
        cpu_clear(cpu, cpu_online_map);
        ipi_call_unlock();
 
+       cpu_map_rebuild();
+
        return 0;
 }
 
@@ -1371,36 +1377,114 @@ void smp_send_stop(void)
 {
 }
 
-void __init real_setup_per_cpu_areas(void)
+/**
+ * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
+ * @cpu: cpu to allocate for
+ * @size: size of allocation in bytes
+ * @align: alignment
+ *
+ * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
+ * does the right thing for NUMA regardless of the current
+ * configuration.
+ *
+ * RETURNS:
+ * Pointer to the allocated area on success, NULL on failure.
+ */
+static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
+                                       size_t align)
+{
+       const unsigned long goal = __pa(MAX_DMA_ADDRESS);
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+       int node = cpu_to_node(cpu);
+       void *ptr;
+
+       if (!node_online(node) || !NODE_DATA(node)) {
+               ptr = __alloc_bootmem(size, align, goal);
+               pr_info("cpu %d has no node %d or node-local memory\n",
+                       cpu, node);
+               pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
+                        cpu, size, __pa(ptr));
+       } else {
+               ptr = __alloc_bootmem_node(NODE_DATA(node),
+                                          size, align, goal);
+               pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
+                        "%016lx\n", cpu, size, node, __pa(ptr));
+       }
+       return ptr;
+#else
+       return __alloc_bootmem(size, align, goal);
+#endif
+}
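
For readers skimming the hunk: the wrapper above prefers node-local bootmem and
falls back to plain __alloc_bootmem() when the CPU's node is offline or has no
node-local memory. A minimal userspace sketch of that try-local-then-fall-back
shape follows; alloc_on_node() and alloc_anywhere() are hypothetical stand-ins
for the bootmem calls (malloc() plays the part of bootmem here), not kernel APIs:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for __alloc_bootmem_node(). */
    static void *alloc_on_node(int node, size_t size)
    {
            (void)node;     /* a real NUMA allocator would use this */
            return malloc(size);
    }

    /* Hypothetical stand-in for __alloc_bootmem(). */
    static void *alloc_anywhere(size_t size)
    {
            return malloc(size);
    }

    /* node < 0 models "!node_online(node) || !NODE_DATA(node)". */
    static void *numa_alloc_with_fallback(int cpu, int node, size_t size)
    {
            if (node < 0) {
                    printf("cpu %d has no node-local memory\n", cpu);
                    return alloc_anywhere(size);
            }
            return alloc_on_node(node, size);
    }

    int main(void)
    {
            void *p = numa_alloc_with_fallback(0, -1, 4096);
            free(p);
            return 0;
    }
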
+
+static void __init pcpu_free_bootmem(void *ptr, size_t size)
+{
+       free_bootmem(__pa(ptr), size);
+}
+
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
 {
-       unsigned long base, shift, paddr, goal, size, i;
-       char *ptr;
+       if (cpu_to_node(from) == cpu_to_node(to))
+               return LOCAL_DISTANCE;
+       else
+               return REMOTE_DISTANCE;
+}
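
The distance callback lets the percpu allocator group CPUs whose units can
share one large allocation. A self-contained illustration, assuming a made-up
four-CPU/two-node topology; the LOCAL_DISTANCE/REMOTE_DISTANCE values mirror
the kernel's 10/20 defaults from <linux/topology.h>:

    #include <stdio.h>

    #define LOCAL_DISTANCE  10      /* kernel default */
    #define REMOTE_DISTANCE 20      /* kernel default */

    /* Assumed example topology: CPUs 0-1 on node 0, CPUs 2-3 on node 1. */
    static const int cpu_node[4] = { 0, 0, 1, 1 };

    static int distance(int from, int to)
    {
            return cpu_node[from] == cpu_node[to] ? LOCAL_DISTANCE
                                                  : REMOTE_DISTANCE;
    }

    int main(void)
    {
            /* CPUs at LOCAL_DISTANCE end up in the same unit group. */
            for (int i = 0; i < 4; i++)
                    for (int j = i + 1; j < 4; j++)
                            if (distance(i, j) == LOCAL_DISTANCE)
                                    printf("cpu%d and cpu%d grouped\n", i, j);
            return 0;
    }
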
 
-       /* Copy section for each CPU (we discard the original) */
-       goal = PERCPU_ENOUGH_ROOM;
+static void __init pcpu_populate_pte(unsigned long addr)
+{
+       pgd_t *pgd = pgd_offset_k(addr);
+       pud_t *pud;
+       pmd_t *pmd;
 
-       shift = PAGE_SHIFT;
-       for (size = PAGE_SIZE; size < goal; size <<= 1UL)
-               shift++;
+       pud = pud_offset(pgd, addr);
+       if (pud_none(*pud)) {
+               pmd_t *new;
 
-       paddr = lmb_alloc(size * NR_CPUS, PAGE_SIZE);
-       if (!paddr) {
-               prom_printf("Cannot allocate per-cpu memory.\n");
-               prom_halt();
+               new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+               pud_populate(&init_mm, pud, new);
        }
 
-       ptr = __va(paddr);
-       base = ptr - __per_cpu_start;
+       pmd = pmd_offset(pud, addr);
+       if (!pmd_present(*pmd)) {
+               pte_t *new;
 
-       for (i = 0; i < NR_CPUS; i++, ptr += size) {
-               __per_cpu_offset(i) = base + (i * size);
-               memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+               new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+               pmd_populate_kernel(&init_mm, pmd, new);
+       }
+}
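
pcpu_populate_pte() walks pgd -> pud -> pmd for an address in the percpu
vmalloc region and allocates any missing intermediate level from bootmem. A toy
userspace analog of the same walk-and-populate-on-demand idea, using a sparse
two-level array instead of real page tables:

    #include <stdlib.h>

    /* Toy two-level "page table": top[] entries are only allocated once
     * an address in their range is touched, mirroring the pud_none() /
     * !pmd_present() checks plus bootmem allocation above. */
    #define TOP_ENTRIES 16
    #define LOW_ENTRIES 16

    static long *top[TOP_ENTRIES];

    static long *populate(unsigned int addr)
    {
            unsigned int hi = (addr / LOW_ENTRIES) % TOP_ENTRIES;
            unsigned int lo = addr % LOW_ENTRIES;

            if (!top[hi]) {         /* missing level: allocate it now */
                    top[hi] = calloc(LOW_ENTRIES, sizeof(long));
                    if (!top[hi])
                            abort();
            }
            return &top[hi][lo];
    }

    int main(void)
    {
            *populate(42) = 1;      /* first touch allocates level two */
            return 0;
    }
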
+
+void __init setup_per_cpu_areas(void)
+{
+       unsigned long delta;
+       unsigned int cpu;
+       int rc = -EINVAL;
+
+       if (pcpu_chosen_fc != PCPU_FC_PAGE) {
+               rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+                                           PERCPU_DYNAMIC_RESERVE, 4 << 20,
+                                           pcpu_cpu_distance,
+                                           pcpu_alloc_bootmem,
+                                           pcpu_free_bootmem);
+               if (rc)
+                       pr_warning("PERCPU: %s allocator failed (%d), "
+                                  "falling back to page size\n",
+                                  pcpu_fc_names[pcpu_chosen_fc], rc);
        }
+       if (rc < 0)
+               rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
+                                          pcpu_alloc_bootmem,
+                                          pcpu_free_bootmem,
+                                          pcpu_populate_pte);
+       if (rc < 0)
+               panic("cannot initialize percpu area (err=%d)", rc);
+
+       delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+       for_each_possible_cpu(cpu)
+               __per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
 
        /* Setup %g5 for the boot cpu.  */
        __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
 
        of_fill_in_cpu_data();
        if (tlb_type == hypervisor)
-               mdesc_fill_in_cpu_data(CPU_MASK_ALL_PTR);
+               mdesc_fill_in_cpu_data(cpu_all_mask);
 }
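
To make the offset arithmetic in setup_per_cpu_areas() concrete: each CPU's
copy of a percpu variable lives at its linker-assigned address plus
(delta + pcpu_unit_offsets[cpu]), where delta relocates linker addresses into
the first chunk. A small standalone demo with made-up values (none of these
are real kernel addresses):

    #include <stdio.h>

    #define UNIT_SIZE 0x8000UL      /* assumed per-cpu unit size */

    int main(void)
    {
            unsigned long linked_start = 0x1000;    /* __per_cpu_start analog */
            unsigned long chunk_base   = 0x200000;  /* pcpu_base_addr analog  */
            unsigned long unit_off[4]  = { 0, UNIT_SIZE,
                                           2 * UNIT_SIZE, 3 * UNIT_SIZE };
            unsigned long delta = chunk_base - linked_start;
            unsigned long var_link_addr = 0x1040;   /* some percpu variable */

            for (int cpu = 0; cpu < 4; cpu++) {
                    unsigned long off = delta + unit_off[cpu];
                    printf("cpu%d copy of var at %#lx\n",
                           cpu, var_link_addr + off);
            }
            return 0;
    }
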