sched: INIT_PREEMPT_COUNT
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index bfe99d8..fa44eaf 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -20,7 +20,8 @@
 #include <linux/cache.h>
 #include <linux/jiffies.h>
 #include <linux/profile.h>
-#include <linux/lmb.h>
+#include <linux/bootmem.h>
+#include <linux/vmalloc.h>
 #include <linux/cpu.h>
 
 #include <asm/head.h>
 #include <asm/ldc.h>
 #include <asm/hypervisor.h>
 
+#include "cpumap.h"
+
 int sparc64_multi_core __read_mostly;
 
-cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
-cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
        { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 
-EXPORT_SYMBOL(cpu_possible_map);
-EXPORT_SYMBOL(cpu_online_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 EXPORT_SYMBOL(cpu_core_map);
 
@@ -122,9 +121,9 @@ void __cpuinit smp_callin(void)
        while (!cpu_isset(cpuid, smp_commenced_mask))
                rmb();
 
-       ipi_call_lock();
+       ipi_call_lock_irq();
        cpu_set(cpuid, cpu_online_map);
-       ipi_call_unlock();
+       ipi_call_unlock_irq();
 
        /* idle thread is expected to have preempt disabled */
        preempt_disable();
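
For reference, ipi_call_lock_irq()/ipi_call_unlock_irq() come from the generic
kernel/smp.c of this period; roughly (a sketch from memory, not part of this
patch) they take the lock guarding the global call-function queue with
interrupts disabled, so a CPU cannot appear in cpu_online_map halfway through
an smp_call_function() walk:

    /* Rough sketch of the generic helpers (kernel/smp.c of this era);
     * call_function is the global queue walked by smp_call_function(). */
    void ipi_call_lock_irq(void)
    {
            spin_lock_irq(&call_function.lock);
    }

    void ipi_call_unlock_irq(void)
    {
            spin_unlock_irq(&call_function.lock);
    }
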
@@ -282,7 +281,7 @@ static unsigned long kimage_addr_to_ra(void *p)
        return kern_base + (val - KERNBASE);
 }
 
-static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
+static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, void **descrp)
 {
        extern unsigned long sparc64_ttable_tl0;
        extern unsigned long kern_locked_tte_data;
@@ -302,12 +301,12 @@ static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread
                       "hvtramp_descr.\n");
                return;
        }
+       *descrp = hdesc;
 
        hdesc->cpu = cpu;
        hdesc->num_mappings = num_kernel_image_mappings;
 
        tb = &trap_block[cpu];
-       tb->hdesc = hdesc;
 
        hdesc->fault_info_va = (unsigned long) &tb->fault_info;
        hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
@@ -345,12 +344,12 @@ static struct thread_info *cpu_new_thread = NULL;
 
 static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
 {
-       struct trap_per_cpu *tb = &trap_block[cpu];
        unsigned long entry =
                (unsigned long)(&sparc64_cpu_startup);
        unsigned long cookie =
                (unsigned long)(&cpu_new_thread);
        struct task_struct *p;
+       void *descr = NULL;
        int timeout, ret;
 
        p = fork_idle(cpu);
@@ -363,7 +362,8 @@ static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
                if (ldom_domaining_enabled)
                        ldom_startcpu_cpuid(cpu,
-                                           (unsigned long) cpu_new_thread);
+                                           (unsigned long) cpu_new_thread,
+                                           &descr);
                else
 #endif
                        prom_startcpu_cpuid(cpu, entry, cookie);
@@ -387,10 +387,7 @@ static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
        }
        cpu_new_thread = NULL;
 
-       if (tb->hdesc) {
-               kfree(tb->hdesc);
-               tb->hdesc = NULL;
-       }
+       kfree(descr);
 
        return ret;
 }
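
The ldom_startcpu_cpuid()/smp_boot_one_cpu() hunks above change who owns the
hvtramp descriptor: instead of being stashed in trap_block[] and freed from
there, it is handed back through an out-parameter and freed exactly once by
the caller. A minimal sketch of that pattern (hypothetical names, not the
patch itself); note that kfree(NULL) is a no-op, which is what makes the
unconditional kfree(descr) safe when the LDOM path never allocated anything:

    #include <linux/errno.h>
    #include <linux/slab.h>

    struct boot_descr {                     /* hypothetical stand-in for hvtramp_descr */
            unsigned long cpu;
    };

    static int example_start_cpu(unsigned int cpu, void **descrp)
    {
            struct boot_descr *d = kzalloc(sizeof(*d), GFP_KERNEL);

            if (!d)
                    return -ENOMEM;
            *descrp = d;                    /* caller owns the allocation from here */
            d->cpu = cpu;
            /* ... fill in the descriptor and hand it to the hypervisor ... */
            return 0;
    }

    static void example_boot_cpu(unsigned int cpu)
    {
            void *descr = NULL;

            example_start_cpu(cpu, &descr);
            /* ... wait for the new cpu to call in ... */
            kfree(descr);                   /* no-op if nothing was allocated */
    }
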
@@ -453,7 +450,7 @@ again:
        __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
                             : : "r" (pstate));
        if (stuck == 0) {
-               printk("CPU[%d]: mondo stuckage result[%016lx]\n",
+               printk("CPU[%d]: mondo stuckage result[%016llx]\n",
                       smp_processor_id(), result);
        } else {
                udelay(2);
@@ -588,7 +585,7 @@ retry:
                        /* Busy bits will not clear, continue instead
                         * of freezing up on this cpu.
                         */
-                       printk("CPU[%d]: mondo stuckage result[%016lx]\n",
+                       printk("CPU[%d]: mondo stuckage result[%016llx]\n",
                               smp_processor_id(), dispatch_stat);
                } else {
                        int i, this_busy_nack = 0;
@@ -812,9 +809,9 @@ static void smp_start_sync_tick_client(int cpu)
 
 extern unsigned long xcall_call_function;
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-       xcall_deliver((u64) &xcall_call_function, 0, 0, &mask);
+       xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
 }
 
 extern unsigned long xcall_call_function_single;
@@ -854,7 +851,7 @@ static void tsb_sync(void *info)
 
 void smp_tsb_sync(struct mm_struct *mm)
 {
-       smp_call_function_mask(mm->cpu_vm_mask, tsb_sync, mm, 1);
+       smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
 }
 
 extern unsigned long xcall_flush_tlb_mm;
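
These hunks track the generic cpumask rework: cross-call entry points now take
a const struct cpumask * instead of a cpumask_t by value, so xcall_deliver()
receives the caller's pointer directly and smp_call_function_mask() gives way
to smp_call_function_many(). A hedged example of how a caller builds a
temporary mask under the pointer-based API (hypothetical helper, not from this
file):

    #include <linux/cpumask.h>
    #include <linux/gfp.h>
    #include <linux/smp.h>

    static void noop_ipi(void *unused)              /* hypothetical callback */
    {
    }

    static void example_kick_cpus(const struct cpumask *targets)
    {
            cpumask_var_t mask;

            if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                    return;
            cpumask_copy(mask, targets);

            preempt_disable();
            /* runs noop_ipi on every online cpu in mask except this one;
             * the final argument (wait) makes it block until all are done */
            smp_call_function_many(mask, noop_ipi, NULL, 1);
            preempt_enable();

            free_cpumask_var(mask);
    }
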
@@ -1035,7 +1032,7 @@ void smp_fetch_global_regs(void)
  *    If the address space is non-shared (ie. mm->count == 1) we avoid
  *    cross calls when we want to flush the currently running process's
  *    tlb state.  This is done by clearing all cpu bits except the current
- *    processor's in current->active_mm->cpu_vm_mask and performing the
+ *    processor's in current->mm->cpu_vm_mask and performing the
  *    flush locally only.  This will force any subsequent cpus which run
  *    this task to flush the context from the local tlb if the process
  *    migrates to another cpu (again).
@@ -1059,13 +1056,13 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
        int cpu = get_cpu();
 
        if (atomic_read(&mm->mm_users) == 1) {
-               mm->cpu_vm_mask = cpumask_of_cpu(cpu);
+               cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
                goto local_flush_and_out;
        }
 
        smp_cross_call_masked(&xcall_flush_tlb_mm,
                              ctx, 0, 0,
-                             &mm->cpu_vm_mask);
+                             mm_cpumask(mm));
 
 local_flush_and_out:
        __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
@@ -1078,12 +1075,12 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
        u32 ctx = CTX_HWBITS(mm->context);
        int cpu = get_cpu();
 
-       if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
-               mm->cpu_vm_mask = cpumask_of_cpu(cpu);
+       if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
+               cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
        else
                smp_cross_call_masked(&xcall_flush_tlb_pending,
                                      ctx, nr, (unsigned long) vaddrs,
-                                     &mm->cpu_vm_mask);
+                                     mm_cpumask(mm));
 
        __flush_tlb_pending(ctx, nr, vaddrs);
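
The smp_flush_tlb_mm()/smp_flush_tlb_pending() hunks above replace direct
cpu_vm_mask manipulation with the mm_cpumask() accessor (at the time roughly
'#define mm_cpumask(mm) (&(mm)->cpu_vm_mask)', from memory) and the
pointer-taking cpumask helpers. A sketch of the single-user fast path they
implement (hypothetical helper): when only one thread uses the address space,
the mask is collapsed to the running CPU and the flush stays local; any other
CPU that later runs the task will see it is not in the mask and flush its own
TLB context instead.

    #include <linux/mm_types.h>
    #include <linux/sched.h>
    #include <linux/smp.h>

    /* Hypothetical sketch of the fast path used above. */
    static void example_shrink_mask_if_single_user(struct mm_struct *mm)
    {
            int cpu = get_cpu();

            if (atomic_read(&mm->mm_users) == 1)
                    /* only this thread uses the mm: forget every other cpu
                     * so the flush can stay local */
                    cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));

            put_cpu();
    }
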
 
@@ -1319,6 +1316,8 @@ int __cpu_disable(void)
        cpu_clear(cpu, cpu_online_map);
        ipi_call_unlock();
 
+       cpu_map_rebuild();
+
        return 0;
 }
 
@@ -1377,36 +1376,171 @@ void smp_send_stop(void)
 {
 }
 
-unsigned long __per_cpu_base __read_mostly;
-unsigned long __per_cpu_shift __read_mostly;
+/**
+ * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
+ * @cpu: cpu to allocate for
+ * @size: size allocation in bytes
+ * @align: alignment
+ *
+ * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
+ * does the right thing for NUMA regardless of the current
+ * configuration.
+ *
+ * RETURNS:
+ * Pointer to the allocated area on success, NULL on failure.
+ */
+static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
+                                       unsigned long align)
+{
+       const unsigned long goal = __pa(MAX_DMA_ADDRESS);
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+       int node = cpu_to_node(cpu);
+       void *ptr;
+
+       if (!node_online(node) || !NODE_DATA(node)) {
+               ptr = __alloc_bootmem(size, align, goal);
+               pr_info("cpu %d has no node %d or node-local memory\n",
+                       cpu, node);
+               pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
+                        cpu, size, __pa(ptr));
+       } else {
+               ptr = __alloc_bootmem_node(NODE_DATA(node),
+                                          size, align, goal);
+               pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
+                        "%016lx\n", cpu, size, node, __pa(ptr));
+       }
+       return ptr;
+#else
+       return __alloc_bootmem(size, align, goal);
+#endif
+}
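
As a usage illustration of the helper documented above (hypothetical values;
the real call site follows in setup_per_cpu_areas() below), each CPU gets its
chunk from its own NUMA node when that node has memory:

    /* Hypothetical call: a 4MB, 4MB-aligned chunk for cpu 3, taken from
     * cpu 3's node when possible, otherwise from any-node bootmem. */
    void *chunk = pcpu_alloc_bootmem(3, 4UL * 1024UL * 1024UL,
                                     4UL * 1024UL * 1024UL);
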
+
+static size_t pcpur_size __initdata;
+static void **pcpur_ptrs __initdata;
+
+static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
+{
+       size_t off = (size_t)pageno << PAGE_SHIFT;
+
+       if (off >= pcpur_size)
+               return NULL;
 
-EXPORT_SYMBOL(__per_cpu_base);
-EXPORT_SYMBOL(__per_cpu_shift);
+       return virt_to_page(pcpur_ptrs[cpu] + off);
+}
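
pcpur_get_page() is the callback handed to pcpu_setup_first_chunk() further
down; for a given (cpu, pageno) it returns the page backing that offset of the
cpu's chunk, and NULL once the offset runs past what was reserved, presumably
so the core only treats the reserved region as populated. For instance
(hypothetical numbers):

    /* Hypothetical: sparc64 uses 8K pages (PAGE_SHIFT == 13), so this asks
     * for the page backing bytes 3*8K .. 4*8K-1 of cpu 0's chunk; offsets
     * at or beyond pcpur_size return NULL. */
    struct page *pg = pcpur_get_page(0, 3);
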
+
+#define PCPU_CHUNK_SIZE (4UL * 1024UL * 1024UL)
 
-void __init real_setup_per_cpu_areas(void)
+static void __init pcpu_map_range(unsigned long start, unsigned long end,
+                                 struct page *page)
 {
-       unsigned long paddr, goal, size, i;
-       char *ptr;
+       unsigned long pfn = page_to_pfn(page);
+       unsigned long pte_base;
+
+       BUG_ON((pfn<<PAGE_SHIFT)&(PCPU_CHUNK_SIZE - 1UL));
 
-       /* Copy section for each CPU (we discard the original) */
-       goal = PERCPU_ENOUGH_ROOM;
+       pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
+                   _PAGE_CP_4U | _PAGE_CV_4U |
+                   _PAGE_P_4U | _PAGE_W_4U);
+       if (tlb_type == hypervisor)
+               pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
+                           _PAGE_CP_4V | _PAGE_CV_4V |
+                           _PAGE_P_4V | _PAGE_W_4V);
+
+       while (start < end) {
+               pgd_t *pgd = pgd_offset_k(start);
+               unsigned long this_end;
+               pud_t *pud;
+               pmd_t *pmd;
+               pte_t *pte;
+
+               pud = pud_offset(pgd, start);
+               if (pud_none(*pud)) {
+                       pmd_t *new;
+
+                       new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+                       pud_populate(&init_mm, pud, new);
+               }
 
-       __per_cpu_shift = PAGE_SHIFT;
-       for (size = PAGE_SIZE; size < goal; size <<= 1UL)
-               __per_cpu_shift++;
+               pmd = pmd_offset(pud, start);
+               if (!pmd_present(*pmd)) {
+                       pte_t *new;
 
-       paddr = lmb_alloc(size * NR_CPUS, PAGE_SIZE);
-       if (!paddr) {
-               prom_printf("Cannot allocate per-cpu memory.\n");
-               prom_halt();
+                       new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+                       pmd_populate_kernel(&init_mm, pmd, new);
+               }
+
+               pte = pte_offset_kernel(pmd, start);
+               this_end = (start + PMD_SIZE) & PMD_MASK;
+               if (this_end > end)
+                       this_end = end;
+
+               while (start < this_end) {
+                       unsigned long paddr = pfn << PAGE_SHIFT;
+
+                       pte_val(*pte) = (paddr | pte_base);
+
+                       start += PAGE_SIZE;
+                       pte++;
+                       pfn++;
+               }
        }
+}
+
+void __init setup_per_cpu_areas(void)
+{
+       size_t dyn_size, static_size = __per_cpu_end - __per_cpu_start;
+       static struct vm_struct vm;
+       unsigned long delta, cpu;
+       size_t pcpu_unit_size;
+       size_t ptrs_size;
+
+       pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
+                              PERCPU_DYNAMIC_RESERVE);
+       dyn_size = pcpur_size - static_size - PERCPU_MODULE_RESERVE;
 
-       ptr = __va(paddr);
-       __per_cpu_base = ptr - __per_cpu_start;
 
-       for (i = 0; i < NR_CPUS; i++, ptr += size)
-               memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+       ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
+       pcpur_ptrs = alloc_bootmem(ptrs_size);
+
+       for_each_possible_cpu(cpu) {
+               pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PCPU_CHUNK_SIZE,
+                                                    PCPU_CHUNK_SIZE);
+
+               free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
+                            PCPU_CHUNK_SIZE - pcpur_size);
+
+               memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
+       }
+
+       /* allocate address and map */
+       vm.flags = VM_ALLOC;
+       vm.size = num_possible_cpus() * PCPU_CHUNK_SIZE;
+       vm_area_register_early(&vm, PCPU_CHUNK_SIZE);
+
+       for_each_possible_cpu(cpu) {
+               unsigned long start = (unsigned long) vm.addr;
+               unsigned long end;
+
+               start += cpu * PCPU_CHUNK_SIZE;
+               end = start + PCPU_CHUNK_SIZE;
+               pcpu_map_range(start, end, virt_to_page(pcpur_ptrs[cpu]));
+       }
+
+       pcpu_unit_size = pcpu_setup_first_chunk(pcpur_get_page, static_size,
+                                               PERCPU_MODULE_RESERVE, dyn_size,
+                                               PCPU_CHUNK_SIZE, vm.addr, NULL);
+
+       free_bootmem(__pa(pcpur_ptrs), ptrs_size);
+
+       delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+       for_each_possible_cpu(cpu) {
+               __per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
+       }
 
        /* Setup %g5 for the boot cpu.  */
        __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
+
+       of_fill_in_cpu_data();
+       if (tlb_type == hypervisor)
+               mdesc_fill_in_cpu_data(cpu_all_mask);
 }
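
For orientation (a sketch of the end result, not part of the patch): once the
loop above has filled in __per_cpu_offset(cpu), a per-cpu access amounts to
shifting the variable's link-time address in .data.percpu by
pcpu_base_addr - __per_cpu_start plus the cpu's slot, which lands inside the
vmalloc-space chunk that pcpu_map_range() backed with node-local memory.

    /* Sketch (mine): how the offsets computed in setup_per_cpu_areas()
     * translate a static per-cpu address into cpu's private copy. */
    static inline void *example_per_cpu_ptr(void *static_addr, int cpu)
    {
            return (void *)((unsigned long)static_addr + __per_cpu_offset(cpu));
    }
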