[IA64] Don't go beyond iosapic_intr_info's arraysize
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index b49d4dd..da8f020 100644
@@ -30,6 +30,7 @@
 #include <linux/delay.h>
 #include <linux/efi.h>
 #include <linux/bitops.h>
+#include <linux/kexec.h>
 
 #include <asm/atomic.h>
 #include <asm/current.h>
 #include <asm/mca.h>
 
 /*
- * Structure and data for smp_call_function(). This is designed to minimise static memory
- * requirements. It also looks cleaner.
+ * Note: alignment of 4 entries/cacheline was empirically determined
+ * to be a good tradeoff between hot cachelines & spreading the array
+ * across too many cachelines.
  */
-static  __cacheline_aligned DEFINE_SPINLOCK(call_lock);
+static struct local_tlb_flush_counts {
+       unsigned int count;
+} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];
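With 32-byte alignment each local_tlb_flush_counts entry occupies its own 32-byte slot, so a 128-byte ia64 cache line holds exactly four entries, which is the "4 entries/cacheline" tradeoff the comment above refers to. A minimal compile-time check of that arithmetic, assuming a C11 compiler (not part of the patch itself):

#include <assert.h>

struct local_tlb_flush_counts {
	unsigned int count;
} __attribute__((__aligned__(32)));

/* 32 bytes per entry -> 4 entries per 128-byte ia64 cache line */
static_assert(sizeof(struct local_tlb_flush_counts) == 32, "one 32-byte slot per CPU");
static_assert(128 / sizeof(struct local_tlb_flush_counts) == 4, "4 entries per cacheline");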
 
-struct call_data_struct {
-       void (*func) (void *info);
-       void *info;
-       long wait;
-       atomic_t started;
-       atomic_t finished;
-};
-
-static volatile struct call_data_struct *call_data;
+static DEFINE_PER_CPU(unsigned short, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
 
 #define IPI_CALL_FUNC          0
 #define IPI_CPU_STOP           1
+#define IPI_CALL_FUNC_SINGLE   2
+#define IPI_KDUMP_CPU_STOP     3
 
 /* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
-static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
+static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);
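ipi_operation moves to DEFINE_PER_CPU_SHARED_ALIGNED because, unlike most per-CPU data, this word is stored to by other CPUs; giving it a cache line of its own keeps those remote stores from invalidating neighbouring per-CPU variables. A tiny user-space analogue of the same false-sharing precaution (the 64-byte alignment is an assumption for illustration; ia64 pads to its 128-byte line):

#include <stdint.h>

/* a word that remote threads write: give it a full cache line so their
 * stores do not bounce the line holding unrelated hot data */
struct remote_flag {
	uint64_t pending;
} __attribute__((aligned(64)));

struct remote_flag per_cpu_flag[8];	/* one slot per CPU */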
 
 extern void cpu_halt (void);
 
-void
-lock_ipi_calllock(void)
-{
-       spin_lock_irq(&call_lock);
-}
-
-void
-unlock_ipi_calllock(void)
-{
-       spin_unlock_irq(&call_lock);
-}
-
 static void
-stop_this_cpu (void)
+stop_this_cpu(void)
 {
        /*
         * Remove this CPU:
@@ -108,7 +94,7 @@ cpu_die(void)
 }
 
 irqreturn_t
-handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
+handle_IPI (int irq, void *dev_id)
 {
        int this_cpu = get_cpu();
        unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
@@ -124,40 +110,23 @@ handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
                        ops &= ~(1 << which);
 
                        switch (which) {
-                             case IPI_CALL_FUNC:
-                             {
-                                     struct call_data_struct *data;
-                                     void (*func)(void *info);
-                                     void *info;
-                                     int wait;
-
-                                     /* release the 'pointer lock' */
-                                     data = (struct call_data_struct *) call_data;
-                                     func = data->func;
-                                     info = data->info;
-                                     wait = data->wait;
-
-                                     mb();
-                                     atomic_inc(&data->started);
-                                     /*
-                                      * At this point the structure may be gone unless
-                                      * wait is true.
-                                      */
-                                     (*func)(info);
-
-                                     /* Notify the sending CPU that the task is done.  */
-                                     mb();
-                                     if (wait)
-                                             atomic_inc(&data->finished);
-                             }
-                             break;
-
-                             case IPI_CPU_STOP:
+                       case IPI_CPU_STOP:
                                stop_this_cpu();
                                break;
-
-                             default:
-                               printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
+                       case IPI_CALL_FUNC:
+                               generic_smp_call_function_interrupt();
+                               break;
+                       case IPI_CALL_FUNC_SINGLE:
+                               generic_smp_call_function_single_interrupt();
+                               break;
+#ifdef CONFIG_KEXEC
+                       case IPI_KDUMP_CPU_STOP:
+                               unw_init_running(kdump_cpu_freeze, NULL);
+                               break;
+#endif
+                       default:
+                               printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
+                                               this_cpu, which);
                                break;
                        }
                } while (ops);
@@ -167,8 +136,10 @@ handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
        return IRQ_HANDLED;
 }
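The dispatch loop above drains a per-CPU bitmask: the sender sets the bit for the wanted operation in the target's ipi_operation word before raising the hardware interrupt, and handle_IPI() atomically swaps the word to zero and services every bit it found. A rough user-space analogue of that handshake, assuming C11 atomics in place of the kernel's xchg() and using illustrative function names:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long pending_ipis;	/* one such word per CPU in the kernel */

static void send_op(int op)
{
	atomic_fetch_or(&pending_ipis, 1UL << op);	/* then raise the hardware IPI */
}

static void handle_ipis(void)
{
	unsigned long ops;

	while ((ops = atomic_exchange(&pending_ipis, 0)) != 0) {
		while (ops) {
			int which = __builtin_ctzl(ops);	/* lowest pending op */

			ops &= ops - 1;
			printf("servicing IPI op %d\n", which);
		}
	}
}

int main(void)
{
	send_op(0);	/* IPI_CALL_FUNC */
	send_op(2);	/* IPI_CALL_FUNC_SINGLE */
	handle_ipis();
	return 0;
}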
 
+
+
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
  */
 static inline void
 send_IPI_single (int dest_cpu, int op)
@@ -178,34 +149,47 @@ send_IPI_single (int dest_cpu, int op)
 }
 
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
  */
 static inline void
 send_IPI_allbutself (int op)
 {
        unsigned int i;
 
-       for (i = 0; i < NR_CPUS; i++) {
-               if (cpu_online(i) && i != smp_processor_id())
+       for_each_online_cpu(i) {
+               if (i != smp_processor_id())
                        send_IPI_single(i, op);
        }
 }
 
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
+ */
+static inline void
+send_IPI_mask(cpumask_t mask, int op)
+{
+       unsigned int cpu;
+
+       for_each_cpu_mask(cpu, mask) {
+               send_IPI_single(cpu, op);
+       }
+}
+
+/*
+ * Called with preemption disabled.
  */
 static inline void
 send_IPI_all (int op)
 {
        int i;
 
-       for (i = 0; i < NR_CPUS; i++)
-               if (cpu_online(i))
-                       send_IPI_single(i, op);
+       for_each_online_cpu(i) {
+               send_IPI_single(i, op);
+       }
 }
 
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
  */
 static inline void
 send_IPI_self (int op)
@@ -213,8 +197,28 @@ send_IPI_self (int op)
        send_IPI_single(smp_processor_id(), op);
 }
 
+#ifdef CONFIG_KEXEC
+void
+kdump_smp_send_stop(void)
+{
+       send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
+}
+
+void
+kdump_smp_send_init(void)
+{
+       unsigned int cpu, self_cpu;
+       self_cpu = smp_processor_id();
+       for_each_online_cpu(cpu) {
+               if (cpu != self_cpu) {
+                       if (kdump_status[cpu] == 0)
+                               platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
+               }
+       }
+}
+#endif
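kdump_smp_send_stop() asks every other CPU to freeze via IPI_KDUMP_CPU_STOP, which the handler above services with unw_init_running(kdump_cpu_freeze, NULL); kdump_smp_send_init() is the heavier follow-up that sends a hard INIT to any CPU whose kdump_status slot is still zero, i.e. one that never acknowledged the polite stop. A user-space sketch of that stop-then-escalate pattern, with a hypothetical ack[] array standing in for kdump_status[]:

#include <stdbool.h>
#include <stdio.h>

#define NCPUS 4

static bool ack[NCPUS];			/* set by a CPU once it has frozen */

static void send_stop(int cpu) { printf("cpu%d: stop IPI\n", cpu); }
static void send_init(int cpu) { printf("cpu%d: INIT (forced)\n", cpu); }

static void kdump_stop_all(int self)
{
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)
		if (cpu != self)
			send_stop(cpu);

	/* ...wait a bounded time for acknowledgements... */

	for (cpu = 0; cpu < NCPUS; cpu++)
		if (cpu != self && !ack[cpu])
			send_init(cpu);
}

int main(void)
{
	ack[2] = true;			/* pretend cpu2 froze cleanly */
	kdump_stop_all(0);
	return 0;
}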
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
  */
 void
 smp_send_reschedule (int cpu)
@@ -222,22 +226,81 @@ smp_send_reschedule (int cpu)
        platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
 }
 
+/*
+ * Called with preemption disabled.
+ */
+static void
+smp_send_local_flush_tlb (int cpu)
+{
+       platform_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0);
+}
+
+void
+smp_local_flush_tlb(void)
+{
+       /*
+        * Use atomic ops. Otherwise, the load/increment/store sequence from
+        * a "++" operation can have the line stolen between the load & store.
+        * The overhead of the atomic op is negligible in this case & offers
+        * significant benefit for the brief periods where lots of cpus
+        * are simultaneously flushing TLBs.
+        */
+       ia64_fetchadd(1, &local_tlb_flush_counts[smp_processor_id()].count, acq);
+       local_flush_tlb_all();
+}
+
+#define FLUSH_DELAY    5 /* Usec backoff to eliminate excessive cacheline bouncing */
+
+void
+smp_flush_tlb_cpumask(cpumask_t xcpumask)
+{
+       unsigned short *counts = __ia64_per_cpu_var(shadow_flush_counts);
+       cpumask_t cpumask = xcpumask;
+       int mycpu, cpu, flush_mycpu = 0;
+
+       preempt_disable();
+       mycpu = smp_processor_id();
+
+       for_each_cpu_mask(cpu, cpumask)
+               counts[cpu] = local_tlb_flush_counts[cpu].count & 0xffff;
+
+       mb();
+       for_each_cpu_mask(cpu, cpumask) {
+               if (cpu == mycpu)
+                       flush_mycpu = 1;
+               else
+                       smp_send_local_flush_tlb(cpu);
+       }
+
+       if (flush_mycpu)
+               smp_local_flush_tlb();
+
+       for_each_cpu_mask(cpu, cpumask)
+               while (counts[cpu] == (local_tlb_flush_counts[cpu].count & 0xffff))
+                       udelay(FLUSH_DELAY);
+
+       preempt_enable();
+}
+
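smp_flush_tlb_cpumask() snapshots the low 16 bits of each target's published flush count, kicks each target with IA64_IPI_LOCAL_TLB_FLUSH, and then polls with a FLUSH_DELAY backoff until every counter has moved past its snapshot, which shows that each target entered smp_local_flush_tlb() after the request went out. A user-space analogue of that counter handshake, assuming C11 atomics in place of ia64_fetchadd():

#include <stdatomic.h>

#define NCPUS 4

static _Atomic unsigned int flush_count[NCPUS];	/* stands in for local_tlb_flush_counts[] */

/* what a target does when it receives the flush IPI */
static void target_local_flush(int cpu)
{
	atomic_fetch_add(&flush_count[cpu], 1);	/* ia64_fetchadd(1, ..., acq) */
	/* local_flush_tlb_all() runs here */
}

/* what the initiator does for one target CPU */
static void flush_on(int cpu)
{
	unsigned int snap = atomic_load(&flush_count[cpu]);

	/* smp_send_local_flush_tlb(cpu) would raise the IPI here */
	target_local_flush(cpu);		/* stand-in for the remote handler */

	while (atomic_load(&flush_count[cpu]) == snap)
		;				/* udelay(FLUSH_DELAY) in the kernel */
}

int main(void)
{
	flush_on(1);
	return 0;
}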
 void
 smp_flush_tlb_all (void)
 {
-       on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
+       on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
 }
 
 void
 smp_flush_tlb_mm (struct mm_struct *mm)
 {
+       preempt_disable();
        /* this happens for the common case of a single-threaded fork():  */
        if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
        {
                local_finish_flush_tlb_mm(mm);
+               preempt_enable();
                return;
        }
 
+       preempt_enable();
        /*
         * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
         * have been running in the address space.  It's not clear that this is worth the
@@ -245,120 +308,18 @@ smp_flush_tlb_mm (struct mm_struct *mm)
         * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
         * rather trivial.
         */
-       on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
+       on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
 }
 
-/*
- * Run a function on another CPU
- *  <func>     The function to run. This must be fast and non-blocking.
- *  <info>     An arbitrary pointer to pass to the function.
- *  <nonatomic>        Currently unused.
- *  <wait>     If true, wait until function has completed on other CPUs.
- *  [RETURNS]   0 on success, else a negative status code.
- *
- * Does not return until the remote CPU is nearly ready to execute <func>
- * or is or has executed.
- */
-
-int
-smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic,
-                         int wait)
+void arch_send_call_function_single_ipi(int cpu)
 {
-       struct call_data_struct data;
-       int cpus = 1;
-       int me = get_cpu(); /* prevent preemption and reschedule on another processor */
-
-       if (cpuid == me) {
-               printk(KERN_INFO "%s: trying to call self\n", __FUNCTION__);
-               put_cpu();
-               return -EBUSY;
-       }
-
-       data.func = func;
-       data.info = info;
-       atomic_set(&data.started, 0);
-       data.wait = wait;
-       if (wait)
-               atomic_set(&data.finished, 0);
-
-       spin_lock_bh(&call_lock);
-
-       call_data = &data;
-       mb();   /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
-       send_IPI_single(cpuid, IPI_CALL_FUNC);
-
-       /* Wait for response */
-       while (atomic_read(&data.started) != cpus)
-               cpu_relax();
-
-       if (wait)
-               while (atomic_read(&data.finished) != cpus)
-                       cpu_relax();
-       call_data = NULL;
-
-       spin_unlock_bh(&call_lock);
-       put_cpu();
-       return 0;
+       send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
 }
-EXPORT_SYMBOL(smp_call_function_single);
 
-/*
- * this function sends a 'generic call function' IPI to all other CPUs
- * in the system.
- */
-
-/*
- *  [SUMMARY]  Run a function on all other CPUs.
- *  <func>     The function to run. This must be fast and non-blocking.
- *  <info>     An arbitrary pointer to pass to the function.
- *  <nonatomic>        currently unused.
- *  <wait>     If true, wait (atomically) until function has completed on other CPUs.
- *  [RETURNS]   0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func> or are or have
- * executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int
-smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-       struct call_data_struct data;
-       int cpus = num_online_cpus()-1;
-
-       if (!cpus)
-               return 0;
-
-       /* Can deadlock when called with interrupts disabled */
-       WARN_ON(irqs_disabled());
-
-       data.func = func;
-       data.info = info;
-       atomic_set(&data.started, 0);
-       data.wait = wait;
-       if (wait)
-               atomic_set(&data.finished, 0);
-
-       spin_lock(&call_lock);
-
-       call_data = &data;
-       mb();   /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
-       send_IPI_allbutself(IPI_CALL_FUNC);
-
-       /* Wait for response */
-       while (atomic_read(&data.started) != cpus)
-               cpu_relax();
-
-       if (wait)
-               while (atomic_read(&data.finished) != cpus)
-                       cpu_relax();
-       call_data = NULL;
-
-       spin_unlock(&call_lock);
-       return 0;
+       send_IPI_mask(mask, IPI_CALL_FUNC);
 }
-EXPORT_SYMBOL(smp_call_function);
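With call_lock, call_data, and the hand-rolled wait loops removed, the architecture only has to tell the generic kernel/smp.c code how to deliver its two cross-call vectors; queuing and waiting now live in common code, and handle_IPI() above simply calls back into generic_smp_call_function_interrupt() and generic_smp_call_function_single_interrupt(). This is also why the on_each_cpu() calls earlier in the file lost an argument: the generic API of this kernel generation dropped the unused "nonatomic" parameter. A minimal sketch of how a caller now reaches these hooks (drain_local_stats/drain_all_stats are illustrative names, not part of the patch):

#include <linux/smp.h>

static void drain_local_stats(void *info)
{
	/* runs on every other CPU in interrupt context: keep it short,
	 * non-blocking, and free of sleeping locks */
}

static void drain_all_stats(void)
{
	/* the generic code queues the call data, then uses
	 * arch_send_call_function_ipi() / arch_send_call_function_single_ipi()
	 * to raise IPI_CALL_FUNC / IPI_CALL_FUNC_SINGLE on the targets */
	smp_call_function(drain_local_stats, NULL, 1 /* wait */);
}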
 
 /*
  * this function calls the 'stop' function on all other CPUs in the system.
@@ -369,7 +330,7 @@ smp_send_stop (void)
        send_IPI_allbutself(IPI_CPU_STOP);
 }
 
-int __init
+int
 setup_profiling_timer (unsigned int multiplier)
 {
        return -EINVAL;