on_each_cpu(): kill unused 'retry' parameter
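
The 'retry' argument to on_each_cpu() and smp_call_function() has long been
ignored, so it is dropped from every caller. On alpha this goes hand in hand
with the switch to the generic smp_call_function helpers: the hand-rolled
mailbox (pointer_lock(), smp_call_struct and the two-stage timeout loop in
smp_call_function_on_cpu()) is deleted, handle_ipi() now dispatches to
generic_smp_call_function_interrupt() and its _single variant, and the
architecture only has to supply the arch_send_call_function_ipi() /
arch_send_call_function_single_ipi() hooks plus a new IPI_CALL_FUNC_SINGLE
message type.
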
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 0804b6a..83df541 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -16,6 +16,7 @@
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/err.h>
 #include <linux/threads.h>
 #include <linux/smp.h>
 #include <linux/interrupt.h>
@@ -61,6 +62,7 @@ static struct {
 enum ipi_message_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
+       IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
 };
 
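IPI_CALL_FUNC_SINGLE gives smp_call_function_single() traffic its own
message. In the generic code of this era (kernel/smp.c), single-target calls
sit on a per-CPU queue separate from the shared call-function list, so a
dedicated IPI lets the receiving CPU drain only its own queue; the dispatch
for both messages appears in the handle_ipi() hunk below.
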
@@ -76,10 +78,6 @@ int smp_num_probed;          /* Internal processor count */
 int smp_num_cpus = 1;          /* Number that came online.  */
 EXPORT_SYMBOL(smp_num_cpus);
 
-extern void calibrate_delay(void);
-
-\f
-
 /*
  * Called by both boot and secondaries to move global data into
  *  per-processor storage.
@@ -438,7 +436,6 @@ setup_smp(void)
                                ((char *)cpubase + i*hwrpb->processor_size);
                        if ((cpu->flags & 0x1cc) == 0x1cc) {
                                smp_num_probed++;
-                               /* Assume here that "whami" == index */
                                cpu_set(i, cpu_present_map);
                                cpu->pal_revision = boot_cpu_palrev;
                        }
@@ -562,51 +559,6 @@ send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
                wripir(i);
 }
 
-/* Structure and data for smp_call_function.  This is designed to 
-   minimize static memory requirements.  Plus it looks cleaner.  */
-
-struct smp_call_struct {
-       void (*func) (void *info);
-       void *info;
-       long wait;
-       atomic_t unstarted_count;
-       atomic_t unfinished_count;
-};
-
-static struct smp_call_struct *smp_call_function_data;
-
-/* Atomicly drop data into a shared pointer.  The pointer is free if
-   it is initially locked.  If retry, spin until free.  */
-
-static int
-pointer_lock (void *lock, void *data, int retry)
-{
-       void *old, *tmp;
-
-       mb();
- again:
-       /* Compare and swap with zero.  */
-       asm volatile (
-       "1:     ldq_l   %0,%1\n"
-       "       mov     %3,%2\n"
-       "       bne     %0,2f\n"
-       "       stq_c   %2,%1\n"
-       "       beq     %2,1b\n"
-       "2:"
-       : "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
-       : "r"(data)
-       : "memory");
-
-       if (old == 0)
-               return 0;
-       if (! retry)
-               return -EBUSY;
-
-       while (*(void **)lock)
-               barrier();
-       goto again;
-}
-
 void
 handle_ipi(struct pt_regs *regs)
 {
@@ -636,31 +588,12 @@ handle_ipi(struct pt_regs *regs)
                        break;
 
                case IPI_CALL_FUNC:
-                   {
-                       struct smp_call_struct *data;
-                       void (*func)(void *info);
-                       void *info;
-                       int wait;
-
-                       data = smp_call_function_data;
-                       func = data->func;
-                       info = data->info;
-                       wait = data->wait;
-
-                       /* Notify the sending CPU that the data has been
-                          received, and execution is about to begin.  */
-                       mb();
-                       atomic_dec (&data->unstarted_count);
-
-                       /* At this point the structure may be gone unless
-                          wait is true.  */
-                       (*func)(info);
-
-                       /* Notify the sending CPU that the task is done.  */
-                       mb();
-                       if (wait) atomic_dec (&data->unfinished_count);
+                       generic_smp_call_function_interrupt();
+                       break;
+
+               case IPI_CALL_FUNC_SINGLE:
+                       generic_smp_call_function_single_interrupt();
                        break;
-                   }
 
                case IPI_CPU_STOP:
                        halt();
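
For context, the generic helper that replaces the deleted mailbox drains a
queue of pending calls from the IPI handler. The following is a minimal
userspace model of that dispatch pattern, with invented names (call_entry,
queue_call, drain_call_queue); it illustrates the idea only and is not the
kernel/smp.c implementation, which adds locking, per-CPU queues and
reference counting.

/* Minimal model of queue-based cross-CPU call dispatch.  All names here
 * are invented for illustration; the real code lives in kernel/smp.c. */
#include <stdio.h>

struct call_entry {
	void (*func)(void *info);	/* function the sender wants run */
	void *info;			/* opaque argument passed through */
	struct call_entry *next;
};

static struct call_entry *call_queue;	/* per-CPU and locked in reality */

/* Sender side: publish the work, then raise IPI_CALL_FUNC (not modeled). */
static void queue_call(struct call_entry *e)
{
	e->next = call_queue;
	call_queue = e;
}

/* Receiver side: conceptually what generic_smp_call_function_interrupt()
 * does on each IPI -- take the pending entries and invoke them. */
static void drain_call_queue(void)
{
	struct call_entry *e = call_queue;

	call_queue = NULL;
	for (; e; e = e->next)
		e->func(e->info);
}

static void say_hello(void *info)
{
	printf("called with '%s'\n", (const char *)info);
}

int main(void)
{
	struct call_entry e = { .func = say_hello, .info = "hello" };

	queue_call(&e);
	drain_call_queue();	/* stands in for the IPI handler running */
	return 0;
}
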
@@ -704,102 +637,15 @@ smp_send_stop(void)
        send_ipi_message(to_whom, IPI_CPU_STOP);
 }
 
-/*
- * Run a function on all other CPUs.
- *  <func>     The function to run. This must be fast and non-blocking.
- *  <info>     An arbitrary pointer to pass to the function.
- *  <retry>    If true, keep retrying until ready.
- *  <wait>     If true, wait until function has completed on other CPUs.
- *  [RETURNS]   0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func>
- * or are or have executed.
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-
-int
-smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
-                         int wait, cpumask_t to_whom)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-       struct smp_call_struct data;
-       unsigned long timeout;
-       int num_cpus_to_call;
-       
-       /* Can deadlock when called with interrupts disabled */
-       WARN_ON(irqs_disabled());
-
-       data.func = func;
-       data.info = info;
-       data.wait = wait;
-
-       cpu_clear(smp_processor_id(), to_whom);
-       num_cpus_to_call = cpus_weight(to_whom);
-
-       atomic_set(&data.unstarted_count, num_cpus_to_call);
-       atomic_set(&data.unfinished_count, num_cpus_to_call);
-
-       /* Acquire the smp_call_function_data mutex.  */
-       if (pointer_lock(&smp_call_function_data, &data, retry))
-               return -EBUSY;
-
-       /* Send a message to the requested CPUs.  */
-       send_ipi_message(to_whom, IPI_CALL_FUNC);
-
-       /* Wait for a minimal response.  */
-       timeout = jiffies + HZ;
-       while (atomic_read (&data.unstarted_count) > 0
-              && time_before (jiffies, timeout))
-               barrier();
-
-       /* If there's no response yet, log a message but allow a longer
-        * timeout period -- if we get a response this time, log
-        * a message saying when we got it.. 
-        */
-       if (atomic_read(&data.unstarted_count) > 0) {
-               long start_time = jiffies;
-               printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
-                      __FUNCTION__);
-               timeout = jiffies + 30 * HZ;
-               while (atomic_read(&data.unstarted_count) > 0
-                      && time_before(jiffies, timeout))
-                       barrier();
-               if (atomic_read(&data.unstarted_count) <= 0) {
-                       long delta = jiffies - start_time;
-                       printk(KERN_ERR 
-                              "%s: response %ld.%ld seconds into long wait\n",
-                              __FUNCTION__, delta / HZ,
-                              (100 * (delta - ((delta / HZ) * HZ))) / HZ);
-               }
-       }
-
-       /* We either got one or timed out -- clear the lock. */
-       mb();
-       smp_call_function_data = NULL;
-
-       /* 
-        * If after both the initial and long timeout periods we still don't
-        * have a response, something is very wrong...
-        */
-       BUG_ON(atomic_read (&data.unstarted_count) > 0);
-
-       /* Wait for a complete response, if needed.  */
-       if (wait) {
-               while (atomic_read (&data.unfinished_count) > 0)
-                       barrier();
-       }
-
-       return 0;
+       send_ipi_message(mask, IPI_CALL_FUNC);
 }
-EXPORT_SYMBOL(smp_call_function_on_cpu);
 
-int
-smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
+void arch_send_call_function_single_ipi(int cpu)
 {
-       return smp_call_function_on_cpu (func, info, retry, wait,
-                                        cpu_online_map);
+       send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
 }
-EXPORT_SYMBOL(smp_call_function);
 
 static void
 ipi_imb(void *ignored)
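
With the conversion in place, a hypothetical arch-code caller looks like the
sketch below (not part of the patch): the generic smp_call_function() queues
the work and invokes the arch_send_call_function_ipi() hook added above, and
the 'retry' argument is simply gone.

#include <linux/smp.h>
#include <linux/kernel.h>

/* Runs on every remote CPU in interrupt context, so it must not sleep. */
static void ipi_example(void *unused)
{
}

static void run_on_other_cpus(void)
{
	/* wait=1: return only after all remote CPUs have run the function. */
	if (smp_call_function(ipi_example, NULL, 1))
		printk(KERN_CRIT "run_on_other_cpus: cross call failed\n");
}
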
@@ -811,7 +657,7 @@ void
 smp_imb(void)
 {
        /* Must wait other processors to flush their icache before continue. */
-       if (on_each_cpu(ipi_imb, NULL, 1, 1))
+       if (on_each_cpu(ipi_imb, NULL, 1))
                printk(KERN_CRIT "smp_imb: timed out\n");
 }
 EXPORT_SYMBOL(smp_imb);
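
For reference, the prototypes the updated call sites rely on reduce to the
following (a sketch inferred from the callers; the authoritative
declarations are in include/linux/smp.h):

	int smp_call_function(void (*func)(void *info), void *info, int wait);
	int on_each_cpu(void (*func)(void *info), void *info, int wait);

on_each_cpu() additionally runs func on the calling CPU, which is why
smp_imb() above uses it: the local icache needs flushing too.
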
@@ -827,7 +673,7 @@ flush_tlb_all(void)
 {
        /* Although we don't have any data to pass, we do want to
           synchronize with the other processors.  */
-       if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
+       if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
                printk(KERN_CRIT "flush_tlb_all: timed out\n");
        }
 }
@@ -864,7 +710,7 @@ flush_tlb_mm(struct mm_struct *mm)
                }
        }
 
-       if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
+       if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
                printk(KERN_CRIT "flush_tlb_mm: timed out\n");
        }
 
@@ -917,7 +763,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
        data.mm = mm;
        data.addr = addr;
 
-       if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
+       if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
                printk(KERN_CRIT "flush_tlb_page: timed out\n");
        }
 
@@ -969,7 +815,7 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                }
        }
 
-       if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
+       if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
                printk(KERN_CRIT "flush_icache_page: timed out\n");
        }