#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>

#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#ifdef CONFIG_X86_32
#include <mach_apic.h>
#include <mach_ipi.h>
#else
#include <asm/mach_apic.h>
#endif
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
static void native_smp_send_reschedule(int cpu)
{
	WARN_ON(cpu_is_offline(cpu));
	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}
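
/*
 * Note: the receiving side (smp_reschedule_interrupt below) does no
 * work of its own; the actual reschedule happens through the usual
 * need_resched check on the return-from-interrupt path.
 */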

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

void lock_ipi_call_lock(void)
{
	spin_lock_irq(&call_lock);
}

void unlock_ipi_call_lock(void)
{
	spin_unlock_irq(&call_lock);
}

static struct call_data_struct *call_data;
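
/*
 * Only one cross-CPU function call can be in flight at a time:
 * call_data points at the caller's on-stack call_data_struct and is
 * serialized by call_lock, so handlers never see a concurrent request.
 */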

static void __smp_call_function(void (*func) (void *info), void *info,
				int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;

	if (!cpus)
		return;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	mb();

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
}
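
/*
 * The started counter signals that every target has copied func/info,
 * after which the on-stack data may go out of scope if !wait; the
 * finished counter counts handlers that have returned from func.
 */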

/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on.  Must not include the current cpu.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int
native_smp_call_function_mask(cpumask_t mask,
			      void (*func)(void *), void *info,
			      int wait)
{
	struct call_data_struct data;
	cpumask_t allbutself;
	int cpus;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	/* Holding any lock stops cpus from going down. */
	spin_lock(&call_lock);

	allbutself = cpu_online_map;
	cpu_clear(smp_processor_id(), allbutself);

	cpus_and(mask, mask, allbutself);
	cpus = cpus_weight(mask);

	if (!cpus) {
		spin_unlock(&call_lock);
		return 0;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	wmb();

	/* Send a message to other CPUs */
	if (cpus_equal(mask, allbutself))
		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
	else
		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	spin_unlock(&call_lock);

	return 0;
}
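
/*
 * Illustrative usage (a sketch, not part of this file): run a fast,
 * non-blocking callback on every other online CPU and wait for it to
 * finish. smp_call_function_mask() is the generic wrapper that
 * dispatches to the native implementation above via smp_ops.
 *
 *	static void bump(void *info)
 *	{
 *		atomic_inc((atomic_t *)info);
 *	}
 *
 *	cpumask_t mask = cpu_online_map;
 *	cpu_clear(smp_processor_id(), mask);
 *	smp_call_function_mask(mask, bump, &counter, 1);
 */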

static void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	disable_local_APIC();
	if (hlt_works(smp_processor_id()))
		for (;;) halt();
	for (;;);
}
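
/*
 * CPUs whose HLT instruction is flagged as broken (hlt_works() returns
 * false) busy-spin instead of halting; either way this CPU never
 * returns from here.
 */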

/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */
static void native_smp_send_stop(void)
{
	int nolock;
	unsigned long flags;

	if (reboot_force)
		return;

	/* Don't deadlock on the call lock in panic */
	nolock = !spin_trylock(&call_lock);
	local_irq_save(flags);
	__smp_call_function(stop_this_cpu, NULL, 0, 0);
	if (!nolock)
		spin_unlock(&call_lock);
	disable_local_APIC();
	local_irq_restore(flags);
}
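
/*
 * The trylock matters on the panic path: the panicking CPU may already
 * hold call_lock, and blocking on it here would deadlock instead of
 * stopping the machine. Proceeding without the lock is acceptable
 * because every other CPU is about to be stopped anyway.
 */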

/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
void smp_reschedule_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
#ifdef CONFIG_X86_32
	__get_cpu_var(irq_stat).irq_resched_count++;
#else
	add_pda(irq_resched_count, 1);
#endif
}

void smp_call_function_interrupt(struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	ack_APIC_irq();
	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	irq_enter();
	(*func)(info);
#ifdef CONFIG_X86_32
	__get_cpu_var(irq_stat).irq_call_count++;
#else
	add_pda(irq_call_count, 1);
#endif
	irq_exit();

	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}
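
/*
 * The two mb() calls order the handshake: the first ensures func/info
 * were read before started is bumped (the caller may release its stack
 * frame once started == cpus when !wait); the second makes func's side
 * effects visible before finished releases a waiting caller.
 */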

struct smp_ops smp_ops = {
	.smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
	.smp_prepare_cpus = native_smp_prepare_cpus,
	.cpu_up = native_cpu_up,
	.smp_cpus_done = native_smp_cpus_done,

	.smp_send_stop = native_smp_send_stop,
	.smp_send_reschedule = native_smp_send_reschedule,
	.smp_call_function_mask = native_smp_call_function_mask,
};
EXPORT_SYMBOL_GPL(smp_ops);
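
/*
 * Routing these operations through smp_ops lets paravirtualized guests
 * (Xen, for example) substitute their own IPI and CPU bring-up hooks
 * for the native APIC-based implementations above.
 */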