softirq: introduce statistics for softirq
/*
 *      linux/kernel/softirq.c
 *
 *      Copyright (C) 1992 Linus Torvalds
 *
 *      Distribute under GPLv2.
 *
 *      Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *      Remote softirq infrastructure is by Jens Axboe.
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether this will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK",
        "TASKLET", "SCHED", "HRTIMER",  "RCU"
};
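
/*
 * These names label the per-softirq statistics introduced with this
 * change: every handler invocation in __do_softirq() bumps a per-CPU
 * counter via kstat_incr_softirqs_this_cpu(), and the counts are
 * expected to be reported through /proc/softirqs, one row per softirq
 * and one column per CPU.
 */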

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
void wakeup_softirqd(void)
{
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __get_cpu_var(ksoftirqd);

        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
}

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
        unsigned long flags;

        WARN_ON_ONCE(in_irq());

        raw_local_irq_save(flags);
        /*
         * The preempt tracer hooks into add_preempt_count and will break
         * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
         * is set and before current->softirq_enabled is cleared.
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
        preempt_count() += SOFTIRQ_OFFSET;
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);

        if (preempt_count() == SOFTIRQ_OFFSET)
                trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
        add_preempt_count(SOFTIRQ_OFFSET);
        barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
        __local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
        WARN_ON_ONCE(in_irq());
        WARN_ON_ONCE(!irqs_disabled());

        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_on((unsigned long)__builtin_return_address(0));
        sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
        WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_disable();
#endif
        /*
         * Are softirqs going to be turned on now:
         */
        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        sub_preempt_count(SOFTIRQ_OFFSET - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending()))
                do_softirq();

        dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_enable();
#endif
        preempt_check_resched();
}

void local_bh_enable(void)
{
        _local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
        _local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);
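
/*
 * Typical usage sketch (illustrative only): process-context code that
 * shares data with a softirq or tasklet on this CPU brackets its
 * critical section with local_bh_disable()/local_bh_enable():
 *
 *        local_bh_disable();
 *        ...touch data also used by a softirq handler on this CPU...
 *        local_bh_enable();
 *
 * local_bh_enable() runs any softirqs that became pending while bottom
 * halves were disabled, provided we are not already in interrupt context.
 */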

/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness:
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

asmlinkage void __do_softirq(void)
{
        struct softirq_action *h;
        __u32 pending;
        int max_restart = MAX_SOFTIRQ_RESTART;
        int cpu;

        pending = local_softirq_pending();
        account_system_vtime(current);

        __local_bh_disable((unsigned long)__builtin_return_address(0));
        lockdep_softirq_enter();

        cpu = smp_processor_id();
restart:
        /* Reset the pending bitmask before enabling irqs */
        set_softirq_pending(0);

        local_irq_enable();

        h = softirq_vec;

        do {
                if (pending & 1) {
                        int prev_count = preempt_count();
                        kstat_incr_softirqs_this_cpu(h - softirq_vec);

                        trace_softirq_entry(h, softirq_vec);
                        h->action(h);
                        trace_softirq_exit(h, softirq_vec);
                        if (unlikely(prev_count != preempt_count())) {
                                printk(KERN_ERR "huh, entered softirq %td %s %p "
                                       "with preempt_count %08x,"
                                       " exited with %08x?\n", h - softirq_vec,
                                       softirq_to_name[h - softirq_vec],
                                       h->action, prev_count, preempt_count());
                                preempt_count() = prev_count;
                        }

                        rcu_bh_qsctr_inc(cpu);
                }
                h++;
                pending >>= 1;
        } while (pending);

        local_irq_disable();

        pending = local_softirq_pending();
        if (pending && --max_restart)
                goto restart;

        if (pending)
                wakeup_softirqd();

        lockdep_softirq_exit();

        account_system_vtime(current);
        _local_bh_enable();
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();

        if (pending)
                __do_softirq();

        local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
        int cpu = smp_processor_id();

        rcu_irq_enter();
        if (idle_cpu(cpu) && !in_interrupt()) {
                __irq_enter();
                tick_check_idle(cpu);
        } else
                __irq_enter();
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()       __do_softirq()
#else
# define invoke_softirq()       do_softirq()
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
        account_system_vtime(current);
        trace_hardirq_exit();
        sub_preempt_count(IRQ_EXIT_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();

#ifdef CONFIG_NO_HZ
        /* Make sure that timer wheel updates are propagated */
        rcu_irq_exit();
        if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
                tick_nohz_stop_sched_tick(0);
#endif
        preempt_enable_no_resched();
}
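
/*
 * Architecture interrupt entry code typically brackets the actual
 * handler with irq_enter()/irq_exit(), so softirqs raised from hardirq
 * context are normally run from irq_exit() via invoke_softirq() rather
 * than deferred to ksoftirqd.
 */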

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt())
                wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(nr);
        local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
        softirq_vec[nr].action = action;
}
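
/*
 * Illustrative sketch (hypothetical handler name, using one of the
 * existing softirq slots purely as an example): a subsystem registers
 * its handler once at init time with open_softirq() and then marks it
 * pending with raise_softirq() whenever there is work, e.g.:
 *
 *        static void my_softirq_action(struct softirq_action *h)
 *        {
 *                ...drain this CPU's private queue; runs with irqs
 *                enabled and bottom halves disabled...
 *        }
 *
 *        open_softirq(BLOCK_SOFTIRQ, my_softirq_action);
 *        ...
 *        raise_softirq(BLOCK_SOFTIRQ);        ...typically from irq context...
 */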

/* Tasklets */
struct tasklet_head
{
        struct tasklet_struct *head;
        struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__get_cpu_var(tasklet_vec).tail = t;
        __get_cpu_var(tasklet_vec).tail = &(t->next);
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__get_cpu_var(tasklet_hi_vec).tail = t;
        __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
        raise_softirq_irqoff(HI_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
        BUG_ON(!irqs_disabled());

        t->next = __get_cpu_var(tasklet_hi_vec).head;
        __get_cpu_var(tasklet_hi_vec).head = t;
        __raise_softirq_irqoff(HI_SOFTIRQ);
}

EXPORT_SYMBOL(__tasklet_hi_schedule_first);

static void tasklet_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __get_cpu_var(tasklet_vec).head;
        __get_cpu_var(tasklet_vec).head = NULL;
        __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__get_cpu_var(tasklet_vec).tail = t;
                __get_cpu_var(tasklet_vec).tail = &(t->next);
                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                local_irq_enable();
        }
}

static void tasklet_hi_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __get_cpu_var(tasklet_hi_vec).head;
        __get_cpu_var(tasklet_hi_vec).head = NULL;
        __get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__get_cpu_var(tasklet_hi_vec).tail = t;
                __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
}


void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                printk("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                do {
                        yield();
                } while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);
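
/*
 * Illustrative tasklet usage sketch (all names below are hypothetical):
 *
 *        static void my_tasklet_fn(unsigned long data)
 *        {
 *                ...deferred work; runs in softirq context and is
 *                serialized against itself...
 *        }
 *
 *        static struct tasklet_struct my_tasklet;
 *
 *        tasklet_init(&my_tasklet, my_tasklet_fn, (unsigned long)my_dev);
 *        ...
 *        tasklet_schedule(&my_tasklet);        ...e.g. from the hard irq handler...
 *        ...
 *        tasklet_kill(&my_tasklet);        ...on teardown, from process context...
 */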

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
        struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

        list_add_tail(&cp->list, head);

        /* Trigger the softirq only if the list was previously empty.  */
        if (head->next == &cp->list)
                raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
        struct call_single_data *cp = data;
        unsigned long flags;
        int softirq;

        softirq = cp->priv;

        local_irq_save(flags);
        __local_trigger(cp, softirq);
        local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        if (cpu_online(cpu)) {
                cp->func = remote_softirq_receive;
                cp->info = cp;
                cp->flags = 0;
                cp->priv = softirq;

                __smp_call_function_single(cpu, cp, 0);
                return 0;
        }
        return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        return 1;
}
#endif

/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
        if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
                __local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        unsigned long flags;
        int this_cpu;

        local_irq_save(flags);
        this_cpu = smp_processor_id();
        __send_remote_softirq(cp, cpu, this_cpu, softirq);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
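
/*
 * Illustrative sketch (hypothetical structure and field names): a user
 * of this interface embeds a call_single_data in its per-request object
 * and asks for the completion softirq to run on the CPU that submitted
 * the request:
 *
 *        struct my_request {
 *                struct call_single_data csd;
 *                int submit_cpu;
 *                ...
 *        };
 *
 *        send_remote_softirq(&req->csd, req->submit_cpu, BLOCK_SOFTIRQ);
 *
 * If the target CPU is offline, or is the local CPU, the work is queued
 * on the local CPU's softirq_work_list and the softirq is raised here
 * instead.
 */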

static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
                                               unsigned long action, void *hcpu)
{
        /*
         * If a CPU goes away, splice its entries to the current CPU
         * and trigger a run of the softirq
         */
        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
                int cpu = (unsigned long) hcpu;
                int i;

                local_irq_disable();
                for (i = 0; i < NR_SOFTIRQS; i++) {
                        struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
                        struct list_head *local_head;

                        if (list_empty(head))
                                continue;

                        local_head = &__get_cpu_var(softirq_work_list[i]);
                        list_splice_init(head, local_head);
                        raise_softirq_irqoff(i);
                }
                local_irq_enable();
        }

        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
        .notifier_call  = remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                int i;

                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
                for (i = 0; i < NR_SOFTIRQS; i++)
                        INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
        }

        register_hotcpu_notifier(&remote_softirq_cpu_notifier);

        open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd(void * __bind_cpu)
{
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {
                preempt_disable();
                if (!local_softirq_pending()) {
                        preempt_enable_no_resched();
                        schedule();
                        preempt_disable();
                }

                __set_current_state(TASK_RUNNING);

                while (local_softirq_pending()) {
                        /* Preempt disable stops cpu going offline.
                           If already offline, we'll be on wrong CPU:
                           don't process */
                        if (cpu_is_offline((long)__bind_cpu))
                                goto wait_to_die;
                        do_softirq();
                        preempt_enable_no_resched();
                        cond_resched();
                        preempt_disable();
                        rcu_qsctr_inc((long)__bind_cpu);
                }
                preempt_enable();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;

wait_to_die:
        preempt_enable();
        /* Wait for kthread_stop */
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
        struct tasklet_struct **i;

        BUG_ON(cpu_online(cpu));
        BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

        if (!test_bit(TASKLET_STATE_SCHED, &t->state))
                return;

        /* CPU is dead, so no lock needed. */
        for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
                if (*i == t) {
                        *i = t->next;
                        /* If this was the tail element, move the tail ptr */
                        if (*i == NULL)
                                per_cpu(tasklet_vec, cpu).tail = i;
                        return;
                }
        }
        BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
        /* CPU is dead, so no lock needed. */
        local_irq_disable();

        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
                *(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
                __get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);

        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
                *__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
                __get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
        raise_softirq_irqoff(HI_SOFTIRQ);

        local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
{
        int hotcpu = (unsigned long)hcpu;
        struct task_struct *p;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
                if (IS_ERR(p)) {
                        printk("ksoftirqd for %i failed\n", hotcpu);
                        return NOTIFY_BAD;
                }
                kthread_bind(p, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = p;
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                wake_up_process(per_cpu(ksoftirqd, hotcpu));
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                if (!per_cpu(ksoftirqd, hotcpu))
                        break;
                /* Unbind so it can run.  Fall thru. */
                kthread_bind(per_cpu(ksoftirqd, hotcpu),
                             cpumask_any(cpu_online_mask));
        case CPU_DEAD:
        case CPU_DEAD_FROZEN: {
                struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

                p = per_cpu(ksoftirqd, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = NULL;
                sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
                kthread_stop(p);
                takeover_tasklets(hotcpu);
                break;
        }
#endif /* CONFIG_HOTPLUG_CPU */
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
        .notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

        BUG_ON(err == NOTIFY_BAD);
        cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
        register_cpu_notifier(&cpu_nfb);
        return 0;
}
early_initcall(spawn_ksoftirqd);

#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
        int ret = 0;

        preempt_disable();
        ret = smp_call_function(func, info, wait);
        local_irq_disable();
        func(info);
        local_irq_enable();
        preempt_enable();
        return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif
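
/*
 * Illustrative sketch (hypothetical callback name):
 *
 *        static void my_flush_cb(void *info)
 *        {
 *                ...runs with interrupts disabled on every CPU,
 *                including the calling one...
 *        }
 *
 *        on_each_cpu(my_flush_cb, NULL, 1);        ...wait == 1: return only
 *                                                  after all CPUs are done...
 *
 * The callback runs from IPI context on remote CPUs and directly, with
 * irqs disabled, on the calling CPU, so it must not sleep.
 */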

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
        return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
        return 0;
}

int __init __weak arch_early_irq_init(void)
{
        return 0;
}

int __weak arch_init_chip_data(struct irq_desc *desc, int node)
{
        return 0;
}