irq: Remove unnecessary bootmem code
1 /*
2  * linux/kernel/irq/handle.c
3  *
4  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5  * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
6  *
7  * This file contains the core interrupt handling code.
8  *
9  * Detailed information is available in Documentation/DocBook/genericirq
10  *
11  */
12
13 #include <linux/irq.h>
14 #include <linux/sched.h>
15 #include <linux/slab.h>
16 #include <linux/module.h>
17 #include <linux/random.h>
18 #include <linux/interrupt.h>
19 #include <linux/kernel_stat.h>
20 #include <linux/rculist.h>
21 #include <linux/hash.h>
22 #include <trace/events/irq.h>
23
24 #include "internals.h"
25
26 /*
27  * lockdep: we want to handle all irq_desc locks as a single lock-class:
28  */
29 struct lock_class_key irq_desc_lock_class;
30
31 /**
32  * handle_bad_irq - handle spurious and unhandled irqs
33  * @irq:       the interrupt number
34  * @desc:      description of the interrupt
35  *
36  * Handles spurious and unhandled IRQs. It also prints a debug message.
37  */
38 void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
39 {
40         print_irq_desc(irq, desc);
41         kstat_incr_irqs_this_cpu(irq, desc);
42         ack_bad_irq(irq);
43 }
44
45 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
46 static void __init init_irq_default_affinity(void)
47 {
48         alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
49         cpumask_setall(irq_default_affinity);
50 }
51 #else
52 static void __init init_irq_default_affinity(void)
53 {
54 }
55 #endif
56
57 /*
58  * Linux has a controller-independent interrupt architecture.
59  * Every controller has a 'controller-template' that is used
60  * by the main code to do the right thing. Each driver-visible
61  * interrupt source is transparently wired to the appropriate
62  * controller. Thus drivers need not be aware of the
63  * interrupt-controller.
64  *
65  * The code is designed to be easily extended with new/different
66  * interrupt controllers, without having to do assembly magic or
67  * having to touch the generic code.
68  *
69  * Controller mappings for all interrupt sources:
70  */
71 int nr_irqs = NR_IRQS;
72 EXPORT_SYMBOL_GPL(nr_irqs);
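
/*
 * Illustrative sketch (hypothetical driver code, not part of the original
 * file): the controller-independent design described above means a driver
 * only ever deals in IRQ numbers and flags; request_irq() wires its handler
 * to whatever controller sits behind that number. Names prefixed with
 * "example_" are made up for this sketch.
 */
static irqreturn_t example_driver_isr(int irq, void *dev_id)
{
	/* acknowledge and service the device here */
	return IRQ_HANDLED;
}

static int example_driver_attach(unsigned int irq, void *dev)
{
	/* no knowledge of the underlying interrupt controller is needed */
	return request_irq(irq, example_driver_isr, IRQF_SHARED,
			   "example-device", dev);
}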
73
74 #ifdef CONFIG_SPARSE_IRQ
75
76 static struct irq_desc irq_desc_init = {
77         .irq        = -1,
78         .status     = IRQ_DISABLED,
79         .chip       = &no_irq_chip,
80         .handle_irq = handle_bad_irq,
81         .depth      = 1,
82         .lock       = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
83 };
84
85 void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
86 {
87         void *ptr;
88
89         ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
90                            GFP_ATOMIC, node);
91
92         /*
93          * Don't overwrite if we could not get a new one;
94          * init_copy_kstat_irqs() may still be using the old one.
95          */
96         if (ptr) {
97                 printk(KERN_DEBUG "  alloc kstat_irqs on node %d\n", node);
98                 desc->kstat_irqs = ptr;
99         }
100 }
101
102 static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
103 {
104         memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
105
106         raw_spin_lock_init(&desc->lock);
107         desc->irq = irq;
108 #ifdef CONFIG_SMP
109         desc->node = node;
110 #endif
111         lockdep_set_class(&desc->lock, &irq_desc_lock_class);
112         init_kstat_irqs(desc, node, nr_cpu_ids);
113         if (!desc->kstat_irqs) {
114                 printk(KERN_ERR "can not alloc kstat_irqs\n");
115                 BUG_ON(1);
116         }
117         if (!alloc_desc_masks(desc, node, false)) {
118                 printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
119                 BUG_ON(1);
120         }
121         init_desc_masks(desc);
122         arch_init_chip_data(desc, node);
123 }
124
125 /*
126  * Protect the sparse_irqs:
127  */
128 DEFINE_RAW_SPINLOCK(sparse_irq_lock);
129
130 struct irq_desc **irq_desc_ptrs __read_mostly;
131
132 static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
133         [0 ... NR_IRQS_LEGACY-1] = {
134                 .irq        = -1,
135                 .status     = IRQ_DISABLED,
136                 .chip       = &no_irq_chip,
137                 .handle_irq = handle_bad_irq,
138                 .depth      = 1,
139                 .lock       = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
140         }
141 };
142
143 static unsigned int *kstat_irqs_legacy;
144
145 int __init early_irq_init(void)
146 {
147         struct irq_desc *desc;
148         int legacy_count;
149         int node;
150         int i;
151
152         init_irq_default_affinity();
153
154          /* initialize nr_irqs based on nr_cpu_ids */
155         arch_probe_nr_irqs();
156         printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
157
158         desc = irq_desc_legacy;
159         legacy_count = ARRAY_SIZE(irq_desc_legacy);
160         node = first_online_node;
161
162         /* allocate irq_desc_ptrs array based on nr_irqs */
163         irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT);
164
165         /* allocate based on nr_cpu_ids */
166         kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
167                                           sizeof(int), GFP_NOWAIT, node);
168
169         for (i = 0; i < legacy_count; i++) {
170                 desc[i].irq = i;
171 #ifdef CONFIG_SMP
172                 desc[i].node = node;
173 #endif
174                 desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
175                 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
176                 alloc_desc_masks(&desc[i], node, true);
177                 init_desc_masks(&desc[i]);
178                 irq_desc_ptrs[i] = desc + i;
179         }
180
181         for (i = legacy_count; i < nr_irqs; i++)
182                 irq_desc_ptrs[i] = NULL;
183
184         return arch_early_irq_init();
185 }
186
187 struct irq_desc *irq_to_desc(unsigned int irq)
188 {
189         if (irq_desc_ptrs && irq < nr_irqs)
190                 return irq_desc_ptrs[irq];
191
192         return NULL;
193 }
194
195 struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
196 {
197         struct irq_desc *desc;
198         unsigned long flags;
199
200         if (irq >= nr_irqs) {
201                 WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
202                         irq, nr_irqs);
203                 return NULL;
204         }
205
206         desc = irq_desc_ptrs[irq];
207         if (desc)
208                 return desc;
209
210         raw_spin_lock_irqsave(&sparse_irq_lock, flags);
211
212         /* We have to check it to avoid races with another CPU */
213         desc = irq_desc_ptrs[irq];
214         if (desc)
215                 goto out_unlock;
216
217         desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
218
219         printk(KERN_DEBUG "  alloc irq_desc for %d on node %d\n", irq, node);
220         if (!desc) {
221                 printk(KERN_ERR "can not alloc irq_desc\n");
222                 BUG_ON(1);
223         }
224         init_one_irq_desc(irq, desc, node);
225
226         irq_desc_ptrs[irq] = desc;
227
228 out_unlock:
229         raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
230
231         return desc;
232 }
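
/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * code that may touch an IRQ number before a descriptor exists asks for one
 * with irq_to_desc_alloc_node(); a NULL return means the number is out of
 * range (allocation failures are caught by the BUG_ON above).
 */
static int example_prepare_irq(unsigned int irq, int node)
{
	struct irq_desc *desc = irq_to_desc_alloc_node(irq, node);

	if (!desc)
		return -EINVAL;		/* irq >= nr_irqs */

	/* desc->lock, desc->kstat_irqs and the cpumasks are now set up */
	return 0;
}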
233
234 #else /* !CONFIG_SPARSE_IRQ */
235
236 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
237         [0 ... NR_IRQS-1] = {
238                 .status = IRQ_DISABLED,
239                 .chip = &no_irq_chip,
240                 .handle_irq = handle_bad_irq,
241                 .depth = 1,
242                 .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
243         }
244 };
245
246 static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
247 int __init early_irq_init(void)
248 {
249         struct irq_desc *desc;
250         int count;
251         int i;
252
253         init_irq_default_affinity();
254
255         printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
256
257         desc = irq_desc;
258         count = ARRAY_SIZE(irq_desc);
259
260         for (i = 0; i < count; i++) {
261                 desc[i].irq = i;
262                 alloc_desc_masks(&desc[i], 0, true);
263                 init_desc_masks(&desc[i]);
264                 desc[i].kstat_irqs = kstat_irqs_all[i];
265         }
266         return arch_early_irq_init();
267 }
268
269 struct irq_desc *irq_to_desc(unsigned int irq)
270 {
271         return (irq < NR_IRQS) ? irq_desc + irq : NULL;
272 }
273
274 struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
275 {
276         return irq_to_desc(irq);
277 }
278 #endif /* !CONFIG_SPARSE_IRQ */
279
280 void clear_kstat_irqs(struct irq_desc *desc)
281 {
282         memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
283 }
284
285 /*
286  * What should we do if we get a hw irq event on an illegal vector?
287  * Each architecture has to answer this for itself.
288  */
289 static void ack_bad(unsigned int irq)
290 {
291         struct irq_desc *desc = irq_to_desc(irq);
292
293         print_irq_desc(irq, desc);
294         ack_bad_irq(irq);
295 }
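
/*
 * Illustrative sketch (hypothetical, not part of the original file): the
 * per-architecture answer to the question above is ack_bad_irq(). A minimal
 * version might just log the stray vector and, where the hardware needs it,
 * issue an end-of-interrupt so the controller is not left wedged.
 */
static void example_arch_ack_bad_irq(unsigned int irq)
{
	printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
	/* architecture-specific EOI to the interrupt controller goes here */
}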
296
297 /*
298  * NOP functions
299  */
300 static void noop(unsigned int irq)
301 {
302 }
303
304 static unsigned int noop_ret(unsigned int irq)
305 {
306         return 0;
307 }
308
309 /*
310  * Generic no controller implementation
311  */
312 struct irq_chip no_irq_chip = {
313         .name           = "none",
314         .startup        = noop_ret,
315         .shutdown       = noop,
316         .enable         = noop,
317         .disable        = noop,
318         .ack            = ack_bad,
319         .end            = noop,
320 };
321
322 /*
323  * Generic dummy implementation which can be used for
324  * real dumb interrupt sources
325  */
326 struct irq_chip dummy_irq_chip = {
327         .name           = "dummy",
328         .startup        = noop_ret,
329         .shutdown       = noop,
330         .enable         = noop,
331         .disable        = noop,
332         .ack            = noop,
333         .mask           = noop,
334         .unmask         = noop,
335         .end            = noop,
336 };
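
/*
 * Illustrative sketch (hypothetical platform code, not part of the original
 * file): dummy_irq_chip is typically attached to interrupts that need no
 * hardware ack/mask handling, e.g. a demultiplexed GPIO line.
 */
static void example_init_dumb_irq(unsigned int irq)
{
	set_irq_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
}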
337
338 /*
339  * Special, empty irq handler:
340  */
341 irqreturn_t no_action(int cpl, void *dev_id)
342 {
343         return IRQ_NONE;
344 }
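
/*
 * Illustrative sketch (hypothetical arch code, not part of the original
 * file): a typical user of no_action is a cascade interrupt that must be
 * claimed but never does any work itself, e.g. the chained line of a
 * legacy PIC.
 */
static struct irqaction example_cascade_action = {
	.handler = no_action,
	.name    = "cascade",
};

static void example_setup_cascade(unsigned int cascade_irq)
{
	/* claim the cascade line so nobody else can grab it */
	setup_irq(cascade_irq, &example_cascade_action);
}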
345
346 static void warn_no_thread(unsigned int irq, struct irqaction *action)
347 {
348         if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
349                 return;
350
351         printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
352                "but no thread function available.\n", irq, action->name);
353 }
354
355 /**
356  * handle_IRQ_event - irq action chain handler
357  * @irq:        the interrupt number
358  * @action:     the interrupt action chain for this irq
359  *
360  * Handles the action chain of an irq event
361  */
362 irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
363 {
364         irqreturn_t ret, retval = IRQ_NONE;
365         unsigned int status = 0;
366
367         if (!(action->flags & IRQF_DISABLED))
368                 local_irq_enable_in_hardirq();
369
370         do {
371                 trace_irq_handler_entry(irq, action);
372                 ret = action->handler(irq, action->dev_id);
373                 trace_irq_handler_exit(irq, action, ret);
374
375                 switch (ret) {
376                 case IRQ_WAKE_THREAD:
377                         /*
378                          * Set result to handled so the spurious check
379                          * does not trigger.
380                          */
381                         ret = IRQ_HANDLED;
382
383                         /*
384                          * Catch drivers which return WAKE_THREAD but
385                          * did not set up a thread function
386                          */
387                         if (unlikely(!action->thread_fn)) {
388                                 warn_no_thread(irq, action);
389                                 break;
390                         }
391
392                         /*
393                          * Wake up the handler thread for this
394                          * action. In case the thread crashed and was
395                          * killed we just pretend that we handled the
396                          * interrupt. The hardirq handler above has
397                          * disabled the device interrupt, so no irq
398                          * storm is lurking.
399                          */
400                         if (likely(!test_bit(IRQTF_DIED,
401                                              &action->thread_flags))) {
402                                 set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
403                                 wake_up_process(action->thread);
404                         }
405
406                         /* Fall through to add to randomness */
407                 case IRQ_HANDLED:
408                         status |= action->flags;
409                         break;
410
411                 default:
412                         break;
413                 }
414
415                 retval |= ret;
416                 action = action->next;
417         } while (action);
418
419         if (status & IRQF_SAMPLE_RANDOM)
420                 add_interrupt_randomness(irq);
421         local_irq_disable();
422
423         return retval;
424 }
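
/*
 * Illustrative sketch (hypothetical driver code, not part of the original
 * file): the IRQ_WAKE_THREAD case above is what fires when a driver's
 * hardirq handler defers the real work to a threaded handler registered
 * with request_threaded_irq().
 */
static irqreturn_t example_quick_check(int irq, void *dev_id)
{
	/* only verify that our device raised the interrupt */
	return IRQ_WAKE_THREAD;		/* handle_IRQ_event() wakes the thread */
}

static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	/* the sleepable part of the handling runs here */
	return IRQ_HANDLED;
}

static int example_register(unsigned int irq, void *dev)
{
	return request_threaded_irq(irq, example_quick_check, example_thread_fn,
				    0, "example-device", dev);
}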
425
426 #ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
427
428 #ifdef CONFIG_ENABLE_WARN_DEPRECATED
429 # warning __do_IRQ is deprecated. Please convert to proper flow handlers
430 #endif
431
432 /**
433  * __do_IRQ - original all in one highlevel IRQ handler
434  * @irq:        the interrupt number
435  *
436  * __do_IRQ handles all normal device IRQ's (the special
437  * SMP cross-CPU interrupts have their own specific
438  * handlers).
439  *
440  * This is the original x86 implementation which is used for every
441  * interrupt type.
442  */
443 unsigned int __do_IRQ(unsigned int irq)
444 {
445         struct irq_desc *desc = irq_to_desc(irq);
446         struct irqaction *action;
447         unsigned int status;
448
449         kstat_incr_irqs_this_cpu(irq, desc);
450
451         if (CHECK_IRQ_PER_CPU(desc->status)) {
452                 irqreturn_t action_ret;
453
454                 /*
455                  * No locking required for CPU-local interrupts:
456                  */
457                 if (desc->chip->ack)
458                         desc->chip->ack(irq);
459                 if (likely(!(desc->status & IRQ_DISABLED))) {
460                         action_ret = handle_IRQ_event(irq, desc->action);
461                         if (!noirqdebug)
462                                 note_interrupt(irq, desc, action_ret);
463                 }
464                 desc->chip->end(irq);
465                 return 1;
466         }
467
468         raw_spin_lock(&desc->lock);
469         if (desc->chip->ack)
470                 desc->chip->ack(irq);
471         /*
472          * REPLAY is when Linux resends an IRQ that was dropped earlier
473          * WAITING is used by probe to mark irqs that are being tested
474          */
475         status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
476         status |= IRQ_PENDING; /* we _want_ to handle it */
477
478         /*
479          * If the IRQ is disabled for whatever reason, we cannot
480          * use the action we have.
481          */
482         action = NULL;
483         if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
484                 action = desc->action;
485                 status &= ~IRQ_PENDING; /* we commit to handling */
486                 status |= IRQ_INPROGRESS; /* we are handling it */
487         }
488         desc->status = status;
489
490         /*
491          * If there is no IRQ handler or it was disabled, exit early.
492          * Since we set PENDING, if another processor is handling
493          * a different instance of this same irq, the other processor
494          * will take care of it.
495          */
496         if (unlikely(!action))
497                 goto out;
498
499         /*
500          * Edge triggered interrupts need to remember
501          * pending events.
502          * This applies to any hw interrupts that allow a second
503          * instance of the same irq to arrive while we are in do_IRQ
504          * or in the handler. But the code here only handles the _second_
505          * instance of the irq, not the third or fourth. So it is mostly
506          * useful for irq hardware that does not mask cleanly in an
507          * SMP environment.
508          */
509         for (;;) {
510                 irqreturn_t action_ret;
511
512                 raw_spin_unlock(&desc->lock);
513
514                 action_ret = handle_IRQ_event(irq, action);
515                 if (!noirqdebug)
516                         note_interrupt(irq, desc, action_ret);
517
518                 raw_spin_lock(&desc->lock);
519                 if (likely(!(desc->status & IRQ_PENDING)))
520                         break;
521                 desc->status &= ~IRQ_PENDING;
522         }
523         desc->status &= ~IRQ_INPROGRESS;
524
525 out:
526         /*
527          * The ->end() handler has to deal with interrupts which got
528          * disabled while the handler was running.
529          */
530         desc->chip->end(irq);
531         raw_spin_unlock(&desc->lock);
532
533         return 1;
534 }
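
/*
 * Illustrative sketch (hypothetical, not part of the original file): on an
 * architecture still using this model, the low-level entry code decodes the
 * hardware vector and hands the Linux IRQ number to __do_IRQ(), roughly as
 * below (irq_enter()/irq_exit() come from <linux/hardirq.h>).
 */
static void example_arch_do_IRQ(unsigned int irq)
{
	irq_enter();		/* enter hardirq context accounting */
	__do_IRQ(irq);		/* generic all-in-one handling, see above */
	irq_exit();		/* run pending softirqs on the way out */
}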
535 #endif
536
537 void early_init_irq_lock_class(void)
538 {
539         struct irq_desc *desc;
540         int i;
541
542         for_each_irq_desc(i, desc) {
543                 lockdep_set_class(&desc->lock, &irq_desc_lock_class);
544         }
545 }
546
547 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
548 {
549         struct irq_desc *desc = irq_to_desc(irq);
550         return desc ? desc->kstat_irqs[cpu] : 0;
551 }
552 EXPORT_SYMBOL(kstat_irqs_cpu);
553
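
/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * a /proc/interrupts style printer can total an IRQ's count across CPUs
 * with the accessor above.
 */
static unsigned int example_total_irq_count(unsigned int irq)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)		/* every possible CPU id */
		sum += kstat_irqs_cpu(irq, cpu);

	return sum;
}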