irq: optimize init_kstat_irqs/init_copy_kstat_irqs
[safe/jmp/linux-2.6] / kernel / irq / handle.c
/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;

/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:       the interrupt number
 * @desc:      description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
        print_irq_desc(irq, desc);
        kstat_incr_irqs_this_cpu(irq, desc);
        ack_bad_irq(irq);
}

/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

#ifdef CONFIG_SPARSE_IRQ
static struct irq_desc irq_desc_init = {
        .irq        = -1,
        .status     = IRQ_DISABLED,
        .chip       = &no_irq_chip,
        .handle_irq = handle_bad_irq,
        .depth      = 1,
        .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
#ifdef CONFIG_SMP
        .affinity   = CPU_MASK_ALL
#endif
};

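/*
 * Allocate a zeroed array of @nr per-CPU counters for @desc->kstat_irqs
 * on the memory node of @cpu.  On allocation failure the old array (if
 * any) is left in place, so init_copy_kstat_irqs() can keep using it.
 */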
void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
{
        int node;
        void *ptr;

        node = cpu_to_node(cpu);
        ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node);

        /*
         * don't overwrite if we can not get a new one;
         * init_copy_kstat_irqs() could still use the old one
         */
        if (ptr) {
                printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n",
                         cpu, node);
                desc->kstat_irqs = ptr;
        }
}

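/*
 * Set up a freshly allocated descriptor: copy in the irq_desc_init
 * template, re-initialize the lock (all descriptor locks share one
 * lockdep class), record the irq number and home CPU, allocate the
 * per-CPU statistics and let the architecture attach its chip data.
 */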
static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
{
        memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

        spin_lock_init(&desc->lock);
        desc->irq = irq;
#ifdef CONFIG_SMP
        desc->cpu = cpu;
#endif
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        init_kstat_irqs(desc, cpu, nr_cpu_ids);
        if (!desc->kstat_irqs) {
                printk(KERN_ERR "can not alloc kstat_irqs\n");
                BUG_ON(1);
        }
        arch_init_chip_data(desc, cpu);
}

/*
 * Protect the sparse_irqs:
 */
DEFINE_SPINLOCK(sparse_irq_lock);

struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;

static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS_LEGACY-1] = {
                .irq        = -1,
                .status     = IRQ_DISABLED,
                .chip       = &no_irq_chip,
                .handle_irq = handle_bad_irq,
                .depth      = 1,
                .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
#ifdef CONFIG_SMP
                .affinity   = CPU_MASK_ALL
#endif
        }
};

/* FIXME: use bootmem alloc ...*/
static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];

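/*
 * Publish the statically allocated legacy descriptors in irq_desc_ptrs[]
 * and hook up their per-CPU counters from kstat_irqs_legacy[].  All other
 * slots stay NULL until irq_to_desc_alloc_cpu() fills them on demand.
 */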
int __init early_irq_init(void)
{
        struct irq_desc *desc;
        int legacy_count;
        int i;

        desc = irq_desc_legacy;
        legacy_count = ARRAY_SIZE(irq_desc_legacy);

        for (i = 0; i < legacy_count; i++) {
                desc[i].irq = i;
                desc[i].kstat_irqs = kstat_irqs_legacy[i];
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);

                irq_desc_ptrs[i] = desc + i;
        }

        for (i = legacy_count; i < NR_IRQS; i++)
                irq_desc_ptrs[i] = NULL;

        return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
}

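/*
 * Return the descriptor for @irq, allocating it on @cpu's memory node if
 * it does not exist yet.  The lockless lookup is repeated under
 * sparse_irq_lock so that two CPUs racing here install only one
 * descriptor.
 */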
struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc;
        unsigned long flags;
        int node;

        if (irq >= NR_IRQS) {
                printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
                                irq, NR_IRQS);
                WARN_ON(1);
                return NULL;
        }

        desc = irq_desc_ptrs[irq];
        if (desc)
                return desc;

        spin_lock_irqsave(&sparse_irq_lock, flags);

        /* We have to check it to avoid races with another CPU */
        desc = irq_desc_ptrs[irq];
        if (desc)
                goto out_unlock;

        node = cpu_to_node(cpu);
        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
        printk(KERN_DEBUG "  alloc irq_desc for %d on cpu %d node %d\n",
                 irq, cpu, node);
        if (!desc) {
                printk(KERN_ERR "can not alloc irq_desc\n");
                BUG_ON(1);
        }
        init_one_irq_desc(irq, desc, cpu);

        irq_desc_ptrs[irq] = desc;

out_unlock:
        spin_unlock_irqrestore(&sparse_irq_lock, flags);

        return desc;
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .status = IRQ_DISABLED,
                .chip = &no_irq_chip,
                .handle_irq = handle_bad_irq,
                .depth = 1,
                .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
#ifdef CONFIG_SMP
                .affinity = CPU_MASK_ALL
#endif
        }
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
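/*
 * With a static irq_desc[] array there is nothing to allocate: just wire
 * each descriptor to its irq number and its row in kstat_irqs_all[].
 */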
int __init early_irq_init(void)
{
        struct irq_desc *desc;
        int count;
        int i;

        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);

        for (i = 0; i < count; i++) {
                desc[i].irq = i;
                desc[i].kstat_irqs = kstat_irqs_all[i];
        }

        return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

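/* Descriptors are preallocated here, so this is a plain lookup. */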
struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
        return irq_to_desc(irq);
}
#endif /* !CONFIG_SPARSE_IRQ */

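/* Reset the per-CPU interrupt counters of @desc for all possible CPUs. */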
void clear_kstat_irqs(struct irq_desc *desc)
{
        memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
}

/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this itself.
 */
static void ack_bad(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        print_irq_desc(irq, desc);
        ack_bad_irq(irq);
}

/*
 * NOP functions
 */
static void noop(unsigned int irq)
{
}

static unsigned int noop_ret(unsigned int irq)
{
        return 0;
}

/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
        .name           = "none",
        .startup        = noop_ret,
        .shutdown       = noop,
        .enable         = noop,
        .disable        = noop,
        .ack            = ack_bad,
        .end            = noop,
};

/*
 * Generic dummy implementation which can be used for
 * real dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
        .name           = "dummy",
        .startup        = noop_ret,
        .shutdown       = noop,
        .enable         = noop,
        .disable        = noop,
        .ack            = noop,
        .mask           = noop,
        .unmask         = noop,
        .end            = noop,
};

/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
        return IRQ_NONE;
}

/**
 * handle_IRQ_event - irq action chain handler
 * @irq:        the interrupt number
 * @action:     the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
        irqreturn_t ret, retval = IRQ_NONE;
        unsigned int status = 0;

        if (!(action->flags & IRQF_DISABLED))
                local_irq_enable_in_hardirq();

        do {
                ret = action->handler(irq, action->dev_id);
                if (ret == IRQ_HANDLED)
                        status |= action->flags;
                retval |= ret;
                action = action->next;
        } while (action);

        if (status & IRQF_SAMPLE_RANDOM)
                add_interrupt_randomness(irq);
        local_irq_disable();

        return retval;
}

#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
/**
 * __do_IRQ - original all in one high-level IRQ handler
 * @irq:        the interrupt number
 *
 * __do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        unsigned int status;

        kstat_incr_irqs_this_cpu(irq, desc);

        if (CHECK_IRQ_PER_CPU(desc->status)) {
                irqreturn_t action_ret;

                /*
                 * No locking required for CPU-local interrupts:
                 */
                if (desc->chip->ack) {
                        desc->chip->ack(irq);
                        /* get new one */
                        desc = irq_remap_to_desc(irq, desc);
                }
                if (likely(!(desc->status & IRQ_DISABLED))) {
                        action_ret = handle_IRQ_event(irq, desc->action);
                        if (!noirqdebug)
                                note_interrupt(irq, desc, action_ret);
                }
                desc->chip->end(irq);
                return 1;
        }

        spin_lock(&desc->lock);
        if (desc->chip->ack) {
                desc->chip->ack(irq);
                desc = irq_remap_to_desc(irq, desc);
        }
        /*
         * REPLAY is when Linux resends an IRQ that was dropped earlier
         * WAITING is used by probe to mark irqs that are being tested
         */
        status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
        status |= IRQ_PENDING; /* we _want_ to handle it */

        /*
         * If the IRQ is disabled for whatever reason, we cannot
         * use the action we have.
         */
        action = NULL;
        if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
                action = desc->action;
                status &= ~IRQ_PENDING; /* we commit to handling */
                status |= IRQ_INPROGRESS; /* we are handling it */
        }
        desc->status = status;

        /*
         * If there is no IRQ handler or it was disabled, exit early.
         * Since we set PENDING, if another processor is handling
         * a different instance of this same irq, the other processor
         * will take care of it.
         */
        if (unlikely(!action))
                goto out;

        /*
         * Edge triggered interrupts need to remember
         * pending events.
         * This applies to any hw interrupts that allow a second
         * instance of the same irq to arrive while we are in do_IRQ
         * or in the handler. But the code here only handles the _second_
         * instance of the irq, not the third or fourth. So it is mostly
         * useful for irq hardware that does not mask cleanly in an
         * SMP environment.
         */
        for (;;) {
                irqreturn_t action_ret;

                spin_unlock(&desc->lock);

                action_ret = handle_IRQ_event(irq, action);
                if (!noirqdebug)
                        note_interrupt(irq, desc, action_ret);

                spin_lock(&desc->lock);
                if (likely(!(desc->status & IRQ_PENDING)))
                        break;
                desc->status &= ~IRQ_PENDING;
        }
        desc->status &= ~IRQ_INPROGRESS;

out:
        /*
         * The ->end() handler has to deal with interrupts which got
         * disabled while the handler was running.
         */
        desc->chip->end(irq);
        spin_unlock(&desc->lock);

        return 1;
}
#endif

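/*
 * Put every descriptor lock into irq_desc_lock_class so that lockdep
 * treats all of them as a single lock class (see the comment at the top
 * of this file).
 */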
void early_init_irq_lock_class(void)
{
        struct irq_desc *desc;
        int i;

        for_each_irq_desc(i, desc) {
                lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        }
}

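/*
 * Return the number of interrupts of @irq that have been handled on
 * @cpu, or 0 if no descriptor exists for @irq.
 */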
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);
        return desc ? desc->kstat_irqs[cpu] : 0;
}
EXPORT_SYMBOL(kstat_irqs_cpu);