/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>

#include "internals.h"
/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;
/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:  the interrupt number
 * @desc: description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
        print_irq_desc(irq, desc);
        kstat_incr_irqs_this_cpu(irq, desc);
        ack_bad_irq(irq);
}
/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template', that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);
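
/*
 * Illustrative sketch, not part of the original file (compiled out via
 * #if 0): because each interrupt source is wired to its controller behind
 * struct irq_chip, a driver only ever uses the generic request_irq() API
 * and never touches the controller directly.  The handler and device names
 * below are hypothetical.
 */
#if 0
static irqreturn_t example_handler(int irq, void *dev_id)
{
        /* Talk to the device only; the irq_chip ack/mask/unmask calls
         * are issued by the generic flow handler, not by the driver. */
        return IRQ_HANDLED;
}

static int example_probe(unsigned int irq, void *dev)
{
        return request_irq(irq, example_handler, IRQF_SHARED,
                           "example", dev);
}
#endif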
#ifdef CONFIG_SPARSE_IRQ
static struct irq_desc irq_desc_init = {
        .irq        = -1,
        .status     = IRQ_DISABLED,
        .chip       = &no_irq_chip,
        .handle_irq = handle_bad_irq,
        .depth      = 1,
        .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
#ifdef CONFIG_SMP
        .affinity   = CPU_MASK_ALL
#endif
};
void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
{
        unsigned long bytes;
        char *ptr;
        int node;

        /* Compute how many bytes we need per irq and allocate them */
        bytes = nr * sizeof(unsigned int);

        node = cpu_to_node(cpu);
        ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
        printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n", cpu, node);

        if (ptr)
                desc->kstat_irqs = (unsigned int *)ptr;
}
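
/*
 * Illustrative sketch, not part of the original file (compiled out): the
 * array allocated above holds one counter per CPU.  Assuming the
 * incrementing macro resolves to a per-CPU array access (its definition
 * lives elsewhere in the tree), the increment/read pair looks roughly like:
 */
#if 0
static void example_account_irq(struct irq_desc *desc)
{
        desc->kstat_irqs[smp_processor_id()]++;  /* what the hot path does */
}
#endif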
void __attribute__((weak)) arch_init_chip_data(struct irq_desc *desc, int cpu)
{
}

static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
{
        memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

        spin_lock_init(&desc->lock);
        desc->irq = irq;
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        init_kstat_irqs(desc, cpu, nr_cpu_ids);
        if (!desc->kstat_irqs) {
                printk(KERN_ERR "can not alloc kstat_irqs\n");
                BUG_ON(1);
        }
        arch_init_chip_data(desc, cpu);
}
/*
 * Protect the sparse_irqs:
 */
DEFINE_SPINLOCK(sparse_irq_lock);

struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS_LEGACY-1] = {
                .irq        = -1,
                .status     = IRQ_DISABLED,
                .chip       = &no_irq_chip,
                .handle_irq = handle_bad_irq,
                .depth      = 1,
                .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
#ifdef CONFIG_SMP
                .affinity   = CPU_MASK_ALL
#endif
        }
};
/* FIXME: use bootmem alloc ...*/
static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
void __init early_irq_init(void)
{
        struct irq_desc *desc;
        int legacy_count;
        int i;

        desc = irq_desc_legacy;
        legacy_count = ARRAY_SIZE(irq_desc_legacy);

        for (i = 0; i < legacy_count; i++) {
                desc[i].irq = i;
                desc[i].kstat_irqs = kstat_irqs_legacy[i];
                irq_desc_ptrs[i] = desc + i;
        }

        for (i = legacy_count; i < NR_IRQS; i++)
                irq_desc_ptrs[i] = NULL;

        arch_early_irq_init();
}
struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
}
struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc;
        unsigned long flags;
        int node;

        if (irq >= NR_IRQS) {
                printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
                        irq, NR_IRQS);
                WARN_ON(1);
                return NULL;
        }

        desc = irq_desc_ptrs[irq];
        if (desc)
                return desc;

        spin_lock_irqsave(&sparse_irq_lock, flags);

        /* We have to check it to avoid races with another CPU */
        desc = irq_desc_ptrs[irq];
        if (desc)
                goto out_unlock;

        node = cpu_to_node(cpu);
        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
        printk(KERN_DEBUG "  alloc irq_desc for %d on cpu %d node %d\n",
                irq, cpu, node);
        if (!desc) {
                printk(KERN_ERR "can not alloc irq_desc\n");
                BUG_ON(1);
        }
        init_one_irq_desc(irq, desc, cpu);
        irq_desc_ptrs[irq] = desc;

out_unlock:
        spin_unlock_irqrestore(&sparse_irq_lock, flags);

        return desc;
}
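
/*
 * Illustrative sketch, not part of the original file (compiled out):
 * irq_to_desc_alloc_cpu() above is a check/lock/re-check pattern.  Two CPUs
 * racing on the same irq either see the pointer published by the winner on
 * the unlocked fast path, or serialize on sparse_irq_lock and find it on the
 * second check, so only one descriptor is ever allocated per irq.
 */
#if 0
static struct irq_desc *example_lookup(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);   /* unlocked fast path */

        /* slow path takes sparse_irq_lock and re-checks before allocating */
        return desc ? desc : irq_to_desc_alloc_cpu(irq, cpu);
}
#endif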
#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .status     = IRQ_DISABLED,
                .chip       = &no_irq_chip,
                .handle_irq = handle_bad_irq,
                .depth      = 1,
                .lock       = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
#ifdef CONFIG_SMP
                .affinity   = CPU_MASK_ALL
#endif
        }
};
struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
        return irq_to_desc(irq);
}
#endif /* !CONFIG_SPARSE_IRQ */
/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this for itself.
 */
static void ack_bad(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        print_irq_desc(irq, desc);
        ack_bad_irq(irq);
}
/*
 * NOP functions
 */
static void noop(unsigned int irq)
{
}

static unsigned int noop_ret(unsigned int irq)
{
        return 0;
}
/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
        .name     = "none",
        .startup  = noop_ret,
        .shutdown = noop,
        .enable   = noop,
        .disable  = noop,
        .ack      = ack_bad,
        .end      = noop,
};
/*
 * Generic dummy implementation which can be used for
 * real dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
        .name     = "dummy",
        .startup  = noop_ret,
        .shutdown = noop,
        .enable   = noop,
        .disable  = noop,
        .ack      = noop,
        .mask     = noop,
        .unmask   = noop,
        .end      = noop,
};
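
/*
 * Illustrative sketch, not part of the original file (compiled out): a new
 * controller plugs in the same way as the two chips above -- fill a
 * struct irq_chip with the controller's primitive operations and the generic
 * code invokes them at the right points of the flow.  All register accesses
 * below are hypothetical placeholders.
 */
#if 0
static void example_mask(unsigned int irq)
{
        /* write the controller's mask register for this line */
}

static void example_unmask(unsigned int irq)
{
        /* write the controller's unmask register for this line */
}

static void example_ack(unsigned int irq)
{
        /* clear the pending bit at the controller */
}

static struct irq_chip example_chip = {
        .name   = "example",
        .ack    = example_ack,
        .mask   = example_mask,
        .unmask = example_unmask,
};
#endif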
/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
        return IRQ_NONE;
}
/**
 * handle_IRQ_event - irq action chain handler
 * @irq:    the interrupt number
 * @action: the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
        irqreturn_t ret, retval = IRQ_NONE;
        unsigned int status = 0;

        if (!(action->flags & IRQF_DISABLED))
                local_irq_enable_in_hardirq();

        do {
                ret = action->handler(irq, action->dev_id);
                if (ret == IRQ_HANDLED)
                        status |= action->flags;
                retval |= ret;
                action = action->next;
        } while (action);

        if (status & IRQF_SAMPLE_RANDOM)
                add_interrupt_randomness(irq);
        local_irq_disable();

        return retval;
}
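
/*
 * Illustrative sketch, not part of the original file (compiled out): the
 * action chain walked above is built by request_irq() with IRQF_SHARED.
 * Each handler on a shared line must check whether its own device raised
 * the interrupt and return IRQ_NONE otherwise, so the IRQ_HANDLED/IRQ_NONE
 * accumulation in retval stays meaningful.  The device type and status
 * check below are hypothetical.
 */
#if 0
static irqreturn_t example_shared_handler(int irq, void *dev_id)
{
        struct example_dev *dev = dev_id;       /* hypothetical device */

        if (!example_dev_raised_irq(dev))       /* hypothetical check */
                return IRQ_NONE;                /* not ours: next action runs */

        /* service the device ... */
        return IRQ_HANDLED;
}
#endif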
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
/**
 * __do_IRQ - original all in one highlevel IRQ handler
 * @irq: the interrupt number
 *
 * __do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        unsigned int status;

        kstat_incr_irqs_this_cpu(irq, desc);
        if (CHECK_IRQ_PER_CPU(desc->status)) {
                irqreturn_t action_ret;

                /*
                 * No locking required for CPU-local interrupts:
                 */
                if (desc->chip->ack) {
                        desc->chip->ack(irq);
                        desc = irq_remap_to_desc(irq, desc);
                }
                if (likely(!(desc->status & IRQ_DISABLED))) {
                        action_ret = handle_IRQ_event(irq, desc->action);
                        if (!noirqdebug)
                                note_interrupt(irq, desc, action_ret);
                }
                desc->chip->end(irq);
                return 1;
        }
        spin_lock(&desc->lock);
        if (desc->chip->ack) {
                desc->chip->ack(irq);
                desc = irq_remap_to_desc(irq, desc);
        }
        /*
         * REPLAY is when Linux resends an IRQ that was dropped earlier
         * WAITING is used by probe to mark irqs that are being tested
         */
        status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
        status |= IRQ_PENDING; /* we _want_ to handle it */
        /*
         * If the IRQ is disabled for whatever reason, we cannot
         * use the action we have.
         */
        action = NULL;
        if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
                action = desc->action;
                status &= ~IRQ_PENDING; /* we commit to handling */
                status |= IRQ_INPROGRESS; /* we are handling it */
        }
        desc->status = status;
        /*
         * If there is no IRQ handler or it was disabled, exit early.
         * Since we set PENDING, if another processor is handling
         * a different instance of this same irq, the other processor
         * will take care of it.
         */
        if (unlikely(!action))
                goto out;
        /*
         * Edge triggered interrupts need to remember
         * pending events.
         * This applies to any hw interrupts that allow a second
         * instance of the same irq to arrive while we are in do_IRQ
         * or in the handler. But the code here only handles the _second_
         * instance of the irq, not the third or fourth. So it is mostly
         * useful for irq hardware that does not mask cleanly in an
         * SMP environment.
         */
        for (;;) {
                irqreturn_t action_ret;

                spin_unlock(&desc->lock);

                action_ret = handle_IRQ_event(irq, action);
                if (!noirqdebug)
                        note_interrupt(irq, desc, action_ret);

                spin_lock(&desc->lock);
                if (likely(!(desc->status & IRQ_PENDING)))
                        break;
                desc->status &= ~IRQ_PENDING;
        }
        desc->status &= ~IRQ_INPROGRESS;
out:
        /*
         * The ->end() handler has to deal with interrupts which got
         * disabled while the handler was running.
         */
        desc->chip->end(irq);
        spin_unlock(&desc->lock);

        return 1;
}
#endif
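
/*
 * Illustrative sketch, not part of the original file (compiled out):
 * architectures that still use __do_IRQ() typically call it from their
 * low-level interrupt entry between irq_enter()/irq_exit(), roughly as
 * below; the real entry path is per-architecture assembly plus glue.
 */
#if 0
static void example_arch_do_IRQ(unsigned int irq)
{
        irq_enter();            /* account for hardirq context */
        __do_IRQ(irq);
        irq_exit();             /* run softirqs on the outermost exit */
}
#endif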
void early_init_irq_lock_class(void)
{
        struct irq_desc *desc;
        int i;

        for_each_irq_desc(i, desc) {
                lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        }
}
#ifdef CONFIG_SPARSE_IRQ
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc ? desc->kstat_irqs[cpu] : 0;
}
#endif
EXPORT_SYMBOL(kstat_irqs_cpu);
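
/*
 * Illustrative sketch, not part of the original file (compiled out): a
 * consumer such as a /proc/interrupts style report obtains the total count
 * for one line by summing the per-CPU counters exposed above, roughly:
 */
#if 0
static unsigned int example_total_irqs(unsigned int irq)
{
        unsigned int cpu, sum = 0;

        for_each_possible_cpu(cpu)
                sum += kstat_irqs_cpu(irq, cpu);
        return sum;
}
#endif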