/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>

#include "internals.h"
/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;
/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:	the interrupt number
 * @desc:	description of the interrupt
 *
 * Handles spurious and unhandled IRQ's. It also prints a debug message.
 */
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
	print_irq_desc(irq, desc);
	kstat_incr_irqs_this_cpu(irq, desc);
	ack_bad_irq(irq);
}
/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template', that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);
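/*
 * Illustrative sketch (not in the original file): controller independence
 * as seen from a driver. The handler below is bound purely by IRQ number;
 * the irq_chip behind it is resolved through the descriptor by the generic
 * code in this file. All "sample_*" names are hypothetical.
 */
#if 0
static irqreturn_t sample_isr(int irq, void *dev_id)
{
	/* device-level handling only; no irq-controller access here */
	return IRQ_HANDLED;
}

static int sample_attach(unsigned int irq, void *dev)
{
	return request_irq(irq, sample_isr, 0, "sample", dev);
}
#endif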
#ifdef CONFIG_SPARSE_IRQ
static struct irq_desc irq_desc_init = {
	.irq	    = -1,
	.status	    = IRQ_DISABLED,
	.chip	    = &no_irq_chip,
	.handle_irq = handle_bad_irq,
	.depth      = 1,
	.lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
#ifdef CONFIG_SMP
	.affinity   = CPU_MASK_ALL
#endif
};
void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
{
	unsigned long bytes;
	char *ptr;
	int node;

	/* Compute how many bytes we need per irq and allocate them */
	bytes = nr * sizeof(unsigned int);

	node = cpu_to_node(cpu);
	ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
	printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", cpu, node);

	if (ptr)
		desc->kstat_irqs = (unsigned int *)ptr;
}
void __attribute__((weak)) arch_init_chip_data(struct irq_desc *desc, int cpu)
{
}
static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
{
	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

	spin_lock_init(&desc->lock);

	desc->irq = irq;

	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	init_kstat_irqs(desc, cpu, nr_cpu_ids);
	if (!desc->kstat_irqs) {
		printk(KERN_ERR "can not alloc kstat_irqs\n");
		BUG_ON(1);
	}
	arch_init_chip_data(desc, cpu);
}
/*
 * Protect the sparse_irqs:
 */
DEFINE_SPINLOCK(sparse_irq_lock);

struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS_LEGACY-1] = {
		.irq	    = -1,
		.status	    = IRQ_DISABLED,
		.chip	    = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth	    = 1,
		.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
#ifdef CONFIG_SMP
		.affinity   = CPU_MASK_ALL
#endif
	}
};

/* FIXME: use bootmem alloc ...*/
static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
void __init early_irq_init(void)
{
	struct irq_desc *desc;
	int legacy_count;
	int i;

	desc = irq_desc_legacy;
	legacy_count = ARRAY_SIZE(irq_desc_legacy);

	for (i = 0; i < legacy_count; i++) {
		desc[i].irq = i;
		desc[i].kstat_irqs = kstat_irqs_legacy[i];
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);

		irq_desc_ptrs[i] = desc + i;
	}

	for (i = legacy_count; i < NR_IRQS; i++)
		irq_desc_ptrs[i] = NULL;

	arch_early_irq_init();
}
struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
}
struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc;
	unsigned long flags;
	int node;

	if (irq >= NR_IRQS) {
		printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
				irq, NR_IRQS);
		WARN_ON(1);
		return NULL;
	}

	desc = irq_desc_ptrs[irq];
	if (desc)
		return desc;

	spin_lock_irqsave(&sparse_irq_lock, flags);

	/* We have to check it to avoid races with another CPU */
	desc = irq_desc_ptrs[irq];
	if (desc)
		goto out_unlock;

	node = cpu_to_node(cpu);
	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
	printk(KERN_DEBUG " alloc irq_desc for %d on cpu %d node %d\n",
		irq, cpu, node);
	if (!desc) {
		printk(KERN_ERR "can not alloc irq_desc\n");
		BUG_ON(1);
	}
	init_one_irq_desc(irq, desc, cpu);

	irq_desc_ptrs[irq] = desc;

out_unlock:
	spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return desc;
}
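/*
 * Illustrative sketch (not in the original file): the function above uses
 * the classic check / lock / re-check pattern, so callers hit a lock-free
 * fast path when the descriptor already exists. A hypothetical platform
 * setup helper would use it like this before installing a chip:
 */
#if 0
static int sample_prepare_irq(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc_alloc_cpu(irq, cpu);

	if (!desc)
		return -EINVAL;	/* irq >= NR_IRQS */
	/* desc is now valid and registered in irq_desc_ptrs[] */
	return 0;
}
#endif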
#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status = IRQ_DISABLED,
		.chip = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth = 1,
		.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
#ifdef CONFIG_SMP
		.affinity = CPU_MASK_ALL
#endif
	}
};

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
	return irq_to_desc(irq);
}
#endif /* !CONFIG_SPARSE_IRQ */
/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this for itself.
 */
static void ack_bad(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	print_irq_desc(irq, desc);
	ack_bad_irq(irq);
}
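/*
 * Illustrative sketch (not in the original file) of a typical per-arch
 * answer: on x86 of this era, ack_bad_irq() logs the stray vector and,
 * when a local APIC is present, issues an EOI so the bad vector cannot
 * block further interrupts. Simplified and hypothetical in its details:
 */
#if 0
void ack_bad_irq(unsigned int irq)
{
	printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
#ifdef CONFIG_X86_LOCAL_APIC
	ack_APIC_irq();		/* assumption: the APIC wants the EOI */
#endif
}
#endif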
/*
 * NOP functions
 */
static void noop(unsigned int irq)
{
}

static unsigned int noop_ret(unsigned int irq)
{
	return 0;
}
/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
	.name		= "none",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= ack_bad,
	.end		= noop,
};
/*
 * Generic dummy implementation which can be used for
 * real dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
	.name		= "dummy",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= noop,
	.mask		= noop,
	.unmask		= noop,
	.end		= noop,
};
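/*
 * Illustrative sketch (not in the original file): a platform whose
 * interrupt line needs no controller ack/mask (e.g. one already demuxed
 * by a parent handler) can wire it to dummy_irq_chip. The function name
 * and irq number are hypothetical:
 */
#if 0
static void __init sample_init_demuxed_irq(unsigned int irq)
{
	set_irq_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
}
#endif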
/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
	return IRQ_NONE;
}
/**
 * handle_IRQ_event - irq action chain handler
 * @irq:	the interrupt number
 * @action:	the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
	irqreturn_t ret, retval = IRQ_NONE;
	unsigned int status = 0;

	if (!(action->flags & IRQF_DISABLED))
		local_irq_enable_in_hardirq();

	do {
		ret = action->handler(irq, action->dev_id);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		retval |= ret;
		action = action->next;
	} while (action);

	if (status & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	return retval;
}
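/*
 * Illustrative sketch (not in the original file): the do/while loop above
 * is what makes IRQF_SHARED work. Each sharer's handler is called in turn
 * and must return IRQ_NONE when its device did not raise the line. The
 * "sample_*" names and the device-register check are hypothetical:
 */
#if 0
static irqreturn_t sample_shared_isr(int irq, void *dev_id)
{
	struct sample_dev *dev = dev_id;	/* hypothetical type */

	if (!sample_dev_irq_pending(dev))	/* hypothetical helper */
		return IRQ_NONE;	/* not ours; the chain continues */

	sample_dev_handle(dev);			/* hypothetical helper */
	return IRQ_HANDLED;
}
#endif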
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
/**
 * __do_IRQ - original all in one highlevel IRQ handler
 * @irq:	the interrupt number
 *
 * __do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned int status;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (CHECK_IRQ_PER_CPU(desc->status)) {
		irqreturn_t action_ret;

		/*
		 * No locking required for CPU-local interrupts:
		 */
		if (desc->chip->ack) {
			desc->chip->ack(irq);
			/* get new one */
			desc = irq_remap_to_desc(irq, desc);
		}
		if (likely(!(desc->status & IRQ_DISABLED))) {
			action_ret = handle_IRQ_event(irq, desc->action);
			if (!noirqdebug)
				note_interrupt(irq, desc, action_ret);
		}
		desc->chip->end(irq);
		return 1;
	}

	spin_lock(&desc->lock);
	if (desc->chip->ack) {
		desc->chip->ack(irq);
		desc = irq_remap_to_desc(irq, desc);
	}
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		irqreturn_t action_ret;

		spin_unlock(&desc->lock);

		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);

		spin_lock(&desc->lock);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->chip->end(irq);
	spin_unlock(&desc->lock);

	return 1;
}
#endif
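/*
 * Illustrative timeline (not in the original file) for the
 * IRQ_PENDING/IRQ_INPROGRESS handshake in __do_IRQ above:
 *
 *	CPU0					CPU1
 *	----					----
 *	sets IRQ_INPROGRESS, drops the
 *	lock, runs the handler			same irq fires: sees
 *						IRQ_INPROGRESS, leaves
 *						IRQ_PENDING set, exits early
 *	retakes the lock, sees IRQ_PENDING:
 *	clears it and loops to run the
 *	handler once more
 *
 * PENDING is a single bit, which is why only the _second_ concurrent
 * instance is replayed: further arrivals while PENDING is already set
 * collapse into the same replay.
 */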
void early_init_irq_lock_class(void)
{
	struct irq_desc *desc;
	int i;

	for_each_irq_desc(i, desc) {
		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	}
}
#ifdef CONFIG_SPARSE_IRQ
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);
	return desc ? desc->kstat_irqs[cpu] : 0;
}
#endif
EXPORT_SYMBOL(kstat_irqs_cpu);
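/*
 * Illustrative sketch (not in the original file): /proc/interrupts-style
 * consumers sum the per-CPU counters, mirroring what kstat_irqs() in
 * <linux/kernel_stat.h> does. The function name is hypothetical:
 */
#if 0
static unsigned int sample_total_irqs(unsigned int irq)
{
	unsigned int cpu, sum = 0;

	for_each_possible_cpu(cpu)
		sum += kstat_irqs_cpu(irq, cpu);

	return sum;
}
#endif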