*/
#include <linux/irq.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
+#include <linux/rculist.h>
+#include <linux/hash.h>
+#include <linux/radix-tree.h>
+#include <trace/events/irq.h>
#include "internals.h"
/*
* lockdep: we want to handle all irq_desc locks as a single lock-class:
*/
-static struct lock_class_key irq_desc_lock_class;
+struct lock_class_key irq_desc_lock_class;
/**
* handle_bad_irq - handle spurious and unhandled irqs
* @irq:	the interrupt number
* @desc:	description of the interrupt
*
* Handles spurious and unhandled IRQs. It also prints a debug message.
*/
-void
-handle_bad_irq(unsigned int irq, struct irq_desc *desc)
+void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
print_irq_desc(irq, desc);
-#ifdef CONFIG_HAVE_DYN_ARRAY
- kstat_irqs_this_cpu(desc)++;
-#else
- kstat_irqs_this_cpu(irq)++;
-#endif
+ kstat_incr_irqs_this_cpu(irq, desc);
ack_bad_irq(irq);
}
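+/*
+ * Allocate and fill the default irq affinity mask (all CPUs); builds
+ * without SMP irq affinity support get an empty stub.
+ */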
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
+static void __init init_irq_default_affinity(void)
+{
+ alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
+ cpumask_setall(irq_default_affinity);
+}
+#else
+static void __init init_irq_default_affinity(void)
+{
+}
+#endif
+
/*
* Linux has a controller-independent interrupt architecture.
* Every controller has a 'controller-template', that is used
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);
-#ifdef CONFIG_HAVE_DYN_ARRAY
+#ifdef CONFIG_SPARSE_IRQ
+
static struct irq_desc irq_desc_init = {
- .irq = -1U,
- .status = IRQ_DISABLED,
- .chip = &no_irq_chip,
+ .irq = -1,
+ .status = IRQ_DISABLED,
+ .chip = &no_irq_chip,
.handle_irq = handle_bad_irq,
- .depth = 1,
- .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
- .affinity = CPU_MASK_ALL
-#endif
+ .depth = 1,
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};
-
-static void init_one_irq_desc(struct irq_desc *desc)
-{
- memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
- lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-}
-
-extern int after_bootmem;
-extern void *__alloc_bootmem_nopanic(unsigned long size,
- unsigned long align,
- unsigned long goal);
-
-static void init_kstat_irqs(struct irq_desc *desc, int nr_desc, int nr)
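+/*
+ * Allocate the per-CPU kstat_irqs counter array of one descriptor on
+ * the given node.
+ */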
+void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
{
- unsigned long bytes, total_bytes;
- char *ptr;
- int i;
- unsigned long phys;
-
- /* Compute how many bytes we need per irq and allocate them */
- bytes = nr * sizeof(unsigned int);
- total_bytes = bytes * nr_desc;
- if (after_bootmem)
- ptr = kzalloc(total_bytes, GFP_ATOMIC);
- else
- ptr = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);
+ void *ptr;
- if (!ptr)
- panic(" can not allocate kstat_irqs\n");
+ ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
+ GFP_ATOMIC, node);
- phys = __pa(ptr);
- printk(KERN_DEBUG "kstat_irqs ==> [%#lx - %#lx]\n", phys, phys + total_bytes);
-
- for (i = 0; i < nr_desc; i++) {
- desc[i].kstat_irqs = (unsigned int *)ptr;
- ptr += bytes;
+ /*
+ * Don't overwrite if we cannot get a new one;
+ * init_copy_kstat_irqs() could still use the old one.
+ */
+ if (ptr) {
+ printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node);
+ desc->kstat_irqs = ptr;
}
}
-#ifdef CONFIG_HAVE_SPARSE_IRQ
-/*
- * Protect the sparse_irqs_free freelist:
- */
-static DEFINE_SPINLOCK(sparse_irq_lock);
-static struct irq_desc *sparse_irqs_free;
-struct irq_desc *sparse_irqs;
-#endif
-
-static void __init init_work(void *data)
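+/*
+ * Initialize a descriptor from the irq_desc_init template and set up
+ * its per-node state: lock class, kstat counters and cpumasks.
+ */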
+static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
{
- struct dyn_array *da = data;
- int i;
- struct irq_desc *desc;
-
- desc = *da->name;
+ memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
- for (i = 0; i < *da->nr; i++) {
- init_one_irq_desc(&desc[i]);
-#ifndef CONFIG_HAVE_SPARSE_IRQ
- desc[i].irq = i;
+ raw_spin_lock_init(&desc->lock);
+ desc->irq = irq;
+#ifdef CONFIG_SMP
+ desc->node = node;
#endif
+ lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+ init_kstat_irqs(desc, node, nr_cpu_ids);
+ if (!desc->kstat_irqs) {
+ printk(KERN_ERR "can not alloc kstat_irqs\n");
+ BUG_ON(1);
+ }
+ if (!alloc_desc_masks(desc, node, false)) {
+ printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
+ BUG_ON(1);
}
+ init_desc_masks(desc);
+ arch_init_chip_data(desc, node);
+}
- /* init kstat_irqs, nr_cpu_ids is ready already */
- init_kstat_irqs(desc, *da->nr, nr_cpu_ids);
+/*
+ * Protect the sparse irq descriptor tree:
+ */
+DEFINE_RAW_SPINLOCK(sparse_irq_lock);
-#ifdef CONFIG_HAVE_SPARSE_IRQ
- for (i = 1; i < *da->nr; i++)
- desc[i-1].next = &desc[i];
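+/*
+ * Sparse irq descriptors are allocated on demand and looked up in a
+ * radix tree indexed by the irq number.
+ */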
+static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);
- sparse_irqs_free = sparse_irqs;
- sparse_irqs = NULL;
-#endif
+static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
+{
+ radix_tree_insert(&irq_desc_tree, irq, desc);
}
-#ifdef CONFIG_HAVE_SPARSE_IRQ
-static int nr_irq_desc = 32;
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+ return radix_tree_lookup(&irq_desc_tree, irq);
+}
-static int __init parse_nr_irq_desc(char *arg)
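+/*
+ * Swap the descriptor registered for an irq in place, e.g. after it
+ * has been reallocated on a different node.
+ */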
+void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
{
- if (arg)
- nr_irq_desc = simple_strtoul(arg, NULL, 0);
- return 0;
+ void **ptr;
+
+ ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
+ if (ptr)
+ radix_tree_replace_slot(ptr, desc);
}
-early_param("nr_irq_desc", parse_nr_irq_desc);
+static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
+ [0 ... NR_IRQS_LEGACY-1] = {
+ .irq = -1,
+ .status = IRQ_DISABLED,
+ .chip = &no_irq_chip,
+ .handle_irq = handle_bad_irq,
+ .depth = 1,
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+ }
+};
-DEFINE_DYN_ARRAY(sparse_irqs, sizeof(struct irq_desc), nr_irq_desc, PAGE_SIZE, init_work);
+static unsigned int *kstat_irqs_legacy;
-struct irq_desc *irq_to_desc(unsigned int irq)
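+/*
+ * Set up the statically allocated legacy (ISA) descriptors at boot,
+ * register them in the radix tree and hand over to the architecture
+ * code.
+ */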
+int __init early_irq_init(void)
{
struct irq_desc *desc;
+ int legacy_count;
+ int node;
+ int i;
- desc = sparse_irqs;
- while (desc) {
- if (desc->irq == irq)
- return desc;
+ init_irq_default_affinity();
- desc = desc->next;
- }
- return NULL;
-}
+ /* initialize nr_irqs based on nr_cpu_ids */
+ arch_probe_nr_irqs();
+ printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
-struct irq_desc *irq_to_desc_alloc(unsigned int irq)
-{
- struct irq_desc *desc, *desc_pri;
- unsigned long flags;
- int count = 0;
- int i;
+ desc = irq_desc_legacy;
+ legacy_count = ARRAY_SIZE(irq_desc_legacy);
+ node = first_online_node;
- desc_pri = desc = sparse_irqs;
- while (desc) {
- if (desc->irq == irq)
- return desc;
+ /* allocate based on nr_cpu_ids */
+ kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
+ sizeof(int), GFP_NOWAIT, node);
- desc_pri = desc;
- desc = desc->next;
- count++;
+ for (i = 0; i < legacy_count; i++) {
+ desc[i].irq = i;
+#ifdef CONFIG_SMP
+ desc[i].node = node;
+#endif
+ desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
+ lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
+ alloc_desc_masks(&desc[i], node, true);
+ init_desc_masks(&desc[i]);
+ set_irq_desc(i, &desc[i]);
}
- spin_lock_irqsave(&sparse_irq_lock, flags);
- /*
- * we run out of pre-allocate ones, allocate more
- */
- if (!sparse_irqs_free) {
- unsigned long phys;
- unsigned long total_bytes;
-
- printk(KERN_DEBUG "try to get more irq_desc %d\n", nr_irq_desc);
+ return arch_early_irq_init();
+}
- total_bytes = sizeof(struct irq_desc) * nr_irq_desc;
- if (after_bootmem)
- desc = kzalloc(total_bytes, GFP_ATOMIC);
- else
- desc = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);
+struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
+{
+ struct irq_desc *desc;
+ unsigned long flags;
- if (!desc)
- panic("please boot with nr_irq_desc= %d\n", count * 2);
+ if (irq >= nr_irqs) {
+ WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
+ irq, nr_irqs);
+ return NULL;
+ }
- phys = __pa(desc);
- printk(KERN_DEBUG "irq_desc ==> [%#lx - %#lx]\n", phys, phys + total_bytes);
+ desc = irq_to_desc(irq);
+ if (desc)
+ return desc;
- for (i = 0; i < nr_irq_desc; i++)
- init_one_irq_desc(&desc[i]);
+ raw_spin_lock_irqsave(&sparse_irq_lock, flags);
- for (i = 1; i < nr_irq_desc; i++)
- desc[i-1].next = &desc[i];
+ /* Re-check under the lock: another CPU may have installed it meanwhile */
+ desc = irq_to_desc(irq);
+ if (desc)
+ goto out_unlock;
- /* init kstat_irqs, nr_cpu_ids is ready already */
- init_kstat_irqs(desc, nr_irq_desc, nr_cpu_ids);
+ desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
- sparse_irqs_free = desc;
+ printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node);
+ if (!desc) {
+ printk(KERN_ERR "can not alloc irq_desc\n");
+ BUG_ON(1);
}
+ init_one_irq_desc(irq, desc, node);
- desc = sparse_irqs_free;
- sparse_irqs_free = sparse_irqs_free->next;
- desc->next = NULL;
- if (desc_pri)
- desc_pri->next = desc;
- else
- sparse_irqs = desc;
- desc->irq = irq;
+ set_irq_desc(irq, desc);
- spin_unlock_irqrestore(&sparse_irq_lock, flags);
+out_unlock:
+ raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
return desc;
}
-#else
-struct irq_desc *irq_desc;
-DEFINE_DYN_ARRAY(irq_desc, sizeof(struct irq_desc), nr_irqs, PAGE_SIZE, init_work);
-#endif
-
-#else
+#else /* !CONFIG_SPARSE_IRQ */
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
[0 ... NR_IRQS-1] = {
.chip = &no_irq_chip,
.handle_irq = handle_bad_irq,
.depth = 1,
- .lock = __SPIN_LOCK_UNLOCKED(sparse_irqs->lock),
-#ifdef CONFIG_SMP
- .affinity = CPU_MASK_ALL
-#endif
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
}
};
-#endif
+static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
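+/*
+ * Non-sparse variant: all descriptors live in the static irq_desc[]
+ * array, with statically sized kstat_irqs counters.
+ */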
+int __init early_irq_init(void)
+{
+ struct irq_desc *desc;
+ int count;
+ int i;
+
+ init_irq_default_affinity();
+
+ printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
+
+ desc = irq_desc;
+ count = ARRAY_SIZE(irq_desc);
+
+ for (i = 0; i < count; i++) {
+ desc[i].irq = i;
+ alloc_desc_masks(&desc[i], 0, true);
+ init_desc_masks(&desc[i]);
+ desc[i].kstat_irqs = kstat_irqs_all[i];
+ }
+ return arch_early_irq_init();
+}
-#ifndef CONFIG_HAVE_SPARSE_IRQ
struct irq_desc *irq_to_desc(unsigned int irq)
{
- if (irq < nr_irqs)
- return &irq_desc[irq];
-
- return NULL;
+ return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
-struct irq_desc *irq_to_desc_alloc(unsigned int irq)
+
+struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
return irq_to_desc(irq);
}
-#endif
+#endif /* !CONFIG_SPARSE_IRQ */
+
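+/* Reset the per-CPU interrupt counters of one descriptor. */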
+void clear_kstat_irqs(struct irq_desc *desc)
+{
+ memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
+}
/*
* What should we do if we get a hw irq event on an illegal vector?
*/
static void ack_bad(unsigned int irq)
{
- struct irq_desc *desc;
+ struct irq_desc *desc = irq_to_desc(irq);
- desc = irq_to_desc(irq);
print_irq_desc(irq, desc);
ack_bad_irq(irq);
}
return IRQ_NONE;
}
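+/*
+ * Warn once per irqaction when a handler returns IRQ_WAKE_THREAD
+ * without a thread function set up.
+ */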
+static void warn_no_thread(unsigned int irq, struct irqaction *action)
+{
+ if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
+ return;
+
+ printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
+ "but no thread function available.\n", irq, action->name);
+}
+
/**
* handle_IRQ_event - irq action chain handler
* @irq: the interrupt number
local_irq_enable_in_hardirq();
do {
+ trace_irq_handler_entry(irq, action);
ret = action->handler(irq, action->dev_id);
- if (ret == IRQ_HANDLED)
+ trace_irq_handler_exit(irq, action, ret);
+
+ switch (ret) {
+ case IRQ_WAKE_THREAD:
+ /*
+ * Set result to handled so the spurious check
+ * does not trigger.
+ */
+ ret = IRQ_HANDLED;
+
+ /*
+ * Catch drivers which return WAKE_THREAD but
+ * did not set up a thread function
+ */
+ if (unlikely(!action->thread_fn)) {
+ warn_no_thread(irq, action);
+ break;
+ }
+
+ /*
+ * Wake up the handler thread for this
+ * action. In case the thread crashed and was
+ * killed we just pretend that we handled the
+ * interrupt. The hardirq handler above has
+ * disabled the device interrupt, so no irq
+ * storm is lurking.
+ */
+ if (likely(!test_bit(IRQTF_DIED,
+ &action->thread_flags))) {
+ set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
+ wake_up_process(action->thread);
+ }
+
+ /* Fall through to add to randomness */
+ case IRQ_HANDLED:
status |= action->flags;
+ break;
+
+ default:
+ break;
+ }
+
retval |= ret;
action = action->next;
} while (action);
}
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
+
+#ifdef CONFIG_ENABLE_WARN_DEPRECATED
+# warning __do_IRQ is deprecated. Please convert to proper flow handlers
+#endif
+
/**
* __do_IRQ - original all in one highlevel IRQ handler
* @irq: the interrupt number
struct irqaction *action;
unsigned int status;
-#ifdef CONFIG_HAVE_DYN_ARRAY
- kstat_irqs_this_cpu(desc)++;
-#else
- kstat_irqs_this_cpu(irq)++;
-#endif
+ kstat_incr_irqs_this_cpu(irq, desc);
+
if (CHECK_IRQ_PER_CPU(desc->status)) {
irqreturn_t action_ret;
return 1;
}
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (desc->chip->ack)
desc->chip->ack(irq);
/*
for (;;) {
irqreturn_t action_ret;
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (likely(!(desc->status & IRQ_PENDING)))
break;
desc->status &= ~IRQ_PENDING;
* disabled while the handler was running.
*/
desc->chip->end(irq);
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
return 1;
}
#endif
-
-#ifdef CONFIG_TRACE_IRQFLAGS
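+/*
+ * Set the shared lockdep class on every currently allocated
+ * descriptor lock.
+ */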
void early_init_irq_lock_class(void)
{
-#ifndef CONFIG_HAVE_DYN_ARRAY
+ struct irq_desc *desc;
int i;
- for (i = 0; i < nr_irqs; i++)
- lockdep_set_class(&irq_desc[i].lock, &irq_desc_lock_class);
-#endif
+ for_each_irq_desc(i, desc) {
+ lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+ }
}
-#endif
-#ifdef CONFIG_HAVE_DYN_ARRAY
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
struct irq_desc *desc = irq_to_desc(irq);
- return desc->kstat_irqs[cpu];
+ return desc ? desc->kstat_irqs[cpu] : 0;
}
-#endif
EXPORT_SYMBOL(kstat_irqs_cpu);