X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=kernel%2Firq%2Fmanage.c;h=cd0cd8dcb3453aee98a20cd6652144cf10e42281;hb=05f3a9af58180d24a9decedd71d4587935782d70;hp=8389d1817fe87702dcdf19830996b4fad88570dc;hpb=0d7012a968d006e277eb0fe20edd7a9b5563c2b7;p=safe%2Fjmp%2Flinux-2.6

diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 8389d18..cd0cd8d 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1,20 +1,30 @@
 /*
  * linux/kernel/irq/manage.c
  *
- * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
+ * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
+ * Copyright (C) 2005-2006 Thomas Gleixner
  *
  * This file contains driver APIs to the irq subsystem.
  */
 
-#include <linux/config.h>
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/interrupt.h>
+#include <linux/slab.h>
 
 #include "internals.h"
 
 #ifdef CONFIG_SMP
 
+cpumask_var_t irq_default_affinity;
+
+static int init_irq_default_affinity(void)
+{
+	alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL);
+	cpumask_setall(irq_default_affinity);
+	return 0;
+}
+core_initcall(init_irq_default_affinity);
+
 /**
  *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
@@ -28,16 +38,136 @@
  */
 void synchronize_irq(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned int status;
 
-	if (irq >= NR_IRQS)
+	if (!desc)
 		return;
 
-	while (desc->status & IRQ_INPROGRESS)
-		cpu_relax();
+	do {
+		unsigned long flags;
+
+		/*
+		 * Wait until we're out of the critical section.  This might
+		 * give the wrong answer due to the lack of memory barriers.
+		 */
+		while (desc->status & IRQ_INPROGRESS)
+			cpu_relax();
+
+		/* Ok, that indicated we're done: double-check carefully. */
+		spin_lock_irqsave(&desc->lock, flags);
+		status = desc->status;
+		spin_unlock_irqrestore(&desc->lock, flags);
+
+		/* Oops, that failed? */
+	} while (status & IRQ_INPROGRESS);
 }
 EXPORT_SYMBOL(synchronize_irq);
 
+/**
+ *	irq_can_set_affinity - Check if the affinity of a given irq can be set
+ *	@irq:		Interrupt to check
+ *
+ */
+int irq_can_set_affinity(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
+	    !desc->chip->set_affinity)
+		return 0;
+
+	return 1;
+}
+
+/**
+ *	irq_set_affinity - Set the irq affinity of a given irq
+ *	@irq:		Interrupt to set affinity
+ *	@cpumask:	cpumask
+ *
+ */
+int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
+
+	if (!desc->chip->set_affinity)
+		return -EINVAL;
+
+	spin_lock_irqsave(&desc->lock, flags);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
+		cpumask_copy(&desc->affinity, cpumask);
+		desc->chip->set_affinity(irq, cpumask);
+	} else {
+		desc->status |= IRQ_MOVE_PENDING;
+		cpumask_copy(&desc->pending_mask, cpumask);
+	}
+#else
+	cpumask_copy(&desc->affinity, cpumask);
+	desc->chip->set_affinity(irq, cpumask);
+#endif
+	desc->status |= IRQ_AFFINITY_SET;
+	spin_unlock_irqrestore(&desc->lock, flags);
+	return 0;
+}
+
+#ifndef CONFIG_AUTO_IRQ_AFFINITY
+/*
+ * Generic version of the affinity autoselector.
+ */
+int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
+{
+	if (!irq_can_set_affinity(irq))
+		return 0;
+
+	/*
+	 * Preserve a userspace affinity setup, but make sure that
+	 * one of the targets is online.
+	 */
+	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
+		if (cpumask_any_and(&desc->affinity, cpu_online_mask)
+		    < nr_cpu_ids)
+			goto set_affinity;
+		else
+			desc->status &= ~IRQ_AFFINITY_SET;
+	}
+
+	cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
+set_affinity:
+	desc->chip->set_affinity(irq, &desc->affinity);
+
+	return 0;
+}
+#else
+static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d)
+{
+	return irq_select_affinity(irq);
+}
+#endif
+
+/*
+ * Called when affinity is set via /proc/irq
+ */
+int irq_select_affinity_usr(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&desc->lock, flags);
+	ret = do_irq_select_affinity(irq, desc);
+	spin_unlock_irqrestore(&desc->lock, flags);
+
+	return ret;
+}
+
+#else
+static inline int do_irq_select_affinity(int irq, struct irq_desc *desc)
+{
+	return 0;
+}
 #endif
 
 /**
@@ -53,10 +183,10 @@ EXPORT_SYMBOL(synchronize_irq);
  */
 void disable_irq_nosync(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
-	if (irq >= NR_IRQS)
+	if (!desc)
 		return;
 
 	spin_lock_irqsave(&desc->lock, flags);
@@ -82,9 +212,9 @@ EXPORT_SYMBOL(disable_irq_nosync);
  */
 void disable_irq(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 
-	if (irq >= NR_IRQS)
+	if (!desc)
 		return;
 
 	disable_irq_nosync(irq);
@@ -93,6 +223,25 @@ void disable_irq(unsigned int irq)
 }
 EXPORT_SYMBOL(disable_irq);
 
+static void __enable_irq(struct irq_desc *desc, unsigned int irq)
+{
+	switch (desc->depth) {
+	case 0:
+		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
+		break;
+	case 1: {
+		unsigned int status = desc->status & ~IRQ_DISABLED;
+
+		/* Prevent probing on this irq: */
+		desc->status = status | IRQ_NOPROBE;
+		check_irq_resend(desc, irq);
+		/* fall-through */
+	}
+	default:
+		desc->depth--;
+	}
+}
+
 /**
  *	enable_irq - enable handling of an irq
  *	@irq:	Interrupt to enable
@@ -105,34 +254,75 @@ EXPORT_SYMBOL(disable_irq);
  */
 void enable_irq(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
-	if (irq >= NR_IRQS)
+	if (!desc)
 		return;
 
 	spin_lock_irqsave(&desc->lock, flags);
-	switch (desc->depth) {
-	case 0:
-		WARN_ON(1);
-		break;
-	case 1: {
-		unsigned int status = desc->status & ~IRQ_DISABLED;
+	__enable_irq(desc, irq);
+	spin_unlock_irqrestore(&desc->lock, flags);
+}
+EXPORT_SYMBOL(enable_irq);
+
+static int set_irq_wake_real(unsigned int irq, unsigned int on)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	int ret = -ENXIO;
 
-		desc->status = status;
-		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
-			desc->status = status | IRQ_REPLAY;
-			hw_resend_irq(desc->chip,irq);
+	if (desc->chip->set_wake)
+		ret = desc->chip->set_wake(irq, on);
+
+	return ret;
+}
+
+/**
+ *	set_irq_wake - control irq power management wakeup
+ *	@irq:	interrupt to control
+ *	@on:	enable/disable power management wakeup
+ *
+ *	Enable/disable power management wakeup mode, which is
+ *	disabled by default.  Enables and disables must match,
+ *	just as they match for non-wakeup mode support.
+ *
+ *	Wakeup mode lets this IRQ wake the system from sleep
+ *	states like "suspend to RAM".
+ */
+int set_irq_wake(unsigned int irq, unsigned int on)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
+	int ret = 0;
+
+	/* wakeup-capable irqs can be shared between drivers that
+	 * don't need to have the same sleep mode behaviors.
+	 */
+	spin_lock_irqsave(&desc->lock, flags);
+	if (on) {
+		if (desc->wake_depth++ == 0) {
+			ret = set_irq_wake_real(irq, on);
+			if (ret)
+				desc->wake_depth = 0;
+			else
+				desc->status |= IRQ_WAKEUP;
+		}
+	} else {
+		if (desc->wake_depth == 0) {
+			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
+		} else if (--desc->wake_depth == 0) {
+			ret = set_irq_wake_real(irq, on);
+			if (ret)
+				desc->wake_depth = 1;
+			else
+				desc->status &= ~IRQ_WAKEUP;
 		}
-		desc->chip->enable(irq);
-		/* fall-through */
-	}
-	default:
-		desc->depth--;
 	}
+	spin_unlock_irqrestore(&desc->lock, flags);
+	return ret;
 }
-EXPORT_SYMBOL(enable_irq);
+EXPORT_SYMBOL(set_irq_wake);
 
 /*
  * Internal function that tells the architecture code whether a
@@ -141,41 +331,91 @@ EXPORT_SYMBOL(enable_irq);
  */
 int can_request_irq(unsigned int irq, unsigned long irqflags)
 {
+	struct irq_desc *desc = irq_to_desc(irq);
 	struct irqaction *action;
 
-	if (irq >= NR_IRQS)
+	if (!desc)
 		return 0;
 
-	action = irq_desc[irq].action;
+	if (desc->status & IRQ_NOREQUEST)
+		return 0;
+
+	action = desc->action;
 	if (action)
-		if (irqflags & action->flags & SA_SHIRQ)
+		if (irqflags & action->flags & IRQF_SHARED)
 			action = NULL;
 
 	return !action;
}
 
+void compat_irq_chip_set_default_handler(struct irq_desc *desc)
+{
+	/*
+	 * If the architecture still has not overridden
+	 * the flow handler then zap the default. This
+	 * should catch incorrect flow-type setting.
+	 */
+	if (desc->handle_irq == &handle_bad_irq)
+		desc->handle_irq = NULL;
+}
+
+int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
+		unsigned long flags)
+{
+	int ret;
+	struct irq_chip *chip = desc->chip;
+
+	if (!chip || !chip->set_type) {
+		/*
+		 * IRQF_TRIGGER_* but the PIC does not support multiple
+		 * flow-types?
+		 */
+		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
+				chip ? (chip->name ? : "unknown") : "unknown");
+		return 0;
+	}
+
+	/* caller masked out all except trigger mode flags */
+	ret = chip->set_type(irq, flags);
+
+	if (ret)
+		pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
+				(int)flags, irq, chip->set_type);
+	else {
+		if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+			flags |= IRQ_LEVEL;
+		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
+		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
+		desc->status |= flags;
+	}
+
+	return ret;
+}
+
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
  */
-int setup_irq(unsigned int irq, struct irqaction *new)
+static int
+__setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 {
-	struct irq_desc *desc = irq_desc + irq;
 	struct irqaction *old, **p;
+	const char *old_name = NULL;
 	unsigned long flags;
 	int shared = 0;
+	int ret;
 
-	if (irq >= NR_IRQS)
+	if (!desc)
 		return -EINVAL;
 
-	if (desc->chip == &no_irq_type)
+	if (desc->chip == &no_irq_chip)
 		return -ENOSYS;
 	/*
 	 * Some drivers like serial.c use request_irq() heavily,
 	 * so we have to be careful not to interfere with a
 	 * running system.
 	 */
-	if (new->flags & SA_SAMPLE_RANDOM) {
+	if (new->flags & IRQF_SAMPLE_RANDOM) {
 		/*
 		 * This function might sleep, we want to call it first,
 		 * outside of the atomic block.
@@ -194,13 +434,22 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 	p = &desc->action;
 	old = *p;
 	if (old) {
-		/* Can't share interrupts unless both agree to */
-		if (!(old->flags & new->flags & SA_SHIRQ))
+		/*
+		 * Can't share interrupts unless both agree to and are
+		 * the same type (level, edge, polarity). So both flag
+		 * fields must have IRQF_SHARED set and the bits which
+		 * set the trigger type must match.
+		 */
+		if (!((old->flags & new->flags) & IRQF_SHARED) ||
+		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
+			old_name = old->name;
 			goto mismatch;
+		}
 
-#if defined(CONFIG_IRQ_PER_CPU) && defined(SA_PERCPU_IRQ)
+#if defined(CONFIG_IRQ_PER_CPU)
 		/* All handlers must agree on per-cpuness */
-		if ((old->flags & IRQ_PER_CPU) != (new->flags & IRQ_PER_CPU))
+		if ((old->flags & IRQF_PERCPU) !=
+		    (new->flags & IRQF_PERCPU))
 			goto mismatch;
 #endif
@@ -212,39 +461,104 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 		shared = 1;
 	}
 
-	*p = new;
-#if defined(CONFIG_IRQ_PER_CPU) && defined(SA_PERCPU_IRQ)
-	if (new->flags & SA_PERCPU_IRQ)
-		desc->status |= IRQ_PER_CPU;
-#endif
 	if (!shared) {
-		desc->depth = 0;
-		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT |
-				  IRQ_WAITING | IRQ_INPROGRESS);
-		if (desc->chip->startup)
+		irq_chip_set_defaults(desc->chip);
+
+		/* Setup the type (level, edge polarity) if configured: */
+		if (new->flags & IRQF_TRIGGER_MASK) {
+			ret = __irq_set_trigger(desc, irq,
+					new->flags & IRQF_TRIGGER_MASK);
+
+			if (ret) {
+				spin_unlock_irqrestore(&desc->lock, flags);
+				return ret;
+			}
+		} else
+			compat_irq_chip_set_default_handler(desc);
+#if defined(CONFIG_IRQ_PER_CPU)
+		if (new->flags & IRQF_PERCPU)
+			desc->status |= IRQ_PER_CPU;
+#endif
+
+		desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
+				  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);
+
+		if (!(desc->status & IRQ_NOAUTOEN)) {
+			desc->depth = 0;
+			desc->status &= ~IRQ_DISABLED;
 			desc->chip->startup(irq);
-		else
-			desc->chip->enable(irq);
+		} else
+			/* Undo nested disables: */
+			desc->depth = 1;
+
+		/* Exclude IRQ from balancing if requested */
+		if (new->flags & IRQF_NOBALANCING)
+			desc->status |= IRQ_NO_BALANCING;
+
+		/* Set default affinity mask once everything is setup */
+		do_irq_select_affinity(irq, desc);
+
+	} else if ((new->flags & IRQF_TRIGGER_MASK)
+			&& (new->flags & IRQF_TRIGGER_MASK)
+				!= (desc->status & IRQ_TYPE_SENSE_MASK)) {
+		/* hope the handler works with the actual trigger mode... */
+		pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
+				irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
+				(int)(new->flags & IRQF_TRIGGER_MASK));
+	}
+
+	*p = new;
+
+	/* Reset broken irq detection when installing new handler */
+	desc->irq_count = 0;
+	desc->irqs_unhandled = 0;
+
+	/*
+	 * Check whether we disabled the irq via the spurious handler
+	 * before. Reenable it and give it another chance.
+	 */
+	if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
+		desc->status &= ~IRQ_SPURIOUS_DISABLED;
+		__enable_irq(desc, irq);
 	}
+
 	spin_unlock_irqrestore(&desc->lock, flags);
 
 	new->irq = irq;
-	register_irq_proc(irq);
+	register_irq_proc(irq, desc);
 	new->dir = NULL;
 	register_handler_proc(irq, new);
 
 	return 0;
 
 mismatch:
-	spin_unlock_irqrestore(&desc->lock, flags);
-	if (!(new->flags & SA_PROBEIRQ)) {
-		printk(KERN_ERR "%s: irq handler mismatch\n", __FUNCTION__);
+#ifdef CONFIG_DEBUG_SHIRQ
+	if (!(new->flags & IRQF_PROBE_SHARED)) {
+		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
+		if (old_name)
+			printk(KERN_ERR "current handler: %s\n", old_name);
 		dump_stack();
 	}
+#endif
+	spin_unlock_irqrestore(&desc->lock, flags);
 	return -EBUSY;
 }
 
 /**
+ *	setup_irq - setup an interrupt
+ *	@irq:	Interrupt line to setup
+ *	@act:	irqaction for the interrupt
+ *
+ *	Used to statically setup interrupts in the early boot process.
+ */
+int setup_irq(unsigned int irq, struct irqaction *act)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	return __setup_irq(irq, desc, act);
+}
+
+/**
 *	free_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
@@ -260,15 +574,15 @@ mismatch:
 */
 void free_irq(unsigned int irq, void *dev_id)
 {
-	struct irq_desc *desc;
+	struct irq_desc *desc = irq_to_desc(irq);
 	struct irqaction **p;
 	unsigned long flags;
 
 	WARN_ON(in_interrupt());
-	if (irq >= NR_IRQS)
+
+	if (!desc)
 		return;
 
-	desc = irq_desc + irq;
 	spin_lock_irqsave(&desc->lock, flags);
 	p = &desc->action;
 	for (;;) {
@@ -302,10 +616,28 @@ void free_irq(unsigned int irq, void *dev_id)
 
 		/* Make sure it's not being used on another CPU */
 		synchronize_irq(irq);
+#ifdef CONFIG_DEBUG_SHIRQ
+		/*
+		 * It's a shared IRQ -- the driver ought to be
+		 * prepared for it to happen even now it's
+		 * being freed, so let's make sure....  We do
+		 * this after actually deregistering it, to
+		 * make sure that a 'real' IRQ doesn't run in
+		 * parallel with our fake
+		 */
+		if (action->flags & IRQF_SHARED) {
+			local_irq_save(flags);
+			action->handler(irq, dev_id);
+			local_irq_restore(flags);
+		}
+#endif
 		kfree(action);
 		return;
 	}
-	printk(KERN_ERR "Trying to free free IRQ%d\n", irq);
+	printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq);
+#ifdef CONFIG_DEBUG_SHIRQ
+	dump_stack();
+#endif
 	spin_unlock_irqrestore(&desc->lock, flags);
 	return;
 }
@@ -336,27 +668,51 @@ EXPORT_SYMBOL(free_irq);
 *
 *	Flags:
 *
- *	SA_SHIRQ		Interrupt is shared
- *	SA_INTERRUPT		Disable local interrupts while processing
- *	SA_SAMPLE_RANDOM	The interrupt can be used for entropy
+ *	IRQF_SHARED		Interrupt is shared
+ *	IRQF_DISABLED	Disable local interrupts while processing
+ *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
+ *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
-int request_irq(unsigned int irq,
-		irqreturn_t (*handler)(int, void *, struct pt_regs *),
+int request_irq(unsigned int irq, irq_handler_t handler,
 		unsigned long irqflags, const char *devname, void *dev_id)
 {
 	struct irqaction *action;
+	struct irq_desc *desc;
 	int retval;
 
 	/*
+	 * handle_IRQ_event() always ignores IRQF_DISABLED except for
+	 * the _first_ irqaction (sigh).  That can cause oopsing, but
+	 * the behavior is classified as "will not fix" so we need to
+	 * start nudging drivers away from using that idiom.
+	 */
+	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
+			== (IRQF_SHARED|IRQF_DISABLED))
+		pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
+				"guaranteed on shared IRQs\n",
+				irq, devname);
+
+#ifdef CONFIG_LOCKDEP
+	/*
+	 * Lockdep wants atomic interrupt handlers:
+	 */
+	irqflags |= IRQF_DISABLED;
+#endif
+	/*
 	 * Sanity-check: shared interrupts must pass in a real dev-ID,
 	 * otherwise we'll have trouble later trying to figure out
 	 * which interrupt is which (messes up the interrupt freeing
 	 * logic etc).
 	 */
-	if ((irqflags & SA_SHIRQ) && !dev_id)
+	if ((irqflags & IRQF_SHARED) && !dev_id)
+		return -EINVAL;
+
+	desc = irq_to_desc(irq);
+	if (!desc)
 		return -EINVAL;
-	if (irq >= NR_IRQS)
+
+	if (desc->status & IRQ_NOREQUEST)
 		return -EINVAL;
 	if (!handler)
 		return -EINVAL;
@@ -372,13 +728,29 @@ int request_irq(unsigned int irq,
 	action->next = NULL;
 	action->dev_id = dev_id;
 
-	select_smp_affinity(irq);
-
-	retval = setup_irq(irq, action);
+	retval = __setup_irq(irq, desc, action);
 	if (retval)
 		kfree(action);
 
+#ifdef CONFIG_DEBUG_SHIRQ
+	if (irqflags & IRQF_SHARED) {
+		/*
+		 * It's a shared IRQ -- the driver ought to be prepared for it
+		 * to happen immediately, so let's make sure....
+		 * We disable the irq to make sure that a 'real' IRQ doesn't
+		 * run in parallel with our fake.
+		 */
+		unsigned long flags;
+
+		disable_irq(irq);
+		local_irq_save(flags);
+
+		handler(irq, dev_id);
+
+		local_irq_restore(flags);
+		enable_irq(irq);
+	}
+#endif
 	return retval;
 }
 EXPORT_SYMBOL(request_irq);
-
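
The sketches below illustrate how the APIs touched by this patch are typically used from driver code. They are not part of the patch: the IRQ numbers, the struct my_device layout, and register names such as MY_IRQ_ENABLE are invented for illustration, and error handling is trimmed to the essentials.

synchronize_irq() only guarantees that handlers running on other CPUs have finished; it does not stop new interrupts from arriving. The canonical teardown pattern is therefore to quiesce the interrupt source first, then synchronize, then release resources:

#include <linux/interrupt.h>
#include <linux/io.h>

#define MY_IRQ_ENABLE	0x04		/* made-up register offset */

struct my_device {			/* hypothetical driver state */
	void __iomem *regs;
	int irq;
	unsigned int may_wakeup:1;
};

static void my_device_shutdown(struct my_device *dev)
{
	/* Stop the device from raising new interrupts... */
	writel(0, dev->regs + MY_IRQ_ENABLE);

	/* ...then wait out any handler still running on another CPU. */
	synchronize_irq(dev->irq);
}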
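irq_set_affinity() is the call that writes to /proc/irq/<n>/smp_affinity ultimately perform, and irq_can_set_affinity() is its guard. A minimal sketch that steers a made-up IRQ 17 to CPU 2, using the struct cpumask API this patch converts the subsystem to:

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

static int example_pin_irq_to_cpu2(void)
{
	unsigned int irq = 17;		/* made-up IRQ number */

	/* Per-CPU IRQs and chips without ->set_affinity are rejected. */
	if (!irq_can_set_affinity(irq))
		return -ENOSYS;

	/* cpumask_of(cpu) yields the single-bit mask for that CPU. */
	return irq_set_affinity(irq, cpumask_of(2));
}

Note that with CONFIG_GENERIC_PENDING_IRQ the change may not take effect immediately: as the #ifdef branch above shows, it is parked in pending_mask under IRQ_MOVE_PENDING and applied from interrupt context.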
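disable_irq() and enable_irq() nest through desc->depth, which is why __enable_irq() WARNs on an unbalanced enable. The invariant, again with a made-up IRQ number; since disable_irq() can sleep in synchronize_irq(), code that cannot sleep must use disable_irq_nosync() instead:

#include <linux/interrupt.h>

static void example_nested_disable(void)
{
	unsigned int irq = 17;	/* made-up IRQ number */

	disable_irq(irq);	/* depth 0 -> 1: masked, handlers drained */
	disable_irq(irq);	/* depth 1 -> 2: already masked */

	enable_irq(irq);	/* depth 2 -> 1: still masked */
	enable_irq(irq);	/* depth 1 -> 0: unmasked, resend checked */

	/* One more enable_irq() here would hit the "Unbalanced enable" WARN. */
}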
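set_irq_wake() is refcounted the same way through desc->wake_depth, so wake enables and disables must pair up even when the line is shared. The usual place is a driver's suspend/resume path, reusing the hypothetical struct my_device from the first sketch; in-tree drivers normally go through the enable_irq_wake()/disable_irq_wake() wrappers, which expand to these calls:

#include <linux/interrupt.h>
#include <linux/irq.h>

static int my_device_suspend(struct my_device *dev)
{
	if (dev->may_wakeup)
		set_irq_wake(dev->irq, 1);	/* wake_depth 0 -> 1 */
	return 0;
}

static int my_device_resume(struct my_device *dev)
{
	if (dev->may_wakeup)
		set_irq_wake(dev->irq, 0);	/* wake_depth 1 -> 0 */
	return 0;
}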
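Finally, __setup_irq()'s sharing rules condense to: every irqaction on the line must pass IRQF_SHARED, their IRQF_TRIGGER_* bits must be identical, each caller must supply a unique dev_id (it is what the handler receives and what free_irq() uses to pick the right action), and handlers must return IRQ_NONE for interrupts that are not theirs. A sketch of one hypothetical driver attaching twice to a shared active-low level line:

#include <linux/interrupt.h>
#include <linux/io.h>

static irqreturn_t my_handler(int irq, void *dev_id)
{
	struct my_device *dev = dev_id;

	if (!readl(dev->regs + MY_IRQ_ENABLE))	/* made-up "is it ours?" test */
		return IRQ_NONE;

	/* ... acknowledge and service the device ... */
	return IRQ_HANDLED;
}

static int example_share_line(struct my_device *a, struct my_device *b)
{
	unsigned int irq = 17;		/* made-up IRQ number */
	int ret;

	ret = request_irq(irq, my_handler,
			  IRQF_SHARED | IRQF_TRIGGER_LOW, "first", a);
	if (ret)
		return ret;

	/*
	 * Identical IRQF_SHARED and trigger bits; a mismatch here takes
	 * the "goto mismatch" path in __setup_irq() and yields -EBUSY.
	 */
	ret = request_irq(irq, my_handler,
			  IRQF_SHARED | IRQF_TRIGGER_LOW, "second", b);
	if (ret)
		free_irq(irq, a);	/* dev_id selects which action to drop */
	return ret;
}

The CONFIG_DEBUG_SHIRQ hunks above are the other half of this contract: on such kernels the handler is invoked once as a fake interrupt right after registration and again during free_irq(), so the "is it ours?" check must hold up at any time.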