X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=include%2Flinux%2Firq.h;h=cb2e77a3f7f75a770a38b955d561aed8e77d3907;hb=1ce822fa04fd6878f079461a4b8affe4bb5ec27b;hp=93fe9a943e714c14802869b3a769f24ca4aed2bd;hpb=8c464a4b23ca283b414022ebc77787f3c7040fa7;p=safe%2Fjmp%2Flinux-2.6

diff --git a/include/linux/irq.h b/include/linux/irq.h
index 93fe9a9..cb2e77a 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -17,8 +17,12 @@
 #include <linux/cache.h>
 #include <linux/spinlock.h>
 #include <linux/cpumask.h>
+#include <linux/gfp.h>
 #include <linux/irqreturn.h>
+#include <linux/irqnr.h>
 #include <linux/errno.h>
+#include <linux/topology.h>
+#include <linux/wait.h>
 
 #include <asm/irq.h>
 #include <asm/ptrace.h>
@@ -62,7 +66,9 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
 #define IRQ_MOVE_PENDING	0x00200000	/* need to re-target IRQ destination */
 #define IRQ_NO_BALANCING	0x00400000	/* IRQ is excluded from balancing */
 #define IRQ_SPURIOUS_DISABLED	0x00800000	/* IRQ was disabled by the spurious trap */
-#define IRQ_MOVE_PCNTXT	0x01000000	/* IRQ migration from process context */
+#define IRQ_MOVE_PCNTXT		0x01000000	/* IRQ migration from process context */
+#define IRQ_AFFINITY_SET	0x02000000	/* IRQ affinity was set from userspace*/
+#define IRQ_SUSPENDED		0x04000000	/* IRQ has gone through suspend sequence */
 
 #ifdef CONFIG_IRQ_PER_CPU
 # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
@@ -111,7 +117,8 @@ struct irq_chip {
 	void		(*eoi)(unsigned int irq);
 
 	void		(*end)(unsigned int irq);
-	void		(*set_affinity)(unsigned int irq, cpumask_t dest);
+	int		(*set_affinity)(unsigned int irq,
+					const struct cpumask *dest);
 	int		(*retrigger)(unsigned int irq);
 	int		(*set_type)(unsigned int irq, unsigned int flow_type);
 	int		(*set_wake)(unsigned int irq, unsigned int on);
@@ -131,7 +138,10 @@ struct timer_rand_state;
 struct irq_2_iommu;
 /**
  * struct irq_desc - interrupt descriptor
- *
+ * @irq:		interrupt number for this descriptor
+ * @timer_rand_state:	pointer to timer rand state struct
+ * @kstat_irqs:		irq stats per cpu
+ * @irq_2_iommu:	iommu with this irq
  * @handle_irq:		highlevel irq-events handler [if NULL, __do_IRQ()]
  * @chip:		low level interrupt hardware access
  * @msi_desc:		MSI descriptor
@@ -143,27 +153,23 @@ struct irq_2_iommu;
  * @depth:		disable-depth, for nested irq_disable() calls
  * @wake_depth:		enable depth, for multiple set_irq_wake() callers
  * @irq_count:		stats field to detect stalled irqs
- * @irqs_unhandled:	stats field for spurious unhandled interrupts
  * @last_unhandled:	aging timer for unhandled count
+ * @irqs_unhandled:	stats field for spurious unhandled interrupts
  * @lock:		locking for SMP
  * @affinity:		IRQ affinity on SMP
- * @cpu:		cpu index useful for balancing
+ * @node:		node index useful for balancing
  * @pending_mask:	pending rebalanced interrupts
+ * @threads_active:	number of irqaction threads currently running
+ * @wait_for_threads:	wait queue for sync_irq to wait for threaded handlers
  * @dir:		/proc/irq/ procfs entry
- * @affinity_entry:	/proc/irq/smp_affinity procfs entry on SMP
  * @name:		flow handler name for /proc/interrupts output
  */
 struct irq_desc {
 	unsigned int		irq;
-#ifdef CONFIG_HAVE_SPARSE_IRQ
-	struct irq_desc		*next;
 	struct timer_rand_state *timer_rand_state;
-#endif
-#ifdef CONFIG_HAVE_DYN_ARRAY
 	unsigned int		*kstat_irqs;
-#endif
-#if defined(CONFIG_INTR_REMAP) && defined(CONFIG_HAVE_SPARSE_IRQ)
-	struct irq_2_iommu	*irq_2_iommu;
+#ifdef CONFIG_INTR_REMAP
+	struct irq_2_iommu	*irq_2_iommu;
 #endif
 	irq_flow_handler_t	handle_irq;
 	struct irq_chip		*chip;
@@ -176,57 +182,47 @@ struct irq_desc {
 	unsigned int		depth;		/* nested irq disables */
 	unsigned int		wake_depth;	/* nested wake enables */
 	unsigned int		irq_count;	/* For detecting broken IRQs */
-	unsigned int		irqs_unhandled;
 	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
+	unsigned int		irqs_unhandled;
 	spinlock_t		lock;
 #ifdef CONFIG_SMP
-	cpumask_t		affinity;
-	unsigned int		cpu;
-#endif
+	cpumask_var_t		affinity;
+	unsigned int		node;
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_t		pending_mask;
+	cpumask_var_t		pending_mask;
+#endif
 #endif
+	atomic_t		threads_active;
+	wait_queue_head_t	wait_for_threads;
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry	*dir;
 #endif
 	const char		*name;
 } ____cacheline_internodealigned_in_smp;
 
-extern struct irq_desc *irq_to_desc(unsigned int irq);
-extern struct irq_desc *irq_to_desc_alloc(unsigned int irq);
+extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
+					struct irq_desc *desc, int node);
+extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
 
-#ifndef CONFIG_HAVE_SPARSE_IRQ
-
-#ifndef CONFIG_HAVE_DYN_ARRAY
-/* could be removed if we get rid of all irq_desc reference */
+#ifndef CONFIG_SPARSE_IRQ
 extern struct irq_desc irq_desc[NR_IRQS];
-#else
-extern struct irq_desc *irq_desc;
-#endif
-
-#ifdef CONFIG_GENERIC_HARDIRQS
-#define for_each_irq_desc(irq, desc)		\
-	for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc = &irq_desc[irq])
 #endif
 
+#ifdef CONFIG_NUMA_IRQ_DESC
+extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node);
 #else
-
-extern struct irq_desc *sparse_irqs;
-#define for_each_irq_desc(irqX, desc)		\
-	for (desc = sparse_irqs, irqX = desc->irq; desc; desc = desc->next, irqX = desc ? desc->irq : -1U)
-
+static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
+{
+	return desc;
+}
 #endif
 
-#ifdef CONFIG_HAVE_DYN_ARRAY
-#define kstat_irqs_this_cpu(DESC) \
-	((DESC)->kstat_irqs[smp_processor_id()])
-#endif
+extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
 
 /*
  * Migration helpers for obsolete names, they will go away:
  */
 #define hw_interrupt_type	irq_chip
-typedef struct irq_chip		hw_irq_controller;
 #define no_irq_type		no_irq_chip
 typedef struct irq_desc		irq_desc_t;
 
@@ -236,6 +232,7 @@ typedef struct irq_desc irq_desc_t;
 #include <asm/hw_irq.h>
 
 extern int setup_irq(unsigned int irq, struct irqaction *new);
+extern void remove_irq(unsigned int irq, struct irqaction *act);
 
 #ifdef CONFIG_GENERIC_HARDIRQS
 
@@ -243,7 +240,6 @@ extern int setup_irq(unsigned int irq, struct irqaction *new);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 
-void set_pending_irq(unsigned int irq, cpumask_t mask);
 void move_native_irq(int irq);
 void move_masked_irq(int irq);
 
@@ -261,10 +257,6 @@ static inline void move_masked_irq(int irq)
 {
 }
 
-static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
-{
-}
-
 #endif	/* CONFIG_GENERIC_PENDING_IRQ */
 
 #else	/* CONFIG_SMP */
 
@@ -285,7 +277,7 @@ static inline int irq_balancing_disabled(unsigned int irq)
 }
 
 /* Handle irq action chains: */
-extern int handle_IRQ_event(unsigned int irq, struct irqaction *action);
+extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);
 
 /*
  * Built-in IRQ handlers for various IRQ types,
@@ -330,7 +322,7 @@ static inline void generic_handle_irq(unsigned int irq)
 
 /* Handling of unhandled and spurious interrupts: */
 extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
-			   int action_ret);
+			   irqreturn_t action_ret);
 
 /* Resending of interrupts :*/
 void check_irq_resend(struct irq_desc *desc, unsigned int irq);
 
@@ -391,7 +383,7 @@ extern void set_irq_noprobe(unsigned int irq);
 extern void set_irq_probe(unsigned int irq);
 
 /* Handle dynamic irq creation and destruction */
-extern unsigned int create_irq_nr(unsigned int irq_want);
+extern unsigned int create_irq_nr(unsigned int irq_want, int node);
 extern int create_irq(void);
 extern void destroy_irq(unsigned int irq);
 
@@ -418,8 +410,108 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
 #define get_irq_data(irq)		(irq_to_desc(irq)->handler_data)
 #define get_irq_msi(irq)		(irq_to_desc(irq)->msi_desc)
 
+#define get_irq_desc_chip(desc)		((desc)->chip)
+#define get_irq_desc_chip_data(desc)	((desc)->chip_data)
+#define get_irq_desc_data(desc)		((desc)->handler_data)
+#define get_irq_desc_msi(desc)		((desc)->msi_desc)
+
 #endif /* CONFIG_GENERIC_HARDIRQS */
 
 #endif /* !CONFIG_S390 */
 
+#ifdef CONFIG_SMP
+/**
+ * alloc_desc_masks - allocate cpumasks for irq_desc
+ * @desc:	pointer to irq_desc struct
+ * @node:	node which will be handling the cpumasks
+ * @boot:	true if need bootmem
+ *
+ * Allocates affinity and pending_mask cpumask if required.
+ * Returns true if successful (or not required).
+ */
+static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
+								bool boot)
+{
+	gfp_t gfp = GFP_ATOMIC;
+
+	if (boot)
+		gfp = GFP_NOWAIT;
+
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
+		return false;
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
+		free_cpumask_var(desc->affinity);
+		return false;
+	}
+#endif
+#endif
+	return true;
+}
+
+static inline void init_desc_masks(struct irq_desc *desc)
+{
+	cpumask_setall(desc->affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_clear(desc->pending_mask);
+#endif
+}
+
+/**
+ * init_copy_desc_masks - copy cpumasks for irq_desc
+ * @old_desc:	pointer to old irq_desc struct
+ * @new_desc:	pointer to new irq_desc struct
+ *
+ * Insures affinity and pending_masks are copied to new irq_desc.
+ * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the
+ * irq_desc struct so the copy is redundant.
+ */
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+					struct irq_desc *new_desc)
+{
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	cpumask_copy(new_desc->affinity, old_desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
+#endif
+#endif
+}
+
+static inline void free_desc_masks(struct irq_desc *old_desc,
+					struct irq_desc *new_desc)
+{
+	free_cpumask_var(old_desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	free_cpumask_var(old_desc->pending_mask);
+#endif
+}
+
+#else /* !CONFIG_SMP */
+
+static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
+								bool boot)
+{
+	return true;
+}
+
+static inline void init_desc_masks(struct irq_desc *desc)
+{
+}
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+					struct irq_desc *new_desc)
+{
+}
+
+static inline void free_desc_masks(struct irq_desc *old_desc,
+					struct irq_desc *new_desc)
+{
+}
+#endif /* CONFIG_SMP */
+
 #endif /* _LINUX_IRQ_H */
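
For reference, a minimal sketch of what a driver-side ->set_affinity callback looks like
after the prototype change in the irq_chip hunk above: it now receives a
"const struct cpumask *" instead of a cpumask_t by value, and reports success or failure
through an int return value. The chip name foo_chip_set_affinity() and the hardware hook
foo_hw_route_irq() are hypothetical and not part of this patch; only irq_to_desc(),
cpumask_first(), cpumask_copy(), nr_cpu_ids and the cpumask_var_t affinity field are taken
from the APIs visible in the diff.

static int foo_chip_set_affinity(unsigned int irq, const struct cpumask *dest)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int cpu = cpumask_first(dest);	/* route to one CPU from the mask */

	if (cpu >= nr_cpu_ids)
		return -EINVAL;		/* empty mask: report the error to the core */

	foo_hw_route_irq(irq, cpu);	/* hypothetical hardware routing register write */

	/* desc->affinity is a cpumask_var_t after this patch, so copy via the API */
	cpumask_copy(desc->affinity, dest);

	return 0;
}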