X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=include%2Flinux%2Finterrupt.h;h=7e85a6e89e417f551d8e966fa59827dab19f3d28;hb=968ea6d80e395cf11a51143cfa1b9a14ada676df;hp=d99e7aeb7d338a9fde859840ab4aff555f20e488;hpb=1da177e4c3f41524e886b7f1b8a0c1fc7321cac2;p=safe%2Fjmp%2Flinux-2.6

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index d99e7ae..7e85a6e 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -2,39 +2,68 @@
 #ifndef _LINUX_INTERRUPT_H
 #define _LINUX_INTERRUPT_H
 
-#include
 #include
 #include
 #include
 #include
 #include
+#include
+#include
 #include
+#include
+#include
+#include
+#include
+#include
+
 #include
 #include
 #include
 
 /*
- * For 2.4.x compatibility, 2.4.x can use
- *
- * typedef void irqreturn_t;
- * #define IRQ_NONE
- * #define IRQ_HANDLED
- * #define IRQ_RETVAL(x)
- *
- * To mix old-style and new-style irq handler returns.
+ * These correspond to the IORESOURCE_IRQ_* defines in
+ * linux/ioport.h to select the interrupt line behaviour. When
+ * requesting an interrupt without specifying an IRQF_TRIGGER, the
+ * setting should be assumed to be "as already configured", which
+ * may be as per machine or firmware initialisation.
+ */
+#define IRQF_TRIGGER_NONE	0x00000000
+#define IRQF_TRIGGER_RISING	0x00000001
+#define IRQF_TRIGGER_FALLING	0x00000002
+#define IRQF_TRIGGER_HIGH	0x00000004
+#define IRQF_TRIGGER_LOW	0x00000008
+#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
+				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
+#define IRQF_TRIGGER_PROBE	0x00000010
+
+/*
+ * These flags are used only by the kernel as part of the
+ * irq handling routines.
  *
- * IRQ_NONE means we didn't handle it.
- * IRQ_HANDLED means that we did have a valid interrupt and handled it.
- * IRQ_RETVAL(x) selects on the two depending on x being non-zero (for handled)
+ * IRQF_DISABLED - keep irqs disabled when calling the action handler
+ * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
+ * IRQF_SHARED - allow sharing the irq among several devices
+ * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
+ * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
+ * IRQF_PERCPU - Interrupt is per cpu
+ * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
+ * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
+ *                registered first in a shared interrupt is considered for
+ *                performance reasons)
  */
-typedef int irqreturn_t;
+#define IRQF_DISABLED		0x00000020
+#define IRQF_SAMPLE_RANDOM	0x00000040
+#define IRQF_SHARED		0x00000080
+#define IRQF_PROBE_SHARED	0x00000100
+#define IRQF_TIMER		0x00000200
+#define IRQF_PERCPU		0x00000400
+#define IRQF_NOBALANCING	0x00000800
+#define IRQF_IRQPOLL		0x00001000
 
-#define IRQ_NONE	(0)
-#define IRQ_HANDLED	(1)
-#define IRQ_RETVAL(x)	((x) != 0)
+typedef irqreturn_t (*irq_handler_t)(int, void *);
 
 struct irqaction {
-	irqreturn_t (*handler)(int, void *, struct pt_regs *);
+	irq_handler_t handler;
 	unsigned long flags;
 	cpumask_t mask;
 	const char *name;
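The hunk above introduces the IRQF_* trigger and behaviour flags and the irq_handler_t typedef, whose handlers take only the irq number and the dev_id cookie (the old pt_regs argument is gone). A minimal usage sketch, not part of the diff, is shown below; the foo_* names, register offsets and bit masks are hypothetical stand-ins for real device specifics.

#include <linux/interrupt.h>
#include <linux/io.h>

#define FOO_IRQ_STATUS	0x00		/* hypothetical status register */
#define FOO_IRQ_ACK	0x04		/* hypothetical acknowledge register */
#define FOO_IRQ_PENDING	0x01

struct foo_priv {
	void __iomem	*regs;
	unsigned int	irq;
};

/* Two-argument handler matching the new irq_handler_t prototype. */
static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;
	u32 status = readl(priv->regs + FOO_IRQ_STATUS);

	/* The line is shared (IRQF_SHARED), so report IRQ_NONE if it was not us. */
	if (!(status & FOO_IRQ_PENDING))
		return IRQ_NONE;

	writel(status, priv->regs + FOO_IRQ_ACK);
	return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_priv *priv)
{
	/* Shared, level-triggered line; priv doubles as the dev_id cookie. */
	return request_irq(priv->irq, foo_interrupt,
			   IRQF_SHARED | IRQF_TRIGGER_LOW, "foo", priv);
}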
@@ -44,55 +73,170 @@ struct irqaction {
 	struct proc_dir_entry *dir;
 };
 
-extern irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs);
-extern int request_irq(unsigned int,
-		       irqreturn_t (*handler)(int, void *, struct pt_regs *),
+extern irqreturn_t no_action(int cpl, void *dev_id);
+extern int __must_check request_irq(unsigned int, irq_handler_t handler,
 		       unsigned long, const char *, void *);
 extern void free_irq(unsigned int, void *);
 
+struct device;
+
+extern int __must_check
+devm_request_irq(struct device *dev, unsigned int irq,
+		 irq_handler_t handler, unsigned long irqflags,
+		 const char *devname, void *dev_id);
+extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
+
+/*
+ * On lockdep we don't want to enable hardirqs in hardirq
+ * context. Use local_irq_enable_in_hardirq() to annotate
+ * kernel code that has to do this nevertheless (pretty much
+ * the only valid case is for old/broken hardware that is
+ * insanely slow).
+ *
+ * NOTE: in theory this might break fragile code that relies
+ * on hardirq delivery - in practice we don't seem to have such
+ * places left. So the only effect should be slightly increased
+ * irqs-off latencies.
+ */
+#ifdef CONFIG_LOCKDEP
+# define local_irq_enable_in_hardirq()	do { } while (0)
+#else
+# define local_irq_enable_in_hardirq()	local_irq_enable()
+#endif
 
-#ifdef CONFIG_GENERIC_HARDIRQS
 extern void disable_irq_nosync(unsigned int irq);
 extern void disable_irq(unsigned int irq);
 extern void enable_irq(unsigned int irq);
-#endif
 
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
+
+extern cpumask_t irq_default_affinity;
+
+extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
+extern int irq_can_set_affinity(unsigned int irq);
+extern int irq_select_affinity(unsigned int irq);
+
+#else /* CONFIG_SMP */
+
+static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
+{
+	return -EINVAL;
+}
+
+static inline int irq_can_set_affinity(unsigned int irq)
+{
+	return 0;
+}
+
+static inline int irq_select_affinity(unsigned int irq) { return 0; }
+
+#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
+
+#ifdef CONFIG_GENERIC_HARDIRQS
 /*
- * Temporary defines for UP kernels, until all code gets fixed.
+ * Special lockdep variants of irq disabling/enabling.
+ * These should be used by locking constructs that
+ * know that a particular irq context is disabled,
+ * and which is the only irq-context user of a lock,
+ * so that it's safe to take the lock in the irq-disabled
+ * section without disabling hardirqs.
+ *
+ * On !CONFIG_LOCKDEP they are equivalent to the normal
+ * irq disable/enable methods.
  */
-#ifndef CONFIG_SMP
-static inline void __deprecated cli(void)
+static inline void disable_irq_nosync_lockdep(unsigned int irq)
 {
+	disable_irq_nosync(irq);
+#ifdef CONFIG_LOCKDEP
 	local_irq_disable();
+#endif
 }
-static inline void __deprecated sti(void)
+
+static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
 {
+	disable_irq_nosync(irq);
+#ifdef CONFIG_LOCKDEP
+	local_irq_save(*flags);
+#endif
+}
+
+static inline void disable_irq_lockdep(unsigned int irq)
+{
+	disable_irq(irq);
+#ifdef CONFIG_LOCKDEP
+	local_irq_disable();
+#endif
+}
+
+static inline void enable_irq_lockdep(unsigned int irq)
+{
+#ifdef CONFIG_LOCKDEP
 	local_irq_enable();
+#endif
+	enable_irq(irq);
+}
+
+static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
+{
+#ifdef CONFIG_LOCKDEP
+	local_irq_restore(*flags);
+#endif
+	enable_irq(irq);
 }
-static inline void __deprecated save_flags(unsigned long *x)
+
+/* IRQ wakeup (PM) control: */
+extern int set_irq_wake(unsigned int irq, unsigned int on);
+
+static inline int enable_irq_wake(unsigned int irq)
 {
-	local_save_flags(*x);
+	return set_irq_wake(irq, 1);
 }
-#define save_flags(x) save_flags(&x);
-static inline void __deprecated restore_flags(unsigned long x)
+
+static inline int disable_irq_wake(unsigned int irq)
 {
-	local_irq_restore(x);
+	return set_irq_wake(irq, 0);
+}
+
+#else /* !CONFIG_GENERIC_HARDIRQS */
+/*
+ * NOTE: non-genirq architectures, if they want to support the lock
+ * validator need to define the methods below in their asm/irq.h
+ * files, under an #ifdef CONFIG_LOCKDEP section.
+ */
+#ifndef CONFIG_LOCKDEP
+# define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
+# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
+						disable_irq_nosync(irq)
+# define disable_irq_lockdep(irq)		disable_irq(irq)
+# define enable_irq_lockdep(irq)		enable_irq(irq)
+# define enable_irq_lockdep_irqrestore(irq, flags) \
+						enable_irq(irq)
+# endif
+
+static inline int enable_irq_wake(unsigned int irq)
+{
+	return 0;
 }
-static inline void __deprecated save_and_cli(unsigned long *x)
+static inline int disable_irq_wake(unsigned int irq)
 {
-	local_irq_save(*x);
+	return 0;
 }
-#define save_and_cli(x)	save_and_cli(&x)
-#endif /* CONFIG_SMP */
+#endif /* CONFIG_GENERIC_HARDIRQS */
 
-/* SoftIRQ primitives.  */
-#define local_bh_disable() \
-		do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
-#define __local_bh_enable() \
-		do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)
+#ifndef __ARCH_SET_SOFTIRQ_PENDING
+#define set_softirq_pending(x) (local_softirq_pending() = (x))
+#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
+#endif
 
-extern void local_bh_enable(void);
+/* Some architectures might implement lazy enabling/disabling of
+ * interrupts. In some cases, such as stop_machine, we might want
+ * to ensure that after a local_irq_disable(), interrupts have
+ * really been disabled in hardware. Such architectures need to
+ * implement the following hook.
+ */
+#ifndef hard_irq_disable
+#define hard_irq_disable()	do { } while(0)
+#endif
 
 /* PLEASE, avoid allocating new softirqs, if you do not _really_ need high
    frequency threaded job scheduling. For almost all the purposes
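The hunk above adds the device-managed devm_request_irq()/devm_free_irq() pair and the enable_irq_wake()/disable_irq_wake() wrappers around set_irq_wake(). The sketch below, not part of the diff, shows how a platform driver might combine them; the bar_* names and the surrounding driver glue are hypothetical.

#include <linux/interrupt.h>
#include <linux/platform_device.h>

struct bar_priv {
	int irq;
};

static irqreturn_t bar_interrupt(int irq, void *dev_id)
{
	/* Device-specific handling elided. */
	return IRQ_HANDLED;
}

static int bar_probe(struct platform_device *pdev)
{
	struct bar_priv *priv;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0)
		return priv->irq;

	/* No matching free_irq(): the line is released when the device is unbound. */
	err = devm_request_irq(&pdev->dev, priv->irq, bar_interrupt, 0,
			       "bar", priv);
	if (err)
		return err;

	platform_set_drvdata(pdev, priv);
	return 0;
}

static int bar_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct bar_priv *priv = platform_get_drvdata(pdev);

	/* Allow this interrupt to wake the system from suspend. */
	return enable_irq_wake(priv->irq);
}

static int bar_resume(struct platform_device *pdev)
{
	struct bar_priv *priv = platform_get_drvdata(pdev);

	return disable_irq_wake(priv->irq);
}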
@@ -106,8 +250,15 @@ enum
 	TIMER_SOFTIRQ,
 	NET_TX_SOFTIRQ,
 	NET_RX_SOFTIRQ,
-	SCSI_SOFTIRQ,
-	TASKLET_SOFTIRQ
+	BLOCK_SOFTIRQ,
+	TASKLET_SOFTIRQ,
+	SCHED_SOFTIRQ,
+#ifdef CONFIG_HIGH_RES_TIMERS
+	HRTIMER_SOFTIRQ,
+#endif
+	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */
+
+	NR_SOFTIRQS
 };
 
 /* softirq mask and active fields moved to irq_cpustat_t in
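The softirq vectors above are a fixed, small set and the header explicitly discourages adding new ones; still, the registration pattern is simple. The sketch below is not part of the diff: it assumes a hypothetical EXAMPLE_SOFTIRQ entry in the enum and uses the open_softirq()/raise_softirq() interface whose signature is updated in the next hunk (the action no longer receives a separate data pointer).

#include <linux/interrupt.h>

/* Runs in softirq context on the cpu that raised EXAMPLE_SOFTIRQ. */
static void example_softirq_action(struct softirq_action *h)
{
	/* Drain a per-cpu queue, complete requests, kick the next stage, ... */
}

static int __init example_softirq_setup(void)
{
	open_softirq(EXAMPLE_SOFTIRQ, example_softirq_action);
	return 0;
}

/* Typically called from a hard interrupt handler to defer work: */
static void example_softirq_kick(void)
{
	raise_softirq(EXAMPLE_SOFTIRQ);
}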
@@ -117,16 +268,35 @@ enum
 struct softirq_action
 {
 	void	(*action)(struct softirq_action *);
-	void	*data;
 };
 
 asmlinkage void do_softirq(void);
-extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
+asmlinkage void __do_softirq(void);
+extern void open_softirq(int nr, void (*action)(struct softirq_action *));
 extern void softirq_init(void);
-#define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << (nr); } while (0)
-extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
-extern void FASTCALL(raise_softirq(unsigned int nr));
+#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
+extern void raise_softirq_irqoff(unsigned int nr);
+extern void raise_softirq(unsigned int nr);
+
+/* This is the worklist that queues up per-cpu softirq work.
+ *
+ * send_remote_softirq() adds work to these lists, and
+ * the softirq handler itself dequeues from them. The queues
+ * are protected by disabling local cpu interrupts and they must
+ * only be accessed by the local cpu that they are for.
+ */
+DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
+/* Try to send a softirq to a remote cpu. If this cannot be done, the
+ * work will be queued to the local cpu.
+ */
+extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);
+
+/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
+ * and compute the current cpu, passed in as 'this_cpu'.
+ */
+extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
+				  int this_cpu, int softirq);
 
 /* Tasklets --- multithreaded analogue of BHs.
@@ -192,7 +362,7 @@ static inline void tasklet_unlock_wait(struct tasklet_struct *t)
 #define tasklet_unlock(t) do { } while (0)
 #endif
 
-extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t));
+extern void __tasklet_schedule(struct tasklet_struct *t);
 
 static inline void tasklet_schedule(struct tasklet_struct *t)
 {
@@ -200,7 +370,7 @@ static inline void tasklet_schedule(struct tasklet_struct *t)
 		__tasklet_schedule(t);
 }
 
-extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t));
+extern void __tasklet_hi_schedule(struct tasklet_struct *t);
 
 static inline void tasklet_hi_schedule(struct tasklet_struct *t)
 {
@@ -286,4 +456,15 @@ extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
 extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
 #endif
 
+#ifdef CONFIG_PROC_FS
+/* Initialize /proc/irq/ */
+extern void init_irq_proc(void);
+#else
+static inline void init_irq_proc(void)
+{
+}
+#endif
+
+int show_interrupts(struct seq_file *p, void *v);
+
 #endif
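For the common case the header recommends tasklets over new softirq vectors, and the last hunks above only drop the FASTCALL annotation from the scheduling helpers, so the usual pattern is unchanged. A short sketch, not part of the diff, with hypothetical baz_* names:

#include <linux/interrupt.h>

/* Deferred work; runs in softirq context shortly after being scheduled. */
static void baz_do_bottom_half(unsigned long data)
{
	/* Heavier follow-up processing goes here, outside the hard irq path. */
}

static DECLARE_TASKLET(baz_tasklet, baz_do_bottom_half, 0);

static irqreturn_t baz_interrupt(int irq, void *dev_id)
{
	/* Acknowledge the hardware quickly, then defer the rest. */
	tasklet_schedule(&baz_tasklet);
	return IRQ_HANDLED;
}

static void baz_teardown(void)
{
	/* Ensure the tasklet is neither pending nor running before unload. */
	tasklet_kill(&baz_tasklet);
}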