diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index f1fc747..f58a0cf 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -8,9 +8,12 @@
 #include <linux/preempt.h>
 #include <linux/cpumask.h>
 #include <linux/irqreturn.h>
+#include <linux/irqnr.h>
 #include <linux/hardirq.h>
 #include <linux/sched.h>
 #include <linux/irqflags.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
@@ -104,8 +107,11 @@ extern void enable_irq(unsigned int irq);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
 
+extern cpumask_t irq_default_affinity;
+
 extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
 extern int irq_can_set_affinity(unsigned int irq);
+extern int irq_select_affinity(unsigned int irq);
 
 #else /* CONFIG_SMP */
@@ -119,6 +125,8 @@ static inline int irq_can_set_affinity(unsigned int irq)
 	return 0;
 }
 
+static inline int irq_select_affinity(unsigned int irq) { return 0; }
+
 #endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
 
 #ifdef CONFIG_GENERIC_HARDIRQS
@@ -218,35 +226,6 @@ static inline int disable_irq_wake(unsigned int irq)
 #define or_softirq_pending(x)	(local_softirq_pending() |= (x))
 #endif
 
-/*
- * Temporary defines for UP kernels, until all code gets fixed.
- */
-#ifndef CONFIG_SMP
-static inline void __deprecated cli(void)
-{
-	local_irq_disable();
-}
-static inline void __deprecated sti(void)
-{
-	local_irq_enable();
-}
-static inline void __deprecated save_flags(unsigned long *x)
-{
-	local_save_flags(*x);
-}
-#define save_flags(x) save_flags(&x)
-static inline void __deprecated restore_flags(unsigned long x)
-{
-	local_irq_restore(x);
-}
-
-static inline void __deprecated save_and_cli(unsigned long *x)
-{
-	local_irq_save(*x);
-}
-#define save_and_cli(x) save_and_cli(&x)
-#endif /* CONFIG_SMP */
-
 /* Some architectures might implement lazy enabling/disabling of
  * interrupts. In some cases, such as stop_machine, we might want
  * to ensure that after a local_irq_disable(), interrupts have
@@ -276,6 +255,8 @@ enum
 	HRTIMER_SOFTIRQ,
 #endif
 	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */
+
+	NR_SOFTIRQS
 };
 
 /* softirq mask and active fields moved to irq_cpustat_t in
@@ -285,17 +266,35 @@ enum
 struct softirq_action
 {
 	void	(*action)(struct softirq_action *);
-	void	*data;
 };
 
 asmlinkage void do_softirq(void);
 asmlinkage void __do_softirq(void);
-extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
+extern void open_softirq(int nr, void (*action)(struct softirq_action *));
 extern void softirq_init(void);
 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
 
+/* This is the worklist that queues up per-cpu softirq work.
+ *
+ * send_remote_softirq() adds work to these lists, and
+ * the softirq handler itself dequeues from them.  The queues
+ * are protected by disabling local cpu interrupts and they must
+ * only be accessed by the local cpu that they are for.
+ */
+DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
+
+/* Try to send a softirq to a remote cpu.  If this cannot be done, the
+ * work will be queued to the local cpu.
+ */
+extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);
+
+/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
+ * and compute the current cpu, passed in as 'this_cpu'.
+ */
+extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
+				  int this_cpu, int softirq);
 
 /* Tasklets --- multithreaded analogue of BHs.
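
Reviewer's note: a minimal sketch, not part of the patch, of how a softirq user adapts to the two API changes above. MY_SOFTIRQ, my_action() and my_csd are hypothetical names invented for illustration; a real user would add its own entry to the softirq enum (before the new NR_SOFTIRQS terminator) and keep the call_single_data storage alive until the queued work has run.

#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/init.h>

/* Stand-in softirq number, for illustration only. */
#define MY_SOFTIRQ TASKLET_SOFTIRQ

/* The handler no longer receives state through the removed
 * softirq_action.data member; it must reach its context via
 * per-cpu data or file-scope state instead. */
static void my_action(struct softirq_action *h)
{
	/* process the work queued for this cpu */
}

/* Caller-owned storage handed to send_remote_softirq(); it is
 * queued on the target cpu's softirq_work_list. */
static struct call_single_data my_csd;

static int __init my_setup(void)
{
	/* New two-argument form; before this patch the call would
	 * have been open_softirq(MY_SOFTIRQ, my_action, NULL). */
	open_softirq(MY_SOFTIRQ, my_action);

	/* Trigger the handler on the local cpu... */
	raise_softirq(MY_SOFTIRQ);

	/* ...or try to trigger it on cpu 1; per the comment above,
	 * the work falls back to the local cpu if the remote one
	 * cannot be reached. */
	send_remote_softirq(&my_csd, 1, MY_SOFTIRQ);
	return 0;
}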