#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
+#include <linux/irqnr.h>
#include <linux/hardirq.h>
-#include <linux/sched.h>
#include <linux/irqflags.h>
-#include <linux/bottom_half.h>
-#include <linux/device.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
+#include <linux/hrtimer.h>
+
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>
* IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 * registered first in a shared interrupt is considered for
* performance reasons)
+ * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler has finished.
+ * Used by threaded interrupts which need to keep the
+ * irq line disabled until the threaded handler has been run.
*/
#define IRQF_DISABLED 0x00000020
#define IRQF_SAMPLE_RANDOM 0x00000040
#define IRQF_PERCPU 0x00000400
#define IRQF_NOBALANCING 0x00000800
#define IRQF_IRQPOLL 0x00001000
+#define IRQF_ONESHOT 0x00002000
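IRQF_ONESHOT matters for level-triggered lines serviced by a threaded handler:
without it, the line would be unmasked after the hardirq handler returns and,
with the device still asserting it, fire again immediately. A minimal sketch of
the intended handler pair (not part of this patch; the foo_* names are
hypothetical):

static irqreturn_t foo_quick_check(int irq, void *dev_id)
{
	/* ack/quiesce the device, then defer the heavy work */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	/* runs in process context; sleeping (e.g. bus I/O) is fine here */
	return IRQ_HANDLED;
}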
/*
- * Migration helpers. Scheduled for removal in 9/2007
- * Do not use for new code !
+ * Bits used by threaded handlers:
+ * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
+ * IRQTF_DIED - handler thread died
+ * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
+ * IRQTF_AFFINITY - irq thread is requested to adjust affinity
*/
-static inline
-unsigned long __deprecated deprecated_irq_flag(unsigned long flag)
-{
- return flag;
-}
-
-#define SA_INTERRUPT deprecated_irq_flag(IRQF_DISABLED)
-#define SA_SAMPLE_RANDOM deprecated_irq_flag(IRQF_SAMPLE_RANDOM)
-#define SA_SHIRQ deprecated_irq_flag(IRQF_SHARED)
-#define SA_PROBEIRQ deprecated_irq_flag(IRQF_PROBE_SHARED)
-#define SA_PERCPU deprecated_irq_flag(IRQF_PERCPU)
-
-#define SA_TRIGGER_LOW deprecated_irq_flag(IRQF_TRIGGER_LOW)
-#define SA_TRIGGER_HIGH deprecated_irq_flag(IRQF_TRIGGER_HIGH)
-#define SA_TRIGGER_FALLING deprecated_irq_flag(IRQF_TRIGGER_FALLING)
-#define SA_TRIGGER_RISING deprecated_irq_flag(IRQF_TRIGGER_RISING)
-#define SA_TRIGGER_MASK deprecated_irq_flag(IRQF_TRIGGER_MASK)
+enum {
+ IRQTF_RUNTHREAD,
+ IRQTF_DIED,
+ IRQTF_WARNED,
+ IRQTF_AFFINITY,
+};
typedef irqreturn_t (*irq_handler_t)(int, void *);
+/**
+ * struct irqaction - per interrupt action descriptor
+ * @handler: interrupt handler function
+ * @flags: flags (see IRQF_* above)
+ * @name: name of the device
+ * @dev_id: cookie to identify the device
+ * @next: pointer to the next irqaction for shared interrupts
+ * @irq: interrupt number
+ * @dir: pointer to the proc/irq/NN/name entry
+ * @thread_fn:	interrupt handler function for threaded interrupts
+ * @thread: thread pointer for threaded interrupts
+ * @thread_flags: flags related to @thread
+ */
struct irqaction {
irq_handler_t handler;
unsigned long flags;
- cpumask_t mask;
const char *name;
void *dev_id;
struct irqaction *next;
int irq;
struct proc_dir_entry *dir;
+ irq_handler_t thread_fn;
+ struct task_struct *thread;
+ unsigned long thread_flags;
};
extern irqreturn_t no_action(int cpl, void *dev_id);
-extern int __must_check request_irq(unsigned int, irq_handler_t handler,
- unsigned long, const char *, void *);
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+extern int __must_check
+request_threaded_irq(unsigned int irq, irq_handler_t handler,
+ irq_handler_t thread_fn,
+ unsigned long flags, const char *name, void *dev);
+
+static inline int __must_check
+request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
+ const char *name, void *dev)
+{
+ return request_threaded_irq(irq, handler, NULL, flags, name, dev);
+}
+
+extern void exit_irq_thread(void);
+#else
+
+extern int __must_check
+request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
+ const char *name, void *dev);
+
+/*
+ * Special function to avoid ifdeffery in kernel/irq/devres.c which
+ * gets magically built by GENERIC_HARDIRQS=n architectures (sparc,
+ * m68k). I really love these $@%#!* obvious Makefile references:
+ * ../../../kernel/irq/devres.o
+ */
+static inline int __must_check
+request_threaded_irq(unsigned int irq, irq_handler_t handler,
+ irq_handler_t thread_fn,
+ unsigned long flags, const char *name, void *dev)
+{
+ return request_irq(irq, handler, flags, name, dev);
+}
+
+static inline void exit_irq_thread(void) { }
+#endif
+
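A hypothetical call site for the new API, reusing the handler pair sketched
above (the trigger flag, "foo" name, and cookie are illustrative only):

int ret = request_threaded_irq(irq, foo_quick_check, foo_thread_fn,
			       IRQF_TRIGGER_LOW | IRQF_ONESHOT,
			       "foo", foo);
if (ret)
	return ret;
/* ... later, on teardown ... */
free_irq(irq, foo);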
extern void free_irq(unsigned int, void *);
-extern int __must_check devm_request_irq(struct device *dev, unsigned int irq,
- irq_handler_t handler, unsigned long irqflags,
- const char *devname, void *dev_id);
+struct device;
+
+extern int __must_check
+devm_request_threaded_irq(struct device *dev, unsigned int irq,
+ irq_handler_t handler, irq_handler_t thread_fn,
+ unsigned long irqflags, const char *devname,
+ void *dev_id);
+
+static inline int __must_check
+devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
+ unsigned long irqflags, const char *devname, void *dev_id)
+{
+ return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
+ devname, dev_id);
+}
+
extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
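The devm_* variant ties the irq's lifetime to the device, so no explicit
free_irq() is needed on the error or remove paths. An assumed probe-time
sketch (pdev, irq, and foo are hypothetical):

ret = devm_request_threaded_irq(&pdev->dev, irq, foo_quick_check,
				foo_thread_fn, IRQF_ONESHOT,
				dev_name(&pdev->dev), foo);
if (ret)
	return ret;	/* devres frees the irq on driver detach */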
/*
# define local_irq_enable_in_hardirq() local_irq_enable()
#endif
-#ifdef CONFIG_GENERIC_HARDIRQS
extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
+/* The following three functions are for core kernel use only. */
+#ifdef CONFIG_GENERIC_HARDIRQS
+extern void suspend_device_irqs(void);
+extern void resume_device_irqs(void);
+#ifdef CONFIG_PM_SLEEP
+extern int check_wakeup_irqs(void);
+#else
+static inline int check_wakeup_irqs(void) { return 0; }
+#endif
+#else
+static inline void suspend_device_irqs(void) { }
+static inline void resume_device_irqs(void) { }
+static inline int check_wakeup_irqs(void) { return 0; }
+#endif
+
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
+
+extern cpumask_var_t irq_default_affinity;
+
+extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
+extern int irq_can_set_affinity(unsigned int irq);
+extern int irq_select_affinity(unsigned int irq);
+
+#else /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
+
+static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
+{
+ return -EINVAL;
+}
+
+static inline int irq_can_set_affinity(unsigned int irq)
+{
+ return 0;
+}
+
+static inline int irq_select_affinity(unsigned int irq) { return 0; }
+
+#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
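For illustration, a hedged sketch of steering an interrupt to a single CPU
with these helpers (the CPU number is arbitrary); on !SMP or
!GENERIC_HARDIRQS kernels, the stubs above turn this into a no-op:

if (irq_can_set_affinity(irq))
	irq_set_affinity(irq, cpumask_of(2));	/* pin to CPU 2 */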
+
+#ifdef CONFIG_GENERIC_HARDIRQS
/*
* Special lockdep variants of irq disabling/enabling.
* These should be used for locking constructs that
enable_irq(irq)
# endif
+static inline int enable_irq_wake(unsigned int irq)
+{
+ return 0;
+}
+
+static inline int disable_irq_wake(unsigned int irq)
+{
+ return 0;
+}
#endif /* CONFIG_GENERIC_HARDIRQS */
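enable_irq_wake()/disable_irq_wake() pair naturally with a driver's suspend
path. A hypothetical sketch (struct foo and foo->irq are assumed;
device_may_wakeup() comes from the driver model):

static int foo_suspend(struct device *dev)
{
	struct foo *foo = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(foo->irq);
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo *foo = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(foo->irq);
	return 0;
}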
#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define or_softirq_pending(x) (local_softirq_pending() |= (x))
#endif
-/*
- * Temporary defines for UP kernels, until all code gets fixed.
- */
-#ifndef CONFIG_SMP
-static inline void __deprecated cli(void)
-{
- local_irq_disable();
-}
-static inline void __deprecated sti(void)
-{
- local_irq_enable();
-}
-static inline void __deprecated save_flags(unsigned long *x)
-{
- local_save_flags(*x);
-}
-#define save_flags(x) save_flags(&x)
-static inline void __deprecated restore_flags(unsigned long x)
-{
- local_irq_restore(x);
-}
-
-static inline void __deprecated save_and_cli(unsigned long *x)
-{
- local_irq_save(*x);
-}
-#define save_and_cli(x) save_and_cli(&x)
-#endif /* CONFIG_SMP */
-
/* Some architectures might implement lazy enabling/disabling of
* interrupts. In some cases, such as stop_machine, we might want
* to ensure that after a local_irq_disable(), interrupts have
NET_TX_SOFTIRQ,
NET_RX_SOFTIRQ,
BLOCK_SOFTIRQ,
+ BLOCK_IOPOLL_SOFTIRQ,
TASKLET_SOFTIRQ,
SCHED_SOFTIRQ,
-#ifdef CONFIG_HIGH_RES_TIMERS
HRTIMER_SOFTIRQ,
-#endif
+	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */
+
+ NR_SOFTIRQS
};
+/* Map softirq index to softirq name. Update 'softirq_to_name' in
+ * kernel/softirq.c when adding a new softirq.
+ */
+extern char *softirq_to_name[NR_SOFTIRQS];
+
/* softirq mask and active fields moved to irq_cpustat_t in
* asm/hardirq.h to get better cache usage. KAO
*/
struct softirq_action
{
void (*action)(struct softirq_action *);
- void *data;
};
asmlinkage void do_softirq(void);
-extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
+asmlinkage void __do_softirq(void);
+extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
-extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
-extern void FASTCALL(raise_softirq(unsigned int nr));
+extern void raise_softirq_irqoff(unsigned int nr);
+extern void raise_softirq(unsigned int nr);
+extern void wakeup_softirqd(void);
+
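Note that open_softirq() loses its 'data' argument in this patch. A
core-kernel-only sketch of the updated registration/raise flow (FOO_SOFTIRQ
is hypothetical; real indices must be added to the enum above, and softirqs
are not available to modules):

static void foo_softirq_action(struct softirq_action *h)
{
	/* softirq context: interrupts enabled, no sleeping */
}

/* at init time */
open_softirq(FOO_SOFTIRQ, foo_softirq_action);

/* later, to trigger it */
raise_softirq(FOO_SOFTIRQ);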
+/* This is the worklist that queues up per-cpu softirq work.
+ *
+ * send_remote_softirq() adds work to these lists, and
+ * the softirq handler itself dequeues from them. The queues
+ * are protected by disabling local cpu interrupts and they must
+ * only be accessed by the local cpu that they are for.
+ */
+DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
+/* Try to send a softirq to a remote cpu. If this cannot be done, the
+ * work will be queued to the local cpu.
+ */
+extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);
+
+/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
+ * and compute the current cpu, passed in as 'this_cpu'.
+ */
+extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
+ int this_cpu, int softirq);
/* Tasklets --- multithreaded analogue of BHs.
#define tasklet_unlock(t) do { } while (0)
#endif
-extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t));
+extern void __tasklet_schedule(struct tasklet_struct *t);
static inline void tasklet_schedule(struct tasklet_struct *t)
{
__tasklet_schedule(t);
}
-extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t));
+extern void __tasklet_hi_schedule(struct tasklet_struct *t);
static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
__tasklet_hi_schedule(t);
}
+extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
+
+/*
+ * This version avoids touching any other tasklets. Needed for kmemcheck
+ * in order not to take any page faults while enqueueing this tasklet;
+ * consider VERY carefully whether you really need this or
+ * tasklet_hi_schedule()...
+ */
+static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
+{
+ if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+ __tasklet_hi_schedule_first(t);
+}
+
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
extern void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data);
+struct tasklet_hrtimer {
+ struct hrtimer timer;
+ struct tasklet_struct tasklet;
+ enum hrtimer_restart (*function)(struct hrtimer *);
+};
+
+extern void
+tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
+ enum hrtimer_restart (*function)(struct hrtimer *),
+ clockid_t which_clock, enum hrtimer_mode mode);
+
+static inline
+int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
+ const enum hrtimer_mode mode)
+{
+ return hrtimer_start(&ttimer->timer, time, mode);
+}
+
+static inline
+void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
+{
+ hrtimer_cancel(&ttimer->timer);
+ tasklet_kill(&ttimer->tasklet);
+}
+
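Putting the tasklet_hrtimer pieces together, a hypothetical user might do the
following (the 10ms expiry and foo names are illustrative only):

static enum hrtimer_restart foo_timer_fn(struct hrtimer *timer)
{
	/* invoked from the tasklet, i.e. softirq context */
	return HRTIMER_NORESTART;
}

tasklet_hrtimer_init(&foo->ttimer, foo_timer_fn,
		     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
tasklet_hrtimer_start(&foo->ttimer,
		      ktime_set(0, 10 * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);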
/*
* Autoprobing for irqs:
*
}
#endif
+struct seq_file;
+int show_interrupts(struct seq_file *p, void *v);
+
+struct irq_desc;
+
+extern int early_irq_init(void);
+extern int arch_probe_nr_irqs(void);
+extern int arch_early_irq_init(void);
+extern int arch_init_chip_data(struct irq_desc *desc, int node);
+
#endif