#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
+#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/irqflags.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
+
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#define IRQF_NOBALANCING 0x00000800
#define IRQF_IRQPOLL 0x00001000
-/*
- * Migration helpers. Scheduled for removal in 9/2007
- * Do not use for new code !
- */
-static inline
-unsigned long __deprecated deprecated_irq_flag(unsigned long flag)
-{
- return flag;
-}
-
-#define SA_INTERRUPT deprecated_irq_flag(IRQF_DISABLED)
-#define SA_SAMPLE_RANDOM deprecated_irq_flag(IRQF_SAMPLE_RANDOM)
-#define SA_SHIRQ deprecated_irq_flag(IRQF_SHARED)
-#define SA_PROBEIRQ deprecated_irq_flag(IRQF_PROBE_SHARED)
-#define SA_PERCPU deprecated_irq_flag(IRQF_PERCPU)
-
-#define SA_TRIGGER_LOW deprecated_irq_flag(IRQF_TRIGGER_LOW)
-#define SA_TRIGGER_HIGH deprecated_irq_flag(IRQF_TRIGGER_HIGH)
-#define SA_TRIGGER_FALLING deprecated_irq_flag(IRQF_TRIGGER_FALLING)
-#define SA_TRIGGER_RISING deprecated_irq_flag(IRQF_TRIGGER_RISING)
-#define SA_TRIGGER_MASK deprecated_irq_flag(IRQF_TRIGGER_MASK)
-
typedef irqreturn_t (*irq_handler_t)(int, void *);
struct irqaction {
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
+
+extern cpumask_t irq_default_affinity;
+
+extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
+extern int irq_can_set_affinity(unsigned int irq);
+extern int irq_select_affinity(unsigned int irq);
+
+#else /* CONFIG_SMP */
+
+static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
+{
+ return -EINVAL;
+}
+
+static inline int irq_can_set_affinity(unsigned int irq)
+{
+ return 0;
+}
+
+static inline int irq_select_affinity(unsigned int irq) { return 0; }
+
+#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
+
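/*
 * A minimal usage sketch for the affinity interface above, assuming a
 * driver-owned interrupt number "my_irq" (hypothetical): pin the IRQ
 * to one CPU where the architecture supports it. On UP kernels the
 * stubs above turn this into a graceful failure.
 */
static int pin_my_irq(unsigned int my_irq, int cpu)
{
	if (!irq_can_set_affinity(my_irq))
		return -ENOSYS;		/* no affinity control on this IRQ */
	return irq_set_affinity(my_irq, cpumask_of_cpu(cpu));
}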
#ifdef CONFIG_GENERIC_HARDIRQS
/*
* Special lockdep variants of irq disabling/enabling.
#define or_softirq_pending(x) (local_softirq_pending() |= (x))
#endif
-/*
- * Temporary defines for UP kernels, until all code gets fixed.
- */
-#ifndef CONFIG_SMP
-static inline void __deprecated cli(void)
-{
- local_irq_disable();
-}
-static inline void __deprecated sti(void)
-{
- local_irq_enable();
-}
-static inline void __deprecated save_flags(unsigned long *x)
-{
- local_save_flags(*x);
-}
-#define save_flags(x) save_flags(&x)
-static inline void __deprecated restore_flags(unsigned long x)
-{
- local_irq_restore(x);
-}
-
-static inline void __deprecated save_and_cli(unsigned long *x)
-{
- local_irq_save(*x);
-}
-#define save_and_cli(x) save_and_cli(&x)
-#endif /* CONFIG_SMP */
-
/* Some architectures might implement lazy enabling/disabling of
* interrupts. In some cases, such as stop_machine, we might want
* to ensure that after a local_irq_disable(), interrupts have
BLOCK_SOFTIRQ,
TASKLET_SOFTIRQ,
SCHED_SOFTIRQ,
-#ifdef CONFIG_HIGH_RES_TIMERS
- HRTIMER_SOFTIRQ,
-#endif
+ RCU_SOFTIRQ, /* Preferably RCU should always be the last softirq */
+
+ NR_SOFTIRQS
};
/* softirq mask and active fields moved to irq_cpustat_t in
struct softirq_action
{
void (*action)(struct softirq_action *);
- void *data;
};
asmlinkage void do_softirq(void);
-extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
+asmlinkage void __do_softirq(void);
+extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
-extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
-extern void FASTCALL(raise_softirq(unsigned int nr));
+extern void raise_softirq_irqoff(unsigned int nr);
+extern void raise_softirq(unsigned int nr);
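/*
 * A sketch of the new open_softirq() contract now that the ->data
 * pointer is gone: the handler receives only the softirq_action and
 * keeps its state in per-cpu or global data. "MY_SOFTIRQ" and
 * "my_softirq_action" are hypothetical; a real user would add its
 * entry to the enum above, keeping RCU_SOFTIRQ last.
 */
static void my_softirq_action(struct softirq_action *h)
{
	/* drain per-cpu work here; 'h' no longer carries a data pointer */
}

static void __init my_softirq_init(void)
{
	open_softirq(MY_SOFTIRQ, my_softirq_action);
	raise_softirq(MY_SOFTIRQ);	/* runs on this cpu at the next softirq point */
}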
+/* This is the worklist that queues up per-cpu softirq work.
+ *
+ * send_remote_sendirq() adds work to these lists, and
+ * the softirq handler itself dequeues from them. The queues
+ * are protected by disabling local cpu interrupts and they must
+ * only be accessed by the local cpu that they are for.
+ */
+DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
+
+/* Try to send a softirq to a remote cpu. If this cannot be done, the
+ * work will be queued to the local cpu.
+ */
+extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);
+
+/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
+ * and compute the current cpu, passed in as 'this_cpu'.
+ */
+extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
+ int this_cpu, int softirq);
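/*
 * A usage sketch, reusing the hypothetical MY_SOFTIRQ above: embed a
 * struct call_single_data in the unit of work and hand it to
 * send_remote_softirq(). The csd must stay allocated until the
 * softirq action has dequeued it from softirq_work_list.
 */
struct my_work {
	struct call_single_data csd;
	/* ... request-specific fields ... */
};

static void queue_my_work_on(struct my_work *w, int cpu)
{
	/* MY_SOFTIRQ's action is expected to walk its
	 * softirq_work_list entry and process each queued csd */
	send_remote_softirq(&w->csd, cpu, MY_SOFTIRQ);
}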
/* Tasklets --- multithreaded analogue of BHs.
#define tasklet_unlock(t) do { } while (0)
#endif
-extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t));
+extern void __tasklet_schedule(struct tasklet_struct *t);
static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}
-extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t));
+extern void __tasklet_hi_schedule(struct tasklet_struct *t);
static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}
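/*
 * A minimal tasklet sketch tying the helpers above together;
 * "my_tasklet_func" and "my_irq_handler" are hypothetical. The
 * TASKLET_STATE_SCHED test-and-set in tasklet_schedule() guarantees
 * the tasklet is queued at most once until it runs.
 */
static void my_tasklet_func(unsigned long data)
{
	/* deferred work, runs from TASKLET_SOFTIRQ context */
}

static DECLARE_TASKLET(my_tasklet, my_tasklet_func, 0);

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	tasklet_schedule(&my_tasklet);	/* defer the heavy lifting */
	return IRQ_HANDLED;
}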
#endif
+int show_interrupts(struct seq_file *p, void *v);
+
#endif