X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=include%2Flinux%2Fhardirq.h;h=9b70b9231693fc07a446f892220d2c91a6300672;hb=64db4cfff99c04cd5f550357edcc8780f96b54a2;hp=5912874ca83c829494775fc479fed5ebb664cca3;hpb=67bc4eb0b1140a4bf364f2dcca152be659ed9057;p=safe%2Fjmp%2Flinux-2.6

diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 5912874..9b70b92 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -1,9 +1,9 @@
 #ifndef LINUX_HARDIRQ_H
 #define LINUX_HARDIRQ_H
 
-#include <linux/config.h>
 #include <linux/preempt.h>
 #include <linux/smp_lock.h>
+#include <linux/lockdep.h>
 #include <asm/hardirq.h>
 #include <asm/system.h>
 
@@ -28,11 +28,16 @@
 #ifndef HARDIRQ_BITS
 #define HARDIRQ_BITS	12
+
+#ifndef MAX_HARDIRQS_PER_CPU
+#define MAX_HARDIRQS_PER_CPU NR_IRQS
+#endif
+
 /*
  * The hardirq mask has to be large enough to have space for potentially
  * all IRQ sources in the system nesting on a single CPU.
  */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
+#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU
 # error HARDIRQ_BITS is too low!
 #endif
 #endif
 
@@ -67,12 +72,30 @@
 #define in_softirq()		(softirq_count())
 #define in_interrupt()		(irq_count())
 
-#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
-# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
+#if defined(CONFIG_PREEMPT)
+# define PREEMPT_INATOMIC_BASE kernel_locked()
+# define PREEMPT_CHECK_OFFSET 1
 #else
-# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)
+# define PREEMPT_INATOMIC_BASE 0
+# define PREEMPT_CHECK_OFFSET 0
 #endif
 
+/*
+ * Are we running in atomic context? WARNING: this macro cannot
+ * always detect atomic context; in particular, it cannot know about
+ * held spinlocks in non-preemptible kernels. Thus it should not be
+ * used in the general case to determine whether sleeping is possible.
+ * Do not use in_atomic() in driver code.
+ */
+#define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_INATOMIC_BASE)
+
+/*
+ * Check whether we were atomic before we did preempt_disable():
+ * (used by the scheduler, *after* releasing the kernel lock)
+ */
+#define in_atomic_preempt_off() \
+		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
+
 #ifdef CONFIG_PREEMPT
 # define preemptible()	(preempt_count() == 0 && !irqs_disabled())
 # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
@@ -87,25 +110,60 @@ extern void synchronize_irq(unsigned int irq);
 # define synchronize_irq(irq)	barrier()
 #endif
 
-#define nmi_enter()		irq_enter()
-#define nmi_exit()		sub_preempt_count(HARDIRQ_OFFSET)
+struct task_struct;
 
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
-static inline void account_user_vtime(struct task_struct *tsk)
-{
-}
-
 static inline void account_system_vtime(struct task_struct *tsk)
 {
 }
 #endif
 
-#define irq_enter()					\
+#if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU)
+extern void rcu_irq_enter(void);
+extern void rcu_irq_exit(void);
+extern void rcu_nmi_enter(void);
+extern void rcu_nmi_exit(void);
+#else
+# define rcu_irq_enter() do { } while (0)
+# define rcu_irq_exit() do { } while (0)
+# define rcu_nmi_enter() do { } while (0)
+# define rcu_nmi_exit() do { } while (0)
+#endif /* #if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) */
+
+/*
+ * It is safe to do non-atomic ops on ->hardirq_context,
+ * because NMI handlers may not preempt and the ops are
+ * always balanced, so the interrupted value of ->hardirq_context
+ * will always be restored.
+ */
+#define __irq_enter()					\
 	do {						\
 		account_system_vtime(current);		\
 		add_preempt_count(HARDIRQ_OFFSET);	\
+		trace_hardirq_enter();			\
+	} while (0)
+
+/*
+ * Enter irq context (on NO_HZ, update jiffies):
+ */
+extern void irq_enter(void);
+
+/*
+ * Exit irq context without processing softirqs:
+ */
+#define __irq_exit()					\
+	do {						\
+		trace_hardirq_exit();			\
+		account_system_vtime(current);		\
+		sub_preempt_count(HARDIRQ_OFFSET);	\
 	} while (0)
 
+/*
+ * Exit irq context and process softirqs if needed:
+ */
 extern void irq_exit(void);
 
+#define nmi_enter()		do { lockdep_off(); rcu_nmi_enter(); __irq_enter(); } while (0)
+#define nmi_exit()		do { __irq_exit(); rcu_nmi_exit(); lockdep_on(); } while (0)
+
 #endif /* LINUX_HARDIRQ_H */
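
For context on the new nmi_enter()/nmi_exit() pair: it is meant to bracket an architecture's NMI handler, turning lockdep off first on entry (lockdep is not NMI-safe) and back on last on exit, with the RCU NMI bookkeeping and the preempt-count/hardirq accounting nested inside. A minimal sketch of a caller follows; the names arch_handle_nmi() and do_nmi_work() are hypothetical and not part of this patch:

	#include <linux/hardirq.h>

	/* Hypothetical NMI-context work: may not sleep or take lockdep-tracked locks. */
	static void do_nmi_work(void)
	{
	}

	/* Hypothetical arch NMI entry point. */
	void arch_handle_nmi(void)
	{
		nmi_enter();	/* expands to: lockdep_off(); rcu_nmi_enter(); __irq_enter(); */
		do_nmi_work();
		nmi_exit();	/* expands to: __irq_exit(); rcu_nmi_exit(); lockdep_on(); */
	}

Note that nmi_exit() unwinds in exactly the reverse order of nmi_enter(), so the preempt count, RCU's NMI nesting state, and lockdep state are each restored even when the NMI interrupted an irq_enter()/irq_exit() section; this balance is what makes the non-atomic updates to ->hardirq_context safe, as the comment in the diff explains.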