arch/x86/include/asm/hardirq_64.h
#ifndef _ASM_X86_HARDIRQ_64_H
#define _ASM_X86_HARDIRQ_64_H

#include <linux/threads.h>
#include <linux/irq.h>
#include <asm/apic.h>

typedef struct {
	unsigned int __softirq_pending;
	unsigned int __nmi_count;	/* arch dependent */
	unsigned int apic_timer_irqs;	/* arch dependent */
	unsigned int apic_perf_irqs;	/* arch dependent */
	unsigned int irq0_irqs;
	unsigned int irq_resched_count;
	unsigned int irq_call_count;
	unsigned int irq_tlb_count;
	unsigned int irq_thermal_count;
	unsigned int irq_spurious_count;
	unsigned int irq_threshold_count;
} ____cacheline_aligned irq_cpustat_t;

DECLARE_PER_CPU(irq_cpustat_t, irq_stat);

/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
#define MAX_HARDIRQS_PER_CPU NR_VECTORS

#define __ARCH_IRQ_STAT 1

#define inc_irq_stat(member)	percpu_add(irq_stat.member, 1)

#define local_softirq_pending()	percpu_read(irq_stat.__softirq_pending)

#define __ARCH_SET_SOFTIRQ_PENDING 1

#define set_softirq_pending(x)	percpu_write(irq_stat.__softirq_pending, (x))
#define or_softirq_pending(x)	percpu_or(irq_stat.__softirq_pending, (x))

extern void ack_bad_irq(unsigned int irq);

#endif /* _ASM_X86_HARDIRQ_64_H */
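
/*
 * Usage sketch (not part of the original header): a minimal, hypothetical
 * interrupt-handler fragment showing how the per-cpu counters declared above
 * are typically bumped with inc_irq_stat() and how the per-cpu softirq mask
 * is read with local_softirq_pending().  The function name below is purely
 * illustrative; it is compiled out via #if 0.
 */
#if 0	/* example only */
static void example_timer_interrupt(void)
{
	/* percpu_add(irq_stat.apic_timer_irqs, 1) on the current cpu */
	inc_irq_stat(apic_timer_irqs);

	/* percpu_read(irq_stat.__softirq_pending) for the current cpu */
	if (local_softirq_pending()) {
		/* pending softirqs would normally be run on irq exit */
	}
}
#endif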