#ifdef __KERNEL__
+#include <asm/memory.h>
#define CPU_ARCH_UNKNOWN 0
#define CPU_ARCH_ARMv3 1
#define CPU_ARCH_ARMv5TE 6
#define CPU_ARCH_ARMv5TEJ 7
#define CPU_ARCH_ARMv6 8
+#define CPU_ARCH_ARMv7 9
/*
 * CR1 bits (CP#15 CR1)
 */
#define CPUID_TCM 2
#define CPUID_TLBTYPE 3
+#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg) \
({ \
unsigned int __val; \
asm("mrc p15, 0, %0, c0, c0, " __stringify(reg) \
    : "=r" (__val) \
    : \
    : "cc"); \
__val; \
})
+#else
+#define read_cpuid(reg) (processor_id)
+#endif
/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences.
 */
#ifndef __ASSEMBLY__
#include <linux/linkage.h>
+#include <linux/stringify.h>
+#include <linux/irqflags.h>
+
+/*
+ * The CPU ID never changes at run time, so we might as well tell the
+ * compiler that it's constant. Use this function to read the CPU ID
+ * rather than directly reading processor_id or read_cpuid().
+ */
+static inline unsigned int read_cpuid_id(void) __attribute_const__;
+
+static inline unsigned int read_cpuid_id(void)
+{
+ return read_cpuid(CPUID_ID);
+}
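+
+/*
+ * Illustrative use only (not part of this patch): the value returned by
+ * read_cpuid_id() is the main ID register, whose top byte (bits 31..24)
+ * holds the implementer code, e.g. 0x41 for ARM Ltd:
+ *
+ *	unsigned int implementer = read_cpuid_id() >> 24;
+ */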
+
+#define __exception __attribute__((section(".exception.text")))
struct thread_info;
struct task_struct;
struct pt_regs;
void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));
struct siginfo;
-void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
+void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
unsigned long err, unsigned long trap);
void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
		struct pt_regs *), int sig, const char *name);
#define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
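+/*
+ * Illustrative note (not part of this patch): the tas() macro removed
+ * just below was an atomic test-and-set built directly on xchg();
+ * xchg() stores the new value and returns the previous one:
+ *
+ *	int was_set = xchg(&flag, 1);	(non-zero if flag was already set)
+ */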
-#define tas(ptr) (xchg((ptr),1))
-
extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
extern int cpu_architecture(void);
extern void cpu_init(void);
+void arm_machine_restart(char mode);
+extern void (*arm_pm_restart)(char str);
+
/*
* Intel's XScale3 core supports some v6 features (supersections, L2)
* but advertises itself as v5 as it does not support the v6 ISA. For
#define cpu_is_xscale() 1
#endif
-#define set_cr(x) \
- __asm__ __volatile__( \
- "mcr p15, 0, %0, c1, c0, 0 @ set CR" \
- : : "r" (x) : "cc")
-
-#define get_cr() \
- ({ \
- unsigned int __val; \
- __asm__ __volatile__( \
- "mrc p15, 0, %0, c1, c0, 0 @ get CR" \
- : "=r" (__val) : : "cc"); \
- __val; \
- })
-
-extern unsigned long cr_no_alignment; /* defined in entry-armv.S */
-extern unsigned long cr_alignment; /* defined in entry-armv.S */
-
#define UDBG_UNDEFINED (1 << 0)
#define UDBG_SYSCALL (1 << 1)
#define UDBG_BADABORT (1 << 2)
#define vectors_high() (0)
#endif
-#if __LINUX_ARM_ARCH__ >= 6
-#define mb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
- : : "r" (0) : "memory")
+#if __LINUX_ARM_ARCH__ >= 7
+#define isb() __asm__ __volatile__ ("isb" : : : "memory")
+#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
+#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
+#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
+#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
+ : : "r" (0) : "memory")
+#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+ : : "r" (0) : "memory")
+#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
+ : : "r" (0) : "memory")
+#else
+#define isb() __asm__ __volatile__ ("" : : : "memory")
+#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+ : : "r" (0) : "memory")
+#define dmb() __asm__ __volatile__ ("" : : : "memory")
+#endif
+
+#ifndef CONFIG_SMP
+#define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#define wmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
#else
-#define mb() __asm__ __volatile__ ("" : : : "memory")
+#define mb() dmb()
+#define rmb() dmb()
+#define wmb() dmb()
+#define smp_mb() dmb()
+#define smp_rmb() dmb()
+#define smp_wmb() dmb()
#endif
-#define rmb() mb()
-#define wmb() mb()
-#define read_barrier_depends() do { } while(0)
-#define set_mb(var, value) do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+#define read_barrier_depends() do { } while(0)
+#define smp_read_barrier_depends() do { } while(0)
+
+#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
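+
+/*
+ * Illustrative use only (not part of this patch): a typical pairing of
+ * the SMP barriers defined above, with a producer publishing data and a
+ * consumer reading it:
+ *
+ *	writer:				reader:
+ *		data = value;			while (!flag)
+ *		smp_wmb();				cpu_relax();
+ *		flag = 1;			smp_rmb();
+ *						use(data);
+ */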
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
+extern unsigned long cr_no_alignment; /* defined in entry-armv.S */
+extern unsigned long cr_alignment; /* defined in entry-armv.S */
+
+static inline unsigned int get_cr(void)
+{
+ unsigned int val;
+ asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc");
+ return val;
+}
+
+static inline void set_cr(unsigned int val)
+{
+ asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR"
+ : : "r" (val) : "cc");
+ isb();
+}
+
+#ifndef CONFIG_SMP
+extern void adjust_cr(unsigned long mask, unsigned long set);
+#endif
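+
+/*
+ * Illustrative sketch (not part of this patch): adjust_cr() is a
+ * read-modify-write of the control register built on the accessors
+ * above, roughly:
+ *
+ *	set_cr((get_cr() & ~mask) | (set & mask));
+ */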
+
+#define CPACC_FULL(n) (3 << (n * 2))
+#define CPACC_SVC(n) (1 << (n * 2))
+#define CPACC_DISABLE(n) (0 << (n * 2))
+
+static inline unsigned int get_copro_access(void)
+{
+ unsigned int val;
+ asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
+ : "=r" (val) : : "cc");
+ return val;
+}
+
+static inline void set_copro_access(unsigned int val)
+{
+ asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
+ : : "r" (val) : "cc");
+ isb();
+}
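+
+/*
+ * Illustrative use only (not part of this patch): granting full (user
+ * and kernel) access to the VFP coprocessors CP10 and CP11 would look
+ * like:
+ *
+ *	set_copro_access(get_copro_access() | CPACC_FULL(10) | CPACC_FULL(11));
+ */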
+
/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

#define switch_to(prev,next,last) \
do { \
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
-/*
- * CPU interrupt mask handling.
- */
-#if __LINUX_ARM_ARCH__ >= 6
-
-#define local_irq_save(x) \
- ({ \
- __asm__ __volatile__( \
- "mrs %0, cpsr @ local_irq_save\n" \
- "cpsid i" \
- : "=r" (x) : : "memory", "cc"); \
- })
-
-#define local_irq_enable() __asm__("cpsie i @ __sti" : : : "memory", "cc")
-#define local_irq_disable() __asm__("cpsid i @ __cli" : : : "memory", "cc")
-#define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc")
-#define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc")
-
-#else
-
-/*
- * Save the current interrupt enable state & disable IRQs
- */
-#define local_irq_save(x) \
- ({ \
- unsigned long temp; \
- (void) (&temp == &x); \
- __asm__ __volatile__( \
- "mrs %0, cpsr @ local_irq_save\n" \
-" orr %1, %0, #128\n" \
-" msr cpsr_c, %1" \
- : "=r" (x), "=r" (temp) \
- : \
- : "memory", "cc"); \
- })
-
-/*
- * Enable IRQs
- */
-#define local_irq_enable() \
- ({ \
- unsigned long temp; \
- __asm__ __volatile__( \
- "mrs %0, cpsr @ local_irq_enable\n" \
-" bic %0, %0, #128\n" \
-" msr cpsr_c, %0" \
- : "=r" (temp) \
- : \
- : "memory", "cc"); \
- })
-
-/*
- * Disable IRQs
- */
-#define local_irq_disable() \
- ({ \
- unsigned long temp; \
- __asm__ __volatile__( \
- "mrs %0, cpsr @ local_irq_disable\n" \
-" orr %0, %0, #128\n" \
-" msr cpsr_c, %0" \
- : "=r" (temp) \
- : \
- : "memory", "cc"); \
- })
-
-/*
- * Enable FIQs
- */
-#define local_fiq_enable() \
- ({ \
- unsigned long temp; \
- __asm__ __volatile__( \
- "mrs %0, cpsr @ stf\n" \
-" bic %0, %0, #64\n" \
-" msr cpsr_c, %0" \
- : "=r" (temp) \
- : \
- : "memory", "cc"); \
- })
-
-/*
- * Disable FIQs
- */
-#define local_fiq_disable() \
- ({ \
- unsigned long temp; \
- __asm__ __volatile__( \
- "mrs %0, cpsr @ clf\n" \
-" orr %0, %0, #64\n" \
-" msr cpsr_c, %0" \
- : "=r" (temp) \
- : \
- : "memory", "cc"); \
- })
-
-#endif
-
-/*
- * Save the current interrupt enable state.
- */
-#define local_save_flags(x) \
- ({ \
- __asm__ __volatile__( \
- "mrs %0, cpsr @ local_save_flags" \
- : "=r" (x) : : "memory", "cc"); \
- })
-
-/*
- * restore saved IRQ & FIQ state
- */
-#define local_irq_restore(x) \
- __asm__ __volatile__( \
- "msr cpsr_c, %0 @ local_irq_restore\n" \
- : \
- : "r" (x) \
- : "memory", "cc")
-
-#define irqs_disabled() \
-({ \
- unsigned long flags; \
- local_save_flags(flags); \
- (int)(flags & PSR_I_BIT); \
-})
-
-#ifdef CONFIG_SMP
-
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_read_barrier_depends() read_barrier_depends()
-
-#else
-
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while(0)
-
-#endif /* CONFIG_SMP */
-
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
* On the StrongARM, "swp" is terminally broken since it bypasses the
#error SMP is not supported on this platform
#endif
case 1:
- local_irq_save(flags);
+ raw_local_irq_save(flags);
ret = *(volatile unsigned char *)ptr;
*(volatile unsigned char *)ptr = x;
- local_irq_restore(flags);
+ raw_local_irq_restore(flags);
break;
case 4:
- local_irq_save(flags);
+ raw_local_irq_save(flags);
ret = *(volatile unsigned long *)ptr;
*(volatile unsigned long *)ptr = x;
- local_irq_restore(flags);
+ raw_local_irq_restore(flags);
break;
#else
case 1:
extern void disable_hlt(void);
extern void enable_hlt(void);
+#include <asm-generic/cmpxchg-local.h>
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+ (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+#ifndef CONFIG_SMP
+#include <asm-generic/cmpxchg.h>
+#endif
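+
+/*
+ * Illustrative use only (not part of this patch): cmpxchg_local() takes
+ * the pointer, the expected old value and the new value, and returns
+ * the value actually found, so an optimistic increment looks like:
+ *
+ *	old = v;
+ *	while ((prev = cmpxchg_local(&v, old, old + 1)) != old)
+ *		old = prev;
+ */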
+
#endif /* __ASSEMBLY__ */
#define arch_align_stack(x) (x)