X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=include%2Fasm-arm%2Fsystem.h;h=6335de9a2bb384b5f8b6f83fdd832402e123eb89;hb=3200f405a1e8e06c8634f11d33614455baa4e6be;hp=cdf49f442fd220db66c2864e3fd6c14168b470ce;hpb=053a7b5b7617a72d7c61b6f84196d1c0f79b9849;p=safe%2Fjmp%2Flinux-2.6 diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h index cdf49f4..6335de9 100644 --- a/include/asm-arm/system.h +++ b/include/asm-arm/system.h @@ -3,7 +3,7 @@ #ifdef __KERNEL__ -#include +#include #define CPU_ARCH_UNKNOWN 0 #define CPU_ARCH_ARMv3 1 @@ -14,6 +14,7 @@ #define CPU_ARCH_ARMv5TE 6 #define CPU_ARCH_ARMv5TEJ 7 #define CPU_ARCH_ARMv6 8 +#define CPU_ARCH_ARMv7 9 /* * CR1 bits (CP#15 CR1) @@ -47,6 +48,7 @@ #define CPUID_TCM 2 #define CPUID_TLBTYPE 3 +#ifdef CONFIG_CPU_CP15 #define read_cpuid(reg) \ ({ \ unsigned int __val; \ @@ -56,6 +58,9 @@ : "cc"); \ __val; \ }) +#else +#define read_cpuid(reg) (processor_id) +#endif /* * This is used to ensure the compiler did actually allocate the register we @@ -70,6 +75,22 @@ #ifndef __ASSEMBLY__ #include +#include +#include + +/* + * The CPU ID never changes at run time, so we might as well tell the + * compiler that it's constant. Use this function to read the CPU ID + * rather than directly reading processor_id or read_cpuid() directly. + */ +static inline unsigned int read_cpuid_id(void) __attribute_const__; + +static inline unsigned int read_cpuid_id(void) +{ + return read_cpuid(CPUID_ID); +} + +#define __exception __attribute__((section(".exception.text"))) struct thread_info; struct task_struct; @@ -85,43 +106,54 @@ struct pt_regs; void die(const char *msg, struct pt_regs *regs, int err) __attribute__((noreturn)); -void die_if_kernel(const char *str, struct pt_regs *regs, int err); +struct siginfo; +void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, + unsigned long err, unsigned long trap); void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *), int sig, const char *name); -#include - #define xchg(ptr,x) \ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) -#define tas(ptr) (xchg((ptr),1)) - extern asmlinkage void __backtrace(void); extern asmlinkage void c_backtrace(unsigned long fp, int pmode); + +struct mm_struct; extern void show_pte(struct mm_struct *mm, unsigned long addr); extern void __show_regs(struct pt_regs *); extern int cpu_architecture(void); extern void cpu_init(void); -#define set_cr(x) \ - __asm__ __volatile__( \ - "mcr p15, 0, %0, c1, c0, 0 @ set CR" \ - : : "r" (x) : "cc") - -#define get_cr() \ - ({ \ - unsigned int __val; \ - __asm__ __volatile__( \ - "mrc p15, 0, %0, c1, c0, 0 @ get CR" \ - : "=r" (__val) : : "cc"); \ - __val; \ - }) +void arm_machine_restart(char mode); +extern void (*arm_pm_restart)(char str); -extern unsigned long cr_no_alignment; /* defined in entry-armv.S */ -extern unsigned long cr_alignment; /* defined in entry-armv.S */ +/* + * Intel's XScale3 core supports some v6 features (supersections, L2) + * but advertises itself as v5 as it does not support the v6 ISA. For + * this reason, we need a way to explicitly test for this type of CPU. 
+ */ +#ifndef CONFIG_CPU_XSC3 +#define cpu_is_xsc3() 0 +#else +static inline int cpu_is_xsc3(void) +{ + extern unsigned int processor_id; + + if ((processor_id & 0xffffe000) == 0x69056000) + return 1; + + return 0; +} +#endif + +#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3) +#define cpu_is_xscale() 0 +#else +#define cpu_is_xscale() 1 +#endif #define UDBG_UNDEFINED (1 << 0) #define UDBG_SYSCALL (1 << 1) @@ -137,14 +169,85 @@ extern unsigned int user_debug; #define vectors_high() (0) #endif -#define mb() __asm__ __volatile__ ("" : : : "memory") -#define rmb() mb() -#define wmb() mb() -#define read_barrier_depends() do { } while(0) -#define set_mb(var, value) do { var = value; mb(); } while (0) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) +#if __LINUX_ARM_ARCH__ >= 7 +#define isb() __asm__ __volatile__ ("isb" : : : "memory") +#define dsb() __asm__ __volatile__ ("dsb" : : : "memory") +#define dmb() __asm__ __volatile__ ("dmb" : : : "memory") +#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6 +#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ + : : "r" (0) : "memory") +#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ + : : "r" (0) : "memory") +#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \ + : : "r" (0) : "memory") +#else +#define isb() __asm__ __volatile__ ("" : : : "memory") +#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ + : : "r" (0) : "memory") +#define dmb() __asm__ __volatile__ ("" : : : "memory") +#endif + +#ifndef CONFIG_SMP +#define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) +#define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) +#define wmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#else +#define mb() dmb() +#define rmb() dmb() +#define wmb() dmb() +#define smp_mb() dmb() +#define smp_rmb() dmb() +#define smp_wmb() dmb() +#endif +#define read_barrier_depends() do { } while(0) +#define smp_read_barrier_depends() do { } while(0) + +#define set_mb(var, value) do { var = value; smp_mb(); } while (0) #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); +extern unsigned long cr_no_alignment; /* defined in entry-armv.S */ +extern unsigned long cr_alignment; /* defined in entry-armv.S */ + +static inline unsigned int get_cr(void) +{ + unsigned int val; + asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc"); + return val; +} + +static inline void set_cr(unsigned int val) +{ + asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR" + : : "r" (val) : "cc"); + isb(); +} + +#ifndef CONFIG_SMP +extern void adjust_cr(unsigned long mask, unsigned long set); +#endif + +#define CPACC_FULL(n) (3 << (n * 2)) +#define CPACC_SVC(n) (1 << (n * 2)) +#define CPACC_DISABLE(n) (0 << (n * 2)) + +static inline unsigned int get_copro_access(void) +{ + unsigned int val; + asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access" + : "=r" (val) : : "cc"); + return val; +} + +static inline void set_copro_access(unsigned int val) +{ + asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access" + : : "r" (val) : "cc"); + isb(); +} + /* * switch_mm() may do a full cache flush over the context switch, * so enable interrupts over the context switch to avoid high @@ -161,150 +264,9 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info #define switch_to(prev,next,last) \ do { \ - last = 
__switch_to(prev,prev->thread_info,next->thread_info); \ + last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ } while (0) -/* - * CPU interrupt mask handling. - */ -#if __LINUX_ARM_ARCH__ >= 6 - -#define local_irq_save(x) \ - ({ \ - __asm__ __volatile__( \ - "mrs %0, cpsr @ local_irq_save\n" \ - "cpsid i" \ - : "=r" (x) : : "memory", "cc"); \ - }) - -#define local_irq_enable() __asm__("cpsie i @ __sti" : : : "memory", "cc") -#define local_irq_disable() __asm__("cpsid i @ __cli" : : : "memory", "cc") -#define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc") -#define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc") - -#else - -/* - * Save the current interrupt enable state & disable IRQs - */ -#define local_irq_save(x) \ - ({ \ - unsigned long temp; \ - (void) (&temp == &x); \ - __asm__ __volatile__( \ - "mrs %0, cpsr @ local_irq_save\n" \ -" orr %1, %0, #128\n" \ -" msr cpsr_c, %1" \ - : "=r" (x), "=r" (temp) \ - : \ - : "memory", "cc"); \ - }) - -/* - * Enable IRQs - */ -#define local_irq_enable() \ - ({ \ - unsigned long temp; \ - __asm__ __volatile__( \ - "mrs %0, cpsr @ local_irq_enable\n" \ -" bic %0, %0, #128\n" \ -" msr cpsr_c, %0" \ - : "=r" (temp) \ - : \ - : "memory", "cc"); \ - }) - -/* - * Disable IRQs - */ -#define local_irq_disable() \ - ({ \ - unsigned long temp; \ - __asm__ __volatile__( \ - "mrs %0, cpsr @ local_irq_disable\n" \ -" orr %0, %0, #128\n" \ -" msr cpsr_c, %0" \ - : "=r" (temp) \ - : \ - : "memory", "cc"); \ - }) - -/* - * Enable FIQs - */ -#define local_fiq_enable() \ - ({ \ - unsigned long temp; \ - __asm__ __volatile__( \ - "mrs %0, cpsr @ stf\n" \ -" bic %0, %0, #64\n" \ -" msr cpsr_c, %0" \ - : "=r" (temp) \ - : \ - : "memory", "cc"); \ - }) - -/* - * Disable FIQs - */ -#define local_fiq_disable() \ - ({ \ - unsigned long temp; \ - __asm__ __volatile__( \ - "mrs %0, cpsr @ clf\n" \ -" orr %0, %0, #64\n" \ -" msr cpsr_c, %0" \ - : "=r" (temp) \ - : \ - : "memory", "cc"); \ - }) - -#endif - -/* - * Save the current interrupt enable state. - */ -#define local_save_flags(x) \ - ({ \ - __asm__ __volatile__( \ - "mrs %0, cpsr @ local_save_flags" \ - : "=r" (x) : : "memory", "cc"); \ - }) - -/* - * restore saved IRQ & FIQ state - */ -#define local_irq_restore(x) \ - __asm__ __volatile__( \ - "msr cpsr_c, %0 @ local_irq_restore\n" \ - : \ - : "r" (x) \ - : "memory", "cc") - -#define irqs_disabled() \ -({ \ - unsigned long flags; \ - local_save_flags(flags); \ - (int)(flags & PSR_I_BIT); \ -}) - -#ifdef CONFIG_SMP - -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() - -#else - -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while(0) - -#endif /* CONFIG_SMP */ - #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110) /* * On the StrongARM, "swp" is terminally broken since it bypasses the @@ -321,12 +283,8 @@ do { \ * NOTE that this solution won't work on an SMP system, so explcitly * forbid it here. 
*/ -#ifdef CONFIG_SMP -#error SMP is not supported on SA1100/SA110 -#else #define swp_is_buggy #endif -#endif static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) { @@ -335,40 +293,91 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size #ifdef swp_is_buggy unsigned long flags; #endif +#if __LINUX_ARM_ARCH__ >= 6 + unsigned int tmp; +#endif switch (size) { -#ifdef swp_is_buggy - case 1: - local_irq_save(flags); - ret = *(volatile unsigned char *)ptr; - *(volatile unsigned char *)ptr = x; - local_irq_restore(flags); - break; - - case 4: - local_irq_save(flags); - ret = *(volatile unsigned long *)ptr; - *(volatile unsigned long *)ptr = x; - local_irq_restore(flags); - break; +#if __LINUX_ARM_ARCH__ >= 6 + case 1: + asm volatile("@ __xchg1\n" + "1: ldrexb %0, [%3]\n" + " strexb %1, %2, [%3]\n" + " teq %1, #0\n" + " bne 1b" + : "=&r" (ret), "=&r" (tmp) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; + case 4: + asm volatile("@ __xchg4\n" + "1: ldrex %0, [%3]\n" + " strex %1, %2, [%3]\n" + " teq %1, #0\n" + " bne 1b" + : "=&r" (ret), "=&r" (tmp) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; +#elif defined(swp_is_buggy) +#ifdef CONFIG_SMP +#error SMP is not supported on this platform +#endif + case 1: + raw_local_irq_save(flags); + ret = *(volatile unsigned char *)ptr; + *(volatile unsigned char *)ptr = x; + raw_local_irq_restore(flags); + break; + + case 4: + raw_local_irq_save(flags); + ret = *(volatile unsigned long *)ptr; + *(volatile unsigned long *)ptr = x; + raw_local_irq_restore(flags); + break; #else - case 1: __asm__ __volatile__ ("swpb %0, %1, [%2]" - : "=&r" (ret) - : "r" (x), "r" (ptr) - : "memory", "cc"); - break; - case 4: __asm__ __volatile__ ("swp %0, %1, [%2]" - : "=&r" (ret) - : "r" (x), "r" (ptr) - : "memory", "cc"); - break; + case 1: + asm volatile("@ __xchg1\n" + " swpb %0, %1, [%2]" + : "=&r" (ret) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; + case 4: + asm volatile("@ __xchg4\n" + " swp %0, %1, [%2]" + : "=&r" (ret) + : "r" (x), "r" (ptr) + : "memory", "cc"); + break; #endif - default: __bad_xchg(ptr, size), ret = 0; + default: + __bad_xchg(ptr, size), ret = 0; + break; } return ret; } +extern void disable_hlt(void); +extern void enable_hlt(void); + +#include + +/* + * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make + * them available. + */ +#define cmpxchg_local(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ + (unsigned long)(n), sizeof(*(ptr)))) +#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) + +#ifndef CONFIG_SMP +#include +#endif + #endif /* __ASSEMBLY__ */ #define arch_align_stack(x) (x)
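
A few usage sketches for the interfaces touched above follow. None of this code is part of the patch; every identifier that is not defined in the header (the example_* names, message strings) is invented for illustration, and the sketches assume a kernel tree of roughly this era. First, the new read_cpuid_id() helper together with cpu_architecture(), which decodes the ID into one of the CPU_ARCH_* values; a hypothetical init-time report might look like:

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/system.h>

/* Hypothetical example: log the CPU ID and the decoded architecture. */
static int __init example_report_cpu(void)
{
        unsigned int id = read_cpuid_id();      /* constant at run time */
        int arch = cpu_architecture();

        printk(KERN_INFO "CPU ID 0x%08x, CPU_ARCH_* value %d\n", id, arch);

        if (arch >= CPU_ARCH_ARMv6)
                printk(KERN_INFO "ldrex/strex and barrier instructions are usable\n");

        return 0;
}
arch_initcall(example_report_cpu);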
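
get_cr() and set_cr() are now inline functions rather than macros, and set_cr() (like the new set_copro_access()) ends with an isb() so the control register update takes effect before any following instruction. A harmless, read-only sketch that decodes a few of the CR1 bits defined in this header (CR_M, CR_C, CR_I) could be:

#include <linux/kernel.h>
#include <asm/system.h>

/* Hypothetical: report whether the MMU, D-cache and I-cache enables are set. */
static void example_show_control_reg(void)
{
        unsigned int cr = get_cr();

        printk(KERN_INFO "CR1 = 0x%08x (MMU %s, D-cache %s, I-cache %s)\n",
               cr,
               (cr & CR_M) ? "on" : "off",
               (cr & CR_C) ? "on" : "off",
               (cr & CR_I) ? "on" : "off");
}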
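
The reworked barrier block distinguishes the mandatory mb()/rmb()/wmb() (which need a real dmb() on SMP, or on UP when arch_is_coherent() reports coherent DMA) from the smp_*() variants, which collapse to compiler barriers on UP builds. The usual flag-plus-payload publication pattern shows where the SMP barriers sit; the variable and function names are made up:

#include <asm/system.h>

static int example_payload;
static int example_ready;

/* Writer: make the payload visible before announcing it. */
static void example_publish(int value)
{
        example_payload = value;
        smp_wmb();              /* order the payload store before the flag store */
        example_ready = 1;
}

/* Reader: only look at the payload after seeing the flag. */
static int example_consume(void)
{
        if (!example_ready)
                return -1;
        smp_rmb();              /* order the flag load before the payload load */
        return example_payload;
}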
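
__xchg() keeps the same calling convention but now has three implementations: an ldrex/strex loop on ARMv6 and later, an interrupts-off fallback where swp is unusable (SA1100/SA110, now rejected at compile time on SMP), and plain swp/swpb elsewhere. The removed tas() wrapper was simply xchg(ptr, 1), so a minimal test-and-set style lock built on xchg() would look roughly like the sketch below; real code should use the kernel's spinlock primitives instead:

#include <asm/processor.h>      /* cpu_relax() */
#include <asm/system.h>

static volatile unsigned long example_lock_word;

static void example_lock(void)
{
        /* Loop until the old value was 0, i.e. this caller stored the 1. */
        while (xchg(&example_lock_word, 1) != 0)
                cpu_relax();
        smp_mb();       /* acquire ordering: the ldrex/strex loop has no barrier */
}

static void example_unlock(void)
{
        smp_mb();       /* release ordering before letting others in */
        example_lock_word = 0;
}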
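
Finally, cmpxchg_local() and cmpxchg64_local() are mapped onto the asm-generic fallbacks, which are only atomic with respect to the current CPU, so they only suit data that no other processor writes. A hypothetical counter kept private to each CPU (a flat array indexed by CPU number keeps the sketch simple; real code would use the per-CPU API):

#include <linux/smp.h>          /* get_cpu()/put_cpu() */
#include <linux/threads.h>      /* NR_CPUS */
#include <asm/system.h>

static unsigned long example_counters[NR_CPUS];

/* Hypothetical: bump this CPU's private counter without taking a lock. */
static void example_count_event(void)
{
        unsigned long *ctr = &example_counters[get_cpu()];
        unsigned long old;

        do {
                old = *ctr;
        } while (cmpxchg_local(ctr, old, old + 1) != old);

        put_cpu();
}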