/*
* Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
*/
-#ifndef __PPC_SYSTEM_H
-#define __PPC_SYSTEM_H
+#ifndef _ASM_POWERPC_SYSTEM_H
+#define _ASM_POWERPC_SYSTEM_H
-#include <linux/config.h>
#include <linux/kernel.h>
+#include <linux/irqflags.h>
#include <asm/hw_irq.h>
-#include <asm/ppc_asm.h>
/*
* Memory barrier.
*
* We have to use the sync instructions for mb(), since lwsync doesn't
* order loads with respect to previous stores. Lwsync is fine for
- * rmb(), though. Note that lwsync is interpreted as sync by
- * 32-bit and older 64-bit CPUs.
+ * rmb(), though. Note that rmb() actually uses a sync on 32-bit
+ * architectures.
*
* For wmb(), we use sync since wmb is used in drivers to order
* stores to system memory with respect to writes to the device.
- * However, smp_wmb() can be a lighter-weight eieio barrier on
- * SMP since it is only used to order updates to system memory.
+ * However, smp_wmb() can be a lighter-weight lwsync or eieio barrier
+ * on SMP since it is only used to order updates to system memory.
*/
#define mb() __asm__ __volatile__ ("sync" : : : "memory")
-#define rmb() __asm__ __volatile__ ("lwsync" : : : "memory")
+#define rmb()  __asm__ __volatile__ (__stringify(LWSYNC) : : : "memory")
#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value) do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+#ifdef __KERNEL__
+#define AT_VECTOR_SIZE_ARCH 6 /* entries in ARCH_DLINFO */
#ifdef CONFIG_SMP
+
+#ifdef __SUBARCH_HAS_LWSYNC
+# define SMPWMB lwsync
+#else
+# define SMPWMB eieio
+#endif
+
#define smp_mb() mb()
#define smp_rmb() rmb()
-#define smp_wmb() __asm__ __volatile__ ("eieio" : : : "memory")
+#define smp_wmb() __asm__ __volatile__ (__stringify(SMPWMB) : : :"memory")
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif /* CONFIG_SMP */
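
/*
 * Pairing sketch (illustrative only; 'buf' and 'ready' are hypothetical
 * variables, not part of this header). smp_wmb() keeps the producer's
 * data store ahead of its flag store; smp_rmb() keeps the consumer's
 * flag load ahead of its data loads:
 *
 *	CPU 0 (producer)		CPU 1 (consumer)
 *	buf = fill_buffer();		while (!ready)
 *	smp_wmb();				cpu_relax();
 *	ready = 1;			smp_rmb();
 *					consume(buf);
 */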
-#ifdef __KERNEL__
+/*
+ * This is a barrier which prevents following instructions from being
+ * started until the value of the argument x is known. For example, if
+ * x is a variable loaded from memory, this prevents following
+ * instructions from being executed until the load has been performed.
+ */
+#define data_barrier(x) \
+ asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
+
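/*
 * Usage sketch (illustrative; 'seq' and 'table' are hypothetical):
 *
 *	idx = *seq;		// load an index from memory
 *	data_barrier(idx);	// stall until idx is actually known
 *	val = table[idx];	// this load cannot start early
 *
 * The trap never fires (the trap-to field is 0), but the twi/isync
 * pair keeps later instructions from issuing until the register
 * holding idx has its value.
 */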
struct task_struct;
struct pt_regs;
-#ifdef CONFIG_DEBUGGER
+#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
extern int (*__debugger)(struct pt_regs *regs);
extern int (*__debugger_ipi)(struct pt_regs *regs);
DEBUGGER_BOILERPLATE(debugger_dabr_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)
-#ifdef CONFIG_XMON
-extern void xmon_init(int enable);
-#endif
-
#else
static inline int debugger(struct pt_regs *regs) { return 0; }
static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
#endif
extern int set_dabr(unsigned long dabr);
+extern void do_dabr(struct pt_regs *regs, unsigned long address,
+ unsigned long error_code);
extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs * regs);
extern void flush_instruction_cache(void);
#endif
extern void via_cuda_init(void);
-extern void pmac_nvram_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
+extern void disable_kernel_fp(void);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_altivec(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
+extern int emulate_altivec(struct pt_regs *);
+extern void __giveup_vsx(struct task_struct *);
+extern void giveup_vsx(struct task_struct *);
+extern void enable_kernel_spe(void);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern int fix_alignment(struct pt_regs *);
-extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
-extern void cvt_df(double *from, float *to, unsigned long *fpscr);
+extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
+extern void cvt_df(double *from, float *to, struct thread_struct *thread);
+
+#ifndef CONFIG_SMP
+extern void discard_lazy_cpu_state(void);
+#else
+static inline void discard_lazy_cpu_state(void)
+{
+}
+#endif
#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
#endif
+#ifdef CONFIG_VSX
+extern void flush_vsx_to_thread(struct task_struct *);
+#else
+static inline void flush_vsx_to_thread(struct task_struct *t)
+{
+}
+#endif
+
#ifdef CONFIG_SPE
extern void flush_spe_to_thread(struct task_struct *);
#else
static inline void flush_spe_to_thread(struct task_struct *t)
{
}
#endif

extern void bad_page_fault(struct pt_regs *, unsigned long, int);
extern int die(const char *, struct pt_regs *, long);
extern void _exception(int, struct pt_regs *, int, unsigned long);
+extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
+
#ifdef CONFIG_BOOKE_WDT
extern u32 booke_wdt_enabled;
extern u32 booke_wdt_period;
#endif /* CONFIG_BOOKE_WDT */
-/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
-extern unsigned char e2a(unsigned char);
-
struct device_node;
extern void note_scsi_host(struct device_node *, void *);
struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);
extern unsigned int rtas_data;
+extern int mem_init_done; /* set on boot once kmalloc can be called */
+extern int init_bootmem_done; /* set on !NUMA once bootmem is available */
+extern unsigned long memory_limit;
+extern unsigned long klimit;
+
+extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
+extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
+
+extern int powersave_nap; /* set if nap mode can be used in idle loop */
/*
* Atomic exchange
* Changes the memory location '*ptr' to be val and returns
* the previous value stored there.
*/
-static __inline__ unsigned long
+static __always_inline unsigned long
__xchg_u32(volatile void *p, unsigned long val)
{
unsigned long prev;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: lwarx %0,0,%2 \n"
PPC405_ERR77(0,%2)
" stwcx. %3,0,%2 \n\
bne- 1b"
ISYNC_ON_SMP
- : "=&r" (prev), "=m" (*(volatile unsigned int *)p)
- : "r" (p), "r" (val), "m" (*(volatile unsigned int *)p)
+ : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
+ : "r" (p), "r" (val)
+ : "cc", "memory");
+
+ return prev;
+}
+
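/*
 * The lwarx/stwcx. pair above is a load-linked/store-conditional loop:
 * lwarx loads the word and sets a reservation, stwcx. stores only if
 * the reservation is still held (CR0.EQ reports the outcome), and the
 * bne- retries after a lost reservation. LWSYNC_ON_SMP/ISYNC_ON_SMP
 * bracket the exchange with barriers on SMP; the _local variants below
 * omit them and so only order accesses within one CPU.
 */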
+/*
+ * Atomic exchange
+ *
+ * Changes the memory location '*ptr' to be val and returns
+ * the previous value stored there.
+ */
+static __always_inline unsigned long
+__xchg_u32_local(volatile void *p, unsigned long val)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 \n"
+ PPC405_ERR77(0,%2)
+" stwcx. %3,0,%2 \n\
+ bne- 1b"
+ : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
+ : "r" (p), "r" (val)
: "cc", "memory");
return prev;
}
#ifdef CONFIG_PPC64
-static __inline__ unsigned long
+static __always_inline unsigned long
__xchg_u64(volatile void *p, unsigned long val)
{
unsigned long prev;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: ldarx %0,0,%2 \n"
PPC405_ERR77(0,%2)
" stdcx. %3,0,%2 \n\
bne- 1b"
ISYNC_ON_SMP
- : "=&r" (prev), "=m" (*(volatile unsigned long *)p)
- : "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
+ : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
+ : "r" (p), "r" (val)
+ : "cc", "memory");
+
+ return prev;
+}
+
+static __always_inline unsigned long
+__xchg_u64_local(volatile void *p, unsigned long val)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__(
+"1: ldarx %0,0,%2 \n"
+ PPC405_ERR77(0,%2)
+" stdcx. %3,0,%2 \n\
+ bne- 1b"
+ : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
+ : "r" (p), "r" (val)
: "cc", "memory");
return prev;
*/
extern void __xchg_called_with_bad_pointer(void);
-static __inline__ unsigned long
+static __always_inline unsigned long
__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}
+static __always_inline unsigned long
+__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
+{
+ switch (size) {
+ case 4:
+ return __xchg_u32_local(ptr, x);
+#ifdef CONFIG_PPC64
+ case 8:
+ return __xchg_u64_local(ptr, x);
+#endif
+ }
+ __xchg_called_with_bad_pointer();
+ return x;
+}
#define xchg(ptr,x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
})
-#define tas(ptr) (xchg((ptr),1))
+#define xchg_local(ptr,x) \
+ ({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg_local((ptr), \
+ (unsigned long)_x_, sizeof(*(ptr))); \
+ })
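/*
 * Usage sketch (illustrative; 'lock' and 'pcpu_count' are hypothetical):
 * xchg() is a barrier-bracketed exchange, usable e.g. as a test-and-set
 * lock, while xchg_local() is only safe for data private to this CPU.
 *
 *	while (xchg(&lock, 1) != 0)	// spin until we flip 0 -> 1
 *		cpu_relax();
 *
 *	old = xchg_local(&pcpu_count, 0);	// per-CPU data, no fences
 */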
/*
* Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
#define __HAVE_ARCH_CMPXCHG 1
-static __inline__ unsigned long
+static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
unsigned int prev;
__asm__ __volatile__ (
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
cmpw 0,%0,%3\n\
bne- 2f\n"
ISYNC_ON_SMP
"\n\
2:"
- : "=&r" (prev), "=m" (*p)
- : "r" (p), "r" (old), "r" (new), "m" (*p)
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc", "memory");
+
+ return prev;
+}
+
+static __always_inline unsigned long
+__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
+ unsigned long new)
+{
+ unsigned int prev;
+
+ __asm__ __volatile__ (
+"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
+ cmpw 0,%0,%3\n\
+ bne- 2f\n"
+ PPC405_ERR77(0,%2)
+" stwcx. %4,0,%2\n\
+ bne- 1b"
+ "\n\
+2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
: "cc", "memory");
return prev;
}
#ifdef CONFIG_PPC64
-static __inline__ unsigned long
-__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
+static __always_inline unsigned long
+__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
unsigned long prev;
__asm__ __volatile__ (
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
cmpd 0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
ISYNC_ON_SMP
"\n\
2:"
- : "=&r" (prev), "=m" (*p)
- : "r" (p), "r" (old), "r" (new), "m" (*p)
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc", "memory");
+
+ return prev;
+}
+
+static __always_inline unsigned long
+__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
+ unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__ (
+"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
+ cmpd 0,%0,%3\n\
+ bne- 2f\n\
+ stdcx. %4,0,%2\n\
+ bne- 1b"
+ "\n\
+2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
: "cc", "memory");
return prev;
if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);
-static __inline__ unsigned long
+static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}
-#define cmpxchg(ptr,o,n) \
+static __always_inline unsigned long
+__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
+ unsigned int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32_local(ptr, old, new);
+#ifdef CONFIG_PPC64
+ case 8:
+ return __cmpxchg_u64_local(ptr, old, new);
+#endif
+ }
+ __cmpxchg_called_with_bad_pointer();
+ return old;
+}
+
+#define cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
})
+
+#define cmpxchg_local(ptr, o, n) \
+ ({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof(*(ptr))); \
+ })
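/*
 * Usage sketch (illustrative; 'v' and 'delta' are hypothetical): the
 * canonical compare-and-swap retry loop, here performing an atomic add.
 * cmpxchg() returns the value it found at the location; the store
 * happened iff that value equals the expected one.
 *
 *	do {
 *		old = v;
 *	} while (cmpxchg(&v, old, old + delta) != old);
 */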
+
#ifdef CONFIG_PPC64
/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
*
* Based on this we disable the IP header alignment in network drivers.
+ * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
+ * cacheline alignment of buffers.
*/
-#define NET_IP_ALIGN 0
+#define NET_IP_ALIGN 0
+#define NET_SKB_PAD L1_CACHE_BYTES
+
+#define cmpxchg64(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg((ptr), (o), (n)); \
+ })
+#define cmpxchg64_local(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg_local((ptr), (o), (n)); \
+ })
+#else
+#include <asm-generic/cmpxchg-local.h>
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif
#define arch_align_stack(x) (x)
+/* Used in very early kernel initialization. */
+extern unsigned long reloc_offset(void);
+extern unsigned long add_reloc_offset(unsigned long);
+extern void reloc_got2(unsigned long);
+
+#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))
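+
+/*
+ * Usage sketch (illustrative; 'boot_flag' is hypothetical): before the
+ * kernel has been relocated to its link address, static data must be
+ * addressed through PTRRELOC, which adds the load/link address delta
+ * returned by reloc_offset():
+ *
+ *	int *p = PTRRELOC(&boot_flag);
+ *	*p = 1;
+ */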
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void account_system_vtime(struct task_struct *);
+#endif
+
+extern struct dentry *powerpc_debugfs_root;
+
#endif /* __KERNEL__ */
-#endif /* __PPC_SYSTEM_H */
+#endif /* _ASM_POWERPC_SYSTEM_H */