#ifndef _ASM_POWERPC_SYSTEM_H
#define _ASM_POWERPC_SYSTEM_H
-#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/hw_irq.h>
-#include <asm/ppc_asm.h>
+#include <asm/atomic.h>
/*
* Memory barrier.
*
* We have to use the sync instructions for mb(), since lwsync doesn't
* order loads with respect to previous stores. Lwsync is fine for
- * rmb(), though. Note that lwsync is interpreted as sync by
- * 32-bit and older 64-bit CPUs.
+ * rmb(), though. Note that rmb() actually uses a sync on 32-bit
+ * architectures.
*
* For wmb(), we use sync since wmb is used in drivers to order
* stores to system memory with respect to writes to the device.
 * However, smp_wmb() can be a lighter-weight eieio barrier on
 * SMP since it is only used to order updates to system memory.
*/
#define mb() __asm__ __volatile__ ("sync" : : : "memory")
-#define rmb() __asm__ __volatile__ ("lwsync" : : : "memory")
+#define rmb() __asm__ __volatile__ (__stringify(LWSYNC) : : : "memory")
#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value) do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+#ifdef __KERNEL__
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_read_barrier_depends() do { } while(0)
#endif /* CONFIG_SMP */
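/*
 * Illustrative sketch, not part of this header: a minimal
 * producer/consumer pairing showing how the wmb()/rmb() macros above
 * are meant to be used.  The names (example_payload, example_ready,
 * the two helpers) are made up for the example.
 */
static int example_payload;
static int example_ready;

static inline void example_publish(int value)
{
	example_payload = value;	/* store the payload first */
	wmb();				/* order payload store before the flag */
	example_ready = 1;
}

static inline int example_consume(int *value)
{
	if (!example_ready)
		return 0;
	rmb();				/* order flag load before payload load */
	*value = example_payload;
	return 1;
}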
-#ifdef __KERNEL__
+/*
+ * This is a barrier which prevents following instructions from being
+ * started until the value of the argument x is known. For example, if
+ * x is a variable loaded from memory, this prevents following
+ * instructions from being executed until the load has been performed.
+ */
+#define data_barrier(x) \
+ asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
+
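/*
 * Illustrative sketch, not part of this header: using data_barrier()
 * so that later instructions do not start until a value just loaded
 * from memory is actually available.  The helper name and the 'flag'
 * argument are made up for the example.
 */
static inline int example_check_flag(volatile unsigned int *flag)
{
	unsigned int val = *flag;	/* ordinary load from memory */

	data_barrier(val);		/* hold off later instructions until
					 * the load of 'val' has completed */
	return val != 0;
}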
struct task_struct;
struct pt_regs;
DEBUGGER_BOILERPLATE(debugger_dabr_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)
-#ifdef CONFIG_XMON
-extern void xmon_init(int enable);
-#endif
-
#else
static inline int debugger(struct pt_regs *regs) { return 0; }
static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
extern void enable_kernel_altivec(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
+extern int emulate_altivec(struct pt_regs *);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern int fix_alignment(struct pt_regs *);
-extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
-extern void cvt_df(double *from, float *to, unsigned long *fpscr);
+extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
+extern void cvt_df(double *from, float *to, struct thread_struct *thread);
+
+#ifndef CONFIG_SMP
+extern void discard_lazy_cpu_state(void);
+#else
+static inline void discard_lazy_cpu_state(void)
+{
+}
+#endif
#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
extern u32 booke_wdt_period;
#endif /* CONFIG_BOOKE_WDT */
-/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
-extern unsigned char e2a(unsigned char);
-
struct device_node;
extern void note_scsi_host(struct device_node *, void *);
extern struct task_struct *_switch(struct thread_struct *prev,
struct thread_struct *next);
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
extern unsigned int rtas_data;
+extern int mem_init_done; /* set on boot once kmalloc can be called */
+extern unsigned long memory_limit;
+extern unsigned long klimit;
+
+extern int powersave_nap; /* set if nap mode can be used in idle loop */
/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
unsigned long prev;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: lwarx %0,0,%2 \n"
PPC405_ERR77(0,%2)
" stwcx. %3,0,%2 \n\
bne- 1b"
ISYNC_ON_SMP
- : "=&r" (prev), "=m" (*(volatile unsigned int *)p)
- : "r" (p), "r" (val), "m" (*(volatile unsigned int *)p)
+ : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
+ : "r" (p), "r" (val)
: "cc", "memory");
return prev;
unsigned long prev;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: ldarx %0,0,%2 \n"
PPC405_ERR77(0,%2)
" stdcx. %3,0,%2 \n\
bne- 1b"
ISYNC_ON_SMP
- : "=&r" (prev), "=m" (*(volatile unsigned long *)p)
- : "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
+ : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
+ : "r" (p), "r" (val)
: "cc", "memory");
return prev;
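/*
 * Illustrative sketch, not part of this header: a trivial "grab the
 * token" helper built on the xchg() wrapper that this file layers on
 * top of __xchg_u32/__xchg_u64.  The names are made up for the
 * example.
 */
static inline int example_try_grab(volatile unsigned long *token)
{
	/* Atomically set the token to 1; we owned it if it was 0 before. */
	return xchg(token, 1UL) == 0;
}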
unsigned int prev;
__asm__ __volatile__ (
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
cmpw 0,%0,%3\n\
bne- 2f\n"
ISYNC_ON_SMP
"\n\
2:"
- : "=&r" (prev), "=m" (*p)
- : "r" (p), "r" (old), "r" (new), "m" (*p)
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
: "cc", "memory");
return prev;
#ifdef CONFIG_PPC64
static __inline__ unsigned long
-__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
+__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
unsigned long prev;
__asm__ __volatile__ (
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
cmpd 0,%0,%3\n\
bne- 2f\n\
ISYNC_ON_SMP
"\n\
2:"
- : "=&r" (prev), "=m" (*p)
- : "r" (p), "r" (old), "r" (new), "m" (*p)
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
: "cc", "memory");
return prev;
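/*
 * Illustrative sketch, not part of this header: a lock-free "store
 * the maximum" loop built on the cmpxchg() wrapper that this file
 * layers on top of __cmpxchg_u32/__cmpxchg_u64.  The helper name and
 * the 'maximum' argument are made up for the example.
 */
static inline void example_store_max(volatile unsigned long *maximum,
				     unsigned long val)
{
	unsigned long old;

	do {
		old = *maximum;
		if (old >= val)
			break;			/* nothing to update */
	} while (cmpxchg(maximum, old, val) != old);
}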
* powers of 2 writes until it reaches sufficient alignment).
*
* Based on this we disable the IP header alignment in network drivers.
+ * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
+ * cacheline alignment of buffers.
*/
-#define NET_IP_ALIGN 0
+#define NET_IP_ALIGN 0
+#define NET_SKB_PAD L1_CACHE_BYTES
#endif
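/*
 * Illustrative sketch, not part of this header: the usual way a
 * network driver consumes NET_IP_ALIGN when setting up a receive
 * buffer (dev_alloc_skb()/skb_reserve() come from <linux/skbuff.h>;
 * the rx_buf_size argument is made up for the example).  On this
 * architecture the skb_reserve() call is a no-op since NET_IP_ALIGN
 * is 0, but the idiom stays portable.
 */
static inline struct sk_buff *example_alloc_rx_skb(unsigned int rx_buf_size)
{
	struct sk_buff *skb = dev_alloc_skb(rx_buf_size + NET_IP_ALIGN);

	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}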
#define arch_align_stack(x) (x)
#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))
+static inline void create_instruction(unsigned long addr, unsigned int instr)
+{
+ unsigned int *p;
+ p = (unsigned int *)addr;
+ *p = instr;
+ asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (p));
+}
+
+/* Flags for create_branch:
+ * "b" == create_branch(addr, target, 0);
+ * "ba" == create_branch(addr, target, BRANCH_ABSOLUTE);
+ * "bl" == create_branch(addr, target, BRANCH_SET_LINK);
+ * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
+ */
+#define BRANCH_SET_LINK 0x1
+#define BRANCH_ABSOLUTE 0x2
+
+static inline void create_branch(unsigned long addr,
+ unsigned long target, int flags)
+{
+ unsigned int instruction;
+
+ if (! (flags & BRANCH_ABSOLUTE))
+ target = target - addr;
+
+ /* Mask out the flags and target, so they don't step on each other. */
+ instruction = 0x48000000 | (flags & 0x3) | (target & 0x03FFFFFC);
+
+ create_instruction(addr, instruction);
+}
+
+static inline void create_function_call(unsigned long addr, void * func)
+{
+ unsigned long func_addr;
+
+#ifdef CONFIG_PPC64
+ /*
+ * On PPC64 the function pointer actually points to the function's
+ * descriptor. The first entry in the descriptor is the address
+ * of the function text.
+ */
+ func_addr = *(unsigned long *)func;
+#else
+ func_addr = (unsigned long)func;
+#endif
+ create_branch(addr, func_addr, BRANCH_SET_LINK);
+}
+
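/*
 * Illustrative sketch, not part of this header: patching a "bl" to a
 * handler into a reserved instruction slot at runtime with the
 * helpers above.  The symbols (example_patch_site, example_handler)
 * are made up; real callers pass the address of an instruction they
 * have set aside for patching.
 */
extern unsigned int example_patch_site[];	/* hypothetical slot, e.g. a nop */
extern void example_handler(void);

static inline void example_patch_in_call(void)
{
	/* Rewrites the slot as "bl example_handler" and flushes the
	 * icache via create_instruction() so the new code is seen. */
	create_function_call((unsigned long)example_patch_site,
			     (void *)example_handler);
}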
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void account_system_vtime(struct task_struct *);
+#endif
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYSTEM_H */