Blackfin arch: defines and provides entry points for certain user space functions at fixed addresses
author Bernd Schmidt <bernd.schmidt@analog.com>
Thu, 21 Jun 2007 03:34:16 +0000 (11:34 +0800)
committer Bryan Wu <bryan.wu@analog.com>
Thu, 21 Jun 2007 03:34:16 +0000 (11:34 +0800)
This patch defines (and provides) entry points for certain user space functions
at fixed addresses.  The Blackfin has no usable atomic instructions, but we can
ensure that these code sequences appear atomic from a user space point of view
by detecting when we're in the process of executing them during the interrupt
handler return path.  This allows much more efficient pthread lock
implementations than the bfin_spinlock syscall we're currently using.
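
To illustrate the userspace side, here is a rough, untested sketch (not part
of this patch) of a test-and-set lock that calls the xchg sequence at its
fixed address.  It assumes the <asm/fixed_code.h> header exported below and
GNU C local register variables; the helper names are invented:

#include <asm/fixed_code.h>

/* Hypothetical helper: exchange *ptr with "value" through the fixed
 * sequence at ATOMIC_XCHG32.  The sequence takes the address in P0 and
 * the new value in R1, and returns the old contents in R0.
 */
static inline int bfin_xchg32(volatile int *ptr, int value)
{
        register int old asm("R0");
        register int newval asm("R1") = value;

        asm volatile ("P0 = %2;\n\tCALL (%3);"
                      : "=d" (old)
                      : "d" (newval), "a" (ptr), "a" (ATOMIC_XCHG32)
                      : "P0", "RETS", "memory");
        return old;
}

/* Trivial busy-wait lock built on the helper above. */
static inline void user_spin_lock(volatile int *lock)
{
        while (bfin_xchg32(lock, 1) != 0)
                continue;
}

static inline void user_spin_unlock(volatile int *lock)
{
        *lock = 0;
}

In the uncontended case this never enters the kernel; if an interrupt hits in
the middle of the sequence, the interrupt return path finishes or restarts it,
so the load and store still appear atomic to other threads.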

Also provided is a small sys_rt_sigreturn stub which can be used by the signal
handler setup code.  The signal.c part will be committed separately.
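
A minimal sketch of how that separate change might use the stub (the function
name here is invented; the rets field follows the Blackfin pt_regs layout used
elsewhere in this patch):

#include <asm/ptrace.h>
#include <asm/fixed_code.h>

/* Hypothetical sketch: rather than writing a sigreturn trampoline onto the
 * user stack, signal frame setup can make the handler return into the fixed
 * stub, which issues the rt_sigreturn system call via EXCPT 0.
 */
static void point_handler_return_at_stub(struct pt_regs *regs)
{
        regs->rets = SIGRETURN_STUB;
}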

Signed-off-by: Bernd Schmidt <bernd.schmidt@analog.com>
Signed-off-by: Bryan Wu <bryan.wu@analog.com>
arch/blackfin/kernel/Makefile
arch/blackfin/kernel/fixed_code.S [new file with mode: 0644]
arch/blackfin/kernel/process.c
arch/blackfin/kernel/setup.c
arch/blackfin/mach-common/entry.S
include/asm-blackfin/Kbuild
include/asm-blackfin/bfin-global.h
include/asm-blackfin/cplbinit.h
include/asm-blackfin/fixed_code.h [new file with mode: 0644]

diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index 93d2140..b7b6de8 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -6,7 +6,8 @@ extra-y := init_task.o vmlinux.lds
 
 obj-y := \
        entry.o process.o bfin_ksyms.o ptrace.o setup.o signal.o \
-       sys_bfin.o time.o traps.o irqchip.o dma-mapping.o flat.o
+       sys_bfin.o time.o traps.o irqchip.o dma-mapping.o flat.o \
+       fixed_code.o
 
 obj-$(CONFIG_BF53x)                 += bfin_gpio.o
 obj-$(CONFIG_BF561)                 += bfin_gpio.o
diff --git a/arch/blackfin/kernel/fixed_code.S b/arch/blackfin/kernel/fixed_code.S
new file mode 100644
index 0000000..99ea296
--- /dev/null
+++ b/arch/blackfin/kernel/fixed_code.S
@@ -0,0 +1,132 @@
+/*
+ * This file contains sequences of code that will be copied to a
+ * fixed location, defined in <asm/fixed_code.h>.  The interrupt
+ * handlers ensure that these sequences appear to be atomic when
+ * executed from userspace.
+ * These are aligned to 16 bytes, so that we have some space to replace
+ * these sequences with something else (e.g. kernel traps if we ever do
+ * BF561 SMP).
+ */
+#include <linux/linkage.h>
+#include <asm/entry.h>
+#include <asm/unistd.h>
+
+.text
+ENTRY(_fixed_code_start)
+
+.align 16
+ENTRY(_sigreturn_stub)
+       P0 = __NR_rt_sigreturn;
+       EXCPT 0;
+       /* Speculative execution paranoia.  */
+0:     JUMP.S 0b;
+ENDPROC (_sigreturn_stub)
+
+.align 16
+       /*
+        * Atomic exchange, 32 bit.
+        * Inputs:      P0: memory address to use
+        *              R1: value to store
+        * Output:      R0: old contents of the memory address.
+        */
+ENTRY(_atomic_xchg32)
+       R0 = [P0];
+       [P0] = R1;
+       rts;
+ENDPROC (_atomic_xchg32)
+
+.align 16
+       /*
+        * Compare and swap, 32 bit.
+        * Inputs:      P0: memory address to use
+        *              R1: compare value
+        *              R2: new value to store
+        * The new value is stored if the contents of the memory
+        * address is equal to the compare value.
+        * Output:      R0: old contents of the memory address.
+        */
+ENTRY(_atomic_cas32)
+       R0 = [P0];
+       CC = R0 == R1;
+       IF !CC JUMP 1f;
+       [P0] = R2;
+1:
+       rts;
+ENDPROC (_atomic_cas32)
+
+.align 16
+       /*
+        * Atomic add, 32 bit.
+        * Inputs:      P0: memory address to use
+        *              R0: value to add
+        * Outputs:     R0: new contents of the memory address.
+        *              R1: previous contents of the memory address.
+        */
+ENTRY(_atomic_add32)
+       R1 = [P0];
+       R0 = R1 + R0;
+       [P0] = R0;
+       rts;
+ENDPROC (_atomic_add32)
+
+.align 16
+       /*
+        * Atomic sub, 32 bit.
+        * Inputs:      P0: memory address to use
+        *              R0: value to subtract
+        * Outputs:     R0: new contents of the memory address.
+        *              R1: previous contents of the memory address.
+        */
+ENTRY(_atomic_sub32)
+       R1 = [P0];
+       R0 = R1 - R0;
+       [P0] = R0;
+       rts;
+ENDPROC (_atomic_sub32)
+
+.align 16
+       /*
+        * Atomic ior, 32 bit.
+        * Inputs:      P0: memory address to use
+        *              R0: value to ior
+        * Outputs:     R0: new contents of the memory address.
+        *              R1: previous contents of the memory address.
+        */
+ENTRY(_atomic_ior32)
+       R1 = [P0];
+       R0 = R1 | R0;
+       [P0] = R0;
+       rts;
+ENDPROC (_atomic_ior32)
+
+.align 16
+       /*
+        * Atomic and, 32 bit.
+        * Inputs:      P0: memory address to use
+        *              R0: value to and
+        * Outputs:     R0: new contents of the memory address.
+        *              R1: previous contents of the memory address.
+        */
+ENTRY(_atomic_and32)
+       R1 = [P0];
+       R0 = R1 & R0;
+       [P0] = R0;
+       rts;
+ENDPROC (_atomic_and32)
+
+.align 16
+       /*
+        * Atomic xor, 32 bit.
+        * Inputs:      P0: memory address to use
+        *              R0: value to xor
+        * Outputs:     R0: new contents of the memory address.
+        *              R1: previous contents of the memory address.
+        */
+ENTRY(_atomic_xor32)
+       R1 = [P0];
+       R0 = R1 ^ R0;
+       [P0] = R0;
+       rts;
+ENDPROC (_atomic_xor32)
+
+ENTRY(_fixed_code_end)
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 3eff743..6b7a94a 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -35,6 +35,7 @@
 
 #include <asm/blackfin.h>
 #include <asm/uaccess.h>
+#include <asm/fixed_code.h>
 
 #define        LED_ON  0
 #define        LED_OFF 1
@@ -350,6 +351,70 @@ unsigned long get_wchan(struct task_struct *p)
        return 0;
 }
 
+void finish_atomic_sections (struct pt_regs *regs)
+{
+       if (regs->pc < ATOMIC_SEQS_START || regs->pc >= ATOMIC_SEQS_END)
+               return;
+
+       switch (regs->pc) {
+       case ATOMIC_XCHG32 + 2:
+               put_user(regs->r1, (int *)regs->p0);
+               regs->pc += 2;
+               break;
+
+       case ATOMIC_CAS32 + 2:
+       case ATOMIC_CAS32 + 4:
+               if (regs->r0 == regs->r1)
+                       put_user(regs->r2, (int *)regs->p0);
+               regs->pc = ATOMIC_CAS32 + 8;
+               break;
+       case ATOMIC_CAS32 + 6:
+               put_user(regs->r2, (int *)regs->p0);
+               regs->pc += 2;
+               break;
+
+       case ATOMIC_ADD32 + 2:
+               regs->r0 = regs->r1 + regs->r0;
+               /* fall through */
+       case ATOMIC_ADD32 + 4:
+               put_user(regs->r0, (int *)regs->p0);
+               regs->pc = ATOMIC_ADD32 + 6;
+               break;
+
+       case ATOMIC_SUB32 + 2:
+               regs->r0 = regs->r1 - regs->r0;
+               /* fall through */
+       case ATOMIC_SUB32 + 4:
+               put_user(regs->r0, (int *)regs->p0);
+               regs->pc = ATOMIC_SUB32 + 6;
+               break;
+
+       case ATOMIC_IOR32 + 2:
+               regs->r0 = regs->r1 | regs->r0;
+               /* fall through */
+       case ATOMIC_IOR32 + 4:
+               put_user(regs->r0, (int *)regs->p0);
+               regs->pc = ATOMIC_IOR32 + 6;
+               break;
+
+       case ATOMIC_AND32 + 2:
+               regs->r0 = regs->r1 & regs->r0;
+               /* fall through */
+       case ATOMIC_AND32 + 4:
+               put_user(regs->r0, (int *)regs->p0);
+               regs->pc = ATOMIC_AND32 + 6;
+               break;
+
+       case ATOMIC_XOR32 + 2:
+               regs->r0 = regs->r1 ^ regs->r0;
+               /* fall through */
+       case ATOMIC_XOR32 + 4:
+               put_user(regs->r0, (int *)regs->p0);
+               regs->pc = ATOMIC_XOR32 + 6;
+               break;
+       }
+}
+
 #if defined(CONFIG_ACCESS_CHECK)
 int _access_ok(unsigned long addr, unsigned long size)
 {
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 76bf2ce..534227f 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -42,6 +42,7 @@
 #include <asm/cacheflush.h>
 #include <asm/blackfin.h>
 #include <asm/cplbinit.h>
+#include <asm/fixed_code.h>
 
 u16 _bfin_swrst;
 
@@ -404,6 +405,27 @@ void __init setup_arch(char **cmdline_p)
 
        printk(KERN_INFO "Hardware Trace Enabled\n");
        bfin_write_TBUFCTL(0x03);
+
+       /* Copy atomic sequences to their fixed location, and sanity check that
+          these locations are the ones that we advertise to userspace.  */
+       memcpy((void *)FIXED_CODE_START, &fixed_code_start,
+              FIXED_CODE_END - FIXED_CODE_START);
+       BUG_ON((char *)&sigreturn_stub - (char *)&fixed_code_start
+              != SIGRETURN_STUB - FIXED_CODE_START);
+       BUG_ON((char *)&atomic_xchg32 - (char *)&fixed_code_start
+              != ATOMIC_XCHG32 - FIXED_CODE_START);
+       BUG_ON((char *)&atomic_cas32 - (char *)&fixed_code_start
+              != ATOMIC_CAS32 - FIXED_CODE_START);
+       BUG_ON((char *)&atomic_add32 - (char *)&fixed_code_start
+              != ATOMIC_ADD32 - FIXED_CODE_START);
+       BUG_ON((char *)&atomic_sub32 - (char *)&fixed_code_start
+              != ATOMIC_SUB32 - FIXED_CODE_START);
+       BUG_ON((char *)&atomic_ior32 - (char *)&fixed_code_start
+              != ATOMIC_IOR32 - FIXED_CODE_START);
+       BUG_ON((char *)&atomic_and32 - (char *)&fixed_code_start
+              != ATOMIC_AND32 - FIXED_CODE_START);
+       BUG_ON((char *)&atomic_xor32 - (char *)&fixed_code_start
+              != ATOMIC_XOR32 - FIXED_CODE_START);
 }
 
 static int __init topology_init(void)
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 40045b1..c4a32ea 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -741,6 +741,10 @@ _schedule_and_signal_from_int:
        r0 = [p0];
        sti r0;
 
+       r0 = sp;
+       sp += -12;
+       call _finish_atomic_sections;
+       sp += 12;
        jump.s .Lresume_userspace;
 
 _schedule_and_signal:
diff --git a/include/asm-blackfin/Kbuild b/include/asm-blackfin/Kbuild
index c68e168..71f8fe7 100644
--- a/include/asm-blackfin/Kbuild
+++ b/include/asm-blackfin/Kbuild
@@ -1 +1,3 @@
 include include/asm-generic/Kbuild.asm
+
+header-y += fixed_code.h
diff --git a/include/asm-blackfin/bfin-global.h b/include/asm-blackfin/bfin-global.h
index 57f37cc..c4d6cbb 100644
--- a/include/asm-blackfin/bfin-global.h
+++ b/include/asm-blackfin/bfin-global.h
@@ -67,6 +67,18 @@ extern void evt14_softirq(void);
 extern asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);
 extern void bfin_gpio_interrupt_setup(int irq, int irq_pfx, int type);
 
+extern asmlinkage void finish_atomic_sections (struct pt_regs *regs);
+extern char fixed_code_start;
+extern char fixed_code_end;
+extern int atomic_xchg32(void);
+extern int atomic_cas32(void);
+extern int atomic_add32(void);
+extern int atomic_sub32(void);
+extern int atomic_ior32(void);
+extern int atomic_and32(void);
+extern int atomic_xor32(void);
+extern void sigreturn_stub(void);
+
 extern void *l1_data_A_sram_alloc(size_t);
 extern void *l1_data_B_sram_alloc(size_t);
 extern void *l1_inst_sram_alloc(size_t);
diff --git a/include/asm-blackfin/cplbinit.h b/include/asm-blackfin/cplbinit.h
index 3bad2d1..e14ea39 100644
--- a/include/asm-blackfin/cplbinit.h
+++ b/include/asm-blackfin/cplbinit.h
@@ -101,8 +101,8 @@ struct s_cplb {
 static struct cplb_desc cplb_data[] = {
        {
                .start = 0,
-               .end = SIZE_4K,
-               .psize = SIZE_4K,
+               .end = SIZE_1K,
+               .psize = SIZE_1K,
                .attr = INITIAL_T | SWITCH_T | I_CPLB | D_CPLB,
                .i_conf = SDRAM_OOPS,
                .d_conf = SDRAM_OOPS,
diff --git a/include/asm-blackfin/fixed_code.h b/include/asm-blackfin/fixed_code.h
new file mode 100644
index 0000000..e6df84e
--- /dev/null
+++ b/include/asm-blackfin/fixed_code.h
@@ -0,0 +1,20 @@
+/* This file defines the fixed addresses where userspace programs can find
+   atomic code sequences.  */
+
+#define FIXED_CODE_START       0x400
+
+#define SIGRETURN_STUB         0x400
+
+#define ATOMIC_SEQS_START      0x410
+
+#define ATOMIC_XCHG32          0x410
+#define ATOMIC_CAS32           0x420
+#define ATOMIC_ADD32           0x430
+#define ATOMIC_SUB32           0x440
+#define ATOMIC_IOR32           0x450
+#define ATOMIC_AND32           0x460
+#define ATOMIC_XOR32           0x470
+
+#define ATOMIC_SEQS_END                0x480
+
+#define FIXED_CODE_END         0x480
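
Because the Kbuild change above exports this header to userspace, programs can
reach the sequences directly.  Following the same pattern as the xchg sketch in
the commit message (again untested and purely illustrative), a compare-and-swap
wrapper might look like:

#include <asm/fixed_code.h>

/* Hypothetical wrapper around the fixed sequence at ATOMIC_CAS32: address in
 * P0, compare value in R1, new value in R2, old memory contents back in R0.
 */
static inline int bfin_cas32(volatile int *ptr, int expected, int desired)
{
        register int old asm("R0");
        register int cmp asm("R1") = expected;
        register int rep asm("R2") = desired;

        asm volatile ("P0 = %3;\n\tCALL (%4);"
                      : "=d" (old)
                      : "d" (cmp), "d" (rep), "a" (ptr), "a" (ATOMIC_CAS32)
                      : "P0", "RETS", "CC", "memory");
        return old;     /* equal to "expected" on success */
}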