+/*
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
#ifndef __ARCH_BLACKFIN_ATOMIC__
#define __ARCH_BLACKFIN_ATOMIC__
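+
+/*
+ * Uniprocessor kernels can rely on the generic atomic implementation;
+ * SMP builds use the out-of-line assembly helpers declared below.
+ */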
+#ifndef CONFIG_SMP
+# include <asm-generic/atomic.h>
+#else
+
#include <linux/types.h>
#include <asm/system.h> /* local_irq_XXX() */
/*
* Atomic operations that C can't guarantee us. Useful for
 * resource counting etc.
- *
- * Generally we do not concern about SMP BFIN systems, so we don't have
- * to deal with that.
- *
- * Tony Kou (tonyko@lineo.ca) Lineo Inc. 2001
*/
#define ATOMIC_INIT(i) { (i) }
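+/*
+ * Minimal usage sketch (illustrative only; free_my_object() is a
+ * hypothetical release function), showing the "resource counting"
+ * pattern mentioned above:
+ *
+ *	static atomic_t refcnt = ATOMIC_INIT(1);
+ *
+ *	atomic_inc(&refcnt);
+ *	if (atomic_dec_and_test(&refcnt))
+ *		free_my_object();
+ */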
-
-#define atomic_read(v) ((v)->counter)
#define atomic_set(v, i) (((v)->counter) = i)
-static __inline__ void atomic_add(int i, atomic_t * v)
-{
- long flags;
+#define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter)
- local_irq_save(flags);
- v->counter += i;
- local_irq_restore(flags);
-}
-
-static __inline__ void atomic_sub(int i, atomic_t * v)
-{
- long flags;
+asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
- local_irq_save(flags);
- v->counter -= i;
- local_irq_restore(flags);
+asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
-}
+asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value);
-static inline int atomic_add_return(int i, atomic_t * v)
-{
- int __temp = 0;
- long flags;
+asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value);
- local_irq_save(flags);
- v->counter += i;
- __temp = v->counter;
- local_irq_restore(flags);
+asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
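+
+/*
+ * The __raw_*_asm helpers above are implemented out of line in assembly;
+ * the inline wrappers below map the standard atomic_* API onto them.
+ */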
- return __temp;
+static inline void atomic_add(int i, atomic_t *v)
+{
+ __raw_atomic_update_asm(&v->counter, i);
}
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-static inline int atomic_sub_return(int i, atomic_t * v)
+static inline void atomic_sub(int i, atomic_t *v)
{
- int __temp = 0;
- long flags;
-
- local_irq_save(flags);
- v->counter -= i;
- __temp = v->counter;
- local_irq_restore(flags);
-
- return __temp;
+ __raw_atomic_update_asm(&v->counter, -i);
}
-static __inline__ void atomic_inc(volatile atomic_t * v)
+static inline int atomic_add_return(int i, atomic_t *v)
{
- long flags;
-
- local_irq_save(flags);
- v->counter++;
- local_irq_restore(flags);
+ return __raw_atomic_update_asm(&v->counter, i);
}
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-#define atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
- c = old; \
- c != (u); \
-})
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-static __inline__ void atomic_dec(volatile atomic_t * v)
+static inline int atomic_sub_return(int i, atomic_t *v)
{
- long flags;
+ return __raw_atomic_update_asm(&v->counter, -i);
+}
- local_irq_save(flags);
- v->counter--;
- local_irq_restore(flags);
+static inline void atomic_inc(volatile atomic_t *v)
+{
+ __raw_atomic_update_asm(&v->counter, 1);
}
-static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t * v)
+static inline void atomic_dec(volatile atomic_t *v)
{
- long flags;
+ __raw_atomic_update_asm(&v->counter, -1);
+}
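+
+/* Atomically clear, set or test the bits given in @mask. */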
- local_irq_save(flags);
- v->counter &= ~mask;
- local_irq_restore(flags);
+static inline void atomic_clear_mask(int mask, atomic_t *v)
+{
+ __raw_atomic_clear_asm(&v->counter, mask);
}
-static __inline__ void atomic_set_mask(unsigned int mask, atomic_t * v)
+static inline void atomic_set_mask(int mask, atomic_t *v)
{
- long flags;
+ __raw_atomic_set_asm(&v->counter, mask);
+}
- local_irq_save(flags);
- v->counter |= mask;
- local_irq_restore(flags);
+static inline int atomic_test_mask(int mask, atomic_t *v)
+{
+ return __raw_atomic_test_asm(&v->counter, mask);
}
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
+#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))
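+/*
+ * atomic_cmpxchg - if @v holds @o, atomically replace it with @n.
+ * Returns the value @v held beforehand.
+ */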
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
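+/*
+ * atomic_add_unless - add @a to @v so long as @v was not @u.
+ * Returns non-zero if @v was not @u (i.e. the add took place).
+ */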
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v and returns true iff the result is zero.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-#include <asm-generic/atomic.h>
+#include <asm-generic/atomic-long.h>
+
+#endif /* !CONFIG_SMP */
-#endif /* __ARCH_BLACKFIN_ATOMIC __ */
+#endif /* __ARCH_BLACKFIN_ATOMIC__ */