#include <linux/compiler.h>
#include <linux/types.h>
-#define xchg(p, v) \
- __atomic_exchange_n(p, v, __ATOMIC_SEQ_CST)
-
-#define xchg_acquire(p, v) \
- __atomic_exchange_n(p, v, __ATOMIC_ACQUIRE)
+typedef struct {
+ int counter;
+} atomic_t;
+
+typedef struct {
+ long counter;
+} atomic_long_t;
+
+typedef struct {
+ s64 counter;
+} atomic64_t;
+
+#ifndef C11_ATOMICS
+
+#include <urcu/uatomic.h>
+
+#if (CAA_BITS_PER_LONG != 64)
+#define ATOMIC64_SPINLOCK
+#endif
+
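+/* Note the argument swap: the __ATOMIC_ADD/SUB helpers take (value, ptr), liburcu's uatomic ops take (ptr, value). */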
+#define __ATOMIC_READ(p) uatomic_read(p)
+#define __ATOMIC_SET(p, v) uatomic_set(p, v)
+#define __ATOMIC_ADD_RETURN(v, p) uatomic_add_return(p, v)
+#define __ATOMIC_SUB_RETURN(v, p) uatomic_sub_return(p, v)
+#define __ATOMIC_ADD(v, p) uatomic_add(p, v)
+#define __ATOMIC_SUB(v, p) uatomic_sub(p, v)
+#define __ATOMIC_INC(p) uatomic_inc(p)
+#define __ATOMIC_DEC(p) uatomic_dec(p)
+
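+/* liburcu's uatomic_xchg()/uatomic_cmpxchg() imply full memory barriers, so they also cover the _acquire variants. */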
+#define xchg(p, v) uatomic_xchg(p, v)
+#define xchg_acquire(p, v) uatomic_xchg(p, v)
+#define cmpxchg(p, old, new) uatomic_cmpxchg(p, old, new)
+#define cmpxchg_acquire(p, old, new) uatomic_cmpxchg(p, old, new)
+
+#define smp_mb__before_atomic() cmm_smp_mb__before_uatomic_add()
+#define smp_mb__after_atomic() cmm_smp_mb__after_uatomic_add()
+#define smp_wmb() cmm_smp_wmb()
+#define smp_rmb() cmm_smp_rmb()
+#define smp_mb() cmm_smp_mb()
+#define smp_read_barrier_depends() cmm_smp_read_barrier_depends()
+
+#else /* C11_ATOMICS */
+
+#define __ATOMIC_READ(p) __atomic_load_n(p, __ATOMIC_RELAXED)
+#define __ATOMIC_SET(p, v) __atomic_store_n(p, v, __ATOMIC_RELAXED)
+#define __ATOMIC_ADD_RETURN(v, p) __atomic_add_fetch(p, v, __ATOMIC_RELAXED)
+#define __ATOMIC_ADD_RETURN_RELEASE(v, p) \
+ __atomic_add_fetch(p, v, __ATOMIC_RELEASE)
+#define __ATOMIC_SUB_RETURN(v, p) __atomic_sub_fetch(p, v, __ATOMIC_RELAXED)
+
+#define xchg(p, v) __atomic_exchange_n(p, v, __ATOMIC_SEQ_CST)
+#define xchg_acquire(p, v) __atomic_exchange_n(p, v, __ATOMIC_ACQUIRE)
#define cmpxchg(p, old, new) \
({ \
 typeof(*(p)) __old = (old); \
 __atomic_compare_exchange_n(p, &__old, new, false, \
 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
 __old; \
})

#define cmpxchg_acquire(p, old, new) \
({ \
 typeof(*(p)) __old = (old); \
 __atomic_compare_exchange_n(p, &__old, new, false, \
 __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); \
 __old; \
})

#define smp_mb__before_atomic() __atomic_thread_fence(__ATOMIC_SEQ_CST)
#define smp_mb__after_atomic() __atomic_thread_fence(__ATOMIC_SEQ_CST)
#define smp_wmb() __atomic_thread_fence(__ATOMIC_SEQ_CST)
#define smp_rmb() __atomic_thread_fence(__ATOMIC_SEQ_CST)
#define smp_mb() __atomic_thread_fence(__ATOMIC_SEQ_CST)
#define smp_read_barrier_depends()
+#endif
+
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
#define smp_load_acquire(p) \
({ \
 typeof(*p) ___p1 = READ_ONCE(*p); \
 smp_mb(); \
 ___p1; \
})

#define smp_store_release(p, v) \
do { \
 smp_mb(); \
 WRITE_ONCE(*p, v); \
} while (0)
-typedef struct {
- int counter;
-} atomic_t;
-
-static inline int atomic_read(const atomic_t *v)
-{
- return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
-}
-
-static inline void atomic_set(atomic_t *v, int i)
-{
- __atomic_store_n(&v->counter, i, __ATOMIC_RELAXED);
+/* atomic interface: */
+
+#ifndef __ATOMIC_ADD
+#define __ATOMIC_ADD(i, v) __ATOMIC_ADD_RETURN(i, v)
+#endif
+
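+/* Fallback: a full barrier before the (relaxed) add is at least as strong as a release add_return. */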
+#ifndef __ATOMIC_ADD_RETURN_RELEASE
+#define __ATOMIC_ADD_RETURN_RELEASE(i, v) \
+ ({ smp_mb__before_atomic(); __ATOMIC_ADD_RETURN(i, v); })
+#endif
+
+#ifndef __ATOMIC_SUB
+#define __ATOMIC_SUB(i, v) __ATOMIC_SUB_RETURN(i, v)
+#endif
+
+#ifndef __ATOMIC_INC_RETURN
+#define __ATOMIC_INC_RETURN(v) __ATOMIC_ADD_RETURN(1, v)
+#endif
+
+#ifndef __ATOMIC_DEC_RETURN
+#define __ATOMIC_DEC_RETURN(v) __ATOMIC_SUB_RETURN(1, v)
+#endif
+
+#ifndef __ATOMIC_INC
+#define __ATOMIC_INC(v) __ATOMIC_ADD(1, v)
+#endif
+
+#ifndef __ATOMIC_DEC
+#define __ATOMIC_DEC(v) __ATOMIC_SUB(1, v)
+#endif
+
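+/*
+ * DEF_ATOMIC_OPS(a_type, i_type) expands to the usual atomic_* API
+ * (read, set, add/sub, inc/dec, xchg, cmpxchg, add_unless, ...) for a
+ * counter of type i_type, built on the __ATOMIC_* primitives above.
+ */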
+#define DEF_ATOMIC_OPS(a_type, i_type) \
+static inline i_type a_type##_read(const a_type##_t *v) \
+{ \
+ return __ATOMIC_READ(&v->counter); \
+} \
+ \
+static inline void a_type##_set(a_type##_t *v, i_type i) \
+{ \
+ __ATOMIC_SET(&v->counter, i); \
+} \
+ \
+static inline i_type a_type##_add_return(i_type i, a_type##_t *v) \
+{ \
+ return __ATOMIC_ADD_RETURN(i, &v->counter); \
+} \
+ \
+static inline i_type a_type##_add_return_release(i_type i, a_type##_t *v)\
+{ \
+ return __ATOMIC_ADD_RETURN_RELEASE(i, &v->counter); \
+} \
+ \
+static inline i_type a_type##_sub_return(i_type i, a_type##_t *v) \
+{ \
+ return __ATOMIC_SUB_RETURN(i, &v->counter); \
+} \
+ \
+static inline void a_type##_add(i_type i, a_type##_t *v) \
+{ \
+ __ATOMIC_ADD(i, &v->counter); \
+} \
+ \
+static inline void a_type##_sub(i_type i, a_type##_t *v) \
+{ \
+ __ATOMIC_SUB(i, &v->counter); \
+} \
+ \
+static inline i_type a_type##_inc_return(a_type##_t *v) \
+{ \
+ return __ATOMIC_INC_RETURN(&v->counter); \
+} \
+ \
+static inline i_type a_type##_dec_return(a_type##_t *v) \
+{ \
+ return __ATOMIC_DEC_RETURN(&v->counter); \
+} \
+ \
+static inline void a_type##_inc(a_type##_t *v) \
+{ \
+ __ATOMIC_INC(&v->counter); \
+} \
+ \
+static inline void a_type##_dec(a_type##_t *v) \
+{ \
+ __ATOMIC_DEC(&v->counter); \
+} \
+ \
+static inline bool a_type##_add_negative(i_type i, a_type##_t *v) \
+{ \
+ return __ATOMIC_ADD_RETURN(i, &v->counter) < 0; \
+} \
+ \
+static inline bool a_type##_sub_and_test(i_type i, a_type##_t *v) \
+{ \
+ return __ATOMIC_SUB_RETURN(i, &v->counter) == 0; \
+} \
+ \
+static inline bool a_type##_inc_and_test(a_type##_t *v) \
+{ \
+ return __ATOMIC_INC_RETURN(&v->counter) == 0; \
+} \
+ \
+static inline bool a_type##_dec_and_test(a_type##_t *v) \
+{ \
+ return __ATOMIC_DEC_RETURN(&v->counter) == 0; \
+} \
+ \
+static inline i_type a_type##_add_unless(a_type##_t *v, i_type a, i_type u)\
+{ \
+ i_type old, c = __ATOMIC_READ(&v->counter); \
+ while (c != u && (old = cmpxchg(&v->counter, c, c + a)) != c) \
+ c = old; \
+ return c; \
+} \
+ \
+static inline bool a_type##_inc_not_zero(a_type##_t *v) \
+{ \
+ return a_type##_add_unless(v, 1, 0); \
+} \
+ \
+static inline i_type a_type##_xchg(a_type##_t *v, i_type i) \
+{ \
+ return xchg(&v->counter, i); \
+} \
+ \
+static inline i_type a_type##_cmpxchg(a_type##_t *v, i_type old, i_type new)\
+{ \
+ return cmpxchg(&v->counter, old, new); \
+} \
+ \
+static inline i_type a_type##_cmpxchg_acquire(a_type##_t *v, i_type old, i_type new)\
+{ \
+ return cmpxchg_acquire(&v->counter, old, new); \
}
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- return __atomic_add_fetch(&v->counter, i, __ATOMIC_RELAXED);
-}
+DEF_ATOMIC_OPS(atomic, int)
+DEF_ATOMIC_OPS(atomic_long, long)
+
+#ifndef ATOMIC64_SPINLOCK
+DEF_ATOMIC_OPS(atomic64, s64)
+#else
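+/*
+ * 32-bit host: the 64-bit ops are implemented out of line with spinlocks;
+ * see the generic spinlock implementation added below.
+ */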
+s64 atomic64_read(const atomic64_t *v);
+void atomic64_set(atomic64_t *v, s64);
+
+s64 atomic64_add_return(s64, atomic64_t *);
+s64 atomic64_sub_return(s64, atomic64_t *);
+void atomic64_add(s64, atomic64_t *);
+void atomic64_sub(s64, atomic64_t *);
+
+s64 atomic64_xchg(atomic64_t *, s64);
+s64 atomic64_cmpxchg(atomic64_t *, s64, s64);
+int atomic64_add_unless(atomic64_t *, s64, s64);
+
+#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
+#define atomic64_inc(v) atomic64_add(1LL, (v))
+#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
+#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec(v) atomic64_sub(1LL, (v))
+#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
+#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- return __atomic_sub_fetch(&v->counter, i, __ATOMIC_RELAXED);
-}
-
-static inline int atomic_add_negative(int i, atomic_t *v)
-{
- return atomic_add_return(i, v) < 0;
-}
-
-static inline void atomic_add(int i, atomic_t *v)
-{
- atomic_add_return(i, v);
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
- atomic_sub_return(i, v);
-}
-
-static inline void atomic_inc(atomic_t *v)
-{
- atomic_add(1, v);
-}
-
-static inline void atomic_dec(atomic_t *v)
-{
- atomic_sub(1, v);
-}
-
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-
-#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
-
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
- c = old;
- return c;
-}
-
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-typedef struct {
- long counter;
-} atomic_long_t;
-
-static inline long atomic_long_read(const atomic_long_t *v)
-{
- return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
-}
-
-static inline void atomic_long_set(atomic_long_t *v, long i)
-{
- __atomic_store_n(&v->counter, i, __ATOMIC_RELAXED);
-}
-
-static inline long atomic_long_add_return(long i, atomic_long_t *v)
-{
- return __atomic_add_fetch(&v->counter, i, __ATOMIC_RELAXED);
-}
-
-static inline long atomic_long_sub_return(long i, atomic_long_t *v)
-{
- return __atomic_sub_fetch(&v->counter, i, __ATOMIC_RELAXED);
-}
-
-static inline void atomic_long_add(long i, atomic_long_t *v)
-{
- atomic_long_add_return(i, v);
-}
-
-static inline void atomic_long_sub(long i, atomic_long_t *v)
-{
- atomic_long_sub_return(i, v);
-}
-
-static inline void atomic_long_inc(atomic_long_t *v)
-{
- atomic_long_add(1, v);
-}
-
-static inline void atomic_long_dec(atomic_long_t *v)
-{
- atomic_long_sub(1, v);
-}
-
-static inline long atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
-{
- return cmpxchg(&v->counter, old, new);
-}
-
-static inline bool atomic_long_inc_not_zero(atomic_long_t *i)
-{
- long old, v = atomic_long_read(i);
-
- do {
- if (!(old = v))
- return false;
- } while ((v = atomic_long_cmpxchg(i, old, old + 1)) != old);
-
- return true;
-}
-
-#define atomic_long_sub_and_test(i, v) (atomic_long_sub_return((i), (v)) == 0)
-
-typedef struct {
- u64 counter;
-} atomic64_t;
-
-static inline s64 atomic64_read(const atomic64_t *v)
-{
- return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
-}
-
-static inline void atomic64_set(atomic64_t *v, s64 i)
-{
- __atomic_store_n(&v->counter, i, __ATOMIC_RELAXED);
-}
-
-static inline s64 atomic64_add_return(s64 i, atomic64_t *v)
-{
- return __atomic_add_fetch(&v->counter, i, __ATOMIC_RELAXED);
-}
-
-static inline s64 atomic64_sub_return(s64 i, atomic64_t *v)
-{
- return __atomic_sub_fetch(&v->counter, i, __ATOMIC_RELAXED);
-}
-
-static inline void atomic64_add(s64 i, atomic64_t *v)
-{
- atomic64_add_return(i, v);
-}
-
-static inline void atomic64_sub(s64 i, atomic64_t *v)
-{
- atomic64_sub_return(i, v);
-}
-
-static inline void atomic64_inc(atomic64_t *v)
-{
- atomic64_add(1, v);
-}
-
-static inline void atomic64_dec(atomic64_t *v)
-{
- atomic64_sub(1, v);
-}
-
-#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1, (v))
-
-static inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+static inline s64 atomic64_add_return_release(s64 i, atomic64_t *v)
{
- return cmpxchg(&v->counter, old, new);
+ smp_mb__before_atomic();
+ return atomic64_add_return(i, v);
}
static inline s64 atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
- return cmpxchg_acquire(&v->counter, old, new);
+ return atomic64_cmpxchg(v, old, new);
}
-static inline s64 atomic64_add_return_release(s64 i, atomic64_t *v)
-{
- return __atomic_add_fetch(&v->counter, i, __ATOMIC_RELEASE);
-}
+#endif
#endif /* __TOOLS_LINUX_ATOMIC_H */
--- /dev/null
+/*
+ * Generic implementation of 64-bit atomics using spinlocks,
+ * useful on processors that don't have 64-bit atomic instructions.
+ *
+ * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/types.h>
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+
+#ifdef ATOMIC64_SPINLOCK
+
+/*
+ * We use a hashed array of spinlocks to provide exclusive access
+ * to each atomic64_t variable. Since this is expected to be used on
+ * systems with small numbers of CPUs (<= 4 or so), we use a
+ * relatively small array of 16 spinlocks to avoid wasting too much
+ * memory on the spinlock array.
+ */
+#define NR_LOCKS 16
+
+/*
+ * Ensure each lock is in a separate cacheline.
+ */
+static union {
+ raw_spinlock_t lock;
+ char pad[L1_CACHE_BYTES];
+} atomic64_lock[NR_LOCKS] ____cacheline_aligned_in_smp = {
+ [0 ... (NR_LOCKS - 1)] = {
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
+ },
+};
+
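+/*
+ * Hash the atomic64_t's address down to one of the NR_LOCKS locks:
+ * drop the within-cacheline bits, then fold in higher address bits.
+ */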
+static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
+{
+ unsigned long addr = (unsigned long) v;
+
+ addr >>= L1_CACHE_SHIFT;
+ addr ^= (addr >> 8) ^ (addr >> 16);
+ return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
+}
+
+long long atomic64_read(const atomic64_t *v)
+{
+ unsigned long flags;
+ raw_spinlock_t *lock = lock_addr(v);
+ long long val;
+
+ raw_spin_lock_irqsave(lock, flags);
+ val = v->counter;
+ raw_spin_unlock_irqrestore(lock, flags);
+ return val;
+}
+
+void atomic64_set(atomic64_t *v, long long i)
+{
+ unsigned long flags;
+ raw_spinlock_t *lock = lock_addr(v);
+
+ raw_spin_lock_irqsave(lock, flags);
+ v->counter = i;
+ raw_spin_unlock_irqrestore(lock, flags);
+}
+
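+/*
+ * Generate the lock-protected implementations: atomic64_<op>(),
+ * atomic64_<op>_return() and atomic64_fetch_<op>().
+ */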
+#define ATOMIC64_OP(op, c_op) \
+void atomic64_##op(long long a, atomic64_t *v) \
+{ \
+ unsigned long flags; \
+ raw_spinlock_t *lock = lock_addr(v); \
+ \
+ raw_spin_lock_irqsave(lock, flags); \
+ v->counter c_op a; \
+ raw_spin_unlock_irqrestore(lock, flags); \
+}
+
+#define ATOMIC64_OP_RETURN(op, c_op) \
+long long atomic64_##op##_return(long long a, atomic64_t *v) \
+{ \
+ unsigned long flags; \
+ raw_spinlock_t *lock = lock_addr(v); \
+ long long val; \
+ \
+ raw_spin_lock_irqsave(lock, flags); \
+ val = (v->counter c_op a); \
+ raw_spin_unlock_irqrestore(lock, flags); \
+ return val; \
+}
+
+#define ATOMIC64_FETCH_OP(op, c_op) \
+long long atomic64_fetch_##op(long long a, atomic64_t *v) \
+{ \
+ unsigned long flags; \
+ raw_spinlock_t *lock = lock_addr(v); \
+ long long val; \
+ \
+ raw_spin_lock_irqsave(lock, flags); \
+ val = v->counter; \
+ v->counter c_op a; \
+ raw_spin_unlock_irqrestore(lock, flags); \
+ return val; \
+}
+
+#define ATOMIC64_OPS(op, c_op) \
+ ATOMIC64_OP(op, c_op) \
+ ATOMIC64_OP_RETURN(op, c_op) \
+ ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(add, +=)
+ATOMIC64_OPS(sub, -=)
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, c_op) \
+ ATOMIC64_OP(op, c_op) \
+ ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(and, &=)
+ATOMIC64_OPS(or, |=)
+ATOMIC64_OPS(xor, ^=)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
+long long atomic64_dec_if_positive(atomic64_t *v)
+{
+ unsigned long flags;
+ raw_spinlock_t *lock = lock_addr(v);
+ long long val;
+
+ raw_spin_lock_irqsave(lock, flags);
+ val = v->counter - 1;
+ if (val >= 0)
+ v->counter = val;
+ raw_spin_unlock_irqrestore(lock, flags);
+ return val;
+}
+
+long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
+{
+ unsigned long flags;
+ raw_spinlock_t *lock = lock_addr(v);
+ long long val;
+
+ raw_spin_lock_irqsave(lock, flags);
+ val = v->counter;
+ if (val == o)
+ v->counter = n;
+ raw_spin_unlock_irqrestore(lock, flags);
+ return val;
+}
+
+long long atomic64_xchg(atomic64_t *v, long long new)
+{
+ unsigned long flags;
+ raw_spinlock_t *lock = lock_addr(v);
+ long long val;
+
+ raw_spin_lock_irqsave(lock, flags);
+ val = v->counter;
+ v->counter = new;
+ raw_spin_unlock_irqrestore(lock, flags);
+ return val;
+}
+
+int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+{
+ unsigned long flags;
+ raw_spinlock_t *lock = lock_addr(v);
+ int ret = 0;
+
+ raw_spin_lock_irqsave(lock, flags);
+ if (v->counter != u) {
+ v->counter += a;
+ ret = 1;
+ }
+ raw_spin_unlock_irqrestore(lock, flags);
+ return ret;
+}
+
+#endif /* ATOMIC64_SPINLOCK */