#define __ATOMIC_SUB(v, p) uatomic_sub(p, v)
#define __ATOMIC_INC(p) uatomic_inc(p)
#define __ATOMIC_DEC(p) uatomic_dec(p)
+/* NB: value first, pointer second -- argument order mirrors the C11 branch */
+#define __ATOMIC_AND(v, p) uatomic_and(p, v)
+#define __ATOMIC_OR(v, p) uatomic_or(p, v)
#define xchg(p, v) uatomic_xchg(p, v)
+/* liburcu has no acquire-only exchange; the full-barrier xchg is strictly stronger */
#define xchg_acquire(p, v) uatomic_xchg(p, v)
#define smp_rmb() cmm_smp_rmb()
#define smp_mb() cmm_smp_mb()
#define smp_read_barrier_depends() cmm_smp_read_barrier_depends()
+/* over-approximate the acquire-after-control-dependency barrier with a full mb */
+#define smp_acquire__after_ctrl_dep() cmm_smp_mb()
#else /* C11_ATOMICS */
#define __ATOMIC_ADD_RETURN_RELEASE(v, p) \
 __atomic_add_fetch(p, v, __ATOMIC_RELEASE)
#define __ATOMIC_SUB_RETURN(v, p) __atomic_sub_fetch(p, v, __ATOMIC_RELAXED)
+/* release-ordered subtract; backs *_sub_return_release / *_dec_return_release */
+#define __ATOMIC_SUB_RETURN_RELEASE(v, p) \
+ __atomic_sub_fetch(p, v, __ATOMIC_RELEASE)
+/*
+ * Bitwise AND/OR, relaxed ordering. Take (value, pointer) like the
+ * uatomic definitions above -- the macro previously declared only (p)
+ * while its expansion referenced a free 'v', so any two-argument call
+ * site failed to expand and 'v' bound to whatever was in scope.
+ */
+#define __ATOMIC_AND(v, p) __atomic_and_fetch(p, v, __ATOMIC_RELAXED)
+#define __ATOMIC_OR(v, p) __atomic_or_fetch(p, v, __ATOMIC_RELAXED)
#define xchg(p, v) __atomic_exchange_n(p, v, __ATOMIC_SEQ_CST)
#define xchg_acquire(p, v) __atomic_exchange_n(p, v, __ATOMIC_ACQUIRE)
({ smp_mb__before_atomic(); __ATOMIC_ADD_RETURN(i, v); })
#endif
+/*
+ * Fallback: if the backend supplied no native release form, obtain (at
+ * least) release ordering with a full barrier before the plain op.
+ */
+#ifndef __ATOMIC_SUB_RETURN_RELEASE
+#define __ATOMIC_SUB_RETURN_RELEASE(i, v) \
+ ({ smp_mb__before_atomic(); __ATOMIC_SUB_RETURN(i, v); })
+#endif
+
+/* subtract without using the result: reuse the value-returning form */
#ifndef __ATOMIC_SUB
#define __ATOMIC_SUB(i, v) __ATOMIC_SUB_RETURN(i, v)
#endif
return __ATOMIC_READ(&v->counter); \
} \
\
+/* read with (at least) acquire ordering: plain read, then a full barrier */ \
+static inline i_type a_type##_read_acquire(const a_type##_t *v) \
+{ \
+ i_type ret = __ATOMIC_READ(&v->counter); \
+ smp_mb__after_atomic(); \
+ return ret; \
+} \
+ \
static inline void a_type##_set(a_type##_t *v, i_type i) \
{ \
return __ATOMIC_SET(&v->counter, i); \
return __ATOMIC_ADD_RETURN_RELEASE(i, &v->counter); \
} \
\
+/* subtract @i and return the new value, with release ordering */ \
+static inline i_type a_type##_sub_return_release(i_type i, a_type##_t *v)\
+{ \
+ return __ATOMIC_SUB_RETURN_RELEASE(i, &v->counter); \
+} \
+ \
+ \
static inline i_type a_type##_sub_return(i_type i, a_type##_t *v) \
{ \
return __ATOMIC_SUB_RETURN(i, &v->counter); \
return __ATOMIC_DEC_RETURN(&v->counter); \
} \
\
+/* decrement and return the new value, with release ordering (sub of 1) */ \
+static inline i_type a_type##_dec_return_release(a_type##_t *v) \
+{ \
+ return __ATOMIC_SUB_RETURN_RELEASE(1, &v->counter); \
+} \
+ \
+ \
static inline void a_type##_inc(a_type##_t *v) \
{ \
__ATOMIC_INC(&v->counter); \
return a_type##_add_unless(v, 1, 0); \
} \
\
+/* bitwise AND @a into the counter; no return value, relaxed ordering */ \
+static inline void a_type##_and(i_type a, a_type##_t *v) \
+{ \
+ /* pass &v->counter, not v: the __ATOMIC_* helpers take the integer's */ \
+ /* address (cf. __ATOMIC_INC(&v->counter) above); the struct pointer */ \
+ /* is the wrong type for __atomic_and_fetch/uatomic_and */ \
+ __ATOMIC_AND(a, &v->counter); \
+} \
+ \
+/* bitwise OR @a into the counter; no return value, relaxed ordering */ \
+static inline void a_type##_or(i_type a, a_type##_t *v) \
+{ \
+ __ATOMIC_OR(a, &v->counter); \
+} \
+ \
+ \
static inline i_type a_type##_xchg(a_type##_t *v, i_type i) \
{ \
return xchg(&v->counter, i); \
static inline i_type a_type##_cmpxchg_acquire(a_type##_t *v, i_type old, i_type new)\
{ \
 return cmpxchg_acquire(&v->counter, old, new); \
+} \
+ \
+/* Linux try_cmpxchg contract: returns true iff the swap happened; */ \
+/* on failure *old is updated to the value actually observed */ \
+static inline bool a_type##_try_cmpxchg_acquire(a_type##_t *v, i_type *old, i_type new)\
+{ \
+ i_type prev = *old; \
+ *old = cmpxchg_acquire(&v->counter, *old, new); \
+ return prev == *old; \
+}
+/* instantiate the generated op set for int and s64 counters */
DEF_ATOMIC_OPS(atomic, int)
DEF_ATOMIC_OPS(atomic64, s64)
#else
s64 atomic64_read(const atomic64_t *v);
+/*
+ * Acquire-flavored 64-bit read: perform the plain read first, then fence
+ * so later memory accesses cannot be reordered before it.
+ */
+static inline s64 atomic64_read_acquire(const atomic64_t *v)
+{
+ s64 val = atomic64_read(v);
+
+ smp_mb__after_atomic();
+ return val;
+}
+
+
void atomic64_set(atomic64_t *v, s64);
s64 atomic64_add_return(s64, atomic64_t *);
return atomic64_cmpxchg(v, old, new);
}
+/*
+ * Release-ordered subtract for the out-of-line atomic64 implementation:
+ * the full barrier before the op guarantees at-least-release ordering
+ * regardless of atomic64_sub_return's own ordering. Barrier placement
+ * (before the op) is what provides release semantics -- do not reorder.
+ */
+static inline s64 atomic64_sub_return_release(s64 i, atomic64_t *v)
+{
+ smp_mb__before_atomic();
+ return atomic64_sub_return(i, v);
+}
+
+
#endif
#endif /* __TOOLS_LINUX_ATOMIC_H */