/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

#ifdef ATOMIC64_SPINLOCK

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable. Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] ____cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
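
/*
 * Worked example (illustrative; not part of the original file): assume
 * L1_CACHE_SHIFT == 5 and an atomic64_t at address 0xc1a2b3c4.
 *
 *	addr >>= 5;				-> 0x060d159e (drop offset-in-line bits)
 *	addr ^= (addr >> 8) ^ (addr >> 16);	-> 0x060b1e86 (fold high bits down)
 *	addr & (NR_LOCKS - 1)			-> 0x6
 *
 * so this variable shares atomic64_lock[6] with every other atomic64_t
 * whose address hashes to the same index.
 */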

long long atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}

void atomic64_set(atomic64_t *v, long long i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
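
/*
 * Usage sketch (illustrative only; not part of the original file): with
 * the locked read and set above, a 64-bit statistics counter stays
 * tear-free on 32-bit SMP. The helper name is hypothetical; the
 * atomic64_add() declaration comes from <linux/atomic.h>.
 */
static inline long long example_count_event(atomic64_t *ctr)
{
	atomic64_add(1, ctr);		/* locked read-modify-write */
	return atomic64_read(ctr);	/* locked, untorn 64-bit read */
}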

#define ATOMIC64_OP(op, c_op)						\
void atomic64_##op(long long a, atomic64_t *v)				\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
}

#define ATOMIC64_OP_RETURN(op, c_op)					\
long long atomic64_##op##_return(long long a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	long long val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = (v->counter c_op a);					\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
long long atomic64_fetch_##op(long long a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	long long val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = v->counter;						\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)
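
/*
 * Illustrative note (not part of the original file): ATOMIC64_OPS(add, +=)
 * above expands to three spinlock-protected functions, roughly:
 *
 *	void atomic64_add(long long a, atomic64_t *v);		    adds a
 *	long long atomic64_add_return(long long a, atomic64_t *v); new value
 *	long long atomic64_fetch_add(long long a, atomic64_t *v);  old value
 *
 * each taking and releasing the hashed spinlock for v.
 */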

/* No atomic64_{and,or,xor}_return() exist, so the bitwise ops skip OP_RETURN. */
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

long long atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}

long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
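
/*
 * Usage sketch (illustrative only; not part of the original file):
 * callers can build arbitrary read-modify-write operations on top of
 * atomic64_cmpxchg() with the usual compare-and-swap retry loop. The
 * helper name is hypothetical; e.g. a 64-bit "store max":
 */
static inline void example_atomic64_max(atomic64_t *v, long long new)
{
	long long old = atomic64_read(v);

	/* Retry until no other CPU changes *v between read and swap. */
	while (old < new) {
		long long seen = atomic64_cmpxchg(v, old, new);

		if (seen == old)
			break;		/* our swap won */
		old = seen;		/* lost the race; re-evaluate */
	}
}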

long long atomic64_xchg(atomic64_t *v, long long new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}

int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	int ret = 0;

	raw_spin_lock_irqsave(lock, flags);
	if (v->counter != u) {
		v->counter += a;
		ret = 1;
	}
	raw_spin_unlock_irqrestore(lock, flags);
	return ret;
}
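
/*
 * Usage sketch (illustrative only; not part of the original file):
 * atomic64_add_unless() is the building block for "increment unless
 * zero" reference counting. The helper name is hypothetical:
 */
static inline int example_get_ref(atomic64_t *refcount)
{
	/* Take a reference only if the object is still live (count != 0). */
	return atomic64_add_unless(refcount, 1, 0);
}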