#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. There are two types
 * of readers:
 * 1. Sequence readers which never block a writer but they may have to retry
 *    if a writer is in progress by detecting change in sequence number.
 *    Writers do not wait for a sequence reader.
 * 2. Locking readers which will wait if a writer or another locking reader
 *    is in progress. A locking reader in progress will also block a writer
 *    from going forward. Unlike the regular rwlock, the read lock here is
 *    exclusive so that only one locking reader can get it.
 *
 * This is not as cache friendly as brlock. Also, this may not work well
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected non-blocking reader usage:
 *	do {
 *	    seq = read_seqbegin(&foo);
 *	    ...
 *	} while (read_seqretry(&foo, seq));
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */
#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <linux/compiler.h>
#include <asm/processor.h>	/* cpu_relax() */

/*
 * Version using sequence counter only.
 * This can be used when code has its own mutex protecting the
 * updating starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end().
 */
typedef struct seqcount {
	unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} seqcount_t;

static inline void __seqcount_init(seqcount_t *s, const char *name,
				   struct lock_class_key *key)
{
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	lockdep_init_map(&s->dep_map, name, key, 0);
	s->sequence = 0;
}

# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)

#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}
/**
 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret;

repeat:
	ret = READ_ONCE(s->sequence);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	return ret;
}
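
/*
 * Illustrative sketch (not part of the original header): a reader built on
 * the barrier-less primitives that supplies the required smp_rmb() ordering
 * itself. The seqcount "foo_seq" and the data it protects are hypothetical.
 *
 *	unsigned seq;
 *
 *	do {
 *		seq = __read_seqcount_begin(&foo_seq);
 *		smp_rmb();		// order data loads after the count load
 *		copy = foo_data;	// load the protected data
 *		smp_rmb();		// order data loads before the re-check
 *	} while (__read_seqcount_retry(&foo_seq, seq));
 */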
/**
 * raw_read_seqcount - Read the raw seqcount
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_read_seqcount opens a read critical section of the given
 * seqcount without any lockdep checking and without checking or
 * masking the sequence counter LSB. Calling code is responsible for
 * handling that.
 */
static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
	unsigned ret = READ_ONCE(s->sequence);

	smp_rmb();
	return ret;
}
/**
 * raw_read_seqcount_begin - start seq-read critical section w/o lockdep
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_read_seqcount_begin opens a read critical section of the given
 * seqcount, but without any lockdep checking. Validity of the critical
 * section is tested by calling read_seqcount_retry().
 */
static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = __read_seqcount_begin(s);

	smp_rmb();
	return ret;
}
/**
 * read_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling read_seqcount_retry().
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
	seqcount_lockdep_reader_access(s);
	return raw_read_seqcount_begin(s);
}
/**
 * raw_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling read_seqcount_retry().
 *
 * Unlike read_seqcount_begin(), this function will not wait for the count
 * to stabilize. If a writer is active when we begin, we will fail the
 * read_seqcount_retry() instead of stabilizing at the beginning of the
 * critical section.
 */
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = READ_ONCE(s->sequence);

	smp_rmb();
	return ret & ~1;
}
/**
 * __read_seqcount_retry - end a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	return unlikely(s->sequence != start);
}
/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * read_seqcount_retry closes a read critical section of the given seqcount.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return __read_seqcount_retry(s, start);
}
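
/*
 * Illustrative sketch (not part of the original header): the basic lockless
 * reader loop over a bare seqcount_t. "foo_seq" and "foo_data" are
 * hypothetical names.
 *
 *	unsigned seq;
 *
 *	do {
 *		seq = read_seqcount_begin(&foo_seq);
 *		copy = foo_data;	// read the protected data
 *	} while (read_seqcount_retry(&foo_seq, seq));
 */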
static inline void raw_write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
}

static inline void raw_write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
}
/**
 * raw_write_seqcount_barrier - do a seq write barrier
 * @s: pointer to seqcount_t
 *
 * This can be used to provide an ordering guarantee instead of the
 * usual consistency guarantee. It is one wmb cheaper, because we can
 * collapse the two back-to-back wmb()s.
 *
 *      seqcount_t seq;
 *      bool X = true, Y = false;
 *
 *      void read(void)
 *      {
 *              bool x, y;
 *
 *              do {
 *                      int s = read_seqcount_begin(&seq);
 *
 *                      x = X; y = Y;
 *
 *              } while (read_seqcount_retry(&seq, s));
 *
 *              BUG_ON(!x && !y);
 *      }
 *
 *      void write(void)
 *      {
 *              Y = true;
 *
 *              raw_write_seqcount_barrier(&seq);
 *
 *              X = false;
 *      }
 */
static inline void raw_write_seqcount_barrier(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
	s->sequence++;
}
static inline int raw_read_seqcount_latch(seqcount_t *s)
{
	int seq = READ_ONCE(s->sequence);
	/* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
	smp_read_barrier_depends();
	return seq;
}
/**
 * raw_write_seqcount_latch - redirect readers to even/odd copy
 * @s: pointer to seqcount_t
 *
 * The latch technique is a multiversion concurrency control method that allows
 * queries during non-atomic modifications. If you can guarantee queries never
 * interrupt the modification -- e.g. the concurrency is strictly between CPUs
 * -- you most likely do not need this.
 *
 * Where the traditional RCU/lockless data structures rely on atomic
 * modifications to ensure queries observe either the old or the new state the
 * latch allows the same for non-atomic updates. The trade-off is doubling the
 * cost of storage; we have to maintain two copies of the entire data
 * structure.
 *
 * Very simply put: we first modify one copy and then the other. This ensures
 * there is always one copy in a stable state, ready to give us an answer.
 *
 * The basic form is a data structure like:
 *
 * struct latch_struct {
 *	seqcount_t		seq;
 *	struct data_struct	data[2];
 * };
 *
 * Where a modification, which is assumed to be externally serialized, does the
 * following:
 *
 * void latch_modify(struct latch_struct *latch, ...)
 * {
 *	smp_wmb();	<- Ensure that the last data[1] update is visible
 *	latch->seq++;
 *	smp_wmb();	<- Ensure that the seqcount update is visible
 *
 *	modify(latch->data[0], ...);
 *
 *	smp_wmb();	<- Ensure that the data[0] update is visible
 *	latch->seq++;
 *	smp_wmb();	<- Ensure that the seqcount update is visible
 *
 *	modify(latch->data[1], ...);
 * }
 *
 * The query will have a form like:
 *
 * struct entry *latch_query(struct latch_struct *latch, ...)
 * {
 *	struct entry *entry;
 *	unsigned seq, idx;
 *
 *	do {
 *		seq = raw_read_seqcount_latch(&latch->seq);
 *
 *		idx = seq & 0x01;
 *		entry = data_query(latch->data[idx], ...);
 *
 *		smp_rmb();
 *	} while (seq != latch->seq);
 *
 *	return entry;
 * }
 *
 * So during the modification, queries are first redirected to data[1]. Then we
 * modify data[0]. When that is complete, we redirect queries back to data[0]
 * and we can modify data[1].
 *
 * NOTE: The non-requirement for atomic modifications does _NOT_ include
 *       the publishing of new entries in the case where data is a dynamic
 *       data structure.
 *
 *       An iteration might start in data[0] and get suspended long enough
 *       to miss an entire modification sequence, once it resumes it might
 *       observe the new entry.
 *
 * NOTE: When data is a dynamic data structure, one should use regular RCU
 *       patterns to manage the lifetimes of the objects within.
 */
static inline void raw_write_seqcount_latch(seqcount_t *s)
{
	smp_wmb();	/* prior stores before incrementing "sequence" */
	s->sequence++;
	smp_wmb();	/* increment "sequence" before following stores */
}
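
/*
 * Illustrative sketch (not part of the original header): the latch_modify()
 * example above rewritten with this helper, so the counter increments and
 * barriers are not open-coded. "latch" refers to the hypothetical
 * latch_struct from the comment above.
 *
 *	void latch_modify(struct latch_struct *latch, ...)
 *	{
 *		raw_write_seqcount_latch(&latch->seq);
 *		modify(latch->data[0], ...);
 *		raw_write_seqcount_latch(&latch->seq);
 *		modify(latch->data[1], ...);
 *	}
 */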
/*
 * Sequence counter only version assumes that callers are using their
 * own mutexing.
 */
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
	raw_write_seqcount_begin(s);
	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}

static inline void write_seqcount_begin(seqcount_t *s)
{
	write_seqcount_begin_nested(s, 0);
}

static inline void write_seqcount_end(seqcount_t *s)
{
	seqcount_release(&s->dep_map, 1, _RET_IP_);
	raw_write_seqcount_end(s);
}
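
/*
 * Illustrative sketch (not part of the original header): an updater that
 * serializes writers with its own lock and publishes updates through the
 * seqcount. "foo_lock", "foo_seq" and "foo_data" are hypothetical names.
 *
 *	spin_lock(&foo_lock);
 *	write_seqcount_begin(&foo_seq);
 *	foo_data = new_value;		// update the protected data
 *	write_seqcount_end(&foo_seq);
 *	spin_unlock(&foo_lock);
 */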
/**
 * write_seqcount_invalidate - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * After write_seqcount_invalidate, no read-side seq operations will complete
 * successfully and see data older than this.
 */
static inline void write_seqcount_invalidate(seqcount_t *s)
{
	smp_wmb();
	s->sequence += 2;
}
typedef struct {
	struct seqcount seqcount;
	spinlock_t lock;
} seqlock_t;
/*
 * These macros triggered gcc-3.x compile-time problems. We think these are
 * OK now. Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname)			\
	{						\
		.seqcount = SEQCNT_ZERO(lockname),	\
		.lock = __SPIN_LOCK_UNLOCKED(lockname)	\
	}

#define seqlock_init(x)					\
	do {						\
		seqcount_init(&(x)->seqcount);		\
		spin_lock_init(&(x)->lock);		\
	} while (0)

#define DEFINE_SEQLOCK(x) \
		seqlock_t x = __SEQLOCK_UNLOCKED(x)
/*
 * Read side functions for starting and finalizing a read side section.
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
	return read_seqcount_begin(&sl->seqcount);
}

static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
	return read_seqcount_retry(&sl->seqcount, start);
}
/*
 * Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock(&sl->lock);
}
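
/*
 * Illustrative sketch (not part of the original header): the writer side that
 * pairs with the "Expected non-blocking reader usage" loop at the top of this
 * file. The seqlock "foo" and the data it protects are hypothetical.
 *
 *	write_seqlock(&foo);
 *	foo_data = new_value;	// update the protected data
 *	write_sequnlock(&foo);
 */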
static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_bh(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_bh(&sl->lock);
}

static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_irq(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irq(&sl->lock);
}
static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	write_seqcount_begin(&sl->seqcount);
	return flags;
}

#define write_seqlock_irqsave(lock, flags)				\
	do { flags = __write_seqlock_irqsave(lock); } while (0)

static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}
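
/*
 * Illustrative sketch (not part of the original header): writer usage from a
 * context that must also disable local interrupts. The seqlock "foo" and the
 * data it protects are hypothetical.
 *
 *	unsigned long flags;
 *
 *	write_seqlock_irqsave(&foo, flags);
 *	foo_data = new_value;
 *	write_sequnlock_irqrestore(&foo, flags);
 */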
/*
 * A locking reader exclusively locks out other writers and locking readers,
 * but doesn't update the sequence number. Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
	spin_lock(&sl->lock);
}

static inline void read_sequnlock_excl(seqlock_t *sl)
{
	spin_unlock(&sl->lock);
}
/**
 * read_seqbegin_or_lock - begin a sequence number check or locking block
 * @lock: sequence lock
 * @seq : sequence number to be checked
 *
 * First try it once optimistically without taking the lock. If that fails,
 * take the lock. The sequence number is also used as a marker for deciding
 * whether to be a reader (even) or writer (odd).
 * N.B. seq must be initialized to an even number to begin with.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl(lock);
}

static inline int need_seqretry(seqlock_t *lock, int seq)
{
	return !(seq & 1) && read_seqretry(lock, seq);
}

static inline void done_seqretry(seqlock_t *lock, int seq)
{
	if (seq & 1)
		read_sequnlock_excl(lock);
}
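
/*
 * Illustrative sketch (not part of the original header): the usual pattern
 * for the three helpers above. The first pass runs locklessly; if it raced
 * with a writer, seq is forced odd and the retry becomes a locking reader.
 * The seqlock "foo" and the data it reads are hypothetical.
 *
 *	int seq = 0;
 *
 * retry:
 *	read_seqbegin_or_lock(&foo, &seq);
 *	copy = foo_data;
 *	if (need_seqretry(&foo, seq)) {
 *		seq = 1;	// take the lock on the second pass
 *		goto retry;
 *	}
 *	done_seqretry(&foo, seq);
 */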
static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
}

static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
	spin_unlock_bh(&sl->lock);
}

static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
}

static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	return flags;
}

#define read_seqlock_excl_irqsave(lock, flags)				\
	do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
	spin_unlock_irqrestore(&sl->lock, flags);
}
static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
	unsigned long flags = 0;

	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl_irqsave(lock, flags);

	return flags;
}

static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
	if (seq & 1)
		read_sequnlock_excl_irqrestore(lock, flags);
}
#endif /* __LINUX_SEQLOCK_H */