// SPDX-License-Identifier: GPL-2.0

#include <linux/export.h>
#include <linux/log2.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/six.h>
#include <linux/slab.h>

#ifdef DEBUG
#define EBUG_ON(cond)           BUG_ON(cond)
#else
#define EBUG_ON(cond)           do {} while (0)
#endif

#define six_acquire(l, t)       lock_acquire(l, 0, t, 0, 0, NULL, _RET_IP_)
#define six_release(l)          lock_release(l, _RET_IP_)

static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type);

struct six_lock_vals {
        /* Value we add to the lock in order to take the lock: */
        u64                     lock_val;

        /* If the lock has this value (used as a mask), taking the lock fails: */
        u64                     lock_fail;

        /* Value we add to the lock in order to release the lock: */
        u64                     unlock_val;

        /* Mask that indicates lock is held for this type: */
        u64                     held_mask;

        /* Waitlist we wakeup when releasing the lock: */
        enum six_lock_type      unlock_wakeup;
};

#define __SIX_LOCK_HELD_read    __SIX_VAL(read_lock, ~0)
#define __SIX_LOCK_HELD_intent  __SIX_VAL(intent_lock, ~0)
#define __SIX_LOCK_HELD_write   __SIX_VAL(seq, 1)

#define LOCK_VALS {                                                     \
        [SIX_LOCK_read] = {                                             \
                .lock_val       = __SIX_VAL(read_lock, 1),              \
                .lock_fail      = __SIX_LOCK_HELD_write + __SIX_VAL(write_locking, 1),\
                .unlock_val     = -__SIX_VAL(read_lock, 1),             \
                .held_mask      = __SIX_LOCK_HELD_read,                 \
                .unlock_wakeup  = SIX_LOCK_write,                       \
        },                                                              \
        [SIX_LOCK_intent] = {                                           \
                .lock_val       = __SIX_VAL(intent_lock, 1),            \
                .lock_fail      = __SIX_LOCK_HELD_intent,               \
                .unlock_val     = -__SIX_VAL(intent_lock, 1),           \
                .held_mask      = __SIX_LOCK_HELD_intent,               \
                .unlock_wakeup  = SIX_LOCK_intent,                      \
        },                                                              \
        [SIX_LOCK_write] = {                                            \
                .lock_val       = __SIX_VAL(seq, 1),                    \
                .lock_fail      = __SIX_LOCK_HELD_read,                 \
                .unlock_val     = __SIX_VAL(seq, 1),                    \
                .held_mask      = __SIX_LOCK_HELD_write,                \
                .unlock_wakeup  = SIX_LOCK_read,                        \
        },                                                              \
}

static inline void six_set_owner(struct six_lock *lock, enum six_lock_type type,
                                 union six_lock_state old,
                                 struct task_struct *owner)
{
        if (type != SIX_LOCK_intent)
                return;

        if (!old.intent_lock) {
                EBUG_ON(lock->owner);
                lock->owner = owner;
        } else {
                EBUG_ON(lock->owner != current);
        }
}

static inline unsigned pcpu_read_count(struct six_lock *lock)
{
        unsigned read_count = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                read_count += *per_cpu_ptr(lock->readers, cpu);
        return read_count;
}

/* This is probably up there with the more evil things I've done */
#define waitlist_bitnr(id) ilog2((((union six_lock_state) { .waiters = 1 << (id) }).l))
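
/*
 * waitlist_bitnr(id) builds a six_lock_state whose only set bit is the waiter
 * bit for @id, then reads it back through the raw-word overlay (.l) so that
 * ilog2() yields that bit's position within state.v - the form clear_bit()
 * wants in __six_lock_wakeup() below.
 */

/*
 * __do_six_trylock_type() below returns > 0 if the lock was taken, 0 if it
 * wasn't, and -1 - lock_type if it wasn't taken and the attempt may have
 * caused a spurious trylock failure on that other waitlist, which the caller
 * must then wake (callers decode it as -ret - 1).
 */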

static int __do_six_trylock_type(struct six_lock *lock,
                                 enum six_lock_type type,
                                 struct task_struct *task,
                                 bool try)
{
        const struct six_lock_vals l[] = LOCK_VALS;
        union six_lock_state old, new;
        int ret;
        u64 v;

        EBUG_ON(type == SIX_LOCK_write && lock->owner != task);
        EBUG_ON(type == SIX_LOCK_write && (lock->state.seq & 1));
        EBUG_ON(type == SIX_LOCK_write && (try != !(lock->state.write_locking)));

        /*
         * Percpu reader mode:
         *
         * The basic idea behind this algorithm is that you can implement a lock
         * between two threads without any atomics, just memory barriers:
         *
         * For two threads you'll need two variables, one variable for "thread a
         * has the lock" and another for "thread b has the lock".
         *
         * To take the lock, a thread sets its variable indicating that it holds
         * the lock, then issues a full memory barrier, then reads from the
         * other thread's variable to check if the other thread thinks it has
         * the lock. If we raced, we back off and retry/sleep.
         */
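        /*
         * Illustrative sketch of that two-thread scheme (hypothetical code,
         * not part of this file; "me" is 0 or 1, flag[] is the per-thread
         * "I hold the lock" variable):
         *
         *      bool flag[2];
         *
         *      bool try_lock(int me)
         *      {
         *              flag[me] = true;
         *              smp_mb();
         *              if (flag[!me]) {
         *                      flag[me] = false;       // raced: back off
         *                      return false;
         *              }
         *              return true;
         *      }
         *
         * The percpu reader path below plays one side of this, with
         * this_cpu_inc() on lock->readers standing in for flag[me] and the
         * shared lock word (writer held/write_locking) standing in for the
         * other thread's flag.
         */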

        if (type == SIX_LOCK_read && lock->readers) {
retry:
                preempt_disable();
                this_cpu_inc(*lock->readers); /* signal that we own lock */

                smp_mb();

                old.v = READ_ONCE(lock->state.v);
                ret = !(old.v & l[type].lock_fail);

                this_cpu_sub(*lock->readers, !ret);
                preempt_enable();

                /*
                 * If we failed from the lock path and the waiting bit wasn't
                 * set, set it:
                 */
                if (!try && !ret) {
                        v = old.v;

                        do {
                                new.v = old.v = v;

                                if (!(old.v & l[type].lock_fail))
                                        goto retry;

                                if (new.waiters & (1 << type))
                                        break;

                                new.waiters |= 1 << type;
                        } while ((v = atomic64_cmpxchg(&lock->state.counter,
                                                       old.v, new.v)) != old.v);
                }

                /*
                 * If we failed because a writer was trying to take the
                 * lock, issue a wakeup because we might have caused a
                 * spurious trylock failure:
                 */
                if (old.write_locking)
                        ret = -1 - SIX_LOCK_write;
        } else if (type == SIX_LOCK_write && lock->readers) {
                if (try) {
                        atomic64_add(__SIX_VAL(write_locking, 1),
                                     &lock->state.counter);
                        smp_mb__after_atomic();
                }

                ret = !pcpu_read_count(lock);

                /*
                 * On success, we increment lock->seq; also we clear
                 * write_locking unless we failed from the lock path:
                 */
                v = 0;
                if (ret)
                        v += __SIX_VAL(seq, 1);
                if (ret || try)
                        v -= __SIX_VAL(write_locking, 1);

                if (!ret && !try && !(lock->state.waiters & (1 << SIX_LOCK_write)))
                        v += __SIX_VAL(waiters, 1 << SIX_LOCK_write);

                if (try && !ret) {
                        old.v = atomic64_add_return(v, &lock->state.counter);
                        if (old.waiters & (1 << SIX_LOCK_read))
                                ret = -1 - SIX_LOCK_read;
                } else {
                        atomic64_add(v, &lock->state.counter);
                }
        } else {
                v = READ_ONCE(lock->state.v);
                do {
                        new.v = old.v = v;

                        if (!(old.v & l[type].lock_fail)) {
                                new.v += l[type].lock_val;

                                if (type == SIX_LOCK_write)
                                        new.write_locking = 0;
                        } else if (!try && !(new.waiters & (1 << type)))
                                new.waiters |= 1 << type;
                        else
                                break; /* waiting bit already set */
                } while ((v = atomic64_cmpxchg_acquire(&lock->state.counter,
                                        old.v, new.v)) != old.v);

                ret = !(old.v & l[type].lock_fail);

                EBUG_ON(ret && !(lock->state.v & l[type].held_mask));
        }

        if (ret > 0)
                six_set_owner(lock, type, old, task);

        EBUG_ON(type == SIX_LOCK_write && (try || ret > 0) && (lock->state.write_locking));

        return ret;
}

static inline void __six_lock_wakeup(struct six_lock *lock, enum six_lock_type lock_type)
{
        struct six_lock_waiter *w, *next;
        struct task_struct *task;
        bool saw_one;
        int ret;
again:
        ret = 0;
        saw_one = false;
        raw_spin_lock(&lock->wait_lock);

        list_for_each_entry_safe(w, next, &lock->wait_list, list) {
                if (w->lock_want != lock_type)
                        continue;

                if (saw_one && lock_type != SIX_LOCK_read)
                        goto unlock;
                saw_one = true;

                ret = __do_six_trylock_type(lock, lock_type, w->task, false);
                if (ret <= 0)
                        goto unlock;

                __list_del(w->list.prev, w->list.next);
                task = w->task;
                /*
                 * Do no writes to @w besides setting lock_acquired - otherwise
                 * we would need a memory barrier:
                 */
                barrier();
                w->lock_acquired = true;
                wake_up_process(task);
        }

        clear_bit(waitlist_bitnr(lock_type), (unsigned long *) &lock->state.v);
unlock:
        raw_spin_unlock(&lock->wait_lock);

        if (ret < 0) {
                lock_type = -ret - 1;
                goto again;
        }
}

static inline void six_lock_wakeup(struct six_lock *lock,
                                   union six_lock_state state,
                                   enum six_lock_type lock_type)
{
        if (lock_type == SIX_LOCK_write && state.read_lock)
                return;

        if (!(state.waiters & (1 << lock_type)))
                return;

        __six_lock_wakeup(lock, lock_type);
}

static bool do_six_trylock_type(struct six_lock *lock,
                                enum six_lock_type type,
                                bool try)
{
        int ret;

        ret = __do_six_trylock_type(lock, type, current, try);
        if (ret < 0)
                __six_lock_wakeup(lock, -ret - 1);

        return ret > 0;
}

__always_inline __flatten
static bool __six_trylock_type(struct six_lock *lock, enum six_lock_type type)
{
        if (!do_six_trylock_type(lock, type, true))
                return false;

        if (type != SIX_LOCK_write)
                six_acquire(&lock->dep_map, 1);
        return true;
}

__always_inline __flatten
static bool __six_relock_type(struct six_lock *lock, enum six_lock_type type,
                              unsigned seq)
{
        const struct six_lock_vals l[] = LOCK_VALS;
        union six_lock_state old;
        u64 v;

        EBUG_ON(type == SIX_LOCK_write);

        if (type == SIX_LOCK_read &&
            lock->readers) {
                bool ret;

                preempt_disable();
                this_cpu_inc(*lock->readers);

                smp_mb();

                old.v = READ_ONCE(lock->state.v);
                ret = !(old.v & l[type].lock_fail) && old.seq == seq;

                this_cpu_sub(*lock->readers, !ret);
                preempt_enable();

                /*
                 * Similar to the lock path, we may have caused a spurious write
                 * lock fail and need to issue a wakeup:
                 */
                if (old.write_locking)
                        six_lock_wakeup(lock, old, SIX_LOCK_write);

                if (ret)
                        six_acquire(&lock->dep_map, 1);

                return ret;
        }

        v = READ_ONCE(lock->state.v);
        do {
                old.v = v;

                if (old.seq != seq || old.v & l[type].lock_fail)
                        return false;
        } while ((v = atomic64_cmpxchg_acquire(&lock->state.counter,
                                old.v,
                                old.v + l[type].lock_val)) != old.v);

        six_set_owner(lock, type, old, current);
        if (type != SIX_LOCK_write)
                six_acquire(&lock->dep_map, 1);
        return true;
}
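
/*
 * Illustrative relock pattern (hypothetical caller code, not part of this
 * file): remember the sequence number while the lock is held, drop the lock,
 * and later use six_relock_read() to re-take it only if no writer got in -
 * the seq field is bumped on every write lock and write unlock:
 *
 *      u32 seq = lock->state.seq;
 *
 *      six_unlock_read(lock);
 *      ... work that doesn't touch the protected data ...
 *      if (!six_relock_read(lock, seq))
 *              ... a writer intervened; re-lock and revalidate ...
 */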

#ifdef CONFIG_LOCK_SPIN_ON_OWNER

static inline bool six_optimistic_spin(struct six_lock *lock,
                                       struct six_lock_waiter *wait)
{
        struct task_struct *owner, *task = current;

        switch (wait->lock_want) {
        case SIX_LOCK_read:
                break;
        case SIX_LOCK_intent:
                if (lock->wait_list.next != &wait->list)
                        return false;
                break;
        case SIX_LOCK_write:
                return false;
        }

        rcu_read_lock();
        owner = READ_ONCE(lock->owner);

        while (owner && lock->owner == owner) {
                /*
                 * Ensure we emit the owner->on_cpu dereference _after_
                 * checking lock->owner still matches owner. If that fails,
                 * owner might point to freed memory. If it still matches,
                 * the rcu_read_lock() ensures the memory stays valid.
                 */
                barrier();

                /*
                 * Stop spinning once the lock has been handed to us; also bail
                 * if the owner isn't running, if we need to reschedule, or if
                 * we're an RT task - we'd live-lock, because we'd never let
                 * the owner complete.
                 */
                if (wait->lock_acquired ||
                    !owner->on_cpu ||
                    rt_task(task) ||
                    need_resched())
                        break;

                cpu_relax();
        }
        rcu_read_unlock();

        return wait->lock_acquired;
}

#else /* CONFIG_LOCK_SPIN_ON_OWNER */

static inline bool six_optimistic_spin(struct six_lock *lock,
                                       struct six_lock_waiter *wait)
{
        return false;
}

#endif

noinline
static int __six_lock_type_slowpath(struct six_lock *lock, enum six_lock_type type,
                                    struct six_lock_waiter *wait,
                                    six_lock_should_sleep_fn should_sleep_fn, void *p)
{
        union six_lock_state old;
        int ret = 0;

        if (type == SIX_LOCK_write) {
                EBUG_ON(lock->state.write_locking);
                atomic64_add(__SIX_VAL(write_locking, 1), &lock->state.counter);
                smp_mb__after_atomic();
        }

        lock_contended(&lock->dep_map, _RET_IP_);

        wait->task              = current;
        wait->lock_want         = type;
        wait->lock_acquired     = false;

        raw_spin_lock(&lock->wait_lock);
        /*
         * Retry taking the lock now that we hold the waitlist lock; we may
         * have raced with an unlock:
         */
        ret = __do_six_trylock_type(lock, type, current, false);
        if (ret <= 0)
                list_add_tail(&wait->list, &lock->wait_list);
        raw_spin_unlock(&lock->wait_lock);

        if (unlikely(ret > 0)) {
                ret = 0;
                goto out;
        }

        if (unlikely(ret < 0)) {
                __six_lock_wakeup(lock, -ret - 1);
                ret = 0;
        }

        if (six_optimistic_spin(lock, wait))
                goto out;

        while (1) {
                set_current_state(TASK_UNINTERRUPTIBLE);

                if (wait->lock_acquired)
                        break;

                ret = should_sleep_fn ? should_sleep_fn(lock, p) : 0;
                if (unlikely(ret)) {
                        raw_spin_lock(&lock->wait_lock);
                        if (!wait->lock_acquired)
                                list_del(&wait->list);
                        raw_spin_unlock(&lock->wait_lock);

                        if (wait->lock_acquired)
                                do_six_unlock_type(lock, type);
                        break;
                }

                schedule();
        }

        __set_current_state(TASK_RUNNING);
out:
        if (ret && type == SIX_LOCK_write) {
                old.v = atomic64_sub_return(__SIX_VAL(write_locking, 1),
                                            &lock->state.counter);
                six_lock_wakeup(lock, old, SIX_LOCK_read);
        }

        return ret;
}

__always_inline __flatten
static int __six_lock_type_waiter(struct six_lock *lock, enum six_lock_type type,
                         struct six_lock_waiter *wait,
                         six_lock_should_sleep_fn should_sleep_fn, void *p)
{
        int ret;

        if (type != SIX_LOCK_write)
                six_acquire(&lock->dep_map, 0);

        ret = do_six_trylock_type(lock, type, true) ? 0
                : __six_lock_type_slowpath(lock, type, wait, should_sleep_fn, p);

        if (ret && type != SIX_LOCK_write)
                six_release(&lock->dep_map);
        if (!ret)
                lock_acquired(&lock->dep_map, _RET_IP_);

        return ret;
}

__always_inline
static int __six_lock_type(struct six_lock *lock, enum six_lock_type type,
                           six_lock_should_sleep_fn should_sleep_fn, void *p)
{
        struct six_lock_waiter wait;

        return __six_lock_type_waiter(lock, type, &wait, should_sleep_fn, p);
}

__always_inline __flatten
static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type)
{
        const struct six_lock_vals l[] = LOCK_VALS;
        union six_lock_state state;

        if (type == SIX_LOCK_intent)
                lock->owner = NULL;

        if (type == SIX_LOCK_read &&
            lock->readers) {
                smp_mb(); /* unlock barrier */
                this_cpu_dec(*lock->readers);
                smp_mb(); /* between unlocking and checking for waiters */
                state.v = READ_ONCE(lock->state.v);
        } else {
                EBUG_ON(!(lock->state.v & l[type].held_mask));
                state.v = atomic64_add_return_release(l[type].unlock_val,
                                                      &lock->state.counter);
        }

        six_lock_wakeup(lock, state, l[type].unlock_wakeup);
}

__always_inline __flatten
static void __six_unlock_type(struct six_lock *lock, enum six_lock_type type)
{
        EBUG_ON(type == SIX_LOCK_write &&
                !(lock->state.v & __SIX_LOCK_HELD_intent));
        EBUG_ON((type == SIX_LOCK_write ||
                 type == SIX_LOCK_intent) &&
                lock->owner != current);

        if (type != SIX_LOCK_write)
                six_release(&lock->dep_map);

        if (type == SIX_LOCK_intent &&
            lock->intent_lock_recurse) {
                --lock->intent_lock_recurse;
                return;
        }

        do_six_unlock_type(lock, type);
}

#define __SIX_LOCK(type)                                                \
bool six_trylock_##type(struct six_lock *lock)                          \
{                                                                       \
        return __six_trylock_type(lock, SIX_LOCK_##type);               \
}                                                                       \
EXPORT_SYMBOL_GPL(six_trylock_##type);                                  \
                                                                        \
bool six_relock_##type(struct six_lock *lock, u32 seq)                  \
{                                                                       \
        return __six_relock_type(lock, SIX_LOCK_##type, seq);           \
}                                                                       \
EXPORT_SYMBOL_GPL(six_relock_##type);                                   \
                                                                        \
int six_lock_##type(struct six_lock *lock,                              \
                    six_lock_should_sleep_fn should_sleep_fn, void *p)  \
{                                                                       \
        return __six_lock_type(lock, SIX_LOCK_##type, should_sleep_fn, p);\
}                                                                       \
EXPORT_SYMBOL_GPL(six_lock_##type);                                     \
                                                                        \
int six_lock_waiter_##type(struct six_lock *lock,                       \
                           struct six_lock_waiter *wait,                \
                           six_lock_should_sleep_fn should_sleep_fn, void *p)\
{                                                                       \
        return __six_lock_type_waiter(lock, SIX_LOCK_##type, wait, should_sleep_fn, p);\
}                                                                       \
EXPORT_SYMBOL_GPL(six_lock_waiter_##type);                              \
                                                                        \
void six_unlock_##type(struct six_lock *lock)                           \
{                                                                       \
        __six_unlock_type(lock, SIX_LOCK_##type);                       \
}                                                                       \
EXPORT_SYMBOL_GPL(six_unlock_##type);

__SIX_LOCK(read)
__SIX_LOCK(intent)
__SIX_LOCK(write)

#undef __SIX_LOCK
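
/*
 * Usage sketch for the generated API (hypothetical caller code, not part of
 * this file; assumes the lock was set up with six_lock_init()):
 *
 *      struct six_lock l;
 *
 *      six_lock_init(&l);
 *
 *      six_lock_read(&l, NULL, NULL);          // shared access
 *      ...
 *      six_unlock_read(&l);
 *
 *      six_lock_intent(&l, NULL, NULL);        // may coexist with readers
 *      six_lock_write(&l, NULL, NULL);         // requires intent to be held
 *      ...
 *      six_unlock_write(&l);
 *      six_unlock_intent(&l);
 *
 * should_sleep_fn/p may be NULL; when non-NULL the callback runs before
 * sleeping, and a nonzero return aborts the lock attempt and is passed back
 * as the return value of six_lock_*().
 */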

/* Convert from intent to read: */
void six_lock_downgrade(struct six_lock *lock)
{
        six_lock_increment(lock, SIX_LOCK_read);
        six_unlock_intent(lock);
}
EXPORT_SYMBOL_GPL(six_lock_downgrade);

bool six_lock_tryupgrade(struct six_lock *lock)
{
        union six_lock_state old, new;
        u64 v = READ_ONCE(lock->state.v);

        do {
                new.v = old.v = v;

                if (new.intent_lock)
                        return false;

                if (!lock->readers) {
                        EBUG_ON(!new.read_lock);
                        new.read_lock--;
                }

                new.intent_lock = 1;
        } while ((v = atomic64_cmpxchg_acquire(&lock->state.counter,
                                old.v, new.v)) != old.v);

        if (lock->readers)
                this_cpu_dec(*lock->readers);

        six_set_owner(lock, SIX_LOCK_intent, old, current);

        return true;
}
EXPORT_SYMBOL_GPL(six_lock_tryupgrade);

bool six_trylock_convert(struct six_lock *lock,
                         enum six_lock_type from,
                         enum six_lock_type to)
{
        EBUG_ON(to == SIX_LOCK_write || from == SIX_LOCK_write);

        if (to == from)
                return true;

        if (to == SIX_LOCK_read) {
                six_lock_downgrade(lock);
                return true;
        } else {
                return six_lock_tryupgrade(lock);
        }
}
EXPORT_SYMBOL_GPL(six_trylock_convert);
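
/*
 * Conversion sketch (hypothetical caller code, not part of this file):
 * upgrading has trylock semantics - it fails if another thread already holds
 * the lock with intent - while downgrading always succeeds:
 *
 *      six_lock_read(&l, NULL, NULL);
 *      if (six_lock_tryupgrade(&l)) {
 *              // now held with intent, our read count was dropped
 *              six_lock_downgrade(&l);         // back to a read lock
 *      }
 *      six_unlock_read(&l);
 *
 * six_trylock_convert() wraps both directions behind a single from/to call.
 */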

/*
 * Increment read/intent lock count, assuming we already have it read or intent
 * locked:
 */
void six_lock_increment(struct six_lock *lock, enum six_lock_type type)
{
        const struct six_lock_vals l[] = LOCK_VALS;

        six_acquire(&lock->dep_map, 0);

        /* XXX: assert already locked, and that we don't overflow: */

        switch (type) {
        case SIX_LOCK_read:
                if (lock->readers) {
                        this_cpu_inc(*lock->readers);
                } else {
                        EBUG_ON(!lock->state.read_lock &&
                                !lock->state.intent_lock);
                        atomic64_add(l[type].lock_val, &lock->state.counter);
                }
                break;
        case SIX_LOCK_intent:
                EBUG_ON(!lock->state.intent_lock);
                lock->intent_lock_recurse++;
                break;
        case SIX_LOCK_write:
                BUG();
                break;
        }
}
EXPORT_SYMBOL_GPL(six_lock_increment);

void six_lock_wakeup_all(struct six_lock *lock)
{
        struct six_lock_waiter *w;

        raw_spin_lock(&lock->wait_lock);
        list_for_each_entry(w, &lock->wait_list, list)
                wake_up_process(w->task);
        raw_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL_GPL(six_lock_wakeup_all);

void six_lock_pcpu_free(struct six_lock *lock)
{
        BUG_ON(lock->readers && pcpu_read_count(lock));
        BUG_ON(lock->state.read_lock);

        free_percpu(lock->readers);
        lock->readers = NULL;
}
EXPORT_SYMBOL_GPL(six_lock_pcpu_free);

void six_lock_pcpu_alloc(struct six_lock *lock)
{
#ifdef __KERNEL__
        if (!lock->readers)
                lock->readers = alloc_percpu(unsigned);
#endif
}
EXPORT_SYMBOL_GPL(six_lock_pcpu_alloc);
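
/*
 * Percpu reader mode is opt-in per lock (hypothetical caller code, not part
 * of this file); it makes read locking a per-cpu increment at the cost of a
 * write lock that must scan every CPU's counter:
 *
 *      six_lock_init(&l);
 *      six_lock_pcpu_alloc(&l);
 *      ...
 *      six_lock_pcpu_free(&l);
 */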

/*
 * Returns lock held counts, for read, intent and write:
 */
struct six_lock_count six_lock_counts(struct six_lock *lock)
{
        struct six_lock_count ret;

        ret.n[SIX_LOCK_read]    = 0;
        ret.n[SIX_LOCK_intent]  = lock->state.intent_lock + lock->intent_lock_recurse;
        ret.n[SIX_LOCK_write]   = lock->state.seq & 1;

        if (!lock->readers)
                ret.n[SIX_LOCK_read] += lock->state.read_lock;
        else {
                int cpu;

                for_each_possible_cpu(cpu)
                        ret.n[SIX_LOCK_read] += *per_cpu_ptr(lock->readers, cpu);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(six_lock_counts);
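
/*
 * Illustrative use (hypothetical caller code, not part of this file): the
 * counts are handy for assertions about lock state, e.g.
 *
 *      BUG_ON(!six_lock_counts(&l).n[SIX_LOCK_intent]);
 */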