// SPDX-License-Identifier: GPL-2.0

#include <linux/export.h>
#include <linux/log2.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/six.h>
#include <linux/slab.h>

#ifdef DEBUG
#define EBUG_ON(cond)           BUG_ON(cond)
#else
#define EBUG_ON(cond)           do {} while (0)
#endif

#define six_acquire(l, t)       lock_acquire(l, 0, t, 0, 0, NULL, _RET_IP_)
#define six_release(l)          lock_release(l, _RET_IP_)

struct six_lock_vals {
        /* Value we add to the lock in order to take the lock: */
        u64                     lock_val;

        /* If the lock has this value (used as a mask), taking the lock fails: */
        u64                     lock_fail;

        /* Value we add to the lock in order to release the lock: */
        u64                     unlock_val;

        /* Mask that indicates lock is held for this type: */
        u64                     held_mask;

        /* Waitlist we wakeup when releasing the lock: */
        enum six_lock_type      unlock_wakeup;
};

#define __SIX_LOCK_HELD_read    __SIX_VAL(read_lock, ~0)
#define __SIX_LOCK_HELD_intent  __SIX_VAL(intent_lock, ~0)
#define __SIX_LOCK_HELD_write   __SIX_VAL(seq, 1)

#define LOCK_VALS {                                                     \
        [SIX_LOCK_read] = {                                             \
                .lock_val       = __SIX_VAL(read_lock, 1),              \
                .lock_fail      = __SIX_LOCK_HELD_write + __SIX_VAL(write_locking, 1),\
                .unlock_val     = -__SIX_VAL(read_lock, 1),             \
                .held_mask      = __SIX_LOCK_HELD_read,                 \
                .unlock_wakeup  = SIX_LOCK_write,                       \
        },                                                              \
        [SIX_LOCK_intent] = {                                           \
                .lock_val       = __SIX_VAL(intent_lock, 1),            \
                .lock_fail      = __SIX_LOCK_HELD_intent,               \
                .unlock_val     = -__SIX_VAL(intent_lock, 1),           \
                .held_mask      = __SIX_LOCK_HELD_intent,               \
                .unlock_wakeup  = SIX_LOCK_intent,                      \
        },                                                              \
        [SIX_LOCK_write] = {                                            \
                .lock_val       = __SIX_VAL(seq, 1),                    \
                .lock_fail      = __SIX_LOCK_HELD_read,                 \
                .unlock_val     = __SIX_VAL(seq, 1),                    \
                .held_mask      = __SIX_LOCK_HELD_write,                \
                .unlock_wakeup  = SIX_LOCK_read,                        \
        },                                                              \
}

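/*
 * lock->owner is only tracked for intent locks: it's what the optimistic
 * spinning code spins on, and what six_lock_wakeup() uses to wake up a writer
 * that's waiting for readers to drain.
 */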
static inline void six_set_owner(struct six_lock *lock, enum six_lock_type type,
                                 union six_lock_state old)
{
        if (type != SIX_LOCK_intent)
                return;

        if (!old.intent_lock) {
                EBUG_ON(lock->owner);
                lock->owner = current;
        } else {
                EBUG_ON(lock->owner != current);
        }
}

static inline unsigned pcpu_read_count(struct six_lock *lock)
{
        unsigned read_count = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                read_count += *per_cpu_ptr(lock->readers, cpu);
        return read_count;
}

struct six_lock_waiter {
        struct list_head        list;
        struct task_struct      *task;
};

/* This is probably up there with the more evil things I've done */
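/*
 * waitlist_bitnr() computes the bit number, within the full 64-bit lock state
 * word, of the waiters bit for waitlist @id - the bit that set_bit() and
 * clear_bit() on &lock->state.v below operate on.
 */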
#define waitlist_bitnr(id) ilog2((((union six_lock_state) { .waiters = 1 << (id) }).l))

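/*
 * Called with the lock state as it was just after an unlock (or a failed write
 * lock attempt): if the write waitlist is being woken, kick the owner - the
 * would-be writer - once the readers have drained; otherwise wake tasks on the
 * given wait list (only the first one for the exclusive intent waitlist).
 */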
static inline void six_lock_wakeup(struct six_lock *lock,
                                   union six_lock_state state,
                                   unsigned waitlist_id)
{
        if (waitlist_id == SIX_LOCK_write) {
                if (state.write_locking && !state.read_lock) {
                        struct task_struct *p = READ_ONCE(lock->owner);
                        if (p)
                                wake_up_process(p);
                }
        } else {
                struct list_head *wait_list = &lock->wait_list[waitlist_id];
                struct six_lock_waiter *w, *next;

                if (!(state.waiters & (1 << waitlist_id)))
                        return;

                clear_bit(waitlist_bitnr(waitlist_id),
                          (unsigned long *) &lock->state.v);

                raw_spin_lock(&lock->wait_lock);

                list_for_each_entry_safe(w, next, wait_list, list) {
                        list_del_init(&w->list);

                        if (wake_up_process(w->task) &&
                            waitlist_id != SIX_LOCK_read) {
                                if (!list_empty(wait_list))
                                        set_bit(waitlist_bitnr(waitlist_id),
                                                (unsigned long *) &lock->state.v);
                                break;
                        }
                }

                raw_spin_unlock(&lock->wait_lock);
        }
}

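/*
 * The core lock-acquisition attempt, shared by the trylock, lock fastpath and
 * slowpath. @try means this is a genuine trylock; with @try false we're being
 * called out of the slowpath, so on failure we also set the waiters bit for
 * read/intent locks, and for write locks the caller has already set
 * write_locking to make new readers back off.
 */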
static __always_inline bool do_six_trylock_type(struct six_lock *lock,
                                                enum six_lock_type type,
                                                bool try)
{
        const struct six_lock_vals l[] = LOCK_VALS;
        union six_lock_state old, new;
        bool ret;
        u64 v;

        EBUG_ON(type == SIX_LOCK_write && lock->owner != current);
        EBUG_ON(type == SIX_LOCK_write && (lock->state.seq & 1));

        EBUG_ON(type == SIX_LOCK_write && (try != !(lock->state.write_locking)));

        /*
         * Percpu reader mode:
         *
         * The basic idea behind this algorithm is that you can implement a lock
         * between two threads without any atomics, just memory barriers:
         *
         * For two threads you'll need two variables, one variable for "thread a
         * has the lock" and another for "thread b has the lock".
         *
         * To take the lock, a thread sets its variable indicating that it holds
         * the lock, then issues a full memory barrier, then reads from the
         * other thread's variable to check if the other thread thinks it has
         * the lock. If we raced, we back off and retry/sleep.
         */
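        /*
         * A rough sketch (not part of this lock's API) of the two-thread
         * protocol described above, using hypothetical flags my_flag and
         * other_flag:
         *
         *      WRITE_ONCE(my_flag, 1);
         *      smp_mb();
         *      if (READ_ONCE(other_flag))
         *              WRITE_ONCE(my_flag, 0);    <- lost the race, back off
         *      else
         *              ... we hold the lock ...
         *
         * Below, each reader's percpu counter plays the role of my_flag and
         * the write_locking bit plays the role of other_flag (and vice versa
         * for the writer).
         */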

        if (type == SIX_LOCK_read && lock->readers) {
retry:
                preempt_disable();
                this_cpu_inc(*lock->readers); /* signal that we own lock */

                smp_mb();

                old.v = READ_ONCE(lock->state.v);
                ret = !(old.v & l[type].lock_fail);

                this_cpu_sub(*lock->readers, !ret);
                preempt_enable();

                /*
                 * If we failed because a writer was trying to take the
                 * lock, issue a wakeup because we might have caused a
                 * spurious trylock failure:
                 */
                if (old.write_locking) {
                        struct task_struct *p = READ_ONCE(lock->owner);

                        if (p)
                                wake_up_process(p);
                }

                /*
                 * If we failed from the lock path and the waiting bit wasn't
                 * set, set it:
                 */
                if (!try && !ret) {
                        v = old.v;

                        do {
                                new.v = old.v = v;

                                if (!(old.v & l[type].lock_fail))
                                        goto retry;

                                if (new.waiters & (1 << type))
                                        break;

                                new.waiters |= 1 << type;
                        } while ((v = atomic64_cmpxchg(&lock->state.counter,
                                                       old.v, new.v)) != old.v);
                }
        } else if (type == SIX_LOCK_write && lock->readers) {
                if (try) {
                        atomic64_add(__SIX_VAL(write_locking, 1),
                                     &lock->state.counter);
                        smp_mb__after_atomic();
                }

                ret = !pcpu_read_count(lock);

                /*
                 * On success, we increment lock->seq; also we clear
                 * write_locking unless we failed from the lock path:
                 */
                v = 0;
                if (ret)
                        v += __SIX_VAL(seq, 1);
                if (ret || try)
                        v -= __SIX_VAL(write_locking, 1);

                if (try && !ret) {
                        old.v = atomic64_add_return(v, &lock->state.counter);
                        six_lock_wakeup(lock, old, SIX_LOCK_read);
                } else {
                        atomic64_add(v, &lock->state.counter);
                }
        } else {
                v = READ_ONCE(lock->state.v);
                do {
                        new.v = old.v = v;

                        if (!(old.v & l[type].lock_fail)) {
                                new.v += l[type].lock_val;

                                if (type == SIX_LOCK_write)
                                        new.write_locking = 0;
                        } else if (!try && type != SIX_LOCK_write &&
                                   !(new.waiters & (1 << type)))
                                new.waiters |= 1 << type;
                        else
                                break; /* waiting bit already set */
                } while ((v = atomic64_cmpxchg_acquire(&lock->state.counter,
                                        old.v, new.v)) != old.v);

                ret = !(old.v & l[type].lock_fail);
        }

        if (ret)
                six_set_owner(lock, type, old);

        EBUG_ON(ret && !(lock->state.v & l[type].held_mask));
        EBUG_ON(type == SIX_LOCK_write && (try || ret) && (lock->state.write_locking));

        return ret;
}

__always_inline __flatten
static bool __six_trylock_type(struct six_lock *lock, enum six_lock_type type)
{
        if (!do_six_trylock_type(lock, type, true))
                return false;

        if (type != SIX_LOCK_write)
                six_acquire(&lock->dep_map, 1);
        return true;
}

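/*
 * Relock: take the lock again, but only if its sequence number still matches
 * @seq - i.e. no write lock has been taken since the caller last held it.
 */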
__always_inline __flatten
static bool __six_relock_type(struct six_lock *lock, enum six_lock_type type,
                              unsigned seq)
{
        const struct six_lock_vals l[] = LOCK_VALS;
        union six_lock_state old;
        u64 v;

        EBUG_ON(type == SIX_LOCK_write);

        if (type == SIX_LOCK_read &&
            lock->readers) {
                bool ret;

                preempt_disable();
                this_cpu_inc(*lock->readers);

                smp_mb();

                old.v = READ_ONCE(lock->state.v);
                ret = !(old.v & l[type].lock_fail) && old.seq == seq;

                this_cpu_sub(*lock->readers, !ret);
                preempt_enable();

                /*
                 * Similar to the lock path, we may have caused a spurious write
                 * lock fail and need to issue a wakeup:
                 */
                if (old.write_locking) {
                        struct task_struct *p = READ_ONCE(lock->owner);

                        if (p)
                                wake_up_process(p);
                }

                if (ret)
                        six_acquire(&lock->dep_map, 1);

                return ret;
        }

        v = READ_ONCE(lock->state.v);
        do {
                old.v = v;

                if (old.seq != seq || old.v & l[type].lock_fail)
                        return false;
        } while ((v = atomic64_cmpxchg_acquire(&lock->state.counter,
                                old.v,
                                old.v + l[type].lock_val)) != old.v);

        six_set_owner(lock, type, old);
        if (type != SIX_LOCK_write)
                six_acquire(&lock->dep_map, 1);
        return true;
}

#ifdef CONFIG_LOCK_SPIN_ON_OWNER

static inline int six_can_spin_on_owner(struct six_lock *lock)
{
        struct task_struct *owner;
        int retval = 1;

        if (need_resched())
                return 0;

        rcu_read_lock();
        owner = READ_ONCE(lock->owner);
        if (owner)
                retval = owner->on_cpu;
        rcu_read_unlock();
        /*
         * If lock->owner is not set, the owner may have just acquired the lock
         * and not set the owner yet, or the lock may have been released.
         */
        return retval;
}

static inline bool six_spin_on_owner(struct six_lock *lock,
                                     struct task_struct *owner)
{
        bool ret = true;

        rcu_read_lock();
        while (lock->owner == owner) {
                /*
                 * Ensure we emit the owner->on_cpu dereference _after_
                 * checking lock->owner still matches owner. If that fails,
                 * owner might point to freed memory. If it still matches,
                 * the rcu_read_lock() ensures the memory stays valid.
                 */
                barrier();

                if (!owner->on_cpu || need_resched()) {
                        ret = false;
                        break;
                }

                cpu_relax();
        }
        rcu_read_unlock();

        return ret;
}

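/*
 * Optimistic spinning: rather than going to sleep straight away, spin (queued
 * behind lock->osq) for as long as the intent lock owner is still running on a
 * CPU, retrying the trylock as we go. Not used for write locks.
 */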
static inline bool six_optimistic_spin(struct six_lock *lock, enum six_lock_type type)
{
        struct task_struct *task = current;

        if (type == SIX_LOCK_write)
                return false;

        preempt_disable();
        if (!six_can_spin_on_owner(lock))
                goto fail;

        if (!osq_lock(&lock->osq))
                goto fail;

        while (1) {
                struct task_struct *owner;

                /*
                 * If there's an owner, wait for it to either
                 * release the lock or go to sleep.
                 */
                owner = READ_ONCE(lock->owner);
                if (owner && !six_spin_on_owner(lock, owner))
                        break;

                if (do_six_trylock_type(lock, type, false)) {
                        osq_unlock(&lock->osq);
                        preempt_enable();
                        return true;
                }

                /*
                 * When there's no owner, we might have preempted between the
                 * owner acquiring the lock and setting the owner field. If
                 * we're an RT task, that will live-lock because we won't let
                 * the owner complete.
                 */
                if (!owner && (need_resched() || rt_task(task)))
                        break;

                /*
                 * The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */
                cpu_relax();
        }

        osq_unlock(&lock->osq);
fail:
        preempt_enable();

        /*
         * If we fell out of the spin path because of need_resched(),
         * reschedule now, before we try-lock again. This avoids getting
         * scheduled out right after we obtained the lock.
         */
        if (need_resched())
                schedule();

        return false;
}

#else /* CONFIG_LOCK_SPIN_ON_OWNER */

static inline bool six_optimistic_spin(struct six_lock *lock, enum six_lock_type type)
{
        return false;
}

#endif

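/*
 * The sleeping slowpath. For write locks we first set write_locking, so that
 * new readers fail their trylock, and then wait for existing readers to drain;
 * read/intent waiters put themselves on the wait list. Either way we sleep
 * until a trylock succeeds or @should_sleep_fn returns an error.
 */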
noinline
static int __six_lock_type_slowpath(struct six_lock *lock, enum six_lock_type type,
                                    six_lock_should_sleep_fn should_sleep_fn, void *p)
{
        union six_lock_state old;
        struct six_lock_waiter wait;
        int ret = 0;

        if (type == SIX_LOCK_write) {
                EBUG_ON(lock->state.write_locking);
                atomic64_add(__SIX_VAL(write_locking, 1), &lock->state.counter);
                smp_mb__after_atomic();
        }

        ret = should_sleep_fn ? should_sleep_fn(lock, p) : 0;
        if (ret)
                goto out_before_sleep;

        if (six_optimistic_spin(lock, type))
                goto out_before_sleep;

        lock_contended(&lock->dep_map, _RET_IP_);

        INIT_LIST_HEAD(&wait.list);
        wait.task = current;

        while (1) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (type == SIX_LOCK_write)
                        EBUG_ON(lock->owner != current);
                else if (list_empty_careful(&wait.list)) {
                        raw_spin_lock(&lock->wait_lock);
                        list_add_tail(&wait.list, &lock->wait_list[type]);
                        raw_spin_unlock(&lock->wait_lock);
                }

                if (do_six_trylock_type(lock, type, false))
                        break;

                ret = should_sleep_fn ? should_sleep_fn(lock, p) : 0;
                if (ret)
                        break;

                schedule();
        }

        __set_current_state(TASK_RUNNING);

        if (!list_empty_careful(&wait.list)) {
                raw_spin_lock(&lock->wait_lock);
                list_del_init(&wait.list);
                raw_spin_unlock(&lock->wait_lock);
        }
out_before_sleep:
        if (ret && type == SIX_LOCK_write) {
                old.v = atomic64_sub_return(__SIX_VAL(write_locking, 1),
                                            &lock->state.counter);
                six_lock_wakeup(lock, old, SIX_LOCK_read);
        }

        return ret;
}

__always_inline
static int __six_lock_type(struct six_lock *lock, enum six_lock_type type,
                           six_lock_should_sleep_fn should_sleep_fn, void *p)
{
        int ret;

        if (type != SIX_LOCK_write)
                six_acquire(&lock->dep_map, 0);

        ret = do_six_trylock_type(lock, type, true) ? 0
                : __six_lock_type_slowpath(lock, type, should_sleep_fn, p);

        if (ret && type != SIX_LOCK_write)
                six_release(&lock->dep_map);
        if (!ret)
                lock_acquired(&lock->dep_map, _RET_IP_);

        return ret;
}

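/*
 * Unlock: for intent locks, pop one level of recursion or clear the owner;
 * then drop our hold on the lock state (the percpu counter for percpu read
 * locks, otherwise unlock_val) and wake the appropriate waitlist.
 */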
__always_inline __flatten
static void __six_unlock_type(struct six_lock *lock, enum six_lock_type type)
{
        const struct six_lock_vals l[] = LOCK_VALS;
        union six_lock_state state;

        EBUG_ON(type == SIX_LOCK_write &&
                !(lock->state.v & __SIX_LOCK_HELD_intent));

        if (type != SIX_LOCK_write)
                six_release(&lock->dep_map);

        if (type == SIX_LOCK_intent) {
                EBUG_ON(lock->owner != current);

                if (lock->intent_lock_recurse) {
                        --lock->intent_lock_recurse;
                        return;
                }

                lock->owner = NULL;
        }

        if (type == SIX_LOCK_read &&
            lock->readers) {
                smp_mb(); /* unlock barrier */
                this_cpu_dec(*lock->readers);
                smp_mb(); /* between unlocking and checking for waiters */
                state.v = READ_ONCE(lock->state.v);
        } else {
                EBUG_ON(!(lock->state.v & l[type].held_mask));
                state.v = atomic64_add_return_release(l[type].unlock_val,
                                                      &lock->state.counter);
        }

        six_lock_wakeup(lock, state, l[type].unlock_wakeup);
}

#define __SIX_LOCK(type)                                                \
bool six_trylock_##type(struct six_lock *lock)                          \
{                                                                       \
        return __six_trylock_type(lock, SIX_LOCK_##type);               \
}                                                                       \
EXPORT_SYMBOL_GPL(six_trylock_##type);                                  \
                                                                        \
bool six_relock_##type(struct six_lock *lock, u32 seq)                  \
{                                                                       \
        return __six_relock_type(lock, SIX_LOCK_##type, seq);           \
}                                                                       \
EXPORT_SYMBOL_GPL(six_relock_##type);                                   \
                                                                        \
int six_lock_##type(struct six_lock *lock,                              \
                    six_lock_should_sleep_fn should_sleep_fn, void *p)  \
{                                                                       \
        return __six_lock_type(lock, SIX_LOCK_##type, should_sleep_fn, p);\
}                                                                       \
EXPORT_SYMBOL_GPL(six_lock_##type);                                     \
                                                                        \
void six_unlock_##type(struct six_lock *lock)                           \
{                                                                       \
        __six_unlock_type(lock, SIX_LOCK_##type);                       \
}                                                                       \
EXPORT_SYMBOL_GPL(six_unlock_##type);

__SIX_LOCK(read)
__SIX_LOCK(intent)
__SIX_LOCK(write)

#undef __SIX_LOCK

/* Convert from intent to read: */
void six_lock_downgrade(struct six_lock *lock)
{
        six_lock_increment(lock, SIX_LOCK_read);
        six_unlock_intent(lock);
}
EXPORT_SYMBOL_GPL(six_lock_downgrade);

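/* Convert from read to intent, if no other task holds the intent lock: */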
bool six_lock_tryupgrade(struct six_lock *lock)
{
        union six_lock_state old, new;
        u64 v = READ_ONCE(lock->state.v);

        do {
                new.v = old.v = v;

                if (new.intent_lock)
                        return false;

                if (!lock->readers) {
                        EBUG_ON(!new.read_lock);
                        new.read_lock--;
                }

                new.intent_lock = 1;
        } while ((v = atomic64_cmpxchg_acquire(&lock->state.counter,
                                old.v, new.v)) != old.v);

        if (lock->readers)
                this_cpu_dec(*lock->readers);

        six_set_owner(lock, SIX_LOCK_intent, old);

        return true;
}
EXPORT_SYMBOL_GPL(six_lock_tryupgrade);

bool six_trylock_convert(struct six_lock *lock,
                         enum six_lock_type from,
                         enum six_lock_type to)
{
        EBUG_ON(to == SIX_LOCK_write || from == SIX_LOCK_write);

        if (to == from)
                return true;

        if (to == SIX_LOCK_read) {
                six_lock_downgrade(lock);
                return true;
        } else {
                return six_lock_tryupgrade(lock);
        }
}
EXPORT_SYMBOL_GPL(six_trylock_convert);

/*
 * Increment read/intent lock count, assuming we already have it read or intent
 * locked:
 */
void six_lock_increment(struct six_lock *lock, enum six_lock_type type)
{
        const struct six_lock_vals l[] = LOCK_VALS;

        six_acquire(&lock->dep_map, 0);

        /* XXX: assert already locked, and that we don't overflow: */

        switch (type) {
        case SIX_LOCK_read:
                if (lock->readers) {
                        this_cpu_inc(*lock->readers);
                } else {
                        EBUG_ON(!lock->state.read_lock &&
                                !lock->state.intent_lock);
                        atomic64_add(l[type].lock_val, &lock->state.counter);
                }
                break;
        case SIX_LOCK_intent:
                EBUG_ON(!lock->state.intent_lock);
                lock->intent_lock_recurse++;
                break;
        case SIX_LOCK_write:
                BUG();
                break;
        }
}
EXPORT_SYMBOL_GPL(six_lock_increment);

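/*
 * Wake every waiter on both wait lists, so they all go back and re-check
 * whether they can take the lock or whether their should_sleep_fn now tells
 * them to bail out:
 */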
void six_lock_wakeup_all(struct six_lock *lock)
{
        struct six_lock_waiter *w;

        raw_spin_lock(&lock->wait_lock);

        list_for_each_entry(w, &lock->wait_list[0], list)
                wake_up_process(w->task);
        list_for_each_entry(w, &lock->wait_list[1], list)
                wake_up_process(w->task);

        raw_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL_GPL(six_lock_wakeup_all);

struct free_pcpu_rcu {
        struct rcu_head         rcu;
        void __percpu           *p;
};

static void free_pcpu_rcu_fn(struct rcu_head *_rcu)
{
        struct free_pcpu_rcu *rcu =
                container_of(_rcu, struct free_pcpu_rcu, rcu);

        free_percpu(rcu->p);
        kfree(rcu);
}

void six_lock_pcpu_free_rcu(struct six_lock *lock)
{
        struct free_pcpu_rcu *rcu = kzalloc(sizeof(*rcu), GFP_KERNEL);

        if (!rcu)
                return;

        rcu->p = lock->readers;
        lock->readers = NULL;

        call_rcu(&rcu->rcu, free_pcpu_rcu_fn);
}
EXPORT_SYMBOL_GPL(six_lock_pcpu_free_rcu);

void six_lock_pcpu_free(struct six_lock *lock)
{
        BUG_ON(lock->readers && pcpu_read_count(lock));
        BUG_ON(lock->state.read_lock);

        free_percpu(lock->readers);
        lock->readers = NULL;
}
EXPORT_SYMBOL_GPL(six_lock_pcpu_free);

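/*
 * Switch @lock to percpu reader mode by allocating its percpu read counters
 * (only when __KERNEL__ is defined; otherwise this is a no-op):
 */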
void six_lock_pcpu_alloc(struct six_lock *lock)
{
#ifdef __KERNEL__
        if (!lock->readers)
                lock->readers = alloc_percpu(unsigned);
#endif
}
EXPORT_SYMBOL_GPL(six_lock_pcpu_alloc);