// SPDX-License-Identifier: GPL-2.0

#include <linux/export.h>
#include <linux/log2.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/six.h>
#include <linux/slab.h>

#ifdef DEBUG
#define EBUG_ON(cond)		BUG_ON(cond)
#else
#define EBUG_ON(cond)		do {} while (0)
#endif

#define six_acquire(l, t)	lock_acquire(l, 0, t, 0, 0, NULL, _RET_IP_)
#define six_release(l)		lock_release(l, _RET_IP_)
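
/*
 * Lockdep integration: thin wrappers around lock_acquire()/lock_release();
 * the @t argument to six_acquire() is lockdep's trylock flag. Note that the
 * lock/unlock paths below skip these annotations for SIX_LOCK_write,
 * presumably because a write lock can only be taken while already holding
 * the lock with intent.
 */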

struct six_lock_vals {
	/* Value we add to the lock in order to take the lock: */
	u64			lock_val;

	/* If any of these bits are set in the lock state, taking the lock fails: */
	u64			lock_fail;

	/* Value we add to the lock in order to release the lock: */
	u64			unlock_val;

	/* Mask that indicates lock is held for this type: */
	u64			held_mask;

	/* Waitlist we wake up when releasing the lock: */
	enum six_lock_type	unlock_wakeup;
};

#define __SIX_LOCK_HELD_read	__SIX_VAL(read_lock, ~0)
#define __SIX_LOCK_HELD_intent	__SIX_VAL(intent_lock, ~0)
#define __SIX_LOCK_HELD_write	__SIX_VAL(seq, 1)

#define LOCK_VALS {							\
	[SIX_LOCK_read] = {						\
		.lock_val	= __SIX_VAL(read_lock, 1),		\
		.lock_fail	= __SIX_LOCK_HELD_write + __SIX_VAL(write_locking, 1),\
		.unlock_val	= -__SIX_VAL(read_lock, 1),		\
		.held_mask	= __SIX_LOCK_HELD_read,			\
		.unlock_wakeup	= SIX_LOCK_write,			\
	},								\
	[SIX_LOCK_intent] = {						\
		.lock_val	= __SIX_VAL(intent_lock, 1),		\
		.lock_fail	= __SIX_LOCK_HELD_intent,		\
		.unlock_val	= -__SIX_VAL(intent_lock, 1),		\
		.held_mask	= __SIX_LOCK_HELD_intent,		\
		.unlock_wakeup	= SIX_LOCK_intent,			\
	},								\
	[SIX_LOCK_write] = {						\
		.lock_val	= __SIX_VAL(seq, 1),			\
		.lock_fail	= __SIX_LOCK_HELD_read,			\
		.unlock_val	= __SIX_VAL(seq, 1),			\
		.held_mask	= __SIX_LOCK_HELD_write,		\
		.unlock_wakeup	= SIX_LOCK_read,			\
	},								\
}
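
/*
 * Worked example of the table above (illustrative only): taking a read lock
 * adds __SIX_VAL(read_lock, 1) to the state, and fails if a write lock is
 * held or in the process of being taken (write_locking set); dropping it
 * subtracts that value again and wakes the write waitlist, since the last
 * reader to leave may unblock a pending write lock.
 */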

static inline void six_set_owner(struct six_lock *lock, enum six_lock_type type,
				 union six_lock_state old)
{
	if (type != SIX_LOCK_intent)
		return;

	if (!old.intent_lock) {
		EBUG_ON(lock->owner);
		lock->owner = current;
	} else {
		EBUG_ON(lock->owner != current);
	}
}

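/*
 * For locks in percpu reader mode (lock->readers != NULL) the read count
 * lives in per-cpu counters instead of the state word; sum them across all
 * possible CPUs to get the total number of readers.
 */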
static inline unsigned pcpu_read_count(struct six_lock *lock)
{
	unsigned read_count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		read_count += *per_cpu_ptr(lock->readers, cpu);
	return read_count;
}

struct six_lock_waiter {
	struct list_head	list;
	struct task_struct	*task;
};

/* This is probably up there with the more evil things I've done */
#define waitlist_bitnr(id) ilog2((((union six_lock_state) { .waiters = 1 << (id) }).l))
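/*
 * (What the macro above computes: it builds a state value with only the
 * given waitlist's waiter bit set, then takes ilog2() of the raw 64-bit
 * value to recover that bit's position, so it can be passed to
 * set_bit()/clear_bit() on &lock->state.v.)
 */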

static inline void six_lock_wakeup(struct six_lock *lock,
				   union six_lock_state state,
				   unsigned waitlist_id)
{
	if (waitlist_id == SIX_LOCK_write) {
		if (state.write_locking && !state.read_lock) {
			struct task_struct *p = READ_ONCE(lock->owner);
			if (p)
				wake_up_process(p);
		}
	} else {
		struct list_head *wait_list = &lock->wait_list[waitlist_id];
		struct six_lock_waiter *w, *next;

		if (!(state.waiters & (1 << waitlist_id)))
			return;

		clear_bit(waitlist_bitnr(waitlist_id),
			  (unsigned long *) &lock->state.v);

		raw_spin_lock(&lock->wait_lock);

		list_for_each_entry_safe(w, next, wait_list, list) {
			list_del_init(&w->list);

			if (wake_up_process(w->task) &&
			    waitlist_id != SIX_LOCK_read) {
				if (!list_empty(wait_list))
					set_bit(waitlist_bitnr(waitlist_id),
						(unsigned long *) &lock->state.v);
				break;
			}
		}

		raw_spin_unlock(&lock->wait_lock);
	}
}

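/*
 * Attempt to take a lock of the given type; @try distinguishes a real
 * trylock from the retries done by the lock slowpath. When @try is false
 * and the attempt fails, the waiters bit for @type is also set (for read
 * and intent locks) so that the unlock path knows to wake us.
 */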
static __always_inline bool do_six_trylock_type(struct six_lock *lock,
						enum six_lock_type type,
						bool try)
{
	const struct six_lock_vals l[] = LOCK_VALS;
	union six_lock_state old, new;
	bool ret;
	u64 v;

	EBUG_ON(type == SIX_LOCK_write && lock->owner != current);
	EBUG_ON(type == SIX_LOCK_write && (lock->state.seq & 1));

	EBUG_ON(type == SIX_LOCK_write && (try != !(lock->state.write_locking)));

	/*
	 * Percpu reader mode:
	 *
	 * The basic idea behind this algorithm is that you can implement a lock
	 * between two threads without any atomics, just memory barriers:
	 *
	 * For two threads you'll need two variables, one variable for "thread a
	 * has the lock" and another for "thread b has the lock".
	 *
	 * To take the lock, a thread sets its variable indicating that it holds
	 * the lock, then issues a full memory barrier, then reads from the
	 * other thread's variable to check if the other thread thinks it has
	 * the lock. If we raced, we back off and retry/sleep.
	 */
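
	/*
	 * Illustrative sketch of that two-thread scheme (not part of this
	 * implementation):
	 *
	 *	thread A			thread B
	 *	a_locked = 1;			b_locked = 1;
	 *	smp_mb();			smp_mb();
	 *	if (b_locked)			if (a_locked)
	 *		back off;			back off;
	 *
	 * Below, the per-cpu read counts play the role of "reader has the
	 * lock" and the write_locking bit plays the role of the other side.
	 */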

	if (type == SIX_LOCK_read && lock->readers) {
retry:
		preempt_disable();
		this_cpu_inc(*lock->readers); /* signal that we own lock */

		smp_mb();

		old.v = READ_ONCE(lock->state.v);
		ret = !(old.v & l[type].lock_fail);

		this_cpu_sub(*lock->readers, !ret);
		preempt_enable();

		/*
		 * If we failed because a writer was trying to take the
		 * lock, issue a wakeup because we might have caused a
		 * spurious trylock failure:
		 */
		if (old.write_locking) {
			struct task_struct *p = READ_ONCE(lock->owner);

			if (p)
				wake_up_process(p);
		}

		/*
		 * If we failed from the lock path and the waiting bit wasn't
		 * set, set it:
		 */
		if (!try && !ret) {
			v = old.v;

			do {
				new.v = old.v = v;

				if (!(old.v & l[type].lock_fail))
					goto retry;

				if (new.waiters & (1 << type))
					break;

				new.waiters |= 1 << type;
			} while ((v = atomic64_cmpxchg(&lock->state.counter,
						       old.v, new.v)) != old.v);
		}
	} else if (type == SIX_LOCK_write && lock->readers) {
		if (try) {
			atomic64_add(__SIX_VAL(write_locking, 1),
				     &lock->state.counter);
			smp_mb__after_atomic();
		}

		ret = !pcpu_read_count(lock);

		/*
		 * On success, we increment lock->seq; also we clear
		 * write_locking unless we failed from the lock path:
		 */
		v = 0;
		if (ret)
			v += __SIX_VAL(seq, 1);
		if (ret || try)
			v -= __SIX_VAL(write_locking, 1);

		if (try && !ret) {
			old.v = atomic64_add_return(v, &lock->state.counter);
			six_lock_wakeup(lock, old, SIX_LOCK_read);
		} else {
			atomic64_add(v, &lock->state.counter);
		}
	} else {
		v = READ_ONCE(lock->state.v);
		do {
			new.v = old.v = v;

			if (!(old.v & l[type].lock_fail)) {
				new.v += l[type].lock_val;

				if (type == SIX_LOCK_write)
					new.write_locking = 0;
			} else if (!try && type != SIX_LOCK_write &&
				   !(new.waiters & (1 << type)))
				new.waiters |= 1 << type;
			else
				break; /* waiting bit already set */
		} while ((v = atomic64_cmpxchg_acquire(&lock->state.counter,
					old.v, new.v)) != old.v);

		ret = !(old.v & l[type].lock_fail);

		EBUG_ON(ret && !(lock->state.v & l[type].held_mask));
	}

	if (ret)
		six_set_owner(lock, type, old);

	EBUG_ON(type == SIX_LOCK_write && (try || ret) && (lock->state.write_locking));

	return ret;
}

__always_inline __flatten
static bool __six_trylock_type(struct six_lock *lock, enum six_lock_type type)
{
	if (!do_six_trylock_type(lock, type, true))
		return false;

	if (type != SIX_LOCK_write)
		six_acquire(&lock->dep_map, 1);
	return true;
}

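/*
 * Re-take a lock of the given type, but only if the lock's sequence number
 * still matches @seq - i.e. only if no one has taken a write lock since the
 * caller last held it.
 */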
__always_inline __flatten
static bool __six_relock_type(struct six_lock *lock, enum six_lock_type type,
			      unsigned seq)
{
	const struct six_lock_vals l[] = LOCK_VALS;
	union six_lock_state old;
	u64 v;

	EBUG_ON(type == SIX_LOCK_write);

	if (type == SIX_LOCK_read &&
	    lock->readers) {
		bool ret;

		preempt_disable();
		this_cpu_inc(*lock->readers);

		smp_mb();

		old.v = READ_ONCE(lock->state.v);
		ret = !(old.v & l[type].lock_fail) && old.seq == seq;

		this_cpu_sub(*lock->readers, !ret);
		preempt_enable();

		/*
		 * Similar to the lock path, we may have caused a spurious write
		 * lock fail and need to issue a wakeup:
		 */
		if (old.write_locking) {
			struct task_struct *p = READ_ONCE(lock->owner);

			if (p)
				wake_up_process(p);
		}

		if (ret)
			six_acquire(&lock->dep_map, 1);

		return ret;
	}

	v = READ_ONCE(lock->state.v);
	do {
		old.v = v;

		if (old.seq != seq || old.v & l[type].lock_fail)
			return false;
	} while ((v = atomic64_cmpxchg_acquire(&lock->state.counter,
				old.v,
				old.v + l[type].lock_val)) != old.v);

	six_set_owner(lock, type, old);
	if (type != SIX_LOCK_write)
		six_acquire(&lock->dep_map, 1);
	return true;
}

#ifdef CONFIG_LOCK_SPIN_ON_OWNER

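/*
 * Optimistic spinning: when the lock is held with intent and the owner is
 * currently running on another CPU, it's often cheaper to spin briefly than
 * to sleep and pay for a context switch. The OSQ lock keeps the spinners
 * queued so they don't all pound on the lock word at once.
 */
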
static inline int six_can_spin_on_owner(struct six_lock *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = READ_ONCE(lock->owner);
	if (owner)
		retval = owner->on_cpu;
	rcu_read_unlock();
	/*
	 * If lock->owner is not set, the owner may have just acquired the lock
	 * and not yet set the owner field, or the lock may have been released.
	 */
	return retval;
}

static inline bool six_spin_on_owner(struct six_lock *lock,
				     struct task_struct *owner)
{
	bool ret = true;

	rcu_read_lock();
	while (lock->owner == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that lock->owner still matches owner. If that
		 * fails, owner might point to freed memory. If it still
		 * matches, the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		if (!owner->on_cpu || need_resched()) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

static inline bool six_optimistic_spin(struct six_lock *lock, enum six_lock_type type)
{
	struct task_struct *task = current;

	if (type == SIX_LOCK_write)
		return false;

	preempt_disable();
	if (!six_can_spin_on_owner(lock))
		goto fail;

	if (!osq_lock(&lock->osq))
		goto fail;

	while (1) {
		struct task_struct *owner;

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = READ_ONCE(lock->owner);
		if (owner && !six_spin_on_owner(lock, owner))
			break;

		if (do_six_trylock_type(lock, type, false)) {
			osq_unlock(&lock->osq);
			preempt_enable();
			return true;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task, that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	osq_unlock(&lock->osq);
fail:
	preempt_enable();

	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock again. This avoids getting
	 * scheduled out right after we obtained the lock.
	 */
	if (need_resched())
		schedule();

	return false;
}

#else /* CONFIG_LOCK_SPIN_ON_OWNER */

static inline bool six_optimistic_spin(struct six_lock *lock, enum six_lock_type type)
{
	return false;
}

#endif

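/*
 * Slowpath: for write locks, set write_locking first so percpu readers back
 * off, then wait for the remaining readers to drain; for read/intent locks,
 * add ourselves to the waitlist and sleep until a trylock attempt succeeds.
 * @should_sleep_fn, if given, is called before each sleep; a nonzero return
 * aborts the lock attempt and is propagated to the caller.
 */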
noinline
static int __six_lock_type_slowpath(struct six_lock *lock, enum six_lock_type type,
				    six_lock_should_sleep_fn should_sleep_fn, void *p)
{
	union six_lock_state old;
	struct six_lock_waiter wait;
	int ret = 0;

	if (type == SIX_LOCK_write) {
		EBUG_ON(lock->state.write_locking);
		atomic64_add(__SIX_VAL(write_locking, 1), &lock->state.counter);
		smp_mb__after_atomic();
	}

	ret = should_sleep_fn ? should_sleep_fn(lock, p) : 0;
	if (ret)
		goto out_before_sleep;

	if (six_optimistic_spin(lock, type))
		goto out_before_sleep;

	lock_contended(&lock->dep_map, _RET_IP_);

	INIT_LIST_HEAD(&wait.list);
	wait.task = current;

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (type == SIX_LOCK_write)
			EBUG_ON(lock->owner != current);
		else if (list_empty_careful(&wait.list)) {
			raw_spin_lock(&lock->wait_lock);
			list_add_tail(&wait.list, &lock->wait_list[type]);
			raw_spin_unlock(&lock->wait_lock);
		}

		if (do_six_trylock_type(lock, type, false))
			break;

		ret = should_sleep_fn ? should_sleep_fn(lock, p) : 0;
		if (ret)
			break;

		schedule();
	}

	__set_current_state(TASK_RUNNING);

	if (!list_empty_careful(&wait.list)) {
		raw_spin_lock(&lock->wait_lock);
		list_del_init(&wait.list);
		raw_spin_unlock(&lock->wait_lock);
	}
out_before_sleep:
	if (ret && type == SIX_LOCK_write) {
		old.v = atomic64_sub_return(__SIX_VAL(write_locking, 1),
					    &lock->state.counter);
		six_lock_wakeup(lock, old, SIX_LOCK_read);
	}

	return ret;
}

__always_inline
static int __six_lock_type(struct six_lock *lock, enum six_lock_type type,
			   six_lock_should_sleep_fn should_sleep_fn, void *p)
{
	int ret;

	if (type != SIX_LOCK_write)
		six_acquire(&lock->dep_map, 0);

	ret = do_six_trylock_type(lock, type, true) ? 0
		: __six_lock_type_slowpath(lock, type, should_sleep_fn, p);

	if (ret && type != SIX_LOCK_write)
		six_release(&lock->dep_map);
	if (!ret)
		lock_acquired(&lock->dep_map, _RET_IP_);

	return ret;
}

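/*
 * Drop a lock of the given type. Intent locks may be taken recursively, so
 * only the final unlock clears the owner; for read locks in percpu mode the
 * per-cpu count is dropped instead of touching the state word, with full
 * barriers so the state we then read for six_lock_wakeup() is current.
 */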
__always_inline __flatten
static void __six_unlock_type(struct six_lock *lock, enum six_lock_type type)
{
	const struct six_lock_vals l[] = LOCK_VALS;
	union six_lock_state state;

	EBUG_ON(type == SIX_LOCK_write &&
		!(lock->state.v & __SIX_LOCK_HELD_intent));

	if (type != SIX_LOCK_write)
		six_release(&lock->dep_map);

	if (type == SIX_LOCK_intent) {
		EBUG_ON(lock->owner != current);

		if (lock->intent_lock_recurse) {
			--lock->intent_lock_recurse;
			return;
		}

		lock->owner = NULL;
	}

	if (type == SIX_LOCK_read &&
	    lock->readers) {
		smp_mb(); /* unlock barrier */
		this_cpu_dec(*lock->readers);
		smp_mb(); /* between unlocking and checking for waiters */
		state.v = READ_ONCE(lock->state.v);
	} else {
		EBUG_ON(!(lock->state.v & l[type].held_mask));
		state.v = atomic64_add_return_release(l[type].unlock_val,
						      &lock->state.counter);
	}

	six_lock_wakeup(lock, state, l[type].unlock_wakeup);
}

#define __SIX_LOCK(type)						\
bool six_trylock_##type(struct six_lock *lock)				\
{									\
	return __six_trylock_type(lock, SIX_LOCK_##type);		\
}									\
EXPORT_SYMBOL_GPL(six_trylock_##type);					\
									\
bool six_relock_##type(struct six_lock *lock, u32 seq)			\
{									\
	return __six_relock_type(lock, SIX_LOCK_##type, seq);		\
}									\
EXPORT_SYMBOL_GPL(six_relock_##type);					\
									\
int six_lock_##type(struct six_lock *lock,				\
		    six_lock_should_sleep_fn should_sleep_fn, void *p)	\
{									\
	return __six_lock_type(lock, SIX_LOCK_##type, should_sleep_fn, p);\
}									\
EXPORT_SYMBOL_GPL(six_lock_##type);					\
									\
void six_unlock_##type(struct six_lock *lock)				\
{									\
	__six_unlock_type(lock, SIX_LOCK_##type);			\
}									\
EXPORT_SYMBOL_GPL(six_unlock_##type);

__SIX_LOCK(read)
__SIX_LOCK(intent)
__SIX_LOCK(write)

#undef __SIX_LOCK
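
/*
 * Usage sketch of the generated API (illustrative only):
 *
 *	six_lock_read(&lock, NULL, NULL);
 *	u32 seq = lock.state.seq;
 *	six_unlock_read(&lock);
 *
 *	... some time later ...
 *
 *	if (!six_relock_read(&lock, seq)) {
 *		// someone took a write lock in the meantime; whatever we
 *		// read under the lock may be stale, so start over
 *	}
 */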

/* Convert from intent to read: */
void six_lock_downgrade(struct six_lock *lock)
{
	six_lock_increment(lock, SIX_LOCK_read);
	six_unlock_intent(lock);
}
EXPORT_SYMBOL_GPL(six_lock_downgrade);

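/*
 * Try to convert a read lock to an intent lock: fails, leaving the read
 * lock held, if another thread already holds the lock with intent:
 */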
bool six_lock_tryupgrade(struct six_lock *lock)
{
	union six_lock_state old, new;
	u64 v = READ_ONCE(lock->state.v);

	do {
		new.v = old.v = v;

		if (new.intent_lock)
			return false;

		if (!lock->readers) {
			EBUG_ON(!new.read_lock);
			new.read_lock--;
		}

		new.intent_lock = 1;
	} while ((v = atomic64_cmpxchg_acquire(&lock->state.counter,
				old.v, new.v)) != old.v);

	if (lock->readers)
		this_cpu_dec(*lock->readers);

	six_set_owner(lock, SIX_LOCK_intent, old);

	return true;
}
EXPORT_SYMBOL_GPL(six_lock_tryupgrade);

bool six_trylock_convert(struct six_lock *lock,
			 enum six_lock_type from,
			 enum six_lock_type to)
{
	EBUG_ON(to == SIX_LOCK_write || from == SIX_LOCK_write);

	if (to == from)
		return true;

	if (to == SIX_LOCK_read) {
		six_lock_downgrade(lock);
		return true;
	} else {
		return six_lock_tryupgrade(lock);
	}
}
EXPORT_SYMBOL_GPL(six_trylock_convert);

/*
 * Increment read/intent lock count, assuming we already have it read or intent
 * locked:
 */
void six_lock_increment(struct six_lock *lock, enum six_lock_type type)
{
	const struct six_lock_vals l[] = LOCK_VALS;

	six_acquire(&lock->dep_map, 0);

	/* XXX: assert already locked, and that we don't overflow: */

	switch (type) {
	case SIX_LOCK_read:
		if (lock->readers) {
			this_cpu_inc(*lock->readers);
		} else {
			EBUG_ON(!lock->state.read_lock &&
				!lock->state.intent_lock);
			atomic64_add(l[type].lock_val, &lock->state.counter);
		}
		break;
	case SIX_LOCK_intent:
		EBUG_ON(!lock->state.intent_lock);
		lock->intent_lock_recurse++;
		break;
	case SIX_LOCK_write:
		BUG();
		break;
	}
}
EXPORT_SYMBOL_GPL(six_lock_increment);

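/*
 * Wake every waiter on both waitlists without granting the lock to anyone:
 * the woken tasks simply retry the lock (and re-run their should_sleep_fn).
 */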
void six_lock_wakeup_all(struct six_lock *lock)
{
	struct six_lock_waiter *w;

	raw_spin_lock(&lock->wait_lock);

	list_for_each_entry(w, &lock->wait_list[0], list)
		wake_up_process(w->task);
	list_for_each_entry(w, &lock->wait_list[1], list)
		wake_up_process(w->task);

	raw_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL_GPL(six_lock_wakeup_all);

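/*
 * Tearing down percpu reader mode: six_lock_pcpu_free_rcu() clears
 * lock->readers and defers the free_percpu() to an RCU callback, so that
 * any lock path that sampled the old pointer (under preempt_disable()) has
 * finished with the counters before they're freed.
 */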
struct free_pcpu_rcu {
	struct rcu_head		rcu;
	void __percpu		*p;
};

static void free_pcpu_rcu_fn(struct rcu_head *_rcu)
{
	struct free_pcpu_rcu *rcu =
		container_of(_rcu, struct free_pcpu_rcu, rcu);

	free_percpu(rcu->p);
	kfree(rcu);
}

void six_lock_pcpu_free_rcu(struct six_lock *lock)
{
	struct free_pcpu_rcu *rcu = kzalloc(sizeof(*rcu), GFP_KERNEL);

	if (!rcu)
		return;

	rcu->p = lock->readers;
	lock->readers = NULL;

	call_rcu(&rcu->rcu, free_pcpu_rcu_fn);
}
EXPORT_SYMBOL_GPL(six_lock_pcpu_free_rcu);

void six_lock_pcpu_free(struct six_lock *lock)
{
	BUG_ON(lock->readers && pcpu_read_count(lock));
	BUG_ON(lock->state.read_lock);

	free_percpu(lock->readers);
	lock->readers = NULL;
}
EXPORT_SYMBOL_GPL(six_lock_pcpu_free);

void six_lock_pcpu_alloc(struct six_lock *lock)
{
#ifdef __KERNEL__
	if (!lock->readers)
		lock->readers = alloc_percpu(unsigned);
#endif
}
EXPORT_SYMBOL_GPL(six_lock_pcpu_alloc);