+ struct six_lock_count ret;
+
+ ret.n[SIX_LOCK_read] = !lock->readers
+ ? atomic_read(&lock->state) & SIX_LOCK_HELD_read
+ : pcpu_read_count(lock);
+ ret.n[SIX_LOCK_intent] = !!(atomic_read(&lock->state) & SIX_LOCK_HELD_intent) +
+ lock->intent_lock_recurse;
+ ret.n[SIX_LOCK_write] = !!(atomic_read(&lock->state) & SIX_LOCK_HELD_write);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(six_lock_counts);
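
For reference, a caller can pull all three counts out of the returned struct like this; the debug helper below is purely illustrative and not part of this patch (kernel headers omitted):

	/* Hypothetical debug helper, not part of this patch. */
	static void six_lock_dump_counts(struct six_lock *lock)
	{
		struct six_lock_count c = six_lock_counts(lock);

		printk(KERN_DEBUG "six lock %p: read %u intent %u write %u\n",
		       lock, c.n[SIX_LOCK_read], c.n[SIX_LOCK_intent],
		       c.n[SIX_LOCK_write]);
	}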
+
+/**
+ * six_lock_readers_add - directly manipulate reader count of a lock
+ * @lock: lock to add/subtract readers for
+ * @nr: reader count to add/subtract
+ *
+ * When an upper layer is implementing lock reentrancy, we may have both read
+ * and intent locks on the same lock.
+ *
+ * When we need to take a write lock, the read locks will cause self-deadlock,
+ * because six locks themselves do not track which read locks are held by the
+ * current thread and which are held by a different thread - they do no
+ * per-thread tracking of held locks.
+ *
+ * However, if trylock() has failed, the upper layer that is tracking held
+ * locks may count up its own read locks, subtract them, take the write lock,
+ * and then re-add them.
+ *
+ * As in any other situation when taking a write lock, @lock must already be
+ * held with intent (one or more times), so @lock is never left fully unlocked.
+ */
+void six_lock_readers_add(struct six_lock *lock, int nr)
+{
+ if (lock->readers) {
+ this_cpu_add(*lock->readers, nr);
+ } else {
+ EBUG_ON((int) (atomic_read(&lock->state) & SIX_LOCK_HELD_read) + nr < 0);
+ /* reader count starts at bit 0 */
+ atomic_add(nr, &lock->state);
+ }
+}
+EXPORT_SYMBOL_GPL(six_lock_readers_add);
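
A minimal sketch of the dance described above, written from the upper layer's point of view; six_trylock_write(), six_lock_write() and six_lock_readers_add() are part of this API, while my_read_locks_held() is a hypothetical stand-in for the caller's own per-thread lock tracking:

	/* Illustrative only, not part of this patch. */
	static void upgrade_to_write(struct six_lock *lock)
	{
		if (!six_trylock_write(lock)) {
			/* my_read_locks_held(): hypothetical per-thread tracking */
			unsigned nr = my_read_locks_held(lock);

			six_lock_readers_add(lock, -(int) nr);	/* drop our read counts */
			six_lock_write(lock, NULL, NULL);	/* no self-deadlock now */
			six_lock_readers_add(lock, nr);		/* restore them */
		}
	}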
+
+/**
+ * six_lock_exit - release resources held by a lock prior to freeing
+ * @lock: lock to exit
+ *
+ * When a lock was initialized in percpu mode (SIX_LOCK_INIT_PCPU), this is
+ * required to free the percpu read counts.
+ */
+void six_lock_exit(struct six_lock *lock)
+{
+ WARN_ON(lock->readers && pcpu_read_count(lock));
+ WARN_ON(atomic_read(&lock->state) & SIX_LOCK_HELD_read);
+
+ free_percpu(lock->readers);
+ lock->readers = NULL;
+}
+EXPORT_SYMBOL_GPL(six_lock_exit);
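
To show where this fits in a lock's lifecycle, here is a brief sketch pairing percpu-mode initialization with six_lock_exit(); it assumes the six_lock_init(lock, flags) wrapper that passes flags through to __six_lock_init(), and the containing structure and allocation helpers are hypothetical:

	/* Illustrative only: my_node and its helpers are hypothetical. */
	struct my_node {
		struct six_lock	lock;
	};

	static struct my_node *my_node_alloc(gfp_t gfp)
	{
		struct my_node *n = kzalloc(sizeof(*n), gfp);

		if (n)
			six_lock_init(&n->lock, SIX_LOCK_INIT_PCPU);
		return n;
	}

	static void my_node_free(struct my_node *n)
	{
		six_lock_exit(&n->lock);	/* frees the percpu read counts, if any */
		kfree(n);
	}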
+
+void __six_lock_init(struct six_lock *lock, const char *name,
+ struct lock_class_key *key, enum six_lock_init_flags flags)
+{
+ atomic_set(&lock->state, 0);
+ raw_spin_lock_init(&lock->wait_lock);
+ INIT_LIST_HEAD(&lock->wait_list);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ debug_check_no_locks_freed((void *) lock, sizeof(*lock));
+ lockdep_init_map(&lock->dep_map, name, key, 0);
+#endif
+
+ /*
+ * Don't assume that we have real percpu variables available in
+ * userspace:
+ */
+#ifdef __KERNEL__
+ if (flags & SIX_LOCK_INIT_PCPU) {
+ /*
+ * We don't return an error here on memory allocation failure
+ * since percpu is an optimization, and locks will work with the
+ * same semantics in non-percpu mode: callers can check for
+ * failure if they wish by checking lock->readers, but generally
+ * will not want to treat it as an error.
+ */
+ lock->readers = alloc_percpu(unsigned);
+ }
+#endif