Move c_src dirs back to toplevel
diff --git a/libbcachefs/six.c b/libbcachefs/six.c
index 458a1de0a6e39c89bb6840be729a4fc3f2a52ec5..3a494c5d12478595c76bebc89fd15b517c5ed6d0 100644
--- a/libbcachefs/six.c
+++ b/libbcachefs/six.c
@@ -163,8 +163,11 @@ static int __do_six_trylock(struct six_lock *lock, enum six_lock_type type,
                this_cpu_sub(*lock->readers, !ret);
                preempt_enable();
 
-               if (!ret && (old & SIX_LOCK_WAITING_write))
-                       ret = -1 - SIX_LOCK_write;
+               if (!ret) {
+                       smp_mb();
+                       if (atomic_read(&lock->state) & SIX_LOCK_WAITING_write)
+                               ret = -1 - SIX_LOCK_write;
+               }
        } else if (type == SIX_LOCK_write && lock->readers) {
                if (try) {
                        atomic_add(SIX_LOCK_HELD_write, &lock->state);
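
The new smp_mb() above closes a wakeup race in the percpu-reader fast path: a failed read-trylock's transient count increment may have caused a writer to back off and set SIX_LOCK_WAITING_write, so after backing the increment out the reader must observe that wait bit and wake the writer. The barrier keeps the count decrement from being reordered past the re-read of lock->state. As a rough userspace C11 illustration of the store/barrier/load idiom (all names here are hypothetical, not the bcachefs API, and atomic_thread_fence() stands in for smp_mb()):

#include <stdatomic.h>
#include <stdbool.h>

#define WAITING_WRITE	1	/* plays the role of SIX_LOCK_WAITING_write */

static atomic_int state;	/* plays the role of lock->state */
static atomic_int readers;	/* summed stand-in for the percpu reader counts */

static bool read_trylock_failed_path(void)
{
	/* Back out the optimistic reader increment. */
	atomic_fetch_sub_explicit(&readers, 1, memory_order_relaxed);

	/*
	 * Full fence, as smp_mb() in the hunk above: the decrement must
	 * be visible before we re-load state, or we could miss a writer's
	 * wait bit while the writer backs off on our stale reader count.
	 */
	atomic_thread_fence(memory_order_seq_cst);

	/* Report whether a blocked writer needs waking. */
	return atomic_load_explicit(&state, memory_order_relaxed) & WAITING_WRITE;
}
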
@@ -321,101 +324,57 @@ bool six_relock_ip(struct six_lock *lock, enum six_lock_type type,
 }
 EXPORT_SYMBOL_GPL(six_relock_ip);
 
-#ifdef CONFIG_LOCK_SPIN_ON_OWNER
+#ifdef CONFIG_BCACHEFS_SIX_OPTIMISTIC_SPIN
 
-static inline bool six_can_spin_on_owner(struct six_lock *lock)
+static inline bool six_owner_running(struct six_lock *lock)
 {
-       struct task_struct *owner;
-       bool ret;
-
-       if (need_resched())
-               return false;
-
+       /*
+        * When there's no owner, we might have preempted between the owner
+        * acquiring the lock and setting the owner field. If we're an RT task,
+        * that will live-lock because we won't let the owner complete.
+        */
        rcu_read_lock();
-       owner = READ_ONCE(lock->owner);
-       ret = !owner || owner_on_cpu(owner);
+       struct task_struct *owner = READ_ONCE(lock->owner);
+       bool ret = owner ? owner_on_cpu(owner) : !rt_task(current);
        rcu_read_unlock();
 
        return ret;
 }
 
-static inline bool six_spin_on_owner(struct six_lock *lock,
-                                    struct task_struct *owner,
-                                    u64 end_time)
+static inline bool six_optimistic_spin(struct six_lock *lock,
+                                      struct six_lock_waiter *wait,
+                                      enum six_lock_type type)
 {
-       bool ret = true;
        unsigned loop = 0;
-
-       rcu_read_lock();
-       while (lock->owner == owner) {
-               /*
-                * Ensure we emit the owner->on_cpu, dereference _after_
-                * checking lock->owner still matches owner. If that fails,
-                * owner might point to freed memory. If it still matches,
-                * the rcu_read_lock() ensures the memory stays valid.
-                */
-               barrier();
-
-               if (!owner_on_cpu(owner) || need_resched()) {
-                       ret = false;
-                       break;
-               }
-
-               if (!(++loop & 0xf) && (time_after64(sched_clock(), end_time))) {
-                       six_set_bitmask(lock, SIX_LOCK_NOSPIN);
-                       ret = false;
-                       break;
-               }
-
-               cpu_relax();
-       }
-       rcu_read_unlock();
-
-       return ret;
-}
-
-static inline bool six_optimistic_spin(struct six_lock *lock, enum six_lock_type type)
-{
-       struct task_struct *task = current;
        u64 end_time;
 
        if (type == SIX_LOCK_write)
                return false;
 
-       preempt_disable();
-       if (!six_can_spin_on_owner(lock))
-               goto fail;
+       /* Only the waiter at the head of the wait list spins: */
+       if (lock->wait_list.next != &wait->list)
+               return false;
 
-       if (!osq_lock(&lock->osq))
-               goto fail;
+       /* A previous spinner already exhausted the spin budget: */
+       if (atomic_read(&lock->state) & SIX_LOCK_NOSPIN)
+               return false;
 
+       preempt_disable();
        end_time = sched_clock() + 10 * NSEC_PER_USEC;
 
-       while (1) {
-               struct task_struct *owner;
-
+       while (!need_resched() && six_owner_running(lock)) {
                /*
-                * If there's an owner, wait for it to either
-                * release the lock or go to sleep.
+                * Ensures that writes to the waitlist entry happen after we see
+                * wait->lock_acquired: pairs with the smp_store_release in
+                * __six_lock_wakeup
                 */
-               owner = READ_ONCE(lock->owner);
-               if (owner && !six_spin_on_owner(lock, owner, end_time))
-                       break;
-
-               if (do_six_trylock(lock, type, false)) {
-                       osq_unlock(&lock->osq);
+               if (smp_load_acquire(&wait->lock_acquired)) {
                        preempt_enable();
                        return true;
                }
 
-               /*
-                * When there's no owner, we might have preempted between the
-                * owner acquiring the lock and setting the owner field. If
-                * we're an RT task that will live-lock because we won't let
-                * the owner complete.
-                */
-               if (!owner && (need_resched() || rt_task(task)))
+               if (!(++loop & 0xf) && (time_after64(sched_clock(), end_time))) {
+                       six_set_bitmask(lock, SIX_LOCK_NOSPIN);
                        break;
+               }
 
                /*
                 * The cpu_relax() call is a compiler barrier which forces
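
The rewritten spin loop also bounds its own runtime: sched_clock() is consulted only on every sixteenth iteration (the ++loop & 0xf mask keeps the clock read off the hot path), and once the roughly 10us budget is exhausted the loop sets SIX_LOCK_NOSPIN so subsequent waiters skip straight to sleeping. A minimal userspace sketch of the same deadline idiom, assuming clock_gettime(CLOCK_MONOTONIC) as a stand-in for sched_clock() and illustrative names throughout:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static uint64_t ns_now(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

/* Spin on *done for at most budget_ns; false means the budget ran out. */
static bool spin_bounded(volatile _Bool *done, uint64_t budget_ns)
{
	uint64_t end = ns_now() + budget_ns;
	unsigned loop = 0;

	while (!*done) {
		/* Poll the clock only on every sixteenth iteration. */
		if (!(++loop & 0xf) && ns_now() > end)
			return false;	/* caller should block (and set NOSPIN) */
	}
	return true;
}
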
@@ -426,24 +385,15 @@ static inline bool six_optimistic_spin(struct six_lock *lock, enum six_lock_type
                cpu_relax();
        }
 
-       osq_unlock(&lock->osq);
-fail:
        preempt_enable();
-
-       /*
-        * If we fell out of the spin path because of need_resched(),
-        * reschedule now, before we try-lock again. This avoids getting
-        * scheduled out right after we obtained the lock.
-        */
-       if (need_resched())
-               schedule();
-
        return false;
 }
 
-#else /* CONFIG_LOCK_SPIN_ON_OWNER */
+#else /* CONFIG_BCACHEFS_SIX_OPTIMISTIC_SPIN */
 
-static inline bool six_optimistic_spin(struct six_lock *lock, enum six_lock_type type)
+static inline bool six_optimistic_spin(struct six_lock *lock,
+                                      struct six_lock_waiter *wait,
+                                      enum six_lock_type type)
 {
        return false;
 }
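
Note also what the loop spins on now: not lock->owner, but wait->lock_acquired, which the unlock path publishes with smp_store_release() once the lock has been handed to this waiter. The smp_load_acquire() pairs with that release store, so every write the waker made to the waiter entry is ordered before the spinner's first sight of the flag. A userspace C11 approximation of that handoff (struct and function names are hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

struct waiter {
	atomic_bool	lock_acquired;
	int		handoff_data;	/* anything the waker writes first */
};

/* Waker side, cf. __six_lock_wakeup(): ordinary writes first, flag last. */
static void grant_lock(struct waiter *w)
{
	w->handoff_data = 1;
	atomic_store_explicit(&w->lock_acquired, true, memory_order_release);
}

/*
 * Spinner side: the acquire load pairs with the release store above, so
 * once this returns true, handoff_data is guaranteed to be visible too.
 */
static bool saw_handoff(struct waiter *w)
{
	return atomic_load_explicit(&w->lock_acquired, memory_order_acquire);
}
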
@@ -467,9 +417,6 @@ static int six_lock_slowpath(struct six_lock *lock, enum six_lock_type type,
        trace_contention_begin(lock, 0);
        lock_contended(&lock->dep_map, ip);
 
-       if (six_optimistic_spin(lock, type))
-               goto out;
-
        wait->task              = current;
        wait->lock_want         = type;
        wait->lock_acquired     = false;
@@ -507,6 +454,9 @@ static int six_lock_slowpath(struct six_lock *lock, enum six_lock_type type,
                ret = 0;
        }
 
+       if (six_optimistic_spin(lock, wait, type))
+               goto out;
+
        while (1) {
                set_current_state(TASK_UNINTERRUPTIBLE);
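
The last two hunks are the behavioural core of the change: six_optimistic_spin() used to run before the task put itself on the wait list, under an OSQ lock; it is now called only after the waiter has been enqueued and the trylock has failed, which is what lets it poll wait->lock_acquired and restrict spinning to the head waiter. In outline, and with stub helpers standing in for the real waitlist and scheduler machinery (none of these names are the bcachefs API), the resulting slowpath shape looks roughly like this:

#include <stdatomic.h>
#include <stdbool.h>

struct waiter {
	atomic_bool lock_acquired;
};

static void enqueue_waiter(struct waiter *w)	{ (void)w; }	/* stub */

static bool spin_briefly(struct waiter *w)			/* stub */
{
	return atomic_load_explicit(&w->lock_acquired, memory_order_acquire);
}

static void sleep_until_granted(struct waiter *w)		/* stub */
{
	while (!atomic_load_explicit(&w->lock_acquired, memory_order_acquire))
		;	/* really: set_current_state() + schedule() */
}

static void lock_slowpath(struct waiter *w)
{
	enqueue_waiter(w);	/* visible to unlockers from this point on */

	if (spin_briefly(w))	/* new order: spin only once enqueued */
		return;

	sleep_until_granted(w);	/* fall back to blocking */
}
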