]> git.sesse.net Git - bcachefs-tools-debian/blobdiff - libbcachefs/clock.c
Disable pristine-tar option in gbp.conf, since there is no pristine-tar branch.
[bcachefs-tools-debian] / libbcachefs / clock.c
index 3c3649f0862c8779307ef2ce8271ba8eee79c460..3636444511064b51e5a004b953eacf94e7c70d12 100644 (file)
@@ -1,12 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
 #include "bcachefs.h"
 #include "clock.h"
 
 #include <linux/freezer.h>
 #include <linux/kthread.h>
+#include <linux/preempt.h>
 
-static inline bool io_timer_cmp(struct io_timer *l, struct io_timer *r)
+static inline long io_timer_cmp(io_timer_heap *h,
+                               struct io_timer *l,
+                               struct io_timer *r)
 {
-       return time_after(l->expire, r->expire);
+       return l->expire - r->expire;
 }
 
/*
 * Arm @timer on @clock.
 *
 * If the clock has already advanced past the timer's expiry, the
 * callback runs synchronously — with timer_lock dropped first, so
 * timer->fn() may safely re-enter the clock code.  A timer that is
 * already queued is left in place; otherwise it is pushed on the heap.
 */
void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
{
	size_t i;

	spin_lock(&clock->timer_lock);

	/* Already expired? run the callback now instead of queueing: */
	if (time_after_eq((unsigned long) atomic64_read(&clock->now),
			  timer->expire)) {
		spin_unlock(&clock->timer_lock);
		timer->fn(timer);
		return;
	}

	/* Re-adding a queued timer must not duplicate it in the heap: */
	for (i = 0; i < clock->timers.used; i++)
		if (clock->timers.data[i] == timer)
			goto out;

	/* heap was sized NR_IO_TIMERS at init, so this should not fail */
	BUG_ON(!heap_add(&clock->timers, timer, io_timer_cmp, NULL));
out:
	spin_unlock(&clock->timer_lock);
}
@@ -31,7 +43,7 @@ void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer)
 
        for (i = 0; i < clock->timers.used; i++)
                if (clock->timers.data[i] == timer) {
-                       heap_del(&clock->timers, i, io_timer_cmp);
+                       heap_del(&clock->timers, i, io_timer_cmp, NULL);
                        break;
                }
 
@@ -39,7 +51,8 @@ void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer)
 }
 
/*
 * On-stack wait state shared by the io-clock (sector-based) timer and an
 * optional wall-clock (jiffies) fallback timer; whichever fires first
 * sets ->expired and wakes ->task.
 */
struct io_clock_wait {
	struct io_timer		io_timer;	/* fires when clock->now reaches expire */
	struct timer_list	cpu_timer;	/* jiffies-based fallback timeout */
	struct task_struct	*task;		/* sleeper to wake */
	int			expired;
};
@@ -47,7 +60,16 @@ struct io_clock_wait {
 static void io_clock_wait_fn(struct io_timer *timer)
 {
        struct io_clock_wait *wait = container_of(timer,
-                               struct io_clock_wait, timer);
+                               struct io_clock_wait, io_timer);
+
+       wait->expired = 1;
+       wake_up_process(wait->task);
+}
+
+static void io_clock_cpu_timeout(struct timer_list *timer)
+{
+       struct io_clock_wait *wait = container_of(timer,
+                               struct io_clock_wait, cpu_timer);
 
        wait->expired = 1;
        wake_up_process(wait->task);
/*
 * Sleep until @clock advances to @until (measured in sectors of IO), or
 * until something else wakes this task.
 *
 * NOTE(review): no set_current_state() before schedule() here —
 * presumably the caller sets the task state first; confirm against
 * callers.
 */
void bch2_io_clock_schedule_timeout(struct io_clock *clock, unsigned long until)
{
	struct io_clock_wait wait;

	/* XXX: calculate sleep time rigorously */
	wait.io_timer.expire	= until;
	wait.io_timer.fn	= io_clock_wait_fn;
	wait.task		= current;
	wait.expired		= 0;
	bch2_io_timer_add(clock, &wait.io_timer);

	schedule();

	/* wait is on-stack: must be off the heap before we return */
	bch2_io_timer_del(clock, &wait.io_timer);
}
 
-/*
- * _only_ to be used from a kthread
- */
/*
 * Sleep until either the io clock reaches @io_until (in sectors) or
 * @cpu_timeout jiffies elapse, whichever comes first.  Usable from both
 * kthreads and regular processes; a kthread additionally returns early
 * when it is being stopped.
 */
void bch2_kthread_io_clock_wait(struct io_clock *clock,
				unsigned long io_until,
				unsigned long cpu_timeout)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct io_clock_wait wait;

	wait.io_timer.expire	= io_until;
	wait.io_timer.fn	= io_clock_wait_fn;
	wait.task		= current;
	wait.expired		= 0;
	bch2_io_timer_add(clock, &wait.io_timer);

	/* on-stack fallback timer in wall-clock (jiffies) time: */
	timer_setup_on_stack(&wait.cpu_timer, io_clock_cpu_timeout, 0);

	if (cpu_timeout != MAX_SCHEDULE_TIMEOUT)
		mod_timer(&wait.cpu_timer, cpu_timeout + jiffies);

	/*
	 * NOTE(review): do { } while (0) — schedule() runs at most once, so
	 * we return after the first wakeup even if neither timer fired;
	 * presumably callers re-check their own condition.  Confirm this
	 * single-sleep behavior is intended.
	 */
	do {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread && kthread_should_stop())
			break;

		if (wait.expired)
			break;

		schedule();
		try_to_freeze();
	} while (0);

	__set_current_state(TASK_RUNNING);
	/* both timers reference on-stack state; tear down before returning */
	del_timer_sync(&wait.cpu_timer);
	destroy_timer_on_stack(&wait.cpu_timer);
	bch2_io_timer_del(clock, &wait.io_timer);
}
 
 static struct io_timer *get_expired_timer(struct io_clock *clock,
@@ -109,34 +136,37 @@ static struct io_timer *get_expired_timer(struct io_clock *clock,
 
        if (clock->timers.used &&
            time_after_eq(now, clock->timers.data[0]->expire))
-               heap_pop(&clock->timers, ret, io_timer_cmp);
+               heap_pop(&clock->timers, ret, io_timer_cmp, NULL);
 
        spin_unlock(&clock->timer_lock);
 
        return ret;
 }
 
-void bch2_increment_clock(struct bch_fs *c, unsigned sectors, int rw)
+void __bch2_increment_clock(struct io_clock *clock, unsigned sectors)
 {
-       struct io_clock *clock = &c->io_clock[rw];
        struct io_timer *timer;
-       unsigned long now;
+       unsigned long now = atomic64_add_return(sectors, &clock->now);
 
-       /* Buffer up one megabyte worth of IO in the percpu counter */
-       preempt_disable();
+       while ((timer = get_expired_timer(clock, now)))
+               timer->fn(timer);
+}
 
-       if (likely(this_cpu_add_return(*clock->pcpu_buf, sectors) <
-                  IO_CLOCK_PCPU_SECTORS)) {
-               preempt_enable();
-               return;
-       }
+void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock)
+{
+       unsigned long now;
+       unsigned i;
 
-       sectors = this_cpu_xchg(*clock->pcpu_buf, 0);
-       preempt_enable();
-       now = atomic_long_add_return(sectors, &clock->now);
+       out->atomic++;
+       spin_lock(&clock->timer_lock);
+       now = atomic64_read(&clock->now);
 
-       while ((timer = get_expired_timer(clock, now)))
-               timer->fn(timer);
+       for (i = 0; i < clock->timers.used; i++)
+               prt_printf(out, "%ps:\t%li\n",
+                      clock->timers.data[i]->fn,
+                      clock->timers.data[i]->expire - now);
+       spin_unlock(&clock->timer_lock);
+       --out->atomic;
 }
 
 void bch2_io_clock_exit(struct io_clock *clock)
@@ -147,15 +177,17 @@ void bch2_io_clock_exit(struct io_clock *clock)
 
/*
 * Initialize @clock: zero the sector counter, set up locking, and
 * allocate the per-cpu buffer plus the timer heap.
 *
 * Returns 0, or -BCH_ERR_ENOMEM_io_clock_init on allocation failure.
 * NOTE(review): if init_heap() fails, pcpu_buf is not freed here —
 * presumably the caller unwinds via bch2_io_clock_exit(); confirm.
 */
int bch2_io_clock_init(struct io_clock *clock)
{
	atomic64_set(&clock->now, 0);
	spin_lock_init(&clock->timer_lock);

	/* worst-case clock lag: each cpu may buffer this many sectors */
	clock->max_slop = IO_CLOCK_PCPU_SECTORS * num_possible_cpus();

	clock->pcpu_buf = alloc_percpu(*clock->pcpu_buf);
	if (!clock->pcpu_buf)
		return -BCH_ERR_ENOMEM_io_clock_init;

	if (!init_heap(&clock->timers, NR_IO_TIMERS, GFP_KERNEL))
		return -BCH_ERR_ENOMEM_io_clock_init;

	return 0;
}