git.sesse.net Git - bcachefs-tools-debian/blobdiff - libbcachefs/clock.c
Disable pristine-tar option in gbp.conf, since there is no pristine-tar branch.
[bcachefs-tools-debian] / libbcachefs / clock.c
index f18266330687f604a1d9e7706a7d94a601f792ff..3636444511064b51e5a004b953eacf94e7c70d12 100644 (file)
@@ -18,6 +18,14 @@ void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
        size_t i;
 
        spin_lock(&clock->timer_lock);
+
+       if (time_after_eq((unsigned long) atomic64_read(&clock->now),
+                         timer->expire)) {
+               spin_unlock(&clock->timer_lock);
+               timer->fn(timer);
+               return;
+       }
+
        for (i = 0; i < clock->timers.used; i++)
                if (clock->timers.data[i] == timer)
                        goto out;
@@ -101,7 +109,7 @@ void bch2_kthread_io_clock_wait(struct io_clock *clock,
        if (cpu_timeout != MAX_SCHEDULE_TIMEOUT)
                mod_timer(&wait.cpu_timer, cpu_timeout + jiffies);
 
-       while (1) {
+       do {
                set_current_state(TASK_INTERRUPTIBLE);
                if (kthread && kthread_should_stop())
                        break;
@@ -111,10 +119,10 @@ void bch2_kthread_io_clock_wait(struct io_clock *clock,
 
                schedule();
                try_to_freeze();
-       }
+       } while (0);
 
        __set_current_state(TASK_RUNNING);
-       del_singleshot_timer_sync(&wait.cpu_timer);
+       del_timer_sync(&wait.cpu_timer);
        destroy_timer_on_stack(&wait.cpu_timer);
        bch2_io_timer_del(clock, &wait.io_timer);
 }
@@ -135,26 +143,30 @@ static struct io_timer *get_expired_timer(struct io_clock *clock,
        return ret;
 }
 
/*
 * Advance the filesystem IO clock by @sectors and fire any timers that
 * have now expired.  The clock counts sectors of IO, not wall time.
 */
void __bch2_increment_clock(struct io_clock *clock, unsigned sectors)
{
	struct io_timer *timer;
	/* Atomically bump the clock and read back the new time: */
	unsigned long now = atomic64_add_return(sectors, &clock->now);

	/*
	 * Pop and run every timer whose expiry is <= now; timer callbacks
	 * run here without timer_lock held (get_expired_timer takes and
	 * drops the lock per call).
	 */
	while ((timer = get_expired_timer(clock, now)))
		timer->fn(timer);
}

/*
 * Debug/sysfs helper: print each pending IO timer's callback and how many
 * sectors remain until it expires, one per line.
 */
void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock)
{
	unsigned long now;
	unsigned i;

	/*
	 * Mark the printbuf atomic while we hold the spinlock so it won't
	 * attempt a sleeping allocation - NOTE(review): presumably that is
	 * the printbuf contract; confirm against printbuf implementation.
	 */
	out->atomic++;
	spin_lock(&clock->timer_lock);
	now = atomic64_read(&clock->now);

	/*
	 * expire - now is printed signed (%li) so an already-expired timer
	 * shows as negative - assumed intentional; TODO confirm.
	 */
	for (i = 0; i < clock->timers.used; i++)
		prt_printf(out, "%ps:\t%li\n",
		       clock->timers.data[i]->fn,
		       clock->timers.data[i]->expire - now);
	spin_unlock(&clock->timer_lock);
	--out->atomic;
}
 
 void bch2_io_clock_exit(struct io_clock *clock)
@@ -165,15 +177,17 @@ void bch2_io_clock_exit(struct io_clock *clock)
 
/*
 * Initialize an io_clock: zero the clock, set up the timer lock, compute
 * the maximum per-cpu buffering slop, and allocate the percpu buffer and
 * the timer heap.
 *
 * Returns 0 on success, -BCH_ERR_ENOMEM_io_clock_init if either
 * allocation fails.
 */
int bch2_io_clock_init(struct io_clock *clock)
{
	atomic64_set(&clock->now, 0);
	spin_lock_init(&clock->timer_lock);

	/*
	 * Worst-case number of sectors buffered in the percpu counters and
	 * thus not yet reflected in clock->now.
	 */
	clock->max_slop = IO_CLOCK_PCPU_SECTORS * num_possible_cpus();

	clock->pcpu_buf = alloc_percpu(*clock->pcpu_buf);
	if (!clock->pcpu_buf)
		return -BCH_ERR_ENOMEM_io_clock_init;

	/*
	 * NOTE(review): pcpu_buf is not freed here if init_heap() fails -
	 * presumably the caller invokes bch2_io_clock_exit() on error;
	 * confirm against the caller's error path.
	 */
	if (!init_heap(&clock->timers, NR_IO_TIMERS, GFP_KERNEL))
		return -BCH_ERR_ENOMEM_io_clock_init;

	return 0;
}