4 #include <linux/freezer.h>
5 #include <linux/kthread.h>
/*
 * Heap comparator for the io_clock timer heap: returns true when @l expires
 * after @r, so the heap keeps the earliest-expiring timer at the root
 * (get_expired_timer() pops data[0]).  time_after() handles jiffies-style
 * counter wraparound.
 */
7 static inline bool io_timer_cmp(struct io_timer *l, struct io_timer *r)
9 return time_after(l->expire, r->expire);
/*
 * bch2_io_timer_add - queue @timer on @clock's expiry heap.
 *
 * Under timer_lock, linearly scan the heap so a timer that is already queued
 * is not added a second time (the early-exit branch body is not visible in
 * this excerpt -- presumably it jumps past the heap_add()).  BUG_ON() fires
 * if heap_add() fails, i.e. the heap (sized NR_IO_TIMERS at init) is full.
 */
12 void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
16 spin_lock(&clock->timer_lock);
/* Already queued?  Don't add it twice. */
17 for (i = 0; i < clock->timers.used; i++)
18 if (clock->timers.data[i] == timer)
21 BUG_ON(!heap_add(&clock->timers, timer, io_timer_cmp));
23 spin_unlock(&clock->timer_lock);
/*
 * bch2_io_timer_del - cancel @timer if it is currently queued on @clock.
 *
 * Linear scan under timer_lock; heap_del() removes the entry at the index
 * where it was found.  A timer that is not queued is silently ignored.
 */
26 void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer)
30 spin_lock(&clock->timer_lock);
32 for (i = 0; i < clock->timers.used; i++)
33 if (clock->timers.data[i] == timer) {
34 heap_del(&clock->timers, i, io_timer_cmp);
38 spin_unlock(&clock->timer_lock);
/*
 * On-stack waiter: an io_timer whose callback (io_clock_wait_fn) wakes the
 * task that queued it.  Further fields, if any, are not visible in this
 * excerpt.
 */
41 struct io_clock_wait {
42 struct io_timer timer;
43 struct task_struct *task;
/*
 * Timer callback: recover the enclosing io_clock_wait via container_of()
 * and wake the task sleeping on it.
 */
47 static void io_clock_wait_fn(struct io_timer *timer)
49 struct io_clock_wait *wait = container_of(timer,
50 struct io_clock_wait, timer);
53 wake_up_process(wait->task);
/*
 * Sleep until the IO clock reaches @until: queue an on-stack timer whose
 * callback wakes us, sleep (the schedule() call itself is not visible in
 * this excerpt), then remove the timer.  Removal on the way out is
 * mandatory because @wait lives on our stack.
 */
56 void bch2_io_clock_schedule_timeout(struct io_clock *clock, unsigned long until)
58 struct io_clock_wait wait;
60 /* XXX: calculate sleep time rigorously */
61 wait.timer.expire = until;
62 wait.timer.fn = io_clock_wait_fn;
65 bch2_io_timer_add(clock, &wait.timer);
69 bch2_io_timer_del(clock, &wait.timer);
/*
 * Sleep until the IO clock reaches @until, or until this kthread is asked
 * to stop -- _only_ to be used from a kthread (it calls
 * kthread_should_stop()).
 */
75 void bch2_kthread_io_clock_wait(struct io_clock *clock,
78 struct io_clock_wait wait;
80 /* XXX: calculate sleep time rigorously */
81 wait.timer.expire = until;
82 wait.timer.fn = io_clock_wait_fn;
85 bch2_io_timer_add(clock, &wait.timer);
/*
 * Mark ourselves interruptible before checking kthread_should_stop() so a
 * wakeup arriving between the check and the (not-visible) schedule() is
 * not lost.
 */
88 set_current_state(TASK_INTERRUPTIBLE);
89 if (kthread_should_stop())
99 __set_current_state(TASK_RUNNING);
100 bch2_io_timer_del(clock, &wait.timer);
/*
 * Pop and return the soonest-expiring timer if it has expired as of @now,
 * otherwise return NULL.  data[0] is the heap root, i.e. the timer with
 * the earliest expire (see io_timer_cmp()); the return statement is not
 * visible in this excerpt.
 */
103 static struct io_timer *get_expired_timer(struct io_clock *clock,
106 struct io_timer *ret = NULL;
108 spin_lock(&clock->timer_lock);
110 if (clock->timers.used &&
111 time_after_eq(now, clock->timers.data[0]->expire))
112 heap_pop(&clock->timers, ret, io_timer_cmp);
114 spin_unlock(&clock->timer_lock);
/*
 * Advance the IO clock for direction @rw by @sectors of IO.
 *
 * Sectors are batched in a percpu counter; only when a CPU's batch reaches
 * IO_CLOCK_PCPU_SECTORS is the shared atomic clock->now advanced (keeping
 * the hot path free of atomics on the shared cacheline).  After advancing,
 * every timer whose expiry has been reached is popped and presumably
 * dispatched -- the while-loop body is not visible in this excerpt.
 */
119 void bch2_increment_clock(struct bch_fs *c, unsigned sectors, int rw)
121 struct io_clock *clock = &c->io_clock[rw];
122 struct io_timer *timer;
125 /* Buffer up one megabyte worth of IO in the percpu counter */
128 if (likely(this_cpu_add_return(*clock->pcpu_buf, sectors) <
129 IO_CLOCK_PCPU_SECTORS)) {
/* Batch full: flush this CPU's count into the shared clock. */
134 sectors = this_cpu_xchg(*clock->pcpu_buf, 0);
136 now = atomic_long_add_return(sectors, &clock->now);
138 while ((timer = get_expired_timer(clock, now)))
/* Release the resources allocated by bch2_io_clock_init(). */
142 void bch2_io_clock_exit(struct io_clock *clock)
144 free_heap(&clock->timers);
145 free_percpu(clock->pcpu_buf);
/*
 * Initialize an io_clock: zero the clock position, initialize the timer
 * lock, and allocate the percpu batching buffer plus the NR_IO_TIMERS-entry
 * timer heap.  The error returns (presumably -ENOMEM) and the success
 * return are not visible in this excerpt.
 */
148 int bch2_io_clock_init(struct io_clock *clock)
150 atomic_long_set(&clock->now, 0);
151 spin_lock_init(&clock->timer_lock);
153 clock->pcpu_buf = alloc_percpu(*clock->pcpu_buf);
154 if (!clock->pcpu_buf)
157 if (!init_heap(&clock->timers, NR_IO_TIMERS, GFP_KERNEL))