4 #include <linux/freezer.h>
5 #include <linux/kthread.h>
6 #include <linux/preempt.h>
/*
 * Heap comparator ordering io_timers by expiry time (soonest first).
 *
 * NOTE(review): returns the signed difference of the two expiry values;
 * presumably expiries stay close enough together that the subtraction
 * cannot wrap — verify against how clock->now advances. The parameter
 * lines declaring l and r are elided from this view.
 */
8 static inline long io_timer_cmp(io_timer_heap *h,
12 return l->expire - r->expire;
/*
 * Queue @timer on @clock's heap of pending timers.
 *
 * The heap is scanned first so a timer that is already queued is not
 * added twice (the early-out taken on a match is elided from this
 * view). heap_add() failing — i.e. the fixed-size heap being full —
 * is treated as a fatal bug.
 */
15 void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
19 spin_lock(&clock->timer_lock);
/* linear duplicate check, done under the lock */
20 for (i = 0; i < clock->timers.used; i++)
21 if (clock->timers.data[i] == timer)
/* heap_add() returns false only when the heap has no room left */
24 BUG_ON(!heap_add(&clock->timers, timer, io_timer_cmp));
26 spin_unlock(&clock->timer_lock);
/*
 * Remove @timer from @clock's pending-timer heap, if present.
 *
 * A no-op when the timer is not found — safe to call after the timer
 * has already fired and been popped. (The break after heap_del() and
 * the loop's closing brace are elided from this view.)
 */
29 void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer)
33 spin_lock(&clock->timer_lock);
/* linear search for the timer's current heap slot */
35 for (i = 0; i < clock->timers.used; i++)
36 if (clock->timers.data[i] == timer) {
37 heap_del(&clock->timers, i, io_timer_cmp);
41 spin_unlock(&clock->timer_lock);
/*
 * On-stack bookkeeping for a task sleeping on an io_clock: the
 * embedded timer is queued on the clock, and @task is the task to
 * wake when it fires. (Any further members and the closing brace are
 * elided from this view.)
 */
44 struct io_clock_wait {
45 struct io_timer timer;
46 struct task_struct *task;
/*
 * Timer callback: recover the enclosing io_clock_wait from the
 * embedded timer and wake the task that queued it.
 */
50 static void io_clock_wait_fn(struct io_timer *timer)
52 struct io_clock_wait *wait = container_of(timer,
53 struct io_clock_wait, timer);
56 wake_up_process(wait->task);
/*
 * Sleep until the io clock reaches @until: queue a timer that wakes
 * this task, sleep (the actual schedule() call is elided from this
 * view), then delete the timer so the on-stack io_clock_wait cannot
 * be referenced after this function returns.
 */
59 void bch2_io_clock_schedule_timeout(struct io_clock *clock, unsigned long until)
61 struct io_clock_wait wait;
63 /* XXX: calculate sleep time rigorously */
64 wait.timer.expire = until;
65 wait.timer.fn = io_clock_wait_fn;
68 bch2_io_timer_add(clock, &wait.timer);
/* required: wait.timer lives on our stack and we are about to return */
72 bch2_io_timer_del(clock, &wait.timer);
/*
 * Wait for the io clock to reach a target expiry — _only_ to be used
 * from a kthread, since the sleep loop checks kthread_should_stop().
 *
 * NOTE(review): the second parameter line (the wait target, assigned
 * to wait.timer.expire below) and parts of the sleep loop are elided
 * from this view.
 */
78 void bch2_kthread_io_clock_wait(struct io_clock *clock,
81 struct io_clock_wait wait;
83 /* XXX: calculate sleep time rigorously */
84 wait.timer.expire = until;
85 wait.timer.fn = io_clock_wait_fn;
88 bch2_io_timer_add(clock, &wait.timer);
/* sleep until woken by io_clock_wait_fn() or asked to stop */
91 set_current_state(TASK_INTERRUPTIBLE);
92 if (kthread_should_stop())
102 __set_current_state(TASK_RUNNING);
/* timer may already have fired and been popped; del is then a no-op */
103 bch2_io_timer_del(clock, &wait.timer);
/*
 * Pop and return the earliest-expiring timer if it has expired as of
 * @now; returns NULL when the heap is empty or nothing has expired
 * yet. (The @now parameter line and the return statement are elided
 * from this view.)
 */
106 static struct io_timer *get_expired_timer(struct io_clock *clock,
109 struct io_timer *ret = NULL;
111 spin_lock(&clock->timer_lock);
/* data[0] is the heap root, i.e. the soonest-expiring timer */
113 if (clock->timers.used &&
114 time_after_eq(now, clock->timers.data[0]->expire))
115 heap_pop(&clock->timers, ret, io_timer_cmp);
117 spin_unlock(&clock->timer_lock);
/*
 * Advance the io clock for direction @rw by @sectors and run any
 * timers that have now expired.
 *
 * Increments are batched in a percpu counter and only folded into
 * clock->now (one atomic add) once IO_CLOCK_PCPU_SECTORS worth has
 * accumulated on this cpu, keeping atomics off the fast path. (The
 * fast-path early return and the timer->fn() dispatch in the loop
 * body are elided from this view.)
 */
122 void bch2_increment_clock(struct bch_fs *c, unsigned sectors, int rw)
124 struct io_clock *clock = &c->io_clock[rw];
125 struct io_timer *timer;
128 /* Buffer up one megabyte worth of IO in the percpu counter */
131 if (likely(this_cpu_add_return(*clock->pcpu_buf, sectors) <
132 IO_CLOCK_PCPU_SECTORS)) {
/* slow path: drain this cpu's buffered sectors into the shared clock */
137 sectors = this_cpu_xchg(*clock->pcpu_buf, 0);
139 now = atomic_long_add_return(sectors, &clock->now);
/* fire every timer whose expiry has been reached */
141 while ((timer = get_expired_timer(clock, now)))
/*
 * Tear down @clock: free the timer heap and the percpu sector buffer
 * allocated by bch2_io_clock_init().
 */
145 void bch2_io_clock_exit(struct io_clock *clock)
147 free_heap(&clock->timers);
148 free_percpu(clock->pcpu_buf);
/*
 * Initialize @clock: zero the clock position, set up the timer lock,
 * and allocate the percpu sector buffer and the timer heap.
 *
 * NOTE(review): the error-return and success-return lines are elided
 * from this view — presumably -ENOMEM on either allocation failing
 * and 0 on success; confirm against the full file.
 */
151 int bch2_io_clock_init(struct io_clock *clock)
153 atomic_long_set(&clock->now, 0);
154 spin_lock_init(&clock->timer_lock);
156 clock->pcpu_buf = alloc_percpu(*clock->pcpu_buf);
157 if (!clock->pcpu_buf)
160 if (!init_heap(&clock->timers, NR_IO_TIMERS, GFP_KERNEL))