#include "bcachefs.h"
#include "clock.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/preempt.h>
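
/*
 * bcachefs IO clock: counts sectors of IO rather than wall-clock time.
 * Timers on this clock fire after a given amount of IO has been done,
 * letting background work (e.g. rebalance, copygc) pace itself against
 * actual device activity instead of elapsed time.
 */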
static inline long io_timer_cmp(io_timer_heap *h,
				struct io_timer *l,
				struct io_timer *r)
{
	return l->expire - r->expire;
}
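
/*
 * Timers are kept in a heap ordered by expiry; adding a timer that is
 * already queued is a no-op, so callers may re-arm without checking.
 */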
void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
{
	size_t i;

	spin_lock(&clock->timer_lock);
	for (i = 0; i < clock->timers.used; i++)
		if (clock->timers.data[i] == timer)
			goto out;

	BUG_ON(!heap_add(&clock->timers, timer, io_timer_cmp));
out:
	spin_unlock(&clock->timer_lock);
}
void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer)
{
	size_t i;

	spin_lock(&clock->timer_lock);

	for (i = 0; i < clock->timers.used; i++)
		if (clock->timers.data[i] == timer) {
			heap_del(&clock->timers, i, io_timer_cmp);
			break;
		}

	spin_unlock(&clock->timer_lock);
}
struct io_clock_wait {
	struct io_timer		io_timer;
	struct timer_list	cpu_timer;
	struct task_struct	*task;
	int			expired;
};
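
/*
 * A waiter can be woken by either clock: io_clock_wait_fn fires when the IO
 * clock reaches the requested value, io_clock_cpu_timeout when the wall-clock
 * fallback timer expires. Both mark the wait expired and wake the task.
 */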
static void io_clock_wait_fn(struct io_timer *timer)
{
	struct io_clock_wait *wait = container_of(timer,
				struct io_clock_wait, io_timer);

	wait->expired = 1;
	wake_up_process(wait->task);
}
static void io_clock_cpu_timeout(struct timer_list *timer)
{
	struct io_clock_wait *wait = container_of(timer,
				struct io_clock_wait, cpu_timer);

	wait->expired = 1;
	wake_up_process(wait->task);
}
void bch2_io_clock_schedule_timeout(struct io_clock *clock, unsigned long until)
{
	struct io_clock_wait wait;

	/* XXX: calculate sleep time rigorously */
	wait.io_timer.expire	= until;
	wait.io_timer.fn	= io_clock_wait_fn;
	wait.task		= current;
	wait.expired		= 0;
	bch2_io_timer_add(clock, &wait.io_timer);

	schedule();

	bch2_io_timer_del(clock, &wait.io_timer);
}
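
/*
 * Sleep until the IO clock reaches io_until, cpu_timeout jiffies of
 * wall-clock time have passed, or (for kthreads) kthread_should_stop()
 * becomes true, whichever happens first.
 */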
void bch2_kthread_io_clock_wait(struct io_clock *clock,
				unsigned long io_until,
				unsigned long cpu_timeout)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct io_clock_wait wait;

	wait.io_timer.expire	= io_until;
	wait.io_timer.fn	= io_clock_wait_fn;
	wait.task		= current;
	wait.expired		= 0;
	bch2_io_timer_add(clock, &wait.io_timer);

	timer_setup_on_stack(&wait.cpu_timer, io_clock_cpu_timeout, 0);

	if (cpu_timeout != MAX_SCHEDULE_TIMEOUT)
		mod_timer(&wait.cpu_timer, cpu_timeout + jiffies);

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread && kthread_should_stop())
			break;

		if (wait.expired)
			break;

		schedule();
		try_to_freeze();
	}

	__set_current_state(TASK_RUNNING);
	del_singleshot_timer_sync(&wait.cpu_timer);
	destroy_timer_on_stack(&wait.cpu_timer);
	bch2_io_timer_del(clock, &wait.io_timer);
}
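
/* Returns the next timer due at or before @now, or NULL if none has expired */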
static struct io_timer *get_expired_timer(struct io_clock *clock,
					  unsigned long now)
{
	struct io_timer *ret = NULL;

	spin_lock(&clock->timer_lock);

	if (clock->timers.used &&
	    time_after_eq(now, clock->timers.data[0]->expire))
		heap_pop(&clock->timers, ret, io_timer_cmp);

	spin_unlock(&clock->timer_lock);

	return ret;
}
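
/*
 * Advance the IO clock: IO is accounted in a percpu buffer and only folded
 * into clock->now (firing any expired timers) once IO_CLOCK_PCPU_SECTORS
 * sectors have accumulated on this CPU, keeping the atomic update off the
 * fast path.
 */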
void bch2_increment_clock(struct bch_fs *c, unsigned sectors, int rw)
{
	struct io_clock *clock = &c->io_clock[rw];
	struct io_timer *timer;
	unsigned long now;

	/* Buffer up one megabyte worth of IO in the percpu counter */
	preempt_disable();

	if (likely(this_cpu_add_return(*clock->pcpu_buf, sectors) <
		   IO_CLOCK_PCPU_SECTORS)) {
		preempt_enable();
		return;
	}

	sectors = this_cpu_xchg(*clock->pcpu_buf, 0);
	preempt_enable();
	now = atomic_long_add_return(sectors, &clock->now);

	while ((timer = get_expired_timer(clock, now)))
		timer->fn(timer);
}
void bch2_io_clock_exit(struct io_clock *clock)
{
	free_heap(&clock->timers);
	free_percpu(clock->pcpu_buf);
}
int bch2_io_clock_init(struct io_clock *clock)
{
	atomic_long_set(&clock->now, 0);
	spin_lock_init(&clock->timer_lock);

	clock->pcpu_buf = alloc_percpu(*clock->pcpu_buf);
	if (!clock->pcpu_buf)
		return -ENOMEM;

	if (!init_heap(&clock->timers, NR_IO_TIMERS, GFP_KERNEL))
		return -ENOMEM;

	return 0;
}
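
/*
 * Usage sketch (hypothetical caller, not part of this file): a kthread that
 * wants to run after every 1GB of reads (2^21 512-byte sectors) might do:
 *
 *	struct io_clock *clock = &c->io_clock[READ];
 *	unsigned long target = atomic_long_read(&clock->now) + (1 << 21);
 *
 *	bch2_kthread_io_clock_wait(clock, target, MAX_SCHEDULE_TIMEOUT);
 */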