#include "bcachefs.h"
#include "clock.h"

#include <linux/freezer.h>
#include <linux/kthread.h>

/*
 * Orders the timer heap so that the earliest-expiring timer is at the
 * root; time_after() compares correctly across wraparound of the
 * unsigned clock value.
 */
static inline bool io_timer_cmp(struct io_timer *l, struct io_timer *r)
{
        return time_after(l->expire, r->expire);
}

void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
{
        size_t i;

        spin_lock(&clock->timer_lock);

        /* Arming a timer that's already armed is a no-op: */
        for (i = 0; i < clock->timers.used; i++)
                if (clock->timers.data[i] == timer)
                        goto out;

        BUG_ON(!heap_add(&clock->timers, timer, io_timer_cmp));
out:
        spin_unlock(&clock->timer_lock);
}

void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer)
{
        size_t i;

        spin_lock(&clock->timer_lock);

        /* Deleting is likewise idempotent - the timer may already have fired: */
        for (i = 0; i < clock->timers.used; i++)
                if (clock->timers.data[i] == timer) {
                        heap_del(&clock->timers, i, io_timer_cmp);
                        break;
                }

        spin_unlock(&clock->timer_lock);
}
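
/*
 * Illustrative only, not part of this file: arming a one-shot timer
 * that fires after 1024 more sectors of read IO. read_done_fn and
 * kick_readahead() are made-up names, and READ is assumed to index the
 * read clock in c->io_clock[]:
 *
 *      static void read_done_fn(struct io_timer *timer)
 *      {
 *              kick_readahead();
 *      }
 *
 *      struct io_timer t = {
 *              .expire = atomic_long_read(&c->io_clock[READ].now) + 1024,
 *              .fn     = read_done_fn,
 *      };
 *
 *      bch2_io_timer_add(&c->io_clock[READ], &t);
 */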

/*
 * Implements synchronous waits on the IO clock: the timer callback
 * just flags expiry and wakes the sleeping task.
 */
struct io_clock_wait {
        struct io_timer         timer;
        struct task_struct      *task;
        int                     expired;
};

static void io_clock_wait_fn(struct io_timer *timer)
{
        struct io_clock_wait *wait = container_of(timer,
                                struct io_clock_wait, timer);

        wait->expired = 1;
        wake_up_process(wait->task);
}

void bch2_io_clock_schedule_timeout(struct io_clock *clock, unsigned long until)
{
        struct io_clock_wait wait;

        /* XXX: calculate sleep time rigorously */
        wait.timer.expire       = until;
        wait.timer.fn           = io_clock_wait_fn;
        wait.task               = current;
        wait.expired            = 0;
        bch2_io_timer_add(clock, &wait.timer);

        /*
         * The caller is expected to have set the task state beforehand;
         * if it's still TASK_RUNNING, schedule() returns without
         * sleeping:
         */
        schedule();

        bch2_io_timer_del(clock, &wait.timer);
}
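
/*
 * Illustrative usage sketch, not from this file - sleep until the write
 * clock has advanced to `until`:
 *
 *      set_current_state(TASK_UNINTERRUPTIBLE);
 *      bch2_io_clock_schedule_timeout(&c->io_clock[WRITE], until);
 */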

/*
 * _only_ to be used from a kthread: it calls kthread_should_stop(),
 * which is not valid for a task that isn't a kthread
 */
void bch2_kthread_io_clock_wait(struct io_clock *clock,
                                unsigned long until)
{
        struct io_clock_wait wait;

        /* XXX: calculate sleep time rigorously */
        wait.timer.expire       = until;
        wait.timer.fn           = io_clock_wait_fn;
        wait.task               = current;
        wait.expired            = 0;
        bch2_io_timer_add(clock, &wait.timer);

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (kthread_should_stop())
                        break;

                if (wait.expired)
                        break;

                schedule();
                try_to_freeze();
        }

        __set_current_state(TASK_RUNNING);
        bch2_io_timer_del(clock, &wait.timer);
}
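
/*
 * Illustrative only - a kthread main loop that does some work for every
 * `interval` sectors of write IO; do_work() and interval are made-up
 * names:
 *
 *      static int my_thread_fn(void *arg)
 *      {
 *              struct bch_fs *c = arg;
 *              struct io_clock *clock = &c->io_clock[WRITE];
 *
 *              while (!kthread_should_stop()) {
 *                      unsigned long start = atomic_long_read(&clock->now);
 *
 *                      do_work(c);
 *                      bch2_kthread_io_clock_wait(clock, start + interval);
 *              }
 *              return 0;
 *      }
 */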

static struct io_timer *get_expired_timer(struct io_clock *clock,
                                          unsigned long now)
{
        struct io_timer *ret = NULL;

        spin_lock(&clock->timer_lock);

        /* The heap root is always the earliest-expiring timer: */
        if (clock->timers.used &&
            time_after_eq(now, clock->timers.data[0]->expire))
                heap_pop(&clock->timers, ret, io_timer_cmp);

        spin_unlock(&clock->timer_lock);

        return ret;
}

void bch2_increment_clock(struct bch_fs *c, unsigned sectors, int rw)
{
        struct io_clock *clock = &c->io_clock[rw];
        struct io_timer *timer;
        unsigned long now;

        /* Buffer up one megabyte worth of IO in the percpu counter */
        preempt_disable();

        if (likely(this_cpu_add_return(*clock->pcpu_buf, sectors) <
                   IO_CLOCK_PCPU_SECTORS)) {
                preempt_enable();
                return;
        }

        /*
         * Only when the local buffer fills up do we touch the shared
         * atomic counter, and only then can timers fire:
         */
        sectors = this_cpu_xchg(*clock->pcpu_buf, 0);
        preempt_enable();
        now = atomic_long_add_return(sectors, &clock->now);

        while ((timer = get_expired_timer(clock, now)))
                timer->fn(timer);
}
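
/*
 * Note: this clock doesn't measure wall time, it counts sectors of IO.
 * Per the comment above, if IO_CLOCK_PCPU_SECTORS were 2048 (the actual
 * value lives in the headers), each CPU would batch 2048 * 512 bytes =
 * 1MB of IO before advancing clock->now, so a timer can fire up to
 * roughly nr_cpu_ids * 1MB of IO late.
 */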

void bch2_io_clock_exit(struct io_clock *clock)
{
        free_heap(&clock->timers);
        free_percpu(clock->pcpu_buf);
}

int bch2_io_clock_init(struct io_clock *clock)
{
        atomic_long_set(&clock->now, 0);
        spin_lock_init(&clock->timer_lock);

        clock->pcpu_buf = alloc_percpu(*clock->pcpu_buf);
        if (!clock->pcpu_buf)
                return -ENOMEM;

        /*
         * pcpu_buf isn't freed here on failure; presumably the caller's
         * error path calls bch2_io_clock_exit(), which frees both:
         */
        if (!init_heap(&clock->timers, NR_IO_TIMERS, GFP_KERNEL))
                return -ENOMEM;

        return 0;
}
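
/*
 * Illustrative init/teardown pairing, assuming the clocks live in a
 * zeroed struct bch_fs:
 *
 *      int ret = bch2_io_clock_init(&c->io_clock[READ]) ?:
 *                bch2_io_clock_init(&c->io_clock[WRITE]);
 *      if (ret)
 *              goto err;       // error path calls bch2_io_clock_exit()
 */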