/*
 * libbcachefs/clock.c - bcachefs IO clock (sector-count based timers)
 *
 * Mirrored from git.sesse.net bcachefs-tools-debian;
 * bcachefs sources at commit 2e70771b8d.
 */
1 #include "bcachefs.h"
2 #include "clock.h"
3
4 #include <linux/freezer.h>
5 #include <linux/kthread.h>
6 #include <linux/preempt.h>
7
/*
 * Heap comparison callback: orders timers by expiration "time" (an IO
 * clock value, i.e. a sector count -- see bch2_increment_clock()).
 *
 * Deliberately compares via subtraction rather than '<' so the ordering
 * stays correct across clock wraparound, same idiom as the kernel's
 * time_before()/time_after(). NOTE(review): assumes any two armed timers'
 * expire values differ by less than LONG_MAX -- confirm against callers.
 */
static inline long io_timer_cmp(io_timer_heap *h,
				struct io_timer *l,
				struct io_timer *r)
{
	return l->expire - r->expire;
}
14
15 void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
16 {
17         size_t i;
18
19         spin_lock(&clock->timer_lock);
20         for (i = 0; i < clock->timers.used; i++)
21                 if (clock->timers.data[i] == timer)
22                         goto out;
23
24         BUG_ON(!heap_add(&clock->timers, timer, io_timer_cmp));
25 out:
26         spin_unlock(&clock->timer_lock);
27 }
28
29 void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer)
30 {
31         size_t i;
32
33         spin_lock(&clock->timer_lock);
34
35         for (i = 0; i < clock->timers.used; i++)
36                 if (clock->timers.data[i] == timer) {
37                         heap_del(&clock->timers, i, io_timer_cmp);
38                         break;
39                 }
40
41         spin_unlock(&clock->timer_lock);
42 }
43
/*
 * On-stack waiter: embeds an io_timer whose callback
 * (io_clock_wait_fn) wakes the sleeping task.
 */
struct io_clock_wait {
	struct io_timer		timer;		/* armed on the io_clock's heap */
	struct task_struct	*task;		/* task to wake when timer fires */
	int			expired;	/* set to 1 by the timer callback */
};
49
50 static void io_clock_wait_fn(struct io_timer *timer)
51 {
52         struct io_clock_wait *wait = container_of(timer,
53                                 struct io_clock_wait, timer);
54
55         wait->expired = 1;
56         wake_up_process(wait->task);
57 }
58
/*
 * Sleep until the IO clock reaches @until (an IO clock value, not
 * wallclock time), via an on-stack io_clock_wait timer.
 *
 * NOTE(review): this calls schedule() without setting the task state
 * itself -- presumably the caller has already done
 * set_current_state(), schedule_timeout()-style; if the task is still
 * TASK_RUNNING, schedule() just yields. Confirm against callers.
 * Either way an early wakeup (timer firing between the add and
 * schedule()) is not lost: wake_up_process() makes schedule() return
 * promptly.
 */
void bch2_io_clock_schedule_timeout(struct io_clock *clock, unsigned long until)
{
	struct io_clock_wait wait;

	/* XXX: calculate sleep time rigorously */
	wait.timer.expire	= until;
	wait.timer.fn		= io_clock_wait_fn;
	wait.task		= current;
	wait.expired		= 0;
	bch2_io_timer_add(clock, &wait.timer);

	schedule();

	/* No-op if the timer already fired and was popped off the heap. */
	bch2_io_timer_del(clock, &wait.timer);
}
74
75 /*
76  * _only_ to be used from a kthread
77  */
/*
 * _only_ to be used from a kthread
 *
 * Sleep until the IO clock reaches @until, waking early if the kthread
 * is asked to stop. Uses the standard set_current_state() -> recheck
 * condition -> schedule() pattern, so a wakeup (timer firing or
 * kthread_stop()) arriving between the checks and schedule() is not
 * lost: wake_up_process() resets the task to TASK_RUNNING first.
 */
void bch2_kthread_io_clock_wait(struct io_clock *clock,
			       unsigned long until)
{
	struct io_clock_wait wait;

	/* XXX: calculate sleep time rigorously */
	wait.timer.expire	= until;
	wait.timer.fn		= io_clock_wait_fn;
	wait.task		= current;
	wait.expired		= 0;
	bch2_io_timer_add(clock, &wait.timer);

	while (1) {
		/* Must precede the condition checks (lost-wakeup avoidance). */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;

		if (wait.expired)
			break;

		schedule();
		try_to_freeze();
	}

	__set_current_state(TASK_RUNNING);
	/* No-op if the timer already fired; required otherwise, since
	 * `wait` lives on this stack frame. */
	bch2_io_timer_del(clock, &wait.timer);
}
105
106 static struct io_timer *get_expired_timer(struct io_clock *clock,
107                                           unsigned long now)
108 {
109         struct io_timer *ret = NULL;
110
111         spin_lock(&clock->timer_lock);
112
113         if (clock->timers.used &&
114             time_after_eq(now, clock->timers.data[0]->expire))
115                 heap_pop(&clock->timers, ret, io_timer_cmp);
116
117         spin_unlock(&clock->timer_lock);
118
119         return ret;
120 }
121
/*
 * Advance the IO clock for direction @rw by @sectors, firing any timers
 * that have now expired.
 *
 * Sectors are batched in a percpu counter and only folded into the
 * shared atomic clock once a CPU accumulates IO_CLOCK_PCPU_SECTORS,
 * keeping the common path free of shared-cacheline traffic.
 */
void bch2_increment_clock(struct bch_fs *c, unsigned sectors, int rw)
{
	struct io_clock *clock = &c->io_clock[rw];
	struct io_timer *timer;
	unsigned long now;

	/* Buffer up one megabyte worth of IO in the percpu counter */
	preempt_disable();

	if (likely(this_cpu_add_return(*clock->pcpu_buf, sectors) <
		   IO_CLOCK_PCPU_SECTORS)) {
		preempt_enable();
		return;
	}

	/* Threshold reached: drain this CPU's buffer into the shared clock. */
	sectors = this_cpu_xchg(*clock->pcpu_buf, 0);
	preempt_enable();
	now = atomic_long_add_return(sectors, &clock->now);

	/* Run expired timer callbacks outside the timer lock. */
	while ((timer = get_expired_timer(clock, now)))
		timer->fn(timer);
}
144
/*
 * Free resources allocated by bch2_io_clock_init().
 * Both calls tolerate NULL/empty, so this is safe after a failed or
 * partial init.
 */
void bch2_io_clock_exit(struct io_clock *clock)
{
	free_heap(&clock->timers);
	free_percpu(clock->pcpu_buf);
}
150
151 int bch2_io_clock_init(struct io_clock *clock)
152 {
153         atomic_long_set(&clock->now, 0);
154         spin_lock_init(&clock->timer_lock);
155
156         clock->pcpu_buf = alloc_percpu(*clock->pcpu_buf);
157         if (!clock->pcpu_buf)
158                 return -ENOMEM;
159
160         if (!init_heap(&clock->timers, NR_IO_TIMERS, GFP_KERNEL))
161                 return -ENOMEM;
162
163         return 0;
164 }