// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "clock.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/preempt.h>

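/*
 * Heap comparator: orders timers by expiry. The unsigned subtraction,
 * interpreted as a signed long, compares correctly even if the clock
 * value wraps (the same idiom as time_before()).
 */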
static inline long io_timer_cmp(io_timer_heap *h,
				struct io_timer *l,
				struct io_timer *r)
{
	return l->expire - r->expire;
}

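/*
 * Queue a timer on the clock's heap; adding a timer that is already
 * queued is a no-op:
 */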
void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
{
	size_t i;

	spin_lock(&clock->timer_lock);
	for (i = 0; i < clock->timers.used; i++)
		if (clock->timers.data[i] == timer)
			goto out;

	BUG_ON(!heap_add(&clock->timers, timer, io_timer_cmp, NULL));
out:
	spin_unlock(&clock->timer_lock);
}

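/* Remove a timer from the heap, if it's still queued: */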
void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer)
{
	size_t i;

	spin_lock(&clock->timer_lock);

	for (i = 0; i < clock->timers.used; i++)
		if (clock->timers.data[i] == timer) {
			heap_del(&clock->timers, i, io_timer_cmp, NULL);
			break;
		}

	spin_unlock(&clock->timer_lock);
}

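/*
 * A task sleeping on the IO clock, optionally with a wallclock (jiffies)
 * timeout as well:
 */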
struct io_clock_wait {
	struct io_timer		io_timer;
	struct timer_list	cpu_timer;
	struct task_struct	*task;
	int			expired;
};

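/* IO clock timer callback: flag the wait as expired and wake the sleeper: */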
static void io_clock_wait_fn(struct io_timer *timer)
{
	struct io_clock_wait *wait = container_of(timer,
				struct io_clock_wait, io_timer);

	wait->expired = 1;
	wake_up_process(wait->task);
}

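/* Jiffies timer callback: same wakeup path as io_clock_wait_fn(): */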
static void io_clock_cpu_timeout(struct timer_list *timer)
{
	struct io_clock_wait *wait = container_of(timer,
				struct io_clock_wait, cpu_timer);

	wait->expired = 1;
	wake_up_process(wait->task);
}

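/*
 * Sleep until the IO clock reaches @until. Note that schedule() is
 * called without setting the task state here, so, as with
 * schedule_timeout(), the caller presumably sets it beforehand:
 */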
void bch2_io_clock_schedule_timeout(struct io_clock *clock, unsigned long until)
{
	struct io_clock_wait wait;

	/* XXX: calculate sleep time rigorously */
	wait.io_timer.expire	= until;
	wait.io_timer.fn	= io_clock_wait_fn;
	wait.task		= current;
	wait.expired		= 0;
	bch2_io_timer_add(clock, &wait.io_timer);

	schedule();

	bch2_io_timer_del(clock, &wait.io_timer);
}

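/*
 * Sleep until the IO clock reaches @io_until or @cpu_timeout jiffies
 * elapse, whichever comes first; kthreads also wake early when asked
 * to stop:
 */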
void bch2_kthread_io_clock_wait(struct io_clock *clock,
				unsigned long io_until,
				unsigned long cpu_timeout)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct io_clock_wait wait;

	wait.io_timer.expire	= io_until;
	wait.io_timer.fn	= io_clock_wait_fn;
	wait.task		= current;
	wait.expired		= 0;
	bch2_io_timer_add(clock, &wait.io_timer);

	timer_setup_on_stack(&wait.cpu_timer, io_clock_cpu_timeout, 0);

	if (cpu_timeout != MAX_SCHEDULE_TIMEOUT)
		mod_timer(&wait.cpu_timer, cpu_timeout + jiffies);

	/*
	 * Sleep until either timer fires, or (for kthreads) until we're
	 * told to stop:
	 */
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread && kthread_should_stop())
			break;

		if (wait.expired)
			break;

		schedule();
		try_to_freeze();
	}

	__set_current_state(TASK_RUNNING);
	del_singleshot_timer_sync(&wait.cpu_timer);
	destroy_timer_on_stack(&wait.cpu_timer);
	bch2_io_timer_del(clock, &wait.io_timer);
}

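/*
 * Pop the earliest-expiring timer off the heap if it has expired as of
 * @now; returns NULL otherwise:
 */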
static struct io_timer *get_expired_timer(struct io_clock *clock,
					  unsigned long now)
{
	struct io_timer *ret = NULL;

	spin_lock(&clock->timer_lock);

	if (clock->timers.used &&
	    time_after_eq(now, clock->timers.data[0]->expire))
		heap_pop(&clock->timers, ret, io_timer_cmp, NULL);

	spin_unlock(&clock->timer_lock);

	return ret;
}

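/*
 * Advance the IO clock by @sectors of IO in direction @rw, then run any
 * timers that have expired. Updates are batched in a percpu counter, so
 * the shared atomic is only touched once the buffered IO exceeds
 * IO_CLOCK_PCPU_SECTORS:
 */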
void bch2_increment_clock(struct bch_fs *c, unsigned sectors, int rw)
{
	struct io_clock *clock = &c->io_clock[rw];
	struct io_timer *timer;
	unsigned long now;

	/* Buffer up one megabyte worth of IO in the percpu counter */
	preempt_disable();

	if (likely(this_cpu_add_return(*clock->pcpu_buf, sectors) <
		   IO_CLOCK_PCPU_SECTORS)) {
		preempt_enable();
		return;
	}

	sectors = this_cpu_xchg(*clock->pcpu_buf, 0);
	preempt_enable();
	now = atomic_long_add_return(sectors, &clock->now);

	while ((timer = get_expired_timer(clock, now)))
		timer->fn(timer);
}

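/* Free the timers heap and the percpu sector buffer: */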
void bch2_io_clock_exit(struct io_clock *clock)
{
	free_heap(&clock->timers);
	free_percpu(clock->pcpu_buf);
}

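/*
 * Allocate the percpu sector buffer and the timers heap; returns -ENOMEM
 * if either allocation fails:
 */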
int bch2_io_clock_init(struct io_clock *clock)
{
	atomic_long_set(&clock->now, 0);
	spin_lock_init(&clock->timer_lock);

	clock->pcpu_buf = alloc_percpu(*clock->pcpu_buf);
	if (!clock->pcpu_buf)
		return -ENOMEM;

	if (!init_heap(&clock->timers, NR_IO_TIMERS, GFP_KERNEL))
		return -ENOMEM;

	return 0;
}