// SPDX-License-Identifier: GPL-2.0
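/*
 * libbcachefs/clock.c - the IO clock: a clock that advances with IO done
 * (measured in sectors) rather than wall time. Timers added to it fire once
 * a given amount of IO has completed; waiters may also bound their sleep by
 * an ordinary CPU timeout.
 */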
#include "bcachefs.h"
#include "clock.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/preempt.h>

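/*
 * Heap comparator: orders timers so the one expiring soonest sits at the
 * root. The subtraction, taken as a signed long, compares expiry times in a
 * wraparound-safe way, like time_before().
 */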
static inline long io_timer_cmp(io_timer_heap *h,
                                struct io_timer *l,
                                struct io_timer *r)
{
        return l->expire - r->expire;
}

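/*
 * Add a timer: if it has already expired, run its callback immediately (with
 * the lock dropped); otherwise insert it into the heap, unless it is already
 * queued.
 */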
void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
{
        size_t i;

        spin_lock(&clock->timer_lock);

        if (time_after_eq((unsigned long) atomic64_read(&clock->now),
                          timer->expire)) {
                spin_unlock(&clock->timer_lock);
                timer->fn(timer);
                return;
        }

        for (i = 0; i < clock->timers.used; i++)
                if (clock->timers.data[i] == timer)
                        goto out;

        BUG_ON(!heap_add(&clock->timers, timer, io_timer_cmp, NULL));
out:
        spin_unlock(&clock->timer_lock);
}

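/* Remove a timer from the heap; a no-op if it isn't queued. */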
void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer)
{
        size_t i;

        spin_lock(&clock->timer_lock);

        for (i = 0; i < clock->timers.used; i++)
                if (clock->timers.data[i] == timer) {
                        heap_del(&clock->timers, i, io_timer_cmp, NULL);
                        break;
                }

        spin_unlock(&clock->timer_lock);
}

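/*
 * State for a task sleeping on the IO clock, with an optional CPU timer so
 * the sleep can also be bounded by real time.
 */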
struct io_clock_wait {
        struct io_timer         io_timer;
        struct timer_list       cpu_timer;
        struct task_struct      *task;
        int                     expired;
};

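/*
 * Wakeup callbacks: whichever of the IO clock timer and the CPU timer fires
 * first marks the wait as expired and wakes the sleeping task.
 */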
static void io_clock_wait_fn(struct io_timer *timer)
{
        struct io_clock_wait *wait = container_of(timer,
                                struct io_clock_wait, io_timer);

        wait->expired = 1;
        wake_up_process(wait->task);
}

static void io_clock_cpu_timeout(struct timer_list *timer)
{
        struct io_clock_wait *wait = container_of(timer,
                                struct io_clock_wait, cpu_timer);

        wait->expired = 1;
        wake_up_process(wait->task);
}

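/*
 * Sleep until the IO clock reaches @until: the timer we add wakes us back
 * up. Like schedule_timeout(), this relies on the caller having set the task
 * state beforehand.
 */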
void bch2_io_clock_schedule_timeout(struct io_clock *clock, unsigned long until)
{
        struct io_clock_wait wait;

        /* XXX: calculate sleep time rigorously */
        wait.io_timer.expire    = until;
        wait.io_timer.fn        = io_clock_wait_fn;
        wait.task               = current;
        wait.expired            = 0;
        bch2_io_timer_add(clock, &wait.io_timer);

        schedule();

        bch2_io_timer_del(clock, &wait.io_timer);
}

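/*
 * Sleep until either the IO clock reaches @io_until, @cpu_timeout jiffies of
 * real time have elapsed, or (for kthreads) kthread_should_stop() returns
 * true, whichever happens first.
 *
 * Illustrative sketch (values assumed, not from this file): a background
 * thread throttling itself until 2048 more sectors of IO have been done, or
 * at most one second has passed:
 *
 *      bch2_kthread_io_clock_wait(clock,
 *                      atomic64_read(&clock->now) + 2048, HZ);
 */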
void bch2_kthread_io_clock_wait(struct io_clock *clock,
                                unsigned long io_until,
                                unsigned long cpu_timeout)
{
        bool kthread = (current->flags & PF_KTHREAD) != 0;
        struct io_clock_wait wait;

        wait.io_timer.expire    = io_until;
        wait.io_timer.fn        = io_clock_wait_fn;
        wait.task               = current;
        wait.expired            = 0;
        bch2_io_timer_add(clock, &wait.io_timer);

        timer_setup_on_stack(&wait.cpu_timer, io_clock_cpu_timeout, 0);

        if (cpu_timeout != MAX_SCHEDULE_TIMEOUT)
                mod_timer(&wait.cpu_timer, cpu_timeout + jiffies);

        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (kthread && kthread_should_stop())
                        break;

                if (wait.expired)
                        break;

                schedule();
                try_to_freeze();
        }

        __set_current_state(TASK_RUNNING);
        del_timer_sync(&wait.cpu_timer);
        destroy_timer_on_stack(&wait.cpu_timer);
        bch2_io_timer_del(clock, &wait.io_timer);
}

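/* Pop the root of the heap, if it has expired as of @now. */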
static struct io_timer *get_expired_timer(struct io_clock *clock,
                                          unsigned long now)
{
        struct io_timer *ret = NULL;

        spin_lock(&clock->timer_lock);

        if (clock->timers.used &&
            time_after_eq(now, clock->timers.data[0]->expire))
                heap_pop(&clock->timers, ret, io_timer_cmp, NULL);

        spin_unlock(&clock->timer_lock);

        return ret;
}

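/*
 * Advance the clock by @sectors of completed IO and run the callbacks of any
 * timers that have now expired.
 */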
void __bch2_increment_clock(struct io_clock *clock, unsigned sectors)
{
        struct io_timer *timer;
        unsigned long now = atomic64_add_return(sectors, &clock->now);

        while ((timer = get_expired_timer(clock, now)))
                timer->fn(timer);
}

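/* Print each pending timer's callback and how far off (in sectors) it is. */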
void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock)
{
        unsigned long now;
        unsigned i;

        out->atomic++;
        spin_lock(&clock->timer_lock);
        now = atomic64_read(&clock->now);

        for (i = 0; i < clock->timers.used; i++)
                prt_printf(out, "%ps:\t%li\n",
                       clock->timers.data[i]->fn,
                       clock->timers.data[i]->expire - now);
        spin_unlock(&clock->timer_lock);
        --out->atomic;
}

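/* Free the timer heap and the percpu IO accounting buffer. */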
void bch2_io_clock_exit(struct io_clock *clock)
{
        free_heap(&clock->timers);
        free_percpu(clock->pcpu_buf);
}

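/*
 * Initialize the clock at time zero. max_slop bounds how much IO may sit
 * unflushed in the percpu buffers (callers batch sectors there before
 * flushing to the shared clock), and thus how far the clock can lag behind
 * the IO actually done.
 */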
int bch2_io_clock_init(struct io_clock *clock)
{
        atomic64_set(&clock->now, 0);
        spin_lock_init(&clock->timer_lock);

        clock->max_slop = IO_CLOCK_PCPU_SECTORS * num_possible_cpus();

        clock->pcpu_buf = alloc_percpu(*clock->pcpu_buf);
        if (!clock->pcpu_buf)
                return -BCH_ERR_ENOMEM_io_clock_init;

        if (!init_heap(&clock->timers, NR_IO_TIMERS, GFP_KERNEL))
                return -BCH_ERR_ENOMEM_io_clock_init;

        return 0;
}