#include "bcachefs.h"
#include "clock.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/preempt.h>

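/*
 * Timers are kept in a min-heap ordered by expiry, so the next timer
 * to fire is always at data[0].  The unsigned subtraction, truncated
 * to long, compares correctly even if the clock wraps (the same idiom
 * as time_before()).
 */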
static inline long io_timer_cmp(io_timer_heap *h,
				struct io_timer *l,
				struct io_timer *r)
{
	return l->expire - r->expire;
}

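/*
 * Add @timer to the pending timers heap, unless it's already queued;
 * BUG_ON() if the heap is full (it's sized for NR_IO_TIMERS entries).
 */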
void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
{
	size_t i;

	spin_lock(&clock->timer_lock);
	for (i = 0; i < clock->timers.used; i++)
		if (clock->timers.data[i] == timer)
			goto out;

	BUG_ON(!heap_add(&clock->timers, timer, io_timer_cmp, NULL));
out:
	spin_unlock(&clock->timer_lock);
}

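/* Remove @timer from the pending timers heap, if it's still queued: */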
void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer)
{
	size_t i;

	spin_lock(&clock->timer_lock);

	for (i = 0; i < clock->timers.used; i++)
		if (clock->timers.data[i] == timer) {
			heap_del(&clock->timers, i, io_timer_cmp, NULL);
			break;
		}

	spin_unlock(&clock->timer_lock);
}

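/*
 * State for a task sleeping on the IO clock: the wait can be completed
 * either by the io_timer firing (the IO clock reached the target) or
 * by an ordinary CPU timer (a wallclock timeout).
 */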
struct io_clock_wait {
	struct io_timer		io_timer;
	struct timer_list	cpu_timer;
	struct task_struct	*task;
	int			expired;
};

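/* Wakeup callbacks for the two timers; both just wake the sleeping task: */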
static void io_clock_wait_fn(struct io_timer *timer)
{
	struct io_clock_wait *wait = container_of(timer,
				struct io_clock_wait, io_timer);

	wait->expired = 1;
	wake_up_process(wait->task);
}

static void io_clock_cpu_timeout(struct timer_list *timer)
{
	struct io_clock_wait *wait = container_of(timer,
				struct io_clock_wait, cpu_timer);

	wait->expired = 1;
	wake_up_process(wait->task);
}

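/*
 * Sleep until the IO clock reaches @until.  Note that the caller is
 * expected to have set the task state before calling; otherwise
 * schedule() returns immediately.
 */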
void bch2_io_clock_schedule_timeout(struct io_clock *clock, unsigned long until)
{
	struct io_clock_wait wait;

	/* XXX: calculate sleep time rigorously */
	wait.io_timer.expire	= until;
	wait.io_timer.fn	= io_clock_wait_fn;
	wait.task		= current;
	wait.expired		= 0;
	bch2_io_timer_add(clock, &wait.io_timer);

	schedule();

	bch2_io_timer_del(clock, &wait.io_timer);
}

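/*
 * Sleep until the IO clock reaches @io_until, @cpu_timeout jiffies have
 * elapsed, or (for kthreads) kthread_should_stop() becomes true,
 * whichever happens first.  Pass MAX_SCHEDULE_TIMEOUT as @cpu_timeout
 * to wait on the IO clock alone.
 */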
void bch2_kthread_io_clock_wait(struct io_clock *clock,
				unsigned long io_until,
				unsigned long cpu_timeout)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct io_clock_wait wait;

	wait.io_timer.expire	= io_until;
	wait.io_timer.fn	= io_clock_wait_fn;
	wait.task		= current;
	wait.expired		= 0;
	bch2_io_timer_add(clock, &wait.io_timer);

	timer_setup_on_stack(&wait.cpu_timer, io_clock_cpu_timeout, 0);

	if (cpu_timeout != MAX_SCHEDULE_TIMEOUT)
		mod_timer(&wait.cpu_timer, cpu_timeout + jiffies);

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread && kthread_should_stop())
			break;

		if (wait.expired)
			break;

		schedule();
		try_to_freeze();
	}

	__set_current_state(TASK_RUNNING);
	del_singleshot_timer_sync(&wait.cpu_timer);
	destroy_timer_on_stack(&wait.cpu_timer);
	bch2_io_timer_del(clock, &wait.io_timer);
}

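/*
 * Pop the soonest-expiring timer off the heap if it has expired as of
 * @now; returns NULL if no pending timer has expired yet.
 */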
static struct io_timer *get_expired_timer(struct io_clock *clock,
					  unsigned long now)
{
	struct io_timer *ret = NULL;

	spin_lock(&clock->timer_lock);

	if (clock->timers.used &&
	    time_after_eq(now, clock->timers.data[0]->expire))
		heap_pop(&clock->timers, ret, io_timer_cmp, NULL);

	spin_unlock(&clock->timer_lock);

	return ret;
}

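/*
 * Advance the IO clock by @sectors read or written, then run any timers
 * that have expired.  Per-CPU buffering means clock->now is only
 * updated (and timers only checked) once a CPU has accumulated
 * IO_CLOCK_PCPU_SECTORS sectors, keeping the atomic add off the fast
 * path.
 */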
void bch2_increment_clock(struct bch_fs *c, unsigned sectors, int rw)
{
	struct io_clock *clock = &c->io_clock[rw];
	struct io_timer *timer;
	unsigned long now;

	/* Buffer up one megabyte worth of IO in the percpu counter */
	preempt_disable();

	if (likely(this_cpu_add_return(*clock->pcpu_buf, sectors) <
		   IO_CLOCK_PCPU_SECTORS)) {
		preempt_enable();
		return;
	}

	sectors = this_cpu_xchg(*clock->pcpu_buf, 0);
	preempt_enable();
	now = atomic_long_add_return(sectors, &clock->now);

	while ((timer = get_expired_timer(clock, now)))
		timer->fn(timer);
}

void bch2_io_clock_exit(struct io_clock *clock)
{
	free_heap(&clock->timers);
	free_percpu(clock->pcpu_buf);
}

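/* Allocate the percpu sector buffer and the pending timers heap: */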
int bch2_io_clock_init(struct io_clock *clock)
{
	atomic_long_set(&clock->now, 0);
	spin_lock_init(&clock->timer_lock);

	clock->pcpu_buf = alloc_percpu(*clock->pcpu_buf);
	if (!clock->pcpu_buf)
		return -ENOMEM;

	if (!init_heap(&clock->timers, NR_IO_TIMERS, GFP_KERNEL))
		return -ENOMEM;

	return 0;
}