// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "debug.h"
#include "ec.h"
#include "error.h"
#include "recovery.h"

#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
#include <trace/events/bcachefs.h>

static const char * const bch2_alloc_field_names[] = {
#define x(name, bytes) #name,
	BCH_ALLOC_FIELDS()
#undef x
	NULL
};

static void bch2_recalc_oldest_io(struct bch_fs *, struct bch_dev *, int);

/* Ratelimiting/PD controllers */

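/*
 * Periodically recompute each member device's free space and internal
 * fragmentation, feed them to that device's copygc PD controller, and
 * reschedule ourselves:
 */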
static void pd_controllers_update(struct work_struct *work)
{
	struct bch_fs *c = container_of(to_delayed_work(work),
					   struct bch_fs,
					   pd_controllers_update);
	struct bch_dev *ca;
	unsigned i;

	for_each_member_device(ca, c, i) {
		struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);

		u64 free = bucket_to_sector(ca,
				__dev_buckets_free(ca, stats)) << 9;
		/*
		 * Bytes of internal fragmentation, which can be
		 * reclaimed by copy GC
		 */
		s64 fragmented = (bucket_to_sector(ca,
					stats.buckets[BCH_DATA_USER] +
					stats.buckets[BCH_DATA_CACHED]) -
				  (stats.sectors[BCH_DATA_USER] +
				   stats.sectors[BCH_DATA_CACHED])) << 9;

		fragmented = max(0LL, fragmented);

		bch2_pd_controller_update(&ca->copygc_pd,
					 free, fragmented, -1);
	}

	schedule_delayed_work(&c->pd_controllers_update,
			      c->pd_controllers_update_seconds * HZ);
}

/* Persistent alloc info: */

static inline u64 get_alloc_field(const struct bch_alloc *a,
				  const void **p, unsigned field)
{
	unsigned bytes = BCH_ALLOC_FIELD_BYTES[field];
	u64 v;

	if (!(a->fields & (1 << field)))
		return 0;

	switch (bytes) {
	case 1:
		v = *((const u8 *) *p);
		break;
	case 2:
		v = le16_to_cpup(*p);
		break;
	case 4:
		v = le32_to_cpup(*p);
		break;
	case 8:
		v = le64_to_cpup(*p);
		break;
	default:
		BUG();
	}

	*p += bytes;
	return v;
}

static inline void put_alloc_field(struct bkey_i_alloc *a, void **p,
				   unsigned field, u64 v)
{
	unsigned bytes = BCH_ALLOC_FIELD_BYTES[field];

	if (!v)
		return;

	a->v.fields |= 1 << field;

	switch (bytes) {
	case 1:
		*((u8 *) *p) = v;
		break;
	case 2:
		*((__le16 *) *p) = cpu_to_le16(v);
		break;
	case 4:
		*((__le32 *) *p) = cpu_to_le32(v);
		break;
	case 8:
		*((__le64 *) *p) = cpu_to_le64(v);
		break;
	default:
		BUG();
	}

	*p += bytes;
}

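/*
 * Alloc keys use a packed format: a bitmap of which fields are present,
 * followed by each present field in BCH_ALLOC_FIELDS() order, using the
 * per-field byte widths from BCH_ALLOC_FIELD_BYTES. Zero-valued fields are
 * omitted entirely. The helpers below convert between that packed
 * representation and struct bkey_alloc_unpacked:
 */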
struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
{
	struct bkey_alloc_unpacked ret = { .gen = 0 };

	if (k.k->type == KEY_TYPE_alloc) {
		const struct bch_alloc *a = bkey_s_c_to_alloc(k).v;
		const void *d = a->data;
		unsigned idx = 0;

		ret.gen = a->gen;

#define x(_name, _bits)	ret._name = get_alloc_field(a, &d, idx++);
		BCH_ALLOC_FIELDS()
#undef  x
	}
	return ret;
}

void bch2_alloc_pack(struct bkey_i_alloc *dst,
		     const struct bkey_alloc_unpacked src)
{
	unsigned idx = 0;
	void *d = dst->v.data;
	unsigned bytes;

	dst->v.fields	= 0;
	dst->v.gen	= src.gen;

#define x(_name, _bits)	put_alloc_field(dst, &d, idx++, src._name);
	BCH_ALLOC_FIELDS()
#undef  x

	bytes = (void *) d - (void *) &dst->v;
	set_bkey_val_bytes(&dst->k, bytes);
	memset_u64s_tail(&dst->v, 0, bytes);
}

static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)
{
	unsigned i, bytes = offsetof(struct bch_alloc, data);

	for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_FIELD_BYTES); i++)
		if (a->fields & (1 << i))
			bytes += BCH_ALLOC_FIELD_BYTES[i];

	return DIV_ROUND_UP(bytes, sizeof(u64));
}

const char *bch2_alloc_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);

	if (k.k->p.inode >= c->sb.nr_devices ||
	    !c->devs[k.k->p.inode])
		return "invalid device";

	/* allow for unknown fields */
	if (bkey_val_u64s(a.k) < bch_alloc_val_u64s(a.v))
		return "incorrect value size";

	return NULL;
}

void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
			struct bkey_s_c k)
{
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
	const void *d = a.v->data;
	unsigned i;

	pr_buf(out, "gen %u", a.v->gen);

	for (i = 0; i < BCH_ALLOC_FIELD_NR; i++)
		if (a.v->fields & (1 << i))
			pr_buf(out, " %s %llu",
			       bch2_alloc_field_names[i],
			       get_alloc_field(a.v, &d, i));
}

static int bch2_alloc_read_fn(struct bch_fs *c, enum btree_id id,
			      unsigned level, struct bkey_s_c k)
{
	if (!level)
		bch2_mark_key(c, k, 0, 0, NULL, 0,
			      BTREE_TRIGGER_ALLOC_READ|
			      BTREE_TRIGGER_NOATOMIC);

	return 0;
}

int bch2_alloc_read(struct bch_fs *c, struct journal_keys *journal_keys)
{
	struct bch_dev *ca;
	unsigned i;
	int ret = 0;

	ret = bch2_btree_and_journal_walk(c, journal_keys, BTREE_ID_ALLOC,
					  NULL, bch2_alloc_read_fn);
	if (ret) {
		bch_err(c, "error reading alloc info: %i", ret);
		return ret;
	}

	percpu_down_write(&c->mark_lock);
	bch2_dev_usage_from_buckets(c);
	percpu_up_write(&c->mark_lock);

	mutex_lock(&c->bucket_clock[READ].lock);
	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		bch2_recalc_oldest_io(c, ca, READ);
		up_read(&ca->bucket_lock);
	}
	mutex_unlock(&c->bucket_clock[READ].lock);

	mutex_lock(&c->bucket_clock[WRITE].lock);
	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		bch2_recalc_oldest_io(c, ca, WRITE);
		up_read(&ca->bucket_lock);
	}
	mutex_unlock(&c->bucket_clock[WRITE].lock);

	return 0;
}

enum alloc_write_ret {
	ALLOC_WROTE,
	ALLOC_NOWROTE,
	ALLOC_END,
};

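/*
 * Write out a single alloc key: compare the on-disk key at @iter's position
 * with the current in-memory bucket state, and if they differ, pack the
 * in-memory state into a new alloc key and commit it. Returns ALLOC_END once
 * the iterator runs off the end of the device's buckets:
 */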
static int bch2_alloc_write_key(struct btree_trans *trans,
				struct btree_iter *iter,
				unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;
	struct bch_dev *ca;
	struct bucket_array *ba;
	struct bucket *g;
	struct bucket_mark m;
	struct bkey_alloc_unpacked old_u, new_u;
	__BKEY_PADDED(k, 8) alloc_key; /* hack: */
	struct bkey_i_alloc *a;
	int ret;
retry:
	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	old_u = bch2_alloc_unpack(k);

	if (iter->pos.inode >= c->sb.nr_devices ||
	    !c->devs[iter->pos.inode])
		return ALLOC_END;

	percpu_down_read(&c->mark_lock);
	ca	= bch_dev_bkey_exists(c, iter->pos.inode);
	ba	= bucket_array(ca);

	if (iter->pos.offset >= ba->nbuckets) {
		percpu_up_read(&c->mark_lock);
		return ALLOC_END;
	}

	g	= &ba->b[iter->pos.offset];
	m	= READ_ONCE(g->mark);
	new_u	= alloc_mem_to_key(g, m);
	percpu_up_read(&c->mark_lock);

	if (!bkey_alloc_unpacked_cmp(old_u, new_u))
		return ALLOC_NOWROTE;

	a = bkey_alloc_init(&alloc_key.k);
	a->k.p = iter->pos;
	bch2_alloc_pack(a, new_u);

	bch2_trans_update(trans, iter, &a->k_i,
			  BTREE_TRIGGER_NORUN);
	ret = bch2_trans_commit(trans, NULL, NULL,
				BTREE_INSERT_NOFAIL|
				BTREE_INSERT_USE_RESERVE|
				flags);
err:
	if (ret == -EINTR)
		goto retry;
	return ret;
}

int bch2_alloc_write(struct bch_fs *c, unsigned flags, bool *wrote)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bch_dev *ca;
	unsigned i;
	int ret = 0;

	BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);

	bch2_trans_init(&trans, c, 0, 0);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC, POS_MIN,
				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	for_each_rw_member(ca, c, i) {
		unsigned first_bucket;

		percpu_down_read(&c->mark_lock);
		first_bucket = bucket_array(ca)->first_bucket;
		percpu_up_read(&c->mark_lock);

		bch2_btree_iter_set_pos(iter, POS(i, first_bucket));

		while (1) {
			ret = bch2_alloc_write_key(&trans, iter, flags);
			if (ret < 0 || ret == ALLOC_END)
				break;
			if (ret == ALLOC_WROTE)
				*wrote = true;
			bch2_btree_iter_next_slot(iter);
		}

		if (ret < 0) {
			percpu_ref_put(&ca->io_ref);
			break;
		}
	}

	bch2_trans_exit(&trans);

	return ret < 0 ? ret : 0;
}

int bch2_alloc_replay_key(struct bch_fs *c, struct bkey_i *k)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC, k->k.p,
				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	ret = bch2_alloc_write_key(&trans, iter,
				   BTREE_INSERT_NOFAIL|
				   BTREE_INSERT_LAZY_RW|
				   BTREE_INSERT_JOURNAL_REPLAY);
	bch2_trans_exit(&trans);
	return ret < 0 ? ret : 0;
}

/* Bucket IO clocks: */

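/*
 * Recompute the maximum "time since last IO" across all buckets on @ca for
 * the given clock (READ or WRITE), then recompute the filesystem-wide
 * maximum across member devices. Must be called with the bucket clock's
 * lock held:
 */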
static void bch2_recalc_oldest_io(struct bch_fs *c, struct bch_dev *ca, int rw)
{
	struct bucket_clock *clock = &c->bucket_clock[rw];
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket *g;
	u16 max_last_io = 0;
	unsigned i;

	lockdep_assert_held(&c->bucket_clock[rw].lock);

	/* Recalculate max_last_io for this device: */
	for_each_bucket(g, buckets)
		max_last_io = max(max_last_io, bucket_last_io(c, g, rw));

	ca->max_last_bucket_io[rw] = max_last_io;

	/* Recalculate global max_last_io: */
	max_last_io = 0;

	for_each_member_device(ca, c, i)
		max_last_io = max(max_last_io, ca->max_last_bucket_io[rw]);

	clock->max_last_io = max_last_io;
}

static void bch2_rescale_bucket_io_times(struct bch_fs *c, int rw)
{
	struct bucket_clock *clock = &c->bucket_clock[rw];
	struct bucket_array *buckets;
	struct bch_dev *ca;
	struct bucket *g;
	unsigned i;

	trace_rescale_prios(c);

	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		buckets = bucket_array(ca);

		for_each_bucket(g, buckets)
			g->io_time[rw] = clock->hand -
			bucket_last_io(c, g, rw) / 2;

		bch2_recalc_oldest_io(c, ca, rw);

		up_read(&ca->bucket_lock);
	}
}

static inline u64 bucket_clock_freq(u64 capacity)
{
	return max(capacity >> 10, 2028ULL);
}

static void bch2_inc_clock_hand(struct io_timer *timer)
{
	struct bucket_clock *clock = container_of(timer,
						struct bucket_clock, rescale);
	struct bch_fs *c = container_of(clock,
					struct bch_fs, bucket_clock[clock->rw]);
	struct bch_dev *ca;
	u64 capacity;
	unsigned i;

	mutex_lock(&clock->lock);

	/* if clock cannot be advanced more, rescale prio */
	if (clock->max_last_io >= U16_MAX - 2)
		bch2_rescale_bucket_io_times(c, clock->rw);

	BUG_ON(clock->max_last_io >= U16_MAX - 2);

	for_each_member_device(ca, c, i)
		ca->max_last_bucket_io[clock->rw]++;
	clock->max_last_io++;
	clock->hand++;

	mutex_unlock(&clock->lock);

	capacity = READ_ONCE(c->capacity);

	if (!capacity)
		return;

	/*
	 * We only increment the clock hand when 0.1% of the filesystem
	 * capacity has been read or written to; this determines when it's
	 * time to advance it again.
	 *
	 * XXX: we shouldn't really be going off of the capacity of devices in
	 * RW mode (that will be 0 when we're RO, yet we can still service
	 * reads)
	 */
	timer->expire += bucket_clock_freq(capacity);

	bch2_io_timer_add(&c->io_clock[clock->rw], timer);
}

static void bch2_bucket_clock_init(struct bch_fs *c, int rw)
{
	struct bucket_clock *clock = &c->bucket_clock[rw];

	clock->hand		= 1;
	clock->rw		= rw;
	clock->rescale.fn	= bch2_inc_clock_hand;
	clock->rescale.expire	= bucket_clock_freq(c->capacity);
	mutex_init(&clock->lock);
}

/* Background allocator thread: */

/*
 * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
 * (marking them as invalidated on disk), then optionally issues discard
 * commands to the newly free buckets, then puts them on the various freelists.
 */

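/*
 * Maximum bucket_gc_gen() before a bucket is refused for reuse:
 * bch2_can_invalidate_bucket() stops invalidating a bucket once its gc gen
 * reaches this, and flags that mark and sweep gc needs to run so generation
 * numbers don't wrap:
 */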
#define BUCKET_GC_GEN_MAX	96U

/**
 * wait_buckets_available - wait on reclaimable buckets
 *
 * If there aren't enough available buckets to fill up free_inc, wait until
 * there are.
 */
static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned long gc_count = c->gc_count;
	int ret = 0;

	ca->allocator_state = ALLOCATOR_BLOCKED;
	closure_wake_up(&c->freelist_wait);

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			ret = 1;
			break;
		}

		if (gc_count != c->gc_count)
			ca->inc_gen_really_needs_gc = 0;

		if ((ssize_t) (dev_buckets_available(c, ca) -
			       ca->inc_gen_really_needs_gc) >=
		    (ssize_t) fifo_free(&ca->free_inc))
			break;

		up_read(&c->gc_lock);
		schedule();
		try_to_freeze();
		down_read(&c->gc_lock);
	}

	__set_current_state(TASK_RUNNING);
	ca->allocator_state = ALLOCATOR_RUNNING;
	closure_wake_up(&c->freelist_wait);

	return ret;
}

static bool bch2_can_invalidate_bucket(struct bch_dev *ca,
				       size_t bucket,
				       struct bucket_mark mark)
{
	u8 gc_gen;

	if (!is_available_bucket(mark))
		return false;

	if (ca->buckets_nouse &&
	    test_bit(bucket, ca->buckets_nouse))
		return false;

	gc_gen = bucket_gc_gen(ca, bucket);

	if (gc_gen >= BUCKET_GC_GEN_MAX / 2)
		ca->inc_gen_needs_gc++;

	if (gc_gen >= BUCKET_GC_GEN_MAX)
		ca->inc_gen_really_needs_gc++;

	return gc_gen < BUCKET_GC_GEN_MAX;
}

/*
 * Determines what order we're going to reuse buckets, smallest
 * bucket_sort_key() first.
 *
 * - We take into account the read prio of the bucket, which gives us an
 *   indication of how hot the data is -- we scale the prio so that the prio
 *   farthest from the clock is worth 1/8th of the closest.
 *
 * - The number of sectors of cached data in the bucket, which gives us an
 *   indication of the cost in cache misses this eviction will cause.
 *
 * - If hotness * sectors used compares equal, we pick the bucket with the
 *   smallest bucket_gc_gen() - since incrementing the same bucket's generation
 *   number repeatedly forces us to run mark and sweep gc to avoid generation
 *   number wraparound.
 */

static unsigned long bucket_sort_key(struct bch_fs *c, struct bch_dev *ca,
				     size_t b, struct bucket_mark m)
{
	unsigned last_io = bucket_last_io(c, bucket(ca, b), READ);
	unsigned max_last_io = ca->max_last_bucket_io[READ];

	/*
	 * Time since last read, scaled to [0, 8) where larger value indicates
	 * more recently read data:
	 */
	unsigned long hotness = (max_last_io - last_io) * 7 / max_last_io;

	/* How much we want to keep the data in this bucket: */
	unsigned long data_wantness =
		(hotness + 1) * bucket_sectors_used(m);

	unsigned long needs_journal_commit =
		bucket_needs_journal_commit(m, c->journal.last_seq_ondisk);

	return	(data_wantness << 9) |
		(needs_journal_commit << 8) |
		(bucket_gc_gen(ca, b) / 16);
}

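/*
 * Heap comparison for alloc_heap entries: compare sort keys, breaking ties
 * by nr (descending) and then by bucket index:
 */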
static inline int bucket_alloc_cmp(alloc_heap *h,
				   struct alloc_heap_entry l,
				   struct alloc_heap_entry r)
{
	return	cmp_int(l.key, r.key) ?:
		cmp_int(r.nr, l.nr) ?:
		cmp_int(l.bucket, r.bucket);
}

static inline int bucket_idx_cmp(const void *_l, const void *_r)
{
	const struct alloc_heap_entry *l = _l, *r = _r;

	return cmp_int(l->bucket, r->bucket);
}

static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets;
	struct alloc_heap_entry e = { 0 };
	size_t b, i, nr = 0;

	ca->alloc_heap.used = 0;

	mutex_lock(&c->bucket_clock[READ].lock);
	down_read(&ca->bucket_lock);

	buckets = bucket_array(ca);

	bch2_recalc_oldest_io(c, ca, READ);

	/*
	 * Find buckets with lowest read priority, by building a maxheap sorted
	 * by read priority and repeatedly replacing the maximum element until
	 * all buckets have been visited.
	 */
	for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
		struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
		unsigned long key = bucket_sort_key(c, ca, b, m);

		if (!bch2_can_invalidate_bucket(ca, b, m))
			continue;

		if (e.nr && e.bucket + e.nr == b && e.key == key) {
			e.nr++;
		} else {
			if (e.nr)
				heap_add_or_replace(&ca->alloc_heap, e,
					-bucket_alloc_cmp, NULL);

			e = (struct alloc_heap_entry) {
				.bucket = b,
				.nr	= 1,
				.key	= key,
			};
		}

		cond_resched();
	}

	if (e.nr)
		heap_add_or_replace(&ca->alloc_heap, e,
				-bucket_alloc_cmp, NULL);

	for (i = 0; i < ca->alloc_heap.used; i++)
		nr += ca->alloc_heap.data[i].nr;

	while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
		nr -= ca->alloc_heap.data[0].nr;
		heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL);
	}

	up_read(&ca->bucket_lock);
	mutex_unlock(&c->bucket_clock[READ].lock);
}

static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket_mark m;
	size_t b, start;

	if (ca->fifo_last_bucket <  ca->mi.first_bucket ||
	    ca->fifo_last_bucket >= ca->mi.nbuckets)
		ca->fifo_last_bucket = ca->mi.first_bucket;

	start = ca->fifo_last_bucket;

	do {
		ca->fifo_last_bucket++;
		if (ca->fifo_last_bucket == ca->mi.nbuckets)
			ca->fifo_last_bucket = ca->mi.first_bucket;

		b = ca->fifo_last_bucket;
		m = READ_ONCE(buckets->b[b].mark);

		if (bch2_can_invalidate_bucket(ca, b, m)) {
			struct alloc_heap_entry e = { .bucket = b, .nr = 1, };

			heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
			if (heap_full(&ca->alloc_heap))
				break;
		}

		cond_resched();
	} while (ca->fifo_last_bucket != start);
}

static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket_mark m;
	size_t checked, i;

	for (checked = 0;
	     checked < ca->mi.nbuckets / 2;
	     checked++) {
		size_t b = bch2_rand_range(ca->mi.nbuckets -
					   ca->mi.first_bucket) +
			ca->mi.first_bucket;

		m = READ_ONCE(buckets->b[b].mark);

		if (bch2_can_invalidate_bucket(ca, b, m)) {
			struct alloc_heap_entry e = { .bucket = b, .nr = 1, };

			heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
			if (heap_full(&ca->alloc_heap))
				break;
		}

		cond_resched();
	}

	sort(ca->alloc_heap.data,
	     ca->alloc_heap.used,
	     sizeof(ca->alloc_heap.data[0]),
	     bucket_idx_cmp, NULL);

	/* remove duplicates: */
	for (i = 0; i + 1 < ca->alloc_heap.used; i++)
		if (ca->alloc_heap.data[i].bucket ==
		    ca->alloc_heap.data[i + 1].bucket)
			ca->alloc_heap.data[i].nr = 0;
}

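/*
 * Refill ca->alloc_heap with invalidatable buckets according to the device's
 * configured replacement policy, re-sort the heap, and return the total
 * number of buckets found:
 */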
static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
{
	size_t i, nr = 0;

	ca->inc_gen_needs_gc			= 0;

	switch (ca->mi.replacement) {
	case CACHE_REPLACEMENT_LRU:
		find_reclaimable_buckets_lru(c, ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		find_reclaimable_buckets_fifo(c, ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		find_reclaimable_buckets_random(c, ca);
		break;
	}

	heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL);

	for (i = 0; i < ca->alloc_heap.used; i++)
		nr += ca->alloc_heap.data[i].nr;

	return nr;
}

static inline long next_alloc_bucket(struct bch_dev *ca)
{
	struct alloc_heap_entry e, *top = ca->alloc_heap.data;

	while (ca->alloc_heap.used) {
		if (top->nr) {
			size_t b = top->bucket;

			top->bucket++;
			top->nr--;
			return b;
		}

		heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
	}

	return -1;
}

/*
 * returns sequence number of most recent journal entry that updated this
 * bucket:
 */
static u64 bucket_journal_seq(struct bch_fs *c, struct bucket_mark m)
{
	if (m.journal_seq_valid) {
		u64 journal_seq = atomic64_read(&c->journal.seq);
		u64 bucket_seq	= journal_seq;

		bucket_seq &= ~((u64) U16_MAX);
		bucket_seq |= m.journal_seq;

		if (bucket_seq > journal_seq)
			bucket_seq -= 1 << 16;

		return bucket_seq;
	} else {
		return 0;
	}
}

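/*
 * Invalidate the bucket at the top of ca->alloc_heap: push it onto free_inc
 * and mark it as owned by the allocator, then, if it contained cached data,
 * bump its generation and write out the updated alloc key. On failure the
 * bucket is taken back off free_inc:
 */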
static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
				       struct bch_dev *ca,
				       struct btree_iter *iter,
				       u64 *journal_seq, unsigned flags)
{
#if 0
	__BKEY_PADDED(k, BKEY_ALLOC_VAL_U64s_MAX) alloc_key;
#else
	/* hack: */
	__BKEY_PADDED(k, 8) alloc_key;
#endif
	struct bch_fs *c = trans->c;
	struct bkey_i_alloc *a;
	struct bkey_alloc_unpacked u;
	struct bucket *g;
	struct bucket_mark m;
	struct bkey_s_c k;
	bool invalidating_cached_data;
	size_t b;
	int ret = 0;

	BUG_ON(!ca->alloc_heap.used ||
	       !ca->alloc_heap.data[0].nr);
	b = ca->alloc_heap.data[0].bucket;

	/* first, put on free_inc and mark as owned by allocator: */
	percpu_down_read(&c->mark_lock);
	spin_lock(&c->freelist_lock);

	verify_not_on_freelist(c, ca, b);

	BUG_ON(!fifo_push(&ca->free_inc, b));

	g = bucket(ca, b);
	m = READ_ONCE(g->mark);

	bch2_mark_alloc_bucket(c, ca, b, true, gc_pos_alloc(c, NULL), 0);

	spin_unlock(&c->freelist_lock);
	percpu_up_read(&c->mark_lock);

	invalidating_cached_data = m.cached_sectors != 0;
	if (!invalidating_cached_data)
		goto out;

	/*
	 * If the read-only path is trying to shut down, we can't be generating
	 * new btree updates:
	 */
	if (test_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags)) {
		ret = 1;
		goto out;
	}

	BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);

	bch2_btree_iter_set_pos(iter, POS(ca->dev_idx, b));
retry:
	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
	if (ret)
		return ret;

	/*
	 * The allocator has to start before journal replay is finished - thus,
	 * we have to trust the in memory bucket @m, not the version in the
	 * btree:
	 */
	percpu_down_read(&c->mark_lock);
	g = bucket(ca, b);
	m = READ_ONCE(g->mark);
	u = alloc_mem_to_key(g, m);
	percpu_up_read(&c->mark_lock);

	invalidating_cached_data = m.cached_sectors != 0;

	u.gen++;
	u.data_type	= 0;
	u.dirty_sectors	= 0;
	u.cached_sectors = 0;
	u.read_time	= c->bucket_clock[READ].hand;
	u.write_time	= c->bucket_clock[WRITE].hand;

	a = bkey_alloc_init(&alloc_key.k);
	a->k.p = iter->pos;
	bch2_alloc_pack(a, u);

	bch2_trans_update(trans, iter, &a->k_i,
			  BTREE_TRIGGER_BUCKET_INVALIDATE);

	/*
	 * XXX:
	 * when using deferred btree updates, we have journal reclaim doing
	 * btree updates and thus requiring the allocator to make forward
	 * progress, and here the allocator is requiring space in the journal -
	 * so we need a journal pre-reservation:
	 */
	ret = bch2_trans_commit(trans, NULL,
				invalidating_cached_data ? journal_seq : NULL,
				BTREE_INSERT_NOUNLOCK|
				BTREE_INSERT_NOCHECK_RW|
				BTREE_INSERT_NOFAIL|
				BTREE_INSERT_USE_RESERVE|
				BTREE_INSERT_USE_ALLOC_RESERVE|
				flags);
	if (ret == -EINTR)
		goto retry;
out:
	if (!ret) {
		/* remove from alloc_heap: */
		struct alloc_heap_entry e, *top = ca->alloc_heap.data;

		top->bucket++;
		top->nr--;

		if (!top->nr)
			heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);

		/*
		 * Make sure we flush the last journal entry that updated this
		 * bucket (i.e. deleting the last reference) before writing to
		 * this bucket again:
		 */
		*journal_seq = max(*journal_seq, bucket_journal_seq(c, m));
	} else {
		size_t b2;

		/* remove from free_inc: */
		percpu_down_read(&c->mark_lock);
		spin_lock(&c->freelist_lock);

		bch2_mark_alloc_bucket(c, ca, b, false,
				       gc_pos_alloc(c, NULL), 0);

		BUG_ON(!fifo_pop_back(&ca->free_inc, b2));
		BUG_ON(b != b2);

		spin_unlock(&c->freelist_lock);
		percpu_up_read(&c->mark_lock);
	}

	return ret < 0 ? ret : 0;
}

static bool bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
				       size_t bucket, u64 *flush_seq)
{
	struct bucket_mark m;

	percpu_down_read(&c->mark_lock);
	spin_lock(&c->freelist_lock);

	bch2_invalidate_bucket(c, ca, bucket, &m);

	verify_not_on_freelist(c, ca, bucket);
	BUG_ON(!fifo_push(&ca->free_inc, bucket));

	spin_unlock(&c->freelist_lock);

	bucket_io_clock_reset(c, ca, bucket, READ);
	bucket_io_clock_reset(c, ca, bucket, WRITE);

	percpu_up_read(&c->mark_lock);

	*flush_seq = max(*flush_seq, bucket_journal_seq(c, m));

	return m.cached_sectors != 0;
}

/*
 * Pull buckets off ca->alloc_heap, invalidate them, move them to ca->free_inc:
 */
static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	u64 journal_seq = 0;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC,
				   POS(ca->dev_idx, 0),
				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	/* Only use nowait if we've already invalidated at least one bucket: */
	while (!ret &&
	       !fifo_full(&ca->free_inc) &&
	       ca->alloc_heap.used)
		ret = bch2_invalidate_one_bucket2(&trans, ca, iter, &journal_seq,
				BTREE_INSERT_GC_LOCK_HELD|
				(!fifo_empty(&ca->free_inc)
				 ? BTREE_INSERT_NOWAIT : 0));

	bch2_trans_exit(&trans);

	/* If we used NOWAIT, don't return the error: */
	if (!fifo_empty(&ca->free_inc))
		ret = 0;
	if (ret) {
		bch_err(ca, "error invalidating buckets: %i", ret);
		return ret;
	}

	if (journal_seq)
		ret = bch2_journal_flush_seq(&c->journal, journal_seq);
	if (ret) {
		bch_err(ca, "journal error: %i", ret);
		return ret;
	}

	return 0;
}

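/*
 * Move one invalidated bucket from free_inc onto whichever reserve freelist
 * has room, sleeping until space frees up if they're all full. Returns
 * nonzero if the allocator thread was asked to stop while waiting:
 */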
static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t bucket)
{
	unsigned i;
	int ret = 0;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&c->freelist_lock);
		for (i = 0; i < RESERVE_NR; i++) {

			/*
			 * Don't strand buckets on the copygc freelist until
			 * after recovery is finished:
			 */
			if (!test_bit(BCH_FS_STARTED, &c->flags) &&
			    i == RESERVE_MOVINGGC)
				continue;

			if (fifo_push(&ca->free[i], bucket)) {
				fifo_pop(&ca->free_inc, bucket);

				closure_wake_up(&c->freelist_wait);
				ca->allocator_state = ALLOCATOR_RUNNING;

				spin_unlock(&c->freelist_lock);
				goto out;
			}
		}

		if (ca->allocator_state != ALLOCATOR_BLOCKED_FULL) {
			ca->allocator_state = ALLOCATOR_BLOCKED_FULL;
			closure_wake_up(&c->freelist_wait);
		}

		spin_unlock(&c->freelist_lock);

		if ((current->flags & PF_KTHREAD) &&
		    kthread_should_stop()) {
			ret = 1;
			break;
		}

		schedule();
		try_to_freeze();
	}
out:
	__set_current_state(TASK_RUNNING);
	return ret;
}

/*
 * Pulls buckets off free_inc, discards them (if enabled), then adds them to
 * freelists, waiting until there's room if necessary:
 */
static int discard_invalidated_buckets(struct bch_fs *c, struct bch_dev *ca)
{
	while (!fifo_empty(&ca->free_inc)) {
		size_t bucket = fifo_peek(&ca->free_inc);

		if (ca->mi.discard &&
		    blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
			blkdev_issue_discard(ca->disk_sb.bdev,
					     bucket_to_sector(ca, bucket),
					     ca->mi.bucket_size, GFP_NOIO, 0);

		if (push_invalidated_bucket(c, ca, bucket))
			return 1;
	}

	return 0;
}

/**
 * bch2_allocator_thread - move buckets from free_inc to reserves
 *
 * The free_inc FIFO is filled by invalidating the buckets that
 * find_reclaimable_buckets() picks out, and the reserves are depleted by
 * bucket allocation. When we run out of free_inc, try to invalidate some
 * buckets and write out prios and gens.
 */
static int bch2_allocator_thread(void *arg)
{
	struct bch_dev *ca = arg;
	struct bch_fs *c = ca->fs;
	size_t nr;
	int ret;

	set_freezable();
	ca->allocator_state = ALLOCATOR_RUNNING;

	while (1) {
		cond_resched();

		pr_debug("discarding %zu invalidated buckets",
			 fifo_used(&ca->free_inc));

		ret = discard_invalidated_buckets(c, ca);
		if (ret)
			goto stop;

		down_read(&c->gc_lock);

		ret = bch2_invalidate_buckets(c, ca);
		if (ret) {
			up_read(&c->gc_lock);
			goto stop;
		}

		if (!fifo_empty(&ca->free_inc)) {
			up_read(&c->gc_lock);
			continue;
		}

		pr_debug("free_inc now empty");

		do {
			/*
			 * Find some buckets that we can invalidate - either
			 * they're completely unused, or they only contain
			 * clean data that's been written back to the backing
			 * device or another cache tier
			 */

			pr_debug("scanning for reclaimable buckets");

			nr = find_reclaimable_buckets(c, ca);

			pr_debug("found %zu buckets", nr);

			trace_alloc_batch(ca, nr, ca->alloc_heap.size);

			if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) ||
			     ca->inc_gen_really_needs_gc) &&
			    c->gc_thread) {
				atomic_inc(&c->kick_gc);
				wake_up_process(c->gc_thread);
			}

			/*
			 * If we found any buckets, we have to invalidate them
			 * before we scan for more - but if we didn't find very
			 * many we may want to wait on more buckets being
			 * available so we don't spin:
			 */
			if (!nr ||
			    (nr < ALLOC_SCAN_BATCH(ca) &&
			     !fifo_empty(&ca->free[RESERVE_NONE]))) {
				ret = wait_buckets_available(c, ca);
				if (ret) {
					up_read(&c->gc_lock);
					goto stop;
				}
			}
		} while (!nr);

		up_read(&c->gc_lock);

		pr_debug("%zu buckets to invalidate", nr);

		/*
		 * alloc_heap is now full of newly-invalidated buckets: next,
		 * write out the new bucket gens:
		 */
	}

stop:
	pr_debug("alloc thread stopping (ret %i)", ret);
	ca->allocator_state = ALLOCATOR_STOPPED;
	closure_wake_up(&c->freelist_wait);
	return 0;
}

/* Startup/shutdown (ro/rw): */

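/*
 * Recompute c->capacity (in sectors) from the RW member devices, carving out
 * per-device reserves for copygc and the btree/copygc/rebalance write points,
 * and re-arm (or remove) the bucket clock rescale timers accordingly:
 */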
void bch2_recalc_capacity(struct bch_fs *c)
{
	struct bch_dev *ca;
	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
	unsigned bucket_size_max = 0;
	unsigned long ra_pages = 0;
	unsigned i, j;

	lockdep_assert_held(&c->state_lock);

	for_each_online_member(ca, c, i) {
		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_bdi;

		ra_pages += bdi->ra_pages;
	}

	bch2_set_ra_pages(c, ra_pages);

	for_each_rw_member(ca, c, i) {
		u64 dev_reserve = 0;

		/*
		 * We need to reserve buckets (from the number
		 * of currently available buckets) against
		 * foreground writes so that mainly copygc can
		 * make forward progress.
		 *
		 * We need enough to refill the various reserves
		 * from scratch - copygc will use its entire
		 * reserve all at once, then run again when
		 * its reserve is refilled (from the formerly
		 * available buckets).
		 *
		 * This reserve is just used when considering if
		 * allocations for foreground writes must wait -
		 * not -ENOSPC calculations.
		 */
		for (j = 0; j < RESERVE_NONE; j++)
			dev_reserve += ca->free[j].size;

		dev_reserve += 1;	/* btree write point */
		dev_reserve += 1;	/* copygc write point */
		dev_reserve += 1;	/* rebalance write point */

		dev_reserve *= ca->mi.bucket_size;

		ca->copygc_threshold = dev_reserve;

		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);

		reserved_sectors += dev_reserve * 2;

		bucket_size_max = max_t(unsigned, bucket_size_max,
					ca->mi.bucket_size);
	}

	gc_reserve = c->opts.gc_reserve_bytes
		? c->opts.gc_reserve_bytes >> 9
		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);

	reserved_sectors = max(gc_reserve, reserved_sectors);

	reserved_sectors = min(reserved_sectors, capacity);

	c->capacity = capacity - reserved_sectors;

	c->bucket_size_max = bucket_size_max;

	if (c->capacity) {
		bch2_io_timer_add(&c->io_clock[READ],
				 &c->bucket_clock[READ].rescale);
		bch2_io_timer_add(&c->io_clock[WRITE],
				 &c->bucket_clock[WRITE].rescale);
	} else {
		bch2_io_timer_del(&c->io_clock[READ],
				 &c->bucket_clock[READ].rescale);
		bch2_io_timer_del(&c->io_clock[WRITE],
				 &c->bucket_clock[WRITE].rescale);
	}

	/* Wake up in case someone was waiting for buckets */
	closure_wake_up(&c->freelist_wait);
}

static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
{
	struct open_bucket *ob;
	bool ret = false;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list &&
		    ob->ptr.dev == ca->dev_idx)
			ret = true;
		spin_unlock(&ob->lock);
	}

	return ret;
}

/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	BUG_ON(ca->alloc_thread);

	/* First, remove device from allocation groups: */

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);

	/*
	 * Capacity is calculated based off of devices in allocation groups:
	 */
	bch2_recalc_capacity(c);

	/* Next, close write points that point to this device... */
	for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
		bch2_writepoint_stop(c, ca, &c->write_points[i]);

	bch2_writepoint_stop(c, ca, &ca->copygc_write_point);
	bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
	bch2_writepoint_stop(c, ca, &c->btree_write_point);

	mutex_lock(&c->btree_reserve_cache_lock);
	while (c->btree_reserve_cache_nr) {
		struct btree_alloc *a =
			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];

		bch2_open_buckets_put(c, &a->ob);
	}
	mutex_unlock(&c->btree_reserve_cache_lock);

	while (1) {
		struct open_bucket *ob;

		spin_lock(&c->freelist_lock);
		if (!ca->open_buckets_partial_nr) {
			spin_unlock(&c->freelist_lock);
			break;
		}
		ob = c->open_buckets +
			ca->open_buckets_partial[--ca->open_buckets_partial_nr];
		ob->on_partial_list = false;
		spin_unlock(&c->freelist_lock);

		bch2_open_bucket_put(c, ob);
	}

	bch2_ec_stop_dev(c, ca);

	/*
	 * Wake up threads that were blocked on allocation, so they can notice
	 * the device can no longer be removed and the capacity has changed:
	 */
	closure_wake_up(&c->freelist_wait);

	/*
	 * journal_res_get() can block waiting for free space in the journal -
	 * it needs to notice there may not be devices to allocate from anymore:
	 */
	wake_up(&c->journal.wait);

	/* Now wait for any in flight writes: */

	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));
}

/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);
}

void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca)
{
	if (ca->alloc_thread)
		closure_wait_event(&c->freelist_wait,
				   ca->allocator_state != ALLOCATOR_RUNNING);
}

/* stop allocator thread: */
void bch2_dev_allocator_stop(struct bch_dev *ca)
{
	struct task_struct *p;

	p = rcu_dereference_protected(ca->alloc_thread, 1);
	ca->alloc_thread = NULL;

	/*
	 * We need an rcu barrier between setting ca->alloc_thread = NULL and
	 * the thread shutting down to avoid bch2_wake_allocator() racing:
	 *
	 * XXX: it would be better to have the rcu barrier be asynchronous
	 * instead of blocking us here
	 */
	synchronize_rcu();

	if (p) {
		kthread_stop(p);
		put_task_struct(p);
	}
}

/* start allocator thread: */
int bch2_dev_allocator_start(struct bch_dev *ca)
{
	struct task_struct *p;

	/*
	 * allocator thread already started?
	 */
	if (ca->alloc_thread)
		return 0;

	p = kthread_create(bch2_allocator_thread, ca,
			   "bch_alloc[%s]", ca->name);
	if (IS_ERR(p))
		return PTR_ERR(p);

	get_task_struct(p);
	rcu_assign_pointer(ca->alloc_thread, p);
	wake_up_process(p);
	return 0;
}

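/*
 * Flush btree nodes that were held dirty during allocator startup: returns
 * true once the journal has errored, or once there are no more unwritten
 * nodes and no interior updates still pending:
 */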
static bool flush_held_btree_writes(struct bch_fs *c)
{
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct btree *b;
	bool nodes_unwritten;
	size_t i;
again:
	cond_resched();
	nodes_unwritten = false;

	if (bch2_journal_error(&c->journal))
		return true;

	rcu_read_lock();
	for_each_cached_btree(b, c, tbl, i, pos)
		if (btree_node_need_write(b)) {
			if (btree_node_may_write(b)) {
				rcu_read_unlock();
				btree_node_lock_type(c, b, SIX_LOCK_read);
				bch2_btree_node_write(c, b, SIX_LOCK_read);
				six_unlock_read(&b->lock);
				goto again;
			} else {
				nodes_unwritten = true;
			}
		}
	rcu_read_unlock();

	return !nodes_unwritten &&
		!bch2_btree_interior_updates_nr_pending(c);
}

static void allocator_start_issue_discards(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned dev_iter;
	size_t bu;

	for_each_rw_member(ca, c, dev_iter)
		while (fifo_pop(&ca->free_inc, bu))
			blkdev_issue_discard(ca->disk_sb.bdev,
					     bucket_to_sector(ca, bu),
					     ca->mi.bucket_size, GFP_NOIO, 0);
}

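/*
 * Double the size of free_inc when it's full, so startup can keep pulling
 * reclaimable buckets onto it:
 */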
static int resize_free_inc(struct bch_dev *ca)
{
	alloc_fifo free_inc;

	if (!fifo_full(&ca->free_inc))
		return 0;

	if (!init_fifo(&free_inc,
		       ca->free_inc.size * 2,
		       GFP_KERNEL))
		return -ENOMEM;

	fifo_move(&free_inc, &ca->free_inc);
	swap(free_inc, ca->free_inc);
	free_fifo(&free_inc);
	return 0;
}

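/*
 * Fast path for allocator startup: scan the in-memory bucket arrays for
 * buckets that are already invalidated (valid gen, available, no cached
 * data, not marked nouse) and push them straight onto the freelists.
 * Returns true if that was enough to fill every rw device's RESERVE_BTREE
 * freelist:
 */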
static bool bch2_fs_allocator_start_fast(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned dev_iter;
	bool ret = true;

	if (test_alloc_startup(c))
		return false;

	down_read(&c->gc_lock);

	/* Scan for buckets that are already invalidated: */
	for_each_rw_member(ca, c, dev_iter) {
		struct bucket_array *buckets;
		struct bucket_mark m;
		long bu;

		down_read(&ca->bucket_lock);
		buckets = bucket_array(ca);

		for (bu = buckets->first_bucket;
		     bu < buckets->nbuckets; bu++) {
			m = READ_ONCE(buckets->b[bu].mark);

			if (!buckets->b[bu].gen_valid ||
			    !is_available_bucket(m) ||
			    m.cached_sectors ||
			    (ca->buckets_nouse &&
			     test_bit(bu, ca->buckets_nouse)))
				continue;

			percpu_down_read(&c->mark_lock);
			bch2_mark_alloc_bucket(c, ca, bu, true,
					gc_pos_alloc(c, NULL), 0);
			percpu_up_read(&c->mark_lock);

			fifo_push(&ca->free_inc, bu);

			discard_invalidated_buckets(c, ca);

			if (fifo_full(&ca->free[RESERVE_BTREE]))
				break;
		}
		up_read(&ca->bucket_lock);
	}

	up_read(&c->gc_lock);

	/* did we find enough buckets? */
	for_each_rw_member(ca, c, dev_iter)
		if (!fifo_full(&ca->free[RESERVE_BTREE]))
			ret = false;

	return ret;
}

int bch2_fs_allocator_start(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned dev_iter;
	u64 journal_seq = 0;
	bool wrote;
	long bu;
	int ret = 0;

	if (!test_alloc_startup(c) &&
	    bch2_fs_allocator_start_fast(c))
		return 0;

	pr_debug("not enough empty buckets; scanning for reclaimable buckets");

	/*
	 * We're moving buckets to freelists _before_ they've been marked as
	 * invalidated on disk - we have to so that we can allocate new btree
	 * nodes to mark them as invalidated on disk.
	 *
	 * However, we can't _write_ to any of these buckets yet - they might
	 * have cached data in them, which is live until they're marked as
	 * invalidated on disk:
	 */
	set_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);

	down_read(&c->gc_lock);
	do {
		wrote = false;

		for_each_rw_member(ca, c, dev_iter) {
			find_reclaimable_buckets(c, ca);

			while (!fifo_full(&ca->free[RESERVE_BTREE]) &&
			       (bu = next_alloc_bucket(ca)) >= 0) {
				ret = resize_free_inc(ca);
				if (ret) {
					percpu_ref_put(&ca->io_ref);
					up_read(&c->gc_lock);
					goto err;
				}

				bch2_invalidate_one_bucket(c, ca, bu,
							   &journal_seq);

				fifo_push(&ca->free[RESERVE_BTREE], bu);
			}
		}

		pr_debug("done scanning for reclaimable buckets");

		/*
		 * XXX: it's possible for this to deadlock waiting on journal reclaim,
		 * since we're holding btree writes. What then?
		 */
		ret = bch2_alloc_write(c,
				       BTREE_INSERT_NOCHECK_RW|
				       BTREE_INSERT_USE_ALLOC_RESERVE|
				       BTREE_INSERT_NOWAIT, &wrote);

		/*
		 * If bch2_alloc_write() did anything, it may have used some
		 * buckets, and we need the RESERVE_BTREE freelist full - so we
		 * need to loop and scan again.
		 * And if it errored, it may have been because there weren't
		 * enough buckets, so just scan and loop again as long as it
		 * made some progress:
		 */
	} while (wrote);
	up_read(&c->gc_lock);

	if (ret)
		goto err;

	pr_debug("flushing journal");

	ret = bch2_journal_flush(&c->journal);
	if (ret)
		goto err;

	pr_debug("issuing discards");
	allocator_start_issue_discards(c);
err:
	clear_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
	closure_wait_event(&c->btree_interior_update_wait,
			   flush_held_btree_writes(c));

	return ret;
}

void bch2_fs_allocator_background_init(struct bch_fs *c)
{
	spin_lock_init(&c->freelist_lock);
	bch2_bucket_clock_init(c, READ);
	bch2_bucket_clock_init(c, WRITE);

	c->pd_controllers_update_seconds = 5;
	INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update);
}