1 // SPDX-License-Identifier: GPL-2.0
2 #include "bcachefs.h"
3 #include "alloc_background.h"
4 #include "alloc_foreground.h"
5 #include "btree_cache.h"
6 #include "btree_io.h"
7 #include "btree_key_cache.h"
8 #include "btree_update.h"
9 #include "btree_update_interior.h"
10 #include "btree_gc.h"
11 #include "buckets.h"
12 #include "clock.h"
13 #include "debug.h"
14 #include "ec.h"
15 #include "error.h"
16 #include "recovery.h"
17 #include "varint.h"
18
19 #include <linux/kthread.h>
20 #include <linux/math64.h>
21 #include <linux/random.h>
22 #include <linux/rculist.h>
23 #include <linux/rcupdate.h>
24 #include <linux/sched/task.h>
25 #include <linux/sort.h>
26 #include <trace/events/bcachefs.h>
27
28 const char * const bch2_allocator_states[] = {
29 #define x(n)    #n,
30         ALLOC_THREAD_STATES()
31 #undef x
32         NULL
33 };
34
35 static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
36 #define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
37         BCH_ALLOC_FIELDS_V1()
38 #undef x
39 };
40
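/*
 * Scratch buffer sized for the largest possible packed alloc key: the bkey
 * header, a struct bch_alloc_v3, plus worst-case space for every
 * varint-encoded field in BCH_ALLOC_FIELDS_V2().
 */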
41 struct bkey_alloc_buf {
42         struct bkey_i   k;
43         struct bch_alloc_v3 v;
44
45 #define x(_name,  _bits)                + _bits / 8
46         u8              _pad[0 + BCH_ALLOC_FIELDS_V2()];
47 #undef  x
48 } __attribute__((packed, aligned(8)));
49
50 /* Persistent alloc info: */
51
52 static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
53                                      const void **p, unsigned field)
54 {
55         unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
56         u64 v;
57
58         if (!(a->fields & (1 << field)))
59                 return 0;
60
61         switch (bytes) {
62         case 1:
63                 v = *((const u8 *) *p);
64                 break;
65         case 2:
66                 v = le16_to_cpup(*p);
67                 break;
68         case 4:
69                 v = le32_to_cpup(*p);
70                 break;
71         case 8:
72                 v = le64_to_cpup(*p);
73                 break;
74         default:
75                 BUG();
76         }
77
78         *p += bytes;
79         return v;
80 }
81
82 static inline void alloc_field_v1_put(struct bkey_i_alloc *a, void **p,
83                                       unsigned field, u64 v)
84 {
85         unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
86
87         if (!v)
88                 return;
89
90         a->v.fields |= 1 << field;
91
92         switch (bytes) {
93         case 1:
94                 *((u8 *) *p) = v;
95                 break;
96         case 2:
97                 *((__le16 *) *p) = cpu_to_le16(v);
98                 break;
99         case 4:
100                 *((__le32 *) *p) = cpu_to_le32(v);
101                 break;
102         case 8:
103                 *((__le64 *) *p) = cpu_to_le64(v);
104                 break;
105         default:
106                 BUG();
107         }
108
109         *p += bytes;
110 }
111
112 static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
113                                  struct bkey_s_c k)
114 {
115         const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
116         const void *d = in->data;
117         unsigned idx = 0;
118
119         out->gen = in->gen;
120
121 #define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
122         BCH_ALLOC_FIELDS_V1()
123 #undef  x
124 }
125
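/*
 * v2 (and v3) alloc keys store their fields as varints rather than the
 * fixed-width fields of v1; fields past nr_fields are implicitly zero, and
 * the assign-then-compare check below catches decoded values too large for
 * the corresponding bkey_alloc_unpacked member.
 */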
126 static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
127                                 struct bkey_s_c k)
128 {
129         struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
130         const u8 *in = a.v->data;
131         const u8 *end = bkey_val_end(a);
132         unsigned fieldnr = 0;
133         int ret;
134         u64 v;
135
136         out->gen        = a.v->gen;
137         out->oldest_gen = a.v->oldest_gen;
138         out->data_type  = a.v->data_type;
139
140 #define x(_name, _bits)                                                 \
141         if (fieldnr < a.v->nr_fields) {                                 \
142                 ret = bch2_varint_decode_fast(in, end, &v);             \
143                 if (ret < 0)                                            \
144                         return ret;                                     \
145                 in += ret;                                              \
146         } else {                                                        \
147                 v = 0;                                                  \
148         }                                                               \
149         out->_name = v;                                                 \
150         if (v != out->_name)                                            \
151                 return -1;                                              \
152         fieldnr++;
153
154         BCH_ALLOC_FIELDS_V2()
155 #undef  x
156         return 0;
157 }
158
159 static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
160                                 struct bkey_s_c k)
161 {
162         struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
163         const u8 *in = a.v->data;
164         const u8 *end = bkey_val_end(a);
165         unsigned fieldnr = 0;
166         int ret;
167         u64 v;
168
169         out->gen        = a.v->gen;
170         out->oldest_gen = a.v->oldest_gen;
171         out->data_type  = a.v->data_type;
172         out->journal_seq = le64_to_cpu(a.v->journal_seq);
173
174 #define x(_name, _bits)                                                 \
175         if (fieldnr < a.v->nr_fields) {                                 \
176                 ret = bch2_varint_decode_fast(in, end, &v);             \
177                 if (ret < 0)                                            \
178                         return ret;                                     \
179                 in += ret;                                              \
180         } else {                                                        \
181                 v = 0;                                                  \
182         }                                                               \
183         out->_name = v;                                                 \
184         if (v != out->_name)                                            \
185                 return -1;                                              \
186         fieldnr++;
187
188         BCH_ALLOC_FIELDS_V2()
189 #undef  x
190         return 0;
191 }
192
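/*
 * Packing always emits the newest key format (alloc_v3): each nonzero field
 * is varint encoded, a zero field is written as a single 0 byte, and
 * trailing zero fields are trimmed by resetting nr_fields and the output
 * pointer to the last nonzero field.
 */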
193 static void bch2_alloc_pack_v3(struct bkey_alloc_buf *dst,
194                                const struct bkey_alloc_unpacked src)
195 {
196         struct bkey_i_alloc_v3 *a = bkey_alloc_v3_init(&dst->k);
197         unsigned nr_fields = 0, last_nonzero_fieldnr = 0;
198         u8 *out = a->v.data;
199         u8 *end = (void *) &dst[1];
200         u8 *last_nonzero_field = out;
201         unsigned bytes;
202
203         a->k.p          = POS(src.dev, src.bucket);
204         a->v.gen        = src.gen;
205         a->v.oldest_gen = src.oldest_gen;
206         a->v.data_type  = src.data_type;
207         a->v.journal_seq = cpu_to_le64(src.journal_seq);
208
209 #define x(_name, _bits)                                                 \
210         nr_fields++;                                                    \
211                                                                         \
212         if (src._name) {                                                \
213                 out += bch2_varint_encode_fast(out, src._name);         \
214                                                                         \
215                 last_nonzero_field = out;                               \
216                 last_nonzero_fieldnr = nr_fields;                       \
217         } else {                                                        \
218                 *out++ = 0;                                             \
219         }
220
221         BCH_ALLOC_FIELDS_V2()
222 #undef  x
223         BUG_ON(out > end);
224
225         out = last_nonzero_field;
226         a->v.nr_fields = last_nonzero_fieldnr;
227
228         bytes = (u8 *) out - (u8 *) &a->v;
229         set_bkey_val_bytes(&a->k, bytes);
230         memset_u64s_tail(&a->v, 0, bytes);
231 }
232
233 struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
234 {
235         struct bkey_alloc_unpacked ret = {
236                 .dev    = k.k->p.inode,
237                 .bucket = k.k->p.offset,
238                 .gen    = 0,
239         };
240
241         switch (k.k->type) {
242         case KEY_TYPE_alloc:
243                 bch2_alloc_unpack_v1(&ret, k);
244                 break;
245         case KEY_TYPE_alloc_v2:
246                 bch2_alloc_unpack_v2(&ret, k);
247                 break;
248         case KEY_TYPE_alloc_v3:
249                 bch2_alloc_unpack_v3(&ret, k);
250                 break;
251         }
252
253         return ret;
254 }
255
256 static void bch2_alloc_pack(struct bch_fs *c,
257                             struct bkey_alloc_buf *dst,
258                             const struct bkey_alloc_unpacked src)
259 {
260         bch2_alloc_pack_v3(dst, src);
261 }
262
263 int bch2_alloc_write(struct btree_trans *trans, struct btree_iter *iter,
264                      struct bkey_alloc_unpacked *u, unsigned trigger_flags)
265 {
266         struct bkey_alloc_buf *a;
267
268         a = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));
269         if (IS_ERR(a))
270                 return PTR_ERR(a);
271
272         bch2_alloc_pack(trans->c, a, *u);
273         return bch2_trans_update(trans, iter, &a->k, trigger_flags);
274 }
275
276 static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
277 {
278         unsigned i, bytes = offsetof(struct bch_alloc, data);
279
280         for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
281                 if (a->fields & (1 << i))
282                         bytes += BCH_ALLOC_V1_FIELD_BYTES[i];
283
284         return DIV_ROUND_UP(bytes, sizeof(u64));
285 }
286
287 const char *bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k)
288 {
289         struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
290
291         if (k.k->p.inode >= c->sb.nr_devices ||
292             !c->devs[k.k->p.inode])
293                 return "invalid device";
294
295         /* allow for unknown fields */
296         if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v))
297                 return "incorrect value size";
298
299         return NULL;
300 }
301
302 const char *bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k)
303 {
304         struct bkey_alloc_unpacked u;
305
306         if (k.k->p.inode >= c->sb.nr_devices ||
307             !c->devs[k.k->p.inode])
308                 return "invalid device";
309
310         if (bch2_alloc_unpack_v2(&u, k))
311                 return "unpack error";
312
313         return NULL;
314 }
315
316 const char *bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k)
317 {
318         struct bkey_alloc_unpacked u;
319
320         if (k.k->p.inode >= c->sb.nr_devices ||
321             !c->devs[k.k->p.inode])
322                 return "invalid device";
323
324         if (bch2_alloc_unpack_v3(&u, k))
325                 return "unpack error";
326
327         return NULL;
328 }
329
330 void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
331                            struct bkey_s_c k)
332 {
333         struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
334
335         pr_buf(out, "gen %u oldest_gen %u data_type %s journal_seq %llu",
336                u.gen, u.oldest_gen, bch2_data_types[u.data_type],
337                u.journal_seq);
338 #define x(_name, ...)   pr_buf(out, " " #_name " %llu", (u64) u._name);
339         BCH_ALLOC_FIELDS_V2()
340 #undef  x
341 }
342
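/*
 * Called for each alloc key (from both the btree and the journal) at
 * startup, to initialize the in-memory bucket marks from the on-disk
 * allocation information.
 */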
343 static int bch2_alloc_read_fn(struct btree_trans *trans, struct bkey_s_c k)
344 {
345         struct bch_fs *c = trans->c;
346         struct bch_dev *ca;
347         struct bucket *g;
348         struct bkey_alloc_unpacked u;
349
350         if (!bkey_is_alloc(k.k))
351                 return 0;
352
353         ca = bch_dev_bkey_exists(c, k.k->p.inode);
354         g = bucket(ca, k.k->p.offset);
355         u = bch2_alloc_unpack(k);
356
357         g->_mark.gen            = u.gen;
358         g->_mark.data_type      = u.data_type;
359         g->_mark.dirty_sectors  = u.dirty_sectors;
360         g->_mark.cached_sectors = u.cached_sectors;
361         g->_mark.stripe         = u.stripe != 0;
362         g->stripe               = u.stripe;
363         g->stripe_redundancy    = u.stripe_redundancy;
364         g->io_time[READ]        = u.read_time;
365         g->io_time[WRITE]       = u.write_time;
366         g->oldest_gen           = u.oldest_gen;
367         g->gen_valid            = 1;
368
369         return 0;
370 }
371
372 int bch2_alloc_read(struct bch_fs *c)
373 {
374         struct btree_trans trans;
375         int ret;
376
377         bch2_trans_init(&trans, c, 0, 0);
378         down_read(&c->gc_lock);
379         ret = bch2_btree_and_journal_walk(&trans, BTREE_ID_alloc, bch2_alloc_read_fn);
380         up_read(&c->gc_lock);
381         bch2_trans_exit(&trans);
382         if (ret) {
383                 bch_err(c, "error reading alloc info: %i", ret);
384                 return ret;
385         }
386
387         return 0;
388 }
389
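/*
 * Flush any dirty key cache entry for this bucket, then, if the in-memory
 * bucket info differs from what is in the btree, write out an updated alloc
 * key; retried on -EINTR (transaction restart).
 */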
390 static int bch2_alloc_write_key(struct btree_trans *trans,
391                                 struct btree_iter *iter,
392                                 unsigned flags)
393 {
394         struct bch_fs *c = trans->c;
395         struct bkey_s_c k;
396         struct bkey_alloc_unpacked old_u, new_u;
397         int ret;
398 retry:
399         bch2_trans_begin(trans);
400
401         ret = bch2_btree_key_cache_flush(trans,
402                         BTREE_ID_alloc, iter->pos);
403         if (ret)
404                 goto err;
405
406         k = bch2_btree_iter_peek_slot(iter);
407         ret = bkey_err(k);
408         if (ret)
409                 goto err;
410
411         old_u   = bch2_alloc_unpack(k);
412         new_u   = alloc_mem_to_key(c, iter);
413
414         if (!bkey_alloc_unpacked_cmp(old_u, new_u))
415                 return 0;
416
417         ret   = bch2_alloc_write(trans, iter, &new_u,
418                                   BTREE_TRIGGER_NORUN) ?:
419                 bch2_trans_commit(trans, NULL, NULL,
420                                 BTREE_INSERT_NOFAIL|flags);
421 err:
422         if (ret == -EINTR)
423                 goto retry;
424         return ret;
425 }
426
427 int bch2_alloc_write_all(struct bch_fs *c, unsigned flags)
428 {
429         struct btree_trans trans;
430         struct btree_iter iter;
431         struct bch_dev *ca;
432         unsigned i;
433         int ret = 0;
434
435         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
436         bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc, POS_MIN,
437                              BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
438
439         for_each_member_device(ca, c, i) {
440                 bch2_btree_iter_set_pos(&iter,
441                         POS(ca->dev_idx, ca->mi.first_bucket));
442
443                 while (iter.pos.offset < ca->mi.nbuckets) {
444                         ret = bch2_alloc_write_key(&trans, &iter, flags);
445                         if (ret) {
446                                 percpu_ref_put(&ca->ref);
447                                 goto err;
448                         }
449                         bch2_btree_iter_advance(&iter);
450                 }
451         }
452 err:
453         bch2_trans_iter_exit(&trans, &iter);
454         bch2_trans_exit(&trans);
455         return ret;
456 }
457
458 /* Bucket IO clocks: */
459
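/*
 * Update a bucket's read or write timestamp to the current IO clock and
 * persist it via an updated alloc key, so the LRU reuse heuristics see the
 * bucket as recently used.
 */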
460 int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
461                               size_t bucket_nr, int rw)
462 {
463         struct bch_fs *c = trans->c;
464         struct btree_iter iter;
465         struct bkey_alloc_unpacked u;
466         u64 *time, now;
467         int ret = 0;
468
469         bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(dev, bucket_nr),
470                              BTREE_ITER_CACHED|
471                              BTREE_ITER_CACHED_NOFILL|
472                              BTREE_ITER_INTENT);
473         ret = bch2_btree_iter_traverse(&iter);
474         if (ret)
475                 goto out;
476
477         u = alloc_mem_to_key(c, &iter);
478
479         time = rw == READ ? &u.read_time : &u.write_time;
480         now = atomic64_read(&c->io_clock[rw].now);
481         if (*time == now)
482                 goto out;
483
484         *time = now;
485
486         ret   = bch2_alloc_write(trans, &iter, &u, 0) ?:
487                 bch2_trans_commit(trans, NULL, NULL, 0);
488 out:
489         bch2_trans_iter_exit(trans, &iter);
490         return ret;
491 }
492
493 /* Background allocator thread: */
494
495 /*
496  * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
497  * (marking them as invalidated on disk), then optionally issues discard
498  * commands to the newly freed buckets and puts them on the various freelists.
499  */
500
501 static bool bch2_can_invalidate_bucket(struct bch_dev *ca, size_t b,
502                                        struct bucket_mark m)
503 {
504         u8 gc_gen;
505
506         if (!is_available_bucket(m))
507                 return false;
508
509         if (m.owned_by_allocator)
510                 return false;
511
512         if (ca->buckets_nouse &&
513             test_bit(b, ca->buckets_nouse))
514                 return false;
515
516         gc_gen = bucket_gc_gen(bucket(ca, b));
517
518         ca->inc_gen_needs_gc            += gc_gen >= BUCKET_GC_GEN_MAX / 2;
519         ca->inc_gen_really_needs_gc     += gc_gen >= BUCKET_GC_GEN_MAX;
520
521         return gc_gen < BUCKET_GC_GEN_MAX;
522 }
523
524 /*
525  * Determines the order in which we reuse buckets: smallest
526  * bucket_sort_key() first.
527  */
528
529 static unsigned bucket_sort_key(struct bucket *g, struct bucket_mark m,
530                                 u64 now, u64 last_seq_ondisk)
531 {
532         unsigned used = bucket_sectors_used(m);
533
534         if (used) {
535                 /*
536                  * Prefer to keep buckets that have been read more recently, and
537                  * buckets that have more data in them:
538                  */
539                 u64 last_read = max_t(s64, 0, now - g->io_time[READ]);
540                 u32 last_read_scaled = min_t(u64, U32_MAX, div_u64(last_read, used));
541
542                 return -last_read_scaled;
543         } else {
544                 /*
545                  * Prefer to use buckets with smaller gc_gen so that we don't
546                  * have to walk the btree and recalculate oldest_gen - but shift
547                  * off the low bits so that buckets will still have equal sort
548                  * keys when there's only a small difference, so that we can
549                  * keep sequential buckets together:
550                  */
551                 return  (bucket_needs_journal_commit(m, last_seq_ondisk) << 4)|
552                         (bucket_gc_gen(g) >> 4);
553         }
554 }
555
556 static inline int bucket_alloc_cmp(alloc_heap *h,
557                                    struct alloc_heap_entry l,
558                                    struct alloc_heap_entry r)
559 {
560         return  cmp_int(l.key, r.key) ?:
561                 cmp_int(r.nr, l.nr) ?:
562                 cmp_int(l.bucket, r.bucket);
563 }
564
565 static inline int bucket_idx_cmp(const void *_l, const void *_r)
566 {
567         const struct alloc_heap_entry *l = _l, *r = _r;
568
569         return cmp_int(l->bucket, r->bucket);
570 }
571
572 static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
573 {
574         struct bucket_array *buckets;
575         struct alloc_heap_entry e = { 0 };
576         u64 now, last_seq_ondisk;
577         size_t b, i, nr = 0;
578
579         down_read(&ca->bucket_lock);
580
581         buckets = bucket_array(ca);
582         ca->alloc_heap.used = 0;
583         now = atomic64_read(&c->io_clock[READ].now);
584         last_seq_ondisk = c->journal.last_seq_ondisk;
585
586         /*
587          * Find buckets with lowest read priority, by building a maxheap sorted
588          * by read priority and repeatedly replacing the maximum element until
589          * all buckets have been visited.
590          */
591         for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
592                 struct bucket *g = &buckets->b[b];
593                 struct bucket_mark m = READ_ONCE(g->mark);
594                 unsigned key = bucket_sort_key(g, m, now, last_seq_ondisk);
595
596                 cond_resched();
597
598                 if (!bch2_can_invalidate_bucket(ca, b, m))
599                         continue;
600
601                 if (e.nr && e.bucket + e.nr == b && e.key == key) {
602                         e.nr++;
603                 } else {
604                         if (e.nr)
605                                 heap_add_or_replace(&ca->alloc_heap, e,
606                                         -bucket_alloc_cmp, NULL);
607
608                         e = (struct alloc_heap_entry) {
609                                 .bucket = b,
610                                 .nr     = 1,
611                                 .key    = key,
612                         };
613                 }
614         }
615
616         if (e.nr)
617                 heap_add_or_replace(&ca->alloc_heap, e,
618                                 -bucket_alloc_cmp, NULL);
619
620         for (i = 0; i < ca->alloc_heap.used; i++)
621                 nr += ca->alloc_heap.data[i].nr;
622
623         while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
624                 nr -= ca->alloc_heap.data[0].nr;
625                 heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL);
626         }
627
628         up_read(&ca->bucket_lock);
629 }
630
631 static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
632 {
633         struct bucket_array *buckets = bucket_array(ca);
634         struct bucket_mark m;
635         size_t b, start;
636
637         if (ca->fifo_last_bucket <  ca->mi.first_bucket ||
638             ca->fifo_last_bucket >= ca->mi.nbuckets)
639                 ca->fifo_last_bucket = ca->mi.first_bucket;
640
641         start = ca->fifo_last_bucket;
642
643         do {
644                 ca->fifo_last_bucket++;
645                 if (ca->fifo_last_bucket == ca->mi.nbuckets)
646                         ca->fifo_last_bucket = ca->mi.first_bucket;
647
648                 b = ca->fifo_last_bucket;
649                 m = READ_ONCE(buckets->b[b].mark);
650
651                 if (bch2_can_invalidate_bucket(ca, b, m)) {
652                         struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
653
654                         heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
655                         if (heap_full(&ca->alloc_heap))
656                                 break;
657                 }
658
659                 cond_resched();
660         } while (ca->fifo_last_bucket != start);
661 }
662
663 static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
664 {
665         struct bucket_array *buckets = bucket_array(ca);
666         struct bucket_mark m;
667         size_t checked, i;
668
669         for (checked = 0;
670              checked < ca->mi.nbuckets / 2;
671              checked++) {
672                 size_t b = bch2_rand_range(ca->mi.nbuckets -
673                                            ca->mi.first_bucket) +
674                         ca->mi.first_bucket;
675
676                 m = READ_ONCE(buckets->b[b].mark);
677
678                 if (bch2_can_invalidate_bucket(ca, b, m)) {
679                         struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
680
681                         heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
682                         if (heap_full(&ca->alloc_heap))
683                                 break;
684                 }
685
686                 cond_resched();
687         }
688
689         sort(ca->alloc_heap.data,
690              ca->alloc_heap.used,
691              sizeof(ca->alloc_heap.data[0]),
692              bucket_idx_cmp, NULL);
693
694         /* remove duplicates: */
695         for (i = 0; i + 1 < ca->alloc_heap.used; i++)
696                 if (ca->alloc_heap.data[i].bucket ==
697                     ca->alloc_heap.data[i + 1].bucket)
698                         ca->alloc_heap.data[i].nr = 0;
699 }
700
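/*
 * Refill ca->alloc_heap according to the device's cache replacement policy
 * (LRU, FIFO or random), then re-sort the heap so the best candidates for
 * invalidation come first; returns the total number of buckets found.
 */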
701 static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
702 {
703         size_t i, nr = 0;
704
705         ca->inc_gen_needs_gc                    = 0;
706         ca->inc_gen_really_needs_gc             = 0;
707
708         switch (ca->mi.replacement) {
709         case BCH_CACHE_REPLACEMENT_lru:
710                 find_reclaimable_buckets_lru(c, ca);
711                 break;
712         case BCH_CACHE_REPLACEMENT_fifo:
713                 find_reclaimable_buckets_fifo(c, ca);
714                 break;
715         case BCH_CACHE_REPLACEMENT_random:
716                 find_reclaimable_buckets_random(c, ca);
717                 break;
718         }
719
720         heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL);
721
722         for (i = 0; i < ca->alloc_heap.used; i++)
723                 nr += ca->alloc_heap.data[i].nr;
724
725         return nr;
726 }
727
728 /*
729  * returns sequence number of most recent journal entry that updated this
730  * bucket:
731  */
732 static u64 bucket_journal_seq(struct bch_fs *c, struct bucket_mark m)
733 {
734         if (m.journal_seq_valid) {
735                 u64 journal_seq = atomic64_read(&c->journal.seq);
736                 u64 bucket_seq  = journal_seq;
737
738                 bucket_seq &= ~((u64) U16_MAX);
739                 bucket_seq |= m.journal_seq;
740
741                 if (bucket_seq > journal_seq)
742                         bucket_seq -= 1 << 16;
743
744                 return bucket_seq;
745         } else {
746                 return 0;
747         }
748 }
749
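/*
 * Rewrite the alloc key for bucket b with the generation bumped, the data
 * type and sector counts cleared, and the IO times reset - i.e. what
 * invalidating a bucket means on disk.
 */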
750 static int bucket_invalidate_btree(struct btree_trans *trans,
751                                    struct bch_dev *ca, u64 b)
752 {
753         struct bch_fs *c = trans->c;
754         struct bkey_alloc_unpacked u;
755         struct btree_iter iter;
756         int ret;
757
758         bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
759                              POS(ca->dev_idx, b),
760                              BTREE_ITER_CACHED|
761                              BTREE_ITER_CACHED_NOFILL|
762                              BTREE_ITER_INTENT);
763
764         ret = bch2_btree_iter_traverse(&iter);
765         if (ret)
766                 goto err;
767
768         u = alloc_mem_to_key(c, &iter);
769
770         u.gen++;
771         u.data_type     = 0;
772         u.dirty_sectors = 0;
773         u.cached_sectors = 0;
774         u.read_time     = atomic64_read(&c->io_clock[READ].now);
775         u.write_time    = atomic64_read(&c->io_clock[WRITE].now);
776
777         ret = bch2_alloc_write(trans, &iter, &u,
778                                BTREE_TRIGGER_BUCKET_INVALIDATE);
779 err:
780         bch2_trans_iter_exit(trans, &iter);
781         return ret;
782 }
783
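/*
 * Invalidate the first bucket on ca->alloc_heap: mark it owned by the
 * allocator and push it onto free_inc, then update the alloc btree - unless
 * the bucket has no cached data and nothing to flush, in which case only the
 * in-memory gen is bumped. On error the in-memory changes are rolled back.
 */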
784 static int bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
785                                       u64 *journal_seq, unsigned flags)
786 {
787         struct bucket *g;
788         struct bucket_mark m;
789         size_t b;
790         int ret = 0;
791
792         BUG_ON(!ca->alloc_heap.used ||
793                !ca->alloc_heap.data[0].nr);
794         b = ca->alloc_heap.data[0].bucket;
795
796         /* first, put on free_inc and mark as owned by allocator: */
797         percpu_down_read(&c->mark_lock);
798         g = bucket(ca, b);
799         m = READ_ONCE(g->mark);
800
801         BUG_ON(m.dirty_sectors);
802
803         bch2_mark_alloc_bucket(c, ca, b, true);
804
805         spin_lock(&c->freelist_lock);
806         verify_not_on_freelist(c, ca, b);
807         BUG_ON(!fifo_push(&ca->free_inc, b));
808         spin_unlock(&c->freelist_lock);
809
810         /*
811          * If we're not invalidating cached data, we only increment the bucket
812          * gen in memory here; the incremented gen will be updated in the btree
813          * by bch2_trans_mark_pointer():
814          */
815         if (!m.cached_sectors &&
816             !bucket_needs_journal_commit(m, c->journal.last_seq_ondisk)) {
817                 BUG_ON(m.data_type);
818                 bucket_cmpxchg(g, m, m.gen++);
819                 percpu_up_read(&c->mark_lock);
820                 goto out;
821         }
822
823         percpu_up_read(&c->mark_lock);
824
825         /*
826          * If the read-only path is trying to shut down, we can't be generating
827          * new btree updates:
828          */
829         if (test_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags)) {
830                 ret = 1;
831                 goto out;
832         }
833
834         ret = bch2_trans_do(c, NULL, journal_seq,
835                             BTREE_INSERT_NOCHECK_RW|
836                             BTREE_INSERT_NOFAIL|
837                             BTREE_INSERT_JOURNAL_RESERVED|
838                             flags,
839                             bucket_invalidate_btree(&trans, ca, b));
840 out:
841         if (!ret) {
842                 /* remove from alloc_heap: */
843                 struct alloc_heap_entry e, *top = ca->alloc_heap.data;
844
845                 top->bucket++;
846                 top->nr--;
847
848                 if (!top->nr)
849                         heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
850
851                 /*
852                  * Make sure we flush the last journal entry that updated this
853                  * bucket (i.e. deleting the last reference) before writing to
854                  * this bucket again:
855                  */
856                 *journal_seq = max(*journal_seq, bucket_journal_seq(c, m));
857         } else {
858                 size_t b2;
859
860                 /* remove from free_inc: */
861                 percpu_down_read(&c->mark_lock);
862                 spin_lock(&c->freelist_lock);
863
864                 bch2_mark_alloc_bucket(c, ca, b, false);
865
866                 BUG_ON(!fifo_pop_back(&ca->free_inc, b2));
867                 BUG_ON(b != b2);
868
869                 spin_unlock(&c->freelist_lock);
870                 percpu_up_read(&c->mark_lock);
871         }
872
873         return ret < 0 ? ret : 0;
874 }
875
876 /*
877  * Pull buckets off ca->alloc_heap, invalidate them, move them to ca->free_inc:
878  */
879 static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
880 {
881         u64 journal_seq = 0;
882         int ret = 0;
883
884         /* Only use nowait if we've already invalidated at least one bucket: */
885         while (!ret &&
886                !fifo_full(&ca->free_inc) &&
887                ca->alloc_heap.used) {
888                 if (kthread_should_stop()) {
889                         ret = 1;
890                         break;
891                 }
892
893                 ret = bch2_invalidate_one_bucket(c, ca, &journal_seq,
894                                 (!fifo_empty(&ca->free_inc)
895                                  ? BTREE_INSERT_NOWAIT : 0));
896                 /*
897                  * We only want to batch up invalidates when they're going to
898                  * require flushing the journal:
899                  */
900                 if (!journal_seq)
901                         break;
902         }
903
904         /* If we used NOWAIT, don't return the error: */
905         if (!fifo_empty(&ca->free_inc))
906                 ret = 0;
907         if (ret < 0)
908                 bch_err(ca, "error invalidating buckets: %i", ret);
909         if (ret)
910                 return ret;
911
912         if (journal_seq)
913                 ret = bch2_journal_flush_seq(&c->journal, journal_seq);
914         if (ret) {
915                 bch_err(ca, "journal error: %i", ret);
916                 return ret;
917         }
918
919         return 0;
920 }
921
922 static void alloc_thread_set_state(struct bch_dev *ca, unsigned new_state)
923 {
924         if (ca->allocator_state != new_state) {
925                 ca->allocator_state = new_state;
926                 closure_wake_up(&ca->fs->freelist_wait);
927         }
928 }
929
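/*
 * Try to move a bucket from free_inc onto one of the freelist reserves;
 * returns 1 on success, 0 if every eligible freelist is full, updating the
 * allocator state to running or blocked_full accordingly.
 */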
930 static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
931 {
932         unsigned i;
933         int ret = 0;
934
935         spin_lock(&c->freelist_lock);
936         for (i = 0; i < RESERVE_NR; i++) {
937                 /*
938                  * Don't strand buckets on the copygc freelist until
939                  * after recovery is finished:
940                  */
941                 if (i == RESERVE_MOVINGGC &&
942                     !test_bit(BCH_FS_STARTED, &c->flags))
943                         continue;
944
945                 if (fifo_push(&ca->free[i], b)) {
946                         fifo_pop(&ca->free_inc, b);
947                         ret = 1;
948                         break;
949                 }
950         }
951         spin_unlock(&c->freelist_lock);
952
953         ca->allocator_state = ret
954                 ? ALLOCATOR_running
955                 : ALLOCATOR_blocked_full;
956         closure_wake_up(&c->freelist_wait);
957         return ret;
958 }
959
960 static void discard_one_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
961 {
962         if (ca->mi.discard &&
963             blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
964                 blkdev_issue_discard(ca->disk_sb.bdev, bucket_to_sector(ca, b),
965                                      ca->mi.bucket_size, GFP_NOFS, 0);
966 }
967
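/*
 * Wait conditions for kthread_wait_freezable() in the allocator main loop:
 * allocator_thread_running() checks that the device is rw and the allocator
 * is enabled; buckets_available() checks for reclaimable buckets, discounting
 * those that need gc before their gen can be incremented again.
 */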
968 static bool allocator_thread_running(struct bch_dev *ca)
969 {
970         unsigned state = ca->mi.state == BCH_MEMBER_STATE_rw &&
971                 test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags)
972                 ? ALLOCATOR_running
973                 : ALLOCATOR_stopped;
974         alloc_thread_set_state(ca, state);
975         return state == ALLOCATOR_running;
976 }
977
978 static int buckets_available(struct bch_dev *ca, unsigned long gc_count)
979 {
980         s64 available = dev_buckets_reclaimable(ca) -
981                 (gc_count == ca->fs->gc_count ? ca->inc_gen_really_needs_gc : 0);
982         bool ret = available > 0;
983
984         alloc_thread_set_state(ca, ret
985                                ? ALLOCATOR_running
986                                : ALLOCATOR_blocked);
987         return ret;
988 }
989
990 /**
991  * bch2_allocator_thread - move buckets from free_inc to reserves
992  *
993  * ca->alloc_heap is populated by find_reclaimable_buckets(); buckets are
994  * then invalidated and moved onto the free_inc FIFO, and the reserves are
995  * depleted by bucket allocation. When we run out of free_inc, try to
996  * invalidate some buckets and write out prios and gens.
997  */
998 static int bch2_allocator_thread(void *arg)
999 {
1000         struct bch_dev *ca = arg;
1001         struct bch_fs *c = ca->fs;
1002         unsigned long gc_count = c->gc_count;
1003         size_t nr;
1004         int ret;
1005
1006         set_freezable();
1007
1008         while (1) {
1009                 ret = kthread_wait_freezable(allocator_thread_running(ca));
1010                 if (ret)
1011                         goto stop;
1012
1013                 while (!ca->alloc_heap.used) {
1014                         cond_resched();
1015
1016                         ret = kthread_wait_freezable(buckets_available(ca, gc_count));
1017                         if (ret)
1018                                 goto stop;
1019
1020                         gc_count = c->gc_count;
1021                         nr = find_reclaimable_buckets(c, ca);
1022
1023                         trace_alloc_scan(ca, nr, ca->inc_gen_needs_gc,
1024                                          ca->inc_gen_really_needs_gc);
1025
1026                         if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) ||
1027                              ca->inc_gen_really_needs_gc) &&
1028                             c->gc_thread) {
1029                                 atomic_inc(&c->kick_gc);
1030                                 wake_up_process(c->gc_thread);
1031                         }
1032                 }
1033
1034                 ret = bch2_invalidate_buckets(c, ca);
1035                 if (ret)
1036                         goto stop;
1037
1038                 while (!fifo_empty(&ca->free_inc)) {
1039                         u64 b = fifo_peek(&ca->free_inc);
1040
1041                         discard_one_bucket(c, ca, b);
1042
1043                         ret = kthread_wait_freezable(push_invalidated_bucket(c, ca, b));
1044                         if (ret)
1045                                 goto stop;
1046                 }
1047         }
1048 stop:
1049         alloc_thread_set_state(ca, ALLOCATOR_stopped);
1050         return 0;
1051 }
1052
1053 /* Startup/shutdown (ro/rw): */
1054
1055 void bch2_recalc_capacity(struct bch_fs *c)
1056 {
1057         struct bch_dev *ca;
1058         u64 capacity = 0, reserved_sectors = 0, gc_reserve;
1059         unsigned bucket_size_max = 0;
1060         unsigned long ra_pages = 0;
1061         unsigned i, j;
1062
1063         lockdep_assert_held(&c->state_lock);
1064
1065         for_each_online_member(ca, c, i) {
1066                 struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;
1067
1068                 ra_pages += bdi->ra_pages;
1069         }
1070
1071         bch2_set_ra_pages(c, ra_pages);
1072
1073         for_each_rw_member(ca, c, i) {
1074                 u64 dev_reserve = 0;
1075
1076                 /*
1077                  * We need to reserve buckets (from the number
1078                  * of currently available buckets) against
1079                  * foreground writes so that mainly copygc can
1080                  * make forward progress.
1081                  *
1082                  * We need enough to refill the various reserves
1083                  * from scratch - copygc will use its entire
1084                  * reserve all at once, then run again when
1085                  * its reserve is refilled (from the formerly
1086                  * available buckets).
1087                  *
1088                  * This reserve is just used when considering if
1089                  * allocations for foreground writes must wait -
1090                  * not -ENOSPC calculations.
1091                  */
1092                 for (j = 0; j < RESERVE_NONE; j++)
1093                         dev_reserve += ca->free[j].size;
1094
1095                 dev_reserve += 1;       /* btree write point */
1096                 dev_reserve += 1;       /* copygc write point */
1097                 dev_reserve += 1;       /* rebalance write point */
1098
1099                 dev_reserve *= ca->mi.bucket_size;
1100
1101                 capacity += bucket_to_sector(ca, ca->mi.nbuckets -
1102                                              ca->mi.first_bucket);
1103
1104                 reserved_sectors += dev_reserve * 2;
1105
1106                 bucket_size_max = max_t(unsigned, bucket_size_max,
1107                                         ca->mi.bucket_size);
1108         }
1109
1110         gc_reserve = c->opts.gc_reserve_bytes
1111                 ? c->opts.gc_reserve_bytes >> 9
1112                 : div64_u64(capacity * c->opts.gc_reserve_percent, 100);
1113
1114         reserved_sectors = max(gc_reserve, reserved_sectors);
1115
1116         reserved_sectors = min(reserved_sectors, capacity);
1117
1118         c->capacity = capacity - reserved_sectors;
1119
1120         c->bucket_size_max = bucket_size_max;
1121
1122         /* Wake up in case someone was waiting for buckets */
1123         closure_wake_up(&c->freelist_wait);
1124 }
1125
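/*
 * Returns true if any valid open bucket still points at this device - used
 * when going read-only to wait for in-flight writes to drain.
 */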
1126 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
1127 {
1128         struct open_bucket *ob;
1129         bool ret = false;
1130
1131         for (ob = c->open_buckets;
1132              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1133              ob++) {
1134                 spin_lock(&ob->lock);
1135                 if (ob->valid && !ob->on_partial_list &&
1136                     ob->ptr.dev == ca->dev_idx)
1137                         ret = true;
1138                 spin_unlock(&ob->lock);
1139         }
1140
1141         return ret;
1142 }
1143
1144 /* device goes ro: */
1145 void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
1146 {
1147         unsigned i;
1148
1149         BUG_ON(ca->alloc_thread);
1150
1151         /* First, remove device from allocation groups: */
1152
1153         for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1154                 clear_bit(ca->dev_idx, c->rw_devs[i].d);
1155
1156         /*
1157          * Capacity is calculated based off of devices in allocation groups:
1158          */
1159         bch2_recalc_capacity(c);
1160
1161         /* Next, close write points that point to this device... */
1162         for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
1163                 bch2_writepoint_stop(c, ca, &c->write_points[i]);
1164
1165         bch2_writepoint_stop(c, ca, &c->copygc_write_point);
1166         bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
1167         bch2_writepoint_stop(c, ca, &c->btree_write_point);
1168
1169         mutex_lock(&c->btree_reserve_cache_lock);
1170         while (c->btree_reserve_cache_nr) {
1171                 struct btree_alloc *a =
1172                         &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
1173
1174                 bch2_open_buckets_put(c, &a->ob);
1175         }
1176         mutex_unlock(&c->btree_reserve_cache_lock);
1177
1178         while (1) {
1179                 struct open_bucket *ob;
1180
1181                 spin_lock(&c->freelist_lock);
1182                 if (!ca->open_buckets_partial_nr) {
1183                         spin_unlock(&c->freelist_lock);
1184                         break;
1185                 }
1186                 ob = c->open_buckets +
1187                         ca->open_buckets_partial[--ca->open_buckets_partial_nr];
1188                 ob->on_partial_list = false;
1189                 spin_unlock(&c->freelist_lock);
1190
1191                 bch2_open_bucket_put(c, ob);
1192         }
1193
1194         bch2_ec_stop_dev(c, ca);
1195
1196         /*
1197          * Wake up threads that were blocked on allocation, so they can notice
1198          * the device can no longer be removed and the capacity has changed:
1199          */
1200         closure_wake_up(&c->freelist_wait);
1201
1202         /*
1203          * journal_res_get() can block waiting for free space in the journal -
1204          * it needs to notice there may not be devices to allocate from anymore:
1205          */
1206         wake_up(&c->journal.wait);
1207
1208         /* Now wait for any in flight writes: */
1209
1210         closure_wait_event(&c->open_buckets_wait,
1211                            !bch2_dev_has_open_write_point(c, ca));
1212 }
1213
1214 /* device goes rw: */
1215 void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
1216 {
1217         unsigned i;
1218
1219         for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1220                 if (ca->mi.data_allowed & (1 << i))
1221                         set_bit(ca->dev_idx, c->rw_devs[i].d);
1222 }
1223
1224 void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca)
1225 {
1226         if (ca->alloc_thread)
1227                 closure_wait_event(&c->freelist_wait,
1228                                    ca->allocator_state != ALLOCATOR_running);
1229 }
1230
1231 /* stop allocator thread: */
1232 void bch2_dev_allocator_stop(struct bch_dev *ca)
1233 {
1234         struct task_struct *p;
1235
1236         p = rcu_dereference_protected(ca->alloc_thread, 1);
1237         ca->alloc_thread = NULL;
1238
1239         /*
1240          * We need an rcu barrier between setting ca->alloc_thread = NULL and
1241          * the thread shutting down to avoid bch2_wake_allocator() racing:
1242          *
1243          * XXX: it would be better to have the rcu barrier be asynchronous
1244          * instead of blocking us here
1245          */
1246         synchronize_rcu();
1247
1248         if (p) {
1249                 kthread_stop(p);
1250                 put_task_struct(p);
1251         }
1252 }
1253
1254 /* start allocator thread: */
1255 int bch2_dev_allocator_start(struct bch_dev *ca)
1256 {
1257         struct task_struct *p;
1258
1259         /*
1260          * allocator thread already started?
1261          */
1262         if (ca->alloc_thread)
1263                 return 0;
1264
1265         p = kthread_create(bch2_allocator_thread, ca,
1266                            "bch-alloc/%s", ca->name);
1267         if (IS_ERR(p)) {
1268                 bch_err(ca->fs, "error creating allocator thread: %li",
1269                         PTR_ERR(p));
1270                 return PTR_ERR(p);
1271         }
1272
1273         get_task_struct(p);
1274         rcu_assign_pointer(ca->alloc_thread, p);
1275         wake_up_process(p);
1276         return 0;
1277 }
1278
1279 void bch2_fs_allocator_background_init(struct bch_fs *c)
1280 {
1281         spin_lock_init(&c->freelist_lock);
1282 }
1283
1284 void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
1285 {
1286         struct open_bucket *ob;
1287
1288         for (ob = c->open_buckets;
1289              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1290              ob++) {
1291                 spin_lock(&ob->lock);
1292                 if (ob->valid && !ob->on_partial_list) {
1293                         pr_buf(out, "%zu ref %u type %s\n",
1294                                ob - c->open_buckets,
1295                                atomic_read(&ob->pin),
1296                                bch2_data_types[ob->type]);
1297                 }
1298                 spin_unlock(&ob->lock);
1299         }
1300
1301 }