// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "debug.h"
#include "ec.h"
#include "error.h"
#include "recovery.h"
#include "varint.h"

#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
#include <trace/events/bcachefs.h>

const char * const bch2_allocator_states[] = {
#define x(n)    #n,
        ALLOC_THREAD_STATES()
#undef x
        NULL
};

static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
#define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
        BCH_ALLOC_FIELDS_V1()
#undef x
};

/* Persistent alloc info: */

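/*
 * v1 alloc keys: the value holds a bitmap of which fields are present
 * (a->fields), followed by those fields packed as fixed width little endian
 * integers, with widths given by BCH_ALLOC_V1_FIELD_BYTES:
 */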
static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
                                     const void **p, unsigned field)
{
        unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
        u64 v;

        if (!(a->fields & (1 << field)))
                return 0;

        switch (bytes) {
        case 1:
                v = *((const u8 *) *p);
                break;
        case 2:
                v = le16_to_cpup(*p);
                break;
        case 4:
                v = le32_to_cpup(*p);
                break;
        case 8:
                v = le64_to_cpup(*p);
                break;
        default:
                BUG();
        }

        *p += bytes;
        return v;
}

static inline void alloc_field_v1_put(struct bkey_i_alloc *a, void **p,
                                      unsigned field, u64 v)
{
        unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];

        if (!v)
                return;

        a->v.fields |= 1 << field;

        switch (bytes) {
        case 1:
                *((u8 *) *p) = v;
                break;
        case 2:
                *((__le16 *) *p) = cpu_to_le16(v);
                break;
        case 4:
                *((__le32 *) *p) = cpu_to_le32(v);
                break;
        case 8:
                *((__le64 *) *p) = cpu_to_le64(v);
                break;
        default:
                BUG();
        }

        *p += bytes;
}

static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
                                 struct bkey_s_c k)
{
        const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
        const void *d = in->data;
        unsigned idx = 0;

        out->gen = in->gen;

#define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
        BCH_ALLOC_FIELDS_V1()
#undef  x
}

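/*
 * v2/v3 alloc keys: gen, oldest_gen and data_type (plus journal_seq in v3)
 * are stored directly in the value; the remaining fields are varint encoded,
 * with a->nr_fields giving how many were written. Fields past nr_fields
 * decode as zero:
 */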
static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
                                struct bkey_s_c k)
{
        struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
        const u8 *in = a.v->data;
        const u8 *end = bkey_val_end(a);
        unsigned fieldnr = 0;
        int ret;
        u64 v;

        out->gen        = a.v->gen;
        out->oldest_gen = a.v->oldest_gen;
        out->data_type  = a.v->data_type;

#define x(_name, _bits)                                                 \
        if (fieldnr < a.v->nr_fields) {                                 \
                ret = bch2_varint_decode_fast(in, end, &v);             \
                if (ret < 0)                                            \
                        return ret;                                     \
                in += ret;                                              \
        } else {                                                        \
                v = 0;                                                  \
        }                                                               \
        out->_name = v;                                                 \
        if (v != out->_name)                                            \
                return -1;                                              \
        fieldnr++;

        BCH_ALLOC_FIELDS_V2()
#undef  x
        return 0;
}

static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
                                struct bkey_s_c k)
{
        struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
        const u8 *in = a.v->data;
        const u8 *end = bkey_val_end(a);
        unsigned fieldnr = 0;
        int ret;
        u64 v;

        out->gen        = a.v->gen;
        out->oldest_gen = a.v->oldest_gen;
        out->data_type  = a.v->data_type;
        out->journal_seq = le64_to_cpu(a.v->journal_seq);

#define x(_name, _bits)                                                 \
        if (fieldnr < a.v->nr_fields) {                                 \
                ret = bch2_varint_decode_fast(in, end, &v);             \
                if (ret < 0)                                            \
                        return ret;                                     \
                in += ret;                                              \
        } else {                                                        \
                v = 0;                                                  \
        }                                                               \
        out->_name = v;                                                 \
        if (v != out->_name)                                            \
                return -1;                                              \
        fieldnr++;

        BCH_ALLOC_FIELDS_V2()
#undef  x
        return 0;
}

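/*
 * Packing always writes the newest format (alloc_v3): fields are varint
 * encoded in BCH_ALLOC_FIELDS_V2() order, and trailing zero fields are
 * dropped by truncating at last_nonzero_field:
 */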
static void bch2_alloc_pack_v3(struct bkey_alloc_buf *dst,
                               const struct bkey_alloc_unpacked src)
{
        struct bkey_i_alloc_v3 *a = bkey_alloc_v3_init(&dst->k);
        unsigned nr_fields = 0, last_nonzero_fieldnr = 0;
        u8 *out = a->v.data;
        u8 *end = (void *) &dst[1];
        u8 *last_nonzero_field = out;
        unsigned bytes;

        a->k.p          = POS(src.dev, src.bucket);
        a->v.gen        = src.gen;
        a->v.oldest_gen = src.oldest_gen;
        a->v.data_type  = src.data_type;
        a->v.journal_seq = cpu_to_le64(src.journal_seq);

#define x(_name, _bits)                                                 \
        nr_fields++;                                                    \
                                                                        \
        if (src._name) {                                                \
                out += bch2_varint_encode_fast(out, src._name);         \
                                                                        \
                last_nonzero_field = out;                               \
                last_nonzero_fieldnr = nr_fields;                       \
        } else {                                                        \
                *out++ = 0;                                             \
        }

        BCH_ALLOC_FIELDS_V2()
#undef  x
        BUG_ON(out > end);

        out = last_nonzero_field;
        a->v.nr_fields = last_nonzero_fieldnr;

        bytes = (u8 *) out - (u8 *) &a->v;
        set_bkey_val_bytes(&a->k, bytes);
        memset_u64s_tail(&a->v, 0, bytes);
}

struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
{
        struct bkey_alloc_unpacked ret = {
                .dev    = k.k->p.inode,
                .bucket = k.k->p.offset,
                .gen    = 0,
        };

        switch (k.k->type) {
        case KEY_TYPE_alloc:
                bch2_alloc_unpack_v1(&ret, k);
                break;
        case KEY_TYPE_alloc_v2:
                bch2_alloc_unpack_v2(&ret, k);
                break;
        case KEY_TYPE_alloc_v3:
                bch2_alloc_unpack_v3(&ret, k);
                break;
        }

        return ret;
}

struct bkey_alloc_buf *bch2_alloc_pack(struct btree_trans *trans,
                                       const struct bkey_alloc_unpacked src)
{
        struct bkey_alloc_buf *dst;

        dst = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));
        if (!IS_ERR(dst))
                bch2_alloc_pack_v3(dst, src);

        return dst;
}

int bch2_alloc_write(struct btree_trans *trans, struct btree_iter *iter,
                     struct bkey_alloc_unpacked *u, unsigned trigger_flags)
{
        struct bkey_alloc_buf *a = bch2_alloc_pack(trans, *u);

        return PTR_ERR_OR_ZERO(a) ?:
                bch2_trans_update(trans, iter, &a->k, trigger_flags);
}

static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
{
        unsigned i, bytes = offsetof(struct bch_alloc, data);

        for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
                if (a->fields & (1 << i))
                        bytes += BCH_ALLOC_V1_FIELD_BYTES[i];

        return DIV_ROUND_UP(bytes, sizeof(u64));
}

const char *bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
        struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);

        if (k.k->p.inode >= c->sb.nr_devices ||
            !c->devs[k.k->p.inode])
                return "invalid device";

        /* allow for unknown fields */
        if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v))
                return "incorrect value size";

        return NULL;
}

const char *bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
        struct bkey_alloc_unpacked u;

        if (k.k->p.inode >= c->sb.nr_devices ||
            !c->devs[k.k->p.inode])
                return "invalid device";

        if (bch2_alloc_unpack_v2(&u, k))
                return "unpack error";

        return NULL;
}

const char *bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
        struct bkey_alloc_unpacked u;

        if (k.k->p.inode >= c->sb.nr_devices ||
            !c->devs[k.k->p.inode])
                return "invalid device";

        if (bch2_alloc_unpack_v3(&u, k))
                return "unpack error";

        return NULL;
}

void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
                        struct bkey_s_c k)
{
        struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);

        pr_buf(out, "gen %u oldest_gen %u data_type %s journal_seq %llu",
               u.gen, u.oldest_gen, bch2_data_types[u.data_type],
               u.journal_seq);
#define x(_name, ...)   pr_buf(out, " " #_name " %llu", (u64) u._name);
        BCH_ALLOC_FIELDS_V2()
#undef  x
}

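/*
 * Walk the alloc btree at startup and initialize the in-memory bucket array
 * (gens, IO times, sector counts) from the on-disk alloc keys:
 */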
int bch2_alloc_read(struct bch_fs *c, bool gc, bool metadata_only)
{
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        struct bch_dev *ca;
        struct bucket *g;
        struct bkey_alloc_unpacked u;
        int ret;

        bch2_trans_init(&trans, c, 0, 0);

        for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
                           BTREE_ITER_PREFETCH, k, ret) {
                ca = bch_dev_bkey_exists(c, k.k->p.inode);
                g = __bucket(ca, k.k->p.offset, gc);
                u = bch2_alloc_unpack(k);

                if (!gc)
                        *bucket_gen(ca, k.k->p.offset) = u.gen;

                g->_mark.gen            = u.gen;
                g->io_time[READ]        = u.read_time;
                g->io_time[WRITE]       = u.write_time;
                g->oldest_gen           = !gc ? u.oldest_gen : u.gen;
                g->gen_valid            = 1;

                if (!gc ||
                    (metadata_only &&
                     (u.data_type == BCH_DATA_user ||
                      u.data_type == BCH_DATA_cached ||
                      u.data_type == BCH_DATA_parity))) {
                        g->_mark.data_type      = u.data_type;
                        g->_mark.dirty_sectors  = u.dirty_sectors;
                        g->_mark.cached_sectors = u.cached_sectors;
                        g->_mark.stripe         = u.stripe != 0;
                        g->stripe               = u.stripe;
                        g->stripe_redundancy    = u.stripe_redundancy;
                }
        }
        bch2_trans_iter_exit(&trans, &iter);

        bch2_trans_exit(&trans);

        if (ret)
                bch_err(c, "error reading alloc info: %i", ret);

        return ret;
}

/* Bucket IO clocks: */

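/*
 * Update a bucket's last read/write time in its alloc key to the current IO
 * clock, if it has changed:
 */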
int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
                              size_t bucket_nr, int rw)
{
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
        struct bkey_s_c k;
        struct bkey_alloc_unpacked u;
        u64 *time, now;
        int ret = 0;

        bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(dev, bucket_nr),
                             BTREE_ITER_CACHED|
                             BTREE_ITER_INTENT);
        k = bch2_btree_iter_peek_slot(&iter);
        ret = bkey_err(k);
        if (ret)
                goto out;

        u = bch2_alloc_unpack(k);

        time = rw == READ ? &u.read_time : &u.write_time;
        now = atomic64_read(&c->io_clock[rw].now);
        if (*time == now)
                goto out;

        *time = now;

        ret   = bch2_alloc_write(trans, &iter, &u, 0) ?:
                bch2_trans_commit(trans, NULL, NULL, 0);
out:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

/* Background allocator thread: */

/*
 * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
 * (marking them as invalidated on disk), then optionally issues discard
 * commands to the newly free buckets, then puts them on the various freelists.
 */

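/*
 * Returns true if a bucket may be invalidated and reused: it must be
 * available, not owned by the allocator, not marked nouse, not a
 * superblock/journal bucket on a filesystem still being initialized, and
 * its gc gen must not be about to wrap:
 */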
static bool bch2_can_invalidate_bucket(struct bch_dev *ca, size_t b,
                                       struct bucket_mark m)
{
        u8 gc_gen;

        if (!is_available_bucket(m))
                return false;

        if (m.owned_by_allocator)
                return false;

        if (ca->buckets_nouse &&
            test_bit(b, ca->buckets_nouse))
                return false;

        if (ca->new_fs_bucket_idx) {
                /*
                 * Device or filesystem is still being initialized, and we
                 * haven't fully marked superblocks & journal:
                 */
                if (is_superblock_bucket(ca, b))
                        return false;

                if (b < ca->new_fs_bucket_idx)
                        return false;
        }

        gc_gen = bucket_gc_gen(bucket(ca, b));

        ca->inc_gen_needs_gc            += gc_gen >= BUCKET_GC_GEN_MAX / 2;
        ca->inc_gen_really_needs_gc     += gc_gen >= BUCKET_GC_GEN_MAX;

        return gc_gen < BUCKET_GC_GEN_MAX;
}

/*
 * Determines the order in which we're going to reuse buckets, smallest
 * bucket_sort_key() first.
 */

static unsigned bucket_sort_key(struct bucket *g, struct bucket_mark m,
                                u64 now, u64 last_seq_ondisk)
{
        unsigned used = bucket_sectors_used(m);

        if (used) {
                /*
                 * Prefer to keep buckets that have been read more recently, and
                 * buckets that have more data in them:
                 */
                u64 last_read = max_t(s64, 0, now - g->io_time[READ]);
                u32 last_read_scaled = min_t(u64, U32_MAX, div_u64(last_read, used));

                return -last_read_scaled;
        } else {
                /*
                 * Prefer to use buckets with smaller gc_gen so that we don't
                 * have to walk the btree and recalculate oldest_gen - but shift
                 * off the low bits so that buckets will still have equal sort
                 * keys when there's only a small difference, so that we can
                 * keep sequential buckets together:
                 */
                return  (bucket_needs_journal_commit(m, last_seq_ondisk) << 4)|
                        (bucket_gc_gen(g) >> 4);
        }
}

static inline int bucket_alloc_cmp(alloc_heap *h,
                                   struct alloc_heap_entry l,
                                   struct alloc_heap_entry r)
{
        return  cmp_int(l.key, r.key) ?:
                cmp_int(r.nr, l.nr) ?:
                cmp_int(l.bucket, r.bucket);
}

static inline int bucket_idx_cmp(const void *_l, const void *_r)
{
        const struct alloc_heap_entry *l = _l, *r = _r;

        return cmp_int(l->bucket, r->bucket);
}

static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
{
        struct bucket_array *buckets;
        struct alloc_heap_entry e = { 0 };
        u64 now, last_seq_ondisk;
        size_t b, i, nr = 0;

        down_read(&ca->bucket_lock);

        buckets = bucket_array(ca);
        ca->alloc_heap.used = 0;
        now = atomic64_read(&c->io_clock[READ].now);
        last_seq_ondisk = c->journal.flushed_seq_ondisk;

        /*
         * Find buckets with lowest read priority, by building a maxheap sorted
         * by read priority and repeatedly replacing the maximum element until
         * all buckets have been visited.
         */
        for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
                struct bucket *g = &buckets->b[b];
                struct bucket_mark m = READ_ONCE(g->mark);
                unsigned key = bucket_sort_key(g, m, now, last_seq_ondisk);

                cond_resched();

                if (!bch2_can_invalidate_bucket(ca, b, m))
                        continue;

                if (e.nr && e.bucket + e.nr == b && e.key == key) {
                        e.nr++;
                } else {
                        if (e.nr)
                                heap_add_or_replace(&ca->alloc_heap, e,
                                        -bucket_alloc_cmp, NULL);

                        e = (struct alloc_heap_entry) {
                                .bucket = b,
                                .nr     = 1,
                                .key    = key,
                        };
                }
        }

        if (e.nr)
                heap_add_or_replace(&ca->alloc_heap, e,
                                -bucket_alloc_cmp, NULL);

        for (i = 0; i < ca->alloc_heap.used; i++)
                nr += ca->alloc_heap.data[i].nr;

        while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
                nr -= ca->alloc_heap.data[0].nr;
                heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL);
        }

        up_read(&ca->bucket_lock);
}

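/*
 * Rescan for reclaimable buckets, resort the alloc_heap, and return the
 * total number of buckets found:
 */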
static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
{
        size_t i, nr = 0;

        ca->inc_gen_needs_gc                    = 0;
        ca->inc_gen_really_needs_gc             = 0;

        find_reclaimable_buckets_lru(c, ca);

        heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL);

        for (i = 0; i < ca->alloc_heap.used; i++)
                nr += ca->alloc_heap.data[i].nr;

        return nr;
}

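/*
 * Invalidate a single bucket in the btree: bump its gen, zero out its
 * counters, reset its IO times, and write back the updated alloc key:
 */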
static int bucket_invalidate_btree(struct btree_trans *trans,
                                   struct bch_dev *ca, u64 b,
                                   struct bkey_alloc_unpacked *u)
{
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret;

        bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
                             POS(ca->dev_idx, b),
                             BTREE_ITER_CACHED|
                             BTREE_ITER_INTENT);

        k = bch2_btree_iter_peek_slot(&iter);
        ret = bkey_err(k);
        if (ret)
                goto err;

        *u = bch2_alloc_unpack(k);
        u->gen++;
        u->data_type            = 0;
        u->dirty_sectors        = 0;
        u->cached_sectors       = 0;
        u->read_time            = atomic64_read(&c->io_clock[READ].now);
        u->write_time           = atomic64_read(&c->io_clock[WRITE].now);

        ret = bch2_alloc_write(trans, &iter, u,
                               BTREE_TRIGGER_BUCKET_INVALIDATE);
err:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

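/*
 * Invalidate the bucket at the top of the alloc_heap: mark it owned by the
 * allocator, push it onto free_inc, then update its alloc key in the btree,
 * undoing the in-memory changes if the btree update fails:
 */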
static int bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
                                      u64 *journal_seq, unsigned flags)
{
        struct bkey_alloc_unpacked u;
        size_t b;
        int ret = 0;

        /*
         * If the read-only path is trying to shut down, we can't be generating
         * new btree updates:
         */
        if (test_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags))
                return 1;

        BUG_ON(!ca->alloc_heap.used ||
               !ca->alloc_heap.data[0].nr);
        b = ca->alloc_heap.data[0].bucket;

        /* first, put on free_inc and mark as owned by allocator: */
        percpu_down_read(&c->mark_lock);

        bch2_mark_alloc_bucket(c, ca, b, true);

        spin_lock(&c->freelist_lock);
        verify_not_on_freelist(c, ca, b);
        BUG_ON(!fifo_push(&ca->free_inc, b));
        spin_unlock(&c->freelist_lock);

        percpu_up_read(&c->mark_lock);

        ret = bch2_trans_do(c, NULL, journal_seq,
                            BTREE_INSERT_NOCHECK_RW|
                            BTREE_INSERT_NOFAIL|
                            BTREE_INSERT_JOURNAL_RESERVED|
                            flags,
                            bucket_invalidate_btree(&trans, ca, b, &u));

        if (!ret) {
                /* remove from alloc_heap: */
                struct alloc_heap_entry e, *top = ca->alloc_heap.data;

                top->bucket++;
                top->nr--;

                if (!top->nr)
                        heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);

                /*
                 * Make sure we flush the last journal entry that updated this
                 * bucket (i.e. deleting the last reference) before writing to
                 * this bucket again:
                 */
                *journal_seq = max(*journal_seq, u.journal_seq);
        } else {
                size_t b2;

                /* remove from free_inc: */
                percpu_down_read(&c->mark_lock);
                spin_lock(&c->freelist_lock);

                bch2_mark_alloc_bucket(c, ca, b, false);

                BUG_ON(!fifo_pop_back(&ca->free_inc, b2));
                BUG_ON(b != b2);

                spin_unlock(&c->freelist_lock);
                percpu_up_read(&c->mark_lock);
        }

        return ret < 0 ? ret : 0;
}

/*
 * Pull buckets off ca->alloc_heap, invalidate them, move them to ca->free_inc:
 */
static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
{
        u64 journal_seq = 0;
        int ret = 0;

        /* Only use nowait if we've already invalidated at least one bucket: */
        while (!ret &&
               !fifo_full(&ca->free_inc) &&
               ca->alloc_heap.used) {
                if (kthread_should_stop()) {
                        ret = 1;
                        break;
                }

                ret = bch2_invalidate_one_bucket(c, ca, &journal_seq,
                                (!fifo_empty(&ca->free_inc)
                                 ? BTREE_INSERT_NOWAIT : 0));
                /*
                 * We only want to batch up invalidates when they're going to
                 * require flushing the journal:
                 */
                if (!journal_seq)
                        break;
        }

        /* If we used NOWAIT, don't return the error: */
        if (!fifo_empty(&ca->free_inc))
                ret = 0;
        if (ret < 0)
                bch_err(ca, "error invalidating buckets: %i", ret);
        if (ret)
                return ret;

        if (journal_seq)
                ret = bch2_journal_flush_seq(&c->journal, journal_seq);
        if (ret) {
                bch_err(ca, "journal error: %i", ret);
                return ret;
        }

        return 0;
}

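/*
 * Update the allocator thread's state, waking up anyone waiting on
 * freelist_wait if it changed:
 */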
static void alloc_thread_set_state(struct bch_dev *ca, unsigned new_state)
{
        if (ca->allocator_state != new_state) {
                ca->allocator_state = new_state;
                closure_wake_up(&ca->fs->freelist_wait);
        }
}

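/*
 * Move an invalidated bucket from free_inc onto the first reserve freelist
 * with space; returns nonzero on success, and marks the allocator
 * blocked_full if every freelist is full:
 */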
static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
{
        unsigned i;
        int ret = 0;

        spin_lock(&c->freelist_lock);
        for (i = 0; i < RESERVE_NR; i++) {
                /*
                 * Don't strand buckets on the copygc freelist until
                 * after recovery is finished:
                 */
                if (i == RESERVE_MOVINGGC &&
                    !test_bit(BCH_FS_STARTED, &c->flags))
                        continue;

                if (fifo_push(&ca->free[i], b)) {
                        fifo_pop(&ca->free_inc, b);
                        ret = 1;
                        break;
                }
        }
        spin_unlock(&c->freelist_lock);

        ca->allocator_state = ret
                ? ALLOCATOR_running
                : ALLOCATOR_blocked_full;
        closure_wake_up(&c->freelist_wait);
        return ret;
}

static void discard_one_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
{
        if (ca->mi.discard &&
            blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
                blkdev_issue_discard(ca->disk_sb.bdev, bucket_to_sector(ca, b),
                                     ca->mi.bucket_size, GFP_NOFS, 0);
}

static bool allocator_thread_running(struct bch_dev *ca)
{
        unsigned state = ca->mi.state == BCH_MEMBER_STATE_rw &&
                test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags)
                ? ALLOCATOR_running
                : ALLOCATOR_stopped;
        alloc_thread_set_state(ca, state);
        return state == ALLOCATOR_running;
}

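/*
 * Returns true if this device has buckets the allocator can reclaim; also
 * updates the allocator state so waiters can see why we're blocked:
 */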
static int buckets_available(struct bch_dev *ca, unsigned long gc_count)
{
        s64 available = dev_buckets_reclaimable(ca) -
                (gc_count == ca->fs->gc_count ? ca->inc_gen_really_needs_gc : 0);
        bool ret = available > 0;

        alloc_thread_set_state(ca, ret
                               ? ALLOCATOR_running
                               : ALLOCATOR_blocked);
        return ret;
}

/**
 * bch2_allocator_thread - move buckets from free_inc to reserves
 *
 * The alloc_heap is populated by find_reclaimable_buckets(); buckets are
 * then invalidated and pushed onto the free_inc FIFO, and from there onto
 * the reserve freelists, which are depleted by bucket allocation. When we
 * run out of free_inc, try to invalidate some more buckets and write out
 * their updated alloc keys.
 */
static int bch2_allocator_thread(void *arg)
{
        struct bch_dev *ca = arg;
        struct bch_fs *c = ca->fs;
        unsigned long gc_count = c->gc_count;
        size_t nr;
        int ret;

        set_freezable();

        while (1) {
                ret = kthread_wait_freezable(allocator_thread_running(ca));
                if (ret)
                        goto stop;

                while (!ca->alloc_heap.used) {
                        cond_resched();

                        ret = kthread_wait_freezable(buckets_available(ca, gc_count));
                        if (ret)
                                goto stop;

                        gc_count = c->gc_count;
                        nr = find_reclaimable_buckets(c, ca);

                        trace_alloc_scan(ca, nr, ca->inc_gen_needs_gc,
                                         ca->inc_gen_really_needs_gc);

                        if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) ||
                             ca->inc_gen_really_needs_gc) &&
                            c->gc_thread) {
                                atomic_inc(&c->kick_gc);
                                wake_up_process(c->gc_thread);
                        }
                }

                ret = bch2_invalidate_buckets(c, ca);
                if (ret)
                        goto stop;

                while (!fifo_empty(&ca->free_inc)) {
                        u64 b = fifo_peek(&ca->free_inc);

                        discard_one_bucket(c, ca, b);

                        ret = kthread_wait_freezable(push_invalidated_bucket(c, ca, b));
                        if (ret)
                                goto stop;
                }
        }
stop:
        alloc_thread_set_state(ca, ALLOCATOR_stopped);
        return 0;
}

/* Startup/shutdown (ro/rw): */

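/*
 * Recompute the filesystem's usable capacity from the online rw members,
 * holding back a reserve (for copygc and internal write points) that
 * foreground writes aren't allowed to consume:
 */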
void bch2_recalc_capacity(struct bch_fs *c)
{
        struct bch_dev *ca;
        u64 capacity = 0, reserved_sectors = 0, gc_reserve;
        unsigned bucket_size_max = 0;
        unsigned long ra_pages = 0;
        unsigned i, j;

        lockdep_assert_held(&c->state_lock);

        for_each_online_member(ca, c, i) {
                struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;

                ra_pages += bdi->ra_pages;
        }

        bch2_set_ra_pages(c, ra_pages);

        for_each_rw_member(ca, c, i) {
                u64 dev_reserve = 0;

                /*
                 * We need to reserve buckets (from the number
                 * of currently available buckets) against
                 * foreground writes, mainly so that copygc can
                 * make forward progress.
                 *
                 * We need enough to refill the various reserves
                 * from scratch - copygc will use its entire
                 * reserve all at once, then run again once its
                 * reserve has been refilled (from the formerly
                 * available buckets).
                 *
                 * This reserve is only used when considering whether
                 * allocations for foreground writes must wait -
                 * not for -ENOSPC calculations.
                 */
                for (j = 0; j < RESERVE_NONE; j++)
                        dev_reserve += ca->free[j].size;

                dev_reserve += 1;       /* btree write point */
                dev_reserve += 1;       /* copygc write point */
                dev_reserve += 1;       /* rebalance write point */

                dev_reserve *= ca->mi.bucket_size;

                capacity += bucket_to_sector(ca, ca->mi.nbuckets -
                                             ca->mi.first_bucket);

                reserved_sectors += dev_reserve * 2;

                bucket_size_max = max_t(unsigned, bucket_size_max,
                                        ca->mi.bucket_size);
        }

        gc_reserve = c->opts.gc_reserve_bytes
                ? c->opts.gc_reserve_bytes >> 9
                : div64_u64(capacity * c->opts.gc_reserve_percent, 100);

        reserved_sectors = max(gc_reserve, reserved_sectors);

        reserved_sectors = min(reserved_sectors, capacity);

        c->capacity = capacity - reserved_sectors;

        c->bucket_size_max = bucket_size_max;

        /* Wake up in case someone was waiting for buckets */
        closure_wake_up(&c->freelist_wait);
}

static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
{
        struct open_bucket *ob;
        bool ret = false;

        for (ob = c->open_buckets;
             ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
             ob++) {
                spin_lock(&ob->lock);
                if (ob->valid && !ob->on_partial_list &&
                    ob->dev == ca->dev_idx)
                        ret = true;
                spin_unlock(&ob->lock);
        }

        return ret;
}

/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
{
        unsigned i;

        BUG_ON(ca->alloc_thread);

        /* First, remove device from allocation groups: */

        for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
                clear_bit(ca->dev_idx, c->rw_devs[i].d);

        /*
         * Capacity is calculated based off of devices in allocation groups:
         */
        bch2_recalc_capacity(c);

        /* Next, close write points that point to this device... */
        for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
                bch2_writepoint_stop(c, ca, &c->write_points[i]);

        bch2_writepoint_stop(c, ca, &c->copygc_write_point);
        bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
        bch2_writepoint_stop(c, ca, &c->btree_write_point);

        mutex_lock(&c->btree_reserve_cache_lock);
        while (c->btree_reserve_cache_nr) {
                struct btree_alloc *a =
                        &c->btree_reserve_cache[--c->btree_reserve_cache_nr];

                bch2_open_buckets_put(c, &a->ob);
        }
        mutex_unlock(&c->btree_reserve_cache_lock);

        while (1) {
                struct open_bucket *ob;

                spin_lock(&c->freelist_lock);
                if (!ca->open_buckets_partial_nr) {
                        spin_unlock(&c->freelist_lock);
                        break;
                }
                ob = c->open_buckets +
                        ca->open_buckets_partial[--ca->open_buckets_partial_nr];
                ob->on_partial_list = false;
                spin_unlock(&c->freelist_lock);

                bch2_open_bucket_put(c, ob);
        }

        bch2_ec_stop_dev(c, ca);

        /*
         * Wake up threads that were blocked on allocation, so they can notice
         * the device can no longer be removed and the capacity has changed:
         */
        closure_wake_up(&c->freelist_wait);

        /*
         * journal_res_get() can block waiting for free space in the journal -
         * it needs to notice there may not be devices to allocate from anymore:
         */
        wake_up(&c->journal.wait);

        /* Now wait for any in flight writes: */

        closure_wait_event(&c->open_buckets_wait,
                           !bch2_dev_has_open_write_point(c, ca));
}

/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
{
        unsigned i;

        for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
                if (ca->mi.data_allowed & (1 << i))
                        set_bit(ca->dev_idx, c->rw_devs[i].d);
}

void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca)
{
        if (ca->alloc_thread)
                closure_wait_event(&c->freelist_wait,
                                   ca->allocator_state != ALLOCATOR_running);
}

/* stop allocator thread: */
void bch2_dev_allocator_stop(struct bch_dev *ca)
{
        struct task_struct *p;

        p = rcu_dereference_protected(ca->alloc_thread, 1);
        ca->alloc_thread = NULL;

        /*
         * We need an rcu barrier between setting ca->alloc_thread = NULL and
         * the thread shutting down to avoid bch2_wake_allocator() racing:
         *
         * XXX: it would be better to have the rcu barrier be asynchronous
         * instead of blocking us here
         */
        synchronize_rcu();

        if (p) {
                kthread_stop(p);
                put_task_struct(p);
        }
}

/* start allocator thread: */
int bch2_dev_allocator_start(struct bch_dev *ca)
{
        struct task_struct *p;

        /*
         * allocator thread already started?
         */
        if (ca->alloc_thread)
                return 0;

        p = kthread_create(bch2_allocator_thread, ca,
                           "bch-alloc/%s", ca->name);
        if (IS_ERR(p)) {
                bch_err(ca->fs, "error creating allocator thread: %li",
                        PTR_ERR(p));
                return PTR_ERR(p);
        }

        get_task_struct(p);
        rcu_assign_pointer(ca->alloc_thread, p);
        wake_up_process(p);
        return 0;
}

void bch2_fs_allocator_background_init(struct bch_fs *c)
{
        spin_lock_init(&c->freelist_lock);
}