1 // SPDX-License-Identifier: GPL-2.0
2 #include "bcachefs.h"
3 #include "alloc_background.h"
4 #include "alloc_foreground.h"
5 #include "btree_cache.h"
6 #include "btree_io.h"
7 #include "btree_key_cache.h"
8 #include "btree_update.h"
9 #include "btree_update_interior.h"
10 #include "btree_gc.h"
11 #include "buckets.h"
12 #include "buckets_waiting_for_journal.h"
13 #include "clock.h"
14 #include "debug.h"
15 #include "ec.h"
16 #include "error.h"
17 #include "recovery.h"
18 #include "varint.h"
19
20 #include <linux/kthread.h>
21 #include <linux/math64.h>
22 #include <linux/random.h>
23 #include <linux/rculist.h>
24 #include <linux/rcupdate.h>
25 #include <linux/sched/task.h>
26 #include <linux/sort.h>
27 #include <trace/events/bcachefs.h>
28
29 const char * const bch2_allocator_states[] = {
30 #define x(n)    #n,
31         ALLOC_THREAD_STATES()
32 #undef x
33         NULL
34 };
35
36 static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
37 #define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
38         BCH_ALLOC_FIELDS_V1()
39 #undef x
40 };
41
42 /* Persistent alloc info: */
43
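/*
 * Three on-disk alloc key formats are handled here: KEY_TYPE_alloc (v1)
 * stores each field as a fixed-width little-endian integer, present only if
 * the corresponding bit in bch_alloc->fields is set; KEY_TYPE_alloc_v2 and
 * KEY_TYPE_alloc_v3 store the fields as varints, with v3 additionally
 * carrying a journal_seq. All three unpack into struct bkey_alloc_unpacked,
 * and new keys are always packed in the v3 format.
 */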
44 static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
45                                      const void **p, unsigned field)
46 {
47         unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
48         u64 v;
49
50         if (!(a->fields & (1 << field)))
51                 return 0;
52
53         switch (bytes) {
54         case 1:
55                 v = *((const u8 *) *p);
56                 break;
57         case 2:
58                 v = le16_to_cpup(*p);
59                 break;
60         case 4:
61                 v = le32_to_cpup(*p);
62                 break;
63         case 8:
64                 v = le64_to_cpup(*p);
65                 break;
66         default:
67                 BUG();
68         }
69
70         *p += bytes;
71         return v;
72 }
73
74 static inline void alloc_field_v1_put(struct bkey_i_alloc *a, void **p,
75                                       unsigned field, u64 v)
76 {
77         unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
78
79         if (!v)
80                 return;
81
82         a->v.fields |= 1 << field;
83
84         switch (bytes) {
85         case 1:
86                 *((u8 *) *p) = v;
87                 break;
88         case 2:
89                 *((__le16 *) *p) = cpu_to_le16(v);
90                 break;
91         case 4:
92                 *((__le32 *) *p) = cpu_to_le32(v);
93                 break;
94         case 8:
95                 *((__le64 *) *p) = cpu_to_le64(v);
96                 break;
97         default:
98                 BUG();
99         }
100
101         *p += bytes;
102 }
103
104 static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
105                                  struct bkey_s_c k)
106 {
107         const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
108         const void *d = in->data;
109         unsigned idx = 0;
110
111         out->gen = in->gen;
112
113 #define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
114         BCH_ALLOC_FIELDS_V1()
115 #undef  x
116 }
117
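/*
 * v2/v3 fields are stored as varints in field order; fields beyond
 * a.v->nr_fields are treated as zero. Assigning the decoded value to the
 * destination field and then comparing it back catches values that don't
 * fit in the unpacked struct's narrower field.
 */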
118 static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
119                                 struct bkey_s_c k)
120 {
121         struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
122         const u8 *in = a.v->data;
123         const u8 *end = bkey_val_end(a);
124         unsigned fieldnr = 0;
125         int ret;
126         u64 v;
127
128         out->gen        = a.v->gen;
129         out->oldest_gen = a.v->oldest_gen;
130         out->data_type  = a.v->data_type;
131
132 #define x(_name, _bits)                                                 \
133         if (fieldnr < a.v->nr_fields) {                                 \
134                 ret = bch2_varint_decode_fast(in, end, &v);             \
135                 if (ret < 0)                                            \
136                         return ret;                                     \
137                 in += ret;                                              \
138         } else {                                                        \
139                 v = 0;                                                  \
140         }                                                               \
141         out->_name = v;                                                 \
142         if (v != out->_name)                                            \
143                 return -1;                                              \
144         fieldnr++;
145
146         BCH_ALLOC_FIELDS_V2()
147 #undef  x
148         return 0;
149 }
150
151 static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
152                                 struct bkey_s_c k)
153 {
154         struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
155         const u8 *in = a.v->data;
156         const u8 *end = bkey_val_end(a);
157         unsigned fieldnr = 0;
158         int ret;
159         u64 v;
160
161         out->gen        = a.v->gen;
162         out->oldest_gen = a.v->oldest_gen;
163         out->data_type  = a.v->data_type;
164         out->journal_seq = le64_to_cpu(a.v->journal_seq);
165
166 #define x(_name, _bits)                                                 \
167         if (fieldnr < a.v->nr_fields) {                                 \
168                 ret = bch2_varint_decode_fast(in, end, &v);             \
169                 if (ret < 0)                                            \
170                         return ret;                                     \
171                 in += ret;                                              \
172         } else {                                                        \
173                 v = 0;                                                  \
174         }                                                               \
175         out->_name = v;                                                 \
176         if (v != out->_name)                                            \
177                 return -1;                                              \
178         fieldnr++;
179
180         BCH_ALLOC_FIELDS_V2()
181 #undef  x
182         return 0;
183 }
184
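/*
 * Packing always emits the v3 format: each field is varint encoded in
 * order, and trailing zero fields are dropped by rewinding to
 * last_nonzero_field before nr_fields and the value size are set.
 */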
185 static void bch2_alloc_pack_v3(struct bkey_alloc_buf *dst,
186                                const struct bkey_alloc_unpacked src)
187 {
188         struct bkey_i_alloc_v3 *a = bkey_alloc_v3_init(&dst->k);
189         unsigned nr_fields = 0, last_nonzero_fieldnr = 0;
190         u8 *out = a->v.data;
191         u8 *end = (void *) &dst[1];
192         u8 *last_nonzero_field = out;
193         unsigned bytes;
194
195         a->k.p          = POS(src.dev, src.bucket);
196         a->v.gen        = src.gen;
197         a->v.oldest_gen = src.oldest_gen;
198         a->v.data_type  = src.data_type;
199         a->v.journal_seq = cpu_to_le64(src.journal_seq);
200
201 #define x(_name, _bits)                                                 \
202         nr_fields++;                                                    \
203                                                                         \
204         if (src._name) {                                                \
205                 out += bch2_varint_encode_fast(out, src._name);         \
206                                                                         \
207                 last_nonzero_field = out;                               \
208                 last_nonzero_fieldnr = nr_fields;                       \
209         } else {                                                        \
210                 *out++ = 0;                                             \
211         }
212
213         BCH_ALLOC_FIELDS_V2()
214 #undef  x
215         BUG_ON(out > end);
216
217         out = last_nonzero_field;
218         a->v.nr_fields = last_nonzero_fieldnr;
219
220         bytes = (u8 *) out - (u8 *) &a->v;
221         set_bkey_val_bytes(&a->k, bytes);
222         memset_u64s_tail(&a->v, 0, bytes);
223 }
224
225 struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
226 {
227         struct bkey_alloc_unpacked ret = {
228                 .dev    = k.k->p.inode,
229                 .bucket = k.k->p.offset,
230                 .gen    = 0,
231         };
232
233         switch (k.k->type) {
234         case KEY_TYPE_alloc:
235                 bch2_alloc_unpack_v1(&ret, k);
236                 break;
237         case KEY_TYPE_alloc_v2:
238                 bch2_alloc_unpack_v2(&ret, k);
239                 break;
240         case KEY_TYPE_alloc_v3:
241                 bch2_alloc_unpack_v3(&ret, k);
242                 break;
243         }
244
245         return ret;
246 }
247
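/*
 * Packs src into a buffer allocated with bch2_trans_kmalloc(), so the
 * resulting key stays valid for the rest of the transaction.
 */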
248 struct bkey_alloc_buf *bch2_alloc_pack(struct btree_trans *trans,
249                                        const struct bkey_alloc_unpacked src)
250 {
251         struct bkey_alloc_buf *dst;
252
253         dst = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));
254         if (!IS_ERR(dst))
255                 bch2_alloc_pack_v3(dst, src);
256
257         return dst;
258 }
259
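/*
 * Pack an unpacked alloc key and queue it as an update in the current
 * transaction; returns an error if the packed key couldn't be allocated.
 */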
260 int bch2_alloc_write(struct btree_trans *trans, struct btree_iter *iter,
261                      struct bkey_alloc_unpacked *u, unsigned trigger_flags)
262 {
263         struct bkey_alloc_buf *a = bch2_alloc_pack(trans, *u);
264
265         return PTR_ERR_OR_ZERO(a) ?:
266                 bch2_trans_update(trans, iter, &a->k, trigger_flags);
267 }
268
269 static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
270 {
271         unsigned i, bytes = offsetof(struct bch_alloc, data);
272
273         for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
274                 if (a->fields & (1 << i))
275                         bytes += BCH_ALLOC_V1_FIELD_BYTES[i];
276
277         return DIV_ROUND_UP(bytes, sizeof(u64));
278 }
279
280 const char *bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k)
281 {
282         struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
283
284         if (k.k->p.inode >= c->sb.nr_devices ||
285             !c->devs[k.k->p.inode])
286                 return "invalid device";
287
288         /* allow for unknown fields */
289         if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v))
290                 return "incorrect value size";
291
292         return NULL;
293 }
294
295 const char *bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k)
296 {
297         struct bkey_alloc_unpacked u;
298
299         if (k.k->p.inode >= c->sb.nr_devices ||
300             !c->devs[k.k->p.inode])
301                 return "invalid device";
302
303         if (bch2_alloc_unpack_v2(&u, k))
304                 return "unpack error";
305
306         return NULL;
307 }
308
309 const char *bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k)
310 {
311         struct bkey_alloc_unpacked u;
312
313         if (k.k->p.inode >= c->sb.nr_devices ||
314             !c->devs[k.k->p.inode])
315                 return "invalid device";
316
317         if (bch2_alloc_unpack_v3(&u, k))
318                 return "unpack error";
319
320         return NULL;
321 }
322
323 void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
324                            struct bkey_s_c k)
325 {
326         struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
327
328         pr_buf(out, "gen %u oldest_gen %u data_type %s journal_seq %llu",
329                u.gen, u.oldest_gen, bch2_data_types[u.data_type],
330                u.journal_seq);
331 #define x(_name, ...)   pr_buf(out, " " #_name " %llu", (u64) u._name);
332         BCH_ALLOC_FIELDS_V2()
333 #undef  x
334 }
335
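/*
 * Initialize the in-memory bucket array from the alloc btree: gens and io
 * times are always copied; sector counts and stripe info are copied except
 * for the fields a gc pass is about to recompute itself.
 */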
336 int bch2_alloc_read(struct bch_fs *c, bool gc, bool metadata_only)
337 {
338         struct btree_trans trans;
339         struct btree_iter iter;
340         struct bkey_s_c k;
341         struct bch_dev *ca;
342         struct bucket *g;
343         struct bkey_alloc_unpacked u;
344         int ret;
345
346         bch2_trans_init(&trans, c, 0, 0);
347
348         for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
349                            BTREE_ITER_PREFETCH, k, ret) {
350                 ca = bch_dev_bkey_exists(c, k.k->p.inode);
351                 g = __bucket(ca, k.k->p.offset, gc);
352                 u = bch2_alloc_unpack(k);
353
354                 if (!gc)
355                         *bucket_gen(ca, k.k->p.offset) = u.gen;
356
357                 g->_mark.gen            = u.gen;
358                 g->io_time[READ]        = u.read_time;
359                 g->io_time[WRITE]       = u.write_time;
360                 g->oldest_gen           = !gc ? u.oldest_gen : u.gen;
361                 g->gen_valid            = 1;
362
363                 if (!gc ||
364                     (metadata_only &&
365                      (u.data_type == BCH_DATA_user ||
366                       u.data_type == BCH_DATA_cached ||
367                       u.data_type == BCH_DATA_parity))) {
368                         g->_mark.data_type      = u.data_type;
369                         g->_mark.dirty_sectors  = u.dirty_sectors;
370                         g->_mark.cached_sectors = u.cached_sectors;
371                         g->_mark.stripe         = u.stripe != 0;
372                         g->stripe               = u.stripe;
373                         g->stripe_redundancy    = u.stripe_redundancy;
374                 }
375
376         }
377         bch2_trans_iter_exit(&trans, &iter);
378
379         bch2_trans_exit(&trans);
380
381         if (ret)
382                 bch_err(c, "error reading alloc info: %i", ret);
383
384         return ret;
385 }
386
387 /* Bucket IO clocks: */
388
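/*
 * Reset a bucket's read or write timestamp to the current IO clock; the
 * read timestamps in particular feed bucket_sort_key() when choosing
 * cached buckets to reclaim.
 */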
389 int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
390                               size_t bucket_nr, int rw)
391 {
392         struct bch_fs *c = trans->c;
393         struct btree_iter iter;
394         struct bkey_s_c k;
395         struct bkey_alloc_unpacked u;
396         u64 *time, now;
397         int ret = 0;
398
399         bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(dev, bucket_nr),
400                              BTREE_ITER_CACHED|
401                              BTREE_ITER_INTENT);
402         k = bch2_btree_iter_peek_slot(&iter);
403         ret = bkey_err(k);
404         if (ret)
405                 goto out;
406
407         u = bch2_alloc_unpack(k);
408
409         time = rw == READ ? &u.read_time : &u.write_time;
410         now = atomic64_read(&c->io_clock[rw].now);
411         if (*time == now)
412                 goto out;
413
414         *time = now;
415
416         ret   = bch2_alloc_write(trans, &iter, &u, 0) ?:
417                 bch2_trans_commit(trans, NULL, NULL, 0);
418 out:
419         bch2_trans_iter_exit(trans, &iter);
420         return ret;
421 }
422
423 /* Background allocator thread: */
424
425 /*
426  * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
427  * (marking them as invalidated on disk), then optionally issues discard
428  * commands to the newly free buckets, then puts them on the various freelists.
429  */
430
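/*
 * A bucket can be invalidated only if it isn't currently in use, isn't
 * marked nouse, and hasn't had its gen bumped so many times since the last
 * gc that gen wraparound becomes a concern; buckets approaching that limit
 * are counted so the caller knows when gc needs to run.
 */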
431 static bool bch2_can_invalidate_bucket(struct bch_dev *ca, size_t b,
432                                        struct bucket_mark m)
433 {
434         u8 gc_gen;
435
436         if (!is_available_bucket(m))
437                 return false;
438
439         if (m.owned_by_allocator)
440                 return false;
441
442         if (ca->buckets_nouse &&
443             test_bit(b, ca->buckets_nouse))
444                 return false;
445
446         if (ca->new_fs_bucket_idx) {
447                 /*
448                  * Device or filesystem is still being initialized, and we
449                  * haven't fully marked superblocks & journal:
450                  */
451                 if (is_superblock_bucket(ca, b))
452                         return false;
453
454                 if (b < ca->new_fs_bucket_idx)
455                         return false;
456         }
457
458         gc_gen = bucket_gc_gen(bucket(ca, b));
459
460         ca->inc_gen_needs_gc            += gc_gen >= BUCKET_GC_GEN_MAX / 2;
461         ca->inc_gen_really_needs_gc     += gc_gen >= BUCKET_GC_GEN_MAX;
462
463         return gc_gen < BUCKET_GC_GEN_MAX;
464 }
465
466 /*
467  * Determines what order we're going to reuse buckets: smallest
468  * bucket_sort_key() first.
469  */
470
471 static unsigned bucket_sort_key(struct bucket *g, struct bucket_mark m,
472                                 u64 now, u64 last_seq_ondisk)
473 {
474         unsigned used = m.cached_sectors;
475
476         if (used) {
477                 /*
478                  * Prefer to keep buckets that have been read more recently, and
479                  * buckets that have more data in them:
480                  */
481                 u64 last_read = max_t(s64, 0, now - g->io_time[READ]);
482                 u32 last_read_scaled = min_t(u64, U32_MAX, div_u64(last_read, used));
483
484                 return -last_read_scaled;
485         } else {
486                 /*
487                  * Prefer to use buckets with smaller gc_gen so that we don't
488                  * have to walk the btree and recalculate oldest_gen - but shift
489                  * off the low bits so that buckets will still have equal sort
490                  * keys when there's only a small difference, so that we can
491                  * keep sequential buckets together:
492                  */
493                 return bucket_gc_gen(g) >> 4;
494         }
495 }
496
497 static inline int bucket_alloc_cmp(alloc_heap *h,
498                                    struct alloc_heap_entry l,
499                                    struct alloc_heap_entry r)
500 {
501         return  cmp_int(l.key, r.key) ?:
502                 cmp_int(r.nr, l.nr) ?:
503                 cmp_int(l.bucket, r.bucket);
504 }
505
506 static inline int bucket_idx_cmp(const void *_l, const void *_r)
507 {
508         const struct alloc_heap_entry *l = _l, *r = _r;
509
510         return cmp_int(l->bucket, r->bucket);
511 }
512
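/*
 * Candidates are accumulated as runs of adjacent buckets with the same sort
 * key (one alloc_heap_entry per run), which keeps sequential buckets
 * together and keeps the heap small; the heap is then trimmed so that
 * roughly ALLOC_SCAN_BATCH buckets remain.
 */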
513 static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
514 {
515         struct bucket_array *buckets;
516         struct alloc_heap_entry e = { 0 };
517         u64 now, last_seq_ondisk;
518         size_t b, i, nr = 0;
519
520         down_read(&ca->bucket_lock);
521
522         buckets = bucket_array(ca);
523         ca->alloc_heap.used = 0;
524         now = atomic64_read(&c->io_clock[READ].now);
525         last_seq_ondisk = c->journal.flushed_seq_ondisk;
526
527         /*
528          * Find buckets with lowest read priority, by building a maxheap sorted
529          * by read priority and repeatedly replacing the maximum element until
530          * all buckets have been visited.
531          */
532         for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
533                 struct bucket *g = &buckets->b[b];
534                 struct bucket_mark m = READ_ONCE(g->mark);
535                 unsigned key = bucket_sort_key(g, m, now, last_seq_ondisk);
536
537                 cond_resched();
538
539                 if (!bch2_can_invalidate_bucket(ca, b, m))
540                         continue;
541
542                 if (!m.data_type &&
543                     bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
544                                                      last_seq_ondisk,
545                                                      ca->dev_idx, b)) {
546                         ca->buckets_waiting_on_journal++;
547                         continue;
548                 }
549
550                 if (e.nr && e.bucket + e.nr == b && e.key == key) {
551                         e.nr++;
552                 } else {
553                         if (e.nr)
554                                 heap_add_or_replace(&ca->alloc_heap, e,
555                                         -bucket_alloc_cmp, NULL);
556
557                         e = (struct alloc_heap_entry) {
558                                 .bucket = b,
559                                 .nr     = 1,
560                                 .key    = key,
561                         };
562                 }
563         }
564
565         if (e.nr)
566                 heap_add_or_replace(&ca->alloc_heap, e,
567                                 -bucket_alloc_cmp, NULL);
568
569         for (i = 0; i < ca->alloc_heap.used; i++)
570                 nr += ca->alloc_heap.data[i].nr;
571
572         while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
573                 nr -= ca->alloc_heap.data[0].nr;
574                 heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL);
575         }
576
577         up_read(&ca->bucket_lock);
578 }
579
580 static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
581 {
582         size_t i, nr = 0;
583
584         ca->inc_gen_needs_gc                    = 0;
585         ca->inc_gen_really_needs_gc             = 0;
586         ca->buckets_waiting_on_journal          = 0;
587
588         find_reclaimable_buckets_lru(c, ca);
589
590         heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL);
591
592         for (i = 0; i < ca->alloc_heap.used; i++)
593                 nr += ca->alloc_heap.data[i].nr;
594
595         return nr;
596 }
597
598 static int bucket_invalidate_btree(struct btree_trans *trans,
599                                    struct bch_dev *ca, u64 b,
600                                    struct bkey_alloc_unpacked *u)
601 {
602         struct bch_fs *c = trans->c;
603         struct btree_iter iter;
604         struct bkey_s_c k;
605         int ret;
606
607         bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
608                              POS(ca->dev_idx, b),
609                              BTREE_ITER_CACHED|
610                              BTREE_ITER_INTENT);
611
612         k = bch2_btree_iter_peek_slot(&iter);
613         ret = bkey_err(k);
614         if (ret)
615                 goto err;
616
617         *u = bch2_alloc_unpack(k);
618         u->gen++;
619         u->data_type            = 0;
620         u->dirty_sectors        = 0;
621         u->cached_sectors       = 0;
622         u->read_time            = atomic64_read(&c->io_clock[READ].now);
623         u->write_time           = atomic64_read(&c->io_clock[WRITE].now);
624
625         ret = bch2_alloc_write(trans, &iter, u,
626                                BTREE_TRIGGER_BUCKET_INVALIDATE);
627 err:
628         bch2_trans_iter_exit(trans, &iter);
629         return ret;
630 }
631
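/*
 * Invalidate the bucket at the top of the alloc heap: mark it as owned by
 * the allocator and push it onto free_inc, then bump its gen with a btree
 * update. If the btree update fails, the in-memory state is rolled back.
 */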
632 static int bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
633                                       u64 *journal_seq, unsigned flags)
634 {
635         struct bkey_alloc_unpacked u;
636         size_t b;
637         u64 commit_seq = 0;
638         int ret = 0;
639
640         /*
641          * If the read-only path is trying to shut down, we can't be generating
642          * new btree updates:
643          */
644         if (test_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags))
645                 return 1;
646
647         BUG_ON(!ca->alloc_heap.used ||
648                !ca->alloc_heap.data[0].nr);
649         b = ca->alloc_heap.data[0].bucket;
650
651         /* first, put on free_inc and mark as owned by allocator: */
652         percpu_down_read(&c->mark_lock);
653
654         bch2_mark_alloc_bucket(c, ca, b, true);
655
656         spin_lock(&c->freelist_lock);
657         verify_not_on_freelist(c, ca, b);
658         BUG_ON(!fifo_push(&ca->free_inc, b));
659         spin_unlock(&c->freelist_lock);
660
661         percpu_up_read(&c->mark_lock);
662
663         ret = bch2_trans_do(c, NULL, &commit_seq,
664                             BTREE_INSERT_NOCHECK_RW|
665                             BTREE_INSERT_NOFAIL|
666                             BTREE_INSERT_JOURNAL_RESERVED|
667                             flags,
668                             bucket_invalidate_btree(&trans, ca, b, &u));
669
670         if (!ret) {
671                 /* remove from alloc_heap: */
672                 struct alloc_heap_entry e, *top = ca->alloc_heap.data;
673
674                 top->bucket++;
675                 top->nr--;
676
677                 if (!top->nr)
678                         heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
679
680                 /*
681                  * If we're invalidating cached data then we need to wait on the
682                  * journal commit:
683                  */
684                 if (u.data_type)
685                         *journal_seq = max(*journal_seq, commit_seq);
686
687                 /*
688                  * We already waited on u.journal_seq when we filtered out
689                  * buckets that need a journal commit:
690                  */
691                 BUG_ON(*journal_seq > u.journal_seq);
692         } else {
693                 size_t b2;
694
695                 /* remove from free_inc: */
696                 percpu_down_read(&c->mark_lock);
697                 spin_lock(&c->freelist_lock);
698
699                 bch2_mark_alloc_bucket(c, ca, b, false);
700
701                 BUG_ON(!fifo_pop_back(&ca->free_inc, b2));
702                 BUG_ON(b != b2);
703
704                 spin_unlock(&c->freelist_lock);
705                 percpu_up_read(&c->mark_lock);
706         }
707
708         return ret < 0 ? ret : 0;
709 }
710
711 /*
712  * Pull buckets off ca->alloc_heap, invalidate them, move them to ca->free_inc:
713  */
714 static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
715 {
716         u64 journal_seq = 0;
717         int ret = 0;
718
719         /* Only use nowait if we've already invalidated at least one bucket: */
720         while (!ret &&
721                !fifo_full(&ca->free_inc) &&
722                ca->alloc_heap.used) {
723                 if (kthread_should_stop()) {
724                         ret = 1;
725                         break;
726                 }
727
728                 ret = bch2_invalidate_one_bucket(c, ca, &journal_seq,
729                                 (!fifo_empty(&ca->free_inc)
730                                  ? BTREE_INSERT_NOWAIT : 0));
731                 /*
732                  * We only want to batch up invalidates when they're going to
733                  * require flushing the journal:
734                  */
735                 if (!journal_seq)
736                         break;
737         }
738
739         /* If we used NOWAIT, don't return the error: */
740         if (!fifo_empty(&ca->free_inc))
741                 ret = 0;
742         if (ret < 0)
743                 bch_err(ca, "error invalidating buckets: %i", ret);
744         if (ret)
745                 return ret;
746
747         if (journal_seq)
748                 ret = bch2_journal_flush_seq(&c->journal, journal_seq);
749         if (ret) {
750                 bch_err(ca, "journal error: %i", ret);
751                 return ret;
752         }
753
754         return 0;
755 }
756
757 static void alloc_thread_set_state(struct bch_dev *ca, unsigned new_state)
758 {
759         if (ca->allocator_state != new_state) {
760                 ca->allocator_state = new_state;
761                 closure_wake_up(&ca->fs->freelist_wait);
762         }
763 }
764
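/*
 * Move one invalidated bucket from free_inc to the first reserve freelist
 * with space (the copygc reserve is skipped until the filesystem has
 * finished starting); returns nonzero on success so the allocator thread's
 * wait loop can stop blocking.
 */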
765 static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
766 {
767         unsigned i;
768         int ret = 0;
769
770         spin_lock(&c->freelist_lock);
771         for (i = 0; i < RESERVE_NR; i++) {
772                 /*
773                  * Don't strand buckets on the copygc freelist until
774                  * after recovery is finished:
775                  */
776                 if (i == RESERVE_MOVINGGC &&
777                     !test_bit(BCH_FS_STARTED, &c->flags))
778                         continue;
779
780                 if (fifo_push(&ca->free[i], b)) {
781                         fifo_pop(&ca->free_inc, b);
782                         ret = 1;
783                         break;
784                 }
785         }
786         spin_unlock(&c->freelist_lock);
787
788         ca->allocator_state = ret
789                 ? ALLOCATOR_running
790                 : ALLOCATOR_blocked_full;
791         closure_wake_up(&c->freelist_wait);
792         return ret;
793 }
794
795 static void discard_one_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
796 {
797         if (!c->opts.nochanges &&
798             ca->mi.discard &&
799             blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
800                 blkdev_issue_discard(ca->disk_sb.bdev, bucket_to_sector(ca, b),
801                                      ca->mi.bucket_size, GFP_NOFS, 0);
802 }
803
804 static bool allocator_thread_running(struct bch_dev *ca)
805 {
806         unsigned state = ca->mi.state == BCH_MEMBER_STATE_rw &&
807                 test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags)
808                 ? ALLOCATOR_running
809                 : ALLOCATOR_stopped;
810         alloc_thread_set_state(ca, state);
811         return state == ALLOCATOR_running;
812 }
813
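/*
 * Wakeup condition for the allocator thread: reclaimable buckets are
 * available, not counting buckets that can't be reused until gc runs if gc
 * hasn't run since we last scanned.
 */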
814 static int buckets_available(struct bch_dev *ca, unsigned long gc_count)
815 {
816         s64 available = dev_buckets_reclaimable(ca) -
817                 (gc_count == ca->fs->gc_count ? ca->inc_gen_really_needs_gc : 0);
818         bool ret = available > 0;
819
820         alloc_thread_set_state(ca, ret
821                                ? ALLOCATOR_running
822                                : ALLOCATOR_blocked);
823         return ret;
824 }
825
826 /**
827  * bch2_allocator_thread - move buckets from free_inc to reserves
828  *
829  * free_inc is filled by bch2_invalidate_buckets() with buckets found by
830  * find_reclaimable_buckets(), and the reserves are depleted by bucket
831  * allocation. When we run out of free_inc, try to invalidate some more
832  * buckets and write out the updated alloc keys.
833  */
834 static int bch2_allocator_thread(void *arg)
835 {
836         struct bch_dev *ca = arg;
837         struct bch_fs *c = ca->fs;
838         unsigned long gc_count = c->gc_count;
839         size_t nr;
840         int ret;
841
842         set_freezable();
843
844         while (1) {
845                 ret = kthread_wait_freezable(allocator_thread_running(ca));
846                 if (ret)
847                         goto stop;
848
849                 while (!ca->alloc_heap.used) {
850                         cond_resched();
851
852                         ret = kthread_wait_freezable(buckets_available(ca, gc_count));
853                         if (ret)
854                                 goto stop;
855
856                         gc_count = c->gc_count;
857                         nr = find_reclaimable_buckets(c, ca);
858
859                         if (!nr && ca->buckets_waiting_on_journal) {
860                                 ret = bch2_journal_flush(&c->journal);
861                                 if (ret)
862                                         goto stop;
863                         } else if (nr < (ca->mi.nbuckets >> 6) &&
864                                    ca->buckets_waiting_on_journal >= nr / 2) {
865                                 bch2_journal_flush_async(&c->journal, NULL);
866                         }
867
868                         if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) ||
869                              ca->inc_gen_really_needs_gc) &&
870                             c->gc_thread) {
871                                 atomic_inc(&c->kick_gc);
872                                 wake_up_process(c->gc_thread);
873                         }
874
875                         trace_alloc_scan(ca, nr, ca->inc_gen_needs_gc,
876                                          ca->inc_gen_really_needs_gc);
877                 }
878
879                 ret = bch2_invalidate_buckets(c, ca);
880                 if (ret)
881                         goto stop;
882
883                 while (!fifo_empty(&ca->free_inc)) {
884                         u64 b = fifo_peek(&ca->free_inc);
885
886                         discard_one_bucket(c, ca, b);
887
888                         ret = kthread_wait_freezable(push_invalidated_bucket(c, ca, b));
889                         if (ret)
890                                 goto stop;
891                 }
892         }
893 stop:
894         alloc_thread_set_state(ca, ALLOCATOR_stopped);
895         return 0;
896 }
897
898 /* Startup/shutdown (ro/rw): */
899
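/*
 * Recompute c->capacity: the usable sectors of all rw members, less the
 * sectors held back for the internal reserves and for the configured gc
 * reserve (gc_reserve_bytes or gc_reserve_percent).
 */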
900 void bch2_recalc_capacity(struct bch_fs *c)
901 {
902         struct bch_dev *ca;
903         u64 capacity = 0, reserved_sectors = 0, gc_reserve;
904         unsigned bucket_size_max = 0;
905         unsigned long ra_pages = 0;
906         unsigned i, j;
907
908         lockdep_assert_held(&c->state_lock);
909
910         for_each_online_member(ca, c, i) {
911                 struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;
912
913                 ra_pages += bdi->ra_pages;
914         }
915
916         bch2_set_ra_pages(c, ra_pages);
917
918         for_each_rw_member(ca, c, i) {
919                 u64 dev_reserve = 0;
920
921                 /*
922                  * We need to reserve buckets (from the number
923                  * of currently available buckets) against
924                  * foreground writes so that mainly copygc can
925                  * make forward progress.
926                  *
927                  * We need enough to refill the various reserves
928                  * from scratch - copygc will use its entire
929                  * reserve all at once, then run again when
930                  * its reserve is refilled (from the formerly
931                  * available buckets).
932                  *
933                  * This reserve is just used when considering if
934                  * allocations for foreground writes must wait -
935                  * not for -ENOSPC calculations.
936                  */
937                 for (j = 0; j < RESERVE_NONE; j++)
938                         dev_reserve += ca->free[j].size;
939
940                 dev_reserve += 1;       /* btree write point */
941                 dev_reserve += 1;       /* copygc write point */
942                 dev_reserve += 1;       /* rebalance write point */
943
944                 dev_reserve *= ca->mi.bucket_size;
945
946                 capacity += bucket_to_sector(ca, ca->mi.nbuckets -
947                                              ca->mi.first_bucket);
948
949                 reserved_sectors += dev_reserve * 2;
950
951                 bucket_size_max = max_t(unsigned, bucket_size_max,
952                                         ca->mi.bucket_size);
953         }
954
955         gc_reserve = c->opts.gc_reserve_bytes
956                 ? c->opts.gc_reserve_bytes >> 9
957                 : div64_u64(capacity * c->opts.gc_reserve_percent, 100);
958
959         reserved_sectors = max(gc_reserve, reserved_sectors);
960
961         reserved_sectors = min(reserved_sectors, capacity);
962
963         c->capacity = capacity - reserved_sectors;
964
965         c->bucket_size_max = bucket_size_max;
966
967         /* Wake up in case someone was waiting for buckets */
968         closure_wake_up(&c->freelist_wait);
969 }
970
971 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
972 {
973         struct open_bucket *ob;
974         bool ret = false;
975
976         for (ob = c->open_buckets;
977              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
978              ob++) {
979                 spin_lock(&ob->lock);
980                 if (ob->valid && !ob->on_partial_list &&
981                     ob->dev == ca->dev_idx)
982                         ret = true;
983                 spin_unlock(&ob->lock);
984         }
985
986         return ret;
987 }
988
989 /* device goes ro: */
990 void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
991 {
992         unsigned i;
993
994         BUG_ON(ca->alloc_thread);
995
996         /* First, remove device from allocation groups: */
997
998         for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
999                 clear_bit(ca->dev_idx, c->rw_devs[i].d);
1000
1001         /*
1002          * Capacity is calculated based off of devices in allocation groups:
1003          */
1004         bch2_recalc_capacity(c);
1005
1006         /* Next, close write points that point to this device... */
1007         for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
1008                 bch2_writepoint_stop(c, ca, &c->write_points[i]);
1009
1010         bch2_writepoint_stop(c, ca, &c->copygc_write_point);
1011         bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
1012         bch2_writepoint_stop(c, ca, &c->btree_write_point);
1013
1014         mutex_lock(&c->btree_reserve_cache_lock);
1015         while (c->btree_reserve_cache_nr) {
1016                 struct btree_alloc *a =
1017                         &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
1018
1019                 bch2_open_buckets_put(c, &a->ob);
1020         }
1021         mutex_unlock(&c->btree_reserve_cache_lock);
1022
1023         while (1) {
1024                 struct open_bucket *ob;
1025
1026                 spin_lock(&c->freelist_lock);
1027                 if (!ca->open_buckets_partial_nr) {
1028                         spin_unlock(&c->freelist_lock);
1029                         break;
1030                 }
1031                 ob = c->open_buckets +
1032                         ca->open_buckets_partial[--ca->open_buckets_partial_nr];
1033                 ob->on_partial_list = false;
1034                 spin_unlock(&c->freelist_lock);
1035
1036                 bch2_open_bucket_put(c, ob);
1037         }
1038
1039         bch2_ec_stop_dev(c, ca);
1040
1041         /*
1042          * Wake up threads that were blocked on allocation, so they can notice
1043          * the device is no longer available for allocation and the capacity has changed:
1044          */
1045         closure_wake_up(&c->freelist_wait);
1046
1047         /*
1048          * journal_res_get() can block waiting for free space in the journal -
1049          * it needs to notice there may not be devices to allocate from anymore:
1050          */
1051         wake_up(&c->journal.wait);
1052
1053         /* Now wait for any in flight writes: */
1054
1055         closure_wait_event(&c->open_buckets_wait,
1056                            !bch2_dev_has_open_write_point(c, ca));
1057 }
1058
1059 /* device goes rw: */
1060 void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
1061 {
1062         unsigned i;
1063
1064         for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1065                 if (ca->mi.data_allowed & (1 << i))
1066                         set_bit(ca->dev_idx, c->rw_devs[i].d);
1067 }
1068
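/* Wait for the device's allocator thread to go idle (blocked or stopped): */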
1069 void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca)
1070 {
1071         if (ca->alloc_thread)
1072                 closure_wait_event(&c->freelist_wait,
1073                                    ca->allocator_state != ALLOCATOR_running);
1074 }
1075
1076 /* stop allocator thread: */
1077 void bch2_dev_allocator_stop(struct bch_dev *ca)
1078 {
1079         struct task_struct *p;
1080
1081         p = rcu_dereference_protected(ca->alloc_thread, 1);
1082         ca->alloc_thread = NULL;
1083
1084         /*
1085          * We need an rcu barrier between setting ca->alloc_thread = NULL and
1086          * the thread shutting down to avoid bch2_wake_allocator() racing:
1087          *
1088          * XXX: it would be better to have the rcu barrier be asynchronous
1089          * instead of blocking us here
1090          */
1091         synchronize_rcu();
1092
1093         if (p) {
1094                 kthread_stop(p);
1095                 put_task_struct(p);
1096         }
1097 }
1098
1099 /* start allocator thread: */
1100 int bch2_dev_allocator_start(struct bch_dev *ca)
1101 {
1102         struct task_struct *p;
1103
1104         /*
1105          * allocator thread already started?
1106          */
1107         if (ca->alloc_thread)
1108                 return 0;
1109
1110         p = kthread_create(bch2_allocator_thread, ca,
1111                            "bch-alloc/%s", ca->name);
1112         if (IS_ERR(p)) {
1113                 bch_err(ca->fs, "error creating allocator thread: %li",
1114                         PTR_ERR(p));
1115                 return PTR_ERR(p);
1116         }
1117
1118         get_task_struct(p);
1119         rcu_assign_pointer(ca->alloc_thread, p);
1120         wake_up_process(p);
1121         return 0;
1122 }
1123
1124 void bch2_fs_allocator_background_init(struct bch_fs *c)
1125 {
1126         spin_lock_init(&c->freelist_lock);
1127 }