]> git.sesse.net Git - bcachefs-tools-debian/blob - libbcachefs/alloc_background.c
7ad16c21eb08cf2fd8d85e98b77cc907aecb59fd
[bcachefs-tools-debian] / libbcachefs / alloc_background.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include "bcachefs.h"
3 #include "alloc_background.h"
4 #include "alloc_foreground.h"
5 #include "btree_cache.h"
6 #include "btree_io.h"
7 #include "btree_key_cache.h"
8 #include "btree_update.h"
9 #include "btree_update_interior.h"
10 #include "btree_gc.h"
11 #include "buckets.h"
12 #include "buckets_waiting_for_journal.h"
13 #include "clock.h"
14 #include "debug.h"
15 #include "ec.h"
16 #include "error.h"
17 #include "recovery.h"
18 #include "varint.h"
19
20 #include <linux/kthread.h>
21 #include <linux/math64.h>
22 #include <linux/random.h>
23 #include <linux/rculist.h>
24 #include <linux/rcupdate.h>
25 #include <linux/sched/task.h>
26 #include <linux/sort.h>
27 #include <trace/events/bcachefs.h>
28
/*
 * Human-readable names for the allocator thread states, one string per
 * ALLOC_THREAD_STATES() entry, NULL-terminated so callers can scan the
 * array without a separate count.
 */
const char * const bch2_allocator_states[] = {
#define x(n)	#n,
	ALLOC_THREAD_STATES()
#undef x
	NULL
};
35
/*
 * Byte width of each v1 alloc field, derived from the per-field bit width
 * in BCH_ALLOC_FIELDS_V1() (bits / 8); indexed by BCH_ALLOC_FIELD_V1_*.
 */
static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
#define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
	BCH_ALLOC_FIELDS_V1()
#undef x
};
41
/*
 * Scratch buffer large enough for any packed alloc key: the bkey header, a
 * struct bch_alloc_v3, plus worst-case space for every varint-encoded v2
 * field (_pad sums _bits / 8 over BCH_ALLOC_FIELDS_V2()).
 */
struct bkey_alloc_buf {
	struct bkey_i	k;
	struct bch_alloc_v3 v;

#define x(_name,  _bits)		+ _bits / 8
	u8		_pad[0 + BCH_ALLOC_FIELDS_V2()];
#undef  x
} __attribute__((packed, aligned(8)));
50
51 /* Persistent alloc info: */
52
53 static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
54                                      const void **p, unsigned field)
55 {
56         unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
57         u64 v;
58
59         if (!(a->fields & (1 << field)))
60                 return 0;
61
62         switch (bytes) {
63         case 1:
64                 v = *((const u8 *) *p);
65                 break;
66         case 2:
67                 v = le16_to_cpup(*p);
68                 break;
69         case 4:
70                 v = le32_to_cpup(*p);
71                 break;
72         case 8:
73                 v = le64_to_cpup(*p);
74                 break;
75         default:
76                 BUG();
77         }
78
79         *p += bytes;
80         return v;
81 }
82
83 static inline void alloc_field_v1_put(struct bkey_i_alloc *a, void **p,
84                                       unsigned field, u64 v)
85 {
86         unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
87
88         if (!v)
89                 return;
90
91         a->v.fields |= 1 << field;
92
93         switch (bytes) {
94         case 1:
95                 *((u8 *) *p) = v;
96                 break;
97         case 2:
98                 *((__le16 *) *p) = cpu_to_le16(v);
99                 break;
100         case 4:
101                 *((__le32 *) *p) = cpu_to_le32(v);
102                 break;
103         case 8:
104                 *((__le64 *) *p) = cpu_to_le64(v);
105                 break;
106         default:
107                 BUG();
108         }
109
110         *p += bytes;
111 }
112
/*
 * Decode a v1 alloc key into @out: gen comes from the fixed header, then
 * each BCH_ALLOC_FIELDS_V1() field is pulled from the packed data in
 * declaration order (absent fields read as 0).
 */
static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
				 struct bkey_s_c k)
{
	const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
	const void *d = in->data;
	unsigned idx = 0;

	out->gen = in->gen;

#define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
	BCH_ALLOC_FIELDS_V1()
#undef  x
}
126
/*
 * Decode a v2 alloc key into @out. Fixed fields come from the header; the
 * BCH_ALLOC_FIELDS_V2() fields are varint-encoded, with only the first
 * nr_fields present on disk (missing trailing fields decode as 0).
 *
 * Returns 0 on success, a negative error on varint decode failure, or -1
 * if a decoded value doesn't fit in its (possibly narrower) destination
 * field.
 */
static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen	= a.v->gen;
	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (ret < 0)						\
			return ret;					\
		in += ret;						\
	} else {							\
		v = 0;							\
	}								\
	out->_name = v;							\
	if (v != out->_name)						\
		return -1;						\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef  x
	return 0;
}
159
/*
 * Decode a v3 alloc key into @out; identical layout to v2 plus a
 * journal_seq field in the fixed header.
 *
 * Returns 0 on success, a negative error on varint decode failure, or -1
 * if a decoded value doesn't fit in its destination field.
 */
static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen	= a.v->gen;
	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;
	out->journal_seq = le64_to_cpu(a.v->journal_seq);

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (ret < 0)						\
			return ret;					\
		in += ret;						\
	} else {							\
		v = 0;							\
	}								\
	out->_name = v;							\
	if (v != out->_name)						\
		return -1;						\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef  x
	return 0;
}
193
/*
 * Pack @src into @dst as a v3 alloc key. Every BCH_ALLOC_FIELDS_V2() field
 * is varint-encoded in order; runs of trailing zero fields are dropped by
 * remembering the position after the last nonzero field, and nr_fields
 * records how many fields were kept.
 */
static void bch2_alloc_pack_v3(struct bkey_alloc_buf *dst,
			       const struct bkey_alloc_unpacked src)
{
	struct bkey_i_alloc_v3 *a = bkey_alloc_v3_init(&dst->k);
	unsigned nr_fields = 0, last_nonzero_fieldnr = 0;
	u8 *out = a->v.data;
	/* dst is sized for the worst case - see struct bkey_alloc_buf */
	u8 *end = (void *) &dst[1];
	u8 *last_nonzero_field = out;
	unsigned bytes;

	a->k.p		= POS(src.dev, src.bucket);
	a->v.gen	= src.gen;
	a->v.oldest_gen	= src.oldest_gen;
	a->v.data_type	= src.data_type;
	a->v.journal_seq = cpu_to_le64(src.journal_seq);

#define x(_name, _bits)							\
	nr_fields++;							\
									\
	if (src._name) {						\
		out += bch2_varint_encode_fast(out, src._name);		\
									\
		last_nonzero_field = out;				\
		last_nonzero_fieldnr = nr_fields;			\
	} else {							\
		*out++ = 0;						\
	}

	BCH_ALLOC_FIELDS_V2()
#undef  x
	BUG_ON(out > end);

	/* Truncate trailing zero fields: */
	out = last_nonzero_field;
	a->v.nr_fields = last_nonzero_fieldnr;

	bytes = (u8 *) out - (u8 *) &a->v;
	set_bkey_val_bytes(&a->k, bytes);
	memset_u64s_tail(&a->v, 0, bytes);
}
233
234 struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
235 {
236         struct bkey_alloc_unpacked ret = {
237                 .dev    = k.k->p.inode,
238                 .bucket = k.k->p.offset,
239                 .gen    = 0,
240         };
241
242         switch (k.k->type) {
243         case KEY_TYPE_alloc:
244                 bch2_alloc_unpack_v1(&ret, k);
245                 break;
246         case KEY_TYPE_alloc_v2:
247                 bch2_alloc_unpack_v2(&ret, k);
248                 break;
249         case KEY_TYPE_alloc_v3:
250                 bch2_alloc_unpack_v3(&ret, k);
251                 break;
252         }
253
254         return ret;
255 }
256
257 static void bch2_alloc_pack(struct bch_fs *c,
258                             struct bkey_alloc_buf *dst,
259                             const struct bkey_alloc_unpacked src)
260 {
261         bch2_alloc_pack_v3(dst, src);
262 }
263
264 int bch2_alloc_write(struct btree_trans *trans, struct btree_iter *iter,
265                      struct bkey_alloc_unpacked *u, unsigned trigger_flags)
266 {
267         struct bkey_alloc_buf *a;
268
269         a = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));
270         if (IS_ERR(a))
271                 return PTR_ERR(a);
272
273         bch2_alloc_pack(trans->c, a, *u);
274         return bch2_trans_update(trans, iter, &a->k, trigger_flags);
275 }
276
277 static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
278 {
279         unsigned i, bytes = offsetof(struct bch_alloc, data);
280
281         for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
282                 if (a->fields & (1 << i))
283                         bytes += BCH_ALLOC_V1_FIELD_BYTES[i];
284
285         return DIV_ROUND_UP(bytes, sizeof(u64));
286 }
287
288 const char *bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k)
289 {
290         struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
291
292         if (k.k->p.inode >= c->sb.nr_devices ||
293             !c->devs[k.k->p.inode])
294                 return "invalid device";
295
296         /* allow for unknown fields */
297         if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v))
298                 return "incorrect value size";
299
300         return NULL;
301 }
302
303 const char *bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k)
304 {
305         struct bkey_alloc_unpacked u;
306
307         if (k.k->p.inode >= c->sb.nr_devices ||
308             !c->devs[k.k->p.inode])
309                 return "invalid device";
310
311         if (bch2_alloc_unpack_v2(&u, k))
312                 return "unpack error";
313
314         return NULL;
315 }
316
317 const char *bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k)
318 {
319         struct bkey_alloc_unpacked u;
320
321         if (k.k->p.inode >= c->sb.nr_devices ||
322             !c->devs[k.k->p.inode])
323                 return "invalid device";
324
325         if (bch2_alloc_unpack_v3(&u, k))
326                 return "unpack error";
327
328         return NULL;
329 }
330
/*
 * Print a human-readable representation of an alloc key (any version) to
 * @out: the fixed header fields followed by every BCH_ALLOC_FIELDS_V2()
 * field by name.
 */
void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
			   struct bkey_s_c k)
{
	struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);

	pr_buf(out, "gen %u oldest_gen %u data_type %s journal_seq %llu",
	       u.gen, u.oldest_gen, bch2_data_types[u.data_type],
	       u.journal_seq);
#define x(_name, ...)	pr_buf(out, " " #_name " %llu", (u64) u._name);
	BCH_ALLOC_FIELDS_V2()
#undef  x
}
343
/*
 * Walk the alloc btree at startup and initialize the in-memory bucket
 * state (gens, sector counts, stripe info, io times) from the on-disk
 * alloc keys.
 *
 * Holds gc_lock for read across the walk so the bucket arrays can't be
 * resized underneath us. Returns 0 on success or a negative error from
 * the btree iteration.
 */
int bch2_alloc_read(struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_dev *ca;
	struct bucket *g;
	struct bkey_alloc_unpacked u;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
	down_read(&c->gc_lock);

	for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
			   BTREE_ITER_PREFETCH, k, ret) {
		/* Skip non-alloc key types (deleted/whiteout slots etc.) */
		if (!bkey_is_alloc(k.k))
			continue;

		ca = bch_dev_bkey_exists(c, k.k->p.inode);
		g = bucket(ca, k.k->p.offset);
		u = bch2_alloc_unpack(k);

		*bucket_gen(ca, k.k->p.offset) = u.gen;
		g->_mark.gen		= u.gen;
		g->_mark.data_type	= u.data_type;
		g->_mark.dirty_sectors	= u.dirty_sectors;
		g->_mark.cached_sectors	= u.cached_sectors;
		g->_mark.stripe		= u.stripe != 0;
		g->stripe		= u.stripe;
		g->stripe_redundancy	= u.stripe_redundancy;
		g->io_time[READ]	= u.read_time;
		g->io_time[WRITE]	= u.write_time;
		g->oldest_gen		= u.oldest_gen;
		g->gen_valid		= 1;
	}
	bch2_trans_iter_exit(&trans, &iter);

	up_read(&c->gc_lock);
	bch2_trans_exit(&trans);

	if (ret) {
		bch_err(c, "error reading alloc info: %i", ret);
		return ret;
	}

	return 0;
}
391
/*
 * Flush the in-memory state of one bucket back to the alloc btree.
 *
 * Flushes any dirty key-cache entry for this position first, then compares
 * the on-disk key against the current in-memory bucket state and commits
 * an update only if they differ. The whole sequence is retried on -EINTR
 * (transaction restart).
 */
static int bch2_alloc_write_key(struct btree_trans *trans,
				struct btree_iter *iter,
				unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;
	struct bkey_alloc_unpacked old_u, new_u;
	int ret;
retry:
	bch2_trans_begin(trans);

	ret = bch2_btree_key_cache_flush(trans,
			BTREE_ID_alloc, iter->pos);
	if (ret)
		goto err;

	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	old_u	= bch2_alloc_unpack(k);
	new_u	= alloc_mem_to_key(c, iter);

	/* Nothing changed in memory - no write needed: */
	if (!bkey_alloc_unpacked_cmp(old_u, new_u))
		return 0;

	ret   = bch2_alloc_write(trans, iter, &new_u,
				  BTREE_TRIGGER_NORUN) ?:
		bch2_trans_commit(trans, NULL, NULL,
				BTREE_INSERT_NOFAIL|flags);
err:
	if (ret == -EINTR)
		goto retry;
	return ret;
}
428
/*
 * Write out in-memory alloc info for every bucket of every member device.
 *
 * On error we must drop the device ref taken by for_each_member_device()
 * ourselves before bailing out, since we leave the loop early.
 */
int bch2_alloc_write_all(struct bch_fs *c, unsigned flags)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bch_dev *ca;
	unsigned i;
	int ret = 0;

	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
	bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc, POS_MIN,
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	for_each_member_device(ca, c, i) {
		bch2_btree_iter_set_pos(&iter,
			POS(ca->dev_idx, ca->mi.first_bucket));

		while (iter.pos.offset < ca->mi.nbuckets) {
			ret = bch2_alloc_write_key(&trans, &iter, flags);
			if (ret) {
				percpu_ref_put(&ca->ref);
				goto err;
			}
			bch2_btree_iter_advance(&iter);
		}
	}
err:
	bch2_trans_iter_exit(&trans, &iter);
	bch2_trans_exit(&trans);
	return ret;
}
459
460 /* Bucket IO clocks: */
461
/*
 * Record the current io clock as the bucket's last read (@rw == READ) or
 * write time, committing an updated alloc key; no-op if the stored time is
 * already current.
 */
int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
			      size_t bucket_nr, int rw)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_alloc_unpacked u;
	u64 *time, now;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(dev, bucket_nr),
			     BTREE_ITER_CACHED|
			     BTREE_ITER_INTENT);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto out;

	u = bch2_alloc_unpack(k);

	time = rw == READ ? &u.read_time : &u.write_time;
	now = atomic64_read(&c->io_clock[rw].now);
	/* Already up to date - skip the write: */
	if (*time == now)
		goto out;

	*time = now;

	ret   = bch2_alloc_write(trans, &iter, &u, 0) ?:
		bch2_trans_commit(trans, NULL, NULL, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
495
496 /* Background allocator thread: */
497
498 /*
499  * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
500  * (marking them as invalidated on disk), then optionally issues discard
501  * commands to the newly free buckets, then puts them on the various freelists.
502  */
503
/*
 * Can bucket @b be invalidated and moved to the free list?
 *
 * Rejects buckets that are in use, already owned by the allocator, marked
 * nouse, or (while a new fs/device is being initialized) in the
 * superblock/journal region. As a side effect, bumps the per-device
 * counters that tell us when gc is needed to reset bucket gens.
 */
static bool bch2_can_invalidate_bucket(struct bch_dev *ca, size_t b,
				       struct bucket_mark m)
{
	u8 gc_gen;

	if (!is_available_bucket(m))
		return false;

	if (m.owned_by_allocator)
		return false;

	if (ca->buckets_nouse &&
	    test_bit(b, ca->buckets_nouse))
		return false;

	if (ca->new_fs_bucket_idx) {
		/*
		 * Device or filesystem is still being initialized, and we
		 * haven't fully marked superblocks & journal:
		 */
		if (is_superblock_bucket(ca, b))
			return false;

		if (b < ca->new_fs_bucket_idx)
			return false;
	}

	gc_gen = bucket_gc_gen(bucket(ca, b));

	/* Track how close this device's buckets are to the gc gen limit: */
	ca->inc_gen_needs_gc		+= gc_gen >= BUCKET_GC_GEN_MAX / 2;
	ca->inc_gen_really_needs_gc	+= gc_gen >= BUCKET_GC_GEN_MAX;

	return gc_gen < BUCKET_GC_GEN_MAX;
}
538
539 /*
540  * Determines what order we're going to reuse buckets, smallest bucket_key()
541  * first.
542  */
543
544 static unsigned bucket_sort_key(struct bucket *g, struct bucket_mark m,
545                                 u64 now, u64 last_seq_ondisk)
546 {
547         unsigned used = m.cached_sectors;
548
549         if (used) {
550                 /*
551                  * Prefer to keep buckets that have been read more recently, and
552                  * buckets that have more data in them:
553                  */
554                 u64 last_read = max_t(s64, 0, now - g->io_time[READ]);
555                 u32 last_read_scaled = max_t(u64, U32_MAX, div_u64(last_read, used));
556
557                 return -last_read_scaled;
558         } else {
559                 /*
560                  * Prefer to use buckets with smaller gc_gen so that we don't
561                  * have to walk the btree and recalculate oldest_gen - but shift
562                  * off the low bits so that buckets will still have equal sort
563                  * keys when there's only a small difference, so that we can
564                  * keep sequential buckets together:
565                  */
566                 return bucket_gc_gen(g) >> 4;
567         }
568 }
569
570 static inline int bucket_alloc_cmp(alloc_heap *h,
571                                    struct alloc_heap_entry l,
572                                    struct alloc_heap_entry r)
573 {
574         return  cmp_int(l.key, r.key) ?:
575                 cmp_int(r.nr, l.nr) ?:
576                 cmp_int(l.bucket, r.bucket);
577 }
578
579 static inline int bucket_idx_cmp(const void *_l, const void *_r)
580 {
581         const struct alloc_heap_entry *l = _l, *r = _r;
582
583         return cmp_int(l->bucket, r->bucket);
584 }
585
/*
 * Scan all of @ca's buckets and rebuild ca->alloc_heap with the best
 * invalidation candidates, coalescing adjacent buckets with equal sort
 * keys into single run entries, then trimming the heap down to roughly one
 * scan batch.
 */
static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets;
	struct alloc_heap_entry e = { 0 };
	u64 now, last_seq_ondisk;
	size_t b, i, nr = 0;

	down_read(&ca->bucket_lock);

	buckets = bucket_array(ca);
	ca->alloc_heap.used = 0;
	now = atomic64_read(&c->io_clock[READ].now);
	last_seq_ondisk = c->journal.flushed_seq_ondisk;

	/*
	 * Find buckets with lowest read priority, by building a maxheap sorted
	 * by read priority and repeatedly replacing the maximum element until
	 * all buckets have been visited.
	 */
	for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
		struct bucket *g = &buckets->b[b];
		struct bucket_mark m = READ_ONCE(g->mark);
		unsigned key = bucket_sort_key(g, m, now, last_seq_ondisk);

		cond_resched();

		if (!bch2_can_invalidate_bucket(ca, b, m))
			continue;

		/* Empty buckets still awaiting journal commit can't be reused yet: */
		if (!m.data_type &&
		    bch2_bucket_needs_journal_commit(c, last_seq_ondisk,
						     ca->dev_idx, b)) {
			ca->buckets_waiting_on_journal++;
			continue;
		}

		/* Extend the current run if adjacent with the same key: */
		if (e.nr && e.bucket + e.nr == b && e.key == key) {
			e.nr++;
		} else {
			if (e.nr)
				heap_add_or_replace(&ca->alloc_heap, e,
					-bucket_alloc_cmp, NULL);

			e = (struct alloc_heap_entry) {
				.bucket = b,
				.nr	= 1,
				.key	= key,
			};
		}
	}

	/* Flush the final pending run: */
	if (e.nr)
		heap_add_or_replace(&ca->alloc_heap, e,
				-bucket_alloc_cmp, NULL);

	for (i = 0; i < ca->alloc_heap.used; i++)
		nr += ca->alloc_heap.data[i].nr;

	/* Pop worst entries until we're within one scan batch: */
	while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
		nr -= ca->alloc_heap.data[0].nr;
		heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL);
	}

	up_read(&ca->bucket_lock);
}
651
652 static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
653 {
654         size_t i, nr = 0;
655
656         ca->inc_gen_needs_gc                    = 0;
657         ca->inc_gen_really_needs_gc             = 0;
658         ca->buckets_waiting_on_journal          = 0;
659
660         find_reclaimable_buckets_lru(c, ca);
661
662         heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL);
663
664         for (i = 0; i < ca->alloc_heap.used; i++)
665                 nr += ca->alloc_heap.data[i].nr;
666
667         return nr;
668 }
669
/*
 * Transactional helper for bucket invalidation: read the bucket's alloc
 * key, bump the gen and clear its data type and sector counts, reset both
 * io times to now, and queue the updated key with the BUCKET_INVALIDATE
 * trigger. The updated unpacked state is returned via @u.
 */
static int bucket_invalidate_btree(struct btree_trans *trans,
				   struct bch_dev *ca, u64 b,
				   struct bkey_alloc_unpacked *u)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
			     POS(ca->dev_idx, b),
			     BTREE_ITER_CACHED|
			     BTREE_ITER_INTENT);

	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	*u = bch2_alloc_unpack(k);
	u->gen++;
	u->data_type		= 0;
	u->dirty_sectors	= 0;
	u->cached_sectors	= 0;
	u->read_time		= atomic64_read(&c->io_clock[READ].now);
	u->write_time		= atomic64_read(&c->io_clock[WRITE].now);

	ret = bch2_alloc_write(trans, &iter, u,
			       BTREE_TRIGGER_BUCKET_INVALIDATE);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
703
/*
 * Invalidate the first bucket on ca->alloc_heap: mark it owned by the
 * allocator, push it onto free_inc, then commit the btree update that
 * bumps its gen. On btree failure the in-memory changes are rolled back.
 *
 * Returns 0 on success, 1 if the allocator is shutting down, or a negative
 * error from the transaction.
 */
static int bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
				      u64 *journal_seq, unsigned flags)
{
	struct bkey_alloc_unpacked u;
	size_t b;
	u64 commit_seq = 0;
	int ret = 0;

	/*
	 * If the read-only path is trying to shut down, we can't be generating
	 * new btree updates:
	 */
	if (test_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags))
		return 1;

	BUG_ON(!ca->alloc_heap.used ||
	       !ca->alloc_heap.data[0].nr);
	b = ca->alloc_heap.data[0].bucket;

	/* first, put on free_inc and mark as owned by allocator: */
	percpu_down_read(&c->mark_lock);

	bch2_mark_alloc_bucket(c, ca, b, true);

	spin_lock(&c->freelist_lock);
	verify_not_on_freelist(c, ca, b);
	BUG_ON(!fifo_push(&ca->free_inc, b));
	spin_unlock(&c->freelist_lock);

	percpu_up_read(&c->mark_lock);

	ret = bch2_trans_do(c, NULL, &commit_seq,
			    BTREE_INSERT_NOCHECK_RW|
			    BTREE_INSERT_NOFAIL|
			    BTREE_INSERT_JOURNAL_RESERVED|
			    flags,
			    bucket_invalidate_btree(&trans, ca, b, &u));

	if (!ret) {
		/* remove from alloc_heap: */
		struct alloc_heap_entry e, *top = ca->alloc_heap.data;

		top->bucket++;
		top->nr--;

		if (!top->nr)
			heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);

		/*
		 * If we're invalidating cached data then we need to wait on the
		 * journal commit:
		 */
		if (u.data_type)
			*journal_seq = max(*journal_seq, commit_seq);

		/*
		 * We were already waiting on u.journal_seq when we filtered out
		 * buckets that need journal commit:
		 */
		BUG_ON(*journal_seq > u.journal_seq);
	} else {
		size_t b2;

		/* remove from free_inc: */
		percpu_down_read(&c->mark_lock);
		spin_lock(&c->freelist_lock);

		bch2_mark_alloc_bucket(c, ca, b, false);

		BUG_ON(!fifo_pop_back(&ca->free_inc, b2));
		BUG_ON(b != b2);

		spin_unlock(&c->freelist_lock);
		percpu_up_read(&c->mark_lock);
	}

	return ret < 0 ? ret : 0;
}
782
/*
 * Pull buckets off ca->alloc_heap, invalidate them, move them to ca->free_inc:
 *
 * Loops until free_inc is full or the heap is exhausted, batching up
 * invalidates that require a journal flush and flushing once at the end.
 * Returns 0 on success, 1 on kthread stop, or a negative error.
 */
static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
{
	u64 journal_seq = 0;
	int ret = 0;

	/* Only use nowait if we've already invalidated at least one bucket: */
	while (!ret &&
	       !fifo_full(&ca->free_inc) &&
	       ca->alloc_heap.used) {
		if (kthread_should_stop()) {
			ret = 1;
			break;
		}

		ret = bch2_invalidate_one_bucket(c, ca, &journal_seq,
				(!fifo_empty(&ca->free_inc)
				 ? BTREE_INSERT_NOWAIT : 0));
		/*
		 * We only want to batch up invalidates when they're going to
		 * require flushing the journal:
		 */
		if (!journal_seq)
			break;
	}

	/* If we used NOWAIT, don't return the error: */
	if (!fifo_empty(&ca->free_inc))
		ret = 0;
	if (ret < 0)
		bch_err(ca, "error invalidating buckets: %i", ret);
	if (ret)
		return ret;

	if (journal_seq)
		ret = bch2_journal_flush_seq(&c->journal, journal_seq);
	if (ret) {
		bch_err(ca, "journal error: %i", ret);
		return ret;
	}

	return 0;
}
828
829 static void alloc_thread_set_state(struct bch_dev *ca, unsigned new_state)
830 {
831         if (ca->allocator_state != new_state) {
832                 ca->allocator_state = new_state;
833                 closure_wake_up(&ca->fs->freelist_wait);
834         }
835 }
836
/*
 * Try to move one bucket from free_inc onto one of the per-reserve
 * freelists. Returns 1 if a freelist accepted it (allocator stays
 * running), 0 if every eligible freelist was full (allocator transitions
 * to blocked_full). Either way, waiters on freelist_wait are woken.
 */
static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
{
	unsigned i;
	int ret = 0;

	spin_lock(&c->freelist_lock);
	for (i = 0; i < RESERVE_NR; i++) {
		/*
		 * Don't strand buckets on the copygc freelist until
		 * after recovery is finished:
		 */
		if (i == RESERVE_MOVINGGC &&
		    !test_bit(BCH_FS_STARTED, &c->flags))
			continue;

		if (fifo_push(&ca->free[i], b)) {
			fifo_pop(&ca->free_inc, b);
			ret = 1;
			break;
		}
	}
	spin_unlock(&c->freelist_lock);

	ca->allocator_state = ret
		? ALLOCATOR_running
		: ALLOCATOR_blocked_full;
	closure_wake_up(&c->freelist_wait);
	return ret;
}
866
867 static void discard_one_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
868 {
869         if (ca->mi.discard &&
870             blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
871                 blkdev_issue_discard(ca->disk_sb.bdev, bucket_to_sector(ca, b),
872                                      ca->mi.bucket_size, GFP_NOFS, 0);
873 }
874
875 static bool allocator_thread_running(struct bch_dev *ca)
876 {
877         unsigned state = ca->mi.state == BCH_MEMBER_STATE_rw &&
878                 test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags)
879                 ? ALLOCATOR_running
880                 : ALLOCATOR_stopped;
881         alloc_thread_set_state(ca, state);
882         return state == ALLOCATOR_running;
883 }
884
885 static int buckets_available(struct bch_dev *ca, unsigned long gc_count)
886 {
887         s64 available = dev_buckets_reclaimable(ca) -
888                 (gc_count == ca->fs->gc_count ? ca->inc_gen_really_needs_gc : 0);
889         bool ret = available > 0;
890
891         alloc_thread_set_state(ca, ret
892                                ? ALLOCATOR_running
893                                : ALLOCATOR_blocked);
894         return ret;
895 }
896
/**
 * bch2_allocator_thread - move buckets from free_inc to reserves
 *
 * The free_inc FIFO is populated by find_reclaimable_buckets(), and
 * the reserves are depleted by bucket allocation. When we run out
 * of free_inc, try to invalidate some buckets and write out
 * prios and gens.
 *
 * Runs as a freezable kthread per device; exits when kthread_stop() is
 * called (kthread_wait_freezable() returns nonzero).
 */
static int bch2_allocator_thread(void *arg)
{
	struct bch_dev *ca = arg;
	struct bch_fs *c = ca->fs;
	unsigned long gc_count = c->gc_count;
	size_t nr;
	int ret;

	set_freezable();

	while (1) {
		/* Block until the device is rw and the allocator is enabled: */
		ret = kthread_wait_freezable(allocator_thread_running(ca));
		if (ret)
			goto stop;

		/* Scan for buckets to invalidate until we find some: */
		while (!ca->alloc_heap.used) {
			cond_resched();

			/* Wait for reclaimable buckets to appear: */
			ret = kthread_wait_freezable(buckets_available(ca, gc_count));
			if (ret)
				goto stop;

			gc_count = c->gc_count;
			nr = find_reclaimable_buckets(c, ca);

			/*
			 * If the scan found nothing and buckets are waiting on
			 * the journal, flush synchronously; if it found few,
			 * kick off an async flush so more become reclaimable:
			 */
			if (!nr && ca->buckets_waiting_on_journal) {
				ret = bch2_journal_flush(&c->journal);
				if (ret)
					goto stop;
			} else if (nr < (ca->mi.nbuckets >> 6) &&
				   ca->buckets_waiting_on_journal >= nr / 2) {
				bch2_journal_flush_async(&c->journal, NULL);
			}

			/* Wake gc if bucket gens need it to make progress: */
			if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) ||
			     ca->inc_gen_really_needs_gc) &&
			    c->gc_thread) {
				atomic_inc(&c->kick_gc);
				wake_up_process(c->gc_thread);
			}

			trace_alloc_scan(ca, nr, ca->inc_gen_needs_gc,
					 ca->inc_gen_really_needs_gc);
		}

		ret = bch2_invalidate_buckets(c, ca);
		if (ret)
			goto stop;

		/* Discard and push invalidated buckets onto the reserves: */
		while (!fifo_empty(&ca->free_inc)) {
			u64 b = fifo_peek(&ca->free_inc);

			discard_one_bucket(c, ca, b);

			/* May block if all freelists are full: */
			ret = kthread_wait_freezable(push_invalidated_bucket(c, ca, b));
			if (ret)
				goto stop;
		}
	}
stop:
	alloc_thread_set_state(ca, ALLOCATOR_stopped);
	return 0;
}
968
969 /* Startup/shutdown (ro/rw): */
970
/*
 * Recompute c->capacity (usable sectors, net of reserves) from the set of
 * online rw members, and c->bucket_size_max.  Must be called with state_lock
 * held; called when devices change state or allocation groups change.
 */
void bch2_recalc_capacity(struct bch_fs *c)
{
	struct bch_dev *ca;
	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
	unsigned bucket_size_max = 0;
	unsigned long ra_pages = 0;
	unsigned i, j;

	lockdep_assert_held(&c->state_lock);

	/* Sum readahead across members to size the fs-wide readahead: */
	for_each_online_member(ca, c, i) {
		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;

		ra_pages += bdi->ra_pages;
	}

	bch2_set_ra_pages(c, ra_pages);

	for_each_rw_member(ca, c, i) {
		u64 dev_reserve = 0;

		/*
		 * We need to reserve buckets (from the number
		 * of currently available buckets) against
		 * foreground writes so that mainly copygc can
		 * make forward progress.
		 *
		 * We need enough to refill the various reserves
		 * from scratch - copygc will use its entire
		 * reserve all at once, then run against when
		 * its reserve is refilled (from the formerly
		 * available buckets).
		 *
		 * This reserve is just used when considering if
		 * allocations for foreground writes must wait -
		 * not -ENOSPC calculations.
		 */
		for (j = 0; j < RESERVE_NONE; j++)
			dev_reserve += ca->free[j].size;

		dev_reserve += 1;	/* btree write point */
		dev_reserve += 1;	/* copygc write point */
		dev_reserve += 1;	/* rebalance write point */

		dev_reserve *= ca->mi.bucket_size;

		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);

		reserved_sectors += dev_reserve * 2;

		bucket_size_max = max_t(unsigned, bucket_size_max,
					ca->mi.bucket_size);
	}

	/* gc reserve: explicit byte count if configured, else a percentage: */
	gc_reserve = c->opts.gc_reserve_bytes
		? c->opts.gc_reserve_bytes >> 9
		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);

	reserved_sectors = max(gc_reserve, reserved_sectors);

	/* Reserves can never exceed raw capacity: */
	reserved_sectors = min(reserved_sectors, capacity);

	c->capacity = capacity - reserved_sectors;

	c->bucket_size_max = bucket_size_max;

	/* Wake up in case someone was waiting for buckets */
	closure_wake_up(&c->freelist_wait);
}
1041
1042 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
1043 {
1044         struct open_bucket *ob;
1045         bool ret = false;
1046
1047         for (ob = c->open_buckets;
1048              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1049              ob++) {
1050                 spin_lock(&ob->lock);
1051                 if (ob->valid && !ob->on_partial_list &&
1052                     ob->dev == ca->dev_idx)
1053                         ret = true;
1054                 spin_unlock(&ob->lock);
1055         }
1056
1057         return ret;
1058 }
1059
/*
 * device goes ro: remove @ca from allocation and tear down everything that
 * could hand out or hold its buckets.  The allocator thread must already be
 * stopped.  Teardown order matters: stop new allocations first, then close
 * write points, release cached open buckets, and finally wait for in-flight
 * writes to drain.
 */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	BUG_ON(ca->alloc_thread);

	/* First, remove device from allocation groups: */

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);

	/*
	 * Capacity is calculated based off of devices in allocation groups:
	 */
	bch2_recalc_capacity(c);

	/* Next, close write points that point to this device... */
	for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
		bch2_writepoint_stop(c, ca, &c->write_points[i]);

	bch2_writepoint_stop(c, ca, &c->copygc_write_point);
	bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
	bch2_writepoint_stop(c, ca, &c->btree_write_point);

	/* Release open buckets cached for btree node allocations: */
	mutex_lock(&c->btree_reserve_cache_lock);
	while (c->btree_reserve_cache_nr) {
		struct btree_alloc *a =
			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];

		bch2_open_buckets_put(c, &a->ob);
	}
	mutex_unlock(&c->btree_reserve_cache_lock);

	/*
	 * Drain this device's partial open bucket list, re-taking the lock
	 * each iteration since bch2_open_bucket_put() can't be called with
	 * freelist_lock held:
	 */
	while (1) {
		struct open_bucket *ob;

		spin_lock(&c->freelist_lock);
		if (!ca->open_buckets_partial_nr) {
			spin_unlock(&c->freelist_lock);
			break;
		}
		ob = c->open_buckets +
			ca->open_buckets_partial[--ca->open_buckets_partial_nr];
		ob->on_partial_list = false;
		spin_unlock(&c->freelist_lock);

		bch2_open_bucket_put(c, ob);
	}

	bch2_ec_stop_dev(c, ca);

	/*
	 * Wake up threads that were blocked on allocation, so they can notice
	 * the device can no longer be removed and the capacity has changed:
	 */
	closure_wake_up(&c->freelist_wait);

	/*
	 * journal_res_get() can block waiting for free space in the journal -
	 * it needs to notice there may not be devices to allocate from anymore:
	 */
	wake_up(&c->journal.wait);

	/* Now wait for any in flight writes: */

	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));
}
1129
1130 /* device goes rw: */
1131 void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
1132 {
1133         unsigned i;
1134
1135         for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1136                 if (ca->mi.data_allowed & (1 << i))
1137                         set_bit(ca->dev_idx, c->rw_devs[i].d);
1138 }
1139
1140 void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca)
1141 {
1142         if (ca->alloc_thread)
1143                 closure_wait_event(&c->freelist_wait,
1144                                    ca->allocator_state != ALLOCATOR_running);
1145 }
1146
/*
 * stop allocator thread: clears ca->alloc_thread, waits out concurrent RCU
 * readers, then stops the thread and drops the reference taken at start.
 * Safe to call if the thread was never started (p will be NULL).
 */
void bch2_dev_allocator_stop(struct bch_dev *ca)
{
	struct task_struct *p;

	p = rcu_dereference_protected(ca->alloc_thread, 1);
	ca->alloc_thread = NULL;

	/*
	 * We need an rcu barrier between setting ca->alloc_thread = NULL and
	 * the thread shutting down to avoid bch2_wake_allocator() racing:
	 *
	 * XXX: it would be better to have the rcu barrier be asynchronous
	 * instead of blocking us here
	 */
	synchronize_rcu();

	if (p) {
		kthread_stop(p);
		put_task_struct(p);
	}
}
1169
1170 /* start allocator thread: */
1171 int bch2_dev_allocator_start(struct bch_dev *ca)
1172 {
1173         struct task_struct *p;
1174
1175         /*
1176          * allocator thread already started?
1177          */
1178         if (ca->alloc_thread)
1179                 return 0;
1180
1181         p = kthread_create(bch2_allocator_thread, ca,
1182                            "bch-alloc/%s", ca->name);
1183         if (IS_ERR(p)) {
1184                 bch_err(ca->fs, "error creating allocator thread: %li",
1185                         PTR_ERR(p));
1186                 return PTR_ERR(p);
1187         }
1188
1189         get_task_struct(p);
1190         rcu_assign_pointer(ca->alloc_thread, p);
1191         wake_up_process(p);
1192         return 0;
1193 }
1194
/* One-time init of allocator background state at filesystem setup. */
void bch2_fs_allocator_background_init(struct bch_fs *c)
{
	spin_lock_init(&c->freelist_lock);
}