libbcachefs/alloc_background.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include "bcachefs.h"
3 #include "alloc_background.h"
4 #include "alloc_foreground.h"
5 #include "btree_cache.h"
6 #include "btree_io.h"
7 #include "btree_key_cache.h"
8 #include "btree_update.h"
9 #include "btree_update_interior.h"
10 #include "btree_gc.h"
11 #include "buckets.h"
12 #include "clock.h"
13 #include "debug.h"
14 #include "ec.h"
15 #include "error.h"
16 #include "recovery.h"
17 #include "varint.h"
18
19 #include <linux/kthread.h>
20 #include <linux/math64.h>
21 #include <linux/random.h>
22 #include <linux/rculist.h>
23 #include <linux/rcupdate.h>
24 #include <linux/sched/task.h>
25 #include <linux/sort.h>
26 #include <trace/events/bcachefs.h>
27
28 const char * const bch2_allocator_states[] = {
29 #define x(n)    #n,
30         ALLOC_THREAD_STATES()
31 #undef x
32         NULL
33 };
34
35 static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
36 #define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
37         BCH_ALLOC_FIELDS_V1()
38 #undef x
39 };
40
41 /* Persistent alloc info: */
42
43 static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
44                                      const void **p, unsigned field)
45 {
46         unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
47         u64 v;
48
49         if (!(a->fields & (1 << field)))
50                 return 0;
51
52         switch (bytes) {
53         case 1:
54                 v = *((const u8 *) *p);
55                 break;
56         case 2:
57                 v = le16_to_cpup(*p);
58                 break;
59         case 4:
60                 v = le32_to_cpup(*p);
61                 break;
62         case 8:
63                 v = le64_to_cpup(*p);
64                 break;
65         default:
66                 BUG();
67         }
68
69         *p += bytes;
70         return v;
71 }
72
73 static inline void alloc_field_v1_put(struct bkey_i_alloc *a, void **p,
74                                       unsigned field, u64 v)
75 {
76         unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
77
78         if (!v)
79                 return;
80
81         a->v.fields |= 1 << field;
82
83         switch (bytes) {
84         case 1:
85                 *((u8 *) *p) = v;
86                 break;
87         case 2:
88                 *((__le16 *) *p) = cpu_to_le16(v);
89                 break;
90         case 4:
91                 *((__le32 *) *p) = cpu_to_le32(v);
92                 break;
93         case 8:
94                 *((__le64 *) *p) = cpu_to_le64(v);
95                 break;
96         default:
97                 BUG();
98         }
99
100         *p += bytes;
101 }
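/*
 * Illustrative sketch of the v1 layout the two helpers above walk (not part
 * of the build): struct bch_alloc carries a ->fields bitmap plus a packed
 * ->data[] array, and each field whose bit is set is stored little-endian,
 * in field-number order, in BCH_ALLOC_V1_FIELD_BYTES[field] bytes (the field
 * list itself comes from BCH_ALLOC_FIELDS_V1()). Decoding looks like:
 *
 *	const void *d = a->data;
 *	unsigned idx = 0;
 *
 *	u64 first  = alloc_field_v1_get(a, &d, idx++);
 *	u64 second = alloc_field_v1_get(a, &d, idx++);
 *
 * Each call consumes the field's bytes only if its bit is set in a->fields;
 * otherwise it returns 0 and leaves d where it was.
 */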
102
103 static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
104                                  struct bkey_s_c k)
105 {
106         const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
107         const void *d = in->data;
108         unsigned idx = 0;
109
110         out->gen = in->gen;
111
112 #define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
113         BCH_ALLOC_FIELDS_V1()
114 #undef  x
115 }
116
117 static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
118                                 struct bkey_s_c k)
119 {
120         struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
121         const u8 *in = a.v->data;
122         const u8 *end = bkey_val_end(a);
123         unsigned fieldnr = 0;
124         int ret;
125         u64 v;
126
127         out->gen        = a.v->gen;
128         out->oldest_gen = a.v->oldest_gen;
129         out->data_type  = a.v->data_type;
130
131 #define x(_name, _bits)                                                 \
132         if (fieldnr < a.v->nr_fields) {                                 \
133                 ret = bch2_varint_decode_fast(in, end, &v);             \
134                 if (ret < 0)                                            \
135                         return ret;                                     \
136                 in += ret;                                              \
137         } else {                                                        \
138                 v = 0;                                                  \
139         }                                                               \
140         out->_name = v;                                                 \
141         if (v != out->_name)                                            \
142                 return -1;                                              \
143         fieldnr++;
144
145         BCH_ALLOC_FIELDS_V2()
146 #undef  x
147         return 0;
148 }
149
150 static void bch2_alloc_pack_v2(struct bkey_alloc_buf *dst,
151                                const struct bkey_alloc_unpacked src)
152 {
153         struct bkey_i_alloc_v2 *a = bkey_alloc_v2_init(&dst->k);
154         unsigned nr_fields = 0, last_nonzero_fieldnr = 0;
155         u8 *out = a->v.data;
156         u8 *end = (void *) &dst[1];
157         u8 *last_nonzero_field = out;
158         unsigned bytes;
159
160         a->k.p          = POS(src.dev, src.bucket);
161         a->v.gen        = src.gen;
162         a->v.oldest_gen = src.oldest_gen;
163         a->v.data_type  = src.data_type;
164
165 #define x(_name, _bits)                                                 \
166         nr_fields++;                                                    \
167                                                                         \
168         if (src._name) {                                                \
169                 out += bch2_varint_encode_fast(out, src._name);         \
170                                                                         \
171                 last_nonzero_field = out;                               \
172                 last_nonzero_fieldnr = nr_fields;                       \
173         } else {                                                        \
174                 *out++ = 0;                                             \
175         }
176
177         BCH_ALLOC_FIELDS_V2()
178 #undef  x
179         BUG_ON(out > end);
180
181         out = last_nonzero_field;
182         a->v.nr_fields = last_nonzero_fieldnr;
183
184         bytes = (u8 *) out - (u8 *) &a->v;
185         set_bkey_val_bytes(&a->k, bytes);
186         memset_u64s_tail(&a->v, 0, bytes);
187 }
188
189 struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
190 {
191         struct bkey_alloc_unpacked ret = {
192                 .dev    = k.k->p.inode,
193                 .bucket = k.k->p.offset,
194                 .gen    = 0,
195         };
196
197         if (k.k->type == KEY_TYPE_alloc_v2)
198                 bch2_alloc_unpack_v2(&ret, k);
199         else if (k.k->type == KEY_TYPE_alloc)
200                 bch2_alloc_unpack_v1(&ret, k);
201
202         return ret;
203 }
204
205 void bch2_alloc_pack(struct bch_fs *c,
206                      struct bkey_alloc_buf *dst,
207                      const struct bkey_alloc_unpacked src)
208 {
209         bch2_alloc_pack_v2(dst, src);
210 }
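/*
 * Illustrative round trip (not part of the build; c is the usual
 * struct bch_fs * in scope): packing an unpacked alloc key and unpacking it
 * again should be lossless:
 *
 *	struct bkey_alloc_buf buf;
 *	struct bkey_alloc_unpacked u = {
 *		.dev		= 0,
 *		.bucket		= 128,
 *		.gen		= 3,
 *		.dirty_sectors	= 8,
 *	};
 *
 *	bch2_alloc_pack(c, &buf, u);
 *	BUG_ON(bkey_alloc_unpacked_cmp(u,
 *			bch2_alloc_unpack(bkey_i_to_s_c(&buf.k))));
 *
 * In the v2 encoding zero-valued fields cost a single 0 byte and trailing
 * zero fields are dropped entirely (see bch2_alloc_pack_v2() above).
 */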
211
212 static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
213 {
214         unsigned i, bytes = offsetof(struct bch_alloc, data);
215
216         for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
217                 if (a->fields & (1 << i))
218                         bytes += BCH_ALLOC_V1_FIELD_BYTES[i];
219
220         return DIV_ROUND_UP(bytes, sizeof(u64));
221 }
222
223 const char *bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k)
224 {
225         struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
226
227         if (k.k->p.inode >= c->sb.nr_devices ||
228             !c->devs[k.k->p.inode])
229                 return "invalid device";
230
231         /* allow for unknown fields */
232         if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v))
233                 return "incorrect value size";
234
235         return NULL;
236 }
237
238 const char *bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k)
239 {
240         struct bkey_alloc_unpacked u;
241
242         if (k.k->p.inode >= c->sb.nr_devices ||
243             !c->devs[k.k->p.inode])
244                 return "invalid device";
245
246         if (bch2_alloc_unpack_v2(&u, k))
247                 return "unpack error";
248
249         return NULL;
250 }
251
252 void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
253                            struct bkey_s_c k)
254 {
255         struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
256
257         pr_buf(out, "gen %u oldest_gen %u data_type %s",
258                u.gen, u.oldest_gen, bch2_data_types[u.data_type]);
259 #define x(_name, ...)   pr_buf(out, " " #_name " %llu", (u64) u._name);
260         BCH_ALLOC_FIELDS_V2()
261 #undef  x
262 }
263
264 static int bch2_alloc_read_fn(struct btree_trans *trans, struct bkey_s_c k)
265 {
266         struct bch_fs *c = trans->c;
267         struct bch_dev *ca;
268         struct bucket *g;
269         struct bkey_alloc_unpacked u;
270
271         if (k.k->type != KEY_TYPE_alloc &&
272             k.k->type != KEY_TYPE_alloc_v2)
273                 return 0;
274
275         ca = bch_dev_bkey_exists(c, k.k->p.inode);
276         g = bucket(ca, k.k->p.offset);
277         u = bch2_alloc_unpack(k);
278
279         g->_mark.gen            = u.gen;
280         g->_mark.data_type      = u.data_type;
281         g->_mark.dirty_sectors  = u.dirty_sectors;
282         g->_mark.cached_sectors = u.cached_sectors;
283         g->io_time[READ]        = u.read_time;
284         g->io_time[WRITE]       = u.write_time;
285         g->oldest_gen           = u.oldest_gen;
286         g->gen_valid            = 1;
287
288         return 0;
289 }
290
291 int bch2_alloc_read(struct bch_fs *c)
292 {
293         struct btree_trans trans;
294         int ret;
295
296         bch2_trans_init(&trans, c, 0, 0);
297         down_read(&c->gc_lock);
298         ret = bch2_btree_and_journal_walk(&trans, BTREE_ID_alloc, bch2_alloc_read_fn);
299         up_read(&c->gc_lock);
300         bch2_trans_exit(&trans);
301         if (ret) {
302                 bch_err(c, "error reading alloc info: %i", ret);
303                 return ret;
304         }
305
306         return 0;
307 }
308
309 static int bch2_alloc_write_key(struct btree_trans *trans,
310                                 struct btree_iter *iter,
311                                 unsigned flags)
312 {
313         struct bch_fs *c = trans->c;
314         struct bkey_s_c k;
315         struct bch_dev *ca;
316         struct bucket *g;
317         struct bucket_mark m;
318         struct bkey_alloc_unpacked old_u, new_u;
319         struct bkey_alloc_buf a;
320         int ret;
321 retry:
322         bch2_trans_begin(trans);
323
324         ret = bch2_btree_key_cache_flush(trans,
325                         BTREE_ID_alloc, iter->pos);
326         if (ret)
327                 goto err;
328
329         k = bch2_btree_iter_peek_slot(iter);
330         ret = bkey_err(k);
331         if (ret)
332                 goto err;
333
334         old_u = bch2_alloc_unpack(k);
335
336         percpu_down_read(&c->mark_lock);
337         ca      = bch_dev_bkey_exists(c, iter->pos.inode);
338         g       = bucket(ca, iter->pos.offset);
339         m       = READ_ONCE(g->mark);
340         new_u   = alloc_mem_to_key(iter, g, m);
341         percpu_up_read(&c->mark_lock);
342
343         if (!bkey_alloc_unpacked_cmp(old_u, new_u))
344                 return 0;
345
346         bch2_alloc_pack(c, &a, new_u);
347         ret   = bch2_trans_update(trans, iter, &a.k,
348                                   BTREE_TRIGGER_NORUN) ?:
349                 bch2_trans_commit(trans, NULL, NULL,
350                                 BTREE_INSERT_NOFAIL|flags);
351 err:
352         if (ret == -EINTR)
353                 goto retry;
354         return ret;
355 }
356
357 int bch2_alloc_write(struct bch_fs *c, unsigned flags)
358 {
359         struct btree_trans trans;
360         struct btree_iter iter;
361         struct bch_dev *ca;
362         unsigned i;
363         int ret = 0;
364
365         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
366         bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc, POS_MIN,
367                              BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
368
369         for_each_member_device(ca, c, i) {
370                 bch2_btree_iter_set_pos(&iter,
371                         POS(ca->dev_idx, ca->mi.first_bucket));
372
373                 while (iter.pos.offset < ca->mi.nbuckets) {
374                         ret = bch2_alloc_write_key(&trans, &iter, flags);
375                         if (ret) {
376                                 percpu_ref_put(&ca->ref);
377                                 goto err;
378                         }
379                         bch2_btree_iter_advance(&iter);
380                 }
381         }
382 err:
383         bch2_trans_iter_exit(&trans, &iter);
384         bch2_trans_exit(&trans);
385         return ret;
386 }
387
388 /* Bucket IO clocks: */
389
390 int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
391                               size_t bucket_nr, int rw)
392 {
393         struct bch_fs *c = trans->c;
394         struct bch_dev *ca = bch_dev_bkey_exists(c, dev);
395         struct btree_iter iter;
396         struct bucket *g;
397         struct bkey_alloc_buf *a;
398         struct bkey_alloc_unpacked u;
399         u64 *time, now;
400         int ret = 0;
401
402         bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(dev, bucket_nr),
403                              BTREE_ITER_CACHED|
404                              BTREE_ITER_CACHED_NOFILL|
405                              BTREE_ITER_INTENT);
406         ret = bch2_btree_iter_traverse(&iter);
407         if (ret)
408                 goto out;
409
410         a = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));
411         ret = PTR_ERR_OR_ZERO(a);
412         if (ret)
413                 goto out;
414
415         percpu_down_read(&c->mark_lock);
416         g = bucket(ca, bucket_nr);
417         u = alloc_mem_to_key(&iter, g, READ_ONCE(g->mark));
418         percpu_up_read(&c->mark_lock);
419
420         time = rw == READ ? &u.read_time : &u.write_time;
421         now = atomic64_read(&c->io_clock[rw].now);
422         if (*time == now)
423                 goto out;
424
425         *time = now;
426
427         bch2_alloc_pack(c, a, u);
428         ret   = bch2_trans_update(trans, &iter, &a->k, 0) ?:
429                 bch2_trans_commit(trans, NULL, NULL, 0);
430 out:
431         bch2_trans_iter_exit(trans, &iter);
432         return ret;
433 }
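/*
 * Minimal caller sketch (illustrative): the function commits its own update,
 * so all a caller needs is a transaction to run it in - e.g. to bump the
 * read clock for bucket b on device ca:
 *
 *	struct btree_trans trans;
 *	int ret;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	ret = bch2_bucket_io_time_reset(&trans, ca->dev_idx, b, READ);
 *	bch2_trans_exit(&trans);
 */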
434
435 /* Background allocator thread: */
436
437 /*
438  * Scans for buckets to be invalidated, invalidates them, rewrites their alloc
439  * keys (marking them as invalidated on disk), then optionally issues discard
440  * commands for the newly freed buckets and puts them on the various freelists.
441  */
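/*
 * In terms of the functions below, one pass of the allocator thread is
 * roughly:
 *
 *	find_reclaimable_buckets()	fill ca->alloc_heap with candidates
 *	bch2_invalidate_buckets()	bump gens, update the alloc btree,
 *					move buckets onto ca->free_inc
 *	discard_one_bucket()		optionally discard each bucket
 *	push_invalidated_bucket()	hand it to one of the ca->free[]
 *					freelists for the allocation paths
 */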
442
443 static bool bch2_can_invalidate_bucket(struct bch_dev *ca, size_t b,
444                                        struct bucket_mark m)
445 {
446         u8 gc_gen;
447
448         if (!is_available_bucket(m))
449                 return false;
450
451         if (m.owned_by_allocator)
452                 return false;
453
454         if (ca->buckets_nouse &&
455             test_bit(b, ca->buckets_nouse))
456                 return false;
457
458         gc_gen = bucket_gc_gen(bucket(ca, b));
459
460         ca->inc_gen_needs_gc            += gc_gen >= BUCKET_GC_GEN_MAX / 2;
461         ca->inc_gen_really_needs_gc     += gc_gen >= BUCKET_GC_GEN_MAX;
462
463         return gc_gen < BUCKET_GC_GEN_MAX;
464 }
465
466 /*
467  * Determines the order in which we reuse buckets: smallest
468  * bucket_sort_key() first.
469  */
470
471 static unsigned bucket_sort_key(struct bucket *g, struct bucket_mark m,
472                                 u64 now, u64 last_seq_ondisk)
473 {
474         unsigned used = bucket_sectors_used(m);
475
476         if (used) {
477                 /*
478                  * Prefer to keep buckets that have been read more recently, and
479                  * buckets that have more data in them:
480                  */
481                 u64 last_read = max_t(s64, 0, now - g->io_time[READ]);
482                 u32 last_read_scaled = min_t(u64, U32_MAX, div_u64(last_read, used));
483
484                 return -last_read_scaled;
485         } else {
486                 /*
487                  * Prefer to use buckets with smaller gc_gen so that we don't
488                  * have to walk the btree and recalculate oldest_gen - but shift
489                  * off the low bits so that buckets will still have equal sort
490                  * keys when there's only a small difference, so that we can
491                  * keep sequential buckets together:
492                  */
493                 return  (bucket_needs_journal_commit(m, last_seq_ondisk) << 4)|
494                         (bucket_gc_gen(g) >> 4);
495         }
496 }
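/*
 * Worked example (illustrative numbers): a cached bucket last read 4096
 * clock ticks ago holding 16 used sectors gets last_read_scaled = 256 and
 * hence sort key 0xffffff00; an empty bucket with gc_gen 40 and no pending
 * journal commit gets sort key 40 >> 4 = 2. Since smaller keys are reclaimed
 * first, empty buckets are generally preferred, and among buckets holding
 * cached data the colder and less full ones go first.
 */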
497
498 static inline int bucket_alloc_cmp(alloc_heap *h,
499                                    struct alloc_heap_entry l,
500                                    struct alloc_heap_entry r)
501 {
502         return  cmp_int(l.key, r.key) ?:
503                 cmp_int(r.nr, l.nr) ?:
504                 cmp_int(l.bucket, r.bucket);
505 }
506
507 static inline int bucket_idx_cmp(const void *_l, const void *_r)
508 {
509         const struct alloc_heap_entry *l = _l, *r = _r;
510
511         return cmp_int(l->bucket, r->bucket);
512 }
513
514 static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
515 {
516         struct bucket_array *buckets;
517         struct alloc_heap_entry e = { 0 };
518         u64 now, last_seq_ondisk;
519         size_t b, i, nr = 0;
520
521         down_read(&ca->bucket_lock);
522
523         buckets = bucket_array(ca);
524         ca->alloc_heap.used = 0;
525         now = atomic64_read(&c->io_clock[READ].now);
526         last_seq_ondisk = c->journal.last_seq_ondisk;
527
528         /*
529          * Find buckets with lowest read priority, by building a maxheap sorted
530          * by read priority and repeatedly replacing the maximum element until
531          * all buckets have been visited.
532          */
533         for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
534                 struct bucket *g = &buckets->b[b];
535                 struct bucket_mark m = READ_ONCE(g->mark);
536                 unsigned key = bucket_sort_key(g, m, now, last_seq_ondisk);
537
538                 cond_resched();
539
540                 if (!bch2_can_invalidate_bucket(ca, b, m))
541                         continue;
542
543                 if (e.nr && e.bucket + e.nr == b && e.key == key) {
544                         e.nr++;
545                 } else {
546                         if (e.nr)
547                                 heap_add_or_replace(&ca->alloc_heap, e,
548                                         -bucket_alloc_cmp, NULL);
549
550                         e = (struct alloc_heap_entry) {
551                                 .bucket = b,
552                                 .nr     = 1,
553                                 .key    = key,
554                         };
555                 }
556         }
557
558         if (e.nr)
559                 heap_add_or_replace(&ca->alloc_heap, e,
560                                 -bucket_alloc_cmp, NULL);
561
562         for (i = 0; i < ca->alloc_heap.used; i++)
563                 nr += ca->alloc_heap.data[i].nr;
564
565         while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
566                 nr -= ca->alloc_heap.data[0].nr;
567                 heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL);
568         }
569
570         up_read(&ca->bucket_lock);
571 }
572
573 static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
574 {
575         struct bucket_array *buckets = bucket_array(ca);
576         struct bucket_mark m;
577         size_t b, start;
578
579         if (ca->fifo_last_bucket <  ca->mi.first_bucket ||
580             ca->fifo_last_bucket >= ca->mi.nbuckets)
581                 ca->fifo_last_bucket = ca->mi.first_bucket;
582
583         start = ca->fifo_last_bucket;
584
585         do {
586                 ca->fifo_last_bucket++;
587                 if (ca->fifo_last_bucket == ca->mi.nbuckets)
588                         ca->fifo_last_bucket = ca->mi.first_bucket;
589
590                 b = ca->fifo_last_bucket;
591                 m = READ_ONCE(buckets->b[b].mark);
592
593                 if (bch2_can_invalidate_bucket(ca, b, m)) {
594                         struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
595
596                         heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
597                         if (heap_full(&ca->alloc_heap))
598                                 break;
599                 }
600
601                 cond_resched();
602         } while (ca->fifo_last_bucket != start);
603 }
604
605 static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
606 {
607         struct bucket_array *buckets = bucket_array(ca);
608         struct bucket_mark m;
609         size_t checked, i;
610
611         for (checked = 0;
612              checked < ca->mi.nbuckets / 2;
613              checked++) {
614                 size_t b = bch2_rand_range(ca->mi.nbuckets -
615                                            ca->mi.first_bucket) +
616                         ca->mi.first_bucket;
617
618                 m = READ_ONCE(buckets->b[b].mark);
619
620                 if (bch2_can_invalidate_bucket(ca, b, m)) {
621                         struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
622
623                         heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
624                         if (heap_full(&ca->alloc_heap))
625                                 break;
626                 }
627
628                 cond_resched();
629         }
630
631         sort(ca->alloc_heap.data,
632              ca->alloc_heap.used,
633              sizeof(ca->alloc_heap.data[0]),
634              bucket_idx_cmp, NULL);
635
636         /* remove duplicates: */
637         for (i = 0; i + 1 < ca->alloc_heap.used; i++)
638                 if (ca->alloc_heap.data[i].bucket ==
639                     ca->alloc_heap.data[i + 1].bucket)
640                         ca->alloc_heap.data[i].nr = 0;
641 }
642
643 static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
644 {
645         size_t i, nr = 0;
646
647         ca->inc_gen_needs_gc                    = 0;
648         ca->inc_gen_really_needs_gc             = 0;
649
650         switch (ca->mi.replacement) {
651         case BCH_CACHE_REPLACEMENT_lru:
652                 find_reclaimable_buckets_lru(c, ca);
653                 break;
654         case BCH_CACHE_REPLACEMENT_fifo:
655                 find_reclaimable_buckets_fifo(c, ca);
656                 break;
657         case BCH_CACHE_REPLACEMENT_random:
658                 find_reclaimable_buckets_random(c, ca);
659                 break;
660         }
661
662         heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL);
663
664         for (i = 0; i < ca->alloc_heap.used; i++)
665                 nr += ca->alloc_heap.data[i].nr;
666
667         return nr;
668 }
669
670 /*
671  * returns sequence number of most recent journal entry that updated this
672  * bucket:
673  */
674 static u64 bucket_journal_seq(struct bch_fs *c, struct bucket_mark m)
675 {
676         if (m.journal_seq_valid) {
677                 u64 journal_seq = atomic64_read(&c->journal.seq);
678                 u64 bucket_seq  = journal_seq;
679
680                 bucket_seq &= ~((u64) U16_MAX);
681                 bucket_seq |= m.journal_seq;
682
683                 if (bucket_seq > journal_seq)
684                         bucket_seq -= 1 << 16;
685
686                 return bucket_seq;
687         } else {
688                 return 0;
689         }
690 }
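/*
 * Example: bucket_mark only carries the low 16 bits of the sequence number,
 * so the full value is rebuilt from the current journal sequence's high
 * bits. With journal seq 0x10005 and m.journal_seq 0xfffe, splicing gives
 * 0x1fffe, which lies in the future, so 1 << 16 is subtracted to get 0xfffe
 * - i.e. the bucket was last updated in the previous 64k window.
 */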
691
692 static int bucket_invalidate_btree(struct btree_trans *trans,
693                                    struct bch_dev *ca, u64 b)
694 {
695         struct bch_fs *c = trans->c;
696         struct bkey_alloc_buf *a;
697         struct bkey_alloc_unpacked u;
698         struct bucket *g;
699         struct bucket_mark m;
700         struct btree_iter iter;
701         int ret;
702
703         bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
704                              POS(ca->dev_idx, b),
705                              BTREE_ITER_CACHED|
706                              BTREE_ITER_CACHED_NOFILL|
707                              BTREE_ITER_INTENT);
708
709         a = bch2_trans_kmalloc(trans, sizeof(*a));
710         ret = PTR_ERR_OR_ZERO(a);
711         if (ret)
712                 goto err;
713
714         ret = bch2_btree_iter_traverse(&iter);
715         if (ret)
716                 goto err;
717
718         percpu_down_read(&c->mark_lock);
719         g = bucket(ca, b);
720         m = READ_ONCE(g->mark);
721         u = alloc_mem_to_key(&iter, g, m);
722         percpu_up_read(&c->mark_lock);
723
724         u.gen++;
725         u.data_type     = 0;
726         u.dirty_sectors = 0;
727         u.cached_sectors = 0;
728         u.read_time     = atomic64_read(&c->io_clock[READ].now);
729         u.write_time    = atomic64_read(&c->io_clock[WRITE].now);
730
731         bch2_alloc_pack(c, a, u);
732         ret = bch2_trans_update(trans, &iter, &a->k,
733                                 BTREE_TRIGGER_BUCKET_INVALIDATE);
734 err:
735         bch2_trans_iter_exit(trans, &iter);
736         return ret;
737 }
738
739 static int bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
740                                       u64 *journal_seq, unsigned flags)
741 {
742         struct bucket *g;
743         struct bucket_mark m;
744         size_t b;
745         int ret = 0;
746
747         BUG_ON(!ca->alloc_heap.used ||
748                !ca->alloc_heap.data[0].nr);
749         b = ca->alloc_heap.data[0].bucket;
750
751         /* first, put on free_inc and mark as owned by allocator: */
752         percpu_down_read(&c->mark_lock);
753         g = bucket(ca, b);
754         m = READ_ONCE(g->mark);
755
756         BUG_ON(m.dirty_sectors);
757
758         bch2_mark_alloc_bucket(c, ca, b, true);
759
760         spin_lock(&c->freelist_lock);
761         verify_not_on_freelist(c, ca, b);
762         BUG_ON(!fifo_push(&ca->free_inc, b));
763         spin_unlock(&c->freelist_lock);
764
765         /*
766          * If we're not invalidating cached data, we only increment the bucket
767          * gen in memory here; the incremented gen will be updated in the btree
768          * by bch2_trans_mark_pointer():
769          */
770         if (!m.cached_sectors &&
771             !bucket_needs_journal_commit(m, c->journal.last_seq_ondisk)) {
772                 BUG_ON(m.data_type);
773                 bucket_cmpxchg(g, m, m.gen++);
774                 percpu_up_read(&c->mark_lock);
775                 goto out;
776         }
777
778         percpu_up_read(&c->mark_lock);
779
780         /*
781          * If the read-only path is trying to shut down, we can't be generating
782          * new btree updates:
783          */
784         if (test_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags)) {
785                 ret = 1;
786                 goto out;
787         }
788
789         ret = bch2_trans_do(c, NULL, journal_seq,
790                             BTREE_INSERT_NOCHECK_RW|
791                             BTREE_INSERT_NOFAIL|
792                             BTREE_INSERT_JOURNAL_RESERVED|
793                             flags,
794                             bucket_invalidate_btree(&trans, ca, b));
795 out:
796         if (!ret) {
797                 /* remove from alloc_heap: */
798                 struct alloc_heap_entry e, *top = ca->alloc_heap.data;
799
800                 top->bucket++;
801                 top->nr--;
802
803                 if (!top->nr)
804                         heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
805
806                 /*
807                  * Make sure we flush the last journal entry that updated this
808                  * bucket (i.e. deleting the last reference) before writing to
809                  * this bucket again:
810                  */
811                 *journal_seq = max(*journal_seq, bucket_journal_seq(c, m));
812         } else {
813                 size_t b2;
814
815                 /* remove from free_inc: */
816                 percpu_down_read(&c->mark_lock);
817                 spin_lock(&c->freelist_lock);
818
819                 bch2_mark_alloc_bucket(c, ca, b, false);
820
821                 BUG_ON(!fifo_pop_back(&ca->free_inc, b2));
822                 BUG_ON(b != b2);
823
824                 spin_unlock(&c->freelist_lock);
825                 percpu_up_read(&c->mark_lock);
826         }
827
828         return ret < 0 ? ret : 0;
829 }
830
831 /*
832  * Pull buckets off ca->alloc_heap, invalidate them, move them to ca->free_inc:
833  */
834 static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
835 {
836         u64 journal_seq = 0;
837         int ret = 0;
838
839         /* Only use nowait if we've already invalidated at least one bucket: */
840         while (!ret &&
841                !fifo_full(&ca->free_inc) &&
842                ca->alloc_heap.used) {
843                 if (kthread_should_stop()) {
844                         ret = 1;
845                         break;
846                 }
847
848                 ret = bch2_invalidate_one_bucket(c, ca, &journal_seq,
849                                 (!fifo_empty(&ca->free_inc)
850                                  ? BTREE_INSERT_NOWAIT : 0));
851                 /*
852                  * We only want to batch up invalidates when they're going to
853                  * require flushing the journal:
854                  */
855                 if (!journal_seq)
856                         break;
857         }
858
859         /* If we used NOWAIT, don't return the error: */
860         if (!fifo_empty(&ca->free_inc))
861                 ret = 0;
862         if (ret < 0)
863                 bch_err(ca, "error invalidating buckets: %i", ret);
864         if (ret)
865                 return ret;
866
867         if (journal_seq)
868                 ret = bch2_journal_flush_seq(&c->journal, journal_seq);
869         if (ret) {
870                 bch_err(ca, "journal error: %i", ret);
871                 return ret;
872         }
873
874         return 0;
875 }
876
877 static void alloc_thread_set_state(struct bch_dev *ca, unsigned new_state)
878 {
879         if (ca->allocator_state != new_state) {
880                 ca->allocator_state = new_state;
881                 closure_wake_up(&ca->fs->freelist_wait);
882         }
883 }
884
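/*
 * Try to move one bucket from free_inc onto a freelist: the reserves are
 * scanned in index order and the bucket goes to the first fifo with room.
 * Returns 1 (and marks the allocator as running) if the bucket was pushed,
 * 0 (allocator blocked, all freelists full) otherwise.
 */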
885 static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
886 {
887         unsigned i;
888         int ret = 0;
889
890         spin_lock(&c->freelist_lock);
891         for (i = 0; i < RESERVE_NR; i++) {
892                 /*
893                  * Don't strand buckets on the copygc freelist until
894                  * after recovery is finished:
895                  */
896                 if (i == RESERVE_MOVINGGC &&
897                     !test_bit(BCH_FS_STARTED, &c->flags))
898                         continue;
899
900                 if (fifo_push(&ca->free[i], b)) {
901                         fifo_pop(&ca->free_inc, b);
902                         ret = 1;
903                         break;
904                 }
905         }
906         spin_unlock(&c->freelist_lock);
907
908         ca->allocator_state = ret
909                 ? ALLOCATOR_running
910                 : ALLOCATOR_blocked_full;
911         closure_wake_up(&c->freelist_wait);
912         return ret;
913 }
914
915 static void discard_one_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
916 {
917         if (ca->mi.discard &&
918             blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
919                 blkdev_issue_discard(ca->disk_sb.bdev, bucket_to_sector(ca, b),
920                                      ca->mi.bucket_size, GFP_NOFS, 0);
921 }
922
923 static bool allocator_thread_running(struct bch_dev *ca)
924 {
925         unsigned state = ca->mi.state == BCH_MEMBER_STATE_rw &&
926                 test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags)
927                 ? ALLOCATOR_running
928                 : ALLOCATOR_stopped;
929         alloc_thread_set_state(ca, state);
930         return state == ALLOCATOR_running;
931 }
932
933 static int buckets_available(struct bch_dev *ca, unsigned long gc_count)
934 {
935         s64 available = dev_buckets_reclaimable(ca) -
936                 (gc_count == ca->fs->gc_count ? ca->inc_gen_really_needs_gc : 0);
937         bool ret = available > 0;
938
939         alloc_thread_set_state(ca, ret
940                                ? ALLOCATOR_running
941                                : ALLOCATOR_blocked);
942         return ret;
943 }
944
945 /**
946  * bch2_allocator_thread - move buckets from free_inc to reserves
947  *
948  * find_reclaimable_buckets() fills ca->alloc_heap with candidate buckets,
949  * bch2_invalidate_buckets() moves them onto the free_inc FIFO (updating
950  * their alloc keys on the way), and the reserves are depleted by bucket
951  * allocation; when free_inc runs dry we go back and invalidate some more.
952  */
953 static int bch2_allocator_thread(void *arg)
954 {
955         struct bch_dev *ca = arg;
956         struct bch_fs *c = ca->fs;
957         unsigned long gc_count = c->gc_count;
958         size_t nr;
959         int ret;
960
961         set_freezable();
962
963         while (1) {
964                 ret = kthread_wait_freezable(allocator_thread_running(ca));
965                 if (ret)
966                         goto stop;
967
968                 while (!ca->alloc_heap.used) {
969                         cond_resched();
970
971                         ret = kthread_wait_freezable(buckets_available(ca, gc_count));
972                         if (ret)
973                                 goto stop;
974
975                         gc_count = c->gc_count;
976                         nr = find_reclaimable_buckets(c, ca);
977
978                         trace_alloc_scan(ca, nr, ca->inc_gen_needs_gc,
979                                          ca->inc_gen_really_needs_gc);
980
981                         if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) ||
982                              ca->inc_gen_really_needs_gc) &&
983                             c->gc_thread) {
984                                 atomic_inc(&c->kick_gc);
985                                 wake_up_process(c->gc_thread);
986                         }
987                 }
988
989                 ret = bch2_invalidate_buckets(c, ca);
990                 if (ret)
991                         goto stop;
992
993                 while (!fifo_empty(&ca->free_inc)) {
994                         u64 b = fifo_peek(&ca->free_inc);
995
996                         discard_one_bucket(c, ca, b);
997
998                         ret = kthread_wait_freezable(push_invalidated_bucket(c, ca, b));
999                         if (ret)
1000                                 goto stop;
1001                 }
1002         }
1003 stop:
1004         alloc_thread_set_state(ca, ALLOCATOR_stopped);
1005         return 0;
1006 }
1007
1008 /* Startup/shutdown (ro/rw): */
1009
1010 void bch2_recalc_capacity(struct bch_fs *c)
1011 {
1012         struct bch_dev *ca;
1013         u64 capacity = 0, reserved_sectors = 0, gc_reserve;
1014         unsigned bucket_size_max = 0;
1015         unsigned long ra_pages = 0;
1016         unsigned i, j;
1017
1018         lockdep_assert_held(&c->state_lock);
1019
1020         for_each_online_member(ca, c, i) {
1021                 struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;
1022
1023                 ra_pages += bdi->ra_pages;
1024         }
1025
1026         bch2_set_ra_pages(c, ra_pages);
1027
1028         for_each_rw_member(ca, c, i) {
1029                 u64 dev_reserve = 0;
1030
1031                 /*
1032                  * We need to reserve buckets (from the number
1033                  * of currently available buckets) against
1034                  * foreground writes, mainly so that copygc can
1035                  * make forward progress.
1036                  *
1037                  * We need enough to refill the various reserves
1038                  * from scratch - copygc will use its entire
1039                  * reserve all at once, then run again once
1040                  * its reserve has been refilled (from the
1041                  * formerly available buckets).
1042                  *
1043                  * This reserve is just used when considering if
1044                  * allocations for foreground writes must wait -
1045                  * not -ENOSPC calculations.
1046                  */
1047                 for (j = 0; j < RESERVE_NONE; j++)
1048                         dev_reserve += ca->free[j].size;
1049
1050                 dev_reserve += 1;       /* btree write point */
1051                 dev_reserve += 1;       /* copygc write point */
1052                 dev_reserve += 1;       /* rebalance write point */
1053
1054                 dev_reserve *= ca->mi.bucket_size;
1055
1056                 capacity += bucket_to_sector(ca, ca->mi.nbuckets -
1057                                              ca->mi.first_bucket);
1058
1059                 reserved_sectors += dev_reserve * 2;
1060
1061                 bucket_size_max = max_t(unsigned, bucket_size_max,
1062                                         ca->mi.bucket_size);
1063         }
1064
1065         gc_reserve = c->opts.gc_reserve_bytes
1066                 ? c->opts.gc_reserve_bytes >> 9
1067                 : div64_u64(capacity * c->opts.gc_reserve_percent, 100);
1068
1069         reserved_sectors = max(gc_reserve, reserved_sectors);
1070
1071         reserved_sectors = min(reserved_sectors, capacity);
1072
1073         c->capacity = capacity - reserved_sectors;
1074
1075         c->bucket_size_max = bucket_size_max;
1076
1077         /* Wake up in case someone was waiting for buckets */
1078         closure_wake_up(&c->freelist_wait);
1079 }
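/*
 * Rough numeric sketch (illustrative values only): with a single rw device
 * holding 100GB of buckets, and free[] fifos plus the three write points
 * adding up to, say, 7 buckets of 1MB each, dev_reserve is 7MB and the
 * summed reserved_sectors 14MB worth of sectors. With gc_reserve_percent = 5
 * the gc_reserve works out to 5GB, which wins the max() and is what actually
 * gets subtracted, leaving c->capacity at roughly 95GB. gc_reserve_bytes, if
 * set, overrides the percentage.
 */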
1080
1081 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
1082 {
1083         struct open_bucket *ob;
1084         bool ret = false;
1085
1086         for (ob = c->open_buckets;
1087              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1088              ob++) {
1089                 spin_lock(&ob->lock);
1090                 if (ob->valid && !ob->on_partial_list &&
1091                     ob->ptr.dev == ca->dev_idx)
1092                         ret = true;
1093                 spin_unlock(&ob->lock);
1094         }
1095
1096         return ret;
1097 }
1098
1099 /* device goes ro: */
1100 void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
1101 {
1102         unsigned i;
1103
1104         BUG_ON(ca->alloc_thread);
1105
1106         /* First, remove device from allocation groups: */
1107
1108         for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1109                 clear_bit(ca->dev_idx, c->rw_devs[i].d);
1110
1111         /*
1112          * Capacity is calculated based on the devices in allocation groups:
1113          */
1114         bch2_recalc_capacity(c);
1115
1116         /* Next, close write points that point to this device... */
1117         for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
1118                 bch2_writepoint_stop(c, ca, &c->write_points[i]);
1119
1120         bch2_writepoint_stop(c, ca, &c->copygc_write_point);
1121         bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
1122         bch2_writepoint_stop(c, ca, &c->btree_write_point);
1123
1124         mutex_lock(&c->btree_reserve_cache_lock);
1125         while (c->btree_reserve_cache_nr) {
1126                 struct btree_alloc *a =
1127                         &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
1128
1129                 bch2_open_buckets_put(c, &a->ob);
1130         }
1131         mutex_unlock(&c->btree_reserve_cache_lock);
1132
1133         while (1) {
1134                 struct open_bucket *ob;
1135
1136                 spin_lock(&c->freelist_lock);
1137                 if (!ca->open_buckets_partial_nr) {
1138                         spin_unlock(&c->freelist_lock);
1139                         break;
1140                 }
1141                 ob = c->open_buckets +
1142                         ca->open_buckets_partial[--ca->open_buckets_partial_nr];
1143                 ob->on_partial_list = false;
1144                 spin_unlock(&c->freelist_lock);
1145
1146                 bch2_open_bucket_put(c, ob);
1147         }
1148
1149         bch2_ec_stop_dev(c, ca);
1150
1151         /*
1152          * Wake up threads that were blocked on allocation, so they can notice
1153          * the device can no longer be removed and the capacity has changed:
1154          */
1155         closure_wake_up(&c->freelist_wait);
1156
1157         /*
1158          * journal_res_get() can block waiting for free space in the journal -
1159          * it needs to notice there may not be devices to allocate from anymore:
1160          */
1161         wake_up(&c->journal.wait);
1162
1163         /* Now wait for any in flight writes: */
1164
1165         closure_wait_event(&c->open_buckets_wait,
1166                            !bch2_dev_has_open_write_point(c, ca));
1167 }
1168
1169 /* device goes rw: */
1170 void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
1171 {
1172         unsigned i;
1173
1174         for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1175                 if (ca->mi.data_allowed & (1 << i))
1176                         set_bit(ca->dev_idx, c->rw_devs[i].d);
1177 }
1178
1179 void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca)
1180 {
1181         if (ca->alloc_thread)
1182                 closure_wait_event(&c->freelist_wait,
1183                                    ca->allocator_state != ALLOCATOR_running);
1184 }
1185
1186 /* stop allocator thread: */
1187 void bch2_dev_allocator_stop(struct bch_dev *ca)
1188 {
1189         struct task_struct *p;
1190
1191         p = rcu_dereference_protected(ca->alloc_thread, 1);
1192         ca->alloc_thread = NULL;
1193
1194         /*
1195          * We need an rcu barrier between setting ca->alloc_thread = NULL and
1196          * the thread shutting down to avoid bch2_wake_allocator() racing:
1197          *
1198          * XXX: it would be better to have the rcu barrier be asynchronous
1199          * instead of blocking us here
1200          */
1201         synchronize_rcu();
1202
1203         if (p) {
1204                 kthread_stop(p);
1205                 put_task_struct(p);
1206         }
1207 }
1208
1209 /* start allocator thread: */
1210 int bch2_dev_allocator_start(struct bch_dev *ca)
1211 {
1212         struct task_struct *p;
1213
1214         /*
1215          * allocator thread already started?
1216          */
1217         if (ca->alloc_thread)
1218                 return 0;
1219
1220         p = kthread_create(bch2_allocator_thread, ca,
1221                            "bch-alloc/%s", ca->name);
1222         if (IS_ERR(p)) {
1223                 bch_err(ca->fs, "error creating allocator thread: %li",
1224                         PTR_ERR(p));
1225                 return PTR_ERR(p);
1226         }
1227
1228         get_task_struct(p);
1229         rcu_assign_pointer(ca->alloc_thread, p);
1230         wake_up_process(p);
1231         return 0;
1232 }
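/*
 * Sketch of how these hooks fit together when a device goes rw (the exact
 * call sites live in the startup/shutdown paths outside this file):
 *
 *	bch2_dev_allocator_add(c, ca);		join the rw_devs groups
 *	bch2_recalc_capacity(c);		capacity now includes ca
 *	ret = bch2_dev_allocator_start(ca);	spawn bch-alloc/<name>
 *
 * and roughly the reverse when it goes ro:
 *
 *	bch2_dev_allocator_quiesce(c, ca);	wait for the thread to go idle
 *	bch2_dev_allocator_stop(ca);		stop the thread
 *	bch2_dev_allocator_remove(c, ca);	leave the allocation groups
 */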
1233
1234 void bch2_fs_allocator_background_init(struct bch_fs *c)
1235 {
1236         spin_lock_init(&c->freelist_lock);
1237 }
1238
1239 void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
1240 {
1241         struct open_bucket *ob;
1242
1243         for (ob = c->open_buckets;
1244              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1245              ob++) {
1246                 spin_lock(&ob->lock);
1247                 if (ob->valid && !ob->on_partial_list) {
1248                         pr_buf(out, "%zu ref %u type %s\n",
1249                                ob - c->open_buckets,
1250                                atomic_read(&ob->pin),
1251                                bch2_data_types[ob->type]);
1252                 }
1253                 spin_unlock(&ob->lock);
1254         }
1255
1256 }