libbcachefs/alloc_background.c (bcachefs-tools-debian; bcachefs sources at 3f3f969859: Fix some compiler warnings)
1 // SPDX-License-Identifier: GPL-2.0
2 #include "bcachefs.h"
3 #include "alloc_background.h"
4 #include "alloc_foreground.h"
5 #include "btree_cache.h"
6 #include "btree_io.h"
7 #include "btree_key_cache.h"
8 #include "btree_update.h"
9 #include "btree_update_interior.h"
10 #include "btree_gc.h"
11 #include "buckets.h"
12 #include "clock.h"
13 #include "debug.h"
14 #include "ec.h"
15 #include "error.h"
16 #include "recovery.h"
17 #include "varint.h"
18
19 #include <linux/kthread.h>
20 #include <linux/math64.h>
21 #include <linux/random.h>
22 #include <linux/rculist.h>
23 #include <linux/rcupdate.h>
24 #include <linux/sched/task.h>
25 #include <linux/sort.h>
26 #include <trace/events/bcachefs.h>
27
28 const char * const bch2_allocator_states[] = {
29 #define x(n)    #n,
30         ALLOC_THREAD_STATES()
31 #undef x
32         NULL
33 };
34
35 static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
36 #define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
37         BCH_ALLOC_FIELDS_V1()
38 #undef x
39 };
40
41 /* Persistent alloc info: */
42
43 static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
44                                      const void **p, unsigned field)
45 {
46         unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
47         u64 v;
48
49         if (!(a->fields & (1 << field)))
50                 return 0;
51
52         switch (bytes) {
53         case 1:
54                 v = *((const u8 *) *p);
55                 break;
56         case 2:
57                 v = le16_to_cpup(*p);
58                 break;
59         case 4:
60                 v = le32_to_cpup(*p);
61                 break;
62         case 8:
63                 v = le64_to_cpup(*p);
64                 break;
65         default:
66                 BUG();
67         }
68
69         *p += bytes;
70         return v;
71 }
72
73 static inline void alloc_field_v1_put(struct bkey_i_alloc *a, void **p,
74                                       unsigned field, u64 v)
75 {
76         unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
77
78         if (!v)
79                 return;
80
81         a->v.fields |= 1 << field;
82
83         switch (bytes) {
84         case 1:
85                 *((u8 *) *p) = v;
86                 break;
87         case 2:
88                 *((__le16 *) *p) = cpu_to_le16(v);
89                 break;
90         case 4:
91                 *((__le32 *) *p) = cpu_to_le32(v);
92                 break;
93         case 8:
94                 *((__le64 *) *p) = cpu_to_le64(v);
95                 break;
96         default:
97                 BUG();
98         }
99
100         *p += bytes;
101 }
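
/*
 * Illustrative note (not taken from the on-disk format headers): a v1 alloc
 * value is a bitmap of present fields in bch_alloc.fields followed by the
 * present fields packed back to back, each in the fixed little-endian width
 * given by BCH_ALLOC_V1_FIELD_BYTES[].  alloc_field_v1_put() skips zero
 * fields entirely (their bit stays clear), and alloc_field_v1_get() returns 0
 * for any field whose bit is clear, so older and newer field sets remain
 * mutually readable.
 */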
102
103 static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
104                                  struct bkey_s_c k)
105 {
106         const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
107         const void *d = in->data;
108         unsigned idx = 0;
109
110         out->gen = in->gen;
111
112 #define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
113         BCH_ALLOC_FIELDS_V1()
114 #undef  x
115 }
116
117 static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
118                                 struct bkey_s_c k)
119 {
120         struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
121         const u8 *in = a.v->data;
122         const u8 *end = bkey_val_end(a);
123         unsigned fieldnr = 0;
124         int ret;
125         u64 v;
126
127         out->gen        = a.v->gen;
128         out->oldest_gen = a.v->oldest_gen;
129         out->data_type  = a.v->data_type;
130
131 #define x(_name, _bits)                                                 \
132         if (fieldnr < a.v->nr_fields) {                                 \
133                 ret = bch2_varint_decode_fast(in, end, &v);             \
134                 if (ret < 0)                                            \
135                         return ret;                                     \
136                 in += ret;                                              \
137         } else {                                                        \
138                 v = 0;                                                  \
139         }                                                               \
140         out->_name = v;                                                 \
141         if (v != out->_name)                                            \
142                 return -1;                                              \
143         fieldnr++;
144
145         BCH_ALLOC_FIELDS_V2()
146 #undef  x
147         return 0;
148 }
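
/*
 * Note on the v2 decode above: fields are stored as consecutive varints in
 * BCH_ALLOC_FIELDS_V2() order, and a key written with fewer fields than we
 * know about simply decodes the missing trailing fields as 0.  The
 * "if (v != out->_name) return -1" check catches a decoded value that does
 * not fit the (possibly narrower) in-memory field, instead of silently
 * truncating it.
 */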
149
150 static void bch2_alloc_pack_v2(struct bkey_alloc_buf *dst,
151                                const struct bkey_alloc_unpacked src)
152 {
153         struct bkey_i_alloc_v2 *a = bkey_alloc_v2_init(&dst->k);
154         unsigned nr_fields = 0, last_nonzero_fieldnr = 0;
155         u8 *out = a->v.data;
156         u8 *end = (void *) &dst[1];
157         u8 *last_nonzero_field = out;
158         unsigned bytes;
159
160         a->k.p          = POS(src.dev, src.bucket);
161         a->v.gen        = src.gen;
162         a->v.oldest_gen = src.oldest_gen;
163         a->v.data_type  = src.data_type;
164
165 #define x(_name, _bits)                                                 \
166         nr_fields++;                                                    \
167                                                                         \
168         if (src._name) {                                                \
169                 out += bch2_varint_encode_fast(out, src._name);         \
170                                                                         \
171                 last_nonzero_field = out;                               \
172                 last_nonzero_fieldnr = nr_fields;                       \
173         } else {                                                        \
174                 *out++ = 0;                                             \
175         }
176
177         BCH_ALLOC_FIELDS_V2()
178 #undef  x
179         BUG_ON(out > end);
180
181         out = last_nonzero_field;
182         a->v.nr_fields = last_nonzero_fieldnr;
183
184         bytes = (u8 *) out - (u8 *) &a->v;
185         set_bkey_val_bytes(&a->k, bytes);
186         memset_u64s_tail(&a->v, 0, bytes);
187 }
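
/*
 * Note on the v2 encode above: every field up to the last nonzero one is
 * emitted (a zero field in the middle still costs one 0x00 varint byte), but
 * nr_fields and the value size are then rolled back to the last nonzero
 * field, so trailing zero fields take no space on disk at all.
 */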
188
189 struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
190 {
191         struct bkey_alloc_unpacked ret = {
192                 .dev    = k.k->p.inode,
193                 .bucket = k.k->p.offset,
194                 .gen    = 0,
195         };
196
197         if (k.k->type == KEY_TYPE_alloc_v2)
198                 bch2_alloc_unpack_v2(&ret, k);
199         else if (k.k->type == KEY_TYPE_alloc)
200                 bch2_alloc_unpack_v1(&ret, k);
201
202         return ret;
203 }
204
205 void bch2_alloc_pack(struct bch_fs *c,
206                      struct bkey_alloc_buf *dst,
207                      const struct bkey_alloc_unpacked src)
208 {
209         bch2_alloc_pack_v2(dst, src);
210 }
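
/*
 * A minimal sketch of how the pack/unpack helpers are used throughout this
 * file (illustrative only; locking and error handling are elided, and the
 * transaction and iterator are assumed to be set up by the caller):
 *
 *	struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
 *	struct bkey_alloc_buf buf;
 *
 *	u.read_time = atomic64_read(&c->io_clock[READ].now);
 *	bch2_alloc_pack(c, &buf, u);
 *	ret = bch2_trans_update(trans, iter, &buf.k, 0);
 */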
211
212 static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
213 {
214         unsigned i, bytes = offsetof(struct bch_alloc, data);
215
216         for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
217                 if (a->fields & (1 << i))
218                         bytes += BCH_ALLOC_V1_FIELD_BYTES[i];
219
220         return DIV_ROUND_UP(bytes, sizeof(u64));
221 }
222
223 const char *bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k)
224 {
225         struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
226
227         if (k.k->p.inode >= c->sb.nr_devices ||
228             !c->devs[k.k->p.inode])
229                 return "invalid device";
230
231         /* allow for unknown fields */
232         if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v))
233                 return "incorrect value size";
234
235         return NULL;
236 }
237
238 const char *bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k)
239 {
240         struct bkey_alloc_unpacked u;
241
242         if (k.k->p.inode >= c->sb.nr_devices ||
243             !c->devs[k.k->p.inode])
244                 return "invalid device";
245
246         if (bch2_alloc_unpack_v2(&u, k))
247                 return "unpack error";
248
249         return NULL;
250 }
251
252 void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
253                            struct bkey_s_c k)
254 {
255         struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
256
257         pr_buf(out, "gen %u oldest_gen %u data_type %s",
258                u.gen, u.oldest_gen, bch2_data_types[u.data_type]);
259 #define x(_name, ...)   pr_buf(out, " " #_name " %llu", (u64) u._name);
260         BCH_ALLOC_FIELDS_V2()
261 #undef  x
262 }
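
/*
 * Example of the output produced above (illustrative values; the exact field
 * list and order come from BCH_ALLOC_FIELDS_V2()):
 *
 *	gen 4 oldest_gen 2 data_type user read_time 1234 write_time 5678 ...
 */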
263
264 static int bch2_alloc_read_fn(struct bch_fs *c, struct bkey_s_c k)
265 {
266         struct bch_dev *ca;
267         struct bucket *g;
268         struct bkey_alloc_unpacked u;
269
270         if (k.k->type != KEY_TYPE_alloc &&
271             k.k->type != KEY_TYPE_alloc_v2)
272                 return 0;
273
274         ca = bch_dev_bkey_exists(c, k.k->p.inode);
275         g = bucket(ca, k.k->p.offset);
276         u = bch2_alloc_unpack(k);
277
278         g->_mark.gen            = u.gen;
279         g->_mark.data_type      = u.data_type;
280         g->_mark.dirty_sectors  = u.dirty_sectors;
281         g->_mark.cached_sectors = u.cached_sectors;
282         g->io_time[READ]        = u.read_time;
283         g->io_time[WRITE]       = u.write_time;
284         g->oldest_gen           = u.oldest_gen;
285         g->gen_valid            = 1;
286
287         return 0;
288 }
289
290 int bch2_alloc_read(struct bch_fs *c)
291 {
292         int ret;
293
294         down_read(&c->gc_lock);
295         ret = bch2_btree_and_journal_walk(c, BTREE_ID_alloc, bch2_alloc_read_fn);
296         up_read(&c->gc_lock);
297         if (ret) {
298                 bch_err(c, "error reading alloc info: %i", ret);
299                 return ret;
300         }
301
302         return 0;
303 }
304
305 static int bch2_alloc_write_key(struct btree_trans *trans,
306                                 struct btree_iter *iter,
307                                 unsigned flags)
308 {
309         struct bch_fs *c = trans->c;
310         struct bkey_s_c k;
311         struct bch_dev *ca;
312         struct bucket *g;
313         struct bucket_mark m;
314         struct bkey_alloc_unpacked old_u, new_u;
315         struct bkey_alloc_buf a;
316         int ret;
317 retry:
318         bch2_trans_begin(trans);
319
320         ret = bch2_btree_key_cache_flush(trans,
321                         BTREE_ID_alloc, iter->pos);
322         if (ret)
323                 goto err;
324
325         k = bch2_btree_iter_peek_slot(iter);
326         ret = bkey_err(k);
327         if (ret)
328                 goto err;
329
330         old_u = bch2_alloc_unpack(k);
331
332         percpu_down_read(&c->mark_lock);
333         ca      = bch_dev_bkey_exists(c, iter->pos.inode);
334         g       = bucket(ca, iter->pos.offset);
335         m       = READ_ONCE(g->mark);
336         new_u   = alloc_mem_to_key(iter, g, m);
337         percpu_up_read(&c->mark_lock);
338
339         if (!bkey_alloc_unpacked_cmp(old_u, new_u))
340                 return 0;
341
342         bch2_alloc_pack(c, &a, new_u);
343         ret   = bch2_trans_update(trans, iter, &a.k,
344                                   BTREE_TRIGGER_NORUN) ?:
345                 bch2_trans_commit(trans, NULL, NULL,
346                                 BTREE_INSERT_NOFAIL|flags);
347 err:
348         if (ret == -EINTR)
349                 goto retry;
350         return ret;
351 }
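
/*
 * The retry-on-EINTR loop above is the standard bcachefs transaction idiom:
 * bch2_trans_begin() resets the transaction state, and a lock restart
 * surfaces as -EINTR from the iterator or the commit, sending us back to
 * retry.  A minimal sketch of the same shape (hypothetical do_update()
 * helper, other error handling elided):
 *
 *	retry:
 *		bch2_trans_begin(trans);
 *		ret = do_update(trans) ?:
 *		      bch2_trans_commit(trans, NULL, NULL, 0);
 *		if (ret == -EINTR)
 *			goto retry;
 */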
352
353 int bch2_alloc_write(struct bch_fs *c, unsigned flags)
354 {
355         struct btree_trans trans;
356         struct btree_iter iter;
357         struct bch_dev *ca;
358         unsigned i;
359         int ret = 0;
360
361         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
362         bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc, POS_MIN,
363                              BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
364
365         for_each_member_device(ca, c, i) {
366                 bch2_btree_iter_set_pos(&iter,
367                         POS(ca->dev_idx, ca->mi.first_bucket));
368
369                 while (iter.pos.offset < ca->mi.nbuckets) {
370                         bch2_trans_cond_resched(&trans);
371
372                         ret = bch2_alloc_write_key(&trans, &iter, flags);
373                         if (ret) {
374                                 percpu_ref_put(&ca->ref);
375                                 goto err;
376                         }
377                         bch2_btree_iter_advance(&iter);
378                 }
379         }
380 err:
381         bch2_trans_iter_exit(&trans, &iter);
382         bch2_trans_exit(&trans);
383         return ret;
384 }
385
386 /* Bucket IO clocks: */
387
388 int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
389                               size_t bucket_nr, int rw)
390 {
391         struct bch_fs *c = trans->c;
392         struct bch_dev *ca = bch_dev_bkey_exists(c, dev);
393         struct btree_iter iter;
394         struct bucket *g;
395         struct bkey_alloc_buf *a;
396         struct bkey_alloc_unpacked u;
397         u64 *time, now;
398         int ret = 0;
399
400         bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(dev, bucket_nr),
401                              BTREE_ITER_CACHED|
402                              BTREE_ITER_CACHED_NOFILL|
403                              BTREE_ITER_INTENT);
404         ret = bch2_btree_iter_traverse(&iter);
405         if (ret)
406                 goto out;
407
408         a = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));
409         ret = PTR_ERR_OR_ZERO(a);
410         if (ret)
411                 goto out;
412
413         percpu_down_read(&c->mark_lock);
414         g = bucket(ca, bucket_nr);
415         u = alloc_mem_to_key(&iter, g, READ_ONCE(g->mark));
416         percpu_up_read(&c->mark_lock);
417
418         time = rw == READ ? &u.read_time : &u.write_time;
419         now = atomic64_read(&c->io_clock[rw].now);
420         if (*time == now)
421                 goto out;
422
423         *time = now;
424
425         bch2_alloc_pack(c, a, u);
426         ret   = bch2_trans_update(trans, &iter, &a->k, 0) ?:
427                 bch2_trans_commit(trans, NULL, NULL, 0);
428 out:
429         bch2_trans_iter_exit(trans, &iter);
430         return ret;
431 }
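
/*
 * Note: bailing out when *time == now above means a bucket's io_time is only
 * rewritten (and a btree update issued) when the io clock has actually
 * advanced since the last reset, so frequently accessed buckets don't
 * generate an alloc key update on every IO.
 */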
432
433 /* Background allocator thread: */
434
435 /*
436  * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
437  * (marking them as invalidated on disk), then optionally issues discard
438  * commands to the newly free buckets, then puts them on the various freelists.
439  */
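
/*
 * Sketch of the pipeline implemented below (names are the functions and
 * per-device fields in this file):
 *
 *	find_reclaimable_buckets()  ->  ca->alloc_heap
 *	bch2_invalidate_buckets()   ->  bumps gens, rewrites alloc keys,
 *					pushes buckets onto ca->free_inc
 *	discard_one_bucket()        ->  optional blkdev_issue_discard()
 *	push_invalidated_bucket()   ->  ca->free[RESERVE_*] freelists
 */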
440
441 static bool bch2_can_invalidate_bucket(struct bch_dev *ca, size_t b,
442                                        struct bucket_mark m)
443 {
444         u8 gc_gen;
445
446         if (!is_available_bucket(m))
447                 return false;
448
449         if (m.owned_by_allocator)
450                 return false;
451
452         if (ca->buckets_nouse &&
453             test_bit(b, ca->buckets_nouse))
454                 return false;
455
456         gc_gen = bucket_gc_gen(bucket(ca, b));
457
458         ca->inc_gen_needs_gc            += gc_gen >= BUCKET_GC_GEN_MAX / 2;
459         ca->inc_gen_really_needs_gc     += gc_gen >= BUCKET_GC_GEN_MAX;
460
461         return gc_gen < BUCKET_GC_GEN_MAX;
462 }
463
464 /*
465  * Determines what order we're going to reuse buckets in: smallest
466  * bucket_sort_key() first.
467  */
468
469 static unsigned bucket_sort_key(struct bucket *g, struct bucket_mark m,
470                                 u64 now, u64 last_seq_ondisk)
471 {
472         unsigned used = bucket_sectors_used(m);
473
474         if (used) {
475                 /*
476                  * Prefer to keep buckets that have been read more recently, and
477                  * buckets that have more data in them:
478                  */
479                 u64 last_read = max_t(s64, 0, now - g->io_time[READ]);
480                 u32 last_read_scaled = min_t(u64, U32_MAX, div_u64(last_read, used));
481
482                 return -last_read_scaled;
483         } else {
484                 /*
485                  * Prefer to use buckets with smaller gc_gen so that we don't
486                  * have to walk the btree and recalculate oldest_gen - but shift
487                  * off the low bits so that buckets will still have equal sort
488                  * keys when there's only a small difference, so that we can
489                  * keep sequential buckets together:
490                  */
491                 return  (bucket_needs_journal_commit(m, last_seq_ondisk) << 4)|
492                         (bucket_gc_gen(g) >> 4);
493         }
494 }
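
/*
 * Note on the empty-bucket branch above: shifting bucket_gc_gen() right by 4
 * means two free buckets whose gc gens differ by less than 16 (and which
 * agree on bucket_needs_journal_commit()) get identical sort keys, so runs of
 * adjacent buckets stay together and can be coalesced into a single
 * alloc_heap_entry by find_reclaimable_buckets_lru().
 */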
495
496 static inline int bucket_alloc_cmp(alloc_heap *h,
497                                    struct alloc_heap_entry l,
498                                    struct alloc_heap_entry r)
499 {
500         return  cmp_int(l.key, r.key) ?:
501                 cmp_int(r.nr, l.nr) ?:
502                 cmp_int(l.bucket, r.bucket);
503 }
504
505 static inline int bucket_idx_cmp(const void *_l, const void *_r)
506 {
507         const struct alloc_heap_entry *l = _l, *r = _r;
508
509         return cmp_int(l->bucket, r->bucket);
510 }
511
512 static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
513 {
514         struct bucket_array *buckets;
515         struct alloc_heap_entry e = { 0 };
516         u64 now, last_seq_ondisk;
517         size_t b, i, nr = 0;
518
519         down_read(&ca->bucket_lock);
520
521         buckets = bucket_array(ca);
522         ca->alloc_heap.used = 0;
523         now = atomic64_read(&c->io_clock[READ].now);
524         last_seq_ondisk = c->journal.last_seq_ondisk;
525
526         /*
527          * Find buckets with lowest read priority, by building a maxheap sorted
528          * by read priority and repeatedly replacing the maximum element until
529          * all buckets have been visited.
530          */
531         for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
532                 struct bucket *g = &buckets->b[b];
533                 struct bucket_mark m = READ_ONCE(g->mark);
534                 unsigned key = bucket_sort_key(g, m, now, last_seq_ondisk);
535
536                 cond_resched();
537
538                 if (!bch2_can_invalidate_bucket(ca, b, m))
539                         continue;
540
541                 if (e.nr && e.bucket + e.nr == b && e.key == key) {
542                         e.nr++;
543                 } else {
544                         if (e.nr)
545                                 heap_add_or_replace(&ca->alloc_heap, e,
546                                         -bucket_alloc_cmp, NULL);
547
548                         e = (struct alloc_heap_entry) {
549                                 .bucket = b,
550                                 .nr     = 1,
551                                 .key    = key,
552                         };
553                 }
554         }
555
556         if (e.nr)
557                 heap_add_or_replace(&ca->alloc_heap, e,
558                                 -bucket_alloc_cmp, NULL);
559
560         for (i = 0; i < ca->alloc_heap.used; i++)
561                 nr += ca->alloc_heap.data[i].nr;
562
563         while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
564                 nr -= ca->alloc_heap.data[0].nr;
565                 heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL);
566         }
567
568         up_read(&ca->bucket_lock);
569 }
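
/*
 * Note on the trimming loop above: each alloc_heap_entry describes a run of
 * adjacent buckets [bucket, bucket + nr) sharing a sort key, and the final
 * while loop keeps popping entries until the number of queued buckets, not
 * counting the root entry, drops below ALLOC_SCAN_BATCH(ca), so one scan
 * doesn't queue up far more invalidation work than a single batch.
 */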
570
571 static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
572 {
573         struct bucket_array *buckets = bucket_array(ca);
574         struct bucket_mark m;
575         size_t b, start;
576
577         if (ca->fifo_last_bucket <  ca->mi.first_bucket ||
578             ca->fifo_last_bucket >= ca->mi.nbuckets)
579                 ca->fifo_last_bucket = ca->mi.first_bucket;
580
581         start = ca->fifo_last_bucket;
582
583         do {
584                 ca->fifo_last_bucket++;
585                 if (ca->fifo_last_bucket == ca->mi.nbuckets)
586                         ca->fifo_last_bucket = ca->mi.first_bucket;
587
588                 b = ca->fifo_last_bucket;
589                 m = READ_ONCE(buckets->b[b].mark);
590
591                 if (bch2_can_invalidate_bucket(ca, b, m)) {
592                         struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
593
594                         heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
595                         if (heap_full(&ca->alloc_heap))
596                                 break;
597                 }
598
599                 cond_resched();
600         } while (ca->fifo_last_bucket != start);
601 }
602
603 static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
604 {
605         struct bucket_array *buckets = bucket_array(ca);
606         struct bucket_mark m;
607         size_t checked, i;
608
609         for (checked = 0;
610              checked < ca->mi.nbuckets / 2;
611              checked++) {
612                 size_t b = bch2_rand_range(ca->mi.nbuckets -
613                                            ca->mi.first_bucket) +
614                         ca->mi.first_bucket;
615
616                 m = READ_ONCE(buckets->b[b].mark);
617
618                 if (bch2_can_invalidate_bucket(ca, b, m)) {
619                         struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
620
621                         heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
622                         if (heap_full(&ca->alloc_heap))
623                                 break;
624                 }
625
626                 cond_resched();
627         }
628
629         sort(ca->alloc_heap.data,
630              ca->alloc_heap.used,
631              sizeof(ca->alloc_heap.data[0]),
632              bucket_idx_cmp, NULL);
633
634         /* remove duplicates: */
635         for (i = 0; i + 1 < ca->alloc_heap.used; i++)
636                 if (ca->alloc_heap.data[i].bucket ==
637                     ca->alloc_heap.data[i + 1].bucket)
638                         ca->alloc_heap.data[i].nr = 0;
639 }
640
641 static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
642 {
643         size_t i, nr = 0;
644
645         ca->inc_gen_needs_gc                    = 0;
646         ca->inc_gen_really_needs_gc             = 0;
647
648         switch (ca->mi.replacement) {
649         case BCH_CACHE_REPLACEMENT_lru:
650                 find_reclaimable_buckets_lru(c, ca);
651                 break;
652         case BCH_CACHE_REPLACEMENT_fifo:
653                 find_reclaimable_buckets_fifo(c, ca);
654                 break;
655         case BCH_CACHE_REPLACEMENT_random:
656                 find_reclaimable_buckets_random(c, ca);
657                 break;
658         }
659
660         heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL);
661
662         for (i = 0; i < ca->alloc_heap.used; i++)
663                 nr += ca->alloc_heap.data[i].nr;
664
665         return nr;
666 }
667
668 /*
669  * returns sequence number of most recent journal entry that updated this
670  * bucket:
671  */
672 static u64 bucket_journal_seq(struct bch_fs *c, struct bucket_mark m)
673 {
674         if (m.journal_seq_valid) {
675                 u64 journal_seq = atomic64_read(&c->journal.seq);
676                 u64 bucket_seq  = journal_seq;
677
678                 bucket_seq &= ~((u64) U16_MAX);
679                 bucket_seq |= m.journal_seq;
680
681                 if (bucket_seq > journal_seq)
682                         bucket_seq -= 1 << 16;
683
684                 return bucket_seq;
685         } else {
686                 return 0;
687         }
688 }
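
/*
 * Worked example of the reconstruction above (illustrative numbers): bucket
 * marks only store the low 16 bits of the journal sequence number.  With the
 * current journal seq at 0x12345 and m.journal_seq == 0xf000:
 *
 *	bucket_seq = (0x12345 & ~0xffff) | 0xf000 = 0x1f000
 *
 * 0x1f000 > 0x12345, so we subtract 1 << 16 and return 0xf000, the most
 * recent sequence number with those low bits that isn't in the future.
 */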
689
690 static int bucket_invalidate_btree(struct btree_trans *trans,
691                                    struct bch_dev *ca, u64 b)
692 {
693         struct bch_fs *c = trans->c;
694         struct bkey_alloc_buf *a;
695         struct bkey_alloc_unpacked u;
696         struct bucket *g;
697         struct bucket_mark m;
698         struct btree_iter iter;
699         int ret;
700
701         bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
702                              POS(ca->dev_idx, b),
703                              BTREE_ITER_CACHED|
704                              BTREE_ITER_CACHED_NOFILL|
705                              BTREE_ITER_INTENT);
706
707         a = bch2_trans_kmalloc(trans, sizeof(*a));
708         ret = PTR_ERR_OR_ZERO(a);
709         if (ret)
710                 goto err;
711
712         ret = bch2_btree_iter_traverse(&iter);
713         if (ret)
714                 goto err;
715
716         percpu_down_read(&c->mark_lock);
717         g = bucket(ca, b);
718         m = READ_ONCE(g->mark);
719         u = alloc_mem_to_key(&iter, g, m);
720         percpu_up_read(&c->mark_lock);
721
722         u.gen++;
723         u.data_type     = 0;
724         u.dirty_sectors = 0;
725         u.cached_sectors = 0;
726         u.read_time     = atomic64_read(&c->io_clock[READ].now);
727         u.write_time    = atomic64_read(&c->io_clock[WRITE].now);
728
729         bch2_alloc_pack(c, a, u);
730         ret = bch2_trans_update(trans, &iter, &a->k,
731                                 BTREE_TRIGGER_BUCKET_INVALIDATE);
732 err:
733         bch2_trans_iter_exit(trans, &iter);
734         return ret;
735 }
736
737 static int bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
738                                       u64 *journal_seq, unsigned flags)
739 {
740         struct bucket *g;
741         struct bucket_mark m;
742         size_t b;
743         int ret = 0;
744
745         BUG_ON(!ca->alloc_heap.used ||
746                !ca->alloc_heap.data[0].nr);
747         b = ca->alloc_heap.data[0].bucket;
748
749         /* first, put on free_inc and mark as owned by allocator: */
750         percpu_down_read(&c->mark_lock);
751         g = bucket(ca, b);
752         m = READ_ONCE(g->mark);
753
754         BUG_ON(m.dirty_sectors);
755
756         bch2_mark_alloc_bucket(c, ca, b, true);
757
758         spin_lock(&c->freelist_lock);
759         verify_not_on_freelist(c, ca, b);
760         BUG_ON(!fifo_push(&ca->free_inc, b));
761         spin_unlock(&c->freelist_lock);
762
763         /*
764          * If we're not invalidating cached data, we only increment the bucket
765          * gen in memory here; the incremented gen will be updated in the btree
766          * by bch2_trans_mark_pointer():
767          */
768         if (!m.cached_sectors &&
769             !bucket_needs_journal_commit(m, c->journal.last_seq_ondisk)) {
770                 BUG_ON(m.data_type);
771                 bucket_cmpxchg(g, m, m.gen++);
772                 percpu_up_read(&c->mark_lock);
773                 goto out;
774         }
775
776         percpu_up_read(&c->mark_lock);
777
778         /*
779          * If the read-only path is trying to shut down, we can't be generating
780          * new btree updates:
781          */
782         if (test_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags)) {
783                 ret = 1;
784                 goto out;
785         }
786
787         ret = bch2_trans_do(c, NULL, journal_seq,
788                             BTREE_INSERT_NOCHECK_RW|
789                             BTREE_INSERT_NOFAIL|
790                             BTREE_INSERT_JOURNAL_RESERVED|
791                             flags,
792                             bucket_invalidate_btree(&trans, ca, b));
793 out:
794         if (!ret) {
795                 /* remove from alloc_heap: */
796                 struct alloc_heap_entry e, *top = ca->alloc_heap.data;
797
798                 top->bucket++;
799                 top->nr--;
800
801                 if (!top->nr)
802                         heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
803
804                 /*
805                  * Make sure we flush the last journal entry that updated this
806                  * bucket (i.e. deleting the last reference) before writing to
807                  * this bucket again:
808                  */
809                 *journal_seq = max(*journal_seq, bucket_journal_seq(c, m));
810         } else {
811                 size_t b2;
812
813                 /* remove from free_inc: */
814                 percpu_down_read(&c->mark_lock);
815                 spin_lock(&c->freelist_lock);
816
817                 bch2_mark_alloc_bucket(c, ca, b, false);
818
819                 BUG_ON(!fifo_pop_back(&ca->free_inc, b2));
820                 BUG_ON(b != b2);
821
822                 spin_unlock(&c->freelist_lock);
823                 percpu_up_read(&c->mark_lock);
824         }
825
826         return ret < 0 ? ret : 0;
827 }
828
829 /*
830  * Pull buckets off ca->alloc_heap, invalidate them, move them to ca->free_inc:
831  */
832 static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
833 {
834         u64 journal_seq = 0;
835         int ret = 0;
836
837         /* Only use nowait if we've already invalidated at least one bucket: */
838         while (!ret &&
839                !fifo_full(&ca->free_inc) &&
840                ca->alloc_heap.used) {
841                 if (kthread_should_stop()) {
842                         ret = 1;
843                         break;
844                 }
845
846                 ret = bch2_invalidate_one_bucket(c, ca, &journal_seq,
847                                 (!fifo_empty(&ca->free_inc)
848                                  ? BTREE_INSERT_NOWAIT : 0));
849                 /*
850                  * We only want to batch up invalidates when they're going to
851                  * require flushing the journal:
852                  */
853                 if (!journal_seq)
854                         break;
855         }
856
857         /* If we used NOWAIT, don't return the error: */
858         if (!fifo_empty(&ca->free_inc))
859                 ret = 0;
860         if (ret) {
861                 bch_err(ca, "error invalidating buckets: %i", ret);
862                 return ret;
863         }
864
865         if (journal_seq)
866                 ret = bch2_journal_flush_seq(&c->journal, journal_seq);
867         if (ret) {
868                 bch_err(ca, "journal error: %i", ret);
869                 return ret;
870         }
871
872         return 0;
873 }
874
875 static void alloc_thread_set_state(struct bch_dev *ca, unsigned new_state)
876 {
877         if (ca->allocator_state != new_state) {
878                 ca->allocator_state = new_state;
879                 closure_wake_up(&ca->fs->freelist_wait);
880         }
881 }
882
883 static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
884 {
885         unsigned i;
886         int ret = 0;
887
888         spin_lock(&c->freelist_lock);
889         for (i = 0; i < RESERVE_NR; i++) {
890                 /*
891                  * Don't strand buckets on the copygc freelist until
892                  * after recovery is finished:
893                  */
894                 if (i == RESERVE_MOVINGGC &&
895                     !test_bit(BCH_FS_STARTED, &c->flags))
896                         continue;
897
898                 if (fifo_push(&ca->free[i], b)) {
899                         fifo_pop(&ca->free_inc, b);
900                         ret = 1;
901                         break;
902                 }
903         }
904         spin_unlock(&c->freelist_lock);
905
906         ca->allocator_state = ret
907                 ? ALLOCATOR_running
908                 : ALLOCATOR_blocked_full;
909         closure_wake_up(&c->freelist_wait);
910         return ret;
911 }
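
/*
 * Note on the loop above: reserves are tried in RESERVE_* order, so a freed
 * bucket lands on the first freelist with space (skipping the copygc reserve
 * until the filesystem has finished starting up).  If every freelist is full,
 * allocator_state is set to ALLOCATOR_blocked_full and the return value of 0
 * makes the kthread_wait_freezable() call in bch2_allocator_thread() keep
 * waiting and re-evaluating until space opens up.
 */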
912
913 static void discard_one_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
914 {
915         if (ca->mi.discard &&
916             blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
917                 blkdev_issue_discard(ca->disk_sb.bdev, bucket_to_sector(ca, b),
918                                      ca->mi.bucket_size, GFP_NOFS, 0);
919 }
920
921 static bool allocator_thread_running(struct bch_dev *ca)
922 {
923         unsigned state = ca->mi.state == BCH_MEMBER_STATE_rw &&
924                 test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags)
925                 ? ALLOCATOR_running
926                 : ALLOCATOR_stopped;
927         alloc_thread_set_state(ca, state);
928         return state == ALLOCATOR_running;
929 }
930
931 static int buckets_available(struct bch_dev *ca, unsigned long gc_count)
932 {
933         s64 available = dev_buckets_reclaimable(ca) -
934                 (gc_count == ca->fs->gc_count ? ca->inc_gen_really_needs_gc : 0);
935         bool ret = available > 0;
936
937         alloc_thread_set_state(ca, ret
938                                ? ALLOCATOR_running
939                                : ALLOCATOR_blocked);
940         return ret;
941 }
942
943 /**
944  * bch2_allocator_thread - move buckets from free_inc to the reserves
945  *
946  * find_reclaimable_buckets() fills ca->alloc_heap with candidate buckets;
947  * bch2_invalidate_buckets() invalidates them and pushes them onto the
948  * free_inc FIFO, and from there they are optionally discarded and moved
949  * onto the per-reserve freelists, which are depleted by bucket allocation.
950  */
951 static int bch2_allocator_thread(void *arg)
952 {
953         struct bch_dev *ca = arg;
954         struct bch_fs *c = ca->fs;
955         unsigned long gc_count = c->gc_count;
956         size_t nr;
957         int ret;
958
959         set_freezable();
960
961         while (1) {
962                 ret = kthread_wait_freezable(allocator_thread_running(ca));
963                 if (ret)
964                         goto stop;
965
966                 while (!ca->alloc_heap.used) {
967                         cond_resched();
968
969                         ret = kthread_wait_freezable(buckets_available(ca, gc_count));
970                         if (ret)
971                                 goto stop;
972
973                         gc_count = c->gc_count;
974                         nr = find_reclaimable_buckets(c, ca);
975
976                         trace_alloc_scan(ca, nr, ca->inc_gen_needs_gc,
977                                          ca->inc_gen_really_needs_gc);
978
979                         if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) ||
980                              ca->inc_gen_really_needs_gc) &&
981                             c->gc_thread) {
982                                 atomic_inc(&c->kick_gc);
983                                 wake_up_process(c->gc_thread);
984                         }
985                 }
986
987                 ret = bch2_invalidate_buckets(c, ca);
988                 if (ret)
989                         goto stop;
990
991                 while (!fifo_empty(&ca->free_inc)) {
992                         u64 b = fifo_peek(&ca->free_inc);
993
994                         discard_one_bucket(c, ca, b);
995
996                         ret = kthread_wait_freezable(push_invalidated_bucket(c, ca, b));
997                         if (ret)
998                                 goto stop;
999                 }
1000         }
1001 stop:
1002         alloc_thread_set_state(ca, ALLOCATOR_stopped);
1003         return 0;
1004 }
1005
1006 /* Startup/shutdown (ro/rw): */
1007
1008 void bch2_recalc_capacity(struct bch_fs *c)
1009 {
1010         struct bch_dev *ca;
1011         u64 capacity = 0, reserved_sectors = 0, gc_reserve;
1012         unsigned bucket_size_max = 0;
1013         unsigned long ra_pages = 0;
1014         unsigned i, j;
1015
1016         lockdep_assert_held(&c->state_lock);
1017
1018         for_each_online_member(ca, c, i) {
1019                 struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_bdi;
1020
1021                 ra_pages += bdi->ra_pages;
1022         }
1023
1024         bch2_set_ra_pages(c, ra_pages);
1025
1026         for_each_rw_member(ca, c, i) {
1027                 u64 dev_reserve = 0;
1028
1029                 /*
1030                  * We need to reserve buckets (from the number
1031                  * of currently available buckets) against
1032                  * foreground writes, mainly so that copygc can
1033                  * make forward progress.
1034                  *
1035                  * We need enough to refill the various reserves
1036                  * from scratch - copygc will use its entire
1037                  * reserve all at once, then run again when
1038                  * its reserve is refilled (from the formerly
1039                  * available buckets).
1040                  *
1041                  * This reserve is just used when considering if
1042                  * allocations for foreground writes must wait -
1043                  * not -ENOSPC calculations.
1044                  */
1045                 for (j = 0; j < RESERVE_NONE; j++)
1046                         dev_reserve += ca->free[j].size;
1047
1048                 dev_reserve += 1;       /* btree write point */
1049                 dev_reserve += 1;       /* copygc write point */
1050                 dev_reserve += 1;       /* rebalance write point */
1051
1052                 dev_reserve *= ca->mi.bucket_size;
1053
1054                 capacity += bucket_to_sector(ca, ca->mi.nbuckets -
1055                                              ca->mi.first_bucket);
1056
1057                 reserved_sectors += dev_reserve * 2;
1058
1059                 bucket_size_max = max_t(unsigned, bucket_size_max,
1060                                         ca->mi.bucket_size);
1061         }
1062
1063         gc_reserve = c->opts.gc_reserve_bytes
1064                 ? c->opts.gc_reserve_bytes >> 9
1065                 : div64_u64(capacity * c->opts.gc_reserve_percent, 100);
1066
1067         reserved_sectors = max(gc_reserve, reserved_sectors);
1068
1069         reserved_sectors = min(reserved_sectors, capacity);
1070
1071         c->capacity = capacity - reserved_sectors;
1072
1073         c->bucket_size_max = bucket_size_max;
1074
1075                 /* Wake up in case someone was waiting for buckets */
1076         closure_wake_up(&c->freelist_wait);
1077 }
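
/*
 * Illustrative example of the reserve math above (made-up numbers): a device
 * with free[] reserve sizes totalling 50 buckets and 1024-sector buckets
 * contributes dev_reserve = (50 + 3) * 1024 = 54272 sectors, counted twice in
 * reserved_sectors (108544), while capacity grows by
 * (nbuckets - first_bucket) * 1024.  reserved_sectors is then raised to
 * gc_reserve if that is larger, clamped to capacity, and the usable
 * c->capacity is what remains.
 */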
1078
1079 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
1080 {
1081         struct open_bucket *ob;
1082         bool ret = false;
1083
1084         for (ob = c->open_buckets;
1085              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1086              ob++) {
1087                 spin_lock(&ob->lock);
1088                 if (ob->valid && !ob->on_partial_list &&
1089                     ob->ptr.dev == ca->dev_idx)
1090                         ret = true;
1091                 spin_unlock(&ob->lock);
1092         }
1093
1094         return ret;
1095 }
1096
1097 /* device goes ro: */
1098 void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
1099 {
1100         unsigned i;
1101
1102         BUG_ON(ca->alloc_thread);
1103
1104         /* First, remove device from allocation groups: */
1105
1106         for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1107                 clear_bit(ca->dev_idx, c->rw_devs[i].d);
1108
1109         /*
1110          * Capacity is calculated based on the devices in allocation groups:
1111          */
1112         bch2_recalc_capacity(c);
1113
1114         /* Next, close write points that point to this device... */
1115         for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
1116                 bch2_writepoint_stop(c, ca, &c->write_points[i]);
1117
1118         bch2_writepoint_stop(c, ca, &c->copygc_write_point);
1119         bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
1120         bch2_writepoint_stop(c, ca, &c->btree_write_point);
1121
1122         mutex_lock(&c->btree_reserve_cache_lock);
1123         while (c->btree_reserve_cache_nr) {
1124                 struct btree_alloc *a =
1125                         &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
1126
1127                 bch2_open_buckets_put(c, &a->ob);
1128         }
1129         mutex_unlock(&c->btree_reserve_cache_lock);
1130
1131         while (1) {
1132                 struct open_bucket *ob;
1133
1134                 spin_lock(&c->freelist_lock);
1135                 if (!ca->open_buckets_partial_nr) {
1136                         spin_unlock(&c->freelist_lock);
1137                         break;
1138                 }
1139                 ob = c->open_buckets +
1140                         ca->open_buckets_partial[--ca->open_buckets_partial_nr];
1141                 ob->on_partial_list = false;
1142                 spin_unlock(&c->freelist_lock);
1143
1144                 bch2_open_bucket_put(c, ob);
1145         }
1146
1147         bch2_ec_stop_dev(c, ca);
1148
1149         /*
1150          * Wake up threads that were blocked on allocation, so they can notice
1151          * the device can no longer be removed and the capacity has changed:
1152          */
1153         closure_wake_up(&c->freelist_wait);
1154
1155         /*
1156          * journal_res_get() can block waiting for free space in the journal -
1157          * it needs to notice there may not be devices to allocate from anymore:
1158          */
1159         wake_up(&c->journal.wait);
1160
1161         /* Now wait for any in flight writes: */
1162
1163         closure_wait_event(&c->open_buckets_wait,
1164                            !bch2_dev_has_open_write_point(c, ca));
1165 }
1166
1167 /* device goes rw: */
1168 void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
1169 {
1170         unsigned i;
1171
1172         for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1173                 if (ca->mi.data_allowed & (1 << i))
1174                         set_bit(ca->dev_idx, c->rw_devs[i].d);
1175 }
1176
1177 void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca)
1178 {
1179         if (ca->alloc_thread)
1180                 closure_wait_event(&c->freelist_wait,
1181                                    ca->allocator_state != ALLOCATOR_running);
1182 }
1183
1184 /* stop allocator thread: */
1185 void bch2_dev_allocator_stop(struct bch_dev *ca)
1186 {
1187         struct task_struct *p;
1188
1189         p = rcu_dereference_protected(ca->alloc_thread, 1);
1190         ca->alloc_thread = NULL;
1191
1192         /*
1193          * We need an rcu barrier between setting ca->alloc_thread = NULL and
1194          * the thread shutting down to avoid bch2_wake_allocator() racing:
1195          *
1196          * XXX: it would be better to have the rcu barrier be asynchronous
1197          * instead of blocking us here
1198          */
1199         synchronize_rcu();
1200
1201         if (p) {
1202                 kthread_stop(p);
1203                 put_task_struct(p);
1204         }
1205 }
1206
1207 /* start allocator thread: */
1208 int bch2_dev_allocator_start(struct bch_dev *ca)
1209 {
1210         struct task_struct *p;
1211
1212         /*
1213          * allocator thread already started?
1214          */
1215         if (ca->alloc_thread)
1216                 return 0;
1217
1218         p = kthread_create(bch2_allocator_thread, ca,
1219                            "bch-alloc/%s", ca->name);
1220         if (IS_ERR(p)) {
1221                 bch_err(ca->fs, "error creating allocator thread: %li",
1222                         PTR_ERR(p));
1223                 return PTR_ERR(p);
1224         }
1225
1226         get_task_struct(p);
1227         rcu_assign_pointer(ca->alloc_thread, p);
1228         wake_up_process(p);
1229         return 0;
1230 }
1231
1232 void bch2_fs_allocator_background_init(struct bch_fs *c)
1233 {
1234         spin_lock_init(&c->freelist_lock);
1235 }
1236
1237 void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
1238 {
1239         struct open_bucket *ob;
1240
1241         for (ob = c->open_buckets;
1242              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1243              ob++) {
1244                 spin_lock(&ob->lock);
1245                 if (ob->valid && !ob->on_partial_list) {
1246                         pr_buf(out, "%zu ref %u type %s\n",
1247                                ob - c->open_buckets,
1248                                atomic_read(&ob->pin),
1249                                bch2_data_types[ob->type]);
1250                 }
1251                 spin_unlock(&ob->lock);
1252         }
1253
1254 }