1 // SPDX-License-Identifier: GPL-2.0
2 #include "bcachefs.h"
3 #include "alloc_background.h"
4 #include "alloc_foreground.h"
5 #include "btree_cache.h"
6 #include "btree_io.h"
7 #include "btree_key_cache.h"
8 #include "btree_update.h"
9 #include "btree_update_interior.h"
10 #include "btree_gc.h"
11 #include "buckets.h"
12 #include "clock.h"
13 #include "debug.h"
14 #include "ec.h"
15 #include "error.h"
16 #include "recovery.h"
17 #include "varint.h"
18
19 #include <linux/kthread.h>
20 #include <linux/math64.h>
21 #include <linux/random.h>
22 #include <linux/rculist.h>
23 #include <linux/rcupdate.h>
24 #include <linux/sched/task.h>
25 #include <linux/sort.h>
26 #include <trace/events/bcachefs.h>
27
28 const char * const bch2_allocator_states[] = {
29 #define x(n)    #n,
30         ALLOC_THREAD_STATES()
31 #undef x
32         NULL
33 };
34
35 static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
36 #define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
37         BCH_ALLOC_FIELDS_V1()
38 #undef x
39 };
40
41 /* Persistent alloc info: */
42
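/*
 * In the v1 format, a->fields is a bitmask of which BCH_ALLOC_FIELDS_V1()
 * fields are present; the fields that are present are stored back to back,
 * little endian, at the fixed widths given by BCH_ALLOC_V1_FIELD_BYTES.
 */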
43 static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
44                                      const void **p, unsigned field)
45 {
46         unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
47         u64 v;
48
49         if (!(a->fields & (1 << field)))
50                 return 0;
51
52         switch (bytes) {
53         case 1:
54                 v = *((const u8 *) *p);
55                 break;
56         case 2:
57                 v = le16_to_cpup(*p);
58                 break;
59         case 4:
60                 v = le32_to_cpup(*p);
61                 break;
62         case 8:
63                 v = le64_to_cpup(*p);
64                 break;
65         default:
66                 BUG();
67         }
68
69         *p += bytes;
70         return v;
71 }
72
73 static inline void alloc_field_v1_put(struct bkey_i_alloc *a, void **p,
74                                       unsigned field, u64 v)
75 {
76         unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
77
78         if (!v)
79                 return;
80
81         a->v.fields |= 1 << field;
82
83         switch (bytes) {
84         case 1:
85                 *((u8 *) *p) = v;
86                 break;
87         case 2:
88                 *((__le16 *) *p) = cpu_to_le16(v);
89                 break;
90         case 4:
91                 *((__le32 *) *p) = cpu_to_le32(v);
92                 break;
93         case 8:
94                 *((__le64 *) *p) = cpu_to_le64(v);
95                 break;
96         default:
97                 BUG();
98         }
99
100         *p += bytes;
101 }
102
103 static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
104                                  struct bkey_s_c k)
105 {
106         const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
107         const void *d = in->data;
108         unsigned idx = 0;
109
110         out->gen = in->gen;
111
112 #define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
113         BCH_ALLOC_FIELDS_V1()
114 #undef  x
115 }
116
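/*
 * v2/v3 keys store the BCH_ALLOC_FIELDS_V2() fields as consecutive varints:
 * fields past a.v->nr_fields decode as 0, and the v != out->_name check
 * catches decoded values that don't fit in the unpacked struct's field.
 */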
117 static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
118                                 struct bkey_s_c k)
119 {
120         struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
121         const u8 *in = a.v->data;
122         const u8 *end = bkey_val_end(a);
123         unsigned fieldnr = 0;
124         int ret;
125         u64 v;
126
127         out->gen        = a.v->gen;
128         out->oldest_gen = a.v->oldest_gen;
129         out->data_type  = a.v->data_type;
130
131 #define x(_name, _bits)                                                 \
132         if (fieldnr < a.v->nr_fields) {                                 \
133                 ret = bch2_varint_decode_fast(in, end, &v);             \
134                 if (ret < 0)                                            \
135                         return ret;                                     \
136                 in += ret;                                              \
137         } else {                                                        \
138                 v = 0;                                                  \
139         }                                                               \
140         out->_name = v;                                                 \
141         if (v != out->_name)                                            \
142                 return -1;                                              \
143         fieldnr++;
144
145         BCH_ALLOC_FIELDS_V2()
146 #undef  x
147         return 0;
148 }
149
150 static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
151                                 struct bkey_s_c k)
152 {
153         struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
154         const u8 *in = a.v->data;
155         const u8 *end = bkey_val_end(a);
156         unsigned fieldnr = 0;
157         int ret;
158         u64 v;
159
160         out->gen        = a.v->gen;
161         out->oldest_gen = a.v->oldest_gen;
162         out->data_type  = a.v->data_type;
163         out->journal_seq = le64_to_cpu(a.v->journal_seq);
164
165 #define x(_name, _bits)                                                 \
166         if (fieldnr < a.v->nr_fields) {                                 \
167                 ret = bch2_varint_decode_fast(in, end, &v);             \
168                 if (ret < 0)                                            \
169                         return ret;                                     \
170                 in += ret;                                              \
171         } else {                                                        \
172                 v = 0;                                                  \
173         }                                                               \
174         out->_name = v;                                                 \
175         if (v != out->_name)                                            \
176                 return -1;                                              \
177         fieldnr++;
178
179         BCH_ALLOC_FIELDS_V2()
180 #undef  x
181         return 0;
182 }
183
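/*
 * Packing encodes every BCH_ALLOC_FIELDS_V2() field as a varint, then trims
 * the value back to the last nonzero field so that trailing zero fields take
 * no space in the btree.
 */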
184 static void bch2_alloc_pack_v3(struct bkey_alloc_buf *dst,
185                                const struct bkey_alloc_unpacked src)
186 {
187         struct bkey_i_alloc_v3 *a = bkey_alloc_v3_init(&dst->k);
188         unsigned nr_fields = 0, last_nonzero_fieldnr = 0;
189         u8 *out = a->v.data;
190         u8 *end = (void *) &dst[1];
191         u8 *last_nonzero_field = out;
192         unsigned bytes;
193
194         a->k.p          = POS(src.dev, src.bucket);
195         a->v.gen        = src.gen;
196         a->v.oldest_gen = src.oldest_gen;
197         a->v.data_type  = src.data_type;
198         a->v.journal_seq = cpu_to_le64(src.journal_seq);
199
200 #define x(_name, _bits)                                                 \
201         nr_fields++;                                                    \
202                                                                         \
203         if (src._name) {                                                \
204                 out += bch2_varint_encode_fast(out, src._name);         \
205                                                                         \
206                 last_nonzero_field = out;                               \
207                 last_nonzero_fieldnr = nr_fields;                       \
208         } else {                                                        \
209                 *out++ = 0;                                             \
210         }
211
212         BCH_ALLOC_FIELDS_V2()
213 #undef  x
214         BUG_ON(out > end);
215
216         out = last_nonzero_field;
217         a->v.nr_fields = last_nonzero_fieldnr;
218
219         bytes = (u8 *) out - (u8 *) &a->v;
220         set_bkey_val_bytes(&a->k, bytes);
221         memset_u64s_tail(&a->v, 0, bytes);
222 }
223
224 struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
225 {
226         struct bkey_alloc_unpacked ret = {
227                 .dev    = k.k->p.inode,
228                 .bucket = k.k->p.offset,
229                 .gen    = 0,
230         };
231
232         switch (k.k->type) {
233         case KEY_TYPE_alloc:
234                 bch2_alloc_unpack_v1(&ret, k);
235                 break;
236         case KEY_TYPE_alloc_v2:
237                 bch2_alloc_unpack_v2(&ret, k);
238                 break;
239         case KEY_TYPE_alloc_v3:
240                 bch2_alloc_unpack_v3(&ret, k);
241                 break;
242         }
243
244         return ret;
245 }
246
247 void bch2_alloc_pack(struct bch_fs *c,
248                      struct bkey_alloc_buf *dst,
249                      const struct bkey_alloc_unpacked src)
250 {
251         bch2_alloc_pack_v3(dst, src);
252 }
253
254 static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
255 {
256         unsigned i, bytes = offsetof(struct bch_alloc, data);
257
258         for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
259                 if (a->fields & (1 << i))
260                         bytes += BCH_ALLOC_V1_FIELD_BYTES[i];
261
262         return DIV_ROUND_UP(bytes, sizeof(u64));
263 }
264
265 const char *bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k)
266 {
267         struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
268
269         if (k.k->p.inode >= c->sb.nr_devices ||
270             !c->devs[k.k->p.inode])
271                 return "invalid device";
272
273         /* allow for unknown fields */
274         if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v))
275                 return "incorrect value size";
276
277         return NULL;
278 }
279
280 const char *bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k)
281 {
282         struct bkey_alloc_unpacked u;
283
284         if (k.k->p.inode >= c->sb.nr_devices ||
285             !c->devs[k.k->p.inode])
286                 return "invalid device";
287
288         if (bch2_alloc_unpack_v2(&u, k))
289                 return "unpack error";
290
291         return NULL;
292 }
293
294 const char *bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k)
295 {
296         struct bkey_alloc_unpacked u;
297
298         if (k.k->p.inode >= c->sb.nr_devices ||
299             !c->devs[k.k->p.inode])
300                 return "invalid device";
301
302         if (bch2_alloc_unpack_v3(&u, k))
303                 return "unpack error";
304
305         return NULL;
306 }
307
308 void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
309                            struct bkey_s_c k)
310 {
311         struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
312
313         pr_buf(out, "gen %u oldest_gen %u data_type %s journal_seq %llu",
314                u.gen, u.oldest_gen, bch2_data_types[u.data_type],
315                u.journal_seq);
316 #define x(_name, ...)   pr_buf(out, " " #_name " %llu", (u64) u._name);
317         BCH_ALLOC_FIELDS_V2()
318 #undef  x
319 }
320
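/*
 * Called for each alloc key at startup: unpacks the key and initializes the
 * corresponding in-memory bucket (gen, data type, sector counts, stripe info
 * and IO times).
 */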
321 static int bch2_alloc_read_fn(struct btree_trans *trans, struct bkey_s_c k)
322 {
323         struct bch_fs *c = trans->c;
324         struct bch_dev *ca;
325         struct bucket *g;
326         struct bkey_alloc_unpacked u;
327
328         if (!bkey_is_alloc(k.k))
329                 return 0;
330
331         ca = bch_dev_bkey_exists(c, k.k->p.inode);
332         g = bucket(ca, k.k->p.offset);
333         u = bch2_alloc_unpack(k);
334
335         g->_mark.gen            = u.gen;
336         g->_mark.data_type      = u.data_type;
337         g->_mark.dirty_sectors  = u.dirty_sectors;
338         g->_mark.cached_sectors = u.cached_sectors;
339         g->_mark.stripe         = u.stripe != 0;
340         g->stripe               = u.stripe;
341         g->stripe_redundancy    = u.stripe_redundancy;
342         g->io_time[READ]        = u.read_time;
343         g->io_time[WRITE]       = u.write_time;
344         g->oldest_gen           = u.oldest_gen;
345         g->gen_valid            = 1;
346
347         return 0;
348 }
349
350 int bch2_alloc_read(struct bch_fs *c)
351 {
352         struct btree_trans trans;
353         int ret;
354
355         bch2_trans_init(&trans, c, 0, 0);
356         down_read(&c->gc_lock);
357         ret = bch2_btree_and_journal_walk(&trans, BTREE_ID_alloc, bch2_alloc_read_fn);
358         up_read(&c->gc_lock);
359         bch2_trans_exit(&trans);
360         if (ret) {
361                 bch_err(c, "error reading alloc info: %i", ret);
362                 return ret;
363         }
364
365         return 0;
366 }
367
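/*
 * Writes a single bucket's in-memory state back to the alloc btree: any
 * matching key cache entry is flushed first, and we only commit if the
 * in-memory state differs from what's already on disk.
 */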
368 static int bch2_alloc_write_key(struct btree_trans *trans,
369                                 struct btree_iter *iter,
370                                 unsigned flags)
371 {
372         struct bch_fs *c = trans->c;
373         struct bkey_s_c k;
374         struct bch_dev *ca;
375         struct bucket *g;
376         struct bucket_mark m;
377         struct bkey_alloc_unpacked old_u, new_u;
378         struct bkey_alloc_buf a;
379         int ret;
380 retry:
381         bch2_trans_begin(trans);
382
383         ret = bch2_btree_key_cache_flush(trans,
384                         BTREE_ID_alloc, iter->pos);
385         if (ret)
386                 goto err;
387
388         k = bch2_btree_iter_peek_slot(iter);
389         ret = bkey_err(k);
390         if (ret)
391                 goto err;
392
393         old_u = bch2_alloc_unpack(k);
394
395         percpu_down_read(&c->mark_lock);
396         ca      = bch_dev_bkey_exists(c, iter->pos.inode);
397         g       = bucket(ca, iter->pos.offset);
398         m       = READ_ONCE(g->mark);
399         new_u   = alloc_mem_to_key(iter, g, m);
400         percpu_up_read(&c->mark_lock);
401
402         if (!bkey_alloc_unpacked_cmp(old_u, new_u))
403                 return 0;
404
405         bch2_alloc_pack(c, &a, new_u);
406         ret   = bch2_trans_update(trans, iter, &a.k,
407                                   BTREE_TRIGGER_NORUN) ?:
408                 bch2_trans_commit(trans, NULL, NULL,
409                                 BTREE_INSERT_NOFAIL|flags);
410 err:
411         if (ret == -EINTR)
412                 goto retry;
413         return ret;
414 }
415
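/*
 * Walks every bucket on every member device and writes out any alloc keys
 * whose in-memory state has changed.
 */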
416 int bch2_alloc_write(struct bch_fs *c, unsigned flags)
417 {
418         struct btree_trans trans;
419         struct btree_iter iter;
420         struct bch_dev *ca;
421         unsigned i;
422         int ret = 0;
423
424         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
425         bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc, POS_MIN,
426                              BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
427
428         for_each_member_device(ca, c, i) {
429                 bch2_btree_iter_set_pos(&iter,
430                         POS(ca->dev_idx, ca->mi.first_bucket));
431
432                 while (iter.pos.offset < ca->mi.nbuckets) {
433                         ret = bch2_alloc_write_key(&trans, &iter, flags);
434                         if (ret) {
435                                 percpu_ref_put(&ca->ref);
436                                 goto err;
437                         }
438                         bch2_btree_iter_advance(&iter);
439                 }
440         }
441 err:
442         bch2_trans_iter_exit(&trans, &iter);
443         bch2_trans_exit(&trans);
444         return ret;
445 }
446
447 /* Bucket IO clocks: */
448
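/*
 * Sets a bucket's last read or write time in the alloc btree to the current
 * IO clock; this is a no-op if the stored time is already current.
 */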
449 int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
450                               size_t bucket_nr, int rw)
451 {
452         struct bch_fs *c = trans->c;
453         struct bch_dev *ca = bch_dev_bkey_exists(c, dev);
454         struct btree_iter iter;
455         struct bucket *g;
456         struct bkey_alloc_buf *a;
457         struct bkey_alloc_unpacked u;
458         u64 *time, now;
459         int ret = 0;
460
461         bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(dev, bucket_nr),
462                              BTREE_ITER_CACHED|
463                              BTREE_ITER_CACHED_NOFILL|
464                              BTREE_ITER_INTENT);
465         ret = bch2_btree_iter_traverse(&iter);
466         if (ret)
467                 goto out;
468
469         a = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));
470         ret = PTR_ERR_OR_ZERO(a);
471         if (ret)
472                 goto out;
473
474         percpu_down_read(&c->mark_lock);
475         g = bucket(ca, bucket_nr);
476         u = alloc_mem_to_key(&iter, g, READ_ONCE(g->mark));
477         percpu_up_read(&c->mark_lock);
478
479         time = rw == READ ? &u.read_time : &u.write_time;
480         now = atomic64_read(&c->io_clock[rw].now);
481         if (*time == now)
482                 goto out;
483
484         *time = now;
485
486         bch2_alloc_pack(c, a, u);
487         ret   = bch2_trans_update(trans, &iter, &a->k, 0) ?:
488                 bch2_trans_commit(trans, NULL, NULL, 0);
489 out:
490         bch2_trans_iter_exit(trans, &iter);
491         return ret;
492 }
493
494 /* Background allocator thread: */
495
496 /*
497  * Scans for buckets to be invalidated, invalidates them, updates the alloc
498  * btree (marking them as invalidated on disk), then optionally issues discard
499  * commands to the newly free buckets, then puts them on the various freelists.
500  */
501
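/*
 * A bucket may be invalidated if it's available (no dirty data), not already
 * owned by the allocator, and not marked nouse; this also counts buckets
 * whose gc_gen is getting large, so the caller can kick gc when needed.
 */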
502 static bool bch2_can_invalidate_bucket(struct bch_dev *ca, size_t b,
503                                        struct bucket_mark m)
504 {
505         u8 gc_gen;
506
507         if (!is_available_bucket(m))
508                 return false;
509
510         if (m.owned_by_allocator)
511                 return false;
512
513         if (ca->buckets_nouse &&
514             test_bit(b, ca->buckets_nouse))
515                 return false;
516
517         gc_gen = bucket_gc_gen(bucket(ca, b));
518
519         ca->inc_gen_needs_gc            += gc_gen >= BUCKET_GC_GEN_MAX / 2;
520         ca->inc_gen_really_needs_gc     += gc_gen >= BUCKET_GC_GEN_MAX;
521
522         return gc_gen < BUCKET_GC_GEN_MAX;
523 }
524
525 /*
526  * Determines what order we're going to reuse buckets: smallest
527  * bucket_sort_key() first.
528  */
529
530 static unsigned bucket_sort_key(struct bucket *g, struct bucket_mark m,
531                                 u64 now, u64 last_seq_ondisk)
532 {
533         unsigned used = bucket_sectors_used(m);
534
535         if (used) {
536                 /*
537                  * Prefer to keep buckets that have been read more recently, and
538                  * buckets that have more data in them:
539                  */
540                 u64 last_read = max_t(s64, 0, now - g->io_time[READ]);
541                 u32 last_read_scaled = max_t(u64, U32_MAX, div_u64(last_read, used));
542
543                 return -last_read_scaled;
544         } else {
545                 /*
546                  * Prefer to use buckets with smaller gc_gen so that we don't
547                  * have to walk the btree and recalculate oldest_gen - but shift
548                  * off the low bits so that buckets will still have equal sort
549                  * keys when there's only a small difference, so that we can
550                  * keep sequential buckets together:
551                  */
552                 return  (bucket_needs_journal_commit(m, last_seq_ondisk) << 4)|
553                         (bucket_gc_gen(g) >> 4);
554         }
555 }
556
557 static inline int bucket_alloc_cmp(alloc_heap *h,
558                                    struct alloc_heap_entry l,
559                                    struct alloc_heap_entry r)
560 {
561         return  cmp_int(l.key, r.key) ?:
562                 cmp_int(r.nr, l.nr) ?:
563                 cmp_int(l.bucket, r.bucket);
564 }
565
566 static inline int bucket_idx_cmp(const void *_l, const void *_r)
567 {
568         const struct alloc_heap_entry *l = _l, *r = _r;
569
570         return cmp_int(l->bucket, r->bucket);
571 }
572
573 static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
574 {
575         struct bucket_array *buckets;
576         struct alloc_heap_entry e = { 0 };
577         u64 now, last_seq_ondisk;
578         size_t b, i, nr = 0;
579
580         down_read(&ca->bucket_lock);
581
582         buckets = bucket_array(ca);
583         ca->alloc_heap.used = 0;
584         now = atomic64_read(&c->io_clock[READ].now);
585         last_seq_ondisk = c->journal.last_seq_ondisk;
586
587         /*
588          * Find buckets with lowest read priority, by building a maxheap sorted
589          * by read priority and repeatedly replacing the maximum element until
590          * all buckets have been visited.
591          */
592         for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
593                 struct bucket *g = &buckets->b[b];
594                 struct bucket_mark m = READ_ONCE(g->mark);
595                 unsigned key = bucket_sort_key(g, m, now, last_seq_ondisk);
596
597                 cond_resched();
598
599                 if (!bch2_can_invalidate_bucket(ca, b, m))
600                         continue;
601
602                 if (e.nr && e.bucket + e.nr == b && e.key == key) {
603                         e.nr++;
604                 } else {
605                         if (e.nr)
606                                 heap_add_or_replace(&ca->alloc_heap, e,
607                                         -bucket_alloc_cmp, NULL);
608
609                         e = (struct alloc_heap_entry) {
610                                 .bucket = b,
611                                 .nr     = 1,
612                                 .key    = key,
613                         };
614                 }
615         }
616
617         if (e.nr)
618                 heap_add_or_replace(&ca->alloc_heap, e,
619                                 -bucket_alloc_cmp, NULL);
620
621         for (i = 0; i < ca->alloc_heap.used; i++)
622                 nr += ca->alloc_heap.data[i].nr;
623
624         while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
625                 nr -= ca->alloc_heap.data[0].nr;
626                 heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL);
627         }
628
629         up_read(&ca->bucket_lock);
630 }
631
632 static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
633 {
634         struct bucket_array *buckets = bucket_array(ca);
635         struct bucket_mark m;
636         size_t b, start;
637
638         if (ca->fifo_last_bucket <  ca->mi.first_bucket ||
639             ca->fifo_last_bucket >= ca->mi.nbuckets)
640                 ca->fifo_last_bucket = ca->mi.first_bucket;
641
642         start = ca->fifo_last_bucket;
643
644         do {
645                 ca->fifo_last_bucket++;
646                 if (ca->fifo_last_bucket == ca->mi.nbuckets)
647                         ca->fifo_last_bucket = ca->mi.first_bucket;
648
649                 b = ca->fifo_last_bucket;
650                 m = READ_ONCE(buckets->b[b].mark);
651
652                 if (bch2_can_invalidate_bucket(ca, b, m)) {
653                         struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
654
655                         heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
656                         if (heap_full(&ca->alloc_heap))
657                                 break;
658                 }
659
660                 cond_resched();
661         } while (ca->fifo_last_bucket != start);
662 }
663
664 static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
665 {
666         struct bucket_array *buckets = bucket_array(ca);
667         struct bucket_mark m;
668         size_t checked, i;
669
670         for (checked = 0;
671              checked < ca->mi.nbuckets / 2;
672              checked++) {
673                 size_t b = bch2_rand_range(ca->mi.nbuckets -
674                                            ca->mi.first_bucket) +
675                         ca->mi.first_bucket;
676
677                 m = READ_ONCE(buckets->b[b].mark);
678
679                 if (bch2_can_invalidate_bucket(ca, b, m)) {
680                         struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
681
682                         heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
683                         if (heap_full(&ca->alloc_heap))
684                                 break;
685                 }
686
687                 cond_resched();
688         }
689
690         sort(ca->alloc_heap.data,
691              ca->alloc_heap.used,
692              sizeof(ca->alloc_heap.data[0]),
693              bucket_idx_cmp, NULL);
694
695         /* remove duplicates: */
696         for (i = 0; i + 1 < ca->alloc_heap.used; i++)
697                 if (ca->alloc_heap.data[i].bucket ==
698                     ca->alloc_heap.data[i + 1].bucket)
699                         ca->alloc_heap.data[i].nr = 0;
700 }
701
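/*
 * Refills ca->alloc_heap according to the device's cache replacement policy
 * and returns the number of candidate buckets found.
 */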
702 static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
703 {
704         size_t i, nr = 0;
705
706         ca->inc_gen_needs_gc                    = 0;
707         ca->inc_gen_really_needs_gc             = 0;
708
709         switch (ca->mi.replacement) {
710         case BCH_CACHE_REPLACEMENT_lru:
711                 find_reclaimable_buckets_lru(c, ca);
712                 break;
713         case BCH_CACHE_REPLACEMENT_fifo:
714                 find_reclaimable_buckets_fifo(c, ca);
715                 break;
716         case BCH_CACHE_REPLACEMENT_random:
717                 find_reclaimable_buckets_random(c, ca);
718                 break;
719         }
720
721         heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL);
722
723         for (i = 0; i < ca->alloc_heap.used; i++)
724                 nr += ca->alloc_heap.data[i].nr;
725
726         return nr;
727 }
728
729 /*
730  * returns sequence number of most recent journal entry that updated this
731  * bucket:
732  */
733 static u64 bucket_journal_seq(struct bch_fs *c, struct bucket_mark m)
734 {
735         if (m.journal_seq_valid) {
736                 u64 journal_seq = atomic64_read(&c->journal.seq);
737                 u64 bucket_seq  = journal_seq;
738
739                 bucket_seq &= ~((u64) U16_MAX);
740                 bucket_seq |= m.journal_seq;
741
742                 if (bucket_seq > journal_seq)
743                         bucket_seq -= 1 << 16;
744
745                 return bucket_seq;
746         } else {
747                 return 0;
748         }
749 }
750
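/*
 * Rewrites a bucket's alloc key with an incremented gen, zeroed sector counts
 * and fresh IO times; this is the update that makes the invalidation
 * persistent.
 */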
751 static int bucket_invalidate_btree(struct btree_trans *trans,
752                                    struct bch_dev *ca, u64 b)
753 {
754         struct bch_fs *c = trans->c;
755         struct bkey_alloc_buf *a;
756         struct bkey_alloc_unpacked u;
757         struct bucket *g;
758         struct bucket_mark m;
759         struct btree_iter iter;
760         int ret;
761
762         bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
763                              POS(ca->dev_idx, b),
764                              BTREE_ITER_CACHED|
765                              BTREE_ITER_CACHED_NOFILL|
766                              BTREE_ITER_INTENT);
767
768         a = bch2_trans_kmalloc(trans, sizeof(*a));
769         ret = PTR_ERR_OR_ZERO(a);
770         if (ret)
771                 goto err;
772
773         ret = bch2_btree_iter_traverse(&iter);
774         if (ret)
775                 goto err;
776
777         percpu_down_read(&c->mark_lock);
778         g = bucket(ca, b);
779         m = READ_ONCE(g->mark);
780         u = alloc_mem_to_key(&iter, g, m);
781         percpu_up_read(&c->mark_lock);
782
783         u.gen++;
784         u.data_type     = 0;
785         u.dirty_sectors = 0;
786         u.cached_sectors = 0;
787         u.read_time     = atomic64_read(&c->io_clock[READ].now);
788         u.write_time    = atomic64_read(&c->io_clock[WRITE].now);
789
790         bch2_alloc_pack(c, a, u);
791         ret = bch2_trans_update(trans, &iter, &a->k,
792                                 BTREE_TRIGGER_BUCKET_INVALIDATE);
793 err:
794         bch2_trans_iter_exit(trans, &iter);
795         return ret;
796 }
797
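/*
 * Invalidates the first bucket on ca->alloc_heap: it's marked as owned by the
 * allocator and pushed onto free_inc, then (unless only the in-memory gen
 * bump is needed) its alloc key is updated via bucket_invalidate_btree(); on
 * error the bucket is taken back off free_inc.
 */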
798 static int bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
799                                       u64 *journal_seq, unsigned flags)
800 {
801         struct bucket *g;
802         struct bucket_mark m;
803         size_t b;
804         int ret = 0;
805
806         BUG_ON(!ca->alloc_heap.used ||
807                !ca->alloc_heap.data[0].nr);
808         b = ca->alloc_heap.data[0].bucket;
809
810         /* first, put on free_inc and mark as owned by allocator: */
811         percpu_down_read(&c->mark_lock);
812         g = bucket(ca, b);
813         m = READ_ONCE(g->mark);
814
815         BUG_ON(m.dirty_sectors);
816
817         bch2_mark_alloc_bucket(c, ca, b, true);
818
819         spin_lock(&c->freelist_lock);
820         verify_not_on_freelist(c, ca, b);
821         BUG_ON(!fifo_push(&ca->free_inc, b));
822         spin_unlock(&c->freelist_lock);
823
824         /*
825          * If we're not invalidating cached data, we only increment the bucket
826          * gen in memory here; the incremented gen will be updated in the btree
827          * by bch2_trans_mark_pointer():
828          */
829         if (!m.cached_sectors &&
830             !bucket_needs_journal_commit(m, c->journal.last_seq_ondisk)) {
831                 BUG_ON(m.data_type);
832                 bucket_cmpxchg(g, m, m.gen++);
833                 percpu_up_read(&c->mark_lock);
834                 goto out;
835         }
836
837         percpu_up_read(&c->mark_lock);
838
839         /*
840          * If the read-only path is trying to shut down, we can't be generating
841          * new btree updates:
842          */
843         if (test_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags)) {
844                 ret = 1;
845                 goto out;
846         }
847
848         ret = bch2_trans_do(c, NULL, journal_seq,
849                             BTREE_INSERT_NOCHECK_RW|
850                             BTREE_INSERT_NOFAIL|
851                             BTREE_INSERT_JOURNAL_RESERVED|
852                             flags,
853                             bucket_invalidate_btree(&trans, ca, b));
854 out:
855         if (!ret) {
856                 /* remove from alloc_heap: */
857                 struct alloc_heap_entry e, *top = ca->alloc_heap.data;
858
859                 top->bucket++;
860                 top->nr--;
861
862                 if (!top->nr)
863                         heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
864
865                 /*
866                  * Make sure we flush the last journal entry that updated this
867                  * bucket (i.e. deleting the last reference) before writing to
868                  * this bucket again:
869                  */
870                 *journal_seq = max(*journal_seq, bucket_journal_seq(c, m));
871         } else {
872                 size_t b2;
873
874                 /* remove from free_inc: */
875                 percpu_down_read(&c->mark_lock);
876                 spin_lock(&c->freelist_lock);
877
878                 bch2_mark_alloc_bucket(c, ca, b, false);
879
880                 BUG_ON(!fifo_pop_back(&ca->free_inc, b2));
881                 BUG_ON(b != b2);
882
883                 spin_unlock(&c->freelist_lock);
884                 percpu_up_read(&c->mark_lock);
885         }
886
887         return ret < 0 ? ret : 0;
888 }
889
890 /*
891  * Pull buckets off ca->alloc_heap, invalidate them, move them to ca->free_inc:
892  */
893 static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
894 {
895         u64 journal_seq = 0;
896         int ret = 0;
897
898         /* Only use nowait if we've already invalidated at least one bucket: */
899         while (!ret &&
900                !fifo_full(&ca->free_inc) &&
901                ca->alloc_heap.used) {
902                 if (kthread_should_stop()) {
903                         ret = 1;
904                         break;
905                 }
906
907                 ret = bch2_invalidate_one_bucket(c, ca, &journal_seq,
908                                 (!fifo_empty(&ca->free_inc)
909                                  ? BTREE_INSERT_NOWAIT : 0));
910                 /*
911                  * We only want to batch up invalidates when they're going to
912                  * require flushing the journal:
913                  */
914                 if (!journal_seq)
915                         break;
916         }
917
918         /* If we used NOWAIT, don't return the error: */
919         if (!fifo_empty(&ca->free_inc))
920                 ret = 0;
921         if (ret < 0)
922                 bch_err(ca, "error invalidating buckets: %i", ret);
923         if (ret)
924                 return ret;
925
926         if (journal_seq)
927                 ret = bch2_journal_flush_seq(&c->journal, journal_seq);
928         if (ret) {
929                 bch_err(ca, "journal error: %i", ret);
930                 return ret;
931         }
932
933         return 0;
934 }
935
936 static void alloc_thread_set_state(struct bch_dev *ca, unsigned new_state)
937 {
938         if (ca->allocator_state != new_state) {
939                 ca->allocator_state = new_state;
940                 closure_wake_up(&ca->fs->freelist_wait);
941         }
942 }
943
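/*
 * Tries to move one invalidated bucket from free_inc onto a freelist reserve;
 * returns 1 on success, 0 if every eligible reserve is full (in which case
 * the allocator reports itself as blocked_full).
 */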
944 static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
945 {
946         unsigned i;
947         int ret = 0;
948
949         spin_lock(&c->freelist_lock);
950         for (i = 0; i < RESERVE_NR; i++) {
951                 /*
952                  * Don't strand buckets on the copygc freelist until
953                  * after recovery is finished:
954                  */
955                 if (i == RESERVE_MOVINGGC &&
956                     !test_bit(BCH_FS_STARTED, &c->flags))
957                         continue;
958
959                 if (fifo_push(&ca->free[i], b)) {
960                         fifo_pop(&ca->free_inc, b);
961                         ret = 1;
962                         break;
963                 }
964         }
965         spin_unlock(&c->freelist_lock);
966
967         ca->allocator_state = ret
968                 ? ALLOCATOR_running
969                 : ALLOCATOR_blocked_full;
970         closure_wake_up(&c->freelist_wait);
971         return ret;
972 }
973
974 static void discard_one_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
975 {
976         if (ca->mi.discard &&
977             blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
978                 blkdev_issue_discard(ca->disk_sb.bdev, bucket_to_sector(ca, b),
979                                      ca->mi.bucket_size, GFP_NOFS, 0);
980 }
981
982 static bool allocator_thread_running(struct bch_dev *ca)
983 {
984         unsigned state = ca->mi.state == BCH_MEMBER_STATE_rw &&
985                 test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags)
986                 ? ALLOCATOR_running
987                 : ALLOCATOR_stopped;
988         alloc_thread_set_state(ca, state);
989         return state == ALLOCATOR_running;
990 }
991
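/*
 * Returns true if there are buckets we can reclaim, not counting buckets
 * that need gc to run first when gc hasn't run since the last scan.
 */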
992 static int buckets_available(struct bch_dev *ca, unsigned long gc_count)
993 {
994         s64 available = dev_buckets_reclaimable(ca) -
995                 (gc_count == ca->fs->gc_count ? ca->inc_gen_really_needs_gc : 0);
996         bool ret = available > 0;
997
998         alloc_thread_set_state(ca, ret
999                                ? ALLOCATOR_running
1000                                : ALLOCATOR_blocked);
1001         return ret;
1002 }
1003
1004 /**
1005  * bch2_allocator_thread - move buckets from free_inc to reserves
1006  *
1007  * find_reclaimable_buckets() finds buckets to reuse and puts them on
1008  * ca->alloc_heap, bch2_invalidate_buckets() invalidates them and moves them
1009  * to the free_inc FIFO, and this thread then optionally discards them and
1010  * pushes them onto the reserve freelists, which are depleted by allocation.
1011  */
1012 static int bch2_allocator_thread(void *arg)
1013 {
1014         struct bch_dev *ca = arg;
1015         struct bch_fs *c = ca->fs;
1016         unsigned long gc_count = c->gc_count;
1017         size_t nr;
1018         int ret;
1019
1020         set_freezable();
1021
1022         while (1) {
1023                 ret = kthread_wait_freezable(allocator_thread_running(ca));
1024                 if (ret)
1025                         goto stop;
1026
1027                 while (!ca->alloc_heap.used) {
1028                         cond_resched();
1029
1030                         ret = kthread_wait_freezable(buckets_available(ca, gc_count));
1031                         if (ret)
1032                                 goto stop;
1033
1034                         gc_count = c->gc_count;
1035                         nr = find_reclaimable_buckets(c, ca);
1036
1037                         trace_alloc_scan(ca, nr, ca->inc_gen_needs_gc,
1038                                          ca->inc_gen_really_needs_gc);
1039
1040                         if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) ||
1041                              ca->inc_gen_really_needs_gc) &&
1042                             c->gc_thread) {
1043                                 atomic_inc(&c->kick_gc);
1044                                 wake_up_process(c->gc_thread);
1045                         }
1046                 }
1047
1048                 ret = bch2_invalidate_buckets(c, ca);
1049                 if (ret)
1050                         goto stop;
1051
1052                 while (!fifo_empty(&ca->free_inc)) {
1053                         u64 b = fifo_peek(&ca->free_inc);
1054
1055                         discard_one_bucket(c, ca, b);
1056
1057                         ret = kthread_wait_freezable(push_invalidated_bucket(c, ca, b));
1058                         if (ret)
1059                                 goto stop;
1060                 }
1061         }
1062 stop:
1063         alloc_thread_set_state(ca, ALLOCATOR_stopped);
1064         return 0;
1065 }
1066
1067 /* Startup/shutdown (ro/rw): */
1068
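/*
 * Recomputes c->capacity: the usable sectors of all rw devices, minus the
 * larger of the summed per-device allocation reserves and the configured gc
 * reserve.
 */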
1069 void bch2_recalc_capacity(struct bch_fs *c)
1070 {
1071         struct bch_dev *ca;
1072         u64 capacity = 0, reserved_sectors = 0, gc_reserve;
1073         unsigned bucket_size_max = 0;
1074         unsigned long ra_pages = 0;
1075         unsigned i, j;
1076
1077         lockdep_assert_held(&c->state_lock);
1078
1079         for_each_online_member(ca, c, i) {
1080                 struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;
1081
1082                 ra_pages += bdi->ra_pages;
1083         }
1084
1085         bch2_set_ra_pages(c, ra_pages);
1086
1087         for_each_rw_member(ca, c, i) {
1088                 u64 dev_reserve = 0;
1089
1090                 /*
1091                  * We need to reserve buckets (from the number
1092                  * of currently available buckets) against
1093                  * foreground writes so that mainly copygc can
1094                  * make forward progress.
1095                  *
1096                  * We need enough to refill the various reserves
1097                  * from scratch - copygc will use its entire
1098                  * reserve all at once, then run again when
1099                  * its reserve is refilled (from the formerly
1100                  * available buckets).
1101                  *
1102                  * This reserve is just used when considering if
1103                  * allocations for foreground writes must wait -
1104                  * not -ENOSPC calculations.
1105                  */
1106                 for (j = 0; j < RESERVE_NONE; j++)
1107                         dev_reserve += ca->free[j].size;
1108
1109                 dev_reserve += 1;       /* btree write point */
1110                 dev_reserve += 1;       /* copygc write point */
1111                 dev_reserve += 1;       /* rebalance write point */
1112
1113                 dev_reserve *= ca->mi.bucket_size;
1114
1115                 capacity += bucket_to_sector(ca, ca->mi.nbuckets -
1116                                              ca->mi.first_bucket);
1117
1118                 reserved_sectors += dev_reserve * 2;
1119
1120                 bucket_size_max = max_t(unsigned, bucket_size_max,
1121                                         ca->mi.bucket_size);
1122         }
1123
1124         gc_reserve = c->opts.gc_reserve_bytes
1125                 ? c->opts.gc_reserve_bytes >> 9
1126                 : div64_u64(capacity * c->opts.gc_reserve_percent, 100);
1127
1128         reserved_sectors = max(gc_reserve, reserved_sectors);
1129
1130         reserved_sectors = min(reserved_sectors, capacity);
1131
1132         c->capacity = capacity - reserved_sectors;
1133
1134         c->bucket_size_max = bucket_size_max;
1135
1136         /* Wake up in case someone was waiting for buckets */
1137         closure_wake_up(&c->freelist_wait);
1138 }
1139
1140 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
1141 {
1142         struct open_bucket *ob;
1143         bool ret = false;
1144
1145         for (ob = c->open_buckets;
1146              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1147              ob++) {
1148                 spin_lock(&ob->lock);
1149                 if (ob->valid && !ob->on_partial_list &&
1150                     ob->ptr.dev == ca->dev_idx)
1151                         ret = true;
1152                 spin_unlock(&ob->lock);
1153         }
1154
1155         return ret;
1156 }
1157
1158 /* device goes ro: */
1159 void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
1160 {
1161         unsigned i;
1162
1163         BUG_ON(ca->alloc_thread);
1164
1165         /* First, remove device from allocation groups: */
1166
1167         for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1168                 clear_bit(ca->dev_idx, c->rw_devs[i].d);
1169
1170         /*
1171          * Capacity is calculated based on the devices in allocation groups:
1172          */
1173         bch2_recalc_capacity(c);
1174
1175         /* Next, close write points that point to this device... */
1176         for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
1177                 bch2_writepoint_stop(c, ca, &c->write_points[i]);
1178
1179         bch2_writepoint_stop(c, ca, &c->copygc_write_point);
1180         bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
1181         bch2_writepoint_stop(c, ca, &c->btree_write_point);
1182
1183         mutex_lock(&c->btree_reserve_cache_lock);
1184         while (c->btree_reserve_cache_nr) {
1185                 struct btree_alloc *a =
1186                         &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
1187
1188                 bch2_open_buckets_put(c, &a->ob);
1189         }
1190         mutex_unlock(&c->btree_reserve_cache_lock);
1191
1192         while (1) {
1193                 struct open_bucket *ob;
1194
1195                 spin_lock(&c->freelist_lock);
1196                 if (!ca->open_buckets_partial_nr) {
1197                         spin_unlock(&c->freelist_lock);
1198                         break;
1199                 }
1200                 ob = c->open_buckets +
1201                         ca->open_buckets_partial[--ca->open_buckets_partial_nr];
1202                 ob->on_partial_list = false;
1203                 spin_unlock(&c->freelist_lock);
1204
1205                 bch2_open_bucket_put(c, ob);
1206         }
1207
1208         bch2_ec_stop_dev(c, ca);
1209
1210         /*
1211          * Wake up threads that were blocked on allocation, so they can notice
1212          * the device can no longer be removed and the capacity has changed:
1213          */
1214         closure_wake_up(&c->freelist_wait);
1215
1216         /*
1217          * journal_res_get() can block waiting for free space in the journal -
1218          * it needs to notice there may not be devices to allocate from anymore:
1219          */
1220         wake_up(&c->journal.wait);
1221
1222         /* Now wait for any in flight writes: */
1223
1224         closure_wait_event(&c->open_buckets_wait,
1225                            !bch2_dev_has_open_write_point(c, ca));
1226 }
1227
1228 /* device goes rw: */
1229 void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
1230 {
1231         unsigned i;
1232
1233         for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1234                 if (ca->mi.data_allowed & (1 << i))
1235                         set_bit(ca->dev_idx, c->rw_devs[i].d);
1236 }
1237
1238 void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca)
1239 {
1240         if (ca->alloc_thread)
1241                 closure_wait_event(&c->freelist_wait,
1242                                    ca->allocator_state != ALLOCATOR_running);
1243 }
1244
1245 /* stop allocator thread: */
1246 void bch2_dev_allocator_stop(struct bch_dev *ca)
1247 {
1248         struct task_struct *p;
1249
1250         p = rcu_dereference_protected(ca->alloc_thread, 1);
1251         ca->alloc_thread = NULL;
1252
1253         /*
1254          * We need an rcu barrier between setting ca->alloc_thread = NULL and
1255          * the thread shutting down to avoid bch2_wake_allocator() racing:
1256          *
1257          * XXX: it would be better to have the rcu barrier be asynchronous
1258          * instead of blocking us here
1259          */
1260         synchronize_rcu();
1261
1262         if (p) {
1263                 kthread_stop(p);
1264                 put_task_struct(p);
1265         }
1266 }
1267
1268 /* start allocator thread: */
1269 int bch2_dev_allocator_start(struct bch_dev *ca)
1270 {
1271         struct task_struct *p;
1272
1273         /*
1274          * allocator thread already started?
1275          */
1276         if (ca->alloc_thread)
1277                 return 0;
1278
1279         p = kthread_create(bch2_allocator_thread, ca,
1280                            "bch-alloc/%s", ca->name);
1281         if (IS_ERR(p)) {
1282                 bch_err(ca->fs, "error creating allocator thread: %li",
1283                         PTR_ERR(p));
1284                 return PTR_ERR(p);
1285         }
1286
1287         get_task_struct(p);
1288         rcu_assign_pointer(ca->alloc_thread, p);
1289         wake_up_process(p);
1290         return 0;
1291 }
1292
1293 void bch2_fs_allocator_background_init(struct bch_fs *c)
1294 {
1295         spin_lock_init(&c->freelist_lock);
1296 }
1297
1298 void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
1299 {
1300         struct open_bucket *ob;
1301
1302         for (ob = c->open_buckets;
1303              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1304              ob++) {
1305                 spin_lock(&ob->lock);
1306                 if (ob->valid && !ob->on_partial_list) {
1307                         pr_buf(out, "%zu ref %u type %s\n",
1308                                ob - c->open_buckets,
1309                                atomic_read(&ob->pin),
1310                                bch2_data_types[ob->type]);
1311                 }
1312                 spin_unlock(&ob->lock);
1313         }
1314
1315 }