bcachefs-tools-debian: libbcachefs/alloc_background.c (sources at 0a9f0fc68a)
1 // SPDX-License-Identifier: GPL-2.0
2 #include "bcachefs.h"
3 #include "alloc_background.h"
4 #include "alloc_foreground.h"
5 #include "btree_cache.h"
6 #include "btree_io.h"
7 #include "btree_key_cache.h"
8 #include "btree_update.h"
9 #include "btree_update_interior.h"
10 #include "btree_gc.h"
11 #include "buckets.h"
12 #include "clock.h"
13 #include "debug.h"
14 #include "ec.h"
15 #include "error.h"
16 #include "recovery.h"
17 #include "varint.h"
18
19 #include <linux/kthread.h>
20 #include <linux/math64.h>
21 #include <linux/random.h>
22 #include <linux/rculist.h>
23 #include <linux/rcupdate.h>
24 #include <linux/sched/task.h>
25 #include <linux/sort.h>
26 #include <trace/events/bcachefs.h>
27
28 static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
29 #define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
30         BCH_ALLOC_FIELDS_V1()
31 #undef x
32 };
33
34 /* Ratelimiting/PD controllers */
35
36 static void pd_controllers_update(struct work_struct *work)
37 {
38         struct bch_fs *c = container_of(to_delayed_work(work),
39                                            struct bch_fs,
40                                            pd_controllers_update);
41         struct bch_dev *ca;
42         s64 free = 0, fragmented = 0;
43         unsigned i;
44
45         for_each_member_device(ca, c, i) {
46                 struct bch_dev_usage stats = bch2_dev_usage_read(ca);
47
48                 free += bucket_to_sector(ca,
49                                 __dev_buckets_free(ca, stats)) << 9;
50                 /*
51                  * Bytes of internal fragmentation, which can be
52                  * reclaimed by copy GC
53                  */
54                 fragmented += max_t(s64, 0, (bucket_to_sector(ca,
55                                         stats.d[BCH_DATA_user].buckets +
56                                         stats.d[BCH_DATA_cached].buckets) -
57                                   (stats.d[BCH_DATA_user].sectors +
58                                    stats.d[BCH_DATA_cached].sectors)) << 9);
59         }
60
61         bch2_pd_controller_update(&c->copygc_pd, free, fragmented, -1);
62         schedule_delayed_work(&c->pd_controllers_update,
63                               c->pd_controllers_update_seconds * HZ);
64 }
65
66 /* Persistent alloc info: */
67
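/*
 * alloc_field_v1_get/put read and write one field of a v1 alloc key: v1 keys
 * store a bitmap of present fields in bch_alloc->fields, followed by the
 * fields themselves as packed little-endian integers of
 * BCH_ALLOC_V1_FIELD_BYTES[] bytes each. Zero fields aren't stored, and
 * absent fields read back as zero.
 */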
68 static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
69                                      const void **p, unsigned field)
70 {
71         unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
72         u64 v;
73
74         if (!(a->fields & (1 << field)))
75                 return 0;
76
77         switch (bytes) {
78         case 1:
79                 v = *((const u8 *) *p);
80                 break;
81         case 2:
82                 v = le16_to_cpup(*p);
83                 break;
84         case 4:
85                 v = le32_to_cpup(*p);
86                 break;
87         case 8:
88                 v = le64_to_cpup(*p);
89                 break;
90         default:
91                 BUG();
92         }
93
94         *p += bytes;
95         return v;
96 }
97
98 static inline void alloc_field_v1_put(struct bkey_i_alloc *a, void **p,
99                                       unsigned field, u64 v)
100 {
101         unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
102
103         if (!v)
104                 return;
105
106         a->v.fields |= 1 << field;
107
108         switch (bytes) {
109         case 1:
110                 *((u8 *) *p) = v;
111                 break;
112         case 2:
113                 *((__le16 *) *p) = cpu_to_le16(v);
114                 break;
115         case 4:
116                 *((__le32 *) *p) = cpu_to_le32(v);
117                 break;
118         case 8:
119                 *((__le64 *) *p) = cpu_to_le64(v);
120                 break;
121         default:
122                 BUG();
123         }
124
125         *p += bytes;
126 }
127
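/* Decode a v1 alloc key into struct bkey_alloc_unpacked: */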
128 static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
129                                  struct bkey_s_c k)
130 {
131         const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
132         const void *d = in->data;
133         unsigned idx = 0;
134
135         out->gen = in->gen;
136
137 #define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
138         BCH_ALLOC_FIELDS_V1()
139 #undef  x
140 }
141
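/*
 * Decode a v2 alloc key: gen, oldest_gen and data_type are stored directly,
 * the remaining fields are varint encoded, and missing trailing fields read
 * back as zero. Returns a negative error on a malformed varint or a value
 * that doesn't fit the unpacked field.
 */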
142 static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
143                                 struct bkey_s_c k)
144 {
145         struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
146         const u8 *in = a.v->data;
147         const u8 *end = bkey_val_end(a);
148         unsigned fieldnr = 0;
149         int ret;
150         u64 v;
151
152         out->gen        = a.v->gen;
153         out->oldest_gen = a.v->oldest_gen;
154         out->data_type  = a.v->data_type;
155
156 #define x(_name, _bits)                                                 \
157         if (fieldnr < a.v->nr_fields) {                                 \
158                 ret = bch2_varint_decode(in, end, &v);                  \
159                 if (ret < 0)                                            \
160                         return ret;                                     \
161                 in += ret;                                              \
162         } else {                                                        \
163                 v = 0;                                                  \
164         }                                                               \
165         out->_name = v;                                                 \
166         if (v != out->_name)                                            \
167                 return -1;                                              \
168         fieldnr++;
169
170         BCH_ALLOC_FIELDS_V2()
171 #undef  x
172         return 0;
173 }
174
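/*
 * Encode an unpacked alloc key in the v2 format: fields are varint encoded in
 * BCH_ALLOC_FIELDS_V2() order, with trailing zero fields dropped by
 * truncating at the last nonzero field.
 */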
175 static void bch2_alloc_pack_v2(struct bkey_alloc_buf *dst,
176                                const struct bkey_alloc_unpacked src)
177 {
178         struct bkey_i_alloc_v2 *a = bkey_alloc_v2_init(&dst->k);
179         unsigned nr_fields = 0, last_nonzero_fieldnr = 0;
180         u8 *out = a->v.data;
181         u8 *end = (void *) &dst[1];
182         u8 *last_nonzero_field = out;
183         unsigned bytes;
184
185         a->k.p          = POS(src.dev, src.bucket);
186         a->v.gen        = src.gen;
187         a->v.oldest_gen = src.oldest_gen;
188         a->v.data_type  = src.data_type;
189
190 #define x(_name, _bits)                                                 \
191         nr_fields++;                                                    \
192                                                                         \
193         if (src._name) {                                                \
194                 out += bch2_varint_encode(out, src._name);              \
195                                                                         \
196                 last_nonzero_field = out;                               \
197                 last_nonzero_fieldnr = nr_fields;                       \
198         } else {                                                        \
199                 *out++ = 0;                                             \
200         }
201
202         BCH_ALLOC_FIELDS_V2()
203 #undef  x
204         BUG_ON(out > end);
205
206         out = last_nonzero_field;
207         a->v.nr_fields = last_nonzero_fieldnr;
208
209         bytes = (u8 *) out - (u8 *) &a->v;
210         set_bkey_val_bytes(&a->k, bytes);
211         memset_u64s_tail(&a->v, 0, bytes);
212 }
213
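/*
 * Unpack an alloc key of either version; other key types just get the
 * device:bucket from the key's position, with all fields zeroed.
 */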
214 struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
215 {
216         struct bkey_alloc_unpacked ret = {
217                 .dev    = k.k->p.inode,
218                 .bucket = k.k->p.offset,
219                 .gen    = 0,
220         };
221
222         if (k.k->type == KEY_TYPE_alloc_v2)
223                 bch2_alloc_unpack_v2(&ret, k);
224         else if (k.k->type == KEY_TYPE_alloc)
225                 bch2_alloc_unpack_v1(&ret, k);
226
227         return ret;
228 }
229
230 void bch2_alloc_pack(struct bch_fs *c,
231                      struct bkey_alloc_buf *dst,
232                      const struct bkey_alloc_unpacked src)
233 {
234         bch2_alloc_pack_v2(dst, src);
235 }
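/*
 * Illustrative sketch (not a verbatim excerpt) of how the pack/unpack helpers
 * are used together elsewhere in this file, e.g. in bch2_alloc_write_key() and
 * bch2_bucket_io_time_reset():
 *
 *	struct bkey_alloc_buf a;
 *	struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
 *
 *	u.read_time = atomic64_read(&c->io_clock[READ].now);
 *	bch2_alloc_pack(c, &a, u);
 *	bch2_trans_update(trans, iter, &a.k, 0);
 */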
236
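/* Size of a v1 alloc value, in u64s, given which fields are present: */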
237 static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)
238 {
239         unsigned i, bytes = offsetof(struct bch_alloc, data);
240
241         for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
242                 if (a->fields & (1 << i))
243                         bytes += BCH_ALLOC_V1_FIELD_BYTES[i];
244
245         return DIV_ROUND_UP(bytes, sizeof(u64));
246 }
247
248 const char *bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k)
249 {
250         struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
251
252         if (k.k->p.inode >= c->sb.nr_devices ||
253             !c->devs[k.k->p.inode])
254                 return "invalid device";
255
256         /* allow for unknown fields */
257         if (bkey_val_u64s(a.k) < bch_alloc_val_u64s(a.v))
258                 return "incorrect value size";
259
260         return NULL;
261 }
262
263 const char *bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k)
264 {
265         struct bkey_alloc_unpacked u;
266
267         if (k.k->p.inode >= c->sb.nr_devices ||
268             !c->devs[k.k->p.inode])
269                 return "invalid device";
270
271         if (bch2_alloc_unpack_v2(&u, k))
272                 return "unpack error";
273
274         return NULL;
275 }
276
277 void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
278                            struct bkey_s_c k)
279 {
280         struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
281
282         pr_buf(out, "gen %u oldest_gen %u data_type %u",
283                u.gen, u.oldest_gen, u.data_type);
285 #define x(_name, ...)   pr_buf(out, " " #_name " %llu", (u64) u._name);
285         BCH_ALLOC_FIELDS_V2()
286 #undef  x
287 }
288
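/*
 * Walk callback for bch2_alloc_read(): initializes the in-memory bucket state
 * (gen, data type, sector counts, IO times) from an on-disk alloc key.
 */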
289 static int bch2_alloc_read_fn(struct bch_fs *c, enum btree_id id,
290                               unsigned level, struct bkey_s_c k)
291 {
292         struct bch_dev *ca;
293         struct bucket *g;
294         struct bkey_alloc_unpacked u;
295
296         if (level ||
297             (k.k->type != KEY_TYPE_alloc &&
298              k.k->type != KEY_TYPE_alloc_v2))
299                 return 0;
300
301         ca = bch_dev_bkey_exists(c, k.k->p.inode);
302         g = bucket(ca, k.k->p.offset);
303         u = bch2_alloc_unpack(k);
304
305         g->_mark.gen            = u.gen;
306         g->_mark.data_type      = u.data_type;
307         g->_mark.dirty_sectors  = u.dirty_sectors;
308         g->_mark.cached_sectors = u.cached_sectors;
309         g->io_time[READ]        = u.read_time;
310         g->io_time[WRITE]       = u.write_time;
311         g->oldest_gen           = u.oldest_gen;
312         g->gen_valid            = 1;
313
314         return 0;
315 }
316
317 int bch2_alloc_read(struct bch_fs *c, struct journal_keys *journal_keys)
318 {
319         int ret;
320
321         down_read(&c->gc_lock);
322         ret = bch2_btree_and_journal_walk(c, journal_keys, BTREE_ID_alloc,
323                                           NULL, bch2_alloc_read_fn);
324         up_read(&c->gc_lock);
325
326         if (ret) {
327                 bch_err(c, "error reading alloc info: %i", ret);
328                 return ret;
329         }
330
331         return 0;
332 }
333
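/*
 * Write out one bucket's alloc info if the in-memory state differs from the
 * btree: flush any btree key cache entry, reread the key, and only commit
 * when the unpacked representations differ.
 */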
334 static int bch2_alloc_write_key(struct btree_trans *trans,
335                                 struct btree_iter *iter,
336                                 unsigned flags)
337 {
338         struct bch_fs *c = trans->c;
339         struct bkey_s_c k;
340         struct bch_dev *ca;
341         struct bucket *g;
342         struct bucket_mark m;
343         struct bkey_alloc_unpacked old_u, new_u;
344         struct bkey_alloc_buf a;
345         int ret;
346 retry:
347         bch2_trans_begin(trans);
348
349         ret = bch2_btree_key_cache_flush(trans,
350                         BTREE_ID_alloc, iter->pos);
351         if (ret)
352                 goto err;
353
354         k = bch2_btree_iter_peek_slot(iter);
355         ret = bkey_err(k);
356         if (ret)
357                 goto err;
358
359         old_u = bch2_alloc_unpack(k);
360
361         percpu_down_read(&c->mark_lock);
362         ca      = bch_dev_bkey_exists(c, iter->pos.inode);
363         g       = bucket(ca, iter->pos.offset);
364         m       = READ_ONCE(g->mark);
365         new_u   = alloc_mem_to_key(iter, g, m);
366         percpu_up_read(&c->mark_lock);
367
368         if (!bkey_alloc_unpacked_cmp(old_u, new_u))
369                 return 0;
370
371         bch2_alloc_pack(c, &a, new_u);
372         bch2_trans_update(trans, iter, &a.k,
373                           BTREE_TRIGGER_NORUN);
374         ret = bch2_trans_commit(trans, NULL, NULL,
375                                 BTREE_INSERT_NOFAIL|flags);
376 err:
377         if (ret == -EINTR)
378                 goto retry;
379         return ret;
380 }
381
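/* Flush in-memory alloc info for every bucket of every member device: */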
382 int bch2_alloc_write(struct bch_fs *c, unsigned flags)
383 {
384         struct btree_trans trans;
385         struct btree_iter *iter;
386         struct bch_dev *ca;
387         unsigned i;
388         int ret = 0;
389
390         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
391         iter = bch2_trans_get_iter(&trans, BTREE_ID_alloc, POS_MIN,
392                                    BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
393
394         for_each_member_device(ca, c, i) {
395                 bch2_btree_iter_set_pos(iter,
396                         POS(ca->dev_idx, ca->mi.first_bucket));
397
398                 while (iter->pos.offset < ca->mi.nbuckets) {
399                         bch2_trans_cond_resched(&trans);
400
401                         ret = bch2_alloc_write_key(&trans, iter, flags);
402                         if (ret) {
403                                 percpu_ref_put(&ca->io_ref);
404                                 goto err;
405                         }
406                         bch2_btree_iter_next_slot(iter);
407                 }
408         }
409 err:
410         bch2_trans_iter_put(&trans, iter);
411         bch2_trans_exit(&trans);
412         return ret;
413 }
414
415 /* Bucket IO clocks: */
416
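/*
 * Update a bucket's last read or write time in its alloc key to the current
 * IO clock; skips the transaction commit if it's already current.
 */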
417 int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
418                               size_t bucket_nr, int rw)
419 {
420         struct bch_fs *c = trans->c;
421         struct bch_dev *ca = bch_dev_bkey_exists(c, dev);
422         struct btree_iter *iter;
423         struct bucket *g;
424         struct bkey_alloc_buf *a;
425         struct bkey_alloc_unpacked u;
426         u64 *time, now;
427         int ret = 0;
428
429         iter = bch2_trans_get_iter(trans, BTREE_ID_alloc, POS(dev, bucket_nr),
430                                    BTREE_ITER_CACHED|
431                                    BTREE_ITER_CACHED_NOFILL|
432                                    BTREE_ITER_INTENT);
433         ret = bch2_btree_iter_traverse(iter);
434         if (ret)
435                 goto out;
436
437         a = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));
438         ret = PTR_ERR_OR_ZERO(a);
439         if (ret)
440                 goto out;
441
442         percpu_down_read(&c->mark_lock);
443         g = bucket(ca, bucket_nr);
444         u = alloc_mem_to_key(iter, g, READ_ONCE(g->mark));
445         percpu_up_read(&c->mark_lock);
446
447         time = rw == READ ? &u.read_time : &u.write_time;
448         now = atomic64_read(&c->io_clock[rw].now);
449         if (*time == now)
450                 goto out;
451
452         *time = now;
453
454         bch2_alloc_pack(c, a, u);
455         ret   = bch2_trans_update(trans, iter, &a->k, 0) ?:
456                 bch2_trans_commit(trans, NULL, NULL, 0);
457 out:
458         bch2_trans_iter_put(trans, iter);
459         return ret;
460 }
461
462 /* Background allocator thread: */
463
464 /*
465  * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
466  * (marking them as invalidated on disk), then optionally issues discard
467  * commands to the newly free buckets, then puts them on the various freelists.
468  */
469
470 /**
471  * wait_buckets_available - wait on reclaimable buckets
472  *
473  * If there aren't enough available buckets to fill up free_inc, wait until
474  * there are.
475  */
476 static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
477 {
478         unsigned long gc_count = c->gc_count;
479         s64 available;
480         unsigned i;
481         int ret = 0;
482
483         ca->allocator_state = ALLOCATOR_BLOCKED;
484         closure_wake_up(&c->freelist_wait);
485
486         while (1) {
487                 set_current_state(TASK_INTERRUPTIBLE);
488                 if (kthread_should_stop()) {
489                         ret = 1;
490                         break;
491                 }
492
493                 if (gc_count != c->gc_count)
494                         ca->inc_gen_really_needs_gc = 0;
495
496                 available  = dev_buckets_available(ca);
497                 available -= ca->inc_gen_really_needs_gc;
498
499                 spin_lock(&c->freelist_lock);
500                 for (i = 0; i < RESERVE_NR; i++)
501                         available -= fifo_used(&ca->free[i]);
502                 spin_unlock(&c->freelist_lock);
503
504                 available = max(available, 0LL);
505
506                 if (available > fifo_free(&ca->free_inc) ||
507                     (available &&
508                      !fifo_full(&ca->free[RESERVE_MOVINGGC])))
509                         break;
510
511                 up_read(&c->gc_lock);
512                 schedule();
513                 try_to_freeze();
514                 down_read(&c->gc_lock);
515         }
516
517         __set_current_state(TASK_RUNNING);
518         ca->allocator_state = ALLOCATOR_RUNNING;
519         closure_wake_up(&c->freelist_wait);
520
521         return ret;
522 }
523
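/*
 * Can this bucket be reclaimed by the allocator? Also counts buckets whose
 * gc gen is getting close to wraparound, so the allocator knows when to kick
 * gc.
 */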
524 static bool bch2_can_invalidate_bucket(struct bch_dev *ca, size_t b,
525                                        struct bucket_mark m)
526 {
527         u8 gc_gen;
528
529         if (!is_available_bucket(m))
530                 return false;
531
532         if (m.owned_by_allocator)
533                 return false;
534
535         if (ca->buckets_nouse &&
536             test_bit(b, ca->buckets_nouse))
537                 return false;
538
539         gc_gen = bucket_gc_gen(bucket(ca, b));
540
541         if (gc_gen >= BUCKET_GC_GEN_MAX / 2)
542                 ca->inc_gen_needs_gc++;
543
544         if (gc_gen >= BUCKET_GC_GEN_MAX)
545                 ca->inc_gen_really_needs_gc++;
546
547         return gc_gen < BUCKET_GC_GEN_MAX;
548 }
549
550 /*
551  * Determines what order we're going to reuse buckets, smallest bucket_sort_key()
552  * first.
553  */
554
555 static unsigned bucket_sort_key(struct bucket *g, struct bucket_mark m,
556                                 u64 now, u64 last_seq_ondisk)
557 {
558         unsigned used = bucket_sectors_used(m);
559
560         if (used) {
561                 /*
562                  * Prefer to keep buckets that have been read more recently, and
563                  * buckets that have more data in them:
564                  */
565                 u64 last_read = max_t(s64, 0, now - g->io_time[READ]);
566                 u32 last_read_scaled = min_t(u64, U32_MAX, div_u64(last_read, used));
567
568                 return -last_read_scaled;
569         } else {
570                 /*
571                  * Prefer to use buckets with smaller gc_gen so that we don't
572                  * have to walk the btree and recalculate oldest_gen - but shift
573                  * off the low bits so that buckets will still have equal sort
574                  * keys when there's only a small difference, so that we can
575                  * keep sequential buckets together:
576                  */
577                 return  (bucket_needs_journal_commit(m, last_seq_ondisk) << 4)|
578                         (bucket_gc_gen(g) >> 4);
579         }
580 }
581
582 static inline int bucket_alloc_cmp(alloc_heap *h,
583                                    struct alloc_heap_entry l,
584                                    struct alloc_heap_entry r)
585 {
586         return  cmp_int(l.key, r.key) ?:
587                 cmp_int(r.nr, l.nr) ?:
588                 cmp_int(l.bucket, r.bucket);
589 }
590
591 static inline int bucket_idx_cmp(const void *_l, const void *_r)
592 {
593         const struct alloc_heap_entry *l = _l, *r = _r;
594
595         return cmp_int(l->bucket, r->bucket);
596 }
597
598 static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
599 {
600         struct bucket_array *buckets;
601         struct alloc_heap_entry e = { 0 };
602         u64 now, last_seq_ondisk;
603         size_t b, i, nr = 0;
604
605         down_read(&ca->bucket_lock);
606
607         buckets = bucket_array(ca);
608         ca->alloc_heap.used = 0;
609         now = atomic64_read(&c->io_clock[READ].now);
610         last_seq_ondisk = c->journal.last_seq_ondisk;
611
612         /*
613          * Find buckets with lowest read priority, by building a maxheap sorted
614          * by read priority and repeatedly replacing the maximum element until
615          * all buckets have been visited.
616          */
617         for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
618                 struct bucket *g = &buckets->b[b];
619                 struct bucket_mark m = READ_ONCE(g->mark);
620                 unsigned key = bucket_sort_key(g, m, now, last_seq_ondisk);
621
622                 if (!bch2_can_invalidate_bucket(ca, b, m))
623                         continue;
624
625                 if (e.nr && e.bucket + e.nr == b && e.key == key) {
626                         e.nr++;
627                 } else {
628                         if (e.nr)
629                                 heap_add_or_replace(&ca->alloc_heap, e,
630                                         -bucket_alloc_cmp, NULL);
631
632                         e = (struct alloc_heap_entry) {
633                                 .bucket = b,
634                                 .nr     = 1,
635                                 .key    = key,
636                         };
637                 }
638
639                 cond_resched();
640         }
641
642         if (e.nr)
643                 heap_add_or_replace(&ca->alloc_heap, e,
644                                 -bucket_alloc_cmp, NULL);
645
646         for (i = 0; i < ca->alloc_heap.used; i++)
647                 nr += ca->alloc_heap.data[i].nr;
648
649         while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
650                 nr -= ca->alloc_heap.data[0].nr;
651                 heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL);
652         }
653
654         up_read(&ca->bucket_lock);
655 }
656
657 static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
658 {
659         struct bucket_array *buckets = bucket_array(ca);
660         struct bucket_mark m;
661         size_t b, start;
662
663         if (ca->fifo_last_bucket <  ca->mi.first_bucket ||
664             ca->fifo_last_bucket >= ca->mi.nbuckets)
665                 ca->fifo_last_bucket = ca->mi.first_bucket;
666
667         start = ca->fifo_last_bucket;
668
669         do {
670                 ca->fifo_last_bucket++;
671                 if (ca->fifo_last_bucket == ca->mi.nbuckets)
672                         ca->fifo_last_bucket = ca->mi.first_bucket;
673
674                 b = ca->fifo_last_bucket;
675                 m = READ_ONCE(buckets->b[b].mark);
676
677                 if (bch2_can_invalidate_bucket(ca, b, m)) {
678                         struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
679
680                         heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
681                         if (heap_full(&ca->alloc_heap))
682                                 break;
683                 }
684
685                 cond_resched();
686         } while (ca->fifo_last_bucket != start);
687 }
688
689 static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
690 {
691         struct bucket_array *buckets = bucket_array(ca);
692         struct bucket_mark m;
693         size_t checked, i;
694
695         for (checked = 0;
696              checked < ca->mi.nbuckets / 2;
697              checked++) {
698                 size_t b = bch2_rand_range(ca->mi.nbuckets -
699                                            ca->mi.first_bucket) +
700                         ca->mi.first_bucket;
701
702                 m = READ_ONCE(buckets->b[b].mark);
703
704                 if (bch2_can_invalidate_bucket(ca, b, m)) {
705                         struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
706
707                         heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
708                         if (heap_full(&ca->alloc_heap))
709                                 break;
710                 }
711
712                 cond_resched();
713         }
714
715         sort(ca->alloc_heap.data,
716              ca->alloc_heap.used,
717              sizeof(ca->alloc_heap.data[0]),
718              bucket_idx_cmp, NULL);
719
720         /* remove duplicates: */
721         for (i = 0; i + 1 < ca->alloc_heap.used; i++)
722                 if (ca->alloc_heap.data[i].bucket ==
723                     ca->alloc_heap.data[i + 1].bucket)
724                         ca->alloc_heap.data[i].nr = 0;
725 }
726
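/*
 * Fill ca->alloc_heap with candidate buckets according to the device's cache
 * replacement policy (LRU, FIFO or random), then resort the heap and return
 * the number of buckets found.
 */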
727 static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
728 {
729         size_t i, nr = 0;
730
731         ca->inc_gen_needs_gc                    = 0;
732
733         switch (ca->mi.replacement) {
734         case BCH_CACHE_REPLACEMENT_lru:
735                 find_reclaimable_buckets_lru(c, ca);
736                 break;
737         case BCH_CACHE_REPLACEMENT_fifo:
738                 find_reclaimable_buckets_fifo(c, ca);
739                 break;
740         case BCH_CACHE_REPLACEMENT_random:
741                 find_reclaimable_buckets_random(c, ca);
742                 break;
743         }
744
745         heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL);
746
747         for (i = 0; i < ca->alloc_heap.used; i++)
748                 nr += ca->alloc_heap.data[i].nr;
749
750         return nr;
751 }
752
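/* Pop the next bucket index off ca->alloc_heap, or -1 if the heap is empty: */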
753 static inline long next_alloc_bucket(struct bch_dev *ca)
754 {
755         struct alloc_heap_entry e, *top = ca->alloc_heap.data;
756
757         while (ca->alloc_heap.used) {
758                 if (top->nr) {
759                         size_t b = top->bucket;
760
761                         top->bucket++;
762                         top->nr--;
763                         return b;
764                 }
765
766                 heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
767         }
768
769         return -1;
770 }
771
772 /*
773  * returns sequence number of most recent journal entry that updated this
774  * bucket:
775  */
776 static u64 bucket_journal_seq(struct bch_fs *c, struct bucket_mark m)
777 {
778         if (m.journal_seq_valid) {
779                 u64 journal_seq = atomic64_read(&c->journal.seq);
780                 u64 bucket_seq  = journal_seq;
781
782                 bucket_seq &= ~((u64) U16_MAX);
783                 bucket_seq |= m.journal_seq;
784
785                 if (bucket_seq > journal_seq)
786                         bucket_seq -= 1 << 16;
787
788                 return bucket_seq;
789         } else {
790                 return 0;
791         }
792 }
793
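/*
 * Invalidate the bucket at the head of ca->alloc_heap: mark it owned by the
 * allocator and push it onto free_inc, then rewrite its alloc key with an
 * incremented gen and zeroed counters (unless only the in-memory gen needs
 * bumping). On error the bucket is taken back off free_inc.
 */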
794 static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
795                                        struct bch_dev *ca,
796                                        struct btree_iter *iter,
797                                        u64 *journal_seq, unsigned flags)
798 {
799         struct bch_fs *c = trans->c;
800         struct bkey_alloc_buf a;
801         struct bkey_alloc_unpacked u;
802         struct bucket *g;
803         struct bucket_mark m;
804         bool invalidating_cached_data;
805         size_t b;
806         int ret = 0;
807
808         BUG_ON(!ca->alloc_heap.used ||
809                !ca->alloc_heap.data[0].nr);
810         b = ca->alloc_heap.data[0].bucket;
811
812         /* first, put on free_inc and mark as owned by allocator: */
813         percpu_down_read(&c->mark_lock);
814         g = bucket(ca, b);
815         m = READ_ONCE(g->mark);
816
817         BUG_ON(m.dirty_sectors);
818
819         bch2_mark_alloc_bucket(c, ca, b, true, gc_pos_alloc(c, NULL), 0);
820
821         spin_lock(&c->freelist_lock);
822         verify_not_on_freelist(c, ca, b);
823         BUG_ON(!fifo_push(&ca->free_inc, b));
824         spin_unlock(&c->freelist_lock);
825
826         /*
827          * If we're not invalidating cached data, we only increment the bucket
828          * gen in memory here, the incremented gen will be updated in the btree
829          * by bch2_trans_mark_pointer():
830          */
831         if (!m.cached_sectors &&
832             !bucket_needs_journal_commit(m, c->journal.last_seq_ondisk)) {
833                 BUG_ON(m.data_type);
834                 bucket_cmpxchg(g, m, m.gen++);
835                 percpu_up_read(&c->mark_lock);
836                 goto out;
837         }
838
839         percpu_up_read(&c->mark_lock);
840
841         /*
842          * If the read-only path is trying to shut down, we can't be generating
843          * new btree updates:
844          */
845         if (test_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags)) {
846                 ret = 1;
847                 goto out;
848         }
849
850         bch2_btree_iter_set_pos(iter, POS(ca->dev_idx, b));
851 retry:
852         ret = bch2_btree_iter_traverse(iter);
853         if (ret)
854                 return ret;
855
856         percpu_down_read(&c->mark_lock);
857         g = bucket(ca, iter->pos.offset);
858         m = READ_ONCE(g->mark);
859         u = alloc_mem_to_key(iter, g, m);
860
861         percpu_up_read(&c->mark_lock);
862
863         invalidating_cached_data = u.cached_sectors != 0;
864
865         u.gen++;
866         u.data_type     = 0;
867         u.dirty_sectors = 0;
868         u.cached_sectors = 0;
869         u.read_time     = atomic64_read(&c->io_clock[READ].now);
870         u.write_time    = atomic64_read(&c->io_clock[WRITE].now);
871
872         bch2_alloc_pack(c, &a, u);
873         bch2_trans_update(trans, iter, &a.k,
874                           BTREE_TRIGGER_BUCKET_INVALIDATE);
875
876         /*
877          * XXX:
878          * when using deferred btree updates, we have journal reclaim doing
879          * btree updates and thus requiring the allocator to make forward
880          * progress, and here the allocator is requiring space in the journal -
881          * so we need a journal pre-reservation:
882          */
883         ret = bch2_trans_commit(trans, NULL,
884                                 invalidating_cached_data ? journal_seq : NULL,
885                                 BTREE_INSERT_NOUNLOCK|
886                                 BTREE_INSERT_NOCHECK_RW|
887                                 BTREE_INSERT_NOFAIL|
888                                 BTREE_INSERT_JOURNAL_RESERVED|
889                                 flags);
890         if (ret == -EINTR)
891                 goto retry;
892 out:
893         if (!ret) {
894                 /* remove from alloc_heap: */
895                 struct alloc_heap_entry e, *top = ca->alloc_heap.data;
896
897                 top->bucket++;
898                 top->nr--;
899
900                 if (!top->nr)
901                         heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
902
903                 /*
904                  * Make sure we flush the last journal entry that updated this
905                  * bucket (i.e. deleting the last reference) before writing to
906                  * this bucket again:
907                  */
908                 *journal_seq = max(*journal_seq, bucket_journal_seq(c, m));
909         } else {
910                 size_t b2;
911
912                 /* remove from free_inc: */
913                 percpu_down_read(&c->mark_lock);
914                 spin_lock(&c->freelist_lock);
915
916                 bch2_mark_alloc_bucket(c, ca, b, false,
917                                        gc_pos_alloc(c, NULL), 0);
918
919                 BUG_ON(!fifo_pop_back(&ca->free_inc, b2));
920                 BUG_ON(b != b2);
921
922                 spin_unlock(&c->freelist_lock);
923                 percpu_up_read(&c->mark_lock);
924         }
925
926         return ret < 0 ? ret : 0;
927 }
928
929 /*
930  * Pull buckets off ca->alloc_heap, invalidate them, move them to ca->free_inc:
931  */
932 static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
933 {
934         struct btree_trans trans;
935         struct btree_iter *iter;
936         u64 journal_seq = 0;
937         int ret = 0;
938
939         bch2_trans_init(&trans, c, 0, 0);
940         iter = bch2_trans_get_iter(&trans, BTREE_ID_alloc,
941                                    POS(ca->dev_idx, 0),
942                                    BTREE_ITER_CACHED|
943                                    BTREE_ITER_CACHED_NOFILL|
944                                    BTREE_ITER_INTENT);
945
946         /* Only use nowait if we've already invalidated at least one bucket: */
947         while (!ret &&
948                !fifo_full(&ca->free_inc) &&
949                ca->alloc_heap.used)
950                 ret = bch2_invalidate_one_bucket2(&trans, ca, iter, &journal_seq,
951                                 BTREE_INSERT_GC_LOCK_HELD|
952                                 (!fifo_empty(&ca->free_inc)
953                                  ? BTREE_INSERT_NOWAIT : 0));
954
955         bch2_trans_iter_put(&trans, iter);
956         bch2_trans_exit(&trans);
957
958         /* If we used NOWAIT, don't return the error: */
959         if (!fifo_empty(&ca->free_inc))
960                 ret = 0;
961         if (ret) {
962                 bch_err(ca, "error invalidating buckets: %i", ret);
963                 return ret;
964         }
965
966         if (journal_seq)
967                 ret = bch2_journal_flush_seq(&c->journal, journal_seq);
968         if (ret) {
969                 bch_err(ca, "journal error: %i", ret);
970                 return ret;
971         }
972
973         return 0;
974 }
975
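/*
 * Move one bucket from free_inc onto whichever freelist has room, sleeping
 * until one does; returns nonzero if the allocator thread should stop.
 */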
976 static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t bucket)
977 {
978         unsigned i;
979         int ret = 0;
980
981         while (1) {
982                 set_current_state(TASK_INTERRUPTIBLE);
983
984                 spin_lock(&c->freelist_lock);
985                 for (i = 0; i < RESERVE_NR; i++) {
986
987                         /*
988                          * Don't strand buckets on the copygc freelist until
989                          * after recovery is finished:
990                          */
991                         if (!test_bit(BCH_FS_STARTED, &c->flags) &&
992                             i == RESERVE_MOVINGGC)
993                                 continue;
994
995                         if (fifo_push(&ca->free[i], bucket)) {
996                                 fifo_pop(&ca->free_inc, bucket);
997
998                                 closure_wake_up(&c->freelist_wait);
999                                 ca->allocator_state = ALLOCATOR_RUNNING;
1000
1001                                 spin_unlock(&c->freelist_lock);
1002                                 goto out;
1003                         }
1004                 }
1005
1006                 if (ca->allocator_state != ALLOCATOR_BLOCKED_FULL) {
1007                         ca->allocator_state = ALLOCATOR_BLOCKED_FULL;
1008                         closure_wake_up(&c->freelist_wait);
1009                 }
1010
1011                 spin_unlock(&c->freelist_lock);
1012
1013                 if ((current->flags & PF_KTHREAD) &&
1014                     kthread_should_stop()) {
1015                         ret = 1;
1016                         break;
1017                 }
1018
1019                 schedule();
1020                 try_to_freeze();
1021         }
1022 out:
1023         __set_current_state(TASK_RUNNING);
1024         return ret;
1025 }
1026
1027 /*
1028  * Pulls buckets off free_inc, discards them (if enabled), then adds them to
1029  * freelists, waiting until there's room if necessary:
1030  */
1031 static int discard_invalidated_buckets(struct bch_fs *c, struct bch_dev *ca)
1032 {
1033         while (!fifo_empty(&ca->free_inc)) {
1034                 size_t bucket = fifo_peek(&ca->free_inc);
1035
1036                 if (ca->mi.discard &&
1037                     blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
1038                         blkdev_issue_discard(ca->disk_sb.bdev,
1039                                              bucket_to_sector(ca, bucket),
1040                                              ca->mi.bucket_size, GFP_NOIO, 0);
1041
1042                 if (push_invalidated_bucket(c, ca, bucket))
1043                         return 1;
1044         }
1045
1046         return 0;
1047 }
1048
1049 static inline bool allocator_thread_running(struct bch_dev *ca)
1050 {
1051         return ca->mi.state == BCH_MEMBER_STATE_rw &&
1052                 test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags);
1053 }
1054
1055 /**
1056  * bch2_allocator_thread - move buckets from free_inc to the freelist reserves
1057  *
1058  * The free_inc FIFO is populated by bch2_invalidate_buckets() from buckets
1059  * found by find_reclaimable_buckets(), and the reserves are depleted by
1060  * bucket allocation. When free_inc runs out, scan for more buckets to
1061  * invalidate and write out the new bucket gens.
1062  */
1063 static int bch2_allocator_thread(void *arg)
1064 {
1065         struct bch_dev *ca = arg;
1066         struct bch_fs *c = ca->fs;
1067         size_t nr;
1068         int ret;
1069
1070         set_freezable();
1071
1072         while (1) {
1073                 if (!allocator_thread_running(ca)) {
1074                         ca->allocator_state = ALLOCATOR_STOPPED;
1075                         if (kthread_wait_freezable(allocator_thread_running(ca)))
1076                                 break;
1077                 }
1078
1079                 ca->allocator_state = ALLOCATOR_RUNNING;
1080
1081                 cond_resched();
1082                 if (kthread_should_stop())
1083                         break;
1084
1085                 pr_debug("discarding %zu invalidated buckets",
1086                          fifo_used(&ca->free_inc));
1087
1088                 ret = discard_invalidated_buckets(c, ca);
1089                 if (ret)
1090                         goto stop;
1091
1092                 down_read(&c->gc_lock);
1093
1094                 ret = bch2_invalidate_buckets(c, ca);
1095                 if (ret) {
1096                         up_read(&c->gc_lock);
1097                         goto stop;
1098                 }
1099
1100                 if (!fifo_empty(&ca->free_inc)) {
1101                         up_read(&c->gc_lock);
1102                         continue;
1103                 }
1104
1105                 pr_debug("free_inc now empty");
1106
1107                 do {
1108                         /*
1109                          * Find some buckets that we can invalidate, either
1110                          * they're completely unused, or only contain clean data
1111                          * that's been written back to the backing device or
1112                          * another cache tier
1113                          */
1114
1115                         pr_debug("scanning for reclaimable buckets");
1116
1117                         nr = find_reclaimable_buckets(c, ca);
1118
1119                         pr_debug("found %zu buckets", nr);
1120
1121                         trace_alloc_batch(ca, nr, ca->alloc_heap.size);
1122
1123                         if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) ||
1124                              ca->inc_gen_really_needs_gc) &&
1125                             c->gc_thread) {
1126                                 atomic_inc(&c->kick_gc);
1127                                 wake_up_process(c->gc_thread);
1128                         }
1129
1130                         /*
1131                          * If we found any buckets, we have to invalidate them
1132                          * before we scan for more - but if we didn't find very
1133                          * many we may want to wait on more buckets being
1134                          * available so we don't spin:
1135                          */
1136                         if (!nr ||
1137                             (nr < ALLOC_SCAN_BATCH(ca) &&
1138                              !fifo_empty(&ca->free[RESERVE_NONE]))) {
1139                                 ret = wait_buckets_available(c, ca);
1140                                 if (ret) {
1141                                         up_read(&c->gc_lock);
1142                                         goto stop;
1143                                 }
1144                         }
1145                 } while (!nr);
1146
1147                 up_read(&c->gc_lock);
1148
1149                 pr_debug("%zu buckets to invalidate", nr);
1150
1151                 /*
1152                  * alloc_heap is now full of buckets to invalidate: loop back to
1153                  * invalidate them and write out the new bucket gens:
1154                  */
1155         }
1156
1157 stop:
1158         pr_debug("alloc thread stopping (ret %i)", ret);
1159         ca->allocator_state = ALLOCATOR_STOPPED;
1160         closure_wake_up(&c->freelist_wait);
1161         return 0;
1162 }
1163
1164 /* Startup/shutdown (ro/rw): */
1165
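/*
 * Recompute usable capacity, reserved sectors and the copygc threshold from
 * the currently-rw member devices. Caller must hold c->state_lock.
 */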
1166 void bch2_recalc_capacity(struct bch_fs *c)
1167 {
1168         struct bch_dev *ca;
1169         u64 capacity = 0, reserved_sectors = 0, gc_reserve, copygc_threshold = 0;
1170         unsigned bucket_size_max = 0;
1171         unsigned long ra_pages = 0;
1172         unsigned i, j;
1173
1174         lockdep_assert_held(&c->state_lock);
1175
1176         for_each_online_member(ca, c, i) {
1177                 struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_bdi;
1178
1179                 ra_pages += bdi->ra_pages;
1180         }
1181
1182         bch2_set_ra_pages(c, ra_pages);
1183
1184         for_each_rw_member(ca, c, i) {
1185                 u64 dev_reserve = 0;
1186
1187                 /*
1188                  * We need to reserve buckets (from the number
1189                  * of currently available buckets) against
1190                  * foreground writes so that mainly copygc can
1191                  * make forward progress.
1192                  *
1193                  * We need enough to refill the various reserves
1194                  * from scratch - copygc will use its entire
1195                  * reserve all at once, then run again when
1196                  * its reserve is refilled (from the formerly
1197                  * available buckets).
1198                  *
1199                  * This reserve is just used when considering if
1200                  * allocations for foreground writes must wait -
1201                  * not -ENOSPC calculations.
1202                  */
1203                 for (j = 0; j < RESERVE_NONE; j++)
1204                         dev_reserve += ca->free[j].size;
1205
1206                 dev_reserve += 1;       /* btree write point */
1207                 dev_reserve += 1;       /* copygc write point */
1208                 dev_reserve += 1;       /* rebalance write point */
1209
1210                 dev_reserve *= ca->mi.bucket_size;
1211
1212                 copygc_threshold += dev_reserve;
1213
1214                 capacity += bucket_to_sector(ca, ca->mi.nbuckets -
1215                                              ca->mi.first_bucket);
1216
1217                 reserved_sectors += dev_reserve * 2;
1218
1219                 bucket_size_max = max_t(unsigned, bucket_size_max,
1220                                         ca->mi.bucket_size);
1221         }
1222
1223         gc_reserve = c->opts.gc_reserve_bytes
1224                 ? c->opts.gc_reserve_bytes >> 9
1225                 : div64_u64(capacity * c->opts.gc_reserve_percent, 100);
1226
1227         reserved_sectors = max(gc_reserve, reserved_sectors);
1228
1229         reserved_sectors = min(reserved_sectors, capacity);
1230
1231         c->copygc_threshold = copygc_threshold;
1232         c->capacity = capacity - reserved_sectors;
1233
1234         c->bucket_size_max = bucket_size_max;
1235
1236         /* Wake up in case someone was waiting for buckets */
1237         closure_wake_up(&c->freelist_wait);
1238 }
1239
1240 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
1241 {
1242         struct open_bucket *ob;
1243         bool ret = false;
1244
1245         for (ob = c->open_buckets;
1246              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1247              ob++) {
1248                 spin_lock(&ob->lock);
1249                 if (ob->valid && !ob->on_partial_list &&
1250                     ob->ptr.dev == ca->dev_idx)
1251                         ret = true;
1252                 spin_unlock(&ob->lock);
1253         }
1254
1255         return ret;
1256 }
1257
1258 /* device goes ro: */
1259 void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
1260 {
1261         unsigned i;
1262
1263         BUG_ON(ca->alloc_thread);
1264
1265         /* First, remove device from allocation groups: */
1266
1267         for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1268                 clear_bit(ca->dev_idx, c->rw_devs[i].d);
1269
1270         /*
1271          * Capacity is calculated based off of devices in allocation groups:
1272          */
1273         bch2_recalc_capacity(c);
1274
1275         /* Next, close write points that point to this device... */
1276         for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
1277                 bch2_writepoint_stop(c, ca, &c->write_points[i]);
1278
1279         bch2_writepoint_stop(c, ca, &c->copygc_write_point);
1280         bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
1281         bch2_writepoint_stop(c, ca, &c->btree_write_point);
1282
1283         mutex_lock(&c->btree_reserve_cache_lock);
1284         while (c->btree_reserve_cache_nr) {
1285                 struct btree_alloc *a =
1286                         &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
1287
1288                 bch2_open_buckets_put(c, &a->ob);
1289         }
1290         mutex_unlock(&c->btree_reserve_cache_lock);
1291
1292         while (1) {
1293                 struct open_bucket *ob;
1294
1295                 spin_lock(&c->freelist_lock);
1296                 if (!ca->open_buckets_partial_nr) {
1297                         spin_unlock(&c->freelist_lock);
1298                         break;
1299                 }
1300                 ob = c->open_buckets +
1301                         ca->open_buckets_partial[--ca->open_buckets_partial_nr];
1302                 ob->on_partial_list = false;
1303                 spin_unlock(&c->freelist_lock);
1304
1305                 bch2_open_bucket_put(c, ob);
1306         }
1307
1308         bch2_ec_stop_dev(c, ca);
1309
1310         /*
1311          * Wake up threads that were blocked on allocation, so they can notice
1312          * the device can no longer be removed and the capacity has changed:
1313          */
1314         closure_wake_up(&c->freelist_wait);
1315
1316         /*
1317          * journal_res_get() can block waiting for free space in the journal -
1318          * it needs to notice there may not be devices to allocate from anymore:
1319          */
1320         wake_up(&c->journal.wait);
1321
1322         /* Now wait for any in flight writes: */
1323
1324         closure_wait_event(&c->open_buckets_wait,
1325                            !bch2_dev_has_open_write_point(c, ca));
1326 }
1327
1328 /* device goes rw: */
1329 void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
1330 {
1331         unsigned i;
1332
1333         for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1334                 if (ca->mi.data_allowed & (1 << i))
1335                         set_bit(ca->dev_idx, c->rw_devs[i].d);
1336 }
1337
1338 void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca)
1339 {
1340         if (ca->alloc_thread)
1341                 closure_wait_event(&c->freelist_wait,
1342                                    ca->allocator_state != ALLOCATOR_RUNNING);
1343 }
1344
1345 /* stop allocator thread: */
1346 void bch2_dev_allocator_stop(struct bch_dev *ca)
1347 {
1348         struct task_struct *p;
1349
1350         p = rcu_dereference_protected(ca->alloc_thread, 1);
1351         ca->alloc_thread = NULL;
1352
1353         /*
1354          * We need an rcu barrier between setting ca->alloc_thread = NULL and
1355          * the thread shutting down to avoid bch2_wake_allocator() racing:
1356          *
1357          * XXX: it would be better to have the rcu barrier be asynchronous
1358          * instead of blocking us here
1359          */
1360         synchronize_rcu();
1361
1362         if (p) {
1363                 kthread_stop(p);
1364                 put_task_struct(p);
1365         }
1366 }
1367
1368 /* start allocator thread: */
1369 int bch2_dev_allocator_start(struct bch_dev *ca)
1370 {
1371         struct task_struct *p;
1372
1373         /*
1374          * allocator thread already started?
1375          */
1376         if (ca->alloc_thread)
1377                 return 0;
1378
1379         p = kthread_create(bch2_allocator_thread, ca,
1380                            "bch-alloc/%s", ca->name);
1381         if (IS_ERR(p)) {
1382                 bch_err(ca->fs, "error creating allocator thread: %li",
1383                         PTR_ERR(p));
1384                 return PTR_ERR(p);
1385         }
1386
1387         get_task_struct(p);
1388         rcu_assign_pointer(ca->alloc_thread, p);
1389         wake_up_process(p);
1390         return 0;
1391 }
1392
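/* Initialize allocator-related bch_fs state (freelist lock, PD controller work): */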
1393 void bch2_fs_allocator_background_init(struct bch_fs *c)
1394 {
1395         spin_lock_init(&c->freelist_lock);
1396
1397         c->pd_controllers_update_seconds = 5;
1398         INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update);
1399 }