1 // SPDX-License-Identifier: GPL-2.0
2 #include "bcachefs.h"
3 #include "alloc_background.h"
4 #include "alloc_foreground.h"
5 #include "btree_cache.h"
6 #include "btree_io.h"
7 #include "btree_key_cache.h"
8 #include "btree_update.h"
9 #include "btree_update_interior.h"
10 #include "btree_gc.h"
11 #include "buckets.h"
12 #include "clock.h"
13 #include "debug.h"
14 #include "ec.h"
15 #include "error.h"
16 #include "recovery.h"
17 #include "varint.h"
18
19 #include <linux/kthread.h>
20 #include <linux/math64.h>
21 #include <linux/random.h>
22 #include <linux/rculist.h>
23 #include <linux/rcupdate.h>
24 #include <linux/sched/task.h>
25 #include <linux/sort.h>
26 #include <trace/events/bcachefs.h>
27
28 static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
29 #define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
30         BCH_ALLOC_FIELDS_V1()
31 #undef x
32 };
33
34 /* Ratelimiting/PD controllers */
35
36 static void pd_controllers_update(struct work_struct *work)
37 {
38         struct bch_fs *c = container_of(to_delayed_work(work),
39                                            struct bch_fs,
40                                            pd_controllers_update);
41         struct bch_dev *ca;
42         s64 free = 0, fragmented = 0;
43         unsigned i;
44
45         for_each_member_device(ca, c, i) {
46                 struct bch_dev_usage stats = bch2_dev_usage_read(ca);
47
48                 free += bucket_to_sector(ca,
49                                 __dev_buckets_available(ca, stats)) << 9;
50                 /*
51                  * Bytes of internal fragmentation, which can be
52                  * reclaimed by copy GC
53                  */
54                 fragmented += max_t(s64, 0, (bucket_to_sector(ca,
55                                         stats.d[BCH_DATA_user].buckets +
56                                         stats.d[BCH_DATA_cached].buckets) -
57                                   (stats.d[BCH_DATA_user].sectors +
58                                    stats.d[BCH_DATA_cached].sectors)) << 9);
59         }
60
61         bch2_pd_controller_update(&c->copygc_pd, free, fragmented, -1);
62         schedule_delayed_work(&c->pd_controllers_update,
63                               c->pd_controllers_update_seconds * HZ);
64 }
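
/*
 * Editor's note, a worked example of the calculation above (illustrative
 * numbers): a device with 100 user + cached buckets of 256 sectors each that
 * only hold 20000 sectors of data contributes
 * (100 * 256 - 20000) << 9 = 5600 * 512 bytes to "fragmented" - space that
 * copygc could reclaim by compacting the live data into fewer buckets.
 */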
65
66 /* Persistent alloc info: */
67
68 static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
69                                      const void **p, unsigned field)
70 {
71         unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
72         u64 v;
73
74         if (!(a->fields & (1 << field)))
75                 return 0;
76
77         switch (bytes) {
78         case 1:
79                 v = *((const u8 *) *p);
80                 break;
81         case 2:
82                 v = le16_to_cpup(*p);
83                 break;
84         case 4:
85                 v = le32_to_cpup(*p);
86                 break;
87         case 8:
88                 v = le64_to_cpup(*p);
89                 break;
90         default:
91                 BUG();
92         }
93
94         *p += bytes;
95         return v;
96 }
97
98 static inline void alloc_field_v1_put(struct bkey_i_alloc *a, void **p,
99                                       unsigned field, u64 v)
100 {
101         unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
102
103         if (!v)
104                 return;
105
106         a->v.fields |= 1 << field;
107
108         switch (bytes) {
109         case 1:
110                 *((u8 *) *p) = v;
111                 break;
112         case 2:
113                 *((__le16 *) *p) = cpu_to_le16(v);
114                 break;
115         case 4:
116                 *((__le32 *) *p) = cpu_to_le32(v);
117                 break;
118         case 8:
119                 *((__le64 *) *p) = cpu_to_le64(v);
120                 break;
121         default:
122                 BUG();
123         }
124
125         *p += bytes;
126 }
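
/*
 * Editor's note on the v1 layout (inferred from the helpers above): a v1
 * alloc key keeps a bitmap of present fields in bch_alloc.fields, and each
 * present field is appended to ->data at its fixed little-endian width from
 * BCH_ALLOC_V1_FIELD_BYTES; absent fields simply read back as 0.
 * bch_alloc_val_u64s() below sizes the value from the same bitmap.
 */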
127
128 static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
129                                  struct bkey_s_c k)
130 {
131         const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
132         const void *d = in->data;
133         unsigned idx = 0;
134
135         out->gen = in->gen;
136
137 #define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
138         BCH_ALLOC_FIELDS_V1()
139 #undef  x
140 }
141
142 static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
143                                 struct bkey_s_c k)
144 {
145         struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
146         const u8 *in = a.v->data;
147         const u8 *end = bkey_val_end(a);
148         unsigned fieldnr = 0;
149         int ret;
150         u64 v;
151
152         out->gen        = a.v->gen;
153         out->oldest_gen = a.v->oldest_gen;
154         out->data_type  = a.v->data_type;
155
156 #define x(_name, _bits)                                                 \
157         if (fieldnr < a.v->nr_fields) {                                 \
158                 ret = bch2_varint_decode(in, end, &v);                  \
159                 if (ret < 0)                                            \
160                         return ret;                                     \
161                 in += ret;                                              \
162         } else {                                                        \
163                 v = 0;                                                  \
164         }                                                               \
165         out->_name = v;                                                 \
166         if (v != out->_name)                                            \
167                 return -1;                                              \
168         fieldnr++;
169
170         BCH_ALLOC_FIELDS_V2()
171 #undef  x
172         return 0;
173 }
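
/*
 * Editor's note on the v2 decode above: gen, oldest_gen and data_type are
 * fixed bytes, followed by nr_fields varints in BCH_ALLOC_FIELDS_V2() order.
 * Fields past nr_fields read back as 0, which is what lets the packer below
 * drop trailing zero fields; the "v != out->_name" check catches decoded
 * values that don't fit the (possibly narrower) unpacked struct member.
 */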
174
175 static void bch2_alloc_pack_v2(struct bkey_alloc_buf *dst,
176                                const struct bkey_alloc_unpacked src)
177 {
178         struct bkey_i_alloc_v2 *a = bkey_alloc_v2_init(&dst->k);
179         unsigned nr_fields = 0, last_nonzero_fieldnr = 0;
180         u8 *out = a->v.data;
181         u8 *end = (void *) &dst[1];
182         u8 *last_nonzero_field = out;
183         unsigned bytes;
184
185         a->k.p          = POS(src.dev, src.bucket);
186         a->v.gen        = src.gen;
187         a->v.oldest_gen = src.oldest_gen;
188         a->v.data_type  = src.data_type;
189
190 #define x(_name, _bits)                                                 \
191         nr_fields++;                                                    \
192                                                                         \
193         if (src._name) {                                                \
194                 out += bch2_varint_encode(out, src._name);              \
195                                                                         \
196                 last_nonzero_field = out;                               \
197                 last_nonzero_fieldnr = nr_fields;                       \
198         } else {                                                        \
199                 *out++ = 0;                                             \
200         }
201
202         BCH_ALLOC_FIELDS_V2()
203 #undef  x
204         BUG_ON(out > end);
205
206         out = last_nonzero_field;
207         a->v.nr_fields = last_nonzero_fieldnr;
208
209         bytes = (u8 *) out - (u8 *) &a->v;
210         set_bkey_val_bytes(&a->k, bytes);
211         memset_u64s_tail(&a->v, 0, bytes);
212 }
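
/*
 * Editor's sketch (not upstream code, kept out of the build with #if 0): a
 * round trip through the packed representation using only helpers defined or
 * used in this file plus bkey_i_to_s_c() from bkey.h. Field names follow the
 * bkey_alloc_unpacked usage elsewhere in this file; treat it as a hedged
 * illustration of the pack/unpack contract, not a test that ships.
 */
#if 0
static void alloc_pack_unpack_example(void)
{
	struct bkey_alloc_unpacked src = {
		.dev		= 0,
		.bucket		= 1024,
		.gen		= 3,
		.data_type	= BCH_DATA_user,
		.dirty_sectors	= 128,		/* nonzero fields get varint encoded */
		.read_time	= 10000,	/* trailing zero fields are dropped */
	};
	struct bkey_alloc_buf buf;
	struct bkey_alloc_unpacked dst;

	bch2_alloc_pack_v2(&buf, src);
	dst = bch2_alloc_unpack(bkey_i_to_s_c(&buf.k));

	BUG_ON(dst.gen		 != src.gen);
	BUG_ON(dst.dirty_sectors != src.dirty_sectors);
	BUG_ON(dst.read_time	 != src.read_time);
}
#endif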
213
214 struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
215 {
216         struct bkey_alloc_unpacked ret = {
217                 .dev    = k.k->p.inode,
218                 .bucket = k.k->p.offset,
219                 .gen    = 0,
220         };
221
222         if (k.k->type == KEY_TYPE_alloc_v2)
223                 bch2_alloc_unpack_v2(&ret, k);
224         else if (k.k->type == KEY_TYPE_alloc)
225                 bch2_alloc_unpack_v1(&ret, k);
226
227         return ret;
228 }
229
230 void bch2_alloc_pack(struct bch_fs *c,
231                      struct bkey_alloc_buf *dst,
232                      const struct bkey_alloc_unpacked src)
233 {
234         bch2_alloc_pack_v2(dst, src);
235 }
236
237 static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)
238 {
239         unsigned i, bytes = offsetof(struct bch_alloc, data);
240
241         for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
242                 if (a->fields & (1 << i))
243                         bytes += BCH_ALLOC_V1_FIELD_BYTES[i];
244
245         return DIV_ROUND_UP(bytes, sizeof(u64));
246 }
247
248 const char *bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k)
249 {
250         struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
251
252         if (k.k->p.inode >= c->sb.nr_devices ||
253             !c->devs[k.k->p.inode])
254                 return "invalid device";
255
256         /* allow for unknown fields */
257         if (bkey_val_u64s(a.k) < bch_alloc_val_u64s(a.v))
258                 return "incorrect value size";
259
260         return NULL;
261 }
262
263 const char *bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k)
264 {
265         struct bkey_alloc_unpacked u;
266
267         if (k.k->p.inode >= c->sb.nr_devices ||
268             !c->devs[k.k->p.inode])
269                 return "invalid device";
270
271         if (bch2_alloc_unpack_v2(&u, k))
272                 return "unpack error";
273
274         return NULL;
275 }
276
277 void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
278                            struct bkey_s_c k)
279 {
280         struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
281
282         pr_buf(out, "gen %u oldest_gen %u data_type %u ",
283                u.gen, u.oldest_gen, u.data_type);
284 #define x(_name, ...)   pr_buf(out, #_name " %llu ", (u64) u._name);
285         BCH_ALLOC_FIELDS_V2()
286 #undef  x
287 }
288
289 static int bch2_alloc_read_fn(struct bch_fs *c, enum btree_id id,
290                               unsigned level, struct bkey_s_c k)
291 {
292         struct bch_dev *ca;
293         struct bucket *g;
294         struct bkey_alloc_unpacked u;
295
296         if (level ||
297             (k.k->type != KEY_TYPE_alloc &&
298              k.k->type != KEY_TYPE_alloc_v2))
299                 return 0;
300
301         ca = bch_dev_bkey_exists(c, k.k->p.inode);
302         g = bucket(ca, k.k->p.offset);
303         u = bch2_alloc_unpack(k);
304
305         g->_mark.gen            = u.gen;
306         g->_mark.data_type      = u.data_type;
307         g->_mark.dirty_sectors  = u.dirty_sectors;
308         g->_mark.cached_sectors = u.cached_sectors;
309         g->io_time[READ]        = u.read_time;
310         g->io_time[WRITE]       = u.write_time;
311         g->oldest_gen           = u.oldest_gen;
312         g->gen_valid            = 1;
313
314         return 0;
315 }
316
317 int bch2_alloc_read(struct bch_fs *c, struct journal_keys *journal_keys)
318 {
319         int ret;
320
321         down_read(&c->gc_lock);
322         ret = bch2_btree_and_journal_walk(c, journal_keys, BTREE_ID_alloc,
323                                           NULL, bch2_alloc_read_fn);
324         up_read(&c->gc_lock);
325
326         if (ret) {
327                 bch_err(c, "error reading alloc info: %i", ret);
328                 return ret;
329         }
330
331         return 0;
332 }
333
334 static int bch2_alloc_write_key(struct btree_trans *trans,
335                                 struct btree_iter *iter,
336                                 unsigned flags)
337 {
338         struct bch_fs *c = trans->c;
339         struct bkey_s_c k;
340         struct bch_dev *ca;
341         struct bucket *g;
342         struct bucket_mark m;
343         struct bkey_alloc_unpacked old_u, new_u;
344         struct bkey_alloc_buf a;
345         int ret;
346 retry:
347         bch2_trans_begin(trans);
348
349         ret = bch2_btree_key_cache_flush(trans,
350                         BTREE_ID_alloc, iter->pos);
351         if (ret)
352                 goto err;
353
354         k = bch2_btree_iter_peek_slot(iter);
355         ret = bkey_err(k);
356         if (ret)
357                 goto err;
358
359         old_u = bch2_alloc_unpack(k);
360
361         percpu_down_read(&c->mark_lock);
362         ca      = bch_dev_bkey_exists(c, iter->pos.inode);
363         g       = bucket(ca, iter->pos.offset);
364         m       = READ_ONCE(g->mark);
365         new_u   = alloc_mem_to_key(iter, g, m);
366         percpu_up_read(&c->mark_lock);
367
368         if (!bkey_alloc_unpacked_cmp(old_u, new_u))
369                 return 0;
370
371         bch2_alloc_pack(c, &a, new_u);
372         bch2_trans_update(trans, iter, &a.k,
373                           BTREE_TRIGGER_NORUN);
374         ret = bch2_trans_commit(trans, NULL, NULL,
375                                 BTREE_INSERT_NOFAIL|flags);
376 err:
377         if (ret == -EINTR)
378                 goto retry;
379         return ret;
380 }
381
382 int bch2_alloc_write(struct bch_fs *c, unsigned flags)
383 {
384         struct btree_trans trans;
385         struct btree_iter *iter;
386         struct bch_dev *ca;
387         unsigned i;
388         int ret = 0;
389
390         bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
391         iter = bch2_trans_get_iter(&trans, BTREE_ID_alloc, POS_MIN,
392                                    BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
393
394         for_each_member_device(ca, c, i) {
395                 bch2_btree_iter_set_pos(iter,
396                         POS(ca->dev_idx, ca->mi.first_bucket));
397
398                 while (iter->pos.offset < ca->mi.nbuckets) {
399                         bch2_trans_cond_resched(&trans);
400
401                         ret = bch2_alloc_write_key(&trans, iter, flags);
402                         if (ret) {
403                                 percpu_ref_put(&ca->io_ref);
404                                 goto err;
405                         }
406                         bch2_btree_iter_next_slot(iter);
407                 }
408         }
409 err:
410         bch2_trans_iter_put(&trans, iter);
411         bch2_trans_exit(&trans);
412         return ret;
413 }
414
415 /* Bucket IO clocks: */
416
417 int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
418                               size_t bucket_nr, int rw)
419 {
420         struct bch_fs *c = trans->c;
421         struct bch_dev *ca = bch_dev_bkey_exists(c, dev);
422         struct btree_iter *iter;
423         struct bucket *g;
424         struct bkey_alloc_buf *a;
425         struct bkey_alloc_unpacked u;
426         u64 *time, now;
427         int ret = 0;
428
429         iter = bch2_trans_get_iter(trans, BTREE_ID_alloc, POS(dev, bucket_nr),
430                                    BTREE_ITER_CACHED|
431                                    BTREE_ITER_CACHED_NOFILL|
432                                    BTREE_ITER_INTENT);
433         ret = bch2_btree_iter_traverse(iter);
434         if (ret)
435                 goto out;
436
437         a = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));
438         ret = PTR_ERR_OR_ZERO(a);
439         if (ret)
440                 goto out;
441
442         percpu_down_read(&c->mark_lock);
443         g = bucket(ca, bucket_nr);
444         u = alloc_mem_to_key(iter, g, READ_ONCE(g->mark));
445         percpu_up_read(&c->mark_lock);
446
447         time = rw == READ ? &u.read_time : &u.write_time;
448         now = atomic64_read(&c->io_clock[rw].now);
449         if (*time == now)
450                 goto out;
451
452         *time = now;
453
454         bch2_alloc_pack(c, a, u);
455         ret   = bch2_trans_update(trans, iter, &a->k, 0) ?:
456                 bch2_trans_commit(trans, NULL, NULL, 0);
457 out:
458         bch2_trans_iter_put(trans, iter);
459         return ret;
460 }
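
/*
 * Editor's sketch (hypothetical caller, not upstream code): how the helper
 * above might be driven from its own transaction, following the
 * init/begin/-EINTR-retry/exit pattern used by bch2_alloc_write_key() in
 * this file. Kept out of the build with #if 0.
 */
#if 0
static int example_bump_read_clock(struct bch_fs *c, unsigned dev, size_t bucket)
{
	struct btree_trans trans;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
	do {
		bch2_trans_begin(&trans);
		ret = bch2_bucket_io_time_reset(&trans, dev, bucket, READ);
	} while (ret == -EINTR);
	bch2_trans_exit(&trans);

	return ret;
}
#endif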
461
462 /* Background allocator thread: */
463
464 /*
465  * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
466  * (marking them as invalidated on disk), then optionally issues discard
467  * commands to the newly free buckets, then puts them on the various freelists.
468  */
469
470 /**
471  * wait_buckets_available - wait on reclaimable buckets
472  *
473  * If there aren't enough available buckets to fill up free_inc, wait until
474  * there are.
475  */
476 static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
477 {
478         unsigned long gc_count = c->gc_count;
479         s64 available;
480         int ret = 0;
481
482         ca->allocator_state = ALLOCATOR_BLOCKED;
483         closure_wake_up(&c->freelist_wait);
484
485         while (1) {
486                 set_current_state(TASK_INTERRUPTIBLE);
487                 if (kthread_should_stop()) {
488                         ret = 1;
489                         break;
490                 }
491
492                 if (gc_count != c->gc_count)
493                         ca->inc_gen_really_needs_gc = 0;
494
495                 available  = dev_buckets_reclaimable(ca);
496                 available -= ca->inc_gen_really_needs_gc;
497
498                 available = max(available, 0LL);
499
500                 if (available)
501                         break;
502
503                 up_read(&c->gc_lock);
504                 schedule();
505                 try_to_freeze();
506                 down_read(&c->gc_lock);
507         }
508
509         __set_current_state(TASK_RUNNING);
510         ca->allocator_state = ALLOCATOR_RUNNING;
511         closure_wake_up(&c->freelist_wait);
512
513         return ret;
514 }
515
516 static bool bch2_can_invalidate_bucket(struct bch_dev *ca, size_t b,
517                                        struct bucket_mark m)
518 {
519         u8 gc_gen;
520
521         if (!is_available_bucket(m))
522                 return false;
523
524         if (m.owned_by_allocator)
525                 return false;
526
527         if (ca->buckets_nouse &&
528             test_bit(b, ca->buckets_nouse))
529                 return false;
530
531         gc_gen = bucket_gc_gen(bucket(ca, b));
532
533         if (gc_gen >= BUCKET_GC_GEN_MAX / 2)
534                 ca->inc_gen_needs_gc++;
535
536         if (gc_gen >= BUCKET_GC_GEN_MAX)
537                 ca->inc_gen_really_needs_gc++;
538
539         return gc_gen < BUCKET_GC_GEN_MAX;
540 }
541
542 /*
543  * Determines what order we're going to reuse buckets: smallest
544  * bucket_sort_key() first.
545  */
546
547 static unsigned bucket_sort_key(struct bucket *g, struct bucket_mark m,
548                                 u64 now, u64 last_seq_ondisk)
549 {
550         unsigned used = bucket_sectors_used(m);
551
552         if (used) {
553                 /*
554                  * Prefer to keep buckets that have been read more recently, and
555                  * buckets that have more data in them:
556                  */
557                 u64 last_read = max_t(s64, 0, now - g->io_time[READ]);
558                 u32 last_read_scaled = min_t(u64, U32_MAX, div_u64(last_read, used));
559
560                 return -last_read_scaled;
561         } else {
562                 /*
563                  * Prefer to use buckets with smaller gc_gen so that we don't
564                  * have to walk the btree and recalculate oldest_gen - but shift
565                  * off the low bits so that buckets will still have equal sort
566                  * keys when there's only a small difference, so that we can
567                  * keep sequential buckets together:
568                  */
569                 return  (bucket_needs_journal_commit(m, last_seq_ondisk) << 4)|
570                         (bucket_gc_gen(g) >> 4);
571         }
572 }
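
/*
 * Editor's note, a worked example of the empty-bucket branch above: a bucket
 * whose last update is already on disk and whose bucket_gc_gen() is 40 gets
 * key (0 << 4) | (40 >> 4) = 2; an otherwise identical bucket that still
 * needs a journal commit gets (1 << 4) | 2 = 18, so it is reused later
 * (smaller sort keys are reused first).
 */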
573
574 static inline int bucket_alloc_cmp(alloc_heap *h,
575                                    struct alloc_heap_entry l,
576                                    struct alloc_heap_entry r)
577 {
578         return  cmp_int(l.key, r.key) ?:
579                 cmp_int(r.nr, l.nr) ?:
580                 cmp_int(l.bucket, r.bucket);
581 }
582
583 static inline int bucket_idx_cmp(const void *_l, const void *_r)
584 {
585         const struct alloc_heap_entry *l = _l, *r = _r;
586
587         return cmp_int(l->bucket, r->bucket);
588 }
589
590 static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
591 {
592         struct bucket_array *buckets;
593         struct alloc_heap_entry e = { 0 };
594         u64 now, last_seq_ondisk;
595         size_t b, i, nr = 0;
596
597         down_read(&ca->bucket_lock);
598
599         buckets = bucket_array(ca);
600         ca->alloc_heap.used = 0;
601         now = atomic64_read(&c->io_clock[READ].now);
602         last_seq_ondisk = c->journal.last_seq_ondisk;
603
604         /*
605          * Find buckets with lowest read priority, by building a maxheap sorted
606          * by read priority and repeatedly replacing the maximum element until
607          * all buckets have been visited.
608          */
609         for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
610                 struct bucket *g = &buckets->b[b];
611                 struct bucket_mark m = READ_ONCE(g->mark);
612                 unsigned key = bucket_sort_key(g, m, now, last_seq_ondisk);
613
614                 if (!bch2_can_invalidate_bucket(ca, b, m))
615                         continue;
616
617                 if (e.nr && e.bucket + e.nr == b && e.key == key) {
618                         e.nr++;
619                 } else {
620                         if (e.nr)
621                                 heap_add_or_replace(&ca->alloc_heap, e,
622                                         -bucket_alloc_cmp, NULL);
623
624                         e = (struct alloc_heap_entry) {
625                                 .bucket = b,
626                                 .nr     = 1,
627                                 .key    = key,
628                         };
629                 }
630
631                 cond_resched();
632         }
633
634         if (e.nr)
635                 heap_add_or_replace(&ca->alloc_heap, e,
636                                 -bucket_alloc_cmp, NULL);
637
638         for (i = 0; i < ca->alloc_heap.used; i++)
639                 nr += ca->alloc_heap.data[i].nr;
640
641         while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
642                 nr -= ca->alloc_heap.data[0].nr;
643                 heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL);
644         }
645
646         up_read(&ca->bucket_lock);
647 }
648
649 static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
650 {
651         struct bucket_array *buckets = bucket_array(ca);
652         struct bucket_mark m;
653         size_t b, start;
654
655         if (ca->fifo_last_bucket <  ca->mi.first_bucket ||
656             ca->fifo_last_bucket >= ca->mi.nbuckets)
657                 ca->fifo_last_bucket = ca->mi.first_bucket;
658
659         start = ca->fifo_last_bucket;
660
661         do {
662                 ca->fifo_last_bucket++;
663                 if (ca->fifo_last_bucket == ca->mi.nbuckets)
664                         ca->fifo_last_bucket = ca->mi.first_bucket;
665
666                 b = ca->fifo_last_bucket;
667                 m = READ_ONCE(buckets->b[b].mark);
668
669                 if (bch2_can_invalidate_bucket(ca, b, m)) {
670                         struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
671
672                         heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
673                         if (heap_full(&ca->alloc_heap))
674                                 break;
675                 }
676
677                 cond_resched();
678         } while (ca->fifo_last_bucket != start);
679 }
680
681 static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
682 {
683         struct bucket_array *buckets = bucket_array(ca);
684         struct bucket_mark m;
685         size_t checked, i;
686
687         for (checked = 0;
688              checked < ca->mi.nbuckets / 2;
689              checked++) {
690                 size_t b = bch2_rand_range(ca->mi.nbuckets -
691                                            ca->mi.first_bucket) +
692                         ca->mi.first_bucket;
693
694                 m = READ_ONCE(buckets->b[b].mark);
695
696                 if (bch2_can_invalidate_bucket(ca, b, m)) {
697                         struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
698
699                         heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
700                         if (heap_full(&ca->alloc_heap))
701                                 break;
702                 }
703
704                 cond_resched();
705         }
706
707         sort(ca->alloc_heap.data,
708              ca->alloc_heap.used,
709              sizeof(ca->alloc_heap.data[0]),
710              bucket_idx_cmp, NULL);
711
712         /* remove duplicates: */
713         for (i = 0; i + 1 < ca->alloc_heap.used; i++)
714                 if (ca->alloc_heap.data[i].bucket ==
715                     ca->alloc_heap.data[i + 1].bucket)
716                         ca->alloc_heap.data[i].nr = 0;
717 }
718
719 static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
720 {
721         size_t i, nr = 0;
722
723         ca->inc_gen_needs_gc                    = 0;
724
725         switch (ca->mi.replacement) {
726         case BCH_CACHE_REPLACEMENT_lru:
727                 find_reclaimable_buckets_lru(c, ca);
728                 break;
729         case BCH_CACHE_REPLACEMENT_fifo:
730                 find_reclaimable_buckets_fifo(c, ca);
731                 break;
732         case BCH_CACHE_REPLACEMENT_random:
733                 find_reclaimable_buckets_random(c, ca);
734                 break;
735         }
736
737         heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL);
738
739         for (i = 0; i < ca->alloc_heap.used; i++)
740                 nr += ca->alloc_heap.data[i].nr;
741
742         return nr;
743 }
744
745 static inline long next_alloc_bucket(struct bch_dev *ca)
746 {
747         struct alloc_heap_entry e, *top = ca->alloc_heap.data;
748
749         while (ca->alloc_heap.used) {
750                 if (top->nr) {
751                         size_t b = top->bucket;
752
753                         top->bucket++;
754                         top->nr--;
755                         return b;
756                 }
757
758                 heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
759         }
760
761         return -1;
762 }
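
/*
 * Editor's note: alloc_heap entries describe runs of contiguous buckets, so
 * an entry { .bucket = 100, .nr = 3 } yields buckets 100, 101, 102 on
 * successive calls before the entry is popped.
 */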
763
764 /*
765  * returns sequence number of most recent journal entry that updated this
766  * bucket:
767  */
768 static u64 bucket_journal_seq(struct bch_fs *c, struct bucket_mark m)
769 {
770         if (m.journal_seq_valid) {
771                 u64 journal_seq = atomic64_read(&c->journal.seq);
772                 u64 bucket_seq  = journal_seq;
773
774                 bucket_seq &= ~((u64) U16_MAX);
775                 bucket_seq |= m.journal_seq;
776
777                 if (bucket_seq > journal_seq)
778                         bucket_seq -= 1 << 16;
779
780                 return bucket_seq;
781         } else {
782                 return 0;
783         }
784 }
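
/*
 * Editor's note, a worked example of the reconstruction above: with
 * journal.seq == 0x12345 and m.journal_seq == 0xFFF0, bucket_seq starts as
 * (0x12345 & ~0xFFFF) | 0xFFF0 = 0x1FFF0; that's in the future, so we
 * subtract 1 << 16 and return 0xFFF0 - the most recent sequence number
 * whose low 16 bits match.
 */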
785
786 static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
787                                        struct bch_dev *ca,
788                                        struct btree_iter *iter,
789                                        u64 *journal_seq, unsigned flags)
790 {
791         struct bch_fs *c = trans->c;
792         struct bkey_alloc_buf a;
793         struct bkey_alloc_unpacked u;
794         struct bucket *g;
795         struct bucket_mark m;
796         bool invalidating_cached_data;
797         size_t b;
798         int ret = 0;
799
800         BUG_ON(!ca->alloc_heap.used ||
801                !ca->alloc_heap.data[0].nr);
802         b = ca->alloc_heap.data[0].bucket;
803
804         /* first, put on free_inc and mark as owned by allocator: */
805         percpu_down_read(&c->mark_lock);
806         g = bucket(ca, b);
807         m = READ_ONCE(g->mark);
808
809         BUG_ON(m.dirty_sectors);
810
811         bch2_mark_alloc_bucket(c, ca, b, true, gc_pos_alloc(c, NULL), 0);
812
813         spin_lock(&c->freelist_lock);
814         verify_not_on_freelist(c, ca, b);
815         BUG_ON(!fifo_push(&ca->free_inc, b));
816         spin_unlock(&c->freelist_lock);
817
818         /*
819          * If we're not invalidating cached data, we only increment the bucket
820          * gen in memory here; the incremented gen will be updated in the btree
821          * by bch2_trans_mark_pointer():
822          */
823         if (!m.cached_sectors &&
824             !bucket_needs_journal_commit(m, c->journal.last_seq_ondisk)) {
825                 BUG_ON(m.data_type);
826                 bucket_cmpxchg(g, m, m.gen++);
827                 percpu_up_read(&c->mark_lock);
828                 goto out;
829         }
830
831         percpu_up_read(&c->mark_lock);
832
833         /*
834          * If the read-only path is trying to shut down, we can't be generating
835          * new btree updates:
836          */
837         if (test_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags)) {
838                 ret = 1;
839                 goto out;
840         }
841
842         bch2_btree_iter_set_pos(iter, POS(ca->dev_idx, b));
843 retry:
844         ret = bch2_btree_iter_traverse(iter);
845         if (ret)
846                 return ret;
847
848         percpu_down_read(&c->mark_lock);
849         g = bucket(ca, iter->pos.offset);
850         m = READ_ONCE(g->mark);
851         u = alloc_mem_to_key(iter, g, m);
852
853         percpu_up_read(&c->mark_lock);
854
855         invalidating_cached_data = u.cached_sectors != 0;
856
857         u.gen++;
858         u.data_type     = 0;
859         u.dirty_sectors = 0;
860         u.cached_sectors = 0;
861         u.read_time     = atomic64_read(&c->io_clock[READ].now);
862         u.write_time    = atomic64_read(&c->io_clock[WRITE].now);
863
864         bch2_alloc_pack(c, &a, u);
865         bch2_trans_update(trans, iter, &a.k,
866                           BTREE_TRIGGER_BUCKET_INVALIDATE);
867
868         /*
869          * XXX:
870          * when using deferred btree updates, we have journal reclaim doing
871          * btree updates and thus requiring the allocator to make forward
872          * progress, and here the allocator is requiring space in the journal -
873          * so we need a journal pre-reservation:
874          */
875         ret = bch2_trans_commit(trans, NULL,
876                                 invalidating_cached_data ? journal_seq : NULL,
877                                 BTREE_INSERT_NOUNLOCK|
878                                 BTREE_INSERT_NOCHECK_RW|
879                                 BTREE_INSERT_NOFAIL|
880                                 BTREE_INSERT_JOURNAL_RESERVED|
881                                 flags);
882         if (ret == -EINTR)
883                 goto retry;
884 out:
885         if (!ret) {
886                 /* remove from alloc_heap: */
887                 struct alloc_heap_entry e, *top = ca->alloc_heap.data;
888
889                 top->bucket++;
890                 top->nr--;
891
892                 if (!top->nr)
893                         heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
894
895                 /*
896                  * Make sure we flush the last journal entry that updated this
897                  * bucket (i.e. deleting the last reference) before writing to
898                  * this bucket again:
899                  */
900                 *journal_seq = max(*journal_seq, bucket_journal_seq(c, m));
901         } else {
902                 size_t b2;
903
904                 /* remove from free_inc: */
905                 percpu_down_read(&c->mark_lock);
906                 spin_lock(&c->freelist_lock);
907
908                 bch2_mark_alloc_bucket(c, ca, b, false,
909                                        gc_pos_alloc(c, NULL), 0);
910
911                 BUG_ON(!fifo_pop_back(&ca->free_inc, b2));
912                 BUG_ON(b != b2);
913
914                 spin_unlock(&c->freelist_lock);
915                 percpu_up_read(&c->mark_lock);
916         }
917
918         return ret < 0 ? ret : 0;
919 }
920
921 /*
922  * Pull buckets off ca->alloc_heap, invalidate them, move them to ca->free_inc:
923  */
924 static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
925 {
926         struct btree_trans trans;
927         struct btree_iter *iter;
928         u64 journal_seq = 0;
929         int ret = 0;
930
931         bch2_trans_init(&trans, c, 0, 0);
932         iter = bch2_trans_get_iter(&trans, BTREE_ID_alloc,
933                                    POS(ca->dev_idx, 0),
934                                    BTREE_ITER_CACHED|
935                                    BTREE_ITER_CACHED_NOFILL|
936                                    BTREE_ITER_INTENT);
937
938         /* Only use nowait if we've already invalidated at least one bucket: */
939         while (!ret &&
940                !fifo_full(&ca->free_inc) &&
941                ca->alloc_heap.used)
942                 ret = bch2_invalidate_one_bucket2(&trans, ca, iter, &journal_seq,
943                                 BTREE_INSERT_GC_LOCK_HELD|
944                                 (!fifo_empty(&ca->free_inc)
945                                  ? BTREE_INSERT_NOWAIT : 0));
946
947         bch2_trans_iter_put(&trans, iter);
948         bch2_trans_exit(&trans);
949
950         /* If we used NOWAIT, don't return the error: */
951         if (!fifo_empty(&ca->free_inc))
952                 ret = 0;
953         if (ret) {
954                 bch_err(ca, "error invalidating buckets: %i", ret);
955                 return ret;
956         }
957
958         if (journal_seq)
959                 ret = bch2_journal_flush_seq(&c->journal, journal_seq);
960         if (ret) {
961                 bch_err(ca, "journal error: %i", ret);
962                 return ret;
963         }
964
965         return 0;
966 }
967
968 static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t bucket)
969 {
970         unsigned i;
971         int ret = 0;
972
973         while (1) {
974                 set_current_state(TASK_INTERRUPTIBLE);
975
976                 spin_lock(&c->freelist_lock);
977                 for (i = 0; i < RESERVE_NR; i++) {
978
979                         /*
980                          * Don't strand buckets on the copygc freelist until
981                          * after recovery is finished:
982                          */
983                         if (!test_bit(BCH_FS_STARTED, &c->flags) &&
984                             i == RESERVE_MOVINGGC)
985                                 continue;
986
987                         if (fifo_push(&ca->free[i], bucket)) {
988                                 fifo_pop(&ca->free_inc, bucket);
989
990                                 closure_wake_up(&c->freelist_wait);
991                                 ca->allocator_state = ALLOCATOR_RUNNING;
992
993                                 spin_unlock(&c->freelist_lock);
994                                 goto out;
995                         }
996                 }
997
998                 if (ca->allocator_state != ALLOCATOR_BLOCKED_FULL) {
999                         ca->allocator_state = ALLOCATOR_BLOCKED_FULL;
1000                         closure_wake_up(&c->freelist_wait);
1001                 }
1002
1003                 spin_unlock(&c->freelist_lock);
1004
1005                 if ((current->flags & PF_KTHREAD) &&
1006                     kthread_should_stop()) {
1007                         ret = 1;
1008                         break;
1009                 }
1010
1011                 schedule();
1012                 try_to_freeze();
1013         }
1014 out:
1015         __set_current_state(TASK_RUNNING);
1016         return ret;
1017 }
1018
1019 /*
1020  * Pulls buckets off free_inc, discards them (if enabled), then adds them to
1021  * freelists, waiting until there's room if necessary:
1022  */
1023 static int discard_invalidated_buckets(struct bch_fs *c, struct bch_dev *ca)
1024 {
1025         while (!fifo_empty(&ca->free_inc)) {
1026                 size_t bucket = fifo_peek(&ca->free_inc);
1027
1028                 if (ca->mi.discard &&
1029                     blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
1030                         blkdev_issue_discard(ca->disk_sb.bdev,
1031                                              bucket_to_sector(ca, bucket),
1032                                              ca->mi.bucket_size, GFP_NOIO, 0);
1033
1034                 if (push_invalidated_bucket(c, ca, bucket))
1035                         return 1;
1036         }
1037
1038         return 0;
1039 }
1040
1041 static inline bool allocator_thread_running(struct bch_dev *ca)
1042 {
1043         return ca->mi.state == BCH_MEMBER_STATE_rw &&
1044                 test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags);
1045 }
1046
1047 /**
1048  * bch2_allocator_thread - move buckets from free_inc to reserves
1049  *
1050  * The alloc_heap is populated by find_reclaimable_buckets(), drained
1051  * into the free_inc FIFO by bch2_invalidate_buckets(), and the reserves
1052  * are depleted by bucket allocation. When we run out of free_inc, try to
1053  * invalidate some more buckets and write out the new bucket gens.
1054  */
1055 static int bch2_allocator_thread(void *arg)
1056 {
1057         struct bch_dev *ca = arg;
1058         struct bch_fs *c = ca->fs;
1059         size_t nr;
1060         int ret;
1061
1062         set_freezable();
1063
1064         while (1) {
1065                 if (!allocator_thread_running(ca)) {
1066                         ca->allocator_state = ALLOCATOR_STOPPED;
1067                         if (kthread_wait_freezable(allocator_thread_running(ca)))
1068                                 break;
1069                 }
1070
1071                 ca->allocator_state = ALLOCATOR_RUNNING;
1072
1073                 cond_resched();
1074                 if (kthread_should_stop())
1075                         break;
1076
1077                 pr_debug("discarding %zu invalidated buckets",
1078                          fifo_used(&ca->free_inc));
1079
1080                 ret = discard_invalidated_buckets(c, ca);
1081                 if (ret)
1082                         goto stop;
1083
1084                 down_read(&c->gc_lock);
1085
1086                 ret = bch2_invalidate_buckets(c, ca);
1087                 if (ret) {
1088                         up_read(&c->gc_lock);
1089                         goto stop;
1090                 }
1091
1092                 if (!fifo_empty(&ca->free_inc)) {
1093                         up_read(&c->gc_lock);
1094                         continue;
1095                 }
1096
1097                 pr_debug("free_inc now empty");
1098
1099                 while (1) {
1100                         cond_resched();
1101                         /*
1102                          * Find some buckets that we can invalidate: either
1103                          * they're completely unused, or they only contain clean
1104                          * data that's been written back to the backing device or
1105                          * another cache tier.
1106                          */
1107
1108                         pr_debug("scanning for reclaimable buckets");
1109
1110                         nr = find_reclaimable_buckets(c, ca);
1111
1112                         pr_debug("found %zu buckets", nr);
1113
1114                         trace_alloc_batch(ca, nr, ca->alloc_heap.size);
1115
1116                         if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) ||
1117                              ca->inc_gen_really_needs_gc) &&
1118                             c->gc_thread) {
1119                                 atomic_inc(&c->kick_gc);
1120                                 wake_up_process(c->gc_thread);
1121                         }
1122
1123                         if (nr)
1124                                 break;
1125
1126                         /*
1127                          * If we found any buckets, we have to invalidate them
1128                          * before we scan for more - but if we didn't find very
1129                          * many we may want to wait on more buckets being
1130                          * available so we don't spin:
1131                          */
1132                         ret = wait_buckets_available(c, ca);
1133                         if (ret) {
1134                                 up_read(&c->gc_lock);
1135                                 goto stop;
1136                         }
1137                 }
1138
1139                 up_read(&c->gc_lock);
1140
1141                 pr_debug("%zu buckets to invalidate", nr);
1142
1143                 /*
1144                  * alloc_heap is now full of buckets to invalidate: the next loop
1145                  * iteration invalidates them and writes out the new bucket gens:
1146                  */
1147         }
1148
1149 stop:
1150         pr_debug("alloc thread stopping (ret %i)", ret);
1151         ca->allocator_state = ALLOCATOR_STOPPED;
1152         closure_wake_up(&c->freelist_wait);
1153         return 0;
1154 }
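
/*
 * Editor's summary of the pipeline driven by the thread above:
 *
 *   find_reclaimable_buckets()    -> ca->alloc_heap
 *   bch2_invalidate_buckets()     -> ca->free_inc (gens bumped, btree updated)
 *   discard_invalidated_buckets() -> ca->free[reserve] (optionally discarded)
 *
 * from where the foreground allocator (alloc_foreground.c) hands buckets out.
 */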
1155
1156 /* Startup/shutdown (ro/rw): */
1157
1158 void bch2_recalc_capacity(struct bch_fs *c)
1159 {
1160         struct bch_dev *ca;
1161         u64 capacity = 0, reserved_sectors = 0, gc_reserve, copygc_threshold = 0;
1162         unsigned bucket_size_max = 0;
1163         unsigned long ra_pages = 0;
1164         unsigned i, j;
1165
1166         lockdep_assert_held(&c->state_lock);
1167
1168         for_each_online_member(ca, c, i) {
1169                 struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_bdi;
1170
1171                 ra_pages += bdi->ra_pages;
1172         }
1173
1174         bch2_set_ra_pages(c, ra_pages);
1175
1176         for_each_rw_member(ca, c, i) {
1177                 u64 dev_reserve = 0;
1178
1179                 /*
1180                  * We need to reserve buckets (from the number
1181                  * of currently available buckets) against
1182                  * foreground writes so that mainly copygc can
1183                  * make forward progress.
1184                  *
1185                  * We need enough to refill the various reserves
1186                  * reserve all at once, then run again when
1187                  * reserve all at once, then run against when
1188                  * its reserve is refilled (from the formerly
1189                  * available buckets).
1190                  *
1191                  * This reserve is just used when considering if
1192                  * allocations for foreground writes must wait -
1193                  * not -ENOSPC calculations.
1194                  */
1195                 for (j = 0; j < RESERVE_NONE; j++)
1196                         dev_reserve += ca->free[j].size;
1197
1198                 dev_reserve += 1;       /* btree write point */
1199                 dev_reserve += 1;       /* copygc write point */
1200                 dev_reserve += 1;       /* rebalance write point */
1201
1202                 dev_reserve *= ca->mi.bucket_size;
1203
1204                 copygc_threshold += dev_reserve;
1205
1206                 capacity += bucket_to_sector(ca, ca->mi.nbuckets -
1207                                              ca->mi.first_bucket);
1208
1209                 reserved_sectors += dev_reserve * 2;
1210
1211                 bucket_size_max = max_t(unsigned, bucket_size_max,
1212                                         ca->mi.bucket_size);
1213         }
1214
1215         gc_reserve = c->opts.gc_reserve_bytes
1216                 ? c->opts.gc_reserve_bytes >> 9
1217                 : div64_u64(capacity * c->opts.gc_reserve_percent, 100);
1218
1219         reserved_sectors = max(gc_reserve, reserved_sectors);
1220
1221         reserved_sectors = min(reserved_sectors, capacity);
1222
1223         c->copygc_threshold = copygc_threshold;
1224         c->capacity = capacity - reserved_sectors;
1225
1226         c->bucket_size_max = bucket_size_max;
1227
1228         /* Wake up in case someone was waiting for buckets */
1229         closure_wake_up(&c->freelist_wait);
1230 }
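
/*
 * Editor's note, a worked example with illustrative numbers: with 256-sector
 * buckets, free[] reserves totalling 13 buckets and the 3 write points above,
 * dev_reserve = (13 + 3) * 256 = 4096 sectors per device; reserved_sectors
 * sums twice that across rw devices, is raised to at least gc_reserve and
 * clamped to capacity, and what remains becomes c->capacity.
 */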
1231
1232 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
1233 {
1234         struct open_bucket *ob;
1235         bool ret = false;
1236
1237         for (ob = c->open_buckets;
1238              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1239              ob++) {
1240                 spin_lock(&ob->lock);
1241                 if (ob->valid && !ob->on_partial_list &&
1242                     ob->ptr.dev == ca->dev_idx)
1243                         ret = true;
1244                 spin_unlock(&ob->lock);
1245         }
1246
1247         return ret;
1248 }
1249
1250 /* device goes ro: */
1251 void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
1252 {
1253         unsigned i;
1254
1255         BUG_ON(ca->alloc_thread);
1256
1257         /* First, remove device from allocation groups: */
1258
1259         for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1260                 clear_bit(ca->dev_idx, c->rw_devs[i].d);
1261
1262         /*
1263          * Capacity is calculated based off of devices in allocation groups:
1264          */
1265         bch2_recalc_capacity(c);
1266
1267         /* Next, close write points that point to this device... */
1268         for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
1269                 bch2_writepoint_stop(c, ca, &c->write_points[i]);
1270
1271         bch2_writepoint_stop(c, ca, &c->copygc_write_point);
1272         bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
1273         bch2_writepoint_stop(c, ca, &c->btree_write_point);
1274
1275         mutex_lock(&c->btree_reserve_cache_lock);
1276         while (c->btree_reserve_cache_nr) {
1277                 struct btree_alloc *a =
1278                         &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
1279
1280                 bch2_open_buckets_put(c, &a->ob);
1281         }
1282         mutex_unlock(&c->btree_reserve_cache_lock);
1283
1284         while (1) {
1285                 struct open_bucket *ob;
1286
1287                 spin_lock(&c->freelist_lock);
1288                 if (!ca->open_buckets_partial_nr) {
1289                         spin_unlock(&c->freelist_lock);
1290                         break;
1291                 }
1292                 ob = c->open_buckets +
1293                         ca->open_buckets_partial[--ca->open_buckets_partial_nr];
1294                 ob->on_partial_list = false;
1295                 spin_unlock(&c->freelist_lock);
1296
1297                 bch2_open_bucket_put(c, ob);
1298         }
1299
1300         bch2_ec_stop_dev(c, ca);
1301
1302         /*
1303          * Wake up threads that were blocked on allocation, so they can notice
1304          * the device can no longer be allocated from and the capacity has changed:
1305          */
1306         closure_wake_up(&c->freelist_wait);
1307
1308         /*
1309          * journal_res_get() can block waiting for free space in the journal -
1310          * it needs to notice there may not be devices to allocate from anymore:
1311          */
1312         wake_up(&c->journal.wait);
1313
1314         /* Now wait for any in flight writes: */
1315
1316         closure_wait_event(&c->open_buckets_wait,
1317                            !bch2_dev_has_open_write_point(c, ca));
1318 }
1319
1320 /* device goes rw: */
1321 void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
1322 {
1323         unsigned i;
1324
1325         for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1326                 if (ca->mi.data_allowed & (1 << i))
1327                         set_bit(ca->dev_idx, c->rw_devs[i].d);
1328 }
1329
1330 void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca)
1331 {
1332         if (ca->alloc_thread)
1333                 closure_wait_event(&c->freelist_wait,
1334                                    ca->allocator_state != ALLOCATOR_RUNNING);
1335 }
1336
1337 /* stop allocator thread: */
1338 void bch2_dev_allocator_stop(struct bch_dev *ca)
1339 {
1340         struct task_struct *p;
1341
1342         p = rcu_dereference_protected(ca->alloc_thread, 1);
1343         ca->alloc_thread = NULL;
1344
1345         /*
1346          * We need an rcu barrier between setting ca->alloc_thread = NULL and
1347          * the thread shutting down to avoid bch2_wake_allocator() racing:
1348          *
1349          * XXX: it would be better to have the rcu barrier be asynchronous
1350          * instead of blocking us here
1351          */
1352         synchronize_rcu();
1353
1354         if (p) {
1355                 kthread_stop(p);
1356                 put_task_struct(p);
1357         }
1358 }
1359
1360 /* start allocator thread: */
1361 int bch2_dev_allocator_start(struct bch_dev *ca)
1362 {
1363         struct task_struct *p;
1364
1365         /*
1366          * allocator thread already started?
1367          */
1368         if (ca->alloc_thread)
1369                 return 0;
1370
1371         p = kthread_create(bch2_allocator_thread, ca,
1372                            "bch-alloc/%s", ca->name);
1373         if (IS_ERR(p)) {
1374                 bch_err(ca->fs, "error creating allocator thread: %li",
1375                         PTR_ERR(p));
1376                 return PTR_ERR(p);
1377         }
1378
1379         get_task_struct(p);
1380         rcu_assign_pointer(ca->alloc_thread, p);
1381         wake_up_process(p);
1382         return 0;
1383 }
1384
1385 void bch2_fs_allocator_background_init(struct bch_fs *c)
1386 {
1387         spin_lock_init(&c->freelist_lock);
1388
1389         c->pd_controllers_update_seconds = 5;
1390         INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update);
1391 }