/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has an associated 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that are important
 * but the code is named as if it's the priorities) are written in an arbitrary
 * list of buckets on disk, with a pointer to them in the journal header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is a
 * smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, that happens when a bucket moves from the
 * free_inc list to the free list.
 *
 * It's important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyway - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch2_bucket_alloc() allocates a single bucket from a specific device.
 *
 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
 * in a given filesystem.
 *
 * invalidate_buckets() drives all the processes described above. It's called
 * from bch2_bucket_alloc() and a few other places that need to make sure free
 * buckets are ready.
 *
 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
 * invalidated, and then invalidate them and stick them on the free_inc list -
 * in either lru or fifo order.
 */
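/*
 * Illustrative sketch (not part of this file): with gen-based invalidation, a
 * pointer is only usable while its gen matches the bucket's current gen, so
 * bumping the bucket's gen implicitly invalidates every stale pointer into it.
 * A staleness check is assumed to look roughly like:
 *
 *	static inline bool example_ptr_stale(u8 bucket_gen, u8 ptr_gen)
 *	{
 *		// gens are 8 bit and wrap; compare with modular arithmetic
 *		return (u8) (bucket_gen - ptr_gen) > 0;
 *	}
 *
 * Reusing a bucket is then just an increment of its gen (persisted before the
 * bucket is rewritten), with no need to find and delete old pointers first.
 */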
#include "btree_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "disk_groups.h"
#include "journal_io.h"

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
#include <trace/events/bcachefs.h>

static void bch2_recalc_oldest_io(struct bch_fs *, struct bch_dev *, int);
/* Ratelimiting/PD controllers */

static void pd_controllers_update(struct work_struct *work)
{
	struct bch_fs *c = container_of(to_delayed_work(work),
					struct bch_fs,
					pd_controllers_update);
	struct bch_dev *ca;
	unsigned i;

	for_each_member_device(ca, c, i) {
		struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);

		u64 free = bucket_to_sector(ca,
				__dev_buckets_free(ca, stats)) << 9;
		/*
		 * Bytes of internal fragmentation, which can be
		 * reclaimed by copy GC
		 */
		s64 fragmented = (bucket_to_sector(ca,
					stats.buckets[BCH_DATA_USER] +
					stats.buckets[BCH_DATA_CACHED]) -
				  (stats.sectors[BCH_DATA_USER] +
				   stats.sectors[BCH_DATA_CACHED])) << 9;

		fragmented = max(0LL, fragmented);

		bch2_pd_controller_update(&ca->copygc_pd,
					  free, fragmented, -1);
	}

	schedule_delayed_work(&c->pd_controllers_update,
			      c->pd_controllers_update_seconds * HZ);
}
/* Persistent alloc info: */

static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)
{
	unsigned bytes = offsetof(struct bch_alloc, data);

	if (a->fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
		bytes += 2;
	if (a->fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
		bytes += 2;

	return DIV_ROUND_UP(bytes, sizeof(u64));
}
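/*
 * Worked example (illustrative; the exact header size is an assumption): with
 * both the READ_TIME and WRITE_TIME fields present and an 8 byte header before
 * 'data', the computation above gives
 *
 *	bytes = 8 + 2 + 2 = 12
 *	DIV_ROUND_UP(12, sizeof(u64)) = 2	u64s of value
 *
 * bch2_alloc_invalid() below rejects keys whose encoded value size doesn't
 * match this computation for their field bitmap.
 */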
const char *bch2_alloc_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	if (k.k->p.inode >= c->sb.nr_devices ||
	    !c->devs[k.k->p.inode])
		return "invalid device";

	switch (k.k->type) {
	case BCH_ALLOC: {
		struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);

		if (bch_alloc_val_u64s(a.v) != bkey_val_u64s(a.k))
			return "incorrect value size";
		break;
	}
	default:
		return "invalid type";
	}

	return NULL;
}
void bch2_alloc_to_text(struct bch_fs *c, char *buf,
			size_t size, struct bkey_s_c k)
{
	buf[0] = '\0';
}
static inline unsigned get_alloc_field(const u8 **p, unsigned bytes)
{
	unsigned v;

	switch (bytes) {
	case 2:
		v = le16_to_cpup((void *) *p);
		break;
	case 4:
		v = le32_to_cpup((void *) *p);
		break;
	default:
		BUG();
	}

	*p += bytes;
	return v;
}

static inline void put_alloc_field(u8 **p, unsigned bytes, unsigned v)
{
	switch (bytes) {
	case 2:
		*((__le16 *) *p) = cpu_to_le16(v);
		break;
	case 4:
		*((__le32 *) *p) = cpu_to_le32(v);
		break;
	default:
		BUG();
	}

	*p += bytes;
}
static void bch2_alloc_read_key(struct bch_fs *c, struct bkey_s_c k)
{
	struct bch_dev *ca;
	struct bkey_s_c_alloc a;
	struct bucket_mark new;
	struct bucket *g;
	const u8 *d;

	if (k.k->type != BCH_ALLOC)
		return;

	a = bkey_s_c_to_alloc(k);
	ca = bch_dev_bkey_exists(c, a.k->p.inode);

	if (a.k->p.offset >= ca->mi.nbuckets)
		return;

	percpu_down_read_preempt_disable(&c->usage_lock);

	g = bucket(ca, a.k->p.offset);
	bucket_cmpxchg(g, new, ({
		new.gen = a.v->gen;
		new.gen_valid = 1;
	}));

	d = a.v->data;
	if (a.v->fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
		g->io_time[READ] = get_alloc_field(&d, 2);
	if (a.v->fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
		g->io_time[WRITE] = get_alloc_field(&d, 2);

	percpu_up_read_preempt_enable(&c->usage_lock);
}
int bch2_alloc_read(struct bch_fs *c, struct list_head *journal_replay_list)
{
	struct journal_replay *r;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_dev *ca;
	unsigned i;
	int ret;

	for_each_btree_key(&iter, c, BTREE_ID_ALLOC, POS_MIN, 0, k) {
		bch2_alloc_read_key(c, k);
		bch2_btree_iter_cond_resched(&iter);
	}

	ret = bch2_btree_iter_unlock(&iter);
	if (ret)
		return ret;

	list_for_each_entry(r, journal_replay_list, list) {
		struct bkey_i *k, *n;
		struct jset_entry *entry;

		for_each_jset_key(k, n, entry, &r->j)
			if (entry->btree_id == BTREE_ID_ALLOC)
				bch2_alloc_read_key(c, bkey_i_to_s_c(k));
	}

	mutex_lock(&c->bucket_clock[READ].lock);
	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		bch2_recalc_oldest_io(c, ca, READ);
		up_read(&ca->bucket_lock);
	}
	mutex_unlock(&c->bucket_clock[READ].lock);

	mutex_lock(&c->bucket_clock[WRITE].lock);
	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		bch2_recalc_oldest_io(c, ca, WRITE);
		up_read(&ca->bucket_lock);
	}
	mutex_unlock(&c->bucket_clock[WRITE].lock);

	return 0;
}
static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
				  size_t b, struct btree_iter *iter,
				  u64 *journal_seq, bool nowait)
{
	struct bucket_mark m;
	__BKEY_PADDED(k, DIV_ROUND_UP(sizeof(struct bch_alloc), 8)) alloc_key;
	struct bucket *g;
	struct bkey_i_alloc *a;
	u8 *d;
	int ret;
	unsigned flags = BTREE_INSERT_ATOMIC|
			 BTREE_INSERT_USE_RESERVE|
			 BTREE_INSERT_USE_ALLOC_RESERVE;

	if (nowait)
		flags |= BTREE_INSERT_NOWAIT;

	bch2_btree_iter_set_pos(iter, POS(ca->dev_idx, b));

	do {
		ret = btree_iter_err(bch2_btree_iter_peek_slot(iter));
		if (ret)
			break;

		percpu_down_read_preempt_disable(&c->usage_lock);
		g = bucket(ca, b);

		/* read mark under btree node lock: */
		m = READ_ONCE(g->mark);
		a = bkey_alloc_init(&alloc_key.k);
		a->k.p		= iter->pos;
		a->v.fields	= 0;
		a->v.gen	= m.gen;
		set_bkey_val_u64s(&a->k, bch_alloc_val_u64s(&a->v));

		d = a->v.data;
		if (a->v.fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
			put_alloc_field(&d, 2, g->io_time[READ]);
		if (a->v.fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
			put_alloc_field(&d, 2, g->io_time[WRITE]);
		percpu_up_read_preempt_enable(&c->usage_lock);

		ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq, flags,
					   BTREE_INSERT_ENTRY(iter, &a->k_i));
		bch2_btree_iter_cond_resched(iter);
	} while (ret == -EINTR);

	return ret;
}
int bch2_alloc_replay_key(struct bch_fs *c, struct bpos pos)
{
	struct bch_dev *ca;
	struct btree_iter iter;
	int ret;

	if (pos.inode >= c->sb.nr_devices || !c->devs[pos.inode])
		return 0;

	ca = bch_dev_bkey_exists(c, pos.inode);

	if (pos.offset >= ca->mi.nbuckets)
		return 0;

	bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	ret = __bch2_alloc_write_key(c, ca, pos.offset, &iter,
				     NULL, false);
	bch2_btree_iter_unlock(&iter);
	return ret;
}
int bch2_alloc_write(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;
	int ret = 0;

	for_each_rw_member(ca, c, i) {
		struct btree_iter iter;
		unsigned long bucket;

		bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
				     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

		down_read(&ca->bucket_lock);
		for_each_set_bit(bucket, ca->buckets_dirty, ca->mi.nbuckets) {
			ret = __bch2_alloc_write_key(c, ca, bucket, &iter,
						     NULL, false);
			if (ret)
				break;

			clear_bit(bucket, ca->buckets_dirty);
		}
		up_read(&ca->bucket_lock);
		bch2_btree_iter_unlock(&iter);

		if (ret) {
			percpu_ref_put(&ca->io_ref);
			break;
		}
	}

	return ret;
}
/* Bucket IO clocks: */

static void bch2_recalc_oldest_io(struct bch_fs *c, struct bch_dev *ca, int rw)
{
	struct bucket_clock *clock = &c->bucket_clock[rw];
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket *g;
	u16 max_last_io = 0;
	unsigned i;

	lockdep_assert_held(&c->bucket_clock[rw].lock);

	/* Recalculate max_last_io for this device: */
	for_each_bucket(g, buckets)
		max_last_io = max(max_last_io, bucket_last_io(c, g, rw));

	ca->max_last_bucket_io[rw] = max_last_io;

	/* Recalculate global max_last_io: */
	max_last_io = 0;

	for_each_member_device(ca, c, i)
		max_last_io = max(max_last_io, ca->max_last_bucket_io[rw]);

	clock->max_last_io = max_last_io;
}
static void bch2_rescale_bucket_io_times(struct bch_fs *c, int rw)
{
	struct bucket_clock *clock = &c->bucket_clock[rw];
	struct bucket_array *buckets;
	struct bch_dev *ca;
	struct bucket *g;
	unsigned i;

	trace_rescale_prios(c);

	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		buckets = bucket_array(ca);

		for_each_bucket(g, buckets)
			g->io_time[rw] = clock->hand -
				bucket_last_io(c, g, rw) / 2;

		bch2_recalc_oldest_io(c, ca, rw);

		up_read(&ca->bucket_lock);
	}
}
static void bch2_inc_clock_hand(struct io_timer *timer)
{
	struct bucket_clock *clock = container_of(timer,
					struct bucket_clock, rescale);
	struct bch_fs *c = container_of(clock,
					struct bch_fs, bucket_clock[clock->rw]);
	struct bch_dev *ca;
	u64 capacity;
	unsigned i;

	mutex_lock(&clock->lock);

	/* if clock cannot be advanced more, rescale prio */
	if (clock->max_last_io >= U16_MAX - 2)
		bch2_rescale_bucket_io_times(c, clock->rw);

	BUG_ON(clock->max_last_io >= U16_MAX - 2);

	for_each_member_device(ca, c, i)
		ca->max_last_bucket_io[clock->rw]++;
	clock->max_last_io++;
	clock->hand++;

	mutex_unlock(&clock->lock);

	capacity = READ_ONCE(c->capacity);

	if (!capacity)
		return;

	/*
	 * we only increment the clock hand when 0.1% of the filesystem
	 * capacity has been read or written to - this determines how often
	 * the hand advances
	 *
	 * XXX: we shouldn't really be going off of the capacity of devices in
	 * RW mode (that will be 0 when we're RO, yet we can still service
	 * reads)
	 */
	timer->expire += capacity >> 10;

	bch2_io_timer_add(&c->io_clock[clock->rw], timer);
}

static void bch2_bucket_clock_init(struct bch_fs *c, int rw)
{
	struct bucket_clock *clock = &c->bucket_clock[rw];

	clock->hand		= 1;
	clock->rw		= rw;
	clock->rescale.fn	= bch2_inc_clock_hand;
	clock->rescale.expire	= c->capacity >> 10;
	mutex_init(&clock->lock);
}
/* Background allocator thread: */

/*
 * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
 * (marking them as invalidated on disk), then optionally issues discard
 * commands to the newly free buckets, then puts them on the various freelists.
 */
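/*
 * Hedged pseudocode sketch of that pipeline (helper names are the real ones
 * from this file; the loop shape is simplified):
 *
 *	while (!kthread_should_stop()) {
 *		find_reclaimable_buckets(c, ca);	// fill free_inc
 *		bch2_invalidate_free_inc(c, ca, ...);	// persist new gens
 *		bch2_journal_flush_seq(...);		// if needed
 *		discard_invalidated_buckets(c, ca);	// discard, then push
 *							// to ca->free[] lists
 *	}
 *
 * See bch2_allocator_thread() below for the real control flow.
 */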
static void verify_not_on_freelist(struct bch_fs *c, struct bch_dev *ca,
				   size_t bucket)
{
	if (expensive_debug_checks(c) &&
	    test_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags)) {
		size_t iter;
		long i;
		unsigned j;

		for (j = 0; j < RESERVE_NR; j++)
			fifo_for_each_entry(i, &ca->free[j], iter)
				BUG_ON(i == bucket);
		fifo_for_each_entry(i, &ca->free_inc, iter)
			BUG_ON(i == bucket);
	}
}

#define BUCKET_GC_GEN_MAX	96U
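/*
 * Illustrative note: bucket_gc_gen() is assumed to be the distance a bucket's
 * gen has advanced since the last mark-and-sweep gc saw it, e.g.
 *
 *	u8 gc_gen = bucket_gen - oldest_known_ptr_gen;	// wraps mod 256
 *
 * (oldest_known_ptr_gen is a made-up name here.) Gens are 8 bit, so after 128
 * increments a stale pointer's gen would compare as newer than the bucket's.
 * Capping reuse at BUCKET_GC_GEN_MAX (96) leaves headroom, and
 * bch2_can_invalidate_bucket() below starts nudging gc at half that.
 */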
/**
 * wait_buckets_available - wait on reclaimable buckets
 *
 * If there aren't enough available buckets to fill up free_inc, wait until
 * there are.
 */
static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned long gc_count = c->gc_count;
	int ret = 0;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			ret = 1;
			break;
		}

		if (gc_count != c->gc_count)
			ca->inc_gen_really_needs_gc = 0;

		if ((ssize_t) (dev_buckets_available(c, ca) -
			       ca->inc_gen_really_needs_gc) >=
		    (ssize_t) fifo_free(&ca->free_inc))
			break;

		up_read(&c->gc_lock);
		schedule();
		try_to_freeze();
		down_read(&c->gc_lock);
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}
static bool bch2_can_invalidate_bucket(struct bch_dev *ca,
				       size_t bucket,
				       struct bucket_mark mark)
{
	u8 gc_gen;

	if (!is_available_bucket(mark))
		return false;

	gc_gen = bucket_gc_gen(ca, bucket);

	if (gc_gen >= BUCKET_GC_GEN_MAX / 2)
		ca->inc_gen_needs_gc++;

	if (gc_gen >= BUCKET_GC_GEN_MAX)
		ca->inc_gen_really_needs_gc++;

	return gc_gen < BUCKET_GC_GEN_MAX;
}
static void bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
				       size_t bucket)
{
	struct bucket_mark m;

	percpu_down_read_preempt_disable(&c->usage_lock);
	spin_lock(&c->freelist_lock);

	if (!bch2_invalidate_bucket(c, ca, bucket, &m)) {
		spin_unlock(&c->freelist_lock);
		percpu_up_read_preempt_enable(&c->usage_lock);
		return;
	}

	verify_not_on_freelist(c, ca, bucket);
	BUG_ON(!fifo_push(&ca->free_inc, bucket));

	spin_unlock(&c->freelist_lock);
	percpu_up_read_preempt_enable(&c->usage_lock);

	bucket_io_clock_reset(c, ca, bucket, READ);
	bucket_io_clock_reset(c, ca, bucket, WRITE);

	if (m.cached_sectors) {
		ca->allocator_invalidating_data = true;
	} else if (m.journal_seq_valid) {
		u64 journal_seq = atomic64_read(&c->journal.seq);
		u64 bucket_seq	= journal_seq;

		bucket_seq &= ~((u64) U16_MAX);
		bucket_seq |= m.journal_seq;

		if (bucket_seq > journal_seq)
			bucket_seq -= 1 << 16;

		ca->allocator_journal_seq_flush =
			max(ca->allocator_journal_seq_flush, bucket_seq);
	}
}
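/*
 * Worked example (illustrative): bucket marks store only the low 16 bits of
 * the journal seq that must be flushed. Suppose the current journal seq is
 * 0x12345 and a bucket's m.journal_seq is 0xFFFF. Splicing gives
 *
 *	bucket_seq = (0x12345 & ~0xFFFF) | 0xFFFF = 0x1FFFF
 *
 * which is > 0x12345, so the low bits must have wrapped since the bucket was
 * marked; subtracting 1 << 16 yields the true seq 0xFFFF. The max() above then
 * records the newest seq the allocator needs flushed before reusing buckets.
 */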
/*
 * Determines what order we're going to reuse buckets, smallest bucket_key()
 * first.
 *
 * - We take into account the read prio of the bucket, which gives us an
 *   indication of how hot the data is -- we scale the prio so that the prio
 *   farthest from the clock is worth 1/8th of the closest.
 *
 * - The number of sectors of cached data in the bucket, which gives us an
 *   indication of the cost in cache misses this eviction will cause.
 *
 * - If hotness * sectors used compares equal, we pick the bucket with the
 *   smallest bucket_gc_gen() - since incrementing the same bucket's generation
 *   number repeatedly forces us to run mark and sweep gc to avoid generation
 *   number wraparound.
 */
static unsigned long bucket_sort_key(struct bch_fs *c, struct bch_dev *ca,
				     size_t b, struct bucket_mark m)
{
	unsigned last_io = bucket_last_io(c, bucket(ca, b), READ);
	unsigned max_last_io = ca->max_last_bucket_io[READ];

	/*
	 * Time since last read, scaled to [0, 8) where larger value indicates
	 * more recently read data:
	 */
	unsigned long hotness = (max_last_io - last_io) * 7 / max_last_io;

	/* How much we want to keep the data in this bucket: */
	unsigned long data_wantness =
		(hotness + 1) * bucket_sectors_used(m);

	unsigned long needs_journal_commit =
		bucket_needs_journal_commit(m, c->journal.last_seq_ondisk);

	return  (data_wantness << 9) |
		(needs_journal_commit << 8) |
		bucket_gc_gen(ca, b);
}
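/*
 * Illustrative note: the sort key packs three criteria into one unsigned long
 * so a plain numeric compare sorts lexicographically by them:
 *
 *	bits 9..   data_wantness         hotness * cached sectors (primary)
 *	bit  8     needs_journal_commit  prefer buckets that don't require a
 *	                                 journal flush before reuse
 *	bits 0..7  bucket_gc_gen()       break ties toward buckets whose gen
 *	                                 hasn't been bumped much, putting off
 *	                                 gen-wraparound gc
 *
 * Smallest key is reclaimed first: cold, mostly-empty buckets with no pending
 * journal dependency and a low gc gen.
 */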
static inline int bucket_alloc_cmp(alloc_heap *h,
				   struct alloc_heap_entry l,
				   struct alloc_heap_entry r)
{
	return (l.key > r.key) - (l.key < r.key) ?:
	       (l.nr < r.nr)   - (l.nr > r.nr)   ?:
	       (l.bucket > r.bucket) - (l.bucket < r.bucket);
}
static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets;
	struct alloc_heap_entry e = { 0 };
	size_t b;

	ca->alloc_heap.used = 0;

	mutex_lock(&c->bucket_clock[READ].lock);
	down_read(&ca->bucket_lock);

	buckets = bucket_array(ca);

	bch2_recalc_oldest_io(c, ca, READ);

	/*
	 * Find buckets with lowest read priority, by building a maxheap sorted
	 * by read priority and repeatedly replacing the maximum element until
	 * all buckets have been visited.
	 */
	for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
		struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
		unsigned long key = bucket_sort_key(c, ca, b, m);

		if (!bch2_can_invalidate_bucket(ca, b, m))
			continue;

		if (e.nr && e.bucket + e.nr == b && e.key == key) {
			e.nr++;
		} else {
			if (e.nr)
				heap_add_or_replace(&ca->alloc_heap, e,
						    -bucket_alloc_cmp);

			e = (struct alloc_heap_entry) {
				.bucket	= b,
				.nr	= 1,
				.key	= key,
			};
		}
	}

	if (e.nr)
		heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);

	up_read(&ca->bucket_lock);
	mutex_unlock(&c->bucket_clock[READ].lock);

	heap_resort(&ca->alloc_heap, bucket_alloc_cmp);

	while (heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp)) {
		for (b = e.bucket; b < e.bucket + e.nr; b++) {
			if (fifo_full(&ca->free_inc))
				return;

			bch2_invalidate_one_bucket(c, ca, b);
		}
	}
}
static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket_mark m;
	size_t b, checked;

	for (checked = 0;
	     checked < ca->mi.nbuckets && !fifo_full(&ca->free_inc);
	     checked++) {
		if (ca->fifo_last_bucket <  ca->mi.first_bucket ||
		    ca->fifo_last_bucket >= ca->mi.nbuckets)
			ca->fifo_last_bucket = ca->mi.first_bucket;

		b = ca->fifo_last_bucket++;

		m = READ_ONCE(buckets->b[b].mark);

		if (bch2_can_invalidate_bucket(ca, b, m))
			bch2_invalidate_one_bucket(c, ca, b);
	}
}
static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket_mark m;
	size_t checked;

	for (checked = 0;
	     checked < ca->mi.nbuckets / 2 && !fifo_full(&ca->free_inc);
	     checked++) {
		size_t b = bch2_rand_range(ca->mi.nbuckets -
					   ca->mi.first_bucket) +
			ca->mi.first_bucket;

		m = READ_ONCE(buckets->b[b].mark);

		if (bch2_can_invalidate_bucket(ca, b, m))
			bch2_invalidate_one_bucket(c, ca, b);
	}
}
static void find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
{
	ca->inc_gen_needs_gc = 0;
	ca->inc_gen_really_needs_gc = 0;

	switch (ca->mi.replacement) {
	case CACHE_REPLACEMENT_LRU:
		find_reclaimable_buckets_lru(c, ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		find_reclaimable_buckets_fifo(c, ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		find_reclaimable_buckets_random(c, ca);
		break;
	}
}
static int size_t_cmp(const void *_l, const void *_r)
{
	const size_t *l = _l, *r = _r;

	return (*l > *r) - (*l < *r);
}

static void sort_free_inc(struct bch_fs *c, struct bch_dev *ca)
{
	BUG_ON(ca->free_inc.front);

	spin_lock(&c->freelist_lock);
	sort(ca->free_inc.data,
	     ca->free_inc.back,
	     sizeof(ca->free_inc.data[0]),
	     size_t_cmp, NULL);
	spin_unlock(&c->freelist_lock);
}
static int bch2_invalidate_free_inc(struct bch_fs *c, struct bch_dev *ca,
				    u64 *journal_seq, size_t nr,
				    bool nowait)
{
	struct btree_iter iter;
	int ret = 0;

	bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS(ca->dev_idx, 0),
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	/* Only use nowait if we've already invalidated at least one bucket: */
	while (ca->nr_invalidated < min(nr, fifo_used(&ca->free_inc))) {
		size_t b = fifo_idx_entry(&ca->free_inc, ca->nr_invalidated);

		ret = __bch2_alloc_write_key(c, ca, b, &iter, journal_seq,
					     nowait && ca->nr_invalidated);
		if (ret)
			break;

		ca->nr_invalidated++;
	}

	bch2_btree_iter_unlock(&iter);

	/* If we used NOWAIT, don't return the error: */
	return ca->nr_invalidated ? 0 : ret;
}
static bool __push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t bucket)
{
	unsigned i;

	/*
	 * Don't remove from free_inc until after it's added to
	 * freelist, so gc can find it:
	 */
	spin_lock(&c->freelist_lock);
	for (i = 0; i < RESERVE_NR; i++)
		if (fifo_push(&ca->free[i], bucket)) {
			fifo_pop(&ca->free_inc, bucket);
			--ca->nr_invalidated;
			closure_wake_up(&c->freelist_wait);
			spin_unlock(&c->freelist_lock);
			return true;
		}
	spin_unlock(&c->freelist_lock);

	return false;
}

static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t bucket)
{
	int ret = 0;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (__push_invalidated_bucket(c, ca, bucket))
			break;

		if ((current->flags & PF_KTHREAD) &&
		    kthread_should_stop()) {
			ret = 1;
			break;
		}

		schedule();
		try_to_freeze();
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}
/*
 * Given an invalidated, ready to use bucket: issue a discard to it if enabled,
 * then add it to the freelist, waiting until there's room if necessary:
 */
static int discard_invalidated_buckets(struct bch_fs *c, struct bch_dev *ca)
{
	while (ca->nr_invalidated) {
		size_t bucket = fifo_peek(&ca->free_inc);

		BUG_ON(fifo_empty(&ca->free_inc) || !ca->nr_invalidated);

		if (ca->mi.discard &&
		    blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
			blkdev_issue_discard(ca->disk_sb.bdev,
					     bucket_to_sector(ca, bucket),
					     ca->mi.bucket_size, GFP_NOIO, 0);

		if (push_invalidated_bucket(c, ca, bucket))
			return 1;
	}

	return 0;
}
/**
 * bch2_allocator_thread - move buckets from free_inc to reserves
 *
 * The free_inc FIFO is populated by find_reclaimable_buckets(), and
 * the reserves are depleted by bucket allocation. When we run out
 * of free_inc, try to invalidate some buckets and write out
 * the new gens.
 */
static int bch2_allocator_thread(void *arg)
{
	struct bch_dev *ca = arg;
	struct bch_fs *c = ca->fs;
	u64 journal_seq;
	int ret;

	set_freezable();

	while (1) {
		while (1) {
			while (ca->nr_invalidated) {
				pr_debug("discarding %zu invalidated buckets",
					 (size_t) ca->nr_invalidated);

				ret = discard_invalidated_buckets(c, ca);
				if (ret)
					goto stop;
			}

			if (fifo_empty(&ca->free_inc))
				break;

			pr_debug("invalidating %zu buckets",
				 fifo_used(&ca->free_inc));

			journal_seq = 0;
			ret = bch2_invalidate_free_inc(c, ca, &journal_seq,
						       SIZE_MAX, true);
			if (ret) {
				bch_err(ca, "error invalidating buckets: %i", ret);
				goto stop;
			}

			if (!ca->nr_invalidated) {
				bch_err(ca, "allocator thread unable to make forward progress!");
				goto stop;
			}

			if (ca->allocator_invalidating_data)
				ret = bch2_journal_flush_seq(&c->journal, journal_seq);
			else if (ca->allocator_journal_seq_flush)
				ret = bch2_journal_flush_seq(&c->journal,
						ca->allocator_journal_seq_flush);
			else
				ret = 0;

			/*
			 * journal error - buckets haven't actually been
			 * invalidated, can't discard them:
			 */
			if (ret) {
				bch_err(ca, "journal error: %i", ret);
				goto stop;
			}
		}

		pr_debug("free_inc now empty");

		/* Reset front/back so we can easily sort fifo entries later: */
		ca->free_inc.front = ca->free_inc.back	= 0;
		ca->allocator_journal_seq_flush		= 0;
		ca->allocator_invalidating_data	= false;

		down_read(&c->gc_lock);
		while (1) {
			size_t prev = fifo_used(&ca->free_inc);

			if (test_bit(BCH_FS_GC_FAILURE, &c->flags)) {
				up_read(&c->gc_lock);
				bch_err(ca, "gc failure");
				goto stop;
			}

			/*
			 * Find some buckets that we can invalidate, either
			 * they're completely unused, or only contain clean data
			 * that's been written back to the backing device or
			 * another cache tier
			 */

			pr_debug("scanning for reclaimable buckets");

			find_reclaimable_buckets(c, ca);

			pr_debug("found %zu buckets (free_inc %zu/%zu)",
				 fifo_used(&ca->free_inc) - prev,
				 fifo_used(&ca->free_inc), ca->free_inc.size);

			trace_alloc_batch(ca, fifo_used(&ca->free_inc),
					  ca->free_inc.size);

			if ((ca->inc_gen_needs_gc >= ca->free_inc.size ||
			     (!fifo_full(&ca->free_inc) &&
			      ca->inc_gen_really_needs_gc >=
			      fifo_free(&ca->free_inc))) &&
			    c->gc_thread) {
				atomic_inc(&c->kick_gc);
				wake_up_process(c->gc_thread);
			}

			if (fifo_full(&ca->free_inc))
				break;

			if (!fifo_empty(&ca->free_inc) &&
			    !fifo_full(&ca->free[RESERVE_MOVINGGC]))
				break;

			/*
			 * copygc may be waiting until either its reserve fills
			 * up, or we can't make forward progress:
			 */
			ca->allocator_blocked = true;
			closure_wake_up(&c->freelist_wait);

			ret = wait_buckets_available(c, ca);
			if (ret) {
				up_read(&c->gc_lock);
				goto stop;
			}
		}

		ca->allocator_blocked = false;
		up_read(&c->gc_lock);

		pr_debug("free_inc now %zu/%zu",
			 fifo_used(&ca->free_inc),
			 ca->free_inc.size);

		sort_free_inc(c, ca);

		/*
		 * free_inc is now full of newly-invalidated buckets: next,
		 * write out the new bucket gens:
		 */
	}

stop:
	pr_debug("alloc thread stopping (ret %i)", ret);
	return 0;
}
/*
 * Open buckets represent a bucket that's currently being allocated from. They
 * serve two purposes:
 *
 *  - They track buckets that have been partially allocated, allowing for
 *    sub-bucket sized allocations - they're used by the sector allocator below
 *
 *  - They provide a reference to the buckets they own that mark and sweep GC
 *    can find, until the new allocation has a pointer to it inserted into the
 *    btree
 *
 * When allocating some space with the sector allocator, the allocation comes
 * with a reference to an open bucket - the caller is required to put that
 * reference _after_ doing the index update that makes its allocation reachable.
 */
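/*
 * Illustrative lifecycle sketch (not a real caller; what the caller holds is
 * made up, and most arguments are elided):
 *
 *	wp = bch2_alloc_sectors_start(c, ...);	// pick/refill open buckets
 *	bch2_alloc_sectors_append_ptrs(c, wp, e, sectors); // add ptrs to @e
 *	bch2_alloc_sectors_done(c, wp);		// unlock the write point
 *	... submit the data write, then the btree update inserting @e ...
 *	bch2_open_bucket_put(c, ob);		// only after the index update
 *
 * Dropping the open bucket reference before the index update would let GC
 * consider the bucket unreferenced and re-invalidate it under the write.
 */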
void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

	percpu_down_read_preempt_disable(&c->usage_lock);
	spin_lock(&ob->lock);

	bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr),
			       false, gc_pos_alloc(c, ob), 0);
	ob->valid = false;

	spin_unlock(&ob->lock);
	percpu_up_read_preempt_enable(&c->usage_lock);

	spin_lock(&c->freelist_lock);
	ob->freelist = c->open_buckets_freelist;
	c->open_buckets_freelist = ob - c->open_buckets;
	c->open_buckets_nr_free++;
	spin_unlock(&c->freelist_lock);

	closure_wake_up(&c->open_buckets_wait);
}
static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
{
	struct open_bucket *ob;

	BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);

	ob = c->open_buckets + c->open_buckets_freelist;
	c->open_buckets_freelist = ob->freelist;
	atomic_set(&ob->pin, 1);

	c->open_buckets_nr_free--;
	return ob;
}
/* _only_ for allocating the journal on a new device: */
long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
{
	struct bucket_array *buckets;
	ssize_t b;

	rcu_read_lock();
	buckets = bucket_array(ca);

	for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++)
		if (is_available_bucket(buckets->b[b].mark))
			goto success;
	b = -1;
success:
	rcu_read_unlock();
	return b;
}

static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
{
	switch (reserve) {
	case RESERVE_ALLOC:
		return 0;
	case RESERVE_BTREE:
		return BTREE_NODE_RESERVE / 2;
	default:
		return BTREE_NODE_RESERVE;
	}
}
/**
 * bch2_bucket_alloc - allocate a single bucket from a specific device
 *
 * Returns index of bucket on success, negative error code on failure
 */
int bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
		      enum alloc_reserve reserve,
		      bool may_alloc_partial,
		      struct closure *cl)
{
	struct bucket_array *buckets;
	struct open_bucket *ob;
	long bucket;

	spin_lock(&c->freelist_lock);

	if (may_alloc_partial &&
	    ca->open_buckets_partial_nr) {
		int ret = ca->open_buckets_partial[--ca->open_buckets_partial_nr];
		c->open_buckets[ret].on_partial_list = false;
		spin_unlock(&c->freelist_lock);
		return ret;
	}

	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
		if (cl)
			closure_wait(&c->open_buckets_wait, cl);
		spin_unlock(&c->freelist_lock);
		trace_open_bucket_alloc_fail(ca, reserve);
		return OPEN_BUCKETS_EMPTY;
	}

	if (likely(fifo_pop(&ca->free[RESERVE_NONE], bucket)))
		goto out;

	switch (reserve) {
	case RESERVE_ALLOC:
		if (fifo_pop(&ca->free[RESERVE_BTREE], bucket))
			goto out;
		break;
	case RESERVE_BTREE:
		if (fifo_used(&ca->free[RESERVE_BTREE]) * 2 >=
		    ca->free[RESERVE_BTREE].size &&
		    fifo_pop(&ca->free[RESERVE_BTREE], bucket))
			goto out;
		break;
	case RESERVE_MOVINGGC:
		if (fifo_pop(&ca->free[RESERVE_MOVINGGC], bucket))
			goto out;
		break;
	default:
		break;
	}

	if (cl)
		closure_wait(&c->freelist_wait, cl);

	spin_unlock(&c->freelist_lock);

	trace_bucket_alloc_fail(ca, reserve);
	return FREELIST_EMPTY;
out:
	verify_not_on_freelist(c, ca, bucket);

	ob = bch2_open_bucket_alloc(c);

	spin_lock(&ob->lock);
	buckets = bucket_array(ca);

	ob->valid	= true;
	ob->sectors_free = ca->mi.bucket_size;
	ob->ptr		= (struct bch_extent_ptr) {
		.gen	= buckets->b[bucket].mark.gen,
		.offset	= bucket_to_sector(ca, bucket),
		.dev	= ca->dev_idx,
	};

	bucket_io_clock_reset(c, ca, bucket, READ);
	bucket_io_clock_reset(c, ca, bucket, WRITE);
	spin_unlock(&ob->lock);

	spin_unlock(&c->freelist_lock);

	bch2_wake_allocator(ca);

	trace_bucket_alloc(ca, reserve);
	return ob - c->open_buckets;
}
static int __dev_alloc_cmp(struct write_point *wp,
			   unsigned l, unsigned r)
{
	return ((wp->next_alloc[l] > wp->next_alloc[r]) -
		(wp->next_alloc[l] < wp->next_alloc[r]));
}

#define dev_alloc_cmp(l, r) __dev_alloc_cmp(wp, l, r)

struct dev_alloc_list bch2_wp_alloc_list(struct bch_fs *c,
					 struct write_point *wp,
					 struct bch_devs_mask *devs)
{
	struct dev_alloc_list ret = { .nr = 0 };
	struct bch_dev *ca;
	unsigned i;

	for_each_member_device_rcu(ca, c, i, devs)
		ret.devs[ret.nr++] = i;

	bubble_sort(ret.devs, ret.nr, dev_alloc_cmp);
	return ret;
}
void bch2_wp_rescale(struct bch_fs *c, struct bch_dev *ca,
		     struct write_point *wp)
{
	u64 *v = wp->next_alloc + ca->dev_idx;
	u64 free_space = dev_buckets_free(c, ca);
	u64 free_space_inv = free_space
		? div64_u64(1ULL << 48, free_space)
		: 1ULL << 48;
	u64 scale = *v / 4;

	if (*v + free_space_inv >= *v)
		*v += free_space_inv;
	else
		*v = U64_MAX;

	for (v = wp->next_alloc;
	     v < wp->next_alloc + ARRAY_SIZE(wp->next_alloc); v++)
		*v = *v < scale ? 0 : *v - scale;
}
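/*
 * Worked example (illustrative): next_alloc[] is a running cost per device,
 * and bch2_wp_alloc_list() sorts devices ascending by it. Each allocation adds
 * 2^48 / free_space, so a device with twice the free buckets accrues cost half
 * as fast and gets picked roughly twice as often:
 *
 *	free_space = 1000 buckets  ->  increment = 2^48 / 1000
 *	free_space = 2000 buckets  ->  increment = 2^48 / 2000
 *
 * The trailing loop then decays every device's cost by a quarter of this
 * device's cost, keeping the counters bounded while (apart from clamping at
 * zero) preserving their relative order.
 */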
static enum bucket_alloc_ret __bch2_bucket_alloc_set(struct bch_fs *c,
					struct write_point *wp,
					unsigned nr_replicas,
					enum alloc_reserve reserve,
					struct bch_devs_mask *devs,
					struct closure *cl)
{
	enum bucket_alloc_ret ret = NO_DEVICES;
	struct dev_alloc_list devs_sorted;
	struct bch_dev *ca;
	unsigned i, nr_ptrs_effective = 0;
	bool have_cache_dev = false;

	BUG_ON(nr_replicas > ARRAY_SIZE(wp->ptrs));

	for (i = wp->first_ptr; i < wp->nr_ptrs; i++) {
		ca = bch_dev_bkey_exists(c, wp->ptrs[i]->ptr.dev);

		nr_ptrs_effective += ca->mi.durability;
		have_cache_dev |= !ca->mi.durability;
	}

	if (nr_ptrs_effective >= nr_replicas)
		return ALLOC_SUCCESS;

	devs_sorted = bch2_wp_alloc_list(c, wp, devs);

	for (i = 0; i < devs_sorted.nr; i++) {
		int ob;

		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		if (!ca->mi.durability &&
		    (have_cache_dev ||
		     wp->type != BCH_DATA_USER))
			continue;

		ob = bch2_bucket_alloc(c, ca, reserve,
				       wp->type == BCH_DATA_USER, cl);
		if (ob < 0) {
			ret = ob;
			if (ret == OPEN_BUCKETS_EMPTY)
				break;
			continue;
		}

		BUG_ON(ob <= 0 || ob > U8_MAX);
		BUG_ON(wp->nr_ptrs >= ARRAY_SIZE(wp->ptrs));

		wp->ptrs[wp->nr_ptrs++] = c->open_buckets + ob;

		bch2_wp_rescale(c, ca, wp);

		nr_ptrs_effective += ca->mi.durability;
		have_cache_dev |= !ca->mi.durability;

		__clear_bit(ca->dev_idx, devs->d);

		if (nr_ptrs_effective >= nr_replicas) {
			ret = ALLOC_SUCCESS;
			break;
		}
	}

	EBUG_ON(reserve == RESERVE_MOVINGGC &&
		ret != ALLOC_SUCCESS &&
		ret != OPEN_BUCKETS_EMPTY);

	return ret;
}
static int bch2_bucket_alloc_set(struct bch_fs *c, struct write_point *wp,
				 unsigned nr_replicas,
				 enum alloc_reserve reserve,
				 struct bch_devs_mask *devs,
				 struct closure *cl)
{
	switch (__bch2_bucket_alloc_set(c, wp, nr_replicas, reserve,
					devs, cl)) {
	case ALLOC_SUCCESS:
		return 0;
	case NO_DEVICES:
		return -EROFS;
	case FREELIST_EMPTY:
	case OPEN_BUCKETS_EMPTY:
		return cl ? -EAGAIN : -ENOSPC;
	default:
		BUG();
	}
}
/* Sector allocator */

static void writepoint_drop_ptr(struct bch_fs *c,
				struct write_point *wp,
				unsigned i)
{
	struct open_bucket *ob = wp->ptrs[i];
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

	BUG_ON(ca->open_buckets_partial_nr >=
	       ARRAY_SIZE(ca->open_buckets_partial));

	if (wp->type == BCH_DATA_USER) {
		spin_lock(&c->freelist_lock);
		ob->on_partial_list = true;
		ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
			ob - c->open_buckets;
		spin_unlock(&c->freelist_lock);

		closure_wake_up(&c->open_buckets_wait);
		closure_wake_up(&c->freelist_wait);
	} else {
		bch2_open_bucket_put(c, ob);
	}

	array_remove_item(wp->ptrs, wp->nr_ptrs, i);

	if (i < wp->first_ptr)
		wp->first_ptr--;
}

static void writepoint_drop_ptrs(struct bch_fs *c,
				 struct write_point *wp,
				 u16 target, bool in_target)
{
	int i;

	for (i = wp->first_ptr - 1; i >= 0; --i)
		if (bch2_dev_in_target(c, wp->ptrs[i]->ptr.dev,
				       target) == in_target)
			writepoint_drop_ptr(c, wp, i);
}
static void verify_not_stale(struct bch_fs *c, const struct write_point *wp)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct open_bucket *ob;
	unsigned i;

	writepoint_for_each_ptr_all(wp, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

		BUG_ON(ptr_stale(ca, &ob->ptr));
	}
#endif
}
static int open_bucket_add_buckets(struct bch_fs *c,
				   u16 target,
				   struct write_point *wp,
				   struct bch_devs_list *devs_have,
				   unsigned nr_replicas,
				   enum alloc_reserve reserve,
				   struct closure *cl)
{
	struct bch_devs_mask devs = c->rw_devs[wp->type];
	const struct bch_devs_mask *t;
	struct open_bucket *ob;
	unsigned i;
	int ret;

	percpu_down_read_preempt_disable(&c->usage_lock);
	rcu_read_lock();

	/* Don't allocate from devices we already have pointers to: */
	for (i = 0; i < devs_have->nr; i++)
		__clear_bit(devs_have->devs[i], devs.d);

	writepoint_for_each_ptr_all(wp, ob, i)
		__clear_bit(ob->ptr.dev, devs.d);

	t = bch2_target_to_mask(c, target);
	if (t)
		bitmap_and(devs.d, devs.d, t->d, BCH_SB_MEMBERS_MAX);

	ret = bch2_bucket_alloc_set(c, wp, nr_replicas, reserve, &devs, cl);

	rcu_read_unlock();
	percpu_up_read_preempt_enable(&c->usage_lock);

	return ret;
}
static struct write_point *__writepoint_find(struct hlist_head *head,
					     unsigned long write_point)
{
	struct write_point *wp;

	hlist_for_each_entry_rcu(wp, head, node)
		if (wp->write_point == write_point)
			return wp;

	return NULL;
}

static struct hlist_head *writepoint_hash(struct bch_fs *c,
					  unsigned long write_point)
{
	unsigned hash =
		hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));

	return &c->write_points_hash[hash];
}
static struct write_point *writepoint_find(struct bch_fs *c,
					   unsigned long write_point)
{
	struct write_point *wp, *oldest;
	struct hlist_head *head;

	if (!(write_point & 1UL)) {
		wp = (struct write_point *) write_point;
		mutex_lock(&wp->lock);
		return wp;
	}

	head = writepoint_hash(c, write_point);
restart_find:
	wp = __writepoint_find(head, write_point);
	if (wp) {
lock_wp:
		mutex_lock(&wp->lock);
		if (wp->write_point == write_point)
			goto out;
		mutex_unlock(&wp->lock);
		goto restart_find;
	}

	oldest = NULL;
	for (wp = c->write_points;
	     wp < c->write_points + ARRAY_SIZE(c->write_points);
	     wp++)
		if (!oldest || time_before64(wp->last_used, oldest->last_used))
			oldest = wp;

	mutex_lock(&oldest->lock);
	mutex_lock(&c->write_points_hash_lock);
	wp = __writepoint_find(head, write_point);
	if (wp && wp != oldest) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto lock_wp;
	}

	wp = oldest;
	hlist_del_rcu(&wp->node);
	wp->write_point = write_point;
	hlist_add_head_rcu(&wp->node, head);
	mutex_unlock(&c->write_points_hash_lock);
out:
	wp->last_used = sched_clock();
	return wp;
}
/*
 * Get us an open_bucket we can allocate from, return with it locked:
 */
struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
				unsigned target,
				struct write_point_specifier write_point,
				struct bch_devs_list *devs_have,
				unsigned nr_replicas,
				unsigned nr_replicas_required,
				enum alloc_reserve reserve,
				unsigned flags,
				struct closure *cl)
{
	struct write_point *wp;
	struct open_bucket *ob;
	struct bch_dev *ca;
	unsigned nr_ptrs_have, nr_ptrs_effective;
	int ret, i, cache_idx = -1;

	BUG_ON(!nr_replicas || !nr_replicas_required);

	wp = writepoint_find(c, write_point.v);

	wp->first_ptr = 0;

	/* does writepoint have ptrs we can't use? */
	writepoint_for_each_ptr(wp, ob, i)
		if (bch2_dev_list_has_dev(*devs_have, ob->ptr.dev)) {
			swap(wp->ptrs[i], wp->ptrs[wp->first_ptr]);
			wp->first_ptr++;
		}

	nr_ptrs_have = wp->first_ptr;

	/* does writepoint have ptrs we don't want to use? */
	if (target)
		writepoint_for_each_ptr(wp, ob, i)
			if (!bch2_dev_in_target(c, ob->ptr.dev, target)) {
				swap(wp->ptrs[i], wp->ptrs[wp->first_ptr]);
				wp->first_ptr++;
			}

	if (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS) {
		ret = open_bucket_add_buckets(c, target, wp, devs_have,
					      nr_replicas, reserve, cl);
	} else {
		ret = open_bucket_add_buckets(c, target, wp, devs_have,
					      nr_replicas, reserve, NULL);
		if (!ret)
			goto alloc_done;

		wp->first_ptr = nr_ptrs_have;

		ret = open_bucket_add_buckets(c, 0, wp, devs_have,
					      nr_replicas, reserve, cl);
	}

	if (ret && ret != -EROFS)
		goto err;
alloc_done:
	/* check for more than one cache: */
	for (i = wp->nr_ptrs - 1; i >= wp->first_ptr; --i) {
		ca = bch_dev_bkey_exists(c, wp->ptrs[i]->ptr.dev);

		if (ca->mi.durability)
			continue;

		/*
		 * if we ended up with more than one cache device, prefer the
		 * one in the target we want:
		 */
		if (cache_idx >= 0) {
			if (!bch2_dev_in_target(c, wp->ptrs[i]->ptr.dev,
						target)) {
				writepoint_drop_ptr(c, wp, i);
			} else {
				writepoint_drop_ptr(c, wp, cache_idx);
				cache_idx = i;
			}
		} else {
			cache_idx = i;
		}
	}

	/* we might have more effective replicas than required: */
	nr_ptrs_effective = 0;
	writepoint_for_each_ptr(wp, ob, i) {
		ca = bch_dev_bkey_exists(c, ob->ptr.dev);
		nr_ptrs_effective += ca->mi.durability;
	}

	if (ret == -EROFS &&
	    nr_ptrs_effective >= nr_replicas_required)
		ret = 0;

	if (ret)
		goto err;

	if (nr_ptrs_effective > nr_replicas) {
		writepoint_for_each_ptr(wp, ob, i) {
			ca = bch_dev_bkey_exists(c, ob->ptr.dev);

			if (ca->mi.durability &&
			    ca->mi.durability <= nr_ptrs_effective - nr_replicas &&
			    !bch2_dev_in_target(c, ob->ptr.dev, target)) {
				swap(wp->ptrs[i], wp->ptrs[wp->first_ptr]);
				wp->first_ptr++;
				nr_ptrs_effective -= ca->mi.durability;
			}
		}
	}

	if (nr_ptrs_effective > nr_replicas) {
		writepoint_for_each_ptr(wp, ob, i) {
			ca = bch_dev_bkey_exists(c, ob->ptr.dev);

			if (ca->mi.durability &&
			    ca->mi.durability <= nr_ptrs_effective - nr_replicas) {
				swap(wp->ptrs[i], wp->ptrs[wp->first_ptr]);
				wp->first_ptr++;
				nr_ptrs_effective -= ca->mi.durability;
			}
		}
	}

	/* Remove pointers we don't want to use: */
	if (target)
		writepoint_drop_ptrs(c, wp, target, false);

	BUG_ON(wp->first_ptr >= wp->nr_ptrs);
	BUG_ON(nr_ptrs_effective < nr_replicas_required);

	wp->sectors_free = UINT_MAX;

	writepoint_for_each_ptr(wp, ob, i)
		wp->sectors_free = min(wp->sectors_free, ob->sectors_free);

	BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);

	verify_not_stale(c, wp);

	return wp;
err:
	mutex_unlock(&wp->lock);
	return ERR_PTR(ret);
}
/*
 * Append pointers to the space we just allocated to @e, and mark @sectors
 * space as allocated out of @wp's open buckets
 */
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
				    struct bkey_i_extent *e, unsigned sectors)
{
	struct open_bucket *ob;
	unsigned i;

	BUG_ON(sectors > wp->sectors_free);
	wp->sectors_free -= sectors;

	writepoint_for_each_ptr(wp, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
		struct bch_extent_ptr tmp = ob->ptr;

		EBUG_ON(bch2_extent_has_device(extent_i_to_s_c(e), ob->ptr.dev));

		tmp.cached = bkey_extent_is_cached(&e->k) ||
			(!ca->mi.durability && wp->type == BCH_DATA_USER);

		tmp.offset += ca->mi.bucket_size - ob->sectors_free;
		extent_ptr_append(e, tmp);

		BUG_ON(sectors > ob->sectors_free);
		ob->sectors_free -= sectors;
	}
}
/*
 * Release the write point: put any open buckets that are now full, and unlock
 * the write point
 */
void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
{
	int i;

	for (i = wp->nr_ptrs - 1; i >= 0; --i) {
		struct open_bucket *ob = wp->ptrs[i];

		if (!ob->sectors_free) {
			array_remove_item(wp->ptrs, wp->nr_ptrs, i);
			bch2_open_bucket_put(c, ob);
		}
	}

	mutex_unlock(&wp->lock);
}
/* Startup/shutdown (ro/rw): */

void bch2_recalc_capacity(struct bch_fs *c)
{
	struct bch_dev *ca;
	u64 total_capacity, capacity = 0, reserved_sectors = 0;
	unsigned long ra_pages = 0;
	unsigned i, j;

	lockdep_assert_held(&c->state_lock);

	for_each_online_member(ca, c, i) {
		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_bdi;

		ra_pages += bdi->ra_pages;
	}

	bch2_set_ra_pages(c, ra_pages);

	for_each_rw_member(ca, c, i) {
		size_t reserve = 0;

		/*
		 * We need to reserve buckets (from the number
		 * of currently available buckets) against
		 * foreground writes so that mainly copygc can
		 * make forward progress.
		 *
		 * We need enough to refill the various reserves
		 * from scratch - copygc will use its entire
		 * reserve all at once, then run again when
		 * its reserve is refilled (from the formerly
		 * available buckets).
		 *
		 * This reserve is just used when considering if
		 * allocations for foreground writes must wait -
		 * not -ENOSPC calculations.
		 */
		for (j = 0; j < RESERVE_NONE; j++)
			reserve += ca->free[j].size;

		reserve += ca->free_inc.size;

		reserve += ARRAY_SIZE(c->write_points);

		reserve += 1;	/* btree write point */

		reserved_sectors += bucket_to_sector(ca, reserve);

		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);
	}

	total_capacity = capacity;

	capacity *= (100 - c->opts.gc_reserve_percent);
	capacity = div64_u64(capacity, 100);

	BUG_ON(reserved_sectors > total_capacity);

	capacity = min(capacity, total_capacity - reserved_sectors);

	c->capacity = capacity;

	if (c->capacity) {
		bch2_io_timer_add(&c->io_clock[READ],
				  &c->bucket_clock[READ].rescale);
		bch2_io_timer_add(&c->io_clock[WRITE],
				  &c->bucket_clock[WRITE].rescale);
	} else {
		bch2_io_timer_del(&c->io_clock[READ],
				  &c->bucket_clock[READ].rescale);
		bch2_io_timer_del(&c->io_clock[WRITE],
				  &c->bucket_clock[WRITE].rescale);
	}

	/* Wake up in case someone was waiting for buckets: */
	closure_wake_up(&c->freelist_wait);
}
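/*
 * Worked example (illustrative, made-up numbers): with 100 GB of raw capacity
 * across rw devices, gc_reserve_percent = 20 and 1 GB of per-device reserves:
 *
 *	capacity = 100 GB * (100 - 20) / 100	= 80 GB
 *	capacity = min(80 GB, 100 GB - 1 GB)	= 80 GB
 *
 * i.e. the advertised capacity is raw capacity less the gc/copygc reserve
 * percentage, and never more than what remains after the allocator's internal
 * reserves are set aside.
 */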
static void bch2_stop_write_point(struct bch_fs *c, struct bch_dev *ca,
				  struct write_point *wp)
{
	struct bch_devs_mask not_self;

	bitmap_complement(not_self.d, ca->self.d, BCH_SB_MEMBERS_MAX);

	mutex_lock(&wp->lock);
	wp->first_ptr = wp->nr_ptrs;
	writepoint_drop_ptrs(c, wp, dev_to_target(ca->dev_idx), true);
	mutex_unlock(&wp->lock);
}

static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
{
	struct open_bucket *ob;
	bool ret = false;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list &&
		    ob->ptr.dev == ca->dev_idx)
			ret = true;
		spin_unlock(&ob->lock);
	}

	return ret;
}
/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	BUG_ON(ca->alloc_thread);

	/* First, remove device from allocation groups: */

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);

	/*
	 * Capacity is calculated based off of devices in allocation groups:
	 */
	bch2_recalc_capacity(c);

	/* Next, close write points that point to this device... */
	for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
		bch2_stop_write_point(c, ca, &c->write_points[i]);

	bch2_stop_write_point(c, ca, &ca->copygc_write_point);
	bch2_stop_write_point(c, ca, &c->rebalance_write_point);
	bch2_stop_write_point(c, ca, &c->btree_write_point);

	mutex_lock(&c->btree_reserve_cache_lock);
	while (c->btree_reserve_cache_nr) {
		struct btree_alloc *a =
			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];

		bch2_open_bucket_put_refs(c, &a->ob.nr, a->ob.refs);
	}
	mutex_unlock(&c->btree_reserve_cache_lock);

	/*
	 * Wake up threads that were blocked on allocation, so they can notice
	 * the device can no longer be removed and the capacity has changed:
	 */
	closure_wake_up(&c->freelist_wait);

	/*
	 * journal_res_get() can block waiting for free space in the journal -
	 * it needs to notice there may not be devices to allocate from anymore:
	 */
	wake_up(&c->journal.wait);

	/* Now wait for any in flight writes: */

	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));
}
/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);
}

/* stop allocator thread: */
void bch2_dev_allocator_stop(struct bch_dev *ca)
{
	struct task_struct *p;

	p = rcu_dereference_protected(ca->alloc_thread, 1);
	ca->alloc_thread = NULL;

	/*
	 * We need an rcu barrier between setting ca->alloc_thread = NULL and
	 * the thread shutting down to avoid bch2_wake_allocator() racing:
	 *
	 * XXX: it would be better to have the rcu barrier be asynchronous
	 * instead of blocking us here
	 */
	synchronize_rcu();

	if (p)
		kthread_stop(p);
}
/* start allocator thread: */
int bch2_dev_allocator_start(struct bch_dev *ca)
{
	struct task_struct *p;

	/*
	 * allocator thread already started?
	 */
	if (ca->alloc_thread)
		return 0;

	p = kthread_create(bch2_allocator_thread, ca,
			   "bch_alloc[%s]", ca->name);
	if (IS_ERR(p))
		return PTR_ERR(p);

	rcu_assign_pointer(ca->alloc_thread, p);
	wake_up_process(p);
	return 0;
}
static void allocator_start_issue_discards(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned dev_iter;
	size_t i, bu;

	for_each_rw_member(ca, c, dev_iter) {
		unsigned done = 0;

		fifo_for_each_entry(bu, &ca->free_inc, i) {
			if (done == ca->nr_invalidated)
				break;

			blkdev_issue_discard(ca->disk_sb.bdev,
					     bucket_to_sector(ca, bu),
					     ca->mi.bucket_size, GFP_NOIO, 0);
			done++;
		}
	}
}
static int __bch2_fs_allocator_start(struct bch_fs *c)
{
	struct bch_dev *ca;
	size_t bu, i;
	unsigned dev_iter;
	u64 journal_seq = 0;
	bool invalidating_data = false;
	int ret = 0;

	if (test_bit(BCH_FS_GC_FAILURE, &c->flags))
		return -1;

	/* Scan for buckets that are already invalidated: */
	for_each_rw_member(ca, c, dev_iter) {
		struct btree_iter iter;
		struct bucket_mark m;
		struct bkey_s_c k;

		for_each_btree_key(&iter, c, BTREE_ID_ALLOC, POS(ca->dev_idx, 0), 0, k) {
			if (k.k->type != BCH_ALLOC)
				continue;

			bu = k.k->p.offset;
			m = READ_ONCE(bucket(ca, bu)->mark);

			if (!is_available_bucket(m) || m.cached_sectors)
				continue;

			percpu_down_read_preempt_disable(&c->usage_lock);
			bch2_mark_alloc_bucket(c, ca, bu, true,
					gc_pos_alloc(c, NULL),
					BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
					BCH_BUCKET_MARK_GC_LOCK_HELD);
			percpu_up_read_preempt_enable(&c->usage_lock);

			fifo_push(&ca->free_inc, bu);
			ca->nr_invalidated++;

			if (fifo_full(&ca->free_inc))
				break;
		}
		bch2_btree_iter_unlock(&iter);
	}

	/* did we find enough buckets? */
	for_each_rw_member(ca, c, dev_iter)
		if (fifo_used(&ca->free_inc) < ca->free[RESERVE_BTREE].size) {
			percpu_ref_put(&ca->io_ref);
			goto not_enough;
		}

	return 0;
not_enough:
	pr_debug("did not find enough empty buckets; issuing discards");

	/* clear out free_inc - find_reclaimable_buckets() assumes it's empty */
	for_each_rw_member(ca, c, dev_iter)
		discard_invalidated_buckets(c, ca);

	pr_debug("scanning for reclaimable buckets");

	for_each_rw_member(ca, c, dev_iter) {
		BUG_ON(!fifo_empty(&ca->free_inc));
		ca->free_inc.front = ca->free_inc.back = 0;

		find_reclaimable_buckets(c, ca);
		sort_free_inc(c, ca);

		invalidating_data |= ca->allocator_invalidating_data;

		fifo_for_each_entry(bu, &ca->free_inc, i)
			if (!fifo_push(&ca->free[RESERVE_BTREE], bu))
				break;
	}

	pr_debug("done scanning for reclaimable buckets");

	/*
	 * We're moving buckets to freelists _before_ they've been marked as
	 * invalidated on disk - we have to so that we can allocate new btree
	 * nodes to mark them as invalidated on disk.
	 *
	 * However, we can't _write_ to any of these buckets yet - they might
	 * have cached data in them, which is live until they're marked as
	 * invalidated on disk:
	 */
	if (invalidating_data) {
		pr_debug("invalidating existing data");
		set_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
	} else {
		pr_debug("issuing discards");
		allocator_start_issue_discards(c);
	}

	/*
	 * XXX: it's possible for this to deadlock waiting on journal reclaim,
	 * since we're holding btree writes. What then?
	 */

	for_each_rw_member(ca, c, dev_iter) {
		ret = bch2_invalidate_free_inc(c, ca, &journal_seq,
					       ca->free[RESERVE_BTREE].size,
					       false);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			return ret;
		}
	}

	if (invalidating_data) {
		pr_debug("flushing journal");

		ret = bch2_journal_flush_seq(&c->journal, journal_seq);
		if (ret)
			return ret;

		pr_debug("issuing discards");
		allocator_start_issue_discards(c);
	}

	for_each_rw_member(ca, c, dev_iter)
		while (ca->nr_invalidated) {
			BUG_ON(!fifo_pop(&ca->free_inc, bu));
			ca->nr_invalidated--;
		}

	set_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags);

	/* now flush dirty btree nodes: */
	if (invalidating_data) {
		struct bucket_table *tbl;
		struct rhash_head *pos;
		struct btree *b;
		bool flush_updates;
		size_t nr_pending_updates;

		clear_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
again:
		pr_debug("flushing dirty btree nodes");
		cond_resched();

		flush_updates = false;
		nr_pending_updates = bch2_btree_interior_updates_nr_pending(c);

		rcu_read_lock();
		for_each_cached_btree(b, c, tbl, i, pos)
			if (btree_node_dirty(b) && (!b->written || b->level)) {
				if (btree_node_may_write(b)) {
					rcu_read_unlock();
					btree_node_lock_type(c, b, SIX_LOCK_read);
					bch2_btree_node_write(c, b, SIX_LOCK_read);
					six_unlock_read(&b->lock);
					goto again;
				} else {
					flush_updates = true;
				}
			}
		rcu_read_unlock();

		/*
		 * This is ugly, but it's needed to flush btree node writes
		 * without spinning...
		 */
		if (flush_updates) {
			closure_wait_event(&c->btree_interior_update_wait,
				bch2_btree_interior_updates_nr_pending(c) <
				nr_pending_updates);
			goto again;
		}
	}

	return 0;
}
int bch2_fs_allocator_start(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;
	int ret;

	down_read(&c->gc_lock);
	ret = __bch2_fs_allocator_start(c);
	up_read(&c->gc_lock);

	if (ret)
		return ret;

	for_each_rw_member(ca, c, i) {
		ret = bch2_dev_allocator_start(ca);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			return ret;
		}
	}

	return bch2_alloc_write(c);
}
void bch2_fs_allocator_init(struct bch_fs *c)
{
	struct open_bucket *ob;
	struct write_point *wp;

	mutex_init(&c->write_points_hash_lock);
	spin_lock_init(&c->freelist_lock);
	bch2_bucket_clock_init(c, READ);
	bch2_bucket_clock_init(c, WRITE);

	/* open bucket 0 is a sentinel NULL: */
	spin_lock_init(&c->open_buckets[0].lock);

	for (ob = c->open_buckets + 1;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
		spin_lock_init(&ob->lock);
		c->open_buckets_nr_free++;

		ob->freelist = c->open_buckets_freelist;
		c->open_buckets_freelist = ob - c->open_buckets;
	}

	writepoint_init(&c->btree_write_point, BCH_DATA_BTREE);
	writepoint_init(&c->rebalance_write_point, BCH_DATA_USER);

	for (wp = c->write_points;
	     wp < c->write_points + ARRAY_SIZE(c->write_points); wp++) {
		writepoint_init(wp, BCH_DATA_USER);

		wp->last_used	= sched_clock();
		wp->write_point	= (unsigned long) wp;
		hlist_add_head_rcu(&wp->node,
				   writepoint_hash(c, wp->write_point));
	}

	c->pd_controllers_update_seconds = 5;
	INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update);
}