2 * Primary bucket allocation code
4 * Copyright 2012 Google, Inc.
6 * Allocation in bcache is done in terms of buckets:
8 * Each bucket has an associated 8 bit gen; this gen corresponds to the gen in
9 * btree pointers - they must match for the pointer to be considered valid.
11 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
12 * bucket simply by incrementing its gen.
14 * The gens (along with the priorities; it's really the gens that are important but
15 * the code is named as if it's the priorities) are written in an arbitrary list
16 * of buckets on disk, with a pointer to them in the journal header.
18 * When we invalidate a bucket, we have to write its new gen to disk and wait
19 * for that write to complete before we use it - otherwise after a crash we
20 * could have pointers that appeared to be good but pointed to data that had been overwritten.
23 * Since the gens and priorities are all stored contiguously on disk, we can
24 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
25 * call prio_write(), and when prio_write() finishes we pull buckets off the
26 * free_inc list and optionally discard them.
28 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
29 * priorities and gens were being written before we could allocate. c->free is a
30 * smaller freelist, and buckets on that list are always ready to be used.
32 * If we've got discards enabled, that happens when a bucket moves from the
33 * free_inc list to the free list.
35 * It's important to ensure that gens don't wrap around - with respect to
36 * either the oldest gen in the btree or the gen on disk. This is quite
37 * difficult to do in practice, but we explicitly guard against it anyway - if
38 * a bucket is in danger of wrapping around we simply skip invalidating it that
39 * time around, and we garbage collect or rewrite the priorities sooner than we
40 * would have otherwise.
42 * bch2_bucket_alloc() allocates a single bucket from a specific device.
44 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
45 * in a given filesystem.
47 * invalidate_buckets() drives all the processes described above. It's called
48 * from bch2_bucket_alloc() and a few other places that need to make sure free buckets are ready.
51 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
52 * invalidated, and then invalidate them and stick them on the free_inc list -
53 * in either lru or fifo order.
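/*
 * A minimal sketch of the gen rule described above (illustrative only, not
 * part of this file - the real check is ptr_stale(), used further down): a
 * pointer is only valid while its gen matches the gen of the bucket it points
 * into, so bumping the bucket's gen invalidates every existing pointer to it
 * at once.
 */
#if 0
static bool example_ptr_gen_matches(struct bch_dev *ca,
				    const struct bch_extent_ptr *ptr)
{
	size_t b = PTR_BUCKET_NR(ca, ptr);

	/* gens are 8 bits and wrap, so this is an equality check only: */
	return bucket(ca, b)->mark.gen == ptr->gen;
}
#endif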
58 #include "btree_update.h"
70 #include <linux/blkdev.h>
71 #include <linux/kthread.h>
72 #include <linux/math64.h>
73 #include <linux/random.h>
74 #include <linux/rculist.h>
75 #include <linux/rcupdate.h>
76 #include <linux/sched/task.h>
77 #include <linux/sort.h>
78 #include <trace/events/bcachefs.h>
80 static void bch2_recalc_min_prio(struct bch_fs *, struct bch_dev *, int);
82 /* Ratelimiting/PD controllers */
84 static void pd_controllers_update(struct work_struct *work)
86 struct bch_fs *c = container_of(to_delayed_work(work),
88 pd_controllers_update);
92 /* All units are in bytes */
93 u64 faster_tiers_size = 0;
94 u64 faster_tiers_dirty = 0;
96 u64 copygc_can_free = 0;
99 for (i = 0; i < ARRAY_SIZE(c->tiers); i++) {
100 bch2_pd_controller_update(&c->tiers[i].pd,
101 div_u64(faster_tiers_size *
102 c->tiering_percent, 100),
106 for_each_member_device_rcu(ca, c, iter, &c->tiers[i].devs) {
107 struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);
109 u64 size = bucket_to_sector(ca, ca->mi.nbuckets -
110 ca->mi.first_bucket) << 9;
111 u64 dirty = bucket_to_sector(ca,
112 stats.buckets[BCH_DATA_USER]) << 9;
113 u64 free = bucket_to_sector(ca,
114 __dev_buckets_free(ca, stats)) << 9;
116 * Bytes of internal fragmentation, which can be
117 * reclaimed by copy GC
119 s64 fragmented = (bucket_to_sector(ca,
120 stats.buckets[BCH_DATA_USER] +
121 stats.buckets[BCH_DATA_CACHED]) -
122 (stats.sectors[BCH_DATA_USER] +
123 stats.sectors[BCH_DATA_CACHED])) << 9;
125 fragmented = max(0LL, fragmented);
127 bch2_pd_controller_update(&ca->copygc_pd,
128 free, fragmented, -1);
130 faster_tiers_size += size;
131 faster_tiers_dirty += dirty;
133 copygc_can_free += fragmented;
140 * Throttle foreground writes if tier 0 is running out of free buckets,
141 * and either tiering or copygc can free up space.
143 * Target will be small if there isn't any work to do - we don't want to
144 * throttle foreground writes if we currently have all the free space
145 * we're ever going to have.
147 * Otherwise, if there's work to do, try to keep 20% of tier0 available
148 * for foreground writes.
151 copygc_can_free = U64_MAX;
153 schedule_delayed_work(&c->pd_controllers_update,
154 c->pd_controllers_update_seconds * HZ);
157 /* Persistent alloc info: */
159 static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)
161 unsigned bytes = offsetof(struct bch_alloc, data);
163 if (a->fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
165 if (a->fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
168 return DIV_ROUND_UP(bytes, sizeof(u64));
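/*
 * Illustrative sketch (hypothetical helper, not called anywhere): how big an
 * alloc value is when both optional fields are present, sized the same way as
 * bch_alloc_val_u64s() above - offsetof(struct bch_alloc, data) plus the two
 * 2-byte timestamps that get_alloc_field()/put_alloc_field() below pack:
 */
#if 0
static unsigned example_alloc_val_u64s_all_fields(void)
{
	unsigned bytes = offsetof(struct bch_alloc, data)
		+ 2	/* BCH_ALLOC_FIELD_READ_TIME */
		+ 2;	/* BCH_ALLOC_FIELD_WRITE_TIME */

	return DIV_ROUND_UP(bytes, sizeof(u64));
}
#endif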
171 static const char *bch2_alloc_invalid(const struct bch_fs *c,
174 if (k.k->p.inode >= c->sb.nr_devices ||
175 !c->devs[k.k->p.inode])
176 return "invalid device";
180 struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
182 if (bch_alloc_val_u64s(a.v) != bkey_val_u64s(a.k))
183 return "incorrect value size";
187 return "invalid type";
193 static void bch2_alloc_to_text(struct bch_fs *c, char *buf,
194 size_t size, struct bkey_s_c k)
204 const struct bkey_ops bch2_bkey_alloc_ops = {
205 .key_invalid = bch2_alloc_invalid,
206 .val_to_text = bch2_alloc_to_text,
209 static inline unsigned get_alloc_field(const u8 **p, unsigned bytes)
218 v = le16_to_cpup((void *) *p);
221 v = le32_to_cpup((void *) *p);
231 static inline void put_alloc_field(u8 **p, unsigned bytes, unsigned v)
238 *((__le16 *) *p) = cpu_to_le16(v);
241 *((__le32 *) *p) = cpu_to_le32(v);
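/*
 * Hypothetical round trip showing how the two accessors above pair up (not
 * called anywhere; it assumes, as the callers below do, that both accessors
 * advance the cursor past the field they read or write):
 */
#if 0
static void example_alloc_field_roundtrip(void)
{
	u8 buf[8], *out = buf;
	const u8 *in = buf;

	put_alloc_field(&out, 2, 1234);		/* pack a 2-byte field */
	BUG_ON(get_alloc_field(&in, 2) != 1234);
}
#endif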
250 static void bch2_alloc_read_key(struct bch_fs *c, struct bkey_s_c k)
253 struct bkey_s_c_alloc a;
254 struct bucket_mark new;
258 if (k.k->type != BCH_ALLOC)
261 a = bkey_s_c_to_alloc(k);
262 ca = bch_dev_bkey_exists(c, a.k->p.inode);
264 if (a.k->p.offset >= ca->mi.nbuckets)
267 lg_local_lock(&c->usage_lock);
269 g = bucket(ca, a.k->p.offset);
270 bucket_cmpxchg(g, new, ({
276 if (a.v->fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
277 g->prio[READ] = get_alloc_field(&d, 2);
278 if (a.v->fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
279 g->prio[WRITE] = get_alloc_field(&d, 2);
281 lg_local_unlock(&c->usage_lock);
284 int bch2_alloc_read(struct bch_fs *c, struct list_head *journal_replay_list)
286 struct journal_replay *r;
287 struct btree_iter iter;
293 if (!c->btree_roots[BTREE_ID_ALLOC].b)
296 for_each_btree_key(&iter, c, BTREE_ID_ALLOC, POS_MIN, 0, k) {
297 bch2_alloc_read_key(c, k);
298 bch2_btree_iter_cond_resched(&iter);
301 ret = bch2_btree_iter_unlock(&iter);
305 list_for_each_entry(r, journal_replay_list, list) {
306 struct bkey_i *k, *n;
307 struct jset_entry *entry;
309 for_each_jset_key(k, n, entry, &r->j)
310 if (entry->btree_id == BTREE_ID_ALLOC)
311 bch2_alloc_read_key(c, bkey_i_to_s_c(k));
314 mutex_lock(&c->prio_clock[READ].lock);
315 for_each_member_device(ca, c, i) {
316 down_read(&ca->bucket_lock);
317 bch2_recalc_min_prio(c, ca, READ);
318 up_read(&ca->bucket_lock);
320 mutex_unlock(&c->prio_clock[READ].lock);
322 mutex_lock(&c->prio_clock[WRITE].lock);
323 for_each_member_device(ca, c, i) {
324 down_read(&ca->bucket_lock);
325 bch2_recalc_min_prio(c, ca, WRITE);
326 up_read(&ca->bucket_lock);
328 mutex_unlock(&c->prio_clock[WRITE].lock);
333 static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
334 size_t b, struct btree_iter *iter,
337 struct bucket_mark m;
338 __BKEY_PADDED(k, DIV_ROUND_UP(sizeof(struct bch_alloc), 8)) alloc_key;
340 struct bkey_i_alloc *a;
344 bch2_btree_iter_set_pos(iter, POS(ca->dev_idx, b));
347 ret = bch2_btree_iter_traverse(iter);
351 lg_local_lock(&c->usage_lock);
354 /* read mark under btree node lock: */
355 m = READ_ONCE(g->mark);
356 a = bkey_alloc_init(&alloc_key.k);
360 set_bkey_val_u64s(&a->k, bch_alloc_val_u64s(&a->v));
363 if (a->v.fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
364 put_alloc_field(&d, 2, g->prio[READ]);
365 if (a->v.fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
366 put_alloc_field(&d, 2, g->prio[WRITE]);
367 lg_local_unlock(&c->usage_lock);
369 ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq,
372 BTREE_INSERT_USE_RESERVE|
373 BTREE_INSERT_USE_ALLOC_RESERVE|
375 BTREE_INSERT_ENTRY(iter, &a->k_i));
376 bch2_btree_iter_cond_resched(iter);
377 } while (ret == -EINTR);
382 int bch2_alloc_replay_key(struct bch_fs *c, struct bpos pos)
385 struct btree_iter iter;
388 if (pos.inode >= c->sb.nr_devices || !c->devs[pos.inode])
391 ca = bch_dev_bkey_exists(c, pos.inode);
393 if (pos.offset >= ca->mi.nbuckets)
396 bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
399 ret = __bch2_alloc_write_key(c, ca, pos.offset, &iter, NULL);
400 bch2_btree_iter_unlock(&iter);
404 static int bch2_alloc_write(struct bch_fs *c, struct bch_dev *ca, u64 *journal_seq)
406 struct btree_iter iter;
407 unsigned long bucket;
410 bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
413 down_read(&ca->bucket_lock);
414 for_each_set_bit(bucket, ca->buckets_dirty, ca->mi.nbuckets) {
415 ret = __bch2_alloc_write_key(c, ca, bucket, &iter, journal_seq);
419 clear_bit(bucket, ca->buckets_dirty);
421 up_read(&ca->bucket_lock);
423 bch2_btree_iter_unlock(&iter);
427 /* Bucket IO clocks: */
429 static void bch2_recalc_min_prio(struct bch_fs *c, struct bch_dev *ca, int rw)
431 struct prio_clock *clock = &c->prio_clock[rw];
432 struct bucket_array *buckets = bucket_array(ca);
437 lockdep_assert_held(&c->prio_clock[rw].lock);
439 /* Determine min prio for this particular device */
440 for_each_bucket(g, buckets)
441 max_delta = max(max_delta, (u16) (clock->hand - g->prio[rw]));
443 ca->min_prio[rw] = clock->hand - max_delta;
446 * This may possibly increase the min prio for the whole device, so recheck the filesystem-wide minimum as well:
451 for_each_member_device(ca, c, i)
452 max_delta = max(max_delta,
453 (u16) (clock->hand - ca->min_prio[rw]));
455 clock->min_prio = clock->hand - max_delta;
458 static void bch2_rescale_prios(struct bch_fs *c, int rw)
460 struct prio_clock *clock = &c->prio_clock[rw];
461 struct bucket_array *buckets;
466 trace_rescale_prios(c);
468 for_each_member_device(ca, c, i) {
469 down_read(&ca->bucket_lock);
470 buckets = bucket_array(ca);
472 for_each_bucket(g, buckets)
473 g->prio[rw] = clock->hand -
474 (clock->hand - g->prio[rw]) / 2;
476 bch2_recalc_min_prio(c, ca, rw);
478 up_read(&ca->bucket_lock);
482 static void bch2_inc_clock_hand(struct io_timer *timer)
484 struct prio_clock *clock = container_of(timer,
485 struct prio_clock, rescale);
486 struct bch_fs *c = container_of(clock,
487 struct bch_fs, prio_clock[clock->rw]);
490 mutex_lock(&clock->lock);
494 /* if clock cannot be advanced more, rescale prio */
495 if (clock->hand == (u16) (clock->min_prio - 1))
496 bch2_rescale_prios(c, clock->rw);
498 mutex_unlock(&clock->lock);
500 capacity = READ_ONCE(c->capacity);
506 * we only increment when 0.1% of the filesystem capacity has been read
507 * or written to - this determines how quickly the clock hand advances
509 * XXX: we shouldn't really be going off of the capacity of devices in
510 * RW mode (that will be 0 when we're RO, yet we can still service reads)
513 timer->expire += capacity >> 10;
515 bch2_io_timer_add(&c->io_clock[clock->rw], timer);
518 static void bch2_prio_timer_init(struct bch_fs *c, int rw)
520 struct prio_clock *clock = &c->prio_clock[rw];
524 clock->rescale.fn = bch2_inc_clock_hand;
525 clock->rescale.expire = c->capacity >> 10;
526 mutex_init(&clock->lock);
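/*
 * Worked example of the rescale period set above: with c->capacity at 2^31
 * sectors (1 TiB), capacity >> 10 is 2^21 sectors, so the clock hand advances
 * (and the timer re-arms) after roughly 1 GiB of IO - about 0.1% of the
 * filesystem, matching the comment in bch2_inc_clock_hand().
 */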
529 /* Background allocator thread: */
532 * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
533 * (marking them as invalidated on disk), then optionally issues discard
534 * commands to the newly free buckets, then puts them on the various freelists.
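/*
 * The pseudocode below is a simplified sketch of bch2_allocator_thread()
 * (defined later in this file), showing only the ordering described above; it
 * elides error handling, the gc interaction, and the decision of which
 * journal sequence number actually needs flushing:
 */
#if 0
	while (!kthread_should_stop()) {
		/* pick reusable buckets and invalidate them in memory: */
		invalidate_buckets(c, ca);

		/* persist the new gens before any bucket can be handed out: */
		bch2_invalidate_free_inc(c, ca, &journal_seq);
		bch2_journal_flush_seq(&c->journal, journal_seq);

		/* discard (if enabled) and move buckets onto the freelists: */
		while (!fifo_empty(&ca->free_inc))
			discard_invalidated_bucket(c, ca,
						   fifo_peek(&ca->free_inc));
	}
#endif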
537 static void verify_not_on_freelist(struct bch_fs *c, struct bch_dev *ca,
540 if (expensive_debug_checks(c)) {
545 for (j = 0; j < RESERVE_NR; j++)
546 fifo_for_each_entry(i, &ca->free[j], iter)
548 fifo_for_each_entry(i, &ca->free_inc, iter)
553 #define BUCKET_GC_GEN_MAX 96U
556 * wait_buckets_available - wait on reclaimable buckets
558 * If there aren't enough available buckets to fill up free_inc, wait until there are.
561 static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
563 unsigned long gc_count = c->gc_count;
567 set_current_state(TASK_INTERRUPTIBLE);
568 if (kthread_should_stop()) {
573 if (gc_count != c->gc_count)
574 ca->inc_gen_really_needs_gc = 0;
576 if ((ssize_t) (dev_buckets_available(c, ca) -
577 ca->inc_gen_really_needs_gc) >=
578 (ssize_t) fifo_free(&ca->free_inc))
581 up_read(&c->gc_lock);
584 down_read(&c->gc_lock);
587 __set_current_state(TASK_RUNNING);
591 static bool bch2_can_invalidate_bucket(struct bch_dev *ca,
593 struct bucket_mark mark)
597 if (!is_available_bucket(mark))
600 gc_gen = bucket_gc_gen(ca, bucket);
602 if (gc_gen >= BUCKET_GC_GEN_MAX / 2)
603 ca->inc_gen_needs_gc++;
605 if (gc_gen >= BUCKET_GC_GEN_MAX)
606 ca->inc_gen_really_needs_gc++;
608 return gc_gen < BUCKET_GC_GEN_MAX;
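/*
 * Worked example of the thresholds above, with BUCKET_GC_GEN_MAX == 96: a
 * bucket whose gen has advanced 47 times since the last mark-and-sweep gc is
 * still freely reusable; from 48 (BUCKET_GC_GEN_MAX / 2) it starts counting
 * towards kicking off gc; at 96 it can no longer be invalidated at all until
 * gc has run - which is what keeps gens from wrapping past the oldest gen
 * still referenced from the btree.
 */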
611 static void bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
614 struct bucket_mark m;
616 spin_lock(&c->freelist_lock);
617 if (!bch2_invalidate_bucket(c, ca, bucket, &m)) {
618 spin_unlock(&c->freelist_lock);
622 verify_not_on_freelist(c, ca, bucket);
623 BUG_ON(!fifo_push(&ca->free_inc, bucket));
624 spin_unlock(&c->freelist_lock);
627 bucket_io_clock_reset(c, ca, bucket, READ);
628 bucket_io_clock_reset(c, ca, bucket, WRITE);
630 if (m.cached_sectors) {
631 ca->allocator_invalidating_data = true;
632 } else if (m.journal_seq_valid) {
633 u64 journal_seq = atomic64_read(&c->journal.seq);
634 u64 bucket_seq = journal_seq;
636 bucket_seq &= ~((u64) U16_MAX);
637 bucket_seq |= m.journal_seq;
639 if (bucket_seq > journal_seq)
640 bucket_seq -= 1 << 16;
642 ca->allocator_journal_seq_flush =
643 max(ca->allocator_journal_seq_flush, bucket_seq);
648 * Determines what order we're going to reuse buckets, smallest bucket_sort_key() first.
652 * - We take into account the read prio of the bucket, which gives us an
653 * indication of how hot the data is -- we scale the prio so that the prio
654 * farthest from the clock is worth 1/8th of the closest.
656 * - The number of sectors of cached data in the bucket, which gives us an
657 * indication of the cost in cache misses this eviction will cause.
659 * - If hotness * sectors used compares equal, we pick the bucket with the
660 * smallest bucket_gc_gen() - since incrementing the same bucket's generation
661 * number repeatedly forces us to run mark and sweep gc to avoid generation number wraparound.
665 static unsigned long bucket_sort_key(struct bch_fs *c, struct bch_dev *ca,
666 size_t b, struct bucket_mark m)
669 * Time since last read, scaled to [0, 8) where larger value indicates
670 * more recently read data:
672 unsigned long hotness =
673 (bucket(ca, b)->prio[READ] - ca->min_prio[READ]) * 7 /
674 (c->prio_clock[READ].hand - ca->min_prio[READ]);
676 /* How much we want to keep the data in this bucket: */
677 unsigned long data_wantness =
678 (hotness + 1) * bucket_sectors_used(m);
680 unsigned long needs_journal_commit =
681 bucket_needs_journal_commit(m, c->journal.last_seq_ondisk);
683 return (data_wantness << 9) |
684 (needs_journal_commit << 8) |
685 bucket_gc_gen(ca, b);
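/*
 * Worked example of the packing above: a bucket last read at the device's
 * min_prio (hotness 0) holding 10 cached sectors, with no journal commit
 * pending, gets a key of ((0 + 1) * 10) << 9 | 0 << 8 | bucket_gc_gen(),
 * i.e. 5120 plus its gc gen - so colder buckets with less cached data sort
 * first, and the gc gen only breaks ties.
 */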
688 static inline int bucket_alloc_cmp(alloc_heap *h,
689 struct alloc_heap_entry l,
690 struct alloc_heap_entry r)
692 return (l.key > r.key) - (l.key < r.key);
695 static void invalidate_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
697 struct bucket_array *buckets;
698 struct alloc_heap_entry e;
701 ca->alloc_heap.used = 0;
703 mutex_lock(&c->prio_clock[READ].lock);
704 down_read(&ca->bucket_lock);
706 buckets = bucket_array(ca);
708 bch2_recalc_min_prio(c, ca, READ);
711 * Find buckets with lowest read priority, by building a maxheap sorted
712 * by read priority and repeatedly replacing the maximum element until
713 * all buckets have been visited.
715 for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
716 struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
718 if (!bch2_can_invalidate_bucket(ca, b, m))
721 e = (struct alloc_heap_entry) {
723 .key = bucket_sort_key(c, ca, b, m)
726 heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);
729 up_read(&ca->bucket_lock);
730 mutex_unlock(&c->prio_clock[READ].lock);
732 heap_resort(&ca->alloc_heap, bucket_alloc_cmp);
735 * If we run out of buckets to invalidate, bch2_allocator_thread() will
736 * kick stuff and retry us
738 while (!fifo_full(&ca->free_inc) &&
739 heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp))
740 bch2_invalidate_one_bucket(c, ca, e.bucket);
743 static void invalidate_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
745 struct bucket_array *buckets = bucket_array(ca);
746 struct bucket_mark m;
750 checked < ca->mi.nbuckets && !fifo_full(&ca->free_inc);
752 if (ca->fifo_last_bucket < ca->mi.first_bucket ||
753 ca->fifo_last_bucket >= ca->mi.nbuckets)
754 ca->fifo_last_bucket = ca->mi.first_bucket;
756 b = ca->fifo_last_bucket++;
758 m = READ_ONCE(buckets->b[b].mark);
760 if (bch2_can_invalidate_bucket(ca, b, m))
761 bch2_invalidate_one_bucket(c, ca, b);
765 static void invalidate_buckets_random(struct bch_fs *c, struct bch_dev *ca)
767 struct bucket_array *buckets = bucket_array(ca);
768 struct bucket_mark m;
772 checked < ca->mi.nbuckets / 2 && !fifo_full(&ca->free_inc);
774 size_t b = bch2_rand_range(ca->mi.nbuckets -
775 ca->mi.first_bucket) +
778 m = READ_ONCE(buckets->b[b].mark);
780 if (bch2_can_invalidate_bucket(ca, b, m))
781 bch2_invalidate_one_bucket(c, ca, b);
785 static void invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
787 ca->inc_gen_needs_gc = 0;
788 ca->inc_gen_really_needs_gc = 0;
790 switch (ca->mi.replacement) {
791 case CACHE_REPLACEMENT_LRU:
792 invalidate_buckets_lru(c, ca);
794 case CACHE_REPLACEMENT_FIFO:
795 invalidate_buckets_fifo(c, ca);
797 case CACHE_REPLACEMENT_RANDOM:
798 invalidate_buckets_random(c, ca);
803 static int size_t_cmp(const void *_l, const void *_r)
805 const size_t *l = _l, *r = _r;
807 return (*l > *r) - (*l < *r);
810 static int bch2_invalidate_free_inc(struct bch_fs *c, struct bch_dev *ca,
813 struct btree_iter iter;
814 unsigned nr_invalidated = 0;
818 bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS(ca->dev_idx, 0),
821 fifo_for_each_entry(b, &ca->free_inc, i) {
822 ret = __bch2_alloc_write_key(c, ca, b, &iter, journal_seq);
829 bch2_btree_iter_unlock(&iter);
830 return nr_invalidated ?: ret;
834 * Given an invalidated, ready to use bucket: issue a discard to it if enabled,
835 * then add it to the freelist, waiting until there's room if necessary:
837 static void discard_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca,
840 if (ca->mi.discard &&
841 blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
842 blkdev_issue_discard(ca->disk_sb.bdev,
843 bucket_to_sector(ca, bucket),
844 ca->mi.bucket_size, GFP_NOIO, 0);
850 set_current_state(TASK_INTERRUPTIBLE);
853 * Don't remove from free_inc until after it's added to
854 * freelist, so gc can find it:
856 spin_lock(&c->freelist_lock);
857 for (i = 0; i < RESERVE_NR; i++)
858 if (fifo_push(&ca->free[i], bucket)) {
859 fifo_pop(&ca->free_inc, bucket);
860 closure_wake_up(&c->freelist_wait);
864 spin_unlock(&c->freelist_lock);
869 if (kthread_should_stop())
876 __set_current_state(TASK_RUNNING);
880 * bch_allocator_thread - move buckets from free_inc to reserves
882 * The free_inc FIFO is populated by invalidate_buckets(), and
883 * the reserves are depleted by bucket allocation. When we run out
884 * of free_inc, try to invalidate some buckets and write out the new bucket gens.
887 static int bch2_allocator_thread(void *arg)
889 struct bch_dev *ca = arg;
890 struct bch_fs *c = ca->fs;
899 while (ca->nr_invalidated) {
900 BUG_ON(fifo_empty(&ca->free_inc));
902 bucket = fifo_peek(&ca->free_inc);
903 discard_invalidated_bucket(c, ca, bucket);
904 if (kthread_should_stop())
906 --ca->nr_invalidated;
909 if (fifo_empty(&ca->free_inc))
913 ret = bch2_invalidate_free_inc(c, ca, &journal_seq);
917 ca->nr_invalidated = ret;
919 if (ca->nr_invalidated == fifo_used(&ca->free_inc)) {
920 ca->alloc_thread_started = true;
921 bch2_alloc_write(c, ca, &journal_seq);
924 if (ca->allocator_invalidating_data)
925 bch2_journal_flush_seq(&c->journal, journal_seq);
926 else if (ca->allocator_journal_seq_flush)
927 bch2_journal_flush_seq(&c->journal,
928 ca->allocator_journal_seq_flush);
931 /* Reset front/back so we can easily sort fifo entries later: */
932 ca->free_inc.front = ca->free_inc.back = 0;
933 ca->allocator_journal_seq_flush = 0;
934 ca->allocator_invalidating_data = false;
936 down_read(&c->gc_lock);
937 if (test_bit(BCH_FS_GC_FAILURE, &c->flags)) {
938 up_read(&c->gc_lock);
944 * Find some buckets that we can invalidate, either
945 * they're completely unused, or only contain clean data
946 * that's been written back to the backing device or to another tier:
950 invalidate_buckets(c, ca);
951 trace_alloc_batch(ca, fifo_used(&ca->free_inc),
954 if ((ca->inc_gen_needs_gc >= ca->free_inc.size ||
955 (!fifo_full(&ca->free_inc) &&
956 ca->inc_gen_really_needs_gc >=
957 fifo_free(&ca->free_inc))) &&
959 atomic_inc(&c->kick_gc);
960 wake_up_process(c->gc_thread);
963 if (fifo_full(&ca->free_inc))
966 if (wait_buckets_available(c, ca)) {
967 up_read(&c->gc_lock);
971 up_read(&c->gc_lock);
973 BUG_ON(ca->free_inc.front);
975 spin_lock(&c->freelist_lock);
976 sort(ca->free_inc.data,
978 sizeof(ca->free_inc.data[0]),
980 spin_unlock(&c->freelist_lock);
983 * free_inc is now full of newly-invalidated buckets: next,
984 * write out the new bucket gens:
992 * Open buckets represent a bucket that's currently being allocated from. They
993 * serve two purposes:
995 * - They track buckets that have been partially allocated, allowing for
996 * sub-bucket sized allocations - they're used by the sector allocator below
998 * - They provide a reference to the buckets they own that mark and sweep GC
999 * can find, until the new allocation has a pointer to it inserted into the btree.
1002 * When allocating some space with the sector allocator, the allocation comes
1003 * with a reference to an open bucket - the caller is required to put that
1004 * reference _after_ doing the index update that makes its allocation reachable.
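/*
 * Rough sketch of the lifetime described above, from the point of view of a
 * writer (illustrative only - error handling is elided, the real sequence
 * lives in the write path, and the data write/btree insert steps are just
 * placeholders here):
 */
#if 0
	int idx = bch2_bucket_alloc(c, ca, RESERVE_NONE, false, cl);
	struct open_bucket *ob = c->open_buckets + idx;

	/* ... write data into the bucket ob->ptr points at ... */
	/* ... insert an extent pointing at that data into the btree ... */

	/*
	 * Only now may the reference be dropped - until the index update is
	 * visible, the open bucket is what keeps gc and the allocator from
	 * reusing the bucket:
	 */
	bch2_open_bucket_put(c, ob);
#endif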
1007 void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
1009 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
1011 spin_lock(&ob->lock);
1012 bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr),
1013 false, gc_pos_alloc(c, ob), 0);
1015 spin_unlock(&ob->lock);
1017 spin_lock(&c->freelist_lock);
1018 ob->freelist = c->open_buckets_freelist;
1019 c->open_buckets_freelist = ob - c->open_buckets;
1020 c->open_buckets_nr_free++;
1021 spin_unlock(&c->freelist_lock);
1023 closure_wake_up(&c->open_buckets_wait);
1026 static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
1028 struct open_bucket *ob;
1030 BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);
1032 ob = c->open_buckets + c->open_buckets_freelist;
1033 c->open_buckets_freelist = ob->freelist;
1034 atomic_set(&ob->pin, 1);
1036 c->open_buckets_nr_free--;
1041 * XXX: allocation on startup is still sketchy. There is insufficient
1042 * synchronization for bch2_bucket_alloc_startup() to work correctly after
1043 * bch2_alloc_write() has been called, and we aren't currently doing anything
1044 * to guarantee that this won't happen.
1046 * Even aside from that, it's really difficult to avoid situations where on
1047 * startup we write out a pointer to a freshly allocated bucket before the
1048 * corresponding gen - when we're still digging ourselves out of the "i need to
1049 * allocate to write bucket gens, but i need to write bucket gens to allocate" problem.
1052 * Fortunately, bch2_btree_mark_key_initial() will detect and repair this.
1055 static long bch2_bucket_alloc_startup(struct bch_fs *c, struct bch_dev *ca)
1057 struct bucket_array *buckets;
1060 if (!down_read_trylock(&c->gc_lock))
1063 if (test_bit(BCH_FS_GC_FAILURE, &c->flags)) {
1064 up_read(&c->gc_lock);
1068 spin_unlock(&c->freelist_lock);
1070 down_read(&ca->bucket_lock);
1071 buckets = bucket_array(ca);
1073 spin_lock(&c->freelist_lock);
1075 for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++)
1076 if (is_startup_available_bucket(buckets->b[b].mark) &&
1077 bch2_mark_alloc_bucket_startup(c, ca, b)) {
1078 set_bit(b, ca->buckets_dirty);
1083 up_read(&ca->bucket_lock);
1084 up_read(&c->gc_lock);
1088 static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
1094 return BTREE_NODE_RESERVE / 2;
1096 return BTREE_NODE_RESERVE;
1101 * bch2_bucket_alloc - allocate a single bucket from a specific device
1103 * Returns an index into c->open_buckets on success; on failure, FREELIST_EMPTY or OPEN_BUCKETS_EMPTY
1105 int bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
1106 enum alloc_reserve reserve,
1107 bool may_alloc_partial,
1110 struct bucket_array *buckets;
1111 struct open_bucket *ob;
1114 spin_lock(&c->freelist_lock);
1115 if (may_alloc_partial &&
1116 ca->open_buckets_partial_nr) {
1117 int ret = ca->open_buckets_partial[--ca->open_buckets_partial_nr];
1118 c->open_buckets[ret].on_partial_list = false;
1119 spin_unlock(&c->freelist_lock);
1123 if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
1125 closure_wait(&c->open_buckets_wait, cl);
1126 spin_unlock(&c->freelist_lock);
1127 trace_open_bucket_alloc_fail(ca, reserve);
1128 return OPEN_BUCKETS_EMPTY;
1131 if (likely(fifo_pop(&ca->free[RESERVE_NONE], bucket)))
1136 if (fifo_pop(&ca->free[RESERVE_BTREE], bucket))
1140 if (fifo_used(&ca->free[RESERVE_BTREE]) * 2 >=
1141 ca->free[RESERVE_BTREE].size &&
1142 fifo_pop(&ca->free[RESERVE_BTREE], bucket))
1145 case RESERVE_MOVINGGC:
1146 if (fifo_pop(&ca->free[RESERVE_MOVINGGC], bucket))
1153 if (unlikely(!ca->alloc_thread_started) &&
1154 (reserve == RESERVE_ALLOC) &&
1155 (bucket = bch2_bucket_alloc_startup(c, ca)) >= 0)
1158 spin_unlock(&c->freelist_lock);
1160 trace_bucket_alloc_fail(ca, reserve);
1161 return FREELIST_EMPTY;
1163 verify_not_on_freelist(c, ca, bucket);
1165 ob = bch2_open_bucket_alloc(c);
1167 spin_lock(&ob->lock);
1168 lg_local_lock(&c->usage_lock);
1169 buckets = bucket_array(ca);
1172 ob->sectors_free = ca->mi.bucket_size;
1173 ob->ptr = (struct bch_extent_ptr) {
1174 .gen = buckets->b[bucket].mark.gen,
1175 .offset = bucket_to_sector(ca, bucket),
1179 bucket_io_clock_reset(c, ca, bucket, READ);
1180 bucket_io_clock_reset(c, ca, bucket, WRITE);
1182 lg_local_unlock(&c->usage_lock);
1183 spin_unlock(&ob->lock);
1185 spin_unlock(&c->freelist_lock);
1187 bch2_wake_allocator(ca);
1189 trace_bucket_alloc(ca, reserve);
1190 return ob - c->open_buckets;
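/*
 * How callers in this file consume the return value (simplified from
 * __bch2_bucket_alloc_set() below): a successful return is an index into
 * c->open_buckets; FREELIST_EMPTY and OPEN_BUCKETS_EMPTY mean the caller has
 * to wait on the allocator or give up:
 */
#if 0
	int ob = bch2_bucket_alloc(c, ca, reserve,
				   wp->type == BCH_DATA_USER, cl);

	if (ob == FREELIST_EMPTY || ob == OPEN_BUCKETS_EMPTY)
		return ob;	/* caller waits or propagates the failure */

	wp->ptrs[wp->nr_ptrs++] = c->open_buckets + ob;
#endif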
1193 struct dev_alloc_list bch2_wp_alloc_list(struct bch_fs *c,
1194 struct write_point *wp,
1195 struct bch_devs_mask *devs)
1197 struct dev_alloc_list ret = { .nr = 0 };
1198 struct bch_dev *ca, *ca2;
1201 for_each_member_device_rcu(ca, c, i, devs) {
1202 for (j = 0; j < ret.nr; j++) {
1203 unsigned idx = ret.devs[j];
1205 ca2 = rcu_dereference(c->devs[idx]);
1209 if (ca->mi.tier < ca2->mi.tier)
1212 if (ca->mi.tier == ca2->mi.tier &&
1213 wp->next_alloc[i] < wp->next_alloc[idx])
1217 array_insert_item(ret.devs, ret.nr, j, i);
1223 void bch2_wp_rescale(struct bch_fs *c, struct bch_dev *ca,
1224 struct write_point *wp)
1228 for (i = 0; i < ARRAY_SIZE(wp->next_alloc); i++)
1229 wp->next_alloc[i] >>= 1;
1232 static enum bucket_alloc_ret __bch2_bucket_alloc_set(struct bch_fs *c,
1233 struct write_point *wp,
1234 unsigned nr_replicas,
1235 enum alloc_reserve reserve,
1236 struct bch_devs_mask *devs,
1239 enum bucket_alloc_ret ret = NO_DEVICES;
1240 struct dev_alloc_list devs_sorted;
1244 BUG_ON(nr_replicas > ARRAY_SIZE(wp->ptrs));
1246 if (wp->nr_ptrs >= nr_replicas)
1247 return ALLOC_SUCCESS;
1250 devs_sorted = bch2_wp_alloc_list(c, wp, devs);
1252 for (i = 0; i < devs_sorted.nr; i++) {
1253 struct bch_dev *ca =
1254 rcu_dereference(c->devs[devs_sorted.devs[i]]);
1260 ob = bch2_bucket_alloc(c, ca, reserve,
1261 wp->type == BCH_DATA_USER, cl);
1264 if (ret == OPEN_BUCKETS_EMPTY)
1269 BUG_ON(ob <= 0 || ob > U8_MAX);
1270 BUG_ON(wp->nr_ptrs >= ARRAY_SIZE(wp->ptrs));
1271 wp->ptrs[wp->nr_ptrs++] = c->open_buckets + ob;
1273 buckets_free = dev_buckets_free(c, ca);
1275 wp->next_alloc[ca->dev_idx] +=
1276 div64_u64(U64_MAX, buckets_free *
1277 ca->mi.bucket_size);
1279 wp->next_alloc[ca->dev_idx] = U64_MAX;
1280 bch2_wp_rescale(c, ca, wp);
1282 __clear_bit(ca->dev_idx, devs->d);
1284 if (wp->nr_ptrs == nr_replicas) {
1285 ret = ALLOC_SUCCESS;
1290 EBUG_ON(reserve == RESERVE_MOVINGGC &&
1291 ret != ALLOC_SUCCESS &&
1292 ret != OPEN_BUCKETS_EMPTY);
1297 static int bch2_bucket_alloc_set(struct bch_fs *c, struct write_point *wp,
1298 unsigned nr_replicas,
1299 enum alloc_reserve reserve,
1300 struct bch_devs_mask *devs,
1303 bool waiting = false;
1306 switch (__bch2_bucket_alloc_set(c, wp, nr_replicas,
1307 reserve, devs, cl)) {
1310 closure_wake_up(&c->freelist_wait);
1316 closure_wake_up(&c->freelist_wait);
1319 case FREELIST_EMPTY:
1326 /* Retry allocation after adding ourself to waitlist: */
1327 closure_wait(&c->freelist_wait, cl);
1330 case OPEN_BUCKETS_EMPTY:
1331 return cl ? -EAGAIN : -ENOSPC;
1338 /* Sector allocator */
1340 static void writepoint_drop_ptrs(struct bch_fs *c,
1341 struct write_point *wp,
1342 struct bch_devs_mask *devs,
1343 unsigned nr_ptrs_dislike)
1347 if (!nr_ptrs_dislike)
1350 for (i = wp->nr_ptrs - 1; i >= 0; --i) {
1351 struct open_bucket *ob = wp->ptrs[i];
1352 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
1354 if (nr_ptrs_dislike && !test_bit(ob->ptr.dev, devs->d)) {
1355 BUG_ON(ca->open_buckets_partial_nr >=
1356 ARRAY_SIZE(ca->open_buckets_partial));
1358 spin_lock(&c->freelist_lock);
1359 ob->on_partial_list = true;
1360 ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
1361 ob - c->open_buckets;
1362 spin_unlock(&c->freelist_lock);
1364 closure_wake_up(&c->open_buckets_wait);
1365 closure_wake_up(&c->freelist_wait);
1367 array_remove_item(wp->ptrs, wp->nr_ptrs, i);
1373 static void verify_not_stale(struct bch_fs *c, const struct write_point *wp)
1375 #ifdef CONFIG_BCACHEFS_DEBUG
1376 struct open_bucket *ob;
1379 writepoint_for_each_ptr(wp, ob, i) {
1380 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
1382 BUG_ON(ptr_stale(ca, &ob->ptr));
1387 static int open_bucket_add_buckets(struct bch_fs *c,
1388 struct bch_devs_mask *_devs,
1389 struct write_point *wp,
1390 struct bch_devs_list *devs_have,
1391 unsigned nr_replicas,
1392 enum alloc_reserve reserve,
1395 struct bch_devs_mask devs = c->rw_devs[wp->type];
1396 struct open_bucket *ob;
1399 if (wp->nr_ptrs >= nr_replicas)
1402 /* Don't allocate from devices we already have pointers to: */
1403 for (i = 0; i < devs_have->nr; i++)
1404 __clear_bit(devs_have->devs[i], devs.d);
1406 writepoint_for_each_ptr(wp, ob, i)
1407 __clear_bit(ob->ptr.dev, devs.d);
1410 bitmap_and(devs.d, devs.d, _devs->d, BCH_SB_MEMBERS_MAX);
1412 return bch2_bucket_alloc_set(c, wp, nr_replicas, reserve, &devs, cl);
1415 static struct write_point *__writepoint_find(struct hlist_head *head,
1416 unsigned long write_point)
1418 struct write_point *wp;
1420 hlist_for_each_entry_rcu(wp, head, node)
1421 if (wp->write_point == write_point)
1427 static struct hlist_head *writepoint_hash(struct bch_fs *c,
1428 unsigned long write_point)
1431 hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));
1433 return &c->write_points_hash[hash];
1436 static struct write_point *writepoint_find(struct bch_fs *c,
1437 unsigned long write_point)
1439 struct write_point *wp, *oldest;
1440 struct hlist_head *head;
1442 if (!(write_point & 1UL)) {
1443 wp = (struct write_point *) write_point;
1444 mutex_lock(&wp->lock);
1448 head = writepoint_hash(c, write_point);
1450 wp = __writepoint_find(head, write_point);
1453 mutex_lock(&wp->lock);
1454 if (wp->write_point == write_point)
1456 mutex_unlock(&wp->lock);
1461 for (wp = c->write_points;
1462 wp < c->write_points + ARRAY_SIZE(c->write_points);
1464 if (!oldest || time_before64(wp->last_used, oldest->last_used))
1467 mutex_lock(&oldest->lock);
1468 mutex_lock(&c->write_points_hash_lock);
1469 wp = __writepoint_find(head, write_point);
1470 if (wp && wp != oldest) {
1471 mutex_unlock(&c->write_points_hash_lock);
1472 mutex_unlock(&oldest->lock);
1477 hlist_del_rcu(&wp->node);
1478 wp->write_point = write_point;
1479 hlist_add_head_rcu(&wp->node, head);
1480 mutex_unlock(&c->write_points_hash_lock);
1482 wp->last_used = sched_clock();
1487 * Get us an open_bucket we can allocate from, return with it locked:
1489 struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
1490 struct bch_devs_mask *devs,
1491 struct write_point_specifier write_point,
1492 struct bch_devs_list *devs_have,
1493 unsigned nr_replicas,
1494 unsigned nr_replicas_required,
1495 enum alloc_reserve reserve,
1499 struct write_point *wp;
1500 struct open_bucket *ob;
1501 unsigned i, nr_ptrs_dislike = 0, nr_ptrs_have = 0;
1504 BUG_ON(!nr_replicas || !nr_replicas_required);
1506 wp = writepoint_find(c, write_point.v);
1508 /* does the writepoint have ptrs we don't need? */
1509 writepoint_for_each_ptr(wp, ob, i)
1510 if (bch2_dev_list_has_dev(*devs_have, ob->ptr.dev))
1512 else if (devs && !test_bit(ob->ptr.dev, devs->d))
1515 ret = open_bucket_add_buckets(c, devs, wp, devs_have,
1516 nr_replicas + nr_ptrs_have + nr_ptrs_dislike,
1518 if (ret && ret != -EROFS)
1522 nr_ptrs_have + nr_ptrs_dislike + nr_replicas_required) {
1527 if ((int) wp->nr_ptrs - nr_ptrs_dislike < nr_replicas)
1528 nr_ptrs_dislike = clamp_t(int, wp->nr_ptrs - nr_replicas,
1529 0, nr_ptrs_dislike);
1531 /* Remove pointers we don't want to use: */
1532 writepoint_drop_ptrs(c, wp, devs, nr_ptrs_dislike);
1535 * Move pointers to devices we already have to end of open bucket
1536 * pointer list - note that removing pointers we don't want to use might
1537 * have changed nr_ptrs_have:
1540 i = nr_ptrs_have = 0;
1541 while (i < wp->nr_ptrs - nr_ptrs_have)
1542 if (bch2_dev_list_has_dev(*devs_have, wp->ptrs[i]->ptr.dev)) {
1544 swap(wp->ptrs[i], wp->ptrs[wp->nr_ptrs - nr_ptrs_have]);
1550 wp->nr_ptrs_can_use =
1551 min_t(unsigned, nr_replicas, wp->nr_ptrs - nr_ptrs_have);
1553 BUG_ON(wp->nr_ptrs_can_use < nr_replicas_required ||
1554 wp->nr_ptrs_can_use > wp->nr_ptrs);
1556 wp->sectors_free = UINT_MAX;
1558 for (i = 0; i < wp->nr_ptrs_can_use; i++)
1559 wp->sectors_free = min(wp->sectors_free,
1560 wp->ptrs[i]->sectors_free);
1562 BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);
1564 verify_not_stale(c, wp);
1568 mutex_unlock(&wp->lock);
1569 return ERR_PTR(ret);
1573 * Append pointers to the space we just allocated to @e, and mark @sectors space
1574 * as allocated out of the open buckets in @wp
1576 void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
1577 struct bkey_i_extent *e, unsigned sectors)
1581 BUG_ON(sectors > wp->sectors_free);
1582 wp->sectors_free -= sectors;
1584 for (i = 0; i < wp->nr_ptrs_can_use; i++) {
1585 struct open_bucket *ob = wp->ptrs[i];
1586 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
1587 struct bch_extent_ptr tmp = ob->ptr;
1589 EBUG_ON(bch2_extent_has_device(extent_i_to_s_c(e), ob->ptr.dev));
1591 tmp.cached = bkey_extent_is_cached(&e->k);
1592 tmp.offset += ca->mi.bucket_size - ob->sectors_free;
1593 extent_ptr_append(e, tmp);
1595 BUG_ON(sectors > ob->sectors_free);
1596 ob->sectors_free -= sectors;
1601 * Finish an allocation: release @wp, dropping our references to any open
1602 * buckets that are now full
1604 void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
1608 for (i = wp->nr_ptrs - 1; i >= 0; --i) {
1609 struct open_bucket *ob = wp->ptrs[i];
1611 if (!ob->sectors_free) {
1612 array_remove_item(wp->ptrs, wp->nr_ptrs, i);
1613 bch2_open_bucket_put(c, ob);
1617 mutex_unlock(&wp->lock);
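/*
 * Sketch of how the three entry points above fit together in a writer
 * (argument lists abbreviated and error handling elided; @op_sectors is a
 * stand-in for however many sectors the caller still has to write):
 */
#if 0
	wp = bch2_alloc_sectors_start(c, ...);	/* returns with wp->lock held */
	if (IS_ERR(wp))
		return PTR_ERR(wp);

	sectors = min(wp->sectors_free, op_sectors);
	bch2_alloc_sectors_append_ptrs(c, wp, e, sectors);

	bch2_alloc_sectors_done(c, wp);		/* drops full buckets, unlocks */
#endif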
1620 /* Startup/shutdown (ro/rw): */
1622 void bch2_recalc_capacity(struct bch_fs *c)
1624 struct bch_tier *fastest_tier = NULL, *slowest_tier = NULL, *tier;
1626 u64 total_capacity, capacity = 0, reserved_sectors = 0;
1627 unsigned long ra_pages = 0;
1630 lockdep_assert_held(&c->state_lock);
1632 for_each_online_member(ca, c, i) {
1633 struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_bdi;
1635 ra_pages += bdi->ra_pages;
1638 bch2_set_ra_pages(c, ra_pages);
1640 /* Find fastest, slowest tiers with devices: */
1642 for (tier = c->tiers;
1643 tier < c->tiers + ARRAY_SIZE(c->tiers); tier++) {
1644 if (!dev_mask_nr(&tier->devs))
1647 fastest_tier = tier;
1648 slowest_tier = tier;
1651 c->fastest_tier = fastest_tier != slowest_tier ? fastest_tier : NULL;
1652 c->fastest_devs = fastest_tier != slowest_tier ? &fastest_tier->devs : NULL;
1658 * Capacity of the filesystem is the capacity of all the devices in the
1659 * slowest (highest) tier - we don't include lower tier devices.
1661 for_each_member_device_rcu(ca, c, i, &slowest_tier->devs) {
1665 * We need to reserve buckets (from the number
1666 * of currently available buckets) against
1667 * foreground writes so that mainly copygc can
1668 * make forward progress.
1670 * We need enough to refill the various reserves
1671 * from scratch - copygc will use its entire
1672 * reserve all at once, then run again when
1673 * its reserve is refilled (from the formerly
1674 * available buckets).
1676 * This reserve is just used when considering if
1677 * allocations for foreground writes must wait -
1678 * not -ENOSPC calculations.
1680 for (j = 0; j < RESERVE_NONE; j++)
1681 reserve += ca->free[j].size;
1683 reserve += ca->free_inc.size;
1685 reserve += ARRAY_SIZE(c->write_points);
1688 reserve += 1; /* tiering write point */
1689 reserve += 1; /* btree write point */
1691 reserved_sectors += bucket_to_sector(ca, reserve);
1693 capacity += bucket_to_sector(ca, ca->mi.nbuckets -
1694 ca->mi.first_bucket);
1697 total_capacity = capacity;
1699 capacity *= (100 - c->opts.gc_reserve_percent);
1700 capacity = div64_u64(capacity, 100);
1702 BUG_ON(reserved_sectors > total_capacity);
1704 capacity = min(capacity, total_capacity - reserved_sectors);
1706 c->capacity = capacity;
1709 bch2_io_timer_add(&c->io_clock[READ],
1710 &c->prio_clock[READ].rescale);
1711 bch2_io_timer_add(&c->io_clock[WRITE],
1712 &c->prio_clock[WRITE].rescale);
1714 bch2_io_timer_del(&c->io_clock[READ],
1715 &c->prio_clock[READ].rescale);
1716 bch2_io_timer_del(&c->io_clock[WRITE],
1717 &c->prio_clock[WRITE].rescale);
1720 /* Wake up in case someone was waiting for buckets */
1721 closure_wake_up(&c->freelist_wait);
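/*
 * Worked example of the calculation above: with 100 GB of usable space in the
 * slowest tier, gc_reserve_percent of 20 and 1 GB worth of reserved buckets,
 * the advertised capacity is min(100 GB * 80 / 100, 100 GB - 1 GB) = 80 GB.
 */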
1724 static void bch2_stop_write_point(struct bch_fs *c, struct bch_dev *ca,
1725 struct write_point *wp)
1727 struct bch_devs_mask not_self;
1729 bitmap_complement(not_self.d, ca->self.d, BCH_SB_MEMBERS_MAX);
1731 mutex_lock(&wp->lock);
1732 writepoint_drop_ptrs(c, wp, ¬_self, wp->nr_ptrs);
1733 mutex_unlock(&wp->lock);
1736 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
1738 struct open_bucket *ob;
1741 for (ob = c->open_buckets;
1742 ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1744 spin_lock(&ob->lock);
1745 if (ob->valid && !ob->on_partial_list &&
1746 ob->ptr.dev == ca->dev_idx)
1748 spin_unlock(&ob->lock);
1754 /* device goes ro: */
1755 void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
1759 BUG_ON(ca->alloc_thread);
1761 /* First, remove device from allocation groups: */
1763 clear_bit(ca->dev_idx, c->tiers[ca->mi.tier].devs.d);
1764 for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1765 clear_bit(ca->dev_idx, c->rw_devs[i].d);
1768 * Capacity is calculated based off of devices in allocation groups:
1770 bch2_recalc_capacity(c);
1772 /* Next, close write points that point to this device... */
1773 for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
1774 bch2_stop_write_point(c, ca, &c->write_points[i]);
1776 bch2_stop_write_point(c, ca, &ca->copygc_write_point);
1777 bch2_stop_write_point(c, ca, &c->tiers[ca->mi.tier].wp);
1778 bch2_stop_write_point(c, ca, &c->btree_write_point);
1780 mutex_lock(&c->btree_reserve_cache_lock);
1781 while (c->btree_reserve_cache_nr) {
1782 struct btree_alloc *a =
1783 &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
1785 bch2_open_bucket_put_refs(c, &a->ob.nr, a->ob.refs);
1787 mutex_unlock(&c->btree_reserve_cache_lock);
1790 * Wake up threads that were blocked on allocation, so they can notice
1791 * the device can no longer be removed and the capacity has changed:
1793 closure_wake_up(&c->freelist_wait);
1796 * journal_res_get() can block waiting for free space in the journal -
1797 * it needs to notice there may not be devices to allocate from anymore:
1799 wake_up(&c->journal.wait);
1801 /* Now wait for any in flight writes: */
1803 closure_wait_event(&c->open_buckets_wait,
1804 !bch2_dev_has_open_write_point(c, ca));
1807 /* device goes rw: */
1808 void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
1812 for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1813 if (ca->mi.data_allowed & (1 << i))
1814 set_bit(ca->dev_idx, c->rw_devs[i].d);
1815 set_bit(ca->dev_idx, c->tiers[ca->mi.tier].devs.d);
1818 /* stop allocator thread: */
1819 void bch2_dev_allocator_stop(struct bch_dev *ca)
1821 struct task_struct *p = ca->alloc_thread;
1823 ca->alloc_thread = NULL;
1826 * We need an rcu barrier between setting ca->alloc_thread = NULL and
1827 * the thread shutting down to avoid bch2_wake_allocator() racing:
1829 * XXX: it would be better to have the rcu barrier be asynchronous
1830 * instead of blocking us here
1840 /* start allocator thread: */
1841 int bch2_dev_allocator_start(struct bch_dev *ca)
1843 struct task_struct *p;
1846 * allocator thread already started?
1848 if (ca->alloc_thread)
1851 p = kthread_create(bch2_allocator_thread, ca, "bcache_allocator");
1856 ca->alloc_thread = p;
1861 void bch2_fs_allocator_init(struct bch_fs *c)
1863 struct open_bucket *ob;
1864 struct write_point *wp;
1867 mutex_init(&c->write_points_hash_lock);
1868 spin_lock_init(&c->freelist_lock);
1869 bch2_prio_timer_init(c, READ);
1870 bch2_prio_timer_init(c, WRITE);
1872 /* open bucket 0 is a sentinel NULL: */
1873 spin_lock_init(&c->open_buckets[0].lock);
1875 for (ob = c->open_buckets + 1;
1876 ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
1877 spin_lock_init(&ob->lock);
1878 c->open_buckets_nr_free++;
1880 ob->freelist = c->open_buckets_freelist;
1881 c->open_buckets_freelist = ob - c->open_buckets;
1884 writepoint_init(&c->btree_write_point, BCH_DATA_BTREE);
1886 for (i = 0; i < ARRAY_SIZE(c->tiers); i++)
1887 writepoint_init(&c->tiers[i].wp, BCH_DATA_USER);
1889 for (wp = c->write_points;
1890 wp < c->write_points + ARRAY_SIZE(c->write_points); wp++) {
1891 writepoint_init(wp, BCH_DATA_USER);
1893 wp->last_used = sched_clock();
1894 wp->write_point = (unsigned long) wp;
1895 hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
1898 c->pd_controllers_update_seconds = 5;
1899 INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update);