+// SPDX-License-Identifier: GPL-2.0
/*
* Moving/copying garbage collector
*
*/
#include "bcachefs.h"
+#include "alloc_background.h"
+#include "alloc_foreground.h"
#include "btree_iter.h"
+#include "btree_update.h"
+#include "btree_write_buffer.h"
#include "buckets.h"
#include "clock.h"
-#include "extents.h"
-#include "io.h"
-#include "keylist.h"
+#include "errcode.h"
+#include "error.h"
+#include "lru.h"
#include "move.h"
#include "movinggc.h"
+#include "trace.h"
-#include <trace/events/bcachefs.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/math64.h>
+#include <linux/sched/task.h>
#include <linux/wait.h>
-/* Moving GC - IO loop */
-
-static const struct bch_extent_ptr *moving_pred(struct bch_dev *ca,
- struct bkey_s_c k)
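+/*
+ * Buckets currently being evacuated: a hash table keyed by bucket (for
+ * duplicate detection) plus a singly linked FIFO list, so completed moves can
+ * be reaped oldest first, with running totals of buckets and sectors in
+ * flight.
+ */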
+struct buckets_in_flight {
+ struct rhashtable table;
+ struct move_bucket_in_flight *first;
+ struct move_bucket_in_flight *last;
+ size_t nr;
+ size_t sectors;
+};
+
+static const struct rhashtable_params bch_move_bucket_params = {
+ .head_offset = offsetof(struct move_bucket_in_flight, hash),
+ .key_offset = offsetof(struct move_bucket_in_flight, bucket.k),
+ .key_len = sizeof(struct move_bucket_key),
+};
+
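+/*
+ * Start tracking a bucket evacuation: allocate a move_bucket_in_flight,
+ * insert it into the hash table (failing with -EEXIST if the bucket is
+ * already in flight) and append it to the tail of the FIFO list.
+ */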
+static struct move_bucket_in_flight *
+move_bucket_in_flight_add(struct buckets_in_flight *list, struct move_bucket b)
{
- const struct bch_extent_ptr *ptr;
+ struct move_bucket_in_flight *new = kzalloc(sizeof(*new), GFP_KERNEL);
+ int ret;
+
+ if (!new)
+ return ERR_PTR(-ENOMEM);
+
+ new->bucket = b;
+
+ ret = rhashtable_lookup_insert_fast(&list->table, &new->hash,
+ bch_move_bucket_params);
+ if (ret) {
+ kfree(new);
+ return ERR_PTR(ret);
+ }
- if (bkey_extent_is_data(k.k) &&
- (ptr = bch2_extent_has_device(bkey_s_c_to_extent(k),
- ca->dev_idx)) &&
- PTR_BUCKET(ca, ptr)->mark.copygc)
- return ptr;
+ if (!list->first)
+ list->first = new;
+ else
+ list->last->next = new;
- return NULL;
+ list->last = new;
+ list->nr++;
+ list->sectors += b.sectors;
+ return new;
}
-static int issue_moving_gc_move(struct bch_dev *ca,
- struct moving_context *ctxt,
- struct bkey_s_c k)
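+/*
+ * Decide whether a bucket is worth evacuating: it must not be open for
+ * writes, and its alloc key must show movable data whose fragmentation LRU
+ * position is at or before @time. Also records the bucket's generation and
+ * dirty sector count in @b.
+ */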
+static int bch2_bucket_is_movable(struct btree_trans *trans,
+ struct move_bucket *b, u64 time)
{
- struct bch_fs *c = ca->fs;
- const struct bch_extent_ptr *ptr;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bch_alloc_v4 _a;
+ const struct bch_alloc_v4 *a;
int ret;
- ptr = moving_pred(ca, k);
- if (!ptr) /* We raced - bucket's been reused */
+ if (bch2_bucket_is_open(trans->c,
+ b->k.bucket.inode,
+ b->k.bucket.offset))
return 0;
- ret = bch2_data_move(c, ctxt, &ca->copygc_write_point, k, ptr);
- if (!ret)
- trace_gc_copy(k.k);
- else
- trace_moving_gc_alloc_fail(c, k.k->size);
+ k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
+ b->k.bucket, BTREE_ITER_CACHED);
+ ret = bkey_err(k);
+ if (ret)
+ return ret;
+
+ a = bch2_alloc_to_v4(k, &_a);
+ b->k.gen = a->gen;
+ b->sectors = a->dirty_sectors;
+
+ ret = data_type_movable(a->data_type) &&
+ a->fragmentation_lru &&
+ a->fragmentation_lru <= time;
+
+ bch2_trans_iter_exit(trans, &iter);
return ret;
}
-static void read_moving(struct bch_dev *ca, size_t buckets_to_move,
- u64 sectors_to_move)
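+/*
+ * Reap finished evacuations from the head of the in-flight list; if @flush is
+ * set, wait for each bucket's outstanding moves to complete instead of
+ * stopping at the first busy bucket.
+ */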
+static void move_buckets_wait(struct btree_trans *trans,
+ struct moving_context *ctxt,
+ struct buckets_in_flight *list,
+ bool flush)
{
- struct bch_fs *c = ca->fs;
- struct bucket *g;
- struct moving_context ctxt;
- struct btree_iter iter;
- struct bkey_s_c k;
- u64 sectors_not_moved = 0;
- size_t buckets_not_moved = 0;
-
- bch2_ratelimit_reset(&ca->moving_gc_pd.rate);
- bch2_move_ctxt_init(&ctxt, &ca->moving_gc_pd.rate,
- SECTORS_IN_FLIGHT_PER_DEVICE);
- bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN);
-
- while (1) {
- if (kthread_should_stop())
- goto out;
- if (bch2_move_ctxt_wait(&ctxt))
- goto out;
- k = bch2_btree_iter_peek(&iter);
- if (!k.k)
- break;
- if (btree_iter_err(k))
- goto out;
-
- if (!moving_pred(ca, k))
- goto next;
+ struct move_bucket_in_flight *i;
+ int ret;
- if (issue_moving_gc_move(ca, &ctxt, k)) {
- bch2_btree_iter_unlock(&iter);
+ while ((i = list->first)) {
+ if (flush)
+ move_ctxt_wait_event(ctxt, trans, !atomic_read(&i->count));
- /* memory allocation failure, wait for some IO to finish */
- bch2_move_ctxt_wait_for_io(&ctxt);
- continue;
- }
-next:
- bch2_btree_iter_advance_pos(&iter);
- //bch2_btree_iter_cond_resched(&iter);
+ if (atomic_read(&i->count))
+ break;
- /* unlock before calling moving_context_wait() */
- bch2_btree_iter_unlock(&iter);
- cond_resched();
- }
+ list->first = i->next;
+ if (!list->first)
+ list->last = NULL;
- bch2_btree_iter_unlock(&iter);
- bch2_move_ctxt_exit(&ctxt);
- trace_moving_gc_end(ca, ctxt.sectors_moved, ctxt.keys_moved,
- buckets_to_move);
+ list->nr--;
+ list->sectors -= i->bucket.sectors;
- /* don't check this if we bailed out early: */
- for_each_bucket(g, ca)
- if (g->mark.copygc && bucket_sectors_used(g)) {
- sectors_not_moved += bucket_sectors_used(g);
- buckets_not_moved++;
- }
+ ret = rhashtable_remove_fast(&list->table, &i->hash,
+ bch_move_bucket_params);
+ BUG_ON(ret);
+ kfree(i);
+ }
- if (sectors_not_moved)
- bch_warn(c, "copygc finished but %llu/%llu sectors, %zu/%zu buckets not moved",
- sectors_not_moved, sectors_to_move,
- buckets_not_moved, buckets_to_move);
- return;
-out:
- bch2_btree_iter_unlock(&iter);
- bch2_move_ctxt_exit(&ctxt);
- trace_moving_gc_end(ca, ctxt.sectors_moved, ctxt.keys_moved,
- buckets_to_move);
+ bch2_trans_unlock(trans);
}
-static bool have_copygc_reserve(struct bch_dev *ca)
+static bool bucket_in_flight(struct buckets_in_flight *list,
+ struct move_bucket_key k)
{
- bool ret;
-
- spin_lock(&ca->freelist_lock);
- ret = fifo_used(&ca->free[RESERVE_MOVINGGC]) >=
- COPYGC_BUCKETS_PER_ITER(ca);
- spin_unlock(&ca->freelist_lock);
-
- return ret;
+ return rhashtable_lookup_fast(&list->table, &k, bch_move_bucket_params);
}
-static void bch2_moving_gc(struct bch_dev *ca)
-{
- struct bch_fs *c = ca->fs;
- struct bucket *g;
- struct bucket_mark new;
- u64 sectors_to_move;
- size_t buckets_to_move, buckets_unused = 0;
- struct bucket_heap_entry e;
- unsigned sectors_used, i;
- int reserve_sectors;
-
- if (!have_copygc_reserve(ca)) {
- struct closure cl;
-
- closure_init_stack(&cl);
- while (1) {
- closure_wait(&c->freelist_wait, &cl);
- if (have_copygc_reserve(ca))
- break;
- closure_sync(&cl);
- }
- closure_wake_up(&c->freelist_wait);
- }
+typedef DARRAY(struct move_bucket) move_buckets;
- reserve_sectors = COPYGC_SECTORS_PER_ITER(ca);
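+/*
+ * Walk the fragmentation LRU for candidate buckets, skipping any that are no
+ * longer movable or are already being evacuated, until we've collected
+ * roughly a quarter as many new buckets as are currently in flight (16 at
+ * minimum). The btree write buffer is flushed first so the LRU btree is up to
+ * date.
+ */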
+static int bch2_copygc_get_buckets(struct btree_trans *trans,
+ struct moving_context *ctxt,
+ struct buckets_in_flight *buckets_in_flight,
+ move_buckets *buckets)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ size_t nr_to_get = max_t(size_t, 16U, buckets_in_flight->nr / 4);
+ size_t saw = 0, in_flight = 0, not_movable = 0, sectors = 0;
+ int ret;
- trace_moving_gc_start(ca);
+ move_buckets_wait(trans, ctxt, buckets_in_flight, false);
+
+ ret = bch2_btree_write_buffer_flush(trans);
+ if (bch2_fs_fatal_err_on(ret, c, "%s: error %s from bch2_btree_write_buffer_flush()",
+ __func__, bch2_err_str(ret)))
+ return ret;
+
+ ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_lru,
+ lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0),
+ lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
+ 0, k, ({
+ struct move_bucket b = { .k.bucket = u64_to_bucket(k.k->p.offset) };
+ int ret2 = 0;
+
+ saw++;
+
+ if (!bch2_bucket_is_movable(trans, &b, lru_pos_time(k.k->p)))
+ not_movable++;
+ else if (bucket_in_flight(buckets_in_flight, b.k))
+ in_flight++;
+ else {
+ ret2 = darray_push(buckets, b) ?: buckets->nr >= nr_to_get;
+ if (ret2 >= 0)
+ sectors += b.sectors;
+ }
+ ret2;
+ }));
- /*
- * Find buckets with lowest sector counts, skipping completely
- * empty buckets, by building a maxheap sorted by sector count,
- * and repeatedly replacing the maximum element until all
- * buckets have been visited.
- */
+ pr_debug("have: %zu (%zu) saw %zu in flight %zu not movable %zu got %zu (%zu)/%zu buckets ret %i",
+ buckets_in_flight->nr, buckets_in_flight->sectors,
+ saw, in_flight, not_movable, buckets->nr, sectors, nr_to_get, ret);
- /*
- * We need bucket marks to be up to date, so gc can't be recalculating
- * them, and we don't want the allocator invalidating a bucket after
- * we've decided to evacuate it but before we set copygc:
- */
- down_read(&c->gc_lock);
- mutex_lock(&ca->heap_lock);
- mutex_lock(&ca->fs->bucket_lock);
+ return ret < 0 ? ret : 0;
+}
- ca->heap.used = 0;
- for_each_bucket(g, ca) {
- bucket_cmpxchg(g, new, new.copygc = 0);
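+/*
+ * One copygc pass: grab a batch of fragmented buckets and evacuate their live
+ * data to the copygc write point, tracking each bucket as in flight until its
+ * moves complete.
+ */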
+noinline
+static int bch2_copygc(struct btree_trans *trans,
+ struct moving_context *ctxt,
+ struct buckets_in_flight *buckets_in_flight)
+{
+ struct bch_fs *c = trans->c;
+ struct data_update_opts data_opts = {
+ .btree_insert_flags = BCH_WATERMARK_copygc,
+ };
+ move_buckets buckets = { 0 };
+ struct move_bucket_in_flight *f;
+ struct move_bucket *i;
+ u64 moved = atomic64_read(&ctxt->stats->sectors_moved);
+ int ret = 0;
+
+ ret = bch2_copygc_get_buckets(trans, ctxt, buckets_in_flight, &buckets);
+ if (ret)
+ goto err;
+
+ darray_for_each(buckets, i) {
+ if (unlikely(freezing(current)))
+ break;
- if (bucket_unused(g)) {
- buckets_unused++;
+ f = move_bucket_in_flight_add(buckets_in_flight, *i);
+ ret = PTR_ERR_OR_ZERO(f);
+ if (ret == -EEXIST) { /* rare race: copygc_get_buckets returned same bucket more than once */
+ ret = 0;
continue;
}
+ if (ret == -ENOMEM) { /* flush IO, continue later */
+ ret = 0;
+ break;
+ }
- if (g->mark.owned_by_allocator ||
- g->mark.data_type != BUCKET_DATA)
- continue;
+ ret = __bch2_evacuate_bucket(trans, ctxt, f, f->bucket.k.bucket,
+ f->bucket.k.gen, data_opts);
+ if (ret)
+ goto err;
+ }
+err:
+ darray_exit(&buckets);
- sectors_used = bucket_sectors_used(g);
+ /* no entries in LRU btree found, or got to end: */
+ if (bch2_err_matches(ret, ENOENT))
+ ret = 0;
- if (sectors_used >= ca->mi.bucket_size)
- continue;
+ if (ret < 0 && !bch2_err_matches(ret, EROFS))
+ bch_err_msg(c, ret, "from bch2_move_data()");
- bucket_heap_push(ca, g, sectors_used);
- }
+ moved = atomic64_read(&ctxt->stats->sectors_moved) - moved;
+ trace_and_count(c, copygc, c, moved, 0, 0, 0);
+ return ret;
+}
+
+/*
+ * Copygc runs when the amount of fragmented data is above some arbitrary
+ * threshold:
+ *
+ * The threshold at the limit - when the device is full - is the amount of space
+ * we reserved in bch2_recalc_capacity; we can't have more than that amount of
+ * disk space stranded due to fragmentation and still store everything we have
+ * promised to store.
+ *
+ * But we don't want to be running copygc unnecessarily when the device still
+ * has plenty of free space - rather, we want copygc to run smoothly every so
+ * often and continually reduce the amount of fragmented space as the device
+ * fills up. So, we increase the threshold by half the current free space.
+ */
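+/*
+ * Concretely: for each rw device, wait = max(0, free space / 2 - currently
+ * fragmented space), and the overall wait is the minimum across devices, in
+ * sectors on the write IO clock. E.g. with 10GB free and 2GB of fragmented
+ * data on a device, the computed wait is 3GB worth of writes.
+ */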
+unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ unsigned dev_idx;
+ s64 wait = S64_MAX, fragmented_allowed, fragmented;
+ unsigned i;
- sectors_to_move = 0;
- for (i = 0; i < ca->heap.used; i++)
- sectors_to_move += ca->heap.data[i].val;
+ for_each_rw_member(ca, c, dev_idx) {
+ struct bch_dev_usage usage = bch2_dev_usage_read(ca);
- while (sectors_to_move > COPYGC_SECTORS_PER_ITER(ca)) {
- BUG_ON(!heap_pop(&ca->heap, e, bucket_min_cmp));
- sectors_to_move -= e.val;
- }
+ fragmented_allowed = ((__dev_buckets_available(ca, usage, BCH_WATERMARK_stripe) *
+ ca->mi.bucket_size) >> 1);
+ fragmented = 0;
- for (i = 0; i < ca->heap.used; i++)
- bucket_cmpxchg(ca->heap.data[i].g, new, new.copygc = 1);
+ for (i = 0; i < BCH_DATA_NR; i++)
+ if (data_type_movable(i))
+ fragmented += usage.d[i].fragmented;
- buckets_to_move = ca->heap.used;
+ wait = min(wait, max(0LL, fragmented_allowed - fragmented));
+ }
- mutex_unlock(&ca->fs->bucket_lock);
- mutex_unlock(&ca->heap_lock);
- up_read(&c->gc_lock);
+ return wait;
+}
- read_moving(ca, buckets_to_move, sectors_to_move);
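+/*
+ * Print current copygc wait state: remaining wait, time spent waiting so far,
+ * and the currently computed wait amount.
+ */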
+void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ prt_printf(out, "Currently waiting for: ");
+ prt_human_readable_u64(out, max(0LL, c->copygc_wait -
+ atomic64_read(&c->io_clock[WRITE].now)) << 9);
+ prt_newline(out);
+
+ prt_printf(out, "Currently waiting since: ");
+ prt_human_readable_u64(out, max(0LL,
+ atomic64_read(&c->io_clock[WRITE].now) -
+ c->copygc_wait_at) << 9);
+ prt_newline(out);
+
+ prt_printf(out, "Currently calculated wait: ");
+ prt_human_readable_u64(out, bch2_copygc_wait_amount(c));
+ prt_newline(out);
}
-static int bch2_moving_gc_thread(void *arg)
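+/*
+ * Copygc thread: sleep on the write IO clock until the computed wait elapses,
+ * then run copygc passes; in-flight buckets are flushed before freezing or
+ * when copygc is disabled.
+ */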
+static int bch2_copygc_thread(void *arg)
{
- struct bch_dev *ca = arg;
- struct bch_fs *c = ca->fs;
+ struct bch_fs *c = arg;
+ struct btree_trans *trans;
+ struct moving_context ctxt;
+ struct bch_move_stats move_stats;
struct io_clock *clock = &c->io_clock[WRITE];
- unsigned long last;
- u64 available, want, next;
+ struct buckets_in_flight buckets;
+ u64 last, wait;
+ int ret = 0;
+
+ memset(&buckets, 0, sizeof(buckets));
+
+ ret = rhashtable_init(&buckets.table, &bch_move_bucket_params);
+ if (ret) {
+ bch_err_msg(c, ret, "allocating copygc buckets in flight");
+ return ret;
+ }
set_freezable();
+ trans = bch2_trans_get(c);
- while (!kthread_should_stop()) {
- if (kthread_wait_freezable(c->copy_gc_enabled))
- break;
+ bch2_move_stats_init(&move_stats, "copygc");
+ bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats,
+ writepoint_ptr(&c->copygc_write_point),
+ false);
+
+ while (!ret && !kthread_should_stop()) {
+ bch2_trans_unlock(trans);
+ cond_resched();
+
+ if (!c->copy_gc_enabled) {
+ move_buckets_wait(trans, &ctxt, &buckets, true);
+ kthread_wait_freezable(c->copy_gc_enabled);
+ }
+
+ if (unlikely(freezing(current))) {
+ move_buckets_wait(trans, &ctxt, &buckets, true);
+ __refrigerator(false);
+ continue;
+ }
- last = atomic_long_read(&clock->now);
- /*
- * don't start copygc until less than half the gc reserve is
- * available:
- */
- available = dev_buckets_available(ca);
- want = div64_u64((ca->mi.nbuckets - ca->mi.first_bucket) *
- c->opts.gc_reserve_percent, 200);
- if (available > want) {
- next = last + (available - want) *
- ca->mi.bucket_size;
- bch2_kthread_io_clock_wait(clock, next);
+ last = atomic64_read(&clock->now);
+ wait = bch2_copygc_wait_amount(c);
+
+ if (wait > clock->max_slop) {
+ c->copygc_wait_at = last;
+ c->copygc_wait = last + wait;
+ move_buckets_wait(trans, &ctxt, &buckets, true);
+ trace_and_count(c, copygc_wait, c, wait, last + wait);
+ bch2_kthread_io_clock_wait(clock, last + wait,
+ MAX_SCHEDULE_TIMEOUT);
continue;
}
- bch2_moving_gc(ca);
+ c->copygc_wait = 0;
+
+ c->copygc_running = true;
+ ret = bch2_copygc(trans, &ctxt, &buckets);
+ c->copygc_running = false;
+
+ wake_up(&c->copygc_running_wq);
}
+ move_buckets_wait(trans, &ctxt, &buckets, true);
+ rhashtable_destroy(&buckets.table);
+ bch2_trans_put(trans);
+ bch2_moving_ctxt_exit(&ctxt);
+
return 0;
}
-void bch2_moving_gc_stop(struct bch_dev *ca)
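+/*
+ * Stop the copygc thread; put_task_struct() pairs with the get_task_struct()
+ * in bch2_copygc_start().
+ */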
+void bch2_copygc_stop(struct bch_fs *c)
{
- ca->moving_gc_pd.rate.rate = UINT_MAX;
- bch2_ratelimit_reset(&ca->moving_gc_pd.rate);
-
- if (ca->moving_gc_read)
- kthread_stop(ca->moving_gc_read);
- ca->moving_gc_read = NULL;
+ if (c->copygc_thread) {
+ kthread_stop(c->copygc_thread);
+ put_task_struct(c->copygc_thread);
+ }
+ c->copygc_thread = NULL;
}
-int bch2_moving_gc_start(struct bch_dev *ca)
+int bch2_copygc_start(struct bch_fs *c)
{
struct task_struct *t;
+ int ret;
- BUG_ON(ca->moving_gc_read);
+ if (c->copygc_thread)
+ return 0;
- if (ca->fs->opts.nochanges)
+ if (c->opts.nochanges)
return 0;
- if (bch2_fs_init_fault("moving_gc_start"))
+ if (bch2_fs_init_fault("copygc_start"))
return -ENOMEM;
- t = kthread_create(bch2_moving_gc_thread, ca, "bch_copygc_read");
- if (IS_ERR(t))
- return PTR_ERR(t);
+ t = kthread_create(bch2_copygc_thread, c, "bch-copygc/%s", c->name);
+ ret = PTR_ERR_OR_ZERO(t);
+ if (ret) {
+ bch_err_msg(c, ret, "creating copygc thread");
+ return ret;
+ }
+
+ get_task_struct(t);
- ca->moving_gc_read = t;
- wake_up_process(ca->moving_gc_read);
+ c->copygc_thread = t;
+ wake_up_process(c->copygc_thread);
return 0;
}
-void bch2_dev_moving_gc_init(struct bch_dev *ca)
+void bch2_fs_copygc_init(struct bch_fs *c)
{
- bch2_pd_controller_init(&ca->moving_gc_pd);
- ca->moving_gc_pd.d_term = 0;
+ init_waitqueue_head(&c->copygc_running_wq);
+ c->copygc_running = false;
}