*/
#include "bcachefs.h"
+#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "btree_update.h"
+#include "btree_write_buffer.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
+#include "errcode.h"
#include "error.h"
#include "extents.h"
#include "eytzinger.h"
#include "io.h"
#include "keylist.h"
+#include "lru.h"
#include "move.h"
#include "movinggc.h"
#include "super-io.h"
#include <linux/sort.h>
#include <linux/wait.h>
-/*
- * We can't use the entire copygc reserve in one iteration of copygc: we may
- * need the buckets we're freeing up to go back into the copygc reserve to make
- * forward progress, but if the copygc reserve is full they'll be available for
- * any allocation - and it's possible that in a given iteration, we free up most
- * of the buckets we're going to free before we allocate most of the buckets
- * we're going to allocate.
- *
- * If we only use half of the reserve per iteration, then in steady state we'll
- * always have room in the reserve for the buckets we're going to need in the
- * next iteration:
- */
-#define COPYGC_BUCKETS_PER_ITER(ca) \
- ((ca)->free[RESERVE_MOVINGGC].size / 2)
-
-static int bucket_offset_cmp(const void *_l, const void *_r, size_t size)
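+/*
+ * Returns nonzero if @bucket holds data copygc can move, zero if it doesn't,
+ * or a negative error; *gen is filled in with the bucket's generation:
+ */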
+static int bch2_bucket_is_movable(struct btree_trans *trans,
+ struct bpos bucket, u64 time, u8 *gen)
{
- const struct copygc_heap_entry *l = _l;
- const struct copygc_heap_entry *r = _r;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bch_alloc_v4 _a;
+ const struct bch_alloc_v4 *a;
+ int ret;
- return cmp_int(l->dev, r->dev) ?:
- cmp_int(l->offset, r->offset);
-}
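+
+ /* Buckets that are currently open for writes can't be evacuated: */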
+ if (bch2_bucket_is_open(trans->c, bucket.inode, bucket.offset))
+ return 0;
-static enum data_cmd copygc_pred(struct bch_fs *c, void *arg,
- struct bkey_s_c k,
- struct bch_io_opts *io_opts,
- struct data_opts *data_opts)
-{
- copygc_heap *h = &c->copygc_heap;
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p = { 0 };
-
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
- struct copygc_heap_entry search = {
- .dev = p.ptr.dev,
- .offset = p.ptr.offset,
- };
-
- ssize_t i = eytzinger0_find_le(h->data, h->used,
- sizeof(h->data[0]),
- bucket_offset_cmp, &search);
-#if 0
- /* eytzinger search verify code: */
- ssize_t j = -1, k;
-
- for (k = 0; k < h->used; k++)
- if (h->data[k].offset <= ptr->offset &&
- (j < 0 || h->data[k].offset > h->data[j].offset))
- j = k;
-
- BUG_ON(i != j);
-#endif
- if (i >= 0 &&
- p.ptr.dev == h->data[i].dev &&
- p.ptr.offset < h->data[i].offset + ca->mi.bucket_size &&
- p.ptr.gen == h->data[i].gen) {
- /*
- * We need to use the journal reserve here, because
- * - journal reclaim depends on btree key cache
- * flushing to make forward progress,
- * - which has to make forward progress when the
- * journal is pre-reservation full,
- * - and depends on allocation - meaning allocator and
- * copygc
- */
-
- data_opts->target = io_opts->background_target;
- data_opts->nr_replicas = 1;
- data_opts->btree_insert_flags = BTREE_INSERT_USE_RESERVE|
- BTREE_INSERT_JOURNAL_RESERVED;
- data_opts->rewrite_dev = p.ptr.dev;
-
- if (p.has_ec)
- data_opts->nr_replicas += p.ec.redundancy;
-
- return DATA_REWRITE;
- }
- }
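+
+ /* Look up this bucket's alloc key: */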
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, bucket, 0);
+ k = bch2_btree_iter_peek_slot(&iter);
+ ret = bkey_err(k);
+ bch2_trans_iter_exit(trans, &iter);
- return DATA_SKIP;
-}
+ if (ret)
+ return ret;
-static bool have_copygc_reserve(struct bch_dev *ca)
-{
- bool ret;
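+
+ /*
+ * A bucket is movable if it holds btree or user data, and its
+ * fragmentation LRU time is at or before @time:
+ */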
+ a = bch2_alloc_to_v4(k, &_a);
+ *gen = a->gen;
+ ret = (a->data_type == BCH_DATA_btree ||
+ a->data_type == BCH_DATA_user) &&
+ a->fragmentation_lru &&
+ a->fragmentation_lru <= time;
- spin_lock(&ca->fs->freelist_lock);
- ret = fifo_full(&ca->free[RESERVE_MOVINGGC]) ||
- ca->allocator_state != ALLOCATOR_running;
- spin_unlock(&ca->fs->freelist_lock);
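+
+ /* For debugging, log the alloc key of each bucket we decide is movable: */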
+ if (ret) {
+ struct printbuf buf = PRINTBUF;
- return ret;
-}
+ bch2_bkey_val_to_text(&buf, trans->c, k);
+ pr_debug("%s", buf.buf);
+ printbuf_exit(&buf);
+ }
-static inline int fragmentation_cmp(copygc_heap *heap,
- struct copygc_heap_entry l,
- struct copygc_heap_entry r)
-{
- return cmp_int(l.fragmentation, r.fragmentation);
+ return ret;
}
-static int bch2_copygc(struct bch_fs *c)
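+/*
+ * Find the next bucket for copygc to evacuate, by scanning the fragmentation
+ * LRU btree from *pos:
+ */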
+static int bch2_copygc_next_bucket(struct btree_trans *trans,
+ struct bpos *bucket, u8 *gen, struct bpos *pos)
{
- copygc_heap *h = &c->copygc_heap;
- struct copygc_heap_entry e, *i;
- struct bucket_array *buckets;
- struct bch_move_stats move_stats;
- u64 sectors_to_move = 0, sectors_to_write = 0, sectors_not_moved = 0;
- u64 sectors_reserved = 0;
- u64 buckets_to_move, buckets_not_moved = 0;
- struct bch_dev *ca;
- unsigned dev_idx;
- size_t b, heap_size = 0;
+ struct btree_iter iter;
+ struct bkey_s_c k;
int ret;
- bch_move_stats_init(&move_stats, "copygc");
-
- /*
- * Find buckets with lowest sector counts, skipping completely
- * empty buckets, by building a maxheap sorted by sector count,
- * and repeatedly replacing the maximum element until all
- * buckets have been visited.
- */
- h->used = 0;
-
- for_each_rw_member(ca, c, dev_idx)
- heap_size += ca->mi.nbuckets >> 7;
-
- if (h->size < heap_size) {
- free_heap(&c->copygc_heap);
- if (!init_heap(&c->copygc_heap, heap_size, GFP_KERNEL)) {
- bch_err(c, "error allocating copygc heap");
- return 0;
- }
- }
-
- for_each_rw_member(ca, c, dev_idx) {
- closure_wait_event(&c->freelist_wait, have_copygc_reserve(ca));
-
- spin_lock(&ca->fs->freelist_lock);
- sectors_reserved += fifo_used(&ca->free[RESERVE_MOVINGGC]) * ca->mi.bucket_size;
- spin_unlock(&ca->fs->freelist_lock);
-
- down_read(&ca->bucket_lock);
- buckets = bucket_array(ca);
-
- for (b = buckets->first_bucket; b < buckets->nbuckets; b++) {
- struct bucket *g = buckets->b + b;
- struct bucket_mark m = READ_ONCE(g->mark);
- struct copygc_heap_entry e;
-
- if (m.owned_by_allocator ||
- m.data_type != BCH_DATA_user ||
- !bucket_sectors_used(m) ||
- bucket_sectors_used(m) >= ca->mi.bucket_size)
- continue;
-
- WARN_ON(m.stripe && !g->stripe_redundancy);
-
- e = (struct copygc_heap_entry) {
- .dev = dev_idx,
- .gen = m.gen,
- .replicas = 1 + g->stripe_redundancy,
- .fragmentation = bucket_sectors_used(m) * (1U << 15)
- / ca->mi.bucket_size,
- .sectors = bucket_sectors_used(m),
- .offset = bucket_to_sector(ca, b),
- };
- heap_add_or_replace(h, e, -fragmentation_cmp, NULL);
- }
- up_read(&ca->bucket_lock);
- }
+ ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_lru,
+ bpos_max(*pos, lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0)),
+ lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
+ 0, k, ({
+ *bucket = u64_to_bucket(k.k->p.offset);
- /*
- * Our btree node allocations also come out of RESERVE_MOVINGGC:
- */
- sectors_reserved = (sectors_reserved * 3) / 4;
- if (!sectors_reserved) {
- bch2_fs_fatal_error(c, "stuck, ran out of copygc reserve!");
- return -1;
- }
+ bch2_bucket_is_movable(trans, *bucket, lru_pos_time(k.k->p), gen);
+ }));
- for (i = h->data; i < h->data + h->used; i++) {
- sectors_to_move += i->sectors;
- sectors_to_write += i->sectors * i->replicas;
- }
+ *pos = iter.pos;
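+ /*
+ * A positive return from the iterator means the callback saw a movable
+ * bucket; zero means we scanned to the end of the LRU:
+ */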
+ if (ret < 0)
+ return ret;
+ return ret ? 0 : -ENOENT;
+}
- while (sectors_to_write > sectors_reserved) {
- BUG_ON(!heap_pop(h, e, -fragmentation_cmp, NULL));
- sectors_to_write -= e.sectors * e.replicas;
+static int bch2_copygc(struct bch_fs *c)
+{
+ struct bch_move_stats move_stats;
+ struct btree_trans trans;
+ struct moving_context ctxt;
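+ /*
+ * Copygc does its btree updates with the copygc reserve and journal
+ * watermark, so that it can always make forward progress:
+ */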
+ struct data_update_opts data_opts = {
+ .btree_insert_flags = BTREE_INSERT_USE_RESERVE|JOURNAL_WATERMARK_copygc,
+ };
+ struct bpos bucket;
+ struct bpos pos;
+ u8 gen = 0;
+ unsigned nr_evacuated;
+ int ret = 0;
+
+ bch2_move_stats_init(&move_stats, "copygc");
+ bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats,
+ writepoint_ptr(&c->copygc_write_point),
+ false);
+ bch2_trans_init(&trans, c, 0, 0);
+
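+ /* Flush the btree write buffer, so the fragmentation LRU is up to date: */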
+ ret = bch2_btree_write_buffer_flush(&trans);
+ BUG_ON(ret);
+
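+ /* Evacuate up to 32 buckets per copygc pass: */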
+ for (nr_evacuated = 0, pos = POS_MIN;
+ nr_evacuated < 32 && !ret;
+ nr_evacuated++, pos = bpos_nosnap_successor(pos)) {
+ ret = bch2_copygc_next_bucket(&trans, &bucket, &gen, &pos) ?:
+ __bch2_evacuate_bucket(&trans, &ctxt, bucket, gen, data_opts);
+ if (bkey_eq(pos, POS_MAX))
+ break;
}
- buckets_to_move = h->used;
-
- if (!buckets_to_move)
- return 0;
-
- eytzinger0_sort(h->data, h->used,
- sizeof(h->data[0]),
- bucket_offset_cmp, NULL);
+ bch2_trans_exit(&trans);
+ bch2_moving_ctxt_exit(&ctxt);
- ret = bch2_move_data(c,
- 0, POS_MIN,
- BTREE_ID_NR, POS_MAX,
- NULL,
- writepoint_ptr(&c->copygc_write_point),
- copygc_pred, NULL,
- &move_stats);
+ /* No entries found in the LRU btree, or we reached the end: */
+ if (ret == -ENOENT)
+ ret = 0;
- for_each_rw_member(ca, c, dev_idx) {
- down_read(&ca->bucket_lock);
- buckets = bucket_array(ca);
- for (i = h->data; i < h->data + h->used; i++) {
- struct bucket_mark m;
- size_t b;
-
- if (i->dev != dev_idx)
- continue;
-
- b = sector_to_bucket(ca, i->offset);
- m = READ_ONCE(buckets->b[b].mark);
-
- if (i->gen == m.gen &&
- bucket_sectors_used(m)) {
- sectors_not_moved += bucket_sectors_used(m);
- buckets_not_moved++;
- }
- }
- up_read(&ca->bucket_lock);
- }
+ if (ret < 0 && !bch2_err_matches(ret, EROFS))
+ bch_err(c, "error from bch2_move_data() in copygc: %s", bch2_err_str(ret));
- if (sectors_not_moved && !ret)
- bch_warn_ratelimited(c,
- "copygc finished but %llu/%llu sectors, %llu/%llu buckets not moved (move stats: moved %llu sectors, raced %llu keys, %llu sectors)",
- sectors_not_moved, sectors_to_move,
- buckets_not_moved, buckets_to_move,
- atomic64_read(&move_stats.sectors_moved),
- atomic64_read(&move_stats.keys_raced),
- atomic64_read(&move_stats.sectors_raced));
-
- trace_copygc(c,
- atomic64_read(&move_stats.sectors_moved), sectors_not_moved,
- buckets_to_move, buckets_not_moved);
- return 0;
+ trace_and_count(c, copygc, c, atomic64_read(&move_stats.sectors_moved), 0, 0, 0);
+ return ret;
}
for_each_rw_member(ca, c, dev_idx) {
struct bch_dev_usage usage = bch2_dev_usage_read(ca);
- fragmented_allowed = ((__dev_buckets_reclaimable(ca, usage) *
- ca->mi.bucket_size) >> 1);
+ fragmented_allowed = ((__dev_buckets_available(ca, usage, RESERVE_none) *
+ ca->mi.bucket_size) >> 1);
fragmented = usage.d[BCH_DATA_user].fragmented;
wait = min(wait, max(0LL, fragmented_allowed - fragmented));
struct bch_fs *c = arg;
struct io_clock *clock = &c->io_clock[WRITE];
u64 last, wait;
+ int ret = 0;
set_freezable();
- while (!kthread_should_stop()) {
+ while (!ret && !kthread_should_stop()) {
cond_resched();
if (kthread_wait_freezable(c->copy_gc_enabled))
wait = bch2_copygc_wait_amount(c);
if (wait > clock->max_slop) {
- trace_copygc_wait(c, wait, last + wait);
+ trace_and_count(c, copygc_wait, c, wait, last + wait);
c->copygc_wait = last + wait;
bch2_kthread_io_clock_wait(clock, last + wait,
MAX_SCHEDULE_TIMEOUT);
c->copygc_wait = 0;
- if (bch2_copygc(c))
- break;
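+ /* copygc_running, with copygc_running_wq, lets other threads wait for copygc to go idle: */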
+ c->copygc_running = true;
+ ret = bch2_copygc(c);
+ c->copygc_running = false;
+
+ wake_up(&c->copygc_running_wq);
}
return 0;
int bch2_copygc_start(struct bch_fs *c)
{
struct task_struct *t;
+ int ret;
if (c->copygc_thread)
return 0;
return -ENOMEM;
t = kthread_create(bch2_copygc_thread, c, "bch-copygc/%s", c->name);
- if (IS_ERR(t)) {
- bch_err(c, "error creating copygc thread: %li", PTR_ERR(t));
- return PTR_ERR(t);
+ ret = PTR_ERR_OR_ZERO(t);
+ if (ret) {
+ bch_err(c, "error creating copygc thread: %s", bch2_err_str(ret));
+ return ret;
}
get_task_struct(t);
void bch2_fs_copygc_init(struct bch_fs *c)
{
+ init_waitqueue_head(&c->copygc_running_wq);
+ c->copygc_running = false;
}