#include "alloc_foreground.h"
#include "btree_iter.h"
#include "btree_update.h"
+#include "btree_write_buffer.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "eytzinger.h"
#include "io.h"
#include "keylist.h"
+#include "lru.h"
#include "move.h"
#include "movinggc.h"
#include "super-io.h"
#include <trace/events/bcachefs.h>
+#include <linux/bsearch.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/sort.h>
#include <linux/wait.h>
-static inline int fragmentation_cmp(copygc_heap *heap,
- struct copygc_heap_entry l,
- struct copygc_heap_entry r)
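+/*
+ * Returns nonzero if @bucket is a copygc candidate: not currently open
+ * for allocation, holding movable data (btree or user), and still on the
+ * fragmentation LRU at a position no newer than @time - a newer position
+ * means the LRU entry that pointed us here is stale. *gen is set to the
+ * bucket's generation so the caller can detect reuse.
+ */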
+static int bch2_bucket_is_movable(struct btree_trans *trans,
+ struct bpos bucket, u64 time, u8 *gen)
{
- return cmp_int(l.fragmentation, r.fragmentation);
-}
-
-static int find_buckets_to_copygc(struct bch_fs *c)
-{
- copygc_heap *h = &c->copygc_heap;
- struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
- struct bch_alloc_v4 a;
+ struct bch_alloc_v4 _a;
+ const struct bch_alloc_v4 *a;
int ret;
- bch2_trans_init(&trans, c, 0, 0);
+ if (bch2_bucket_is_open(trans->c, bucket.inode, bucket.offset))
+ return 0;
- /*
- * Find buckets with lowest sector counts, skipping completely
- * empty buckets, by building a maxheap sorted by sector count,
- * and repeatedly replacing the maximum element until all
- * buckets have been visited.
- */
- h->used = 0;
-
- for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
- BTREE_ITER_PREFETCH, k, ret) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, iter.pos.inode);
- struct copygc_heap_entry e;
-
- bch2_alloc_to_v4(k, &a);
-
- if ((a.data_type != BCH_DATA_btree &&
- a.data_type != BCH_DATA_user) ||
- a.dirty_sectors >= ca->mi.bucket_size ||
- bch2_bucket_is_open(c, iter.pos.inode, iter.pos.offset))
- continue;
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, bucket, BTREE_ITER_CACHED);
+ k = bch2_btree_iter_peek_slot(&iter);
+ ret = bkey_err(k);
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (ret)
+ return ret;
- e = (struct copygc_heap_entry) {
- .dev = iter.pos.inode,
- .gen = a.gen,
- .replicas = 1 + a.stripe_redundancy,
- .fragmentation = div_u64((u64) a.dirty_sectors * (1ULL << 31),
- ca->mi.bucket_size),
- .sectors = a.dirty_sectors,
- .bucket = iter.pos.offset,
- };
- heap_add_or_replace(h, e, -fragmentation_cmp, NULL);
+ a = bch2_alloc_to_v4(k, &_a);
+ *gen = a->gen;
+ ret = data_type_movable(a->data_type) &&
+ a->fragmentation_lru &&
+ a->fragmentation_lru <= time;
+
+ if (ret) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf, trans->c, k);
+ pr_debug("%s", buf.buf);
+ printbuf_exit(&buf);
}
- bch2_trans_iter_exit(&trans, &iter);
- bch2_trans_exit(&trans);
return ret;
}
-static int bch2_copygc(struct bch_fs *c)
+typedef FIFO(struct move_bucket_in_flight) move_buckets_in_flight;
+
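+/* A copygc candidate, with the generation it had when we selected it: */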
+struct move_bucket {
+ struct bpos bucket;
+ u8 gen;
+};
+
+typedef DARRAY(struct move_bucket) move_buckets;
+
+static int move_bucket_cmp(const void *_l, const void *_r)
{
- copygc_heap *h = &c->copygc_heap;
- struct copygc_heap_entry e;
- struct bch_move_stats move_stats;
- struct bch_dev *ca;
- unsigned dev_idx;
- size_t heap_size = 0;
- struct moving_context ctxt;
- struct data_update_opts data_opts = {
- .btree_insert_flags = BTREE_INSERT_USE_RESERVE|JOURNAL_WATERMARK_copygc,
- };
- int ret = 0;
+ const struct move_bucket *l = _l;
+ const struct move_bucket *r = _r;
- bch2_move_stats_init(&move_stats, "copygc");
+ return bkey_cmp(l->bucket, r->bucket);
+}
- for_each_rw_member(ca, c, dev_idx)
- heap_size += ca->mi.nbuckets >> 7;
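+/* @buckets_sorted must be sorted by move_bucket_cmp() for the bsearch: */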
+static bool bucket_in_flight(move_buckets *buckets_sorted, struct move_bucket b)
+{
+ return bsearch(&b,
+ buckets_sorted->data,
+ buckets_sorted->nr,
+ sizeof(buckets_sorted->data[0]),
+ move_bucket_cmp) != NULL;
+}
- if (h->size < heap_size) {
- free_heap(&c->copygc_heap);
- if (!init_heap(&c->copygc_heap, heap_size, GFP_KERNEL)) {
- bch_err(c, "error allocating copygc heap");
- return 0;
- }
- }
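+/*
+ * Wait until no more than @nr evacuations are in flight, retiring
+ * completed buckets from the front of the FIFO as we go; returns with
+ * the btree transaction unlocked.
+ */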
+static void move_buckets_wait(struct btree_trans *trans,
+ struct moving_context *ctxt,
+ move_buckets_in_flight *buckets_in_flight,
+ size_t nr, bool verify_evacuated)
+{
+ while (!fifo_empty(buckets_in_flight)) {
+ struct move_bucket_in_flight *i = &fifo_peek_front(buckets_in_flight);
- ret = find_buckets_to_copygc(c);
- if (ret) {
- bch2_fs_fatal_error(c, "error walking buckets to copygc!");
- return ret;
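+ /* too many buckets in flight: wait for the oldest to complete */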
+ if (fifo_used(buckets_in_flight) > nr)
+ move_ctxt_wait_event(ctxt, trans, !atomic_read(&i->count));
+
+ if (atomic_read(&i->count))
+ break;
+
+ /*
+ * moving_ctxt_exit calls bch2_write as it flushes pending
+ * reads, which inits another btree_trans; this one must be
+ * unlocked:
+ */
+ if (verify_evacuated)
+ bch2_verify_bucket_evacuated(trans, i->bucket, i->gen);
+ buckets_in_flight->front++;
}
- if (!h->used) {
- s64 wait = S64_MAX, dev_wait;
- u64 dev_min_wait_fragmented = 0;
- u64 dev_min_wait_allowed = 0;
- int dev_min_wait = -1;
-
- for_each_rw_member(ca, c, dev_idx) {
- struct bch_dev_usage usage = bch2_dev_usage_read(ca);
- s64 allowed = ((__dev_buckets_available(ca, usage, RESERVE_none) *
- ca->mi.bucket_size) >> 1);
- s64 fragmented = usage.d[BCH_DATA_user].fragmented;
-
- dev_wait = max(0LL, allowed - fragmented);
-
- if (dev_min_wait < 0 || dev_wait < wait) {
- dev_min_wait = dev_idx;
- dev_min_wait_fragmented = fragmented;
- dev_min_wait_allowed = allowed;
- }
- }
+ bch2_trans_unlock(trans);
+}
- bch_err_ratelimited(c, "copygc requested to run but found no buckets to move! dev %u fragmented %llu allowed %llu",
- dev_min_wait, dev_min_wait_fragmented, dev_min_wait_allowed);
- return 0;
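+/*
+ * Walk the fragmentation LRU to collect the most fragmented buckets,
+ * skipping any that are already being evacuated:
+ */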
+static int bch2_copygc_get_buckets(struct btree_trans *trans,
+ struct moving_context *ctxt,
+ move_buckets_in_flight *buckets_in_flight,
+ move_buckets *buckets)
+{
+ struct btree_iter iter;
+ move_buckets buckets_sorted = { 0 };
+ struct move_bucket_in_flight *i;
+ struct bkey_s_c k;
+ size_t fifo_iter, nr_to_get;
+ int ret;
+
+ move_buckets_wait(trans, ctxt, buckets_in_flight, buckets_in_flight->size / 2, true);
+
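+ /* grab at least 16 buckets, or a quarter of what's already in flight: */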
+ nr_to_get = max(16UL, fifo_used(buckets_in_flight) / 4);
+
+ fifo_for_each_entry_ptr(i, buckets_in_flight, fifo_iter) {
+ ret = darray_push(&buckets_sorted, ((struct move_bucket) {i->bucket, i->gen}));
+ if (ret) {
+ bch_err(trans->c, "error allocating move_buckets_sorted");
+ goto err;
+ }
}
- heap_resort(h, fragmentation_cmp, NULL);
+ sort(buckets_sorted.data,
+ buckets_sorted.nr,
+ sizeof(buckets_sorted.data[0]),
+ move_bucket_cmp,
+ NULL);
+
+ ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_lru,
+ lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0),
+ lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
+ 0, k, ({
+ struct move_bucket b = { .bucket = u64_to_bucket(k.k->p.offset) };
+ int ret = 0;
+
+ if (!bucket_in_flight(&buckets_sorted, b) &&
+ bch2_bucket_is_movable(trans, b.bucket, lru_pos_time(k.k->p), &b.gen))
+ ret = darray_push(buckets, b) ?: buckets->nr >= nr_to_get;
+
+ ret;
+ }));
+err:
+ darray_exit(&buckets_sorted);
+
+ return ret < 0 ? ret : 0;
+}
- bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats,
- writepoint_ptr(&c->copygc_write_point),
- false);
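+/*
+ * Run one copygc pass: flush the btree write buffer, pick a batch of
+ * fragmented buckets off the LRU, and evacuate each one:
+ */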
+static int bch2_copygc(struct btree_trans *trans,
+ struct moving_context *ctxt,
+ move_buckets_in_flight *buckets_in_flight)
+{
+ struct bch_fs *c = trans->c;
+ struct data_update_opts data_opts = {
+ .btree_insert_flags = BTREE_INSERT_USE_RESERVE|JOURNAL_WATERMARK_copygc,
+ };
+ move_buckets buckets = { 0 };
+ struct move_bucket_in_flight *f;
+ struct move_bucket *i;
+ u64 moved = atomic64_read(&ctxt->stats->sectors_moved);
+ int ret = 0;
+
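+ /*
+ * The fragmentation LRU is kept up to date via the btree write
+ * buffer; flush it so the LRU we're about to walk is current:
+ */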
+ ret = bch2_btree_write_buffer_flush(trans);
+ if (bch2_fs_fatal_err_on(ret, c, "%s: error %s from bch2_btree_write_buffer_flush()",
+ __func__, bch2_err_str(ret)))
+ return ret;
+
+ ret = bch2_copygc_get_buckets(trans, ctxt, buckets_in_flight, &buckets);
+ if (ret)
+ goto err;
+
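+ /* kick off an evacuation for each candidate, tracking it in the fifo: */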
+ darray_for_each(buckets, i) {
+ if (unlikely(freezing(current)))
+ break;
- /* not correct w.r.t. device removal */
- while (h->used && !ret) {
- BUG_ON(!heap_pop(h, e, -fragmentation_cmp, NULL));
- ret = __bch2_evacuate_bucket(&ctxt, POS(e.dev, e.bucket), e.gen,
- data_opts);
+ f = fifo_push_ref(buckets_in_flight);
+ f->bucket = i->bucket;
+ f->gen = i->gen;
+ atomic_set(&f->count, 0);
+
+ ret = __bch2_evacuate_bucket(trans, ctxt, f, f->bucket, f->gen, data_opts);
+ if (ret)
+ goto err;
}
+err:
+ darray_exit(&buckets);
- bch2_moving_ctxt_exit(&ctxt);
+ /* no entries in LRU btree found, or got to end: */
+ if (bch2_err_matches(ret, ENOENT))
+ ret = 0;
- if (ret < 0 && ret != -EROFS)
+ if (ret < 0 && !bch2_err_matches(ret, EROFS))
- bch_err(c, "error from bch2_move_data() in copygc: %s", bch2_err_str(ret));
+ bch_err(c, "error from __bch2_evacuate_bucket() in copygc: %s", bch2_err_str(ret));
- trace_and_count(c, copygc, c, atomic64_read(&move_stats.sectors_moved), 0, 0, 0);
+ moved = atomic64_read(&ctxt->stats->sectors_moved) - moved;
+ trace_and_count(c, copygc, c, moved, 0, 0, 0);
return ret;
}

unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
{
struct bch_dev *ca;
unsigned dev_idx;
s64 wait = S64_MAX, fragmented_allowed, fragmented;
+ unsigned i;
for_each_rw_member(ca, c, dev_idx) {
struct bch_dev_usage usage = bch2_dev_usage_read(ca);
- fragmented_allowed = ((__dev_buckets_available(ca, usage, RESERVE_none) *
+ fragmented_allowed = ((__dev_buckets_available(ca, usage, RESERVE_stripe) *
ca->mi.bucket_size) >> 1);
- fragmented = usage.d[BCH_DATA_user].fragmented;
+ fragmented = 0;
+
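+ /* fragmented space in any movable data type counts towards copygc: */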
+ for (i = 0; i < BCH_DATA_NR; i++)
+ if (data_type_movable(i))
+ fragmented += usage.d[i].fragmented;
wait = min(wait, max(0LL, fragmented_allowed - fragmented));
}
return wait;
}
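
+/* Print the remaining copygc wait and a freshly computed wait amount: */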
+void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ prt_printf(out, "Currently waiting for: ");
+ prt_human_readable_u64(out, max(0LL, c->copygc_wait -
+ atomic64_read(&c->io_clock[WRITE].now)) << 9);
+ prt_newline(out);
+
+ prt_printf(out, "Currently calculated wait: ");
+ prt_human_readable_u64(out, bch2_copygc_wait_amount(c));
+ prt_newline(out);
+}
+
static int bch2_copygc_thread(void *arg)
{
struct bch_fs *c = arg;
+ struct btree_trans trans;
+ struct moving_context ctxt;
+ struct bch_move_stats move_stats;
struct io_clock *clock = &c->io_clock[WRITE];
+ move_buckets_in_flight move_buckets;
u64 last, wait;
int ret = 0;
+ if (!init_fifo(&move_buckets, 1 << 14, GFP_KERNEL)) {
+ bch_err(c, "error allocating copygc buckets in flight");
+ return -ENOMEM;
+ }
+
set_freezable();
+ bch2_trans_init(&trans, c, 0, 0);
+
+ bch2_move_stats_init(&move_stats, "copygc");
+ bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats,
+ writepoint_ptr(&c->copygc_write_point),
+ false);
while (!ret && !kthread_should_stop()) {
+ bch2_trans_unlock(&trans);
cond_resched();
- if (kthread_wait_freezable(c->copy_gc_enabled))
- break;
+ if (!c->copy_gc_enabled) {
+ move_buckets_wait(&trans, &ctxt, &move_buckets, 0, true);
+ kthread_wait_freezable(c->copy_gc_enabled);
+ }
+
+ if (unlikely(freezing(current))) {
+ move_buckets_wait(&trans, &ctxt, &move_buckets, 0, true);
+ __refrigerator(false);
+ continue;
+ }
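+
+ /* wait on the io clock until enough fragmented space accumulates: */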
last = atomic64_read(&clock->now);
wait = bch2_copygc_wait_amount(c);
if (wait > clock->max_slop) {
+ move_buckets_wait(&trans, &ctxt, &move_buckets, 0, true);
trace_and_count(c, copygc_wait, c, wait, last + wait);
c->copygc_wait = last + wait;
bch2_kthread_io_clock_wait(clock, last + wait,
MAX_SCHEDULE_TIMEOUT);
continue;
}

c->copygc_wait = 0;
c->copygc_running = true;
- ret = bch2_copygc(c);
+ ret = bch2_copygc(&trans, &ctxt, &move_buckets);
c->copygc_running = false;
wake_up(&c->copygc_running_wq);
}
+ bch2_trans_exit(&trans);
+ bch2_moving_ctxt_exit(&ctxt);
+ free_fifo(&move_buckets);
+
return 0;
}