+// SPDX-License-Identifier: GPL-2.0
/*
 * Moving/copying garbage collector
 */
#include "bcachefs.h"
+#include "alloc_foreground.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "buckets.h"
#include "clock.h"
+#include "disk_groups.h"
#include "extents.h"
#include "eytzinger.h"
#include "io.h"
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/math64.h>
+#include <linux/sched/task.h>
#include <linux/sort.h>
#include <linux/wait.h>

static inline int sectors_used_cmp(copygc_heap *heap,
struct copygc_heap_entry l,
struct copygc_heap_entry r)
{
- return bucket_sectors_used(l.mark) - bucket_sectors_used(r.mark);
+ return cmp_int(l.sectors, r.sectors);
}
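+
+/*
+ * cmp_int() (from util.h) expands to ((l > r) - (l < r)): a branchless
+ * -1/0/1 result that, unlike open-coded subtraction, can't wrap when
+ * comparing unsigned values.
+ */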
static int bucket_offset_cmp(const void *_l, const void *_r, size_t size)
{
const struct copygc_heap_entry *l = _l;
const struct copygc_heap_entry *r = _r;
- return (l->offset > r->offset) - (l->offset < r->offset);
+ return cmp_int(l->offset, r->offset);
}
-static bool copygc_pred(void *arg, struct bkey_s_c_extent e)
+static bool __copygc_pred(struct bch_dev *ca,
+ struct bkey_s_c k)
{
- struct bch_dev *ca = arg;
copygc_heap *h = &ca->copygc_heap;
const struct bch_extent_ptr *ptr =
- bch2_extent_has_device(e, ca->dev_idx);
+ bch2_bkey_has_device(k, ca->dev_idx);
if (ptr) {
struct copygc_heap_entry search = { .offset = ptr->offset };
- size_t i = eytzinger0_find_le(h->data, h->used,
- sizeof(h->data[0]),
- bucket_offset_cmp, &search);
+ ssize_t i = eytzinger0_find_le(h->data, h->used,
+ sizeof(h->data[0]),
+ bucket_offset_cmp, &search);
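+		/*
+		 * i must be signed: eytzinger0_find_le() returns -1 when no
+		 * entry has offset <= the search offset, and with a size_t
+		 * the i >= 0 check below would be vacuously true.
+		 */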
+#if 0
+		/*
+		 * eytzinger search verify code ('n', not 'k', so the bkey
+		 * parameter isn't shadowed):
+		 */
+		ssize_t j = -1, n;
+
+		for (n = 0; n < h->used; n++)
+			if (h->data[n].offset <= ptr->offset &&
+			    (j < 0 || h->data[n].offset > h->data[j].offset))
+				j = n;
+ BUG_ON(i != j);
+#endif
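+
+		/*
+		 * A match means the pointer lands inside the candidate bucket
+		 * and its generation is still current - a stale gen means the
+		 * bucket was reused and the pointer no longer points at live
+		 * data.
+		 */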
return (i >= 0 &&
ptr->offset < h->data[i].offset + ca->mi.bucket_size &&
- ptr->gen == h->data[i].mark.gen);
+ ptr->gen == h->data[i].gen);
}
return false;
}
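+
+/*
+ * The pred callback for bch2_move_data(): skip extents that don't live in
+ * one of this device's fragmented buckets; rewrite everything else back
+ * onto the same device, using the movinggc reserve, so copygc can make
+ * forward progress even when the filesystem is nearly full.
+ */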
+static enum data_cmd copygc_pred(struct bch_fs *c, void *arg,
+ struct bkey_s_c k,
+ struct bch_io_opts *io_opts,
+ struct data_opts *data_opts)
+{
+ struct bch_dev *ca = arg;
+
+ if (!__copygc_pred(ca, k))
+ return DATA_SKIP;
+
+ data_opts->target = dev_to_target(ca->dev_idx);
+ data_opts->btree_insert_flags = BTREE_INSERT_USE_RESERVE;
+ data_opts->rewrite_dev = ca->dev_idx;
+ return DATA_REWRITE;
+}
+
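+/*
+ * Wait for the allocator to have filled the movinggc reserve - or to have
+ * stopped; if the allocator isn't running, waiting on the reserve could
+ * block forever:
+ */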
static bool have_copygc_reserve(struct bch_dev *ca)
{
bool ret;
- spin_lock(&ca->freelist_lock);
- ret = fifo_used(&ca->free[RESERVE_MOVINGGC]) >=
- COPYGC_BUCKETS_PER_ITER(ca);
- spin_unlock(&ca->freelist_lock);
+ spin_lock(&ca->fs->freelist_lock);
+ ret = fifo_full(&ca->free[RESERVE_MOVINGGC]) ||
+ ca->allocator_state != ALLOCATOR_RUNNING;
+ spin_unlock(&ca->fs->freelist_lock);
return ret;
}

static void bch2_copygc(struct bch_fs *c, struct bch_dev *ca)
{
copygc_heap *h = &ca->copygc_heap;
struct copygc_heap_entry e, *i;
- struct bucket *g;
- u64 keys_moved, sectors_moved;
+ struct bucket_array *buckets;
+ struct bch_move_stats move_stats;
u64 sectors_to_move = 0, sectors_not_moved = 0;
u64 buckets_to_move, buckets_not_moved = 0;
+ size_t b;
int ret;
+ memset(&move_stats, 0, sizeof(move_stats));
closure_wait_event(&c->freelist_wait, have_copygc_reserve(ca));
	/*
	 * Find buckets with lowest sector counts, skipping completely
	 * empty buckets, by building a maxheap sorted by sector count,
	 * and repeatedly replacing the maximum element until all
	 * buckets have been visited.
	 */
+ h->used = 0;
/*
* We need bucket marks to be up to date - gc can't be recalculating
* them:
*/
down_read(&c->gc_lock);
- h->used = 0;
- for_each_bucket(g, ca) {
- struct bucket_mark m = READ_ONCE(g->mark);
+ down_read(&ca->bucket_lock);
+ buckets = bucket_array(ca);
+
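+	/*
+	 * Skip buckets owned by the allocator, buckets without user data,
+	 * empty buckets, and completely full buckets - a full bucket has no
+	 * fragmented space to reclaim:
+	 */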
+ for (b = buckets->first_bucket; b < buckets->nbuckets; b++) {
+ struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
struct copygc_heap_entry e;
if (m.owned_by_allocator ||
- m.data_type != BUCKET_DATA ||
+ m.data_type != BCH_DATA_USER ||
!bucket_sectors_used(m) ||
bucket_sectors_used(m) >= ca->mi.bucket_size)
continue;
e = (struct copygc_heap_entry) {
- .offset = bucket_to_sector(ca, g - ca->buckets),
- .mark = m
+ .gen = m.gen,
+ .sectors = bucket_sectors_used(m),
+ .offset = bucket_to_sector(ca, b),
};
- heap_add_or_replace(h, e, -sectors_used_cmp);
+ heap_add_or_replace(h, e, -sectors_used_cmp, NULL);
}
+ up_read(&ca->bucket_lock);
up_read(&c->gc_lock);
for (i = h->data; i < h->data + h->used; i++)
- sectors_to_move += bucket_sectors_used(i->mark);
+ sectors_to_move += i->sectors;
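+
+	/*
+	 * Trim the list: heap_pop() returns the bucket with the most sectors
+	 * used, so we drop the fullest (least fragmented) buckets until the
+	 * remaining work fits in one iteration:
+	 */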
while (sectors_to_move > COPYGC_SECTORS_PER_ITER(ca)) {
- BUG_ON(!heap_pop(h, e, -sectors_used_cmp));
- sectors_to_move -= bucket_sectors_used(e.mark);
+ BUG_ON(!heap_pop(h, e, -sectors_used_cmp, NULL));
+ sectors_to_move -= e.sectors;
}
buckets_to_move = h->used;

	/* Sort by bucket offset so __copygc_pred() can binary search it: */
	eytzinger0_sort(h->data, h->used,
			sizeof(h->data[0]),
bucket_offset_cmp, NULL);
ret = bch2_move_data(c, &ca->copygc_pd.rate,
- SECTORS_IN_FLIGHT_PER_DEVICE,
- &ca->self,
writepoint_ptr(&ca->copygc_write_point),
- BTREE_INSERT_USE_RESERVE,
- ca->dev_idx,
+ POS_MIN, POS_MAX,
copygc_pred, ca,
- &keys_moved,
-			     &sectors_moved);
+ &move_stats);
+ down_read(&ca->bucket_lock);
+ buckets = bucket_array(ca);
for (i = h->data; i < h->data + h->used; i++) {
- size_t bucket = sector_to_bucket(ca, i->offset);
- struct bucket_mark m = READ_ONCE(ca->buckets[bucket].mark);
+ size_t b = sector_to_bucket(ca, i->offset);
+ struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
- if (i->mark.gen == m.gen && bucket_sectors_used(m)) {
+ if (i->gen == m.gen && bucket_sectors_used(m)) {
sectors_not_moved += bucket_sectors_used(m);
buckets_not_moved++;
}
}
+ up_read(&ca->bucket_lock);
if (sectors_not_moved && !ret)
- bch_warn(c, "copygc finished but %llu/%llu sectors, %llu/%llu buckets not moved",
+ bch_warn_ratelimited(c,
+ "copygc finished but %llu/%llu sectors, %llu/%llu buckets not moved (move stats: moved %llu sectors, raced %llu keys, %llu sectors)",
sectors_not_moved, sectors_to_move,
- buckets_not_moved, buckets_to_move);
+ buckets_not_moved, buckets_to_move,
+ atomic64_read(&move_stats.sectors_moved),
+ atomic64_read(&move_stats.keys_raced),
+ atomic64_read(&move_stats.sectors_raced));
trace_copygc(ca,
- sectors_moved, sectors_not_moved,
+ atomic64_read(&move_stats.sectors_moved), sectors_not_moved,
buckets_to_move, buckets_not_moved);
}
+/*
+ * Copygc runs when the amount of fragmented data is above some arbitrary
+ * threshold:
+ *
+ * The threshold at the limit - when the device is full - is the amount of
+ * space we reserved in bch2_recalc_capacity: if more space than that were
+ * stranded due to fragmentation, we couldn't store everything we have
+ * promised to store.
+ *
+ * But we don't want to be running copygc unnecessarily when the device still
+ * has plenty of free space - rather, we want copygc to smoothly run every so
+ * often and continually reduce the amount of fragmented space as the device
+ * fills up. So, we increase the threshold by half the current free space.
+ */
+unsigned long bch2_copygc_wait_amount(struct bch_dev *ca)
+{
+ struct bch_fs *c = ca->fs;
+ struct bch_dev_usage usage = bch2_dev_usage_read(c, ca);
+ u64 fragmented_allowed = ca->copygc_threshold +
+ ((__dev_buckets_available(ca, usage) * ca->mi.bucket_size) >> 1);
+
+ return max_t(s64, 0, fragmented_allowed - usage.sectors_fragmented);
+}
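+
+/*
+ * Worked example, with made-up numbers: if copygc_threshold is 1M sectors
+ * and the device has 4M sectors' worth of free buckets, fragmented_allowed
+ * is 1M + 4M/2 = 3M; with 0.5M sectors currently fragmented, the copygc
+ * thread waits for 3M - 0.5M = 2.5M sectors of (IO clock) writes before
+ * checking again.
+ */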
+
static int bch2_copygc_thread(void *arg)
{
struct bch_dev *ca = arg;
struct bch_fs *c = ca->fs;
struct io_clock *clock = &c->io_clock[WRITE];
- unsigned long last;
- u64 available, want, next;
+ unsigned long last, wait;
set_freezable();

	while (!kthread_should_stop()) {
		if (kthread_wait_freezable(c->copy_gc_enabled))
break;
last = atomic_long_read(&clock->now);
- /*
- * don't start copygc until less than half the gc reserve is
- * available:
- */
- available = dev_buckets_available(c, ca);
- want = div64_u64((ca->mi.nbuckets - ca->mi.first_bucket) *
- c->opts.gc_reserve_percent, 200);
- if (available > want) {
- next = last + (available - want) *
- ca->mi.bucket_size;
- bch2_kthread_io_clock_wait(clock, next);
+ wait = bch2_copygc_wait_amount(ca);
+
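+		/*
+		 * If the wait is longer than the IO clock's max slop, schedule
+		 * a wakeup after that much (clocked) IO; otherwise just run
+		 * copygc now:
+		 */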
+ if (wait > clock->max_slop) {
+ bch2_kthread_io_clock_wait(clock, last + wait,
+ MAX_SCHEDULE_TIMEOUT);
continue;
}

		bch2_copygc(c, ca);
	}

	return 0;
}

void bch2_copygc_stop(struct bch_dev *ca)
{
ca->copygc_pd.rate.rate = UINT_MAX;
bch2_ratelimit_reset(&ca->copygc_pd.rate);
- if (ca->copygc_thread)
+ if (ca->copygc_thread) {
kthread_stop(ca->copygc_thread);
+ put_task_struct(ca->copygc_thread);
+ }
ca->copygc_thread = NULL;
}

int bch2_copygc_start(struct bch_fs *c, struct bch_dev *ca)
{
struct task_struct *t;
- BUG_ON(ca->copygc_thread);
+ if (ca->copygc_thread)
+ return 0;
if (c->opts.nochanges)
return 0;
if (bch2_fs_init_fault("copygc_start"))
return -ENOMEM;
- t = kthread_create(bch2_copygc_thread, ca, "bch_copygc");
+ t = kthread_create(bch2_copygc_thread, ca,
+ "bch_copygc[%s]", ca->name);
if (IS_ERR(t))
return PTR_ERR(t);
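+
+	/*
+	 * Take a reference on the thread: bch2_copygc_stop() needs to call
+	 * kthread_stop() even if the thread has already exited, which would
+	 * otherwise be a use after free:
+	 */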
+ get_task_struct(t);
+
ca->copygc_thread = t;
wake_up_process(ca->copygc_thread);

	return 0;
}