#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/math64.h>
+#include <linux/sched/task.h>
#include <linux/sort.h>
#include <linux/wait.h>
	return (l->offset > r->offset) - (l->offset < r->offset);
}
-static bool copygc_pred(void *arg, struct bkey_s_c_extent e)
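+/*
+ * returns true if the extent has a pointer on this device that falls within
+ * one of the buckets currently sitting in the copygc heap:
+ */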
+static bool __copygc_pred(struct bch_dev *ca,
+			  struct bkey_s_c_extent e)
{
-	struct bch_dev *ca = arg;
	copygc_heap *h = &ca->copygc_heap;
	const struct bch_extent_ptr *ptr =
		bch2_extent_has_device(e, ca->dev_idx);
	if (ptr) {
		struct copygc_heap_entry search = { .offset = ptr->offset };
-		size_t i = eytzinger0_find_le(h->data, h->used,
-					      sizeof(h->data[0]),
-					      bucket_offset_cmp, &search);
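+		/*
+		 * eytzinger0_find_le() returns -1 when no entry <= the
+		 * search key exists, so the index has to be signed for the
+		 * i >= 0 check below:
+		 */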
+		ssize_t i = eytzinger0_find_le(h->data, h->used,
+					       sizeof(h->data[0]),
+					       bucket_offset_cmp, &search);
		return (i >= 0 &&
			ptr->offset < h->data[i].offset + ca->mi.bucket_size &&
	return false;
}
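+/*
+ * move_data predicate: have the data move path rewrite the extents that
+ * __copygc_pred() flags, and skip everything else:
+ */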
+static enum data_cmd copygc_pred(struct bch_fs *c, void *arg,
+				 enum bkey_type type,
+				 struct bkey_s_c_extent e,
+				 struct bch_io_opts *io_opts,
+				 struct data_opts *data_opts)
+{
+	struct bch_dev *ca = arg;
+
+	if (!__copygc_pred(ca, e))
+		return DATA_SKIP;
+
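+	/*
+	 * rewrite the extent back onto the same device; the btree update may
+	 * dip into the reserve, since copygc is what frees up space:
+	 */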
+	data_opts->target = dev_to_target(ca->dev_idx);
+	data_opts->btree_insert_flags = BTREE_INSERT_USE_RESERVE;
+	data_opts->rewrite_dev = ca->dev_idx;
+	return DATA_REWRITE;
+}
+
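+/*
+ * true once the RESERVE_MOVINGGC freelist is full, or the allocator has
+ * reported itself blocked:
+ */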
static bool have_copygc_reserve(struct bch_dev *ca)
{
	bool ret;
	spin_lock(&ca->freelist_lock);
-	ret = fifo_used(&ca->free[RESERVE_MOVINGGC]) >=
-		COPYGC_BUCKETS_PER_ITER(ca);
+	ret = fifo_full(&ca->free[RESERVE_MOVINGGC]) ||
+		ca->allocator_blocked;
	spin_unlock(&ca->freelist_lock);
	return ret;
			bucket_offset_cmp, NULL);
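+	/*
+	 * walk every extent and rewrite the ones that live in buckets picked
+	 * for copygc:
+	 */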
	ret = bch2_move_data(c, &ca->copygc_pd.rate,
-			     SECTORS_IN_FLIGHT_PER_DEVICE,
-			     &ca->self,
			     writepoint_ptr(&ca->copygc_write_point),
-			     BTREE_INSERT_USE_RESERVE,
-			     ca->dev_idx,
			     POS_MIN, POS_MAX,
			     copygc_pred, ca,
			     &move_stats);
	struct bch_dev *ca = arg;
	struct bch_fs *c = ca->fs;
	struct io_clock *clock = &c->io_clock[WRITE];
+	struct bch_dev_usage usage;
	unsigned long last;
-	u64 available, want, next;
+	u64 available, fragmented, reserve, next;
	set_freezable();
			break;
		last = atomic_long_read(&clock->now);
+
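+		/*
+		 * half the gc reserve, in sectors (gc_reserve_percent is a
+		 * percentage, hence the divide by 2 * 100); e.g. a
+		 * hypothetical device with 1000 usable buckets of 1024
+		 * sectors and gc_reserve_percent = 8 would give
+		 * 1000 * 1024 * 8 / 200 = 40960 sectors:
+		 */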
+		reserve = div64_u64((ca->mi.nbuckets - ca->mi.first_bucket) *
+				    ca->mi.bucket_size *
+				    c->opts.gc_reserve_percent, 200);
+
+		usage = bch2_dev_usage_read(c, ca);
+
		/*
		 * don't start copygc until less than half the gc reserve is
		 * available:
		 */
-		available = dev_buckets_available(c, ca);
-		want = div64_u64((ca->mi.nbuckets - ca->mi.first_bucket) *
-				 c->opts.gc_reserve_percent, 200);
-		if (available > want) {
-			next = last + (available - want) *
-				ca->mi.bucket_size;
+		available = __dev_buckets_available(ca, usage) *
+			ca->mi.bucket_size;
+		if (available > reserve) {
+			next = last + available - reserve;
+			bch2_kthread_io_clock_wait(clock, next);
+			continue;
+		}
+
+		/*
+		 * don't start copygc until there's more than half the copygc
+		 * reserve of fragmented space:
+		 */
+		fragmented = usage.sectors_fragmented;
+		if (fragmented < reserve) {
+			next = last + reserve - fragmented;
			bch2_kthread_io_clock_wait(clock, next);
			continue;
		}
	ca->copygc_pd.rate.rate = UINT_MAX;
	bch2_ratelimit_reset(&ca->copygc_pd.rate);
-	if (ca->copygc_thread)
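+	/*
+	 * the copygc thread's task_struct is refcounted now: drop the ref
+	 * taken in the start path once the thread has been stopped:
+	 */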
+	if (ca->copygc_thread) {
		kthread_stop(ca->copygc_thread);
+		put_task_struct(ca->copygc_thread);
+	}
	ca->copygc_thread = NULL;
}
	if (IS_ERR(t))
		return PTR_ERR(t);
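+	/* take a ref so the task_struct stays pinned until kthread_stop(): */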
+	get_task_struct(t);
+
	ca->copygc_thread = t;
	wake_up_process(ca->copygc_thread);