return (l->offset > r->offset) - (l->offset < r->offset);
}
-static bool copygc_pred(void *arg, struct bkey_s_c_extent e)
+static bool __copygc_pred(struct bch_dev *ca,
+ struct bkey_s_c_extent e)
{
- struct bch_dev *ca = arg;
copygc_heap *h = &ca->copygc_heap;
const struct bch_extent_ptr *ptr =
bch2_extent_has_device(e, ca->dev_idx);
return false;
}
+/*
+ * Decide what to do with an extent during copygc: skip it if it has no
+ * pointer on this device, otherwise rewrite it onto this device using the
+ * copygc reserve.
+ */
+static enum data_cmd copygc_pred(struct bch_fs *c, void *arg,
+				 enum bkey_type type,
+				 struct bkey_s_c_extent e,
+				 struct bch_io_opts *io_opts,
+				 struct data_opts *data_opts)
+{
+	struct bch_dev *ca = arg;
+
+	if (!__copygc_pred(ca, e))
+		return DATA_SKIP;
+
+	/* Fixed: was a comma (comma operator fused these two assignments
+	 * into one statement) — behaviorally identical, but a typo. */
+	data_opts->btree_insert_flags = BTREE_INSERT_USE_RESERVE;
+	data_opts->rewrite_dev = ca->dev_idx;
+	return DATA_REWRITE;
+}
+
static bool have_copygc_reserve(struct bch_dev *ca)
{
bool ret;
copygc_heap *h = &ca->copygc_heap;
struct copygc_heap_entry e, *i;
struct bucket_array *buckets;
- u64 keys_moved, sectors_moved;
+ struct bch_move_stats move_stats;
u64 sectors_to_move = 0, sectors_not_moved = 0;
u64 buckets_to_move, buckets_not_moved = 0;
size_t b;
int ret;
+ memset(&move_stats, 0, sizeof(move_stats));
closure_wait_event(&c->freelist_wait, have_copygc_reserve(ca));
/*
SECTORS_IN_FLIGHT_PER_DEVICE,
&ca->self,
writepoint_ptr(&ca->copygc_write_point),
- BTREE_INSERT_USE_RESERVE,
- ca->dev_idx,
+ POS_MIN, POS_MAX,
copygc_pred, ca,
- &keys_moved,
- §ors_moved);
+ &move_stats);
down_read(&ca->bucket_lock);
buckets = bucket_array(ca);
buckets_not_moved, buckets_to_move);
trace_copygc(ca,
- sectors_moved, sectors_not_moved,
+ atomic64_read(&move_stats.sectors_moved), sectors_not_moved,
buckets_to_move, buckets_not_moved);
}