#include "buckets.h"
#include "data_update.h"
#include "ec.h"
+#include "error.h"
#include "extents.h"
-#include "io.h"
+#include "io_write.h"
#include "keylist.h"
#include "move.h"
+#include "nocow_locking.h"
+#include "rebalance.h"
#include "subvolume.h"
+#include "trace.h"
-#include <trace/events/bcachefs.h>
-
-static int insert_snapshot_whiteouts(struct btree_trans *trans,
- enum btree_id id,
- struct bpos old_pos,
- struct bpos new_pos)
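+/*
+ * Trace helper: only format the key (relatively expensive) when the
+ * move_extent_finish tracepoint is actually enabled:
+ */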
+static void trace_move_extent_finish2(struct bch_fs *c, struct bkey_s_c k)
{
- struct bch_fs *c = trans->c;
- struct btree_iter iter, update_iter;
- struct bkey_s_c k;
- snapshot_id_list s;
- int ret;
+ if (trace_move_extent_finish_enabled()) {
+ struct printbuf buf = PRINTBUF;
- if (!btree_type_has_snapshots(id))
- return 0;
+ bch2_bkey_val_to_text(&buf, c, k);
+ trace_move_extent_finish(c, buf.buf);
+ printbuf_exit(&buf);
+ }
+}
- darray_init(&s);
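+/*
+ * Trace helper for the failure path: dump the old, new, wrote and
+ * insert keys, plus which pointers were flagged for rewrite and which
+ * rewrites were actually found, so update races can be diagnosed from
+ * the trace buffer:
+ */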
+static void trace_move_extent_fail2(struct data_update *m,
+ struct bkey_s_c new,
+ struct bkey_s_c wrote,
+ struct bkey_i *insert,
+ const char *msg)
+{
+ struct bch_fs *c = m->op.c;
+ struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
+ const union bch_extent_entry *entry;
+ struct bch_extent_ptr *ptr;
+ struct extent_ptr_decoded p;
+ struct printbuf buf = PRINTBUF;
+ unsigned i, rewrites_found = 0;
- if (!bkey_cmp(old_pos, new_pos))
- return 0;
+ if (!trace_move_extent_fail_enabled())
+ return;
- if (!snapshot_t(c, old_pos.snapshot)->children[0])
- return 0;
+ prt_str(&buf, msg);
- bch2_trans_iter_init(trans, &iter, id, old_pos,
- BTREE_ITER_NOT_EXTENTS|
- BTREE_ITER_ALL_SNAPSHOTS);
- while (1) {
- k = bch2_btree_iter_prev(&iter);
- ret = bkey_err(k);
- if (ret)
- break;
+ if (insert) {
+ i = 0;
+ bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry) {
+ if (((1U << i) & m->data_opts.rewrite_ptrs) &&
+ (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
+ !ptr->cached)
+ rewrites_found |= 1U << i;
+ i++;
+ }
+ }
- if (bkey_cmp(old_pos, k.k->p))
- break;
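+ /* Print the low four bits of each bitmask as 0/1 digits, LSB first: */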
+ prt_printf(&buf, "\nrewrite ptrs: %u%u%u%u",
+ (m->data_opts.rewrite_ptrs & (1 << 0)) != 0,
+ (m->data_opts.rewrite_ptrs & (1 << 1)) != 0,
+ (m->data_opts.rewrite_ptrs & (1 << 2)) != 0,
+ (m->data_opts.rewrite_ptrs & (1 << 3)) != 0);
- if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, old_pos.snapshot)) {
- struct bkey_i *update;
+ prt_printf(&buf, "\nrewrites found: %u%u%u%u",
+ (rewrites_found & (1 << 0)) != 0,
+ (rewrites_found & (1 << 1)) != 0,
+ (rewrites_found & (1 << 2)) != 0,
+ (rewrites_found & (1 << 3)) != 0);
- if (snapshot_list_has_ancestor(c, &s, k.k->p.snapshot))
- continue;
+ prt_str(&buf, "\nold: ");
+ bch2_bkey_val_to_text(&buf, c, old);
- update = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
+ prt_str(&buf, "\nnew: ");
+ bch2_bkey_val_to_text(&buf, c, new);
- ret = PTR_ERR_OR_ZERO(update);
- if (ret)
- break;
-
- bkey_init(&update->k);
- update->k.p = new_pos;
- update->k.p.snapshot = k.k->p.snapshot;
-
- bch2_trans_iter_init(trans, &update_iter, id, update->k.p,
- BTREE_ITER_NOT_EXTENTS|
- BTREE_ITER_ALL_SNAPSHOTS|
- BTREE_ITER_INTENT);
- ret = bch2_btree_iter_traverse(&update_iter) ?:
- bch2_trans_update(trans, &update_iter, update,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
- bch2_trans_iter_exit(trans, &update_iter);
- if (ret)
- break;
+ prt_str(&buf, "\nwrote: ");
+ bch2_bkey_val_to_text(&buf, c, wrote);
- ret = snapshot_list_add(c, &s, k.k->p.snapshot);
- if (ret)
- break;
- }
+ if (insert) {
+ prt_str(&buf, "\ninsert: ");
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
}
- bch2_trans_iter_exit(trans, &iter);
- darray_exit(&s);
-
- return ret;
-}
-static void bch2_bkey_mark_dev_cached(struct bkey_s k, unsigned dev)
-{
- struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
- struct bch_extent_ptr *ptr;
-
- bkey_for_each_ptr(ptrs, ptr)
- if (ptr->dev == dev)
- ptr->cached = true;
+ trace_move_extent_fail(c, buf.buf);
+ printbuf_exit(&buf);
}
-static int bch2_data_update_index_update(struct bch_write_op *op)
+static int __bch2_data_update_index_update(struct btree_trans *trans,
+ struct bch_write_op *op)
{
struct bch_fs *c = op->c;
- struct btree_trans trans;
struct btree_iter iter;
struct data_update *m =
container_of(op, struct data_update, op);
bch2_bkey_buf_init(&_insert);
bch2_bkey_buf_realloc(&_insert, c, U8_MAX);
- bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
-
- bch2_trans_iter_init(&trans, &iter, m->btree_id,
+ bch2_trans_iter_init(trans, &iter, m->btree_id,
bkey_start_pos(&bch2_keylist_front(keys)->k),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
while (1) {
struct bkey_s_c k;
struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
- struct bkey_i *insert;
+ struct bkey_i *insert = NULL;
struct bkey_i_extent *new;
- const union bch_extent_entry *entry;
+ const union bch_extent_entry *entry_c;
+ union bch_extent_entry *entry;
struct extent_ptr_decoded p;
+ struct bch_extent_ptr *ptr;
+ const struct bch_extent_ptr *ptr_c;
struct bpos next_pos;
- bool did_work = false;
bool should_check_enospc;
s64 i_sectors_delta = 0, disk_sectors_delta = 0;
- unsigned i;
+ unsigned rewrites_found = 0, durability, i;
- bch2_trans_begin(&trans);
+ bch2_trans_begin(trans);
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
new = bkey_i_to_extent(bch2_keylist_front(keys));
- if (!bch2_extents_match(k, old))
- goto nomatch;
+ if (!bch2_extents_match(k, old)) {
+ trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i),
+ NULL, "no match:");
+ goto nowork;
+ }
bkey_reassemble(_insert.k, k);
insert = _insert.k;
* First, drop rewrite_ptrs from @new:
*/
i = 0;
- bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry) {
+ bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry_c) {
if (((1U << i) & m->data_opts.rewrite_ptrs) &&
- bch2_extent_has_ptr(old, p, bkey_i_to_s_c(insert))) {
+ (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
+ !ptr->cached) {
+ bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), ptr);
/*
- * If we're going to be adding a pointer to the
- * same device, we have to drop the old one -
- * otherwise, we can just mark it cached:
- */
- if (bch2_bkey_has_device(bkey_i_to_s_c(&new->k_i), p.ptr.dev))
- bch2_bkey_drop_device_noerror(bkey_i_to_s(insert), p.ptr.dev);
- else
- bch2_bkey_mark_dev_cached(bkey_i_to_s(insert), p.ptr.dev);
+ * See comment below:
+ bch2_extent_ptr_set_cached(bkey_i_to_s(insert), ptr);
+ */
+ rewrites_found |= 1U << i;
}
i++;
}
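+ /*
+  * We were asked to rewrite pointers but found none to drop, and the
+  * extent already has sufficient durability - we raced with another
+  * move and there's nothing to do:
+  */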
+ if (m->data_opts.rewrite_ptrs &&
+ !rewrites_found &&
+ bch2_bkey_durability(c, k) >= m->op.opts.data_replicas) {
+ trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "no rewrites found:");
+ goto nowork;
+ }
- /* Add new ptrs: */
- extent_for_each_ptr_decode(extent_i_to_s(new), p, entry) {
- if (bch2_bkey_has_device(bkey_i_to_s_c(insert), p.ptr.dev)) {
- /*
- * raced with another move op? extent already
- * has a pointer to the device we just wrote
- * data to
- */
- continue;
+ /*
+ * A replica that we just wrote might conflict with a replica
+ * that we want to keep, due to racing with another move:
+ */
+restart_drop_conflicting_replicas:
+ extent_for_each_ptr(extent_i_to_s(new), ptr)
+ if ((ptr_c = bch2_bkey_has_device_c(bkey_i_to_s_c(insert), ptr->dev)) &&
+ !ptr_c->cached) {
+ bch2_bkey_drop_ptr_noerror(bkey_i_to_s(&new->k_i), ptr);
+ goto restart_drop_conflicting_replicas;
}
- bch2_extent_ptr_decoded_append(insert, &p);
- did_work = true;
+ if (!bkey_val_u64s(&new->k)) {
+ trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "new replicas conflicted:");
+ goto nowork;
}
- if (!did_work)
- goto nomatch;
+ /* Now, drop pointers that conflict with what we just wrote: */
+ extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
+ if ((ptr = bch2_bkey_has_device(bkey_i_to_s(insert), p.ptr.dev)))
+ bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), ptr);
+
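+ /*
+  * Combined durability of the replicas we're keeping plus the ones we
+  * just wrote, used below to decide how many extra replicas can be
+  * dropped while still satisfying data_replicas:
+  */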
+ durability = bch2_bkey_durability(c, bkey_i_to_s_c(insert)) +
+ bch2_bkey_durability(c, bkey_i_to_s_c(&new->k_i));
+
+ /* Now, drop excess replicas: */
+restart_drop_extra_replicas:
+ bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs(bkey_i_to_s(insert)), p, entry) {
+ unsigned ptr_durability = bch2_extent_ptr_durability(c, &p);
+
+ if (!p.ptr.cached &&
+ durability - ptr_durability >= m->op.opts.data_replicas) {
+ durability -= ptr_durability;
+ bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), &entry->ptr);
+ /*
+ * Currently, we're dropping unneeded replicas
+ * instead of marking them as cached, since
+ * cached data in stripe buckets prevents them
+ * from being reused:
+ bch2_extent_ptr_set_cached(bkey_i_to_s(insert), &entry->ptr);
+ */
+ goto restart_drop_extra_replicas;
+ }
+ }
+
+ /* Finally, add the pointers we just wrote: */
+ extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
+ bch2_extent_ptr_decoded_append(insert, &p);
bch2_bkey_narrow_crcs(insert, (struct bch_extent_crc_unpacked) { 0 });
bch2_extent_normalize(c, bkey_i_to_s(insert));
- ret = bch2_sum_sector_overwrites(&trans, &iter, insert,
+ ret = bch2_sum_sector_overwrites(trans, &iter, insert,
&should_check_enospc,
&i_sectors_delta,
&disk_sectors_delta);
next_pos = insert->k.p;
- ret = insert_snapshot_whiteouts(&trans, m->btree_id,
- k.k->p, insert->k.p) ?:
- bch2_trans_update(&trans, &iter, insert,
+ ret = bch2_insert_snapshot_whiteouts(trans, m->btree_id,
+ k.k->p, bkey_start_pos(&insert->k)) ?:
+ bch2_insert_snapshot_whiteouts(trans, m->btree_id,
+ k.k->p, insert->k.p) ?:
+ bch2_bkey_set_needs_rebalance(c, insert,
+ op->opts.background_target,
+ op->opts.background_compression) ?:
+ bch2_trans_update(trans, &iter, insert,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
- bch2_trans_commit(&trans, &op->res,
- op_journal_seq(op),
+ bch2_trans_commit(trans, &op->res,
+ NULL,
+ BTREE_INSERT_NOCHECK_RW|
BTREE_INSERT_NOFAIL|
m->data_opts.btree_insert_flags);
if (!ret) {
bch2_btree_iter_set_pos(&iter, next_pos);
this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
- trace_move_extent_finish(&new->k);
+ trace_move_extent_finish2(c, bkey_i_to_s_c(&new->k_i));
}
err:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
if (ret)
break;
next:
- while (bkey_cmp(iter.pos, bch2_keylist_front(keys)->k.p) >= 0) {
+ while (bkey_ge(iter.pos, bch2_keylist_front(keys)->k.p)) {
bch2_keylist_pop_front(keys);
if (bch2_keylist_empty(keys))
goto out;
}
continue;
-nomatch:
- if (m->ctxt) {
+nowork:
+ if (m->stats) {
BUG_ON(k.k->p.offset <= iter.pos.offset);
- atomic64_inc(&m->ctxt->stats->keys_raced);
+ atomic64_inc(&m->stats->keys_raced);
atomic64_add(k.k->p.offset - iter.pos.offset,
- &m->ctxt->stats->sectors_raced);
+ &m->stats->sectors_raced);
}
- this_cpu_add(c->counters[BCH_COUNTER_move_extent_race], new->k.size);
- trace_move_extent_race(&new->k);
+ this_cpu_inc(c->counters[BCH_COUNTER_move_extent_fail]);
bch2_btree_iter_advance(&iter);
goto next;
}
out:
- bch2_trans_iter_exit(&trans, &iter);
- bch2_trans_exit(&trans);
+ bch2_trans_iter_exit(trans, &iter);
bch2_bkey_buf_exit(&_insert, c);
bch2_bkey_buf_exit(&_new, c);
BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
return ret;
}
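+/* Run the index update in its own btree transaction: */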
+int bch2_data_update_index_update(struct bch_write_op *op)
+{
+ return bch2_trans_run(op->c, __bch2_data_update_index_update(trans, op));
+}
+
void bch2_data_update_read_done(struct data_update *m,
- struct bch_extent_crc_unpacked crc,
- struct closure *cl)
+ struct bch_extent_crc_unpacked crc)
{
/* write bio must own pages: */
BUG_ON(!m->op.wbio.bio.bi_vcnt);
m->op.crc = crc;
m->op.wbio.bio.bi_iter.bi_size = crc.compressed_size << 9;
- closure_call(&m->op.cl, bch2_write, NULL, cl);
+ closure_call(&m->op.cl, bch2_write, NULL, NULL);
}
void bch2_data_update_exit(struct data_update *update)
{
struct bch_fs *c = update->op.c;
+ struct bkey_ptrs_c ptrs =
+ bch2_bkey_ptrs_c(bkey_i_to_s_c(update->k.k));
+ const struct bch_extent_ptr *ptr;
+
+ bkey_for_each_ptr(ptrs, ptr) {
+ if (c->opts.nocow_enabled)
+ bch2_bucket_nocow_unlock(&c->nocow_locks,
+ PTR_BUCKET_POS(c, ptr), 0);
+ percpu_ref_put(&bch_dev_bkey_exists(c, ptr->dev)->ref);
+ }
bch2_bkey_buf_exit(&update->k, c);
bch2_disk_reservation_put(c, &update->op.res);
bch2_bio_free_pages_pool(c, &update->op.wbio.bio);
}
-int bch2_data_update_init(struct bch_fs *c, struct data_update *m,
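+/*
+ * Unwritten extents have no data to read or write: just allocate new
+ * space and update the index to point at it, splitting the extent when
+ * a write point runs out of space:
+ */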
+void bch2_update_unwritten_extent(struct btree_trans *trans,
+ struct data_update *update)
+{
+ struct bch_fs *c = update->op.c;
+ struct bio *bio = &update->op.wbio.bio;
+ struct bkey_i_extent *e;
+ struct write_point *wp;
+ struct bch_extent_ptr *ptr;
+ struct closure cl;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret;
+
+ closure_init_stack(&cl);
+ bch2_keylist_init(&update->op.insert_keys, update->op.inline_keys);
+
+ while (bio_sectors(bio)) {
+ unsigned sectors = bio_sectors(bio);
+
+ bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
+ BTREE_ITER_SLOTS);
+ ret = lockrestart_do(trans, ({
+ k = bch2_btree_iter_peek_slot(&iter);
+ bkey_err(k);
+ }));
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (ret || !bch2_extents_match(k, bkey_i_to_s_c(update->k.k)))
+ break;
+
+ e = bkey_extent_init(update->op.insert_keys.top);
+ e->k.p = update->op.pos;
+
+ ret = bch2_alloc_sectors_start_trans(trans,
+ update->op.target,
+ false,
+ update->op.write_point,
+ &update->op.devs_have,
+ update->op.nr_replicas,
+ update->op.nr_replicas,
+ update->op.watermark,
+ 0, &cl, &wp);
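+ /*
+  * Allocation would block: drop btree locks before waiting on
+  * the closure, then retry:
+  */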
+ if (bch2_err_matches(ret, BCH_ERR_operation_blocked)) {
+ bch2_trans_unlock(trans);
+ closure_sync(&cl);
+ continue;
+ }
+
+ if (ret)
+ return;
+
+ sectors = min(sectors, wp->sectors_free);
+
+ bch2_key_resize(&e->k, sectors);
+
+ bch2_open_bucket_get(c, wp, &update->op.open_buckets);
+ bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
+ bch2_alloc_sectors_done(c, wp);
+
+ bio_advance(bio, sectors << 9);
+ update->op.pos.offset += sectors;
+
+ extent_for_each_ptr(extent_i_to_s(e), ptr)
+ ptr->unwritten = true;
+ bch2_keylist_push(&update->op.insert_keys);
+
+ ret = __bch2_data_update_index_update(trans, &update->op);
+
+ bch2_open_buckets_put(c, &update->op.open_buckets);
+
+ if (ret)
+ break;
+ }
+
+ if (closure_nr_remaining(&cl) != 1) {
+ bch2_trans_unlock(trans);
+ closure_sync(&cl);
+ }
+}
+
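+/*
+ * Set up a data update: initialize the write op, decide which pointers
+ * will be rewritten vs. kept, take device refs and nocow locks, and
+ * reserve space for any extra replicas:
+ */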
+int bch2_data_update_init(struct btree_trans *trans,
+ struct moving_context *ctxt,
+ struct data_update *m,
struct write_point_specifier wp,
struct bch_io_opts io_opts,
struct data_update_opts data_opts,
enum btree_id btree_id,
struct bkey_s_c k)
{
+ struct bch_fs *c = trans->c;
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
+ const struct bch_extent_ptr *ptr;
unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas;
+ unsigned ptrs_locked = 0;
int ret;
bch2_bkey_buf_init(&m->k);
bch2_bkey_buf_reassemble(&m->k, c, k);
m->btree_id = btree_id;
m->data_opts = data_opts;
+ m->ctxt = ctxt;
+ m->stats = ctxt ? ctxt->stats : NULL;
bch2_write_op_init(&m->op, c, io_opts);
m->op.pos = bkey_start_pos(k.k);
m->op.version = k.k->version;
- m->op.target = data_opts.target,
+ m->op.target = data_opts.target;
m->op.write_point = wp;
+ m->op.nr_replicas = 0;
m->op.flags |= BCH_WRITE_PAGES_STABLE|
BCH_WRITE_PAGES_OWNED|
BCH_WRITE_DATA_ENCODED|
- BCH_WRITE_FROM_INTERNAL|
+ BCH_WRITE_MOVE|
m->data_opts.write_flags;
- m->op.compression_type =
- bch2_compression_opt_to_type[io_opts.background_compression ?:
- io_opts.compression];
- if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE)
- m->op.alloc_reserve = RESERVE_movinggc;
- m->op.index_update_fn = bch2_data_update_index_update;
+ m->op.compression_opt = io_opts.background_compression ?: io_opts.compression;
+ m->op.watermark = m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;
+
+ bkey_for_each_ptr(ptrs, ptr)
+ percpu_ref_get(&bch_dev_bkey_exists(c, ptr->dev)->ref);
i = 0;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
- if (((1U << i) & m->data_opts.rewrite_ptrs) &&
- p.ptr.cached)
- BUG();
+ bool locked;
- if (!((1U << i) & m->data_opts.rewrite_ptrs))
- bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
+ if (((1U << i) & m->data_opts.rewrite_ptrs)) {
+ BUG_ON(p.ptr.cached);
+
+ if (crc_is_compressed(p.crc))
+ reserve_sectors += k.k->size;
- if (((1U << i) & m->data_opts.rewrite_ptrs) &&
- crc_is_compressed(p.crc))
- reserve_sectors += k.k->size;
+ m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p);
+ } else if (!p.ptr.cached) {
+ bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
+ }
/*
* op->csum_type is normally initialized from the fs/file's
if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
m->op.incompressible = true;
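+ /*
+  * Take nocow locks on the buckets we're moving data out of, so
+  * in-place writes can't race with the move; with a moving
+  * context, wait for the trylock to succeed or for in-flight
+  * reads to drain before blocking:
+  */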
+ if (c->opts.nocow_enabled) {
+ if (ctxt) {
+ move_ctxt_wait_event(ctxt,
+ (locked = bch2_bucket_nocow_trylock(&c->nocow_locks,
+ PTR_BUCKET_POS(c, &p.ptr), 0)) ||
+ !atomic_read(&ctxt->read_sectors));
+
+ if (!locked)
+ bch2_bucket_nocow_lock(&c->nocow_locks,
+ PTR_BUCKET_POS(c, &p.ptr), 0);
+ } else {
+ if (!bch2_bucket_nocow_trylock(&c->nocow_locks,
+ PTR_BUCKET_POS(c, &p.ptr), 0)) {
+ ret = -BCH_ERR_nocow_lock_blocked;
+ goto err;
+ }
+ }
+ ptrs_locked |= (1U << i);
+ }
+
i++;
}
? 0
: BCH_DISK_RESERVATION_NOFAIL);
if (ret)
- return ret;
+ goto err;
}
- m->op.nr_replicas = m->op.nr_replicas_required =
- hweight32(m->data_opts.rewrite_ptrs) + m->data_opts.extra_replicas;
+ m->op.nr_replicas += m->data_opts.extra_replicas;
+ m->op.nr_replicas_required = m->op.nr_replicas;
BUG_ON(!m->op.nr_replicas);
+
+ /* Special handling required: */
+ if (bkey_extent_is_unwritten(k))
+ return -BCH_ERR_unwritten_extent_update;
return 0;
+err:
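+ /* Unwind: release any nocow locks taken above and put device refs: */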
+ i = 0;
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ if ((1U << i) & ptrs_locked)
+ bch2_bucket_nocow_unlock(&c->nocow_locks,
+ PTR_BUCKET_POS(c, &p.ptr), 0);
+ percpu_ref_put(&bch_dev_bkey_exists(c, p.ptr.dev)->ref);
+ i++;
+ }
+
+ bch2_bkey_buf_exit(&m->k, c);
+ bch2_bio_free_pages_pool(c, &m->op.wbio.bio);
+ return ret;
}
void bch2_data_update_opts_normalize(struct bkey_s_c k, struct data_update_opts *opts)