#include "debug.h"
#include "extents.h"
#include "journal.h"
+#include "journal_reclaim.h"
#include "keylist.h"
#include <linux/sort.h>
{
const struct bkey_format *f = &b->format;
struct bkey_packed *k;
- struct bset_tree *t;
unsigned clobber_u64s;
EBUG_ON(btree_node_just_written(b));
if (k && !bkey_cmp_packed(b, k, &insert->k)) {
BUG_ON(bkey_whiteout(k));
- t = bch2_bkey_to_bset(b, k);
-
- if (bset_unwritten(b, bset(b, t)) &&
- bkey_val_u64s(&insert->k) == bkeyp_val_u64s(f, k)) {
- BUG_ON(bkey_whiteout(k) != bkey_whiteout(&insert->k));
-
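+		/*
+		 * If the key we're overwriting hasn't been written out yet
+		 * and the new value is the same size, we can just overwrite
+		 * the value in place:
+		 */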
+ if (!bkey_written(b, k) &&
+ bkey_val_u64s(&insert->k) == bkeyp_val_u64s(f, k) &&
+ !bkey_whiteout(&insert->k)) {
k->type = insert->k.type;
memcpy_u64s(bkeyp_val(f, k), &insert->v,
bkey_val_u64s(&insert->k));
insert->k.needs_whiteout = k->needs_whiteout;
- btree_keys_account_key_drop(&b->nr, t - b->set, k);
+ btree_account_key_drop(b, k);
- if (t == bset_tree_last(b)) {
+ if (k >= btree_bset_last(b)->start) {
clobber_u64s = k->u64s;
/*
*/
if (bkey_whiteout(&insert->k) && !k->needs_whiteout) {
bch2_bset_delete(b, k, clobber_u64s);
- bch2_btree_node_iter_fix(iter, b, node_iter, t,
- k, clobber_u64s, 0);
+ bch2_btree_node_iter_fix(iter, b, node_iter,
+ k, clobber_u64s, 0);
+ bch2_btree_iter_verify(iter, b);
return true;
}
}
k->type = KEY_TYPE_DELETED;
- bch2_btree_node_iter_fix(iter, b, node_iter, t, k,
- k->u64s, k->u64s);
+ bch2_btree_node_iter_fix(iter, b, node_iter, k,
+ k->u64s, k->u64s);
+ bch2_btree_iter_verify(iter, b);
if (bkey_whiteout(&insert->k)) {
- reserve_whiteout(b, t, k);
+ reserve_whiteout(b, k);
return true;
} else {
k->needs_whiteout = false;
insert->k.needs_whiteout = false;
}
- t = bset_tree_last(b);
- k = bch2_btree_node_iter_bset_pos(node_iter, b, t);
+ k = bch2_btree_node_iter_bset_pos(node_iter, b, bset_tree_last(b));
clobber_u64s = 0;
overwrite:
bch2_bset_insert(b, node_iter, k, insert, clobber_u64s);
if (k->u64s != clobber_u64s || bkey_whiteout(&insert->k))
- bch2_btree_node_iter_fix(iter, b, node_iter, t, k,
- clobber_u64s, k->u64s);
+ bch2_btree_node_iter_fix(iter, b, node_iter, k,
+ clobber_u64s, k->u64s);
+ bch2_btree_iter_verify(iter, b);
return true;
}
struct btree_write *w = container_of(pin, struct btree_write, journal);
struct btree *b = container_of(w, struct btree, writes[i]);
- six_lock_read(&b->lock);
- bch2_btree_node_write_dirty(c, b, NULL,
- (btree_current_write(b) == w &&
- w->journal.pin_list == journal_seq_pin(j, seq)));
+ btree_node_lock_type(c, b, SIX_LOCK_read);
+ bch2_btree_node_write_cond(c, b,
+ (btree_current_write(b) == w && w->journal.seq == seq));
six_unlock_read(&b->lock);
}
{
struct bch_fs *c = trans->c;
struct journal *j = &c->journal;
- struct btree *b = iter->nodes[0];
+ struct btree *b = iter->l[0].b;
struct btree_write *w = btree_current_write(b);
EBUG_ON(iter->level || b->level);
EBUG_ON(trans->journal_res.ref !=
!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY));
- if (!journal_pin_active(&w->journal))
- bch2_journal_pin_add(j, &trans->journal_res,
- &w->journal,
- btree_node_write_idx(b) == 0
- ? btree_node_flush0
- : btree_node_flush1);
-
- if (trans->journal_res.ref) {
+ if (likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))) {
u64 seq = trans->journal_res.seq;
bool needs_whiteout = insert->k.needs_whiteout;
/* ick */
insert->k.needs_whiteout = false;
bch2_journal_add_keys(j, &trans->journal_res,
- b->btree_id, insert);
+ iter->btree_id, insert);
insert->k.needs_whiteout = needs_whiteout;
bch2_journal_set_has_inode(j, &trans->journal_res,
btree_bset_last(b)->journal_seq = cpu_to_le64(seq);
}
- if (!btree_node_dirty(b))
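+	/*
+	 * If this btree node write isn't already pinned in the journal, pin
+	 * it now - outside of journal replay we pin at the seq of our journal
+	 * reservation; during replay there is no reservation, so pin at the
+	 * seq of the entry being replayed:
+	 */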
+ if (unlikely(!journal_pin_active(&w->journal))) {
+ u64 seq = likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
+ ? trans->journal_res.seq
+ : j->replay_journal_seq;
+
+ bch2_journal_pin_add(j, seq, &w->journal,
+ btree_node_write_idx(b) == 0
+ ? btree_node_flush0
+ : btree_node_flush1);
+ }
+
+ if (unlikely(!btree_node_dirty(b)))
set_btree_node_dirty(b);
}
struct btree_insert_entry *insert)
{
struct btree_iter *iter = insert->iter;
+ struct btree_iter_level *l = &iter->l[0];
- BUG_ON(iter->level);
- BUG_ON(insert->k->k.u64s >
- bch_btree_keys_u64s_remaining(trans->c, iter->nodes[0]));
+ EBUG_ON(iter->level);
+ EBUG_ON(insert->k->k.u64s >
+ bch_btree_keys_u64s_remaining(trans->c, l->b));
- if (bch2_btree_bset_insert_key(iter, iter->nodes[0],
- &iter->node_iters[0],
+ if (bch2_btree_bset_insert_key(iter, l->b, &l->iter,
insert->k))
bch2_btree_journal_key(trans, iter, insert->k);
return BTREE_INSERT_OK;
}
-static int inline foreground_maybe_merge(struct bch_fs *c,
- struct btree_iter *iter,
- enum btree_node_sibling sib)
-{
- struct btree *b;
-
- if (!btree_node_locked(iter, iter->level))
- return 0;
-
- b = iter->nodes[iter->level];
- if (b->sib_u64s[sib] > BTREE_FOREGROUND_MERGE_THRESHOLD(c))
- return 0;
-
- return bch2_foreground_maybe_merge(c, iter, sib);
-}
-
/**
- * btree_insert_key - insert a key one key into a leaf node
+ * btree_insert_key_leaf - insert one key into a leaf node
*/
static enum btree_insert_ret
-btree_insert_key(struct btree_insert *trans,
- struct btree_insert_entry *insert)
+btree_insert_key_leaf(struct btree_insert *trans,
+ struct btree_insert_entry *insert)
{
struct bch_fs *c = trans->c;
struct btree_iter *iter = insert->iter;
- struct btree *b = iter->nodes[0];
+ struct btree *b = iter->l[0].b;
enum btree_insert_ret ret;
int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
int old_live_u64s = b->nr.live_u64s;
int live_u64s_added, u64s_added;
- iter->flags &= ~BTREE_ITER_UPTODATE;
-
ret = !btree_node_is_extents(b)
? bch2_insert_fixup_key(trans, insert)
: bch2_insert_fixup_extent(trans, insert);
return ret;
}
+#define trans_for_each_entry(trans, i) \
+ for ((i) = (trans)->entries; (i) < (trans)->entries + (trans)->nr; (i)++)
+
+/*
+ * We sort transaction entries so that if multiple iterators point to the same
+ * leaf node they'll be adjacent:
+ */
static bool same_leaf_as_prev(struct btree_insert *trans,
struct btree_insert_entry *i)
{
- /*
- * Because we sorted the transaction entries, if multiple iterators
- * point to the same leaf node they'll always be adjacent now:
- */
return i != trans->entries &&
- i[0].iter->nodes[0] == i[-1].iter->nodes[0];
+ i[0].iter->l[0].b == i[-1].iter->l[0].b;
}
-#define trans_for_each_entry(trans, i) \
- for ((i) = (trans)->entries; (i) < (trans)->entries + (trans)->nr; (i)++)
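+/* Advance to the first entry whose iterator points at a different leaf node: */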
+static inline struct btree_insert_entry *trans_next_leaf(struct btree_insert *trans,
+ struct btree_insert_entry *i)
+{
+ struct btree *b = i->iter->l[0].b;
+
+ do {
+ i++;
+ } while (i < trans->entries + trans->nr && b == i->iter->l[0].b);
+
+ return i;
+}
+
+#define trans_for_each_leaf(trans, i) \
+ for ((i) = (trans)->entries; \
+ (i) < (trans)->entries + (trans)->nr; \
+ (i) = trans_next_leaf(trans, i))
inline void bch2_btree_node_lock_for_insert(struct bch_fs *c, struct btree *b,
struct btree_iter *iter)
{
struct btree_insert_entry *i;
- trans_for_each_entry(trans, i)
- if (!same_leaf_as_prev(trans, i))
- bch2_btree_node_lock_for_insert(c, i->iter->nodes[0], i->iter);
+ trans_for_each_leaf(trans, i)
+ bch2_btree_node_lock_for_insert(c, i->iter->l[0].b, i->iter);
}
static void multi_unlock_write(struct btree_insert *trans)
{
struct btree_insert_entry *i;
- trans_for_each_entry(trans, i)
- if (!same_leaf_as_prev(trans, i))
- bch2_btree_node_unlock_write(i->iter->nodes[0], i->iter);
+ trans_for_each_leaf(trans, i)
+ bch2_btree_node_unlock_write(i->iter->l[0].b, i->iter);
}
-static int btree_trans_entry_cmp(const void *_l, const void *_r)
+static inline int btree_trans_cmp(struct btree_insert_entry l,
+ struct btree_insert_entry r)
{
- const struct btree_insert_entry *l = _l;
- const struct btree_insert_entry *r = _r;
-
- return btree_iter_cmp(l->iter, r->iter);
+ return btree_iter_cmp(l.iter, r.iter);
}
/* Normal update interface: */
-/**
- * __bch_btree_insert_at - insert keys at given iterator positions
- *
- * This is main entry point for btree updates.
- *
- * Return values:
- * -EINTR: locking changed, this function should be called again. Only returned
- * if passed BTREE_INSERT_ATOMIC.
- * -EROFS: filesystem read only
- * -EIO: journal or btree node IO error
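+/*
+ * Check (with the node write locked - see the comment at the call site)
+ * whether an insert will fit in the given leaf node; for extent btrees
+ * bch2_extent_can_insert() may also adjust *u64s to the space actually
+ * required:
+ */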
+static enum btree_insert_ret
+btree_key_can_insert(struct btree_insert *trans,
+ struct btree_insert_entry *insert,
+ unsigned *u64s)
+{
+ struct bch_fs *c = trans->c;
+ struct btree *b = insert->iter->l[0].b;
+	enum btree_insert_ret ret;
+
+ if (unlikely(btree_node_fake(b)))
+ return BTREE_INSERT_BTREE_NODE_FULL;
+
+ ret = !btree_node_is_extents(b)
+ ? BTREE_INSERT_OK
+ : bch2_extent_can_insert(trans, insert, u64s);
+ if (ret)
+ return ret;
+
+ if (*u64s > bch_btree_keys_u64s_remaining(c, b))
+ return BTREE_INSERT_BTREE_NODE_FULL;
+
+ return BTREE_INSERT_OK;
+}
+
+/*
+ * Get journal reservation, take write locks, and attempt to do btree update(s):
*/
-int __bch2_btree_insert_at(struct btree_insert *trans)
+static inline int do_btree_insert_at(struct btree_insert *trans,
+ struct btree_iter **split,
+ bool *cycle_gc_lock)
{
struct bch_fs *c = trans->c;
struct btree_insert_entry *i;
- struct btree_iter *split = NULL;
- bool cycle_gc_lock = false;
unsigned u64s;
int ret;
- trans_for_each_entry(trans, i) {
- BUG_ON(i->iter->level);
- BUG_ON(bkey_cmp(bkey_start_pos(&i->k->k), i->iter->pos));
- BUG_ON(debug_check_bkeys(c) &&
- bch2_bkey_invalid(c, i->iter->btree_id,
- bkey_i_to_s_c(i->k)));
- }
-
- sort(trans->entries, trans->nr, sizeof(trans->entries[0]),
- btree_trans_entry_cmp, NULL);
-
- if (unlikely(!percpu_ref_tryget(&c->writes)))
- return -EROFS;
-retry_locks:
- ret = -EINTR;
trans_for_each_entry(trans, i)
- if (!bch2_btree_iter_set_locks_want(i->iter, 1))
- goto err;
-retry:
- trans->did_work = false;
+ BUG_ON(i->iter->uptodate >= BTREE_ITER_NEED_RELOCK);
+
u64s = 0;
trans_for_each_entry(trans, i)
- if (!i->done)
- u64s += jset_u64s(i->k->k.u64s + i->extra_res);
+ u64s += jset_u64s(i->k->k.u64s);
memset(&trans->journal_res, 0, sizeof(trans->journal_res));
u64s, u64s)
: 0;
if (ret)
- goto err;
+ return ret;
multi_lock_write(c, trans);
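+	/* fault injection point: simulate a race, forcing a transaction restart: */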
+ if (race_fault()) {
+ ret = -EINTR;
+ trans_restart(" (race)");
+ goto out;
+ }
+
+ /*
+ * Check if the insert will fit in the leaf node with the write lock
+ * held, otherwise another thread could write the node changing the
+ * amount of space available:
+ */
u64s = 0;
trans_for_each_entry(trans, i) {
/* Multiple inserts might go to same leaf: */
if (!same_leaf_as_prev(trans, i))
u64s = 0;
- /*
- * bch2_btree_node_insert_fits() must be called under write lock:
- * with only an intent lock, another thread can still call
- * bch2_btree_node_write(), converting an unwritten bset to a
- * written one
- */
- if (!i->done) {
- u64s += i->k->k.u64s + i->extra_res;
- if (!bch2_btree_node_insert_fits(c,
- i->iter->nodes[0], u64s)) {
- split = i->iter;
- goto unlock;
- }
- }
- }
-
- ret = 0;
- split = NULL;
- cycle_gc_lock = false;
-
- trans_for_each_entry(trans, i) {
- if (i->done)
- continue;
-
- switch (btree_insert_key(trans, i)) {
+ u64s += i->k->k.u64s;
+ switch (btree_key_can_insert(trans, i, &u64s)) {
case BTREE_INSERT_OK:
- i->done = true;
- break;
- case BTREE_INSERT_JOURNAL_RES_FULL:
- case BTREE_INSERT_NEED_TRAVERSE:
- ret = -EINTR;
- break;
- case BTREE_INSERT_NEED_RESCHED:
- ret = -EAGAIN;
break;
case BTREE_INSERT_BTREE_NODE_FULL:
- split = i->iter;
- break;
+ ret = -EINTR;
+ *split = i->iter;
+ goto out;
case BTREE_INSERT_ENOSPC:
ret = -ENOSPC;
- break;
+ goto out;
case BTREE_INSERT_NEED_GC_LOCK:
- cycle_gc_lock = true;
ret = -EINTR;
- break;
+ *cycle_gc_lock = true;
+ goto out;
default:
BUG();
}
+ }
+
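+	/*
+	 * Debug modes: journal_seq_verify() stamps each key's version with the
+	 * journal sequence it's being written at; inject_invalid_keys() writes
+	 * keys with an out-of-range version (MAX_VERSION):
+	 */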
+ if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY)) {
+ if (journal_seq_verify(c))
+ trans_for_each_entry(trans, i)
+ i->k->k.version.lo = trans->journal_res.seq;
+ else if (inject_invalid_keys(c))
+ trans_for_each_entry(trans, i)
+ i->k->k.version = MAX_VERSION;
+ }
- if (!trans->did_work && (ret || split))
+ trans_for_each_entry(trans, i) {
+ switch (btree_insert_key_leaf(trans, i)) {
+ case BTREE_INSERT_OK:
break;
+ case BTREE_INSERT_NEED_TRAVERSE:
+ BUG_ON((trans->flags & BTREE_INSERT_ATOMIC));
+ ret = -EINTR;
+ goto out;
+ default:
+ BUG();
+ }
}
-unlock:
+out:
multi_unlock_write(trans);
bch2_journal_res_put(&c->journal, &trans->journal_res);
- if (split)
- goto split;
- if (ret)
- goto err;
+ return ret;
+}
- /*
- * hack: iterators are inconsistent when they hit end of leaf, until
- * traversed again
- */
- trans_for_each_entry(trans, i)
- if (i->iter->flags & BTREE_ITER_AT_END_OF_LEAF)
- goto out;
+static inline void btree_insert_entry_checks(struct bch_fs *c,
+ struct btree_insert_entry *i)
+{
+ BUG_ON(i->iter->level);
+ BUG_ON(bkey_cmp(bkey_start_pos(&i->k->k), i->iter->pos));
+ BUG_ON(debug_check_bkeys(c) &&
+ !bkey_deleted(&i->k->k) &&
+ bch2_bkey_invalid(c, i->iter->btree_id,
+ bkey_i_to_s_c(i->k)));
+}
+
+/**
+ * __bch2_btree_insert_at - insert keys at given iterator positions
+ *
+ * This is the main entry point for btree updates.
+ *
+ * Return values:
+ * -EINTR: locking changed, this function should be called again. Only returned
+ * if passed BTREE_INSERT_ATOMIC.
+ * -EROFS: filesystem read only
+ * -EIO: journal or btree node IO error
+ */
+int __bch2_btree_insert_at(struct btree_insert *trans)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_insert_entry *i;
+ struct btree_iter *linked, *split = NULL;
+ bool cycle_gc_lock = false;
+ unsigned flags;
+ int ret;
+
+ BUG_ON(!trans->nr);
+
+ for_each_btree_iter(trans->entries[0].iter, linked)
+ bch2_btree_iter_verify_locks(linked);
+
+ /* for the sake of sanity: */
+ BUG_ON(trans->nr > 1 && !(trans->flags & BTREE_INSERT_ATOMIC));
trans_for_each_entry(trans, i)
- if (!same_leaf_as_prev(trans, i)) {
- foreground_maybe_merge(c, i->iter, btree_prev_sib);
- foreground_maybe_merge(c, i->iter, btree_next_sib);
+ btree_insert_entry_checks(c, i);
+
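+	/*
+	 * Sort the entries so that iterators pointing at the same leaf node
+	 * are adjacent - trans_for_each_leaf()/same_leaf_as_prev() rely on
+	 * this:
+	 */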
+ bubble_sort(trans->entries, trans->nr, btree_trans_cmp);
+
+ if (unlikely(!percpu_ref_tryget(&c->writes)))
+ return -EROFS;
+retry:
+ split = NULL;
+ cycle_gc_lock = false;
+
+ trans_for_each_entry(trans, i) {
+ unsigned old_locks_want = i->iter->locks_want;
+ unsigned old_uptodate = i->iter->uptodate;
+
+ if (!bch2_btree_iter_upgrade(i->iter, 1, true)) {
+ trans_restart(" (failed upgrade, locks_want %u uptodate %u)",
+ old_locks_want, old_uptodate);
+ ret = -EINTR;
+ goto err;
}
-out:
- /* make sure we didn't lose an error: */
- if (!ret && IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
- trans_for_each_entry(trans, i)
- BUG_ON(!i->done);
+ if (i->iter->flags & BTREE_ITER_ERROR) {
+ ret = -EIO;
+ goto err;
+ }
+ }
+
+ ret = do_btree_insert_at(trans, &split, &cycle_gc_lock);
+ if (unlikely(ret))
+ goto err;
+
+ trans_for_each_leaf(trans, i)
+ bch2_foreground_maybe_merge(c, i->iter, 0, trans->flags);
+
+ trans_for_each_entry(trans, i)
+ bch2_btree_iter_downgrade(i->iter);
+out:
percpu_ref_put(&c->writes);
+
+ if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
+ /* make sure we didn't drop or screw up locks: */
+ for_each_btree_iter(trans->entries[0].iter, linked) {
+ bch2_btree_iter_verify_locks(linked);
+ BUG_ON((trans->flags & BTREE_INSERT_NOUNLOCK) &&
+ trans->did_work &&
+ !btree_node_locked(linked, 0));
+ }
+ }
+
+ BUG_ON(!(trans->flags & BTREE_INSERT_ATOMIC) && ret == -EINTR);
+
return ret;
-split:
- /*
- * have to drop journal res before splitting, because splitting means
- * allocating new btree nodes, and holding a journal reservation
- * potentially blocks the allocator:
- */
- ret = bch2_btree_split_leaf(c, split, trans->flags);
- if (ret)
- goto err;
+err:
+ flags = trans->flags;
+
/*
- * if the split didn't have to drop locks the insert will still be
- * atomic (in the BTREE_INSERT_ATOMIC sense, what the caller peeked()
- * and is overwriting won't have changed)
+	 * BTREE_INSERT_NOUNLOCK means don't unlock _after_ a successful btree
+	 * update; if we haven't done anything yet it doesn't apply
*/
- goto retry_locks;
-err:
+ if (!trans->did_work)
+ flags &= ~BTREE_INSERT_NOUNLOCK;
+
+ if (split) {
+ ret = bch2_btree_split_leaf(c, split, flags);
+
+ /*
+ * if the split succeeded without dropping locks the insert will
+ * still be atomic (in the BTREE_INSERT_ATOMIC sense, what the
+ * caller peeked() and is overwriting won't have changed)
+ */
+#if 0
+ /*
+ * XXX:
+ * split -> btree node merging (of parent node) might still drop
+ * locks when we're not passing it BTREE_INSERT_NOUNLOCK
+ */
+ if (!ret && !trans->did_work)
+ goto retry;
+#endif
+
+ /*
+ * don't care if we got ENOSPC because we told split it
+ * couldn't block:
+ */
+ if (!ret || (flags & BTREE_INSERT_NOUNLOCK)) {
+ trans_restart(" (split)");
+ ret = -EINTR;
+ }
+ }
+
if (cycle_gc_lock) {
- down_read(&c->gc_lock);
+ if (!down_read_trylock(&c->gc_lock)) {
+ if (flags & BTREE_INSERT_NOUNLOCK)
+ goto out;
+
+ bch2_btree_iter_unlock(trans->entries[0].iter);
+ down_read(&c->gc_lock);
+ }
up_read(&c->gc_lock);
}
if (ret == -EINTR) {
+ if (flags & BTREE_INSERT_NOUNLOCK) {
+ trans_restart(" (can't unlock)");
+ goto out;
+ }
+
trans_for_each_entry(trans, i) {
int ret2 = bch2_btree_iter_traverse(i->iter);
if (ret2) {
ret = ret2;
+ trans_restart(" (traverse)");
goto out;
}
+
+ BUG_ON(i->iter->uptodate > BTREE_ITER_NEED_PEEK);
}
/*
-		 * BTREE_ITER_ATOMIC means we have to return -EINTR if we
+		 * BTREE_INSERT_ATOMIC means we have to return -EINTR if we
* dropped locks:
*/
- if (!(trans->flags & BTREE_INSERT_ATOMIC))
+ if (!(flags & BTREE_INSERT_ATOMIC))
goto retry;
+
+ trans_restart(" (atomic)");
}
goto out;
}
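+/*
+ * bch2_trans_commit() - commit the updates queued on a btree_trans, by
+ * packaging them up as a btree_insert and passing them to
+ * __bch2_btree_insert_at():
+ */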
+int bch2_trans_commit(struct btree_trans *trans,
+ struct disk_reservation *disk_res,
+ u64 *journal_seq,
+ unsigned flags)
+{
+ struct btree_insert insert = {
+ .c = trans->c,
+ .disk_res = disk_res,
+ .journal_seq = journal_seq,
+ .flags = flags,
+ .nr = trans->nr_updates,
+ .entries = trans->updates,
+ };
+
+ if (!trans->nr_updates)
+ return 0;
+
+ trans->nr_updates = 0;
+
+ return __bch2_btree_insert_at(&insert);
+}
+
int bch2_btree_delete_at(struct btree_iter *iter, unsigned flags)
{
struct bkey_i k;
bkey_init(&k.k);
k.k.p = iter->pos;
- return bch2_btree_insert_at(iter->c, NULL, NULL, NULL,
+ return bch2_btree_insert_at(iter->c, NULL, NULL,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_USE_RESERVE|flags,
BTREE_INSERT_ENTRY(iter, &k));
int bch2_btree_insert_list_at(struct btree_iter *iter,
struct keylist *keys,
struct disk_reservation *disk_res,
- struct extent_insert_hook *hook,
u64 *journal_seq, unsigned flags)
{
BUG_ON(flags & BTREE_INSERT_ATOMIC);
bch2_verify_keylist_sorted(keys);
while (!bch2_keylist_empty(keys)) {
- /* need to traverse between each insert */
- int ret = bch2_btree_iter_traverse(iter);
- if (ret)
- return ret;
-
- ret = bch2_btree_insert_at(iter->c, disk_res, hook,
+ int ret = bch2_btree_insert_at(iter->c, disk_res,
journal_seq, flags,
BTREE_INSERT_ENTRY(iter, bch2_keylist_front(keys)));
if (ret)
int bch2_btree_insert(struct bch_fs *c, enum btree_id id,
struct bkey_i *k,
struct disk_reservation *disk_res,
- struct extent_insert_hook *hook,
u64 *journal_seq, int flags)
{
struct btree_iter iter;
- int ret, ret2;
-
- bch2_btree_iter_init(&iter, c, id, bkey_start_pos(&k->k),
- BTREE_ITER_INTENT);
-
- ret = bch2_btree_iter_traverse(&iter);
- if (unlikely(ret))
- goto out;
-
- ret = bch2_btree_insert_at(c, disk_res, hook, journal_seq, flags,
- BTREE_INSERT_ENTRY(&iter, k));
-out: ret2 = bch2_btree_iter_unlock(&iter);
-
- return ret ?: ret2;
-}
-
-/**
- * bch_btree_update - like bch2_btree_insert(), but asserts that we're
- * overwriting an existing key
- */
-int bch2_btree_update(struct bch_fs *c, enum btree_id id,
- struct bkey_i *k, u64 *journal_seq)
-{
- struct btree_iter iter;
- struct bkey_s_c u;
int ret;
- EBUG_ON(id == BTREE_ID_EXTENTS);
-
- bch2_btree_iter_init(&iter, c, id, k->k.p,
+ bch2_btree_iter_init(&iter, c, id, bkey_start_pos(&k->k),
BTREE_ITER_INTENT);
-
- u = bch2_btree_iter_peek_with_holes(&iter);
- ret = btree_iter_err(u);
- if (ret)
- return ret;
-
- if (bkey_deleted(u.k)) {
- bch2_btree_iter_unlock(&iter);
- return -ENOENT;
- }
-
- ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq, 0,
- BTREE_INSERT_ENTRY(&iter, k));
+ ret = bch2_btree_insert_at(c, disk_res, journal_seq, flags,
+ BTREE_INSERT_ENTRY(&iter, k));
bch2_btree_iter_unlock(&iter);
+
return ret;
}
* Range is a half open interval - [start, end)
*/
int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
- struct bpos start,
- struct bpos end,
- struct bversion version,
- struct disk_reservation *disk_res,
- struct extent_insert_hook *hook,
- u64 *journal_seq)
+ struct bpos start, struct bpos end,
+ u64 *journal_seq)
{
struct btree_iter iter;
struct bkey_s_c k;
BTREE_ITER_INTENT);
while ((k = bch2_btree_iter_peek(&iter)).k &&
- !(ret = btree_iter_err(k))) {
+ !(ret = btree_iter_err(k)) &&
+ bkey_cmp(iter.pos, end) < 0) {
unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
/* really shouldn't be using a bare, unpadded bkey_i */
struct bkey_i delete;
- if (bkey_cmp(iter.pos, end) >= 0)
- break;
-
bkey_init(&delete.k);
/*
* bkey_start_pos(k.k)).
*/
delete.k.p = iter.pos;
- delete.k.version = version;
if (iter.flags & BTREE_ITER_IS_EXTENTS) {
- /*
- * The extents btree is special - KEY_TYPE_DISCARD is
- * used for deletions, not KEY_TYPE_DELETED. This is an
- * internal implementation detail that probably
- * shouldn't be exposed (internally, KEY_TYPE_DELETED is
- * used as a proxy for k->size == 0):
- */
- delete.k.type = KEY_TYPE_DISCARD;
-
/* create the biggest key we can */
bch2_key_resize(&delete.k, max_sectors);
bch2_cut_back(end, &delete.k);
}
- ret = bch2_btree_insert_at(c, disk_res, hook, journal_seq,
- BTREE_INSERT_NOFAIL,
- BTREE_INSERT_ENTRY(&iter, &delete));
+ ret = bch2_btree_insert_at(c, NULL, journal_seq,
+ BTREE_INSERT_NOFAIL,
+ BTREE_INSERT_ENTRY(&iter, &delete));
if (ret)
break;