+// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "btree_update.h"
#include "btree_update_interior.h"
+#include "btree_gc.h"
#include "btree_io.h"
#include "btree_iter.h"
+#include "btree_key_cache.h"
#include "btree_locking.h"
#include "buckets.h"
#include "debug.h"
#include "error.h"
-#include "extents.h"
+#include "extent_update.h"
#include "journal.h"
#include "journal_reclaim.h"
#include "keylist.h"
#include "replicas.h"
+#include <linux/prefetch.h>
#include <linux/sort.h>
#include <trace/events/bcachefs.h>
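+
+/*
+ * Transaction updates are kept sorted by btree id, then by descending btree
+ * level, then by key position, so that updates to the same leaf node end up
+ * adjacent:
+ */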
+static inline int btree_insert_entry_cmp(const struct btree_insert_entry *l,
+ const struct btree_insert_entry *r)
+{
+ return cmp_int(l->btree_id, r->btree_id) ?:
+ -cmp_int(l->level, r->level) ?:
+ bpos_cmp(l->k->k.p, r->k->k.p);
+}
+
+static inline bool same_leaf_as_prev(struct btree_trans *trans,
+ struct btree_insert_entry *i)
+{
+ return i != trans->updates2 &&
+ iter_l(i[0].iter)->b == iter_l(i[-1].iter)->b;
+}
+
inline void bch2_btree_node_lock_for_insert(struct bch_fs *c, struct btree *b,
struct btree_iter *iter)
{
bch2_btree_node_lock_write(b, iter);
- if (btree_node_just_written(b) &&
+ if (btree_iter_type(iter) == BTREE_ITER_CACHED)
+ return;
+
+ if (unlikely(btree_node_just_written(b)) &&
bch2_btree_post_write_cleanup(c, b))
bch2_btree_iter_reinit_node(iter, b);
bch2_btree_init_next(c, b, iter);
}
-static void btree_trans_lock_write(struct bch_fs *c, struct btree_trans *trans)
-{
- struct btree_insert_entry *i;
-
- trans_for_each_update_leaf(trans, i)
- bch2_btree_node_lock_for_insert(c, i->iter->l[0].b, i->iter);
-}
-
-static void btree_trans_unlock_write(struct btree_trans *trans)
-{
- struct btree_insert_entry *i;
-
- trans_for_each_update_leaf(trans, i)
- bch2_btree_node_unlock_write(i->iter->l[0].b, i->iter);
-}
-
-static bool btree_trans_relock(struct btree_trans *trans)
-{
- struct btree_insert_entry *i;
-
- trans_for_each_update_iter(trans, i)
- return bch2_btree_iter_relock(i->iter);
- return true;
-}
-
-static void btree_trans_unlock(struct btree_trans *trans)
-{
- struct btree_insert_entry *i;
-
- trans_for_each_update_iter(trans, i) {
- bch2_btree_iter_unlock(i->iter);
- break;
- }
-}
-
-static inline int btree_trans_cmp(struct btree_insert_entry l,
- struct btree_insert_entry r)
-{
- return (l.deferred > r.deferred) - (l.deferred < r.deferred) ?:
- btree_iter_cmp(l.iter, r.iter);
-}
-
/* Inserting into a given leaf node (last stage of insert): */
/* Handle overwrites and do insert, for non extents: */
bool bch2_btree_bset_insert_key(struct btree_iter *iter,
				struct btree *b,
				struct btree_node_iter *node_iter,
struct bkey_i *insert)
{
- const struct bkey_format *f = &b->format;
struct bkey_packed *k;
- unsigned clobber_u64s;
+ unsigned clobber_u64s = 0, new_u64s = 0;
EBUG_ON(btree_node_just_written(b));
EBUG_ON(bset_written(b, btree_bset_last(b)));
EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
- EBUG_ON(bkey_cmp(bkey_start_pos(&insert->k), b->data->min_key) < 0 ||
- bkey_cmp(insert->k.p, b->data->max_key) > 0);
+ EBUG_ON(bpos_cmp(insert->k.p, b->data->min_key) < 0);
+ EBUG_ON(bpos_cmp(insert->k.p, b->data->max_key) > 0);
+ EBUG_ON(insert->k.u64s >
+ bch_btree_keys_u64s_remaining(iter->trans->c, b));
+ EBUG_ON(iter->flags & BTREE_ITER_IS_EXTENTS);
k = bch2_btree_node_iter_peek_all(node_iter, b);
- if (k && !bkey_cmp_packed(b, k, &insert->k)) {
- BUG_ON(bkey_whiteout(k));
-
- if (!bkey_written(b, k) &&
- bkey_val_u64s(&insert->k) == bkeyp_val_u64s(f, k) &&
- !bkey_whiteout(&insert->k)) {
- k->type = insert->k.type;
- memcpy_u64s(bkeyp_val(f, k), &insert->v,
- bkey_val_u64s(&insert->k));
- return true;
- }
+ if (k && bkey_cmp_left_packed(b, k, &insert->k.p))
+ k = NULL;
- insert->k.needs_whiteout = k->needs_whiteout;
+ /* @k is the key being overwritten/deleted, if any: */
+ EBUG_ON(k && bkey_deleted(k));
+
+ /* Deleting, but not found? nothing to do: */
+ if (bkey_deleted(&insert->k) && !k)
+ return false;
+ if (bkey_deleted(&insert->k)) {
+ /* Deleting: */
btree_account_key_drop(b, k);
+ k->type = KEY_TYPE_deleted;
+
+ if (k->needs_whiteout)
+ push_whiteout(iter->trans->c, b, insert->k.p);
+ k->needs_whiteout = false;
if (k >= btree_bset_last(b)->start) {
clobber_u64s = k->u64s;
-
- /*
- * If we're deleting, and the key we're deleting doesn't
- * need a whiteout (it wasn't overwriting a key that had
- * been written to disk) - just delete it:
- */
- if (bkey_whiteout(&insert->k) && !k->needs_whiteout) {
- bch2_bset_delete(b, k, clobber_u64s);
- bch2_btree_node_iter_fix(iter, b, node_iter,
- k, clobber_u64s, 0);
- bch2_btree_iter_verify(iter, b);
- return true;
- }
-
- goto overwrite;
+ bch2_bset_delete(b, k, clobber_u64s);
+ goto fix_iter;
+ } else {
+ bch2_btree_iter_fix_key_modified(iter, b, k);
}
+ return true;
+ }
+
+ if (k) {
+ /* Overwriting: */
+ btree_account_key_drop(b, k);
k->type = KEY_TYPE_deleted;
- bch2_btree_node_iter_fix(iter, b, node_iter, k,
- k->u64s, k->u64s);
- bch2_btree_iter_verify(iter, b);
- if (bkey_whiteout(&insert->k)) {
- reserve_whiteout(b, k);
- return true;
+ insert->k.needs_whiteout = k->needs_whiteout;
+ k->needs_whiteout = false;
+
+ if (k >= btree_bset_last(b)->start) {
+ clobber_u64s = k->u64s;
+ goto overwrite;
} else {
- k->needs_whiteout = false;
+ bch2_btree_iter_fix_key_modified(iter, b, k);
}
- } else {
- /*
- * Deleting, but the key to delete wasn't found - nothing to do:
- */
- if (bkey_whiteout(&insert->k))
- return false;
-
- insert->k.needs_whiteout = false;
}
k = bch2_btree_node_iter_bset_pos(node_iter, b, bset_tree_last(b));
- clobber_u64s = 0;
overwrite:
bch2_bset_insert(b, node_iter, k, insert, clobber_u64s);
- if (k->u64s != clobber_u64s || bkey_whiteout(&insert->k))
+ new_u64s = k->u64s;
+fix_iter:
+ if (clobber_u64s != new_u64s)
bch2_btree_node_iter_fix(iter, b, node_iter, k,
- clobber_u64s, k->u64s);
- bch2_btree_iter_verify(iter, b);
+ clobber_u64s, new_u64s);
return true;
}
-static void __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
+static int __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
unsigned i, u64 seq)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct btree_write *w = container_of(pin, struct btree_write, journal);
	struct btree *b = container_of(w, struct btree, writes[i]);

	btree_node_lock_type(c, b, SIX_LOCK_read);
bch2_btree_node_write_cond(c, b,
(btree_current_write(b) == w && w->journal.seq == seq));
- six_unlock_read(&b->lock);
+ six_unlock_read(&b->c.lock);
+ return 0;
}
-static void btree_node_flush0(struct journal *j, struct journal_entry_pin *pin, u64 seq)
+static int btree_node_flush0(struct journal *j, struct journal_entry_pin *pin, u64 seq)
{
return __btree_node_flush(j, pin, 0, seq);
}
-static void btree_node_flush1(struct journal *j, struct journal_entry_pin *pin, u64 seq)
+static int btree_node_flush1(struct journal *j, struct journal_entry_pin *pin, u64 seq)
{
return __btree_node_flush(j, pin, 1, seq);
}
-static inline void __btree_journal_key(struct btree_trans *trans,
- enum btree_id btree_id,
- struct bkey_i *insert)
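+
+/*
+ * Add a journal pin for sequence number @seq that is flushed by writing out
+ * this btree node:
+ */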
+inline void bch2_btree_add_journal_pin(struct bch_fs *c,
+ struct btree *b, u64 seq)
{
- struct journal *j = &trans->c->journal;
- u64 seq = trans->journal_res.seq;
- bool needs_whiteout = insert->k.needs_whiteout;
-
- /* ick */
- insert->k.needs_whiteout = false;
- bch2_journal_add_keys(j, &trans->journal_res,
- btree_id, insert);
- insert->k.needs_whiteout = needs_whiteout;
-
- bch2_journal_set_has_inode(j, &trans->journal_res,
- insert->k.p.inode);
-
- if (trans->journal_seq)
- *trans->journal_seq = seq;
-}
-
-void bch2_btree_journal_key(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_i *insert)
-{
- struct bch_fs *c = trans->c;
- struct journal *j = &c->journal;
- struct btree *b = iter->l[0].b;
struct btree_write *w = btree_current_write(b);
- EBUG_ON(iter->level || b->level);
- EBUG_ON(trans->journal_res.ref !=
- !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY));
-
- if (likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))) {
- __btree_journal_key(trans, iter->btree_id, insert);
- btree_bset_last(b)->journal_seq =
- cpu_to_le64(trans->journal_res.seq);
- }
-
- if (unlikely(!journal_pin_active(&w->journal))) {
- u64 seq = likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
- ? trans->journal_res.seq
- : j->replay_journal_seq;
-
- bch2_journal_pin_add(j, seq, &w->journal,
- btree_node_write_idx(b) == 0
- ? btree_node_flush0
- : btree_node_flush1);
- }
-
- if (unlikely(!btree_node_dirty(b)))
- set_btree_node_dirty(b);
-}
-
-static void bch2_insert_fixup_key(struct btree_trans *trans,
- struct btree_insert_entry *insert)
-{
- struct btree_iter *iter = insert->iter;
- struct btree_iter_level *l = &iter->l[0];
-
- EBUG_ON(iter->level);
- EBUG_ON(insert->k->k.u64s >
- bch_btree_keys_u64s_remaining(trans->c, l->b));
-
- if (bch2_btree_bset_insert_key(iter, l->b, &l->iter,
- insert->k))
- bch2_btree_journal_key(trans, iter, insert->k);
+ bch2_journal_pin_add(&c->journal, seq, &w->journal,
+ btree_node_write_idx(b) == 0
+ ? btree_node_flush0
+ : btree_node_flush1);
}
/**
 * btree_insert_key - insert one key into a leaf node
*/
-static void btree_insert_key_leaf(struct btree_trans *trans,
- struct btree_insert_entry *insert)
+static bool btree_insert_key_leaf(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_i *insert)
{
struct bch_fs *c = trans->c;
- struct btree_iter *iter = insert->iter;
- struct btree *b = iter->l[0].b;
- int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
+ struct btree *b = iter_l(iter)->b;
+ struct bset_tree *t = bset_tree_last(b);
+ struct bset *i = bset(b, t);
+ int old_u64s = bset_u64s(t);
int old_live_u64s = b->nr.live_u64s;
int live_u64s_added, u64s_added;
- if (!btree_node_is_extents(b))
- bch2_insert_fixup_key(trans, insert);
- else
- bch2_insert_fixup_extent(trans, insert);
+ EBUG_ON(!iter->level &&
+ !test_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags));
+
+ if (unlikely(!bch2_btree_bset_insert_key(iter, b,
+ &iter_l(iter)->iter, insert)))
+ return false;
+
+ i->journal_seq = cpu_to_le64(max(trans->journal_res.seq,
+ le64_to_cpu(i->journal_seq)));
+
+ bch2_btree_add_journal_pin(c, b, trans->journal_res.seq);
+
+ if (unlikely(!btree_node_dirty(b)))
+ set_btree_node_dirty(c, b);
live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
- u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;
+ u64s_added = (int) bset_u64s(t) - old_u64s;
if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
	if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0)
		b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);

	if (u64s_added > live_u64s_added &&
	    bch2_maybe_compact_whiteouts(c, b))
bch2_btree_iter_reinit_node(iter, b);
- trace_btree_insert_key(c, b, insert->k);
+ trace_btree_insert_key(c, b, insert);
+ return true;
}
-/* Deferred btree updates: */
+/* Cached btree updates: */
-static void deferred_update_flush(struct journal *j,
- struct journal_entry_pin *pin,
- u64 seq)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- struct deferred_update *d =
- container_of(pin, struct deferred_update, journal);
- struct journal_preres res = { 0 };
- u64 tmp[32];
- struct bkey_i *k = (void *) tmp;
- int ret;
+/* Normal update interface: */
- if (d->allocated_u64s > ARRAY_SIZE(tmp)) {
- k = kmalloc(d->allocated_u64s * sizeof(u64), GFP_NOFS);
+static inline void btree_insert_entry_checks(struct btree_trans *trans,
+ struct btree_insert_entry *i)
+{
+ BUG_ON(!i->is_extent && bpos_cmp(i->k->k.p, i->iter->real_pos));
+ BUG_ON(i->level != i->iter->level);
+ BUG_ON(i->btree_id != i->iter->btree_id);
+}
- BUG_ON(!k); /* XXX */
- }
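+
+/*
+ * Cold path for getting a journal pre-reservation: drop btree locks so we can
+ * block, then relock - restarting the transaction if relocking fails:
+ */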
+static noinline int
+bch2_trans_journal_preres_get_cold(struct btree_trans *trans, unsigned u64s)
+{
+ struct bch_fs *c = trans->c;
+ int ret;
- spin_lock(&d->lock);
- if (d->dirty) {
- BUG_ON(jset_u64s(d->k.k.u64s) > d->res.u64s);
+ bch2_trans_unlock(trans);
- swap(res, d->res);
+ ret = bch2_journal_preres_get(&c->journal,
+ &trans->journal_preres, u64s, 0);
+ if (ret)
+ return ret;
- BUG_ON(d->k.k.u64s > d->allocated_u64s);
+ if (!bch2_trans_relock(trans)) {
+ trace_trans_restart_journal_preres_get(trans->ip);
+ return -EINTR;
+ }
- bkey_copy(k, &d->k);
- d->dirty = false;
- spin_unlock(&d->lock);
+ return 0;
+}
- ret = bch2_btree_insert(c, d->btree_id, k, NULL, NULL,
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_USE_RESERVE|
- BTREE_INSERT_JOURNAL_RESERVED);
- bch2_fs_fatal_err_on(ret && !bch2_journal_error(j),
- c, "error flushing deferred btree update: %i", ret);
+static inline int bch2_trans_journal_res_get(struct btree_trans *trans,
+ unsigned flags)
+{
+ struct bch_fs *c = trans->c;
+ int ret;
- spin_lock(&d->lock);
- }
+ if (trans->flags & BTREE_INSERT_JOURNAL_RESERVED)
+ flags |= JOURNAL_RES_GET_RESERVED;
- if (!d->dirty)
- bch2_journal_pin_drop(j, &d->journal);
- spin_unlock(&d->lock);
+ ret = bch2_journal_res_get(&c->journal, &trans->journal_res,
+ trans->journal_u64s, flags);
- bch2_journal_preres_put(j, &res);
- if (k != (void *) tmp)
- kfree(k);
+ return ret == -EAGAIN ? BTREE_INSERT_NEED_JOURNAL_RES : ret;
}
-static void btree_insert_key_deferred(struct btree_trans *trans,
- struct btree_insert_entry *insert)
+static enum btree_insert_ret
+btree_key_can_insert(struct btree_trans *trans,
+ struct btree_iter *iter,
+ unsigned u64s)
{
struct bch_fs *c = trans->c;
- struct journal *j = &c->journal;
- struct deferred_update *d = insert->d;
- int difference;
+ struct btree *b = iter_l(iter)->b;
- BUG_ON(trans->flags & BTREE_INSERT_JOURNAL_REPLAY);
- BUG_ON(insert->k->u64s > d->allocated_u64s);
+ if (!bch2_btree_node_insert_fits(c, b, u64s))
+ return BTREE_INSERT_BTREE_NODE_FULL;
- __btree_journal_key(trans, d->btree_id, insert->k);
+ return BTREE_INSERT_OK;
+}
- spin_lock(&d->lock);
- BUG_ON(jset_u64s(insert->k->u64s) >
- trans->journal_preres.u64s);
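+
+/*
+ * For updates through the key cache: check whether we need to wait on journal
+ * reclaim (which bounds the number of dirty cached keys), and reallocate the
+ * cached key's buffer if the new key doesn't fit:
+ */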
+static enum btree_insert_ret
+btree_key_can_insert_cached(struct btree_trans *trans,
+ struct btree_iter *iter,
+ unsigned u64s)
+{
+ struct bkey_cached *ck = (void *) iter->l[0].b;
+ unsigned new_u64s;
+ struct bkey_i *new_k;
- difference = jset_u64s(insert->k->u64s) - d->res.u64s;
- if (difference > 0) {
- trans->journal_preres.u64s -= difference;
- d->res.u64s += difference;
- }
+ BUG_ON(iter->level);
- bkey_copy(&d->k, insert->k);
- d->dirty = true;
+ if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
+ bch2_btree_key_cache_must_wait(trans->c) &&
+ !(trans->flags & BTREE_INSERT_JOURNAL_RECLAIM))
+ return BTREE_INSERT_NEED_JOURNAL_RECLAIM;
- bch2_journal_pin_update(j, trans->journal_res.seq, &d->journal,
- deferred_update_flush);
- spin_unlock(&d->lock);
-}
+ /*
+	 * bch2_varint_decode can read up to 7 bytes past the end of the
+	 * buffer (the bytes read past the end are never used):
+ */
+ u64s += 1;
-void bch2_deferred_update_free(struct bch_fs *c,
- struct deferred_update *d)
-{
- deferred_update_flush(&c->journal, &d->journal, 0);
+ if (u64s <= ck->u64s)
+ return BTREE_INSERT_OK;
- BUG_ON(journal_pin_active(&d->journal));
+ new_u64s = roundup_pow_of_two(u64s);
+ new_k = krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOFS);
+ if (!new_k)
+ return -ENOMEM;
- bch2_journal_pin_flush(&c->journal, &d->journal);
- kfree(d);
+ ck->u64s = new_u64s;
+ ck->k = new_k;
+ return BTREE_INSERT_OK;
}
-struct deferred_update *
-bch2_deferred_update_alloc(struct bch_fs *c,
- enum btree_id btree_id,
- unsigned u64s)
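+
+/*
+ * Do the insert - into the leaf node, or into the key cache for cached
+ * iterators - then, if anything changed, journal the new key:
+ */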
+static inline void do_btree_insert_one(struct btree_trans *trans,
+ struct btree_insert_entry *i)
{
- struct deferred_update *d;
+ struct bch_fs *c = trans->c;
+ struct journal *j = &c->journal;
+ bool did_work;
- BUG_ON(u64s > U8_MAX);
+ EBUG_ON(trans->journal_res.ref !=
+ !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY));
- d = kmalloc(offsetof(struct deferred_update, k) +
- u64s * sizeof(u64), GFP_NOFS);
- BUG_ON(!d);
+ i->k->k.needs_whiteout = false;
- memset(d, 0, offsetof(struct deferred_update, k));
+ did_work = (btree_iter_type(i->iter) != BTREE_ITER_CACHED)
+ ? btree_insert_key_leaf(trans, i->iter, i->k)
+ : bch2_btree_insert_key_cached(trans, i->iter, i->k);
+ if (!did_work)
+ return;
- spin_lock_init(&d->lock);
- d->allocated_u64s = u64s;
- d->btree_id = btree_id;
+ if (likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))) {
+ bch2_journal_add_keys(j, &trans->journal_res,
+ i->btree_id,
+ i->level,
+ i->k);
- return d;
+ bch2_journal_set_has_inode(j, &trans->journal_res,
+ i->k->k.p.inode);
+
+ if (trans->journal_seq)
+ *trans->journal_seq = trans->journal_res.seq;
+ }
}
-/* Normal update interface: */
+static noinline void bch2_btree_iter_unlock_noinline(struct btree_iter *iter)
+{
+ __bch2_btree_iter_unlock(iter);
+}
-static inline void btree_insert_entry_checks(struct btree_trans *trans,
- struct btree_insert_entry *i)
+static noinline void bch2_trans_mark_gc(struct btree_trans *trans)
{
struct bch_fs *c = trans->c;
- enum btree_id btree_id = !i->deferred
- ? i->iter->btree_id
- : i->d->btree_id;
-
- if (!i->deferred) {
- BUG_ON(i->iter->level);
- BUG_ON(bkey_cmp(bkey_start_pos(&i->k->k), i->iter->pos));
- EBUG_ON((i->iter->flags & BTREE_ITER_IS_EXTENTS) &&
- !bch2_extent_is_atomic(i->k, i->iter));
+ struct btree_insert_entry *i;
- EBUG_ON((i->iter->flags & BTREE_ITER_IS_EXTENTS) &&
- !(trans->flags & BTREE_INSERT_ATOMIC));
+ trans_for_each_update(trans, i) {
+ /*
+ * XXX: synchronization of cached update triggers with gc
+ */
+ BUG_ON(btree_iter_type(i->iter) == BTREE_ITER_CACHED);
- bch2_btree_iter_verify_locks(i->iter);
+ if (gc_visited(c, gc_pos_btree_node(i->iter->l[0].b)))
+ bch2_mark_update(trans, i->iter, i->k, NULL,
+ i->trigger_flags|BTREE_TRIGGER_GC);
}
-
- BUG_ON(debug_check_bkeys(c) &&
- !bkey_deleted(&i->k->k) &&
- bch2_bkey_invalid(c, bkey_i_to_s_c(i->k), btree_id));
}
-static int bch2_trans_journal_preres_get(struct btree_trans *trans)
+static inline int
+bch2_trans_commit_write_locked(struct btree_trans *trans,
+ struct btree_insert_entry **stopped_at)
{
struct bch_fs *c = trans->c;
struct btree_insert_entry *i;
+ struct btree_trans_commit_hook *h;
unsigned u64s = 0;
+ bool marking = false;
int ret;
- trans_for_each_update(trans, i)
- if (i->deferred)
- u64s += jset_u64s(i->k->k.u64s);
+ if (race_fault()) {
+ trace_trans_restart_fault_inject(trans->ip);
+ return -EINTR;
+ }
- if (!u64s)
- return 0;
+ /*
+ * Check if the insert will fit in the leaf node with the write lock
+ * held, otherwise another thread could write the node changing the
+ * amount of space available:
+ */
- ret = bch2_journal_preres_get(&c->journal,
- &trans->journal_preres, u64s,
- JOURNAL_RES_GET_NONBLOCK);
- if (ret != -EAGAIN)
- return ret;
+ prefetch(&trans->c->journal.flags);
- btree_trans_unlock(trans);
+ h = trans->hooks;
+ while (h) {
+ ret = h->fn(trans, h);
+ if (ret)
+ return ret;
+ h = h->next;
+ }
- ret = bch2_journal_preres_get(&c->journal,
- &trans->journal_preres, u64s, 0);
- if (ret)
- return ret;
+ trans_for_each_update2(trans, i) {
+ /* Multiple inserts might go to same leaf: */
+ if (!same_leaf_as_prev(trans, i))
+ u64s = 0;
- if (!btree_trans_relock(trans)) {
- trans_restart(" (iter relock after journal preres get blocked)");
- return -EINTR;
+ u64s += i->k->k.u64s;
+ ret = btree_iter_type(i->iter) != BTREE_ITER_CACHED
+ ? btree_key_can_insert(trans, i->iter, u64s)
+ : btree_key_can_insert_cached(trans, i->iter, u64s);
+ if (ret) {
+ *stopped_at = i;
+ return ret;
+ }
+
+ if (btree_node_type_needs_gc(i->bkey_type))
+ marking = true;
}
- return 0;
-}
+ if (marking) {
+ percpu_down_read(&c->mark_lock);
+ }
-static int bch2_trans_journal_res_get(struct btree_trans *trans,
- unsigned flags)
-{
- struct bch_fs *c = trans->c;
- struct btree_insert_entry *i;
- unsigned u64s = 0;
- int ret;
+ /* Must be called under mark_lock: */
+ if (marking && trans->fs_usage_deltas &&
+ !bch2_replicas_delta_list_marked(c, trans->fs_usage_deltas)) {
+ ret = BTREE_INSERT_NEED_MARK_REPLICAS;
+ goto err;
+ }
- if (unlikely(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
- return 0;
+ /*
+ * Don't get journal reservation until after we know insert will
+ * succeed:
+ */
+ if (likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))) {
+ ret = bch2_trans_journal_res_get(trans,
+ JOURNAL_RES_GET_NONBLOCK);
+ if (ret)
+ goto err;
+ } else {
+ trans->journal_res.seq = c->journal.replay_journal_seq;
+ }
- if (trans->flags & BTREE_INSERT_JOURNAL_RESERVED)
- flags |= JOURNAL_RES_GET_RESERVED;
+ if (unlikely(trans->extra_journal_entry_u64s)) {
+ memcpy_u64s_small(journal_res_entry(&c->journal, &trans->journal_res),
+ trans->extra_journal_entries,
+ trans->extra_journal_entry_u64s);
- trans_for_each_update(trans, i)
- u64s += jset_u64s(i->k->k.u64s);
+ trans->journal_res.offset += trans->extra_journal_entry_u64s;
+ trans->journal_res.u64s -= trans->extra_journal_entry_u64s;
+ }
- ret = bch2_journal_res_get(&c->journal, &trans->journal_res,
- u64s, flags);
+ /*
+ * Not allowed to fail after we've gotten our journal reservation - we
+ * have to use it:
+ */
- return ret == -EAGAIN ? BTREE_INSERT_NEED_JOURNAL_RES : ret;
-}
+ if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY)) {
+ if (bch2_journal_seq_verify)
+ trans_for_each_update2(trans, i)
+ i->k->k.version.lo = trans->journal_res.seq;
+ else if (bch2_inject_invalid_keys)
+ trans_for_each_update2(trans, i)
+ i->k->k.version = MAX_VERSION;
+ }
-static enum btree_insert_ret
-btree_key_can_insert(struct btree_trans *trans,
- struct btree_insert_entry *insert,
- unsigned *u64s)
-{
- struct bch_fs *c = trans->c;
- struct btree *b = insert->iter->l[0].b;
- static enum btree_insert_ret ret;
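+
+	/* Run in-memory accounting (mark) triggers: */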
+ trans_for_each_update(trans, i)
+ if (BTREE_NODE_TYPE_HAS_MEM_TRIGGERS & (1U << i->bkey_type))
+ bch2_mark_update(trans, i->iter, i->k,
+ NULL, i->trigger_flags);
- if (unlikely(btree_node_fake(b)))
- return BTREE_INSERT_BTREE_NODE_FULL;
+ if (marking && trans->fs_usage_deltas)
+ bch2_trans_fs_usage_apply(trans, trans->fs_usage_deltas);
- ret = !btree_node_is_extents(b)
- ? BTREE_INSERT_OK
- : bch2_extent_can_insert(trans, insert, u64s);
- if (ret)
- return ret;
+ if (unlikely(c->gc_pos.phase))
+ bch2_trans_mark_gc(trans);
- if (*u64s > bch_btree_keys_u64s_remaining(c, b))
- return BTREE_INSERT_BTREE_NODE_FULL;
+ trans_for_each_update2(trans, i)
+ do_btree_insert_one(trans, i);
+err:
+ if (marking) {
+ percpu_up_read(&c->mark_lock);
+ }
- return BTREE_INSERT_OK;
+ return ret;
}
-static int btree_trans_check_can_insert(struct btree_trans *trans,
- struct btree_insert_entry **stopped_at)
+static noinline int maybe_do_btree_merge(struct btree_trans *trans, struct btree_iter *iter)
{
struct btree_insert_entry *i;
- unsigned u64s = 0;
+ struct btree *b = iter_l(iter)->b;
+ struct bkey_s_c old;
+ int u64s_delta = 0;
int ret;
- trans_for_each_update_iter(trans, i) {
- /* Multiple inserts might go to same leaf: */
- if (!same_leaf_as_prev(trans, i))
- u64s = 0;
+ /*
+ * Inserting directly into interior nodes is an uncommon operation with
+	 * various weird edge cases; also, a lot of things about
+ * BTREE_ITER_NODES iters need to be audited
+ */
+ if (unlikely(btree_iter_type(iter) != BTREE_ITER_KEYS))
+ return 0;
- u64s += i->k->k.u64s;
- ret = btree_key_can_insert(trans, i, &u64s);
- if (ret) {
- *stopped_at = i;
+ BUG_ON(iter->level);
+
+ trans_for_each_update2(trans, i) {
+ if (iter_l(i->iter)->b != b)
+ continue;
+
+ old = bch2_btree_iter_peek_slot(i->iter);
+ ret = bkey_err(old);
+ if (ret)
return ret;
- }
- }
- return 0;
-}
+ u64s_delta += !bkey_deleted(&i->k->k) ? i->k->k.u64s : 0;
+ u64s_delta -= !bkey_deleted(old.k) ? old.k->u64s : 0;
+ }
-static inline void do_btree_insert_one(struct btree_trans *trans,
- struct btree_insert_entry *insert)
-{
- if (likely(!insert->deferred))
- btree_insert_key_leaf(trans, insert);
- else
- btree_insert_key_deferred(trans, insert);
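+
+	/*
+	 * If this transaction's updates shrink the node, check whether it now
+	 * wants to be merged with a sibling:
+	 */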
+ return u64s_delta <= 0
+ ? (bch2_foreground_maybe_merge(trans->c, iter, iter->level,
+ trans->flags & ~BTREE_INSERT_NOUNLOCK) ?: -EINTR)
+ : 0;
}
/*
* Get journal reservation, take write locks, and attempt to do btree update(s):
*/
-static inline int do_btree_insert_at(struct btree_trans *trans,
- struct btree_insert_entry **stopped_at)
+static inline int do_bch2_trans_commit(struct btree_trans *trans,
+ struct btree_insert_entry **stopped_at)
{
struct bch_fs *c = trans->c;
- struct bch_fs_usage *fs_usage = NULL;
struct btree_insert_entry *i;
- struct btree_iter *linked;
+ struct btree_iter *iter;
int ret;
- trans_for_each_update_iter(trans, i)
- BUG_ON(i->iter->uptodate >= BTREE_ITER_NEED_RELOCK);
+ trans_for_each_update2(trans, i) {
+ struct btree *b;
- btree_trans_lock_write(c, trans);
+ BUG_ON(!btree_node_intent_locked(i->iter, i->level));
- trans_for_each_update_iter(trans, i) {
- if (i->deferred ||
- !btree_node_type_needs_gc(i->iter->btree_id))
+ if (btree_iter_type(i->iter) == BTREE_ITER_CACHED)
continue;
- if (!fs_usage) {
- percpu_down_read(&c->mark_lock);
- fs_usage = bch2_fs_usage_scratch_get(c);
- }
-
- if (!bch2_bkey_replicas_marked_locked(c,
- bkey_i_to_s_c(i->k), true)) {
- ret = BTREE_INSERT_NEED_MARK_REPLICAS;
- goto out;
+ b = iter_l(i->iter)->b;
+ if (b->sib_u64s[0] < c->btree_foreground_merge_threshold ||
+ b->sib_u64s[1] < c->btree_foreground_merge_threshold) {
+ ret = maybe_do_btree_merge(trans, i->iter);
+ if (unlikely(ret))
+ return ret;
}
- }
-
- if (race_fault()) {
- ret = -EINTR;
- trans_restart(" (race)");
- goto out;
- }
-
- /*
- * Check if the insert will fit in the leaf node with the write lock
- * held, otherwise another thread could write the node changing the
- * amount of space available:
- */
- ret = btree_trans_check_can_insert(trans, stopped_at);
- if (ret)
- goto out;
+ }
+
+ trans_for_each_update2(trans, i)
+ BUG_ON(!btree_node_intent_locked(i->iter, i->level));
+
+ ret = bch2_journal_preres_get(&c->journal,
+ &trans->journal_preres, trans->journal_preres_u64s,
+ JOURNAL_RES_GET_NONBLOCK|
+ ((trans->flags & BTREE_INSERT_JOURNAL_RESERVED)
+ ? JOURNAL_RES_GET_RESERVED : 0));
+ if (unlikely(ret == -EAGAIN))
+ ret = bch2_trans_journal_preres_get_cold(trans,
+ trans->journal_preres_u64s);
+ if (unlikely(ret))
+ return ret;
/*
- * Don't get journal reservation until after we know insert will
- * succeed:
+ * Can't be holding any read locks when we go to take write locks:
+ * another thread could be holding an intent lock on the same node we
+ * have a read lock on, and it'll block trying to take a write lock
+ * (because we hold a read lock) and it could be blocking us by holding
+	 * its own read lock (while we're trying to take write locks).
+ *
+ * note - this must be done after bch2_trans_journal_preres_get_cold()
+ * or anything else that might call bch2_trans_relock(), since that
+ * would just retake the read locks:
*/
- ret = bch2_trans_journal_res_get(trans, JOURNAL_RES_GET_NONBLOCK);
- if (ret)
- goto out;
-
- if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY)) {
- if (journal_seq_verify(c))
- trans_for_each_update(trans, i)
- i->k->k.version.lo = trans->journal_res.seq;
- else if (inject_invalid_keys(c))
- trans_for_each_update(trans, i)
- i->k->k.version = MAX_VERSION;
+ trans_for_each_iter(trans, iter) {
+ if (iter->nodes_locked != iter->nodes_intent_locked) {
+ if (btree_iter_keep(trans, iter)) {
+ if (!bch2_btree_iter_upgrade(iter, 1)) {
+ trace_trans_restart_upgrade(trans->ip);
+ return -EINTR;
+ }
+ } else {
+ bch2_btree_iter_unlock_noinline(iter);
+ }
+ }
}
- if (trans->flags & BTREE_INSERT_NOUNLOCK) {
- /*
- * linked iterators that weren't being updated may or may not
- * have been traversed/locked, depending on what the caller was
- * doing:
- */
- trans_for_each_update_iter(trans, i) {
- for_each_btree_iter(i->iter, linked)
- if (linked->uptodate < BTREE_ITER_NEED_RELOCK)
- linked->flags |= BTREE_ITER_NOUNLOCK;
- break;
+ trans_for_each_update2(trans, i) {
+ const char *invalid = bch2_bkey_invalid(c,
+ bkey_i_to_s_c(i->k), i->bkey_type);
+ if (invalid) {
+ char buf[200];
+
+ bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
+ bch_err(c, "invalid bkey %s on insert: %s\n", buf, invalid);
+ bch2_fatal_error(c);
}
+ btree_insert_entry_checks(trans, i);
}
+ bch2_btree_trans_verify_locks(trans);
- trans_for_each_update_iter(trans, i)
- bch2_mark_update(trans, i, fs_usage);
- if (fs_usage)
- bch2_trans_fs_usage_apply(trans, fs_usage);
+ trans_for_each_update2(trans, i)
+ if (!same_leaf_as_prev(trans, i))
+ bch2_btree_node_lock_for_insert(c,
+ iter_l(i->iter)->b, i->iter);
- trans_for_each_update(trans, i)
- do_btree_insert_one(trans, i);
-out:
- BUG_ON(ret &&
- (trans->flags & BTREE_INSERT_JOURNAL_RESERVED) &&
- trans->journal_res.ref);
+ ret = bch2_trans_commit_write_locked(trans, stopped_at);
- btree_trans_unlock_write(trans);
+ trans_for_each_update2(trans, i)
+ if (!same_leaf_as_prev(trans, i))
+ bch2_btree_node_unlock_write_inlined(iter_l(i->iter)->b,
+ i->iter);
- if (fs_usage) {
- bch2_fs_usage_scratch_put(c, fs_usage);
- percpu_up_read(&c->mark_lock);
- }
+ if (!ret && trans->journal_pin)
+ bch2_journal_pin_add(&c->journal, trans->journal_res.seq,
+ trans->journal_pin, NULL);
+ /*
+ * Drop journal reservation after dropping write locks, since dropping
+ * the journal reservation may kick off a journal write:
+ */
bch2_journal_res_put(&c->journal, &trans->journal_res);
+ if (unlikely(ret))
+ return ret;
+
+ bch2_trans_downgrade(trans);
+
+ return 0;
+}
+
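+/*
+ * Wait condition for blocking on journal reclaim: we're done on journal
+ * error, or once the key cache can accept more dirty keys; otherwise kick
+ * reclaim and keep waiting:
+ */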
+static int journal_reclaim_wait_done(struct bch_fs *c)
+{
+ int ret = bch2_journal_error(&c->journal) ?:
+ !bch2_btree_key_cache_must_wait(c);
+
+ if (!ret)
+ journal_reclaim_kick(&c->journal);
return ret;
}
/*
* if the split succeeded without dropping locks the insert will
- * still be atomic (in the BTREE_INSERT_ATOMIC sense, what the
- * caller peeked() and is overwriting won't have changed)
+ * still be atomic (what the caller peeked() and is overwriting
+ * won't have changed)
*/
#if 0
/*
* don't care if we got ENOSPC because we told split it
* couldn't block:
*/
- if (!ret || (flags & BTREE_INSERT_NOUNLOCK)) {
- trans_restart(" (split)");
+ if (!ret ||
+ ret == -EINTR ||
+ (flags & BTREE_INSERT_NOUNLOCK)) {
+ trace_trans_restart_btree_node_split(trans->ip);
ret = -EINTR;
}
break;
case BTREE_INSERT_NEED_MARK_REPLICAS:
bch2_trans_unlock(trans);
- trans_for_each_update_iter(trans, i) {
- ret = bch2_mark_bkey_replicas(c, bkey_i_to_s_c(i->k));
- if (ret)
- return ret;
- }
+ ret = bch2_replicas_delta_list_mark(c, trans->fs_usage_deltas);
+ if (ret)
+ return ret;
- if (btree_trans_relock(trans))
+ if (bch2_trans_relock(trans))
return 0;
- trans_restart(" (iter relock after marking replicas)");
+ trace_trans_restart_mark_replicas(trans->ip);
ret = -EINTR;
break;
case BTREE_INSERT_NEED_JOURNAL_RES:
- btree_trans_unlock(trans);
+ bch2_trans_unlock(trans);
+
+ if ((trans->flags & BTREE_INSERT_JOURNAL_RECLAIM) &&
+ !(trans->flags & BTREE_INSERT_JOURNAL_RESERVED))
+ return -EAGAIN;
ret = bch2_trans_journal_res_get(trans, JOURNAL_RES_GET_CHECK);
if (ret)
return ret;
- if (btree_trans_relock(trans))
+ if (bch2_trans_relock(trans))
return 0;
- trans_restart(" (iter relock after journal res get blocked)");
+ trace_trans_restart_journal_res_get(trans->ip);
ret = -EINTR;
break;
- default:
- BUG_ON(ret >= 0);
- break;
- }
+ case BTREE_INSERT_NEED_JOURNAL_RECLAIM:
+ bch2_trans_unlock(trans);
- if (ret == -EINTR) {
- trans_for_each_update_iter(trans, i) {
- int ret2 = bch2_btree_iter_traverse(i->iter);
- if (ret2) {
- trans_restart(" (traverse)");
- return ret2;
- }
+ trace_trans_blocked_journal_reclaim(trans->ip);
- BUG_ON(i->iter->uptodate > BTREE_ITER_NEED_PEEK);
- }
+ wait_event_freezable(c->journal.reclaim_wait,
+ (ret = journal_reclaim_wait_done(c)));
+ if (ret < 0)
+ return ret;
- /*
- * BTREE_ITER_ATOMIC means we have to return -EINTR if we
- * dropped locks:
- */
- if (!(flags & BTREE_INSERT_ATOMIC))
+ if (bch2_trans_relock(trans))
return 0;
- trans_restart(" (atomic)");
+ trace_trans_restart_journal_reclaim(trans->ip);
+ ret = -EINTR;
+ break;
+ default:
+ BUG_ON(ret >= 0);
+ break;
}
return ret;
}
-/**
- * __bch_btree_insert_at - insert keys at given iterator positions
- *
- * This is main entry point for btree updates.
- *
- * Return values:
- * -EINTR: locking changed, this function should be called again. Only returned
- * if passed BTREE_INSERT_ATOMIC.
- * -EROFS: filesystem read only
- * -EIO: journal or btree node IO error
- */
-static int __bch2_trans_commit(struct btree_trans *trans,
- struct btree_insert_entry **stopped_at)
+static noinline int
+bch2_trans_commit_get_rw_cold(struct btree_trans *trans)
{
struct bch_fs *c = trans->c;
+ int ret;
+
+ if (likely(!(trans->flags & BTREE_INSERT_LAZY_RW)))
+ return -EROFS;
+
+ bch2_trans_unlock(trans);
+
+ ret = bch2_fs_read_write_early(c);
+ if (ret)
+ return ret;
+
+ percpu_ref_get(&c->writes);
+ return 0;
+}
+
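+/*
+ * Add an update to the updates2 list, keeping the list sorted and replacing
+ * any existing update for the same position:
+ */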
+static void __bch2_trans_update2(struct btree_trans *trans,
+ struct btree_insert_entry n)
+{
struct btree_insert_entry *i;
- struct btree_iter *linked;
+
+ btree_insert_entry_checks(trans, &n);
+
+ EBUG_ON(trans->nr_updates2 >= BTREE_ITER_MAX);
+
+ n.iter->flags |= BTREE_ITER_KEEP_UNTIL_COMMIT;
+
+ trans_for_each_update2(trans, i)
+ if (btree_insert_entry_cmp(&n, i) <= 0)
+ break;
+
+ if (i < trans->updates2 + trans->nr_updates2 &&
+ !btree_insert_entry_cmp(&n, i))
+ *i = n;
+ else
+ array_insert_item(trans->updates2, trans->nr_updates2,
+ i - trans->updates2, n);
+}
+
+static void bch2_trans_update2(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_i *insert)
+{
+ __bch2_trans_update2(trans, (struct btree_insert_entry) {
+ .bkey_type = __btree_node_type(iter->level, iter->btree_id),
+ .btree_id = iter->btree_id,
+ .level = iter->level,
+ .iter = iter,
+ .k = insert,
+ });
+}
+
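+/*
+ * Convert an extent update into a regular keyed update, using an iterator
+ * positioned at the extent's end position (extents are keyed by where they
+ * end):
+ */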
+static int extent_update_to_keys(struct btree_trans *trans,
+ struct btree_insert_entry n)
+{
int ret;
- trans_for_each_update_iter(trans, i) {
- unsigned old_locks_want = i->iter->locks_want;
- unsigned old_uptodate = i->iter->uptodate;
+ ret = bch2_extent_can_insert(trans, n.iter, n.k);
+ if (ret)
+ return ret;
- if (!bch2_btree_iter_upgrade(i->iter, 1, true)) {
- trans_restart(" (failed upgrade, locks_want %u uptodate %u)",
- old_locks_want, old_uptodate);
- ret = -EINTR;
- goto err;
+ if (bkey_deleted(&n.k->k))
+ return 0;
+
+ n.iter = bch2_trans_get_iter(trans, n.iter->btree_id, n.k->k.p,
+ BTREE_ITER_INTENT|
+ BTREE_ITER_NOT_EXTENTS);
+ n.is_extent = false;
+
+ __bch2_trans_update2(trans, n);
+ bch2_trans_iter_put(trans, n.iter);
+ return 0;
+}
+
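+/*
+ * Walk the existing extents overlapping @insert, queueing whiteouts and
+ * trimmed copies as needed so the overwritten ranges disappear:
+ */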
+static int extent_handle_overwrites(struct btree_trans *trans,
+ enum btree_id btree_id,
+ struct bkey_i *insert)
+{
+ struct btree_iter *iter, *update_iter;
+ struct bpos start = bkey_start_pos(&insert->k);
+ struct bkey_i *update;
+ struct bkey_s_c k;
+ int ret = 0;
+
+ iter = bch2_trans_get_iter(trans, btree_id, start,
+ BTREE_ITER_INTENT);
+ k = bch2_btree_iter_peek_with_updates(iter);
+
+ while (k.k && !(ret = bkey_err(k))) {
+ if (bkey_cmp(insert->k.p, bkey_start_pos(k.k)) <= 0)
+ break;
+
+ if (bkey_cmp(bkey_start_pos(k.k), start) < 0) {
+ update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+ if ((ret = PTR_ERR_OR_ZERO(update)))
+ break;
+
+ bkey_reassemble(update, k);
+
+ bch2_cut_back(start, update);
+
+ update_iter = bch2_trans_get_iter(trans, btree_id, update->k.p,
+ BTREE_ITER_NOT_EXTENTS|
+ BTREE_ITER_INTENT);
+ bch2_trans_update2(trans, update_iter, update);
+ bch2_trans_iter_put(trans, update_iter);
}
- if (i->iter->flags & BTREE_ITER_ERROR) {
- ret = -EIO;
- goto err;
+ if (bkey_cmp(k.k->p, insert->k.p) < 0 ||
+ (!bkey_cmp(k.k->p, insert->k.p) && bkey_deleted(&insert->k))) {
+ update = bch2_trans_kmalloc(trans, sizeof(struct bkey));
+ if ((ret = PTR_ERR_OR_ZERO(update)))
+ break;
+
+ bkey_init(&update->k);
+ update->k.p = k.k->p;
+
+ update_iter = bch2_trans_get_iter(trans, btree_id, update->k.p,
+ BTREE_ITER_NOT_EXTENTS|
+ BTREE_ITER_INTENT);
+ bch2_trans_update2(trans, update_iter, update);
+ bch2_trans_iter_put(trans, update_iter);
}
- }
- ret = do_btree_insert_at(trans, stopped_at);
- if (unlikely(ret))
- goto err;
+ if (bkey_cmp(k.k->p, insert->k.p) > 0) {
+ update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+ if ((ret = PTR_ERR_OR_ZERO(update)))
+ break;
- trans_for_each_update_leaf(trans, i)
- bch2_foreground_maybe_merge(c, i->iter, 0, trans->flags);
+ bkey_reassemble(update, k);
+ bch2_cut_front(insert->k.p, update);
- trans_for_each_update_iter(trans, i)
- bch2_btree_iter_downgrade(i->iter);
-err:
- /* make sure we didn't drop or screw up locks: */
- trans_for_each_update_iter(trans, i) {
- bch2_btree_iter_verify_locks(i->iter);
- break;
- }
+ update_iter = bch2_trans_get_iter(trans, btree_id, update->k.p,
+ BTREE_ITER_NOT_EXTENTS|
+ BTREE_ITER_INTENT);
+ bch2_trans_update2(trans, update_iter, update);
+ bch2_trans_iter_put(trans, update_iter);
+ break;
+ }
- trans_for_each_update_iter(trans, i) {
- for_each_btree_iter(i->iter, linked)
- linked->flags &= ~BTREE_ITER_NOUNLOCK;
- break;
+ k = bch2_btree_iter_next_with_updates(iter);
}
+ bch2_trans_iter_put(trans, iter);
return ret;
}
-int bch2_trans_commit(struct btree_trans *trans,
- struct disk_reservation *disk_res,
- u64 *journal_seq,
- unsigned flags)
+int __bch2_trans_commit(struct btree_trans *trans)
{
- struct bch_fs *c = trans->c;
- struct btree_insert_entry *i;
+ struct btree_insert_entry *i = NULL;
+ struct btree_iter *iter;
+ bool trans_trigger_run;
+ unsigned u64s, reset_flags = 0;
int ret = 0;
if (!trans->nr_updates)
- goto out_noupdates;
-
- /* for the sake of sanity: */
- BUG_ON(trans->nr_updates > 1 && !(flags & BTREE_INSERT_ATOMIC));
-
- if (flags & BTREE_INSERT_GC_LOCK_HELD)
- lockdep_assert_held(&c->gc_lock);
+ goto out_reset;
- if (!trans->commit_start)
- trans->commit_start = local_clock();
+ if (trans->flags & BTREE_INSERT_GC_LOCK_HELD)
+ lockdep_assert_held(&trans->c->gc_lock);
- memset(&trans->journal_res, 0, sizeof(trans->journal_res));
memset(&trans->journal_preres, 0, sizeof(trans->journal_preres));
- trans->disk_res = disk_res;
- trans->journal_seq = journal_seq;
- trans->flags = flags;
- bubble_sort(trans->updates, trans->nr_updates, btree_trans_cmp);
+ trans->journal_u64s = trans->extra_journal_entry_u64s;
+ trans->journal_preres_u64s = 0;
+
+ if (!(trans->flags & BTREE_INSERT_NOCHECK_RW) &&
+ unlikely(!percpu_ref_tryget(&trans->c->writes))) {
+ ret = bch2_trans_commit_get_rw_cold(trans);
+ if (ret)
+ goto out_reset;
+ }
+#ifdef CONFIG_BCACHEFS_DEBUG
trans_for_each_update(trans, i)
- btree_insert_entry_checks(trans, i);
+ if (btree_iter_type(i->iter) != BTREE_ITER_CACHED &&
+ !(i->trigger_flags & BTREE_TRIGGER_NORUN))
+ bch2_btree_key_cache_verify_clean(trans,
+ i->btree_id, i->k->k.p);
+#endif
- if (unlikely(!(trans->flags & BTREE_INSERT_NOCHECK_RW) &&
- !percpu_ref_tryget(&c->writes))) {
- if (likely(!(trans->flags & BTREE_INSERT_LAZY_RW)))
- return -EROFS;
+ /*
+ * Running triggers will append more updates to the list of updates as
+ * we're walking it:
+ */
+ do {
+ trans_trigger_run = false;
+
+ trans_for_each_update(trans, i) {
+ if ((BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS & (1U << i->bkey_type)) &&
+ !i->trans_triggers_run) {
+ i->trans_triggers_run = true;
+ trans_trigger_run = true;
+
+ ret = bch2_trans_mark_update(trans, i->iter, i->k,
+ i->trigger_flags);
+ if (unlikely(ret)) {
+ if (ret == -EINTR)
+ trace_trans_restart_mark(trans->ip);
+ goto out;
+ }
+ }
+ }
+ } while (trans_trigger_run);
- btree_trans_unlock(trans);
+ /* Turn extents updates into keys: */
+ trans_for_each_update(trans, i)
+ if (i->is_extent) {
+ ret = extent_handle_overwrites(trans, i->btree_id, i->k);
+ if (unlikely(ret))
+ goto out;
+ }
- ret = bch2_fs_read_write_early(c);
- if (ret)
- return ret;
+ trans_for_each_update(trans, i) {
+ ret = i->is_extent
+ ? extent_update_to_keys(trans, *i)
+ : (__bch2_trans_update2(trans, *i), 0);
+ if (unlikely(ret))
+ goto out;
+ }
- percpu_ref_get(&c->writes);
+ trans_for_each_update2(trans, i) {
+ ret = bch2_btree_iter_traverse(i->iter);
+ if (unlikely(ret)) {
+ trace_trans_restart_traverse(trans->ip);
+ goto out;
+ }
- if (!btree_trans_relock(trans)) {
+ if (unlikely(!bch2_btree_iter_upgrade(i->iter, i->level + 1))) {
+ trace_trans_restart_upgrade(trans->ip);
ret = -EINTR;
- goto err;
+ goto out;
}
+
+ BUG_ON(!btree_node_intent_locked(i->iter, i->level));
+
+ u64s = jset_u64s(i->k->k.u64s);
+ if (btree_iter_type(i->iter) == BTREE_ITER_CACHED &&
+ likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY)))
+ trans->journal_preres_u64s += u64s;
+ trans->journal_u64s += u64s;
}
retry:
- ret = bch2_trans_journal_preres_get(trans);
- if (ret)
- goto err;
+ memset(&trans->journal_res, 0, sizeof(trans->journal_res));
+
+ ret = do_bch2_trans_commit(trans, &i);
+
+ /* make sure we didn't drop or screw up locks: */
+ bch2_btree_trans_verify_locks(trans);
- ret = __bch2_trans_commit(trans, &i);
if (ret)
goto err;
-out:
- bch2_journal_preres_put(&c->journal, &trans->journal_preres);
-
- if (unlikely(!(trans->flags & BTREE_INSERT_NOCHECK_RW)))
- percpu_ref_put(&c->writes);
-out_noupdates:
- if (!ret && trans->commit_start) {
- bch2_time_stats_update(&c->times[BCH_TIME_btree_update],
- trans->commit_start);
- trans->commit_start = 0;
- }
- trans->nr_updates = 0;
+ trans_for_each_iter(trans, iter)
+ if (btree_iter_live(trans, iter) &&
+ (iter->flags & BTREE_ITER_SET_POS_AFTER_COMMIT))
+ bch2_btree_iter_set_pos(iter, iter->pos_after_commit);
+out:
+ bch2_journal_preres_put(&trans->c->journal, &trans->journal_preres);
- BUG_ON(!(trans->flags & BTREE_INSERT_ATOMIC) && ret == -EINTR);
+ if (likely(!(trans->flags & BTREE_INSERT_NOCHECK_RW)))
+ percpu_ref_put(&trans->c->writes);
+out_reset:
+ if (!ret)
+ reset_flags |= TRANS_RESET_NOTRAVERSE;
+ if (!ret && (trans->flags & BTREE_INSERT_NOUNLOCK))
+ reset_flags |= TRANS_RESET_NOUNLOCK;
+ bch2_trans_reset(trans, reset_flags);
return ret;
err:
ret = bch2_trans_commit_error(trans, i, ret);
- if (!ret)
- goto retry;
+ if (ret)
+ goto out;
- goto out;
+ goto retry;
}
-int bch2_btree_delete_at(struct btree_trans *trans,
- struct btree_iter *iter, unsigned flags)
+int bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_i *k, enum btree_trigger_flags flags)
{
- struct bkey_i k;
+ struct btree_insert_entry *i, n = (struct btree_insert_entry) {
+ .trigger_flags = flags,
+ .bkey_type = __btree_node_type(iter->level, iter->btree_id),
+ .btree_id = iter->btree_id,
+ .level = iter->level,
+ .is_extent = (iter->flags & BTREE_ITER_IS_EXTENTS) != 0,
+ .iter = iter,
+ .k = k
+ };
+
+ BUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+ BUG_ON(bkey_cmp(iter->pos,
+ n.is_extent ? bkey_start_pos(&k->k) : k->k.p));
+
+ trans_for_each_update(trans, i) {
+ BUG_ON(bkey_cmp(i->iter->pos,
+ i->is_extent ? bkey_start_pos(&i->k->k) : i->k->k.p));
+
+ BUG_ON(i != trans->updates &&
+ btree_insert_entry_cmp(i - 1, i) >= 0);
+ }
+#endif
- bkey_init(&k.k);
- k.k.p = iter->pos;
+ iter->flags |= BTREE_ITER_KEEP_UNTIL_COMMIT;
- bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &k));
- return bch2_trans_commit(trans, NULL, NULL,
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_USE_RESERVE|flags);
+ if (n.is_extent) {
+ iter->pos_after_commit = k->k.p;
+ iter->flags |= BTREE_ITER_SET_POS_AFTER_COMMIT;
+ }
+
+ /*
+ * Pending updates are kept sorted: first, find position of new update,
+ * then delete/trim any updates the new update overwrites:
+ */
+ if (!n.is_extent) {
+ trans_for_each_update(trans, i)
+ if (btree_insert_entry_cmp(&n, i) <= 0)
+ break;
+
+ if (i < trans->updates + trans->nr_updates &&
+ !btree_insert_entry_cmp(&n, i))
+ *i = n;
+ else
+ array_insert_item(trans->updates, trans->nr_updates,
+ i - trans->updates, n);
+ } else {
+ trans_for_each_update(trans, i)
+ if (btree_insert_entry_cmp(&n, i) < 0)
+ break;
+
+ while (i > trans->updates &&
+ i[-1].btree_id == n.btree_id &&
+ bkey_cmp(bkey_start_pos(&n.k->k),
+ bkey_start_pos(&i[-1].k->k)) <= 0) {
+ --i;
+ array_remove_item(trans->updates, trans->nr_updates,
+ i - trans->updates);
+ }
+
+ if (i > trans->updates &&
+ i[-1].btree_id == n.btree_id &&
+ bkey_cmp(bkey_start_pos(&n.k->k), i[-1].k->k.p) < 0)
+ bch2_cut_back(bkey_start_pos(&n.k->k), i[-1].k);
+
+ if (i < trans->updates + trans->nr_updates &&
+ i->btree_id == n.btree_id &&
+ bkey_cmp(n.k->k.p, bkey_start_pos(&i->k->k)) > 0) {
+ /* We don't handle splitting extents here: */
+ BUG_ON(bkey_cmp(bkey_start_pos(&n.k->k),
+ bkey_start_pos(&i->k->k)) > 0);
+
+ /*
+ * When we have an extent that overwrites the start of another
+ * update, trimming that extent will mean the iterator's
+ * position has to change since the iterator position has to
+ * match the extent's start pos - but we don't want to change
+ * the iterator pos if some other code is using it, so we may
+ * need to clone it:
+ */
+ if (btree_iter_live(trans, i->iter)) {
+ i->iter = bch2_trans_copy_iter(trans, i->iter);
+
+ i->iter->flags |= BTREE_ITER_KEEP_UNTIL_COMMIT;
+ bch2_trans_iter_put(trans, i->iter);
+ }
+
+ bch2_cut_front(n.k->k.p, i->k);
+ bch2_btree_iter_set_pos(i->iter, n.k->k.p);
+ }
+
+ array_insert_item(trans->updates, trans->nr_updates,
+ i - trans->updates, n);
+ }
+
+ return 0;
+}
+
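+/*
+ * Register a hook to be called during commit, with write locks held, just
+ * before the updates are inserted; hooks run in LIFO order:
+ */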
+void bch2_trans_commit_hook(struct btree_trans *trans,
+ struct btree_trans_commit_hook *h)
+{
+ h->next = trans->hooks;
+ trans->hooks = h;
+}
+
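+/*
+ * Queue an insert of @k into btree @id, with the iterator allocated from
+ * @trans; the caller is responsible for committing:
+ */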
+int __bch2_btree_insert(struct btree_trans *trans,
+ enum btree_id id, struct bkey_i *k)
+{
+ struct btree_iter *iter;
+ int ret;
+
+ iter = bch2_trans_get_iter(trans, id, bkey_start_pos(&k->k),
+ BTREE_ITER_INTENT);
+
+ ret = bch2_trans_update(trans, iter, k, 0);
+ bch2_trans_iter_put(trans, iter);
+ return ret;
}
/**
 * bch2_btree_insert - insert a key into btree @id, in its own transaction
 */
int bch2_btree_insert(struct bch_fs *c, enum btree_id id,
- struct bkey_i *k,
- struct disk_reservation *disk_res,
- u64 *journal_seq, int flags)
+ struct bkey_i *k,
+ struct disk_reservation *disk_res,
+ u64 *journal_seq, int flags)
{
- struct btree_trans trans;
- struct btree_iter *iter;
- int ret;
-
- bch2_trans_init(&trans, c);
-
- iter = bch2_trans_get_iter(&trans, id, bkey_start_pos(&k->k),
- BTREE_ITER_INTENT);
+ return bch2_trans_do(c, disk_res, journal_seq, flags,
+ __bch2_btree_insert(&trans, id, k));
+}
- bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, k));
+int bch2_btree_delete_at(struct btree_trans *trans,
+ struct btree_iter *iter, unsigned flags)
+{
+ struct bkey_i k;
- ret = bch2_trans_commit(&trans, disk_res, journal_seq, flags);
- bch2_trans_exit(&trans);
+ bkey_init(&k.k);
+ k.k.p = iter->pos;
- return ret;
+ bch2_trans_update(trans, iter, &k, 0);
+ return bch2_trans_commit(trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL|flags);
}
-/*
- * bch_btree_delete_range - delete everything within a given range
- *
- * Range is a half open interval - [start, end)
- */
-int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
- struct bpos start, struct bpos end,
- u64 *journal_seq)
+int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
+ struct bpos start, struct bpos end,
+ u64 *journal_seq)
{
- struct btree_trans trans;
struct btree_iter *iter;
struct bkey_s_c k;
int ret = 0;
- bch2_trans_init(&trans, c);
-
- iter = bch2_trans_get_iter(&trans, id, start, BTREE_ITER_INTENT);
-
+ iter = bch2_trans_get_iter(trans, id, start, BTREE_ITER_INTENT);
+retry:
while ((k = bch2_btree_iter_peek(iter)).k &&
- !(ret = btree_iter_err(k)) &&
+ !(ret = bkey_err(k)) &&
bkey_cmp(iter->pos, end) < 0) {
- unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
- /* really shouldn't be using a bare, unpadded bkey_i */
struct bkey_i delete;
+ bch2_trans_begin(trans);
+
bkey_init(&delete.k);
+ /*
+ * This could probably be more efficient for extents:
+ */
+
/*
* For extents, iter.pos won't necessarily be the same as
* bkey_start_pos(k.k) (for non extents they always will be the
		 * same).
		 */
delete.k.p = iter->pos;
- if (iter->flags & BTREE_ITER_IS_EXTENTS) {
+ if (btree_node_type_is_extents(iter->btree_id)) {
+ unsigned max_sectors =
+ KEY_SIZE_MAX & (~0 << trans->c->block_bits);
+
/* create the biggest key we can */
bch2_key_resize(&delete.k, max_sectors);
- bch2_cut_back(end, &delete.k);
- bch2_extent_trim_atomic(&delete, iter);
- }
+ bch2_cut_back(end, &delete);
- bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &delete));
+ ret = bch2_extent_trim_atomic(&delete, iter);
+ if (ret)
+ break;
+ }
- ret = bch2_trans_commit(&trans, NULL, journal_seq,
- BTREE_INSERT_ATOMIC|
+ bch2_trans_update(trans, iter, &delete, 0);
+ ret = bch2_trans_commit(trans, NULL, journal_seq,
BTREE_INSERT_NOFAIL);
- if (ret == -EINTR)
- ret = 0;
if (ret)
break;
- bch2_btree_iter_cond_resched(iter);
+ bch2_trans_cond_resched(trans);
+ }
+
+ if (ret == -EINTR) {
+ ret = 0;
+ goto retry;
}
- bch2_trans_exit(&trans);
+ bch2_trans_iter_free(trans, iter);
return ret;
}
+
+/*
+ * bch2_btree_delete_range - delete everything within a given range
+ *
+ * Range is a half open interval - [start, end)
+ */
+int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
+ struct bpos start, struct bpos end,
+ u64 *journal_seq)
+{
+ return bch2_trans_do(c, NULL, journal_seq, 0,
+ bch2_btree_delete_range_trans(&trans, id, start, end, journal_seq));
+}