+ if (bkey_eq(k.k->p, bkey_start_pos(&insert->k))) {
+ if (bch2_bkey_maybe_mergable(k.k, &insert->k)) {
+ ret = extent_front_merge(trans, &iter, k, &insert, flags);
+ if (ret)
+ goto err;
+ }
+
+ goto next;
+ }
+
+ while (bkey_gt(insert->k.p, bkey_start_pos(k.k))) {
+ bool front_split = bkey_lt(bkey_start_pos(k.k), start);
+ bool back_split = bkey_gt(k.k->p, insert->k.p);
+
+ /*
+ * If we're going to be splitting a compressed extent, note it
+ * so that __bch2_trans_commit() can increase our disk
+ * reservation:
+ */
+ if (((front_split && back_split) ||
+ ((front_split || back_split) && k.k->p.snapshot != insert->k.p.snapshot)) &&
+ (compressed_sectors = bch2_bkey_sectors_compressed(k)))
+ trans->extra_journal_res += compressed_sectors;
+
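+ /*
+ * The existing extent starts before the insert: keep the
+ * portion in front of the insert as its own key:
+ */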
+ if (front_split) {
+ update = bch2_bkey_make_mut(trans, k);
+ if ((ret = PTR_ERR_OR_ZERO(update)))
+ goto err;
+
+ bch2_cut_back(start, update);
+
+ bch2_trans_iter_init(trans, &update_iter, btree_id, update->k.p,
+ BTREE_ITER_NOT_EXTENTS|
+ BTREE_ITER_ALL_SNAPSHOTS|
+ BTREE_ITER_INTENT);
+ ret = bch2_btree_iter_traverse(&update_iter) ?:
+ bch2_trans_update(trans, &update_iter, update,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
+ flags);
+ bch2_trans_iter_exit(trans, &update_iter);
+
+ if (ret)
+ goto err;
+ }
+
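+ /*
+ * @k is in a different snapshot than the insert and is being
+ * split: rewrite the overlapping portion of @k in @k's own
+ * snapshot, so that snapshot's data is unchanged:
+ */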
+ if (k.k->p.snapshot != insert->k.p.snapshot &&
+ (front_split || back_split)) {
+ update = bch2_bkey_make_mut(trans, k);
+ if ((ret = PTR_ERR_OR_ZERO(update)))
+ goto err;
+
+ bch2_cut_front(start, update);
+ bch2_cut_back(insert->k.p, update);
+
+ bch2_trans_iter_init(trans, &update_iter, btree_id, update->k.p,
+ BTREE_ITER_NOT_EXTENTS|
+ BTREE_ITER_ALL_SNAPSHOTS|
+ BTREE_ITER_INTENT);
+ ret = bch2_btree_iter_traverse(&update_iter) ?:
+ bch2_trans_update(trans, &update_iter, update,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
+ flags);
+ bch2_trans_iter_exit(trans, &update_iter);
+ if (ret)
+ goto err;
+ }
+
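+ /*
+ * The existing extent ends at or before the end of the insert:
+ * delete it - or, if it's in a different snapshot, emit a
+ * whiteout at that position in the insert's snapshot:
+ */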
+ if (bkey_le(k.k->p, insert->k.p)) {
+ update = bch2_trans_kmalloc(trans, sizeof(*update));
+ if ((ret = PTR_ERR_OR_ZERO(update)))
+ goto err;
+
+ bkey_init(&update->k);
+ update->k.p = k.k->p;
+
+ if (insert->k.p.snapshot != k.k->p.snapshot) {
+ update->k.p.snapshot = insert->k.p.snapshot;
+ update->k.type = KEY_TYPE_whiteout;
+ }
+
+ bch2_trans_iter_init(trans, &update_iter, btree_id, update->k.p,
+ BTREE_ITER_NOT_EXTENTS|
+ BTREE_ITER_INTENT);
+ ret = bch2_btree_iter_traverse(&update_iter) ?:
+ bch2_trans_update(trans, &update_iter, update,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
+ flags);
+ bch2_trans_iter_exit(trans, &update_iter);
+
+ if (ret)
+ goto err;
+ }
+
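+ /*
+ * The existing extent extends past the end of the insert: keep
+ * the remaining tail, starting at the end of the insert:
+ */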
+ if (back_split) {
+ update = bch2_bkey_make_mut(trans, k);
+ if ((ret = PTR_ERR_OR_ZERO(update)))
+ goto err;
+
+ bch2_cut_front(insert->k.p, update);
+
+ ret = bch2_trans_update_by_path(trans, iter.path, update,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
+ flags);
+ if (ret)
+ goto err;
+ goto out;
+ }
+next:
+ bch2_btree_iter_advance(&iter);
+ k = bch2_btree_iter_peek_upto(&iter, POS(insert->k.p.inode, U64_MAX));
+ if ((ret = bkey_err(k)))
+ goto err;
+ if (!k.k)
+ goto out;
+ }
+
+ if (bch2_bkey_maybe_mergable(&insert->k, k.k)) {
+ ret = extent_back_merge(trans, &iter, insert, k);
+ if (ret)
+ goto err;
+ }
+out:
+ if (!bkey_deleted(&insert->k)) {
+ /*
+ * Rewinding iterators is expensive: instead, exit this one and
+ * get a new iterator at the insert's position - its path will
+ * be cloned from the one already pointing at the start of the
+ * insert:
+ */
+ bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_init(trans, &iter, btree_id, insert->k.p,
+ BTREE_ITER_NOT_EXTENTS|
+ BTREE_ITER_INTENT);
+ ret = bch2_btree_iter_traverse(&iter) ?:
+ bch2_trans_update(trans, &iter, insert, flags);
+ }
+err:
+ bch2_trans_iter_exit(trans, &iter);
+
+ return ret;
+}
+
+/*
+ * When deleting, check if we need to emit a whiteout (because we're overwriting
+ * something in an ancestor snapshot)
+ */
+static int need_whiteout_for_snapshot(struct btree_trans *trans,
+ enum btree_id btree_id, struct bpos pos)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ u32 snapshot = pos.snapshot;
+ int ret;
+
+ if (!bch2_snapshot_parent(trans->c, pos.snapshot))
+ return 0;
+
+ pos.snapshot++;
+
+ for_each_btree_key_norestart(trans, iter, btree_id, pos,
+ BTREE_ITER_ALL_SNAPSHOTS|
+ BTREE_ITER_NOPRESERVE, k, ret) {
+ if (!bkey_eq(k.k->p, pos))
+ break;
+
+ if (bch2_snapshot_is_ancestor(trans->c, snapshot,
+ k.k->p.snapshot)) {
+ ret = !bkey_whiteout(k.k);
+ break;
+ }
+ }
+ bch2_trans_iter_exit(trans, &iter);
+
+ return ret;
+}
+
+static int __must_check
+bch2_trans_update_by_path_trace(struct btree_trans *trans, struct btree_path *path,
+ struct bkey_i *k, enum btree_update_flags flags,
+ unsigned long ip);
+
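+ /*
+ * An update went to the key cache for a key that doesn't yet exist in the
+ * btree: also queue an update that writes the key to the btree itself, so
+ * the key cache stays coherent with the btree (see the coherency comment
+ * in bch2_trans_update_by_path_trace()):
+ */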
+static noinline int flush_new_cached_update(struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree_insert_entry *i,
+ enum btree_update_flags flags,
+ unsigned long ip)
+{
+ struct btree_path *btree_path;
+ int ret;
+
+ i->key_cache_already_flushed = true;
+ i->flags |= BTREE_TRIGGER_NORUN;
+
+ btree_path = bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
+ BTREE_ITER_INTENT, _THIS_IP_);
+
+ ret = bch2_btree_path_traverse(trans, btree_path, 0);
+ if (ret)
+ goto err;
+
+ btree_path_set_should_be_locked(btree_path);
+ ret = bch2_trans_update_by_path_trace(trans, btree_path, i->k, flags, ip);
+err:
+ bch2_path_put(trans, btree_path, true);
+ return ret;
+}
+
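+ /*
+ * Add an update to the transaction's sorted list of pending updates: if
+ * there's already an update at the same position, replace it; otherwise
+ * insert a new entry, saving off the key currently in the btree as the
+ * old value:
+ */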
+static int __must_check
+bch2_trans_update_by_path_trace(struct btree_trans *trans, struct btree_path *path,
+ struct bkey_i *k, enum btree_update_flags flags,
+ unsigned long ip)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_insert_entry *i, n;
+ int cmp;
+
+ EBUG_ON(!path->should_be_locked);
+ EBUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
+ EBUG_ON(!bpos_eq(k->k.p, path->pos));
+
+ n = (struct btree_insert_entry) {
+ .flags = flags,
+ .bkey_type = __btree_node_type(path->level, path->btree_id),
+ .btree_id = path->btree_id,
+ .level = path->level,
+ .cached = path->cached,
+ .path = path,
+ .k = k,
+ .ip_allocated = ip,
+ };
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+ trans_for_each_update(trans, i)
+ BUG_ON(i != trans->updates &&
+ btree_insert_entry_cmp(i - 1, i) >= 0);
+#endif
+
+ /*
+ * Pending updates are kept sorted: find the position of the new update,
+ * then either replace the existing update at that position or insert a
+ * new entry:
+ */
+ trans_for_each_update(trans, i) {
+ cmp = btree_insert_entry_cmp(&n, i);
+ if (cmp <= 0)
+ break;
+ }
+
+ /* check i first: cmp is uninitialized if there were no pending updates */
+ if (i < trans->updates + trans->nr_updates && !cmp) {
+ EBUG_ON(i->insert_trigger_run || i->overwrite_trigger_run);
+
+ bch2_path_put(trans, i->path, true);
+ i->flags = n.flags;
+ i->cached = n.cached;
+ i->k = n.k;
+ i->path = n.path;
+ i->ip_allocated = n.ip_allocated;
+ } else {
+ array_insert_item(trans->updates, trans->nr_updates,
+ i - trans->updates, n);
+
+ i->old_v = bch2_btree_path_peek_slot_exact(path, &i->old_k).v;
+ i->old_btree_u64s = !bkey_deleted(&i->old_k) ? i->old_k.u64s : 0;
+
+ if (unlikely(trans->journal_replay_not_finished)) {
+ struct bkey_i *j_k =
+ bch2_journal_keys_peek_slot(c, n.btree_id, n.level, k->k.p);
+
+ if (j_k) {
+ i->old_k = j_k->k;
+ i->old_v = &j_k->v;
+ }
+ }
+ }
+
+ __btree_path_get(i->path, true);
+
+ /*
+ * If a key is present in the key cache, it must also exist in the
+ * btree - this is necessary for cache coherency. When iterating over
+ * a btree that's cached in the key cache, the btree iter code checks
+ * the key cache - but the key has to exist in the btree for that to
+ * work:
+ */
+ if (path->cached &&
+ bkey_deleted(&i->old_k) &&
+ !(flags & BTREE_UPDATE_NO_KEY_CACHE_COHERENCY))
+ return flush_new_cached_update(trans, path, i, flags, ip);
+
+ return 0;
+}
+
+static inline int __must_check
+bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
+ struct bkey_i *k, enum btree_update_flags flags)
+{
+ return bch2_trans_update_by_path_trace(trans, path, k, flags, _RET_IP_);
+}
+
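+ /*
+ * Queue an update for the key @iter points to. Extent updates are handled
+ * by bch2_trans_update_extent(); deletions may be turned into whiteouts
+ * when the iterator is filtering snapshots; updates to cached btrees are
+ * redirected to the key cache where appropriate:
+ */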
+int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_i *k, enum btree_update_flags flags)
+{
+ struct btree_path *path = iter->update_path ?: iter->path;
+ struct bkey_cached *ck;
+ int ret;
+
+ if (iter->flags & BTREE_ITER_IS_EXTENTS)
+ return bch2_trans_update_extent(trans, iter, k, flags);
+
+ if (bkey_deleted(&k->k) &&
+ !(flags & BTREE_UPDATE_KEY_CACHE_RECLAIM) &&
+ (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)) {
+ ret = need_whiteout_for_snapshot(trans, iter->btree_id, k->k.p);
+ if (unlikely(ret < 0))
+ return ret;
+
+ if (ret)
+ k->k.type = KEY_TYPE_whiteout;
+ }
+
+ /*
+ * Ensure that updates to cached btrees go to the key cache:
+ */
+ if (!(flags & BTREE_UPDATE_KEY_CACHE_RECLAIM) &&
+ !path->cached &&
+ !path->level &&
+ btree_id_cached(trans->c, path->btree_id)) {
+ if (!iter->key_cache_path ||
+ !iter->key_cache_path->should_be_locked ||
+ !bpos_eq(iter->key_cache_path->pos, k->k.p)) {
+ if (!iter->key_cache_path)
+ iter->key_cache_path =
+ bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
+ BTREE_ITER_INTENT|
+ BTREE_ITER_CACHED, _THIS_IP_);
+
+ iter->key_cache_path =
+ bch2_btree_path_set_pos(trans, iter->key_cache_path, path->pos,
+ iter->flags & BTREE_ITER_INTENT,
+ _THIS_IP_);
+
+ ret = bch2_btree_path_traverse(trans, iter->key_cache_path,
+ BTREE_ITER_CACHED);
+ if (unlikely(ret))
+ return ret;
+
+ ck = (void *) iter->key_cache_path->l[0].b;
+
+ if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
+ trace_and_count(trans->c, trans_restart_key_cache_raced, trans, _RET_IP_);
+ return btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
+ }
+
+ btree_path_set_should_be_locked(iter->key_cache_path);
+ }
+
+ path = iter->key_cache_path;
+ }
+
+ return bch2_trans_update_by_path(trans, path, k, flags);
+}
+
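+ /*
+ * Queue an update to be flushed via the btree write buffer: if there's
+ * already a buffered update for this btree and position it's overwritten;
+ * otherwise the buffer is grown (doubling in size) as needed:
+ */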
+int __must_check bch2_trans_update_buffered(struct btree_trans *trans,
+ enum btree_id btree,
+ struct bkey_i *k)
+{
+ struct btree_write_buffered_key *i;
+ int ret;
+
+ EBUG_ON(trans->nr_wb_updates > trans->wb_updates_size);
+ EBUG_ON(k->k.u64s > BTREE_WRITE_BUFERED_U64s_MAX);
+
+ trans_for_each_wb_update(trans, i) {
+ if (i->btree == btree && bpos_eq(i->k.k.p, k->k.p)) {
+ bkey_copy(&i->k, k);
+ return 0;
+ }
+ }
+
+ if (!trans->wb_updates ||
+ trans->nr_wb_updates == trans->wb_updates_size) {
+ struct btree_write_buffered_key *u;
+
+ if (trans->nr_wb_updates == trans->wb_updates_size) {
+ struct btree_transaction_stats *s = btree_trans_stats(trans);
+
+ BUG_ON(trans->wb_updates_size > U8_MAX / 2);
+ trans->wb_updates_size = max(1, trans->wb_updates_size * 2);
+ if (s)
+ s->wb_updates_size = trans->wb_updates_size;
+ }
+
+ u = bch2_trans_kmalloc_nomemzero(trans,
+ trans->wb_updates_size *
+ sizeof(struct btree_write_buffered_key));
+ ret = PTR_ERR_OR_ZERO(u);
+ if (ret)
+ return ret;
+
+ if (trans->nr_wb_updates)
+ memcpy(u, trans->wb_updates, trans->nr_wb_updates *
+ sizeof(struct btree_write_buffered_key));
+ trans->wb_updates = u;
+ }
+
+ trans->wb_updates[trans->nr_wb_updates] = (struct btree_write_buffered_key) {
+ .btree = btree,
+ };
+
+ bkey_copy(&trans->wb_updates[trans->nr_wb_updates].k, k);
+ trans->nr_wb_updates++;
+
+ return 0;
+}
+
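+ /* Register a hook to be called when this transaction commits: */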
+void bch2_trans_commit_hook(struct btree_trans *trans,
+ struct btree_trans_commit_hook *h)
+{
+ h->next = trans->hooks;
+ trans->hooks = h;
+}
+
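+ /*
+ * Insert a single key into btree @id, as part of an already-running
+ * transaction:
+ */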
+int __bch2_btree_insert(struct btree_trans *trans,
+ enum btree_id id,
+ struct bkey_i *k, enum btree_update_flags flags)
+{
+ struct btree_iter iter;
+ int ret;
+
+ bch2_trans_iter_init(trans, &iter, id, bkey_start_pos(&k->k),
+ BTREE_ITER_INTENT);
+ ret = bch2_btree_iter_traverse(&iter) ?:
+ bch2_trans_update(trans, &iter, k, flags);
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;