Move c_src dirs back to toplevel
diff --git a/libbcachefs/btree_update.c b/libbcachefs/btree_update.c
index ba42f578f8107591ec46f2c6c90d0b562179956b..c3ff365acce9afeae894c69003d247bef9c8e955 100644
--- a/libbcachefs/btree_update.c
+++ b/libbcachefs/btree_update.c
@@ -24,7 +24,7 @@ static inline int btree_insert_entry_cmp(const struct btree_insert_entry *l,
 }
 
 static int __must_check
-bch2_trans_update_by_path(struct btree_trans *, struct btree_path *,
+bch2_trans_update_by_path(struct btree_trans *, btree_path_idx_t,
                          struct bkey_i *, enum btree_update_flags,
                          unsigned long ip);
 
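This hunk is part of the wider conversion from passing struct btree_path pointers to passing btree_path_idx_t indices into trans->paths: an index stays valid when the paths array is reallocated, while a pointer into the old array would dangle. A minimal sketch of the idea, assuming the real typedef lives elsewhere and with resolve_path() as a purely hypothetical name:

	typedef u32 btree_path_idx_t;	/* assumed width */

	static inline struct btree_path *
	resolve_path(struct btree_trans *trans, btree_path_idx_t idx)
	{
		/* Re-resolve after any call that can grow trans->paths;
		 * the index survives the realloc, a saved pointer would not. */
		return trans->paths + idx;
	}

The truthiness tests later in the diff (e.g. iter->update_path ?: iter->path) suggest that index 0 doubles as "no path".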
@@ -186,8 +186,11 @@ int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
        enum btree_id btree_id = iter->btree_id;
        struct bkey_i *update;
        struct bpos new_start = bkey_start_pos(new.k);
-       bool front_split = bkey_lt(bkey_start_pos(old.k), new_start);
-       bool back_split  = bkey_gt(old.k->p, new.k->p);
+       unsigned front_split = bkey_lt(bkey_start_pos(old.k), new_start);
+       unsigned back_split  = bkey_gt(old.k->p, new.k->p);
+       unsigned middle_split = (front_split || back_split) &&
+               old.k->p.snapshot != new.k->p.snapshot;
+       unsigned nr_splits = front_split + back_split + middle_split;
        int ret = 0, compressed_sectors;
 
        /*
@@ -195,10 +198,9 @@ int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
         * so that __bch2_trans_commit() can increase our disk
         * reservation:
         */
-       if (((front_split && back_split) ||
-            ((front_split || back_split) && old.k->p.snapshot != new.k->p.snapshot)) &&
+       if (nr_splits > 1 &&
            (compressed_sectors = bch2_bkey_sectors_compressed(old)))
-               trans->extra_journal_res += compressed_sectors;
+               trans->extra_disk_res += compressed_sectors * (nr_splits - 1);
 
        if (front_split) {
                update = bch2_bkey_make_mut_noupdate(trans, old);
@@ -216,8 +218,7 @@ int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
        }
 
        /* If we're overwriting in a different snapshot - middle split: */
-       if (old.k->p.snapshot != new.k->p.snapshot &&
-           (front_split || back_split)) {
+       if (middle_split) {
                update = bch2_bkey_make_mut_noupdate(trans, old);
                if ((ret = PTR_ERR_OR_ZERO(update)))
                        return ret;
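A worked example of the split accounting above, with hypothetical numbers:

	/*
	 * old covers [0,100) in snapshot 1; new covers [20,80) in snapshot 2:
	 *
	 *   old:  |------------- snapshot 1 -------------|    0 .. 100
	 *   new:           |--- snapshot 2 ---|               20 .. 80
	 *
	 * front_split  = 1  (old starts before new)
	 * back_split   = 1  (old ends after new)
	 * middle_split = 1  (a split exists and the snapshots differ, so
	 *                    [20,80) must be preserved in snapshot 1)
	 * nr_splits    = 3
	 *
	 * If old is compressed with compressed_sectors == 8, the two extra
	 * references to the compressed data need reservation:
	 *
	 *   extra_disk_res += 8 * (nr_splits - 1) == 16
	 */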
@@ -338,21 +339,22 @@ err:
 }
 
 static noinline int flush_new_cached_update(struct btree_trans *trans,
-                                           struct btree_path *path,
                                            struct btree_insert_entry *i,
                                            enum btree_update_flags flags,
                                            unsigned long ip)
 {
-       struct btree_path *btree_path;
        struct bkey k;
        int ret;
 
-       btree_path = bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
-                                  BTREE_ITER_INTENT, _THIS_IP_);
-       ret = bch2_btree_path_traverse(trans, btree_path, 0);
+       btree_path_idx_t path_idx =
+               bch2_path_get(trans, i->btree_id, i->old_k.p, 1, 0,
+                             BTREE_ITER_INTENT, _THIS_IP_);
+       ret = bch2_btree_path_traverse(trans, path_idx, 0);
        if (ret)
                goto out;
 
+       struct btree_path *btree_path = trans->paths + path_idx;
+
        /*
         * The old key in the insert entry might actually refer to an existing
         * key in the btree that has been deleted from cache and not yet
@@ -367,14 +369,14 @@ static noinline int flush_new_cached_update(struct btree_trans *trans,
        i->flags |= BTREE_TRIGGER_NORUN;
 
        btree_path_set_should_be_locked(btree_path);
-       ret = bch2_trans_update_by_path(trans, btree_path, i->k, flags, ip);
+       ret = bch2_trans_update_by_path(trans, path_idx, i->k, flags, ip);
 out:
-       bch2_path_put(trans, btree_path, true);
+       bch2_path_put(trans, path_idx, true);
        return ret;
 }
 
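Condensed, the new flush_new_cached_update() lifecycle reads as follows (the same calls as in the hunks above, gathered in one place for illustration):

	btree_path_idx_t path_idx =
		bch2_path_get(trans, i->btree_id, i->old_k.p, 1, 0,
			      BTREE_ITER_INTENT, _THIS_IP_);
	int ret = bch2_btree_path_traverse(trans, path_idx, 0);
	if (!ret) {
		/* resolve the pointer only after traverse, which can
		 * allocate paths and move trans->paths: */
		struct btree_path *btree_path = trans->paths + path_idx;

		btree_path_set_should_be_locked(btree_path);
		ret = bch2_trans_update_by_path(trans, path_idx, i->k, flags, ip);
	}
	bch2_path_put(trans, path_idx, true);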
 static int __must_check
-bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
+bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
                          struct bkey_i *k, enum btree_update_flags flags,
                          unsigned long ip)
 {
@@ -382,8 +384,9 @@ bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
        struct btree_insert_entry *i, n;
        int cmp;
 
+       struct btree_path *path = trans->paths + path_idx;
        EBUG_ON(!path->should_be_locked);
-       EBUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
+       EBUG_ON(trans->nr_updates >= trans->nr_paths);
        EBUG_ON(!bpos_eq(k->k.p, path->pos));
 
        n = (struct btree_insert_entry) {
@@ -392,7 +395,7 @@ bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
                .btree_id       = path->btree_id,
                .level          = path->level,
                .cached         = path->cached,
-               .path           = path,
+               .path           = path_idx,
                .k              = k,
                .ip_allocated   = ip,
        };
@@ -407,7 +410,7 @@ bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
         * Pending updates are kept sorted: first, find position of new update,
         * then delete/trim any updates the new update overwrites:
         */
-       trans_for_each_update(trans, i) {
+       for (i = trans->updates; i < trans->updates + trans->nr_updates; i++) {
                cmp = btree_insert_entry_cmp(&n, i);
                if (cmp <= 0)
                        break;
@@ -440,7 +443,7 @@ bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
                }
        }
 
-       __btree_path_get(i->path, true);
+       __btree_path_get(trans->paths + i->path, true);
 
        /*
         * If a key is present in the key cache, it must also exist in the
@@ -450,7 +453,7 @@ bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
         * work:
         */
        if (path->cached && bkey_deleted(&i->old_k))
-               return flush_new_cached_update(trans, path, i, flags, ip);
+               return flush_new_cached_update(trans, i, flags, ip);
 
        return 0;
 }
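trans->updates is kept sorted by btree_insert_entry_cmp(); the open-coded loop above performs the same scan the removed trans_for_each_update() did. A sketch of the discipline, with the overwrite/insert bodies elided:

	struct btree_insert_entry *i = trans->updates;

	while (i < trans->updates + trans->nr_updates &&
	       btree_insert_entry_cmp(&n, i) > 0)
		i++;
	/*
	 * i now points at the first entry >= n: if it compares equal
	 * (same btree and position), n overwrites it in place; otherwise
	 * n is inserted here and the tail shifts up.
	 */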
@@ -459,9 +462,11 @@ static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
                                                    struct btree_iter *iter,
                                                    struct btree_path *path)
 {
-       if (!iter->key_cache_path ||
-           !iter->key_cache_path->should_be_locked ||
-           !bpos_eq(iter->key_cache_path->pos, iter->pos)) {
+       struct btree_path *key_cache_path = btree_iter_key_cache_path(trans, iter);
+
+       if (!key_cache_path ||
+           !key_cache_path->should_be_locked ||
+           !bpos_eq(key_cache_path->pos, iter->pos)) {
                struct bkey_cached *ck;
                int ret;
 
@@ -476,19 +481,18 @@ static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
                                                iter->flags & BTREE_ITER_INTENT,
                                                _THIS_IP_);
 
-               ret = bch2_btree_path_traverse(trans, iter->key_cache_path,
-                                              BTREE_ITER_CACHED);
+               ret = bch2_btree_path_traverse(trans, iter->key_cache_path, BTREE_ITER_CACHED);
                if (unlikely(ret))
                        return ret;
 
-               ck = (void *) iter->key_cache_path->l[0].b;
+               ck = (void *) trans->paths[iter->key_cache_path].l[0].b;
 
                if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                        trace_and_count(trans->c, trans_restart_key_cache_raced, trans, _RET_IP_);
                        return btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
                }
 
-               btree_path_set_should_be_locked(iter->key_cache_path);
+               btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);
        }
 
        return 0;
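btree_iter_key_cache_path() itself is not shown in this diff; a plausible shape, assuming index 0 means "no key cache path" (consistent with the truthiness tests in this file):

	static inline struct btree_path *
	btree_iter_key_cache_path(struct btree_trans *trans, struct btree_iter *iter)
	{
		return iter->key_cache_path
			? trans->paths + iter->key_cache_path
			: NULL;
	}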
@@ -497,7 +501,7 @@ static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
 int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
                                   struct bkey_i *k, enum btree_update_flags flags)
 {
-       struct btree_path *path = iter->update_path ?: iter->path;
+       btree_path_idx_t path_idx = iter->update_path ?: iter->path;
        int ret;
 
        if (iter->flags & BTREE_ITER_IS_EXTENTS)
@@ -517,6 +521,7 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
        /*
         * Ensure that updates to cached btrees go to the key cache:
         */
+       struct btree_path *path = trans->paths + path_idx;
        if (!(flags & BTREE_UPDATE_KEY_CACHE_RECLAIM) &&
            !path->cached &&
            !path->level &&
@@ -525,63 +530,49 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
                if (ret)
                        return ret;
 
-               path = iter->key_cache_path;
+               path_idx = iter->key_cache_path;
        }
 
-       return bch2_trans_update_by_path(trans, path, k, flags, _RET_IP_);
+       return bch2_trans_update_by_path(trans, path_idx, k, flags, _RET_IP_);
 }
 
-int __must_check bch2_trans_update_buffered(struct btree_trans *trans,
-                                           enum btree_id btree,
-                                           struct bkey_i *k)
+int bch2_btree_insert_clone_trans(struct btree_trans *trans,
+                                 enum btree_id btree,
+                                 struct bkey_i *k)
 {
-       struct btree_write_buffered_key *i;
-       int ret;
-
-       EBUG_ON(trans->nr_wb_updates > trans->wb_updates_size);
-       EBUG_ON(k->k.u64s > BTREE_WRITE_BUFERED_U64s_MAX);
-
-       trans_for_each_wb_update(trans, i) {
-               if (i->btree == btree && bpos_eq(i->k.k.p, k->k.p)) {
-                       bkey_copy(&i->k, k);
-                       return 0;
-               }
-       }
-
-       if (!trans->wb_updates ||
-           trans->nr_wb_updates == trans->wb_updates_size) {
-               struct btree_write_buffered_key *u;
+       struct bkey_i *n = bch2_trans_kmalloc(trans, bkey_bytes(&k->k));
+       int ret = PTR_ERR_OR_ZERO(n);
+       if (ret)
+               return ret;
 
-               if (trans->nr_wb_updates == trans->wb_updates_size) {
-                       struct btree_transaction_stats *s = btree_trans_stats(trans);
+       bkey_copy(n, k);
+       return bch2_btree_insert_trans(trans, btree, n, 0);
+}
 
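bch2_btree_insert_clone_trans() copies the caller's key into transaction-lifetime memory via bch2_trans_kmalloc(), so the caller's copy may live on the stack and be reused immediately. A hypothetical caller (the btree, position and inum are illustrative, not from this file):

	u64 inum = 42;
	struct bkey_i k;

	bkey_init(&k.k);
	k.k.p = POS(0, inum);
	int ret = bch2_btree_insert_clone_trans(trans, BTREE_ID_inodes, &k);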
-                       BUG_ON(trans->wb_updates_size > U8_MAX / 2);
-                       trans->wb_updates_size = max(1, trans->wb_updates_size * 2);
-                       if (s)
-                               s->wb_updates_size = trans->wb_updates_size;
-               }
+struct jset_entry *__bch2_trans_jset_entry_alloc(struct btree_trans *trans, unsigned u64s)
+{
+       unsigned new_top = trans->journal_entries_u64s + u64s;
+       unsigned old_size = trans->journal_entries_size;
 
-               u = bch2_trans_kmalloc_nomemzero(trans,
-                                       trans->wb_updates_size *
-                                       sizeof(struct btree_write_buffered_key));
-               ret = PTR_ERR_OR_ZERO(u);
-               if (ret)
-                       return ret;
+       if (new_top > trans->journal_entries_size) {
+               trans->journal_entries_size = roundup_pow_of_two(new_top);
 
-               if (trans->nr_wb_updates)
-                       memcpy(u, trans->wb_updates, trans->nr_wb_updates *
-                              sizeof(struct btree_write_buffered_key));
-               trans->wb_updates = u;
+               btree_trans_stats(trans)->journal_entries_size = trans->journal_entries_size;
        }
 
-       trans->wb_updates[trans->nr_wb_updates] = (struct btree_write_buffered_key) {
-               .btree  = btree,
-       };
+       struct jset_entry *n =
+               bch2_trans_kmalloc_nomemzero(trans,
+                               trans->journal_entries_size * sizeof(u64));
+       if (IS_ERR(n))
+               return ERR_CAST(n);
 
-       bkey_copy(&trans->wb_updates[trans->nr_wb_updates].k, k);
-       trans->nr_wb_updates++;
+       if (trans->journal_entries)
+               memcpy(n, trans->journal_entries, old_size * sizeof(u64));
+       trans->journal_entries = n;
 
-       return 0;
+       struct jset_entry *e = btree_trans_journal_entries_top(trans);
+       trans->journal_entries_u64s = new_top;
+       return e;
 }
 
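The journal-entry buffer grows to the next power of two of the new watermark, and the high-water mark is recorded in the per-transaction stats so later transactions can size it up front. A hypothetical growth trace (sizes in u64s, buffer assumed to start at 0):

	/*
	 *   alloc 3 u64s -> new_top = 3, size = roundup_pow_of_two(3) = 4
	 *   alloc 3 u64s -> new_top = 6, size = roundup_pow_of_two(6) = 8
	 *   alloc 2 u64s -> new_top = 8, fits, size stays 8
	 *
	 * Every call reallocates from the transaction's bump allocator and
	 * memcpy()s the old contents forward; the returned pointer is the
	 * slot at the previous top.
	 */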
 int bch2_bkey_get_empty_slot(struct btree_trans *trans, struct btree_iter *iter,
@@ -806,41 +797,17 @@ int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree,
        return bch2_trans_update_buffered(trans, btree, &k);
 }
 
-__printf(2, 0)
-static int __bch2_trans_log_msg(darray_u64 *entries, const char *fmt, va_list args)
+static int __bch2_trans_log_msg(struct btree_trans *trans, struct printbuf *buf, unsigned u64s)
 {
-       struct printbuf buf = PRINTBUF;
-       struct jset_entry_log *l;
-       unsigned u64s;
-       int ret;
-
-       prt_vprintf(&buf, fmt, args);
-       ret = buf.allocation_failure ? -BCH_ERR_ENOMEM_trans_log_msg : 0;
-       if (ret)
-               goto err;
-
-       u64s = DIV_ROUND_UP(buf.pos, sizeof(u64));
-
-       ret = darray_make_room(entries, jset_u64s(u64s));
+       struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, jset_u64s(u64s));
+       int ret = PTR_ERR_OR_ZERO(e);
        if (ret)
-               goto err;
+               return ret;
 
-       l = (void *) &darray_top(*entries);
-       l->entry.u64s           = cpu_to_le16(u64s);
-       l->entry.btree_id       = 0;
-       l->entry.level          = 1;
-       l->entry.type           = BCH_JSET_ENTRY_log;
-       l->entry.pad[0]         = 0;
-       l->entry.pad[1]         = 0;
-       l->entry.pad[2]         = 0;
-       memcpy(l->d, buf.buf, buf.pos);
-       while (buf.pos & 7)
-               l->d[buf.pos++] = '\0';
-
-       entries->nr += jset_u64s(u64s);
-err:
-       printbuf_exit(&buf);
-       return ret;
+       struct jset_entry_log *l = container_of(e, struct jset_entry_log, entry);
+       journal_entry_init(e, BCH_JSET_ENTRY_log, 0, 1, u64s);
+       memcpy(l->d, buf->buf, buf->pos);
+       return 0;
 }
 
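The sizing arithmetic: the caller pads the message with NULs to a u64 boundary (see __bch2_fs_log_msg() below), and jset_u64s() adds the entry header on top of the data u64s. For a hypothetical 13-byte message:

	/*
	 * buf.pos == 13 bytes
	 * u64s    == DIV_ROUND_UP(13, 8) == 2     (data, NUL-padded to 16)
	 * jset_u64s(2) == 2 + header u64s         (total allocation)
	 */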
 __printf(3, 0)
@@ -848,16 +815,32 @@ static int
 __bch2_fs_log_msg(struct bch_fs *c, unsigned commit_flags, const char *fmt,
                  va_list args)
 {
-       int ret;
+       struct printbuf buf = PRINTBUF;
+       prt_vprintf(&buf, fmt, args);
+
+       unsigned u64s = DIV_ROUND_UP(buf.pos, sizeof(u64));
+       prt_chars(&buf, '\0', u64s * sizeof(u64) - buf.pos);
+
+       int ret = buf.allocation_failure ? -BCH_ERR_ENOMEM_trans_log_msg : 0;
+       if (ret)
+               goto err;
 
        if (!test_bit(JOURNAL_STARTED, &c->journal.flags)) {
-               ret = __bch2_trans_log_msg(&c->journal.early_journal_entries, fmt, args);
+               ret = darray_make_room(&c->journal.early_journal_entries, jset_u64s(u64s));
+               if (ret)
+                       goto err;
+
+               struct jset_entry_log *l = (void *) &darray_top(c->journal.early_journal_entries);
+               journal_entry_init(&l->entry, BCH_JSET_ENTRY_log, 0, 1, u64s);
+               memcpy(l->d, buf.buf, buf.pos);
+               c->journal.early_journal_entries.nr += jset_u64s(u64s);
        } else {
                ret = bch2_trans_do(c, NULL, NULL,
                        BCH_TRANS_COMMIT_lazy_rw|commit_flags,
-                       __bch2_trans_log_msg(&trans->extra_journal_entries, fmt, args));
+                       __bch2_trans_log_msg(trans, &buf, u64s));
        }
-
+err:
+       printbuf_exit(&buf);
        return ret;
 }
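Note the split: before the journal has started (early in recovery), the formatted message is queued on c->journal.early_journal_entries and written once the journal comes up; afterwards it is committed through a normal lazy-rw transaction. A hypothetical call via the public wrapper (the wrapper itself is not shown in this diff; nr_errors is illustrative):

	bch2_fs_log_msg(c, "recovery finished, fixed %u errors", nr_errors);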