Update bcachefs sources to 50847e296b34 bcachefs: Check subvol <-> inode pointers...
diff --git a/libbcachefs/btree_cache.c b/libbcachefs/btree_cache.c
index 9574c8c4d70864b8617df8d68dff6d8e47bdf855..9b7ea1227069e6d73d53ef15fa0d1ee3afaadd5e 100644
--- a/libbcachefs/btree_cache.c
+++ b/libbcachefs/btree_cache.c
@@ -60,7 +60,7 @@ static void btree_node_data_free(struct bch_fs *c, struct btree *b)
 
        clear_btree_node_just_written(b);
 
-       kvpfree(b->data, btree_bytes(c));
+       kvfree(b->data);
        b->data = NULL;
 #ifdef __KERNEL__
        kvfree(b->aux_data);
@@ -94,7 +94,7 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
 {
        BUG_ON(b->data || b->aux_data);
 
-       b->data = kvpmalloc(btree_bytes(c), gfp);
+       b->data = kvmalloc(btree_buf_bytes(b), gfp);
        if (!b->data)
                return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
 #ifdef __KERNEL__
@@ -107,7 +107,7 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
                b->aux_data = NULL;
 #endif
        if (!b->aux_data) {
-               kvpfree(b->data, btree_bytes(c));
+               kvfree(b->data);
                b->data = NULL;
                return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
        }
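
The three hunks above replace bcachefs's old kvpmalloc()/kvpfree() wrappers, which required the allocation size at free time, with the kernel's stock kvmalloc()/kvfree(). A minimal sketch of that pattern, assuming a plain kernel-module context; the helper names are illustrative, not taken from the tree:

#include <linux/slab.h>
#include <linux/mm.h>

/*
 * kvmalloc() attempts a slab allocation first and falls back to vmalloc()
 * for large or fragmented requests; kvfree() releases either kind, so the
 * free path no longer needs to remember the original size.
 */
static void *example_node_buf_alloc(size_t bytes, gfp_t gfp)
{
	return kvmalloc(bytes, gfp);
}

static void example_node_buf_free(void *buf)
{
	kvfree(buf);	/* handles both kmalloc'd and vmalloc'd buffers */
}

Dropping the size argument is what allows the error path above to free b->data without recomputing the node size.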
@@ -126,7 +126,7 @@ static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
        bkey_btree_ptr_init(&b->key);
        INIT_LIST_HEAD(&b->list);
        INIT_LIST_HEAD(&b->write_blocked);
-       b->byte_order = ilog2(btree_bytes(c));
+       b->byte_order = ilog2(c->opts.btree_node_size);
        return b;
 }
 
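btree_bytes(c) was a filesystem-wide constant derived from the btree_node_size option; the replacement btree_buf_bytes(b) is sized per node, and b->byte_order is now initialized directly from c->opts.btree_node_size. A rough sketch of the assumed relationship between the two; the helper body below is a guess for illustration, not copied from the tree:

/*
 * Assumed shape of the per-node helper: byte_order holds
 * ilog2(btree_node_size), so the buffer size is recovered as a power of
 * two. Check the real definition in btree_io.h before relying on this.
 */
static inline size_t example_btree_buf_bytes(const struct btree *b)
{
	return 1UL << b->byte_order;
}
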
@@ -408,7 +408,7 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
        if (c->verify_data)
                list_move(&c->verify_data->list, &bc->live);
 
-       kvpfree(c->verify_ondisk, btree_bytes(c));
+       kvfree(c->verify_ondisk);
 
        for (i = 0; i < btree_id_nr_alive(c); i++) {
                struct btree_root *r = bch2_btree_id_root(c, i);
@@ -648,7 +648,7 @@ out:
        bch2_btree_keys_init(b);
        set_btree_node_accessed(b);
 
-       bch2_time_stats_update(&c->times[BCH_TIME_btree_node_mem_alloc],
+       time_stats_update(&c->times[BCH_TIME_btree_node_mem_alloc],
                               start_time);
 
        memalloc_nofs_restore(flags);
@@ -711,6 +711,9 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
        b = bch2_btree_node_mem_alloc(trans, level != 0);
 
        if (bch2_err_matches(PTR_ERR_OR_ZERO(b), ENOMEM)) {
+               if (!path)
+                       return b;
+
                trans->memory_allocation_failure = true;
                trace_and_count(c, trans_restart_memory_allocation_failure, trans, _THIS_IP_, path);
                return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_mem_alloc_fail));
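
This hunk lets bch2_btree_node_fill() run without a btree_path; with no path there is no transaction to usefully restart, so an allocation failure is handed straight back to the caller. A condensed sketch of the new control flow, with a hypothetical wrapper name and the tracepoint omitted for brevity:

/*
 * Sketch only: on an -ENOMEM-class error, return it as-is when no path
 * was supplied; otherwise flag the failure on the transaction and
 * restart it, as in the hunk above.
 */
static struct btree *example_fill_alloc_check(struct btree_trans *trans,
					      struct btree_path *path,
					      struct btree *b)
{
	if (bch2_err_matches(PTR_ERR_OR_ZERO(b), ENOMEM)) {
		if (!path)
			return b;

		trans->memory_allocation_failure = true;
		return ERR_PTR(btree_trans_restart(trans,
				BCH_ERR_transaction_restart_fill_mem_alloc_fail));
	}

	return b;
}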
@@ -719,12 +722,6 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
        if (IS_ERR(b))
                return b;
 
-       /*
-        * Btree nodes read in from disk should not have the accessed bit set
-        * initially, so that linear scans don't thrash the cache:
-        */
-       clear_btree_node_accessed(b);
-
        bkey_copy(&b->key, k);
        if (bch2_btree_node_hash_insert(bc, b, level, btree_id)) {
                /* raced with another fill: */
@@ -766,8 +763,9 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
        }
 
        if (!six_relock_type(&b->c.lock, lock_type, seq)) {
-               if (path)
-                       trace_and_count(c, trans_restart_relock_after_fill, trans, _THIS_IP_, path);
+               BUG_ON(!path);
+
+               trace_and_count(c, trans_restart_relock_after_fill, trans, _THIS_IP_, path);
                return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_after_fill));
        }
 
@@ -1102,7 +1100,7 @@ int bch2_btree_node_prefetch(struct btree_trans *trans,
        struct btree_cache *bc = &c->btree_cache;
        struct btree *b;
 
-       BUG_ON(trans && !btree_node_locked(path, level + 1));
+       BUG_ON(path && !btree_node_locked(path, level + 1));
        BUG_ON(level >= BTREE_MAX_DEPTH);
 
        b = btree_cache_find(bc, k);
@@ -1198,7 +1196,7 @@ void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struc
               "    failed unpacked %zu\n",
               b->unpack_fn_len,
               b->nr.live_u64s * sizeof(u64),
-              btree_bytes(c) - sizeof(struct btree_node),
+              btree_buf_bytes(b) - sizeof(struct btree_node),
               b->nr.live_u64s * 100 / btree_max_u64s(c),
               b->sib_u64s[0],
               b->sib_u64s[1],