}
static struct bkey_cached *
-btree_key_cache_create(struct btree_key_cache *c,
+btree_key_cache_create(struct bch_fs *c,
enum btree_id btree_id,
struct bpos pos)
{
+ struct btree_key_cache *bc = &c->btree_key_cache;
struct bkey_cached *ck;
bool was_new = true;
- ck = bkey_cached_alloc(c);
+ ck = bkey_cached_alloc(bc);
if (unlikely(!ck)) {
- ck = bkey_cached_reuse(c);
- if (unlikely(!ck))
+ ck = bkey_cached_reuse(bc);
+ if (unlikely(!ck)) {
+ bch_err(c, "error allocating memory for key cache item, btree %s",
+ bch2_btree_ids[btree_id]);
return ERR_PTR(-ENOMEM);
+ }
was_new = false;
}
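+ /*
+ * Keys in the subvolumes btree get percpu reader counts on their six
+ * locks (useful for read-mostly locking); everything else keeps the
+ * cheaper non-percpu lock:
+ */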
+ if (btree_id == BTREE_ID_subvolumes)
+ six_lock_pcpu_alloc(&ck->c.lock);
+ else
+ six_lock_pcpu_free(&ck->c.lock);
+
ck->c.level = 0;
ck->c.btree_id = btree_id;
ck->key.btree_id = btree_id;
ck->valid = false;
ck->flags = 1U << BKEY_CACHED_ACCESSED;
- if (unlikely(rhashtable_lookup_insert_fast(&c->table,
+ if (unlikely(rhashtable_lookup_insert_fast(&bc->table,
&ck->hash,
bch2_btree_key_cache_params))) {
/* We raced with another fill: */

if (likely(was_new)) {
six_unlock_write(&ck->c.lock);
six_unlock_intent(&ck->c.lock);
kfree(ck);
} else {
- mutex_lock(&c->lock);
- bkey_cached_free(c, ck);
- mutex_unlock(&c->lock);
+ mutex_lock(&bc->lock);
+ bkey_cached_free(bc, ck);
+ mutex_unlock(&bc->lock);
}
return NULL;
}
- atomic_long_inc(&c->nr_keys);
+ atomic_long_inc(&bc->nr_keys);
six_unlock_write(&ck->c.lock);

return ck;
}
static int btree_key_cache_fill(struct btree_trans *trans,
- struct btree_iter *ck_iter,
+ struct btree_path *ck_path,
struct bkey_cached *ck)
{
- struct btree_iter *iter;
+ struct btree_path *path;
struct bkey_s_c k;
unsigned new_u64s = 0;
struct bkey_i *new_k = NULL;
+ struct bkey u;
int ret;
- iter = bch2_trans_get_iter(trans, ck->key.btree_id,
- ck->key.pos, BTREE_ITER_SLOTS);
- k = bch2_btree_iter_peek_slot(iter);
- ret = bkey_err(k);
+ path = bch2_path_get(trans, ck->key.btree_id,
+ ck->key.pos, 0, 0, 0, _THIS_IP_);
+ ret = bch2_btree_path_traverse(trans, path, 0);
if (ret)
goto err;
- if (!bch2_btree_node_relock(ck_iter, 0)) {
- trace_transaction_restart_ip(trans->ip, _THIS_IP_);
+ k = bch2_btree_path_peek_slot(path, &u);
+
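+ /*
+ * Traversing the other path may have dropped our lock on the key cache
+ * entry; make sure we still hold it before updating the cached key:
+ */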
+ if (!bch2_btree_node_relock(trans, ck_path, 0)) {
+ trace_trans_restart_relock_key_cache_fill(trans->fn,
+ _THIS_IP_, ck_path->btree_id, &ck_path->pos);
ret = btree_trans_restart(trans);
goto err;
}
new_u64s = roundup_pow_of_two(new_u64s);
new_k = kmalloc(new_u64s * sizeof(u64), GFP_NOFS);
if (!new_k) {
+ bch_err(trans->c, "error allocating memory for key cache key, btree %s u64s %u",
+ bch2_btree_ids[ck->key.btree_id], new_u64s);
ret = -ENOMEM;
goto err;
}
/*
 * XXX: not allowed to be holding read locks when we take a write lock,
 * currently
 */
- bch2_btree_node_lock_write(ck_iter->l[0].b, ck_iter);
+ bch2_btree_node_lock_write(trans, ck_path, ck_path->l[0].b);
if (new_k) {
kfree(ck->k);
ck->u64s = new_u64s;
ck->k = new_k;
}

bkey_reassemble(ck->k, k);
ck->valid = true;
- bch2_btree_node_unlock_write(ck_iter->l[0].b, ck_iter);
+ bch2_btree_node_unlock_write(trans, ck_path, ck_path->l[0].b);
/* We're not likely to need this iterator again: */
- set_btree_iter_dontneed(trans, iter);
+ path->preserve = false;
err:
- bch2_trans_iter_put(trans, iter);
+ bch2_path_put(trans, path, 0);
return ret;
}
static int bkey_cached_check_fn(struct six_lock *lock, void *p)
{
struct bkey_cached *ck = container_of(lock, struct bkey_cached, c.lock);
- const struct btree_iter *iter = p;
+ const struct btree_path *path = p;
- return ck->key.btree_id == iter->btree_id &&
- !bpos_cmp(ck->key.pos, iter->pos) ? 0 : -1;
+ return ck->key.btree_id == path->btree_id &&
+ !bpos_cmp(ck->key.pos, path->pos) ? 0 : -1;
}
__flatten
-int bch2_btree_iter_traverse_cached(struct btree_iter *iter)
+int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path *path,
+ unsigned flags)
{
- struct btree_trans *trans = iter->trans;
struct bch_fs *c = trans->c;
struct bkey_cached *ck;
int ret = 0;
- BUG_ON(iter->level);
+ BUG_ON(path->level);
- iter->l[1].b = NULL;
+ path->l[1].b = NULL;
- if (bch2_btree_node_relock(iter, 0)) {
- ck = (void *) iter->l[0].b;
+ if (bch2_btree_node_relock(trans, path, 0)) {
+ ck = (void *) path->l[0].b;
goto fill;
}
retry:
- ck = bch2_btree_key_cache_find(c, iter->btree_id, iter->pos);
+ ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos);
if (!ck) {
- if (iter->flags & BTREE_ITER_CACHED_NOCREATE) {
- iter->l[0].b = NULL;
+ if (flags & BTREE_ITER_CACHED_NOCREATE) {
+ path->l[0].b = NULL;
return 0;
}
- ck = btree_key_cache_create(&c->btree_key_cache,
- iter->btree_id, iter->pos);
+ ck = btree_key_cache_create(c, path->btree_id, path->pos);
ret = PTR_ERR_OR_ZERO(ck);
if (ret)
goto err;
if (!ck)
goto retry;
- mark_btree_node_locked(iter, 0, SIX_LOCK_intent);
- iter->locks_want = 1;
+ mark_btree_node_locked(path, 0, SIX_LOCK_intent);
+ path->locks_want = 1;
} else {
- enum six_lock_type lock_want = __btree_lock_want(iter, 0);
+ enum six_lock_type lock_want = __btree_lock_want(path, 0);
- if (!btree_node_lock((void *) ck, iter->pos, 0, iter, lock_want,
- bkey_cached_check_fn, iter, _THIS_IP_)) {
+ if (!btree_node_lock(trans, path, (void *) ck, path->pos, 0,
+ lock_want,
+ bkey_cached_check_fn, path, _THIS_IP_)) {
if (!trans->restarted)
goto retry;
- trace_transaction_restart_ip(trans->ip, _THIS_IP_);
ret = -EINTR;
goto err;
}
- if (ck->key.btree_id != iter->btree_id ||
- bpos_cmp(ck->key.pos, iter->pos)) {
+ if (ck->key.btree_id != path->btree_id ||
+ bpos_cmp(ck->key.pos, path->pos)) {
six_unlock_type(&ck->c.lock, lock_want);
goto retry;
}
- mark_btree_node_locked(iter, 0, lock_want);
+ mark_btree_node_locked(path, 0, lock_want);
}
- iter->l[0].lock_seq = ck->c.lock.state.seq;
- iter->l[0].b = (void *) ck;
+ path->l[0].lock_seq = ck->c.lock.state.seq;
+ path->l[0].b = (void *) ck;
fill:
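/*
 * If the cached key hasn't been filled in from the underlying btree yet,
 * do that now; filling requires an intent lock on the key cache entry:
 */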
- if (!ck->valid && !(iter->flags & BTREE_ITER_CACHED_NOFILL)) {
- if (!iter->locks_want &&
- !!__bch2_btree_iter_upgrade(iter, 1)) {
- trace_transaction_restart_ip(trans->ip, _THIS_IP_);
- BUG_ON(!trans->restarted);
- ret = -EINTR;
+ if (!ck->valid && !(flags & BTREE_ITER_CACHED_NOFILL)) {
+ if (!path->locks_want &&
+ !__bch2_btree_path_upgrade(trans, path, 1)) {
+ trace_transaction_restart_ip(trans->fn, _THIS_IP_);
+ ret = btree_trans_restart(trans);
goto err;
}
- ret = btree_key_cache_fill(trans, iter, ck);
+ ret = btree_key_cache_fill(trans, path, ck);
if (ret)
goto err;
}
if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
set_bit(BKEY_CACHED_ACCESSED, &ck->flags);
- iter->uptodate = BTREE_ITER_NEED_PEEK;
-
- if ((iter->flags & BTREE_ITER_INTENT) &&
- !bch2_btree_iter_upgrade(iter, 1)) {
- BUG_ON(!trans->restarted);
- ret = -EINTR;
- }
-
- BUG_ON(!ret && !btree_node_locked(iter, 0));
+ path->uptodate = BTREE_ITER_UPTODATE;
+ BUG_ON(btree_node_locked_type(path, 0) != btree_lock_want(path, 0));
return ret;
err:
if (ret != -EINTR) {
- btree_node_unlock(iter, 0);
- iter->flags |= BTREE_ITER_ERROR;
- iter->l[0].b = BTREE_ITER_NO_NODE_ERROR;
+ btree_node_unlock(path, 0);
+ path->l[0].b = BTREE_ITER_NO_NODE_ERROR;
}
return ret;
}
{
struct bch_fs *c = trans->c;
struct journal *j = &c->journal;
- struct btree_iter *c_iter = NULL, *b_iter = NULL;
+ struct btree_iter c_iter, b_iter;
struct bkey_cached *ck = NULL;
int ret;
- b_iter = bch2_trans_get_iter(trans, key.btree_id, key.pos,
- BTREE_ITER_SLOTS|
- BTREE_ITER_INTENT);
- c_iter = bch2_trans_get_iter(trans, key.btree_id, key.pos,
- BTREE_ITER_CACHED|
- BTREE_ITER_CACHED_NOFILL|
- BTREE_ITER_CACHED_NOCREATE|
- BTREE_ITER_INTENT);
- ret = bch2_btree_iter_traverse(c_iter);
+ bch2_trans_iter_init(trans, &b_iter, key.btree_id, key.pos,
+ BTREE_ITER_SLOTS|
+ BTREE_ITER_INTENT|
+ BTREE_ITER_ALL_SNAPSHOTS);
+ bch2_trans_iter_init(trans, &c_iter, key.btree_id, key.pos,
+ BTREE_ITER_CACHED|
+ BTREE_ITER_CACHED_NOFILL|
+ BTREE_ITER_CACHED_NOCREATE|
+ BTREE_ITER_INTENT);
+ b_iter.flags &= ~BTREE_ITER_WITH_KEY_CACHE;
+
+ ret = bch2_btree_iter_traverse(&c_iter);
if (ret)
goto out;
- ck = (void *) c_iter->l[0].b;
- if (!ck ||
- (journal_seq && ck->journal.seq != journal_seq))
+ ck = (void *) c_iter.path->l[0].b;
+ if (!ck)
goto out;
if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
- if (!evict)
- goto out;
- goto evict;
+ if (evict)
+ goto evict;
+ goto out;
}
+ BUG_ON(!ck->valid);
+
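+ /*
+ * If we're flushing for a specific journal sequence number and the
+ * entry is now pinned at a different one, there's nothing to do:
+ */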
+ if (journal_seq && ck->journal.seq != journal_seq)
+ goto out;
+
/*
* Since journal reclaim depends on us making progress here, and the
* allocator/copygc depend on journal reclaim making progress, we need
* to be using alloc reserves:
* */
- ret = bch2_btree_iter_traverse(b_iter) ?:
- bch2_trans_update(trans, b_iter, ck->k,
+ ret = bch2_btree_iter_traverse(&b_iter) ?:
+ bch2_trans_update(trans, &b_iter, ck->k,
+ BTREE_UPDATE_KEY_CACHE_RECLAIM|
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
BTREE_TRIGGER_NORUN) ?:
bch2_trans_commit(trans, NULL, NULL,
bch2_journal_pin_drop(j, &ck->journal);
bch2_journal_preres_put(j, &ck->res);
- BUG_ON(!btree_node_locked(c_iter, 0));
+ BUG_ON(!btree_node_locked(c_iter.path, 0));
if (!evict) {
if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
}
} else {
evict:
- BUG_ON(!btree_node_intent_locked(c_iter, 0));
+ BUG_ON(!btree_node_intent_locked(c_iter.path, 0));
- mark_btree_node_unlocked(c_iter, 0);
- c_iter->l[0].b = NULL;
+ mark_btree_node_unlocked(c_iter.path, 0);
+ c_iter.path->l[0].b = NULL;
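/* Take the write lock so the entry can be evicted: */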
six_lock_write(&ck->c.lock, NULL, NULL);
mutex_unlock(&c->btree_key_cache.lock);
}
out:
- bch2_trans_iter_put(trans, b_iter);
- bch2_trans_iter_put(trans, c_iter);
+ bch2_trans_iter_exit(trans, &b_iter);
+ bch2_trans_iter_exit(trans, &c_iter);
return ret;
}
}
bool bch2_btree_insert_key_cached(struct btree_trans *trans,
- struct btree_iter *iter,
+ struct btree_path *path,
struct bkey_i *insert)
{
struct bch_fs *c = trans->c;
- struct bkey_cached *ck = (void *) iter->l[0].b;
+ struct bkey_cached *ck = (void *) path->l[0].b;
bool kick_reclaim = false;
BUG_ON(insert->u64s > ck->u64s);
rcu_read_lock();
tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
- for (i = 0; i < tbl->size; i++)
- rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
- bkey_cached_evict(bc, ck);
- list_add(&ck->list, &bc->freed);
- }
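+ /*
+ * tbl may be NULL if the hash table was never initialized (e.g. an
+ * early error during fs init):
+ */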
+ if (tbl)
+ for (i = 0; i < tbl->size; i++)
+ rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
+ bkey_cached_evict(bc, ck);
+ list_add(&ck->list, &bc->freed);
+ }
rcu_read_unlock();
list_for_each_entry_safe(ck, n, &bc->freed, list) {