for (b = buckets->first_bucket; b < buckets->nbuckets; b++)
if (is_available_bucket(buckets->b[b].mark) &&
+ (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)) &&
!buckets->b[b].mark.owned_by_allocator)
goto success;
b = -1;
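The check added above skips buckets flagged in the per-device `buckets_nouse` bitmap, but only when that bitmap has actually been allocated. A minimal sketch of the pattern, assuming the kernel's test_bit(); the helper name and parameters are illustrative, not the bcachefs API:

	#include <linux/bitops.h>

	/*
	 * Hypothetical helper: a bucket is usable unless an optional
	 * "nouse" bitmap exists and has the bucket's bit set; a NULL
	 * bitmap excludes nothing.
	 */
	static bool bucket_usable(const unsigned long *nouse_bitmap, size_t b)
	{
		return !nouse_bitmap || !test_bit(b, nouse_bitmap);
	}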
struct btree_path_buf __percpu *btree_paths_bufs;
struct srcu_struct btree_trans_barrier;
+ bool btree_trans_barrier_initialized;
struct btree_key_cache btree_key_cache;
EBUG_ON(level >= BTREE_MAX_DEPTH);
- if (c->opts.btree_node_mem_ptr_optimization) {
- b = btree_node_mem_ptr(k);
- /*
- * Check b->hash_val _before_ calling btree_node_lock() - this
- * might not be the node we want anymore, and trying to lock the
- * wrong node could cause an unneccessary transaction restart:
- */
- if (b && b->hash_val == btree_ptr_hash_val(k))
+ b = btree_node_mem_ptr(k);
+
+ /*
+ * Check b->hash_val _before_ calling btree_node_lock() - this might not
+ * be the node we want anymore, and trying to lock the wrong node could
+ * cause an unnecessary transaction restart:
+ */
+ if (likely(c->opts.btree_node_mem_ptr_optimization &&
+ b &&
+ b->hash_val == btree_ptr_hash_val(k)))
goto lock_node;
- }
retry:
b = btree_cache_find(bc, k);
if (unlikely(!b)) {
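The refactor above folds the mem-ptr fast path into a single condition so b->hash_val is validated before btree_node_lock() is ever taken; locking the wrong node would only be discovered afterwards, at the cost of a transaction restart. A sketch of the general check-before-lock pattern, with hypothetical names throughout:

	/*
	 * Sketch: validate a possibly-stale cached pointer before
	 * locking it, then re-check under the lock. Locking the wrong
	 * object is the expensive case being avoided.
	 */
	struct obj *fast_lookup(struct cache *c, u64 key)
	{
		struct obj *o = READ_ONCE(c->last_hit);	/* may be stale */

		if (likely(o && o->key == key)) {
			lock_obj(o);
			if (o->key == key)	/* re-validate under lock */
				return o;
			unlock_obj(o);		/* raced; fall back */
		}
		return slow_lookup(c, key);
	}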
return path;
}
-struct btree_path *bch2_path_get(struct btree_trans *trans, bool cached,
+struct btree_path *bch2_path_get(struct btree_trans *trans,
enum btree_id btree_id, struct bpos pos,
unsigned locks_want, unsigned level,
- bool intent, unsigned long ip)
+ unsigned flags, unsigned long ip)
{
struct btree_path *path, *path_pos = NULL;
+ bool cached = flags & BTREE_ITER_CACHED;
+ bool intent = flags & BTREE_ITER_INTENT;
int i;
BUG_ON(trans->restarted);
path_pos->level == level) {
__btree_path_get(path_pos, intent);
path = btree_path_set_pos(trans, path_pos, pos, intent, ip);
- path->preserve = true;
} else {
path = btree_path_alloc(trans, path_pos);
path_pos = NULL;
path->pos = pos;
path->btree_id = btree_id;
path->cached = cached;
- path->preserve = true;
path->uptodate = BTREE_ITER_NEED_TRAVERSE;
path->should_be_locked = false;
path->level = level;
btree_trans_verify_sorted(trans);
}
+ if (!(flags & BTREE_ITER_NOPRESERVE))
+ path->preserve = true;
+
if (path->intent_ref)
locks_want = max(locks_want, level + 1);
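With the new signature, callers encode everything in one flags word: BTREE_ITER_CACHED and BTREE_ITER_INTENT replace the two bools, and BTREE_ITER_NOPRESERVE (defined further down) lets a caller request a throwaway path that isn't kept alive for the rest of the transaction. A hypothetical call site under the new API:

	/*
	 * Hypothetical caller: an intent-locked leaf path at pos that
	 * we don't need preserved once we're done with it.
	 */
	path = bch2_path_get(trans, BTREE_ID_extents, pos,
			     1,		/* locks_want */
			     0,		/* level */
			     BTREE_ITER_INTENT|BTREE_ITER_NOPRESERVE,
			     _THIS_IP_);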
iter->ip_allocated = ip;
#endif
- iter->path = bch2_path_get(trans,
- flags & BTREE_ITER_CACHED,
- btree_id,
- iter->pos,
- locks_want,
- depth,
- flags & BTREE_ITER_INTENT, ip);
+ iter->path = bch2_path_get(trans, btree_id, iter->pos,
+ locks_want, depth, flags, ip);
}
void bch2_trans_iter_init(struct btree_trans *trans,
void bch2_fs_btree_iter_exit(struct bch_fs *c)
{
+ if (c->btree_trans_barrier_initialized)
+ cleanup_srcu_struct(&c->btree_trans_barrier);
mempool_exit(&c->btree_trans_mem_pool);
mempool_exit(&c->btree_paths_pool);
- cleanup_srcu_struct(&c->btree_trans_barrier);
}
int bch2_fs_btree_iter_init(struct bch_fs *c)
{
unsigned nr = BTREE_ITER_MAX;
+ int ret;
INIT_LIST_HEAD(&c->btree_trans_list);
mutex_init(&c->btree_trans_lock);
- return init_srcu_struct(&c->btree_trans_barrier) ?:
- mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
+ ret = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
sizeof(struct btree_path) * nr +
sizeof(struct btree_insert_entry) * nr) ?:
mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
- BTREE_TRANS_MEM_MAX);
+ BTREE_TRANS_MEM_MAX) ?:
+ init_srcu_struct(&c->btree_trans_barrier);
+ if (!ret)
+ c->btree_trans_barrier_initialized = true;
+ return ret;
}
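The exit path can now run after a partial init (e.g. when one of the mempool_init_kmalloc_pool() calls fails), so cleanup_srcu_struct() must only be called once init_srcu_struct() has actually succeeded; that is what the new btree_trans_barrier_initialized flag tracks. The pattern in isolation, as a sketch with hypothetical names:

	/*
	 * Remember that the SRCU struct was initialized so teardown
	 * after a partial init never cleans up an uninitialized object.
	 */
	int foo_init(struct foo *f)
	{
		int ret = init_srcu_struct(&f->barrier);

		if (!ret)
			f->barrier_initialized = true;
		return ret;
	}

	void foo_exit(struct foo *f)
	{
		if (f->barrier_initialized)
			cleanup_srcu_struct(&f->barrier);
	}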
bool, unsigned long);
int __must_check bch2_btree_path_traverse(struct btree_trans *,
struct btree_path *, unsigned);
-struct btree_path *bch2_path_get(struct btree_trans *, bool, enum btree_id,
- struct bpos, unsigned, unsigned, bool,
- unsigned long);
+struct btree_path *bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
+ unsigned, unsigned, unsigned, unsigned long);
inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);
#ifdef CONFIG_BCACHEFS_DEBUG
rcu_read_lock();
tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
- for (i = 0; i < tbl->size; i++)
- rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
- bkey_cached_evict(bc, ck);
- list_add(&ck->list, &bc->freed);
- }
+ if (tbl)
+ for (i = 0; i < tbl->size; i++)
+ rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
+ bkey_cached_evict(bc, ck);
+ list_add(&ck->list, &bc->freed);
+ }
rcu_read_unlock();
list_for_each_entry_safe(ck, n, &bc->freed, list) {
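The added `if (tbl)` guard covers teardown after an early failure, where the key cache's rhashtable may never have been initialized and the dereferenced table pointer can still be NULL. A minimal sketch (walk_buckets() is a hypothetical stand-in for the eviction loop):

	rcu_read_lock();
	tbl = rht_dereference_rcu(ht->tbl, ht);	/* may be NULL if never initialized */
	if (tbl)
		walk_buckets(tbl);		/* hypothetical */
	rcu_read_unlock();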
#define __BTREE_ITER_ALL_SNAPSHOTS (1 << 11)
#define BTREE_ITER_ALL_SNAPSHOTS (1 << 12)
#define BTREE_ITER_FILTER_SNAPSHOTS (1 << 13)
+#define BTREE_ITER_NOPRESERVE (1 << 14)
enum btree_path_uptodate {
BTREE_ITER_UPTODATE = 0,
? bpos_predecessor(b->data->min_key)
: bpos_successor(b->data->max_key);
- sib_path = bch2_path_get(trans, false, path->btree_id, sib_pos,
- U8_MAX, level, true, _THIS_IP_);
+ sib_path = bch2_path_get(trans, path->btree_id, sib_pos,
+ U8_MAX, level, BTREE_ITER_INTENT, _THIS_IP_);
ret = bch2_btree_path_traverse(trans, sib_path, false);
if (ret)
goto err;
* When deleting, check if we need to emit a whiteout (because we're overwriting
* something in an ancestor snapshot)
*/
-static int need_whiteout_for_snapshot(struct btree_trans *trans, struct btree_iter *orig)
+static int need_whiteout_for_snapshot(struct btree_trans *trans,
+ enum btree_id btree_id, struct bpos pos)
{
struct btree_iter iter;
struct bkey_s_c k;
- u32 snapshot = orig->pos.snapshot;
+ u32 snapshot = pos.snapshot;
int ret;
- if (!bch2_snapshot_parent(trans->c, snapshot))
+ if (!bch2_snapshot_parent(trans->c, pos.snapshot))
return 0;
- bch2_trans_copy_iter(&iter, orig);
- iter.flags &= BTREE_ITER_FILTER_SNAPSHOTS;
- iter.flags |= BTREE_ITER_ALL_SNAPSHOTS;
+ pos.snapshot++;
- bch2_btree_iter_advance(&iter);
-
- for_each_btree_key_continue_norestart(iter, 0, k, ret) {
- if (bkey_cmp(k.k->p, orig->pos))
+ for_each_btree_key_norestart(trans, iter, btree_id, pos,
+ BTREE_ITER_ALL_SNAPSHOTS|
+ BTREE_ITER_NOPRESERVE, k, ret) {
+ if (bkey_cmp(k.k->p, pos))
break;
if (bch2_snapshot_is_ancestor(trans->c, snapshot,
BUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
BUG_ON(bpos_cmp(k->k.p, iter->path->pos));
- BUG_ON(bpos_cmp(k->k.p, iter->pos));
n = (struct btree_insert_entry) {
.flags = flags,
if (bkey_deleted(&n.k->k) &&
(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)) {
- int ret = need_whiteout_for_snapshot(trans, iter);
+ int ret = need_whiteout_for_snapshot(trans, n.btree_id, n.k->k.p);
if (unlikely(ret < 0))
return ret;
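Taken together: a deletion in a snapshot scans, at the same position, the snapshot IDs sorting after its own (an ancestor's snapshot ID is always larger), and a whiteout is needed if a live key in an ancestor snapshot would otherwise become visible again. A rough pseudocode rendering; the iterator and helpers are hypothetical stand-ins:

	/*
	 * Does deleting `pos` in `snapshot` expose a live key from an
	 * ancestor snapshot? (All names hypothetical.)
	 */
	for_each_key_from(btree_id, with_snapshot(pos, snapshot + 1), k) {
		if (!same_pos_ignoring_snapshot(k.p, pos))
			break;			/* past our position */
		if (is_ancestor(c, snapshot, k.p.snapshot))
			return !is_whiteout(k);	/* live key => whiteout */
	}
	return 0;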
int bch2_journal_meta(struct journal *j)
{
+ struct journal_buf *buf;
struct journal_res res;
int ret;
if (ret)
return ret;
+ buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
+ buf->must_flush = true;
+ set_bit(JOURNAL_NEED_WRITE, &j->flags);
+
bch2_journal_res_put(j, &res);
return bch2_journal_flush_seq(j, res.seq);
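Because journal writes may be no-flush writes (JSET_NO_FLUSH), bch2_journal_meta() now pins its entry to a real flush: marking the buffer must_flush and raising JOURNAL_NEED_WRITE before the reservation is dropped ensures the following bch2_journal_flush_seq() waits on a write that actually reaches durable storage. A hypothetical caller relying on that guarantee:

	/* Hypothetical: make everything journaled so far durable. */
	static int sync_journal(struct bch_fs *c)
	{
		return bch2_journal_meta(&c->journal);	/* blocks until flushed */
	}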
j->replay_journal_seq = last_seq;
j->replay_journal_seq_end = cur_seq;
j->last_seq_ondisk = last_seq;
+ j->flushed_seq_ondisk = last_seq;
j->pin.front = last_seq;
j->pin.back = cur_seq;
atomic64_set(&j->seq, cur_seq - 1);
if (seq < last_seq)
continue;
+ if (journal_entry_empty(&i->j))
+ j->last_empty_seq = le64_to_cpu(i->j.seq);
+
p = journal_seq_pin(j, seq);
p->devs.nr = 0;
bch2_dev_list_add_dev(&p->devs, i->ptrs[ptr].dev);
}
+ if (list_empty(journal_entries))
+ j->last_empty_seq = cur_seq;
+
spin_lock(&j->lock);
set_bit(JOURNAL_STARTED, &j->flags);
SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));
- if (journal_entry_empty(jset))
+ if (!JSET_NO_FLUSH(jset) && journal_entry_empty(jset))
j->last_empty_seq = le64_to_cpu(jset->seq);
if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))