#include "error.h"
#include "extents.h"
#include "journal.h"
+#include "journal_io.h"
#include "replicas.h"
#include "snapshot.h"
#include "trace.h"
#include <linux/prefetch.h>
static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
-static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
- struct btree_path *);
+static inline void btree_path_list_add(struct btree_trans *,
+ btree_path_idx_t, btree_path_idx_t);
static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
{
#endif
}
-static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);
+static btree_path_idx_t btree_path_alloc(struct btree_trans *, btree_path_idx_t);
+static void bch2_trans_srcu_lock(struct btree_trans *);
static inline int __btree_path_cmp(const struct btree_path *l,
enum btree_id r_btree_id,
void bch2_trans_verify_paths(struct btree_trans *trans)
{
struct btree_path *path;
+ unsigned iter;
- trans_for_each_path(trans, path)
+ trans_for_each_path(trans, path, iter)
bch2_btree_path_verify(trans, path);
}
BUG_ON(iter->btree_id >= BTREE_ID_NR);
- BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != iter->path->cached);
+ BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != btree_iter_path(trans, iter)->cached);
BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
(iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
!btree_type_has_snapshot_field(iter->btree_id));
if (iter->update_path)
- bch2_btree_path_verify(trans, iter->update_path);
- bch2_btree_path_verify(trans, iter->path);
+ bch2_btree_path_verify(trans, &trans->paths[iter->update_path]);
+ bch2_btree_path_verify(trans, btree_iter_path(trans, iter));
}
static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
struct bpos pos, bool key_cache)
{
struct btree_path *path;
- unsigned idx;
+ struct trans_for_each_path_inorder_iter iter;
struct printbuf buf = PRINTBUF;
btree_trans_sort_paths(trans);
- trans_for_each_path_inorder(trans, path, idx) {
+ trans_for_each_path_inorder(trans, path, iter) {
int cmp = cmp_int(path->btree_id, id) ?:
cmp_int(path->cached, key_cache);
struct bkey_packed *where)
{
struct btree_path *path;
+ unsigned i;
- trans_for_each_path_with_node(trans, b, path) {
+ trans_for_each_path_with_node(trans, b, path, i) {
__bch2_btree_path_fix_key_modified(path, b, where);
bch2_btree_path_verify_level(trans, path, b->c.level);
}
{
struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
struct btree_path *linked;
+ unsigned i;
if (node_iter != &path->l[b->c.level].iter) {
__bch2_btree_node_iter_fix(path, b, node_iter, t,
bch2_btree_node_iter_verify(node_iter, b);
}
- trans_for_each_path_with_node(trans, b, linked) {
+ trans_for_each_path_with_node(trans, b, linked, i) {
__bch2_btree_node_iter_fix(linked, b,
&linked->l[b->c.level].iter, t,
where, clobber_u64s, new_u64s);
static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, struct btree *b)
{
struct bch_fs *c = trans->c;
- struct btree_insert_entry *i;
trans_for_each_update(trans, i)
if (!i->cached &&
i->btree_id == b->c.btree_id &&
bpos_cmp(i->k->k.p, b->data->min_key) >= 0 &&
bpos_cmp(i->k->k.p, b->data->max_key) <= 0) {
- i->old_v = bch2_btree_path_peek_slot(i->path, &i->old_k).v;
+ i->old_v = bch2_btree_path_peek_slot(trans->paths + i->path, &i->old_k).v;
if (unlikely(trans->journal_replay_not_finished)) {
struct bkey_i *j_k =
* A btree node is being replaced - update the iterator to point to the new
* node:
*/
-void bch2_trans_node_add(struct btree_trans *trans, struct btree *b)
+void bch2_trans_node_add(struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree *b)
{
- struct btree_path *path;
+ struct btree_path *prev;
+
+ BUG_ON(!btree_path_pos_in_node(path, b));
- trans_for_each_path(trans, path)
- if (path->uptodate == BTREE_ITER_UPTODATE &&
- !path->cached &&
- btree_path_pos_in_node(path, b)) {
+ while ((prev = prev_btree_path(trans, path)) &&
+ btree_path_pos_in_node(prev, b))
+ path = prev;
+
+ for (;
+ path && btree_path_pos_in_node(path, b);
+ path = next_btree_path(trans, path))
+ if (path->uptodate == BTREE_ITER_UPTODATE && !path->cached) {
enum btree_node_locked_type t =
btree_lock_want(path, b->c.level);
void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
{
struct btree_path *path;
+ unsigned i;
- trans_for_each_path_with_node(trans, b, path)
+ trans_for_each_path_with_node(trans, b, path, i)
__btree_path_level_init(path, b->c.level);
bch2_trans_revalidate_updates_in_node(trans, b);
struct bkey_s_c k;
int ret = 0;
- __bch2_btree_and_journal_iter_init_node_iter(&jiter, c, l->b, l->iter, path->pos);
+ __bch2_btree_and_journal_iter_init_node_iter(trans, &jiter, l->b, l->iter, path->pos);
k = bch2_btree_and_journal_iter_peek(&jiter);
bch2_bkey_buf_reassemble(out, c, k);
- if (flags & BTREE_ITER_PREFETCH)
+ if ((flags & BTREE_ITER_PREFETCH) &&
+ c->opts.btree_node_prefetch)
ret = btree_path_prefetch_j(trans, path, &jiter);
bch2_btree_and_journal_iter_exit(&jiter);
bch2_bkey_buf_unpack(&tmp, c, l->b,
bch2_btree_node_iter_peek(&l->iter, l->b));
- if (flags & BTREE_ITER_PREFETCH) {
+ if ((flags & BTREE_ITER_PREFETCH) &&
+ c->opts.btree_node_prefetch) {
ret = btree_path_prefetch(trans, path);
if (ret)
goto err;
struct bch_fs *c = trans->c;
struct btree_path *path;
unsigned long trace_ip = _RET_IP_;
- int i, ret = 0;
+ unsigned i;
+ int ret = 0;
if (trans->in_traverse_all)
return -BCH_ERR_transaction_restart_in_traverse_all;
trans->restarted = 0;
trans->last_restarted_ip = 0;
- trans_for_each_path(trans, path)
+ trans_for_each_path(trans, path, i)
path->should_be_locked = false;
btree_trans_sort_paths(trans);
/* Now, redo traversals in correct order: */
i = 0;
while (i < trans->nr_sorted) {
- path = trans->paths + trans->sorted[i];
+ btree_path_idx_t idx = trans->sorted[i];
/*
* Traversing a path can cause another path to be added at about
* the same position:
*/
- if (path->uptodate) {
- __btree_path_get(path, false);
- ret = bch2_btree_path_traverse_one(trans, path, 0, _THIS_IP_);
- __btree_path_put(path, false);
+ if (trans->paths[idx].uptodate) {
+ __btree_path_get(&trans->paths[idx], false);
+ ret = bch2_btree_path_traverse_one(trans, idx, 0, _THIS_IP_);
+ __btree_path_put(&trans->paths[idx], false);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
bch2_err_matches(ret, ENOMEM))
* stashed in the iterator and returned from bch2_trans_exit().
*/
int bch2_btree_path_traverse_one(struct btree_trans *trans,
- struct btree_path *path,
+ btree_path_idx_t path_idx,
unsigned flags,
unsigned long trace_ip)
{
+ struct btree_path *path = &trans->paths[path_idx];
unsigned depth_want = path->level;
int ret = -((int) trans->restarted);
goto out;
}
+ path = &trans->paths[path_idx];
+
if (unlikely(path->level >= BTREE_MAX_DEPTH))
- goto out;
+ goto out_uptodate;
path->level = btree_path_up_until_good_node(trans, path, 0);
goto out;
}
}
-
+out_uptodate:
path->uptodate = BTREE_ITER_UPTODATE;
out:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
}
}
-static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btree_path *src,
- bool intent)
+static btree_path_idx_t btree_path_clone(struct btree_trans *trans, btree_path_idx_t src,
+ bool intent)
{
- struct btree_path *new = btree_path_alloc(trans, src);
-
- btree_path_copy(trans, new, src);
- __btree_path_get(new, intent);
+ btree_path_idx_t new = btree_path_alloc(trans, src);
+ btree_path_copy(trans, trans->paths + new, trans->paths + src);
+ __btree_path_get(trans->paths + new, intent);
return new;
}
__flatten
-struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *trans,
- struct btree_path *path, bool intent,
- unsigned long ip)
+btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *trans,
+ btree_path_idx_t path, bool intent, unsigned long ip)
{
- __btree_path_put(path, intent);
+ __btree_path_put(trans->paths + path, intent);
path = btree_path_clone(trans, path, intent);
- path->preserve = false;
+ trans->paths[path].preserve = false;
return path;
}
-struct btree_path * __must_check
+btree_path_idx_t __must_check
__bch2_btree_path_set_pos(struct btree_trans *trans,
- struct btree_path *path, struct bpos new_pos,
- bool intent, unsigned long ip, int cmp)
+ btree_path_idx_t path_idx, struct bpos new_pos,
+ bool intent, unsigned long ip)
{
+ int cmp = bpos_cmp(new_pos, trans->paths[path_idx].pos);
+
bch2_trans_verify_not_in_restart(trans);
- EBUG_ON(!path->ref);
+ EBUG_ON(!trans->paths[path_idx].ref);
- path = bch2_btree_path_make_mut(trans, path, intent, ip);
+ path_idx = bch2_btree_path_make_mut(trans, path_idx, intent, ip);
+ struct btree_path *path = trans->paths + path_idx;
path->pos = new_pos;
trans->paths_sorted = false;
}
out:
bch2_btree_path_verify(trans, path);
- return path;
+ return path_idx;
}
/* Btree path: main interface: */
return NULL;
}
-static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path)
+static inline void __bch2_path_free(struct btree_trans *trans, btree_path_idx_t path)
{
- __bch2_btree_path_unlock(trans, path);
- btree_path_list_remove(trans, path);
- __clear_bit(path->idx, trans->paths_allocated);
+ __bch2_btree_path_unlock(trans, trans->paths + path);
+ btree_path_list_remove(trans, trans->paths + path);
+ __clear_bit(path, trans->paths_allocated);
}
-void bch2_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
+void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool intent)
{
- struct btree_path *dup;
-
- EBUG_ON(trans->paths + path->idx != path);
- EBUG_ON(!path->ref);
+ struct btree_path *path = trans->paths + path_idx, *dup;
if (!__btree_path_put(path, intent))
return;
if (path->should_be_locked &&
!trans->restarted &&
- (!dup || !bch2_btree_path_relock_norestart(trans, dup, _THIS_IP_)))
+ (!dup || !bch2_btree_path_relock_norestart(trans, dup)))
return;
if (dup) {
dup->should_be_locked |= path->should_be_locked;
}
- __bch2_path_free(trans, path);
+ __bch2_path_free(trans, path_idx);
}
-static void bch2_path_put_nokeep(struct btree_trans *trans, struct btree_path *path,
+static void bch2_path_put_nokeep(struct btree_trans *trans, btree_path_idx_t path,
bool intent)
{
- EBUG_ON(trans->paths + path->idx != path);
- EBUG_ON(!path->ref);
-
- if (!__btree_path_put(path, intent))
+ if (!__btree_path_put(trans->paths + path, intent))
return;
__bch2_path_free(trans, path);
noinline __cold
void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
{
- struct btree_insert_entry *i;
- struct btree_write_buffered_key *wb;
-
prt_printf(buf, "transaction updates for %s journal seq %llu",
trans->fn, trans->journal_res.seq);
prt_newline(buf);
prt_newline(buf);
}
- trans_for_each_wb_update(trans, wb) {
- prt_printf(buf, "update: btree=%s wb=1 %pS",
- bch2_btree_id_str(wb->btree),
- (void *) i->ip_allocated);
- prt_newline(buf);
-
- prt_printf(buf, " new ");
- bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(&wb->k));
- prt_newline(buf);
- }
+ for (struct jset_entry *e = trans->journal_entries;
+ e != btree_trans_journal_entries_top(trans);
+ e = vstruct_next(e))
+ bch2_journal_entry_to_text(buf, trans->c, e);
printbuf_indent_sub(buf, 2);
}
printbuf_exit(&buf);
}
-noinline __cold
-void bch2_btree_path_to_text(struct printbuf *out, struct btree_path *path)
+static void bch2_btree_path_to_text(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
{
+ struct btree_path *path = trans->paths + path_idx;
+
prt_printf(out, "path: idx %2u ref %u:%u %c %c btree=%s l=%u pos ",
- path->idx, path->ref, path->intent_ref,
+ path_idx, path->ref, path->intent_ref,
path->preserve ? 'P' : ' ',
path->should_be_locked ? 'S' : ' ',
bch2_btree_id_str(path->btree_id),
void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
bool nosort)
{
- struct btree_path *path;
- unsigned idx;
+ struct trans_for_each_path_inorder_iter iter;
if (!nosort)
btree_trans_sort_paths(trans);
- trans_for_each_path_inorder(trans, path, idx)
- bch2_btree_path_to_text(out, path);
+ trans_for_each_path_idx_inorder(trans, iter)
+ bch2_btree_path_to_text(out, trans, iter.path_idx);
}
noinline __cold
{
struct btree_transaction_stats *s = btree_trans_stats(trans);
struct printbuf buf = PRINTBUF;
- size_t nr = bitmap_weight(trans->paths_allocated, BTREE_ITER_MAX);
-
- if (!s)
- return;
+ size_t nr = bitmap_weight(trans->paths_allocated, trans->nr_paths);
bch2_trans_paths_to_text(&buf, trans);
printbuf_exit(&buf);
- trans->nr_max_paths = nr;
+ trans->nr_paths_max = nr;
}
noinline __cold
static noinline void btree_path_overflow(struct btree_trans *trans)
{
bch2_dump_trans_paths_updates(trans);
- panic("trans path overflow\n");
+ bch_err(trans->c, "trans path overflow");
}
-static inline struct btree_path *btree_path_alloc(struct btree_trans *trans,
- struct btree_path *pos)
+static noinline void btree_paths_realloc(struct btree_trans *trans)
{
- struct btree_path *path;
- size_t idx = find_first_zero_bit(trans->paths_allocated, BTREE_ITER_MAX);
+ unsigned nr = trans->nr_paths * 2;
+
+ void *p = kvzalloc(BITS_TO_LONGS(nr) * sizeof(unsigned long) +
+ sizeof(struct btree_trans_paths) +
+ nr * sizeof(struct btree_path) +
+ nr * sizeof(btree_path_idx_t) + 8 +
+ nr * sizeof(struct btree_insert_entry), GFP_KERNEL|__GFP_NOFAIL);
+
+ unsigned long *paths_allocated = p;
+ memcpy(paths_allocated, trans->paths_allocated, BITS_TO_LONGS(trans->nr_paths) * sizeof(unsigned long));
+ p += BITS_TO_LONGS(nr) * sizeof(unsigned long);
+
+ p += sizeof(struct btree_trans_paths);
+ struct btree_path *paths = p;
+ *trans_paths_nr(paths) = nr;
+ memcpy(paths, trans->paths, trans->nr_paths * sizeof(struct btree_path));
+ p += nr * sizeof(struct btree_path);
+
+ btree_path_idx_t *sorted = p;
+ memcpy(sorted, trans->sorted, trans->nr_sorted * sizeof(btree_path_idx_t));
+ p += nr * sizeof(btree_path_idx_t) + 8;
+
+ struct btree_insert_entry *updates = p;
+ memcpy(updates, trans->updates, trans->nr_paths * sizeof(struct btree_insert_entry));
+
+ unsigned long *old = trans->paths_allocated;
- if (unlikely(idx == BTREE_ITER_MAX))
- btree_path_overflow(trans);
+ rcu_assign_pointer(trans->paths_allocated, paths_allocated);
+ rcu_assign_pointer(trans->paths, paths);
+ rcu_assign_pointer(trans->sorted, sorted);
+ rcu_assign_pointer(trans->updates, updates);
- BUG_ON(idx > BTREE_ITER_MAX);
+ trans->nr_paths = nr;
+
+ if (old != trans->_paths_allocated)
+ kfree_rcu_mightsleep(old);
+}
+
+static inline btree_path_idx_t btree_path_alloc(struct btree_trans *trans,
+ btree_path_idx_t pos)
+{
+ btree_path_idx_t idx = find_first_zero_bit(trans->paths_allocated, trans->nr_paths);
+
+ if (unlikely(idx == trans->nr_paths)) {
+ if (trans->nr_paths == BTREE_ITER_MAX) {
+ btree_path_overflow(trans);
+ return 0;
+ }
+
+ btree_paths_realloc(trans);
+ }
/*
* Do this before marking the new path as allocated, since it won't be
* initialized yet:
*/
- if (unlikely(idx > trans->nr_max_paths))
+ if (unlikely(idx > trans->nr_paths_max))
bch2_trans_update_max_paths(trans);
__set_bit(idx, trans->paths_allocated);
- path = &trans->paths[idx];
- path->idx = idx;
+ struct btree_path *path = &trans->paths[idx];
path->ref = 0;
path->intent_ref = 0;
path->nodes_locked = 0;
- path->alloc_seq++;
- btree_path_list_add(trans, pos, path);
+ btree_path_list_add(trans, pos, idx);
trans->paths_sorted = false;
- return path;
+ return idx;
}
-struct btree_path *bch2_path_get(struct btree_trans *trans,
- enum btree_id btree_id, struct bpos pos,
- unsigned locks_want, unsigned level,
- unsigned flags, unsigned long ip)
+btree_path_idx_t bch2_path_get(struct btree_trans *trans,
+ enum btree_id btree_id, struct bpos pos,
+ unsigned locks_want, unsigned level,
+ unsigned flags, unsigned long ip)
{
- struct btree_path *path, *path_pos = NULL;
+ struct btree_path *path;
bool cached = flags & BTREE_ITER_CACHED;
bool intent = flags & BTREE_ITER_INTENT;
- int i;
+ struct trans_for_each_path_inorder_iter iter;
+ btree_path_idx_t path_pos = 0, path_idx;
bch2_trans_verify_not_in_restart(trans);
bch2_trans_verify_locks(trans);
btree_trans_sort_paths(trans);
- trans_for_each_path_inorder(trans, path, i) {
+ trans_for_each_path_inorder(trans, path, iter) {
if (__btree_path_cmp(path,
btree_id,
cached,
level) > 0)
break;
- path_pos = path;
+ path_pos = iter.path_idx;
}
if (path_pos &&
- path_pos->cached == cached &&
- path_pos->btree_id == btree_id &&
- path_pos->level == level) {
- __btree_path_get(path_pos, intent);
- path = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
+ trans->paths[path_pos].cached == cached &&
+ trans->paths[path_pos].btree_id == btree_id &&
+ trans->paths[path_pos].level == level) {
+ __btree_path_get(trans->paths + path_pos, intent);
+ path_idx = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
+ path = trans->paths + path_idx;
} else {
- path = btree_path_alloc(trans, path_pos);
- path_pos = NULL;
+ path_idx = btree_path_alloc(trans, path_pos);
+ path = trans->paths + path_idx;
__btree_path_get(path, intent);
path->pos = pos;
path->level = level;
path->locks_want = locks_want;
path->nodes_locked = 0;
- for (i = 0; i < ARRAY_SIZE(path->l); i++)
+ for (unsigned i = 0; i < ARRAY_SIZE(path->l); i++)
path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
#ifdef TRACK_PATH_ALLOCATED
path->ip_allocated = ip;
if (locks_want > path->locks_want)
bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want, NULL);
- return path;
+ return path_idx;
}
struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
int __must_check
bch2_btree_iter_traverse(struct btree_iter *iter)
{
+ struct btree_trans *trans = iter->trans;
int ret;
- iter->path = bch2_btree_path_set_pos(iter->trans, iter->path,
+ iter->path = bch2_btree_path_set_pos(trans, iter->path,
btree_iter_search_key(iter),
iter->flags & BTREE_ITER_INTENT,
btree_iter_ip_allocated(iter));
if (ret)
return ret;
- btree_path_set_should_be_locked(iter->path);
+ btree_path_set_should_be_locked(trans->paths + iter->path);
return 0;
}
struct btree *b = NULL;
int ret;
- EBUG_ON(iter->path->cached);
+ EBUG_ON(trans->paths[iter->path].cached);
bch2_btree_iter_verify(iter);
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (ret)
goto err;
- b = btree_path_node(iter->path, iter->path->level);
+ struct btree_path *path = btree_iter_path(trans, iter);
+ b = btree_path_node(path, path->level);
if (!b)
goto out;
iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
iter->flags & BTREE_ITER_INTENT,
btree_iter_ip_allocated(iter));
- btree_path_set_should_be_locked(iter->path);
+ btree_path_set_should_be_locked(btree_iter_path(trans, iter));
out:
bch2_btree_iter_verify_entry_exit(iter);
bch2_btree_iter_verify(iter);
struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
{
struct btree_trans *trans = iter->trans;
- struct btree_path *path = iter->path;
struct btree *b = NULL;
int ret;
+ EBUG_ON(trans->paths[iter->path].cached);
bch2_trans_verify_not_in_restart(trans);
- EBUG_ON(iter->path->cached);
bch2_btree_iter_verify(iter);
+ struct btree_path *path = btree_iter_path(trans, iter);
+
/* already at end? */
if (!btree_path_node(path, path->level))
return NULL;
* Haven't gotten to the end of the parent node: go back down to
* the next child node
*/
- path = iter->path =
- bch2_btree_path_set_pos(trans, path, bpos_successor(iter->pos),
- iter->flags & BTREE_ITER_INTENT,
- btree_iter_ip_allocated(iter));
+ iter->path = bch2_btree_path_set_pos(trans, iter->path,
+ bpos_successor(iter->pos),
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
+ path = btree_iter_path(trans, iter);
btree_path_set_level_down(trans, path, iter->min_depth);
- ret = bch2_btree_path_traverse(trans, path, iter->flags);
+ ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (ret)
goto err;
+ path = btree_iter_path(trans, iter);
b = path->l[path->level].b;
}
iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
iter->flags & BTREE_ITER_INTENT,
btree_iter_ip_allocated(iter));
- btree_path_set_should_be_locked(iter->path);
- BUG_ON(iter->path->uptodate);
+ btree_path_set_should_be_locked(btree_iter_path(trans, iter));
+ EBUG_ON(btree_iter_path(trans, iter)->uptodate);
out:
bch2_btree_iter_verify_entry_exit(iter);
bch2_btree_iter_verify(iter);
}
static noinline
-struct bkey_i *__bch2_btree_trans_peek_updates(struct btree_iter *iter)
+void bch2_btree_trans_peek_prev_updates(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_s_c *k)
{
- struct btree_insert_entry *i;
- struct bkey_i *ret = NULL;
+ struct bpos end = path_l(btree_iter_path(trans, iter))->b->data->min_key;
- trans_for_each_update(iter->trans, i) {
- if (i->btree_id < iter->btree_id)
- continue;
- if (i->btree_id > iter->btree_id)
- break;
- if (bpos_lt(i->k->k.p, iter->path->pos))
- continue;
- if (i->key_cache_already_flushed)
- continue;
- if (!ret || bpos_lt(i->k->k.p, ret->k.p))
- ret = i->k;
- }
+ trans_for_each_update(trans, i)
+ if (!i->key_cache_already_flushed &&
+ i->btree_id == iter->btree_id &&
+ bpos_le(i->k->k.p, iter->pos) &&
+ bpos_ge(i->k->k.p, k->k ? k->k->p : end)) {
+ iter->k = i->k->k;
+ *k = bkey_i_to_s_c(i->k);
+ }
+}
- return ret;
+static noinline
+void bch2_btree_trans_peek_updates(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_s_c *k)
+{
+ struct btree_path *path = btree_iter_path(trans, iter);
+ struct bpos end = path_l(path)->b->key.k.p;
+
+ trans_for_each_update(trans, i)
+ if (!i->key_cache_already_flushed &&
+ i->btree_id == iter->btree_id &&
+ bpos_ge(i->k->k.p, path->pos) &&
+ bpos_le(i->k->k.p, k->k ? k->k->p : end)) {
+ iter->k = i->k->k;
+ *k = bkey_i_to_s_c(i->k);
+ }
}
-static inline struct bkey_i *btree_trans_peek_updates(struct btree_iter *iter)
+static noinline
+void bch2_btree_trans_peek_slot_updates(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_s_c *k)
{
- return iter->flags & BTREE_ITER_WITH_UPDATES
- ? __bch2_btree_trans_peek_updates(iter)
- : NULL;
+ trans_for_each_update(trans, i)
+ if (!i->key_cache_already_flushed &&
+ i->btree_id == iter->btree_id &&
+ bpos_eq(i->k->k.p, iter->pos)) {
+ iter->k = i->k->k;
+ *k = bkey_i_to_s_c(i->k);
+ }
}
static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
struct btree_iter *iter,
struct bpos end_pos)
{
+ struct btree_path *path = btree_iter_path(trans, iter);
+
return bch2_journal_keys_peek_upto(trans->c, iter->btree_id,
- iter->path->level,
- iter->path->pos,
+ path->level,
+ path->pos,
end_pos,
&iter->journal_idx);
}
struct bkey_s_c btree_trans_peek_slot_journal(struct btree_trans *trans,
struct btree_iter *iter)
{
- struct bkey_i *k = bch2_btree_journal_peek(trans, iter, iter->path->pos);
+ struct btree_path *path = btree_iter_path(trans, iter);
+ struct bkey_i *k = bch2_btree_journal_peek(trans, iter, path->pos);
if (k) {
iter->k = k->k;
struct btree_iter *iter,
struct bkey_s_c k)
{
+ struct btree_path *path = btree_iter_path(trans, iter);
struct bkey_i *next_journal =
bch2_btree_journal_peek(trans, iter,
- k.k ? k.k->p : path_l(iter->path)->b->key.k.p);
+ k.k ? k.k->p : path_l(path)->b->key.k.p);
if (next_journal) {
iter->k = next_journal->k;
ret = bch2_btree_path_traverse(trans, iter->key_cache_path,
iter->flags|BTREE_ITER_CACHED) ?:
- bch2_btree_path_relock(trans, iter->path, _THIS_IP_);
+ bch2_btree_path_relock(trans, btree_iter_path(trans, iter), _THIS_IP_);
if (unlikely(ret))
return bkey_s_c_err(ret);
- btree_path_set_should_be_locked(iter->key_cache_path);
+ btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);
- k = bch2_btree_path_peek_slot(iter->key_cache_path, &u);
+ k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
if (k.k && !bkey_err(k)) {
iter->k = u;
k.k = &iter->k;
static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
{
struct btree_trans *trans = iter->trans;
- struct bkey_i *next_update;
struct bkey_s_c k, k2;
int ret;
- EBUG_ON(iter->path->cached);
+ EBUG_ON(btree_iter_path(trans, iter)->cached);
bch2_btree_iter_verify(iter);
while (1) {
goto out;
}
- l = path_l(iter->path);
+ struct btree_path *path = btree_iter_path(trans, iter);
+ l = path_l(path);
if (unlikely(!l->b)) {
/* No btree nodes at requested level: */
goto out;
}
- btree_path_set_should_be_locked(iter->path);
+ btree_path_set_should_be_locked(path);
k = btree_path_level_peek_all(trans->c, l, &iter->k);
if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL))
k = btree_trans_peek_journal(trans, iter, k);
- next_update = btree_trans_peek_updates(iter);
-
- if (next_update &&
- bpos_le(next_update->k.p,
- k.k ? k.k->p : l->b->key.k.p)) {
- iter->k = next_update->k;
- k = bkey_i_to_s_c(next_update);
- }
+ if (unlikely((iter->flags & BTREE_ITER_WITH_UPDATES) &&
+ trans->nr_updates))
+ bch2_btree_trans_peek_updates(trans, iter, &k);
if (k.k && bkey_deleted(k.k)) {
/*
if (iter->update_path) {
bch2_path_put_nokeep(trans, iter->update_path,
iter->flags & BTREE_ITER_INTENT);
- iter->update_path = NULL;
+ iter->update_path = 0;
}
bch2_btree_iter_verify_entry_exit(iter);
goto out_no_locked;
/*
- * iter->pos should be mononotically increasing, and always be
- * equal to the key we just returned - except extents can
- * straddle iter->pos:
+ * We need to check against @end before FILTER_SNAPSHOTS because
+	 * if we get to a different inode than requested we might be
+ * seeing keys for a different snapshot tree that will all be
+ * filtered out.
+ *
+ * But we can't do the full check here, because bkey_start_pos()
+ * isn't monotonically increasing before FILTER_SNAPSHOTS, and
+ * that's what we check against in extents mode:
*/
- if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
- iter_pos = k.k->p;
- else
- iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));
-
- if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS)
- ? bkey_gt(iter_pos, end)
- : bkey_ge(iter_pos, end)))
+ if (k.k->p.inode > end.inode)
goto end;
if (iter->update_path &&
- !bkey_eq(iter->update_path->pos, k.k->p)) {
+ !bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) {
bch2_path_put_nokeep(trans, iter->update_path,
iter->flags & BTREE_ITER_INTENT);
- iter->update_path = NULL;
+ iter->update_path = 0;
}
if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
* advance, same as on exit for iter->path, but only up
* to snapshot
*/
- __btree_path_get(iter->path, iter->flags & BTREE_ITER_INTENT);
+ __btree_path_get(trans->paths + iter->path, iter->flags & BTREE_ITER_INTENT);
iter->update_path = iter->path;
iter->update_path = bch2_btree_path_set_pos(trans,
continue;
}
+ /*
+	 * iter->pos should be monotonically increasing, and always be
+ * equal to the key we just returned - except extents can
+ * straddle iter->pos:
+ */
+ if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
+ iter_pos = k.k->p;
+ else
+ iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));
+
+ if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS)
+ ? bkey_gt(iter_pos, end)
+ : bkey_ge(iter_pos, end)))
+ goto end;
+
break;
}
iter->flags & BTREE_ITER_INTENT,
btree_iter_ip_allocated(iter));
- btree_path_set_should_be_locked(iter->path);
+ btree_path_set_should_be_locked(btree_iter_path(trans, iter));
out_no_locked:
if (iter->update_path) {
- ret = bch2_btree_path_relock(trans, iter->update_path, _THIS_IP_);
+ ret = bch2_btree_path_relock(trans, trans->paths + iter->update_path, _THIS_IP_);
if (unlikely(ret))
k = bkey_s_c_err(ret);
else
- btree_path_set_should_be_locked(iter->update_path);
+ btree_path_set_should_be_locked(trans->paths + iter->update_path);
}
if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
{
struct btree_trans *trans = iter->trans;
struct bpos search_key = iter->pos;
- struct btree_path *saved_path = NULL;
struct bkey_s_c k;
struct bkey saved_k;
const struct bch_val *saved_v;
+ btree_path_idx_t saved_path = 0;
int ret;
- EBUG_ON(iter->path->cached || iter->path->level);
- EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);
+ EBUG_ON(btree_iter_path(trans, iter)->cached ||
+ btree_iter_path(trans, iter)->level);
if (iter->flags & BTREE_ITER_WITH_JOURNAL)
- return bkey_s_c_err(-EIO);
+ return bkey_s_c_err(-BCH_ERR_btree_iter_with_journal_not_supported);
bch2_btree_iter_verify(iter);
bch2_btree_iter_verify_entry_exit(iter);
goto out_no_locked;
}
- k = btree_path_level_peek(trans, iter->path,
- &iter->path->l[0], &iter->k);
+ struct btree_path *path = btree_iter_path(trans, iter);
+
+ k = btree_path_level_peek(trans, path, &path->l[0], &iter->k);
if (!k.k ||
((iter->flags & BTREE_ITER_IS_EXTENTS)
? bpos_ge(bkey_start_pos(k.k), search_key)
: bpos_gt(k.k->p, search_key)))
- k = btree_path_level_prev(trans, iter->path,
- &iter->path->l[0], &iter->k);
+ k = btree_path_level_prev(trans, path, &path->l[0], &iter->k);
+
+ if (unlikely((iter->flags & BTREE_ITER_WITH_UPDATES) &&
+ trans->nr_updates))
+ bch2_btree_trans_peek_prev_updates(trans, iter, &k);
if (likely(k.k)) {
if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
bch2_path_put_nokeep(trans, iter->path,
iter->flags & BTREE_ITER_INTENT);
iter->path = saved_path;
- saved_path = NULL;
+ saved_path = 0;
iter->k = saved_k;
k.v = saved_v;
goto got_key;
}
- if (bch2_snapshot_is_ancestor(iter->trans->c,
+ if (bch2_snapshot_is_ancestor(trans->c,
iter->snapshot,
k.k->p.snapshot)) {
if (saved_path)
iter->flags & BTREE_ITER_INTENT);
saved_path = btree_path_clone(trans, iter->path,
iter->flags & BTREE_ITER_INTENT);
+ path = btree_iter_path(trans, iter);
saved_k = *k.k;
saved_v = k.v;
}
continue;
}
+ btree_path_set_should_be_locked(path);
break;
- } else if (likely(!bpos_eq(iter->path->l[0].b->data->min_key, POS_MIN))) {
+ } else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) {
/* Advance to previous leaf node: */
- search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
+ search_key = bpos_predecessor(path->l[0].b->data->min_key);
} else {
/* Start of btree: */
bch2_btree_iter_set_pos(iter, POS_MIN);
if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
iter->pos.snapshot = iter->snapshot;
-
- btree_path_set_should_be_locked(iter->path);
out_no_locked:
if (saved_path)
bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_INTENT);
bch2_btree_iter_verify(iter);
bch2_btree_iter_verify_entry_exit(iter);
- EBUG_ON(iter->path->level && (iter->flags & BTREE_ITER_WITH_KEY_CACHE));
+ EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_WITH_KEY_CACHE));
/* extents can't span inode numbers: */
if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
if ((iter->flags & BTREE_ITER_CACHED) ||
!(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
- struct bkey_i *next_update;
+ k = bkey_s_c_null;
- if ((next_update = btree_trans_peek_updates(iter)) &&
- bpos_eq(next_update->k.p, iter->pos)) {
- iter->k = next_update->k;
- k = bkey_i_to_s_c(next_update);
- goto out;
+ if (unlikely((iter->flags & BTREE_ITER_WITH_UPDATES) &&
+ trans->nr_updates)) {
+ bch2_btree_trans_peek_slot_updates(trans, iter, &k);
+ if (k.k)
+ goto out;
}
if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL) &&
goto out_no_locked;
}
- k = bch2_btree_path_peek_slot(iter->path, &iter->k);
+ k = bch2_btree_path_peek_slot(trans->paths + iter->path, &iter->k);
if (unlikely(!k.k))
goto out_no_locked;
} else {
if (iter->flags & BTREE_ITER_IS_EXTENTS)
end.offset = U64_MAX;
- EBUG_ON(iter->path->level);
+ EBUG_ON(btree_iter_path(trans, iter)->level);
if (iter->flags & BTREE_ITER_INTENT) {
struct btree_iter iter2;
k = bch2_btree_iter_peek_upto(&iter2, end);
if (k.k && !bkey_err(k)) {
+ swap(iter->key_cache_path, iter2.key_cache_path);
iter->k = iter2.k;
k.k = &iter->k;
}
}
}
out:
- btree_path_set_should_be_locked(iter->path);
+ btree_path_set_should_be_locked(btree_iter_path(trans, iter));
out_no_locked:
bch2_btree_iter_verify_entry_exit(iter);
bch2_btree_iter_verify(iter);
struct btree_path *path;
unsigned i;
- BUG_ON(trans->nr_sorted != bitmap_weight(trans->paths_allocated, BTREE_ITER_MAX));
+ BUG_ON(trans->nr_sorted != bitmap_weight(trans->paths_allocated, trans->nr_paths) - 1);
- trans_for_each_path(trans, path) {
+ trans_for_each_path(trans, path, i) {
BUG_ON(path->sorted_idx >= trans->nr_sorted);
- BUG_ON(trans->sorted[path->sorted_idx] != path->idx);
+ BUG_ON(trans->sorted[path->sorted_idx] != i);
}
for (i = 0; i < trans->nr_sorted; i++) {
static void btree_trans_verify_sorted(struct btree_trans *trans)
{
struct btree_path *path, *prev = NULL;
- unsigned i;
+ struct trans_for_each_path_inorder_iter iter;
if (!bch2_debug_check_iterators)
return;
- trans_for_each_path_inorder(trans, path, i) {
+ trans_for_each_path_inorder(trans, path, iter) {
if (prev && btree_path_cmp(prev, path) > 0) {
__bch2_dump_trans_paths_updates(trans, true);
panic("trans paths out of order!\n");
static inline void btree_path_list_remove(struct btree_trans *trans,
struct btree_path *path)
{
- unsigned i;
-
EBUG_ON(path->sorted_idx >= trans->nr_sorted);
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
trans->nr_sorted--;
memmove_u64s_down_small(trans->sorted + path->sorted_idx,
trans->sorted + path->sorted_idx + 1,
- DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx, 8));
+ DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
+ sizeof(u64) / sizeof(btree_path_idx_t)));
#else
array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
#endif
- for (i = path->sorted_idx; i < trans->nr_sorted; i++)
+ for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
trans->paths[trans->sorted[i]].sorted_idx = i;
-
- path->sorted_idx = U8_MAX;
}
static inline void btree_path_list_add(struct btree_trans *trans,
- struct btree_path *pos,
- struct btree_path *path)
+ btree_path_idx_t pos,
+ btree_path_idx_t path_idx)
{
- unsigned i;
+ struct btree_path *path = trans->paths + path_idx;
- path->sorted_idx = pos ? pos->sorted_idx + 1 : trans->nr_sorted;
+ path->sorted_idx = pos ? trans->paths[pos].sorted_idx + 1 : trans->nr_sorted;
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1,
trans->sorted + path->sorted_idx,
- DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx, 8));
+ DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
+ sizeof(u64) / sizeof(btree_path_idx_t)));
trans->nr_sorted++;
- trans->sorted[path->sorted_idx] = path->idx;
+ trans->sorted[path->sorted_idx] = path_idx;
#else
- array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx);
+ array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path_idx);
#endif
- for (i = path->sorted_idx; i < trans->nr_sorted; i++)
+ for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
trans->paths[trans->sorted[i]].sorted_idx = i;
btree_trans_verify_sorted_refs(trans);
if (iter->key_cache_path)
bch2_path_put(trans, iter->key_cache_path,
iter->flags & BTREE_ITER_INTENT);
- iter->path = NULL;
- iter->update_path = NULL;
- iter->key_cache_path = NULL;
+ iter->path = 0;
+ iter->update_path = 0;
+ iter->key_cache_path = 0;
+ iter->trans = NULL;
}
void bch2_trans_iter_init_outlined(struct btree_trans *trans,
iter->min_depth = depth;
- BUG_ON(iter->path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
- BUG_ON(iter->path->level != depth);
- BUG_ON(iter->min_depth != depth);
+ struct btree_path *path = btree_iter_path(trans, iter);
+ BUG_ON(path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
+ BUG_ON(path->level != depth);
+ BUG_ON(iter->min_depth != depth);
}
void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
{
+ struct btree_trans *trans = src->trans;
+
*dst = *src;
+#ifdef TRACK_PATH_ALLOCATED
+ dst->ip_allocated = _RET_IP_;
+#endif
if (src->path)
- __btree_path_get(src->path, src->flags & BTREE_ITER_INTENT);
+ __btree_path_get(trans->paths + src->path, src->flags & BTREE_ITER_INTENT);
if (src->update_path)
- __btree_path_get(src->update_path, src->flags & BTREE_ITER_INTENT);
- dst->key_cache_path = NULL;
+ __btree_path_get(trans->paths + src->update_path, src->flags & BTREE_ITER_INTENT);
+ dst->key_cache_path = 0;
}
void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
+ struct bch_fs *c = trans->c;
unsigned new_top = trans->mem_top + size;
- size_t old_bytes = trans->mem_bytes;
- size_t new_bytes = roundup_pow_of_two(new_top);
+ unsigned old_bytes = trans->mem_bytes;
+ unsigned new_bytes = roundup_pow_of_two(new_top);
int ret;
void *new_mem;
void *p;
- trans->mem_max = max(trans->mem_max, new_top);
-
WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
+ struct btree_transaction_stats *s = btree_trans_stats(trans);
+ s->max_mem = max(s->max_mem, new_bytes);
+
new_mem = krealloc(trans->mem, new_bytes, GFP_NOWAIT|__GFP_NOWARN);
if (unlikely(!new_mem)) {
bch2_trans_unlock(trans);
new_mem = krealloc(trans->mem, new_bytes, GFP_KERNEL);
if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
- new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
+ new_mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
new_bytes = BTREE_TRANS_MEM_MAX;
kfree(trans->mem);
}
trans->mem_bytes = new_bytes;
if (old_bytes) {
- trace_and_count(trans->c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
+ trace_and_count(c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
}
if (trans->srcu_held) {
struct bch_fs *c = trans->c;
struct btree_path *path;
+ unsigned i;
- trans_for_each_path(trans, path)
+ trans_for_each_path(trans, path, i)
if (path->cached && !btree_node_locked(path, 0))
path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
}
}
-void bch2_trans_srcu_lock(struct btree_trans *trans)
+static void bch2_trans_srcu_lock(struct btree_trans *trans)
{
if (!trans->srcu_held) {
trans->srcu_idx = srcu_read_lock(&trans->c->btree_trans_barrier);
u32 bch2_trans_begin(struct btree_trans *trans)
{
struct btree_path *path;
+ unsigned i;
u64 now;
bch2_trans_reset_updates(trans);
trans->restart_count++;
trans->mem_top = 0;
+ trans->journal_entries = NULL;
- trans_for_each_path(trans, path) {
+ trans_for_each_path(trans, path, i) {
path->should_be_locked = false;
/*
* iterators if we do that
*/
if (!path->ref && !path->preserve)
- __bch2_path_free(trans, path);
+ __bch2_path_free(trans, i);
else
path->preserve = false;
}
now = local_clock();
+
+ if (!IS_ENABLED(CONFIG_BCACHEFS_NO_LATENCY_ACCT) &&
+ time_after64(now, trans->last_begin_time + 10))
+ __time_stats_update(&btree_trans_stats(trans)->duration,
+ trans->last_begin_time, now);
+
if (!trans->restarted &&
(need_resched() ||
- now - trans->last_begin_time > BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS)) {
+ time_after64(now, trans->last_begin_time + BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS))) {
drop_locks_do(trans, (cond_resched(), 0));
now = local_clock();
}
return trans->restart_count;
}
-static struct btree_trans *bch2_trans_alloc(struct bch_fs *c)
-{
- struct btree_trans *trans;
-
- if (IS_ENABLED(__KERNEL__)) {
- trans = this_cpu_xchg(c->btree_trans_bufs->trans, NULL);
- if (trans)
- return trans;
- }
-
- trans = mempool_alloc(&c->btree_trans_pool, GFP_NOFS);
- /*
- * paths need to be zeroed, bch2_check_for_deadlock looks at
- * paths in other threads
- */
- memset(&trans->paths, 0, sizeof(trans->paths));
- return trans;
-}
-
-const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR];
+const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR] = { "(unknown)" };
unsigned bch2_trans_get_fn_idx(const char *fn)
{
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(bch2_btree_transaction_fns); i++)
+ for (unsigned i = 0; i < ARRAY_SIZE(bch2_btree_transaction_fns); i++)
if (!bch2_btree_transaction_fns[i] ||
bch2_btree_transaction_fns[i] == fn) {
bch2_btree_transaction_fns[i] = fn;
}
pr_warn_once("BCH_TRANSACTIONS_NR not big enough!");
- return i;
+ return 0;
}
struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
__acquires(&c->btree_trans_barrier)
{
struct btree_trans *trans;
- struct btree_transaction_stats *s;
- trans = bch2_trans_alloc(c);
-
- memset(trans, 0, sizeof(*trans));
- trans->c = c;
- trans->fn = fn_idx < ARRAY_SIZE(bch2_btree_transaction_fns)
- ? bch2_btree_transaction_fns[fn_idx] : NULL;
- trans->last_begin_time = local_clock();
- trans->fn_idx = fn_idx;
- trans->locking_wait.task = current;
- trans->journal_replay_not_finished =
- unlikely(!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)) &&
- atomic_inc_not_zero(&c->journal_keys.ref);
- closure_init_stack(&trans->ref);
-
- s = btree_trans_stats(trans);
- if (s && s->max_mem) {
- unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);
-
- trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
-
- if (!unlikely(trans->mem)) {
- trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
- trans->mem_bytes = BTREE_TRANS_MEM_MAX;
- } else {
- trans->mem_bytes = expected_mem_bytes;
+ if (IS_ENABLED(__KERNEL__)) {
+ trans = this_cpu_xchg(c->btree_trans_bufs->trans, NULL);
+ if (trans) {
+ memset(trans, 0, offsetof(struct btree_trans, list));
+ goto got_trans;
}
}
- if (s) {
- trans->nr_max_paths = s->nr_max_paths;
- trans->wb_updates_size = s->wb_updates_size;
- }
-
- trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
- trans->srcu_lock_time = jiffies;
- trans->srcu_held = true;
+ trans = mempool_alloc(&c->btree_trans_pool, GFP_NOFS);
+ memset(trans, 0, sizeof(*trans));
+ closure_init_stack(&trans->ref);
- if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
+ seqmutex_lock(&c->btree_trans_lock);
+ if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
struct btree_trans *pos;
+ pid_t pid = current->pid;
+
+ trans->locking_wait.task = current;
- seqmutex_lock(&c->btree_trans_lock);
list_for_each_entry(pos, &c->btree_trans_list, list) {
+ struct task_struct *pos_task = READ_ONCE(pos->locking_wait.task);
/*
* We'd much prefer to be stricter here and completely
* disallow multiple btree_trans in the same thread -
* but the data move path calls bch2_write when we
* already have a btree_trans initialized.
*/
- BUG_ON(trans->locking_wait.task->pid == pos->locking_wait.task->pid &&
+ BUG_ON(pos_task &&
+ pid == pos_task->pid &&
bch2_trans_locked(pos));
- if (trans->locking_wait.task->pid < pos->locking_wait.task->pid) {
+ if (pos_task && pid < pos_task->pid) {
list_add_tail(&trans->list, &pos->list);
goto list_add_done;
}
}
- list_add_tail(&trans->list, &c->btree_trans_list);
+ }
+ list_add_tail(&trans->list, &c->btree_trans_list);
list_add_done:
- seqmutex_unlock(&c->btree_trans_lock);
+ seqmutex_unlock(&c->btree_trans_lock);
+got_trans:
+ trans->c = c;
+ trans->last_begin_time = local_clock();
+ trans->fn_idx = fn_idx;
+ trans->locking_wait.task = current;
+ trans->journal_replay_not_finished =
+ unlikely(!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)) &&
+ atomic_inc_not_zero(&c->journal_keys.ref);
+ trans->nr_paths = ARRAY_SIZE(trans->_paths);
+ trans->paths_allocated = trans->_paths_allocated;
+ trans->sorted = trans->_sorted;
+ trans->paths = trans->_paths;
+ trans->updates = trans->_updates;
+
+ *trans_paths_nr(trans->paths) = BTREE_ITER_INITIAL;
+
+ trans->paths_allocated[0] = 1;
+
+ if (fn_idx < BCH_TRANSACTIONS_NR) {
+ trans->fn = bch2_btree_transaction_fns[fn_idx];
+
+ struct btree_transaction_stats *s = &c->btree_transaction_stats[fn_idx];
+
+ if (s->max_mem) {
+ unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);
+
+ trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
+ if (likely(trans->mem))
+ trans->mem_bytes = expected_mem_bytes;
+ }
+
+ trans->nr_paths_max = s->nr_max_paths;
+ trans->journal_entries_size = s->journal_entries_size;
}
+ trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
+ trans->srcu_lock_time = jiffies;
+ trans->srcu_held = true;
return trans;
}
#ifdef CONFIG_BCACHEFS_DEBUG
struct bch_fs *c = trans->c;
struct btree_path *path;
+ unsigned i;
- trans_for_each_path(trans, path)
+ trans_for_each_path(trans, path, i)
if (path->ref)
goto leaked;
return;
leaked:
bch_err(c, "btree paths leaked from %s!", trans->fn);
- trans_for_each_path(trans, path)
+ trans_for_each_path(trans, path, i)
if (path->ref)
printk(KERN_ERR " btree %s %pS\n",
bch2_btree_id_str(path->btree_id),
void bch2_trans_put(struct btree_trans *trans)
__releases(&c->btree_trans_barrier)
{
- struct btree_insert_entry *i;
struct bch_fs *c = trans->c;
- struct btree_transaction_stats *s = btree_trans_stats(trans);
bch2_trans_unlock(trans);
- if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
- seqmutex_lock(&c->btree_trans_lock);
- list_del(&trans->list);
- seqmutex_unlock(&c->btree_trans_lock);
- }
-
- closure_sync(&trans->ref);
-
- if (s)
- s->max_mem = max(s->max_mem, trans->mem_max);
-
trans_for_each_update(trans, i)
- __btree_path_put(i->path, true);
- trans->nr_updates = 0;
+ __btree_path_put(trans->paths + i->path, true);
+ trans->nr_updates = 0;
+ trans->locking_wait.task = NULL;
check_btree_paths_leaked(trans);
srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
}
- kfree(trans->extra_journal_entries.data);
-
if (trans->fs_usage_deltas) {
if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
REPLICAS_DELTA_LIST_MAX)
if (unlikely(trans->journal_replay_not_finished))
bch2_journal_keys_put(c);
+ unsigned long *paths_allocated = trans->paths_allocated;
+ trans->paths_allocated = NULL;
+ trans->paths = NULL;
+
+ if (paths_allocated != trans->_paths_allocated)
+ kvfree_rcu_mightsleep(paths_allocated);
+
if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
mempool_free(trans->mem, &c->btree_trans_mem_pool);
else
/* Userspace doesn't have a real percpu implementation: */
if (IS_ENABLED(__KERNEL__))
trans = this_cpu_xchg(c->btree_trans_bufs->trans, trans);
- if (trans)
+
+ if (trans) {
+ closure_sync(&trans->ref);
+
+ seqmutex_lock(&c->btree_trans_lock);
+ list_del(&trans->list);
+ seqmutex_unlock(&c->btree_trans_lock);
+
mempool_free(trans, &c->btree_trans_pool);
+ }
}
static void __maybe_unused
void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
{
- struct btree_path *path;
struct btree_bkey_cached_common *b;
static char lock_types[] = { 'r', 'i', 'w' };
struct task_struct *task = READ_ONCE(trans->locking_wait.task);
unsigned l, idx;
+ /* before rcu_read_lock(): */
+ bch2_printbuf_make_room(out, 4096);
+
if (!out->nr_tabstops) {
printbuf_tabstop_push(out, 16);
printbuf_tabstop_push(out, 32);
prt_printf(out, "%i %s\n", task ? task->pid : 0, trans->fn);
- trans_for_each_path_safe(trans, path, idx) {
+ /* trans->paths is rcu protected vs. freeing */
+ rcu_read_lock();
+ out->atomic++;
+
+ struct btree_path *paths = rcu_dereference(trans->paths);
+ if (!paths)
+ goto out;
+
+ unsigned long *paths_allocated = trans_paths_allocated(paths);
+
+ trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths), idx, 1) {
+ struct btree_path *path = paths + idx;
if (!path->nodes_locked)
continue;
prt_printf(out, " path %u %c l=%u %s:",
- path->idx,
+ idx,
path->cached ? 'c' : 'b',
path->level,
bch2_btree_id_str(path->btree_id));
bch2_btree_bkey_cached_common_to_text(out, b);
prt_newline(out);
}
+out:
+ --out->atomic;
+ rcu_read_unlock();
}
void bch2_fs_btree_iter_exit(struct bch_fs *c)
struct btree_trans *trans;
int cpu;
+ if (c->btree_trans_bufs)
+ for_each_possible_cpu(cpu) {
+ struct btree_trans *trans =
+ per_cpu_ptr(c->btree_trans_bufs, cpu)->trans;
+
+ if (trans) {
+ closure_sync(&trans->ref);
+
+ seqmutex_lock(&c->btree_trans_lock);
+ list_del(&trans->list);
+ seqmutex_unlock(&c->btree_trans_lock);
+ }
+ kfree(trans);
+ }
+ free_percpu(c->btree_trans_bufs);
+
trans = list_first_entry_or_null(&c->btree_trans_list, struct btree_trans, list);
if (trans)
panic("%s leaked btree_trans\n", trans->fn);
- if (c->btree_trans_bufs)
- for_each_possible_cpu(cpu)
- kfree(per_cpu_ptr(c->btree_trans_bufs, cpu)->trans);
- free_percpu(c->btree_trans_bufs);
-
for (s = c->btree_transaction_stats;
s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
s++) {
kfree(s->max_paths_text);
- bch2_time_stats_exit(&s->lock_hold_times);
+ time_stats_exit(&s->lock_hold_times);
}
if (c->btree_trans_barrier_initialized)
mempool_exit(&c->btree_trans_pool);
}
-int bch2_fs_btree_iter_init(struct bch_fs *c)
+void bch2_fs_btree_iter_init_early(struct bch_fs *c)
{
struct btree_transaction_stats *s;
- int ret;
for (s = c->btree_transaction_stats;
s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
s++) {
- bch2_time_stats_init(&s->lock_hold_times);
+ time_stats_init(&s->duration);
+ time_stats_init(&s->lock_hold_times);
mutex_init(&s->lock);
}
INIT_LIST_HEAD(&c->btree_trans_list);
seqmutex_init(&c->btree_trans_lock);
+}
+
+int bch2_fs_btree_iter_init(struct bch_fs *c)
+{
+ int ret;
c->btree_trans_bufs = alloc_percpu(struct btree_trans_buf);
if (!c->btree_trans_bufs)