X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fbtree_iter.h;h=5e103f519e62ec280863c389cb765904a6becb91;hb=0a08ddf78c9cf4b6671ba64b049c37da64233f4f;hp=6b7cef145cedd72185381592ca90b22a7b33e23d;hpb=0206d42daf4c4bd3bbcfa15a2bef34319524db49;p=bcachefs-tools-debian

diff --git a/libbcachefs/btree_iter.h b/libbcachefs/btree_iter.h
index 6b7cef1..5e103f5 100644
--- a/libbcachefs/btree_iter.h
+++ b/libbcachefs/btree_iter.h
@@ -4,8 +4,14 @@
 #include "bset.h"
 #include "btree_types.h"
+#include "trace.h"
 
-#include <trace/events/bcachefs.h>
+static inline int __bkey_err(const struct bkey *k)
+{
+	return PTR_ERR_OR_ZERO(k);
+}
+
+#define bkey_err(_k)	__bkey_err((_k).k)
 
 static inline void __btree_path_get(struct btree_path *path, bool intent)
 {
@@ -36,14 +42,7 @@ static inline struct btree *btree_path_node(struct btree_path *path,
 static inline bool btree_node_lock_seq_matches(const struct btree_path *path,
 					const struct btree *b, unsigned level)
 {
-	/*
-	 * We don't compare the low bits of the lock sequence numbers because
-	 * @path might have taken a write lock on @b, and we don't want to skip
-	 * the linked path if the sequence numbers were equal before taking that
-	 * write lock. The lock sequence number is incremented by taking and
-	 * releasing write locks and is even when unlocked:
-	 */
-	return path->l[level].lock_seq >> 1 == b->c.lock.state.seq >> 1;
+	return path->l[level].lock_seq == six_lock_seq(&b->c.lock);
 }
 
 static inline struct btree *btree_node_parent(struct btree_path *path,
@@ -90,6 +89,35 @@ __trans_next_path(struct btree_trans *trans, unsigned idx)
 #define trans_for_each_path(_trans, _path)				\
 	trans_for_each_path_from(_trans, _path, 0)
 
+static inline struct btree_path *
+__trans_next_path_safe(struct btree_trans *trans, unsigned *idx)
+{
+	u64 l;
+
+	if (*idx == BTREE_ITER_MAX)
+		return NULL;
+
+	l = trans->paths_allocated >> *idx;
+	if (!l)
+		return NULL;
+
+	*idx += __ffs64(l);
+	EBUG_ON(*idx >= BTREE_ITER_MAX);
+	return &trans->paths[*idx];
+}
+
+/*
+ * This version is intended to be safe for use on a btree_trans that is owned by
+ * another thread, for bch2_btree_trans_to_text();
+ */
+#define trans_for_each_path_safe_from(_trans, _path, _idx, _start)	\
+	for (_idx = _start;						\
+	     (_path = __trans_next_path_safe((_trans), &_idx));		\
+	     _idx++)
+
+#define trans_for_each_path_safe(_trans, _path, _idx)			\
+	trans_for_each_path_safe_from(_trans, _path, _idx, 0)
+
 static inline struct btree_path *next_btree_path(struct btree_trans *trans, struct btree_path *path)
 {
 	unsigned idx = path ? path->sorted_idx + 1 : 0;
@@ -193,6 +221,22 @@ struct btree_path *bch2_path_get(struct btree_trans *, enum btree_id, struct bpo
 			   unsigned, unsigned, unsigned, unsigned long);
 struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);
 
+/*
+ * bch2_btree_path_peek_slot() for a cached iterator might return a key in a
+ * different snapshot:
+ */
+static inline struct bkey_s_c bch2_btree_path_peek_slot_exact(struct btree_path *path, struct bkey *u)
+{
+	struct bkey_s_c k = bch2_btree_path_peek_slot(path, u);
+
+	if (k.k && bpos_eq(path->pos, k.k->p))
+		return k;
+
+	bkey_init(u);
+	u->p = path->pos;
+	return (struct bkey_s_c) { u, NULL };
+}
+
 struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *,
 					struct btree_iter *, struct bpos);
 
@@ -230,14 +274,17 @@ void bch2_path_put(struct btree_trans *, struct btree_path *, bool);
 int bch2_trans_relock(struct btree_trans *);
 int bch2_trans_relock_notrace(struct btree_trans *);
 void bch2_trans_unlock(struct btree_trans *);
+void bch2_trans_unlock_long(struct btree_trans *);
 bool bch2_trans_locked(struct btree_trans *);
 
-static inline bool trans_was_restarted(struct btree_trans *trans, u32 restart_count)
+static inline int trans_was_restarted(struct btree_trans *trans, u32 restart_count)
 {
-	return restart_count != trans->restart_count;
+	return restart_count != trans->restart_count
+		? -BCH_ERR_transaction_restart_nested
+		: 0;
 }
 
-void bch2_trans_restart_error(struct btree_trans *, u32);
+void __noreturn bch2_trans_restart_error(struct btree_trans *, u32);
 
 static inline void
 bch2_trans_verify_not_restarted(struct btree_trans *trans,
@@ -246,7 +293,7 @@ static inline void bch2_trans_verify_not_restarted(struct btree_trans *trans,
 		bch2_trans_restart_error(trans, restart_count);
 }
 
-void bch2_trans_in_restart_error(struct btree_trans *);
+void __noreturn bch2_trans_in_restart_error(struct btree_trans *);
 
 static inline void bch2_trans_verify_not_in_restart(struct btree_trans *trans)
 {
@@ -255,10 +302,10 @@ static inline void bch2_trans_verify_not_in_restart(struct btree_trans *trans)
 }
 
 __always_inline
-static inline int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
+static int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
 {
 	BUG_ON(err <= 0);
-	BUG_ON(!bch2_err_matches(err, BCH_ERR_transaction_restart));
+	BUG_ON(!bch2_err_matches(-err, BCH_ERR_transaction_restart));
 
 	trans->restarted = err;
 	trans->last_restarted_ip = _THIS_IP_;
@@ -266,7 +313,7 @@ static inline int btree_trans_restart_nounlock(struct btree_trans *trans, int er
 }
 
 __always_inline
-static inline int btree_trans_restart(struct btree_trans *trans, int err)
+static int btree_trans_restart(struct btree_trans *trans, int err)
 {
 	btree_trans_restart_nounlock(trans, err);
 	return -err;
@@ -365,7 +412,7 @@ static inline unsigned __bch2_btree_iter_flags(struct btree_trans *trans,
 		flags |= BTREE_ITER_ALL_SNAPSHOTS|__BTREE_ITER_ALL_SNAPSHOTS;
 
 	if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
-	    btree_node_type_is_extents(btree_id))
+	    btree_id_is_extents(btree_id))
 		flags |= BTREE_ITER_IS_EXTENTS;
 
 	if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
@@ -419,7 +466,7 @@ static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
 }
 
 void bch2_trans_iter_init_outlined(struct btree_trans *, struct btree_iter *,
-			  unsigned, struct bpos, unsigned);
+			  enum btree_id, struct bpos, unsigned);
 
 static inline void bch2_trans_iter_init(struct btree_trans *trans,
 					struct btree_iter *iter,
@@ -477,48 +524,64 @@ static inline void *bch2_trans_kmalloc_nomemzero(struct btree_trans *trans, size
 	}
 }
 
-static inline struct bkey_i *bch2_bkey_make_mut(struct btree_trans *trans, struct bkey_s_c k)
+static inline struct bkey_s_c __bch2_bkey_get_iter(struct btree_trans *trans,
+				struct btree_iter *iter,
+				unsigned btree_id, struct bpos pos,
+				unsigned flags, unsigned type)
 {
-	struct bkey_i *mut = bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k));
+	struct bkey_s_c k;
+
+	bch2_trans_iter_init(trans, iter, btree_id, pos, flags);
+	k = bch2_btree_iter_peek_slot(iter);
 
-	if (!IS_ERR(mut))
-		bkey_reassemble(mut, k);
-	return mut;
+	if (!bkey_err(k) && type && k.k->type != type)
+		k = bkey_s_c_err(-BCH_ERR_ENOENT_bkey_type_mismatch);
+	if (unlikely(bkey_err(k)))
+		bch2_trans_iter_exit(trans, iter);
+	return k;
 }
 
-static inline struct bkey_i *bch2_bkey_get_mut(struct btree_trans *trans,
-					       struct btree_iter *iter)
+static inline struct bkey_s_c bch2_bkey_get_iter(struct btree_trans *trans,
+				struct btree_iter *iter,
+				unsigned btree_id, struct bpos pos,
+				unsigned flags)
 {
-	struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
+	return __bch2_bkey_get_iter(trans, iter, btree_id, pos, flags, 0);
+}
 
-	return unlikely(IS_ERR(k.k))
-		? ERR_CAST(k.k)
-		: bch2_bkey_make_mut(trans, k);
+#define bch2_bkey_get_iter_typed(_trans, _iter, _btree_id, _pos, _flags, _type)\
+	bkey_s_c_to_##_type(__bch2_bkey_get_iter(_trans, _iter,		\
+				       _btree_id, _pos, _flags, KEY_TYPE_##_type))
+
+static inline int __bch2_bkey_get_val_typed(struct btree_trans *trans,
+				unsigned btree_id, struct bpos pos,
+				unsigned flags, unsigned type,
+				unsigned val_size, void *val)
+{
+	struct btree_iter iter;
+	struct bkey_s_c k;
+	int ret;
+
+	k = __bch2_bkey_get_iter(trans, &iter, btree_id, pos, flags, type);
+	ret = bkey_err(k);
+	if (!ret) {
+		unsigned b = min_t(unsigned, bkey_val_bytes(k.k), val_size);
+
+		memcpy(val, k.v, b);
+		if (unlikely(b < sizeof(*val)))
+			memset((void *) val + b, 0, sizeof(*val) - b);
+		bch2_trans_iter_exit(trans, &iter);
+	}
+
+	return ret;
 }
 
-#define bch2_bkey_get_mut_typed(_trans, _iter, _type)			\
-({									\
-	struct bkey_i *_k = bch2_bkey_get_mut(_trans, _iter);		\
-	struct bkey_i_##_type *_ret;					\
-									\
-	if (IS_ERR(_k))							\
-		_ret = ERR_CAST(_k);					\
-	else if (unlikely(_k->k.type != KEY_TYPE_##_type))		\
-		_ret = ERR_PTR(-ENOENT);				\
-	else								\
-		_ret = bkey_i_to_##_type(_k);				\
-	_ret;								\
-})
+#define bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags, _type, _val)\
+	__bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags,	\
+				  KEY_TYPE_##_type, sizeof(*_val), _val)
 
-#define bch2_bkey_alloc(_trans, _iter, _type)				\
-({									\
-	struct bkey_i_##_type *_k = bch2_trans_kmalloc_nomemzero(_trans, sizeof(*_k));\
-	if (!IS_ERR(_k)) {						\
-		bkey_##_type##_init(&_k->k_i);				\
-		_k->k.p = (_iter)->pos;					\
-	}								\
-	_k;								\
-})
+void bch2_trans_srcu_unlock(struct btree_trans *);
+void bch2_trans_srcu_lock(struct btree_trans *);
 
 u32 bch2_trans_begin(struct btree_trans *);
 
@@ -540,11 +603,6 @@ u32 bch2_trans_begin(struct btree_trans *);
 	__for_each_btree_node(_trans, _iter, _btree_id, _start,		\
 			      0, 0, _flags, _b, _ret)
 
-static inline int bkey_err(struct bkey_s_c k)
-{
-	return PTR_ERR_OR_ZERO(k.k);
-}
-
 static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter,
 							     unsigned flags)
 {
@@ -620,17 +678,17 @@ __bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
 #define lockrestart_do(_trans, _do)					\
 ({									\
 	u32 _restart_count;						\
-	int _ret;							\
+	int _ret2;							\
 									\
 	do {								\
 		_restart_count = bch2_trans_begin(_trans);		\
-		_ret = (_do);						\
-	} while (bch2_err_matches(_ret, BCH_ERR_transaction_restart));	\
+		_ret2 = (_do);						\
+	} while (bch2_err_matches(_ret2, BCH_ERR_transaction_restart));	\
 									\
-	if (!_ret)							\
+	if (!_ret2)							\
 		bch2_trans_verify_not_restarted(_trans, _restart_count);\
 									\
-	_ret;								\
+	_ret2;								\
 })
 
 /*
@@ -645,26 +703,23 @@ __bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
 #define nested_lockrestart_do(_trans, _do)				\
 ({									\
 	u32 _restart_count, _orig_restart_count;			\
-	int _ret;							\
+	int _ret2;							\
 									\
 	_restart_count = _orig_restart_count = (_trans)->restart_count;\
 									\
-	while (bch2_err_matches(_ret = (_do), BCH_ERR_transaction_restart))\
+	while (bch2_err_matches(_ret2 = (_do), BCH_ERR_transaction_restart))\
 		_restart_count = bch2_trans_begin(_trans);		\
 									\
-	if (!_ret)							\
+	if (!_ret2)							\
 		bch2_trans_verify_not_restarted(_trans, _restart_count);\
 									\
-	if (!_ret && trans_was_restarted(_trans, _orig_restart_count))	\
-		_ret = -BCH_ERR_transaction_restart_nested;		\
-									\
-	_ret;								\
+	_ret2 ?: trans_was_restarted(_trans, _restart_count);		\
})
 
 #define for_each_btree_key2(_trans, _iter, _btree_id,			\
 			    _start, _flags, _k, _do)			\
 ({									\
-	int _ret = 0;							\
+	int _ret3 = 0;							\
 									\
 	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
 			     (_start), (_flags));			\
@@ -672,15 +727,15 @@ __bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
 	while (1) {							\
 		u32 _restart_count = bch2_trans_begin(_trans);		\
 									\
-		_ret = 0;						\
+		_ret3 = 0;						\
 		(_k) = bch2_btree_iter_peek_type(&(_iter), (_flags));	\
 		if (!(_k).k)						\
 			break;						\
 									\
-		_ret = bkey_err(_k) ?: (_do);				\
-		if (bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
+		_ret3 = bkey_err(_k) ?: (_do);				\
+		if (bch2_err_matches(_ret3, BCH_ERR_transaction_restart))\
 			continue;					\
-		if (_ret)						\
+		if (_ret3)						\
 			break;						\
 		bch2_trans_verify_not_restarted(_trans, _restart_count);\
 		if (!bch2_btree_iter_advance(&(_iter)))			\
@@ -688,13 +743,13 @@ __bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
 	}								\
 									\
 	bch2_trans_iter_exit((_trans), &(_iter));			\
-	_ret;								\
+	_ret3;								\
 })
 
 #define for_each_btree_key2_upto(_trans, _iter, _btree_id,		\
 				 _start, _end, _flags, _k, _do)		\
 ({									\
-	int _ret = 0;							\
+	int _ret3 = 0;							\
 									\
 	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
 			     (_start), (_flags));			\
@@ -702,15 +757,15 @@ __bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
 	while (1) {							\
 		u32 _restart_count = bch2_trans_begin(_trans);		\
 									\
-		_ret = 0;						\
+		_ret3 = 0;						\
 		(_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, (_flags));\
 		if (!(_k).k)						\
 			break;						\
 									\
-		_ret = bkey_err(_k) ?: (_do);				\
-		if (bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
+		_ret3 = bkey_err(_k) ?: (_do);				\
+		if (bch2_err_matches(_ret3, BCH_ERR_transaction_restart))\
 			continue;					\
-		if (_ret)						\
+		if (_ret3)						\
 			break;						\
 		bch2_trans_verify_not_restarted(_trans, _restart_count);\
 		if (!bch2_btree_iter_advance(&(_iter)))			\
@@ -718,13 +773,13 @@ __bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
 	}								\
 									\
 	bch2_trans_iter_exit((_trans), &(_iter));			\
-	_ret;								\
+	_ret3;								\
 })
 
 #define for_each_btree_key_reverse(_trans, _iter, _btree_id,		\
 				   _start, _flags, _k, _do)		\
 ({									\
-	int _ret = 0;							\
+	int _ret3 = 0;							\
 									\
 	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
 			     (_start), (_flags));			\
@@ -733,14 +788,14 @@ __bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
 		u32 _restart_count = bch2_trans_begin(_trans);		\
 		(_k) = bch2_btree_iter_peek_prev_type(&(_iter), (_flags));\
 		if (!(_k).k) {						\
-			_ret = 0;					\
+			_ret3 = 0;					\
 			break;						\
 		}							\
 									\
-		_ret = bkey_err(_k) ?: (_do);				\
-		if (bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
+		_ret3 = bkey_err(_k) ?: (_do);				\
+		if (bch2_err_matches(_ret3, BCH_ERR_transaction_restart))\
 			continue;					\
-		if (_ret)						\
+		if (_ret3)						\
 			break;						\
 		bch2_trans_verify_not_restarted(_trans, _restart_count);\
 		if (!bch2_btree_iter_rewind(&(_iter)))			\
@@ -748,7 +803,7 @@ __bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
 	}								\
 									\
 	bch2_trans_iter_exit((_trans), &(_iter));			\
-	_ret;								\
+	_ret3;								\
 })
 
 #define for_each_btree_key_commit(_trans, _iter, _btree_id,		\
@@ -759,6 +814,14 @@ __bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
 			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
 					(_journal_seq), (_commit_flags)))
 
+#define for_each_btree_key_reverse_commit(_trans, _iter, _btree_id,	\
+				  _start, _iter_flags, _k,		\
+				  _disk_res, _journal_seq, _commit_flags,\
+				  _do)					\
+	for_each_btree_key_reverse(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
+			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
+					(_journal_seq), (_commit_flags)))
+
 #define for_each_btree_key_upto_commit(_trans, _iter, _btree_id,	\
 				       _start, _end, _iter_flags, _k,	\
 				       _disk_res, _journal_seq, _commit_flags,\
@@ -818,6 +881,37 @@ __bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
 	     !((_ret) = bkey_err(_k)) && (_k).k;			\
 	     bch2_btree_iter_advance(&(_iter)))
 
+#define drop_locks_do(_trans, _do)					\
+({									\
+	bch2_trans_unlock(_trans);					\
+	_do ?: bch2_trans_relock(_trans);				\
+})
+
+#define allocate_dropping_locks_errcode(_trans, _do)			\
+({									\
+	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
+	int _ret = _do;							\
+									\
+	if (bch2_err_matches(_ret, ENOMEM)) {				\
+		_gfp = GFP_KERNEL;					\
+		_ret = drop_locks_do(trans, _do);			\
+	}								\
+	_ret;								\
+})
+
+#define allocate_dropping_locks(_trans, _ret, _do)			\
+({									\
+	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
+	typeof(_do) _p = _do;						\
+									\
+	_ret = 0;							\
+	if (unlikely(!_p)) {						\
+		_gfp = GFP_KERNEL;					\
+		_ret = drop_locks_do(trans, ((_p = _do), 0));		\
+	}								\
+	_p;								\
+})
+
 /* new multiple iterator interface: */
 
 void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
@@ -825,21 +919,21 @@ void bch2_btree_path_to_text(struct printbuf *, struct btree_path *);
 void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
 void bch2_dump_trans_updates(struct btree_trans *);
 void bch2_dump_trans_paths_updates(struct btree_trans *);
-void __bch2_trans_init(struct btree_trans *, struct bch_fs *, unsigned);
-void bch2_trans_exit(struct btree_trans *);
+
+struct btree_trans *__bch2_trans_get(struct bch_fs *, unsigned);
+void bch2_trans_put(struct btree_trans *);
 
 extern const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR];
 unsigned bch2_trans_get_fn_idx(const char *);
 
-#define bch2_trans_init(_trans, _c, _nr_iters, _mem)			\
-do {									\
+#define bch2_trans_get(_c)						\
+({									\
 	static unsigned trans_fn_idx;					\
 									\
 	if (unlikely(!trans_fn_idx))					\
 		trans_fn_idx = bch2_trans_get_fn_idx(__func__);		\
-									\
-	__bch2_trans_init(_trans, _c, trans_fn_idx);			\
-} while (0)
+	__bch2_trans_get(_c, trans_fn_idx);				\
+})
 
 void bch2_btree_trans_to_text(struct printbuf *, struct btree_trans *);
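
Usage notes (illustrative sketches against the interfaces above; not part of the patch):

The patch moves bkey_err() from the middle of the header to the top, reimplemented
as __bkey_err() on the raw struct bkey pointer, so the lookup helpers added further
down can use it. A struct bkey_s_c signals failure by carrying an ERR_PTR in its .k
member. A minimal sketch of the caller-side pattern, assuming an already-set-up
transaction (the btree ID and position are illustrative):

	/* Sketch: peek a slot and extract any error with bkey_err(). */
	static int peek_one_example(struct btree_trans *trans)
	{
		struct btree_iter iter;
		struct bkey_s_c k;
		int ret;

		bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
				     POS(0, 1234), 0);
		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);	/* PTR_ERR_OR_ZERO() on k.k */
		if (!ret) {
			/* k.k and k.v are only valid on this path */
		}
		bch2_trans_iter_exit(trans, &iter);
		return ret;
	}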
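
__trans_next_path_safe() walks trans->paths_allocated as a 64-bit bitmap with
__ffs64() (hence the BTREE_ITER_MAX bound) instead of following the sorted path
list, which is what makes trans_for_each_path_safe() usable on a btree_trans owned
by another thread in bch2_btree_trans_to_text(). A sketch of that debug-output use
(the printbuf formatting is illustrative):

	/*
	 * Sketch: dump another thread's paths. The bitmap walk is still
	 * racy by design; it is only meant for debug output.
	 */
	static void dump_paths_example(struct printbuf *out,
				       struct btree_trans *trans)
	{
		struct btree_path *path;
		unsigned idx;

		trans_for_each_path_safe(trans, path, idx)
			prt_printf(out, "path %u: btree %u level %u\n",
				   idx, (unsigned) path->btree_id,
				   (unsigned) path->level);
	}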
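
__bch2_bkey_get_iter() folds iterator init, bch2_btree_iter_peek_slot() and an
optional key-type check into one call, and tears the iterator down itself on error,
so only the success path owns it. A sketch of the typed variant under that
assumption (BTREE_ID_dirents and the dirent key type chosen only for illustration):

	/*
	 * Sketch: typed lookup. On success the caller must exit the
	 * iterator; on error __bch2_bkey_get_iter() already has.
	 */
	static int read_dirent_example(struct btree_trans *trans, struct bpos pos)
	{
		struct btree_iter iter;
		struct bkey_s_c_dirent d =
			bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_dirents,
						 pos, 0, dirent);
		int ret = bkey_err(d);
		if (ret)	/* includes -BCH_ERR_ENOENT_bkey_type_mismatch */
			return ret;

		/* ... use d.v ... */
		bch2_trans_iter_exit(trans, &iter);
		return 0;
	}

For small fixed-size values there is also bch2_bkey_get_val_typed(), which copies
up to sizeof(*_val) bytes of the value into a caller-supplied buffer and exits the
iterator internally, so the caller does no iterator management at all.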
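
drop_locks_do() and the two allocate_dropping_locks*() macros implement a "try
atomically first" pattern: attempt the allocation with GFP_NOWAIT|__GFP_NOWARN
while btree locks are held, and only on failure unlock, retry with GFP_KERNEL, and
relock (a relock failure surfaces as a transaction-restart error). The expression
is evaluated twice and must use the macro-provided _gfp; note also that both macro
bodies spell the transaction out as the literal name "trans", so the enclosing
function's pointer has to be named that. A sketch (struct foo and the surrounding
function are hypothetical):

	/* Sketch: allocate under btree locks, with a GFP_KERNEL fallback. */
	static int alloc_example(struct btree_trans *trans)
	{
		int ret = 0;
		struct foo *f = allocate_dropping_locks(trans, ret,
					kzalloc(sizeof(struct foo), _gfp));

		if (!f && !ret)
			ret = -ENOMEM;
		if (ret) {
			kfree(f);	/* may be non-NULL if only the relock failed */
			return ret;
		}
		/* ... locks are held again here; use f ... */
		kfree(f);
		return 0;
	}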
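
Finally, the transaction lifecycle moves from a caller-embedded object
(bch2_trans_init()/bch2_trans_exit() on a stack-allocated struct btree_trans) to
get/put semantics: bch2_trans_get() returns the transaction, recording the caller
via bch2_trans_get_fn_idx(__func__), and bch2_trans_put() releases it. A
before/after sketch of the calling convention (do_work() is hypothetical):

	/* Old convention, removed by this patch: */
	struct btree_trans trans;

	bch2_trans_init(&trans, c, 0, 0);
	ret = lockrestart_do(&trans, do_work(&trans));
	bch2_trans_exit(&trans);

	/* New convention: */
	struct btree_trans *trans = bch2_trans_get(c);

	ret = lockrestart_do(trans, do_work(trans));
	bch2_trans_put(trans);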