Update bcachefs sources to 33a60d9b05 bcachefs: Assorted fixes for clang

diff --git a/libbcachefs/btree_iter.h b/libbcachefs/btree_iter.h
index c630ab56a8757c5c5f35bc6ecc3cd99923d4c8ad..c472aa8c58a09b8181aab307cd2ced3508c17545 100644
--- a/libbcachefs/btree_iter.h
+++ b/libbcachefs/btree_iter.h
@@ -4,8 +4,14 @@
 
 #include "bset.h"
 #include "btree_types.h"
+#include "trace.h"
 
-#include <trace/events/bcachefs.h>
+static inline int __bkey_err(const struct bkey *k)
+{
+       return PTR_ERR_OR_ZERO(k);
+}
+
+#define bkey_err(_k)   __bkey_err((_k).k)
 
 static inline void __btree_path_get(struct btree_path *path, bool intent)
 {
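
Illustrative sketch (not part of the patch): bkey_err() maps the error encoded in a bkey_s_c to an errno-style int via PTR_ERR_OR_ZERO(), so a lookup result can be checked along these lines (the iterator is assumed to have been initialized elsewhere):

        struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
        int ret = bkey_err(k);

        if (ret)
                return ret;     /* real error or transaction restart */
        /* k.k and k.v are valid from here on */
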
@@ -36,14 +42,7 @@ static inline struct btree *btree_path_node(struct btree_path *path,
 static inline bool btree_node_lock_seq_matches(const struct btree_path *path,
                                        const struct btree *b, unsigned level)
 {
-       /*
-        * We don't compare the low bits of the lock sequence numbers because
-        * @path might have taken a write lock on @b, and we don't want to skip
-        * the linked path if the sequence numbers were equal before taking that
-        * write lock. The lock sequence number is incremented by taking and
-        * releasing write locks and is even when unlocked:
-        */
-       return path->l[level].lock_seq >> 1 == b->c.lock.state.seq >> 1;
+       return path->l[level].lock_seq == six_lock_seq(&b->c.lock);
 }
 
 static inline struct btree *btree_node_parent(struct btree_path *path,
@@ -54,6 +53,16 @@ static inline struct btree *btree_node_parent(struct btree_path *path,
 
 /* Iterate over paths within a transaction: */
 
+void __bch2_btree_trans_sort_paths(struct btree_trans *);
+
+static inline void btree_trans_sort_paths(struct btree_trans *trans)
+{
+       if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
+           trans->paths_sorted)
+               return;
+       __bch2_btree_trans_sort_paths(trans);
+}
+
 static inline struct btree_path *
 __trans_next_path(struct btree_trans *trans, unsigned idx)
 {
@@ -72,8 +81,6 @@ __trans_next_path(struct btree_trans *trans, unsigned idx)
        return &trans->paths[idx];
 }
 
-void bch2_btree_path_check_sort(struct btree_trans *, struct btree_path *, int);
-
 #define trans_for_each_path_from(_trans, _path, _start)                        \
        for (_path = __trans_next_path((_trans), _start);               \
             (_path);                                                   \
@@ -82,6 +89,35 @@ void bch2_btree_path_check_sort(struct btree_trans *, struct btree_path *, int);
 #define trans_for_each_path(_trans, _path)                             \
        trans_for_each_path_from(_trans, _path, 0)
 
+static inline struct btree_path *
+__trans_next_path_safe(struct btree_trans *trans, unsigned *idx)
+{
+       u64 l;
+
+       if (*idx == BTREE_ITER_MAX)
+               return NULL;
+
+       l = trans->paths_allocated >> *idx;
+       if (!l)
+               return NULL;
+
+       *idx += __ffs64(l);
+       EBUG_ON(*idx >= BTREE_ITER_MAX);
+       return &trans->paths[*idx];
+}
+
+/*
+ * This version is intended to be safe for use on a btree_trans that is owned by
+ * another thread, for bch2_btree_trans_to_text();
+ */
+#define trans_for_each_path_safe_from(_trans, _path, _idx, _start)     \
+       for (_idx = _start;                                             \
+            (_path = __trans_next_path_safe((_trans), &_idx));         \
+            _idx++)
+
+#define trans_for_each_path_safe(_trans, _path, _idx)                  \
+       trans_for_each_path_safe_from(_trans, _path, _idx, 0)
+
 static inline struct btree_path *next_btree_path(struct btree_trans *trans, struct btree_path *path)
 {
        unsigned idx = path ? path->sorted_idx + 1 : 0;
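
Illustrative sketch (not part of the patch): the _safe variants are meant for walking the paths of a btree_trans that may be owned by another thread, as in bch2_btree_trans_to_text(). Assuming a printbuf *out and a transaction pointer trans, a dump loop could look roughly like:

        struct btree_path *path;
        unsigned idx;

        trans_for_each_path_safe(trans, path, idx)
                prt_printf(out, "path %u: btree %u level %u\n",
                           path->idx, path->btree_id, path->level);
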
@@ -95,9 +131,10 @@ static inline struct btree_path *next_btree_path(struct btree_trans *trans, stru
 
 static inline struct btree_path *prev_btree_path(struct btree_trans *trans, struct btree_path *path)
 {
-       EBUG_ON(path->sorted_idx >= trans->nr_sorted);
-       return path->sorted_idx
-               ? trans->paths + trans->sorted[path->sorted_idx - 1]
+       unsigned idx = path ? path->sorted_idx : trans->nr_sorted;
+
+       return idx
+               ? trans->paths + trans->sorted[idx - 1]
                : NULL;
 }
 
@@ -106,6 +143,11 @@ static inline struct btree_path *prev_btree_path(struct btree_trans *trans, stru
             ((_path) = (_trans)->paths + trans->sorted[_i]), (_i) < (_trans)->nr_sorted;\
             _i++)
 
+#define trans_for_each_path_inorder_reverse(_trans, _path, _i)         \
+       for (_i = trans->nr_sorted - 1;                                 \
+            ((_path) = (_trans)->paths + trans->sorted[_i]), (_i) >= 0;\
+            --_i)
+
 static inline bool __path_has_node(const struct btree_path *path,
                                   const struct btree *b)
 {
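
A minimal sketch (not part of the patch) of the new reverse in-order walk, e.g. scanning from the highest-sorted path downwards for the last path at or before a search position; btree and search_pos are assumed placeholders:

        struct btree_path *path;
        int i;

        trans_for_each_path_inorder_reverse(trans, path, i)
                if (path->btree_id == btree &&
                    bpos_le(path->pos, search_pos))
                        break;
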
@@ -161,6 +203,18 @@ bch2_btree_path_set_pos(struct btree_trans *trans,
                : path;
 }
 
+int __must_check bch2_btree_path_traverse_one(struct btree_trans *, struct btree_path *,
+                                             unsigned, unsigned long);
+
+static inline int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
+                                         struct btree_path *path, unsigned flags)
+{
+       if (path->uptodate < BTREE_ITER_NEED_RELOCK)
+               return 0;
+
+       return bch2_btree_path_traverse_one(trans, path, flags, _RET_IP_);
+}
+
 int __must_check bch2_btree_path_traverse(struct btree_trans *,
                                          struct btree_path *, unsigned);
 struct btree_path *bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
@@ -172,6 +226,15 @@ struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *,
 
 void bch2_btree_path_level_init(struct btree_trans *, struct btree_path *, struct btree *);
 
+int __bch2_trans_mutex_lock(struct btree_trans *, struct mutex *);
+
+static inline int bch2_trans_mutex_lock(struct btree_trans *trans, struct mutex *lock)
+{
+       return mutex_trylock(lock)
+               ? 0
+               : __bch2_trans_mutex_lock(trans, lock);
+}
+
 #ifdef CONFIG_BCACHEFS_DEBUG
 void bch2_trans_verify_paths(struct btree_trans *);
 void bch2_assert_pos_locked(struct btree_trans *, enum btree_id,
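
Illustrative usage of bch2_trans_mutex_lock() (not part of the patch): it trylocks first, and on contention the outlined slow path drops the transaction's btree locks before sleeping on the mutex, so the return value must be checked for a transaction restart. The mutex chosen here is an arbitrary example:

        int ret = bch2_trans_mutex_lock(trans, &c->sb_lock);

        if (ret)
                return ret;     /* typically a transaction restart */

        /* ... work under the mutex ... */
        mutex_unlock(&c->sb_lock);
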
@@ -193,6 +256,7 @@ int bch2_btree_path_relock_intent(struct btree_trans *, struct btree_path *);
 void bch2_path_put(struct btree_trans *, struct btree_path *, bool);
 
 int bch2_trans_relock(struct btree_trans *);
+int bch2_trans_relock_notrace(struct btree_trans *);
 void bch2_trans_unlock(struct btree_trans *);
 bool bch2_trans_locked(struct btree_trans *);
 
@@ -201,20 +265,36 @@ static inline bool trans_was_restarted(struct btree_trans *trans, u32 restart_co
        return restart_count != trans->restart_count;
 }
 
-void bch2_trans_verify_not_restarted(struct btree_trans *, u32);
+void bch2_trans_restart_error(struct btree_trans *, u32);
+
+static inline void bch2_trans_verify_not_restarted(struct btree_trans *trans,
+                                                  u32 restart_count)
+{
+       if (trans_was_restarted(trans, restart_count))
+               bch2_trans_restart_error(trans, restart_count);
+}
+
+void bch2_trans_in_restart_error(struct btree_trans *);
+
+static inline void bch2_trans_verify_not_in_restart(struct btree_trans *trans)
+{
+       if (trans->restarted)
+               bch2_trans_in_restart_error(trans);
+}
 
 __always_inline
-static inline int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
+static int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
 {
        BUG_ON(err <= 0);
-       BUG_ON(!bch2_err_matches(err, BCH_ERR_transaction_restart));
+       BUG_ON(!bch2_err_matches(-err, BCH_ERR_transaction_restart));
 
        trans->restarted = err;
+       trans->last_restarted_ip = _THIS_IP_;
        return -err;
 }
 
 __always_inline
-static inline int btree_trans_restart(struct btree_trans *trans, int err)
+static int btree_trans_restart(struct btree_trans *trans, int err)
 {
        btree_trans_restart_nounlock(trans, err);
        return -err;
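
For context (not part of the patch), a rough sketch of the pattern these helpers support: snapshot trans->restart_count, do work that must not restart the transaction, then assert; a restart itself is signalled by returning the negated error from btree_trans_restart(). The condition below is hypothetical:

        u32 restart_count = trans->restart_count;

        /* ... work that is not allowed to restart the transaction ... */

        bch2_trans_verify_not_restarted(trans, restart_count);

        /* elsewhere, signalling a restart: */
        if (too_many_iters)
                return btree_trans_restart(trans,
                                BCH_ERR_transaction_restart_too_many_iters);
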
@@ -243,6 +323,7 @@ int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter);
 int __must_check bch2_btree_iter_traverse(struct btree_iter *);
 
 struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
+struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *);
 struct btree *bch2_btree_iter_next_node(struct btree_iter *);
 
 struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *, struct bpos);
@@ -424,63 +505,64 @@ static inline void *bch2_trans_kmalloc_nomemzero(struct btree_trans *trans, size
        }
 }
 
-static inline struct bkey_i *bch2_bkey_make_mut(struct btree_trans *trans, struct bkey_s_c k)
+static inline struct bkey_s_c __bch2_bkey_get_iter(struct btree_trans *trans,
+                               struct btree_iter *iter,
+                               unsigned btree_id, struct bpos pos,
+                               unsigned flags, unsigned type)
 {
-       struct bkey_i *mut = bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k));
+       struct bkey_s_c k;
 
-       if (!IS_ERR(mut))
-               bkey_reassemble(mut, k);
-       return mut;
+       bch2_trans_iter_init(trans, iter, btree_id, pos, flags);
+       k = bch2_btree_iter_peek_slot(iter);
+
+       if (!bkey_err(k) && type && k.k->type != type)
+               k = bkey_s_c_err(-BCH_ERR_ENOENT_bkey_type_mismatch);
+       if (unlikely(bkey_err(k)))
+               bch2_trans_iter_exit(trans, iter);
+       return k;
 }
 
-static inline struct bkey_i *bch2_bkey_get_mut(struct btree_trans *trans,
-                                              struct btree_iter *iter)
+static inline struct bkey_s_c bch2_bkey_get_iter(struct btree_trans *trans,
+                               struct btree_iter *iter,
+                               unsigned btree_id, struct bpos pos,
+                               unsigned flags)
 {
-       struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
-
-       return unlikely(IS_ERR(k.k))
-               ? ERR_CAST(k.k)
-               : bch2_bkey_make_mut(trans, k);
+       return __bch2_bkey_get_iter(trans, iter, btree_id, pos, flags, 0);
 }
 
-#define bch2_bkey_get_mut_typed(_trans, _iter, _type)                  \
-({                                                                     \
-       struct bkey_i *_k = bch2_bkey_get_mut(_trans, _iter);           \
-       struct bkey_i_##_type *_ret;                                    \
-                                                                       \
-       if (IS_ERR(_k))                                                 \
-               _ret = ERR_CAST(_k);                                    \
-       else if (unlikely(_k->k.type != KEY_TYPE_##_type))              \
-               _ret = ERR_PTR(-ENOENT);                                \
-       else                                                            \
-               _ret = bkey_i_to_##_type(_k);                           \
-       _ret;                                                           \
-})
-
-#define bch2_bkey_alloc(_trans, _iter, _type)                          \
-({                                                                     \
-       struct bkey_i_##_type *_k = bch2_trans_kmalloc(_trans, sizeof(*_k));\
-       if (!IS_ERR(_k)) {                                              \
-               bkey_##_type##_init(&_k->k_i);                          \
-               _k->k.p = (_iter)->pos;                                 \
-       }                                                               \
-       _k;                                                             \
-})
-
-u32 bch2_trans_begin(struct btree_trans *);
+#define bch2_bkey_get_iter_typed(_trans, _iter, _btree_id, _pos, _flags, _type)\
+       bkey_s_c_to_##_type(__bch2_bkey_get_iter(_trans, _iter,                 \
+                                      _btree_id, _pos, _flags, KEY_TYPE_##_type))
 
-static inline struct btree *
-__btree_iter_peek_node_and_restart(struct btree_trans *trans, struct btree_iter *iter)
+static inline int __bch2_bkey_get_val_typed(struct btree_trans *trans,
+                               unsigned btree_id, struct bpos pos,
+                               unsigned flags, unsigned type,
+                               unsigned val_size, void *val)
 {
-       struct btree *b;
+       struct btree_iter iter;
+       struct bkey_s_c k;
+       int ret;
 
-       while (b = bch2_btree_iter_peek_node(iter),
-              bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
-               bch2_trans_begin(trans);
+       k = __bch2_bkey_get_iter(trans, &iter, btree_id, pos, flags, type);
+       ret = bkey_err(k);
+       if (!ret) {
+               unsigned b = min_t(unsigned, bkey_val_bytes(k.k), val_size);
 
-       return b;
+               memcpy(val, k.v, b);
+               if (unlikely(b < sizeof(*val)))
+                       memset((void *) val + b, 0, sizeof(*val) - b);
+               bch2_trans_iter_exit(trans, &iter);
+       }
+
+       return ret;
 }
 
+#define bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags, _type, _val)\
+       __bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags,      \
+                                 KEY_TYPE_##_type, sizeof(*_val), _val)
+
+u32 bch2_trans_begin(struct btree_trans *);
+
 /*
  * XXX
  * this does not handle transaction restarts from bch2_btree_iter_next_node()
@@ -490,7 +572,7 @@ __btree_iter_peek_node_and_restart(struct btree_trans *trans, struct btree_iter
                              _locks_want, _depth, _flags, _b, _ret)    \
        for (bch2_trans_node_iter_init((_trans), &(_iter), (_btree_id), \
                                _start, _locks_want, _depth, _flags);   \
-            (_b) = __btree_iter_peek_node_and_restart((_trans), &(_iter)),\
+            (_b) = bch2_btree_iter_peek_node_and_restart(&(_iter)),    \
             !((_ret) = PTR_ERR_OR_ZERO(_b)) && (_b);                   \
             (_b) = bch2_btree_iter_next_node(&(_iter)))
 
@@ -499,11 +581,6 @@ __btree_iter_peek_node_and_restart(struct btree_trans *trans, struct btree_iter
        __for_each_btree_node(_trans, _iter, _btree_id, _start,         \
                              0, 0, _flags, _b, _ret)
 
-static inline int bkey_err(struct bkey_s_c k)
-{
-       return PTR_ERR_OR_ZERO(k.k);
-}
-
 static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter,
                                                             unsigned flags)
 {
@@ -544,6 +621,8 @@ static inline int btree_trans_too_many_iters(struct btree_trans *trans)
        return 0;
 }
 
+struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *);
+
 static inline struct bkey_s_c
 __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
                                   struct btree_iter *iter, unsigned flags)
@@ -558,6 +637,22 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
        return k;
 }
 
+static inline struct bkey_s_c
+__bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
+                                       struct btree_iter *iter,
+                                       struct bpos end,
+                                       unsigned flags)
+{
+       struct bkey_s_c k;
+
+       while (btree_trans_too_many_iters(trans) ||
+              (k = bch2_btree_iter_peek_upto_type(iter, end, flags),
+               bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
+               bch2_trans_begin(trans);
+
+       return k;
+}
+
 #define lockrestart_do(_trans, _do)                                    \
 ({                                                                     \
        u32 _restart_count;                                             \
@@ -700,6 +795,14 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
                            (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
                                        (_journal_seq), (_commit_flags)))
 
+#define for_each_btree_key_reverse_commit(_trans, _iter, _btree_id,    \
+                                 _start, _iter_flags, _k,              \
+                                 _disk_res, _journal_seq, _commit_flags,\
+                                 _do)                                  \
+       for_each_btree_key_reverse(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
+                           (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
+                                       (_journal_seq), (_commit_flags)))
+
 #define for_each_btree_key_upto_commit(_trans, _iter, _btree_id,       \
                                  _start, _end, _iter_flags, _k,        \
                                  _disk_res, _journal_seq, _commit_flags,\
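
A hedged sketch (not part of the patch) of the new reverse-with-commit loop; the btree, flags and the per-key helper handle_one_key() are placeholders:

        struct btree_iter iter;
        struct bkey_s_c k;
        int ret;

        ret = for_each_btree_key_reverse_commit(trans, iter,
                        BTREE_ID_snapshots, POS_MAX, 0, k,
                        NULL, NULL, 0,
                        handle_one_key(trans, &iter, k));
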
@@ -716,6 +819,15 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
             !((_ret) = bkey_err(_k)) && (_k).k;                        \
             bch2_btree_iter_advance(&(_iter)))
 
+#define for_each_btree_key_upto(_trans, _iter, _btree_id,              \
+                               _start, _end, _flags, _k, _ret)         \
+       for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),      \
+                                 (_start), (_flags));                  \
+            (_k) = __bch2_btree_iter_peek_upto_and_restart((_trans),   \
+                                               &(_iter), _end, _flags),\
+            !((_ret) = bkey_err(_k)) && (_k).k;                        \
+            bch2_btree_iter_advance(&(_iter)))
+
 #define for_each_btree_key_norestart(_trans, _iter, _btree_id,         \
                           _start, _flags, _k, _ret)                    \
        for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),      \
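
A minimal usage sketch (not part of the patch) for the restart-handling for_each_btree_key_upto() added above; inum, start and end are assumed:

        struct btree_iter iter;
        struct bkey_s_c k;
        int ret;

        for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
                                POS(inum, start), POS(inum, end),
                                0, k, ret) {
                /* process k; restarts are handled by the loop itself */
        }
        bch2_trans_iter_exit(trans, &iter);
        if (ret)
                return ret;
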
@@ -750,6 +862,37 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
             !((_ret) = bkey_err(_k)) && (_k).k;                                \
             bch2_btree_iter_advance(&(_iter)))
 
+#define drop_locks_do(_trans, _do)                                     \
+({                                                                     \
+       bch2_trans_unlock(_trans);                                      \
+       _do ?: bch2_trans_relock(_trans);                               \
+})
+
+#define allocate_dropping_locks_errcode(_trans, _do)                   \
+({                                                                     \
+       gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;                           \
+       int _ret = _do;                                                 \
+                                                                       \
+       if (bch2_err_matches(_ret, ENOMEM)) {                           \
+               _gfp = GFP_KERNEL;                                      \
+               _ret = drop_locks_do(trans, _do);                       \
+       }                                                               \
+       _ret;                                                           \
+})
+
+#define allocate_dropping_locks(_trans, _ret, _do)                     \
+({                                                                     \
+       gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;                           \
+       typeof(_do) _p = _do;                                           \
+                                                                       \
+       _ret = 0;                                                       \
+       if (unlikely(!_p)) {                                            \
+               _gfp = GFP_KERNEL;                                      \
+               _ret = drop_locks_do(trans, ((_p = _do), 0));           \
+       }                                                               \
+       _p;                                                             \
+})
+
 /* new multiple iterator interface: */
 
 void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
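
A sketch (not part of the patch) of the allocation helpers added above: drop_locks_do() runs an expression with the transaction unlocked and then relocks; allocate_dropping_locks() first evaluates the allocation expression with the macro-local _gfp set to GFP_NOWAIT|__GFP_NOWARN and, if that returns NULL, retries it with GFP_KERNEL under drop_locks_do(). Note that the macro bodies refer to the transaction by the literal name trans, and the allocation expression must use _gfp. The struct below is hypothetical:

        struct scratch_buf *buf;        /* hypothetical */
        int ret = 0;

        buf = allocate_dropping_locks(trans, ret,
                                      kzalloc(sizeof(*buf), _gfp));
        if (!buf && !ret)
                ret = -ENOMEM;
        if (ret)
                return ret;
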