// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "recovery.h"
#include "replicas.h"
#include "subvolume.h"

#include <linux/prefetch.h>
#include <trace/events/bcachefs.h>

static void btree_trans_verify_sorted(struct btree_trans *);
inline void bch2_btree_path_check_sort(struct btree_trans *, struct btree_path *, int);

static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
                                       struct btree_path *);

static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
{
#ifdef CONFIG_BCACHEFS_DEBUG
        return iter->ip_allocated;
#else
        return 0;
#endif
}

static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);

/*
 * Unlocks before scheduling
 * Note: does not revalidate iterator
 */
static inline int bch2_trans_cond_resched(struct btree_trans *trans)
{
        if (need_resched() || race_fault()) {
                bch2_trans_unlock(trans);
                schedule();
                return bch2_trans_relock(trans) ? 0 : -EINTR;
        } else {
                return 0;
        }
}

static inline int __btree_path_cmp(const struct btree_path *l,
                                   enum btree_id        r_btree_id,
                                   bool                 r_cached,
                                   struct bpos          r_pos,
                                   unsigned             r_level)
{
        /*
         * Must match lock ordering as defined by __bch2_btree_node_lock:
         */
        return   cmp_int(l->btree_id,   r_btree_id) ?:
                 cmp_int((int) l->cached,       (int) r_cached) ?:
                 bpos_cmp(l->pos,       r_pos) ?:
                -cmp_int(l->level,      r_level);
}

static inline int btree_path_cmp(const struct btree_path *l,
                                 const struct btree_path *r)
{
        return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
}

static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
{
        /* Are we iterating over keys in all snapshots? */
        if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
                p = bpos_successor(p);
        } else {
                p = bpos_nosnap_successor(p);
                p.snapshot = iter->snapshot;
        }

        return p;
}

static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
{
        /* Are we iterating over keys in all snapshots? */
        if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
                p = bpos_predecessor(p);
        } else {
                p = bpos_nosnap_predecessor(p);
                p.snapshot = iter->snapshot;
        }

        return p;
}

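/*
 * Values below 128 stored in path->l[].b are BTREE_ITER_NO_NODE_* sentinels
 * recording why a level has no btree node, hence the magic number here:
 */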
static inline bool is_btree_node(struct btree_path *path, unsigned l)
{
        return l < BTREE_MAX_DEPTH &&
                (unsigned long) path->l[l].b >= 128;
}

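/*
 * Extents are indexed by the position of their end, so if this is an
 * extents iterator, search from the successor of pos (unless pos is
 * POS_MAX) to find the extent that covers it:
 */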
static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
{
        struct bpos pos = iter->pos;

        if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
            bkey_cmp(pos, POS_MAX))
                pos = bkey_successor(iter, pos);
        return pos;
}

static inline bool btree_path_pos_before_node(struct btree_path *path,
                                              struct btree *b)
{
        return bpos_cmp(path->pos, b->data->min_key) < 0;
}

static inline bool btree_path_pos_after_node(struct btree_path *path,
                                             struct btree *b)
{
        return bpos_cmp(b->key.k.p, path->pos) < 0;
}

static inline bool btree_path_pos_in_node(struct btree_path *path,
                                          struct btree *b)
{
        return path->btree_id == b->c.btree_id &&
                !btree_path_pos_before_node(path, b) &&
                !btree_path_pos_after_node(path, b);
}

/* Btree node locking: */

void bch2_btree_node_unlock_write(struct btree_trans *trans,
                        struct btree_path *path, struct btree *b)
{
        bch2_btree_node_unlock_write_inlined(trans, path, b);
}

void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
{
        struct btree_path *linked;
        unsigned readers = 0;

        trans_for_each_path(trans, linked)
                if (linked->l[b->c.level].b == b &&
                    btree_node_read_locked(linked, b->c.level))
                        readers++;

        /*
         * Must drop our read locks before calling six_lock_write() -
         * six_unlock() won't do wakeups until the reader count
         * goes to 0, and it's safe because we have the node intent
         * locked:
         */
        if (!b->c.lock.readers)
                atomic64_sub(__SIX_VAL(read_lock, readers),
                             &b->c.lock.state.counter);
        else
                this_cpu_sub(*b->c.lock.readers, readers);

        six_lock_write(&b->c.lock, NULL, NULL);

        if (!b->c.lock.readers)
                atomic64_add(__SIX_VAL(read_lock, readers),
                             &b->c.lock.state.counter);
        else
                this_cpu_add(*b->c.lock.readers, readers);
}

bool __bch2_btree_node_relock(struct btree_trans *trans,
                              struct btree_path *path, unsigned level)
{
        struct btree *b = btree_path_node(path, level);
        int want = __btree_lock_want(path, level);

        if (!is_btree_node(path, level))
                goto fail;

        if (race_fault())
                goto fail;

        if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
            (btree_node_lock_seq_matches(path, b, level) &&
             btree_node_lock_increment(trans, b, level, want))) {
                mark_btree_node_locked(trans, path, level, want);
                return true;
        }
fail:
        if (b != BTREE_ITER_NO_NODE_CACHED &&
            b != BTREE_ITER_NO_NODE_INIT)
                trace_btree_node_relock_fail(trans->fn, _RET_IP_,
                                             path->btree_id,
                                             &path->pos,
                                             (unsigned long) b,
                                             path->l[level].lock_seq,
                                             is_btree_node(path, level) ? b->c.lock.state.seq : 0);
        return false;
}

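/*
 * Upgrade the lock on a node to what btree_lock_want() says we need -
 * i.e. promote a read lock to an intent lock:
 */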
bool bch2_btree_node_upgrade(struct btree_trans *trans,
                             struct btree_path *path, unsigned level)
{
        struct btree *b = path->l[level].b;

        if (!is_btree_node(path, level))
                return false;

        switch (btree_lock_want(path, level)) {
        case BTREE_NODE_UNLOCKED:
                BUG_ON(btree_node_locked(path, level));
                return true;
        case BTREE_NODE_READ_LOCKED:
                BUG_ON(btree_node_intent_locked(path, level));
                return bch2_btree_node_relock(trans, path, level);
        case BTREE_NODE_INTENT_LOCKED:
                break;
        }

        if (btree_node_intent_locked(path, level))
                return true;

        if (race_fault())
                return false;

        if (btree_node_locked(path, level)
            ? six_lock_tryupgrade(&b->c.lock)
            : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
                goto success;

        if (btree_node_lock_seq_matches(path, b, level) &&
            btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
                btree_node_unlock(path, level);
                goto success;
        }

        return false;
success:
        mark_btree_node_intent_locked(trans, path, level);
        return true;
}

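/*
 * Relock - or, if @upgrade, upgrade - every node on @path from path->level
 * up to path->locks_want; on failure, drop all of the path's locks and
 * mark it as needing a full traverse:
 */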
static inline bool btree_path_get_locks(struct btree_trans *trans,
                                        struct btree_path *path,
                                        bool upgrade)
{
        unsigned l = path->level;
        int fail_idx = -1;

        do {
                if (!btree_path_node(path, l))
                        break;

                if (!(upgrade
                      ? bch2_btree_node_upgrade(trans, path, l)
                      : bch2_btree_node_relock(trans, path, l)))
                        fail_idx = l;

                l++;
        } while (l < path->locks_want);

        /*
         * When we fail to get a lock, we have to ensure that any child nodes
         * can't be relocked so bch2_btree_path_traverse has to walk back up to
         * the node that we failed to relock:
         */
        if (fail_idx >= 0) {
                __bch2_btree_path_unlock(path);
                btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);

                do {
                        path->l[fail_idx].b = BTREE_ITER_NO_NODE_GET_LOCKS;
                        --fail_idx;
                } while (fail_idx >= 0);
        }

        if (path->uptodate == BTREE_ITER_NEED_RELOCK)
                path->uptodate = BTREE_ITER_UPTODATE;

        bch2_trans_verify_locks(trans);

        return path->uptodate < BTREE_ITER_NEED_RELOCK;
}

static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
                                  bool cached)
{
        return !cached
                ? container_of(_b, struct btree, c)->key.k.p
                : container_of(_b, struct bkey_cached, c)->key.pos;
}

/* Slowpath: */
bool __bch2_btree_node_lock(struct btree_trans *trans,
                            struct btree_path *path,
                            struct btree *b,
                            struct bpos pos, unsigned level,
                            enum six_lock_type type,
                            six_lock_should_sleep_fn should_sleep_fn, void *p,
                            unsigned long ip)
{
        struct btree_path *linked;
        unsigned reason;

        /* Check if it's safe to block: */
        trans_for_each_path(trans, linked) {
                if (!linked->nodes_locked)
                        continue;

                /*
                 * Can't block taking an intent lock if we have _any_ nodes read
                 * locked:
                 *
                 * - Our read lock blocks another thread with an intent lock on
                 *   the same node from getting a write lock, and thus from
                 *   dropping its intent lock
                 *
                 * - And the other thread may have multiple nodes intent locked:
                 *   both the node we want to intent lock, and the node we
                 *   already have read locked - deadlock:
                 */
                if (type == SIX_LOCK_intent &&
                    linked->nodes_locked != linked->nodes_intent_locked) {
                        reason = 1;
                        goto deadlock;
                }

                if (linked->btree_id != path->btree_id) {
                        if (linked->btree_id < path->btree_id)
                                continue;

                        reason = 3;
                        goto deadlock;
                }

                /*
                 * Within the same btree, non-cached paths come before cached
                 * paths:
                 */
                if (linked->cached != path->cached) {
                        if (!linked->cached)
                                continue;

                        reason = 4;
                        goto deadlock;
                }

                /*
                 * Interior nodes must be locked before their descendants: if
                 * another path has possible descendants locked of the node
                 * we're about to lock, it must have the ancestors locked too:
                 */
                if (level > __fls(linked->nodes_locked)) {
                        reason = 5;
                        goto deadlock;
                }

                /* Must lock btree nodes in key order: */
                if (btree_node_locked(linked, level) &&
                    bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
                                                 linked->cached)) <= 0) {
                        reason = 7;
                        goto deadlock;
                }
        }

        return btree_node_lock_type(trans, path, b, pos, level,
                                    type, should_sleep_fn, p);
deadlock:
        trace_trans_restart_would_deadlock(trans->fn, ip,
                        trans->in_traverse_all, reason,
                        linked->btree_id,
                        linked->cached,
                        &linked->pos,
                        path->btree_id,
                        path->cached,
                        &pos);
        btree_trans_restart(trans);
        return false;
}

/* Btree iterator locking: */

#ifdef CONFIG_BCACHEFS_DEBUG

static void bch2_btree_path_verify_locks(struct btree_path *path)
{
        unsigned l;

        if (!path->nodes_locked) {
                BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
                       btree_path_node(path, path->level));
                return;
        }

        for (l = 0; btree_path_node(path, l); l++)
                BUG_ON(btree_lock_want(path, l) !=
                       btree_node_locked_type(path, l));
}

void bch2_trans_verify_locks(struct btree_trans *trans)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                bch2_btree_path_verify_locks(path);
}
#else
static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
#endif

/* Btree path locking: */

/*
 * Only for btree_cache.c - only relocks intent locks
 */
bool bch2_btree_path_relock_intent(struct btree_trans *trans,
                                   struct btree_path *path)
{
        unsigned l;

        for (l = path->level;
             l < path->locks_want && btree_path_node(path, l);
             l++) {
                if (!bch2_btree_node_relock(trans, path, l)) {
                        __bch2_btree_path_unlock(path);
                        btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
                        trace_trans_restart_relock_path_intent(trans->fn, _RET_IP_,
                                                   path->btree_id, &path->pos);
                        btree_trans_restart(trans);
                        return false;
                }
        }

        return true;
}

__flatten
static bool bch2_btree_path_relock(struct btree_trans *trans,
                        struct btree_path *path, unsigned long trace_ip)
{
        bool ret = btree_path_get_locks(trans, path, false);

        if (!ret) {
                trace_trans_restart_relock_path(trans->fn, trace_ip,
                                                path->btree_id, &path->pos);
                btree_trans_restart(trans);
        }
        return ret;
}

bool __bch2_btree_path_upgrade(struct btree_trans *trans,
                               struct btree_path *path,
                               unsigned new_locks_want)
{
        struct btree_path *linked;

        EBUG_ON(path->locks_want >= new_locks_want);

        path->locks_want = new_locks_want;

        if (btree_path_get_locks(trans, path, true))
                return true;

        /*
         * XXX: this is ugly - we'd prefer to not be mucking with other
         * iterators in the btree_trans here.
         *
         * On failure to upgrade the iterator, setting iter->locks_want and
         * calling get_locks() is sufficient to make bch2_btree_path_traverse()
         * get the locks we want on transaction restart.
         *
         * But if this iterator was a clone, on transaction restart what we did
         * to this iterator isn't going to be preserved.
         *
         * Possibly we could add an iterator field for the parent iterator when
         * an iterator is a copy - for now, we'll just upgrade any other
         * iterators with the same btree id.
         *
         * The code below used to be needed to ensure ancestor nodes get locked
         * before interior nodes - now that's handled by
         * bch2_btree_path_traverse_all().
         */
        if (!path->cached && !trans->in_traverse_all)
                trans_for_each_path(trans, linked)
                        if (linked != path &&
                            linked->cached == path->cached &&
                            linked->btree_id == path->btree_id &&
                            linked->locks_want < new_locks_want) {
                                linked->locks_want = new_locks_want;
                                btree_path_get_locks(trans, linked, true);
                        }

        return false;
}

void __bch2_btree_path_downgrade(struct btree_path *path,
                                 unsigned new_locks_want)
{
        unsigned l;

        EBUG_ON(path->locks_want < new_locks_want);

        path->locks_want = new_locks_want;

        while (path->nodes_locked &&
               (l = __fls(path->nodes_locked)) >= path->locks_want) {
                if (l > path->level) {
                        btree_node_unlock(path, l);
                } else {
                        if (btree_node_intent_locked(path, l)) {
                                six_lock_downgrade(&path->l[l].b->c.lock);
                                path->nodes_intent_locked ^= 1 << l;
                        }
                        break;
                }
        }

        bch2_btree_path_verify_locks(path);
}

void bch2_trans_downgrade(struct btree_trans *trans)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                bch2_btree_path_downgrade(path);
}

/* Btree transaction locking: */

bool bch2_trans_relock(struct btree_trans *trans)
{
        struct btree_path *path;

        if (unlikely(trans->restarted))
                return false;

        trans_for_each_path(trans, path)
                if (path->should_be_locked &&
                    !bch2_btree_path_relock(trans, path, _RET_IP_)) {
                        trace_trans_restart_relock(trans->fn, _RET_IP_,
                                        path->btree_id, &path->pos);
                        BUG_ON(!trans->restarted);
                        return false;
                }
        return true;
}

void bch2_trans_unlock(struct btree_trans *trans)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                __bch2_btree_path_unlock(path);

        /*
         * bch2_gc_btree_init_recurse() doesn't use btree iterators for walking
         * btree nodes, it implements its own walking:
         */
        BUG_ON(!trans->is_initial_gc &&
               lock_class_is_held(&bch2_btree_node_lock_key));
}

/* Btree iterator: */

#ifdef CONFIG_BCACHEFS_DEBUG

static void bch2_btree_path_verify_cached(struct btree_trans *trans,
                                          struct btree_path *path)
{
        struct bkey_cached *ck;
        bool locked = btree_node_locked(path, 0);

        if (!bch2_btree_node_relock(trans, path, 0))
                return;

        ck = (void *) path->l[0].b;
        BUG_ON(ck->key.btree_id != path->btree_id ||
               bkey_cmp(ck->key.pos, path->pos));

        if (!locked)
                btree_node_unlock(path, 0);
}

static void bch2_btree_path_verify_level(struct btree_trans *trans,
                                struct btree_path *path, unsigned level)
{
        struct btree_path_level *l;
        struct btree_node_iter tmp;
        bool locked;
        struct bkey_packed *p, *k;
        struct printbuf buf1 = PRINTBUF;
        struct printbuf buf2 = PRINTBUF;
        struct printbuf buf3 = PRINTBUF;
        const char *msg;

        if (!bch2_debug_check_iterators)
                return;

        l       = &path->l[level];
        tmp     = l->iter;
        locked  = btree_node_locked(path, level);

        if (path->cached) {
                if (!level)
                        bch2_btree_path_verify_cached(trans, path);
                return;
        }

        if (!btree_path_node(path, level))
                return;

        if (!bch2_btree_node_relock(trans, path, level))
                return;

        BUG_ON(!btree_path_pos_in_node(path, l->b));

        bch2_btree_node_iter_verify(&l->iter, l->b);

        /*
         * For interior nodes, the iterator will have skipped past deleted keys:
         */
        p = level
                ? bch2_btree_node_iter_prev(&tmp, l->b)
                : bch2_btree_node_iter_prev_all(&tmp, l->b);
        k = bch2_btree_node_iter_peek_all(&l->iter, l->b);

        if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
                msg = "before";
                goto err;
        }

        if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
                msg = "after";
                goto err;
        }

        if (!locked)
                btree_node_unlock(path, level);
        return;
err:
        bch2_bpos_to_text(&buf1, path->pos);

        if (p) {
                struct bkey uk = bkey_unpack_key(l->b, p);
                bch2_bkey_to_text(&buf2, &uk);
        } else {
                prt_printf(&buf2, "(none)");
        }

        if (k) {
                struct bkey uk = bkey_unpack_key(l->b, k);
                bch2_bkey_to_text(&buf3, &uk);
        } else {
                prt_printf(&buf3, "(none)");
        }

        panic("path should be %s key at level %u:\n"
              "path pos %s\n"
              "prev key %s\n"
              "cur  key %s\n",
              msg, level, buf1.buf, buf2.buf, buf3.buf);
}

static void bch2_btree_path_verify(struct btree_trans *trans,
                                   struct btree_path *path)
{
        struct bch_fs *c = trans->c;
        unsigned i;

        EBUG_ON(path->btree_id >= BTREE_ID_NR);

        for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
                if (!path->l[i].b) {
                        BUG_ON(!path->cached &&
                               c->btree_roots[path->btree_id].b->c.level > i);
                        break;
                }

                bch2_btree_path_verify_level(trans, path, i);
        }

        bch2_btree_path_verify_locks(path);
}

void bch2_trans_verify_paths(struct btree_trans *trans)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                bch2_btree_path_verify(trans, path);
}

static void bch2_btree_iter_verify(struct btree_iter *iter)
{
        struct btree_trans *trans = iter->trans;

        BUG_ON(iter->btree_id >= BTREE_ID_NR);

        BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != iter->path->cached);

        BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
               (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));

        BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
               (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
               !btree_type_has_snapshots(iter->btree_id));

        if (iter->update_path)
                bch2_btree_path_verify(trans, iter->update_path);
        bch2_btree_path_verify(trans, iter->path);
}

static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
{
        BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
               !iter->pos.snapshot);

        BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
               iter->pos.snapshot != iter->snapshot);

        BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
               bkey_cmp(iter->pos, iter->k.p) > 0);
}

static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
{
        struct btree_trans *trans = iter->trans;
        struct btree_iter copy;
        struct bkey_s_c prev;
        int ret = 0;

        if (!bch2_debug_check_iterators)
                return 0;

        if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS))
                return 0;

        if (bkey_err(k) || !k.k)
                return 0;

        BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
                                          iter->snapshot,
                                          k.k->p.snapshot));

        bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
                             BTREE_ITER_NOPRESERVE|
                             BTREE_ITER_ALL_SNAPSHOTS);
        prev = bch2_btree_iter_prev(&copy);
        if (!prev.k)
                goto out;

        ret = bkey_err(prev);
        if (ret)
                goto out;

        if (!bkey_cmp(prev.k->p, k.k->p) &&
            bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
                                      prev.k->p.snapshot) > 0) {
                struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

                bch2_bkey_to_text(&buf1, k.k);
                bch2_bkey_to_text(&buf2, prev.k);

                panic("iter snap %u\n"
                      "k    %s\n"
                      "prev %s\n",
                      iter->snapshot,
                      buf1.buf, buf2.buf);
        }
out:
        bch2_trans_iter_exit(trans, &copy);
        return ret;
}

void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
                            struct bpos pos, bool key_cache)
{
        struct btree_path *path;
        unsigned idx;
        struct printbuf buf = PRINTBUF;

        trans_for_each_path_inorder(trans, path, idx) {
                int cmp = cmp_int(path->btree_id, id) ?:
                        cmp_int(path->cached, key_cache);

                if (cmp > 0)
                        break;
                if (cmp < 0)
                        continue;

                if (!(path->nodes_locked & 1) ||
                    !path->should_be_locked)
                        continue;

                if (!key_cache) {
                        if (bkey_cmp(pos, path->l[0].b->data->min_key) >= 0 &&
                            bkey_cmp(pos, path->l[0].b->key.k.p) <= 0)
                                return;
                } else {
                        if (!bkey_cmp(pos, path->pos))
                                return;
                }
        }

        bch2_dump_trans_paths_updates(trans);
        bch2_bpos_to_text(&buf, pos);

        panic("not locked: %s %s%s\n",
              bch2_btree_ids[id], buf.buf,
              key_cache ? " cached" : "");
}

#else

static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
                                                struct btree_path *path, unsigned l) {}
static inline void bch2_btree_path_verify(struct btree_trans *trans,
                                          struct btree_path *path) {}
static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }

#endif

/* Btree path: fixups after btree updates */

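/*
 * Repoint the node iterator's set for bset @t at key @k, or push a new set
 * if the iterator didn't have one for this bset:
 */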
static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
                                        struct btree *b,
                                        struct bset_tree *t,
                                        struct bkey_packed *k)
{
        struct btree_node_iter_set *set;

        btree_node_iter_for_each(iter, set)
                if (set->end == t->end_offset) {
                        set->k = __btree_node_key_to_offset(b, k);
                        bch2_btree_node_iter_sort(iter, b);
                        return;
                }

        bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
}

static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
                                               struct btree *b,
                                               struct bkey_packed *where)
{
        struct btree_path_level *l = &path->l[b->c.level];

        if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
                return;

        if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
                bch2_btree_node_iter_advance(&l->iter, l->b);
}

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
                                      struct btree *b,
                                      struct bkey_packed *where)
{
        struct btree_path *path;

        trans_for_each_path_with_node(trans, b, path) {
                __bch2_btree_path_fix_key_modified(path, b, where);
                bch2_btree_path_verify_level(trans, path, b->c.level);
        }
}

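/*
 * A key was inserted, overwritten or deleted at @where, shifting the rest
 * of the bset by @new_u64s - @clobber_u64s u64s; fix up @node_iter's
 * offsets so that it still points at the same position:
 */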
static void __bch2_btree_node_iter_fix(struct btree_path *path,
                                       struct btree *b,
                                       struct btree_node_iter *node_iter,
                                       struct bset_tree *t,
                                       struct bkey_packed *where,
                                       unsigned clobber_u64s,
                                       unsigned new_u64s)
{
        const struct bkey_packed *end = btree_bkey_last(b, t);
        struct btree_node_iter_set *set;
        unsigned offset = __btree_node_key_to_offset(b, where);
        int shift = new_u64s - clobber_u64s;
        unsigned old_end = t->end_offset - shift;
        unsigned orig_iter_pos = node_iter->data[0].k;
        bool iter_current_key_modified =
                orig_iter_pos >= offset &&
                orig_iter_pos <= offset + clobber_u64s;

        btree_node_iter_for_each(node_iter, set)
                if (set->end == old_end)
                        goto found;

        /* didn't find the bset in the iterator - might have to re-add it: */
        if (new_u64s &&
            bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
                bch2_btree_node_iter_push(node_iter, b, where, end);
                goto fixup_done;
        } else {
                /* Iterator is after key that changed */
                return;
        }
found:
        set->end = t->end_offset;

        /* Iterator hasn't gotten to the key that changed yet: */
        if (set->k < offset)
                return;

        if (new_u64s &&
            bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
                set->k = offset;
        } else if (set->k < offset + clobber_u64s) {
                set->k = offset + new_u64s;
                if (set->k == set->end)
                        bch2_btree_node_iter_set_drop(node_iter, set);
        } else {
                /* Iterator is after key that changed */
                set->k = (int) set->k + shift;
                return;
        }

        bch2_btree_node_iter_sort(node_iter, b);
fixup_done:
        if (node_iter->data[0].k != orig_iter_pos)
                iter_current_key_modified = true;

        /*
         * When a new key is added, and the node iterator now points to that
         * key, the iterator might have skipped past deleted keys that should
         * come after the key the iterator now points to. We have to rewind to
         * before those deleted keys - otherwise
         * bch2_btree_node_iter_prev_all() breaks:
         */
        if (!bch2_btree_node_iter_end(node_iter) &&
            iter_current_key_modified &&
            b->c.level) {
                struct bset_tree *t;
                struct bkey_packed *k, *k2, *p;

                k = bch2_btree_node_iter_peek_all(node_iter, b);

                for_each_bset(b, t) {
                        bool set_pos = false;

                        if (node_iter->data[0].end == t->end_offset)
                                continue;

                        k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);

                        while ((p = bch2_bkey_prev_all(b, t, k2)) &&
                               bkey_iter_cmp(b, k, p) < 0) {
                                k2 = p;
                                set_pos = true;
                        }

                        if (set_pos)
                                btree_node_iter_set_set_pos(node_iter,
                                                            b, t, k2);
                }
        }
}

void bch2_btree_node_iter_fix(struct btree_trans *trans,
                              struct btree_path *path,
                              struct btree *b,
                              struct btree_node_iter *node_iter,
                              struct bkey_packed *where,
                              unsigned clobber_u64s,
                              unsigned new_u64s)
{
        struct bset_tree *t = bch2_bkey_to_bset(b, where);
        struct btree_path *linked;

        if (node_iter != &path->l[b->c.level].iter) {
                __bch2_btree_node_iter_fix(path, b, node_iter, t,
                                           where, clobber_u64s, new_u64s);

                if (bch2_debug_check_iterators)
                        bch2_btree_node_iter_verify(node_iter, b);
        }

        trans_for_each_path_with_node(trans, b, linked) {
                __bch2_btree_node_iter_fix(linked, b,
                                           &linked->l[b->c.level].iter, t,
                                           where, clobber_u64s, new_u64s);
                bch2_btree_path_verify_level(trans, linked, b->c.level);
        }
}

/* Btree path level: pointer to a particular btree node and node iter */

static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
                                                  struct btree_path_level *l,
                                                  struct bkey *u,
                                                  struct bkey_packed *k)
{
        if (unlikely(!k)) {
                /*
                 * signal to bch2_btree_iter_peek_slot() that we're currently at
                 * a hole
                 */
                u->type = KEY_TYPE_deleted;
                return bkey_s_c_null;
        }

        return bkey_disassemble(l->b, k, u);
}

static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
                                                        struct btree_path_level *l,
                                                        struct bkey *u)
{
        return __btree_iter_unpack(c, l, u,
                        bch2_btree_node_iter_peek_all(&l->iter, l->b));
}

static inline struct bkey_s_c btree_path_level_peek(struct bch_fs *c,
                                                    struct btree_path *path,
                                                    struct btree_path_level *l,
                                                    struct bkey *u)
{
        struct bkey_s_c k = __btree_iter_unpack(c, l, u,
                        bch2_btree_node_iter_peek(&l->iter, l->b));

        path->pos = k.k ? k.k->p : l->b->key.k.p;
        return k;
}

static inline struct bkey_s_c btree_path_level_prev(struct bch_fs *c,
                                                    struct btree_path *path,
                                                    struct btree_path_level *l,
                                                    struct bkey *u)
{
        struct bkey_s_c k = __btree_iter_unpack(c, l, u,
                        bch2_btree_node_iter_prev(&l->iter, l->b));

        path->pos = k.k ? k.k->p : l->b->data->min_key;
        return k;
}

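/*
 * Advance the node iterator past keys that compare strictly less than
 * path->pos, giving up after @max_advance steps (if positive); returns
 * false if we hit the limit first:
 */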
static inline bool btree_path_advance_to_pos(struct btree_path *path,
                                             struct btree_path_level *l,
                                             int max_advance)
{
        struct bkey_packed *k;
        int nr_advanced = 0;

        while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
               bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
                if (max_advance > 0 && nr_advanced >= max_advance)
                        return false;

                bch2_btree_node_iter_advance(&l->iter, l->b);
                nr_advanced++;
        }

        return true;
}

/*
 * Verify that iterator for parent node points to child node:
 */
static void btree_path_verify_new_node(struct btree_trans *trans,
                                       struct btree_path *path, struct btree *b)
{
        struct bch_fs *c = trans->c;
        struct btree_path_level *l;
        unsigned plevel;
        bool parent_locked;
        struct bkey_packed *k;

        if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
                return;

        if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
                return;

        plevel = b->c.level + 1;
        if (!btree_path_node(path, plevel))
                return;

        parent_locked = btree_node_locked(path, plevel);

        if (!bch2_btree_node_relock(trans, path, plevel))
                return;

        l = &path->l[plevel];
        k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
        if (!k ||
            bkey_deleted(k) ||
            bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
                struct printbuf buf1 = PRINTBUF;
                struct printbuf buf2 = PRINTBUF;
                struct printbuf buf3 = PRINTBUF;
                struct printbuf buf4 = PRINTBUF;
                struct bkey uk = bkey_unpack_key(b, k);

                bch2_dump_btree_node(c, l->b);
                bch2_bpos_to_text(&buf1, path->pos);
                bch2_bkey_to_text(&buf2, &uk);
                bch2_bpos_to_text(&buf3, b->data->min_key);
                bch2_bpos_to_text(&buf4, b->data->max_key);
                panic("parent iter doesn't point to new node:\n"
                      "iter pos %s %s\n"
                      "iter key %s\n"
                      "new node %s-%s\n",
                      bch2_btree_ids[path->btree_id],
                      buf1.buf, buf2.buf, buf3.buf, buf4.buf);
        }

        if (!parent_locked)
                btree_node_unlock(path, plevel);
}

static inline void __btree_path_level_init(struct btree_path *path,
                                           unsigned level)
{
        struct btree_path_level *l = &path->l[level];

        bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);

        /*
         * Iterators to interior nodes should always be pointed at the first
         * non-whiteout:
         */
        if (level)
                bch2_btree_node_iter_peek(&l->iter, l->b);
}

static inline void btree_path_level_init(struct btree_trans *trans,
                                         struct btree_path *path,
                                         struct btree *b)
{
        BUG_ON(path->cached);

        btree_path_verify_new_node(trans, path, b);

        EBUG_ON(!btree_path_pos_in_node(path, b));
        EBUG_ON(b->c.lock.state.seq & 1);

        path->l[b->c.level].lock_seq = b->c.lock.state.seq;
        path->l[b->c.level].b = b;
        __btree_path_level_init(path, b->c.level);
}

/* Btree path: fixups after btree node updates: */

/*
 * A btree node is being replaced - update the iterator to point to the new
 * node:
 */
void bch2_trans_node_add(struct btree_trans *trans, struct btree *b)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                if (!path->cached &&
                    btree_path_pos_in_node(path, b)) {
                        enum btree_node_locked_type t =
                                btree_lock_want(path, b->c.level);

                        if (path->nodes_locked &&
                            t != BTREE_NODE_UNLOCKED) {
                                btree_node_unlock(path, b->c.level);
                                six_lock_increment(&b->c.lock, t);
                                mark_btree_node_locked(trans, path, b->c.level, t);
                        }

                        btree_path_level_init(trans, path, b);
                }
}

/*
 * A btree node has been modified in such a way as to invalidate iterators - fix
 * them:
 */
void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
{
        struct btree_path *path;

        trans_for_each_path_with_node(trans, b, path)
                __btree_path_level_init(path, b->c.level);
}

/* Btree path: traverse, set_pos: */

static int lock_root_check_fn(struct six_lock *lock, void *p)
{
        struct btree *b = container_of(lock, struct btree, c.lock);
        struct btree **rootp = p;

        return b == *rootp ? 0 : -1;
}

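/*
 * Lock the root node, retrying until we've locked the node that's
 * currently the root: the root (and its level) may change while we're
 * waiting on the lock:
 */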
static inline int btree_path_lock_root(struct btree_trans *trans,
                                       struct btree_path *path,
                                       unsigned depth_want,
                                       unsigned long trace_ip)
{
        struct bch_fs *c = trans->c;
        struct btree *b, **rootp = &c->btree_roots[path->btree_id].b;
        enum six_lock_type lock_type;
        unsigned i;

        EBUG_ON(path->nodes_locked);

        while (1) {
                b = READ_ONCE(*rootp);
                path->level = READ_ONCE(b->c.level);

                if (unlikely(path->level < depth_want)) {
                        /*
                         * the root is at a lower depth than the depth we want:
                         * got to the end of the btree, or we're walking nodes
                         * greater than some depth and there are no nodes >=
                         * that depth
                         */
                        path->level = depth_want;
                        for (i = path->level; i < BTREE_MAX_DEPTH; i++)
                                path->l[i].b = NULL;
                        return 1;
                }

                lock_type = __btree_lock_want(path, path->level);
                if (unlikely(!btree_node_lock(trans, path, b, SPOS_MAX,
                                              path->level, lock_type,
                                              lock_root_check_fn, rootp,
                                              trace_ip))) {
                        if (trans->restarted)
                                return -EINTR;
                        continue;
                }

                if (likely(b == READ_ONCE(*rootp) &&
                           b->c.level == path->level &&
                           !race_fault())) {
                        for (i = 0; i < path->level; i++)
                                path->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
                        path->l[path->level].b = b;
                        for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
                                path->l[i].b = NULL;

                        mark_btree_node_locked(trans, path, path->level, lock_type);
                        btree_path_level_init(trans, path, b);
                        return 0;
                }

                six_unlock_type(&b->c.lock, lock_type);
        }
}

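/*
 * Start reads for the next few child nodes we'll be descending into, so
 * they're in memory by the time we need them; we prefetch more
 * aggressively before BCH_FS_STARTED is set, i.e. during recovery:
 */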
noinline
static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
{
        struct bch_fs *c = trans->c;
        struct btree_path_level *l = path_l(path);
        struct btree_node_iter node_iter = l->iter;
        struct bkey_packed *k;
        struct bkey_buf tmp;
        unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
                ? (path->level > 1 ? 0 :  2)
                : (path->level > 1 ? 1 : 16);
        bool was_locked = btree_node_locked(path, path->level);
        int ret = 0;

        bch2_bkey_buf_init(&tmp);

        while (nr-- && !ret) {
                if (!bch2_btree_node_relock(trans, path, path->level))
                        break;

                bch2_btree_node_iter_advance(&node_iter, l->b);
                k = bch2_btree_node_iter_peek(&node_iter, l->b);
                if (!k)
                        break;

                bch2_bkey_buf_unpack(&tmp, c, l->b, k);
                ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
                                               path->level - 1);
        }

        if (!was_locked)
                btree_node_unlock(path, path->level);

        bch2_bkey_buf_exit(&tmp, c);
        return ret;
}

static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
                                 struct btree_and_journal_iter *jiter)
{
        struct bch_fs *c = trans->c;
        struct bkey_s_c k;
        struct bkey_buf tmp;
        unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
                ? (path->level > 1 ? 0 :  2)
                : (path->level > 1 ? 1 : 16);
        bool was_locked = btree_node_locked(path, path->level);
        int ret = 0;

        bch2_bkey_buf_init(&tmp);

        while (nr-- && !ret) {
                if (!bch2_btree_node_relock(trans, path, path->level))
                        break;

                bch2_btree_and_journal_iter_advance(jiter);
                k = bch2_btree_and_journal_iter_peek(jiter);
                if (!k.k)
                        break;

                bch2_bkey_buf_reassemble(&tmp, c, k);
                ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
                                               path->level - 1);
        }

        if (!was_locked)
                btree_node_unlock(path, path->level);

        bch2_bkey_buf_exit(&tmp, c);
        return ret;
}

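/*
 * Cache the in-memory address of a child node in the parent's
 * btree_ptr_v2 key, so future traversals can skip the btree node cache
 * hash table lookup:
 */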
static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
                                            struct btree_path *path,
                                            unsigned plevel, struct btree *b)
{
        struct btree_path_level *l = &path->l[plevel];
        bool locked = btree_node_locked(path, plevel);
        struct bkey_packed *k;
        struct bch_btree_ptr_v2 *bp;

        if (!bch2_btree_node_relock(trans, path, plevel))
                return;

        k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
        BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);

        bp = (void *) bkeyp_val(&l->b->format, k);
        bp->mem_ptr = (unsigned long)b;

        if (!locked)
                btree_node_unlock(path, plevel);
}

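/*
 * Until journal replay has finished, btree nodes as read from disk may be
 * stale: iterate over the node and the journal together, so that we also
 * see keys that so far exist only in the journal:
 */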
static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
                                                     struct btree_path *path,
                                                     unsigned flags,
                                                     struct bkey_buf *out)
{
        struct bch_fs *c = trans->c;
        struct btree_path_level *l = path_l(path);
        struct btree_and_journal_iter jiter;
        struct bkey_s_c k;
        int ret = 0;

        __bch2_btree_and_journal_iter_init_node_iter(&jiter, c, l->b, l->iter, path->pos);

        k = bch2_btree_and_journal_iter_peek(&jiter);

        bch2_bkey_buf_reassemble(out, c, k);

        if (flags & BTREE_ITER_PREFETCH)
                ret = btree_path_prefetch_j(trans, path, &jiter);

        bch2_btree_and_journal_iter_exit(&jiter);
        return ret;
}

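/*
 * Descend one level: peek the key for the child node that covers
 * path->pos, read and lock that node, and make it the new path->level:
 */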
static __always_inline int btree_path_down(struct btree_trans *trans,
                                           struct btree_path *path,
                                           unsigned flags,
                                           unsigned long trace_ip)
{
        struct bch_fs *c = trans->c;
        struct btree_path_level *l = path_l(path);
        struct btree *b;
        unsigned level = path->level - 1;
        enum six_lock_type lock_type = __btree_lock_want(path, level);
        bool replay_done = test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags);
        struct bkey_buf tmp;
        int ret;

        EBUG_ON(!btree_node_locked(path, path->level));

        bch2_bkey_buf_init(&tmp);

        if (unlikely(!replay_done)) {
                ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
                if (ret)
                        goto err;
        } else {
                bch2_bkey_buf_unpack(&tmp, c, l->b,
                                 bch2_btree_node_iter_peek(&l->iter, l->b));

                if (flags & BTREE_ITER_PREFETCH) {
                        ret = btree_path_prefetch(trans, path);
                        if (ret)
                                goto err;
                }
        }

        b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
        ret = PTR_ERR_OR_ZERO(b);
        if (unlikely(ret))
                goto err;

        mark_btree_node_locked(trans, path, level, lock_type);
        btree_path_level_init(trans, path, b);

        if (likely(replay_done && tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
            unlikely(b != btree_node_mem_ptr(tmp.k)))
                btree_node_mem_ptr_set(trans, path, level + 1, b);

        if (btree_node_read_locked(path, level + 1))
                btree_node_unlock(path, level + 1);
        path->level = level;

        bch2_btree_path_verify_locks(path);
err:
        bch2_bkey_buf_exit(&tmp, c);
        return ret;
}

static int btree_path_traverse_one(struct btree_trans *, struct btree_path *,
                                   unsigned, unsigned long);

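/*
 * Slowpath, called after a transaction restart: unlock everything, then
 * retraverse all paths in sorted (lock) order so that the traversal can't
 * deadlock:
 */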
1434 static int bch2_btree_path_traverse_all(struct btree_trans *trans)
1435 {
1436         struct bch_fs *c = trans->c;
1437         struct btree_path *path;
1438         unsigned long trace_ip = _RET_IP_;
1439         int i, ret = 0;
1440
1441         if (trans->in_traverse_all)
1442                 return -EINTR;
1443
1444         trans->in_traverse_all = true;
1445 retry_all:
1446         trans->restarted = false;
1447         trans->traverse_all_idx = U8_MAX;
1448
1449         trans_for_each_path(trans, path)
1450                 path->should_be_locked = false;
1451
1452         btree_trans_verify_sorted(trans);
1453
1454         for (i = trans->nr_sorted - 2; i >= 0; --i) {
1455                 struct btree_path *path1 = trans->paths + trans->sorted[i];
1456                 struct btree_path *path2 = trans->paths + trans->sorted[i + 1];
1457
1458                 if (path1->btree_id == path2->btree_id &&
1459                     path1->locks_want < path2->locks_want)
1460                         __bch2_btree_path_upgrade(trans, path1, path2->locks_want);
1461                 else if (!path1->locks_want && path2->locks_want)
1462                         __bch2_btree_path_upgrade(trans, path1, 1);
1463         }
1464
1465         bch2_trans_unlock(trans);
1466         cond_resched();
1467
1468         if (unlikely(trans->memory_allocation_failure)) {
1469                 struct closure cl;
1470
1471                 closure_init_stack(&cl);
1472
1473                 do {
1474                         ret = bch2_btree_cache_cannibalize_lock(c, &cl);
1475                         closure_sync(&cl);
1476                 } while (ret);
1477         }
1478
1479         /* Now, redo traversals in correct order: */
1480         trans->traverse_all_idx = 0;
1481         while (trans->traverse_all_idx < trans->nr_sorted) {
1482                 path = trans->paths + trans->sorted[trans->traverse_all_idx];
1483
1484                 /*
1485                  * Traversing a path can cause another path to be added at about
1486                  * the same position:
1487                  */
1488                 if (path->uptodate) {
1489                         ret = btree_path_traverse_one(trans, path, 0, _THIS_IP_);
1490                         if (ret == -EINTR || ret == -ENOMEM)
1491                                 goto retry_all;
1492                         if (ret)
1493                                 goto err;
1494                         BUG_ON(path->uptodate);
1495                 } else {
1496                         trans->traverse_all_idx++;
1497                 }
1498         }
1499
1500         /*
1501          * BTREE_ITER_NEED_RELOCK is ok here - if we called bch2_trans_unlock()
1502          * and relock(), relock() won't relock since path->should_be_locked
1503          * isn't set yet, which is all fine
1504          */
1505         trans_for_each_path(trans, path)
1506                 BUG_ON(path->uptodate >= BTREE_ITER_NEED_TRAVERSE);
1507 err:
1508         bch2_btree_cache_cannibalize_unlock(c);
1509
1510         trans->in_traverse_all = false;
1511
1512         trace_trans_traverse_all(trans->fn, trace_ip);
1513         return ret;
1514 }
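
/*
 * Editor's sketch: restarts triggered inside the btree code surface to
 * callers as -EINTR, after which the transaction's paths are re-traversed
 * via this function. A minimal caller-side retry loop, assuming a
 * hypothetical do_work() helper (illustrative only, not part of this file):
 *
 *	retry:
 *		bch2_trans_begin(&trans);
 *		ret = do_work(&trans);
 *		if (ret == -EINTR)
 *			goto retry;
 */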
1515
1516 static inline bool btree_path_good_node(struct btree_trans *trans,
1517                                         struct btree_path *path,
1518                                         unsigned l, int check_pos)
1519 {
1520         if (!is_btree_node(path, l) ||
1521             !bch2_btree_node_relock(trans, path, l))
1522                 return false;
1523
1524         if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
1525                 return false;
1526         if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
1527                 return false;
1528         return true;
1529 }
1530
1531 static void btree_path_set_level_up(struct btree_path *path)
1532 {
1533         btree_node_unlock(path, path->level);
1534         path->l[path->level].b = BTREE_ITER_NO_NODE_UP;
1535         path->level++;
1536         btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1537 }
1538
1539 static void btree_path_set_level_down(struct btree_trans *trans,
1540                                       struct btree_path *path,
1541                                       unsigned new_level)
1542 {
1543         unsigned l;
1544
1545         path->level = new_level;
1546
1547         for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
1548                 if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
1549                         btree_node_unlock(path, l);
1550
1551         btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1552         bch2_btree_path_verify(trans, path);
1553 }
1554
1555 static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
1556                                                      struct btree_path *path,
1557                                                      int check_pos)
1558 {
1559         unsigned i, l = path->level;
1560
1561         while (btree_path_node(path, l) &&
1562                !btree_path_good_node(trans, path, l, check_pos)) {
1563                 btree_node_unlock(path, l);
1564                 path->l[l].b = BTREE_ITER_NO_NODE_UP;
1565                 l++;
1566         }
1567
1568         /* If we need intent locks, take them too: */
1569         for (i = l + 1;
1570              i < path->locks_want && btree_path_node(path, i);
1571              i++)
1572                 if (!bch2_btree_node_relock(trans, path, i))
1573                         while (l <= i) {
1574                                 btree_node_unlock(path, l);
1575                                 path->l[l].b = BTREE_ITER_NO_NODE_UP;
1576                                 l++;
1577                         }
1578
1579         return l;
1580 }
1581
1582 /*
1583  * This is the main state machine for walking down the btree - walks down to a
1584  * specified depth
1585  *
1586  * Returns 0 on success, -EIO on btree node read error, -EINTR on restart.
1587  *
1588  * On error, caller (peek_node()/peek_key()) must return NULL; the error is
1589  * stashed in the iterator and returned from bch2_trans_exit().
1590  */
1591 static int btree_path_traverse_one(struct btree_trans *trans,
1592                                    struct btree_path *path,
1593                                    unsigned flags,
1594                                    unsigned long trace_ip)
1595 {
1596         unsigned depth_want = path->level;
1597         int ret = 0;
1598
1599         if (unlikely(trans->restarted)) {
1600                 ret = -EINTR;
1601                 goto out;
1602         }
1603
1604         /*
1605          * Ensure we obey path->should_be_locked: if it's set, we can't unlock
1606          * and re-traverse the path without a transaction restart:
1607          */
1608         if (path->should_be_locked) {
1609                 ret = bch2_btree_path_relock(trans, path, trace_ip) ? 0 : -EINTR;
1610                 goto out;
1611         }
1612
1613         if (path->cached) {
1614                 ret = bch2_btree_path_traverse_cached(trans, path, flags);
1615                 goto out;
1616         }
1617
1618         if (unlikely(path->level >= BTREE_MAX_DEPTH))
1619                 goto out;
1620
1621         path->level = btree_path_up_until_good_node(trans, path, 0);
1622
1623         /*
1624          * Note: path->l[path->level].b may be temporarily NULL here - to other
1625          * code that would mean we got to the end of the btree; here it means
1626          * that relocking the root failed. It's critical that
1627          * btree_path_lock_root() comes next and that it can't fail.
1628          */
1629         while (path->level > depth_want) {
1630                 ret = btree_path_node(path, path->level)
1631                         ? btree_path_down(trans, path, flags, trace_ip)
1632                         : btree_path_lock_root(trans, path, depth_want, trace_ip);
1633                 if (unlikely(ret)) {
1634                         if (ret == 1) {
1635                                 /*
1636                                  * No nodes at this level - got to the end of
1637                                  * the btree:
1638                                  */
1639                                 ret = 0;
1640                                 goto out;
1641                         }
1642
1643                         __bch2_btree_path_unlock(path);
1644                         path->level = depth_want;
1645
1646                         if (ret == -EIO)
1647                                 path->l[path->level].b =
1648                                         BTREE_ITER_NO_NODE_ERROR;
1649                         else
1650                                 path->l[path->level].b =
1651                                         BTREE_ITER_NO_NODE_DOWN;
1652                         goto out;
1653                 }
1654         }
1655
1656         path->uptodate = BTREE_ITER_UPTODATE;
1657 out:
1658         BUG_ON((ret == -EINTR) != !!trans->restarted);
1659         bch2_btree_path_verify(trans, path);
1660         return ret;
1661 }
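
/*
 * Editor's note: the BUG_ON() at 'out' above encodes the invariant that
 * -EINTR is returned if and only if trans->restarted is set. A restart
 * point elsewhere in this file (see bch2_btree_iter_next_node()) pairs the
 * two like so:
 *
 *	btree_trans_restart(trans);
 *	ret = -EINTR;
 */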
1662
1663 int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
1664                                           struct btree_path *path, unsigned flags)
1665 {
1666         if (path->uptodate < BTREE_ITER_NEED_RELOCK)
1667                 return 0;
1668
1669         return  bch2_trans_cond_resched(trans) ?:
1670                 btree_path_traverse_one(trans, path, flags, _RET_IP_);
1671 }
1672
1673 static void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
1674                             struct btree_path *src)
1675 {
1676         unsigned i, offset = offsetof(struct btree_path, pos);
1677
1678         memcpy((void *) dst + offset,
1679                (void *) src + offset,
1680                sizeof(struct btree_path) - offset);
1681
1682         for (i = 0; i < BTREE_MAX_DEPTH; i++)
1683                 if (btree_node_locked(dst, i))
1684                         six_lock_increment(&dst->l[i].b->c.lock,
1685                                            __btree_lock_want(dst, i));
1686
1687         bch2_btree_path_check_sort(trans, dst, 0);
1688 }
1689
1690 static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btree_path *src,
1691                                            bool intent)
1692 {
1693         struct btree_path *new = btree_path_alloc(trans, src);
1694
1695         btree_path_copy(trans, new, src);
1696         __btree_path_get(new, intent);
1697         return new;
1698 }
1699
1700 inline struct btree_path * __must_check
1701 bch2_btree_path_make_mut(struct btree_trans *trans,
1702                          struct btree_path *path, bool intent,
1703                          unsigned long ip)
1704 {
1705         if (path->ref > 1 || path->preserve) {
1706                 __btree_path_put(path, intent);
1707                 path = btree_path_clone(trans, path, intent);
1708                 path->preserve = false;
1709 #ifdef CONFIG_BCACHEFS_DEBUG
1710                 path->ip_allocated = ip;
1711 #endif
1712                 btree_trans_verify_sorted(trans);
1713         }
1714
1715         path->should_be_locked = false;
1716         return path;
1717 }
1718
1719 struct btree_path * __must_check
1720 bch2_btree_path_set_pos(struct btree_trans *trans,
1721                    struct btree_path *path, struct bpos new_pos,
1722                    bool intent, unsigned long ip)
1723 {
1724         int cmp = bpos_cmp(new_pos, path->pos);
1725         unsigned l = path->level;
1726
1727         EBUG_ON(trans->restarted);
1728         EBUG_ON(!path->ref);
1729
1730         if (!cmp)
1731                 return path;
1732
1733         path = bch2_btree_path_make_mut(trans, path, intent, ip);
1734
1735         path->pos = new_pos;
1736
1737         bch2_btree_path_check_sort(trans, path, cmp);
1738
1739         if (unlikely(path->cached)) {
1740                 btree_node_unlock(path, 0);
1741                 path->l[0].b = BTREE_ITER_NO_NODE_CACHED;
1742                 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1743                 goto out;
1744         }
1745
1746         l = btree_path_up_until_good_node(trans, path, cmp);
1747
1748         if (btree_path_node(path, l)) {
1749                 BUG_ON(!btree_node_locked(path, l));
1750                 /*
1751                  * We might have to skip over many keys, or just a few: try
1752                  * advancing the node iterator, and if we have to skip over too
1753                  * many keys just reinit it. Also reinit when rewinding, since
1754                  * the node iterator can only advance forwards.
1755                  */
1756                 if (cmp < 0 ||
1757                     !btree_path_advance_to_pos(path, &path->l[l], 8))
1758                         __btree_path_level_init(path, l);
1759         }
1760
1761         if (l != path->level) {
1762                 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1763                 __bch2_btree_path_unlock(path);
1764         }
1765 out:
1766         bch2_btree_path_verify(trans, path);
1767         return path;
1768 }
1769
1770 /* Btree path: main interface: */
1771
1772 static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
1773 {
1774         struct btree_path *next;
1775
1776         next = prev_btree_path(trans, path);
1777         if (next && !btree_path_cmp(next, path))
1778                 return next;
1779
1780         next = next_btree_path(trans, path);
1781         if (next && !btree_path_cmp(next, path))
1782                 return next;
1783
1784         return NULL;
1785 }
1786
1787 static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
1788 {
1789         struct btree_path *next;
1790
1791         next = prev_btree_path(trans, path);
1792         if (next && next->level == path->level && path_l(next)->b == path_l(path)->b)
1793                 return next;
1794
1795         next = next_btree_path(trans, path);
1796         if (next && next->level == path->level && path_l(next)->b == path_l(path)->b)
1797                 return next;
1798
1799         return NULL;
1800 }
1801
1802 static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path)
1803 {
1804         __bch2_btree_path_unlock(path);
1805         btree_path_list_remove(trans, path);
1806         trans->paths_allocated &= ~(1ULL << path->idx);
1807 }
1808
1809 void bch2_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
1810 {
1811         struct btree_path *dup;
1812
1813         EBUG_ON(trans->paths + path->idx != path);
1814         EBUG_ON(!path->ref);
1815
1816         if (!__btree_path_put(path, intent))
1817                 return;
1818
1819         /*
1820          * Perhaps instead we should check for duplicate paths in traverse_all:
1821          */
1822         if (path->preserve &&
1823             (dup = have_path_at_pos(trans, path))) {
1824                 dup->preserve = true;
1825                 path->preserve = false;
1826                 goto free;
1827         }
1828
1829         if (!path->preserve &&
1830             (dup = have_node_at_pos(trans, path)))
1831                 goto free;
1832         return;
1833 free:
1834         if (path->should_be_locked &&
1835             !btree_node_locked(dup, path->level))
1836                 return;
1837
1838         dup->should_be_locked |= path->should_be_locked;
1839         __bch2_path_free(trans, path);
1840 }
1841
1842 void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
1843 {
1844         struct btree_insert_entry *i;
1845
1846         prt_printf(buf, "transaction updates for %s journal seq %llu",
1847                trans->fn, trans->journal_res.seq);
1848         prt_newline(buf);
1849         printbuf_indent_add(buf, 2);
1850
1851         trans_for_each_update(trans, i) {
1852                 struct bkey_s_c old = { &i->old_k, i->old_v };
1853
1854                 prt_printf(buf, "update: btree=%s cached=%u %pS",
1855                        bch2_btree_ids[i->btree_id],
1856                        i->cached,
1857                        (void *) i->ip_allocated);
1858                 prt_newline(buf);
1859
1860                 prt_printf(buf, "  old ");
1861                 bch2_bkey_val_to_text(buf, trans->c, old);
1862                 prt_newline(buf);
1863
1864                 prt_printf(buf, "  new ");
1865                 bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));
1866                 prt_newline(buf);
1867         }
1868
1869         printbuf_indent_sub(buf, 2);
1870 }
1871
1872 noinline __cold
1873 void bch2_dump_trans_updates(struct btree_trans *trans)
1874 {
1875         struct printbuf buf = PRINTBUF;
1876
1877         bch2_trans_updates_to_text(&buf, trans);
1878         bch_err(trans->c, "%s", buf.buf);
1879         printbuf_exit(&buf);
1880 }
1881
1882 noinline __cold
1883 void bch2_dump_trans_paths_updates(struct btree_trans *trans)
1884 {
1885         struct btree_path *path;
1886         struct printbuf buf = PRINTBUF;
1887         unsigned idx;
1888
1889         trans_for_each_path_inorder(trans, path, idx) {
1890                 printbuf_reset(&buf);
1891
1892                 bch2_bpos_to_text(&buf, path->pos);
1893
1894                 printk(KERN_ERR "path: idx %u ref %u:%u%s%s btree=%s l=%u pos %s locks %u %pS\n",
1895                        path->idx, path->ref, path->intent_ref,
1896                        path->should_be_locked ? " S" : "",
1897                        path->preserve ? " P" : "",
1898                        bch2_btree_ids[path->btree_id],
1899                        path->level,
1900                        buf.buf,
1901                        path->nodes_locked,
1902 #ifdef CONFIG_BCACHEFS_DEBUG
1903                        (void *) path->ip_allocated
1904 #else
1905                        NULL
1906 #endif
1907                        );
1908         }
1909
1910         printbuf_exit(&buf);
1911
1912         bch2_dump_trans_updates(trans);
1913 }
1914
1915 static struct btree_path *btree_path_alloc(struct btree_trans *trans,
1916                                            struct btree_path *pos)
1917 {
1918         struct btree_path *path;
1919         unsigned idx;
1920
1921         if (unlikely(trans->paths_allocated ==
1922                      ~((~0ULL << 1) << (BTREE_ITER_MAX - 1)))) {
1923                 bch2_dump_trans_paths_updates(trans);
1924                 panic("trans path overflow\n");
1925         }
1926
1927         idx = __ffs64(~trans->paths_allocated);
1928         trans->paths_allocated |= 1ULL << idx;
1929
1930         path = &trans->paths[idx];
1931
1932         path->idx               = idx;
1933         path->ref               = 0;
1934         path->intent_ref        = 0;
1935         path->nodes_locked      = 0;
1936         path->nodes_intent_locked = 0;
1937
1938         btree_path_list_add(trans, pos, path);
1939         return path;
1940 }
1941
1942 struct btree_path *bch2_path_get(struct btree_trans *trans,
1943                                  enum btree_id btree_id, struct bpos pos,
1944                                  unsigned locks_want, unsigned level,
1945                                  unsigned flags, unsigned long ip)
1946 {
1947         struct btree_path *path, *path_pos = NULL;
1948         bool cached = flags & BTREE_ITER_CACHED;
1949         bool intent = flags & BTREE_ITER_INTENT;
1950         int i;
1951
1952         BUG_ON(trans->restarted);
1953         btree_trans_verify_sorted(trans);
1954         bch2_trans_verify_locks(trans);
1955
1956         trans_for_each_path_inorder(trans, path, i) {
1957                 if (__btree_path_cmp(path,
1958                                      btree_id,
1959                                      cached,
1960                                      pos,
1961                                      level) > 0)
1962                         break;
1963
1964                 path_pos = path;
1965         }
1966
1967         if (path_pos &&
1968             path_pos->cached    == cached &&
1969             path_pos->btree_id  == btree_id &&
1970             path_pos->level     == level) {
1971                 __btree_path_get(path_pos, intent);
1972                 path = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
1973         } else {
1974                 path = btree_path_alloc(trans, path_pos);
1975                 path_pos = NULL;
1976
1977                 __btree_path_get(path, intent);
1978                 path->pos                       = pos;
1979                 path->btree_id                  = btree_id;
1980                 path->cached                    = cached;
1981                 path->uptodate                  = BTREE_ITER_NEED_TRAVERSE;
1982                 path->should_be_locked          = false;
1983                 path->level                     = level;
1984                 path->locks_want                = locks_want;
1985                 path->nodes_locked              = 0;
1986                 path->nodes_intent_locked       = 0;
1987                 for (i = 0; i < ARRAY_SIZE(path->l); i++)
1988                         path->l[i].b            = BTREE_ITER_NO_NODE_INIT;
1989 #ifdef CONFIG_BCACHEFS_DEBUG
1990                 path->ip_allocated              = ip;
1991 #endif
1992                 btree_trans_verify_sorted(trans);
1993         }
1994
1995         if (!(flags & BTREE_ITER_NOPRESERVE))
1996                 path->preserve = true;
1997
1998         if (path->intent_ref)
1999                 locks_want = max(locks_want, level + 1);
2000
2001         /*
2002          * If the path has locks_want greater than requested, we don't downgrade
2003          * it here - on transaction restart because btree node split needs to
2004          * upgrade locks, we might be putting/getting the iterator again.
2005          * Downgrading iterators only happens via bch2_trans_downgrade(), after
2006          * a successful transaction commit.
2007          */
2008
2009         locks_want = min(locks_want, BTREE_MAX_DEPTH);
2010         if (locks_want > path->locks_want) {
2011                 path->locks_want = locks_want;
2012                 btree_path_get_locks(trans, path, true);
2013         }
2014
2015         return path;
2016 }
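
/*
 * Editor's sketch: paths returned by bch2_path_get() are refcounted and
 * must be released with bch2_path_put() with a matching intent flag. A
 * minimal get/traverse/put sequence (illustrative only; btree id, pos and
 * flags are placeholder choices, u is a struct bkey):
 *
 *	path = bch2_path_get(trans, BTREE_ID_extents, pos, 1, 0,
 *			     BTREE_ITER_INTENT, _THIS_IP_);
 *	ret = bch2_btree_path_traverse(trans, path, 0);
 *	if (!ret)
 *		k = bch2_btree_path_peek_slot(path, &u);
 *	bch2_path_put(trans, path, true);
 */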
2017
2018 inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
2019 {
2020
2021         struct bkey_s_c k;
2022
2023         if (!path->cached) {
2024                 struct btree_path_level *l = path_l(path);
2025                 struct bkey_packed *_k;
2026
2027                 EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
2028
2029                 _k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
2030                 k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
2031
2032                 EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, path->pos) == 0);
2033
2034                 if (!k.k || bpos_cmp(path->pos, k.k->p))
2035                         goto hole;
2036         } else {
2037                 struct bkey_cached *ck = (void *) path->l[0].b;
2038
2039                 EBUG_ON(ck &&
2040                         (path->btree_id != ck->key.btree_id ||
2041                          bkey_cmp(path->pos, ck->key.pos)));
2042
2043                 /* BTREE_ITER_CACHED_NOFILL|BTREE_ITER_CACHED_NOCREATE? */
2044                 if (unlikely(!ck || !ck->valid))
2045                         return bkey_s_c_null;
2046
2047                 EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
2048
2049                 *u = ck->k->k;
2050                 k = bkey_i_to_s_c(ck->k);
2051         }
2052
2053         return k;
2054 hole:
2055         bkey_init(u);
2056         u->p = path->pos;
2057         return (struct bkey_s_c) { u, NULL };
2058 }
2059
2060 /* Btree iterators: */
2061
2062 int __must_check
2063 __bch2_btree_iter_traverse(struct btree_iter *iter)
2064 {
2065         return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
2066 }
2067
2068 int __must_check
2069 bch2_btree_iter_traverse(struct btree_iter *iter)
2070 {
2071         int ret;
2072
2073         iter->path = bch2_btree_path_set_pos(iter->trans, iter->path,
2074                                         btree_iter_search_key(iter),
2075                                         iter->flags & BTREE_ITER_INTENT,
2076                                         btree_iter_ip_allocated(iter));
2077
2078         ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
2079         if (ret)
2080                 return ret;
2081
2082         iter->path->should_be_locked = true;
2083         return 0;
2084 }
2085
2086 /* Iterate across nodes (leaf and interior nodes) */
2087
2088 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
2089 {
2090         struct btree_trans *trans = iter->trans;
2091         struct btree *b = NULL;
2092         int ret;
2093
2094         EBUG_ON(iter->path->cached);
2095         bch2_btree_iter_verify(iter);
2096
2097         ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2098         if (ret)
2099                 goto err;
2100
2101         b = btree_path_node(iter->path, iter->path->level);
2102         if (!b)
2103                 goto out;
2104
2105         BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
2106
2107         bkey_init(&iter->k);
2108         iter->k.p = iter->pos = b->key.k.p;
2109
2110         iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
2111                                         iter->flags & BTREE_ITER_INTENT,
2112                                         btree_iter_ip_allocated(iter));
2113         iter->path->should_be_locked = true;
2114         BUG_ON(iter->path->uptodate);
2115 out:
2116         bch2_btree_iter_verify_entry_exit(iter);
2117         bch2_btree_iter_verify(iter);
2118
2119         return b;
2120 err:
2121         b = ERR_PTR(ret);
2122         goto out;
2123 }
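
/*
 * Editor's sketch: peek_node()/next_node() are normally driven in a loop
 * to walk every node at the iterator's depth (cf. the for_each_btree_node()
 * helpers); a minimal version, with error handling elided:
 *
 *	for (b = bch2_btree_iter_peek_node(&iter);
 *	     b && !IS_ERR(b);
 *	     b = bch2_btree_iter_next_node(&iter)) {
 *		... use b ...
 *	}
 */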
2124
2125 struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
2126 {
2127         struct btree_trans *trans = iter->trans;
2128         struct btree_path *path = iter->path;
2129         struct btree *b = NULL;
2130         int ret;
2131
2132         BUG_ON(trans->restarted);
2133         EBUG_ON(iter->path->cached);
2134         bch2_btree_iter_verify(iter);
2135
2136         /* already at end? */
2137         if (!btree_path_node(path, path->level))
2138                 return NULL;
2139
2140         /* at the root (no parent node)? then we're done: */
2141         if (!btree_path_node(path, path->level + 1)) {
2142                 btree_path_set_level_up(path);
2143                 return NULL;
2144         }
2145
2146         if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
2147                 __bch2_btree_path_unlock(path);
2148                 path->l[path->level].b = BTREE_ITER_NO_NODE_GET_LOCKS;
2149                 path->l[path->level + 1].b = BTREE_ITER_NO_NODE_GET_LOCKS;
2150                 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
2151                 trace_trans_restart_relock_next_node(trans->fn, _THIS_IP_,
2152                                            path->btree_id, &path->pos);
2153                 btree_trans_restart(trans);
2154                 ret = -EINTR;
2155                 goto err;
2156         }
2157
2158         b = btree_path_node(path, path->level + 1);
2159
2160         if (!bpos_cmp(iter->pos, b->key.k.p)) {
2161                 btree_node_unlock(path, path->level);
2162                 path->l[path->level].b = BTREE_ITER_NO_NODE_UP;
2163                 path->level++;
2164         } else {
2165                 /*
2166                  * Haven't gotten to the end of the parent node: go back down to
2167                  * the next child node
2168                  */
2169                 path = iter->path =
2170                         bch2_btree_path_set_pos(trans, path, bpos_successor(iter->pos),
2171                                            iter->flags & BTREE_ITER_INTENT,
2172                                            btree_iter_ip_allocated(iter));
2173
2174                 btree_path_set_level_down(trans, path, iter->min_depth);
2175
2176                 ret = bch2_btree_path_traverse(trans, path, iter->flags);
2177                 if (ret)
2178                         goto err;
2179
2180                 b = path->l[path->level].b;
2181         }
2182
2183         bkey_init(&iter->k);
2184         iter->k.p = iter->pos = b->key.k.p;
2185
2186         iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
2187                                         iter->flags & BTREE_ITER_INTENT,
2188                                         btree_iter_ip_allocated(iter));
2189         iter->path->should_be_locked = true;
2190         BUG_ON(iter->path->uptodate);
2191 out:
2192         bch2_btree_iter_verify_entry_exit(iter);
2193         bch2_btree_iter_verify(iter);
2194
2195         return b;
2196 err:
2197         b = ERR_PTR(ret);
2198         goto out;
2199 }
2200
2201 /* Iterate across keys (in leaf nodes only) */
2202
2203 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
2204 {
2205         if (likely(!(iter->flags & BTREE_ITER_ALL_LEVELS))) {
2206                 struct bpos pos = iter->k.p;
2207                 bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
2208                             ? bpos_cmp(pos, SPOS_MAX)
2209                             : bkey_cmp(pos, SPOS_MAX)) != 0;
2210
2211                 if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
2212                         pos = bkey_successor(iter, pos);
2213                 bch2_btree_iter_set_pos(iter, pos);
2214                 return ret;
2215         } else {
2216                 if (!btree_path_node(iter->path, iter->path->level))
2217                         return true;
2218
2219                 iter->advanced = true;
2220                 return false;
2221         }
2222 }
2223
2224 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
2225 {
2226         struct bpos pos = bkey_start_pos(&iter->k);
2227         bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
2228                     ? bpos_cmp(pos, POS_MIN)
2229                     : bkey_cmp(pos, POS_MIN)) != 0;
2230
2231         if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
2232                 pos = bkey_predecessor(iter, pos);
2233         bch2_btree_iter_set_pos(iter, pos);
2234         return ret;
2235 }
2236
2237 static inline struct bkey_i *btree_trans_peek_updates(struct btree_trans *trans,
2238                                                       enum btree_id btree_id,
2239                                                       struct bpos pos)
2240 {
2241         struct btree_insert_entry *i;
2242         struct bkey_i *ret = NULL;
2243
2244         trans_for_each_update(trans, i) {
2245                 if (i->btree_id < btree_id)
2246                         continue;
2247                 if (i->btree_id > btree_id)
2248                         break;
2249                 if (bpos_cmp(i->k->k.p, pos) < 0)
2250                         continue;
2251                 if (i->key_cache_already_flushed)
2252                         continue;
2253                 if (!ret || bpos_cmp(i->k->k.p, ret->k.p) < 0)
2254                         ret = i->k;
2255         }
2256
2257         return ret;
2258 }
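
/*
 * Editor's note: the function above returns the pending update with the
 * smallest key position >= pos within the given btree, skipping updates
 * already flushed to the key cache. E.g. with queued updates at offsets 1
 * and 3 and pos at offset 2, the update at offset 3 is returned.
 */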
2259
2260 struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
2261                                        struct btree_iter *iter,
2262                                        struct bpos start_pos,
2263                                        struct bpos end_pos)
2264 {
2265         struct bkey_i *k;
2266
2267         if (bpos_cmp(start_pos, iter->journal_pos) < 0)
2268                 iter->journal_idx = 0;
2269
2270         k = bch2_journal_keys_peek_upto(trans->c, iter->btree_id, 0,
2271                                         start_pos, end_pos,
2272                                         &iter->journal_idx);
2273
2274         iter->journal_pos = k ? k->k.p : end_pos;
2275         return k;
2276 }
2277
2278 struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *trans,
2279                                             struct btree_iter *iter,
2280                                             struct bpos pos)
2281 {
2282         return bch2_btree_journal_peek(trans, iter, pos, pos);
2283 }
2284
2285 static noinline
2286 struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
2287                                          struct btree_iter *iter,
2288                                          struct bkey_s_c k)
2289 {
2290         struct bkey_i *next_journal =
2291                 bch2_btree_journal_peek(trans, iter, iter->path->pos,
2292                                 k.k ? k.k->p : iter->path->l[0].b->key.k.p);
2293
2294         if (next_journal) {
2295                 iter->k = next_journal->k;
2296                 k = bkey_i_to_s_c(next_journal);
2297         }
2298
2299         return k;
2300 }
2301
2302 /*
2303  * Checks btree key cache for key at iter->pos and returns it if present, or
2304  * bkey_s_c_null:
2305  */
2306 static noinline
2307 struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
2308 {
2309         struct btree_trans *trans = iter->trans;
2310         struct bch_fs *c = trans->c;
2311         struct bkey u;
2312         int ret;
2313
2314         if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
2315                 return bkey_s_c_null;
2316
2317         if (!iter->key_cache_path)
2318                 iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
2319                                                      iter->flags & BTREE_ITER_INTENT, 0,
2320                                                      iter->flags|BTREE_ITER_CACHED,
2321                                                      _THIS_IP_);
2322
2323         iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
2324                                         iter->flags & BTREE_ITER_INTENT,
2325                                         btree_iter_ip_allocated(iter));
2326
2327         ret = bch2_btree_path_traverse(trans, iter->key_cache_path, iter->flags|BTREE_ITER_CACHED);
2328         if (unlikely(ret))
2329                 return bkey_s_c_err(ret);
2330
2331         iter->key_cache_path->should_be_locked = true;
2332
2333         return bch2_btree_path_peek_slot(iter->key_cache_path, &u);
2334 }
2335
2336 static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
2337 {
2338         struct btree_trans *trans = iter->trans;
2339         struct bkey_i *next_update;
2340         struct bkey_s_c k, k2;
2341         int ret;
2342
2343         EBUG_ON(iter->path->cached || iter->path->level);
2344         bch2_btree_iter_verify(iter);
2345
2346         while (1) {
2347                 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2348                                         iter->flags & BTREE_ITER_INTENT,
2349                                         btree_iter_ip_allocated(iter));
2350
2351                 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2352                 if (unlikely(ret)) {
2353                         /* ensure that iter->k is consistent with iter->pos: */
2354                         bch2_btree_iter_set_pos(iter, iter->pos);
2355                         k = bkey_s_c_err(ret);
2356                         goto out;
2357                 }
2358
2359                 iter->path->should_be_locked = true;
2360
2361                 k = btree_path_level_peek_all(trans->c, &iter->path->l[0], &iter->k);
2362
2363                 if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
2364                     k.k &&
2365                     (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
2366                         ret = bkey_err(k2);
2367                         if (ret) {
2368                                 k = k2;
2369                                 bch2_btree_iter_set_pos(iter, iter->pos);
2370                                 goto out;
2371                         }
2372
2373                         k = k2;
2374                         iter->k = *k.k;
2375                 }
2376
2377                 if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL))
2378                         k = btree_trans_peek_journal(trans, iter, k);
2379
2380                 next_update = iter->flags & BTREE_ITER_WITH_UPDATES
2381                         ? btree_trans_peek_updates(trans, iter->btree_id, search_key)
2382                         : NULL;
2383                 if (next_update &&
2384                     bpos_cmp(next_update->k.p,
2385                              k.k ? k.k->p : iter->path->l[0].b->key.k.p) <= 0) {
2386                         iter->k = next_update->k;
2387                         k = bkey_i_to_s_c(next_update);
2388                 }
2389
2390                 if (k.k && bkey_deleted(k.k)) {
2391                         /*
2392                          * If we've got a whiteout, and it's after the search
2393                          * key, advance the search key to the whiteout instead
2394                          * of just after the whiteout - it might be a btree
2395                          * whiteout, with a real key at the same position, since
2396                          * in the btree deleted keys sort before non deleted.
2397                          */
2398                         search_key = bpos_cmp(search_key, k.k->p)
2399                                 ? k.k->p
2400                                 : bpos_successor(k.k->p);
2401                         continue;
2402                 }
2403
2404                 if (likely(k.k)) {
2405                         break;
2406                 } else if (likely(bpos_cmp(iter->path->l[0].b->key.k.p, SPOS_MAX))) {
2407                         /* Advance to next leaf node: */
2408                         search_key = bpos_successor(iter->path->l[0].b->key.k.p);
2409                 } else {
2410                         /* End of btree: */
2411                         bch2_btree_iter_set_pos(iter, SPOS_MAX);
2412                         k = bkey_s_c_null;
2413                         goto out;
2414                 }
2415         }
2416 out:
2417         bch2_btree_iter_verify(iter);
2418
2419         return k;
2420 }
2421
2422 /**
2423  * bch2_btree_iter_peek_upto: returns first key greater than or equal to
2424  * iterator's current position, up to and including @end
2425  */
2426 struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end)
2427 {
2428         struct btree_trans *trans = iter->trans;
2429         struct bpos search_key = btree_iter_search_key(iter);
2430         struct bkey_s_c k;
2431         struct bpos iter_pos;
2432         int ret;
2433
2434         EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS);
2435
2436         if (iter->update_path) {
2437                 bch2_path_put(trans, iter->update_path,
2438                               iter->flags & BTREE_ITER_INTENT);
2439                 iter->update_path = NULL;
2440         }
2441
2442         bch2_btree_iter_verify_entry_exit(iter);
2443
2444         while (1) {
2445                 k = __bch2_btree_iter_peek(iter, search_key);
2446                 if (!k.k || bkey_err(k))
2447                         goto out;
2448
2449                 /*
2450                  * iter->pos should be monotonically increasing, and always be
2451                  * equal to the key we just returned - except extents can
2452                  * straddle iter->pos:
2453                  */
2454                 if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
2455                         iter_pos = k.k->p;
2456                 else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
2457                         iter_pos = bkey_start_pos(k.k);
2458                 else
2459                         iter_pos = iter->pos;
2460
2461                 if (bkey_cmp(iter_pos, end) > 0) {
2462                         bch2_btree_iter_set_pos(iter, end);
2463                         k = bkey_s_c_null;
2464                         goto out;
2465                 }
2466
2467                 if (iter->update_path &&
2468                     bkey_cmp(iter->update_path->pos, k.k->p)) {
2469                         bch2_path_put(trans, iter->update_path,
2470                                       iter->flags & BTREE_ITER_INTENT);
2471                         iter->update_path = NULL;
2472                 }
2473
2474                 if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
2475                     (iter->flags & BTREE_ITER_INTENT) &&
2476                     !(iter->flags & BTREE_ITER_IS_EXTENTS) &&
2477                     !iter->update_path) {
2478                         struct bpos pos = k.k->p;
2479
2480                         if (pos.snapshot < iter->snapshot) {
2481                                 search_key = bpos_successor(k.k->p);
2482                                 continue;
2483                         }
2484
2485                         pos.snapshot = iter->snapshot;
2486
2487                         /*
2488                          * Advance update_path the same way iter->path is
2489                          * advanced on exit, but only up to iter->snapshot:
2490                          */
2491                         __btree_path_get(iter->path, iter->flags & BTREE_ITER_INTENT);
2492                         iter->update_path = iter->path;
2493
2494                         iter->update_path = bch2_btree_path_set_pos(trans,
2495                                                 iter->update_path, pos,
2496                                                 iter->flags & BTREE_ITER_INTENT,
2497                                                 _THIS_IP_);
2498                 }
2499
2500                 /*
2501                  * We can never have a key in a leaf node at POS_MAX, so
2502                  * we don't have to check these successor() calls:
2503                  */
2504                 if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
2505                     !bch2_snapshot_is_ancestor(trans->c,
2506                                                iter->snapshot,
2507                                                k.k->p.snapshot)) {
2508                         search_key = bpos_successor(k.k->p);
2509                         continue;
2510                 }
2511
2512                 if (bkey_whiteout(k.k) &&
2513                     !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2514                         search_key = bkey_successor(iter, k.k->p);
2515                         continue;
2516                 }
2517
2518                 break;
2519         }
2520
2521         iter->pos = iter_pos;
2522
2523         iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
2524                                 iter->flags & BTREE_ITER_INTENT,
2525                                 btree_iter_ip_allocated(iter));
2526         BUG_ON(!iter->path->nodes_locked);
2527 out:
2528         if (iter->update_path) {
2529                 if (iter->update_path->uptodate &&
2530                     !bch2_btree_path_relock(trans, iter->update_path, _THIS_IP_)) {
2531                         k = bkey_s_c_err(-EINTR);
2532                 } else {
2533                         BUG_ON(!(iter->update_path->nodes_locked & 1));
2534                         iter->update_path->should_be_locked = true;
2535                 }
2536         }
2537         iter->path->should_be_locked = true;
2538
2539         if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
2540                 iter->pos.snapshot = iter->snapshot;
2541
2542         ret = bch2_btree_iter_verify_ret(iter, k);
2543         if (unlikely(ret)) {
2544                 bch2_btree_iter_set_pos(iter, iter->pos);
2545                 k = bkey_s_c_err(ret);
2546         }
2547
2548         bch2_btree_iter_verify_entry_exit(iter);
2549
2550         return k;
2551 }
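
/*
 * Editor's sketch: a bounded forward walk built on peek_upto() and
 * advance(), roughly what the for_each_btree_key*() macros expand to
 * (illustrative only; error handling abbreviated):
 *
 *	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents, start, 0);
 *	while ((k = bch2_btree_iter_peek_upto(&iter, end)).k &&
 *	       !(ret = bkey_err(k))) {
 *		... use k ...
 *		bch2_btree_iter_advance(&iter);
 *	}
 *	bch2_trans_iter_exit(&trans, &iter);
 */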
2552
2553 /**
2554  * bch2_btree_iter_peek_all_levels: returns the first key greater than or equal
2555  * to iterator's current position, returning keys from every level of the btree.
2556  * For keys at different levels of the btree that compare equal, the key from
2557  * the lower level (leaf) is returned first.
2558  */
2559 struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
2560 {
2561         struct btree_trans *trans = iter->trans;
2562         struct bkey_s_c k;
2563         int ret;
2564
2565         EBUG_ON(iter->path->cached);
2566         bch2_btree_iter_verify(iter);
2567         BUG_ON(iter->path->level < iter->min_depth);
2568         BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
2569         EBUG_ON(!(iter->flags & BTREE_ITER_ALL_LEVELS));
2570
2571         while (1) {
2572                 iter->path = bch2_btree_path_set_pos(trans, iter->path, iter->pos,
2573                                         iter->flags & BTREE_ITER_INTENT,
2574                                         btree_iter_ip_allocated(iter));
2575
2576                 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2577                 if (unlikely(ret)) {
2578                         /* ensure that iter->k is consistent with iter->pos: */
2579                         bch2_btree_iter_set_pos(iter, iter->pos);
2580                         k = bkey_s_c_err(ret);
2581                         goto out;
2582                 }
2583
2584                 /* Already at end? */
2585                 if (!btree_path_node(iter->path, iter->path->level)) {
2586                         k = bkey_s_c_null;
2587                         goto out;
2588                 }
2589
2590                 k = btree_path_level_peek_all(trans->c,
2591                                 &iter->path->l[iter->path->level], &iter->k);
2592
2593                 /* Check if we should go up to the parent node: */
2594                 if (!k.k ||
2595                     (iter->advanced &&
2596                      !bpos_cmp(path_l(iter->path)->b->key.k.p, iter->pos))) {
2597                         iter->pos = path_l(iter->path)->b->key.k.p;
2598                         btree_path_set_level_up(iter->path);
2599                         iter->advanced = false;
2600                         continue;
2601                 }
2602
2603                 /*
2604                  * Check if we should go back down to a leaf:
2605                  * If we're not in a leaf node, we only return the current key
2606                  * if it exactly matches iter->pos - otherwise we first have to
2607                  * go back to the leaf:
2608                  */
2609                 if (iter->path->level != iter->min_depth &&
2610                     (iter->advanced ||
2611                      !k.k ||
2612                      bpos_cmp(iter->pos, k.k->p))) {
2613                         btree_path_set_level_down(trans, iter->path, iter->min_depth);
2614                         iter->pos = bpos_successor(iter->pos);
2615                         iter->advanced = false;
2616                         continue;
2617                 }
2618
2619                 /* Check if we should go to the next key: */
2620                 if (iter->path->level == iter->min_depth &&
2621                     iter->advanced &&
2622                     k.k &&
2623                     !bpos_cmp(iter->pos, k.k->p)) {
2624                         iter->pos = bpos_successor(iter->pos);
2625                         iter->advanced = false;
2626                         continue;
2627                 }
2628
2629                 if (iter->advanced &&
2630                     iter->path->level == iter->min_depth &&
2631                     bpos_cmp(k.k->p, iter->pos))
2632                         iter->advanced = false;
2633
2634                 BUG_ON(iter->advanced);
2635                 BUG_ON(!k.k);
2636                 break;
2637         }
2638
2639         iter->pos = k.k->p;
2640 out:
2641         iter->path->should_be_locked = true;
2642         bch2_btree_iter_verify(iter);
2643
2644         return k;
2645 }
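
/*
 * Editor's note: under BTREE_ITER_ALL_LEVELS, bch2_btree_iter_advance()
 * doesn't move iter->pos itself - it sets iter->advanced, which the next
 * peek_all_levels() call consumes via the level up/down logic above. A
 * minimal walk might look like (illustrative only):
 *
 *	while ((k = bch2_btree_iter_peek_all_levels(&iter)).k &&
 *	       !bkey_err(k)) {
 *		... use k ...
 *		bch2_btree_iter_advance(&iter);
 *	}
 */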
2646
2647 /**
2648  * bch2_btree_iter_next: returns first key greater than iterator's current
2649  * position
2650  */
2651 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
2652 {
2653         if (!bch2_btree_iter_advance(iter))
2654                 return bkey_s_c_null;
2655
2656         return bch2_btree_iter_peek(iter);
2657 }
2658
2659 /**
2660  * bch2_btree_iter_peek_prev: returns first key less than or equal to
2661  * iterator's current position
2662  */
2663 struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
2664 {
2665         struct btree_trans *trans = iter->trans;
2666         struct bpos search_key = iter->pos;
2667         struct btree_path *saved_path = NULL;
2668         struct bkey_s_c k;
2669         struct bkey saved_k;
2670         const struct bch_val *saved_v;
2671         int ret;
2672
2673         EBUG_ON(iter->path->cached || iter->path->level);
2674         EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);
2675
2676         if (iter->flags & BTREE_ITER_WITH_JOURNAL)
2677                 return bkey_s_c_err(-EIO);
2678
2679         bch2_btree_iter_verify(iter);
2680         bch2_btree_iter_verify_entry_exit(iter);
2681
2682         if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2683                 search_key.snapshot = U32_MAX;
2684
2685         while (1) {
2686                 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2687                                                 iter->flags & BTREE_ITER_INTENT,
2688                                                 btree_iter_ip_allocated(iter));
2689
2690                 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2691                 if (unlikely(ret)) {
2692                         /* ensure that iter->k is consistent with iter->pos: */
2693                         bch2_btree_iter_set_pos(iter, iter->pos);
2694                         k = bkey_s_c_err(ret);
2695                         goto out;
2696                 }
2697
2698                 k = btree_path_level_peek(trans->c, iter->path,
2699                                           &iter->path->l[0], &iter->k);
2700                 if (!k.k ||
2701                     ((iter->flags & BTREE_ITER_IS_EXTENTS)
2702                      ? bpos_cmp(bkey_start_pos(k.k), search_key) >= 0
2703                      : bpos_cmp(k.k->p, search_key) > 0))
2704                         k = btree_path_level_prev(trans->c, iter->path,
2705                                                   &iter->path->l[0], &iter->k);
2706
2707                 bch2_btree_path_check_sort(trans, iter->path, 0);
2708
2709                 if (likely(k.k)) {
2710                         if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
2711                                 if (k.k->p.snapshot == iter->snapshot)
2712                                         goto got_key;
2713
2714                                 /*
2715                                  * If we have a saved candidate, and we're no
2716                                  * longer at the same _key_ (not pos), return
2717                                  * that candidate
2718                                  */
2719                                 if (saved_path && bkey_cmp(k.k->p, saved_k.p)) {
2720                                         bch2_path_put(trans, iter->path,
2721                                                       iter->flags & BTREE_ITER_INTENT);
2722                                         iter->path = saved_path;
2723                                         saved_path = NULL;
2724                                         iter->k = saved_k;
2725                                         k.v     = saved_v;
2726                                         goto got_key;
2727                                 }
2728
2729                                 if (bch2_snapshot_is_ancestor(iter->trans->c,
2730                                                               iter->snapshot,
2731                                                               k.k->p.snapshot)) {
2732                                         if (saved_path)
2733                                                 bch2_path_put(trans, saved_path,
2734                                                       iter->flags & BTREE_ITER_INTENT);
2735                                         saved_path = btree_path_clone(trans, iter->path,
2736                                                                 iter->flags & BTREE_ITER_INTENT);
2737                                         saved_k = *k.k;
2738                                         saved_v = k.v;
2739                                 }
2740
2741                                 search_key = bpos_predecessor(k.k->p);
2742                                 continue;
2743                         }
2744 got_key:
2745                         if (bkey_whiteout(k.k) &&
2746                             !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2747                                 search_key = bkey_predecessor(iter, k.k->p);
2748                                 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2749                                         search_key.snapshot = U32_MAX;
2750                                 continue;
2751                         }
2752
2753                         break;
2754                 } else if (likely(bpos_cmp(iter->path->l[0].b->data->min_key, POS_MIN))) {
2755                         /* Advance to previous leaf node: */
2756                         search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
2757                 } else {
2758                         /* Start of btree: */
2759                         bch2_btree_iter_set_pos(iter, POS_MIN);
2760                         k = bkey_s_c_null;
2761                         goto out;
2762                 }
2763         }
2764
2765         EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
2766
2767         /* Extents can straddle iter->pos: */
2768         if (bkey_cmp(k.k->p, iter->pos) < 0)
2769                 iter->pos = k.k->p;
2770
2771         if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2772                 iter->pos.snapshot = iter->snapshot;
2773 out:
2774         if (saved_path)
2775                 bch2_path_put(trans, saved_path, iter->flags & BTREE_ITER_INTENT);
2776         iter->path->should_be_locked = true;
2777
2778         bch2_btree_iter_verify_entry_exit(iter);
2779         bch2_btree_iter_verify(iter);
2780
2781         return k;
2782 }
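
/*
 * Editor's sketch: a reverse walk mirrors the forward case, pairing
 * peek_prev() with prev() below (illustrative only):
 *
 *	for (k = bch2_btree_iter_peek_prev(&iter);
 *	     k.k && !bkey_err(k);
 *	     k = bch2_btree_iter_prev(&iter)) {
 *		... use k ...
 *	}
 */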
2783
2784 /**
2785  * bch2_btree_iter_prev: returns first key less than iterator's current
2786  * position
2787  */
2788 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
2789 {
2790         if (!bch2_btree_iter_rewind(iter))
2791                 return bkey_s_c_null;
2792
2793         return bch2_btree_iter_peek_prev(iter);
2794 }
2795
2796 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
2797 {
2798         struct btree_trans *trans = iter->trans;
2799         struct bpos search_key;
2800         struct bkey_s_c k;
2801         int ret;
2802
2803         bch2_btree_iter_verify(iter);
2804         bch2_btree_iter_verify_entry_exit(iter);
2805         EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS);
2806         EBUG_ON(iter->path->level && (iter->flags & BTREE_ITER_WITH_KEY_CACHE));
2807
2808         /* extents can't span inode numbers: */
2809         if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
2810             unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2811                 if (iter->pos.inode == KEY_INODE_MAX)
2812                         return bkey_s_c_null;
2813
2814                 bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
2815         }
2816
2817         search_key = btree_iter_search_key(iter);
2818         iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2819                                         iter->flags & BTREE_ITER_INTENT,
2820                                         btree_iter_ip_allocated(iter));
2821
2822         ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2823         if (unlikely(ret))
2824                 return bkey_s_c_err(ret);
2825
2826         if ((iter->flags & BTREE_ITER_CACHED) ||
2827             !(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
2828                 struct bkey_i *next_update;
2829
2830                 if ((iter->flags & BTREE_ITER_WITH_UPDATES) &&
2831                     (next_update = btree_trans_peek_updates(trans,
2832                                                 iter->btree_id, search_key)) &&
2833                     !bpos_cmp(next_update->k.p, iter->pos)) {
2834                         iter->k = next_update->k;
2835                         k = bkey_i_to_s_c(next_update);
2836                         goto out;
2837                 }
2838
2839                 if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL) &&
2840                     (next_update = bch2_btree_journal_peek_slot(trans,
2841                                         iter, iter->pos))) {
2842                         iter->k = next_update->k;
2843                         k = bkey_i_to_s_c(next_update);
2844                         goto out;
2845                 }
2846
2847                 if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
2848                     (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
2849                         if (!bkey_err(k))
2850                                 iter->k = *k.k;
2851                         goto out;
2852                 }
2853
2854                 k = bch2_btree_path_peek_slot(iter->path, &iter->k);
2855         } else {
2856                 struct bpos next;
2857
2858                 EBUG_ON(iter->path->level);
2859
2860                 if (iter->flags & BTREE_ITER_INTENT) {
2861                         struct btree_iter iter2;
2862                         struct bpos end = iter->pos;
2863
2864                         if (iter->flags & BTREE_ITER_IS_EXTENTS)
2865                                 end.offset = U64_MAX;
2866
2867                         bch2_trans_copy_iter(&iter2, iter);
2868                         k = bch2_btree_iter_peek_upto(&iter2, end);
2869
2870                         if (k.k && !bkey_err(k)) {
2871                                 iter->k = iter2.k;
2872                                 k.k = &iter->k;
2873                         }
2874                         bch2_trans_iter_exit(trans, &iter2);
2875                 } else {
2876                         struct bpos pos = iter->pos;
2877
2878                         k = bch2_btree_iter_peek(iter);
2879                         iter->pos = pos;
2880                 }
2881
2882                 if (unlikely(bkey_err(k)))
2883                         return k;
2884
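                /*
                 * No key starts exactly at iter->pos: synthesize a deleted
                 * key (and, for extents, size it to cover the hole up to the
                 * next key or the end of the inode) so the caller always
                 * gets a key back for this slot:
                 */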
2885                 next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2886
2887                 if (bkey_cmp(iter->pos, next) < 0) {
2888                         bkey_init(&iter->k);
2889                         iter->k.p = iter->pos;
2890
2891                         if (iter->flags & BTREE_ITER_IS_EXTENTS) {
2892                                 bch2_key_resize(&iter->k,
2893                                                 min_t(u64, KEY_SIZE_MAX,
2894                                                       (next.inode == iter->pos.inode
2895                                                        ? next.offset
2896                                                        : KEY_OFFSET_MAX) -
2897                                                       iter->pos.offset));
2898                                 EBUG_ON(!iter->k.size);
2899                         }
2900
2901                         k = (struct bkey_s_c) { &iter->k, NULL };
2902                 }
2903         }
2904 out:
2905         iter->path->should_be_locked = true;
2906
2907         bch2_btree_iter_verify_entry_exit(iter);
2908         bch2_btree_iter_verify(iter);
2909         ret = bch2_btree_iter_verify_ret(iter, k);
2910         if (unlikely(ret))
2911                 return bkey_s_c_err(ret);
2912
2913         return k;
2914 }
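
/*
 * Example: reading the key at a single position. A minimal sketch, not part
 * of this file - @c is the filesystem, @pos the position to read, and -EINTR
 * handling (restarting via bch2_trans_begin()) is omitted for brevity:
 *
 *	struct btree_trans trans;
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	bch2_trans_iter_init(&trans, &iter, BTREE_ID_inodes, pos, 0);
 *
 *	k = bch2_btree_iter_peek_slot(&iter);
 *	ret = bkey_err(k);
 *	if (!ret) {
 *		// k.k is non-NULL here: a hole comes back as a synthesized
 *		// KEY_TYPE_deleted key rather than as a missing key
 *	}
 *
 *	bch2_trans_iter_exit(&trans, &iter);
 *	bch2_trans_exit(&trans);
 */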
2915
2916 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
2917 {
2918         if (!bch2_btree_iter_advance(iter))
2919                 return bkey_s_c_null;
2920
2921         return bch2_btree_iter_peek_slot(iter);
2922 }
2923
2924 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
2925 {
2926         if (!bch2_btree_iter_rewind(iter))
2927                 return bkey_s_c_null;
2928
2929         return bch2_btree_iter_peek_slot(iter);
2930 }
2931
2932 /* Transaction and btree_path list management: */
2933
2934 static inline void btree_path_verify_sorted_ref(struct btree_trans *trans,
2935                                                 struct btree_path *path)
2936 {
2937         EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2938         EBUG_ON(trans->sorted[path->sorted_idx] != path->idx);
2939         EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
2940 }
2941
2942 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2943 {
2944 #ifdef CONFIG_BCACHEFS_DEBUG
2945         unsigned i;
2946
2947         for (i = 0; i < trans->nr_sorted; i++)
2948                 btree_path_verify_sorted_ref(trans, trans->paths + trans->sorted[i]);
2949 #endif
2950 }
2951
2952 static void btree_trans_verify_sorted(struct btree_trans *trans)
2953 {
2954 #ifdef CONFIG_BCACHEFS_DEBUG
2955         struct btree_path *path, *prev = NULL;
2956         unsigned i;
2957
2958         if (!bch2_debug_check_iterators)
2959                 return;
2960
2961         trans_for_each_path_inorder(trans, path, i) {
2962                 if (prev && btree_path_cmp(prev, path) > 0) {
2963                         bch2_dump_trans_paths_updates(trans);
2964                         panic("trans paths out of order!\n");
2965                 }
2966                 prev = path;
2967         }
2968 #endif
2969 }
2970
2971 static inline void btree_path_swap(struct btree_trans *trans,
2972                                    struct btree_path *l, struct btree_path *r)
2973 {
2974         swap(l->sorted_idx, r->sorted_idx);
2975         swap(trans->sorted[l->sorted_idx],
2976              trans->sorted[r->sorted_idx]);
2977
2978         btree_path_verify_sorted_ref(trans, l);
2979         btree_path_verify_sorted_ref(trans, r);
2980 }
2981
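/*
 * Restore sort order after a path's search key changed: bubble the path past
 * its neighbours in the sorted list, insertion-sort style. @cmp is the sign
 * of the position change - <= 0 means the path may now sort earlier, >= 0
 * later - which lets us skip scanning in the direction it can't have moved:
 */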
2982 inline void bch2_btree_path_check_sort(struct btree_trans *trans, struct btree_path *path,
2983                                        int cmp)
2984 {
2985         struct btree_path *n;
2986
2987         if (cmp <= 0) {
2988                 n = prev_btree_path(trans, path);
2989                 if (n && btree_path_cmp(n, path) > 0) {
2990                         do {
2991                                 btree_path_swap(trans, n, path);
2992                                 n = prev_btree_path(trans, path);
2993                         } while (n && btree_path_cmp(n, path) > 0);
2994
2995                         goto out;
2996                 }
2997         }
2998
2999         if (cmp >= 0) {
3000                 n = next_btree_path(trans, path);
3001                 if (n && btree_path_cmp(path, n) > 0) {
3002                         do {
3003                                 btree_path_swap(trans, path, n);
3004                                 n = next_btree_path(trans, path);
3005                         } while (n && btree_path_cmp(path, n) > 0);
3006                 }
3007         }
3008 out:
3009         btree_trans_verify_sorted(trans);
3010 }
3011
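/*
 * trans->sorted is a dense array of path indices kept in btree_path_cmp()
 * order; inserting or removing an entry shifts the tail of the array, so
 * each shifted path's sorted_idx back-pointer has to be fixed up:
 */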
3012 static inline void btree_path_list_remove(struct btree_trans *trans,
3013                                           struct btree_path *path)
3014 {
3015         unsigned i;
3016
3017         EBUG_ON(path->sorted_idx >= trans->nr_sorted);
3018
3019         array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
3020
3021         for (i = path->sorted_idx; i < trans->nr_sorted; i++)
3022                 trans->paths[trans->sorted[i]].sorted_idx = i;
3023
3024         path->sorted_idx = U8_MAX;
3025
3026         btree_trans_verify_sorted_refs(trans);
3027 }
3028
3029 static inline void btree_path_list_add(struct btree_trans *trans,
3030                                        struct btree_path *pos,
3031                                        struct btree_path *path)
3032 {
3033         unsigned i;
3034
3035         btree_trans_verify_sorted_refs(trans);
3036
3037         path->sorted_idx = pos ? pos->sorted_idx + 1 : 0;
3038
3039         if (trans->in_traverse_all &&
3040             trans->traverse_all_idx != U8_MAX &&
3041             trans->traverse_all_idx >= path->sorted_idx)
3042                 trans->traverse_all_idx++;
3043
3044         array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx);
3045
3046         for (i = path->sorted_idx; i < trans->nr_sorted; i++)
3047                 trans->paths[trans->sorted[i]].sorted_idx = i;
3048
3049         btree_trans_verify_sorted_refs(trans);
3050 }
3051
3052 void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
3053 {
3054         if (iter->path)
3055                 bch2_path_put(trans, iter->path,
3056                               iter->flags & BTREE_ITER_INTENT);
3057         if (iter->update_path)
3058                 bch2_path_put(trans, iter->update_path,
3059                               iter->flags & BTREE_ITER_INTENT);
3060         if (iter->key_cache_path)
3061                 bch2_path_put(trans, iter->key_cache_path,
3062                               iter->flags & BTREE_ITER_INTENT);
3063         iter->path = NULL;
3064         iter->update_path = NULL;
3065         iter->key_cache_path = NULL;
3066 }
3067
3068 static void __bch2_trans_iter_init(struct btree_trans *trans,
3069                                    struct btree_iter *iter,
3070                                    unsigned btree_id, struct bpos pos,
3071                                    unsigned locks_want,
3072                                    unsigned depth,
3073                                    unsigned flags,
3074                                    unsigned long ip)
3075 {
3076         EBUG_ON(trans->restarted);
3077
3078         if (flags & BTREE_ITER_ALL_LEVELS)
3079                 flags |= BTREE_ITER_ALL_SNAPSHOTS|__BTREE_ITER_ALL_SNAPSHOTS;
3080
3081         if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
3082             btree_node_type_is_extents(btree_id))
3083                 flags |= BTREE_ITER_IS_EXTENTS;
3084
3085         if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
3086             !btree_type_has_snapshots(btree_id))
3087                 flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
3088
3089         if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
3090             btree_type_has_snapshots(btree_id))
3091                 flags |= BTREE_ITER_FILTER_SNAPSHOTS;
3092
3093         if (!test_bit(JOURNAL_REPLAY_DONE, &trans->c->journal.flags))
3094                 flags |= BTREE_ITER_WITH_JOURNAL;
3095
3096         iter->trans     = trans;
3097         iter->path      = NULL;
3098         iter->update_path = NULL;
3099         iter->key_cache_path = NULL;
3100         iter->btree_id  = btree_id;
3101         iter->min_depth = depth;
3102         iter->flags     = flags;
3103         iter->snapshot  = pos.snapshot;
3104         iter->pos       = pos;
3105         iter->k.type    = KEY_TYPE_deleted;
3106         iter->k.p       = pos;
3107         iter->k.size    = 0;
3108         iter->journal_idx = 0;
3109         iter->journal_pos = POS_MIN;
3110 #ifdef CONFIG_BCACHEFS_DEBUG
3111         iter->ip_allocated = ip;
3112 #endif
3113
3114         iter->path = bch2_path_get(trans, btree_id, iter->pos,
3115                                    locks_want, depth, flags, ip);
3116 }
3117
3118 void bch2_trans_iter_init(struct btree_trans *trans,
3119                           struct btree_iter *iter,
3120                           unsigned btree_id, struct bpos pos,
3121                           unsigned flags)
3122 {
3123         if (!btree_id_cached(trans->c, btree_id)) {
3124                 flags &= ~BTREE_ITER_CACHED;
3125                 flags &= ~BTREE_ITER_WITH_KEY_CACHE;
3126         } else if (!(flags & BTREE_ITER_CACHED))
3127                 flags |= BTREE_ITER_WITH_KEY_CACHE;
3128
3129         __bch2_trans_iter_init(trans, iter, btree_id, pos,
3130                                0, 0, flags, _RET_IP_);
3131 }
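
/*
 * Example: walking keys with an explicit peek/advance loop - a minimal
 * sketch, with the same declarations as the peek_slot example above and a
 * hypothetical @inum. (Real callers normally use the for_each_btree_key*()
 * wrappers, which also handle -EINTR restarts.)
 *
 *	ret = 0;
 *	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
 *			     POS(inum, 0), BTREE_ITER_PREFETCH);
 *
 *	while ((k = bch2_btree_iter_peek(&iter)).k &&
 *	       !(ret = bkey_err(k))) {
 *		// ... process k ...
 *		bch2_btree_iter_advance(&iter);
 *	}
 *
 *	bch2_trans_iter_exit(&trans, &iter);
 */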
3132
3133 void bch2_trans_node_iter_init(struct btree_trans *trans,
3134                                struct btree_iter *iter,
3135                                enum btree_id btree_id,
3136                                struct bpos pos,
3137                                unsigned locks_want,
3138                                unsigned depth,
3139                                unsigned flags)
3140 {
3141         __bch2_trans_iter_init(trans, iter, btree_id, pos, locks_want, depth,
3142                                BTREE_ITER_NOT_EXTENTS|
3143                                __BTREE_ITER_ALL_SNAPSHOTS|
3144                                BTREE_ITER_ALL_SNAPSHOTS|
3145                                flags, _RET_IP_);
3146         BUG_ON(iter->path->locks_want    < min(locks_want, BTREE_MAX_DEPTH));
3147         BUG_ON(iter->path->level        != depth);
3148         BUG_ON(iter->min_depth          != depth);
3149 }
3150
3151 void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
3152 {
3153         *dst = *src;
3154         if (src->path)
3155                 __btree_path_get(src->path, src->flags & BTREE_ITER_INTENT);
3156         if (src->update_path)
3157                 __btree_path_get(src->update_path, src->flags & BTREE_ITER_INTENT);
3158         dst->key_cache_path = NULL;
3159 }
3160
3161 void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
3162 {
3163         size_t new_top = trans->mem_top + size;
3164         void *p;
3165
3166         if (new_top > trans->mem_bytes) {
3167                 size_t old_bytes = trans->mem_bytes;
3168                 size_t new_bytes = roundup_pow_of_two(new_top);
3169                 void *new_mem;
3170
3171                 WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
3172
3173                 new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
3174                 if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
3175                         new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
3176                         new_bytes = BTREE_TRANS_MEM_MAX;
3177                         kfree(trans->mem);
3178                 }
3179
3180                 if (!new_mem)
3181                         return ERR_PTR(-ENOMEM);
3182
3183                 trans->mem = new_mem;
3184                 trans->mem_bytes = new_bytes;
3185
3186                 if (old_bytes) {
3187                         trace_trans_restart_mem_realloced(trans->fn, _RET_IP_, new_bytes);
3188                         btree_trans_restart(trans);
3189                         return ERR_PTR(-EINTR);
3190                 }
3191         }
3192
3193         p = trans->mem + trans->mem_top;
3194         trans->mem_top += size;
3195         memset(p, 0, size);
3196         return p;
3197 }
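
/*
 * Example use of the bump allocator above - a minimal sketch. Allocations
 * are zeroed and live until the transaction is reset; note that growing the
 * buffer mid-transaction restarts it, so the ERR_PTR(-EINTR) case must be
 * propagated so the whole transaction gets retried:
 *
 *	struct bkey_i *new_key = bch2_trans_kmalloc(trans, sizeof(*new_key));
 *	if (IS_ERR(new_key))
 *		return PTR_ERR(new_key);	// -ENOMEM, or -EINTR: retry
 *	bkey_init(&new_key->k);
 */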
3198
3199 /**
3200  * bch2_trans_begin() - reset a transaction after an interrupted attempt
3201  * @trans: transaction to reset
3202  *
3203  * While iterating over or updating btree nodes, an attempt to lock a btree
3204  * node may return EINTR when the trylock fails. When this occurs
3205  * bch2_trans_begin() should be called and the transaction retried.
3206  */
3207 void bch2_trans_begin(struct btree_trans *trans)
3208 {
3209         struct btree_path *path;
3210
3211         bch2_trans_reset_updates(trans);
3212
3213         trans->mem_top                  = 0;
3214
3215         if (trans->fs_usage_deltas) {
3216                 trans->fs_usage_deltas->used = 0;
3217                 memset((void *) trans->fs_usage_deltas +
3218                        offsetof(struct replicas_delta_list, memset_start), 0,
3219                        (void *) &trans->fs_usage_deltas->memset_end -
3220                        (void *) &trans->fs_usage_deltas->memset_start);
3221         }
3222
3223         trans_for_each_path(trans, path) {
3224                 path->should_be_locked = false;
3225
3226                 /*
3227                  * If the transaction wasn't restarted, we're presuming to be
3228                  * doing something new: dont keep iterators excpt the ones that
3229                  * doing something new: don't keep iterators except the ones
3230                  * that are in use - and the subvolumes btree:
3231                 if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
3232                         path->preserve = false;
3233
3234                 /*
3235                  * XXX: we probably shouldn't be doing this if the transaction
3236                  * was restarted, but currently we still overflow transaction
3237                  * iterators if we do that
3238                  */
3239                 if (!path->ref && !path->preserve)
3240                         __bch2_path_free(trans, path);
3241                 else
3242                         path->preserve = false;
3243         }
3244
3245         if (!trans->restarted &&
3246             (need_resched() ||
3247              ktime_get_ns() - trans->last_begin_time > BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS)) {
3248                 bch2_trans_unlock(trans);
3249                 cond_resched();
3250                 bch2_trans_relock(trans);
3251         }
3252
3253         if (trans->restarted)
3254                 bch2_btree_path_traverse_all(trans);
3255
3256         trans->restarted = false;
3257         trans->last_begin_time = ktime_get_ns();
3258 }
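
/*
 * Example of the retry pattern this enables - a minimal sketch, where
 * do_op() is a hypothetical transactional operation that may fail with
 * -EINTR:
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	do {
 *		bch2_trans_begin(&trans);
 *		ret = do_op(&trans);
 *	} while (ret == -EINTR);
 *	bch2_trans_exit(&trans);
 *
 * (The bch2_trans_do() helpers in btree_update.h package this loop up.)
 */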
3259
3260 static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c)
3261 {
3262         size_t paths_bytes      = sizeof(struct btree_path) * BTREE_ITER_MAX;
3263         size_t updates_bytes    = sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
3264         void *p = NULL;
3265
3266         BUG_ON(trans->used_mempool);
3267
3268 #ifdef __KERNEL__
3269         p = this_cpu_xchg(c->btree_paths_bufs->path, NULL);
3270 #endif
3271         if (!p)
3272                 p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);
3273
3274         trans->paths            = p; p += paths_bytes;
3275         trans->updates          = p; p += updates_bytes;
3276 }
3277
3278 void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
3279                        unsigned expected_nr_iters,
3280                        size_t expected_mem_bytes,
3281                        const char *fn)
3282         __acquires(&c->btree_trans_barrier)
3283 {
3284         struct btree_trans *pos;
3285
3286         BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
3287
3288         memset(trans, 0, sizeof(*trans));
3289         trans->c                = c;
3290         trans->fn               = fn;
3291         trans->last_begin_time  = ktime_get_ns();
3292         trans->task             = current;
3293
3294         bch2_trans_alloc_paths(trans, c);
3295
3296         if (expected_mem_bytes) {
3297                 trans->mem_bytes = roundup_pow_of_two(expected_mem_bytes);
3298                 trans->mem = kmalloc(trans->mem_bytes, GFP_KERNEL|__GFP_NOFAIL);
3299
3300                 if (unlikely(!trans->mem)) {
3301                         trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
3302                         trans->mem_bytes = BTREE_TRANS_MEM_MAX;
3303                 }
3304         }
3305
3306         trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
3307
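        /* keep btree_trans_list sorted by pid, so the debug listing is ordered: */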
3308         mutex_lock(&c->btree_trans_lock);
3309         list_for_each_entry(pos, &c->btree_trans_list, list) {
3310                 if (trans->task->pid < pos->task->pid) {
3311                         list_add_tail(&trans->list, &pos->list);
3312                         goto list_add_done;
3313                 }
3314         }
3315         list_add_tail(&trans->list, &c->btree_trans_list);
3316 list_add_done:
3317         mutex_unlock(&c->btree_trans_lock);
3318 }
3319
3320 static void check_btree_paths_leaked(struct btree_trans *trans)
3321 {
3322 #ifdef CONFIG_BCACHEFS_DEBUG
3323         struct bch_fs *c = trans->c;
3324         struct btree_path *path;
3325
3326         trans_for_each_path(trans, path)
3327                 if (path->ref)
3328                         goto leaked;
3329         return;
3330 leaked:
3331         bch_err(c, "btree paths leaked from %s!", trans->fn);
3332         trans_for_each_path(trans, path)
3333                 if (path->ref)
3334                         printk(KERN_ERR "  btree %s %pS\n",
3335                                bch2_btree_ids[path->btree_id],
3336                                (void *) path->ip_allocated);
3337         /* Be noisy about this: */
3338         bch2_fatal_error(c);
3339 #endif
3340 }
3341
3342 void bch2_trans_exit(struct btree_trans *trans)
3343         __releases(&c->btree_trans_barrier)
3344 {
3345         struct btree_insert_entry *i;
3346         struct bch_fs *c = trans->c;
3347
3348         bch2_trans_unlock(trans);
3349
3350         trans_for_each_update(trans, i)
3351                 __btree_path_put(i->path, true);
3352         trans->nr_updates               = 0;
3353
3354         check_btree_paths_leaked(trans);
3355
3356         mutex_lock(&c->btree_trans_lock);
3357         list_del(&trans->list);
3358         mutex_unlock(&c->btree_trans_lock);
3359
3360         srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3361
3362         bch2_journal_preres_put(&c->journal, &trans->journal_preres);
3363
3364         kfree(trans->extra_journal_entries.data);
3365
3366         if (trans->fs_usage_deltas) {
3367                 if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
3368                     REPLICAS_DELTA_LIST_MAX)
3369                         mempool_free(trans->fs_usage_deltas,
3370                                      &c->replicas_delta_pool);
3371                 else
3372                         kfree(trans->fs_usage_deltas);
3373         }
3374
3375         if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
3376                 mempool_free(trans->mem, &c->btree_trans_mem_pool);
3377         else
3378                 kfree(trans->mem);
3379
3380 #ifdef __KERNEL__
3381         /*
3382          * Userspace doesn't have a real percpu implementation:
3383          */
3384         trans->paths = this_cpu_xchg(c->btree_paths_bufs->path, trans->paths);
3385 #endif
3386
3387         if (trans->paths)
3388                 mempool_free(trans->paths, &c->btree_paths_pool);
3389
3390         trans->mem      = (void *) 0x1;
3391         trans->paths    = (void *) 0x1;
3392 }
3393
3394 static void __maybe_unused
3395 bch2_btree_path_node_to_text(struct printbuf *out,
3396                              struct btree_bkey_cached_common *_b,
3397                              bool cached)
3398 {
3399         prt_printf(out, "    l=%u %s:",
3400                _b->level, bch2_btree_ids[_b->btree_id]);
3401         bch2_bpos_to_text(out, btree_node_pos(_b, cached));
3402 }
3403
3404 void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
3405 {
3406         struct btree_path *path;
3407         struct btree *b;
3408         static const char lock_types[] = { 'r', 'i', 'w' };
3409         unsigned l;
3410
3411         prt_printf(out, "%i %s\n", trans->task->pid, trans->fn);
3412
3413         trans_for_each_path(trans, path) {
3414                 if (!path->nodes_locked)
3415                         continue;
3416
3417                 prt_printf(out, "  path %u %c l=%u %s:",
3418                        path->idx,
3419                        path->cached ? 'c' : 'b',
3420                        path->level,
3421                        bch2_btree_ids[path->btree_id]);
3422                 bch2_bpos_to_text(out, path->pos);
3423                 prt_printf(out, "\n");
3424
3425                 for (l = 0; l < BTREE_MAX_DEPTH; l++) {
3426                         if (btree_node_locked(path, l)) {
3427                                 prt_printf(out, "    %s l=%u ",
3428                                        btree_node_intent_locked(path, l) ? "i" : "r", l);
3429                                 bch2_btree_path_node_to_text(out,
3430                                                 (void *) path->l[l].b,
3431                                                 path->cached);
3432                                 prt_printf(out, "\n");
3433                         }
3434                 }
3435         }
3436
3437         b = READ_ONCE(trans->locking);
3438         if (b) {
3439                 path = &trans->paths[trans->locking_path_idx];
3440                 prt_printf(out, "  locking path %u %c l=%u %c %s:",
3441                        trans->locking_path_idx,
3442                        path->cached ? 'c' : 'b',
3443                        trans->locking_level,
3444                        lock_types[trans->locking_lock_type],
3445                        bch2_btree_ids[trans->locking_btree_id]);
3446                 bch2_bpos_to_text(out, trans->locking_pos);
3447
3448                 prt_printf(out, " node ");
3449                 bch2_btree_path_node_to_text(out,
3450                                 (void *) b, path->cached);
3451                 prt_printf(out, "\n");
3452         }
3453 }
3454
3455 void bch2_fs_btree_iter_exit(struct bch_fs *c)
3456 {
3457         if (c->btree_trans_barrier_initialized)
3458                 cleanup_srcu_struct(&c->btree_trans_barrier);
3459         mempool_exit(&c->btree_trans_mem_pool);
3460         mempool_exit(&c->btree_paths_pool);
3461 }
3462
3463 int bch2_fs_btree_iter_init(struct bch_fs *c)
3464 {
3465         unsigned nr = BTREE_ITER_MAX;
3466         int ret;
3467
3468         INIT_LIST_HEAD(&c->btree_trans_list);
3469         mutex_init(&c->btree_trans_lock);
3470
3471         ret   = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
3472                         sizeof(struct btree_path) * nr +
3473                         sizeof(struct btree_insert_entry) * nr) ?:
3474                 mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
3475                                           BTREE_TRANS_MEM_MAX) ?:
3476                 init_srcu_struct(&c->btree_trans_barrier);
3477         if (!ret)
3478                 c->btree_trans_barrier_initialized = true;
3479         return ret;
3480 }