// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "recovery.h"
#include "replicas.h"
#include "subvolume.h"

#include <linux/prandom.h>
#include <linux/prefetch.h>
#include <trace/events/bcachefs.h>

static void btree_trans_verify_sorted(struct btree_trans *);
inline void bch2_btree_path_check_sort(struct btree_trans *, struct btree_path *, int);

static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
				       struct btree_path *);

static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	return iter->ip_allocated;
#else
	return 0;
#endif
}

static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);

/*
 * Unlocks before scheduling
 * Note: does not revalidate iterator
 */
static inline int bch2_trans_cond_resched(struct btree_trans *trans)
{
	if (need_resched() || race_fault()) {
		bch2_trans_unlock(trans);
		schedule();
		return bch2_trans_relock(trans);
	} else {
		return 0;
	}
}

static inline int __btree_path_cmp(const struct btree_path *l,
				   enum btree_id	r_btree_id,
				   bool			r_cached,
				   struct bpos		r_pos,
				   unsigned		r_level)
{
	/*
	 * Must match lock ordering as defined by __bch2_btree_node_lock:
	 */
	return   cmp_int(l->btree_id,	r_btree_id) ?:
		 cmp_int((int) l->cached,	(int) r_cached) ?:
		 bpos_cmp(l->pos,	r_pos) ?:
		-cmp_int(l->level,	r_level);
}

static inline int btree_path_cmp(const struct btree_path *l,
				 const struct btree_path *r)
{
	return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
}
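
/*
 * Illustrative example (not from the original source): under this
 * ordering, paths sort by btree, then non-cached before cached, then by
 * position, with interior nodes *before* their descendants at the same
 * position - e.g.
 *
 *   (extents, !cached, POS(1, 10), level 1)
 * < (extents, !cached, POS(1, 10), level 0)
 * < (extents,  cached, POS(1, 10), level 0)
 * < (inodes,  !cached, POS(1, 10), level 0)
 *
 * assuming BTREE_ID_extents < BTREE_ID_inodes.
 */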

static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
{
	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
		p = bpos_successor(p);
	} else {
		p = bpos_nosnap_successor(p);
		p.snapshot = iter->snapshot;
	}

	return p;
}

static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
{
	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
		p = bpos_predecessor(p);
	} else {
		p = bpos_nosnap_predecessor(p);
		p.snapshot = iter->snapshot;
	}

	return p;
}
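
/*
 * Worked example (illustrative, not part of the original source): for an
 * iterator in snapshot 5 without BTREE_ITER_ALL_SNAPSHOTS,
 *
 *   bkey_successor(iter, SPOS(10, 20, 5)) == SPOS(10, 21, 5)
 *
 * - the snapshot field is skipped over and pinned back to iter->snapshot.
 * With BTREE_ITER_ALL_SNAPSHOTS set, the snapshot field (the least
 * significant part of the position) is incremented first:
 *
 *   bpos_successor(SPOS(10, 20, 5)) == SPOS(10, 20, 6)
 */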

static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
{
	struct bpos pos = iter->pos;

	if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	    bkey_cmp(pos, POS_MAX))
		pos = bkey_successor(iter, pos);
	return pos;
}

static inline bool btree_path_pos_before_node(struct btree_path *path,
					      struct btree *b)
{
	return bpos_cmp(path->pos, b->data->min_key) < 0;
}

static inline bool btree_path_pos_after_node(struct btree_path *path,
					     struct btree *b)
{
	return bpos_cmp(b->key.k.p, path->pos) < 0;
}

static inline bool btree_path_pos_in_node(struct btree_path *path,
					  struct btree *b)
{
	return path->btree_id == b->c.btree_id &&
		!btree_path_pos_before_node(path, b) &&
		!btree_path_pos_after_node(path, b);
}

/* Btree node locking: */

void bch2_btree_node_unlock_write(struct btree_trans *trans,
			struct btree_path *path, struct btree *b)
{
	bch2_btree_node_unlock_write_inlined(trans, path, b);
}

struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
						  struct btree_path *skip,
						  struct btree *b,
						  unsigned level)
{
	struct btree_path *path;
	struct six_lock_count ret = { 0, 0 };

	if (IS_ERR_OR_NULL(b))
		return ret;

	trans_for_each_path(trans, path)
		if (path != skip && path->l[level].b == b) {
			ret.read += btree_node_read_locked(path, level);
			ret.intent += btree_node_intent_locked(path, level);
		}

	return ret;
}

static inline void six_lock_readers_add(struct six_lock *lock, int nr)
{
	if (!lock->readers)
		atomic64_add(__SIX_VAL(read_lock, nr), &lock->state.counter);
	else
		this_cpu_add(*lock->readers, nr);
}

void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
{
	int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->c.level).read;

	/*
	 * Must drop our read locks before calling six_lock_write() -
	 * six_unlock() won't do wakeups until the reader count
	 * goes to 0, and it's safe because we have the node intent
	 * locked:
	 */
	six_lock_readers_add(&b->c.lock, -readers);
	six_lock_write(&b->c.lock, NULL, NULL);
	six_lock_readers_add(&b->c.lock, readers);
}
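
/*
 * Sketch of the sequence above (illustrative): if paths in this trans
 * hold the node read locked twice, readers == 2; we subtract 2 from the
 * lock's reader count so six_lock_write() can succeed (write locks
 * require zero readers), then restore the count once the write lock is
 * held. This is only safe because our intent lock prevents any other
 * thread from taking the write lock while the count is artificially low.
 */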

bool __bch2_btree_node_relock(struct btree_trans *trans,
			      struct btree_path *path, unsigned level)
{
	struct btree *b = btree_path_node(path, level);
	int want = __btree_lock_want(path, level);

	if (!is_btree_node(path, level))
		goto fail;

	if (race_fault())
		goto fail;

	if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
	    (btree_node_lock_seq_matches(path, b, level) &&
	     btree_node_lock_increment(trans, b, level, want))) {
		mark_btree_node_locked(trans, path, level, want);
		return true;
	}
fail:
	if (b != ERR_PTR(-BCH_ERR_no_btree_node_cached) &&
	    b != ERR_PTR(-BCH_ERR_no_btree_node_init))
		trace_btree_node_relock_fail(trans, _RET_IP_, path, level);
	return false;
}

bool bch2_btree_node_upgrade(struct btree_trans *trans,
			     struct btree_path *path, unsigned level)
{
	struct btree *b = path->l[level].b;

	if (!is_btree_node(path, level))
		return false;

	switch (btree_lock_want(path, level)) {
	case BTREE_NODE_UNLOCKED:
		BUG_ON(btree_node_locked(path, level));
		return true;
	case BTREE_NODE_READ_LOCKED:
		BUG_ON(btree_node_intent_locked(path, level));
		return bch2_btree_node_relock(trans, path, level);
	case BTREE_NODE_INTENT_LOCKED:
		break;
	}

	if (btree_node_intent_locked(path, level))
		return true;

	if (race_fault())
		return false;

	if (btree_node_locked(path, level)
	    ? six_lock_tryupgrade(&b->c.lock)
	    : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
		goto success;

	if (btree_node_lock_seq_matches(path, b, level) &&
	    btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
		btree_node_unlock(trans, path, level);
		goto success;
	}

	trace_btree_node_upgrade_fail(trans, _RET_IP_, path, level);
	return false;
success:
	mark_btree_node_intent_locked(trans, path, level);
	return true;
}
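
/*
 * Illustrative note: an upgrade can succeed three ways above - upgrading
 * a read lock we still hold, relocking straight to an intent lock via the
 * saved lock sequence number, or piggybacking on another path in this
 * trans that already has the node intent locked
 * (btree_node_lock_increment()).
 */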

static inline bool btree_path_get_locks(struct btree_trans *trans,
					struct btree_path *path,
					bool upgrade)
{
	unsigned l = path->level;
	int fail_idx = -1;

	do {
		if (!btree_path_node(path, l))
			break;

		if (!(upgrade
		      ? bch2_btree_node_upgrade(trans, path, l)
		      : bch2_btree_node_relock(trans, path, l)))
			fail_idx = l;

		l++;
	} while (l < path->locks_want);

	/*
	 * When we fail to get a lock, we have to ensure that any child nodes
	 * can't be relocked so bch2_btree_path_traverse has to walk back up to
	 * the node that we failed to relock:
	 */
	if (fail_idx >= 0) {
		__bch2_btree_path_unlock(trans, path);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);

		do {
			path->l[fail_idx].b = upgrade
				? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
				: ERR_PTR(-BCH_ERR_no_btree_node_relock);
			--fail_idx;
		} while (fail_idx >= 0);
	}

	if (path->uptodate == BTREE_ITER_NEED_RELOCK)
		path->uptodate = BTREE_ITER_UPTODATE;

	bch2_trans_verify_locks(trans);

	return path->uptodate < BTREE_ITER_NEED_RELOCK;
}
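
/*
 * Example of the failure path above (illustrative): if locks are wanted
 * on levels 0-2 and relocking level 1 fails, levels 0 and 1 are poisoned
 * with error pointers, so the next traverse walks back up and re-descends
 * from level 2 rather than reusing a child whose ancestor lock was lost.
 */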

static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
				  bool cached)
{
	return !cached
		? container_of(_b, struct btree, c)->key.k.p
		: container_of(_b, struct bkey_cached, c)->key.pos;
}

/* Slowpath: */
int __bch2_btree_node_lock(struct btree_trans *trans,
			   struct btree_path *path,
			   struct btree *b,
			   struct bpos pos, unsigned level,
			   enum six_lock_type type,
			   six_lock_should_sleep_fn should_sleep_fn, void *p,
			   unsigned long ip)
{
	struct btree_path *linked;
	unsigned reason;

	/* Check if it's safe to block: */
	trans_for_each_path(trans, linked) {
		if (!linked->nodes_locked)
			continue;

		/*
		 * Can't block taking an intent lock if we have _any_ nodes read
		 * locked:
		 *
		 * - Our read lock blocks another thread with an intent lock on
		 *   the same node from getting a write lock, and thus from
		 *   dropping its intent lock
		 *
		 * - And the other thread may have multiple nodes intent locked:
		 *   both the node we want to intent lock, and the node we
		 *   already have read locked - deadlock:
		 */
		if (type == SIX_LOCK_intent &&
		    linked->nodes_locked != linked->nodes_intent_locked) {
			reason = 1;
			goto deadlock;
		}

		if (linked->btree_id != path->btree_id) {
			if (linked->btree_id < path->btree_id)
				continue;

			reason = 3;
			goto deadlock;
		}

		/*
		 * Within the same btree, non-cached paths come before cached
		 * paths:
		 */
		if (linked->cached != path->cached) {
			if (!linked->cached)
				continue;

			reason = 4;
			goto deadlock;
		}

		/*
		 * Interior nodes must be locked before their descendants: if
		 * another path has possibly locked descendants of the node
		 * we're about to lock, it must have the ancestors locked too:
		 */
		if (level > __fls(linked->nodes_locked)) {
			reason = 5;
			goto deadlock;
		}

		/* Must lock btree nodes in key order: */
		if (btree_node_locked(linked, level) &&
		    bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
						 linked->cached)) <= 0) {
			reason = 7;
			goto deadlock;
		}
	}

	return btree_node_lock_type(trans, path, b, pos, level,
				    type, should_sleep_fn, p);
deadlock:
	trace_trans_restart_would_deadlock(trans, ip, reason, linked, path, &pos);
	return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
}
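
/*
 * Illustrative summary of the checks above (not part of the original
 * source): the ordering that makes blocking safe is the tuple
 *
 *   (btree_id, cached, pos, -level)
 *
 * matching __btree_path_cmp(). E.g. a trans already holding a lock at
 * POS(1, 100) in a btree must not block taking a lock at POS(1, 50) in
 * the same btree: another trans locking in the opposite order could
 * deadlock with us, so we return
 * BCH_ERR_transaction_restart_would_deadlock instead.
 */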

/* Btree iterator locking: */

#ifdef CONFIG_BCACHEFS_DEBUG

static void bch2_btree_path_verify_locks(struct btree_path *path)
{
	unsigned l;

	if (!path->nodes_locked) {
		BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
		       btree_path_node(path, path->level));
		return;
	}

	for (l = 0; btree_path_node(path, l); l++)
		BUG_ON(btree_lock_want(path, l) !=
		       btree_node_locked_type(path, l));
}

void bch2_trans_verify_locks(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		bch2_btree_path_verify_locks(path);
}
#else
static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
#endif

/* Btree path locking: */

/*
 * Only for btree_cache.c - only relocks intent locks
 */
int bch2_btree_path_relock_intent(struct btree_trans *trans,
				  struct btree_path *path)
{
	unsigned l;

	for (l = path->level;
	     l < path->locks_want && btree_path_node(path, l);
	     l++) {
		if (!bch2_btree_node_relock(trans, path, l)) {
			__bch2_btree_path_unlock(trans, path);
			btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
			trace_trans_restart_relock_path_intent(trans, _RET_IP_, path);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
		}
	}

	return 0;
}

__flatten
static bool bch2_btree_path_relock_norestart(struct btree_trans *trans,
			struct btree_path *path, unsigned long trace_ip)
{
	return btree_path_get_locks(trans, path, false);
}

static int bch2_btree_path_relock(struct btree_trans *trans,
			struct btree_path *path, unsigned long trace_ip)
{
	if (!bch2_btree_path_relock_norestart(trans, path, trace_ip)) {
		trace_trans_restart_relock_path(trans, trace_ip, path);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
	}

	return 0;
}

bool __bch2_btree_path_upgrade(struct btree_trans *trans,
			       struct btree_path *path,
			       unsigned new_locks_want)
{
	struct btree_path *linked;

	EBUG_ON(path->locks_want >= new_locks_want);

	path->locks_want = new_locks_want;

	if (btree_path_get_locks(trans, path, true))
		return true;

	/*
	 * XXX: this is ugly - we'd prefer to not be mucking with other
	 * iterators in the btree_trans here.
	 *
	 * On failure to upgrade the iterator, setting iter->locks_want and
	 * calling get_locks() is sufficient to make bch2_btree_path_traverse()
	 * get the locks we want on transaction restart.
	 *
	 * But if this iterator was a clone, on transaction restart what we did
	 * to this iterator isn't going to be preserved.
	 *
	 * Possibly we could add an iterator field for the parent iterator when
	 * an iterator is a copy - for now, we'll just upgrade any other
	 * iterators with the same btree id.
	 *
	 * The code below used to be needed to ensure ancestor nodes get locked
	 * before interior nodes - now that's handled by
	 * bch2_btree_path_traverse_all().
	 */
	if (!path->cached && !trans->in_traverse_all)
		trans_for_each_path(trans, linked)
			if (linked != path &&
			    linked->cached == path->cached &&
			    linked->btree_id == path->btree_id &&
			    linked->locks_want < new_locks_want) {
				linked->locks_want = new_locks_want;
				btree_path_get_locks(trans, linked, true);
			}

	return false;
}

void __bch2_btree_path_downgrade(struct btree_trans *trans,
				 struct btree_path *path,
				 unsigned new_locks_want)
{
	unsigned l;

	EBUG_ON(path->locks_want < new_locks_want);

	path->locks_want = new_locks_want;

	while (path->nodes_locked &&
	       (l = __fls(path->nodes_locked)) >= path->locks_want) {
		if (l > path->level) {
			btree_node_unlock(trans, path, l);
		} else {
			if (btree_node_intent_locked(path, l)) {
				six_lock_downgrade(&path->l[l].b->c.lock);
				path->nodes_intent_locked ^= 1 << l;
			}
			break;
		}
	}

	bch2_btree_path_verify_locks(path);
}

void bch2_trans_downgrade(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		bch2_btree_path_downgrade(trans, path);
}

/* Btree transaction locking: */

int bch2_trans_relock(struct btree_trans *trans)
{
	struct btree_path *path;

	if (unlikely(trans->restarted))
		return -BCH_ERR_transaction_restart_relock;

	trans_for_each_path(trans, path)
		if (path->should_be_locked &&
		    bch2_btree_path_relock(trans, path, _RET_IP_)) {
			trace_trans_restart_relock(trans, _RET_IP_, path);
			BUG_ON(!trans->restarted);
			return -BCH_ERR_transaction_restart_relock;
		}
	return 0;
}

void bch2_trans_unlock(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		__bch2_btree_path_unlock(trans, path);

	/*
	 * bch2_gc_btree_init_recurse() doesn't use btree iterators for walking
	 * btree nodes; it implements its own walking:
	 */
	BUG_ON(!trans->is_initial_gc &&
	       lock_class_is_held(&bch2_btree_node_lock_key));
}

/* Btree iterator: */

#ifdef CONFIG_BCACHEFS_DEBUG

static void bch2_btree_path_verify_cached(struct btree_trans *trans,
					  struct btree_path *path)
{
	struct bkey_cached *ck;
	bool locked = btree_node_locked(path, 0);

	if (!bch2_btree_node_relock(trans, path, 0))
		return;

	ck = (void *) path->l[0].b;
	BUG_ON(ck->key.btree_id != path->btree_id ||
	       bkey_cmp(ck->key.pos, path->pos));

	if (!locked)
		btree_node_unlock(trans, path, 0);
}

static void bch2_btree_path_verify_level(struct btree_trans *trans,
				struct btree_path *path, unsigned level)
{
	struct btree_path_level *l;
	struct btree_node_iter tmp;
	bool locked;
	struct bkey_packed *p, *k;
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	struct printbuf buf3 = PRINTBUF;
	const char *msg;

	if (!bch2_debug_check_iterators)
		return;

	l	= &path->l[level];
	tmp	= l->iter;
	locked	= btree_node_locked(path, level);

	if (path->cached) {
		if (!level)
			bch2_btree_path_verify_cached(trans, path);
		return;
	}

	if (!btree_path_node(path, level))
		return;

	if (!bch2_btree_node_relock(trans, path, level))
		return;

	BUG_ON(!btree_path_pos_in_node(path, l->b));

	bch2_btree_node_iter_verify(&l->iter, l->b);

	/*
	 * For interior nodes, the iterator will have skipped past deleted keys:
	 */
	p = level
		? bch2_btree_node_iter_prev(&tmp, l->b)
		: bch2_btree_node_iter_prev_all(&tmp, l->b);
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);

	if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
		msg = "before";
		goto err;
	}

	if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
		msg = "after";
		goto err;
	}

	if (!locked)
		btree_node_unlock(trans, path, level);
	return;
err:
	bch2_bpos_to_text(&buf1, path->pos);

	if (p) {
		struct bkey uk = bkey_unpack_key(l->b, p);
		bch2_bkey_to_text(&buf2, &uk);
	} else {
		prt_printf(&buf2, "(none)");
	}

	if (k) {
		struct bkey uk = bkey_unpack_key(l->b, k);
		bch2_bkey_to_text(&buf3, &uk);
	} else {
		prt_printf(&buf3, "(none)");
	}

	panic("path should be %s key at level %u:\n"
	      "path pos %s\n"
	      "prev key %s\n"
	      "cur  key %s\n",
	      msg, level, buf1.buf, buf2.buf, buf3.buf);
}

static void bch2_btree_path_verify(struct btree_trans *trans,
				   struct btree_path *path)
{
	struct bch_fs *c = trans->c;
	unsigned i;

	EBUG_ON(path->btree_id >= BTREE_ID_NR);

	for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
		if (!path->l[i].b) {
			BUG_ON(!path->cached &&
			       c->btree_roots[path->btree_id].b->c.level > i);
			break;
		}

		bch2_btree_path_verify_level(trans, path, i);
	}

	bch2_btree_path_verify_locks(path);
}

void bch2_trans_verify_paths(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		bch2_btree_path_verify(trans, path);
}

static void bch2_btree_iter_verify(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;

	BUG_ON(iter->btree_id >= BTREE_ID_NR);

	BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != iter->path->cached);

	BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	       (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));

	BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
	       (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	       !btree_type_has_snapshots(iter->btree_id));

	if (iter->update_path)
		bch2_btree_path_verify(trans, iter->update_path);
	bch2_btree_path_verify(trans, iter->path);
}

static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
{
	BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
	       !iter->pos.snapshot);

	BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	       iter->pos.snapshot != iter->snapshot);

	BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
	       bkey_cmp(iter->pos, iter->k.p) > 0);
}

static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
{
	struct btree_trans *trans = iter->trans;
	struct btree_iter copy;
	struct bkey_s_c prev;
	int ret = 0;

	if (!bch2_debug_check_iterators)
		return 0;

	if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS))
		return 0;

	if (bkey_err(k) || !k.k)
		return 0;

	BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
					  iter->snapshot,
					  k.k->p.snapshot));

	bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
			     BTREE_ITER_NOPRESERVE|
			     BTREE_ITER_ALL_SNAPSHOTS);
	prev = bch2_btree_iter_prev(&copy);
	if (!prev.k)
		goto out;

	ret = bkey_err(prev);
	if (ret)
		goto out;

	if (!bkey_cmp(prev.k->p, k.k->p) &&
	    bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
				      prev.k->p.snapshot) > 0) {
		struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

		bch2_bkey_to_text(&buf1, k.k);
		bch2_bkey_to_text(&buf2, prev.k);

		panic("iter snap %u\n"
		      "k    %s\n"
		      "prev %s\n",
		      iter->snapshot,
		      buf1.buf, buf2.buf);
	}
out:
	bch2_trans_iter_exit(trans, &copy);
	return ret;
}

void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
			    struct bpos pos, bool key_cache)
{
	struct btree_path *path;
	unsigned idx;
	struct printbuf buf = PRINTBUF;

	trans_for_each_path_inorder(trans, path, idx) {
		int cmp = cmp_int(path->btree_id, id) ?:
			cmp_int(path->cached, key_cache);

		if (cmp > 0)
			break;
		if (cmp < 0)
			continue;

		if (!(path->nodes_locked & 1) ||
		    !path->should_be_locked)
			continue;

		if (!key_cache) {
			if (bkey_cmp(pos, path->l[0].b->data->min_key) >= 0 &&
			    bkey_cmp(pos, path->l[0].b->key.k.p) <= 0)
				return;
		} else {
			if (!bkey_cmp(pos, path->pos))
				return;
		}
	}

	bch2_dump_trans_paths_updates(trans);
	bch2_bpos_to_text(&buf, pos);

	panic("not locked: %s %s%s\n",
	      bch2_btree_ids[id], buf.buf,
	      key_cache ? " cached" : "");
}

#else

static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
						struct btree_path *path, unsigned l) {}
static inline void bch2_btree_path_verify(struct btree_trans *trans,
					  struct btree_path *path) {}
static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }

#endif

/* Btree path: fixups after btree updates */

static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
					struct btree *b,
					struct bset_tree *t,
					struct bkey_packed *k)
{
	struct btree_node_iter_set *set;

	btree_node_iter_for_each(iter, set)
		if (set->end == t->end_offset) {
			set->k = __btree_node_key_to_offset(b, k);
			bch2_btree_node_iter_sort(iter, b);
			return;
		}

	bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
}

static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
					       struct btree *b,
					       struct bkey_packed *where)
{
	struct btree_path_level *l = &path->l[b->c.level];

	if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
		return;

	if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
		bch2_btree_node_iter_advance(&l->iter, l->b);
}

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
				      struct btree *b,
				      struct bkey_packed *where)
{
	struct btree_path *path;

	trans_for_each_path_with_node(trans, b, path) {
		__bch2_btree_path_fix_key_modified(path, b, where);
		bch2_btree_path_verify_level(trans, path, b->c.level);
	}
}

static void __bch2_btree_node_iter_fix(struct btree_path *path,
				       struct btree *b,
				       struct btree_node_iter *node_iter,
				       struct bset_tree *t,
				       struct bkey_packed *where,
				       unsigned clobber_u64s,
				       unsigned new_u64s)
{
	const struct bkey_packed *end = btree_bkey_last(b, t);
	struct btree_node_iter_set *set;
	unsigned offset = __btree_node_key_to_offset(b, where);
	int shift = new_u64s - clobber_u64s;
	unsigned old_end = t->end_offset - shift;
	unsigned orig_iter_pos = node_iter->data[0].k;
	bool iter_current_key_modified =
		orig_iter_pos >= offset &&
		orig_iter_pos <= offset + clobber_u64s;

	btree_node_iter_for_each(node_iter, set)
		if (set->end == old_end)
			goto found;

	/* didn't find the bset in the iterator - might have to re-add it: */
	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
		bch2_btree_node_iter_push(node_iter, b, where, end);
		goto fixup_done;
	} else {
		/* Iterator is after key that changed */
		return;
	}
found:
	set->end = t->end_offset;

	/* Iterator hasn't gotten to the key that changed yet: */
	if (set->k < offset)
		return;

	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
		set->k = offset;
	} else if (set->k < offset + clobber_u64s) {
		set->k = offset + new_u64s;
		if (set->k == set->end)
			bch2_btree_node_iter_set_drop(node_iter, set);
	} else {
		/* Iterator is after key that changed */
		set->k = (int) set->k + shift;
		return;
	}

	bch2_btree_node_iter_sort(node_iter, b);
fixup_done:
	if (node_iter->data[0].k != orig_iter_pos)
		iter_current_key_modified = true;

	/*
	 * When a new key is added, and the node iterator now points to that
	 * key, the iterator might have skipped past deleted keys that should
	 * come after the key the iterator now points to. We have to rewind to
	 * before those deleted keys - otherwise
	 * bch2_btree_node_iter_prev_all() breaks:
	 */
	if (!bch2_btree_node_iter_end(node_iter) &&
	    iter_current_key_modified &&
	    b->c.level) {
		struct bset_tree *t;
		struct bkey_packed *k, *k2, *p;

		k = bch2_btree_node_iter_peek_all(node_iter, b);

		for_each_bset(b, t) {
			bool set_pos = false;

			if (node_iter->data[0].end == t->end_offset)
				continue;

			k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);

			while ((p = bch2_bkey_prev_all(b, t, k2)) &&
			       bkey_iter_cmp(b, k, p) < 0) {
				k2 = p;
				set_pos = true;
			}

			if (set_pos)
				btree_node_iter_set_set_pos(node_iter,
							    b, t, k2);
		}
	}
}
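
/*
 * Worked example for the offset arithmetic above (illustrative): say a
 * key at offset 10 occupying 3 u64s (clobber_u64s == 3) is replaced by a
 * 5 u64 key (new_u64s == 5), so shift == +2. If the new key compares >=
 * the iterator position, any set at or past offset 10 snaps to offset 10;
 * otherwise a set inside the clobbered range [10, 13) moves to offset 15
 * (just past the new key), and one further along, say at 14, is simply
 * shifted to 16.
 */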

void bch2_btree_node_iter_fix(struct btree_trans *trans,
			      struct btree_path *path,
			      struct btree *b,
			      struct btree_node_iter *node_iter,
			      struct bkey_packed *where,
			      unsigned clobber_u64s,
			      unsigned new_u64s)
{
	struct bset_tree *t = bch2_bkey_to_bset(b, where);
	struct btree_path *linked;

	if (node_iter != &path->l[b->c.level].iter) {
		__bch2_btree_node_iter_fix(path, b, node_iter, t,
					   where, clobber_u64s, new_u64s);

		if (bch2_debug_check_iterators)
			bch2_btree_node_iter_verify(node_iter, b);
	}

	trans_for_each_path_with_node(trans, b, linked) {
		__bch2_btree_node_iter_fix(linked, b,
					   &linked->l[b->c.level].iter, t,
					   where, clobber_u64s, new_u64s);
		bch2_btree_path_verify_level(trans, linked, b->c.level);
	}
}

/* Btree path level: pointer to a particular btree node and node iter */

static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
						  struct btree_path_level *l,
						  struct bkey *u,
						  struct bkey_packed *k)
{
	if (unlikely(!k)) {
		/*
		 * signal to bch2_btree_iter_peek_slot() that we're currently at
		 * a hole
		 */
		u->type = KEY_TYPE_deleted;
		return bkey_s_c_null;
	}

	return bkey_disassemble(l->b, k, u);
}

static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
							struct btree_path_level *l,
							struct bkey *u)
{
	return __btree_iter_unpack(c, l, u,
			bch2_btree_node_iter_peek_all(&l->iter, l->b));
}

static inline struct bkey_s_c btree_path_level_peek(struct btree_trans *trans,
						    struct btree_path *path,
						    struct btree_path_level *l,
						    struct bkey *u)
{
	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
			bch2_btree_node_iter_peek(&l->iter, l->b));

	path->pos = k.k ? k.k->p : l->b->key.k.p;
	bch2_btree_path_verify_level(trans, path, l - path->l);
	return k;
}

static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
						    struct btree_path *path,
						    struct btree_path_level *l,
						    struct bkey *u)
{
	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
			bch2_btree_node_iter_prev(&l->iter, l->b));

	path->pos = k.k ? k.k->p : l->b->data->min_key;
	bch2_btree_path_verify_level(trans, path, l - path->l);
	return k;
}

static inline bool btree_path_advance_to_pos(struct btree_path *path,
					     struct btree_path_level *l,
					     int max_advance)
{
	struct bkey_packed *k;
	int nr_advanced = 0;

	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
	       bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
		if (max_advance > 0 && nr_advanced >= max_advance)
			return false;

		bch2_btree_node_iter_advance(&l->iter, l->b);
		nr_advanced++;
	}

	return true;
}
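
/*
 * Usage note (illustrative): a caller passing max_advance == 8 is saying
 * "step forward at most 8 keys looking for path->pos; if it's further
 * away than that, return false so I can re-seek with a full node iterator
 * init, which is cheaper than many individual advances".
 */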

/*
 * Verify that iterator for parent node points to child node:
 */
static void btree_path_verify_new_node(struct btree_trans *trans,
				       struct btree_path *path, struct btree *b)
{
	struct bch_fs *c = trans->c;
	struct btree_path_level *l;
	unsigned plevel;
	bool parent_locked;
	struct bkey_packed *k;

	if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
		return;

	if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
		return;

	plevel = b->c.level + 1;
	if (!btree_path_node(path, plevel))
		return;

	parent_locked = btree_node_locked(path, plevel);

	if (!bch2_btree_node_relock(trans, path, plevel))
		return;

	l = &path->l[plevel];
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
	if (!k ||
	    bkey_deleted(k) ||
	    bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
		struct printbuf buf1 = PRINTBUF;
		struct printbuf buf2 = PRINTBUF;
		struct printbuf buf3 = PRINTBUF;
		struct printbuf buf4 = PRINTBUF;
		struct bkey uk = bkey_unpack_key(b, k);

		bch2_dump_btree_node(c, l->b);
		bch2_bpos_to_text(&buf1, path->pos);
		bch2_bkey_to_text(&buf2, &uk);
		bch2_bpos_to_text(&buf3, b->data->min_key);
		bch2_bpos_to_text(&buf4, b->data->max_key);
		panic("parent iter doesn't point to new node:\n"
		      "iter pos %s %s\n"
		      "iter key %s\n"
		      "new node %s-%s\n",
		      bch2_btree_ids[path->btree_id],
		      buf1.buf, buf2.buf, buf3.buf, buf4.buf);
	}

	if (!parent_locked)
		btree_node_unlock(trans, path, plevel);
}

static inline void __btree_path_level_init(struct btree_path *path,
					   unsigned level)
{
	struct btree_path_level *l = &path->l[level];

	bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);

	/*
	 * Iterators to interior nodes should always be pointed at the first non
	 * whiteout:
	 */
	if (level)
		bch2_btree_node_iter_peek(&l->iter, l->b);
}

static inline void btree_path_level_init(struct btree_trans *trans,
					 struct btree_path *path,
					 struct btree *b)
{
	BUG_ON(path->cached);

	btree_path_verify_new_node(trans, path, b);

	EBUG_ON(!btree_path_pos_in_node(path, b));
	EBUG_ON(b->c.lock.state.seq & 1);

	path->l[b->c.level].lock_seq = b->c.lock.state.seq;
	path->l[b->c.level].b = b;
	__btree_path_level_init(path, b->c.level);
}

/* Btree path: fixups after btree node updates: */

/*
 * A btree node is being replaced - update the iterator to point to the new
 * node:
 */
void bch2_trans_node_add(struct btree_trans *trans, struct btree *b)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (!path->cached &&
		    btree_path_pos_in_node(path, b)) {
			enum btree_node_locked_type t =
				btree_lock_want(path, b->c.level);

			if (path->nodes_locked &&
			    t != BTREE_NODE_UNLOCKED) {
				btree_node_unlock(trans, path, b->c.level);
				six_lock_increment(&b->c.lock, t);
				mark_btree_node_locked(trans, path, b->c.level, t);
			}

			btree_path_level_init(trans, path, b);
		}
}

/*
 * A btree node has been modified in such a way as to invalidate iterators - fix
 * them:
 */
void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
{
	struct btree_path *path;

	trans_for_each_path_with_node(trans, b, path)
		__btree_path_level_init(path, b->c.level);
}

/* Btree path: traverse, set_pos: */

static int lock_root_check_fn(struct six_lock *lock, void *p)
{
	struct btree *b = container_of(lock, struct btree, c.lock);
	struct btree **rootp = p;

	if (b != *rootp)
		return BCH_ERR_lock_fail_root_changed;
	return 0;
}

static inline int btree_path_lock_root(struct btree_trans *trans,
				       struct btree_path *path,
				       unsigned depth_want,
				       unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree *b, **rootp = &c->btree_roots[path->btree_id].b;
	enum six_lock_type lock_type;
	unsigned i;
	int ret;

	EBUG_ON(path->nodes_locked);

	while (1) {
		b = READ_ONCE(*rootp);
		path->level = READ_ONCE(b->c.level);

		if (unlikely(path->level < depth_want)) {
			/*
			 * the root is at a lower depth than the depth we want:
			 * got to the end of the btree, or we're walking nodes
			 * greater than some depth and there are no nodes >=
			 * that depth
			 */
			path->level = depth_want;
			for (i = path->level; i < BTREE_MAX_DEPTH; i++)
				path->l[i].b = NULL;
			return 1;
		}

		lock_type = __btree_lock_want(path, path->level);
		ret = btree_node_lock(trans, path, b, SPOS_MAX,
				      path->level, lock_type,
				      lock_root_check_fn, rootp,
				      trace_ip);
		if (unlikely(ret)) {
			if (bch2_err_matches(ret, BCH_ERR_lock_fail_root_changed))
				continue;
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
				return ret;
			BUG();
		}

		if (likely(b == READ_ONCE(*rootp) &&
			   b->c.level == path->level &&
			   !race_fault())) {
			for (i = 0; i < path->level; i++)
				path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root);
			path->l[path->level].b = b;
			for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
				path->l[i].b = NULL;

			mark_btree_node_locked(trans, path, path->level, lock_type);
			btree_path_level_init(trans, path, b);
			return 0;
		}

		six_unlock_type(&b->c.lock, lock_type);
	}
}
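
/*
 * Note on the retry loop above (illustrative): the root pointer is read
 * without any lock held, so after locking we re-check that the node is
 * still the root and still at the expected level; a concurrent
 * split/merge that changed the root between the READ_ONCE() and the lock
 * simply sends us around the loop again.
 */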

noinline
static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
{
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree_node_iter node_iter = l->iter;
	struct bkey_packed *k;
	struct bkey_buf tmp;
	unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
		? (path->level > 1 ? 0 :  2)
		: (path->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(path, path->level);
	int ret = 0;

	bch2_bkey_buf_init(&tmp);

	while (nr-- && !ret) {
		if (!bch2_btree_node_relock(trans, path, path->level))
			break;

		bch2_btree_node_iter_advance(&node_iter, l->b);
		k = bch2_btree_node_iter_peek(&node_iter, l->b);
		if (!k)
			break;

		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
		ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
					       path->level - 1);
	}

	if (!was_locked)
		btree_node_unlock(trans, path, path->level);

	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}
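
/*
 * Note on the prefetch batch size above (illustrative reading of the
 * heuristic): before the filesystem has fully started, up to 16 sibling
 * leaves and 1 interior node are prefetched, since startup is dominated
 * by cold btree reads; once started, only 2 leaves and no interior nodes,
 * to avoid polluting the btree node cache.
 */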

static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
				 struct btree_and_journal_iter *jiter)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;
	struct bkey_buf tmp;
	unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
		? (path->level > 1 ? 0 :  2)
		: (path->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(path, path->level);
	int ret = 0;

	bch2_bkey_buf_init(&tmp);

	while (nr-- && !ret) {
		if (!bch2_btree_node_relock(trans, path, path->level))
			break;

		bch2_btree_and_journal_iter_advance(jiter);
		k = bch2_btree_and_journal_iter_peek(jiter);
		if (!k.k)
			break;

		bch2_bkey_buf_reassemble(&tmp, c, k);
		ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
					       path->level - 1);
	}

	if (!was_locked)
		btree_node_unlock(trans, path, path->level);

	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}

static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
					    struct btree_path *path,
					    unsigned plevel, struct btree *b)
{
	struct btree_path_level *l = &path->l[plevel];
	bool locked = btree_node_locked(path, plevel);
	struct bkey_packed *k;
	struct bch_btree_ptr_v2 *bp;

	if (!bch2_btree_node_relock(trans, path, plevel))
		return;

	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
	BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);

	bp = (void *) bkeyp_val(&l->b->format, k);
	bp->mem_ptr = (unsigned long)b;

	if (!locked)
		btree_node_unlock(trans, path, plevel);
}

static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
						     struct btree_path *path,
						     unsigned flags,
						     struct bkey_buf *out)
{
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree_and_journal_iter jiter;
	struct bkey_s_c k;
	int ret = 0;

	__bch2_btree_and_journal_iter_init_node_iter(&jiter, c, l->b, l->iter, path->pos);

	k = bch2_btree_and_journal_iter_peek(&jiter);

	bch2_bkey_buf_reassemble(out, c, k);

	if (flags & BTREE_ITER_PREFETCH)
		ret = btree_path_prefetch_j(trans, path, &jiter);

	bch2_btree_and_journal_iter_exit(&jiter);
	return ret;
}

static __always_inline int btree_path_down(struct btree_trans *trans,
					   struct btree_path *path,
					   unsigned flags,
					   unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree *b;
	unsigned level = path->level - 1;
	enum six_lock_type lock_type = __btree_lock_want(path, level);
	bool replay_done = test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags);
	struct bkey_buf tmp;
	int ret;

	EBUG_ON(!btree_node_locked(path, path->level));

	bch2_bkey_buf_init(&tmp);

	if (unlikely(!replay_done)) {
		ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
		if (ret)
			goto err;
	} else {
		bch2_bkey_buf_unpack(&tmp, c, l->b,
				 bch2_btree_node_iter_peek(&l->iter, l->b));

		if (flags & BTREE_ITER_PREFETCH) {
			ret = btree_path_prefetch(trans, path);
			if (ret)
				goto err;
		}
	}

	b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
	ret = PTR_ERR_OR_ZERO(b);
	if (unlikely(ret))
		goto err;

	mark_btree_node_locked(trans, path, level, lock_type);
	btree_path_level_init(trans, path, b);

	if (likely(replay_done && tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
	    unlikely(b != btree_node_mem_ptr(tmp.k)))
		btree_node_mem_ptr_set(trans, path, level + 1, b);

	if (btree_node_read_locked(path, level + 1))
		btree_node_unlock(trans, path, level + 1);
	path->level = level;

	bch2_btree_path_verify_locks(path);
err:
	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}
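
/*
 * Sketch of the descent above (illustrative): with the parent locked, we
 * peek the parent's iterator (or the journal, before replay is done) for
 * the child's btree_ptr key, get and lock the child node, point
 * path->l[level] at it, drop the parent's read lock if that's all we
 * held, and leave the path one level deeper.
 */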

static int btree_path_traverse_one(struct btree_trans *, struct btree_path *,
				   unsigned, unsigned long);

static int bch2_btree_path_traverse_all(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_path *path;
	unsigned long trace_ip = _RET_IP_;
	int i, ret = 0;

	if (trans->in_traverse_all)
		return -BCH_ERR_transaction_restart_in_traverse_all;

	trans->in_traverse_all = true;
retry_all:
	trans->restarted = 0;
	trans->traverse_all_idx = U8_MAX;

	trans_for_each_path(trans, path)
		path->should_be_locked = false;

	btree_trans_verify_sorted(trans);

	for (i = trans->nr_sorted - 2; i >= 0; --i) {
		struct btree_path *path1 = trans->paths + trans->sorted[i];
		struct btree_path *path2 = trans->paths + trans->sorted[i + 1];

		if (path1->btree_id == path2->btree_id &&
		    path1->locks_want < path2->locks_want)
			__bch2_btree_path_upgrade(trans, path1, path2->locks_want);
		else if (!path1->locks_want && path2->locks_want)
			__bch2_btree_path_upgrade(trans, path1, 1);
	}

	bch2_trans_unlock(trans);
	cond_resched();

	if (unlikely(trans->memory_allocation_failure)) {
		struct closure cl;

		closure_init_stack(&cl);

		do {
			ret = bch2_btree_cache_cannibalize_lock(c, &cl);
			closure_sync(&cl);
		} while (ret);
	}

	/* Now, redo traversals in correct order: */
	trans->traverse_all_idx = 0;
	while (trans->traverse_all_idx < trans->nr_sorted) {
		path = trans->paths + trans->sorted[trans->traverse_all_idx];

		/*
		 * Traversing a path can cause another path to be added at about
		 * the same position:
		 */
		if (path->uptodate) {
			ret = btree_path_traverse_one(trans, path, 0, _THIS_IP_);
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
			    ret == -ENOMEM)
				goto retry_all;
			if (ret)
				goto err;
			BUG_ON(path->uptodate);
		} else {
			trans->traverse_all_idx++;
		}
	}

	/*
	 * BTREE_ITER_NEED_RELOCK is ok here - if we called bch2_trans_unlock()
	 * and relock(), relock() won't relock since path->should_be_locked
	 * isn't set yet, which is all fine
	 */
	trans_for_each_path(trans, path)
		BUG_ON(path->uptodate >= BTREE_ITER_NEED_TRAVERSE);
err:
	bch2_btree_cache_cannibalize_unlock(c);

	trans->in_traverse_all = false;

	trace_trans_traverse_all(trans, trace_ip);
	return ret;
}

static inline bool btree_path_good_node(struct btree_trans *trans,
					struct btree_path *path,
					unsigned l, int check_pos)
{
	if (!is_btree_node(path, l) ||
	    !bch2_btree_node_relock(trans, path, l))
		return false;

	if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
		return false;
	if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
		return false;
	return true;
}

static void btree_path_set_level_down(struct btree_trans *trans,
				      struct btree_path *path,
				      unsigned new_level)
{
	unsigned l;

	path->level = new_level;

	for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
		if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
			btree_node_unlock(trans, path, l);

	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
	bch2_btree_path_verify(trans, path);
}

static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
						     struct btree_path *path,
1557                                                      int check_pos)
1558 {
1559         unsigned i, l = path->level;
1560
1561         while (btree_path_node(path, l) &&
1562                !btree_path_good_node(trans, path, l, check_pos))
1563                 __btree_path_set_level_up(trans, path, l++);
1564
1565         /* If we need intent locks, take them too: */
1566         for (i = l + 1;
1567              i < path->locks_want && btree_path_node(path, i);
1568              i++)
1569                 if (!bch2_btree_node_relock(trans, path, i))
1570                         while (l <= i)
1571                                 __btree_path_set_level_up(trans, path, l++);
1572
1573         return l;
1574 }
1575
1576 /*
1577  * This is the main state machine for walking down the btree - walks down to a
1578  * specified depth
1579  *
1580  * Returns 0 on success, or a negative error code (e.g. -EIO when reading in
1581  * a btree node failed).
1582  *
1583  * On error the path is left not uptodate; errors from the descent loop are
1584  * also stashed in path->l[path->level].b as an ERR_PTR.
1584  */
1585 static int btree_path_traverse_one(struct btree_trans *trans,
1586                                    struct btree_path *path,
1587                                    unsigned flags,
1588                                    unsigned long trace_ip)
1589 {
1590         unsigned depth_want = path->level;
1591         int ret = trans->restarted;
1592
1593         if (unlikely(ret))
1594                 goto out;
1595
1596         /*
1597          * Ensure we obey path->should_be_locked: if it's set, we can't unlock
1598          * and re-traverse the path without a transaction restart:
1599          */
1600         if (path->should_be_locked) {
1601                 ret = bch2_btree_path_relock(trans, path, trace_ip);
1602                 goto out;
1603         }
1604
1605         if (path->cached) {
1606                 ret = bch2_btree_path_traverse_cached(trans, path, flags);
1607                 goto out;
1608         }
1609
1610         if (unlikely(path->level >= BTREE_MAX_DEPTH))
1611                 goto out;
1612
1613         path->level = btree_path_up_until_good_node(trans, path, 0);
1614
1615         /*
1616          * Note: path->l[path->level].b may be temporarily NULL here - elsewhere
1617          * that would indicate we got to the end of the btree; here it indicates
1618          * that relocking the root failed, so it's critical that
1619          * btree_path_lock_root() comes next and that it can't fail
1620          */
1621         while (path->level > depth_want) {
1622                 ret = btree_path_node(path, path->level)
1623                         ? btree_path_down(trans, path, flags, trace_ip)
1624                         : btree_path_lock_root(trans, path, depth_want, trace_ip);
1625                 if (unlikely(ret)) {
1626                         if (ret == 1) {
1627                                 /*
1628                                  * No nodes at this level - got to the end of
1629                                  * the btree:
1630                                  */
1631                                 ret = 0;
1632                                 goto out;
1633                         }
1634
1635                         __bch2_btree_path_unlock(trans, path);
1636                         path->level = depth_want;
1637                         path->l[path->level].b = ERR_PTR(ret);
1638                         goto out;
1639                 }
1640         }
1641
1642         path->uptodate = BTREE_ITER_UPTODATE;
1643 out:
1644         BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted);
1645         bch2_btree_path_verify(trans, path);
1646         return ret;
1647 }
1648
1649 int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
1650                                           struct btree_path *path, unsigned flags)
1651 {
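        /*
         * Transaction restart fault injection, currently compiled out via the
         * "0 &&" below:
         */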
1652         if (0 && IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
1653                 unsigned restart_probability_bits = 4 << min(trans->restart_count, 32U);
1654                 u64 mask = ~(~0ULL << restart_probability_bits);
1655
1656                 if ((prandom_u32() & mask) == mask) {
1657                         trace_transaction_restart_injected(trans, _RET_IP_);
1658                         return btree_trans_restart(trans, BCH_ERR_transaction_restart_fault_inject);
1659                 }
1660         }
1661
1662         if (path->uptodate < BTREE_ITER_NEED_RELOCK)
1663                 return 0;
1664
1665         return  bch2_trans_cond_resched(trans) ?:
1666                 btree_path_traverse_one(trans, path, flags, _RET_IP_);
1667 }
1668
1669 static void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
1670                             struct btree_path *src)
1671 {
1672         unsigned i, offset = offsetof(struct btree_path, pos);
1673
1674         memcpy((void *) dst + offset,
1675                (void *) src + offset,
1676                sizeof(struct btree_path) - offset);
1677
1678         for (i = 0; i < BTREE_MAX_DEPTH; i++)
1679                 if (btree_node_locked(dst, i))
1680                         six_lock_increment(&dst->l[i].b->c.lock,
1681                                            __btree_lock_want(dst, i));
1682
1683         bch2_btree_path_check_sort(trans, dst, 0);
1684 }
1685
1686 static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btree_path *src,
1687                                            bool intent)
1688 {
1689         struct btree_path *new = btree_path_alloc(trans, src);
1690
1691         btree_path_copy(trans, new, src);
1692         __btree_path_get(new, intent);
1693         return new;
1694 }
1695
1696 inline struct btree_path * __must_check
1697 bch2_btree_path_make_mut(struct btree_trans *trans,
1698                          struct btree_path *path, bool intent,
1699                          unsigned long ip)
1700 {
1701         if (path->ref > 1 || path->preserve) {
1702                 __btree_path_put(path, intent);
1703                 path = btree_path_clone(trans, path, intent);
1704                 path->preserve = false;
1705 #ifdef CONFIG_BCACHEFS_DEBUG
1706                 path->ip_allocated = ip;
1707 #endif
1708                 btree_trans_verify_sorted(trans);
1709         }
1710
1711         path->should_be_locked = false;
1712         return path;
1713 }
1714
1715 struct btree_path * __must_check
1716 bch2_btree_path_set_pos(struct btree_trans *trans,
1717                    struct btree_path *path, struct bpos new_pos,
1718                    bool intent, unsigned long ip)
1719 {
1720         int cmp = bpos_cmp(new_pos, path->pos);
1721         unsigned l = path->level;
1722
1723         EBUG_ON(trans->restarted);
1724         EBUG_ON(!path->ref);
1725
1726         if (!cmp)
1727                 return path;
1728
1729         path = bch2_btree_path_make_mut(trans, path, intent, ip);
1730
1731         path->pos = new_pos;
1732
1733         bch2_btree_path_check_sort(trans, path, cmp);
1734
1735         if (unlikely(path->cached)) {
1736                 btree_node_unlock(trans, path, 0);
1737                 path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
1738                 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1739                 goto out;
1740         }
1741
1742         l = btree_path_up_until_good_node(trans, path, cmp);
1743
1744         if (btree_path_node(path, l)) {
1745                 BUG_ON(!btree_node_locked(path, l));
1746                 /*
1747                  * We might have to skip over many keys, or just a few: try
1748                  * advancing the node iterator, and if we have to skip over too
1749                  * many keys just reinit it (or if we're rewinding, since that
1750                  * is expensive).
1751                  */
1752                 if (cmp < 0 ||
1753                     !btree_path_advance_to_pos(path, &path->l[l], 8))
1754                         __btree_path_level_init(path, l);
1755         }
1756
1757         if (l != path->level) {
1758                 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1759                 __bch2_btree_path_unlock(trans, path);
1760         }
1761 out:
1762         bch2_btree_path_verify(trans, path);
1763         return path;
1764 }
1765
1766 /* Btree path: main interface: */
1767
1768 static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
1769 {
1770         struct btree_path *sib;
1771
1772         sib = prev_btree_path(trans, path);
1773         if (sib && !btree_path_cmp(sib, path))
1774                 return sib;
1775
1776         sib = next_btree_path(trans, path);
1777         if (sib && !btree_path_cmp(sib, path))
1778                 return sib;
1779
1780         return NULL;
1781 }
1782
1783 static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
1784 {
1785         struct btree_path *sib;
1786
1787         sib = prev_btree_path(trans, path);
1788         if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
1789                 return sib;
1790
1791         sib = next_btree_path(trans, path);
1792         if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
1793                 return sib;
1794
1795         return NULL;
1796 }
1797
1798 static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path)
1799 {
1800         __bch2_btree_path_unlock(trans, path);
1801         btree_path_list_remove(trans, path);
1802         trans->paths_allocated &= ~(1ULL << path->idx);
1803 }
1804
1805 void bch2_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
1806 {
1807         struct btree_path *dup;
1808
1809         EBUG_ON(trans->paths + path->idx != path);
1810         EBUG_ON(!path->ref);
1811
1812         if (!__btree_path_put(path, intent))
1813                 return;
1814
1815         dup = path->preserve
1816                 ? have_path_at_pos(trans, path)
1817                 : have_node_at_pos(trans, path);
1818
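        /*
         * Free the path only if we found a duplicate, or if it wasn't marked
         * preserve and no longer points at a valid btree node:
         */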
1819         if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
1820                 return;
1821
1822         if (path->should_be_locked &&
1823             !trans->restarted &&
1824             (!dup || !bch2_btree_path_relock_norestart(trans, dup, _THIS_IP_)))
1825                 return;
1826
1827         if (dup) {
1828                 dup->preserve           |= path->preserve;
1829                 dup->should_be_locked   |= path->should_be_locked;
1830         }
1831
1832         __bch2_path_free(trans, path);
1833 }
1834
1835 void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
1836 {
1837         struct btree_insert_entry *i;
1838
1839         prt_printf(buf, "transaction updates for %s journal seq %llu",
1840                trans->fn, trans->journal_res.seq);
1841         prt_newline(buf);
1842         printbuf_indent_add(buf, 2);
1843
1844         trans_for_each_update(trans, i) {
1845                 struct bkey_s_c old = { &i->old_k, i->old_v };
1846
1847                 prt_printf(buf, "update: btree=%s cached=%u %pS",
1848                        bch2_btree_ids[i->btree_id],
1849                        i->cached,
1850                        (void *) i->ip_allocated);
1851                 prt_newline(buf);
1852
1853                 prt_printf(buf, "  old ");
1854                 bch2_bkey_val_to_text(buf, trans->c, old);
1855                 prt_newline(buf);
1856
1857                 prt_printf(buf, "  new ");
1858                 bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));
1859                 prt_newline(buf);
1860         }
1861
1862         printbuf_indent_sub(buf, 2);
1863 }
1864
1865 noinline __cold
1866 void bch2_dump_trans_updates(struct btree_trans *trans)
1867 {
1868         struct printbuf buf = PRINTBUF;
1869
1870         bch2_trans_updates_to_text(&buf, trans);
1871         bch_err(trans->c, "%s", buf.buf);
1872         printbuf_exit(&buf);
1873 }
1874
1875 noinline __cold
1876 void bch2_dump_trans_paths_updates(struct btree_trans *trans)
1877 {
1878         struct btree_path *path;
1879         struct printbuf buf = PRINTBUF;
1880         unsigned idx;
1881
1882         trans_for_each_path_inorder(trans, path, idx) {
1883                 printbuf_reset(&buf);
1884
1885                 bch2_bpos_to_text(&buf, path->pos);
1886
1887                 printk(KERN_ERR "path: idx %2u ref %u:%u %c %c btree=%s l=%u pos %s locks %u %pS\n",
1888                        path->idx, path->ref, path->intent_ref,
1889                        path->preserve ? 'P' : ' ',
1890                        path->should_be_locked ? 'S' : ' ',
1891                        bch2_btree_ids[path->btree_id],
1892                        path->level,
1893                        buf.buf,
1894                        path->nodes_locked,
1895 #ifdef CONFIG_BCACHEFS_DEBUG
1896                        (void *) path->ip_allocated
1897 #else
1898                        NULL
1899 #endif
1900                        );
1901         }
1902
1903         printbuf_exit(&buf);
1904
1905         bch2_dump_trans_updates(trans);
1906 }
1907
1908 static struct btree_path *btree_path_alloc(struct btree_trans *trans,
1909                                            struct btree_path *pos)
1910 {
1911         struct btree_path *path;
1912         unsigned idx;
1913
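        /*
         * All BTREE_ITER_MAX path slots in use? The expression below computes
         * ~(~0ULL << BTREE_ITER_MAX), written so as to avoid undefined
         * behaviour when BTREE_ITER_MAX == 64:
         */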
1914         if (unlikely(trans->paths_allocated ==
1915                      ~((~0ULL << 1) << (BTREE_ITER_MAX - 1)))) {
1916                 bch2_dump_trans_paths_updates(trans);
1917                 panic("trans path overflow\n");
1918         }
1919
1920         idx = __ffs64(~trans->paths_allocated);
1921         trans->paths_allocated |= 1ULL << idx;
1922
1923         path = &trans->paths[idx];
1924
1925         path->idx               = idx;
1926         path->ref               = 0;
1927         path->intent_ref        = 0;
1928         path->nodes_locked      = 0;
1929         path->nodes_intent_locked = 0;
1930
1931         btree_path_list_add(trans, pos, path);
1932         return path;
1933 }
1934
1935 struct btree_path *bch2_path_get(struct btree_trans *trans,
1936                                  enum btree_id btree_id, struct bpos pos,
1937                                  unsigned locks_want, unsigned level,
1938                                  unsigned flags, unsigned long ip)
1939 {
1940         struct btree_path *path, *path_pos = NULL;
1941         bool cached = flags & BTREE_ITER_CACHED;
1942         bool intent = flags & BTREE_ITER_INTENT;
1943         int i;
1944
1945         BUG_ON(trans->restarted);
1946         btree_trans_verify_sorted(trans);
1947         bch2_trans_verify_locks(trans);
1948
1949         trans_for_each_path_inorder(trans, path, i) {
1950                 if (__btree_path_cmp(path,
1951                                      btree_id,
1952                                      cached,
1953                                      pos,
1954                                      level) > 0)
1955                         break;
1956
1957                 path_pos = path;
1958         }
1959
1960         if (path_pos &&
1961             path_pos->cached    == cached &&
1962             path_pos->btree_id  == btree_id &&
1963             path_pos->level     == level) {
1964                 __btree_path_get(path_pos, intent);
1965                 path = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
1966         } else {
1967                 path = btree_path_alloc(trans, path_pos);
1968                 path_pos = NULL;
1969
1970                 __btree_path_get(path, intent);
1971                 path->pos                       = pos;
1972                 path->btree_id                  = btree_id;
1973                 path->cached                    = cached;
1974                 path->uptodate                  = BTREE_ITER_NEED_TRAVERSE;
1975                 path->should_be_locked          = false;
1976                 path->level                     = level;
1977                 path->locks_want                = locks_want;
1978                 path->nodes_locked              = 0;
1979                 path->nodes_intent_locked       = 0;
1980                 for (i = 0; i < ARRAY_SIZE(path->l); i++)
1981                         path->l[i].b            = ERR_PTR(-BCH_ERR_no_btree_node_init);
1982 #ifdef CONFIG_BCACHEFS_DEBUG
1983                 path->ip_allocated              = ip;
1984 #endif
1985                 btree_trans_verify_sorted(trans);
1986         }
1987
1988         if (!(flags & BTREE_ITER_NOPRESERVE))
1989                 path->preserve = true;
1990
1991         if (path->intent_ref)
1992                 locks_want = max(locks_want, level + 1);
1993
1994         /*
1995          * If the path has locks_want greater than requested, we don't downgrade
1996          * it here - on transaction restart because btree node split needs to
1997          * upgrade locks, we might be putting/getting the iterator again.
1998          * Downgrading iterators only happens via bch2_trans_downgrade(), after
1999          * a successful transaction commit.
2000          */
2001
2002         locks_want = min(locks_want, BTREE_MAX_DEPTH);
2003         if (locks_want > path->locks_want) {
2004                 path->locks_want = locks_want;
2005                 btree_path_get_locks(trans, path, true);
2006         }
2007
2008         return path;
2009 }
2010
2011 inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
2012 {
2014         struct bkey_s_c k;
2015
2016         if (!path->cached) {
2017                 struct btree_path_level *l = path_l(path);
2018                 struct bkey_packed *_k;
2019
2020                 EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
2021
2022                 _k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
2023                 k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
2024
2025                 EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, path->pos) == 0);
2026
2027                 if (!k.k || bpos_cmp(path->pos, k.k->p))
2028                         goto hole;
2029         } else {
2030                 struct bkey_cached *ck = (void *) path->l[0].b;
2031
2032                 EBUG_ON(ck &&
2033                         (path->btree_id != ck->key.btree_id ||
2034                          bkey_cmp(path->pos, ck->key.pos)));
2035                 EBUG_ON(!ck || !ck->valid);
2036                 EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
2037
2038                 *u = ck->k->k;
2039                 k = bkey_i_to_s_c(ck->k);
2040         }
2041
2042         return k;
2043 hole:
2044         bkey_init(u);
2045         u->p = path->pos;
2046         return (struct bkey_s_c) { u, NULL };
2047 }
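
/*
 * A minimal sketch of using the btree_path API above directly - most code
 * goes through btree_iter instead. `trans` and `pos` are assumed from the
 * surrounding context:
 *
 *	struct btree_path *path;
 *	struct bkey u;
 *	struct bkey_s_c k;
 *	int ret;
 *
 *	path = bch2_path_get(trans, BTREE_ID_inodes, pos, 0, 0, 0, _THIS_IP_);
 *	ret = bch2_btree_path_traverse(trans, path, 0);
 *	if (!ret)
 *		k = bch2_btree_path_peek_slot(path, &u);
 *	bch2_path_put(trans, path, false);
 */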
2048
2049 /* Btree iterators: */
2050
2051 int __must_check
2052 __bch2_btree_iter_traverse(struct btree_iter *iter)
2053 {
2054         return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
2055 }
2056
2057 int __must_check
2058 bch2_btree_iter_traverse(struct btree_iter *iter)
2059 {
2060         int ret;
2061
2062         iter->path = bch2_btree_path_set_pos(iter->trans, iter->path,
2063                                         btree_iter_search_key(iter),
2064                                         iter->flags & BTREE_ITER_INTENT,
2065                                         btree_iter_ip_allocated(iter));
2066
2067         ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
2068         if (ret)
2069                 return ret;
2070
2071         btree_path_set_should_be_locked(iter->path);
2072         return 0;
2073 }
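
/*
 * A sketch of bch2_btree_iter_traverse() as the "position and lock" step
 * before queueing an update - assuming trans is initialized and new_key is a
 * hypothetical bkey on the stack:
 *
 *	struct btree_iter iter;
 *	int ret;
 *
 *	bch2_trans_iter_init(&trans, &iter, BTREE_ID_inodes, pos,
 *			     BTREE_ITER_INTENT);
 *	ret =   bch2_btree_iter_traverse(&iter) ?:
 *		bch2_trans_update(&trans, &iter, &new_key.k_i, 0);
 *	bch2_trans_iter_exit(&trans, &iter);
 */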
2074
2075 /* Iterate across nodes (leaf and interior nodes) */
2076
2077 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
2078 {
2079         struct btree_trans *trans = iter->trans;
2080         struct btree *b = NULL;
2081         int ret;
2082
2083         EBUG_ON(iter->path->cached);
2084         bch2_btree_iter_verify(iter);
2085
2086         ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2087         if (ret)
2088                 goto err;
2089
2090         b = btree_path_node(iter->path, iter->path->level);
2091         if (!b)
2092                 goto out;
2093
2094         BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
2095
2096         bkey_init(&iter->k);
2097         iter->k.p = iter->pos = b->key.k.p;
2098
2099         iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
2100                                         iter->flags & BTREE_ITER_INTENT,
2101                                         btree_iter_ip_allocated(iter));
2102         btree_path_set_should_be_locked(iter->path);
2103 out:
2104         bch2_btree_iter_verify_entry_exit(iter);
2105         bch2_btree_iter_verify(iter);
2106
2107         return b;
2108 err:
2109         b = ERR_PTR(ret);
2110         goto out;
2111 }
2112
2113 struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
2114 {
2115         struct btree_trans *trans = iter->trans;
2116         struct btree_path *path = iter->path;
2117         struct btree *b = NULL;
2118         int ret;
2119
2120         BUG_ON(trans->restarted);
2121         EBUG_ON(iter->path->cached);
2122         bch2_btree_iter_verify(iter);
2123
2124         /* already at end? */
2125         if (!btree_path_node(path, path->level))
2126                 return NULL;
2127
2128         /* at the root? then there's no next node: */
2129         if (!btree_path_node(path, path->level + 1)) {
2130                 btree_path_set_level_up(trans, path);
2131                 return NULL;
2132         }
2133
2134         if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
2135                 __bch2_btree_path_unlock(trans, path);
2136                 path->l[path->level].b          = ERR_PTR(-BCH_ERR_no_btree_node_relock);
2137                 path->l[path->level + 1].b      = ERR_PTR(-BCH_ERR_no_btree_node_relock);
2138                 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
2139                 trace_trans_restart_relock_next_node(trans, _THIS_IP_, path);
2140                 ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
2141                 goto err;
2142         }
2143
2144         b = btree_path_node(path, path->level + 1);
2145
2146         if (!bpos_cmp(iter->pos, b->key.k.p)) {
2147                 __btree_path_set_level_up(trans, path, path->level++);
2148         } else {
2149                 /*
2150                  * Haven't gotten to the end of the parent node: go back down to
2151                  * the next child node
2152                  */
2153                 path = iter->path =
2154                         bch2_btree_path_set_pos(trans, path, bpos_successor(iter->pos),
2155                                            iter->flags & BTREE_ITER_INTENT,
2156                                            btree_iter_ip_allocated(iter));
2157
2158                 btree_path_set_level_down(trans, path, iter->min_depth);
2159
2160                 ret = bch2_btree_path_traverse(trans, path, iter->flags);
2161                 if (ret)
2162                         goto err;
2163
2164                 b = path->l[path->level].b;
2165         }
2166
2167         bkey_init(&iter->k);
2168         iter->k.p = iter->pos = b->key.k.p;
2169
2170         iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
2171                                         iter->flags & BTREE_ITER_INTENT,
2172                                         btree_iter_ip_allocated(iter));
2173         btree_path_set_should_be_locked(iter->path);
2174         BUG_ON(iter->path->uptodate);
2175 out:
2176         bch2_btree_iter_verify_entry_exit(iter);
2177         bch2_btree_iter_verify(iter);
2178
2179         return b;
2180 err:
2181         b = ERR_PTR(ret);
2182         goto out;
2183 }
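
/*
 * A sketch of walking btree nodes with the two functions above, assuming
 * trans is initialized; a real caller would also restart from
 * bch2_trans_begin() on transaction restart:
 *
 *	struct btree_iter iter;
 *	struct btree *b;
 *	int ret;
 *
 *	bch2_trans_node_iter_init(&trans, &iter, btree_id, POS_MIN,
 *				  0, 0, BTREE_ITER_PREFETCH);
 *	for (b = bch2_btree_iter_peek_node(&iter);
 *	     b && !IS_ERR(b);
 *	     b = bch2_btree_iter_next_node(&iter)) {
 *		(visit b)
 *	}
 *	ret = PTR_ERR_OR_ZERO(b);
 *	bch2_trans_iter_exit(&trans, &iter);
 */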
2184
2185 /* Iterate across keys (in leaf nodes only) */
2186
2187 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
2188 {
2189         if (likely(!(iter->flags & BTREE_ITER_ALL_LEVELS))) {
2190                 struct bpos pos = iter->k.p;
2191                 bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
2192                             ? bpos_cmp(pos, SPOS_MAX)
2193                             : bkey_cmp(pos, SPOS_MAX)) != 0;
2194
2195                 if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
2196                         pos = bkey_successor(iter, pos);
2197                 bch2_btree_iter_set_pos(iter, pos);
2198                 return ret;
2199         } else {
2200                 if (!btree_path_node(iter->path, iter->path->level))
2201                         return true;
2202
2203                 iter->advanced = true;
2204                 return false;
2205         }
2206 }
2207
2208 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
2209 {
2210         struct bpos pos = bkey_start_pos(&iter->k);
2211         bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
2212                     ? bpos_cmp(pos, POS_MIN)
2213                     : bkey_cmp(pos, POS_MIN)) != 0;
2214
2215         if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
2216                 pos = bkey_predecessor(iter, pos);
2217         bch2_btree_iter_set_pos(iter, pos);
2218         return ret;
2219 }
2220
2221 static inline struct bkey_i *btree_trans_peek_updates(struct btree_trans *trans,
2222                                                       enum btree_id btree_id,
2223                                                       struct bpos pos)
2224 {
2225         struct btree_insert_entry *i;
2226         struct bkey_i *ret = NULL;
2227
2228         trans_for_each_update(trans, i) {
2229                 if (i->btree_id < btree_id)
2230                         continue;
2231                 if (i->btree_id > btree_id)
2232                         break;
2233                 if (bpos_cmp(i->k->k.p, pos) < 0)
2234                         continue;
2235                 if (i->key_cache_already_flushed)
2236                         continue;
2237                 if (!ret || bpos_cmp(i->k->k.p, ret->k.p) < 0)
2238                         ret = i->k;
2239         }
2240
2241         return ret;
2242 }
2243
2244 struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
2245                                        struct btree_iter *iter,
2246                                        struct bpos start_pos,
2247                                        struct bpos end_pos)
2248 {
2249         struct bkey_i *k;
2250
2251         if (bpos_cmp(start_pos, iter->journal_pos) < 0)
2252                 iter->journal_idx = 0;
2253
2254         k = bch2_journal_keys_peek_upto(trans->c, iter->btree_id, 0,
2255                                         start_pos, end_pos,
2256                                         &iter->journal_idx);
2257
2258         iter->journal_pos = k ? k->k.p : end_pos;
2259         return k;
2260 }
2261
2262 struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *trans,
2263                                             struct btree_iter *iter,
2264                                             struct bpos pos)
2265 {
2266         return bch2_btree_journal_peek(trans, iter, pos, pos);
2267 }
2268
2269 static noinline
2270 struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
2271                                          struct btree_iter *iter,
2272                                          struct bkey_s_c k)
2273 {
2274         struct bkey_i *next_journal =
2275                 bch2_btree_journal_peek(trans, iter, iter->path->pos,
2276                                 k.k ? k.k->p : iter->path->l[0].b->key.k.p);
2277
2278         if (next_journal) {
2279                 iter->k = next_journal->k;
2280                 k = bkey_i_to_s_c(next_journal);
2281         }
2282
2283         return k;
2284 }
2285
2286 /*
2287  * Checks the btree key cache for a key at @pos and returns it if present, or
2288  * bkey_s_c_null:
2289  */
2290 static noinline
2291 struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
2292 {
2293         struct btree_trans *trans = iter->trans;
2294         struct bch_fs *c = trans->c;
2295         struct bkey u;
2296         int ret;
2297
2298         if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
2299                 return bkey_s_c_null;
2300
2301         if (!iter->key_cache_path)
2302                 iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
2303                                                      iter->flags & BTREE_ITER_INTENT, 0,
2304                                                      iter->flags|BTREE_ITER_CACHED,
2305                                                      _THIS_IP_);
2306
2307         iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
2308                                         iter->flags & BTREE_ITER_INTENT,
2309                                         btree_iter_ip_allocated(iter));
2310
2311         ret = bch2_btree_path_traverse(trans, iter->key_cache_path, iter->flags|BTREE_ITER_CACHED);
2312         if (unlikely(ret))
2313                 return bkey_s_c_err(ret);
2314
2315         btree_path_set_should_be_locked(iter->key_cache_path);
2316
2317         return bch2_btree_path_peek_slot(iter->key_cache_path, &u);
2318 }
2319
2320 static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
2321 {
2322         struct btree_trans *trans = iter->trans;
2323         struct bkey_i *next_update;
2324         struct bkey_s_c k, k2;
2325         int ret;
2326
2327         EBUG_ON(iter->path->cached || iter->path->level);
2328         bch2_btree_iter_verify(iter);
2329
2330         while (1) {
2331                 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2332                                         iter->flags & BTREE_ITER_INTENT,
2333                                         btree_iter_ip_allocated(iter));
2334
2335                 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2336                 if (unlikely(ret)) {
2337                         /* ensure that iter->k is consistent with iter->pos: */
2338                         bch2_btree_iter_set_pos(iter, iter->pos);
2339                         k = bkey_s_c_err(ret);
2340                         goto out;
2341                 }
2342
2343                 btree_path_set_should_be_locked(iter->path);
2344
2345                 k = btree_path_level_peek_all(trans->c, &iter->path->l[0], &iter->k);
2346
2347                 if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
2348                     k.k &&
2349                     (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
2350                         ret = bkey_err(k2);
2351                         if (ret) {
2352                                 k = k2;
2353                                 bch2_btree_iter_set_pos(iter, iter->pos);
2354                                 goto out;
2355                         }
2356
2357                         k = k2;
2358                         iter->k = *k.k;
2359                 }
2360
2361                 if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL))
2362                         k = btree_trans_peek_journal(trans, iter, k);
2363
2364                 next_update = iter->flags & BTREE_ITER_WITH_UPDATES
2365                         ? btree_trans_peek_updates(trans, iter->btree_id, search_key)
2366                         : NULL;
2367                 if (next_update &&
2368                     bpos_cmp(next_update->k.p,
2369                              k.k ? k.k->p : iter->path->l[0].b->key.k.p) <= 0) {
2370                         iter->k = next_update->k;
2371                         k = bkey_i_to_s_c(next_update);
2372                 }
2373
2374                 if (k.k && bkey_deleted(k.k)) {
2375                         /*
2376                          * If we've got a whiteout, and it's after the search
2377                          * key, advance the search key to the whiteout instead
2378                          * of just after the whiteout - it might be a btree
2379                          * whiteout, with a real key at the same position, since
2380                          * in the btree deleted keys sort before non-deleted keys.
2381                          */
2382                         search_key = bpos_cmp(search_key, k.k->p)
2383                                 ? k.k->p
2384                                 : bpos_successor(k.k->p);
2385                         continue;
2386                 }
2387
2388                 if (likely(k.k)) {
2389                         break;
2390                 } else if (likely(bpos_cmp(iter->path->l[0].b->key.k.p, SPOS_MAX))) {
2391                         /* Advance to next leaf node: */
2392                         search_key = bpos_successor(iter->path->l[0].b->key.k.p);
2393                 } else {
2394                         /* End of btree: */
2395                         bch2_btree_iter_set_pos(iter, SPOS_MAX);
2396                         k = bkey_s_c_null;
2397                         goto out;
2398                 }
2399         }
2400 out:
2401         bch2_btree_iter_verify(iter);
2402
2403         return k;
2404 }
2405
2406 /**
2407  * bch2_btree_iter_peek_upto: returns first key greater than or equal to
2408  * iterator's current position, up to and including @end
2409  */
2410 struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end)
2411 {
2412         struct btree_trans *trans = iter->trans;
2413         struct bpos search_key = btree_iter_search_key(iter);
2414         struct bkey_s_c k;
2415         struct bpos iter_pos;
2416         int ret;
2417
2418         EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS);
2419
2420         if (iter->update_path) {
2421                 bch2_path_put(trans, iter->update_path,
2422                               iter->flags & BTREE_ITER_INTENT);
2423                 iter->update_path = NULL;
2424         }
2425
2426         bch2_btree_iter_verify_entry_exit(iter);
2427
2428         while (1) {
2429                 k = __bch2_btree_iter_peek(iter, search_key);
2430                 if (!k.k || bkey_err(k))
2431                         goto out_no_locked;
2432
2433                 /*
2434                  * iter->pos should be monotonically increasing, and always be
2435                  * equal to the key we just returned - except extents can
2436                  * straddle iter->pos:
2437                  */
2438                 if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
2439                         iter_pos = k.k->p;
2440                 else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
2441                         iter_pos = bkey_start_pos(k.k);
2442                 else
2443                         iter_pos = iter->pos;
2444
2445                 if (bkey_cmp(iter_pos, end) > 0) {
2446                         bch2_btree_iter_set_pos(iter, end);
2447                         k = bkey_s_c_null;
2448                         goto out_no_locked;
2449                 }
2450
2451                 if (iter->update_path &&
2452                     bkey_cmp(iter->update_path->pos, k.k->p)) {
2453                         bch2_path_put(trans, iter->update_path,
2454                                       iter->flags & BTREE_ITER_INTENT);
2455                         iter->update_path = NULL;
2456                 }
2457
2458                 if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
2459                     (iter->flags & BTREE_ITER_INTENT) &&
2460                     !(iter->flags & BTREE_ITER_IS_EXTENTS) &&
2461                     !iter->update_path) {
2462                         struct bpos pos = k.k->p;
2463
2464                         if (pos.snapshot < iter->snapshot) {
2465                                 search_key = bpos_successor(k.k->p);
2466                                 continue;
2467                         }
2468
2469                         pos.snapshot = iter->snapshot;
2470
2471                         /*
2472                          * advance, same as on exit for iter->path, but only up
2473                          * to snapshot
2474                          */
2475                         __btree_path_get(iter->path, iter->flags & BTREE_ITER_INTENT);
2476                         iter->update_path = iter->path;
2477
2478                         iter->update_path = bch2_btree_path_set_pos(trans,
2479                                                 iter->update_path, pos,
2480                                                 iter->flags & BTREE_ITER_INTENT,
2481                                                 _THIS_IP_);
2482                 }
2483
2484                 /*
2485                  * We can never have a key in a leaf node at POS_MAX, so
2486                  * we don't have to check these successor() calls:
2487                  */
2488                 if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
2489                     !bch2_snapshot_is_ancestor(trans->c,
2490                                                iter->snapshot,
2491                                                k.k->p.snapshot)) {
2492                         search_key = bpos_successor(k.k->p);
2493                         continue;
2494                 }
2495
2496                 if (bkey_whiteout(k.k) &&
2497                     !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2498                         search_key = bkey_successor(iter, k.k->p);
2499                         continue;
2500                 }
2501
2502                 break;
2503         }
2504
2505         iter->pos = iter_pos;
2506
2507         iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
2508                                 iter->flags & BTREE_ITER_INTENT,
2509                                 btree_iter_ip_allocated(iter));
2510
2511         btree_path_set_should_be_locked(iter->path);
2512 out_no_locked:
2513         if (iter->update_path) {
2514                 if (iter->update_path->uptodate &&
2515                     (ret = bch2_btree_path_relock(trans, iter->update_path, _THIS_IP_)))
2516                         k = bkey_s_c_err(ret);
2517                 else
2518                         btree_path_set_should_be_locked(iter->update_path);
2519         }
2520
2521         if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
2522                 iter->pos.snapshot = iter->snapshot;
2523
2524         ret = bch2_btree_iter_verify_ret(iter, k);
2525         if (unlikely(ret)) {
2526                 bch2_btree_iter_set_pos(iter, iter->pos);
2527                 k = bkey_s_c_err(ret);
2528         }
2529
2530         bch2_btree_iter_verify_entry_exit(iter);
2531
2532         return k;
2533 }
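
/*
 * A sketch of iterating keys up to a position with
 * bch2_btree_iter_peek_upto() - the for_each_btree_key*() helpers in
 * btree_iter.h wrap this same pattern. trans, start and end assumed from
 * context:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret = 0;
 *
 *	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents, start, 0);
 *
 *	while (1) {
 *		k = bch2_btree_iter_peek_upto(&iter, end);
 *		ret = bkey_err(k);
 *		if (ret || !k.k)
 *			break;
 *		(use k)
 *		bch2_btree_iter_advance(&iter);
 *	}
 *
 *	bch2_trans_iter_exit(&trans, &iter);
 */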
2534
2535 /**
2536  * bch2_btree_iter_peek_all_levels: returns the first key greater than or equal
2537  * to iterator's current position, returning keys from every level of the btree.
2538  * For keys at different levels of the btree that compare equal, the key from
2539  * the lower level (leaf) is returned first.
2540  */
2541 struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
2542 {
2543         struct btree_trans *trans = iter->trans;
2544         struct bkey_s_c k;
2545         int ret;
2546
2547         EBUG_ON(iter->path->cached);
2548         bch2_btree_iter_verify(iter);
2549         BUG_ON(iter->path->level < iter->min_depth);
2550         BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
2551         EBUG_ON(!(iter->flags & BTREE_ITER_ALL_LEVELS));
2552
2553         while (1) {
2554                 iter->path = bch2_btree_path_set_pos(trans, iter->path, iter->pos,
2555                                         iter->flags & BTREE_ITER_INTENT,
2556                                         btree_iter_ip_allocated(iter));
2557
2558                 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2559                 if (unlikely(ret)) {
2560                         /* ensure that iter->k is consistent with iter->pos: */
2561                         bch2_btree_iter_set_pos(iter, iter->pos);
2562                         k = bkey_s_c_err(ret);
2563                         goto out_no_locked;
2564                 }
2565
2566                 /* Already at end? */
2567                 if (!btree_path_node(iter->path, iter->path->level)) {
2568                         k = bkey_s_c_null;
2569                         goto out_no_locked;
2570                 }
2571
2572                 k = btree_path_level_peek_all(trans->c,
2573                                 &iter->path->l[iter->path->level], &iter->k);
2574
2575                 /* Check if we should go up to the parent node: */
2576                 if (!k.k ||
2577                     (iter->advanced &&
2578                      !bpos_cmp(path_l(iter->path)->b->key.k.p, iter->pos))) {
2579                         iter->pos = path_l(iter->path)->b->key.k.p;
2580                         btree_path_set_level_up(trans, iter->path);
2581                         iter->advanced = false;
2582                         continue;
2583                 }
2584
2585                 /*
2586                  * Check if we should go back down to a leaf:
2587                  * If we're not in a leaf node, we only return the current key
2588                  * if it exactly matches iter->pos - otherwise we first have to
2589                  * go back to the leaf:
2590                  */
2591                 if (iter->path->level != iter->min_depth &&
2592                     (iter->advanced ||
2593                      !k.k ||
2594                      bpos_cmp(iter->pos, k.k->p))) {
2595                         btree_path_set_level_down(trans, iter->path, iter->min_depth);
2596                         iter->pos = bpos_successor(iter->pos);
2597                         iter->advanced = false;
2598                         continue;
2599                 }
2600
2601                 /* Check if we should go to the next key: */
2602                 if (iter->path->level == iter->min_depth &&
2603                     iter->advanced &&
2604                     k.k &&
2605                     !bpos_cmp(iter->pos, k.k->p)) {
2606                         iter->pos = bpos_successor(iter->pos);
2607                         iter->advanced = false;
2608                         continue;
2609                 }
2610
2611                 if (iter->advanced &&
2612                     iter->path->level == iter->min_depth &&
2613                     bpos_cmp(k.k->p, iter->pos))
2614                         iter->advanced = false;
2615
2616                 BUG_ON(iter->advanced);
2617                 BUG_ON(!k.k);
2618                 break;
2619         }
2620
2621         iter->pos = k.k->p;
2622         btree_path_set_should_be_locked(iter->path);
2623 out_no_locked:
2624         bch2_btree_iter_verify(iter);
2625
2626         return k;
2627 }
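
/*
 * A sketch of consuming bch2_btree_iter_peek_all_levels(), assuming a node
 * iterator initialized at depth 0 with the flags the asserts above require;
 * with BTREE_ITER_ALL_LEVELS, bch2_btree_iter_advance() just sets
 * iter.advanced:
 *
 *	bch2_trans_node_iter_init(&trans, &iter, btree_id, POS_MIN, 0, 0,
 *				  BTREE_ITER_ALL_LEVELS|
 *				  BTREE_ITER_ALL_SNAPSHOTS);
 *	while (1) {
 *		k = bch2_btree_iter_peek_all_levels(&iter);
 *		ret = bkey_err(k);
 *		if (ret || !k.k)
 *			break;
 *		(iter.path->level is the level k came from)
 *		bch2_btree_iter_advance(&iter);
 *	}
 *	bch2_trans_iter_exit(&trans, &iter);
 */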
2628
2629 /**
2630  * bch2_btree_iter_next: returns first key greater than iterator's current
2631  * position
2632  */
2633 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
2634 {
2635         if (!bch2_btree_iter_advance(iter))
2636                 return bkey_s_c_null;
2637
2638         return bch2_btree_iter_peek(iter);
2639 }
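
/*
 * Forward iteration is normally written with the for_each_btree_key() macro
 * from btree_iter.h, which expands to roughly the peek()/next() loop these
 * functions implement - a sketch, assuming trans is initialized:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret;
 *
 *	for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
 *			   BTREE_ITER_PREFETCH, k, ret) {
 *		(use k)
 *	}
 *	bch2_trans_iter_exit(&trans, &iter);
 */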
2640
2641 /**
2642  * bch2_btree_iter_peek_prev: returns first key less than or equal to
2643  * iterator's current position
2644  */
2645 struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
2646 {
2647         struct btree_trans *trans = iter->trans;
2648         struct bpos search_key = iter->pos;
2649         struct btree_path *saved_path = NULL;
2650         struct bkey_s_c k;
2651         struct bkey saved_k;
2652         const struct bch_val *saved_v;
2653         int ret;
2654
2655         EBUG_ON(iter->path->cached || iter->path->level);
2656         EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);
2657
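        /* peeking backwards with journal keys overlaid is not supported: */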
2658         if (iter->flags & BTREE_ITER_WITH_JOURNAL)
2659                 return bkey_s_c_err(-EIO);
2660
2661         bch2_btree_iter_verify(iter);
2662         bch2_btree_iter_verify_entry_exit(iter);
2663
2664         if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2665                 search_key.snapshot = U32_MAX;
2666
2667         while (1) {
2668                 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2669                                                 iter->flags & BTREE_ITER_INTENT,
2670                                                 btree_iter_ip_allocated(iter));
2671
2672                 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2673                 if (unlikely(ret)) {
2674                         /* ensure that iter->k is consistent with iter->pos: */
2675                         bch2_btree_iter_set_pos(iter, iter->pos);
2676                         k = bkey_s_c_err(ret);
2677                         goto out_no_locked;
2678                 }
2679
2680                 k = btree_path_level_peek(trans, iter->path,
2681                                           &iter->path->l[0], &iter->k);
2682                 if (!k.k ||
2683                     ((iter->flags & BTREE_ITER_IS_EXTENTS)
2684                      ? bpos_cmp(bkey_start_pos(k.k), search_key) >= 0
2685                      : bpos_cmp(k.k->p, search_key) > 0))
2686                         k = btree_path_level_prev(trans, iter->path,
2687                                                   &iter->path->l[0], &iter->k);
2688
2689                 bch2_btree_path_check_sort(trans, iter->path, 0);
2690
2691                 if (likely(k.k)) {
2692                         if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
2693                                 if (k.k->p.snapshot == iter->snapshot)
2694                                         goto got_key;
2695
2696                                 /*
2697                                  * If we have a saved candidate, and we're no
2698                                  * longer at the same _key_ (not pos), return
2699                                  * that candidate
2700                                  */
2701                                 if (saved_path && bkey_cmp(k.k->p, saved_k.p)) {
2702                                         bch2_path_put(trans, iter->path,
2703                                                       iter->flags & BTREE_ITER_INTENT);
2704                                         iter->path = saved_path;
2705                                         saved_path = NULL;
2706                                         iter->k = saved_k;
2707                                         k.v     = saved_v;
2708                                         goto got_key;
2709                                 }
2710
2711                                 if (bch2_snapshot_is_ancestor(iter->trans->c,
2712                                                               iter->snapshot,
2713                                                               k.k->p.snapshot)) {
2714                                         if (saved_path)
2715                                                 bch2_path_put(trans, saved_path,
2716                                                       iter->flags & BTREE_ITER_INTENT);
2717                                         saved_path = btree_path_clone(trans, iter->path,
2718                                                                 iter->flags & BTREE_ITER_INTENT);
2719                                         saved_k = *k.k;
2720                                         saved_v = k.v;
2721                                 }
2722
2723                                 search_key = bpos_predecessor(k.k->p);
2724                                 continue;
2725                         }
2726 got_key:
2727                         if (bkey_whiteout(k.k) &&
2728                             !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2729                                 search_key = bkey_predecessor(iter, k.k->p);
2730                                 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2731                                         search_key.snapshot = U32_MAX;
2732                                 continue;
2733                         }
2734
2735                         break;
2736                 } else if (likely(bpos_cmp(iter->path->l[0].b->data->min_key, POS_MIN))) {
2737                         /* Advance to previous leaf node: */
2738                         search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
2739                 } else {
2740                         /* Start of btree: */
2741                         bch2_btree_iter_set_pos(iter, POS_MIN);
2742                         k = bkey_s_c_null;
2743                         goto out_no_locked;
2744                 }
2745         }
2746
2747         EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
2748
2749         /* Extents can straddle iter->pos: */
2750         if (bkey_cmp(k.k->p, iter->pos) < 0)
2751                 iter->pos = k.k->p;
2752
2753         if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2754                 iter->pos.snapshot = iter->snapshot;
2755
2756         btree_path_set_should_be_locked(iter->path);
2757 out_no_locked:
2758         if (saved_path)
2759                 bch2_path_put(trans, saved_path, iter->flags & BTREE_ITER_INTENT);
2760
2761         bch2_btree_iter_verify_entry_exit(iter);
2762         bch2_btree_iter_verify(iter);
2763
2764         return k;
2765 }
2766
2767 /**
2768  * bch2_btree_iter_prev: returns first key less than iterator's current
2769  * position
2770  */
2771 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
2772 {
2773         if (!bch2_btree_iter_rewind(iter))
2774                 return bkey_s_c_null;
2775
2776         return bch2_btree_iter_peek_prev(iter);
2777 }
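
/*
 * A sketch of scanning backwards with the two functions above - trans and
 * inum assumed from context:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret;
 *
 *	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
 *			     POS(inum, U64_MAX), 0);
 *
 *	for (k = bch2_btree_iter_peek_prev(&iter);
 *	     !(ret = bkey_err(k)) && k.k;
 *	     k = bch2_btree_iter_prev(&iter)) {
 *		(use k)
 *	}
 *
 *	bch2_trans_iter_exit(&trans, &iter);
 */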
2778
2779 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
2780 {
2781         struct btree_trans *trans = iter->trans;
2782         struct bpos search_key;
2783         struct bkey_s_c k;
2784         int ret;
2785
2786         bch2_btree_iter_verify(iter);
2787         bch2_btree_iter_verify_entry_exit(iter);
2788         EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS);
2789         EBUG_ON(iter->path->level && (iter->flags & BTREE_ITER_WITH_KEY_CACHE));
2790
2791         /* extents can't span inode numbers: */
2792         if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
2793             unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2794                 if (iter->pos.inode == KEY_INODE_MAX)
2795                         return bkey_s_c_null;
2796
2797                 bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
2798         }
2799
2800         search_key = btree_iter_search_key(iter);
2801         iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2802                                         iter->flags & BTREE_ITER_INTENT,
2803                                         btree_iter_ip_allocated(iter));
2804
2805         ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2806         if (unlikely(ret))
2807                 return bkey_s_c_err(ret);
2808
2809         if ((iter->flags & BTREE_ITER_CACHED) ||
2810             !(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
2811                 struct bkey_i *next_update;
2812
2813                 if ((iter->flags & BTREE_ITER_WITH_UPDATES) &&
2814                     (next_update = btree_trans_peek_updates(trans,
2815                                                 iter->btree_id, search_key)) &&
2816                     !bpos_cmp(next_update->k.p, iter->pos)) {
2817                         iter->k = next_update->k;
2818                         k = bkey_i_to_s_c(next_update);
2819                         goto out;
2820                 }
2821
2822                 if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL) &&
2823                     (next_update = bch2_btree_journal_peek_slot(trans,
2824                                         iter, iter->pos))) {
2825                         iter->k = next_update->k;
2826                         k = bkey_i_to_s_c(next_update);
2827                         goto out;
2828                 }
2829
2830                 if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
2831                     (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
2832                         if (bkey_err(k)) {
2833                                 goto out_no_locked;
2834                         } else {
2835                                 iter->k = *k.k;
2836                                 goto out;
2837                         }
2838                 }
2839
2840                 k = bch2_btree_path_peek_slot(iter->path, &iter->k);
2841         } else {
2842                 struct bpos next;
2843
2844                 EBUG_ON(iter->path->level);
2845
2846                 if (iter->flags & BTREE_ITER_INTENT) {
2847                         struct btree_iter iter2;
2848                         struct bpos end = iter->pos;
2849
2850                         if (iter->flags & BTREE_ITER_IS_EXTENTS)
2851                                 end.offset = U64_MAX;
2852
2853                         bch2_trans_copy_iter(&iter2, iter);
2854                         k = bch2_btree_iter_peek_upto(&iter2, end);
2855
2856                         if (k.k && !bkey_err(k)) {
2857                                 iter->k = iter2.k;
2858                                 k.k = &iter->k;
2859                         }
2860                         bch2_trans_iter_exit(trans, &iter2);
2861                 } else {
2862                         struct bpos pos = iter->pos;
2863
2864                         k = bch2_btree_iter_peek(iter);
2865                         iter->pos = pos;
2866                 }
2867
2868                 if (unlikely(bkey_err(k)))
2869                         return k;
2870
2871                 next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2872
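		/*
		 * No key covering iter->pos: synthesize a deleted key
		 * spanning the hole so the slot iterator always returns
		 * something; for extents, size it up to the next key (capped
		 * at the end of the current inode):
		 */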
2873                 if (bkey_cmp(iter->pos, next) < 0) {
2874                         bkey_init(&iter->k);
2875                         iter->k.p = iter->pos;
2876
2877                         if (iter->flags & BTREE_ITER_IS_EXTENTS) {
2878                                 bch2_key_resize(&iter->k,
2879                                                 min_t(u64, KEY_SIZE_MAX,
2880                                                       (next.inode == iter->pos.inode
2881                                                        ? next.offset
2882                                                        : KEY_OFFSET_MAX) -
2883                                                       iter->pos.offset));
2884                                 EBUG_ON(!iter->k.size);
2885                         }
2886
2887                         k = (struct bkey_s_c) { &iter->k, NULL };
2888                 }
2889         }
2890 out:
2891         btree_path_set_should_be_locked(iter->path);
2892 out_no_locked:
2893         bch2_btree_iter_verify_entry_exit(iter);
2894         bch2_btree_iter_verify(iter);
2895         ret = bch2_btree_iter_verify_ret(iter, k);
2896         if (unlikely(ret))
2897                 return bkey_s_c_err(ret);
2898
2899         return k;
2900 }
2901
2902 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
2903 {
2904         if (!bch2_btree_iter_advance(iter))
2905                 return bkey_s_c_null;
2906
2907         return bch2_btree_iter_peek_slot(iter);
2908 }
2909
2910 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
2911 {
2912         if (!bch2_btree_iter_rewind(iter))
2913                 return bkey_s_c_null;
2914
2915         return bch2_btree_iter_peek_slot(iter);
2916 }
2917
2918 /* new transactional stuff: */
2919
2920 static inline void btree_path_verify_sorted_ref(struct btree_trans *trans,
2921                                                 struct btree_path *path)
2922 {
2923         EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2924         EBUG_ON(trans->sorted[path->sorted_idx] != path->idx);
2925         EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
2926 }
2927
2928 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2929 {
2930 #ifdef CONFIG_BCACHEFS_DEBUG
2931         unsigned i;
2932
2933         for (i = 0; i < trans->nr_sorted; i++)
2934                 btree_path_verify_sorted_ref(trans, trans->paths + trans->sorted[i]);
2935 #endif
2936 }
2937
2938 static void btree_trans_verify_sorted(struct btree_trans *trans)
2939 {
2940 #ifdef CONFIG_BCACHEFS_DEBUG
2941         struct btree_path *path, *prev = NULL;
2942         unsigned i;
2943
2944         if (!bch2_debug_check_iterators)
2945                 return;
2946
2947         trans_for_each_path_inorder(trans, path, i) {
2948                 if (prev && btree_path_cmp(prev, path) > 0) {
2949                         bch2_dump_trans_paths_updates(trans);
2950                         panic("trans paths out of order!\n");
2951                 }
2952                 prev = path;
2953         }
2954 #endif
2955 }
2956
2957 static inline void btree_path_swap(struct btree_trans *trans,
2958                                    struct btree_path *l, struct btree_path *r)
2959 {
2960         swap(l->sorted_idx, r->sorted_idx);
2961         swap(trans->sorted[l->sorted_idx],
2962              trans->sorted[r->sorted_idx]);
2963
2964         btree_path_verify_sorted_ref(trans, l);
2965         btree_path_verify_sorted_ref(trans, r);
2966 }
2967
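/*
 * Re-sort after a path's comparison key changed: cmp < 0 means the path may
 * have to move towards the front of the sorted list, cmp > 0 towards the
 * back, and cmp == 0 checks both directions. Paths are bubbled one swap at a
 * time, insertion-sort style:
 */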
2968 inline void bch2_btree_path_check_sort(struct btree_trans *trans, struct btree_path *path,
2969                                        int cmp)
2970 {
2971         struct btree_path *n;
2972
2973         if (cmp <= 0) {
2974                 n = prev_btree_path(trans, path);
2975                 if (n && btree_path_cmp(n, path) > 0) {
2976                         do {
2977                                 btree_path_swap(trans, n, path);
2978                                 n = prev_btree_path(trans, path);
2979                         } while (n && btree_path_cmp(n, path) > 0);
2980
2981                         goto out;
2982                 }
2983         }
2984
2985         if (cmp >= 0) {
2986                 n = next_btree_path(trans, path);
2987                 if (n && btree_path_cmp(path, n) > 0) {
2988                         do {
2989                                 btree_path_swap(trans, path, n);
2990                                 n = next_btree_path(trans, path);
2991                         } while (n && btree_path_cmp(path, n) > 0);
2992                 }
2993         }
2994 out:
2995         btree_trans_verify_sorted(trans);
2996 }
2997
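/*
 * trans->sorted[] maps sorted position -> path index, and each path's
 * sorted_idx is its own position in that array; after inserting or removing
 * an entry, every path at or after the affected slot must be renumbered:
 */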
2998 static inline void btree_path_list_remove(struct btree_trans *trans,
2999                                           struct btree_path *path)
3000 {
3001         unsigned i;
3002
3003         EBUG_ON(path->sorted_idx >= trans->nr_sorted);
3004
3005         array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
3006
3007         for (i = path->sorted_idx; i < trans->nr_sorted; i++)
3008                 trans->paths[trans->sorted[i]].sorted_idx = i;
3009
3010         path->sorted_idx = U8_MAX;
3011
3012         btree_trans_verify_sorted_refs(trans);
3013 }
3014
3015 static inline void btree_path_list_add(struct btree_trans *trans,
3016                                        struct btree_path *pos,
3017                                        struct btree_path *path)
3018 {
3019         unsigned i;
3020
3021         btree_trans_verify_sorted_refs(trans);
3022
3023         path->sorted_idx = pos ? pos->sorted_idx + 1 : 0;
3024
3025         if (trans->in_traverse_all &&
3026             trans->traverse_all_idx != U8_MAX &&
3027             trans->traverse_all_idx >= path->sorted_idx)
3028                 trans->traverse_all_idx++;
3029
3030         array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx);
3031
3032         for (i = path->sorted_idx; i < trans->nr_sorted; i++)
3033                 trans->paths[trans->sorted[i]].sorted_idx = i;
3034
3035         btree_trans_verify_sorted_refs(trans);
3036 }
3037
3038 void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
3039 {
3040         if (iter->path)
3041                 bch2_path_put(trans, iter->path,
3042                               iter->flags & BTREE_ITER_INTENT);
3043         if (iter->update_path)
3044                 bch2_path_put(trans, iter->update_path,
3045                               iter->flags & BTREE_ITER_INTENT);
3046         if (iter->key_cache_path)
3047                 bch2_path_put(trans, iter->key_cache_path,
3048                               iter->flags & BTREE_ITER_INTENT);
3049         iter->path = NULL;
3050         iter->update_path = NULL;
3051         iter->key_cache_path = NULL;
3052 }
3053
3054 static void __bch2_trans_iter_init(struct btree_trans *trans,
3055                                    struct btree_iter *iter,
3056                                    unsigned btree_id, struct bpos pos,
3057                                    unsigned locks_want,
3058                                    unsigned depth,
3059                                    unsigned flags,
3060                                    unsigned long ip)
3061 {
3062         EBUG_ON(trans->restarted);
3063
3064         if (flags & BTREE_ITER_ALL_LEVELS)
3065                 flags |= BTREE_ITER_ALL_SNAPSHOTS|__BTREE_ITER_ALL_SNAPSHOTS;
3066
3067         if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
3068             btree_node_type_is_extents(btree_id))
3069                 flags |= BTREE_ITER_IS_EXTENTS;
3070
3071         if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
3072             !btree_type_has_snapshots(btree_id))
3073                 flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
3074
3075         if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
3076             btree_type_has_snapshots(btree_id))
3077                 flags |= BTREE_ITER_FILTER_SNAPSHOTS;
3078
3079         if (!test_bit(JOURNAL_REPLAY_DONE, &trans->c->journal.flags))
3080                 flags |= BTREE_ITER_WITH_JOURNAL;
3081
3082         iter->trans     = trans;
3083         iter->path      = NULL;
3084         iter->update_path = NULL;
3085         iter->key_cache_path = NULL;
3086         iter->btree_id  = btree_id;
3087         iter->min_depth = depth;
3088         iter->flags     = flags;
3089         iter->snapshot  = pos.snapshot;
3090         iter->pos       = pos;
3091         iter->k.type    = KEY_TYPE_deleted;
3092         iter->k.p       = pos;
3093         iter->k.size    = 0;
3094         iter->journal_idx = 0;
3095         iter->journal_pos = POS_MIN;
3096 #ifdef CONFIG_BCACHEFS_DEBUG
3097         iter->ip_allocated = ip;
3098 #endif
3099
3100         iter->path = bch2_path_get(trans, btree_id, iter->pos,
3101                                    locks_want, depth, flags, ip);
3102 }
3103
3104 void bch2_trans_iter_init(struct btree_trans *trans,
3105                           struct btree_iter *iter,
3106                           unsigned btree_id, struct bpos pos,
3107                           unsigned flags)
3108 {
3109         if (!btree_id_cached(trans->c, btree_id)) {
3110                 flags &= ~BTREE_ITER_CACHED;
3111                 flags &= ~BTREE_ITER_WITH_KEY_CACHE;
3112         } else if (!(flags & BTREE_ITER_CACHED))
3113                 flags |= BTREE_ITER_WITH_KEY_CACHE;
3114
3115         __bch2_trans_iter_init(trans, iter, btree_id, pos,
3116                                0, 0, flags, _RET_IP_);
3117 }
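
/*
 * Example (editor's sketch): a point lookup using the interface above, from
 * inside a transaction; the btree ID, key type and error handling are
 * illustrative only:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret;
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos, 0);
 *	k = bch2_btree_iter_peek_slot(&iter);
 *	ret = bkey_err(k);
 *	if (!ret && k.k->type == KEY_TYPE_xattr)
 *		ret = process_xattr(k);	(hypothetical)
 *	bch2_trans_iter_exit(trans, &iter);
 */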
3118
3119 void bch2_trans_node_iter_init(struct btree_trans *trans,
3120                                struct btree_iter *iter,
3121                                enum btree_id btree_id,
3122                                struct bpos pos,
3123                                unsigned locks_want,
3124                                unsigned depth,
3125                                unsigned flags)
3126 {
3127         __bch2_trans_iter_init(trans, iter, btree_id, pos, locks_want, depth,
3128                                BTREE_ITER_NOT_EXTENTS|
3129                                __BTREE_ITER_ALL_SNAPSHOTS|
3130                                BTREE_ITER_ALL_SNAPSHOTS|
3131                                flags, _RET_IP_);
3132         BUG_ON(iter->path->locks_want    < min(locks_want, BTREE_MAX_DEPTH));
3133         BUG_ON(iter->path->level        != depth);
3134         BUG_ON(iter->min_depth          != depth);
3135 }
3136
3137 void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
3138 {
3139         *dst = *src;
3140         if (src->path)
3141                 __btree_path_get(src->path, src->flags & BTREE_ITER_INTENT);
3142         if (src->update_path)
3143                 __btree_path_get(src->update_path, src->flags & BTREE_ITER_INTENT);
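	/*
	 * No ref is taken on src's key_cache_path, so the copy must not point
	 * at it - it'll be set up again on demand:
	 */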
3144         dst->key_cache_path = NULL;
3145 }
3146
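/*
 * Allocate from the transaction's memory pool, a bump allocator: allocations
 * are freed all at once when the transaction is reset or exits, never
 * individually. Growing the buffer while the transaction may hold pointers
 * into it forces a transaction restart, so callers must check for errors
 * (editor's sketch):
 *
 *	p = bch2_trans_kmalloc(trans, sizeof(*p));
 *	ret = PTR_ERR_OR_ZERO(p);
 *	if (ret)
 *		return ret;  (may be a transaction restart)
 */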
3147 void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
3148 {
3149         size_t new_top = trans->mem_top + size;
3150         void *p;
3151
3152         if (new_top > trans->mem_bytes) {
3153                 size_t old_bytes = trans->mem_bytes;
3154                 size_t new_bytes = roundup_pow_of_two(new_top);
3155                 void *new_mem;
3156
3157                 WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
3158
3159                 new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
3160                 if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
3161                         new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
3162                         new_bytes = BTREE_TRANS_MEM_MAX;
3163                         kfree(trans->mem);
3164                 }
3165
3166                 if (!new_mem)
3167                         return ERR_PTR(-ENOMEM);
3168
3169                 trans->mem = new_mem;
3170                 trans->mem_bytes = new_bytes;
3171
3172                 if (old_bytes) {
3173                         trace_trans_restart_mem_realloced(trans, _RET_IP_, new_bytes);
3174                         return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
3175                 }
3176         }
3177
3178         p = trans->mem + trans->mem_top;
3179         trans->mem_top += size;
3180         memset(p, 0, size);
3181         return p;
3182 }
3183
/**
 * bch2_trans_begin() - reset a transaction after an interrupted attempt
 * @trans: transaction to reset
 *
 * While iterating over nodes or updating nodes, an attempt to lock a btree
 * node may return BCH_ERR_transaction_restart when the trylock fails. When
 * this occurs, bch2_trans_begin() should be called and the transaction
 * retried.
 */
3192 u32 bch2_trans_begin(struct btree_trans *trans)
3193 {
3194         struct btree_path *path;
3195
3196         bch2_trans_reset_updates(trans);
3197
3198         trans->mem_top                  = 0;
3199
3200         if (trans->fs_usage_deltas) {
3201                 trans->fs_usage_deltas->used = 0;
3202                 memset((void *) trans->fs_usage_deltas +
3203                        offsetof(struct replicas_delta_list, memset_start), 0,
3204                        (void *) &trans->fs_usage_deltas->memset_end -
3205                        (void *) &trans->fs_usage_deltas->memset_start);
3206         }
3207
3208         trans_for_each_path(trans, path) {
3209                 path->should_be_locked = false;
3210
		/*
		 * If the transaction wasn't restarted, we're presuming to be
		 * doing something new: don't keep iterators except the ones
		 * that are in use - and, as a special case, paths in the
		 * subvolumes btree:
		 */
3216                 if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
3217                         path->preserve = false;
3218
3219                 /*
3220                  * XXX: we probably shouldn't be doing this if the transaction
3221                  * was restarted, but currently we still overflow transaction
3222                  * iterators if we do that
3223                  */
3224                 if (!path->ref && !path->preserve)
3225                         __bch2_path_free(trans, path);
3226                 else
3227                         path->preserve = false;
3228         }
3229
3230         if (!trans->restarted &&
3231             (need_resched() ||
3232              ktime_get_ns() - trans->last_begin_time > BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS)) {
3233                 bch2_trans_unlock(trans);
3234                 cond_resched();
3235                 bch2_trans_relock(trans);
3236         }
3237
3238         trans->last_restarted_ip = _RET_IP_;
3239         if (trans->restarted)
3240                 bch2_btree_path_traverse_all(trans);
3241
3242         trans->last_begin_time = ktime_get_ns();
3243         return trans->restart_count;
3244 }
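
/*
 * Example (editor's sketch): the canonical retry loop around
 * bch2_trans_begin(); in-tree code usually uses the lockrestart_do()/
 * commit_do() wrappers instead of open coding this:
 *
 *	int ret;
 *
 *	do {
 *		bch2_trans_begin(trans);
 *		ret = do_work(trans);	(hypothetical transactional op)
 *	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
 */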
3245
3246 void bch2_trans_verify_not_restarted(struct btree_trans *trans, u32 restart_count)
3247 {
3248         bch2_trans_inconsistent_on(trans_was_restarted(trans, restart_count), trans,
3249                 "trans->restart_count %u, should be %u, last restarted by %ps\n",
3250                 trans->restart_count, restart_count,
3251                 (void *) trans->last_restarted_ip);
3252 }
3253
3254 static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c)
3255 {
3256         size_t paths_bytes      = sizeof(struct btree_path) * BTREE_ITER_MAX;
3257         size_t updates_bytes    = sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
3258         void *p = NULL;
3259
3260         BUG_ON(trans->used_mempool);
3261
3262 #ifdef __KERNEL__
	p = this_cpu_xchg(c->btree_paths_bufs->path, NULL);
3264 #endif
3265         if (!p)
3266                 p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);
3267
3268         trans->paths            = p; p += paths_bytes;
3269         trans->updates          = p; p += updates_bytes;
3270 }
3271
3272 void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
3273                        unsigned expected_nr_iters,
3274                        size_t expected_mem_bytes,
3275                        const char *fn)
3276         __acquires(&c->btree_trans_barrier)
3277 {
3278         struct btree_trans *pos;
3279
3280         BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
3281
3282         memset(trans, 0, sizeof(*trans));
3283         trans->c                = c;
3284         trans->fn               = fn;
3285         trans->last_begin_time  = ktime_get_ns();
3286         trans->task             = current;
3287
	while (trans->lock_name_idx < BCH_LOCK_TIME_NR &&
	       c->lock_held_stats.names[trans->lock_name_idx] != fn &&
	       c->lock_held_stats.names[trans->lock_name_idx] != NULL)
		trans->lock_name_idx++;
3291
3292         if (trans->lock_name_idx >= BCH_LOCK_TIME_NR)
3293                 pr_warn_once("lock_times array not big enough!");
3294         else
3295                 c->lock_held_stats.names[trans->lock_name_idx] = fn;
3296
3297         bch2_trans_alloc_paths(trans, c);
3298
3299         if (expected_mem_bytes) {
3300                 trans->mem_bytes = roundup_pow_of_two(expected_mem_bytes);
3301                 trans->mem = kmalloc(trans->mem_bytes, GFP_KERNEL|__GFP_NOFAIL);
3302
		if (unlikely(!trans->mem)) {
3304                         trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
3305                         trans->mem_bytes = BTREE_TRANS_MEM_MAX;
3306                 }
3307         }
3308
3309         trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
3310
3311         mutex_lock(&c->btree_trans_lock);
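	/* Keep btree_trans_list sorted by pid, for debug output: */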
3312         list_for_each_entry(pos, &c->btree_trans_list, list) {
3313                 if (trans->task->pid < pos->task->pid) {
3314                         list_add_tail(&trans->list, &pos->list);
3315                         goto list_add_done;
3316                 }
3317         }
3318         list_add_tail(&trans->list, &c->btree_trans_list);
3319 list_add_done:
3320         mutex_unlock(&c->btree_trans_lock);
3321 }
3322
3323 static void check_btree_paths_leaked(struct btree_trans *trans)
3324 {
3325 #ifdef CONFIG_BCACHEFS_DEBUG
3326         struct bch_fs *c = trans->c;
3327         struct btree_path *path;
3328
3329         trans_for_each_path(trans, path)
3330                 if (path->ref)
3331                         goto leaked;
3332         return;
3333 leaked:
3334         bch_err(c, "btree paths leaked from %s!", trans->fn);
3335         trans_for_each_path(trans, path)
3336                 if (path->ref)
3337                         printk(KERN_ERR "  btree %s %pS\n",
3338                                bch2_btree_ids[path->btree_id],
3339                                (void *) path->ip_allocated);
3340         /* Be noisy about this: */
3341         bch2_fatal_error(c);
3342 #endif
3343 }
3344
3345 void bch2_trans_exit(struct btree_trans *trans)
3346         __releases(&c->btree_trans_barrier)
3347 {
3348         struct btree_insert_entry *i;
3349         struct bch_fs *c = trans->c;
3350
3351         bch2_trans_unlock(trans);
3352
3353         trans_for_each_update(trans, i)
3354                 __btree_path_put(i->path, true);
3355         trans->nr_updates               = 0;
3356
3357         check_btree_paths_leaked(trans);
3358
3359         mutex_lock(&c->btree_trans_lock);
3360         list_del(&trans->list);
3361         mutex_unlock(&c->btree_trans_lock);
3362
3363         srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3364
3365         bch2_journal_preres_put(&c->journal, &trans->journal_preres);
3366
3367         kfree(trans->extra_journal_entries.data);
3368
3369         if (trans->fs_usage_deltas) {
3370                 if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
3371                     REPLICAS_DELTA_LIST_MAX)
3372                         mempool_free(trans->fs_usage_deltas,
3373                                      &c->replicas_delta_pool);
3374                 else
3375                         kfree(trans->fs_usage_deltas);
3376         }
3377
3378         if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
3379                 mempool_free(trans->mem, &c->btree_trans_mem_pool);
3380         else
3381                 kfree(trans->mem);
3382
3383 #ifdef __KERNEL__
3384         /*
3385          * Userspace doesn't have a real percpu implementation:
3386          */
3387         trans->paths = this_cpu_xchg(c->btree_paths_bufs->path, trans->paths);
3388 #endif
3389
3390         if (trans->paths)
3391                 mempool_free(trans->paths, &c->btree_paths_pool);
3392
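	/* Poison, to catch use after bch2_trans_exit(): */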
3393         trans->mem      = (void *) 0x1;
3394         trans->paths    = (void *) 0x1;
3395 }
3396
3397 static void __maybe_unused
3398 bch2_btree_path_node_to_text(struct printbuf *out,
3399                              struct btree_bkey_cached_common *b,
3400                              bool cached)
3401 {
3402         prt_printf(out, "    l=%u %s:",
3403                b->level, bch2_btree_ids[b->btree_id]);
3404         bch2_bpos_to_text(out, btree_node_pos(b, cached));
3405 }
3406
3407 void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
3408 {
3409         struct btree_path *path;
3410         struct btree_bkey_cached_common *b;
	static const char lock_types[] = { 'r', 'i', 'w' };
3412         unsigned l;
3413
3414         prt_printf(out, "%i %s\n", trans->task->pid, trans->fn);
3415
3416         trans_for_each_path(trans, path) {
3417                 if (!path->nodes_locked)
3418                         continue;
3419
3420                 prt_printf(out, "  path %u %c l=%u %s:",
3421                        path->idx,
3422                        path->cached ? 'c' : 'b',
3423                        path->level,
3424                        bch2_btree_ids[path->btree_id]);
3425                 bch2_bpos_to_text(out, path->pos);
3426                 prt_printf(out, "\n");
3427
3428                 for (l = 0; l < BTREE_MAX_DEPTH; l++) {
3429                         if (btree_node_locked(path, l) &&
3430                             !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
3431                                 prt_printf(out, "    %s l=%u ",
3432                                        btree_node_intent_locked(path, l) ? "i" : "r", l);
3433                                 bch2_btree_path_node_to_text(out, b, path->cached);
3434                                 prt_printf(out, "\n");
3435                         }
3436                 }
3437         }
3438
3439         b = READ_ONCE(trans->locking);
3440         if (b) {
3441                 path = &trans->paths[trans->locking_path_idx];
3442                 prt_printf(out, "  locking path %u %c l=%u %c %s:",
3443                        trans->locking_path_idx,
3444                        path->cached ? 'c' : 'b',
3445                        trans->locking_level,
3446                        lock_types[trans->locking_lock_type],
3447                        bch2_btree_ids[trans->locking_btree_id]);
3448                 bch2_bpos_to_text(out, trans->locking_pos);
3449
3450                 prt_printf(out, " node ");
3451                 bch2_btree_path_node_to_text(out, b, path->cached);
3452                 prt_printf(out, "\n");
3453         }
3454 }
3455
3456 void bch2_fs_btree_iter_exit(struct bch_fs *c)
3457 {
3458         if (c->btree_trans_barrier_initialized)
3459                 cleanup_srcu_struct(&c->btree_trans_barrier);
3460         mempool_exit(&c->btree_trans_mem_pool);
3461         mempool_exit(&c->btree_paths_pool);
3462 }
3463
3464 int bch2_fs_btree_iter_init(struct bch_fs *c)
3465 {
3466         unsigned nr = BTREE_ITER_MAX;
3467         int ret;
3468
3469         INIT_LIST_HEAD(&c->btree_trans_list);
3470         mutex_init(&c->btree_trans_lock);
3471
3472         ret   = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
3473                         sizeof(struct btree_path) * nr +
3474                         sizeof(struct btree_insert_entry) * nr) ?:
3475                 mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
3476                                           BTREE_TRANS_MEM_MAX) ?:
3477                 init_srcu_struct(&c->btree_trans_barrier);
3478         if (!ret)
3479                 c->btree_trans_barrier_initialized = true;
3480         return ret;
3481 }