libbcachefs/btree_iter.c
1 // SPDX-License-Identifier: GPL-2.0
2
3 #include "bcachefs.h"
4 #include "bkey_methods.h"
5 #include "bkey_buf.h"
6 #include "btree_cache.h"
7 #include "btree_iter.h"
8 #include "btree_key_cache.h"
9 #include "btree_locking.h"
10 #include "btree_update.h"
11 #include "debug.h"
12 #include "error.h"
13 #include "extents.h"
14 #include "journal.h"
15 #include "replicas.h"
16
17 #include <linux/prefetch.h>
18 #include <trace/events/bcachefs.h>
19
20 static void btree_iter_set_search_pos(struct btree_iter *, struct bpos);
21 static void btree_trans_sort_iters(struct btree_trans *);
22 static void btree_iter_check_sort(struct btree_trans *, struct btree_iter *);
23 static struct btree_iter *btree_iter_child_alloc(struct btree_iter *, unsigned long);
24 static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *,
25                                                  struct btree_iter *);
26 static void btree_iter_copy(struct btree_iter *, struct btree_iter *);
27
28 static inline int btree_iter_cmp(const struct btree_iter *l,
29                                  const struct btree_iter *r)
30 {
31         return   cmp_int(l->btree_id, r->btree_id) ?:
32                 -cmp_int(btree_iter_is_cached(l), btree_iter_is_cached(r)) ?:
33                  bkey_cmp(l->real_pos, r->real_pos);
34 }
35
36 static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
37 {
38         EBUG_ON(btree_iter_type(iter) == BTREE_ITER_NODES);
39
40         /* Are we iterating over keys in all snapshots? */
41         if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
42                 p = bpos_successor(p);
43         } else {
44                 p = bpos_nosnap_successor(p);
45                 p.snapshot = iter->snapshot;
46         }
47
48         return p;
49 }
50
51 static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
52 {
53         EBUG_ON(btree_iter_type(iter) == BTREE_ITER_NODES);
54
55         /* Are we iterating over keys in all snapshots? */
56         if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
57                 p = bpos_predecessor(p);
58         } else {
59                 p = bpos_nosnap_predecessor(p);
60                 p.snapshot = iter->snapshot;
61         }
62
63         return p;
64 }
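/*
 * Illustrative note (editor's sketch, not upstream documentation): with
 * BTREE_ITER_ALL_SNAPSHOTS the snapshot field participates in iteration, so
 * the successor of (inode 1, offset 10, snapshot 3) is (1, 10, 4).  Without
 * it we step to the next (inode, offset) and pin the snapshot field back to
 * iter->snapshot:
 *
 *	p = bpos_nosnap_successor(p);	// (1, 10, x) -> (1, 11, ...)
 *	p.snapshot = iter->snapshot;	// snapshot pinned to the iterator's
 */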
65
66 static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
67 {
68         return l < BTREE_MAX_DEPTH &&
69                 (unsigned long) iter->l[l].b >= 128;
70 }
71
72 static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
73 {
74         struct bpos pos = iter->pos;
75
76         if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
77             bkey_cmp(pos, POS_MAX))
78                 pos = bkey_successor(iter, pos);
79         return pos;
80 }
81
82 static inline bool btree_iter_pos_before_node(struct btree_iter *iter,
83                                               struct btree *b)
84 {
85         return bpos_cmp(iter->real_pos, b->data->min_key) < 0;
86 }
87
88 static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
89                                              struct btree *b)
90 {
91         return bpos_cmp(b->key.k.p, iter->real_pos) < 0;
92 }
93
94 static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
95                                           struct btree *b)
96 {
97         return iter->btree_id == b->c.btree_id &&
98                 !btree_iter_pos_before_node(iter, b) &&
99                 !btree_iter_pos_after_node(iter, b);
100 }
101
102 /* Btree node locking: */
103
104 void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
105 {
106         bch2_btree_node_unlock_write_inlined(b, iter);
107 }
108
109 void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
110 {
111         struct btree_iter *linked;
112         unsigned readers = 0;
113
114         EBUG_ON(!btree_node_intent_locked(iter, b->c.level));
115
116         trans_for_each_iter(iter->trans, linked)
117                 if (linked->l[b->c.level].b == b &&
118                     btree_node_read_locked(linked, b->c.level))
119                         readers++;
120
121         /*
122          * Must drop our read locks before calling six_lock_write() -
123          * six_unlock() won't do wakeups until the reader count
124          * goes to 0, and it's safe because we have the node intent
125          * locked:
126          */
127         atomic64_sub(__SIX_VAL(read_lock, readers),
128                      &b->c.lock.state.counter);
129         btree_node_lock_type(iter->trans->c, b, SIX_LOCK_write);
130         atomic64_add(__SIX_VAL(read_lock, readers),
131                      &b->c.lock.state.counter);
132 }
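/*
 * Editor's sketch of what the above is doing (illustrative, not upstream
 * documentation): taking a write lock blocks until the reader count drops to
 * zero, so any read locks this transaction itself holds on @b would deadlock
 * us.  We temporarily subtract our own readers from the lock's counter, take
 * the write lock, then add them back - safe only because we already hold the
 * intent lock, which prevents anyone else from taking a new intent or write
 * lock in the meantime:
 *
 *	counter -= readers;			// hide our own read locks
 *	btree_node_lock_type(c, b, SIX_LOCK_write);
 *	counter += readers;			// restore them
 */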
133
134 bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
135 {
136         struct btree *b = btree_iter_node(iter, level);
137         int want = __btree_lock_want(iter, level);
138
139         if (!is_btree_node(iter, level))
140                 return false;
141
142         if (race_fault())
143                 return false;
144
145         if (six_relock_type(&b->c.lock, want, iter->l[level].lock_seq) ||
146             (btree_node_lock_seq_matches(iter, b, level) &&
147              btree_node_lock_increment(iter->trans, b, level, want))) {
148                 mark_btree_node_locked(iter, level, want);
149                 return true;
150         } else {
151                 return false;
152         }
153 }
154
155 static bool bch2_btree_node_upgrade(struct btree_iter *iter, unsigned level)
156 {
157         struct btree *b = iter->l[level].b;
158
159         EBUG_ON(btree_lock_want(iter, level) != BTREE_NODE_INTENT_LOCKED);
160
161         if (!is_btree_node(iter, level))
162                 return false;
163
164         if (btree_node_intent_locked(iter, level))
165                 return true;
166
167         if (race_fault())
168                 return false;
169
170         if (btree_node_locked(iter, level)
171             ? six_lock_tryupgrade(&b->c.lock)
172             : six_relock_type(&b->c.lock, SIX_LOCK_intent, iter->l[level].lock_seq))
173                 goto success;
174
175         if (btree_node_lock_seq_matches(iter, b, level) &&
176             btree_node_lock_increment(iter->trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
177                 btree_node_unlock(iter, level);
178                 goto success;
179         }
180
181         return false;
182 success:
183         mark_btree_node_intent_locked(iter, level);
184         return true;
185 }
186
187 static inline bool btree_iter_get_locks(struct btree_iter *iter, bool upgrade,
188                                         unsigned long trace_ip)
189 {
190         unsigned l = iter->level;
191         int fail_idx = -1;
192
193         do {
194                 if (!btree_iter_node(iter, l))
195                         break;
196
197                 if (!(upgrade
198                       ? bch2_btree_node_upgrade(iter, l)
199                       : bch2_btree_node_relock(iter, l))) {
200                         (upgrade
201                          ? trace_node_upgrade_fail
202                          : trace_node_relock_fail)(iter->trans->ip, trace_ip,
203                                         btree_iter_type(iter) == BTREE_ITER_CACHED,
204                                         iter->btree_id, &iter->real_pos,
205                                         l, iter->l[l].lock_seq,
206                                         is_btree_node(iter, l)
207                                         ? 0
208                                         : (unsigned long) iter->l[l].b,
209                                         is_btree_node(iter, l)
210                                         ? iter->l[l].b->c.lock.state.seq
211                                         : 0);
212                         fail_idx = l;
213                         btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
214                 }
215
216                 l++;
217         } while (l < iter->locks_want);
218
219         /*
220          * When we fail to get a lock, we have to ensure that any child nodes
221          * can't be relocked, so bch2_btree_iter_traverse() has to walk back up to
222          * the node that we failed to relock:
223          */
224         while (fail_idx >= 0) {
225                 btree_node_unlock(iter, fail_idx);
226                 iter->l[fail_idx].b = BTREE_ITER_NO_NODE_GET_LOCKS;
227                 --fail_idx;
228         }
229
230         if (iter->uptodate == BTREE_ITER_NEED_RELOCK)
231                 iter->uptodate = BTREE_ITER_NEED_PEEK;
232
233         bch2_btree_trans_verify_locks(iter->trans);
234
235         return iter->uptodate < BTREE_ITER_NEED_RELOCK;
236 }
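/*
 * Worked example (editor's illustration): with locks_want == 3, suppose we
 * relock levels 0 and 1 but fail at level 2.  fail_idx ends up at 2 and the
 * loop above then drops levels 2, 1 and 0, marking them
 * BTREE_ITER_NO_NODE_GET_LOCKS - otherwise a later traverse could relock the
 * children underneath a level 2 node we no longer have a consistent view of.
 */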
237
238 static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
239                                   enum btree_iter_type type)
240 {
241         return  type != BTREE_ITER_CACHED
242                 ? container_of(_b, struct btree, c)->key.k.p
243                 : container_of(_b, struct bkey_cached, c)->key.pos;
244 }
245
246 /* Slowpath: */
247 bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
248                             unsigned level, struct btree_iter *iter,
249                             enum six_lock_type type,
250                             six_lock_should_sleep_fn should_sleep_fn, void *p,
251                             unsigned long ip)
252 {
253         struct btree_trans *trans = iter->trans;
254         struct btree_iter *linked, *deadlock_iter = NULL;
255         u64 start_time = local_clock();
256         unsigned reason = 9;
257         bool ret;
258
259         /* Check if it's safe to block: */
260         trans_for_each_iter(trans, linked) {
261                 if (!linked->nodes_locked)
262                         continue;
263
264                 /*
265                  * Can't block taking an intent lock if we have _any_ nodes read
266                  * locked:
267                  *
268                  * - Our read lock blocks another thread with an intent lock on
269                  *   the same node from getting a write lock, and thus from
270                  *   dropping its intent lock
271                  *
272                  * - And the other thread may have multiple nodes intent locked:
273                  *   both the node we want to intent lock, and the node we
274                  *   already have read locked - deadlock:
275                  */
276                 if (type == SIX_LOCK_intent &&
277                     linked->nodes_locked != linked->nodes_intent_locked) {
278                         deadlock_iter = linked;
279                         reason = 1;
280                 }
281
282                 if (linked->btree_id != iter->btree_id) {
283                         if (linked->btree_id > iter->btree_id) {
284                                 deadlock_iter = linked;
285                                 reason = 3;
286                         }
287                         continue;
288                 }
289
290                 /*
291                  * Within the same btree, cached iterators come before
292                  * non-cached iterators:
293                  */
294                 if (btree_iter_is_cached(linked) != btree_iter_is_cached(iter)) {
295                         if (btree_iter_is_cached(iter)) {
296                                 deadlock_iter = linked;
297                                 reason = 4;
298                         }
299                         continue;
300                 }
301
302                 /*
303                  * Interior nodes must be locked before their descendants: if
304                  * another iterator has descendants of the node we're about to
305                  * lock already locked, it must have the ancestors locked too:
306                  */
307                 if (level > __fls(linked->nodes_locked)) {
308                         deadlock_iter = linked;
309                         reason = 5;
310                 }
311
312                 /* Must lock btree nodes in key order: */
313                 if (btree_node_locked(linked, level) &&
314                     bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
315                                                  btree_iter_type(linked))) <= 0) {
316                         deadlock_iter = linked;
317                         reason = 7;
318                         BUG_ON(trans->in_traverse_all);
319                 }
320         }
321
322         if (unlikely(deadlock_iter)) {
323                 trace_trans_restart_would_deadlock(trans->ip, ip,
324                                 trans->in_traverse_all, reason,
325                                 deadlock_iter->btree_id,
326                                 btree_iter_type(deadlock_iter),
327                                 &deadlock_iter->real_pos,
328                                 iter->btree_id,
329                                 btree_iter_type(iter),
330                                 &pos);
331                 btree_trans_restart(trans);
332                 return false;
333         }
334
335         if (six_trylock_type(&b->c.lock, type))
336                 return true;
337
338 #ifdef CONFIG_BCACHEFS_DEBUG
339         trans->locking_iter_idx = iter->idx;
340         trans->locking_pos      = pos;
341         trans->locking_btree_id = iter->btree_id;
342         trans->locking_level    = level;
343         trans->locking          = b;
344 #endif
345
346         ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p) == 0;
347
348 #ifdef CONFIG_BCACHEFS_DEBUG
349         trans->locking = NULL;
350 #endif
351         if (ret)
352                 bch2_time_stats_update(&trans->c->times[lock_to_time_stat(type)],
353                                        start_time);
354         return ret;
355 }
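/*
 * Editor's summary of the ordering enforced above (illustrative, not upstream
 * documentation).  To avoid deadlocks between transactions, node locks must
 * be taken in a globally consistent order:
 *
 *  1. by btree id (reason 3),
 *  2. within a btree, cached iterators before non-cached ones (reason 4),
 *  3. interior nodes before their descendants (reason 5),
 *  4. within a level, in increasing key order (reason 7),
 *
 * and we never block taking an intent lock while holding any read lock
 * (reason 1).  A transaction that would violate the order is restarted:
 *
 *	if (!btree_node_lock(b, pos, level, iter, type, ...))
 *		return -EINTR;	// trans->restarted is now set
 */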
356
357 /* Btree iterator locking: */
358
359 #ifdef CONFIG_BCACHEFS_DEBUG
360 static void bch2_btree_iter_verify_locks(struct btree_iter *iter)
361 {
362         unsigned l;
363
364         if (!(iter->trans->iters_linked & (1ULL << iter->idx))) {
365                 BUG_ON(iter->nodes_locked);
366                 return;
367         }
368
369         for (l = 0; btree_iter_node(iter, l); l++) {
370                 if (iter->uptodate >= BTREE_ITER_NEED_RELOCK &&
371                     !btree_node_locked(iter, l))
372                         continue;
373
374                 BUG_ON(btree_lock_want(iter, l) !=
375                        btree_node_locked_type(iter, l));
376         }
377 }
378
379 void bch2_btree_trans_verify_locks(struct btree_trans *trans)
380 {
381         struct btree_iter *iter;
382
383         trans_for_each_iter(trans, iter)
384                 bch2_btree_iter_verify_locks(iter);
385 }
386 #else
387 static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
388 #endif
389
390 /*
391  * Only for btree_cache.c - only relocks intent locks
392  */
393 bool bch2_btree_iter_relock_intent(struct btree_iter *iter)
394 {
395         unsigned l;
396
397         for (l = iter->level;
398              l < iter->locks_want && btree_iter_node(iter, l);
399              l++) {
400                 if (!bch2_btree_node_relock(iter, l)) {
401                         trace_node_relock_fail(iter->trans->ip, _RET_IP_,
402                                         btree_iter_type(iter) == BTREE_ITER_CACHED,
403                                         iter->btree_id, &iter->real_pos,
404                                         l, iter->l[l].lock_seq,
405                                         is_btree_node(iter, l)
406                                         ? 0
407                                         : (unsigned long) iter->l[l].b,
408                                         is_btree_node(iter, l)
409                                         ? iter->l[l].b->c.lock.state.seq
410                                         : 0);
411                         btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
412                         btree_trans_restart(iter->trans);
413                         return false;
414                 }
415         }
416
417         return true;
418 }
419
420 __flatten
421 bool bch2_btree_iter_relock(struct btree_iter *iter, unsigned long trace_ip)
422 {
423         bool ret = btree_iter_get_locks(iter, false, trace_ip);
424
425         if (!ret)
426                 btree_trans_restart(iter->trans);
427         return ret;
428 }
429
430 bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
431                                unsigned new_locks_want)
432 {
433         struct btree_iter *linked;
434
435         EBUG_ON(iter->locks_want >= new_locks_want);
436
437         iter->locks_want = new_locks_want;
438
439         if (btree_iter_get_locks(iter, true, _THIS_IP_))
440                 return true;
441
442         /*
443          * XXX: this is ugly - we'd prefer to not be mucking with other
444          * iterators in the btree_trans here.
445          *
446          * On failure to upgrade the iterator, setting iter->locks_want and
447          * calling get_locks() is sufficient to make bch2_btree_iter_traverse()
448          * get the locks we want on transaction restart.
449          *
450          * But if this iterator was a clone, on transaction restart what we did
451          * to this iterator isn't going to be preserved.
452          *
453          * Possibly we could add an iterator field for the parent iterator when
454          * an iterator is a copy - for now, we'll just upgrade any other
455          * iterators with the same btree id.
456          *
457          * The code below used to be needed to ensure ancestor nodes get locked
458          * before interior nodes - now that's handled by
459          * bch2_btree_iter_traverse_all().
460          */
461         trans_for_each_iter(iter->trans, linked)
462                 if (linked != iter &&
463                     btree_iter_type(linked) == btree_iter_type(iter) &&
464                     linked->btree_id == iter->btree_id &&
465                     linked->locks_want < new_locks_want) {
466                         linked->locks_want = new_locks_want;
467                         btree_iter_get_locks(linked, true, _THIS_IP_);
468                 }
469
470         if (iter->should_be_locked)
471                 btree_trans_restart(iter->trans);
472         return false;
473 }
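/*
 * Usage sketch (editor's illustration; the caller shown is hypothetical):
 * update paths that will need to lock interior nodes raise locks_want up
 * front via the inline wrapper bch2_btree_iter_upgrade():
 *
 *	if (!bch2_btree_iter_upgrade(iter, BTREE_MAX_DEPTH))
 *		return -EINTR;	// transaction restarted, caller must retry
 *
 * The wrapper only drops into the slow path above when locks_want actually
 * needs to grow.
 */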
474
475 void __bch2_btree_iter_downgrade(struct btree_iter *iter,
476                                  unsigned new_locks_want)
477 {
478         unsigned l;
479
480         EBUG_ON(iter->locks_want < new_locks_want);
481
482         iter->locks_want = new_locks_want;
483
484         while (iter->nodes_locked &&
485                (l = __fls(iter->nodes_locked)) >= iter->locks_want) {
486                 if (l > iter->level) {
487                         btree_node_unlock(iter, l);
488                 } else {
489                         if (btree_node_intent_locked(iter, l)) {
490                                 six_lock_downgrade(&iter->l[l].b->c.lock);
491                                 iter->nodes_intent_locked ^= 1 << l;
492                         }
493                         break;
494                 }
495         }
496
497         bch2_btree_trans_verify_locks(iter->trans);
498 }
499
500 void bch2_trans_downgrade(struct btree_trans *trans)
501 {
502         struct btree_iter *iter;
503
504         trans_for_each_iter(trans, iter)
505                 bch2_btree_iter_downgrade(iter);
506 }
507
508 /* Btree transaction locking: */
509
510 static inline bool btree_iter_should_be_locked(struct btree_iter *iter)
511 {
512         return (iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT) ||
513                 iter->should_be_locked;
514 }
515
516 bool bch2_trans_relock(struct btree_trans *trans)
517 {
518         struct btree_iter *iter;
519
520         if (unlikely(trans->restarted))
521                 return false;
522
523         trans_for_each_iter(trans, iter)
524                 if (btree_iter_should_be_locked(iter) &&
525                     !bch2_btree_iter_relock(iter, _RET_IP_)) {
526                         trace_trans_restart_relock(trans->ip, _RET_IP_,
527                                         iter->btree_id, &iter->real_pos);
528                         BUG_ON(!trans->restarted);
529                         return false;
530                 }
531         return true;
532 }
533
534 void bch2_trans_unlock(struct btree_trans *trans)
535 {
536         struct btree_iter *iter;
537
538         trans_for_each_iter(trans, iter)
539                 __bch2_btree_iter_unlock(iter);
540
541         BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
542 }
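/*
 * Editor's sketch of the caller-side pattern these helpers support (names as
 * in this tree; the operation itself is hypothetical): lock-dropping points
 * call bch2_trans_unlock()/bch2_trans_relock(), and when relocking fails the
 * transaction is restarted and retried from the top, typically via
 *
 *	ret = bch2_trans_do(c, NULL, NULL, 0,
 *			    my_btree_op(&trans, ...));
 *
 * where __bch2_trans_do() loops, calling bch2_trans_begin() after each
 * -EINTR, until the operation succeeds or fails with a real error.
 */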
543
544 /* Btree iterator: */
545
546 #ifdef CONFIG_BCACHEFS_DEBUG
547
548 static void bch2_btree_iter_verify_cached(struct btree_iter *iter)
549 {
550         struct bkey_cached *ck;
551         bool locked = btree_node_locked(iter, 0);
552
553         if (!bch2_btree_node_relock(iter, 0))
554                 return;
555
556         ck = (void *) iter->l[0].b;
557         BUG_ON(ck->key.btree_id != iter->btree_id ||
558                bkey_cmp(ck->key.pos, iter->pos));
559
560         if (!locked)
561                 btree_node_unlock(iter, 0);
562 }
563
564 static void bch2_btree_iter_verify_level(struct btree_iter *iter,
565                                          unsigned level)
566 {
567         struct btree_iter_level *l;
568         struct btree_node_iter tmp;
569         bool locked;
570         struct bkey_packed *p, *k;
571         char buf1[100], buf2[100], buf3[100];
572         const char *msg;
573
574         if (!bch2_debug_check_iterators)
575                 return;
576
577         l       = &iter->l[level];
578         tmp     = l->iter;
579         locked  = btree_node_locked(iter, level);
580
581         if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
582                 if (!level)
583                         bch2_btree_iter_verify_cached(iter);
584                 return;
585         }
586
587         BUG_ON(iter->level < iter->min_depth);
588
589         if (!btree_iter_node(iter, level))
590                 return;
591
592         if (!bch2_btree_node_relock(iter, level))
593                 return;
594
595         BUG_ON(!btree_iter_pos_in_node(iter, l->b));
596
597         /*
598          * node iterators (BTREE_ITER_NODES) don't use the leaf node iterator:
599          */
600         if (btree_iter_type(iter) == BTREE_ITER_NODES &&
601             level <= iter->min_depth)
602                 goto unlock;
603
604         bch2_btree_node_iter_verify(&l->iter, l->b);
605
606         /*
607          * For interior nodes, the iterator will have skipped past
608          * deleted keys:
609          *
610          * For extents, the iterator may have skipped past deleted keys (but not
611          * whiteouts)
612          */
613         p = level || btree_node_type_is_extents(iter->btree_id)
614                 ? bch2_btree_node_iter_prev(&tmp, l->b)
615                 : bch2_btree_node_iter_prev_all(&tmp, l->b);
616         k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
617
618         if (p && bkey_iter_pos_cmp(l->b, p, &iter->real_pos) >= 0) {
619                 msg = "before";
620                 goto err;
621         }
622
623         if (k && bkey_iter_pos_cmp(l->b, k, &iter->real_pos) < 0) {
624                 msg = "after";
625                 goto err;
626         }
627 unlock:
628         if (!locked)
629                 btree_node_unlock(iter, level);
630         return;
631 err:
632         strcpy(buf2, "(none)");
633         strcpy(buf3, "(none)");
634
635         bch2_bpos_to_text(&PBUF(buf1), iter->real_pos);
636
637         if (p) {
638                 struct bkey uk = bkey_unpack_key(l->b, p);
639                 bch2_bkey_to_text(&PBUF(buf2), &uk);
640         }
641
642         if (k) {
643                 struct bkey uk = bkey_unpack_key(l->b, k);
644                 bch2_bkey_to_text(&PBUF(buf3), &uk);
645         }
646
647         panic("iterator should be %s key at level %u:\n"
648               "iter pos %s\n"
649               "prev key %s\n"
650               "cur  key %s\n",
651               msg, level, buf1, buf2, buf3);
652 }
653
654 static void bch2_btree_iter_verify(struct btree_iter *iter)
655 {
656         struct btree_trans *trans = iter->trans;
657         struct bch_fs *c = trans->c;
658         enum btree_iter_type type = btree_iter_type(iter);
659         unsigned i;
660
661         EBUG_ON(iter->btree_id >= BTREE_ID_NR);
662
663         BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
664                iter->pos.snapshot != iter->snapshot);
665
666         BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
667                (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
668
669         BUG_ON(type == BTREE_ITER_NODES &&
670                !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
671
672         BUG_ON(type != BTREE_ITER_NODES &&
673                (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
674                !btree_type_has_snapshots(iter->btree_id));
675
676         for (i = 0; i < (type != BTREE_ITER_CACHED ? BTREE_MAX_DEPTH : 1); i++) {
677                 if (!iter->l[i].b) {
678                         BUG_ON(c->btree_roots[iter->btree_id].b->c.level > i);
679                         break;
680                 }
681
682                 bch2_btree_iter_verify_level(iter, i);
683         }
684
685         bch2_btree_iter_verify_locks(iter);
686 }
687
688 static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
689 {
690         enum btree_iter_type type = btree_iter_type(iter);
691
692         BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
693                iter->pos.snapshot != iter->snapshot);
694
695         BUG_ON((type == BTREE_ITER_KEYS ||
696                 type == BTREE_ITER_CACHED) &&
697                (bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
698                 bkey_cmp(iter->pos, iter->k.p) > 0));
699 }
700
701 void bch2_btree_trans_verify_iters(struct btree_trans *trans, struct btree *b)
702 {
703         struct btree_iter *iter;
704
705         if (!bch2_debug_check_iterators)
706                 return;
707
708         trans_for_each_iter_with_node(trans, b, iter)
709                 bch2_btree_iter_verify_level(iter, b->c.level);
710 }
711
712 #else
713
714 static inline void bch2_btree_iter_verify_level(struct btree_iter *iter, unsigned l) {}
715 static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
716 static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
717
718 #endif
719
720 static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
721                                         struct btree *b,
722                                         struct bset_tree *t,
723                                         struct bkey_packed *k)
724 {
725         struct btree_node_iter_set *set;
726
727         btree_node_iter_for_each(iter, set)
728                 if (set->end == t->end_offset) {
729                         set->k = __btree_node_key_to_offset(b, k);
730                         bch2_btree_node_iter_sort(iter, b);
731                         return;
732                 }
733
734         bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
735 }
736
737 static void __bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
738                                                struct btree *b,
739                                                struct bkey_packed *where)
740 {
741         struct btree_iter_level *l = &iter->l[b->c.level];
742
743         if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
744                 return;
745
746         if (bkey_iter_pos_cmp(l->b, where, &iter->real_pos) < 0)
747                 bch2_btree_node_iter_advance(&l->iter, l->b);
748
749         btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
750 }
751
752 void bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
753                                       struct btree *b,
754                                       struct bkey_packed *where)
755 {
756         struct btree_iter *linked;
757
758         trans_for_each_iter_with_node(iter->trans, b, linked) {
759                 __bch2_btree_iter_fix_key_modified(linked, b, where);
760                 bch2_btree_iter_verify_level(linked, b->c.level);
761         }
762 }
763
764 static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
765                                       struct btree *b,
766                                       struct btree_node_iter *node_iter,
767                                       struct bset_tree *t,
768                                       struct bkey_packed *where,
769                                       unsigned clobber_u64s,
770                                       unsigned new_u64s)
771 {
772         const struct bkey_packed *end = btree_bkey_last(b, t);
773         struct btree_node_iter_set *set;
774         unsigned offset = __btree_node_key_to_offset(b, where);
775         int shift = new_u64s - clobber_u64s;
776         unsigned old_end = t->end_offset - shift;
777         unsigned orig_iter_pos = node_iter->data[0].k;
778         bool iter_current_key_modified =
779                 orig_iter_pos >= offset &&
780                 orig_iter_pos <= offset + clobber_u64s;
781
782         btree_node_iter_for_each(node_iter, set)
783                 if (set->end == old_end)
784                         goto found;
785
786         /* didn't find the bset in the iterator - might have to re-add it: */
787         if (new_u64s &&
788             bkey_iter_pos_cmp(b, where, &iter->real_pos) >= 0) {
789                 bch2_btree_node_iter_push(node_iter, b, where, end);
790                 goto fixup_done;
791         } else {
792                 /* Iterator is after key that changed */
793                 return;
794         }
795 found:
796         set->end = t->end_offset;
797
798         /* Iterator hasn't gotten to the key that changed yet: */
799         if (set->k < offset)
800                 return;
801
802         if (new_u64s &&
803             bkey_iter_pos_cmp(b, where, &iter->real_pos) >= 0) {
804                 set->k = offset;
805         } else if (set->k < offset + clobber_u64s) {
806                 set->k = offset + new_u64s;
807                 if (set->k == set->end)
808                         bch2_btree_node_iter_set_drop(node_iter, set);
809         } else {
810                 /* Iterator is after key that changed */
811                 set->k = (int) set->k + shift;
812                 return;
813         }
814
815         bch2_btree_node_iter_sort(node_iter, b);
816 fixup_done:
817         if (node_iter->data[0].k != orig_iter_pos)
818                 iter_current_key_modified = true;
819
820         /*
821          * When a new key is added, and the node iterator now points to that
822          * key, the iterator might have skipped past deleted keys that should
823          * come after the key the iterator now points to. We have to rewind to
824          * before those deleted keys - otherwise
825          * bch2_btree_node_iter_prev_all() breaks:
826          */
827         if (!bch2_btree_node_iter_end(node_iter) &&
828             iter_current_key_modified &&
829             (b->c.level ||
830              btree_node_type_is_extents(iter->btree_id))) {
831                 struct bset_tree *t;
832                 struct bkey_packed *k, *k2, *p;
833
834                 k = bch2_btree_node_iter_peek_all(node_iter, b);
835
836                 for_each_bset(b, t) {
837                         bool set_pos = false;
838
839                         if (node_iter->data[0].end == t->end_offset)
840                                 continue;
841
842                         k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);
843
844                         while ((p = bch2_bkey_prev_all(b, t, k2)) &&
845                                bkey_iter_cmp(b, k, p) < 0) {
846                                 k2 = p;
847                                 set_pos = true;
848                         }
849
850                         if (set_pos)
851                                 btree_node_iter_set_set_pos(node_iter,
852                                                             b, t, k2);
853                 }
854         }
855
856         if (!b->c.level &&
857             node_iter == &iter->l[0].iter &&
858             iter_current_key_modified)
859                 btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
860 }
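/*
 * Worked example (editor's illustration): say a key at offset 10 was
 * overwritten in place with clobber_u64s == 3 and new_u64s == 5, so
 * shift == +2.  A node iterator position in 10..12 now lands inside the new
 * key: it is set to 10 if the new key is still >= the iterator's search
 * position, or to 15 (just past the new key) if not.  A position before 10 is
 * untouched, and one after the old key, say 20, is simply shifted to 22.
 * That is all the set->k adjustments above are doing.
 */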
861
862 void bch2_btree_node_iter_fix(struct btree_iter *iter,
863                               struct btree *b,
864                               struct btree_node_iter *node_iter,
865                               struct bkey_packed *where,
866                               unsigned clobber_u64s,
867                               unsigned new_u64s)
868 {
869         struct bset_tree *t = bch2_bkey_to_bset(b, where);
870         struct btree_iter *linked;
871
872         if (node_iter != &iter->l[b->c.level].iter) {
873                 __bch2_btree_node_iter_fix(iter, b, node_iter, t,
874                                            where, clobber_u64s, new_u64s);
875
876                 if (bch2_debug_check_iterators)
877                         bch2_btree_node_iter_verify(node_iter, b);
878         }
879
880         trans_for_each_iter_with_node(iter->trans, b, linked) {
881                 __bch2_btree_node_iter_fix(linked, b,
882                                            &linked->l[b->c.level].iter, t,
883                                            where, clobber_u64s, new_u64s);
884                 bch2_btree_iter_verify_level(linked, b->c.level);
885         }
886 }
887
888 static inline struct bkey_s_c __btree_iter_unpack(struct btree_iter *iter,
889                                                   struct btree_iter_level *l,
890                                                   struct bkey *u,
891                                                   struct bkey_packed *k)
892 {
893         struct bkey_s_c ret;
894
895         if (unlikely(!k)) {
896                 /*
897                  * signal to bch2_btree_iter_peek_slot() that we're currently at
898                  * a hole
899                  */
900                 u->type = KEY_TYPE_deleted;
901                 return bkey_s_c_null;
902         }
903
904         ret = bkey_disassemble(l->b, k, u);
905
906         /*
907          * XXX: bch2_btree_bset_insert_key() generates invalid keys when we
908          * overwrite extents - it sets k->type = KEY_TYPE_deleted on the key
909          * being overwritten but doesn't change k->size. But this is ok, because
910          * those keys are never written out, we just have to avoid a spurious
911          * assertion here:
912          */
913         if (bch2_debug_check_bkeys && !bkey_deleted(ret.k))
914                 bch2_bkey_debugcheck(iter->trans->c, l->b, ret);
915
916         return ret;
917 }
918
919 /* peek_all() doesn't skip deleted keys */
920 static inline struct bkey_s_c btree_iter_level_peek_all(struct btree_iter *iter,
921                                                         struct btree_iter_level *l)
922 {
923         return __btree_iter_unpack(iter, l, &iter->k,
924                         bch2_btree_node_iter_peek_all(&l->iter, l->b));
925 }
926
927 static inline struct bkey_s_c btree_iter_level_peek(struct btree_iter *iter,
928                                                     struct btree_iter_level *l)
929 {
930         struct bkey_s_c k = __btree_iter_unpack(iter, l, &iter->k,
931                         bch2_btree_node_iter_peek(&l->iter, l->b));
932
933         iter->real_pos = k.k ? k.k->p : l->b->key.k.p;
934         return k;
935 }
936
937 static inline struct bkey_s_c btree_iter_level_prev(struct btree_iter *iter,
938                                                     struct btree_iter_level *l)
939 {
940         struct bkey_s_c k = __btree_iter_unpack(iter, l, &iter->k,
941                         bch2_btree_node_iter_prev(&l->iter, l->b));
942
943         iter->real_pos = k.k ? k.k->p : l->b->data->min_key;
944         return k;
945 }
946
947 static inline bool btree_iter_advance_to_pos(struct btree_iter *iter,
948                                              struct btree_iter_level *l,
949                                              int max_advance)
950 {
951         struct bkey_packed *k;
952         int nr_advanced = 0;
953
954         while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
955                bkey_iter_pos_cmp(l->b, k, &iter->real_pos) < 0) {
956                 if (max_advance > 0 && nr_advanced >= max_advance)
957                         return false;
958
959                 bch2_btree_node_iter_advance(&l->iter, l->b);
960                 nr_advanced++;
961         }
962
963         return true;
964 }
965
966 /*
967  * Verify that iterator for parent node points to child node:
968  */
969 static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
970 {
971         struct btree_iter_level *l;
972         unsigned plevel;
973         bool parent_locked;
974         struct bkey_packed *k;
975
976         if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
977                 return;
978
979         plevel = b->c.level + 1;
980         if (!btree_iter_node(iter, plevel))
981                 return;
982
983         parent_locked = btree_node_locked(iter, plevel);
984
985         if (!bch2_btree_node_relock(iter, plevel))
986                 return;
987
988         l = &iter->l[plevel];
989         k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
990         if (!k ||
991             bkey_deleted(k) ||
992             bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
993                 char buf1[100];
994                 char buf2[100];
995                 char buf3[100];
996                 char buf4[100];
997                 struct bkey uk = bkey_unpack_key(b, k);
998
999                 bch2_dump_btree_node(iter->trans->c, l->b);
1000                 bch2_bpos_to_text(&PBUF(buf1), iter->real_pos);
1001                 bch2_bkey_to_text(&PBUF(buf2), &uk);
1002                 bch2_bpos_to_text(&PBUF(buf3), b->data->min_key);
1003                 bch2_bpos_to_text(&PBUF(buf4), b->data->max_key);
1004                 panic("parent iter doesn't point to new node:\n"
1005                       "iter pos %s %s\n"
1006                       "iter key %s\n"
1007                       "new node %s-%s\n",
1008                       bch2_btree_ids[iter->btree_id], buf1,
1009                       buf2, buf3, buf4);
1010         }
1011
1012         if (!parent_locked)
1013                 btree_node_unlock(iter, b->c.level + 1);
1014 }
1015
1016 static inline void __btree_iter_init(struct btree_iter *iter,
1017                                      unsigned level)
1018 {
1019         struct btree_iter_level *l = &iter->l[level];
1020
1021         bch2_btree_node_iter_init(&l->iter, l->b, &iter->real_pos);
1022
1023         /*
1024          * Iterators to interior nodes should always be pointed at the first
1025          * non-whiteout:
1026          */
1027         if (level)
1028                 bch2_btree_node_iter_peek(&l->iter, l->b);
1029
1030         btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
1031 }
1032
1033 static inline void btree_iter_node_set(struct btree_iter *iter,
1034                                        struct btree *b)
1035 {
1036         BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
1037
1038         btree_iter_verify_new_node(iter, b);
1039
1040         EBUG_ON(!btree_iter_pos_in_node(iter, b));
1041         EBUG_ON(b->c.lock.state.seq & 1);
1042
1043         iter->l[b->c.level].lock_seq = b->c.lock.state.seq;
1044         iter->l[b->c.level].b = b;
1045         __btree_iter_init(iter, b->c.level);
1046 }
1047
1048 /*
1049  * A btree node is being replaced - update the iterator to point to the new
1050  * node:
1051  */
1052 void bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
1053 {
1054         enum btree_node_locked_type t;
1055         struct btree_iter *linked;
1056
1057         trans_for_each_iter(iter->trans, linked)
1058                 if (btree_iter_type(linked) != BTREE_ITER_CACHED &&
1059                     btree_iter_pos_in_node(linked, b)) {
1060                         /*
1061                          * bch2_btree_iter_node_drop() has already been called -
1062                          * the old node we're replacing has already been
1063                          * unlocked and the pointer invalidated
1064                          */
1065                         BUG_ON(btree_node_locked(linked, b->c.level));
1066
1067                         t = btree_lock_want(linked, b->c.level);
1068                         if (t != BTREE_NODE_UNLOCKED) {
1069                                 six_lock_increment(&b->c.lock, t);
1070                                 mark_btree_node_locked(linked, b->c.level, t);
1071                         }
1072
1073                         btree_iter_node_set(linked, b);
1074                 }
1075 }
1076
1077 void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
1078 {
1079         struct btree_iter *linked;
1080         unsigned level = b->c.level;
1081
1082         trans_for_each_iter(iter->trans, linked)
1083                 if (linked->l[level].b == b) {
1084                         btree_node_unlock(linked, level);
1085                         linked->l[level].b = BTREE_ITER_NO_NODE_DROP;
1086                 }
1087 }
1088
1089 /*
1090  * A btree node has been modified in such a way as to invalidate iterators - fix
1091  * them:
1092  */
1093 void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
1094 {
1095         struct btree_iter *linked;
1096
1097         trans_for_each_iter_with_node(iter->trans, b, linked)
1098                 __btree_iter_init(linked, b->c.level);
1099 }
1100
1101 static int lock_root_check_fn(struct six_lock *lock, void *p)
1102 {
1103         struct btree *b = container_of(lock, struct btree, c.lock);
1104         struct btree **rootp = p;
1105
1106         return b == *rootp ? 0 : -1;
1107 }
1108
1109 static inline int btree_iter_lock_root(struct btree_trans *trans,
1110                                        struct btree_iter *iter,
1111                                        unsigned depth_want,
1112                                        unsigned long trace_ip)
1113 {
1114         struct bch_fs *c = trans->c;
1115         struct btree *b, **rootp = &c->btree_roots[iter->btree_id].b;
1116         enum six_lock_type lock_type;
1117         unsigned i;
1118
1119         EBUG_ON(iter->nodes_locked);
1120
1121         while (1) {
1122                 b = READ_ONCE(*rootp);
1123                 iter->level = READ_ONCE(b->c.level);
1124
1125                 if (unlikely(iter->level < depth_want)) {
1126                         /*
1127                          * the root is at a lower depth than the depth we want:
1128                          * got to the end of the btree, or we're walking nodes
1129                          * greater than some depth and there are no nodes >=
1130                          * that depth
1131                          */
1132                         iter->level = depth_want;
1133                         for (i = iter->level; i < BTREE_MAX_DEPTH; i++)
1134                                 iter->l[i].b = NULL;
1135                         return 1;
1136                 }
1137
1138                 lock_type = __btree_lock_want(iter, iter->level);
1139                 if (unlikely(!btree_node_lock(b, SPOS_MAX, iter->level,
1140                                               iter, lock_type,
1141                                               lock_root_check_fn, rootp,
1142                                               trace_ip))) {
1143                         if (trans->restarted)
1144                                 return -EINTR;
1145                         continue;
1146                 }
1147
1148                 if (likely(b == READ_ONCE(*rootp) &&
1149                            b->c.level == iter->level &&
1150                            !race_fault())) {
1151                         for (i = 0; i < iter->level; i++)
1152                                 iter->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
1153                         iter->l[iter->level].b = b;
1154                         for (i = iter->level + 1; i < BTREE_MAX_DEPTH; i++)
1155                                 iter->l[i].b = NULL;
1156
1157                         mark_btree_node_locked(iter, iter->level, lock_type);
1158                         btree_iter_node_set(iter, b);
1159                         return 0;
1160                 }
1161
1162                 six_unlock_type(&b->c.lock, lock_type);
1163         }
1164 }
1165
1166 noinline
1167 static int btree_iter_prefetch(struct btree_iter *iter)
1168 {
1169         struct bch_fs *c = iter->trans->c;
1170         struct btree_iter_level *l = &iter->l[iter->level];
1171         struct btree_node_iter node_iter = l->iter;
1172         struct bkey_packed *k;
1173         struct bkey_buf tmp;
1174         unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
1175                 ? (iter->level > 1 ? 0 :  2)
1176                 : (iter->level > 1 ? 1 : 16);
1177         bool was_locked = btree_node_locked(iter, iter->level);
1178         int ret = 0;
1179
1180         bch2_bkey_buf_init(&tmp);
1181
1182         while (nr && !ret) {
1183                 if (!bch2_btree_node_relock(iter, iter->level))
1184                         break;
1185
1186                 bch2_btree_node_iter_advance(&node_iter, l->b);
1187                 k = bch2_btree_node_iter_peek(&node_iter, l->b);
1188                 if (!k)
1189                         break;
1190
1191                 bch2_bkey_buf_unpack(&tmp, c, l->b, k);
1192                 ret = bch2_btree_node_prefetch(c, iter, tmp.k, iter->btree_id,
1193                                                iter->level - 1);
1194         }
1195
1196         if (!was_locked)
1197                 btree_node_unlock(iter, iter->level);
1198
1199         bch2_bkey_buf_exit(&tmp, c);
1200         return ret;
1201 }
1202
1203 static noinline void btree_node_mem_ptr_set(struct btree_iter *iter,
1204                                             unsigned plevel, struct btree *b)
1205 {
1206         struct btree_iter_level *l = &iter->l[plevel];
1207         bool locked = btree_node_locked(iter, plevel);
1208         struct bkey_packed *k;
1209         struct bch_btree_ptr_v2 *bp;
1210
1211         if (!bch2_btree_node_relock(iter, plevel))
1212                 return;
1213
1214         k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1215         BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
1216
1217         bp = (void *) bkeyp_val(&l->b->format, k);
1218         bp->mem_ptr = (unsigned long)b;
1219
1220         if (!locked)
1221                 btree_node_unlock(iter, plevel);
1222 }
1223
1224 static __always_inline int btree_iter_down(struct btree_trans *trans,
1225                                            struct btree_iter *iter,
1226                                            unsigned long trace_ip)
1227 {
1228         struct bch_fs *c = trans->c;
1229         struct btree_iter_level *l = &iter->l[iter->level];
1230         struct btree *b;
1231         unsigned level = iter->level - 1;
1232         enum six_lock_type lock_type = __btree_lock_want(iter, level);
1233         struct bkey_buf tmp;
1234         int ret;
1235
1236         EBUG_ON(!btree_node_locked(iter, iter->level));
1237
1238         bch2_bkey_buf_init(&tmp);
1239         bch2_bkey_buf_unpack(&tmp, c, l->b,
1240                          bch2_btree_node_iter_peek(&l->iter, l->b));
1241
1242         b = bch2_btree_node_get(trans, iter, tmp.k, level, lock_type, trace_ip);
1243         ret = PTR_ERR_OR_ZERO(b);
1244         if (unlikely(ret))
1245                 goto err;
1246
1247         mark_btree_node_locked(iter, level, lock_type);
1248         btree_iter_node_set(iter, b);
1249
1250         if (tmp.k->k.type == KEY_TYPE_btree_ptr_v2 &&
1251             unlikely(b != btree_node_mem_ptr(tmp.k)))
1252                 btree_node_mem_ptr_set(iter, level + 1, b);
1253
1254         if (iter->flags & BTREE_ITER_PREFETCH)
1255                 ret = btree_iter_prefetch(iter);
1256
1257         if (btree_node_read_locked(iter, level + 1))
1258                 btree_node_unlock(iter, level + 1);
1259         iter->level = level;
1260
1261         bch2_btree_iter_verify_locks(iter);
1262 err:
1263         bch2_bkey_buf_exit(&tmp, c);
1264         return ret;
1265 }
1266
1267 static int btree_iter_traverse_one(struct btree_iter *, unsigned long);
1268
1269 static int __btree_iter_traverse_all(struct btree_trans *trans, int ret,
1270                                      unsigned long trace_ip)
1271 {
1272         struct bch_fs *c = trans->c;
1273         struct btree_iter *iter;
1274         int i;
1275
1276         if (trans->in_traverse_all)
1277                 return -EINTR;
1278
1279         trans->in_traverse_all = true;
1280 retry_all:
1281         trans->restarted = false;
1282
1283         trans_for_each_iter(trans, iter)
1284                 iter->should_be_locked = false;
1285
1286         btree_trans_sort_iters(trans);
1287
1288         for (i = trans->nr_sorted - 2; i >= 0; --i) {
1289                 struct btree_iter *iter1 = trans->iters + trans->sorted[i];
1290                 struct btree_iter *iter2 = trans->iters + trans->sorted[i + 1];
1291
1292                 if (iter1->btree_id == iter2->btree_id &&
1293                     iter1->locks_want < iter2->locks_want)
1294                         __bch2_btree_iter_upgrade(iter1, iter2->locks_want);
1295                 else if (!iter1->locks_want && iter2->locks_want)
1296                         __bch2_btree_iter_upgrade(iter1, 1);
1297         }
1298
1299         bch2_trans_unlock(trans);
1300         cond_resched();
1301
1302         if (unlikely(ret == -ENOMEM)) {
1303                 struct closure cl;
1304
1305                 closure_init_stack(&cl);
1306
1307                 do {
1308                         ret = bch2_btree_cache_cannibalize_lock(c, &cl);
1309                         closure_sync(&cl);
1310                 } while (ret);
1311         }
1312
1313         if (unlikely(ret == -EIO)) {
1314                 trans->error = true;
1315                 goto out;
1316         }
1317
1318         BUG_ON(ret && ret != -EINTR);
1319
1320         /* Now, redo traversals in correct order: */
1321         trans_for_each_iter_inorder(trans, iter) {
1322                 EBUG_ON(!(trans->iters_linked & (1ULL << iter->idx)));
1323
1324                 ret = btree_iter_traverse_one(iter, _THIS_IP_);
1325                 if (ret)
1326                         goto retry_all;
1327
1328                 EBUG_ON(!(trans->iters_linked & (1ULL << iter->idx)));
1329         }
1330
1331         trans_for_each_iter(trans, iter)
1332                 BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
1333 out:
1334         bch2_btree_cache_cannibalize_unlock(c);
1335
1336         trans->in_traverse_all = false;
1337
1338         trace_trans_traverse_all(trans->ip, trace_ip);
1339         return ret;
1340 }
1341
1342 static int bch2_btree_iter_traverse_all(struct btree_trans *trans)
1343 {
1344         return __btree_iter_traverse_all(trans, 0, _RET_IP_);
1345 }
1346
1347 static inline bool btree_iter_good_node(struct btree_iter *iter,
1348                                         unsigned l, int check_pos)
1349 {
1350         if (!is_btree_node(iter, l) ||
1351             !bch2_btree_node_relock(iter, l))
1352                 return false;
1353
1354         if (check_pos < 0 && btree_iter_pos_before_node(iter, iter->l[l].b))
1355                 return false;
1356         if (check_pos > 0 && btree_iter_pos_after_node(iter, iter->l[l].b))
1357                 return false;
1358         return true;
1359 }
1360
1361 static inline unsigned btree_iter_up_until_good_node(struct btree_iter *iter,
1362                                                      int check_pos)
1363 {
1364         unsigned l = iter->level;
1365
1366         while (btree_iter_node(iter, l) &&
1367                !btree_iter_good_node(iter, l, check_pos)) {
1368                 btree_node_unlock(iter, l);
1369                 iter->l[l].b = BTREE_ITER_NO_NODE_UP;
1370                 l++;
1371         }
1372
1373         return l;
1374 }
1375
1376 /*
1377  * This is the main state machine for walking down the btree - walks down to a
1378  * specified depth
1379  *
1380  * Returns 0 on success, -EIO on error (error reading in a btree node).
1381  *
1382  * On error, caller (peek_node()/peek_key()) must return NULL; the error is
1383  * stashed in the iterator and returned from bch2_trans_exit().
1384  */
1385 static int btree_iter_traverse_one(struct btree_iter *iter,
1386                                    unsigned long trace_ip)
1387 {
1388         struct btree_trans *trans = iter->trans;
1389         unsigned l, depth_want = iter->level;
1390         int ret = 0;
1391
1392         /*
1393          * Ensure we obey iter->should_be_locked: if it's set, we can't unlock
1394          * and re-traverse the iterator without a transaction restart:
1395          */
1396         if (iter->should_be_locked) {
1397                 ret = bch2_btree_iter_relock(iter, trace_ip) ? 0 : -EINTR;
1398                 goto out;
1399         }
1400
1401         if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
1402                 ret = bch2_btree_iter_traverse_cached(iter);
1403                 goto out;
1404         }
1405
1406         if (unlikely(iter->level >= BTREE_MAX_DEPTH))
1407                 goto out;
1408
1409         iter->level = btree_iter_up_until_good_node(iter, 0);
1410
1411         /* If we need intent locks, take them too: */
1412         for (l = iter->level + 1;
1413              l < iter->locks_want && btree_iter_node(iter, l);
1414              l++)
1415                 if (!bch2_btree_node_relock(iter, l))
1416                         while (iter->level <= l) {
1417                                 btree_node_unlock(iter, iter->level);
1418                                 iter->l[iter->level].b = BTREE_ITER_NO_NODE_UP;
1419                                 iter->level++;
1420                         }
1421
1422         /*
1423          * Note: iter->l[iter->level].b may be temporarily NULL here - that
1424          * would indicate to other code that we got to the end of the btree,
1425          * here it indicates that relocking the root failed - it's critical that
1426          * btree_iter_lock_root() comes next and that it can't fail
1427          */
1428         while (iter->level > depth_want) {
1429                 ret = btree_iter_node(iter, iter->level)
1430                         ? btree_iter_down(trans, iter, trace_ip)
1431                         : btree_iter_lock_root(trans, iter, depth_want, trace_ip);
1432                 if (unlikely(ret)) {
1433                         if (ret == 1) {
1434                                 /*
1435                                  * Got to the end of the btree (in
1436                                  * BTREE_ITER_NODES mode)
1437                                  */
1438                                 ret = 0;
1439                                 goto out;
1440                         }
1441
1442                         __bch2_btree_iter_unlock(iter);
1443                         iter->level = depth_want;
1444
1445                         if (ret == -EIO) {
1446                                 iter->flags |= BTREE_ITER_ERROR;
1447                                 iter->l[iter->level].b =
1448                                         BTREE_ITER_NO_NODE_ERROR;
1449                         } else {
1450                                 iter->l[iter->level].b =
1451                                         BTREE_ITER_NO_NODE_DOWN;
1452                         }
1453                         goto out;
1454                 }
1455         }
1456
1457         iter->uptodate = BTREE_ITER_NEED_PEEK;
1458 out:
1459         BUG_ON((ret == -EINTR) != !!trans->restarted);
1460         trace_iter_traverse(trans->ip, trace_ip,
1461                             btree_iter_type(iter) == BTREE_ITER_CACHED,
1462                             iter->btree_id, &iter->real_pos, ret);
1463         bch2_btree_iter_verify(iter);
1464         return ret;
1465 }
1466
1467 static int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
1468 {
1469         struct btree_trans *trans = iter->trans;
1470         int ret;
1471
1472         ret =   bch2_trans_cond_resched(trans) ?:
1473                 btree_iter_traverse_one(iter, _RET_IP_);
1474         if (unlikely(ret) && hweight64(trans->iters_linked) == 1) {
1475                 ret = __btree_iter_traverse_all(trans, ret, _RET_IP_);
1476                 BUG_ON(ret == -EINTR);
1477         }
1478
1479         return ret;
1480 }
1481
1482 /*
1483  * Note:
1484  * bch2_btree_iter_traverse() is for external users, btree_iter_traverse() is
1485  * for internal btree iterator users
1486  *
1487  * bch2_btree_iter_traverse() sets iter->real_pos to iter->pos,
1488  * btree_iter_traverse() does not:
1489  */
1490 static inline int __must_check
1491 btree_iter_traverse(struct btree_iter *iter)
1492 {
1493         return iter->uptodate >= BTREE_ITER_NEED_RELOCK
1494                 ? __bch2_btree_iter_traverse(iter)
1495                 : 0;
1496 }
1497
1498 int __must_check
1499 bch2_btree_iter_traverse(struct btree_iter *iter)
1500 {
1501         int ret;
1502
1503         btree_iter_set_search_pos(iter, btree_iter_search_key(iter));
1504
1505         ret = btree_iter_traverse(iter);
1506         if (ret)
1507                 return ret;
1508
1509         iter->should_be_locked = true;
1510         return 0;
1511 }
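
/*
 * Example (illustrative): an external user typically calls
 * bch2_btree_iter_traverse() to get @iter pointing at, and locked on, the
 * leaf covering iter->pos before doing further work; -EINTR means the
 * transaction was restarted and the whole operation must be retried:
 *
 *	ret = bch2_btree_iter_traverse(iter);
 *	if (ret)
 *		goto err;
 */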
1512
1513 /* Iterate across nodes (leaf and interior nodes) */
1514
1515 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
1516 {
1517         struct btree *b;
1518         int ret;
1519
1520         EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES);
1521         bch2_btree_iter_verify(iter);
1522
1523         ret = btree_iter_traverse(iter);
1524         if (ret)
1525                 return NULL;
1526
1527         b = btree_iter_node(iter, iter->level);
1528         if (!b)
1529                 return NULL;
1530
1531         BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
1532
1533         iter->pos = iter->real_pos = b->key.k.p;
1534
1535         bch2_btree_iter_verify(iter);
1536         iter->should_be_locked = true;
1537
1538         return b;
1539 }
1540
1541 struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
1542 {
1543         struct btree *b;
1544         int ret;
1545
1546         EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES);
1547         bch2_btree_iter_verify(iter);
1548
1549         /* already got to end? */
1550         if (!btree_iter_node(iter, iter->level))
1551                 return NULL;
1552
1553         bch2_trans_cond_resched(iter->trans);
1554
1555         btree_node_unlock(iter, iter->level);
1556         iter->l[iter->level].b = BTREE_ITER_NO_NODE_UP;
1557         iter->level++;
1558
1559         btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
1560         ret = btree_iter_traverse(iter);
1561         if (ret)
1562                 return NULL;
1563
1564         /* got to end? */
1565         b = btree_iter_node(iter, iter->level);
1566         if (!b)
1567                 return NULL;
1568
1569         if (bpos_cmp(iter->pos, b->key.k.p) < 0) {
1570                 /*
1571                  * Haven't gotten to the end of the parent node: go back down to
1572                  * the next child node
1573                  */
1574                 btree_iter_set_search_pos(iter, bpos_successor(iter->pos));
1575
1576                 /* Unlock to avoid screwing up our lock invariants: */
1577                 btree_node_unlock(iter, iter->level);
1578
1579                 iter->level = iter->min_depth;
1580                 btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
1581                 bch2_btree_iter_verify(iter);
1582
1583                 ret = btree_iter_traverse(iter);
1584                 if (ret)
1585                         return NULL;
1586
1587                 b = iter->l[iter->level].b;
1588         }
1589
1590         iter->pos = iter->real_pos = b->key.k.p;
1591
1592         bch2_btree_iter_verify(iter);
1593         iter->should_be_locked = true;
1594
1595         return b;
1596 }
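
/*
 * Example (illustrative) node walk using the two helpers above;
 * process_node() is a placeholder for the caller's per-node work:
 *
 *	for (b = bch2_btree_iter_peek_node(iter);
 *	     b;
 *	     b = bch2_btree_iter_next_node(iter))
 *		process_node(b);
 */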
1597
1598 /* Iterate across keys (in leaf nodes only) */
1599
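/*
 * Move iter->real_pos - the position actually searched for in the btree, as
 * opposed to the user visible iter->pos - then walk the iterator up (and
 * lazily back down) to a node covering the new position, keeping the
 * transaction's sorted list of iterators consistent:
 */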
1600 static void btree_iter_set_search_pos(struct btree_iter *iter, struct bpos new_pos)
1601 {
1602 #ifdef CONFIG_BCACHEFS_DEBUG
1603         struct bpos old_pos = iter->real_pos;
1604 #endif
1605         int cmp = bpos_cmp(new_pos, iter->real_pos);
1606         unsigned l = iter->level;
1607
1608         EBUG_ON(iter->trans->restarted);
1609
1610         if (!cmp)
1611                 goto out;
1612
1613         iter->real_pos = new_pos;
1614         iter->should_be_locked = false;
1615
1616         btree_iter_check_sort(iter->trans, iter);
1617
1618         if (unlikely(btree_iter_type(iter) == BTREE_ITER_CACHED)) {
1619                 btree_node_unlock(iter, 0);
1620                 iter->l[0].b = BTREE_ITER_NO_NODE_CACHED;
1621                 btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
1622                 return;
1623         }
1624
1625         l = btree_iter_up_until_good_node(iter, cmp);
1626
1627         if (btree_iter_node(iter, l)) {
1628                 /*
1629                  * We might have to skip over many keys, or just a few: try
1630                  * advancing the node iterator, and if we have to skip over too
1631                  * many keys just reinit it (or if we're rewinding, since that
1632                  * is expensive).
1633                  */
1634                 if (cmp < 0 ||
1635                     !btree_iter_advance_to_pos(iter, &iter->l[l], 8))
1636                         __btree_iter_init(iter, l);
1637
1638                 /* Don't leave it locked if we're not supposed to: */
1639                 if (btree_lock_want(iter, l) == BTREE_NODE_UNLOCKED)
1640                         btree_node_unlock(iter, l);
1641         }
1642 out:
1643         if (l != iter->level)
1644                 btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
1645         else
1646                 btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
1647
1648         bch2_btree_iter_verify(iter);
1649 #ifdef CONFIG_BCACHEFS_DEBUG
1650         trace_iter_set_search_pos(iter->trans->ip, _RET_IP_,
1651                                   iter->btree_id,
1652                                   &old_pos, &new_pos, l);
1653 #endif
1654 }
1655
1656 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
1657 {
1658         struct bpos pos = iter->k.p;
1659         bool ret = bpos_cmp(pos, SPOS_MAX) != 0;
1660
1661         if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
1662                 pos = bkey_successor(iter, pos);
1663         bch2_btree_iter_set_pos(iter, pos);
1664         return ret;
1665 }
1666
1667 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
1668 {
1669         struct bpos pos = bkey_start_pos(&iter->k);
1670         bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
1671                     ? bpos_cmp(pos, POS_MIN)
1672                     : bkey_cmp(pos, POS_MIN)) != 0;
1673
1674         if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
1675                 pos = bkey_predecessor(iter, pos);
1676         bch2_btree_iter_set_pos(iter, pos);
1677         return ret;
1678 }
1679
1680 static inline bool btree_iter_set_pos_to_next_leaf(struct btree_iter *iter)
1681 {
1682         struct bpos next_pos = iter->l[0].b->key.k.p;
1683         bool ret = bpos_cmp(next_pos, SPOS_MAX) != 0;
1684
1685         /*
1686          * Typically, we don't want to modify iter->pos here, since that
1687          * indicates where we searched from - unless we got to the end of the
1688          * btree, in which case we want iter->pos to reflect that:
1689          */
1690         if (ret)
1691                 btree_iter_set_search_pos(iter, bpos_successor(next_pos));
1692         else
1693                 bch2_btree_iter_set_pos(iter, SPOS_MAX);
1694
1695         return ret;
1696 }
1697
1698 static inline bool btree_iter_set_pos_to_prev_leaf(struct btree_iter *iter)
1699 {
1700         struct bpos next_pos = iter->l[0].b->data->min_key;
1701         bool ret = bpos_cmp(next_pos, POS_MIN) != 0;
1702
1703         if (ret)
1704                 btree_iter_set_search_pos(iter, bpos_predecessor(next_pos));
1705         else
1706                 bch2_btree_iter_set_pos(iter, POS_MIN);
1707
1708         return ret;
1709 }
1710
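/*
 * If BTREE_ITER_WITH_UPDATES is set, scan the transaction's pending (not yet
 * committed) updates for the first one in this btree at or after @pos, so
 * that peek() can return it ahead of what's currently in the btree:
 */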
1711 static inline struct bkey_i *btree_trans_peek_updates(struct btree_iter *iter,
1712                                                       struct bpos pos)
1713 {
1714         struct btree_insert_entry *i;
1715
1716         if (!(iter->flags & BTREE_ITER_WITH_UPDATES))
1717                 return NULL;
1718
1719         trans_for_each_update(iter->trans, i)
1720                 if ((cmp_int(iter->btree_id,    i->iter->btree_id) ?:
1721                      bkey_cmp(pos,              i->k->k.p)) <= 0) {
1722                         if (iter->btree_id ==   i->iter->btree_id)
1723                                 return i->k;
1724                         break;
1725                 }
1726
1727         return NULL;
1728 }
1729
1730 /**
1731  * bch2_btree_iter_peek: returns first key greater than or equal to iterator's
1732  * current position
1733  */
1734 struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
1735 {
1736         struct bpos search_key = btree_iter_search_key(iter);
1737         struct bkey_i *next_update;
1738         struct bkey_s_c k;
1739         int ret;
1740
1741         EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
1742         bch2_btree_iter_verify(iter);
1743         bch2_btree_iter_verify_entry_exit(iter);
1744 start:
1745         next_update = btree_trans_peek_updates(iter, search_key);
1746         btree_iter_set_search_pos(iter, search_key);
1747
1748         while (1) {
1749                 ret = btree_iter_traverse(iter);
1750                 if (unlikely(ret))
1751                         return bkey_s_c_err(ret);
1752
1753                 k = btree_iter_level_peek(iter, &iter->l[0]);
1754
1755                 if (next_update &&
1756                     bpos_cmp(next_update->k.p, iter->real_pos) <= 0) {
1757                         iter->k = next_update->k;
1758                         k = bkey_i_to_s_c(next_update);
1759                 }
1760
1761                 if (likely(k.k)) {
1762                         if (bkey_deleted(k.k)) {
1763                                 search_key = bkey_successor(iter, k.k->p);
1764                                 goto start;
1765                         }
1766
1767                         break;
1768                 }
1769
1770                 if (!btree_iter_set_pos_to_next_leaf(iter))
1771                         return bkey_s_c_null;
1772         }
1773
1774         /*
1775          * iter->pos should be monotonically increasing, and always be equal to
1776          * the key we just returned - except extents can straddle iter->pos:
1777          */
1778         if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
1779                 iter->pos = k.k->p;
1780         else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
1781                 iter->pos = bkey_start_pos(k.k);
1782
1783         bch2_btree_iter_verify_entry_exit(iter);
1784         bch2_btree_iter_verify(iter);
1785         iter->should_be_locked = true;
1786         return k;
1787 }
1788
1789 /**
1790  * bch2_btree_iter_next: returns first key greater than iterator's current
1791  * position
1792  */
1793 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
1794 {
1795         if (!bch2_btree_iter_advance(iter))
1796                 return bkey_s_c_null;
1797
1798         return bch2_btree_iter_peek(iter);
1799 }
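
/*
 * Example (illustrative): the usual way to walk keys with peek()/next();
 * process() is a placeholder and transaction restart handling (-EINTR) is up
 * to the caller:
 *
 *	for (k = bch2_btree_iter_peek(iter);
 *	     k.k && !(ret = bkey_err(k));
 *	     k = bch2_btree_iter_next(iter))
 *		process(k);
 */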
1800
1801 /**
1802  * bch2_btree_iter_peek_prev: returns first key less than or equal to
1803  * iterator's current position
1804  */
1805 struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
1806 {
1807         struct btree_iter_level *l = &iter->l[0];
1808         struct bkey_s_c k;
1809         int ret;
1810
1811         EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
1812         EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);
1813         bch2_btree_iter_verify(iter);
1814         bch2_btree_iter_verify_entry_exit(iter);
1815
1816         btree_iter_set_search_pos(iter, iter->pos);
1817
1818         while (1) {
1819                 ret = btree_iter_traverse(iter);
1820                 if (unlikely(ret)) {
1821                         k = bkey_s_c_err(ret);
1822                         goto no_key;
1823                 }
1824
1825                 k = btree_iter_level_peek(iter, l);
1826                 if (!k.k ||
1827                     ((iter->flags & BTREE_ITER_IS_EXTENTS)
1828                      ? bkey_cmp(bkey_start_pos(k.k), iter->pos) >= 0
1829                      : bkey_cmp(k.k->p, iter->pos) > 0))
1830                         k = btree_iter_level_prev(iter, l);
1831
1832                 if (likely(k.k))
1833                         break;
1834
1835                 if (!btree_iter_set_pos_to_prev_leaf(iter)) {
1836                         k = bkey_s_c_null;
1837                         goto no_key;
1838                 }
1839         }
1840
1841         EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
1842
1843         /* Extents can straddle iter->pos: */
1844         if (bkey_cmp(k.k->p, iter->pos) < 0)
1845                 iter->pos = k.k->p;
1846 out:
1847         bch2_btree_iter_verify_entry_exit(iter);
1848         bch2_btree_iter_verify(iter);
1849         iter->should_be_locked = true;
1850         return k;
1851 no_key:
1852         /*
1853          * btree_iter_level_peek() may have set iter->k to a key we didn't want, and
1854          * then we errored going to the previous leaf - make sure it's
1855          * consistent with iter->pos:
1856          */
1857         bkey_init(&iter->k);
1858         iter->k.p = iter->pos;
1859         goto out;
1860 }
1861
1862 /**
1863  * bch2_btree_iter_prev: returns first key less than iterator's current
1864  * position
1865  */
1866 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
1867 {
1868         if (!bch2_btree_iter_rewind(iter))
1869                 return bkey_s_c_null;
1870
1871         return bch2_btree_iter_peek_prev(iter);
1872 }
1873
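/*
 * bch2_btree_iter_peek_slot: like peek(), but returns a key for the slot at
 * the iterator's current position even if nothing is stored there - a
 * synthesized deleted key, sized (for extents) to cover the hole up to the
 * next key:
 */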
1874 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
1875 {
1876         struct bpos search_key;
1877         struct bkey_s_c k;
1878         int ret;
1879
1880         EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS &&
1881                 btree_iter_type(iter) != BTREE_ITER_CACHED);
1882         bch2_btree_iter_verify(iter);
1883         bch2_btree_iter_verify_entry_exit(iter);
1884
1885         /* extents can't span inode numbers: */
1886         if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
1887             unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
1888                 if (iter->pos.inode == KEY_INODE_MAX)
1889                         return bkey_s_c_null;
1890
1891                 bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
1892         }
1893
1894         search_key = btree_iter_search_key(iter);
1895         btree_iter_set_search_pos(iter, search_key);
1896
1897         ret = btree_iter_traverse(iter);
1898         if (unlikely(ret))
1899                 return bkey_s_c_err(ret);
1900
1901         if (btree_iter_type(iter) == BTREE_ITER_CACHED ||
1902             !(iter->flags & BTREE_ITER_IS_EXTENTS)) {
1903                 struct bkey_i *next_update;
1904                 struct bkey_cached *ck;
1905
1906                 switch (btree_iter_type(iter)) {
1907                 case BTREE_ITER_KEYS:
1908                         k = btree_iter_level_peek_all(iter, &iter->l[0]);
1909                         EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, iter->pos) == 0);
1910                         break;
1911                 case BTREE_ITER_CACHED:
1912                         ck = (void *) iter->l[0].b;
1913                         EBUG_ON(iter->btree_id != ck->key.btree_id ||
1914                                 bkey_cmp(iter->pos, ck->key.pos));
1915                         BUG_ON(!ck->valid);
1916
1917                         k = bkey_i_to_s_c(ck->k);
1918                         break;
1919                 case BTREE_ITER_NODES:
1920                         BUG();
1921                 }
1922
1923                 next_update = btree_trans_peek_updates(iter, search_key);
1924                 if (next_update &&
1925                     (!k.k || bpos_cmp(next_update->k.p, k.k->p) <= 0)) {
1926                         iter->k = next_update->k;
1927                         k = bkey_i_to_s_c(next_update);
1928                 }
1929         } else {
1930                 if ((iter->flags & BTREE_ITER_INTENT)) {
1931                         struct btree_iter *child =
1932                                 btree_iter_child_alloc(iter, _THIS_IP_);
1933
1934                         btree_iter_copy(child, iter);
1935                         k = bch2_btree_iter_peek(child);
1936
1937                         if (k.k && !bkey_err(k))
1938                                 iter->k = child->k;
1939                 } else {
1940                         struct bpos pos = iter->pos;
1941
1942                         k = bch2_btree_iter_peek(iter);
1943                         iter->pos = pos;
1944                 }
1945
1946                 if (unlikely(bkey_err(k)))
1947                         return k;
1948         }
1949
1950         if (!(iter->flags & BTREE_ITER_IS_EXTENTS)) {
1951                 if (!k.k ||
1952                     ((iter->flags & BTREE_ITER_ALL_SNAPSHOTS)
1953                      ? bpos_cmp(iter->pos, k.k->p)
1954                      : bkey_cmp(iter->pos, k.k->p))) {
1955                         bkey_init(&iter->k);
1956                         iter->k.p = iter->pos;
1957                         k = (struct bkey_s_c) { &iter->k, NULL };
1958                 }
1959         } else {
1960                 struct bpos next = k.k ? bkey_start_pos(k.k) : POS_MAX;
1961
1962                 if (bkey_cmp(iter->pos, next) < 0) {
1963                         bkey_init(&iter->k);
1964                         iter->k.p = iter->pos;
1965                         bch2_key_resize(&iter->k,
1966                                         min_t(u64, KEY_SIZE_MAX,
1967                                               (next.inode == iter->pos.inode
1968                                                ? next.offset
1969                                                : KEY_OFFSET_MAX) -
1970                                               iter->pos.offset));
1971
1972                         k = (struct bkey_s_c) { &iter->k, NULL };
1973                         EBUG_ON(!k.k->size);
1974                 }
1975         }
1976
1977         bch2_btree_iter_verify_entry_exit(iter);
1978         bch2_btree_iter_verify(iter);
1979         iter->should_be_locked = true;
1980
1981         return k;
1982 }
1983
1984 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
1985 {
1986         if (!bch2_btree_iter_advance(iter))
1987                 return bkey_s_c_null;
1988
1989         return bch2_btree_iter_peek_slot(iter);
1990 }
1991
1992 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
1993 {
1994         if (!bch2_btree_iter_rewind(iter))
1995                 return bkey_s_c_null;
1996
1997         return bch2_btree_iter_peek_slot(iter);
1998 }
1999
2000 static inline void bch2_btree_iter_init(struct btree_trans *trans,
2001                         struct btree_iter *iter, enum btree_id btree_id)
2002 {
2003         struct bch_fs *c = trans->c;
2004         unsigned i;
2005
2006         iter->trans                     = trans;
2007         iter->uptodate                  = BTREE_ITER_NEED_TRAVERSE;
2008         iter->btree_id                  = btree_id;
2009         iter->real_pos                  = POS_MIN;
2010         iter->level                     = 0;
2011         iter->min_depth                 = 0;
2012         iter->locks_want                = 0;
2013         iter->nodes_locked              = 0;
2014         iter->nodes_intent_locked       = 0;
2015         for (i = 0; i < ARRAY_SIZE(iter->l); i++)
2016                 iter->l[i].b            = BTREE_ITER_NO_NODE_INIT;
2017
2018         prefetch(c->btree_roots[btree_id].b);
2019 }
2020
2021 /* new transactional stuff: */
2022
2023 static inline void btree_iter_verify_sorted_ref(struct btree_trans *trans,
2024                                                 struct btree_iter *iter)
2025 {
2026         EBUG_ON(iter->sorted_idx >= trans->nr_sorted);
2027         EBUG_ON(trans->sorted[iter->sorted_idx] != iter->idx);
2028         EBUG_ON(!(trans->iters_linked & (1ULL << iter->idx)));
2029 }
2030
2031 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2032 {
2033 #ifdef CONFIG_BCACHEFS_DEBUG
2034         unsigned i;
2035
2036         for (i = 0; i < trans->nr_sorted; i++)
2037                 btree_iter_verify_sorted_ref(trans, trans->iters + trans->sorted[i]);
2038 #endif
2039 }
2040
2041 static inline void btree_trans_verify_sorted(struct btree_trans *trans)
2042 {
2043 #ifdef CONFIG_BCACHEFS_DEBUG
2044         struct btree_iter *iter, *prev = NULL;
2045
2046         trans_for_each_iter_inorder(trans, iter)
2047                 BUG_ON(prev && btree_iter_cmp(prev, iter) > 0);
2048 #endif
2049 }
2050
2051 static inline void btree_iter_swap(struct btree_trans *trans,
2052                                    struct btree_iter *l, struct btree_iter *r)
2053 {
2054         swap(l->sorted_idx, r->sorted_idx);
2055         swap(trans->sorted[l->sorted_idx],
2056              trans->sorted[r->sorted_idx]);
2057
2058         btree_iter_verify_sorted_ref(trans, l);
2059         btree_iter_verify_sorted_ref(trans, r);
2060 }
2061
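/*
 * Re-sort trans->sorted[] - indices into trans->iters, ordered by
 * btree_iter_cmp() - with a cocktail shaker sort, fixing up each iterator's
 * sorted_idx back pointer as entries move:
 */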
2062 static void btree_trans_sort_iters(struct btree_trans *trans)
2063 {
2064         bool swapped = false;
2065         int i, l = 0, r = trans->nr_sorted;
2066
2067         while (1) {
2068                 for (i = l; i + 1 < r; i++) {
2069                         if (btree_iter_cmp(trans->iters + trans->sorted[i],
2070                                            trans->iters + trans->sorted[i + 1]) > 0) {
2071                                 swap(trans->sorted[i], trans->sorted[i + 1]);
2072                                 trans->iters[trans->sorted[i]].sorted_idx = i;
2073                                 trans->iters[trans->sorted[i + 1]].sorted_idx = i + 1;
2074                                 swapped = true;
2075                         }
2076                 }
2077
2078                 if (!swapped)
2079                         break;
2080
2081                 r--;
2082                 swapped = false;
2083
2084                 for (i = r - 2; i >= l; --i) {
2085                         if (btree_iter_cmp(trans->iters + trans->sorted[i],
2086                                            trans->iters + trans->sorted[i + 1]) > 0) {
2087                                 swap(trans->sorted[i],
2088                                      trans->sorted[i + 1]);
2089                                 trans->iters[trans->sorted[i]].sorted_idx = i;
2090                                 trans->iters[trans->sorted[i + 1]].sorted_idx = i + 1;
2091                                 swapped = true;
2092                         }
2093                 }
2094
2095                 if (!swapped)
2096                         break;
2097
2098                 l++;
2099                 swapped = false;
2100         }
2101
2102         btree_trans_verify_sorted_refs(trans);
2103         btree_trans_verify_sorted(trans);
2104 }
2105
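/*
 * Restore sort order after a single iterator's position changes, by bubbling
 * it forwards or backwards past its neighbours:
 */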
2106 static void btree_iter_check_sort(struct btree_trans *trans, struct btree_iter *iter)
2107 {
2108         struct btree_iter *n;
2109
2110         EBUG_ON(iter->sorted_idx == U8_MAX);
2111
2112         n = next_btree_iter(trans, iter);
2113         if (n && btree_iter_cmp(iter, n) > 0) {
2114                 do {
2115                         btree_iter_swap(trans, iter, n);
2116                         n = next_btree_iter(trans, iter);
2117                 } while (n && btree_iter_cmp(iter, n) > 0);
2118
2119                 return;
2120         }
2121
2122         n = prev_btree_iter(trans, iter);
2123         if (n && btree_iter_cmp(n, iter) > 0) {
2124                 do {
2125                         btree_iter_swap(trans, n, iter);
2126                         n = prev_btree_iter(trans, iter);
2127                 } while (n && btree_iter_cmp(n, iter) > 0);
2128         }
2129
2130         btree_trans_verify_sorted(trans);
2131 }
2132
2133 static inline void btree_iter_list_remove(struct btree_trans *trans,
2134                                           struct btree_iter *iter)
2135 {
2136         unsigned i;
2137
2138         EBUG_ON(iter->sorted_idx >= trans->nr_sorted);
2139
2140         array_remove_item(trans->sorted, trans->nr_sorted, iter->sorted_idx);
2141
2142         for (i = iter->sorted_idx; i < trans->nr_sorted; i++)
2143                 trans->iters[trans->sorted[i]].sorted_idx = i;
2144
2145         iter->sorted_idx = U8_MAX;
2146
2147         btree_trans_verify_sorted_refs(trans);
2148 }
2149
2150 static inline void btree_iter_list_add(struct btree_trans *trans,
2151                                        struct btree_iter *pos,
2152                                        struct btree_iter *iter)
2153 {
2154         unsigned i;
2155
2156         btree_trans_verify_sorted_refs(trans);
2157
2158         iter->sorted_idx = pos ? pos->sorted_idx : trans->nr_sorted;
2159
2160         array_insert_item(trans->sorted, trans->nr_sorted, iter->sorted_idx, iter->idx);
2161
2162         for (i = iter->sorted_idx; i < trans->nr_sorted; i++)
2163                 trans->iters[trans->sorted[i]].sorted_idx = i;
2164
2165         btree_trans_verify_sorted_refs(trans);
2166 }
2167
2168 static void btree_iter_child_free(struct btree_iter *iter)
2169 {
2170         struct btree_iter *child = btree_iter_child(iter);
2171
2172         if (child) {
2173                 bch2_trans_iter_free(iter->trans, child);
2174                 iter->child_idx = U8_MAX;
2175         }
2176 }
2177
2178 static struct btree_iter *btree_iter_child_alloc(struct btree_iter *iter,
2179                                                  unsigned long ip)
2180 {
2181         struct btree_trans *trans = iter->trans;
2182         struct btree_iter *child = btree_iter_child(iter);
2183
2184         if (!child) {
2185                 child = btree_trans_iter_alloc(trans, iter);
2186                 child->ip_allocated     = ip;
2187                 iter->child_idx         = child->idx;
2188
2189                 trans->iters_live       |= 1ULL << child->idx;
2190                 trans->iters_touched    |= 1ULL << child->idx;
2191         }
2192
2193         return child;
2194 }
2195
2196 static inline void __bch2_trans_iter_free(struct btree_trans *trans,
2197                                           unsigned idx)
2198 {
2199         btree_iter_child_free(&trans->iters[idx]);
2200
2201         btree_iter_list_remove(trans, &trans->iters[idx]);
2202
2203         __bch2_btree_iter_unlock(&trans->iters[idx]);
2204         trans->iters_linked             &= ~(1ULL << idx);
2205         trans->iters_live               &= ~(1ULL << idx);
2206         trans->iters_touched            &= ~(1ULL << idx);
2207 }
2208
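/*
 * Drop the caller's reference: the iterator is freed immediately only if it
 * hasn't been marked as touched and BTREE_ITER_KEEP_UNTIL_COMMIT isn't set;
 * otherwise it sticks around to be reused or freed later. Returns any error
 * recorded on the iterator:
 */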
2209 int bch2_trans_iter_put(struct btree_trans *trans,
2210                         struct btree_iter *iter)
2211 {
2212         int ret;
2213
2214         if (IS_ERR_OR_NULL(iter))
2215                 return 0;
2216
2217         BUG_ON(trans->iters + iter->idx != iter);
2218         BUG_ON(!btree_iter_live(trans, iter));
2219
2220         ret = btree_iter_err(iter);
2221
2222         if (!(trans->iters_touched & (1ULL << iter->idx)) &&
2223             !(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT))
2224                 __bch2_trans_iter_free(trans, iter->idx);
2225
2226         trans->iters_live       &= ~(1ULL << iter->idx);
2227         return ret;
2228 }
2229
2230 int bch2_trans_iter_free(struct btree_trans *trans,
2231                          struct btree_iter *iter)
2232 {
2233         if (IS_ERR_OR_NULL(iter))
2234                 return 0;
2235
2236         set_btree_iter_dontneed(trans, iter);
2237
2238         return bch2_trans_iter_put(trans, iter);
2239 }
2240
2241 noinline __cold
2242 static void btree_trans_iter_alloc_fail(struct btree_trans *trans)
2243 {
2245         struct btree_iter *iter;
2246         struct btree_insert_entry *i;
2247         char buf[100];
2248
2249         btree_trans_sort_iters(trans);
2250
2251         trans_for_each_iter_inorder(trans, iter)
2252                 printk(KERN_ERR "iter: btree %s pos %s%s%s%s %pS\n",
2253                        bch2_btree_ids[iter->btree_id],
2254                        (bch2_bpos_to_text(&PBUF(buf), iter->real_pos), buf),
2255                        btree_iter_live(trans, iter) ? " live" : "",
2256                        (trans->iters_touched & (1ULL << iter->idx)) ? " touched" : "",
2257                        iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT ? " keep" : "",
2258                        (void *) iter->ip_allocated);
2259
2260         trans_for_each_update(trans, i) {
2261                 char buf[300];
2262
2263                 bch2_bkey_val_to_text(&PBUF(buf), trans->c, bkey_i_to_s_c(i->k));
2264                 printk(KERN_ERR "update: btree %s %s\n",
2265                        bch2_btree_ids[i->iter->btree_id], buf);
2266         }
2267         panic("trans iter overflow\n");
2268 }
2269
2270 static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *trans,
2271                                                  struct btree_iter *pos)
2272 {
2273         struct btree_iter *iter;
2274         unsigned idx;
2275
2276         if (unlikely(trans->iters_linked ==
2277                      ~((~0ULL << 1) << (BTREE_ITER_MAX - 1))))
2278                 btree_trans_iter_alloc_fail(trans);
2279
2280         idx = __ffs64(~trans->iters_linked);
2281         iter = &trans->iters[idx];
2282
2283         iter->trans             = trans;
2284         iter->idx               = idx;
2285         iter->child_idx         = U8_MAX;
2286         iter->sorted_idx        = U8_MAX;
2287         iter->flags             = 0;
2288         iter->nodes_locked      = 0;
2289         iter->nodes_intent_locked = 0;
2290         trans->iters_linked     |= 1ULL << idx;
2291
2292         btree_iter_list_add(trans, pos, iter);
2293         return iter;
2294 }
2295
2296 static void btree_iter_copy(struct btree_iter *dst, struct btree_iter *src)
2297 {
2298         unsigned i;
2299
2300         __bch2_btree_iter_unlock(dst);
2301         btree_iter_child_free(dst);
2302
2303         memcpy(&dst->flags, &src->flags,
2304                sizeof(struct btree_iter) - offsetof(struct btree_iter, flags));
2305
2306         for (i = 0; i < BTREE_MAX_DEPTH; i++)
2307                 if (btree_node_locked(dst, i))
2308                         six_lock_increment(&dst->l[i].b->c.lock,
2309                                            __btree_lock_want(dst, i));
2310
2311         dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
2312         dst->flags &= ~BTREE_ITER_SET_POS_AFTER_COMMIT;
2313
2314         btree_iter_check_sort(dst->trans, dst);
2315 }
2316
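/*
 * Get an iterator for @btree_id at @pos: prefer reusing a live iterator of
 * the same type on the same btree, picking the one whose current position is
 * closest to the requested position, and only allocate a new one (copying
 * the best match if it must be preserved) when necessary:
 */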
2317 struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
2318                                          unsigned btree_id, struct bpos pos,
2319                                          unsigned locks_want,
2320                                          unsigned depth,
2321                                          unsigned flags)
2322 {
2323         struct btree_iter *iter, *best = NULL;
2324         struct bpos real_pos, pos_min = POS_MIN;
2325
2326         EBUG_ON(trans->restarted);
2327
2328         if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES &&
2329             btree_node_type_is_extents(btree_id) &&
2330             !(flags & BTREE_ITER_NOT_EXTENTS) &&
2331             !(flags & BTREE_ITER_ALL_SNAPSHOTS))
2332                 flags |= BTREE_ITER_IS_EXTENTS;
2333
2334         if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES &&
2335             !btree_type_has_snapshots(btree_id))
2336                 flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
2337
2338         if (!(flags & BTREE_ITER_ALL_SNAPSHOTS))
2339                 pos.snapshot = btree_type_has_snapshots(btree_id)
2340                         ? U32_MAX : 0;
2341
2342         real_pos = pos;
2343
2344         if ((flags & BTREE_ITER_IS_EXTENTS) &&
2345             bkey_cmp(pos, POS_MAX))
2346                 real_pos = bpos_nosnap_successor(pos);
2347
2348         trans_for_each_iter(trans, iter) {
2349                 if (btree_iter_type(iter) != (flags & BTREE_ITER_TYPE))
2350                         continue;
2351
2352                 if (iter->btree_id != btree_id)
2353                         continue;
2354
2355                 if (best) {
2356                         int cmp = bkey_cmp(bpos_diff(best->real_pos, real_pos),
2357                                            bpos_diff(iter->real_pos, real_pos));
2358
2359                         if (cmp < 0 ||
2360                             ((cmp == 0 && btree_iter_keep(trans, iter))))
2361                                 continue;
2362                 }
2363
2364                 best = iter;
2365         }
2366
2367         if (!best) {
2368                 iter = btree_trans_iter_alloc(trans, NULL);
2369                 bch2_btree_iter_init(trans, iter, btree_id);
2370         } else if (btree_iter_keep(trans, best)) {
2371                 iter = btree_trans_iter_alloc(trans, best);
2372                 btree_iter_copy(iter, best);
2373         } else {
2374                 iter = best;
2375         }
2376
2377         trans->iters_live       |= 1ULL << iter->idx;
2378         trans->iters_touched    |= 1ULL << iter->idx;
2379
2380         iter->flags = flags;
2381
2382         iter->snapshot = pos.snapshot;
2383
2384         /*
2385          * If the iterator has locks_want greater than requested, we explicitly
2386          * do not downgrade it here - on transaction restart because btree node
2387          * split needs to upgrade locks, we might be putting/getting the
2388          * iterator again. Downgrading iterators only happens via an explicit
2389          * bch2_trans_downgrade().
2390          */
2391
2392         locks_want = min(locks_want, BTREE_MAX_DEPTH);
2393         if (locks_want > iter->locks_want) {
2394                 iter->locks_want = locks_want;
2395                 btree_iter_get_locks(iter, true, _THIS_IP_);
2396         }
2397
2398         while (iter->level != depth) {
2399                 btree_node_unlock(iter, iter->level);
2400                 iter->l[iter->level].b = BTREE_ITER_NO_NODE_INIT;
2401                 iter->uptodate = BTREE_ITER_NEED_TRAVERSE;
2402                 if (iter->level < depth)
2403                         iter->level++;
2404                 else
2405                         iter->level--;
2406         }
2407
2408         iter->min_depth = depth;
2409
2410         bch2_btree_iter_set_pos(iter, pos);
2411         btree_iter_set_search_pos(iter, real_pos);
2412
2413         trace_trans_get_iter(_RET_IP_, trans->ip,
2414                              btree_id,
2415                              &real_pos, locks_want, iter->uptodate,
2416                              best ? &best->real_pos     : &pos_min,
2417                              best ? best->locks_want    : U8_MAX,
2418                              best ? best->uptodate      : U8_MAX);
2419
2420         return iter;
2421 }
2422
2423 struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *trans,
2424                                             enum btree_id btree_id,
2425                                             struct bpos pos,
2426                                             unsigned locks_want,
2427                                             unsigned depth,
2428                                             unsigned flags)
2429 {
2430         struct btree_iter *iter =
2431                 __bch2_trans_get_iter(trans, btree_id, pos,
2432                                       locks_want, depth,
2433                                       BTREE_ITER_NODES|
2434                                       BTREE_ITER_NOT_EXTENTS|
2435                                       BTREE_ITER_ALL_SNAPSHOTS|
2436                                       flags);
2437
2438         BUG_ON(bkey_cmp(iter->pos, pos));
2439         BUG_ON(iter->locks_want != min(locks_want, BTREE_MAX_DEPTH));
2440         BUG_ON(iter->level      != depth);
2441         BUG_ON(iter->min_depth  != depth);
2442         iter->ip_allocated = _RET_IP_;
2443
2444         return iter;
2445 }
2446
2447 struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *trans,
2448                                         struct btree_iter *src)
2449 {
2450         struct btree_iter *iter;
2451
2452         iter = btree_trans_iter_alloc(trans, src);
2453         btree_iter_copy(iter, src);
2454
2455         trans->iters_live |= 1ULL << iter->idx;
2456         /*
2457          * We don't need to preserve this iter since it's cheap to copy it
2458          * again - this will cause bch2_trans_iter_put() to free it right away:
2459          */
2460         set_btree_iter_dontneed(trans, iter);
2461
2462         return iter;
2463 }
2464
2465 void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
2466 {
2467         size_t new_top = trans->mem_top + size;
2468         void *p;
2469
2470         if (new_top > trans->mem_bytes) {
2471                 size_t old_bytes = trans->mem_bytes;
2472                 size_t new_bytes = roundup_pow_of_two(new_top);
2473                 void *new_mem;
2474
2475                 WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
2476
2477                 new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
2478                 if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
2479                         new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
2480                         new_bytes = BTREE_TRANS_MEM_MAX;
2481                         kfree(trans->mem);
2482                 }
2483
2484                 if (!new_mem)
2485                         return ERR_PTR(-ENOMEM);
2486
2487                 trans->mem = new_mem;
2488                 trans->mem_bytes = new_bytes;
2489
2490                 if (old_bytes) {
2491                         trace_trans_restart_mem_realloced(trans->ip, _RET_IP_, new_bytes);
2492                         btree_trans_restart(trans);
2493                         return ERR_PTR(-EINTR);
2494                 }
2495         }
2496
2497         p = trans->mem + trans->mem_top;
2498         trans->mem_top += size;
2499         memset(p, 0, size);
2500         return p;
2501 }
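
/*
 * Example (illustrative): allocations are bump allocated out of trans->mem
 * and live until the transaction is reset; callers must handle -EINTR, which
 * signals that growing the buffer forced a transaction restart:
 *
 *	p = bch2_trans_kmalloc(trans, sizeof(*p));
 *	ret = PTR_ERR_OR_ZERO(p);
 *	if (ret)
 *		goto err;
 */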
2502
2503 inline void bch2_trans_unlink_iters(struct btree_trans *trans)
2504 {
2505         u64 iters = trans->iters_linked &
2506                 ~trans->iters_touched &
2507                 ~trans->iters_live;
2508
2509         while (iters) {
2510                 unsigned idx = __ffs64(iters);
2511
2512                 iters &= ~(1ULL << idx);
2513                 __bch2_trans_iter_free(trans, idx);
2514         }
2515 }
2516
2517 /**
2518  * bch2_trans_begin() - reset a transaction after an interrupted attempt
2519  * @trans: transaction to reset
2520  *
2521  * While iterating over nodes or updating nodes, an attempt to lock a btree
2522  * node may return EINTR when the trylock fails. When this occurs
2523  * bch2_trans_begin() should be called and the transaction retried.
2524  */
2525 void bch2_trans_begin(struct btree_trans *trans)
2526 {
2527         struct btree_iter *iter;
2528
2529         trans_for_each_iter(trans, iter)
2530                 iter->flags &= ~(BTREE_ITER_KEEP_UNTIL_COMMIT|
2531                                  BTREE_ITER_SET_POS_AFTER_COMMIT);
2532
2533         /*
2534          * XXX: we shouldn't be doing this if the transaction was restarted, but
2535          * currently we still overflow transaction iterators if we do that
2536          */
2537         bch2_trans_unlink_iters(trans);
2538         trans->iters_touched &= trans->iters_live;
2539
2540         trans->extra_journal_res        = 0;
2541         trans->nr_updates               = 0;
2542         trans->mem_top                  = 0;
2543
2544         trans->hooks                    = NULL;
2545         trans->extra_journal_entries    = NULL;
2546         trans->extra_journal_entry_u64s = 0;
2547
2548         if (trans->fs_usage_deltas) {
2549                 trans->fs_usage_deltas->used = 0;
2550                 memset(&trans->fs_usage_deltas->memset_start, 0,
2551                        (void *) &trans->fs_usage_deltas->memset_end -
2552                        (void *) &trans->fs_usage_deltas->memset_start);
2553         }
2554
2555         bch2_trans_cond_resched(trans);
2556
2557         if (trans->restarted)
2558                 bch2_btree_iter_traverse_all(trans);
2559
2560         trans->restarted = false;
2561 }
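
/*
 * Example (illustrative) retry loop around a transaction; do_stuff() stands
 * in for whatever btree work the caller performs:
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	do {
 *		bch2_trans_begin(&trans);
 *		ret = do_stuff(&trans);
 *	} while (ret == -EINTR);
 *	ret = bch2_trans_exit(&trans) ?: ret;
 */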
2562
2563 static void bch2_trans_alloc_iters(struct btree_trans *trans, struct bch_fs *c)
2564 {
2565         size_t iters_bytes      = sizeof(struct btree_iter) * BTREE_ITER_MAX;
2566         size_t updates_bytes    = sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
2567         size_t sorted_bytes     = sizeof(u8) * BTREE_ITER_MAX;
2568         void *p = NULL;
2569
2570         BUG_ON(trans->used_mempool);
2571
2572 #ifdef __KERNEL__
2573         p = this_cpu_xchg(c->btree_iters_bufs->iter, NULL);
2574 #endif
2575         if (!p)
2576                 p = mempool_alloc(&trans->c->btree_iters_pool, GFP_NOFS);
2577
2578         trans->iters            = p; p += iters_bytes;
2579         trans->updates          = p; p += updates_bytes;
2580         trans->sorted           = p; p += sorted_bytes;
2581 }
2582
2583 void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
2584                      unsigned expected_nr_iters,
2585                      size_t expected_mem_bytes)
2586         __acquires(&c->btree_trans_barrier)
2587 {
2588         memset(trans, 0, sizeof(*trans));
2589         trans->c                = c;
2590         trans->ip               = _RET_IP_;
2591
2592         /*
2593          * reallocating iterators currently completely breaks
2594          * bch2_trans_iter_put(), so we always allocate the max:
2595          */
2596         bch2_trans_alloc_iters(trans, c);
2597
2598         if (expected_mem_bytes) {
2599                 trans->mem_bytes = roundup_pow_of_two(expected_mem_bytes);
2600                 trans->mem = kmalloc(trans->mem_bytes, GFP_KERNEL|__GFP_NOFAIL);
2601
2602                 if (unlikely(!trans->mem)) {
2603                         trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
2604                         trans->mem_bytes = BTREE_TRANS_MEM_MAX;
2605                 }
2606         }
2607
2608         trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
2609
2610 #ifdef CONFIG_BCACHEFS_DEBUG
2611         trans->pid = current->pid;
2612         mutex_lock(&c->btree_trans_lock);
2613         list_add(&trans->list, &c->btree_trans_list);
2614         mutex_unlock(&c->btree_trans_lock);
2615 #endif
2616 }
2617
2618 int bch2_trans_exit(struct btree_trans *trans)
2619         __releases(&c->btree_trans_barrier)
2620 {
2621         struct bch_fs *c = trans->c;
2622
2623         bch2_trans_unlock(trans);
2624
2625 #ifdef CONFIG_BCACHEFS_DEBUG
2626         if (trans->iters_live) {
2627                 struct btree_iter *iter;
2628
2629                 trans_for_each_iter(trans, iter)
2630                         btree_iter_child_free(iter);
2631         }
2632
2633         if (trans->iters_live) {
2634                 struct btree_iter *iter;
2635
2636                 bch_err(c, "btree iterators leaked!");
2637                 trans_for_each_iter(trans, iter)
2638                         if (btree_iter_live(trans, iter))
2639                                 printk(KERN_ERR "  btree %s allocated at %pS\n",
2640                                        bch2_btree_ids[iter->btree_id],
2641                                        (void *) iter->ip_allocated);
2642                 /* Be noisy about this: */
2643                 bch2_fatal_error(c);
2644         }
2645
2646         mutex_lock(&trans->c->btree_trans_lock);
2647         list_del(&trans->list);
2648         mutex_unlock(&trans->c->btree_trans_lock);
2649 #endif
2650
2651         srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
2652
2653         bch2_journal_preres_put(&trans->c->journal, &trans->journal_preres);
2654
2655         if (trans->fs_usage_deltas) {
2656                 if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
2657                     REPLICAS_DELTA_LIST_MAX)
2658                         mempool_free(trans->fs_usage_deltas,
2659                                      &trans->c->replicas_delta_pool);
2660                 else
2661                         kfree(trans->fs_usage_deltas);
2662         }
2663
2664         if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
2665                 mempool_free(trans->mem, &trans->c->btree_trans_mem_pool);
2666         else
2667                 kfree(trans->mem);
2668
2669 #ifdef __KERNEL__
2670         /*
2671          * Userspace doesn't have a real percpu implementation:
2672          */
2673         trans->iters = this_cpu_xchg(c->btree_iters_bufs->iter, trans->iters);
2674 #endif
2675
2676         if (trans->iters)
2677                 mempool_free(trans->iters, &trans->c->btree_iters_pool);
2678
2679         trans->mem      = (void *) 0x1;
2680         trans->iters    = (void *) 0x1;
2681
2682         return trans->error ? -EIO : 0;
2683 }
2684
2685 static void __maybe_unused
2686 bch2_btree_iter_node_to_text(struct printbuf *out,
2687                              struct btree_bkey_cached_common *_b,
2688                              enum btree_iter_type type)
2689 {
2690         pr_buf(out, "    l=%u %s:",
2691                _b->level, bch2_btree_ids[_b->btree_id]);
2692         bch2_bpos_to_text(out, btree_node_pos(_b, type));
2693 }
2694
2695 #ifdef CONFIG_BCACHEFS_DEBUG
2696 static bool trans_has_btree_nodes_locked(struct btree_trans *trans)
2697 {
2698         struct btree_iter *iter;
2699
2700         trans_for_each_iter(trans, iter)
2701                 if (btree_iter_type(iter) != BTREE_ITER_CACHED &&
2702                     iter->nodes_locked)
2703                         return true;
2704         return false;
2705 }
2706 #endif
2707
2708 void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
2709 {
2710 #ifdef CONFIG_BCACHEFS_DEBUG
2711         struct btree_trans *trans;
2712         struct btree_iter *iter;
2713         struct btree *b;
2714         unsigned l;
2715
2716         mutex_lock(&c->btree_trans_lock);
2717         list_for_each_entry(trans, &c->btree_trans_list, list) {
2718                 if (!trans_has_btree_nodes_locked(trans))
2719                         continue;
2720
2721                 pr_buf(out, "%i %ps\n", trans->pid, (void *) trans->ip);
2722
2723                 trans_for_each_iter(trans, iter) {
2724                         if (!iter->nodes_locked)
2725                                 continue;
2726
2727                         pr_buf(out, "  iter %u %c %s:",
2728                                iter->idx,
2729                                btree_iter_type(iter) == BTREE_ITER_CACHED ? 'c' : 'b',
2730                                bch2_btree_ids[iter->btree_id]);
2731                         bch2_bpos_to_text(out, iter->pos);
2732                         pr_buf(out, "\n");
2733
2734                         for (l = 0; l < BTREE_MAX_DEPTH; l++) {
2735                                 if (btree_node_locked(iter, l)) {
2736                                         pr_buf(out, "    %s l=%u ",
2737                                                btree_node_intent_locked(iter, l) ? "i" : "r", l);
2738                                         bch2_btree_iter_node_to_text(out,
2739                                                         (void *) iter->l[l].b,
2740                                                         btree_iter_type(iter));
2741                                         pr_buf(out, "\n");
2742                                 }
2743                         }
2744                 }
2745
2746                 b = READ_ONCE(trans->locking);
2747                 if (b) {
2748                         iter = &trans->iters[trans->locking_iter_idx];
2749                         pr_buf(out, "  locking iter %u %c l=%u %s:",
2750                                trans->locking_iter_idx,
2751                                btree_iter_type(iter) == BTREE_ITER_CACHED ? 'c' : 'b',
2752                                trans->locking_level,
2753                                bch2_btree_ids[trans->locking_btree_id]);
2754                         bch2_bpos_to_text(out, trans->locking_pos);
2755
2756                         pr_buf(out, " node ");
2757                         bch2_btree_iter_node_to_text(out,
2758                                         (void *) b,
2759                                         btree_iter_type(iter));
2760                         pr_buf(out, "\n");
2761                 }
2762         }
2763         mutex_unlock(&c->btree_trans_lock);
2764 #endif
2765 }
2766
2767 void bch2_fs_btree_iter_exit(struct bch_fs *c)
2768 {
2769         mempool_exit(&c->btree_trans_mem_pool);
2770         mempool_exit(&c->btree_iters_pool);
2771         cleanup_srcu_struct(&c->btree_trans_barrier);
2772 }
2773
2774 int bch2_fs_btree_iter_init(struct bch_fs *c)
2775 {
2776         unsigned nr = BTREE_ITER_MAX;
2777
2778         INIT_LIST_HEAD(&c->btree_trans_list);
2779         mutex_init(&c->btree_trans_lock);
2780
2781         return  init_srcu_struct(&c->btree_trans_barrier) ?:
2782                 mempool_init_kmalloc_pool(&c->btree_iters_pool, 1,
2783                         sizeof(u8) * nr +
2784                         sizeof(struct btree_iter) * nr +
2785                         sizeof(struct btree_insert_entry) * nr) ?:
2786                 mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
2787                                           BTREE_TRANS_MEM_MAX);
2788 }