[bcachefs-tools-debian] / libbcachefs / btree_iter.c
1 // SPDX-License-Identifier: GPL-2.0
2
3 #include "bcachefs.h"
4 #include "bkey_methods.h"
5 #include "bkey_buf.h"
6 #include "btree_cache.h"
7 #include "btree_iter.h"
8 #include "btree_key_cache.h"
9 #include "btree_locking.h"
10 #include "btree_update.h"
11 #include "debug.h"
12 #include "error.h"
13 #include "extents.h"
14 #include "journal.h"
15 #include "replicas.h"
16
17 #include <linux/prefetch.h>
18 #include <trace/events/bcachefs.h>
19
20 static void btree_iter_set_search_pos(struct btree_iter *, struct bpos);
21
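/*
 * bkey_successor()/bkey_predecessor(): advance or rewind a key position by
 * one. When the iterator is not in BTREE_ITER_ALL_SNAPSHOTS mode, we step
 * over the snapshot field entirely and then pin the result to
 * iter->snapshot, so iteration stays within a single snapshot.
 */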
22 static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
23 {
24         EBUG_ON(btree_iter_type(iter) == BTREE_ITER_NODES);
25
26         /* Are we iterating over keys in all snapshots? */
27         if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
28                 p = bpos_successor(p);
29         } else {
30                 p = bpos_nosnap_successor(p);
31                 p.snapshot = iter->snapshot;
32         }
33
34         return p;
35 }
36
37 static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
38 {
39         EBUG_ON(btree_iter_type(iter) == BTREE_ITER_NODES);
40
41         /* Are we iterating over keys in all snapshots? */
42         if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
43                 p = bpos_predecessor(p);
44         } else {
45                 p = bpos_nosnap_predecessor(p);
46                 p.snapshot = iter->snapshot;
47         }
48
49         return p;
50 }
51
52 static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
53 {
54         return l < BTREE_MAX_DEPTH &&
55                 (unsigned long) iter->l[l].b >= 128;
56 }
57
58 static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
59 {
60         struct bpos pos = iter->pos;
61
62         if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
63             bkey_cmp(pos, POS_MAX))
64                 pos = bkey_successor(iter, pos);
65         return pos;
66 }
67
68 static inline bool btree_iter_pos_before_node(struct btree_iter *iter,
69                                               struct btree *b)
70 {
71         return bpos_cmp(iter->real_pos, b->data->min_key) < 0;
72 }
73
74 static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
75                                              struct btree *b)
76 {
77         return bpos_cmp(b->key.k.p, iter->real_pos) < 0;
78 }
79
80 static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
81                                           struct btree *b)
82 {
83         return iter->btree_id == b->c.btree_id &&
84                 !btree_iter_pos_before_node(iter, b) &&
85                 !btree_iter_pos_after_node(iter, b);
86 }
87
88 /* Btree node locking: */
89
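/*
 * Btree nodes are protected by SIX locks (shared/intent/exclusive). A write
 * lock can only be taken while holding an intent lock on the node, and any
 * read locks held on that node by other iterators in the same transaction
 * have to be dropped for the duration, since they would block the write
 * lock.
 */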
90 void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
91 {
92         bch2_btree_node_unlock_write_inlined(b, iter);
93 }
94
95 void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
96 {
97         struct btree_iter *linked;
98         unsigned readers = 0;
99
100         EBUG_ON(!btree_node_intent_locked(iter, b->c.level));
101
102         trans_for_each_iter(iter->trans, linked)
103                 if (linked->l[b->c.level].b == b &&
104                     btree_node_read_locked(linked, b->c.level))
105                         readers++;
106
107         /*
108          * Must drop our read locks before calling six_lock_write() -
109          * six_unlock() won't do wakeups until the reader count
110          * goes to 0, and it's safe because we have the node intent
111          * locked:
112          */
113         atomic64_sub(__SIX_VAL(read_lock, readers),
114                      &b->c.lock.state.counter);
115         btree_node_lock_type(iter->trans->c, b, SIX_LOCK_write);
116         atomic64_add(__SIX_VAL(read_lock, readers),
117                      &b->c.lock.state.counter);
118 }
119
120 bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
121 {
122         struct btree *b = btree_iter_node(iter, level);
123         int want = __btree_lock_want(iter, level);
124
125         if (!is_btree_node(iter, level))
126                 return false;
127
128         if (race_fault())
129                 return false;
130
131         if (six_relock_type(&b->c.lock, want, iter->l[level].lock_seq) ||
132             (btree_node_lock_seq_matches(iter, b, level) &&
133              btree_node_lock_increment(iter->trans, b, level, want))) {
134                 mark_btree_node_locked(iter, level, want);
135                 return true;
136         } else {
137                 return false;
138         }
139 }
140
141 static bool bch2_btree_node_upgrade(struct btree_iter *iter, unsigned level)
142 {
143         struct btree *b = iter->l[level].b;
144
145         EBUG_ON(btree_lock_want(iter, level) != BTREE_NODE_INTENT_LOCKED);
146
147         if (!is_btree_node(iter, level))
148                 return false;
149
150         if (btree_node_intent_locked(iter, level))
151                 return true;
152
153         if (race_fault())
154                 return false;
155
156         if (btree_node_locked(iter, level)
157             ? six_lock_tryupgrade(&b->c.lock)
158             : six_relock_type(&b->c.lock, SIX_LOCK_intent, iter->l[level].lock_seq))
159                 goto success;
160
161         if (btree_node_lock_seq_matches(iter, b, level) &&
162             btree_node_lock_increment(iter->trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
163                 btree_node_unlock(iter, level);
164                 goto success;
165         }
166
167         return false;
168 success:
169         mark_btree_node_intent_locked(iter, level);
170         return true;
171 }
172
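/*
 * Relock (or, if @upgrade is true, upgrade to intent locks) every node from
 * iter->level up to iter->locks_want. On failure, everything below the level
 * that failed is unlocked, so that bch2_btree_iter_traverse() walks back up
 * far enough to retake the locks.
 */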
173 static inline bool btree_iter_get_locks(struct btree_iter *iter,
174                                         bool upgrade, bool trace)
175 {
176         unsigned l = iter->level;
177         int fail_idx = -1;
178
179         do {
180                 if (!btree_iter_node(iter, l))
181                         break;
182
183                 if (!(upgrade
184                       ? bch2_btree_node_upgrade(iter, l)
185                       : bch2_btree_node_relock(iter, l))) {
186                         if (trace)
187                                 (upgrade
188                                  ? trace_node_upgrade_fail
189                                  : trace_node_relock_fail)(l, iter->l[l].lock_seq,
190                                                 is_btree_node(iter, l)
191                                                 ? 0
192                                                 : (unsigned long) iter->l[l].b,
193                                                 is_btree_node(iter, l)
194                                                 ? iter->l[l].b->c.lock.state.seq
195                                                 : 0);
196
197                         fail_idx = l;
198                         btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
199                 }
200
201                 l++;
202         } while (l < iter->locks_want);
203
204         /*
205          * When we fail to get a lock, we have to ensure that any child nodes
206          * can't be relocked so bch2_btree_iter_traverse has to walk back up to
207          * the node that we failed to relock:
208          */
209         while (fail_idx >= 0) {
210                 btree_node_unlock(iter, fail_idx);
211                 iter->l[fail_idx].b = BTREE_ITER_NO_NODE_GET_LOCKS;
212                 --fail_idx;
213         }
214
215         if (iter->uptodate == BTREE_ITER_NEED_RELOCK)
216                 iter->uptodate = BTREE_ITER_NEED_PEEK;
217
218         bch2_btree_trans_verify_locks(iter->trans);
219
220         return iter->uptodate < BTREE_ITER_NEED_RELOCK;
221 }
222
223 static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
224                                   enum btree_iter_type type)
225 {
226         return  type != BTREE_ITER_CACHED
227                 ? container_of(_b, struct btree, c)->key.k.p
228                 : container_of(_b, struct bkey_cached, c)->key.pos;
229 }
230
231 /* Slowpath: */
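/*
 * Lock ordering, checked below before blocking on @b's lock: within a
 * transaction, locks must be taken in increasing btree id order; within a
 * btree, cached iterators come before non-cached ones; interior nodes must
 * be locked before their descendants; and nodes at the same level must be
 * locked in key order. An intent lock also can't block while we hold any
 * read locks. If taking this lock would violate the ordering we return
 * false so the transaction can be restarted instead of deadlocking.
 */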
232 bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
233                             unsigned level, struct btree_iter *iter,
234                             enum six_lock_type type,
235                             six_lock_should_sleep_fn should_sleep_fn, void *p,
236                             unsigned long ip)
237 {
238         struct btree_trans *trans = iter->trans;
239         struct btree_iter *linked, *deadlock_iter = NULL;
240         u64 start_time = local_clock();
241         unsigned reason = 9;
242         bool ret;
243
244         /* Check if it's safe to block: */
245         trans_for_each_iter(trans, linked) {
246                 if (!linked->nodes_locked)
247                         continue;
248
249                 /*
250                  * Can't block taking an intent lock if we have _any_ nodes read
251                  * locked:
252                  *
253                  * - Our read lock blocks another thread with an intent lock on
254                  *   the same node from getting a write lock, and thus from
255                  *   dropping its intent lock
256                  *
257                  * - And the other thread may have multiple nodes intent locked:
258                  *   both the node we want to intent lock, and the node we
259                  *   already have read locked - deadlock:
260                  */
261                 if (type == SIX_LOCK_intent &&
262                     linked->nodes_locked != linked->nodes_intent_locked) {
263                         deadlock_iter = linked;
264                         reason = 1;
265                 }
266
267                 if (linked->btree_id != iter->btree_id) {
268                         if (linked->btree_id > iter->btree_id) {
269                                 deadlock_iter = linked;
270                                 reason = 3;
271                         }
272                         continue;
273                 }
274
275                 /*
276                  * Within the same btree, cached iterators come before
277                  * non-cached iterators:
278                  */
279                 if (btree_iter_is_cached(linked) != btree_iter_is_cached(iter)) {
280                         if (btree_iter_is_cached(iter)) {
281                                 deadlock_iter = linked;
282                                 reason = 4;
283                         }
284                         continue;
285                 }
286
287                 /*
288                  * Interior nodes must be locked before their descendants: if
289                  * another iterator has locked possible descendants of the node
290                  * we're about to lock, it must have the ancestors locked too:
291                  */
292                 if (level > __fls(linked->nodes_locked)) {
293                         deadlock_iter = linked;
294                         reason = 5;
295                 }
296
297                 /* Must lock btree nodes in key order: */
298                 if (btree_node_locked(linked, level) &&
299                     bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
300                                                  btree_iter_type(linked))) <= 0) {
301                         deadlock_iter = linked;
302                         reason = 7;
303                         BUG_ON(trans->in_traverse_all);
304                 }
305         }
306
307         if (unlikely(deadlock_iter)) {
308                 trace_trans_restart_would_deadlock(iter->trans->ip, ip,
309                                 trans->in_traverse_all, reason,
310                                 deadlock_iter->btree_id,
311                                 btree_iter_type(deadlock_iter),
312                                 &deadlock_iter->real_pos,
313                                 iter->btree_id,
314                                 btree_iter_type(iter),
315                                 &pos);
316                 return false;
317         }
318
319         if (six_trylock_type(&b->c.lock, type))
320                 return true;
321
322 #ifdef CONFIG_BCACHEFS_DEBUG
323         trans->locking_iter_idx = iter->idx;
324         trans->locking_pos      = pos;
325         trans->locking_btree_id = iter->btree_id;
326         trans->locking_level    = level;
327         trans->locking          = b;
328 #endif
329
330         ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p) == 0;
331
332 #ifdef CONFIG_BCACHEFS_DEBUG
333         trans->locking = NULL;
334 #endif
335         if (ret)
336                 bch2_time_stats_update(&trans->c->times[lock_to_time_stat(type)],
337                                        start_time);
338         return ret;
339 }
340
341 /* Btree iterator locking: */
342
343 #ifdef CONFIG_BCACHEFS_DEBUG
344 static void bch2_btree_iter_verify_locks(struct btree_iter *iter)
345 {
346         unsigned l;
347
348         if (!(iter->trans->iters_linked & (1ULL << iter->idx))) {
349                 BUG_ON(iter->nodes_locked);
350                 return;
351         }
352
353         for (l = 0; is_btree_node(iter, l); l++) {
354                 if (iter->uptodate >= BTREE_ITER_NEED_RELOCK &&
355                     !btree_node_locked(iter, l))
356                         continue;
357
358                 BUG_ON(btree_lock_want(iter, l) !=
359                        btree_node_locked_type(iter, l));
360         }
361 }
362
363 void bch2_btree_trans_verify_locks(struct btree_trans *trans)
364 {
365         struct btree_iter *iter;
366
367         trans_for_each_iter(trans, iter)
368                 bch2_btree_iter_verify_locks(iter);
369 }
370 #else
371 static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
372 #endif
373
374 __flatten
375 bool bch2_btree_iter_relock(struct btree_iter *iter, bool trace)
376 {
377         return btree_iter_get_locks(iter, false, trace);
378 }
379
380 bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
381                                unsigned new_locks_want)
382 {
383         struct btree_iter *linked;
384
385         EBUG_ON(iter->locks_want >= new_locks_want);
386
387         iter->locks_want = new_locks_want;
388
389         if (btree_iter_get_locks(iter, true, true))
390                 return true;
391
392         /*
393          * XXX: this is ugly - we'd prefer to not be mucking with other
394          * iterators in the btree_trans here.
395          *
396          * On failure to upgrade the iterator, setting iter->locks_want and
397          * calling get_locks() is sufficient to make bch2_btree_iter_traverse()
398          * get the locks we want on transaction restart.
399          *
400          * But if this iterator was a clone, on transaction restart what we did
401          * to this iterator isn't going to be preserved.
402          *
403          * Possibly we could add an iterator field for the parent iterator when
404          * an iterator is a copy - for now, we'll just upgrade any other
405          * iterators with the same btree id.
406          *
407          * The code below used to be needed to ensure ancestor nodes get locked
408          * before interior nodes - now that's handled by
409          * bch2_btree_iter_traverse_all().
410          */
411         trans_for_each_iter(iter->trans, linked)
412                 if (linked != iter &&
413                     btree_iter_type(linked) == btree_iter_type(iter) &&
414                     linked->btree_id == iter->btree_id &&
415                     linked->locks_want < new_locks_want) {
416                         linked->locks_want = new_locks_want;
417                         btree_iter_get_locks(linked, true, false);
418                 }
419
420         return false;
421 }
422
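/*
 * Reduce locks_want: nodes locked above the new locks_want and above
 * iter->level are unlocked, while at iter->level itself an intent lock is
 * downgraded to a read lock rather than dropped.
 */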
423 void __bch2_btree_iter_downgrade(struct btree_iter *iter,
424                                  unsigned new_locks_want)
425 {
426         unsigned l;
427
428         EBUG_ON(iter->locks_want < new_locks_want);
429
430         iter->locks_want = new_locks_want;
431
432         while (iter->nodes_locked &&
433                (l = __fls(iter->nodes_locked)) >= iter->locks_want) {
434                 if (l > iter->level) {
435                         btree_node_unlock(iter, l);
436                 } else {
437                         if (btree_node_intent_locked(iter, l)) {
438                                 six_lock_downgrade(&iter->l[l].b->c.lock);
439                                 iter->nodes_intent_locked ^= 1 << l;
440                         }
441                         break;
442                 }
443         }
444
445         bch2_btree_trans_verify_locks(iter->trans);
446 }
447
448 void bch2_trans_downgrade(struct btree_trans *trans)
449 {
450         struct btree_iter *iter;
451
452         trans_for_each_iter(trans, iter)
453                 bch2_btree_iter_downgrade(iter);
454 }
455
456 /* Btree transaction locking: */
457
458 bool bch2_trans_relock(struct btree_trans *trans)
459 {
460         struct btree_iter *iter;
461
462         trans_for_each_iter(trans, iter)
463                 if (!bch2_btree_iter_relock(iter, true)) {
464                         trace_trans_restart_relock(trans->ip);
465                         return false;
466                 }
467         return true;
468 }
469
470 void bch2_trans_unlock(struct btree_trans *trans)
471 {
472         struct btree_iter *iter;
473
474         trans_for_each_iter(trans, iter)
475                 __bch2_btree_iter_unlock(iter);
476 }
477
478 /* Btree iterator: */
479
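/*
 * Rough usage sketch, for orientation only - the exact names and signatures
 * are the ones declared in btree_iter.h/btree_update.h and may differ from
 * what's shown here:
 *
 *	struct btree_trans trans;
 *	struct btree_iter *iter;
 *	struct bkey_s_c k;
 *	int ret = 0;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *
 *	for_each_btree_key(&trans, iter, BTREE_ID_extents, POS_MIN, 0, k, ret) {
 *		(use k; the iterator holds the locks it needs, and a lock
 *		 restart is reported via ret == -EINTR)
 *	}
 *
 *	bch2_trans_iter_put(&trans, iter);
 *	bch2_trans_exit(&trans);
 */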
480 #ifdef CONFIG_BCACHEFS_DEBUG
481
482 static void bch2_btree_iter_verify_cached(struct btree_iter *iter)
483 {
484         struct bkey_cached *ck;
485         bool locked = btree_node_locked(iter, 0);
486
487         if (!bch2_btree_node_relock(iter, 0))
488                 return;
489
490         ck = (void *) iter->l[0].b;
491         BUG_ON(ck->key.btree_id != iter->btree_id ||
492                bkey_cmp(ck->key.pos, iter->pos));
493
494         if (!locked)
495                 btree_node_unlock(iter, 0);
496 }
497
498 static void bch2_btree_iter_verify_level(struct btree_iter *iter,
499                                          unsigned level)
500 {
501         struct btree_iter_level *l;
502         struct btree_node_iter tmp;
503         bool locked;
504         struct bkey_packed *p, *k;
505         char buf1[100], buf2[100], buf3[100];
506         const char *msg;
507
508         if (!bch2_debug_check_iterators)
509                 return;
510
511         l       = &iter->l[level];
512         tmp     = l->iter;
513         locked  = btree_node_locked(iter, level);
514
515         if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
516                 if (!level)
517                         bch2_btree_iter_verify_cached(iter);
518                 return;
519         }
520
521         BUG_ON(iter->level < iter->min_depth);
522
523         if (!btree_iter_node(iter, level))
524                 return;
525
526         if (!bch2_btree_node_relock(iter, level))
527                 return;
528
529         BUG_ON(!btree_iter_pos_in_node(iter, l->b));
530
531         /*
532          * node iterators (BTREE_ITER_NODES) don't use the leaf node iterator:
533          */
534         if (btree_iter_type(iter) == BTREE_ITER_NODES &&
535             level <= iter->min_depth)
536                 goto unlock;
537
538         bch2_btree_node_iter_verify(&l->iter, l->b);
539
540         /*
541          * For interior nodes, the iterator will have skipped past
542          * deleted keys:
543          *
544          * For extents, the iterator may have skipped past deleted keys (but not
545          * whiteouts)
546          */
547         p = level || btree_node_type_is_extents(iter->btree_id)
548                 ? bch2_btree_node_iter_prev(&tmp, l->b)
549                 : bch2_btree_node_iter_prev_all(&tmp, l->b);
550         k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
551
552         if (p && bkey_iter_pos_cmp(l->b, p, &iter->real_pos) >= 0) {
553                 msg = "before";
554                 goto err;
555         }
556
557         if (k && bkey_iter_pos_cmp(l->b, k, &iter->real_pos) < 0) {
558                 msg = "after";
559                 goto err;
560         }
561 unlock:
562         if (!locked)
563                 btree_node_unlock(iter, level);
564         return;
565 err:
566         strcpy(buf2, "(none)");
567         strcpy(buf3, "(none)");
568
569         bch2_bpos_to_text(&PBUF(buf1), iter->real_pos);
570
571         if (p) {
572                 struct bkey uk = bkey_unpack_key(l->b, p);
573                 bch2_bkey_to_text(&PBUF(buf2), &uk);
574         }
575
576         if (k) {
577                 struct bkey uk = bkey_unpack_key(l->b, k);
578                 bch2_bkey_to_text(&PBUF(buf3), &uk);
579         }
580
581         panic("iterator should be %s key at level %u:\n"
582               "iter pos %s\n"
583               "prev key %s\n"
584               "cur  key %s\n",
585               msg, level, buf1, buf2, buf3);
586 }
587
588 static void bch2_btree_iter_verify(struct btree_iter *iter)
589 {
590         enum btree_iter_type type = btree_iter_type(iter);
591         unsigned i;
592
593         EBUG_ON(iter->btree_id >= BTREE_ID_NR);
594
595         BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
596                iter->pos.snapshot != iter->snapshot);
597
598         BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
599                (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
600
601         BUG_ON(type == BTREE_ITER_NODES &&
602                !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
603
604         BUG_ON(type != BTREE_ITER_NODES &&
605                (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
606                !btree_type_has_snapshots(iter->btree_id));
607
608         bch2_btree_iter_verify_locks(iter);
609
610         for (i = 0; i < BTREE_MAX_DEPTH; i++)
611                 bch2_btree_iter_verify_level(iter, i);
612 }
613
614 static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
615 {
616         enum btree_iter_type type = btree_iter_type(iter);
617
618         BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
619                iter->pos.snapshot != iter->snapshot);
620
621         BUG_ON((type == BTREE_ITER_KEYS ||
622                 type == BTREE_ITER_CACHED) &&
623                (bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
624                 bkey_cmp(iter->pos, iter->k.p) > 0));
625 }
626
627 void bch2_btree_trans_verify_iters(struct btree_trans *trans, struct btree *b)
628 {
629         struct btree_iter *iter;
630
631         if (!bch2_debug_check_iterators)
632                 return;
633
634         trans_for_each_iter_with_node(trans, b, iter)
635                 bch2_btree_iter_verify_level(iter, b->c.level);
636 }
637
638 #else
639
640 static inline void bch2_btree_iter_verify_level(struct btree_iter *iter, unsigned l) {}
641 static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
642 static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
643
644 #endif
645
646 static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
647                                         struct btree *b,
648                                         struct bset_tree *t,
649                                         struct bkey_packed *k)
650 {
651         struct btree_node_iter_set *set;
652
653         btree_node_iter_for_each(iter, set)
654                 if (set->end == t->end_offset) {
655                         set->k = __btree_node_key_to_offset(b, k);
656                         bch2_btree_node_iter_sort(iter, b);
657                         return;
658                 }
659
660         bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
661 }
662
663 static void __bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
664                                                struct btree *b,
665                                                struct bkey_packed *where)
666 {
667         struct btree_iter_level *l = &iter->l[b->c.level];
668
669         if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
670                 return;
671
672         if (bkey_iter_pos_cmp(l->b, where, &iter->real_pos) < 0)
673                 bch2_btree_node_iter_advance(&l->iter, l->b);
674
675         btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
676 }
677
678 void bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
679                                       struct btree *b,
680                                       struct bkey_packed *where)
681 {
682         struct btree_iter *linked;
683
684         trans_for_each_iter_with_node(iter->trans, b, linked) {
685                 __bch2_btree_iter_fix_key_modified(linked, b, where);
686                 bch2_btree_iter_verify_level(linked, b->c.level);
687         }
688 }
689
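/*
 * A key was inserted, overwritten or deleted at @where in bset @t: fix up
 * this iterator's node iterator so that its offsets into the bset remain
 * valid, without changing which key it logically points to.
 */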
690 static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
691                                       struct btree *b,
692                                       struct btree_node_iter *node_iter,
693                                       struct bset_tree *t,
694                                       struct bkey_packed *where,
695                                       unsigned clobber_u64s,
696                                       unsigned new_u64s)
697 {
698         const struct bkey_packed *end = btree_bkey_last(b, t);
699         struct btree_node_iter_set *set;
700         unsigned offset = __btree_node_key_to_offset(b, where);
701         int shift = new_u64s - clobber_u64s;
702         unsigned old_end = t->end_offset - shift;
703         unsigned orig_iter_pos = node_iter->data[0].k;
704         bool iter_current_key_modified =
705                 orig_iter_pos >= offset &&
706                 orig_iter_pos <= offset + clobber_u64s;
707
708         btree_node_iter_for_each(node_iter, set)
709                 if (set->end == old_end)
710                         goto found;
711
712         /* didn't find the bset in the iterator - might have to re-add it: */
713         if (new_u64s &&
714             bkey_iter_pos_cmp(b, where, &iter->real_pos) >= 0) {
715                 bch2_btree_node_iter_push(node_iter, b, where, end);
716                 goto fixup_done;
717         } else {
718                 /* Iterator is after key that changed */
719                 return;
720         }
721 found:
722         set->end = t->end_offset;
723
724         /* Iterator hasn't gotten to the key that changed yet: */
725         if (set->k < offset)
726                 return;
727
728         if (new_u64s &&
729             bkey_iter_pos_cmp(b, where, &iter->real_pos) >= 0) {
730                 set->k = offset;
731         } else if (set->k < offset + clobber_u64s) {
732                 set->k = offset + new_u64s;
733                 if (set->k == set->end)
734                         bch2_btree_node_iter_set_drop(node_iter, set);
735         } else {
736                 /* Iterator is after key that changed */
737                 set->k = (int) set->k + shift;
738                 return;
739         }
740
741         bch2_btree_node_iter_sort(node_iter, b);
742 fixup_done:
743         if (node_iter->data[0].k != orig_iter_pos)
744                 iter_current_key_modified = true;
745
746         /*
747          * When a new key is added, and the node iterator now points to that
748          * key, the iterator might have skipped past deleted keys that should
749          * come after the key the iterator now points to. We have to rewind to
750          * before those deleted keys - otherwise
751          * bch2_btree_node_iter_prev_all() breaks:
752          */
753         if (!bch2_btree_node_iter_end(node_iter) &&
754             iter_current_key_modified &&
755             (b->c.level ||
756              btree_node_type_is_extents(iter->btree_id))) {
757                 struct bset_tree *t;
758                 struct bkey_packed *k, *k2, *p;
759
760                 k = bch2_btree_node_iter_peek_all(node_iter, b);
761
762                 for_each_bset(b, t) {
763                         bool set_pos = false;
764
765                         if (node_iter->data[0].end == t->end_offset)
766                                 continue;
767
768                         k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);
769
770                         while ((p = bch2_bkey_prev_all(b, t, k2)) &&
771                                bkey_iter_cmp(b, k, p) < 0) {
772                                 k2 = p;
773                                 set_pos = true;
774                         }
775
776                         if (set_pos)
777                                 btree_node_iter_set_set_pos(node_iter,
778                                                             b, t, k2);
779                 }
780         }
781
782         if (!b->c.level &&
783             node_iter == &iter->l[0].iter &&
784             iter_current_key_modified)
785                 btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
786 }
787
788 void bch2_btree_node_iter_fix(struct btree_iter *iter,
789                               struct btree *b,
790                               struct btree_node_iter *node_iter,
791                               struct bkey_packed *where,
792                               unsigned clobber_u64s,
793                               unsigned new_u64s)
794 {
795         struct bset_tree *t = bch2_bkey_to_bset(b, where);
796         struct btree_iter *linked;
797
798         if (node_iter != &iter->l[b->c.level].iter) {
799                 __bch2_btree_node_iter_fix(iter, b, node_iter, t,
800                                            where, clobber_u64s, new_u64s);
801
802                 if (bch2_debug_check_iterators)
803                         bch2_btree_node_iter_verify(node_iter, b);
804         }
805
806         trans_for_each_iter_with_node(iter->trans, b, linked) {
807                 __bch2_btree_node_iter_fix(linked, b,
808                                            &linked->l[b->c.level].iter, t,
809                                            where, clobber_u64s, new_u64s);
810                 bch2_btree_iter_verify_level(linked, b->c.level);
811         }
812 }
813
814 static inline struct bkey_s_c __btree_iter_unpack(struct btree_iter *iter,
815                                                   struct btree_iter_level *l,
816                                                   struct bkey *u,
817                                                   struct bkey_packed *k)
818 {
819         struct bkey_s_c ret;
820
821         if (unlikely(!k)) {
822                 /*
823                  * signal to bch2_btree_iter_peek_slot() that we're currently at
824                  * a hole
825                  */
826                 u->type = KEY_TYPE_deleted;
827                 return bkey_s_c_null;
828         }
829
830         ret = bkey_disassemble(l->b, k, u);
831
832         if (bch2_debug_check_bkeys)
833                 bch2_bkey_debugcheck(iter->trans->c, l->b, ret);
834
835         return ret;
836 }
837
838 /* peek_all() doesn't skip deleted keys */
839 static inline struct bkey_s_c btree_iter_level_peek_all(struct btree_iter *iter,
840                                                         struct btree_iter_level *l,
841                                                         struct bkey *u)
842 {
843         return __btree_iter_unpack(iter, l, u,
844                         bch2_btree_node_iter_peek_all(&l->iter, l->b));
845 }
846
847 static inline struct bkey_s_c btree_iter_level_peek(struct btree_iter *iter,
848                                                     struct btree_iter_level *l)
849 {
850         struct bkey_s_c k = __btree_iter_unpack(iter, l, &iter->k,
851                         bch2_btree_node_iter_peek(&l->iter, l->b));
852
853         iter->real_pos = k.k ? k.k->p : l->b->key.k.p;
854         return k;
855 }
856
857 static inline struct bkey_s_c btree_iter_level_prev(struct btree_iter *iter,
858                                                     struct btree_iter_level *l)
859 {
860         struct bkey_s_c k = __btree_iter_unpack(iter, l, &iter->k,
861                         bch2_btree_node_iter_prev(&l->iter, l->b));
862
863         iter->real_pos = k.k ? k.k->p : l->b->data->min_key;
864         return k;
865 }
866
867 static inline bool btree_iter_advance_to_pos(struct btree_iter *iter,
868                                              struct btree_iter_level *l,
869                                              int max_advance)
870 {
871         struct bkey_packed *k;
872         int nr_advanced = 0;
873
874         while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
875                bkey_iter_pos_cmp(l->b, k, &iter->real_pos) < 0) {
876                 if (max_advance > 0 && nr_advanced >= max_advance)
877                         return false;
878
879                 bch2_btree_node_iter_advance(&l->iter, l->b);
880                 nr_advanced++;
881         }
882
883         return true;
884 }
885
886 /*
887  * Verify that iterator for parent node points to child node:
888  */
889 static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
890 {
891         struct btree_iter_level *l;
892         unsigned plevel;
893         bool parent_locked;
894         struct bkey_packed *k;
895
896         if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
897                 return;
898
899         plevel = b->c.level + 1;
900         if (!btree_iter_node(iter, plevel))
901                 return;
902
903         parent_locked = btree_node_locked(iter, plevel);
904
905         if (!bch2_btree_node_relock(iter, plevel))
906                 return;
907
908         l = &iter->l[plevel];
909         k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
910         if (!k ||
911             bkey_deleted(k) ||
912             bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
913                 char buf1[100];
914                 char buf2[100];
915                 char buf3[100];
916                 char buf4[100];
917                 struct bkey uk = bkey_unpack_key(b, k);
918
919                 bch2_dump_btree_node(iter->trans->c, l->b);
920                 bch2_bpos_to_text(&PBUF(buf1), iter->real_pos);
921                 bch2_bkey_to_text(&PBUF(buf2), &uk);
922                 bch2_bpos_to_text(&PBUF(buf3), b->data->min_key);
923                 bch2_bpos_to_text(&PBUF(buf4), b->data->max_key);
924                 panic("parent iter doesn't point to new node:\n"
925                       "iter pos %s %s\n"
926                       "iter key %s\n"
927                       "new node %s-%s\n",
928                       bch2_btree_ids[iter->btree_id], buf1,
929                       buf2, buf3, buf4);
930         }
931
932         if (!parent_locked)
933                 btree_node_unlock(iter, b->c.level + 1);
934 }
935
936 static inline void __btree_iter_init(struct btree_iter *iter,
937                                      unsigned level)
938 {
939         struct btree_iter_level *l = &iter->l[level];
940
941         bch2_btree_node_iter_init(&l->iter, l->b, &iter->real_pos);
942
943         /*
944          * Iterators to interior nodes should always be pointed at the first
945          * non-whiteout:
946          */
947         if (level)
948                 bch2_btree_node_iter_peek(&l->iter, l->b);
949
950         btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
951 }
952
953 static inline void btree_iter_node_set(struct btree_iter *iter,
954                                        struct btree *b)
955 {
956         BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
957
958         btree_iter_verify_new_node(iter, b);
959
960         EBUG_ON(!btree_iter_pos_in_node(iter, b));
961         EBUG_ON(b->c.lock.state.seq & 1);
962
963         iter->l[b->c.level].lock_seq = b->c.lock.state.seq;
964         iter->l[b->c.level].b = b;
965         __btree_iter_init(iter, b->c.level);
966 }
967
968 /*
969  * A btree node is being replaced - update the iterator to point to the new
970  * node:
971  */
972 void bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
973 {
974         enum btree_node_locked_type t;
975         struct btree_iter *linked;
976
977         trans_for_each_iter(iter->trans, linked)
978                 if (btree_iter_type(linked) != BTREE_ITER_CACHED &&
979                     btree_iter_pos_in_node(linked, b)) {
980                         /*
981                          * bch2_btree_iter_node_drop() has already been called -
982                          * the old node we're replacing has already been
983                          * unlocked and the pointer invalidated
984                          */
985                         BUG_ON(btree_node_locked(linked, b->c.level));
986
987                         t = btree_lock_want(linked, b->c.level);
988                         if (t != BTREE_NODE_UNLOCKED) {
989                                 six_lock_increment(&b->c.lock, t);
990                                 mark_btree_node_locked(linked, b->c.level, t);
991                         }
992
993                         btree_iter_node_set(linked, b);
994                 }
995 }
996
997 void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
998 {
999         struct btree_iter *linked;
1000         unsigned level = b->c.level;
1001
1002         trans_for_each_iter(iter->trans, linked)
1003                 if (linked->l[level].b == b) {
1004                         btree_node_unlock(linked, level);
1005                         linked->l[level].b = BTREE_ITER_NO_NODE_DROP;
1006                 }
1007 }
1008
1009 /*
1010  * A btree node has been modified in such a way as to invalidate iterators - fix
1011  * them:
1012  */
1013 void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
1014 {
1015         struct btree_iter *linked;
1016
1017         trans_for_each_iter_with_node(iter->trans, b, linked)
1018                 __btree_iter_init(linked, b->c.level);
1019 }
1020
1021 static int lock_root_check_fn(struct six_lock *lock, void *p)
1022 {
1023         struct btree *b = container_of(lock, struct btree, c.lock);
1024         struct btree **rootp = p;
1025
1026         return b == *rootp ? 0 : -1;
1027 }
1028
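/*
 * Lock the root node: the root can be freed and replaced while we're waiting
 * for the lock, so after locking we check that it's still the root (and still
 * at the level we saw) and retry if not. Returns 1 if the root is already at
 * a lower level than @depth_want.
 */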
1029 static inline int btree_iter_lock_root(struct btree_iter *iter,
1030                                        unsigned depth_want,
1031                                        unsigned long trace_ip)
1032 {
1033         struct bch_fs *c = iter->trans->c;
1034         struct btree *b, **rootp = &c->btree_roots[iter->btree_id].b;
1035         enum six_lock_type lock_type;
1036         unsigned i;
1037
1038         EBUG_ON(iter->nodes_locked);
1039
1040         while (1) {
1041                 b = READ_ONCE(*rootp);
1042                 iter->level = READ_ONCE(b->c.level);
1043
1044                 if (unlikely(iter->level < depth_want)) {
1045                         /*
1046                          * the root is at a lower depth than the depth we want:
1047                          * either we got to the end of the btree, or we're walking nodes
1048                          * greater than some depth and there are no nodes >=
1049                          * that depth
1050                          */
1051                         iter->level = depth_want;
1052                         for (i = iter->level; i < BTREE_MAX_DEPTH; i++)
1053                                 iter->l[i].b = NULL;
1054                         return 1;
1055                 }
1056
1057                 lock_type = __btree_lock_want(iter, iter->level);
1058                 if (unlikely(!btree_node_lock(b, POS_MAX, iter->level,
1059                                               iter, lock_type,
1060                                               lock_root_check_fn, rootp,
1061                                               trace_ip)))
1062                         return -EINTR;
1063
1064                 if (likely(b == READ_ONCE(*rootp) &&
1065                            b->c.level == iter->level &&
1066                            !race_fault())) {
1067                         for (i = 0; i < iter->level; i++)
1068                                 iter->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
1069                         iter->l[iter->level].b = b;
1070                         for (i = iter->level + 1; i < BTREE_MAX_DEPTH; i++)
1071                                 iter->l[i].b = NULL;
1072
1073                         mark_btree_node_locked(iter, iter->level, lock_type);
1074                         btree_iter_node_set(iter, b);
1075                         return 0;
1076                 }
1077
1078                 six_unlock_type(&b->c.lock, lock_type);
1079         }
1080 }
1081
1082 noinline
1083 static void btree_iter_prefetch(struct btree_iter *iter)
1084 {
1085         struct bch_fs *c = iter->trans->c;
1086         struct btree_iter_level *l = &iter->l[iter->level];
1087         struct btree_node_iter node_iter = l->iter;
1088         struct bkey_packed *k;
1089         struct bkey_buf tmp;
1090         unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
1091                 ? (iter->level > 1 ? 0 :  2)
1092                 : (iter->level > 1 ? 1 : 16);
1093         bool was_locked = btree_node_locked(iter, iter->level);
1094
1095         bch2_bkey_buf_init(&tmp);
1096
1097         while (nr) {
1098                 if (!bch2_btree_node_relock(iter, iter->level))
1099                         break;
1100
1101                 bch2_btree_node_iter_advance(&node_iter, l->b);
1102                 k = bch2_btree_node_iter_peek(&node_iter, l->b);
1103                 if (!k)
1104                         break;
1105
1106                 bch2_bkey_buf_unpack(&tmp, c, l->b, k);
1107                 bch2_btree_node_prefetch(c, iter, tmp.k, iter->btree_id,
1108                                          iter->level - 1);
1109         }
1110
1111         if (!was_locked)
1112                 btree_node_unlock(iter, iter->level);
1113
1114         bch2_bkey_buf_exit(&tmp, c);
1115 }
1116
1117 static noinline void btree_node_mem_ptr_set(struct btree_iter *iter,
1118                                             unsigned plevel, struct btree *b)
1119 {
1120         struct btree_iter_level *l = &iter->l[plevel];
1121         bool locked = btree_node_locked(iter, plevel);
1122         struct bkey_packed *k;
1123         struct bch_btree_ptr_v2 *bp;
1124
1125         if (!bch2_btree_node_relock(iter, plevel))
1126                 return;
1127
1128         k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1129         BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
1130
1131         bp = (void *) bkeyp_val(&l->b->format, k);
1132         bp->mem_ptr = (unsigned long)b;
1133
1134         if (!locked)
1135                 btree_node_unlock(iter, plevel);
1136 }
1137
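/*
 * Walk down one level: unpack the key the parent's node iterator points at,
 * get and lock the child node it points to, and initialize this iterator's
 * state for that level.
 */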
1138 static __always_inline int btree_iter_down(struct btree_iter *iter,
1139                                            unsigned long trace_ip)
1140 {
1141         struct bch_fs *c = iter->trans->c;
1142         struct btree_iter_level *l = &iter->l[iter->level];
1143         struct btree *b;
1144         unsigned level = iter->level - 1;
1145         enum six_lock_type lock_type = __btree_lock_want(iter, level);
1146         struct bkey_buf tmp;
1147         int ret;
1148
1149         EBUG_ON(!btree_node_locked(iter, iter->level));
1150
1151         bch2_bkey_buf_init(&tmp);
1152         bch2_bkey_buf_unpack(&tmp, c, l->b,
1153                          bch2_btree_node_iter_peek(&l->iter, l->b));
1154
1155         b = bch2_btree_node_get(c, iter, tmp.k, level, lock_type, trace_ip);
1156         ret = PTR_ERR_OR_ZERO(b);
1157         if (unlikely(ret))
1158                 goto err;
1159
1160         mark_btree_node_locked(iter, level, lock_type);
1161         btree_iter_node_set(iter, b);
1162
1163         if (tmp.k->k.type == KEY_TYPE_btree_ptr_v2 &&
1164             unlikely(b != btree_node_mem_ptr(tmp.k)))
1165                 btree_node_mem_ptr_set(iter, level + 1, b);
1166
1167         if (iter->flags & BTREE_ITER_PREFETCH)
1168                 btree_iter_prefetch(iter);
1169
1170         iter->level = level;
1171 err:
1172         bch2_bkey_buf_exit(&tmp, c);
1173         return ret;
1174 }
1175
1176 static int btree_iter_traverse_one(struct btree_iter *, unsigned long);
1177
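/*
 * Re-traverse every iterator in the transaction after a lock restart:
 * everything is unlocked, the iterators are sorted by btree_iter_lock_cmp(),
 * and then traversed again in that order so locks are reacquired in an order
 * that can't deadlock.
 */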
1178 static int __btree_iter_traverse_all(struct btree_trans *trans, int ret)
1179 {
1180         struct bch_fs *c = trans->c;
1181         struct btree_iter *iter;
1182         u8 sorted[BTREE_ITER_MAX];
1183         int i, nr_sorted = 0;
1184         bool relock_fail;
1185
1186         if (trans->in_traverse_all)
1187                 return -EINTR;
1188
1189         trans->in_traverse_all = true;
1190 retry_all:
1191         nr_sorted = 0;
1192         relock_fail = false;
1193
1194         trans_for_each_iter(trans, iter) {
1195                 if (!bch2_btree_iter_relock(iter, true))
1196                         relock_fail = true;
1197                 sorted[nr_sorted++] = iter->idx;
1198         }
1199
1200         if (!relock_fail) {
1201                 trans->in_traverse_all = false;
1202                 return 0;
1203         }
1204
1205 #define btree_iter_cmp_by_idx(_l, _r)                           \
1206                 btree_iter_lock_cmp(&trans->iters[_l], &trans->iters[_r])
1207
1208         bubble_sort(sorted, nr_sorted, btree_iter_cmp_by_idx);
1209 #undef btree_iter_cmp_by_idx
1210
1211         for (i = nr_sorted - 2; i >= 0; --i) {
1212                 struct btree_iter *iter1 = trans->iters + sorted[i];
1213                 struct btree_iter *iter2 = trans->iters + sorted[i + 1];
1214
1215                 if (iter1->btree_id == iter2->btree_id &&
1216                     iter1->locks_want < iter2->locks_want)
1217                         __bch2_btree_iter_upgrade(iter1, iter2->locks_want);
1218                 else if (!iter1->locks_want && iter2->locks_want)
1219                         __bch2_btree_iter_upgrade(iter1, 1);
1220         }
1221
1222         bch2_trans_unlock(trans);
1223         cond_resched();
1224
1225         if (unlikely(ret == -ENOMEM)) {
1226                 struct closure cl;
1227
1228                 closure_init_stack(&cl);
1229
1230                 do {
1231                         ret = bch2_btree_cache_cannibalize_lock(c, &cl);
1232                         closure_sync(&cl);
1233                 } while (ret);
1234         }
1235
1236         if (unlikely(ret == -EIO)) {
1237                 trans->error = true;
1238                 goto out;
1239         }
1240
1241         BUG_ON(ret && ret != -EINTR);
1242
1243         /* Now, redo traversals in correct order: */
1244         for (i = 0; i < nr_sorted; i++) {
1245                 unsigned idx = sorted[i];
1246
1247                 /*
1248                  * successfully traversing one iterator can cause another to be
1249                  * unlinked, in btree_key_cache_fill()
1250                  */
1251                 if (!(trans->iters_linked & (1ULL << idx)))
1252                         continue;
1253
1254                 ret = btree_iter_traverse_one(&trans->iters[idx], _THIS_IP_);
1255                 if (ret)
1256                         goto retry_all;
1257         }
1258
1259         if (hweight64(trans->iters_live) > 1)
1260                 ret = -EINTR;
1261         else
1262                 trans_for_each_iter(trans, iter)
1263                         if (iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT) {
1264                                 ret = -EINTR;
1265                                 break;
1266                         }
1267 out:
1268         bch2_btree_cache_cannibalize_unlock(c);
1269
1270         trans->in_traverse_all = false;
1271
1272         trace_trans_traverse_all(trans->ip);
1273         return ret;
1274 }
1275
1276 int bch2_btree_iter_traverse_all(struct btree_trans *trans)
1277 {
1278         return __btree_iter_traverse_all(trans, 0);
1279 }
1280
1281 static inline bool btree_iter_good_node(struct btree_iter *iter,
1282                                         unsigned l, int check_pos)
1283 {
1284         if (!is_btree_node(iter, l) ||
1285             !bch2_btree_node_relock(iter, l))
1286                 return false;
1287
1288         if (check_pos < 0 && btree_iter_pos_before_node(iter, iter->l[l].b))
1289                 return false;
1290         if (check_pos > 0 && btree_iter_pos_after_node(iter, iter->l[l].b))
1291                 return false;
1292         return true;
1293 }
1294
1295 static inline unsigned btree_iter_up_until_good_node(struct btree_iter *iter,
1296                                                      int check_pos)
1297 {
1298         unsigned l = iter->level;
1299
1300         while (btree_iter_node(iter, l) &&
1301                !btree_iter_good_node(iter, l, check_pos)) {
1302                 btree_node_unlock(iter, l);
1303                 iter->l[l].b = BTREE_ITER_NO_NODE_UP;
1304                 l++;
1305         }
1306
1307         return l;
1308 }
1309
1310 /*
1311  * This is the main state machine for walking down the btree - walks down to a
1312  * specified depth
1313  *
1314  * Returns 0 on success, -EIO on error (error reading in a btree node).
1315  *
1316  * On error, caller (peek_node()/peek_key()) must return NULL; the error is
1317  * stashed in the iterator and returned from bch2_trans_exit().
1318  */
1319 static int btree_iter_traverse_one(struct btree_iter *iter,
1320                                    unsigned long trace_ip)
1321 {
1322         unsigned depth_want = iter->level;
1323
1324         /*
1325          * if we need interior nodes locked, call btree_iter_relock() to make
1326          * sure we walk back up enough that we lock them:
1327          */
1328         if (iter->uptodate == BTREE_ITER_NEED_RELOCK ||
1329             iter->locks_want > 1)
1330                 bch2_btree_iter_relock(iter, false);
1331
1332         if (btree_iter_type(iter) == BTREE_ITER_CACHED)
1333                 return bch2_btree_iter_traverse_cached(iter);
1334
1335         if (iter->uptodate < BTREE_ITER_NEED_RELOCK)
1336                 return 0;
1337
1338         if (unlikely(iter->level >= BTREE_MAX_DEPTH))
1339                 return 0;
1340
1341         iter->level = btree_iter_up_until_good_node(iter, 0);
1342
1343         /*
1344          * Note: iter->nodes[iter->level] may be temporarily NULL here - that
1345          * would indicate to other code that we got to the end of the btree,
1346          * here it indicates that relocking the root failed - it's critical that
1347          * btree_iter_lock_root() comes next and that it can't fail
1348          */
1349         while (iter->level > depth_want) {
1350                 int ret = btree_iter_node(iter, iter->level)
1351                         ? btree_iter_down(iter, trace_ip)
1352                         : btree_iter_lock_root(iter, depth_want, trace_ip);
1353                 if (unlikely(ret)) {
1354                         if (ret == 1)
1355                                 return 0;
1356
1357                         iter->level = depth_want;
1358
1359                         if (ret == -EIO) {
1360                                 iter->flags |= BTREE_ITER_ERROR;
1361                                 iter->l[iter->level].b =
1362                                         BTREE_ITER_NO_NODE_ERROR;
1363                         } else {
1364                                 iter->l[iter->level].b =
1365                                         BTREE_ITER_NO_NODE_DOWN;
1366                         }
1367                         return ret;
1368                 }
1369         }
1370
1371         iter->uptodate = BTREE_ITER_NEED_PEEK;
1372
1373         bch2_btree_iter_verify(iter);
1374         return 0;
1375 }
1376
1377 static int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
1378 {
1379         struct btree_trans *trans = iter->trans;
1380         int ret;
1381
1382         ret =   bch2_trans_cond_resched(trans) ?:
1383                 btree_iter_traverse_one(iter, _RET_IP_);
1384         if (unlikely(ret))
1385                 ret = __btree_iter_traverse_all(trans, ret);
1386
1387         return ret;
1388 }
1389
1390 /*
1391  * Note:
1392  * bch2_btree_iter_traverse() is for external users, btree_iter_traverse() is
1393  * for internal btree iterator users
1394  *
1395  * bch2_btree_iter_traverse sets iter->real_pos to iter->pos,
1396  * btree_iter_traverse() does not:
1397  */
1398 static inline int __must_check
1399 btree_iter_traverse(struct btree_iter *iter)
1400 {
1401         return iter->uptodate >= BTREE_ITER_NEED_RELOCK
1402                 ? __bch2_btree_iter_traverse(iter)
1403                 : 0;
1404 }
1405
1406 int __must_check
1407 bch2_btree_iter_traverse(struct btree_iter *iter)
1408 {
1409         btree_iter_set_search_pos(iter, btree_iter_search_key(iter));
1410
1411         return btree_iter_traverse(iter);
1412 }
1413
1414 /* Iterate across nodes (leaf and interior nodes) */
1415
1416 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
1417 {
1418         struct btree *b;
1419         int ret;
1420
1421         EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES);
1422         bch2_btree_iter_verify(iter);
1423
1424         ret = btree_iter_traverse(iter);
1425         if (ret)
1426                 return NULL;
1427
1428         b = btree_iter_node(iter, iter->level);
1429         if (!b)
1430                 return NULL;
1431
1432         BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
1433
1434         iter->pos = iter->real_pos = b->key.k.p;
1435
1436         bch2_btree_iter_verify(iter);
1437
1438         return b;
1439 }
1440
1441 struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
1442 {
1443         struct btree *b;
1444         int ret;
1445
1446         EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES);
1447         bch2_btree_iter_verify(iter);
1448
1449         /* already got to end? */
1450         if (!btree_iter_node(iter, iter->level))
1451                 return NULL;
1452
1453         bch2_trans_cond_resched(iter->trans);
1454
1455         btree_node_unlock(iter, iter->level);
1456         iter->l[iter->level].b = BTREE_ITER_NO_NODE_UP;
1457         iter->level++;
1458
1459         btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
1460         ret = btree_iter_traverse(iter);
1461         if (ret)
1462                 return NULL;
1463
1464         /* got to end? */
1465         b = btree_iter_node(iter, iter->level);
1466         if (!b)
1467                 return NULL;
1468
1469         if (bpos_cmp(iter->pos, b->key.k.p) < 0) {
1470                 /*
1471                  * Haven't gotten to the end of the parent node: go back down to
1472                  * the next child node
1473                  */
1474                 btree_iter_set_search_pos(iter, bpos_successor(iter->pos));
1475
1476                 /* Unlock to avoid screwing up our lock invariants: */
1477                 btree_node_unlock(iter, iter->level);
1478
1479                 iter->level = iter->min_depth;
1480                 btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
1481                 bch2_btree_iter_verify(iter);
1482
1483                 ret = btree_iter_traverse(iter);
1484                 if (ret)
1485                         return NULL;
1486
1487                 b = iter->l[iter->level].b;
1488         }
1489
1490         iter->pos = iter->real_pos = b->key.k.p;
1491
1492         bch2_btree_iter_verify(iter);
1493
1494         return b;
1495 }
1496
1497 /* Iterate across keys (in leaf nodes only) */
1498
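/*
 * Move the iterator to a new search position: walk up until we're at a node
 * that covers the new position (or the whole path has been unwound), then
 * either advance that level's node iterator a few keys or reinitialize it.
 */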
1499 static void btree_iter_set_search_pos(struct btree_iter *iter, struct bpos new_pos)
1500 {
1501         int cmp = bpos_cmp(new_pos, iter->real_pos);
1502         unsigned l = iter->level;
1503
1504         if (!cmp)
1505                 goto out;
1506
1507         iter->real_pos = new_pos;
1508
1509         if (unlikely(btree_iter_type(iter) == BTREE_ITER_CACHED)) {
1510                 btree_node_unlock(iter, 0);
1511                 iter->l[0].b = BTREE_ITER_NO_NODE_UP;
1512                 btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
1513                 return;
1514         }
1515
1516         l = btree_iter_up_until_good_node(iter, cmp);
1517
1518         if (btree_iter_node(iter, l)) {
1519                 /*
1520                  * We might have to skip over many keys, or just a few: try
1521                  * advancing the node iterator, and if we have to skip over too
1522                  * many keys (or if we're rewinding, since that is expensive),
1523                  * just reinit it.
1524                  */
1525                 if (cmp < 0 ||
1526                     !btree_iter_advance_to_pos(iter, &iter->l[l], 8))
1527                         __btree_iter_init(iter, l);
1528
1529                 /* Don't leave it locked if we're not supposed to: */
1530                 if (btree_lock_want(iter, l) == BTREE_NODE_UNLOCKED)
1531                         btree_node_unlock(iter, l);
1532         }
1533 out:
1534         if (l != iter->level)
1535                 btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
1536         else
1537                 btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
1538
1539         bch2_btree_iter_verify(iter);
1540 }
1541
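/*
 * Advance the iterator's position past the key it most recently returned;
 * returns false if we were already at the end of the btree (POS_MAX).
 */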
1542 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
1543 {
1544         struct bpos pos = iter->k.p;
1545         bool ret = bpos_cmp(pos, POS_MAX) != 0;
1546
1547         if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
1548                 pos = bkey_successor(iter, pos);
1549         bch2_btree_iter_set_pos(iter, pos);
1550         return ret;
1551 }
1552
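/*
 * Move the iterator's position back before the key it most recently
 * returned; returns false if we were already at the start of the btree
 * (POS_MIN).
 */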
1553 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
1554 {
1555         struct bpos pos = bkey_start_pos(&iter->k);
1556         bool ret = bpos_cmp(pos, POS_MIN) != 0;
1557
1558         if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
1559                 pos = bkey_predecessor(iter, pos);
1560         bch2_btree_iter_set_pos(iter, pos);
1561         return ret;
1562 }
1563
1564 static inline bool btree_iter_set_pos_to_next_leaf(struct btree_iter *iter)
1565 {
1566         struct bpos next_pos = iter->l[0].b->key.k.p;
1567         bool ret = bpos_cmp(next_pos, POS_MAX) != 0;
1568
1569         /*
1570          * Typically, we don't want to modify iter->pos here, since that
1571          * indicates where we searched from - unless we got to the end of the
1572          * btree, in which case we want iter->pos to reflect that:
1573          */
1574         if (ret)
1575                 btree_iter_set_search_pos(iter, bpos_successor(next_pos));
1576         else
1577                 bch2_btree_iter_set_pos(iter, POS_MAX);
1578
1579         return ret;
1580 }
1581
1582 static inline bool btree_iter_set_pos_to_prev_leaf(struct btree_iter *iter)
1583 {
1584         struct bpos next_pos = iter->l[0].b->data->min_key;
1585         bool ret = bpos_cmp(next_pos, POS_MIN) != 0;
1586
1587         if (ret)
1588                 btree_iter_set_search_pos(iter, bpos_predecessor(next_pos));
1589         else
1590                 bch2_btree_iter_set_pos(iter, POS_MIN);
1591
1592         return ret;
1593 }
1594
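/*
 * Returns the first pending update in @btree_id with a key at or after @pos,
 * so that peeks with updates can also see keys this transaction hasn't
 * committed yet.
 */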
1595 static struct bkey_i *btree_trans_peek_updates(struct btree_trans *trans,
1596                                                enum btree_id btree_id, struct bpos pos)
1597 {
1598         struct btree_insert_entry *i;
1599
1600         trans_for_each_update2(trans, i)
1601                 if ((cmp_int(btree_id,  i->iter->btree_id) ?:
1602                      bkey_cmp(pos,      i->k->k.p)) <= 0) {
1603                         if (btree_id == i->iter->btree_id)
1604                                 return i->k;
1605                         break;
1606                 }
1607
1608         return NULL;
1609 }
1610
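/*
 * Returns the first key greater than or equal to the iterator's search
 * position, optionally taking the transaction's pending updates into
 * account; skips deleted keys and advances to the next leaf node as needed.
 */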
1611 static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter, bool with_updates)
1612 {
1613         struct bpos search_key = btree_iter_search_key(iter);
1614         struct bkey_i *next_update;
1615         struct bkey_s_c k;
1616         int ret;
1617
1618         EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
1619         bch2_btree_iter_verify(iter);
1620         bch2_btree_iter_verify_entry_exit(iter);
1621 start:
1622         next_update = with_updates
1623                 ? btree_trans_peek_updates(iter->trans, iter->btree_id, search_key)
1624                 : NULL;
1625         btree_iter_set_search_pos(iter, search_key);
1626
1627         while (1) {
1628                 ret = btree_iter_traverse(iter);
1629                 if (unlikely(ret))
1630                         return bkey_s_c_err(ret);
1631
1632                 k = btree_iter_level_peek(iter, &iter->l[0]);
1633
1634                 if (next_update &&
1635                     bpos_cmp(next_update->k.p, iter->real_pos) <= 0)
1636                         k = bkey_i_to_s_c(next_update);
1637
1638                 if (likely(k.k)) {
1639                         if (bkey_deleted(k.k)) {
1640                                 search_key = bkey_successor(iter, k.k->p);
1641                                 goto start;
1642                         }
1643
1644                         break;
1645                 }
1646
1647                 if (!btree_iter_set_pos_to_next_leaf(iter))
1648                         return bkey_s_c_null;
1649         }
1650
1651         /*
1652          * iter->pos should be monotonically increasing, and always be equal to
1653          * the key we just returned - except extents can straddle iter->pos:
1654          */
1655         if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
1656                 iter->pos = k.k->p;
1657         else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
1658                 iter->pos = bkey_start_pos(k.k);
1659
1660         bch2_btree_iter_verify_entry_exit(iter);
1661         bch2_btree_iter_verify(iter);
1662         return k;
1663 }
1664
1665 /**
1666  * bch2_btree_iter_peek: returns first key greater than or equal to iterator's
1667  * current position
1668  */
1669 struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
1670 {
1671         return __btree_iter_peek(iter, false);
1672 }
1673
1674 /**
1675  * bch2_btree_iter_next: returns first key greater than iterator's current
1676  * position
1677  */
1678 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
1679 {
1680         if (!bch2_btree_iter_advance(iter))
1681                 return bkey_s_c_null;
1682
1683         return bch2_btree_iter_peek(iter);
1684 }
1685
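/**
 * bch2_btree_iter_peek_with_updates: like bch2_btree_iter_peek(), but also
 * returns keys from this transaction's pending (uncommitted) updates
 */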
1686 struct bkey_s_c bch2_btree_iter_peek_with_updates(struct btree_iter *iter)
1687 {
1688         return __btree_iter_peek(iter, true);
1689 }
1690
1691 struct bkey_s_c bch2_btree_iter_next_with_updates(struct btree_iter *iter)
1692 {
1693         if (!bch2_btree_iter_advance(iter))
1694                 return bkey_s_c_null;
1695
1696         return bch2_btree_iter_peek_with_updates(iter);
1697 }
1698
1699 /**
1700  * bch2_btree_iter_peek_prev: returns first key less than or equal to
1701  * iterator's current position
1702  */
1703 struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
1704 {
1705         struct btree_iter_level *l = &iter->l[0];
1706         struct bkey_s_c k;
1707         int ret;
1708
1709         EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
1710         bch2_btree_iter_verify(iter);
1711         bch2_btree_iter_verify_entry_exit(iter);
1712
1713         btree_iter_set_search_pos(iter, iter->pos);
1714
1715         while (1) {
1716                 ret = btree_iter_traverse(iter);
1717                 if (unlikely(ret)) {
1718                         k = bkey_s_c_err(ret);
1719                         goto no_key;
1720                 }
1721
1722                 k = btree_iter_level_peek(iter, l);
1723                 if (!k.k ||
1724                     ((iter->flags & BTREE_ITER_IS_EXTENTS)
1725                      ? bkey_cmp(bkey_start_pos(k.k), iter->pos) >= 0
1726                      : bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0))
1727                         k = btree_iter_level_prev(iter, l);
1728
1729                 if (likely(k.k))
1730                         break;
1731
1732                 if (!btree_iter_set_pos_to_prev_leaf(iter)) {
1733                         k = bkey_s_c_null;
1734                         goto no_key;
1735                 }
1736         }
1737
1738         EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
1739
1740         /* Extents can straddle iter->pos: */
1741         if (bkey_cmp(k.k->p, iter->pos) < 0)
1742                 iter->pos = k.k->p;
1743 out:
1744         bch2_btree_iter_verify_entry_exit(iter);
1745         bch2_btree_iter_verify(iter);
1746         return k;
1747 no_key:
1748         /*
1749          * btree_iter_level_peek() may have set iter->k to a key we didn't want, and
1750          * then we errored going to the previous leaf - make sure it's
1751          * consistent with iter->pos:
1752          */
1753         bkey_init(&iter->k);
1754         iter->k.p = iter->pos;
1755         goto out;
1756 }
1757
1758 /**
1759  * bch2_btree_iter_prev: returns first key less than iterator's current
1760  * position
1761  */
1762 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
1763 {
1764         if (!bch2_btree_iter_rewind(iter))
1765                 return bkey_s_c_null;
1766
1767         return bch2_btree_iter_peek_prev(iter);
1768 }
1769
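/*
 * Slot peek for extents btrees: if no extent covers the iterator's position,
 * synthesize a deleted key in iter->k spanning the hole from iter->pos up to
 * the start of the next extent (or the end of the inode).
 */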
1770 static inline struct bkey_s_c
1771 __bch2_btree_iter_peek_slot_extents(struct btree_iter *iter)
1772 {
1773         struct bkey_s_c k;
1774         struct bpos pos, next_start;
1775
1776         /* keys & holes can't span inode numbers: */
1777         if (iter->pos.offset == KEY_OFFSET_MAX) {
1778                 if (iter->pos.inode == KEY_INODE_MAX)
1779                         return bkey_s_c_null;
1780
1781                 bch2_btree_iter_set_pos(iter, bkey_successor(iter, iter->pos));
1782         }
1783
1784         pos = iter->pos;
1785         k = bch2_btree_iter_peek(iter);
1786         iter->pos = pos;
1787
1788         if (bkey_err(k))
1789                 return k;
1790
1791         if (k.k && bkey_cmp(bkey_start_pos(k.k), iter->pos) <= 0)
1792                 return k;
1793
1794         next_start = k.k ? bkey_start_pos(k.k) : POS_MAX;
1795
1796         bkey_init(&iter->k);
1797         iter->k.p = iter->pos;
1798         bch2_key_resize(&iter->k,
1799                         min_t(u64, KEY_SIZE_MAX,
1800                               (next_start.inode == iter->pos.inode
1801                                ? next_start.offset
1802                                : KEY_OFFSET_MAX) -
1803                               iter->pos.offset));
1804
1805         EBUG_ON(!iter->k.size);
1806
1807         bch2_btree_iter_verify_entry_exit(iter);
1808         bch2_btree_iter_verify(iter);
1809
1810         return (struct bkey_s_c) { &iter->k, NULL };
1811 }
1812
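/**
 * bch2_btree_iter_peek_slot: returns the key at the iterator's current
 * position, or a synthesized deleted key if that slot is a hole
 */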
1813 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
1814 {
1815         struct btree_iter_level *l = &iter->l[0];
1816         struct bkey_s_c k;
1817         int ret;
1818
1819         EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
1820         bch2_btree_iter_verify(iter);
1821         bch2_btree_iter_verify_entry_exit(iter);
1822
1823         btree_iter_set_search_pos(iter, btree_iter_search_key(iter));
1824
1825         if (iter->flags & BTREE_ITER_IS_EXTENTS)
1826                 return __bch2_btree_iter_peek_slot_extents(iter);
1827
1828         ret = btree_iter_traverse(iter);
1829         if (unlikely(ret))
1830                 return bkey_s_c_err(ret);
1831
1832         k = btree_iter_level_peek_all(iter, l, &iter->k);
1833
1834         EBUG_ON(k.k && bkey_deleted(k.k) && bkey_cmp(k.k->p, iter->pos) == 0);
1835
1836         if (!k.k || bkey_cmp(iter->pos, k.k->p)) {
1837                 /* hole */
1838                 bkey_init(&iter->k);
1839                 iter->k.p = iter->pos;
1840                 k = (struct bkey_s_c) { &iter->k, NULL };
1841         }
1842
1843         bch2_btree_iter_verify_entry_exit(iter);
1844         bch2_btree_iter_verify(iter);
1845         return k;
1846 }
1847
1848 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
1849 {
1850         if (!bch2_btree_iter_advance(iter))
1851                 return bkey_s_c_null;
1852
1853         return bch2_btree_iter_peek_slot(iter);
1854 }
1855
1856 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
1857 {
1858         if (!bch2_btree_iter_rewind(iter))
1859                 return bkey_s_c_null;
1860
1861         return bch2_btree_iter_peek_slot(iter);
1862 }
1863
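/**
 * bch2_btree_iter_peek_cached: returns the key from the btree key cache
 * entry at the iterator's current position; the cached entry must be valid
 */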
1864 struct bkey_s_c bch2_btree_iter_peek_cached(struct btree_iter *iter)
1865 {
1866         struct bkey_cached *ck;
1867         int ret;
1868
1869         EBUG_ON(btree_iter_type(iter) != BTREE_ITER_CACHED);
1870         bch2_btree_iter_verify(iter);
1871
1872         ret = btree_iter_traverse(iter);
1873         if (unlikely(ret))
1874                 return bkey_s_c_err(ret);
1875
1876         ck = (void *) iter->l[0].b;
1877
1878         EBUG_ON(iter->btree_id != ck->key.btree_id ||
1879                 bkey_cmp(iter->pos, ck->key.pos));
1880         BUG_ON(!ck->valid);
1881
1882         return bkey_i_to_s_c(ck->k);
1883 }
1884
1885 static inline void bch2_btree_iter_init(struct btree_trans *trans,
1886                         struct btree_iter *iter, enum btree_id btree_id)
1887 {
1888         struct bch_fs *c = trans->c;
1889         unsigned i;
1890
1891         iter->trans                     = trans;
1892         iter->uptodate                  = BTREE_ITER_NEED_TRAVERSE;
1893         iter->btree_id                  = btree_id;
1894         iter->real_pos                  = POS_MIN;
1895         iter->level                     = 0;
1896         iter->min_depth                 = 0;
1897         iter->locks_want                = 0;
1898         iter->nodes_locked              = 0;
1899         iter->nodes_intent_locked       = 0;
1900         for (i = 0; i < ARRAY_SIZE(iter->l); i++)
1901                 iter->l[i].b            = BTREE_ITER_NO_NODE_INIT;
1902
1903         prefetch(c->btree_roots[btree_id].b);
1904 }
1905
1906 /* new transactional stuff: */
1907
1908 static inline void __bch2_trans_iter_free(struct btree_trans *trans,
1909                                           unsigned idx)
1910 {
1911         __bch2_btree_iter_unlock(&trans->iters[idx]);
1912         trans->iters_linked             &= ~(1ULL << idx);
1913         trans->iters_live               &= ~(1ULL << idx);
1914         trans->iters_touched            &= ~(1ULL << idx);
1915 }
1916
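/*
 * Drop a reference to a transaction iterator: the iterator is freed
 * immediately if it's no longer marked as touched or needed until commit.
 * Returns any error the iterator has hit.
 */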
1917 int bch2_trans_iter_put(struct btree_trans *trans,
1918                         struct btree_iter *iter)
1919 {
1920         int ret;
1921
1922         if (IS_ERR_OR_NULL(iter))
1923                 return 0;
1924
1925         BUG_ON(trans->iters + iter->idx != iter);
1926         BUG_ON(!btree_iter_live(trans, iter));
1927
1928         ret = btree_iter_err(iter);
1929
1930         if (!(trans->iters_touched & (1ULL << iter->idx)) &&
1931             !(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT))
1932                 __bch2_trans_iter_free(trans, iter->idx);
1933
1934         trans->iters_live       &= ~(1ULL << iter->idx);
1935         return ret;
1936 }
1937
1938 int bch2_trans_iter_free(struct btree_trans *trans,
1939                          struct btree_iter *iter)
1940 {
1941         if (IS_ERR_OR_NULL(iter))
1942                 return 0;
1943
1944         set_btree_iter_dontneed(trans, iter);
1945
1946         return bch2_trans_iter_put(trans, iter);
1947 }
1948
1949 noinline __cold
1950 static void btree_trans_iter_alloc_fail(struct btree_trans *trans)
1951 {
1953         struct btree_iter *iter;
1954         struct btree_insert_entry *i;
1955         char buf[100];
1956
1957         trans_for_each_iter(trans, iter)
1958                 printk(KERN_ERR "iter: btree %s pos %s%s%s%s %pS\n",
1959                        bch2_btree_ids[iter->btree_id],
1960                        (bch2_bpos_to_text(&PBUF(buf), iter->pos), buf),
1961                        btree_iter_live(trans, iter) ? " live" : "",
1962                        (trans->iters_touched & (1ULL << iter->idx)) ? " touched" : "",
1963                        iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT ? " keep" : "",
1964                        (void *) iter->ip_allocated);
1965
1966         trans_for_each_update(trans, i) {
1967                 char buf[300];
1968
1969                 bch2_bkey_val_to_text(&PBUF(buf), trans->c, bkey_i_to_s_c(i->k));
1970                 printk(KERN_ERR "update: btree %s %s\n",
1971                        bch2_btree_ids[i->iter->btree_id], buf);
1972         }
1973         panic("trans iter overflow\n");
1974 }
1975
1976 static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *trans)
1977 {
1978         unsigned idx;
1979
1980         if (unlikely(trans->iters_linked ==
1981                      ~((~0ULL << 1) << (BTREE_ITER_MAX - 1))))
1982                 btree_trans_iter_alloc_fail(trans);
1983
1984         idx = __ffs64(~trans->iters_linked);
1985
1986         trans->iters_linked     |= 1ULL << idx;
1987         trans->iters[idx].idx    = idx;
1988         trans->iters[idx].flags  = 0;
1989         return &trans->iters[idx];
1990 }
1991
1992 static inline void btree_iter_copy(struct btree_iter *dst,
1993                                    struct btree_iter *src)
1994 {
1995         unsigned i, idx = dst->idx;
1996
1997         *dst = *src;
1998         dst->idx = idx;
1999         dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
2000
2001         for (i = 0; i < BTREE_MAX_DEPTH; i++)
2002                 if (btree_node_locked(dst, i))
2003                         six_lock_increment(&dst->l[i].b->c.lock,
2004                                            __btree_lock_want(dst, i));
2005
2006         dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
2007         dst->flags &= ~BTREE_ITER_SET_POS_AFTER_COMMIT;
2008 }
2009
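/*
 * Get an iterator for @btree_id at @pos: prefers to reuse the existing
 * iterator of the same type and btree whose position is closest to @pos,
 * copying it if it still needs to be preserved, and allocating a fresh one
 * if there's nothing to reuse.
 */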
2010 struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
2011                                          unsigned btree_id, struct bpos pos,
2012                                          unsigned locks_want,
2013                                          unsigned depth,
2014                                          unsigned flags)
2015 {
2016         struct btree_iter *iter, *best = NULL;
2017         struct bpos real_pos, pos_min = POS_MIN;
2018
2019         if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES &&
2020             btree_node_type_is_extents(btree_id) &&
2021             !(flags & BTREE_ITER_NOT_EXTENTS) &&
2022             !(flags & BTREE_ITER_ALL_SNAPSHOTS))
2023                 flags |= BTREE_ITER_IS_EXTENTS;
2024
2025         if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES &&
2026             !btree_type_has_snapshots(btree_id))
2027                 flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
2028
2029         if (!(flags & BTREE_ITER_ALL_SNAPSHOTS))
2030                 pos.snapshot = btree_type_has_snapshots(btree_id)
2031                         ? U32_MAX : 0;
2032
2033         real_pos = pos;
2034
2035         if ((flags & BTREE_ITER_IS_EXTENTS) &&
2036             bkey_cmp(pos, POS_MAX))
2037                 real_pos = bpos_nosnap_successor(pos);
2038
2039         trans_for_each_iter(trans, iter) {
2040                 if (btree_iter_type(iter) != (flags & BTREE_ITER_TYPE))
2041                         continue;
2042
2043                 if (iter->btree_id != btree_id)
2044                         continue;
2045
2046                 if (best) {
2047                         int cmp = bkey_cmp(bpos_diff(best->real_pos, real_pos),
2048                                            bpos_diff(iter->real_pos, real_pos));
2049
2050                         if (cmp < 0 ||
2051                             ((cmp == 0 && btree_iter_keep(trans, iter))))
2052                                 continue;
2053                 }
2054
2055                 best = iter;
2056         }
2057
2058         trace_trans_get_iter(_RET_IP_, trans->ip,
2059                              btree_id,
2060                              &real_pos, locks_want,
2061                              best ? &best->real_pos : &pos_min,
2062                              best ? best->locks_want : 0,
2063                              best ? best->uptodate : BTREE_ITER_NEED_TRAVERSE);
2064
2065         if (!best) {
2066                 iter = btree_trans_iter_alloc(trans);
2067                 bch2_btree_iter_init(trans, iter, btree_id);
2068         } else if (btree_iter_keep(trans, best)) {
2069                 iter = btree_trans_iter_alloc(trans);
2070                 btree_iter_copy(iter, best);
2071         } else {
2072                 iter = best;
2073         }
2074
2075         trans->iters_live       |= 1ULL << iter->idx;
2076         trans->iters_touched    |= 1ULL << iter->idx;
2077
2078         iter->flags = flags;
2079
2080         iter->snapshot = pos.snapshot;
2081
2082         /*
2083          * If the iterator has locks_want greater than requested, we explicitly
2084          * do not downgrade it here - on a transaction restart caused by a btree
2085          * node split needing to upgrade locks, we might be putting/getting the
2086          * iterator again. Downgrading iterators only happens via an explicit
2087          * bch2_trans_downgrade().
2088          */
2089
2090         locks_want = min(locks_want, BTREE_MAX_DEPTH);
2091         if (locks_want > iter->locks_want) {
2092                 iter->locks_want = locks_want;
2093                 btree_iter_get_locks(iter, true, false);
2094         }
2095
2096         while (iter->level != depth) {
2097                 btree_node_unlock(iter, iter->level);
2098                 iter->l[iter->level].b = BTREE_ITER_NO_NODE_INIT;
2099                 iter->uptodate = BTREE_ITER_NEED_TRAVERSE;
2100                 if (iter->level < depth)
2101                         iter->level++;
2102                 else
2103                         iter->level--;
2104         }
2105
2106         iter->min_depth = depth;
2107
2108         bch2_btree_iter_set_pos(iter, pos);
2109         btree_iter_set_search_pos(iter, real_pos);
2110
2111         return iter;
2112 }
2113
2114 struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *trans,
2115                                             enum btree_id btree_id,
2116                                             struct bpos pos,
2117                                             unsigned locks_want,
2118                                             unsigned depth,
2119                                             unsigned flags)
2120 {
2121         struct btree_iter *iter =
2122                 __bch2_trans_get_iter(trans, btree_id, pos,
2123                                       locks_want, depth,
2124                                       BTREE_ITER_NODES|
2125                                       BTREE_ITER_NOT_EXTENTS|
2126                                       BTREE_ITER_ALL_SNAPSHOTS|
2127                                       flags);
2128
2129         BUG_ON(bkey_cmp(iter->pos, pos));
2130         BUG_ON(iter->locks_want != min(locks_want, BTREE_MAX_DEPTH));
2131         BUG_ON(iter->level      != depth);
2132         BUG_ON(iter->min_depth  != depth);
2133         iter->ip_allocated = _RET_IP_;
2134
2135         return iter;
2136 }
2137
2138 struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *trans,
2139                                         struct btree_iter *src)
2140 {
2141         struct btree_iter *iter;
2142
2143         iter = btree_trans_iter_alloc(trans);
2144         btree_iter_copy(iter, src);
2145
2146         trans->iters_live |= 1ULL << iter->idx;
2147         /*
2148          * We don't need to preserve this iter since it's cheap to copy it
2149          * again - this will cause trans_iter_put() to free it right away:
2150          */
2151         set_btree_iter_dontneed(trans, iter);
2152
2153         return iter;
2154 }
2155
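/*
 * Bump allocator for memory that lives as long as the transaction: grows the
 * buffer if needed; if the buffer had to be reallocated mid transaction this
 * returns -EINTR so the caller restarts, since pointers into the old buffer
 * are no longer valid.
 */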
2156 void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
2157 {
2158         size_t new_top = trans->mem_top + size;
2159         void *p;
2160
2161         if (new_top > trans->mem_bytes) {
2162                 size_t old_bytes = trans->mem_bytes;
2163                 size_t new_bytes = roundup_pow_of_two(new_top);
2164                 void *new_mem;
2165
2166                 WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
2167
2168                 new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
2169                 if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
2170                         new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
2171                         new_bytes = BTREE_TRANS_MEM_MAX;
2172                         kfree(trans->mem);
2173                 }
2174
2175                 if (!new_mem)
2176                         return ERR_PTR(-ENOMEM);
2177
2178                 trans->mem = new_mem;
2179                 trans->mem_bytes = new_bytes;
2180
2181                 if (old_bytes) {
2182                         trace_trans_restart_mem_realloced(trans->ip, _RET_IP_, new_bytes);
2183                         return ERR_PTR(-EINTR);
2184                 }
2185         }
2186
2187         p = trans->mem + trans->mem_top;
2188         trans->mem_top += size;
2189         return p;
2190 }
2191
2192 inline void bch2_trans_unlink_iters(struct btree_trans *trans)
2193 {
2194         u64 iters = trans->iters_linked &
2195                 ~trans->iters_touched &
2196                 ~trans->iters_live;
2197
2198         while (iters) {
2199                 unsigned idx = __ffs64(iters);
2200
2201                 iters &= ~(1ULL << idx);
2202                 __bch2_trans_iter_free(trans, idx);
2203         }
2204 }
2205
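/*
 * Reset a transaction after a commit or restart: drop per-commit iterator
 * flags, free iterators that are no longer in use, clear pending updates and
 * transaction memory, and (unless flagged otherwise) re-traverse any linked
 * iterators.
 */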
2206 void bch2_trans_reset(struct btree_trans *trans, unsigned flags)
2207 {
2208         struct btree_iter *iter;
2209
2210         trans_for_each_iter(trans, iter)
2211                 iter->flags &= ~(BTREE_ITER_KEEP_UNTIL_COMMIT|
2212                                  BTREE_ITER_SET_POS_AFTER_COMMIT);
2213
2214         bch2_trans_unlink_iters(trans);
2215
2216         trans->iters_touched &= trans->iters_live;
2217
2218         trans->nr_updates               = 0;
2219         trans->nr_updates2              = 0;
2220         trans->mem_top                  = 0;
2221
2222         trans->hooks                    = NULL;
2223         trans->extra_journal_entries    = NULL;
2224         trans->extra_journal_entry_u64s = 0;
2225
2226         if (trans->fs_usage_deltas) {
2227                 trans->fs_usage_deltas->used = 0;
2228                 memset(&trans->fs_usage_deltas->memset_start, 0,
2229                        (void *) &trans->fs_usage_deltas->memset_end -
2230                        (void *) &trans->fs_usage_deltas->memset_start);
2231         }
2232
2233         if (!(flags & TRANS_RESET_NOUNLOCK))
2234                 bch2_trans_cond_resched(trans);
2235
2236         if (!(flags & TRANS_RESET_NOTRAVERSE) &&
2237             trans->iters_linked)
2238                 bch2_btree_iter_traverse_all(trans);
2239 }
2240
2241 static void bch2_trans_alloc_iters(struct btree_trans *trans, struct bch_fs *c)
2242 {
2243         size_t iters_bytes      = sizeof(struct btree_iter) * BTREE_ITER_MAX;
2244         size_t updates_bytes    = sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
2245         void *p = NULL;
2246
2247         BUG_ON(trans->used_mempool);
2248
2249 #ifdef __KERNEL__
2250         p = this_cpu_xchg(c->btree_iters_bufs->iter, NULL);
2251 #endif
2252         if (!p)
2253                 p = mempool_alloc(&trans->c->btree_iters_pool, GFP_NOFS);
2254
2255         trans->iters            = p; p += iters_bytes;
2256         trans->updates          = p; p += updates_bytes;
2257         trans->updates2         = p; p += updates_bytes;
2258 }
2259
2260 void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
2261                      unsigned expected_nr_iters,
2262                      size_t expected_mem_bytes)
2263         __acquires(&c->btree_trans_barrier)
2264 {
2265         memset(trans, 0, sizeof(*trans));
2266         trans->c                = c;
2267         trans->ip               = _RET_IP_;
2268
2269         /*
2270          * reallocating iterators currently completely breaks
2271          * bch2_trans_iter_put(), so we always allocate the max:
2272          */
2273         bch2_trans_alloc_iters(trans, c);
2274
2275         if (expected_mem_bytes) {
2276                 trans->mem_bytes = roundup_pow_of_two(expected_mem_bytes);
2277                 trans->mem = kmalloc(trans->mem_bytes, GFP_KERNEL|__GFP_NOFAIL);
2278
2279                 if (unlikely(!trans->mem)) {
2280                         trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
2281                         trans->mem_bytes = BTREE_TRANS_MEM_MAX;
2282                 }
2283         }
2284
2285         trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
2286
2287 #ifdef CONFIG_BCACHEFS_DEBUG
2288         trans->pid = current->pid;
2289         mutex_lock(&c->btree_trans_lock);
2290         list_add(&trans->list, &c->btree_trans_list);
2291         mutex_unlock(&c->btree_trans_lock);
2292 #endif
2293 }
2294
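/*
 * Tear down a transaction: complain loudly about leaked iterators in debug
 * builds, release journal preres and transaction memory, and return the
 * iterator buffer to the percpu cache or mempool.
 */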
2295 int bch2_trans_exit(struct btree_trans *trans)
2296         __releases(&c->btree_trans_barrier)
2297 {
2298         struct bch_fs *c = trans->c;
2299
2300         bch2_trans_unlock(trans);
2301
2302 #ifdef CONFIG_BCACHEFS_DEBUG
2303         if (trans->iters_live) {
2304                 struct btree_iter *iter;
2305
2306                 bch_err(c, "btree iterators leaked!");
2307                 trans_for_each_iter(trans, iter)
2308                         if (btree_iter_live(trans, iter))
2309                                 printk(KERN_ERR "  btree %s allocated at %pS\n",
2310                                        bch2_btree_ids[iter->btree_id],
2311                                        (void *) iter->ip_allocated);
2312                 /* Be noisy about this: */
2313                 bch2_fatal_error(c);
2314         }
2315
2316         mutex_lock(&trans->c->btree_trans_lock);
2317         list_del(&trans->list);
2318         mutex_unlock(&trans->c->btree_trans_lock);
2319 #endif
2320
2321         srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
2322
2323         bch2_journal_preres_put(&trans->c->journal, &trans->journal_preres);
2324
2325         if (trans->fs_usage_deltas) {
2326                 if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
2327                     REPLICAS_DELTA_LIST_MAX)
2328                         mempool_free(trans->fs_usage_deltas,
2329                                      &trans->c->replicas_delta_pool);
2330                 else
2331                         kfree(trans->fs_usage_deltas);
2332         }
2333
2334         if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
2335                 mempool_free(trans->mem, &trans->c->btree_trans_mem_pool);
2336         else
2337                 kfree(trans->mem);
2338
2339 #ifdef __KERNEL__
2340         /*
2341          * Userspace doesn't have a real percpu implementation:
2342          */
2343         trans->iters = this_cpu_xchg(c->btree_iters_bufs->iter, trans->iters);
2344 #endif
2345
2346         if (trans->iters)
2347                 mempool_free(trans->iters, &trans->c->btree_iters_pool);
2348
2349         trans->mem      = (void *) 0x1;
2350         trans->iters    = (void *) 0x1;
2351
2352         return trans->error ? -EIO : 0;
2353 }
2354
2355 static void __maybe_unused
2356 bch2_btree_iter_node_to_text(struct printbuf *out,
2357                              struct btree_bkey_cached_common *_b,
2358                              enum btree_iter_type type)
2359 {
2360         pr_buf(out, "    l=%u %s:",
2361                _b->level, bch2_btree_ids[_b->btree_id]);
2362         bch2_bpos_to_text(out, btree_node_pos(_b, type));
2363 }
2364
2365 #ifdef CONFIG_BCACHEFS_DEBUG
2366 static bool trans_has_btree_nodes_locked(struct btree_trans *trans)
2367 {
2368         struct btree_iter *iter;
2369
2370         trans_for_each_iter(trans, iter)
2371                 if (btree_iter_type(iter) != BTREE_ITER_CACHED &&
2372                     iter->nodes_locked)
2373                         return true;
2374         return false;
2375 }
2376 #endif
2377
2378 void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
2379 {
2380 #ifdef CONFIG_BCACHEFS_DEBUG
2381         struct btree_trans *trans;
2382         struct btree_iter *iter;
2383         struct btree *b;
2384         unsigned l;
2385
2386         mutex_lock(&c->btree_trans_lock);
2387         list_for_each_entry(trans, &c->btree_trans_list, list) {
2388                 if (!trans_has_btree_nodes_locked(trans))
2389                         continue;
2390
2391                 pr_buf(out, "%i %ps\n", trans->pid, (void *) trans->ip);
2392
2393                 trans_for_each_iter(trans, iter) {
2394                         if (!iter->nodes_locked)
2395                                 continue;
2396
2397                         pr_buf(out, "  iter %u %c %s:",
2398                                iter->idx,
2399                                btree_iter_type(iter) == BTREE_ITER_CACHED ? 'c' : 'b',
2400                                bch2_btree_ids[iter->btree_id]);
2401                         bch2_bpos_to_text(out, iter->pos);
2402                         pr_buf(out, "\n");
2403
2404                         for (l = 0; l < BTREE_MAX_DEPTH; l++) {
2405                                 if (btree_node_locked(iter, l)) {
2406                                         pr_buf(out, "    %s l=%u ",
2407                                                btree_node_intent_locked(iter, l) ? "i" : "r", l);
2408                                         bch2_btree_iter_node_to_text(out,
2409                                                         (void *) iter->l[l].b,
2410                                                         btree_iter_type(iter));
2411                                         pr_buf(out, "\n");
2412                                 }
2413                         }
2414                 }
2415
2416                 b = READ_ONCE(trans->locking);
2417                 if (b) {
2418                         iter = &trans->iters[trans->locking_iter_idx];
2419                         pr_buf(out, "  locking iter %u %c l=%u %s:",
2420                                trans->locking_iter_idx,
2421                                btree_iter_type(iter) == BTREE_ITER_CACHED ? 'c' : 'b',
2422                                trans->locking_level,
2423                                bch2_btree_ids[trans->locking_btree_id]);
2424                         bch2_bpos_to_text(out, trans->locking_pos);
2425
2426                         pr_buf(out, " node ");
2427                         bch2_btree_iter_node_to_text(out,
2428                                         (void *) b,
2429                                         btree_iter_type(iter));
2430                         pr_buf(out, "\n");
2431                 }
2432         }
2433         mutex_unlock(&c->btree_trans_lock);
2434 #endif
2435 }
2436
2437 void bch2_fs_btree_iter_exit(struct bch_fs *c)
2438 {
2439         mempool_exit(&c->btree_trans_mem_pool);
2440         mempool_exit(&c->btree_iters_pool);
2441         cleanup_srcu_struct(&c->btree_trans_barrier);
2442 }
2443
2444 int bch2_fs_btree_iter_init(struct bch_fs *c)
2445 {
2446         unsigned nr = BTREE_ITER_MAX;
2447
2448         INIT_LIST_HEAD(&c->btree_trans_list);
2449         mutex_init(&c->btree_trans_lock);
2450
2451         return  init_srcu_struct(&c->btree_trans_barrier) ?:
2452                 mempool_init_kmalloc_pool(&c->btree_iters_pool, 1,
2453                         sizeof(struct btree_iter) * nr +
2454                         sizeof(struct btree_insert_entry) * nr +
2455                         sizeof(struct btree_insert_entry) * nr) ?:
2456                 mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
2457                                           BTREE_TRANS_MEM_MAX);
2458 }