1 // SPDX-License-Identifier: GPL-2.0
2
3 #include "bcachefs.h"
4 #include "bkey_methods.h"
5 #include "bkey_buf.h"
6 #include "btree_cache.h"
7 #include "btree_iter.h"
8 #include "btree_key_cache.h"
9 #include "btree_locking.h"
10 #include "btree_update.h"
11 #include "debug.h"
12 #include "error.h"
13 #include "extents.h"
14 #include "journal.h"
15 #include "replicas.h"
16
17 #include <linux/prefetch.h>
18 #include <trace/events/bcachefs.h>
19
20 static void btree_iter_set_search_pos(struct btree_iter *, struct bpos);
21
22 static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
23 {
24         EBUG_ON(btree_iter_type(iter) == BTREE_ITER_NODES);
25
26         /* Are we iterating over keys in all snapshots? */
27         if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
28                 p = bpos_successor(p);
29         } else {
30                 p = bpos_nosnap_successor(p);
31                 p.snapshot = iter->snapshot;
32         }
33
34         return p;
35 }
36
37 static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
38 {
39         EBUG_ON(btree_iter_type(iter) == BTREE_ITER_NODES);
40
41         /* Are we iterating over keys in all snapshots? */
42         if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
43                 p = bpos_predecessor(p);
44         } else {
45                 p = bpos_nosnap_predecessor(p);
46                 p.snapshot = iter->snapshot;
47         }
48
49         return p;
50 }
51
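/*
 * iter->l[].b holds either a real node pointer or one of the small
 * BTREE_ITER_NO_NODE_* sentinel values used in this file; anything below 128
 * is a sentinel, not a node:
 */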
52 static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
53 {
54         return l < BTREE_MAX_DEPTH &&
55                 (unsigned long) iter->l[l].b >= 128;
56 }
57
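/*
 * For extent iterators the search key is the successor of iter->pos: extents
 * are indexed by their end position, so an extent ending exactly at iter->pos
 * doesn't cover it and shouldn't be returned:
 */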
58 static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
59 {
60         struct bpos pos = iter->pos;
61
62         if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
63             bkey_cmp(pos, POS_MAX))
64                 pos = bkey_successor(iter, pos);
65         return pos;
66 }
67
68 static inline bool btree_iter_pos_before_node(struct btree_iter *iter,
69                                               struct btree *b)
70 {
71         return bpos_cmp(iter->real_pos, b->data->min_key) < 0;
72 }
73
74 static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
75                                              struct btree *b)
76 {
77         return bpos_cmp(b->key.k.p, iter->real_pos) < 0;
78 }
79
80 static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
81                                           struct btree *b)
82 {
83         return iter->btree_id == b->c.btree_id &&
84                 !btree_iter_pos_before_node(iter, b) &&
85                 !btree_iter_pos_after_node(iter, b);
86 }
87
88 /* Btree node locking: */
89
90 void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
91 {
92         bch2_btree_node_unlock_write_inlined(b, iter);
93 }
94
95 void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
96 {
97         struct btree_iter *linked;
98         unsigned readers = 0;
99
100         EBUG_ON(!btree_node_intent_locked(iter, b->c.level));
101
102         trans_for_each_iter(iter->trans, linked)
103                 if (linked->l[b->c.level].b == b &&
104                     btree_node_read_locked(linked, b->c.level))
105                         readers++;
106
107         /*
108          * Must drop our read locks before calling six_lock_write() -
109          * six_unlock() won't do wakeups until the reader count
110          * goes to 0, and it's safe because we have the node intent
111          * locked:
112          */
113         atomic64_sub(__SIX_VAL(read_lock, readers),
114                      &b->c.lock.state.counter);
115         btree_node_lock_type(iter->trans->c, b, SIX_LOCK_write);
116         atomic64_add(__SIX_VAL(read_lock, readers),
117                      &b->c.lock.state.counter);
118 }
119
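/*
 * Re-take a lock we previously held on the node at @level: either by relocking
 * with the lock sequence number saved in the iterator, or by taking another
 * reference on a lock that a linked iterator in this transaction still holds.
 */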
120 bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
121 {
122         struct btree *b = btree_iter_node(iter, level);
123         int want = __btree_lock_want(iter, level);
124
125         if (!is_btree_node(iter, level))
126                 return false;
127
128         if (race_fault())
129                 return false;
130
131         if (six_relock_type(&b->c.lock, want, iter->l[level].lock_seq) ||
132             (btree_node_lock_seq_matches(iter, b, level) &&
133              btree_node_lock_increment(iter->trans, b, level, want))) {
134                 mark_btree_node_locked(iter, level, want);
135                 return true;
136         } else {
137                 return false;
138         }
139 }
140
141 static bool bch2_btree_node_upgrade(struct btree_iter *iter, unsigned level)
142 {
143         struct btree *b = iter->l[level].b;
144
145         EBUG_ON(btree_lock_want(iter, level) != BTREE_NODE_INTENT_LOCKED);
146
147         if (!is_btree_node(iter, level))
148                 return false;
149
150         if (btree_node_intent_locked(iter, level))
151                 return true;
152
153         if (race_fault())
154                 return false;
155
156         if (btree_node_locked(iter, level)
157             ? six_lock_tryupgrade(&b->c.lock)
158             : six_relock_type(&b->c.lock, SIX_LOCK_intent, iter->l[level].lock_seq))
159                 goto success;
160
161         if (btree_node_lock_seq_matches(iter, b, level) &&
162             btree_node_lock_increment(iter->trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
163                 btree_node_unlock(iter, level);
164                 goto success;
165         }
166
167         return false;
168 success:
169         mark_btree_node_intent_locked(iter, level);
170         return true;
171 }
172
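/*
 * Starting from iter->level, relock (or upgrade to intent locks, if @upgrade)
 * each level up to iter->locks_want. On failure, unlock the level that failed
 * and everything below it, so that bch2_btree_iter_traverse() restarts from
 * that level.
 */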
173 static inline bool btree_iter_get_locks(struct btree_iter *iter, bool upgrade,
174                                         unsigned long trace_ip)
175 {
176         unsigned l = iter->level;
177         int fail_idx = -1;
178
179         do {
180                 if (!btree_iter_node(iter, l))
181                         break;
182
183                 if (!(upgrade
184                       ? bch2_btree_node_upgrade(iter, l)
185                       : bch2_btree_node_relock(iter, l))) {
186                         (upgrade
187                          ? trace_node_upgrade_fail
188                          : trace_node_relock_fail)(iter->trans->ip, trace_ip,
189                                         iter->btree_id, &iter->real_pos,
190                                         l, iter->l[l].lock_seq,
191                                         is_btree_node(iter, l)
192                                         ? 0
193                                         : (unsigned long) iter->l[l].b,
194                                         is_btree_node(iter, l)
195                                         ? iter->l[l].b->c.lock.state.seq
196                                         : 0);
197
198                         fail_idx = l;
199                         btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
200                 }
201
202                 l++;
203         } while (l < iter->locks_want);
204
205         /*
206          * When we fail to get a lock, we have to ensure that any child nodes
207          * can't be relocked so bch2_btree_iter_traverse has to walk back up to
208          * the node that we failed to relock:
209          */
210         while (fail_idx >= 0) {
211                 btree_node_unlock(iter, fail_idx);
212                 iter->l[fail_idx].b = BTREE_ITER_NO_NODE_GET_LOCKS;
213                 --fail_idx;
214         }
215
216         if (iter->uptodate == BTREE_ITER_NEED_RELOCK)
217                 iter->uptodate = BTREE_ITER_NEED_PEEK;
218
219         bch2_btree_trans_verify_locks(iter->trans);
220
221         return iter->uptodate < BTREE_ITER_NEED_RELOCK;
222 }
223
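/* Position a locked node is ordered by, for the lock ordering checks below: */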
224 static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
225                                   enum btree_iter_type type)
226 {
227         return  type != BTREE_ITER_CACHED
228                 ? container_of(_b, struct btree, c)->key.k.p
229                 : container_of(_b, struct bkey_cached, c)->key.pos;
230 }
231
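/*
 * Check whether it's safe to block taking this lock, given the locks this
 * transaction already holds: we can't block on an intent lock while holding
 * read locks, and locks must be taken in a consistent order (by btree id,
 * cached before non-cached within a btree, ancestors before descendants, and
 * in key order within a level). If blocking here could deadlock, return false
 * so the caller can restart the transaction.
 */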
232 /* Slowpath: */
233 bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
234                             unsigned level, struct btree_iter *iter,
235                             enum six_lock_type type,
236                             six_lock_should_sleep_fn should_sleep_fn, void *p,
237                             unsigned long ip)
238 {
239         struct btree_trans *trans = iter->trans;
240         struct btree_iter *linked, *deadlock_iter = NULL;
241         u64 start_time = local_clock();
242         unsigned reason = 9;
243         bool ret;
244
245         /* Check if it's safe to block: */
246         trans_for_each_iter(trans, linked) {
247                 if (!linked->nodes_locked)
248                         continue;
249
250                 /*
251                  * Can't block taking an intent lock if we have _any_ nodes read
252                  * locked:
253                  *
254                  * - Our read lock blocks another thread with an intent lock on
255                  *   the same node from getting a write lock, and thus from
256                  *   dropping its intent lock
257                  *
258                  * - And the other thread may have multiple nodes intent locked:
259                  *   both the node we want to intent lock, and the node we
260                  *   already have read locked - deadlock:
261                  */
262                 if (type == SIX_LOCK_intent &&
263                     linked->nodes_locked != linked->nodes_intent_locked) {
264                         deadlock_iter = linked;
265                         reason = 1;
266                 }
267
268                 if (linked->btree_id != iter->btree_id) {
269                         if (linked->btree_id > iter->btree_id) {
270                                 deadlock_iter = linked;
271                                 reason = 3;
272                         }
273                         continue;
274                 }
275
276                 /*
277                  * Within the same btree, cached iterators come before non
278                  * cached iterators:
279                  */
280                 if (btree_iter_is_cached(linked) != btree_iter_is_cached(iter)) {
281                         if (btree_iter_is_cached(iter)) {
282                                 deadlock_iter = linked;
283                                 reason = 4;
284                         }
285                         continue;
286                 }
287
288                 /*
289                  * Interior nodes must be locked before their descendants: if
290                  * another iterator has possible descendants locked of the node
291                  * we're about to lock, it must have the ancestors locked too:
292                  */
293                 if (level > __fls(linked->nodes_locked)) {
294                         deadlock_iter = linked;
295                         reason = 5;
296                 }
297
298                 /* Must lock btree nodes in key order: */
299                 if (btree_node_locked(linked, level) &&
300                     bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
301                                                  btree_iter_type(linked))) <= 0) {
302                         deadlock_iter = linked;
303                         reason = 7;
304                         BUG_ON(trans->in_traverse_all);
305                 }
306         }
307
308         if (unlikely(deadlock_iter)) {
309                 trace_trans_restart_would_deadlock(iter->trans->ip, ip,
310                                 trans->in_traverse_all, reason,
311                                 deadlock_iter->btree_id,
312                                 btree_iter_type(deadlock_iter),
313                                 &deadlock_iter->real_pos,
314                                 iter->btree_id,
315                                 btree_iter_type(iter),
316                                 &pos);
317                 return false;
318         }
319
320         if (six_trylock_type(&b->c.lock, type))
321                 return true;
322
323 #ifdef CONFIG_BCACHEFS_DEBUG
324         trans->locking_iter_idx = iter->idx;
325         trans->locking_pos      = pos;
326         trans->locking_btree_id = iter->btree_id;
327         trans->locking_level    = level;
328         trans->locking          = b;
329 #endif
330
331         ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p) == 0;
332
333 #ifdef CONFIG_BCACHEFS_DEBUG
334         trans->locking = NULL;
335 #endif
336         if (ret)
337                 bch2_time_stats_update(&trans->c->times[lock_to_time_stat(type)],
338                                        start_time);
339         return ret;
340 }
341
342 /* Btree iterator locking: */
343
344 #ifdef CONFIG_BCACHEFS_DEBUG
345 static void bch2_btree_iter_verify_locks(struct btree_iter *iter)
346 {
347         unsigned l;
348
349         if (!(iter->trans->iters_linked & (1ULL << iter->idx))) {
350                 BUG_ON(iter->nodes_locked);
351                 return;
352         }
353
354         for (l = 0; is_btree_node(iter, l); l++) {
355                 if (iter->uptodate >= BTREE_ITER_NEED_RELOCK &&
356                     !btree_node_locked(iter, l))
357                         continue;
358
359                 BUG_ON(btree_lock_want(iter, l) !=
360                        btree_node_locked_type(iter, l));
361         }
362 }
363
364 void bch2_btree_trans_verify_locks(struct btree_trans *trans)
365 {
366         struct btree_iter *iter;
367
368         trans_for_each_iter(trans, iter)
369                 bch2_btree_iter_verify_locks(iter);
370 }
371 #else
372 static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
373 #endif
374
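/* Relock, without upgrading, every node this iterator previously had locked: */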
375 __flatten
376 static bool bch2_btree_iter_relock(struct btree_iter *iter, unsigned long trace_ip)
377 {
378         return btree_iter_get_locks(iter, false, trace_ip);
379 }
380
381 bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
382                                unsigned new_locks_want)
383 {
384         struct btree_iter *linked;
385
386         EBUG_ON(iter->locks_want >= new_locks_want);
387
388         iter->locks_want = new_locks_want;
389
390         if (btree_iter_get_locks(iter, true, _THIS_IP_))
391                 return true;
392
393         /*
394          * XXX: this is ugly - we'd prefer to not be mucking with other
395          * iterators in the btree_trans here.
396          *
397          * On failure to upgrade the iterator, setting iter->locks_want and
398          * calling get_locks() is sufficient to make bch2_btree_iter_traverse()
399          * get the locks we want on transaction restart.
400          *
401          * But if this iterator was a clone, on transaction restart what we did
402          * to this iterator isn't going to be preserved.
403          *
404          * Possibly we could add an iterator field for the parent iterator when
405          * an iterator is a copy - for now, we'll just upgrade any other
406          * iterators with the same btree id.
407          *
408          * The code below used to be needed to ensure ancestor nodes get locked
409          * before interior nodes - now that's handled by
410          * bch2_btree_iter_traverse_all().
411          */
412         trans_for_each_iter(iter->trans, linked)
413                 if (linked != iter &&
414                     btree_iter_type(linked) == btree_iter_type(iter) &&
415                     linked->btree_id == iter->btree_id &&
416                     linked->locks_want < new_locks_want) {
417                         linked->locks_want = new_locks_want;
418                         btree_iter_get_locks(linked, true, _THIS_IP_);
419                 }
420
421         return false;
422 }
423
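/*
 * Called after reducing iter->locks_want: for each locked level at or above
 * the new locks_want, unlock it if it's above iter->level, or downgrade the
 * intent lock to a read lock if it's iter->level itself.
 */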
424 void __bch2_btree_iter_downgrade(struct btree_iter *iter,
425                                  unsigned new_locks_want)
426 {
427         unsigned l;
428
429         EBUG_ON(iter->locks_want < new_locks_want);
430
431         iter->locks_want = new_locks_want;
432
433         while (iter->nodes_locked &&
434                (l = __fls(iter->nodes_locked)) >= iter->locks_want) {
435                 if (l > iter->level) {
436                         btree_node_unlock(iter, l);
437                 } else {
438                         if (btree_node_intent_locked(iter, l)) {
439                                 six_lock_downgrade(&iter->l[l].b->c.lock);
440                                 iter->nodes_intent_locked ^= 1 << l;
441                         }
442                         break;
443                 }
444         }
445
446         bch2_btree_trans_verify_locks(iter->trans);
447 }
448
449 void bch2_trans_downgrade(struct btree_trans *trans)
450 {
451         struct btree_iter *iter;
452
453         trans_for_each_iter(trans, iter)
454                 bch2_btree_iter_downgrade(iter);
455 }
456
457 /* Btree transaction locking: */
458
459 static inline bool btree_iter_should_be_locked(struct btree_trans *trans,
460                                                struct btree_iter *iter)
461 {
462         return (iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT) ||
463                 iter->should_be_locked;
464 }
465
466 bool bch2_trans_relock(struct btree_trans *trans)
467 {
468         struct btree_iter *iter;
469
470         trans_for_each_iter(trans, iter)
471                 if (!bch2_btree_iter_relock(iter, _RET_IP_) &&
472                     btree_iter_should_be_locked(trans, iter)) {
473                         trace_trans_restart_relock(trans->ip, _RET_IP_,
474                                         iter->btree_id, &iter->real_pos);
475                         return false;
476                 }
477         return true;
478 }
479
480 void bch2_trans_unlock(struct btree_trans *trans)
481 {
482         struct btree_iter *iter;
483
484         trans_for_each_iter(trans, iter)
485                 __bch2_btree_iter_unlock(iter);
486 }
487
488 /* Btree iterator: */
489
490 #ifdef CONFIG_BCACHEFS_DEBUG
491
492 static void bch2_btree_iter_verify_cached(struct btree_iter *iter)
493 {
494         struct bkey_cached *ck;
495         bool locked = btree_node_locked(iter, 0);
496
497         if (!bch2_btree_node_relock(iter, 0))
498                 return;
499
500         ck = (void *) iter->l[0].b;
501         BUG_ON(ck->key.btree_id != iter->btree_id ||
502                bkey_cmp(ck->key.pos, iter->pos));
503
504         if (!locked)
505                 btree_node_unlock(iter, 0);
506 }
507
508 static void bch2_btree_iter_verify_level(struct btree_iter *iter,
509                                          unsigned level)
510 {
511         struct btree_iter_level *l;
512         struct btree_node_iter tmp;
513         bool locked;
514         struct bkey_packed *p, *k;
515         char buf1[100], buf2[100], buf3[100];
516         const char *msg;
517
518         if (!bch2_debug_check_iterators)
519                 return;
520
521         l       = &iter->l[level];
522         tmp     = l->iter;
523         locked  = btree_node_locked(iter, level);
524
525         if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
526                 if (!level)
527                         bch2_btree_iter_verify_cached(iter);
528                 return;
529         }
530
531         BUG_ON(iter->level < iter->min_depth);
532
533         if (!btree_iter_node(iter, level))
534                 return;
535
536         if (!bch2_btree_node_relock(iter, level))
537                 return;
538
539         BUG_ON(!btree_iter_pos_in_node(iter, l->b));
540
541         /*
542          * BTREE_ITER_NODES iterators don't use the node iterator at min_depth:
543          */
544         if (btree_iter_type(iter) == BTREE_ITER_NODES &&
545             level <= iter->min_depth)
546                 goto unlock;
547
548         bch2_btree_node_iter_verify(&l->iter, l->b);
549
550         /*
551          * For interior nodes, the iterator will have skipped past
552          * deleted keys:
553          *
554          * For extents, the iterator may have skipped past deleted keys (but not
555          * whiteouts)
556          */
557         p = level || btree_node_type_is_extents(iter->btree_id)
558                 ? bch2_btree_node_iter_prev(&tmp, l->b)
559                 : bch2_btree_node_iter_prev_all(&tmp, l->b);
560         k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
561
562         if (p && bkey_iter_pos_cmp(l->b, p, &iter->real_pos) >= 0) {
563                 msg = "before";
564                 goto err;
565         }
566
567         if (k && bkey_iter_pos_cmp(l->b, k, &iter->real_pos) < 0) {
568                 msg = "after";
569                 goto err;
570         }
571 unlock:
572         if (!locked)
573                 btree_node_unlock(iter, level);
574         return;
575 err:
576         strcpy(buf2, "(none)");
577         strcpy(buf3, "(none)");
578
579         bch2_bpos_to_text(&PBUF(buf1), iter->real_pos);
580
581         if (p) {
582                 struct bkey uk = bkey_unpack_key(l->b, p);
583                 bch2_bkey_to_text(&PBUF(buf2), &uk);
584         }
585
586         if (k) {
587                 struct bkey uk = bkey_unpack_key(l->b, k);
588                 bch2_bkey_to_text(&PBUF(buf3), &uk);
589         }
590
591         panic("iterator should be %s key at level %u:\n"
592               "iter pos %s\n"
593               "prev key %s\n"
594               "cur  key %s\n",
595               msg, level, buf1, buf2, buf3);
596 }
597
598 static void bch2_btree_iter_verify(struct btree_iter *iter)
599 {
600         enum btree_iter_type type = btree_iter_type(iter);
601         unsigned i;
602
603         EBUG_ON(iter->btree_id >= BTREE_ID_NR);
604
605         BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
606                iter->pos.snapshot != iter->snapshot);
607
608         BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
609                (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
610
611         BUG_ON(type == BTREE_ITER_NODES &&
612                !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
613
614         BUG_ON(type != BTREE_ITER_NODES &&
615                (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
616                !btree_type_has_snapshots(iter->btree_id));
617
618         bch2_btree_iter_verify_locks(iter);
619
620         for (i = 0; i < BTREE_MAX_DEPTH; i++)
621                 bch2_btree_iter_verify_level(iter, i);
622 }
623
624 static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
625 {
626         enum btree_iter_type type = btree_iter_type(iter);
627
628         BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
629                iter->pos.snapshot != iter->snapshot);
630
631         BUG_ON((type == BTREE_ITER_KEYS ||
632                 type == BTREE_ITER_CACHED) &&
633                (bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
634                 bkey_cmp(iter->pos, iter->k.p) > 0));
635 }
636
637 void bch2_btree_trans_verify_iters(struct btree_trans *trans, struct btree *b)
638 {
639         struct btree_iter *iter;
640
641         if (!bch2_debug_check_iterators)
642                 return;
643
644         trans_for_each_iter_with_node(trans, b, iter)
645                 bch2_btree_iter_verify_level(iter, b->c.level);
646 }
647
648 #else
649
650 static inline void bch2_btree_iter_verify_level(struct btree_iter *iter, unsigned l) {}
651 static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
652 static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
653
654 #endif
655
656 static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
657                                         struct btree *b,
658                                         struct bset_tree *t,
659                                         struct bkey_packed *k)
660 {
661         struct btree_node_iter_set *set;
662
663         btree_node_iter_for_each(iter, set)
664                 if (set->end == t->end_offset) {
665                         set->k = __btree_node_key_to_offset(b, k);
666                         bch2_btree_node_iter_sort(iter, b);
667                         return;
668                 }
669
670         bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
671 }
672
673 static void __bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
674                                                struct btree *b,
675                                                struct bkey_packed *where)
676 {
677         struct btree_iter_level *l = &iter->l[b->c.level];
678
679         if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
680                 return;
681
682         if (bkey_iter_pos_cmp(l->b, where, &iter->real_pos) < 0)
683                 bch2_btree_node_iter_advance(&l->iter, l->b);
684
685         btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
686 }
687
688 void bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
689                                       struct btree *b,
690                                       struct bkey_packed *where)
691 {
692         struct btree_iter *linked;
693
694         trans_for_each_iter_with_node(iter->trans, b, linked) {
695                 __bch2_btree_iter_fix_key_modified(linked, b, where);
696                 bch2_btree_iter_verify_level(linked, b->c.level);
697         }
698 }
699
700 static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
701                                       struct btree *b,
702                                       struct btree_node_iter *node_iter,
703                                       struct bset_tree *t,
704                                       struct bkey_packed *where,
705                                       unsigned clobber_u64s,
706                                       unsigned new_u64s)
707 {
708         const struct bkey_packed *end = btree_bkey_last(b, t);
709         struct btree_node_iter_set *set;
710         unsigned offset = __btree_node_key_to_offset(b, where);
711         int shift = new_u64s - clobber_u64s;
712         unsigned old_end = t->end_offset - shift;
713         unsigned orig_iter_pos = node_iter->data[0].k;
714         bool iter_current_key_modified =
715                 orig_iter_pos >= offset &&
716                 orig_iter_pos <= offset + clobber_u64s;
717
718         btree_node_iter_for_each(node_iter, set)
719                 if (set->end == old_end)
720                         goto found;
721
722         /* didn't find the bset in the iterator - might have to re-add it: */
723         if (new_u64s &&
724             bkey_iter_pos_cmp(b, where, &iter->real_pos) >= 0) {
725                 bch2_btree_node_iter_push(node_iter, b, where, end);
726                 goto fixup_done;
727         } else {
728                 /* Iterator is after key that changed */
729                 return;
730         }
731 found:
732         set->end = t->end_offset;
733
734         /* Iterator hasn't gotten to the key that changed yet: */
735         if (set->k < offset)
736                 return;
737
738         if (new_u64s &&
739             bkey_iter_pos_cmp(b, where, &iter->real_pos) >= 0) {
740                 set->k = offset;
741         } else if (set->k < offset + clobber_u64s) {
742                 set->k = offset + new_u64s;
743                 if (set->k == set->end)
744                         bch2_btree_node_iter_set_drop(node_iter, set);
745         } else {
746                 /* Iterator is after key that changed */
747                 set->k = (int) set->k + shift;
748                 return;
749         }
750
751         bch2_btree_node_iter_sort(node_iter, b);
752 fixup_done:
753         if (node_iter->data[0].k != orig_iter_pos)
754                 iter_current_key_modified = true;
755
756         /*
757          * When a new key is added, and the node iterator now points to that
758          * key, the iterator might have skipped past deleted keys that should
759          * come after the key the iterator now points to. We have to rewind to
760          * before those deleted keys - otherwise
761          * bch2_btree_node_iter_prev_all() breaks:
762          */
763         if (!bch2_btree_node_iter_end(node_iter) &&
764             iter_current_key_modified &&
765             (b->c.level ||
766              btree_node_type_is_extents(iter->btree_id))) {
767                 struct bset_tree *t;
768                 struct bkey_packed *k, *k2, *p;
769
770                 k = bch2_btree_node_iter_peek_all(node_iter, b);
771
772                 for_each_bset(b, t) {
773                         bool set_pos = false;
774
775                         if (node_iter->data[0].end == t->end_offset)
776                                 continue;
777
778                         k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);
779
780                         while ((p = bch2_bkey_prev_all(b, t, k2)) &&
781                                bkey_iter_cmp(b, k, p) < 0) {
782                                 k2 = p;
783                                 set_pos = true;
784                         }
785
786                         if (set_pos)
787                                 btree_node_iter_set_set_pos(node_iter,
788                                                             b, t, k2);
789                 }
790         }
791
792         if (!b->c.level &&
793             node_iter == &iter->l[0].iter &&
794             iter_current_key_modified)
795                 btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
796 }
797
798 void bch2_btree_node_iter_fix(struct btree_iter *iter,
799                               struct btree *b,
800                               struct btree_node_iter *node_iter,
801                               struct bkey_packed *where,
802                               unsigned clobber_u64s,
803                               unsigned new_u64s)
804 {
805         struct bset_tree *t = bch2_bkey_to_bset(b, where);
806         struct btree_iter *linked;
807
808         if (node_iter != &iter->l[b->c.level].iter) {
809                 __bch2_btree_node_iter_fix(iter, b, node_iter, t,
810                                            where, clobber_u64s, new_u64s);
811
812                 if (bch2_debug_check_iterators)
813                         bch2_btree_node_iter_verify(node_iter, b);
814         }
815
816         trans_for_each_iter_with_node(iter->trans, b, linked) {
817                 __bch2_btree_node_iter_fix(linked, b,
818                                            &linked->l[b->c.level].iter, t,
819                                            where, clobber_u64s, new_u64s);
820                 bch2_btree_iter_verify_level(linked, b->c.level);
821         }
822 }
823
824 static inline struct bkey_s_c __btree_iter_unpack(struct btree_iter *iter,
825                                                   struct btree_iter_level *l,
826                                                   struct bkey *u,
827                                                   struct bkey_packed *k)
828 {
829         struct bkey_s_c ret;
830
831         if (unlikely(!k)) {
832                 /*
833                  * signal to bch2_btree_iter_peek_slot() that we're currently at
834                  * a hole
835                  */
836                 u->type = KEY_TYPE_deleted;
837                 return bkey_s_c_null;
838         }
839
840         ret = bkey_disassemble(l->b, k, u);
841
842         /*
843          * XXX: bch2_btree_bset_insert_key() generates invalid keys when we
844          * overwrite extents - it sets k->type = KEY_TYPE_deleted on the key
845          * being overwritten but doesn't change k->size. But this is ok, because
846          * those keys are never written out, we just have to avoid a spurious
847          * assertion here:
848          */
849         if (bch2_debug_check_bkeys && !bkey_deleted(ret.k))
850                 bch2_bkey_debugcheck(iter->trans->c, l->b, ret);
851
852         return ret;
853 }
854
855 /* peek_all() doesn't skip deleted keys */
856 static inline struct bkey_s_c btree_iter_level_peek_all(struct btree_iter *iter,
857                                                         struct btree_iter_level *l,
858                                                         struct bkey *u)
859 {
860         return __btree_iter_unpack(iter, l, u,
861                         bch2_btree_node_iter_peek_all(&l->iter, l->b));
862 }
863
864 static inline struct bkey_s_c btree_iter_level_peek(struct btree_iter *iter,
865                                                     struct btree_iter_level *l)
866 {
867         struct bkey_s_c k = __btree_iter_unpack(iter, l, &iter->k,
868                         bch2_btree_node_iter_peek(&l->iter, l->b));
869
870         iter->real_pos = k.k ? k.k->p : l->b->key.k.p;
871         return k;
872 }
873
874 static inline struct bkey_s_c btree_iter_level_prev(struct btree_iter *iter,
875                                                     struct btree_iter_level *l)
876 {
877         struct bkey_s_c k = __btree_iter_unpack(iter, l, &iter->k,
878                         bch2_btree_node_iter_prev(&l->iter, l->b));
879
880         iter->real_pos = k.k ? k.k->p : l->b->data->min_key;
881         return k;
882 }
883
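/*
 * Advance the node iterator until it points at a key >= iter->real_pos;
 * returns false if that would take more than @max_advance steps (in which case
 * the caller reinitializes the node iterator instead).
 */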
884 static inline bool btree_iter_advance_to_pos(struct btree_iter *iter,
885                                              struct btree_iter_level *l,
886                                              int max_advance)
887 {
888         struct bkey_packed *k;
889         int nr_advanced = 0;
890
891         while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
892                bkey_iter_pos_cmp(l->b, k, &iter->real_pos) < 0) {
893                 if (max_advance > 0 && nr_advanced >= max_advance)
894                         return false;
895
896                 bch2_btree_node_iter_advance(&l->iter, l->b);
897                 nr_advanced++;
898         }
899
900         return true;
901 }
902
903 /*
904  * Verify that iterator for parent node points to child node:
905  */
906 static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
907 {
908         struct btree_iter_level *l;
909         unsigned plevel;
910         bool parent_locked;
911         struct bkey_packed *k;
912
913         if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
914                 return;
915
916         plevel = b->c.level + 1;
917         if (!btree_iter_node(iter, plevel))
918                 return;
919
920         parent_locked = btree_node_locked(iter, plevel);
921
922         if (!bch2_btree_node_relock(iter, plevel))
923                 return;
924
925         l = &iter->l[plevel];
926         k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
927         if (!k ||
928             bkey_deleted(k) ||
929             bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
930                 char buf1[100];
931                 char buf2[100];
932                 char buf3[100];
933                 char buf4[100];
934                 struct bkey uk = bkey_unpack_key(b, k);
935
936                 bch2_dump_btree_node(iter->trans->c, l->b);
937                 bch2_bpos_to_text(&PBUF(buf1), iter->real_pos);
938                 bch2_bkey_to_text(&PBUF(buf2), &uk);
939                 bch2_bpos_to_text(&PBUF(buf3), b->data->min_key);
940                 bch2_bpos_to_text(&PBUF(buf4), b->data->max_key);
941                 panic("parent iter doesn't point to new node:\n"
942                       "iter pos %s %s\n"
943                       "iter key %s\n"
944                       "new node %s-%s\n",
945                       bch2_btree_ids[iter->btree_id], buf1,
946                       buf2, buf3, buf4);
947         }
948
949         if (!parent_locked)
950                 btree_node_unlock(iter, b->c.level + 1);
951 }
952
953 static inline void __btree_iter_init(struct btree_iter *iter,
954                                      unsigned level)
955 {
956         struct btree_iter_level *l = &iter->l[level];
957
958         bch2_btree_node_iter_init(&l->iter, l->b, &iter->real_pos);
959
960         /*
961          * Iterators to interior nodes should always be pointed at the first
962          * non-whiteout key:
963          */
964         if (level)
965                 bch2_btree_node_iter_peek(&l->iter, l->b);
966
967         btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
968 }
969
970 static inline void btree_iter_node_set(struct btree_iter *iter,
971                                        struct btree *b)
972 {
973         BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
974
975         btree_iter_verify_new_node(iter, b);
976
977         EBUG_ON(!btree_iter_pos_in_node(iter, b));
978         EBUG_ON(b->c.lock.state.seq & 1);
979
980         iter->l[b->c.level].lock_seq = b->c.lock.state.seq;
981         iter->l[b->c.level].b = b;
982         __btree_iter_init(iter, b->c.level);
983 }
984
985 /*
986  * A btree node is being replaced - update the iterator to point to the new
987  * node:
988  */
989 void bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
990 {
991         enum btree_node_locked_type t;
992         struct btree_iter *linked;
993
994         trans_for_each_iter(iter->trans, linked)
995                 if (btree_iter_type(linked) != BTREE_ITER_CACHED &&
996                     btree_iter_pos_in_node(linked, b)) {
997                         /*
998                          * bch2_btree_iter_node_drop() has already been called -
999                          * the old node we're replacing has already been
1000                          * unlocked and the pointer invalidated
1001                          */
1002                         BUG_ON(btree_node_locked(linked, b->c.level));
1003
1004                         t = btree_lock_want(linked, b->c.level);
1005                         if (t != BTREE_NODE_UNLOCKED) {
1006                                 six_lock_increment(&b->c.lock, t);
1007                                 mark_btree_node_locked(linked, b->c.level, t);
1008                         }
1009
1010                         btree_iter_node_set(linked, b);
1011                 }
1012 }
1013
1014 void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
1015 {
1016         struct btree_iter *linked;
1017         unsigned level = b->c.level;
1018
1019         trans_for_each_iter(iter->trans, linked)
1020                 if (linked->l[level].b == b) {
1021                         btree_node_unlock(linked, level);
1022                         linked->l[level].b = BTREE_ITER_NO_NODE_DROP;
1023                 }
1024 }
1025
1026 /*
1027  * A btree node has been modified in such a way as to invalidate iterators - fix
1028  * them:
1029  */
1030 void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
1031 {
1032         struct btree_iter *linked;
1033
1034         trans_for_each_iter_with_node(iter->trans, b, linked)
1035                 __btree_iter_init(linked, b->c.level);
1036 }
1037
1038 static int lock_root_check_fn(struct six_lock *lock, void *p)
1039 {
1040         struct btree *b = container_of(lock, struct btree, c.lock);
1041         struct btree **rootp = p;
1042
1043         return b == *rootp ? 0 : -1;
1044 }
1045
1046 static inline int btree_iter_lock_root(struct btree_iter *iter,
1047                                        unsigned depth_want,
1048                                        unsigned long trace_ip)
1049 {
1050         struct bch_fs *c = iter->trans->c;
1051         struct btree *b, **rootp = &c->btree_roots[iter->btree_id].b;
1052         enum six_lock_type lock_type;
1053         unsigned i;
1054
1055         EBUG_ON(iter->nodes_locked);
1056
1057         while (1) {
1058                 b = READ_ONCE(*rootp);
1059                 iter->level = READ_ONCE(b->c.level);
1060
1061                 if (unlikely(iter->level < depth_want)) {
1062                         /*
1063                          * the root is at a lower depth than the depth we want:
1064                          * got to the end of the btree, or we're walking nodes
1065                          * greater than some depth and there are no nodes >=
1066                          * that depth
1067                          */
1068                         iter->level = depth_want;
1069                         for (i = iter->level; i < BTREE_MAX_DEPTH; i++)
1070                                 iter->l[i].b = NULL;
1071                         return 1;
1072                 }
1073
1074                 lock_type = __btree_lock_want(iter, iter->level);
1075                 if (unlikely(!btree_node_lock(b, POS_MAX, iter->level,
1076                                               iter, lock_type,
1077                                               lock_root_check_fn, rootp,
1078                                               trace_ip)))
1079                         return -EINTR;
1080
1081                 if (likely(b == READ_ONCE(*rootp) &&
1082                            b->c.level == iter->level &&
1083                            !race_fault())) {
1084                         for (i = 0; i < iter->level; i++)
1085                                 iter->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
1086                         iter->l[iter->level].b = b;
1087                         for (i = iter->level + 1; i < BTREE_MAX_DEPTH; i++)
1088                                 iter->l[i].b = NULL;
1089
1090                         mark_btree_node_locked(iter, iter->level, lock_type);
1091                         btree_iter_node_set(iter, b);
1092                         return 0;
1093                 }
1094
1095                 six_unlock_type(&b->c.lock, lock_type);
1096         }
1097 }
1098
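/*
 * Start reads for the next few children of the node we're descending through,
 * so that sequential iteration doesn't stall on every btree node read; we
 * prefetch more aggressively before the filesystem has finished starting up
 * (BCH_FS_STARTED not yet set):
 */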
1099 noinline
1100 static void btree_iter_prefetch(struct btree_iter *iter)
1101 {
1102         struct bch_fs *c = iter->trans->c;
1103         struct btree_iter_level *l = &iter->l[iter->level];
1104         struct btree_node_iter node_iter = l->iter;
1105         struct bkey_packed *k;
1106         struct bkey_buf tmp;
1107         unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
1108                 ? (iter->level > 1 ? 0 :  2)
1109                 : (iter->level > 1 ? 1 : 16);
1110         bool was_locked = btree_node_locked(iter, iter->level);
1111
1112         bch2_bkey_buf_init(&tmp);
1113
1114         while (nr) {
1115                 if (!bch2_btree_node_relock(iter, iter->level))
1116                         break;
1117
1118                 bch2_btree_node_iter_advance(&node_iter, l->b);
1119                 k = bch2_btree_node_iter_peek(&node_iter, l->b);
1120                 if (!k)
1121                         break;
1122
1123                 bch2_bkey_buf_unpack(&tmp, c, l->b, k);
1124                 bch2_btree_node_prefetch(c, iter, tmp.k, iter->btree_id,
1125                                          iter->level - 1);
1126         }
1127
1128         if (!was_locked)
1129                 btree_node_unlock(iter, iter->level);
1130
1131         bch2_bkey_buf_exit(&tmp, c);
1132 }
1133
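/*
 * Stash a pointer to the in-memory child node in the parent's btree_ptr_v2 key
 * (bp->mem_ptr), so that btree_node_mem_ptr() finds it directly on the next
 * descent:
 */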
1134 static noinline void btree_node_mem_ptr_set(struct btree_iter *iter,
1135                                             unsigned plevel, struct btree *b)
1136 {
1137         struct btree_iter_level *l = &iter->l[plevel];
1138         bool locked = btree_node_locked(iter, plevel);
1139         struct bkey_packed *k;
1140         struct bch_btree_ptr_v2 *bp;
1141
1142         if (!bch2_btree_node_relock(iter, plevel))
1143                 return;
1144
1145         k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1146         BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
1147
1148         bp = (void *) bkeyp_val(&l->b->format, k);
1149         bp->mem_ptr = (unsigned long)b;
1150
1151         if (!locked)
1152                 btree_node_unlock(iter, plevel);
1153 }
1154
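/*
 * Descend one level: unpack the child pointer the node iterator points at, get
 * and lock the child node, and point the iterator at it:
 */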
1155 static __always_inline int btree_iter_down(struct btree_iter *iter,
1156                                            unsigned long trace_ip)
1157 {
1158         struct bch_fs *c = iter->trans->c;
1159         struct btree_iter_level *l = &iter->l[iter->level];
1160         struct btree *b;
1161         unsigned level = iter->level - 1;
1162         enum six_lock_type lock_type = __btree_lock_want(iter, level);
1163         struct bkey_buf tmp;
1164         int ret;
1165
1166         EBUG_ON(!btree_node_locked(iter, iter->level));
1167
1168         bch2_bkey_buf_init(&tmp);
1169         bch2_bkey_buf_unpack(&tmp, c, l->b,
1170                          bch2_btree_node_iter_peek(&l->iter, l->b));
1171
1172         b = bch2_btree_node_get(c, iter, tmp.k, level, lock_type, trace_ip);
1173         ret = PTR_ERR_OR_ZERO(b);
1174         if (unlikely(ret))
1175                 goto err;
1176
1177         mark_btree_node_locked(iter, level, lock_type);
1178         btree_iter_node_set(iter, b);
1179
1180         if (tmp.k->k.type == KEY_TYPE_btree_ptr_v2 &&
1181             unlikely(b != btree_node_mem_ptr(tmp.k)))
1182                 btree_node_mem_ptr_set(iter, level + 1, b);
1183
1184         if (iter->flags & BTREE_ITER_PREFETCH)
1185                 btree_iter_prefetch(iter);
1186
1187         iter->level = level;
1188 err:
1189         bch2_bkey_buf_exit(&tmp, c);
1190         return ret;
1191 }
1192
1193 static int btree_iter_traverse_one(struct btree_iter *, unsigned long);
1194
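/*
 * Called when traversing a single iterator fails, typically because taking a
 * lock would have deadlocked: relock what we can, and if anything failed,
 * unlock everything and re-traverse every iterator in the transaction in lock
 * order.
 */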
1195 static int __btree_iter_traverse_all(struct btree_trans *trans, int ret,
1196                                      unsigned long trace_ip)
1197 {
1198         struct bch_fs *c = trans->c;
1199         struct btree_iter *iter;
1200         u8 sorted[BTREE_ITER_MAX];
1201         int i, nr_sorted = 0;
1202         bool relock_fail;
1203
1204         if (trans->in_traverse_all)
1205                 return -EINTR;
1206
1207         trans->in_traverse_all = true;
1208 retry_all:
1209         nr_sorted = 0;
1210         relock_fail = false;
1211
1212         trans_for_each_iter(trans, iter) {
1213                 if (!bch2_btree_iter_relock(iter, _THIS_IP_))
1214                         relock_fail = true;
1215                 sorted[nr_sorted++] = iter->idx;
1216         }
1217
1218         if (!relock_fail) {
1219                 trans->in_traverse_all = false;
1220                 return 0;
1221         }
1222
1223 #define btree_iter_cmp_by_idx(_l, _r)                           \
1224                 btree_iter_lock_cmp(&trans->iters[_l], &trans->iters[_r])
1225
1226         bubble_sort(sorted, nr_sorted, btree_iter_cmp_by_idx);
1227 #undef btree_iter_cmp_by_idx
1228
1229         for (i = nr_sorted - 2; i >= 0; --i) {
1230                 struct btree_iter *iter1 = trans->iters + sorted[i];
1231                 struct btree_iter *iter2 = trans->iters + sorted[i + 1];
1232
1233                 if (iter1->btree_id == iter2->btree_id &&
1234                     iter1->locks_want < iter2->locks_want)
1235                         __bch2_btree_iter_upgrade(iter1, iter2->locks_want);
1236                 else if (!iter1->locks_want && iter2->locks_want)
1237                         __bch2_btree_iter_upgrade(iter1, 1);
1238         }
1239
1240         bch2_trans_unlock(trans);
1241         cond_resched();
1242
1243         if (unlikely(ret == -ENOMEM)) {
1244                 struct closure cl;
1245
1246                 closure_init_stack(&cl);
1247
1248                 do {
1249                         ret = bch2_btree_cache_cannibalize_lock(c, &cl);
1250                         closure_sync(&cl);
1251                 } while (ret);
1252         }
1253
1254         if (unlikely(ret == -EIO)) {
1255                 trans->error = true;
1256                 goto out;
1257         }
1258
1259         BUG_ON(ret && ret != -EINTR);
1260
1261         /* Now, redo traversals in correct order: */
1262         for (i = 0; i < nr_sorted; i++) {
1263                 unsigned idx = sorted[i];
1264
1265                 /*
1266                  * successfully traversing one iterator can cause another to be
1267                  * unlinked, in btree_key_cache_fill()
1268                  */
1269                 if (!(trans->iters_linked & (1ULL << idx)))
1270                         continue;
1271
1272                 ret = btree_iter_traverse_one(&trans->iters[idx], _THIS_IP_);
1273                 if (ret)
1274                         goto retry_all;
1275         }
1276
1277         if (hweight64(trans->iters_live) > 1)
1278                 ret = -EINTR;
1279         else
1280                 trans_for_each_iter(trans, iter)
1281                         if (iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT) {
1282                                 ret = -EINTR;
1283                                 break;
1284                         }
1285 out:
1286         bch2_btree_cache_cannibalize_unlock(c);
1287
1288         trans->in_traverse_all = false;
1289
1290         trace_trans_traverse_all(trans->ip, trace_ip);
1291         return ret;
1292 }
1293
1294 int bch2_btree_iter_traverse_all(struct btree_trans *trans)
1295 {
1296         return __btree_iter_traverse_all(trans, 0, _RET_IP_);
1297 }
1298
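/*
 * Check whether the node at level @l is still usable: it must be a real node
 * we can relock, and iter->real_pos must not have moved outside it in the
 * direction indicated by @check_pos:
 */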
1299 static inline bool btree_iter_good_node(struct btree_iter *iter,
1300                                         unsigned l, int check_pos)
1301 {
1302         if (!is_btree_node(iter, l) ||
1303             !bch2_btree_node_relock(iter, l))
1304                 return false;
1305
1306         if (check_pos < 0 && btree_iter_pos_before_node(iter, iter->l[l].b))
1307                 return false;
1308         if (check_pos > 0 && btree_iter_pos_after_node(iter, iter->l[l].b))
1309                 return false;
1310         return true;
1311 }
1312
1313 static inline unsigned btree_iter_up_until_good_node(struct btree_iter *iter,
1314                                                      int check_pos)
1315 {
1316         unsigned l = iter->level;
1317
1318         while (btree_iter_node(iter, l) &&
1319                !btree_iter_good_node(iter, l, check_pos)) {
1320                 btree_node_unlock(iter, l);
1321                 iter->l[l].b = BTREE_ITER_NO_NODE_UP;
1322                 l++;
1323         }
1324
1325         return l;
1326 }
1327
1328 /*
1329  * This is the main state machine for walking down the btree - walks down to a
1330  * specified depth
1331  *
1332  * Returns 0 on success, -EIO on error (error reading in a btree node).
1333  *
1334  * On error, caller (peek_node()/peek_key()) must return NULL; the error is
1335  * stashed in the iterator and returned from bch2_trans_exit().
1336  */
1337 static int btree_iter_traverse_one(struct btree_iter *iter,
1338                                    unsigned long trace_ip)
1339 {
1340         unsigned depth_want = iter->level;
1341         int ret = 0;
1342
1343         /*
1344          * if we need interior nodes locked, call btree_iter_relock() to make
1345          * sure we walk back up enough that we lock them:
1346          */
1347         if (iter->uptodate == BTREE_ITER_NEED_RELOCK ||
1348             iter->locks_want > 1)
1349                 bch2_btree_iter_relock(iter, _THIS_IP_);
1350
1351         if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
1352                 ret = bch2_btree_iter_traverse_cached(iter);
1353                 goto out;
1354         }
1355
1356         if (iter->uptodate < BTREE_ITER_NEED_RELOCK)
1357                 goto out;
1358
1359         if (unlikely(iter->level >= BTREE_MAX_DEPTH))
1360                 goto out;
1361
1362         iter->level = btree_iter_up_until_good_node(iter, 0);
1363
1364         /*
1365          * Note: iter->l[iter->level].b may be temporarily NULL here - that
1366          * would indicate to other code that we got to the end of the btree,
1367          * here it indicates that relocking the root failed - it's critical that
1368          * btree_iter_lock_root() comes next and that it can't fail
1369          */
1370         while (iter->level > depth_want) {
1371                 ret = btree_iter_node(iter, iter->level)
1372                         ? btree_iter_down(iter, trace_ip)
1373                         : btree_iter_lock_root(iter, depth_want, trace_ip);
1374                 if (unlikely(ret)) {
1375                         if (ret == 1) {
1376                                 /*
1377                                  * Got to the end of the btree (in
1378                                  * BTREE_ITER_NODES mode)
1379                                  */
1380                                 ret = 0;
1381                                 goto out;
1382                         }
1383
1384                         iter->level = depth_want;
1385
1386                         if (ret == -EIO) {
1387                                 iter->flags |= BTREE_ITER_ERROR;
1388                                 iter->l[iter->level].b =
1389                                         BTREE_ITER_NO_NODE_ERROR;
1390                         } else {
1391                                 iter->l[iter->level].b =
1392                                         BTREE_ITER_NO_NODE_DOWN;
1393                         }
1394                         goto out;
1395                 }
1396         }
1397
1398         iter->uptodate = BTREE_ITER_NEED_PEEK;
1399 out:
1400         trace_iter_traverse(iter->trans->ip, trace_ip,
1401                             iter->btree_id, &iter->real_pos, ret);
1402         bch2_btree_iter_verify(iter);
1403         return ret;
1404 }
1405
1406 static int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
1407 {
1408         struct btree_trans *trans = iter->trans;
1409         int ret;
1410
1411         ret =   bch2_trans_cond_resched(trans) ?:
1412                 btree_iter_traverse_one(iter, _RET_IP_);
1413         if (unlikely(ret))
1414                 ret = __btree_iter_traverse_all(trans, ret, _RET_IP_);
1415
1416         return ret;
1417 }
1418
1419 /*
1420  * Note:
1421  * bch2_btree_iter_traverse() is for external users, btree_iter_traverse() is
1422  * for internal btree iterator users
1423  *
1424  * bch2_btree_iter_traverse sets iter->real_pos to iter->pos,
1425  * btree_iter_traverse() does not:
1426  */
1427 static inline int __must_check
1428 btree_iter_traverse(struct btree_iter *iter)
1429 {
1430         return iter->uptodate >= BTREE_ITER_NEED_RELOCK
1431                 ? __bch2_btree_iter_traverse(iter)
1432                 : 0;
1433 }
1434
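/*
 * Rough usage sketch (illustrative only, not from this file; the btree id,
 * position and flags are arbitrary examples):
 *
 *	struct btree_trans trans;
 *	struct btree_iter *iter;
 *	int ret;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, pos, 0);
 *	ret = bch2_btree_iter_traverse(iter);
 *	if (!ret)
 *		... use bch2_btree_iter_peek()/peek_slot() ...
 *	bch2_trans_iter_put(&trans, iter);
 *	bch2_trans_exit(&trans);
 */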
1435 int __must_check
1436 bch2_btree_iter_traverse(struct btree_iter *iter)
1437 {
1438         int ret;
1439
1440         btree_iter_set_search_pos(iter, btree_iter_search_key(iter));
1441
1442         ret = btree_iter_traverse(iter);
1443         if (ret)
1444                 return ret;
1445
1446         iter->should_be_locked = true;
1447         return 0;
1448 }
1449
1450 /* Iterate across nodes (leaf and interior nodes) */
1451
1452 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
1453 {
1454         struct btree *b;
1455         int ret;
1456
1457         EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES);
1458         bch2_btree_iter_verify(iter);
1459
1460         ret = btree_iter_traverse(iter);
1461         if (ret)
1462                 return NULL;
1463
1464         b = btree_iter_node(iter, iter->level);
1465         if (!b)
1466                 return NULL;
1467
1468         BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
1469
1470         iter->pos = iter->real_pos = b->key.k.p;
1471
1472         bch2_btree_iter_verify(iter);
1473         iter->should_be_locked = true;
1474
1475         return b;
1476 }
1477
1478 struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
1479 {
1480         struct btree *b;
1481         int ret;
1482
1483         EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES);
1484         bch2_btree_iter_verify(iter);
1485
1486         /* already got to end? */
1487         if (!btree_iter_node(iter, iter->level))
1488                 return NULL;
1489
1490         bch2_trans_cond_resched(iter->trans);
1491
1492         btree_node_unlock(iter, iter->level);
1493         iter->l[iter->level].b = BTREE_ITER_NO_NODE_UP;
1494         iter->level++;
1495
1496         btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
1497         ret = btree_iter_traverse(iter);
1498         if (ret)
1499                 return NULL;
1500
1501         /* got to end? */
1502         b = btree_iter_node(iter, iter->level);
1503         if (!b)
1504                 return NULL;
1505
1506         if (bpos_cmp(iter->pos, b->key.k.p) < 0) {
1507                 /*
1508                  * Haven't gotten to the end of the parent node: go back down to
1509                  * the next child node
1510                  */
1511                 btree_iter_set_search_pos(iter, bpos_successor(iter->pos));
1512
1513                 /* Unlock to avoid screwing up our lock invariants: */
1514                 btree_node_unlock(iter, iter->level);
1515
1516                 iter->level = iter->min_depth;
1517                 btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
1518                 bch2_btree_iter_verify(iter);
1519
1520                 ret = btree_iter_traverse(iter);
1521                 if (ret)
1522                         return NULL;
1523
1524                 b = iter->l[iter->level].b;
1525         }
1526
1527         iter->pos = iter->real_pos = b->key.k.p;
1528
1529         bch2_btree_iter_verify(iter);
1530         iter->should_be_locked = true;
1531
1532         return b;
1533 }
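
/*
 * Illustrative sketch (editor's addition): walking every node of a btree with
 * a BTREE_ITER_NODES iterator - children are returned before their parent,
 * ending with the root.  bch2_trans_get_node_iter() is defined further down
 * in this file; locks_want/depth of 0 are just example values:
 *
 *	static void walk_nodes(struct btree_trans *trans, enum btree_id id)
 *	{
 *		struct btree_iter *iter;
 *		struct btree *b;
 *
 *		iter = bch2_trans_get_node_iter(trans, id, POS_MIN, 0, 0, 0);
 *
 *		for (b = bch2_btree_iter_peek_node(iter);
 *		     b;
 *		     b = bch2_btree_iter_next_node(iter))
 *			pr_info("node at level %u\n", b->c.level);
 *
 *		bch2_trans_iter_put(trans, iter);
 *	}
 */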
1534
1535 /* Iterate across keys (in leaf nodes only) */
1536
1537 static void btree_iter_set_search_pos(struct btree_iter *iter, struct bpos new_pos)
1538 {
1539         struct bpos old_pos = iter->real_pos;
1540         int cmp = bpos_cmp(new_pos, iter->real_pos);
1541         unsigned l = iter->level;
1542
1543         if (!cmp)
1544                 goto out;
1545
1546         iter->real_pos = new_pos;
1547         iter->should_be_locked = false;
1548
1549         if (unlikely(btree_iter_type(iter) == BTREE_ITER_CACHED)) {
1550                 btree_node_unlock(iter, 0);
1551                 iter->l[0].b = BTREE_ITER_NO_NODE_CACHED;
1552                 btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
1553                 return;
1554         }
1555
1556         l = btree_iter_up_until_good_node(iter, cmp);
1557
1558         if (btree_iter_node(iter, l)) {
1559                 /*
1560                  * We might have to skip over many keys, or just a few: try
1561                  * advancing the node iterator, and if we have to skip over too
1562                  * many keys just reinit it instead. We also reinit when rewinding,
1563                  * since rewinding the node iterator is expensive.
1564                  */
1565                 if (cmp < 0 ||
1566                     !btree_iter_advance_to_pos(iter, &iter->l[l], 8))
1567                         __btree_iter_init(iter, l);
1568
1569                 /* Don't leave it locked if we're not supposed to: */
1570                 if (btree_lock_want(iter, l) == BTREE_NODE_UNLOCKED)
1571                         btree_node_unlock(iter, l);
1572         }
1573 out:
1574         if (l != iter->level)
1575                 btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
1576         else
1577                 btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
1578
1579         bch2_btree_iter_verify(iter);
1580 #ifdef CONFIG_BCACHEFS_DEBUG
1581         trace_iter_set_search_pos(iter->trans->ip, _RET_IP_,
1582                                   iter->btree_id,
1583                                   &old_pos, &new_pos, l);
1584 #endif
1585 }
1586
1587 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
1588 {
1589         struct bpos pos = iter->k.p;
1590         bool ret = bpos_cmp(pos, POS_MAX) != 0;
1591
1592         if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
1593                 pos = bkey_successor(iter, pos);
1594         bch2_btree_iter_set_pos(iter, pos);
1595         return ret;
1596 }
1597
1598 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
1599 {
1600         struct bpos pos = bkey_start_pos(&iter->k);
1601         bool ret = bpos_cmp(pos, POS_MIN) != 0;
1602
1603         if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
1604                 pos = bkey_predecessor(iter, pos);
1605         bch2_btree_iter_set_pos(iter, pos);
1606         return ret;
1607 }
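
/*
 * Illustrative sketch (editor's addition): advance/rewind only step the
 * iterator's position past the key it currently points at; they return false
 * once the position is already clamped at POS_MAX/POS_MIN.  That return value
 * is how the next/prev wrappers below detect the end of the keyspace, e.g.:
 *
 *	do {
 *		k = bch2_btree_iter_peek_slot(iter);
 *		ret = bkey_err(k);
 *		...
 *	} while (!ret && bch2_btree_iter_advance(iter));
 */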
1608
1609 static inline bool btree_iter_set_pos_to_next_leaf(struct btree_iter *iter)
1610 {
1611         struct bpos next_pos = iter->l[0].b->key.k.p;
1612         bool ret = bpos_cmp(next_pos, POS_MAX) != 0;
1613
1614         /*
1615          * Typically, we don't want to modify iter->pos here, since that
1616          * indicates where we searched from - unless we got to the end of the
1617          * btree, in which case we want iter->pos to reflect that:
1618          */
1619         if (ret)
1620                 btree_iter_set_search_pos(iter, bpos_successor(next_pos));
1621         else
1622                 bch2_btree_iter_set_pos(iter, POS_MAX);
1623
1624         return ret;
1625 }
1626
1627 static inline bool btree_iter_set_pos_to_prev_leaf(struct btree_iter *iter)
1628 {
1629         struct bpos next_pos = iter->l[0].b->data->min_key;
1630         bool ret = bpos_cmp(next_pos, POS_MIN) != 0;
1631
1632         if (ret)
1633                 btree_iter_set_search_pos(iter, bpos_predecessor(next_pos));
1634         else
1635                 bch2_btree_iter_set_pos(iter, POS_MIN);
1636
1637         return ret;
1638 }
1639
1640 static struct bkey_i *btree_trans_peek_updates(struct btree_trans *trans,
1641                                                enum btree_id btree_id, struct bpos pos)
1642 {
1643         struct btree_insert_entry *i;
1644
1645         trans_for_each_update2(trans, i)
1646                 if ((cmp_int(btree_id,  i->iter->btree_id) ?:
1647                      bkey_cmp(pos,      i->k->k.p)) <= 0) {
1648                         if (btree_id == i->iter->btree_id)
1649                                 return i->k;
1650                         break;
1651                 }
1652
1653         return NULL;
1654 }
1655
1656 static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter, bool with_updates)
1657 {
1658         struct bpos search_key = btree_iter_search_key(iter);
1659         struct bkey_i *next_update;
1660         struct bkey_s_c k;
1661         int ret;
1662
1663         EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
1664         bch2_btree_iter_verify(iter);
1665         bch2_btree_iter_verify_entry_exit(iter);
1666 start:
1667         next_update = with_updates
1668                 ? btree_trans_peek_updates(iter->trans, iter->btree_id, search_key)
1669                 : NULL;
1670         btree_iter_set_search_pos(iter, search_key);
1671
1672         while (1) {
1673                 ret = btree_iter_traverse(iter);
1674                 if (unlikely(ret))
1675                         return bkey_s_c_err(ret);
1676
1677                 k = btree_iter_level_peek(iter, &iter->l[0]);
1678
1679                 if (next_update &&
1680                     bpos_cmp(next_update->k.p, iter->real_pos) <= 0)
1681                         k = bkey_i_to_s_c(next_update);
1682
1683                 if (likely(k.k)) {
1684                         if (bkey_deleted(k.k)) {
1685                                 search_key = bkey_successor(iter, k.k->p);
1686                                 goto start;
1687                         }
1688
1689                         break;
1690                 }
1691
1692                 if (!btree_iter_set_pos_to_next_leaf(iter))
1693                         return bkey_s_c_null;
1694         }
1695
1696         /*
1697          * iter->pos should be monotonically increasing, and always be equal to
1698          * the key we just returned - except extents can straddle iter->pos:
1699          */
1700         if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
1701                 iter->pos = k.k->p;
1702         else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
1703                 iter->pos = bkey_start_pos(k.k);
1704
1705         bch2_btree_iter_verify_entry_exit(iter);
1706         bch2_btree_iter_verify(iter);
1707         iter->should_be_locked = true;
1708         return k;
1709 }
1710
1711 /**
1712  * bch2_btree_iter_peek: returns first key greater than or equal to iterator's
1713  * current position
1714  */
1715 struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
1716 {
1717         return __btree_iter_peek(iter, false);
1718 }
1719
1720 /**
1721  * bch2_btree_iter_next: returns first key greater than iterator's current
1722  * position
1723  */
1724 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
1725 {
1726         if (!bch2_btree_iter_advance(iter))
1727                 return bkey_s_c_null;
1728
1729         return bch2_btree_iter_peek(iter);
1730 }
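
/*
 * Illustrative sketch (editor's addition): the typical forward scan over a
 * btree.  bch2_trans_get_iter() is assumed to be the wrapper in btree_iter.h
 * that forwards to __bch2_trans_get_iter() below:
 *
 *	static int scan(struct btree_trans *trans, enum btree_id id)
 *	{
 *		struct btree_iter *iter;
 *		struct bkey_s_c k;
 *		int ret = 0;
 *
 *		iter = bch2_trans_get_iter(trans, id, POS_MIN, 0);
 *
 *		for (k = bch2_btree_iter_peek(iter);
 *		     k.k && !(ret = bkey_err(k));
 *		     k = bch2_btree_iter_next(iter)) {
 *			// use k - deleted keys are never returned here
 *		}
 *
 *		bch2_trans_iter_put(trans, iter);
 *		return ret;	// -EINTR means the transaction must restart
 *	}
 *
 * The for_each_btree_key() macro in btree_iter.h wraps essentially this
 * pattern.
 */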
1731
1732 struct bkey_s_c bch2_btree_iter_peek_with_updates(struct btree_iter *iter)
1733 {
1734         return __btree_iter_peek(iter, true);
1735 }
1736
1737 struct bkey_s_c bch2_btree_iter_next_with_updates(struct btree_iter *iter)
1738 {
1739         if (!bch2_btree_iter_advance(iter))
1740                 return bkey_s_c_null;
1741
1742         return bch2_btree_iter_peek_with_updates(iter);
1743 }
1744
1745 /**
1746  * bch2_btree_iter_peek_prev: returns first key less than or equal to
1747  * iterator's current position
1748  */
1749 struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
1750 {
1751         struct btree_iter_level *l = &iter->l[0];
1752         struct bkey_s_c k;
1753         int ret;
1754
1755         EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
1756         bch2_btree_iter_verify(iter);
1757         bch2_btree_iter_verify_entry_exit(iter);
1758
1759         btree_iter_set_search_pos(iter, iter->pos);
1760
1761         while (1) {
1762                 ret = btree_iter_traverse(iter);
1763                 if (unlikely(ret)) {
1764                         k = bkey_s_c_err(ret);
1765                         goto no_key;
1766                 }
1767
1768                 k = btree_iter_level_peek(iter, l);
1769                 if (!k.k ||
1770                     ((iter->flags & BTREE_ITER_IS_EXTENTS)
1771                      ? bkey_cmp(bkey_start_pos(k.k), iter->pos) >= 0
1772                      : bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0))
1773                         k = btree_iter_level_prev(iter, l);
1774
1775                 if (likely(k.k))
1776                         break;
1777
1778                 if (!btree_iter_set_pos_to_prev_leaf(iter)) {
1779                         k = bkey_s_c_null;
1780                         goto no_key;
1781                 }
1782         }
1783
1784         EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
1785
1786         /* Extents can straddle iter->pos: */
1787         if (bkey_cmp(k.k->p, iter->pos) < 0)
1788                 iter->pos = k.k->p;
1789 out:
1790         bch2_btree_iter_verify_entry_exit(iter);
1791         bch2_btree_iter_verify(iter);
1792         iter->should_be_locked = true;
1793         return k;
1794 no_key:
1795         /*
1796          * btree_iter_level_peek() may have set iter->k to a key we didn't want, and
1797          * then we errored going to the previous leaf - make sure iter->k is
1798          * consistent with iter->pos:
1799          */
1800         bkey_init(&iter->k);
1801         iter->k.p = iter->pos;
1802         goto out;
1803 }
1804
1805 /**
1806  * bch2_btree_iter_prev: returns first key less than iterator's current
1807  * position
1808  */
1809 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
1810 {
1811         if (!bch2_btree_iter_rewind(iter))
1812                 return bkey_s_c_null;
1813
1814         return bch2_btree_iter_peek_prev(iter);
1815 }
1816
1817 static inline struct bkey_s_c
1818 __bch2_btree_iter_peek_slot_extents(struct btree_iter *iter)
1819 {
1820         struct bkey_s_c k;
1821         struct bpos pos, next_start;
1822
1823         /* keys & holes can't span inode numbers: */
1824         if (iter->pos.offset == KEY_OFFSET_MAX) {
1825                 if (iter->pos.inode == KEY_INODE_MAX)
1826                         return bkey_s_c_null;
1827
1828                 bch2_btree_iter_set_pos(iter, bkey_successor(iter, iter->pos));
1829         }
1830
1831         pos = iter->pos;
1832         k = bch2_btree_iter_peek(iter);
1833         iter->pos = pos;
1834
1835         if (bkey_err(k))
1836                 return k;
1837
1838         if (k.k && bkey_cmp(bkey_start_pos(k.k), iter->pos) <= 0)
1839                 return k;
1840
1841         next_start = k.k ? bkey_start_pos(k.k) : POS_MAX;
1842
1843         bkey_init(&iter->k);
1844         iter->k.p = iter->pos;
1845         bch2_key_resize(&iter->k,
1846                         min_t(u64, KEY_SIZE_MAX,
1847                               (next_start.inode == iter->pos.inode
1848                                ? next_start.offset
1849                                : KEY_OFFSET_MAX) -
1850                               iter->pos.offset));
1851
1852         EBUG_ON(!iter->k.size);
1853
1854         bch2_btree_iter_verify_entry_exit(iter);
1855         bch2_btree_iter_verify(iter);
1856
1857         return (struct bkey_s_c) { &iter->k, NULL };
1858 }
1859
1860 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
1861 {
1862         struct btree_iter_level *l = &iter->l[0];
1863         struct bkey_s_c k;
1864         int ret;
1865
1866         EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
1867         bch2_btree_iter_verify(iter);
1868         bch2_btree_iter_verify_entry_exit(iter);
1869
1870         btree_iter_set_search_pos(iter, btree_iter_search_key(iter));
1871
1872         if (iter->flags & BTREE_ITER_IS_EXTENTS)
1873                 return __bch2_btree_iter_peek_slot_extents(iter);
1874
1875         ret = btree_iter_traverse(iter);
1876         if (unlikely(ret))
1877                 return bkey_s_c_err(ret);
1878
1879         k = btree_iter_level_peek_all(iter, l, &iter->k);
1880
1881         EBUG_ON(k.k && bkey_deleted(k.k) && bkey_cmp(k.k->p, iter->pos) == 0);
1882
1883         if (!k.k || bkey_cmp(iter->pos, k.k->p)) {
1884                 /* hole */
1885                 bkey_init(&iter->k);
1886                 iter->k.p = iter->pos;
1887                 k = (struct bkey_s_c) { &iter->k, NULL };
1888         }
1889
1890         bch2_btree_iter_verify_entry_exit(iter);
1891         bch2_btree_iter_verify(iter);
1892         iter->should_be_locked = true;
1893
1894         return k;
1895 }
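
/*
 * Illustrative sketch (editor's addition): unlike peek(), peek_slot() returns
 * a key for every position - either the key stored there or a synthesized
 * hole (sized to cover the gap, for extent btrees).  A point lookup might
 * look like this, assuming the bch2_trans_get_iter() wrapper and the
 * BTREE_ITER_SLOTS flag from btree_iter.h/btree_types.h:
 *
 *	iter = bch2_trans_get_iter(trans, id, pos, BTREE_ITER_SLOTS);
 *	k = bch2_btree_iter_peek_slot(iter);
 *	ret = bkey_err(k);
 *	if (!ret && !bkey_deleted(k.k))
 *		pr_info("found a real key at pos\n");
 *	bch2_trans_iter_put(trans, iter);
 */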
1896
1897 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
1898 {
1899         if (!bch2_btree_iter_advance(iter))
1900                 return bkey_s_c_null;
1901
1902         return bch2_btree_iter_peek_slot(iter);
1903 }
1904
1905 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
1906 {
1907         if (!bch2_btree_iter_rewind(iter))
1908                 return bkey_s_c_null;
1909
1910         return bch2_btree_iter_peek_slot(iter);
1911 }
1912
1913 struct bkey_s_c bch2_btree_iter_peek_cached(struct btree_iter *iter)
1914 {
1915         struct bkey_cached *ck;
1916         int ret;
1917
1918         EBUG_ON(btree_iter_type(iter) != BTREE_ITER_CACHED);
1919         bch2_btree_iter_verify(iter);
1920
1921         ret = btree_iter_traverse(iter);
1922         if (unlikely(ret))
1923                 return bkey_s_c_err(ret);
1924
1925         ck = (void *) iter->l[0].b;
1926
1927         EBUG_ON(iter->btree_id != ck->key.btree_id ||
1928                 bkey_cmp(iter->pos, ck->key.pos));
1929         BUG_ON(!ck->valid);
1930
1931         iter->should_be_locked = true;
1932
1933         return bkey_i_to_s_c(ck->k);
1934 }
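
/*
 * Illustrative sketch (editor's addition): cached iterators read through the
 * btree key cache rather than the btree itself.  Assuming the
 * bch2_trans_get_iter() wrapper and the BTREE_ITER_CACHED/BTREE_ITER_INTENT
 * flags:
 *
 *	iter = bch2_trans_get_iter(trans, id, pos,
 *				   BTREE_ITER_CACHED|BTREE_ITER_INTENT);
 *	k = bch2_btree_iter_peek_cached(iter);
 *	ret = bkey_err(k);
 *	...
 *	bch2_trans_iter_put(trans, iter);
 */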
1935
1936 static inline void bch2_btree_iter_init(struct btree_trans *trans,
1937                         struct btree_iter *iter, enum btree_id btree_id)
1938 {
1939         struct bch_fs *c = trans->c;
1940         unsigned i;
1941
1942         iter->trans                     = trans;
1943         iter->uptodate                  = BTREE_ITER_NEED_TRAVERSE;
1944         iter->btree_id                  = btree_id;
1945         iter->real_pos                  = POS_MIN;
1946         iter->level                     = 0;
1947         iter->min_depth                 = 0;
1948         iter->locks_want                = 0;
1949         iter->nodes_locked              = 0;
1950         iter->nodes_intent_locked       = 0;
1951         for (i = 0; i < ARRAY_SIZE(iter->l); i++)
1952                 iter->l[i].b            = BTREE_ITER_NO_NODE_INIT;
1953
1954         prefetch(c->btree_roots[btree_id].b);
1955 }
1956
1957 /* new transactional stuff: */
1958
1959 static inline void __bch2_trans_iter_free(struct btree_trans *trans,
1960                                           unsigned idx)
1961 {
1962         __bch2_btree_iter_unlock(&trans->iters[idx]);
1963         trans->iters_linked             &= ~(1ULL << idx);
1964         trans->iters_live               &= ~(1ULL << idx);
1965         trans->iters_touched            &= ~(1ULL << idx);
1966 }
1967
1968 int bch2_trans_iter_put(struct btree_trans *trans,
1969                         struct btree_iter *iter)
1970 {
1971         int ret;
1972
1973         if (IS_ERR_OR_NULL(iter))
1974                 return 0;
1975
1976         BUG_ON(trans->iters + iter->idx != iter);
1977         BUG_ON(!btree_iter_live(trans, iter));
1978
1979         ret = btree_iter_err(iter);
1980
1981         if (!(trans->iters_touched & (1ULL << iter->idx)) &&
1982             !(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT))
1983                 __bch2_trans_iter_free(trans, iter->idx);
1984
1985         trans->iters_live       &= ~(1ULL << iter->idx);
1986         return ret;
1987 }
1988
1989 int bch2_trans_iter_free(struct btree_trans *trans,
1990                          struct btree_iter *iter)
1991 {
1992         if (IS_ERR_OR_NULL(iter))
1993                 return 0;
1994
1995         set_btree_iter_dontneed(trans, iter);
1996
1997         return bch2_trans_iter_put(trans, iter);
1998 }
1999
2000 noinline __cold
2001 static void btree_trans_iter_alloc_fail(struct btree_trans *trans)
2002 {
2003
2004         struct btree_iter *iter;
2005         struct btree_insert_entry *i;
2006         char buf[100];
2007
2008         trans_for_each_iter(trans, iter)
2009                 printk(KERN_ERR "iter: btree %s pos %s%s%s%s %pS\n",
2010                        bch2_btree_ids[iter->btree_id],
2011                        (bch2_bpos_to_text(&PBUF(buf), iter->pos), buf),
2012                        btree_iter_live(trans, iter) ? " live" : "",
2013                        (trans->iters_touched & (1ULL << iter->idx)) ? " touched" : "",
2014                        iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT ? " keep" : "",
2015                        (void *) iter->ip_allocated);
2016
2017         trans_for_each_update(trans, i) {
2018                 char buf[300];
2019
2020                 bch2_bkey_val_to_text(&PBUF(buf), trans->c, bkey_i_to_s_c(i->k));
2021                 printk(KERN_ERR "update: btree %s %s\n",
2022                        bch2_btree_ids[i->iter->btree_id], buf);
2023         }
2024         panic("trans iter overflow\n");
2025 }
2026
2027 static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *trans)
2028 {
2029         unsigned idx;
2030
2031         if (unlikely(trans->iters_linked ==
2032                      ~((~0ULL << 1) << (BTREE_ITER_MAX - 1))))
2033                 btree_trans_iter_alloc_fail(trans);
2034
2035         idx = __ffs64(~trans->iters_linked);
2036
2037         trans->iters_linked     |= 1ULL << idx;
2038         trans->iters[idx].idx    = idx;
2039         trans->iters[idx].flags  = 0;
2040         return &trans->iters[idx];
2041 }
2042
2043 static inline void btree_iter_copy(struct btree_iter *dst,
2044                                    struct btree_iter *src)
2045 {
2046         unsigned i, idx = dst->idx;
2047
2048         *dst = *src;
2049         dst->idx = idx;
2050         dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
2051
2052         for (i = 0; i < BTREE_MAX_DEPTH; i++)
2053                 if (btree_node_locked(dst, i))
2054                         six_lock_increment(&dst->l[i].b->c.lock,
2055                                            __btree_lock_want(dst, i));
2056
2057         dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
2058         dst->flags &= ~BTREE_ITER_SET_POS_AFTER_COMMIT;
2059 }
2060
2061 struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
2062                                          unsigned btree_id, struct bpos pos,
2063                                          unsigned locks_want,
2064                                          unsigned depth,
2065                                          unsigned flags)
2066 {
2067         struct btree_iter *iter, *best = NULL;
2068         struct bpos real_pos, pos_min = POS_MIN;
2069
2070         if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES &&
2071             btree_node_type_is_extents(btree_id) &&
2072             !(flags & BTREE_ITER_NOT_EXTENTS) &&
2073             !(flags & BTREE_ITER_ALL_SNAPSHOTS))
2074                 flags |= BTREE_ITER_IS_EXTENTS;
2075
2076         if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES &&
2077             !btree_type_has_snapshots(btree_id))
2078                 flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
2079
2080         if (!(flags & BTREE_ITER_ALL_SNAPSHOTS))
2081                 pos.snapshot = btree_type_has_snapshots(btree_id)
2082                         ? U32_MAX : 0;
2083
2084         real_pos = pos;
2085
2086         if ((flags & BTREE_ITER_IS_EXTENTS) &&
2087             bkey_cmp(pos, POS_MAX))
2088                 real_pos = bpos_nosnap_successor(pos);
2089
2090         trans_for_each_iter(trans, iter) {
2091                 if (btree_iter_type(iter) != (flags & BTREE_ITER_TYPE))
2092                         continue;
2093
2094                 if (iter->btree_id != btree_id)
2095                         continue;
2096
2097                 if (best) {
2098                         int cmp = bkey_cmp(bpos_diff(best->real_pos, real_pos),
2099                                            bpos_diff(iter->real_pos, real_pos));
2100
2101                         if (cmp < 0 ||
2102                             ((cmp == 0 && btree_iter_keep(trans, iter))))
2103                                 continue;
2104                 }
2105
2106                 best = iter;
2107         }
2108
2109         if (!best) {
2110                 iter = btree_trans_iter_alloc(trans);
2111                 bch2_btree_iter_init(trans, iter, btree_id);
2112         } else if (btree_iter_keep(trans, best)) {
2113                 iter = btree_trans_iter_alloc(trans);
2114                 btree_iter_copy(iter, best);
2115         } else {
2116                 iter = best;
2117         }
2118
2119         trans->iters_live       |= 1ULL << iter->idx;
2120         trans->iters_touched    |= 1ULL << iter->idx;
2121
2122         iter->flags = flags;
2123
2124         iter->snapshot = pos.snapshot;
2125
2126         /*
2127          * If the iterator has locks_want greater than requested, we explicitly
2128          * do not downgrade it here - on a transaction restart (e.g. because a
2129          * btree node split needed to upgrade locks), we might be putting and
2130          * getting this iterator again. Downgrading iterators only happens via
2131          * an explicit bch2_trans_downgrade().
2132          */
2133
2134         locks_want = min(locks_want, BTREE_MAX_DEPTH);
2135         if (locks_want > iter->locks_want) {
2136                 iter->locks_want = locks_want;
2137                 btree_iter_get_locks(iter, true, _THIS_IP_);
2138         }
2139
2140         while (iter->level != depth) {
2141                 btree_node_unlock(iter, iter->level);
2142                 iter->l[iter->level].b = BTREE_ITER_NO_NODE_INIT;
2143                 iter->uptodate = BTREE_ITER_NEED_TRAVERSE;
2144                 if (iter->level < depth)
2145                         iter->level++;
2146                 else
2147                         iter->level--;
2148         }
2149
2150         iter->min_depth = depth;
2151
2152         bch2_btree_iter_set_pos(iter, pos);
2153         btree_iter_set_search_pos(iter, real_pos);
2154
2155         trace_trans_get_iter(_RET_IP_, trans->ip,
2156                              btree_id,
2157                              &real_pos, locks_want, iter->uptodate,
2158                              best ? &best->real_pos     : &pos_min,
2159                              best ? best->locks_want    : U8_MAX,
2160                              best ? best->uptodate      : U8_MAX);
2161
2162         return iter;
2163 }
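
/*
 * Illustrative sketch (editor's addition): every iterator obtained here must
 * eventually be released with bch2_trans_iter_put()/_free(), or the leak
 * check in bch2_trans_exit() will fire.  locks_want asks for intent locks on
 * that many levels counting up from the leaf, e.g.:
 *
 *	// intent locks on the leaf and its parent, for an update that may
 *	// split the leaf:
 *	iter = __bch2_trans_get_iter(trans, id, pos, 2, 0, BTREE_ITER_INTENT);
 *	...
 *	ret = bch2_trans_iter_put(trans, iter);
 */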
2164
2165 struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *trans,
2166                                             enum btree_id btree_id,
2167                                             struct bpos pos,
2168                                             unsigned locks_want,
2169                                             unsigned depth,
2170                                             unsigned flags)
2171 {
2172         struct btree_iter *iter =
2173                 __bch2_trans_get_iter(trans, btree_id, pos,
2174                                       locks_want, depth,
2175                                       BTREE_ITER_NODES|
2176                                       BTREE_ITER_NOT_EXTENTS|
2177                                       BTREE_ITER_ALL_SNAPSHOTS|
2178                                       flags);
2179
2180         BUG_ON(bkey_cmp(iter->pos, pos));
2181         BUG_ON(iter->locks_want != min(locks_want, BTREE_MAX_DEPTH));
2182         BUG_ON(iter->level      != depth);
2183         BUG_ON(iter->min_depth  != depth);
2184         iter->ip_allocated = _RET_IP_;
2185
2186         return iter;
2187 }
2188
2189 struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *trans,
2190                                         struct btree_iter *src)
2191 {
2192         struct btree_iter *iter;
2193
2194         iter = btree_trans_iter_alloc(trans);
2195         btree_iter_copy(iter, src);
2196
2197         trans->iters_live |= 1ULL << iter->idx;
2198         /*
2199          * We don't need to preserve this iter since it's cheap to copy it
2200          * again - this will cause trans_iter_put() to free it right away:
2201          * again - this will cause bch2_trans_iter_put() to free it right away:
2202         set_btree_iter_dontneed(trans, iter);
2203
2204         return iter;
2205 }
2206
2207 void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
2208 {
2209         size_t new_top = trans->mem_top + size;
2210         void *p;
2211
2212         if (new_top > trans->mem_bytes) {
2213                 size_t old_bytes = trans->mem_bytes;
2214                 size_t new_bytes = roundup_pow_of_two(new_top);
2215                 void *new_mem;
2216
2217                 WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
2218
2219                 new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
2220                 if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
2221                         new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
2222                         new_bytes = BTREE_TRANS_MEM_MAX;
2223                         kfree(trans->mem);
2224                 }
2225
2226                 if (!new_mem)
2227                         return ERR_PTR(-ENOMEM);
2228
2229                 trans->mem = new_mem;
2230                 trans->mem_bytes = new_bytes;
2231
2232                 if (old_bytes) {
2233                         trace_trans_restart_mem_realloced(trans->ip, _RET_IP_, new_bytes);
2234                         return ERR_PTR(-EINTR);
2235                 }
2236         }
2237
2238         p = trans->mem + trans->mem_top;
2239         trans->mem_top += size;
2240         return p;
2241 }
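
/*
 * Illustrative sketch (editor's addition): memory from bch2_trans_kmalloc()
 * lives until the transaction is reset, so callers never free it themselves.
 * A -EINTR return (after the buffer had to be grown mid-transaction) means
 * the whole transaction must be restarted:
 *
 *	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k) + val_bytes);
 *	if (IS_ERR(k))
 *		return PTR_ERR(k);	// -ENOMEM, or -EINTR to restart
 *	bkey_init(&k->k);
 *
 * (val_bytes is a hypothetical size computed by the caller.)
 */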
2242
2243 inline void bch2_trans_unlink_iters(struct btree_trans *trans)
2244 {
2245         u64 iters = trans->iters_linked &
2246                 ~trans->iters_touched &
2247                 ~trans->iters_live;
2248
2249         while (iters) {
2250                 unsigned idx = __ffs64(iters);
2251
2252                 iters &= ~(1ULL << idx);
2253                 __bch2_trans_iter_free(trans, idx);
2254         }
2255 }
2256
2257 void bch2_trans_reset(struct btree_trans *trans, unsigned flags)
2258 {
2259         struct btree_iter *iter;
2260
2261         trans_for_each_iter(trans, iter)
2262                 iter->flags &= ~(BTREE_ITER_KEEP_UNTIL_COMMIT|
2263                                  BTREE_ITER_SET_POS_AFTER_COMMIT);
2264
2265         bch2_trans_unlink_iters(trans);
2266
2267         trans->iters_touched &= trans->iters_live;
2268
2269         trans->nr_updates               = 0;
2270         trans->nr_updates2              = 0;
2271         trans->mem_top                  = 0;
2272
2273         trans->hooks                    = NULL;
2274         trans->extra_journal_entries    = NULL;
2275         trans->extra_journal_entry_u64s = 0;
2276
2277         if (trans->fs_usage_deltas) {
2278                 trans->fs_usage_deltas->used = 0;
2279                 memset(&trans->fs_usage_deltas->memset_start, 0,
2280                        (void *) &trans->fs_usage_deltas->memset_end -
2281                        (void *) &trans->fs_usage_deltas->memset_start);
2282         }
2283
2284         if (!(flags & TRANS_RESET_NOUNLOCK))
2285                 bch2_trans_cond_resched(trans);
2286
2287         if (!(flags & TRANS_RESET_NOTRAVERSE) &&
2288             trans->iters_linked)
2289                 bch2_btree_iter_traverse_all(trans);
2290 }
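
/*
 * Illustrative sketch (editor's addition): bch2_trans_reset() - usually via
 * the bch2_trans_begin() wrapper, assumed to live in btree_iter.h - is what
 * makes the standard -EINTR restart loop work, unlinking stale iterators and
 * releasing per-transaction memory between attempts:
 *
 *	do {
 *		bch2_trans_reset(trans, 0);
 *
 *		ret = do_the_work(trans);	// hypothetical helper
 *	} while (ret == -EINTR);
 */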
2291
2292 static void bch2_trans_alloc_iters(struct btree_trans *trans, struct bch_fs *c)
2293 {
2294         size_t iters_bytes      = sizeof(struct btree_iter) * BTREE_ITER_MAX;
2295         size_t updates_bytes    = sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
2296         void *p = NULL;
2297
2298         BUG_ON(trans->used_mempool);
2299
2300 #ifdef __KERNEL__
2301         p = this_cpu_xchg(c->btree_iters_bufs->iter, NULL);
2302 #endif
2303         if (!p)
2304                 p = mempool_alloc(&trans->c->btree_iters_pool, GFP_NOFS);
2305
2306         trans->iters            = p; p += iters_bytes;
2307         trans->updates          = p; p += updates_bytes;
2308         trans->updates2         = p; p += updates_bytes;
2309 }
2310
2311 void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
2312                      unsigned expected_nr_iters,
2313                      size_t expected_mem_bytes)
2314         __acquires(&c->btree_trans_barrier)
2315 {
2316         memset(trans, 0, sizeof(*trans));
2317         trans->c                = c;
2318         trans->ip               = _RET_IP_;
2319
2320         /*
2321          * reallocating iterators currently completely breaks
2322          * bch2_trans_iter_put(), we always allocate the max:
2323          * bch2_trans_iter_put(), so we always allocate the max:
2324         bch2_trans_alloc_iters(trans, c);
2325
2326         if (expected_mem_bytes) {
2327                 trans->mem_bytes = roundup_pow_of_two(expected_mem_bytes);
2328                 trans->mem = kmalloc(trans->mem_bytes, GFP_KERNEL|__GFP_NOFAIL);
2329
2330                 if (unlikely(!trans->mem)) {
2331                         trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
2332                         trans->mem_bytes = BTREE_TRANS_MEM_MAX;
2333                 }
2334         }
2335
2336         trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
2337
2338 #ifdef CONFIG_BCACHEFS_DEBUG
2339         trans->pid = current->pid;
2340         mutex_lock(&c->btree_trans_lock);
2341         list_add(&trans->list, &c->btree_trans_list);
2342         mutex_unlock(&c->btree_trans_lock);
2343 #endif
2344 }
2345
2346 int bch2_trans_exit(struct btree_trans *trans)
2347         __releases(&c->btree_trans_barrier)
2348 {
2349         struct bch_fs *c = trans->c;
2350
2351         bch2_trans_unlock(trans);
2352
2353 #ifdef CONFIG_BCACHEFS_DEBUG
2354         if (trans->iters_live) {
2355                 struct btree_iter *iter;
2356
2357                 bch_err(c, "btree iterators leaked!");
2358                 trans_for_each_iter(trans, iter)
2359                         if (btree_iter_live(trans, iter))
2360                                 printk(KERN_ERR "  btree %s allocated at %pS\n",
2361                                        bch2_btree_ids[iter->btree_id],
2362                                        (void *) iter->ip_allocated);
2363                 /* Be noisy about this: */
2364                 bch2_fatal_error(c);
2365         }
2366
2367         mutex_lock(&trans->c->btree_trans_lock);
2368         list_del(&trans->list);
2369         mutex_unlock(&trans->c->btree_trans_lock);
2370 #endif
2371
2372         srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
2373
2374         bch2_journal_preres_put(&trans->c->journal, &trans->journal_preres);
2375
2376         if (trans->fs_usage_deltas) {
2377                 if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
2378                     REPLICAS_DELTA_LIST_MAX)
2379                         mempool_free(trans->fs_usage_deltas,
2380                                      &trans->c->replicas_delta_pool);
2381                 else
2382                         kfree(trans->fs_usage_deltas);
2383         }
2384
2385         if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
2386                 mempool_free(trans->mem, &trans->c->btree_trans_mem_pool);
2387         else
2388                 kfree(trans->mem);
2389
2390 #ifdef __KERNEL__
2391         /*
2392          * Userspace doesn't have a real percpu implementation:
2393          */
2394         trans->iters = this_cpu_xchg(c->btree_iters_bufs->iter, trans->iters);
2395 #endif
2396
2397         if (trans->iters)
2398                 mempool_free(trans->iters, &trans->c->btree_iters_pool);
2399
2400         trans->mem      = (void *) 0x1;
2401         trans->iters    = (void *) 0x1;
2402
2403         return trans->error ? -EIO : 0;
2404 }
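
/*
 * Illustrative sketch (editor's addition): the full lifecycle of a btree
 * transaction, as seen by callers of this file.  bch2_trans_get_iter() is the
 * wrapper assumed from btree_iter.h; error handling is abbreviated:
 *
 *	static int example(struct bch_fs *c, enum btree_id id, struct bpos pos)
 *	{
 *		struct btree_trans trans;
 *		struct btree_iter *iter;
 *		struct bkey_s_c k;
 *		int ret;
 *
 *		bch2_trans_init(&trans, c, 0, 0);
 *
 *		iter = bch2_trans_get_iter(&trans, id, pos, 0);
 *		k = bch2_btree_iter_peek(iter);
 *		ret = bkey_err(k);
 *
 *		bch2_trans_iter_put(&trans, iter);
 *		return bch2_trans_exit(&trans) ?: ret;
 *	}
 */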
2405
2406 static void __maybe_unused
2407 bch2_btree_iter_node_to_text(struct printbuf *out,
2408                              struct btree_bkey_cached_common *_b,
2409                              enum btree_iter_type type)
2410 {
2411         pr_buf(out, "    l=%u %s:",
2412                _b->level, bch2_btree_ids[_b->btree_id]);
2413         bch2_bpos_to_text(out, btree_node_pos(_b, type));
2414 }
2415
2416 #ifdef CONFIG_BCACHEFS_DEBUG
2417 static bool trans_has_btree_nodes_locked(struct btree_trans *trans)
2418 {
2419         struct btree_iter *iter;
2420
2421         trans_for_each_iter(trans, iter)
2422                 if (btree_iter_type(iter) != BTREE_ITER_CACHED &&
2423                     iter->nodes_locked)
2424                         return true;
2425         return false;
2426 }
2427 #endif
2428
2429 void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
2430 {
2431 #ifdef CONFIG_BCACHEFS_DEBUG
2432         struct btree_trans *trans;
2433         struct btree_iter *iter;
2434         struct btree *b;
2435         unsigned l;
2436
2437         mutex_lock(&c->btree_trans_lock);
2438         list_for_each_entry(trans, &c->btree_trans_list, list) {
2439                 if (!trans_has_btree_nodes_locked(trans))
2440                         continue;
2441
2442                 pr_buf(out, "%i %ps\n", trans->pid, (void *) trans->ip);
2443
2444                 trans_for_each_iter(trans, iter) {
2445                         if (!iter->nodes_locked)
2446                                 continue;
2447
2448                         pr_buf(out, "  iter %u %c %s:",
2449                                iter->idx,
2450                                btree_iter_type(iter) == BTREE_ITER_CACHED ? 'c' : 'b',
2451                                bch2_btree_ids[iter->btree_id]);
2452                         bch2_bpos_to_text(out, iter->pos);
2453                         pr_buf(out, "\n");
2454
2455                         for (l = 0; l < BTREE_MAX_DEPTH; l++) {
2456                                 if (btree_node_locked(iter, l)) {
2457                                         pr_buf(out, "    %s l=%u ",
2458                                                btree_node_intent_locked(iter, l) ? "i" : "r", l);
2459                                         bch2_btree_iter_node_to_text(out,
2460                                                         (void *) iter->l[l].b,
2461                                                         btree_iter_type(iter));
2462                                         pr_buf(out, "\n");
2463                                 }
2464                         }
2465                 }
2466
2467                 b = READ_ONCE(trans->locking);
2468                 if (b) {
2469                         iter = &trans->iters[trans->locking_iter_idx];
2470                         pr_buf(out, "  locking iter %u %c l=%u %s:",
2471                                trans->locking_iter_idx,
2472                                btree_iter_type(iter) == BTREE_ITER_CACHED ? 'c' : 'b',
2473                                trans->locking_level,
2474                                bch2_btree_ids[trans->locking_btree_id]);
2475                         bch2_bpos_to_text(out, trans->locking_pos);
2476
2477                         pr_buf(out, " node ");
2478                         bch2_btree_iter_node_to_text(out,
2479                                         (void *) b,
2480                                         btree_iter_type(iter));
2481                         pr_buf(out, "\n");
2482                 }
2483         }
2484         mutex_unlock(&c->btree_trans_lock);
2485 #endif
2486 }
2487
2488 void bch2_fs_btree_iter_exit(struct bch_fs *c)
2489 {
2490         mempool_exit(&c->btree_trans_mem_pool);
2491         mempool_exit(&c->btree_iters_pool);
2492         cleanup_srcu_struct(&c->btree_trans_barrier);
2493 }
2494
2495 int bch2_fs_btree_iter_init(struct bch_fs *c)
2496 {
2497         unsigned nr = BTREE_ITER_MAX;
2498
2499         INIT_LIST_HEAD(&c->btree_trans_list);
2500         mutex_init(&c->btree_trans_lock);
2501
2502         return  init_srcu_struct(&c->btree_trans_barrier) ?:
2503                 mempool_init_kmalloc_pool(&c->btree_iters_pool, 1,
2504                         sizeof(struct btree_iter) * nr +
2505                         sizeof(struct btree_insert_entry) * nr +
2506                         sizeof(struct btree_insert_entry) * nr) ?:
2507                 mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
2508                                           BTREE_TRANS_MEM_MAX);
2509 }