]> git.sesse.net Git - bcachefs-tools-debian/blob - libbcachefs/btree_iter.c
fb5c507e9bcebf90ae666a0aaec0bec7d3313bc0
[bcachefs-tools-debian] / libbcachefs / btree_iter.c
1
2 #include "bcachefs.h"
3 #include "bkey_methods.h"
4 #include "btree_cache.h"
5 #include "btree_iter.h"
6 #include "btree_locking.h"
7 #include "debug.h"
8 #include "extents.h"
9
10 #include <trace/events/bcachefs.h>
11
12 #define BTREE_ITER_NOT_END      ((struct btree *) 1)
13
14 static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
15 {
16         return iter->nodes[l] && iter->nodes[l] != BTREE_ITER_NOT_END;
17 }
18
19 /* Btree node locking: */
20
21 /*
22  * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
23  * succeed:
24  */
25 void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
26 {
27         struct btree_iter *linked;
28
29         EBUG_ON(iter->nodes[b->level] != b);
30         EBUG_ON(iter->lock_seq[b->level] + 1 != b->lock.state.seq);
31
32         for_each_linked_btree_node(iter, b, linked)
33                 linked->lock_seq[b->level] += 2;
34
35         iter->lock_seq[b->level] += 2;
36
37         six_unlock_write(&b->lock);
38 }
39
40 void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
41 {
42         struct btree_iter *linked;
43         unsigned readers = 0;
44
45         EBUG_ON(iter->nodes[b->level] != b);
46         EBUG_ON(iter->lock_seq[b->level] != b->lock.state.seq);
47
48         if (six_trylock_write(&b->lock))
49                 return;
50
51         for_each_linked_btree_iter(iter, linked)
52                 if (linked->nodes[b->level] == b &&
53                     btree_node_read_locked(linked, b->level))
54                         readers++;
55
56         if (likely(!readers)) {
57                 six_lock_write(&b->lock);
58         } else {
59                 /*
60                  * Must drop our read locks before calling six_lock_write() -
61                  * six_unlock() won't do wakeups until the reader count
62                  * goes to 0, and it's safe because we have the node intent
63                  * locked:
64                  */
65                 atomic64_sub(__SIX_VAL(read_lock, readers),
66                              &b->lock.state.counter);
67                 six_lock_write(&b->lock);
68                 atomic64_add(__SIX_VAL(read_lock, readers),
69                              &b->lock.state.counter);
70         }
71 }
72
73 bool bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
74 {
75         struct btree_iter *linked;
76         struct btree *b = iter->nodes[level];
77         enum btree_node_locked_type want = btree_lock_want(iter, level);
78         enum btree_node_locked_type have = btree_node_locked_type(iter, level);
79
80         if (want == have)
81                 return true;
82
83         if (!is_btree_node(iter, level))
84                 return false;
85
86         if (race_fault())
87                 return false;
88
89         if (have != BTREE_NODE_UNLOCKED
90             ? six_trylock_convert(&b->lock, have, want)
91             : six_relock_type(&b->lock, want, iter->lock_seq[level]))
92                 goto success;
93
94         for_each_linked_btree_iter(iter, linked)
95                 if (linked->nodes[level] == b &&
96                     btree_node_locked_type(linked, level) == want &&
97                     iter->lock_seq[level] == b->lock.state.seq) {
98                         btree_node_unlock(iter, level);
99                         six_lock_increment(&b->lock, want);
100                         goto success;
101                 }
102
103         return false;
104 success:
105         mark_btree_node_unlocked(iter, level);
106         mark_btree_node_locked(iter, level, want);
107         return true;
108 }
109
110 /* Slowpath: */
111 bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
112                            unsigned level,
113                            struct btree_iter *iter,
114                            enum six_lock_type type)
115 {
116         struct btree_iter *linked;
117
118         /* Can't have children locked before ancestors: */
119         EBUG_ON(iter->nodes_locked && level > __ffs(iter->nodes_locked));
120
121         /*
122          * Can't hold any read locks while we block taking an intent lock - see
123          * below for reasoning, and we should have already dropped any read
124          * locks in the current iterator
125          */
126         EBUG_ON(type == SIX_LOCK_intent &&
127                 iter->nodes_locked != iter->nodes_intent_locked);
128
129         for_each_linked_btree_iter(iter, linked)
130                 if (linked->nodes[level] == b &&
131                     btree_node_locked_type(linked, level) == type) {
132                         six_lock_increment(&b->lock, type);
133                         return true;
134                 }
135
136         /*
137          * Must lock btree nodes in key order - this case hapens when locking
138          * the prev sibling in btree node merging:
139          */
140         if (iter->nodes_locked &&
141             __ffs(iter->nodes_locked) == level &&
142             __btree_iter_cmp(iter->btree_id, pos, iter))
143                 return false;
144
145         for_each_linked_btree_iter(iter, linked) {
146                 if (!linked->nodes_locked)
147                         continue;
148
149                 /*
150                  * Can't block taking an intent lock if we have _any_ nodes read
151                  * locked:
152                  *
153                  * - Our read lock blocks another thread with an intent lock on
154                  *   the same node from getting a write lock, and thus from
155                  *   dropping its intent lock
156                  *
157                  * - And the other thread may have multiple nodes intent locked:
158                  *   both the node we want to intent lock, and the node we
159                  *   already have read locked - deadlock:
160                  */
161                 if (type == SIX_LOCK_intent &&
162                     linked->nodes_locked != linked->nodes_intent_locked) {
163                         linked->locks_want = max(linked->locks_want,
164                                                  iter->locks_want);
165                         return false;
166                 }
167
168                 /* We have to lock btree nodes in key order: */
169                 if (__btree_iter_cmp(iter->btree_id, pos, linked) < 0)
170                         return false;
171
172                 /*
173                  * Interior nodes must be locked before their descendants: if
174                  * another iterator has possible descendants locked of the node
175                  * we're about to lock, it must have the ancestors locked too:
176                  */
177                 if (linked->btree_id == iter->btree_id &&
178                     level > __fls(linked->nodes_locked)) {
179                         linked->locks_want = max(linked->locks_want,
180                                                  iter->locks_want);
181                         return false;
182                 }
183         }
184
185         six_lock_type(&b->lock, type);
186         return true;
187 }
188
189 /* Btree iterator locking: */
190
191
192 static void btree_iter_drop_extra_locks(struct btree_iter *iter)
193 {
194         unsigned l;
195
196         while (iter->nodes_locked &&
197                (l = __fls(iter->nodes_locked)) > iter->locks_want) {
198                 if (!btree_node_locked(iter, l))
199                         panic("l %u nodes_locked %u\n", l, iter->nodes_locked);
200
201                 if (l > iter->level) {
202                         btree_node_unlock(iter, l);
203                 } else if (btree_node_intent_locked(iter, l)) {
204                         six_lock_downgrade(&iter->nodes[l]->lock);
205                         iter->nodes_intent_locked ^= 1 << l;
206                 }
207         }
208 }
209
210 bool __bch2_btree_iter_set_locks_want(struct btree_iter *iter,
211                                      unsigned new_locks_want)
212 {
213         struct btree_iter *linked;
214         unsigned l;
215
216         /* Drop locks we don't want anymore: */
217         if (new_locks_want < iter->locks_want)
218                 for_each_linked_btree_iter(iter, linked)
219                         if (linked->locks_want > new_locks_want) {
220                                 linked->locks_want = max_t(unsigned, 1,
221                                                            new_locks_want);
222                                 btree_iter_drop_extra_locks(linked);
223                         }
224
225         iter->locks_want = new_locks_want;
226         btree_iter_drop_extra_locks(iter);
227
228         for (l = iter->level; l < iter->locks_want && iter->nodes[l]; l++)
229                 if (!bch2_btree_node_relock(iter, l))
230                         goto fail;
231
232         return true;
233 fail:
234         /*
235          * Just an optimization: ancestor nodes must be locked before child
236          * nodes, so set locks_want on iterators that might lock ancestors
237          * before us to avoid getting -EINTR later:
238          */
239         for_each_linked_btree_iter(iter, linked)
240                 if (linked->btree_id == iter->btree_id &&
241                     btree_iter_cmp(linked, iter) <= 0)
242                         linked->locks_want = max_t(unsigned, linked->locks_want,
243                                                    new_locks_want);
244         return false;
245 }
246
247 static int __bch2_btree_iter_unlock(struct btree_iter *iter)
248 {
249         BUG_ON(iter->error == -EINTR);
250
251         while (iter->nodes_locked)
252                 btree_node_unlock(iter, __ffs(iter->nodes_locked));
253
254         return iter->error;
255 }
256
257 int bch2_btree_iter_unlock(struct btree_iter *iter)
258 {
259         struct btree_iter *linked;
260
261         for_each_linked_btree_iter(iter, linked)
262                 __bch2_btree_iter_unlock(linked);
263         return __bch2_btree_iter_unlock(iter);
264 }
265
266 /* Btree iterator: */
267
268 #ifdef CONFIG_BCACHEFS_DEBUG
269
270 static void __bch2_btree_iter_verify(struct btree_iter *iter,
271                                     struct btree *b)
272 {
273         struct btree_node_iter *node_iter = &iter->node_iters[b->level];
274         struct btree_node_iter tmp = *node_iter;
275         struct bkey_packed *k;
276
277         bch2_btree_node_iter_verify(node_iter, b);
278
279         /*
280          * For interior nodes, the iterator will have skipped past
281          * deleted keys:
282          */
283         k = b->level
284                 ? bch2_btree_node_iter_prev(&tmp, b)
285                 : bch2_btree_node_iter_prev_all(&tmp, b);
286         if (k && btree_iter_pos_cmp_packed(b, &iter->pos, k,
287                                            iter->is_extents)) {
288                 char buf[100];
289                 struct bkey uk = bkey_unpack_key(b, k);
290
291                 bch2_bkey_to_text(buf, sizeof(buf), &uk);
292                 panic("prev key should be before after pos:\n%s\n%llu:%llu\n",
293                       buf, iter->pos.inode, iter->pos.offset);
294         }
295
296         k = bch2_btree_node_iter_peek_all(node_iter, b);
297         if (k && !btree_iter_pos_cmp_packed(b, &iter->pos, k,
298                                             iter->is_extents)) {
299                 char buf[100];
300                 struct bkey uk = bkey_unpack_key(b, k);
301
302                 bch2_bkey_to_text(buf, sizeof(buf), &uk);
303                 panic("next key should be before iter pos:\n%llu:%llu\n%s\n",
304                       iter->pos.inode, iter->pos.offset, buf);
305         }
306 }
307
308 void bch2_btree_iter_verify(struct btree_iter *iter, struct btree *b)
309 {
310         struct btree_iter *linked;
311
312         if (iter->nodes[b->level] == b)
313                 __bch2_btree_iter_verify(iter, b);
314
315         for_each_linked_btree_node(iter, b, linked)
316                 __bch2_btree_iter_verify(iter, b);
317 }
318
319 #endif
320
321 static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
322                                       struct btree *b,
323                                       struct btree_node_iter *node_iter,
324                                       struct bset_tree *t,
325                                       struct bkey_packed *where,
326                                       unsigned clobber_u64s,
327                                       unsigned new_u64s)
328 {
329         const struct bkey_packed *end = btree_bkey_last(b, t);
330         struct btree_node_iter_set *set;
331         unsigned offset = __btree_node_key_to_offset(b, where);
332         int shift = new_u64s - clobber_u64s;
333         unsigned old_end = (int) __btree_node_key_to_offset(b, end) - shift;
334
335         btree_node_iter_for_each(node_iter, set)
336                 if (set->end == old_end)
337                         goto found;
338
339         /* didn't find the bset in the iterator - might have to readd it: */
340         if (new_u64s &&
341             btree_iter_pos_cmp_packed(b, &iter->pos, where,
342                                       iter->is_extents))
343                 bch2_btree_node_iter_push(node_iter, b, where, end);
344         return;
345 found:
346         set->end = (int) set->end + shift;
347
348         /* Iterator hasn't gotten to the key that changed yet: */
349         if (set->k < offset)
350                 return;
351
352         if (new_u64s &&
353             btree_iter_pos_cmp_packed(b, &iter->pos, where,
354                                       iter->is_extents)) {
355                 set->k = offset;
356                 bch2_btree_node_iter_sort(node_iter, b);
357         } else if (set->k < offset + clobber_u64s) {
358                 set->k = offset + new_u64s;
359                 if (set->k == set->end)
360                         *set = node_iter->data[--node_iter->used];
361                 bch2_btree_node_iter_sort(node_iter, b);
362         } else {
363                 set->k = (int) set->k + shift;
364         }
365
366         /*
367          * Interior nodes are special because iterators for interior nodes don't
368          * obey the usual invariants regarding the iterator position:
369          *
370          * We may have whiteouts that compare greater than the iterator
371          * position, and logically should be in the iterator, but that we
372          * skipped past to find the first live key greater than the iterator
373          * position. This becomes an issue when we insert a new key that is
374          * greater than the current iterator position, but smaller than the
375          * whiteouts we've already skipped past - this happens in the course of
376          * a btree split.
377          *
378          * We have to rewind the iterator past to before those whiteouts here,
379          * else bkey_node_iter_prev() is not going to work and who knows what
380          * else would happen. And we have to do it manually, because here we've
381          * already done the insert and the iterator is currently inconsistent:
382          *
383          * We've got multiple competing invariants, here - we have to be careful
384          * about rewinding iterators for interior nodes, because they should
385          * always point to the key for the child node the btree iterator points
386          * to.
387          */
388         if (b->level && new_u64s && !bkey_deleted(where) &&
389             btree_iter_pos_cmp_packed(b, &iter->pos, where,
390                                       iter->is_extents)) {
391                 struct bset_tree *t;
392                 struct bkey_packed *k;
393
394                 for_each_bset(b, t) {
395                         if (bch2_bkey_to_bset(b, where) == t)
396                                 continue;
397
398                         k = bch2_bkey_prev_all(b, t,
399                                 bch2_btree_node_iter_bset_pos(node_iter, b, t));
400                         if (k &&
401                             __btree_node_iter_cmp(node_iter, b,
402                                                   k, where) > 0) {
403                                 struct btree_node_iter_set *set;
404                                 unsigned offset =
405                                         __btree_node_key_to_offset(b, bkey_next(k));
406
407                                 btree_node_iter_for_each(node_iter, set)
408                                         if (set->k == offset) {
409                                                 set->k = __btree_node_key_to_offset(b, k);
410                                                 bch2_btree_node_iter_sort(node_iter, b);
411                                                 goto next_bset;
412                                         }
413
414                                 bch2_btree_node_iter_push(node_iter, b, k,
415                                                 btree_bkey_last(b, t));
416                         }
417 next_bset:
418                         t = t;
419                 }
420         }
421 }
422
423 void bch2_btree_node_iter_fix(struct btree_iter *iter,
424                              struct btree *b,
425                              struct btree_node_iter *node_iter,
426                              struct bset_tree *t,
427                              struct bkey_packed *where,
428                              unsigned clobber_u64s,
429                              unsigned new_u64s)
430 {
431         struct btree_iter *linked;
432
433         if (node_iter != &iter->node_iters[b->level])
434                 __bch2_btree_node_iter_fix(iter, b, node_iter, t,
435                                           where, clobber_u64s, new_u64s);
436
437         if (iter->nodes[b->level] == b)
438                 __bch2_btree_node_iter_fix(iter, b,
439                                           &iter->node_iters[b->level], t,
440                                           where, clobber_u64s, new_u64s);
441
442         for_each_linked_btree_node(iter, b, linked)
443                 __bch2_btree_node_iter_fix(linked, b,
444                                           &linked->node_iters[b->level], t,
445                                           where, clobber_u64s, new_u64s);
446
447         /* interior node iterators are... special... */
448         if (!b->level)
449                 bch2_btree_iter_verify(iter, b);
450 }
451
452 /* peek_all() doesn't skip deleted keys */
453 static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter)
454 {
455         struct btree *b = iter->nodes[iter->level];
456         struct bkey_packed *k =
457                 bch2_btree_node_iter_peek_all(&iter->node_iters[iter->level], b);
458         struct bkey_s_c ret;
459
460         EBUG_ON(!btree_node_locked(iter, iter->level));
461
462         if (!k)
463                 return bkey_s_c_null;
464
465         ret = bkey_disassemble(b, k, &iter->k);
466
467         if (debug_check_bkeys(iter->c))
468                 bch2_bkey_debugcheck(iter->c, b, ret);
469
470         return ret;
471 }
472
473 static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter)
474 {
475         struct btree *b = iter->nodes[iter->level];
476         struct bkey_packed *k =
477                 bch2_btree_node_iter_peek(&iter->node_iters[iter->level], b);
478         struct bkey_s_c ret;
479
480         EBUG_ON(!btree_node_locked(iter, iter->level));
481
482         if (!k)
483                 return bkey_s_c_null;
484
485         ret = bkey_disassemble(b, k, &iter->k);
486
487         if (debug_check_bkeys(iter->c))
488                 bch2_bkey_debugcheck(iter->c, b, ret);
489
490         return ret;
491 }
492
493 static inline void __btree_iter_advance(struct btree_iter *iter)
494 {
495         bch2_btree_node_iter_advance(&iter->node_iters[iter->level],
496                                     iter->nodes[iter->level]);
497 }
498
499 /*
500  * Verify that iterator for parent node points to child node:
501  */
502 static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
503 {
504         bool parent_locked;
505         struct bkey_packed *k;
506
507         if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG) ||
508             !iter->nodes[b->level + 1])
509                 return;
510
511         parent_locked = btree_node_locked(iter, b->level + 1);
512
513         if (!bch2_btree_node_relock(iter, b->level + 1))
514                 return;
515
516         k = bch2_btree_node_iter_peek_all(&iter->node_iters[b->level + 1],
517                                          iter->nodes[b->level + 1]);
518         if (!k ||
519             bkey_deleted(k) ||
520             bkey_cmp_left_packed(iter->nodes[b->level + 1],
521                                  k, &b->key.k.p)) {
522                 char buf[100];
523                 struct bkey uk = bkey_unpack_key(b, k);
524
525                 bch2_bkey_to_text(buf, sizeof(buf), &uk);
526                 panic("parent iter doesn't point to new node:\n%s\n%llu:%llu\n",
527                       buf, b->key.k.p.inode, b->key.k.p.offset);
528         }
529
530         if (!parent_locked)
531                 btree_node_unlock(iter, b->level + 1);
532 }
533
534 static inline void __btree_iter_init(struct btree_iter *iter,
535                                      struct btree *b)
536 {
537         bch2_btree_node_iter_init(&iter->node_iters[b->level], b,
538                                  iter->pos, iter->is_extents,
539                                  btree_node_is_extents(b));
540
541         /* Skip to first non whiteout: */
542         if (b->level)
543                 bch2_btree_node_iter_peek(&iter->node_iters[b->level], b);
544 }
545
546 static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
547                                           struct btree *b)
548 {
549         return iter->btree_id == b->btree_id &&
550                 bkey_cmp(iter->pos, b->data->min_key) >= 0 &&
551                 btree_iter_pos_cmp(iter->pos, &b->key.k, iter->is_extents);
552 }
553
554 static inline void btree_iter_node_set(struct btree_iter *iter,
555                                        struct btree *b)
556 {
557         btree_iter_verify_new_node(iter, b);
558
559         EBUG_ON(!btree_iter_pos_in_node(iter, b));
560         EBUG_ON(b->lock.state.seq & 1);
561
562         iter->lock_seq[b->level] = b->lock.state.seq;
563         iter->nodes[b->level] = b;
564         __btree_iter_init(iter, b);
565 }
566
567 /*
568  * A btree node is being replaced - update the iterator to point to the new
569  * node:
570  */
571 bool bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
572 {
573         struct btree_iter *linked;
574
575         for_each_linked_btree_iter(iter, linked)
576                 if (btree_iter_pos_in_node(linked, b)) {
577                         /*
578                          * bch2_btree_iter_node_drop() has already been called -
579                          * the old node we're replacing has already been
580                          * unlocked and the pointer invalidated
581                          */
582                         BUG_ON(btree_node_locked(linked, b->level));
583
584                         /*
585                          * If @linked wants this node read locked, we don't want
586                          * to actually take the read lock now because it's not
587                          * legal to hold read locks on other nodes while we take
588                          * write locks, so the journal can make forward
589                          * progress...
590                          *
591                          * Instead, btree_iter_node_set() sets things up so
592                          * bch2_btree_node_relock() will succeed:
593                          */
594
595                         if (btree_want_intent(linked, b->level)) {
596                                 six_lock_increment(&b->lock, SIX_LOCK_intent);
597                                 mark_btree_node_intent_locked(linked, b->level);
598                         }
599
600                         btree_iter_node_set(linked, b);
601                 }
602
603         if (!btree_iter_pos_in_node(iter, b)) {
604                 six_unlock_intent(&b->lock);
605                 return false;
606         }
607
608         mark_btree_node_intent_locked(iter, b->level);
609         btree_iter_node_set(iter, b);
610         return true;
611 }
612
613 void bch2_btree_iter_node_drop_linked(struct btree_iter *iter, struct btree *b)
614 {
615         struct btree_iter *linked;
616         unsigned level = b->level;
617
618         for_each_linked_btree_iter(iter, linked)
619                 if (linked->nodes[level] == b) {
620                         btree_node_unlock(linked, level);
621                         linked->nodes[level] = BTREE_ITER_NOT_END;
622                 }
623 }
624
625 void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
626 {
627         unsigned level = b->level;
628
629         if (iter->nodes[level] == b) {
630                 BUG_ON(b->lock.state.intent_lock != 1);
631                 btree_node_unlock(iter, level);
632                 iter->nodes[level] = BTREE_ITER_NOT_END;
633         }
634 }
635
636 /*
637  * A btree node has been modified in such a way as to invalidate iterators - fix
638  * them:
639  */
640 void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
641 {
642         struct btree_iter *linked;
643
644         for_each_linked_btree_node(iter, b, linked)
645                 __btree_iter_init(linked, b);
646         __btree_iter_init(iter, b);
647 }
648
649 static inline int btree_iter_lock_root(struct btree_iter *iter,
650                                        unsigned depth_want)
651 {
652         struct bch_fs *c = iter->c;
653         struct btree *b;
654         enum six_lock_type lock_type;
655         unsigned i;
656
657         EBUG_ON(iter->nodes_locked);
658
659         while (1) {
660                 b = READ_ONCE(c->btree_roots[iter->btree_id].b);
661                 iter->level = READ_ONCE(b->level);
662
663                 if (unlikely(iter->level < depth_want)) {
664                         /*
665                          * the root is at a lower depth than the depth we want:
666                          * got to the end of the btree, or we're walking nodes
667                          * greater than some depth and there are no nodes >=
668                          * that depth
669                          */
670                         iter->level = depth_want;
671                         iter->nodes[iter->level] = NULL;
672                         return 0;
673                 }
674
675                 lock_type = btree_lock_want(iter, iter->level);
676                 if (unlikely(!btree_node_lock(b, POS_MAX, iter->level,
677                                               iter, lock_type)))
678                         return -EINTR;
679
680                 if (likely(b == c->btree_roots[iter->btree_id].b &&
681                            b->level == iter->level &&
682                            !race_fault())) {
683                         for (i = 0; i < iter->level; i++)
684                                 iter->nodes[i] = BTREE_ITER_NOT_END;
685                         iter->nodes[iter->level] = b;
686
687                         mark_btree_node_locked(iter, iter->level, lock_type);
688                         btree_iter_node_set(iter, b);
689                         return 0;
690
691                 }
692
693                 six_unlock_type(&b->lock, lock_type);
694         }
695 }
696
697 static inline int btree_iter_down(struct btree_iter *iter)
698 {
699         struct btree *b;
700         struct bkey_s_c k = __btree_iter_peek(iter);
701         unsigned level = iter->level - 1;
702         enum six_lock_type lock_type = btree_lock_want(iter, level);
703         BKEY_PADDED(k) tmp;
704
705         bkey_reassemble(&tmp.k, k);
706
707         b = bch2_btree_node_get(iter, &tmp.k, level, lock_type);
708         if (unlikely(IS_ERR(b)))
709                 return PTR_ERR(b);
710
711         iter->level = level;
712         mark_btree_node_locked(iter, level, lock_type);
713         btree_iter_node_set(iter, b);
714         return 0;
715 }
716
717 static void btree_iter_up(struct btree_iter *iter)
718 {
719         btree_node_unlock(iter, iter->level++);
720 }
721
722 int __must_check __bch2_btree_iter_traverse(struct btree_iter *);
723
724 static int btree_iter_traverse_error(struct btree_iter *iter, int ret)
725 {
726         struct bch_fs *c = iter->c;
727         struct btree_iter *linked, *sorted_iters, **i;
728 retry_all:
729         bch2_btree_iter_unlock(iter);
730
731         if (ret != -ENOMEM && ret != -EINTR)
732                 goto io_error;
733
734         if (ret == -ENOMEM) {
735                 struct closure cl;
736
737                 closure_init_stack(&cl);
738
739                 do {
740                         ret = bch2_btree_node_cannibalize_lock(c, &cl);
741                         closure_sync(&cl);
742                 } while (ret);
743         }
744
745         /*
746          * Linked iters are normally a circular singly linked list - break cycle
747          * while we sort them:
748          */
749         linked = iter->next;
750         iter->next = NULL;
751         sorted_iters = NULL;
752
753         while (linked) {
754                 iter = linked;
755                 linked = linked->next;
756
757                 i = &sorted_iters;
758                 while (*i && btree_iter_cmp(iter, *i) > 0)
759                         i = &(*i)->next;
760
761                 iter->next = *i;
762                 *i = iter;
763         }
764
765         /* Make list circular again: */
766         iter = sorted_iters;
767         while (iter->next)
768                 iter = iter->next;
769         iter->next = sorted_iters;
770
771         /* Now, redo traversals in correct order: */
772
773         iter = sorted_iters;
774         do {
775 retry:
776                 ret = __bch2_btree_iter_traverse(iter);
777                 if (unlikely(ret)) {
778                         if (ret == -EINTR)
779                                 goto retry;
780                         goto retry_all;
781                 }
782
783                 iter = iter->next;
784         } while (iter != sorted_iters);
785
786         ret = btree_iter_linked(iter) ? -EINTR : 0;
787 out:
788         bch2_btree_node_cannibalize_unlock(c);
789         return ret;
790 io_error:
791         BUG_ON(ret != -EIO);
792
793         iter->error = ret;
794         iter->nodes[iter->level] = NULL;
795         goto out;
796 }
797
798 /*
799  * This is the main state machine for walking down the btree - walks down to a
800  * specified depth
801  *
802  * Returns 0 on success, -EIO on error (error reading in a btree node).
803  *
804  * On error, caller (peek_node()/peek_key()) must return NULL; the error is
805  * stashed in the iterator and returned from bch2_btree_iter_unlock().
806  */
807 int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
808 {
809         unsigned depth_want = iter->level;
810
811         /* make sure we have all the intent locks we need - ugh */
812         if (unlikely(iter->nodes[iter->level] &&
813                      iter->level + 1 < iter->locks_want)) {
814                 unsigned i;
815
816                 for (i = iter->level + 1;
817                      i < iter->locks_want && iter->nodes[i];
818                      i++)
819                         if (!bch2_btree_node_relock(iter, i)) {
820                                 while (iter->nodes[iter->level] &&
821                                        iter->level + 1 < iter->locks_want)
822                                         btree_iter_up(iter);
823                                 break;
824                         }
825         }
826
827         /*
828          * If the current node isn't locked, go up until we have a locked node
829          * or run out of nodes:
830          */
831         while (iter->nodes[iter->level] &&
832                !(is_btree_node(iter, iter->level) &&
833                  bch2_btree_node_relock(iter, iter->level) &&
834                  btree_iter_pos_cmp(iter->pos,
835                                     &iter->nodes[iter->level]->key.k,
836                                     iter->is_extents)))
837                 btree_iter_up(iter);
838
839         /*
840          * If we've got a btree node locked (i.e. we aren't about to relock the
841          * root) - advance its node iterator if necessary:
842          */
843         if (iter->nodes[iter->level]) {
844                 struct bkey_s_c k;
845
846                 while ((k = __btree_iter_peek_all(iter)).k &&
847                        !btree_iter_pos_cmp(iter->pos, k.k, iter->is_extents))
848                         __btree_iter_advance(iter);
849         }
850
851         /*
852          * Note: iter->nodes[iter->level] may be temporarily NULL here - that
853          * would indicate to other code that we got to the end of the btree,
854          * here it indicates that relocking the root failed - it's critical that
855          * btree_iter_lock_root() comes next and that it can't fail
856          */
857         while (iter->level > depth_want) {
858                 int ret = iter->nodes[iter->level]
859                         ? btree_iter_down(iter)
860                         : btree_iter_lock_root(iter, depth_want);
861                 if (unlikely(ret)) {
862                         iter->level = depth_want;
863                         return ret;
864                 }
865         }
866
867         return 0;
868 }
869
870 int __must_check bch2_btree_iter_traverse(struct btree_iter *iter)
871 {
872         int ret;
873
874         if (unlikely(!iter->nodes[iter->level]))
875                 return 0;
876
877         iter->at_end_of_leaf = false;
878
879         ret = __bch2_btree_iter_traverse(iter);
880         if (unlikely(ret))
881                 ret = btree_iter_traverse_error(iter, ret);
882
883         return ret;
884 }
885
886 /* Iterate across nodes (leaf and interior nodes) */
887
888 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
889 {
890         struct btree *b;
891         int ret;
892
893         EBUG_ON(iter->is_extents);
894
895         ret = bch2_btree_iter_traverse(iter);
896         if (ret)
897                 return NULL;
898
899         b = iter->nodes[iter->level];
900
901         if (b) {
902                 EBUG_ON(bkey_cmp(b->key.k.p, iter->pos) < 0);
903                 iter->pos = b->key.k.p;
904         }
905
906         return b;
907 }
908
909 struct btree *bch2_btree_iter_next_node(struct btree_iter *iter, unsigned depth)
910 {
911         struct btree *b;
912         int ret;
913
914         EBUG_ON(iter->is_extents);
915
916         btree_iter_up(iter);
917
918         if (!iter->nodes[iter->level])
919                 return NULL;
920
921         /* parent node usually won't be locked: redo traversal if necessary */
922         ret = bch2_btree_iter_traverse(iter);
923         if (ret)
924                 return NULL;
925
926         b = iter->nodes[iter->level];
927         if (!b)
928                 return b;
929
930         if (bkey_cmp(iter->pos, b->key.k.p) < 0) {
931                 /* Haven't gotten to the end of the parent node: */
932
933                 /* ick: */
934                 iter->pos       = iter->btree_id == BTREE_ID_INODES
935                         ? btree_type_successor(iter->btree_id, iter->pos)
936                         : bkey_successor(iter->pos);
937                 iter->level     = depth;
938
939                 ret = bch2_btree_iter_traverse(iter);
940                 if (ret)
941                         return NULL;
942
943                 b = iter->nodes[iter->level];
944         }
945
946         iter->pos = b->key.k.p;
947
948         return b;
949 }
950
951 /* Iterate across keys (in leaf nodes only) */
952
953 void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_pos)
954 {
955         struct btree *b = iter->nodes[0];
956         struct btree_node_iter *node_iter = &iter->node_iters[0];
957         struct bkey_packed *k;
958
959         EBUG_ON(iter->level != 0);
960         EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0);
961         EBUG_ON(!btree_node_locked(iter, 0));
962         EBUG_ON(bkey_cmp(new_pos, b->key.k.p) > 0);
963
964         while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
965                !btree_iter_pos_cmp_packed(b, &new_pos, k,
966                                           iter->is_extents))
967                 bch2_btree_node_iter_advance(node_iter, b);
968
969         if (!k &&
970             !btree_iter_pos_cmp(new_pos, &b->key.k, iter->is_extents))
971                 iter->at_end_of_leaf = true;
972
973         iter->pos = new_pos;
974 }
975
976 void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
977 {
978         EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0); /* XXX handle this */
979         iter->pos = new_pos;
980 }
981
982 void bch2_btree_iter_advance_pos(struct btree_iter *iter)
983 {
984         /*
985          * We use iter->k instead of iter->pos for extents: iter->pos will be
986          * equal to the start of the extent we returned, but we need to advance
987          * to the end of the extent we returned.
988          */
989         bch2_btree_iter_set_pos(iter,
990                 btree_type_successor(iter->btree_id, iter->k.p));
991 }
992
993 /* XXX: expensive */
994 void bch2_btree_iter_rewind(struct btree_iter *iter, struct bpos pos)
995 {
996         /* incapable of rewinding across nodes: */
997         BUG_ON(bkey_cmp(pos, iter->nodes[iter->level]->data->min_key) < 0);
998
999         iter->pos = pos;
1000         __btree_iter_init(iter, iter->nodes[iter->level]);
1001 }
1002
1003 struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
1004 {
1005         struct bkey_s_c k;
1006         int ret;
1007
1008         while (1) {
1009                 ret = bch2_btree_iter_traverse(iter);
1010                 if (unlikely(ret)) {
1011                         iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
1012                         return bkey_s_c_err(ret);
1013                 }
1014
1015                 k = __btree_iter_peek(iter);
1016                 if (likely(k.k)) {
1017                         /*
1018                          * iter->pos should always be equal to the key we just
1019                          * returned - except extents can straddle iter->pos:
1020                          */
1021                         if (!iter->is_extents ||
1022                             bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
1023                                 bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
1024                         return k;
1025                 }
1026
1027                 iter->pos = iter->nodes[0]->key.k.p;
1028
1029                 if (!bkey_cmp(iter->pos, POS_MAX)) {
1030                         iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
1031                         bch2_btree_iter_unlock(iter);
1032                         return bkey_s_c_null;
1033                 }
1034
1035                 iter->pos = btree_type_successor(iter->btree_id, iter->pos);
1036         }
1037 }
1038
1039 struct bkey_s_c bch2_btree_iter_peek_with_holes(struct btree_iter *iter)
1040 {
1041         struct bkey_s_c k;
1042         struct bkey n;
1043         int ret;
1044
1045         while (1) {
1046                 ret = bch2_btree_iter_traverse(iter);
1047                 if (unlikely(ret)) {
1048                         iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
1049                         return bkey_s_c_err(ret);
1050                 }
1051
1052                 k = __btree_iter_peek_all(iter);
1053 recheck:
1054                 if (!k.k || bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0) {
1055                         /* hole */
1056                         bkey_init(&n);
1057                         n.p = iter->pos;
1058
1059                         if (iter->is_extents) {
1060                                 if (n.p.offset == KEY_OFFSET_MAX) {
1061                                         iter->pos = bkey_successor(iter->pos);
1062                                         goto recheck;
1063                                 }
1064
1065                                 if (!k.k)
1066                                         k.k = &iter->nodes[0]->key.k;
1067
1068                                 bch2_key_resize(&n,
1069                                        min_t(u64, KEY_SIZE_MAX,
1070                                              (k.k->p.inode == n.p.inode
1071                                               ? bkey_start_offset(k.k)
1072                                               : KEY_OFFSET_MAX) -
1073                                              n.p.offset));
1074
1075                                 EBUG_ON(!n.size);
1076                         }
1077
1078                         iter->k = n;
1079                         return (struct bkey_s_c) { &iter->k, NULL };
1080                 } else if (!bkey_deleted(k.k)) {
1081                         return k;
1082                 } else {
1083                         __btree_iter_advance(iter);
1084                 }
1085         }
1086 }
1087
1088 void __bch2_btree_iter_init(struct btree_iter *iter, struct bch_fs *c,
1089                            enum btree_id btree_id, struct bpos pos,
1090                            unsigned locks_want, unsigned depth)
1091 {
1092         iter->level                     = depth;
1093         /* bch2_bkey_ops isn't used much, this would be a cache miss */
1094         /* iter->is_extents             = bch2_bkey_ops[btree_id]->is_extents; */
1095         iter->is_extents                = btree_id == BTREE_ID_EXTENTS;
1096         iter->nodes_locked              = 0;
1097         iter->nodes_intent_locked       = 0;
1098         iter->locks_want                = min(locks_want, BTREE_MAX_DEPTH);
1099         iter->btree_id                  = btree_id;
1100         iter->at_end_of_leaf            = 0;
1101         iter->error                     = 0;
1102         iter->c                         = c;
1103         iter->pos                       = pos;
1104         memset(iter->nodes, 0, sizeof(iter->nodes));
1105         iter->nodes[iter->level]        = BTREE_ITER_NOT_END;
1106         iter->next                      = iter;
1107
1108         prefetch(c->btree_roots[btree_id].b);
1109 }
1110
1111 void bch2_btree_iter_link(struct btree_iter *iter, struct btree_iter *new)
1112 {
1113         BUG_ON(btree_iter_linked(new));
1114
1115         new->next = iter->next;
1116         iter->next = new;
1117
1118         if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
1119                 unsigned nr_iters = 1;
1120
1121                 for_each_linked_btree_iter(iter, new)
1122                         nr_iters++;
1123
1124                 BUG_ON(nr_iters > SIX_LOCK_MAX_RECURSE);
1125         }
1126 }
1127
1128 void bch2_btree_iter_copy(struct btree_iter *dst, struct btree_iter *src)
1129 {
1130         bch2_btree_iter_unlock(dst);
1131         memcpy(dst, src, offsetof(struct btree_iter, next));
1132         dst->nodes_locked = dst->nodes_intent_locked = 0;
1133 }