libbcachefs/btree_iter.c
1
2 #include "bcachefs.h"
3 #include "bkey_methods.h"
4 #include "btree_cache.h"
5 #include "btree_iter.h"
6 #include "btree_locking.h"
7 #include "debug.h"
8 #include "extents.h"
9
10 #include <linux/prefetch.h>
11 #include <trace/events/bcachefs.h>
12
13 #define BTREE_ITER_NOT_END      ((struct btree *) 1)
14
15 static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
16 {
17         return iter->nodes[l] && iter->nodes[l] != BTREE_ITER_NOT_END;
18 }
19
20 /* Btree node locking: */
21
22 /*
23  * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
24  * succeed:
25  */
26 void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
27 {
28         struct btree_iter *linked;
29
30         EBUG_ON(iter->nodes[b->level] != b);
31         EBUG_ON(iter->lock_seq[b->level] + 1 != b->lock.state.seq);
32
33         for_each_linked_btree_node(iter, b, linked)
34                 linked->lock_seq[b->level] += 2;
35
36         iter->lock_seq[b->level] += 2;
37
38         six_unlock_write(&b->lock);
39 }
40
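/*
 * Take the write lock on a node we already have intent locked. If linked
 * iterators hold read locks on the same node, temporarily drop their readers
 * from the lock's counter (rather than blocking on them) so we can't deadlock
 * against ourselves, then restore the count once the write lock is acquired.
 */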
41 void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
42 {
43         struct btree_iter *linked;
44         unsigned readers = 0;
45
46         EBUG_ON(iter->nodes[b->level] != b);
47         EBUG_ON(iter->lock_seq[b->level] != b->lock.state.seq);
48
49         if (six_trylock_write(&b->lock))
50                 return;
51
52         for_each_linked_btree_iter(iter, linked)
53                 if (linked->nodes[b->level] == b &&
54                     btree_node_read_locked(linked, b->level))
55                         readers++;
56
57         if (likely(!readers)) {
58                 six_lock_write(&b->lock);
59         } else {
60                 /*
61                  * Must drop our read locks before calling six_lock_write() -
62                  * six_unlock() won't do wakeups until the reader count
63                  * goes to 0, and it's safe because we have the node intent
64                  * locked:
65                  */
66                 atomic64_sub(__SIX_VAL(read_lock, readers),
67                              &b->lock.state.counter);
68                 six_lock_write(&b->lock);
69                 atomic64_add(__SIX_VAL(read_lock, readers),
70                              &b->lock.state.counter);
71         }
72 }
73
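/*
 * Attempt to re-take (or upgrade) the lock on iter->nodes[level] to the type
 * this iterator wants, using the saved lock sequence number to check that the
 * node hasn't been modified or freed since the lock was dropped. If a linked
 * iterator already holds the node with the wanted lock type, just bump the
 * lock's recursion count. Returns false if the node couldn't be relocked and
 * the caller has to re-traverse.
 */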
74 bool bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
75 {
76         struct btree_iter *linked;
77         struct btree *b = iter->nodes[level];
78         enum btree_node_locked_type want = btree_lock_want(iter, level);
79         enum btree_node_locked_type have = btree_node_locked_type(iter, level);
80
81         if (want == have)
82                 return true;
83
84         if (!is_btree_node(iter, level))
85                 return false;
86
87         if (race_fault())
88                 return false;
89
90         if (have != BTREE_NODE_UNLOCKED
91             ? six_trylock_convert(&b->lock, have, want)
92             : six_relock_type(&b->lock, want, iter->lock_seq[level]))
93                 goto success;
94
95         for_each_linked_btree_iter(iter, linked)
96                 if (linked->nodes[level] == b &&
97                     btree_node_locked_type(linked, level) == want &&
98                     iter->lock_seq[level] == b->lock.state.seq) {
99                         btree_node_unlock(iter, level);
100                         six_lock_increment(&b->lock, want);
101                         goto success;
102                 }
103
104         return false;
105 success:
106         mark_btree_node_unlocked(iter, level);
107         mark_btree_node_locked(iter, level, want);
108         return true;
109 }
110
111 /* Slowpath: */
112 bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
113                            unsigned level,
114                            struct btree_iter *iter,
115                            enum six_lock_type type)
116 {
117         struct btree_iter *linked;
118
119         /* Can't have children locked before ancestors: */
120         EBUG_ON(iter->nodes_locked && level > __ffs(iter->nodes_locked));
121
122         /*
123          * Can't hold any read locks while we block taking an intent lock - see
124          * below for reasoning, and we should have already dropped any read
125          * locks in the current iterator
126          */
127         EBUG_ON(type == SIX_LOCK_intent &&
128                 iter->nodes_locked != iter->nodes_intent_locked);
129
130         for_each_linked_btree_iter(iter, linked)
131                 if (linked->nodes[level] == b &&
132                     btree_node_locked_type(linked, level) == type) {
133                         six_lock_increment(&b->lock, type);
134                         return true;
135                 }
136
137         /*
138          * Must lock btree nodes in key order - this case happens when locking
139          * the prev sibling in btree node merging:
140          */
141         if (iter->nodes_locked &&
142             __ffs(iter->nodes_locked) == level &&
143             __btree_iter_cmp(iter->btree_id, pos, iter))
144                 return false;
145
146         for_each_linked_btree_iter(iter, linked) {
147                 if (!linked->nodes_locked)
148                         continue;
149
150                 /*
151                  * Can't block taking an intent lock if we have _any_ nodes read
152                  * locked:
153                  *
154                  * - Our read lock blocks another thread with an intent lock on
155                  *   the same node from getting a write lock, and thus from
156                  *   dropping its intent lock
157                  *
158                  * - And the other thread may have multiple nodes intent locked:
159                  *   both the node we want to intent lock, and the node we
160                  *   already have read locked - deadlock:
161                  */
162                 if (type == SIX_LOCK_intent &&
163                     linked->nodes_locked != linked->nodes_intent_locked) {
164                         linked->locks_want = max(linked->locks_want,
165                                                  iter->locks_want);
166                         return false;
167                 }
168
169                 /* We have to lock btree nodes in key order: */
170                 if (__btree_iter_cmp(iter->btree_id, pos, linked) < 0)
171                         return false;
172
173                 /*
174                  * Interior nodes must be locked before their descendants: if
175                  * another iterator has locked a possible descendant of the node
176                  * we're about to lock, it must hold the ancestor locks too:
177                  */
178                 if (linked->btree_id == iter->btree_id &&
179                     level > __fls(linked->nodes_locked)) {
180                         linked->locks_want = max(linked->locks_want,
181                                                  iter->locks_want);
182                         return false;
183                 }
184         }
185
186         six_lock_type(&b->lock, type);
187         return true;
188 }
189
190 /* Btree iterator locking: */
191
192
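/*
 * Drop locks at levels above locks_want: a level above the iterator's current
 * level is unlocked outright, while a level at or below it is only downgraded
 * from an intent lock to a read lock.
 */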
193 static void btree_iter_drop_extra_locks(struct btree_iter *iter)
194 {
195         unsigned l;
196
197         while (iter->nodes_locked &&
198                (l = __fls(iter->nodes_locked)) > iter->locks_want) {
199                 if (!btree_node_locked(iter, l))
200                         panic("l %u nodes_locked %u\n", l, iter->nodes_locked);
201
202                 if (l > iter->level) {
203                         btree_node_unlock(iter, l);
204                 } else if (btree_node_intent_locked(iter, l)) {
205                         six_lock_downgrade(&iter->nodes[l]->lock);
206                         iter->nodes_intent_locked ^= 1 << l;
207                 }
208         }
209 }
210
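/*
 * Change how many levels this iterator wants intent locked. Locks we no longer
 * want are dropped (on linked iterators too, if they wanted more than the new
 * value), then we try to relock the levels we now want held: returns false if
 * that fails and the caller needs to re-traverse.
 */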
211 bool __bch2_btree_iter_set_locks_want(struct btree_iter *iter,
212                                      unsigned new_locks_want)
213 {
214         struct btree_iter *linked;
215         unsigned l;
216
217         /* Drop locks we don't want anymore: */
218         if (new_locks_want < iter->locks_want)
219                 for_each_linked_btree_iter(iter, linked)
220                         if (linked->locks_want > new_locks_want) {
221                                 linked->locks_want = max_t(unsigned, 1,
222                                                            new_locks_want);
223                                 btree_iter_drop_extra_locks(linked);
224                         }
225
226         iter->locks_want = new_locks_want;
227         btree_iter_drop_extra_locks(iter);
228
229         for (l = iter->level; l < iter->locks_want && iter->nodes[l]; l++)
230                 if (!bch2_btree_node_relock(iter, l))
231                         goto fail;
232
233         return true;
234 fail:
235         /*
236          * Just an optimization: ancestor nodes must be locked before child
237          * nodes, so set locks_want on iterators that might lock ancestors
238          * before us to avoid getting -EINTR later:
239          */
240         for_each_linked_btree_iter(iter, linked)
241                 if (linked->btree_id == iter->btree_id &&
242                     btree_iter_cmp(linked, iter) <= 0)
243                         linked->locks_want = max_t(unsigned, linked->locks_want,
244                                                    new_locks_want);
245         return false;
246 }
247
248 static int __bch2_btree_iter_unlock(struct btree_iter *iter)
249 {
250         BUG_ON(iter->error == -EINTR);
251
252         while (iter->nodes_locked)
253                 btree_node_unlock(iter, __ffs(iter->nodes_locked));
254
255         return iter->error;
256 }
257
258 int bch2_btree_iter_unlock(struct btree_iter *iter)
259 {
260         struct btree_iter *linked;
261
262         for_each_linked_btree_iter(iter, linked)
263                 __bch2_btree_iter_unlock(linked);
264         return __bch2_btree_iter_unlock(iter);
265 }
266
267 /* Btree iterator: */
268
269 #ifdef CONFIG_BCACHEFS_DEBUG
270
271 static void __bch2_btree_iter_verify(struct btree_iter *iter,
272                                     struct btree *b)
273 {
274         struct btree_node_iter *node_iter = &iter->node_iters[b->level];
275         struct btree_node_iter tmp = *node_iter;
276         struct bkey_packed *k;
277
278         bch2_btree_node_iter_verify(node_iter, b);
279
280         /*
281          * For interior nodes, the iterator will have skipped past
282          * deleted keys:
283          */
284         k = b->level
285                 ? bch2_btree_node_iter_prev(&tmp, b)
286                 : bch2_btree_node_iter_prev_all(&tmp, b);
287         if (k && btree_iter_pos_cmp_packed(b, &iter->pos, k,
288                                            iter->is_extents)) {
289                 char buf[100];
290                 struct bkey uk = bkey_unpack_key(b, k);
291
292                 bch2_bkey_to_text(buf, sizeof(buf), &uk);
293                 panic("prev key should be before iter pos:\n%s\n%llu:%llu\n",
294                       buf, iter->pos.inode, iter->pos.offset);
295         }
296
297         k = bch2_btree_node_iter_peek_all(node_iter, b);
298         if (k && !btree_iter_pos_cmp_packed(b, &iter->pos, k,
299                                             iter->is_extents)) {
300                 char buf[100];
301                 struct bkey uk = bkey_unpack_key(b, k);
302
303                 bch2_bkey_to_text(buf, sizeof(buf), &uk);
304                 panic("next key should not be before iter pos:\n%llu:%llu\n%s\n",
305                       iter->pos.inode, iter->pos.offset, buf);
306         }
307 }
308
309 void bch2_btree_iter_verify(struct btree_iter *iter, struct btree *b)
310 {
311         struct btree_iter *linked;
312
313         if (iter->nodes[b->level] == b)
314                 __bch2_btree_iter_verify(iter, b);
315
316         for_each_linked_btree_node(iter, b, linked)
317                 __bch2_btree_iter_verify(linked, b);
318 }
319
320 #endif
321
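/*
 * A key was inserted or overwritten at @where in bset @t (@clobber_u64s old
 * u64s replaced by @new_u64s new ones): fix up @node_iter's saved offsets so
 * it still points at the correct position in @b.
 */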
322 static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
323                                       struct btree *b,
324                                       struct btree_node_iter *node_iter,
325                                       struct bset_tree *t,
326                                       struct bkey_packed *where,
327                                       unsigned clobber_u64s,
328                                       unsigned new_u64s)
329 {
330         const struct bkey_packed *end = btree_bkey_last(b, t);
331         struct btree_node_iter_set *set;
332         unsigned offset = __btree_node_key_to_offset(b, where);
333         int shift = new_u64s - clobber_u64s;
334         unsigned old_end = (int) __btree_node_key_to_offset(b, end) - shift;
335
336         btree_node_iter_for_each(node_iter, set)
337                 if (set->end == old_end)
338                         goto found;
339
340         /* didn't find the bset in the iterator - might have to re-add it: */
341         if (new_u64s &&
342             btree_iter_pos_cmp_packed(b, &iter->pos, where,
343                                       iter->is_extents))
344                 bch2_btree_node_iter_push(node_iter, b, where, end);
345         return;
346 found:
347         set->end = (int) set->end + shift;
348
349         /* Iterator hasn't gotten to the key that changed yet: */
350         if (set->k < offset)
351                 return;
352
353         if (new_u64s &&
354             btree_iter_pos_cmp_packed(b, &iter->pos, where,
355                                       iter->is_extents)) {
356                 set->k = offset;
357                 bch2_btree_node_iter_sort(node_iter, b);
358         } else if (set->k < offset + clobber_u64s) {
359                 set->k = offset + new_u64s;
360                 if (set->k == set->end)
361                         *set = node_iter->data[--node_iter->used];
362                 bch2_btree_node_iter_sort(node_iter, b);
363         } else {
364                 set->k = (int) set->k + shift;
365         }
366
367         /*
368          * Interior nodes are special because iterators for interior nodes don't
369          * obey the usual invariants regarding the iterator position:
370          *
371          * We may have whiteouts that compare greater than the iterator
372          * position, and logically should be in the iterator, but that we
373          * skipped past to find the first live key greater than the iterator
374          * position. This becomes an issue when we insert a new key that is
375          * greater than the current iterator position, but smaller than the
376          * whiteouts we've already skipped past - this happens in the course of
377          * a btree split.
378          *
379          * We have to rewind the iterator back to before those whiteouts here,
380          * else bch2_btree_node_iter_prev() is not going to work and who knows what
381          * else would happen. And we have to do it manually, because here we've
382          * already done the insert and the iterator is currently inconsistent:
383          *
384          * We've got multiple competing invariants, here - we have to be careful
385          * about rewinding iterators for interior nodes, because they should
386          * always point to the key for the child node the btree iterator points
387          * to.
388          */
389         if (b->level && new_u64s && !bkey_deleted(where) &&
390             btree_iter_pos_cmp_packed(b, &iter->pos, where,
391                                       iter->is_extents)) {
392                 struct bset_tree *t;
393                 struct bkey_packed *k;
394
395                 for_each_bset(b, t) {
396                         if (bch2_bkey_to_bset(b, where) == t)
397                                 continue;
398
399                         k = bch2_bkey_prev_all(b, t,
400                                 bch2_btree_node_iter_bset_pos(node_iter, b, t));
401                         if (k &&
402                             __btree_node_iter_cmp(node_iter, b,
403                                                   k, where) > 0) {
404                                 struct btree_node_iter_set *set;
405                                 unsigned offset =
406                                         __btree_node_key_to_offset(b, bkey_next(k));
407
408                                 btree_node_iter_for_each(node_iter, set)
409                                         if (set->k == offset) {
410                                                 set->k = __btree_node_key_to_offset(b, k);
411                                                 bch2_btree_node_iter_sort(node_iter, b);
412                                                 goto next_bset;
413                                         }
414
415                                 bch2_btree_node_iter_push(node_iter, b, k,
416                                                 btree_bkey_last(b, t));
417                         }
418 next_bset:
419                         t = t; /* a label must be followed by a statement */
420                 }
421         }
422 }
423
424 void bch2_btree_node_iter_fix(struct btree_iter *iter,
425                              struct btree *b,
426                              struct btree_node_iter *node_iter,
427                              struct bset_tree *t,
428                              struct bkey_packed *where,
429                              unsigned clobber_u64s,
430                              unsigned new_u64s)
431 {
432         struct btree_iter *linked;
433
434         if (node_iter != &iter->node_iters[b->level])
435                 __bch2_btree_node_iter_fix(iter, b, node_iter, t,
436                                           where, clobber_u64s, new_u64s);
437
438         if (iter->nodes[b->level] == b)
439                 __bch2_btree_node_iter_fix(iter, b,
440                                           &iter->node_iters[b->level], t,
441                                           where, clobber_u64s, new_u64s);
442
443         for_each_linked_btree_node(iter, b, linked)
444                 __bch2_btree_node_iter_fix(linked, b,
445                                           &linked->node_iters[b->level], t,
446                                           where, clobber_u64s, new_u64s);
447
448         /* interior node iterators are... special... */
449         if (!b->level)
450                 bch2_btree_iter_verify(iter, b);
451 }
452
453 /* peek_all() doesn't skip deleted keys */
454 static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter)
455 {
456         struct btree *b = iter->nodes[iter->level];
457         struct bkey_packed *k =
458                 bch2_btree_node_iter_peek_all(&iter->node_iters[iter->level], b);
459         struct bkey_s_c ret;
460
461         EBUG_ON(!btree_node_locked(iter, iter->level));
462
463         if (!k)
464                 return bkey_s_c_null;
465
466         ret = bkey_disassemble(b, k, &iter->k);
467
468         if (debug_check_bkeys(iter->c))
469                 bch2_bkey_debugcheck(iter->c, b, ret);
470
471         return ret;
472 }
473
474 static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter)
475 {
476         struct btree *b = iter->nodes[iter->level];
477         struct bkey_packed *k =
478                 bch2_btree_node_iter_peek(&iter->node_iters[iter->level], b);
479         struct bkey_s_c ret;
480
481         EBUG_ON(!btree_node_locked(iter, iter->level));
482
483         if (!k)
484                 return bkey_s_c_null;
485
486         ret = bkey_disassemble(b, k, &iter->k);
487
488         if (debug_check_bkeys(iter->c))
489                 bch2_bkey_debugcheck(iter->c, b, ret);
490
491         return ret;
492 }
493
494 static inline void __btree_iter_advance(struct btree_iter *iter)
495 {
496         bch2_btree_node_iter_advance(&iter->node_iters[iter->level],
497                                     iter->nodes[iter->level]);
498 }
499
500 /*
501  * Verify that iterator for parent node points to child node:
502  */
503 static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
504 {
505         bool parent_locked;
506         struct bkey_packed *k;
507
508         if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG) ||
509             !iter->nodes[b->level + 1])
510                 return;
511
512         parent_locked = btree_node_locked(iter, b->level + 1);
513
514         if (!bch2_btree_node_relock(iter, b->level + 1))
515                 return;
516
517         k = bch2_btree_node_iter_peek_all(&iter->node_iters[b->level + 1],
518                                          iter->nodes[b->level + 1]);
519         if (!k ||
520             bkey_deleted(k) ||
521             bkey_cmp_left_packed(iter->nodes[b->level + 1],
522                                  k, &b->key.k.p)) {
523                 char buf[100];
524                 struct bkey uk = bkey_unpack_key(b, k);
525
526                 bch2_bkey_to_text(buf, sizeof(buf), &uk);
527                 panic("parent iter doesn't point to new node:\n%s\n%llu:%llu\n",
528                       buf, b->key.k.p.inode, b->key.k.p.offset);
529         }
530
531         if (!parent_locked)
532                 btree_node_unlock(iter, b->level + 1);
533 }
534
535 static inline void __btree_iter_init(struct btree_iter *iter,
536                                      struct btree *b)
537 {
538         bch2_btree_node_iter_init(&iter->node_iters[b->level], b,
539                                  iter->pos, iter->is_extents,
540                                  btree_node_is_extents(b));
541
542         /* Skip to first non-whiteout: */
543         if (b->level)
544                 bch2_btree_node_iter_peek(&iter->node_iters[b->level], b);
545 }
546
547 static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
548                                           struct btree *b)
549 {
550         return iter->btree_id == b->btree_id &&
551                 bkey_cmp(iter->pos, b->data->min_key) >= 0 &&
552                 btree_iter_pos_cmp(iter->pos, &b->key.k, iter->is_extents);
553 }
554
555 static inline void btree_iter_node_set(struct btree_iter *iter,
556                                        struct btree *b)
557 {
558         btree_iter_verify_new_node(iter, b);
559
560         EBUG_ON(!btree_iter_pos_in_node(iter, b));
561         EBUG_ON(b->lock.state.seq & 1);
562
563         iter->lock_seq[b->level] = b->lock.state.seq;
564         iter->nodes[b->level] = b;
565         __btree_iter_init(iter, b);
566 }
567
568 /*
569  * A btree node is being replaced - update the iterator to point to the new
570  * node:
571  */
572 bool bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
573 {
574         struct btree_iter *linked;
575
576         for_each_linked_btree_iter(iter, linked)
577                 if (btree_iter_pos_in_node(linked, b)) {
578                         /*
579                          * bch2_btree_iter_node_drop() has already been called -
580                          * the old node we're replacing has already been
581                          * unlocked and the pointer invalidated
582                          */
583                         BUG_ON(btree_node_locked(linked, b->level));
584
585                         /*
586                          * If @linked wants this node read locked, we don't want
587                          * to actually take the read lock now because it's not
588                          * legal to hold read locks on other nodes while we take
589                          * write locks, so the journal can make forward
590                          * progress...
591                          *
592                          * Instead, btree_iter_node_set() sets things up so
593                          * bch2_btree_node_relock() will succeed:
594                          */
595
596                         if (btree_want_intent(linked, b->level)) {
597                                 six_lock_increment(&b->lock, SIX_LOCK_intent);
598                                 mark_btree_node_intent_locked(linked, b->level);
599                         }
600
601                         btree_iter_node_set(linked, b);
602                 }
603
604         if (!btree_iter_pos_in_node(iter, b)) {
605                 six_unlock_intent(&b->lock);
606                 return false;
607         }
608
609         mark_btree_node_intent_locked(iter, b->level);
610         btree_iter_node_set(iter, b);
611         return true;
612 }
613
614 void bch2_btree_iter_node_drop_linked(struct btree_iter *iter, struct btree *b)
615 {
616         struct btree_iter *linked;
617         unsigned level = b->level;
618
619         for_each_linked_btree_iter(iter, linked)
620                 if (linked->nodes[level] == b) {
621                         btree_node_unlock(linked, level);
622                         linked->nodes[level] = BTREE_ITER_NOT_END;
623                 }
624 }
625
626 void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
627 {
628         unsigned level = b->level;
629
630         if (iter->nodes[level] == b) {
631                 BUG_ON(b->lock.state.intent_lock != 1);
632                 btree_node_unlock(iter, level);
633                 iter->nodes[level] = BTREE_ITER_NOT_END;
634         }
635 }
636
637 /*
638  * A btree node has been modified in such a way as to invalidate iterators - fix
639  * them:
640  */
641 void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
642 {
643         struct btree_iter *linked;
644
645         for_each_linked_btree_node(iter, b, linked)
646                 __btree_iter_init(linked, b);
647         __btree_iter_init(iter, b);
648 }
649
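/*
 * Start a traversal from the top by locking the btree root: the root can be
 * freed or replaced while we wait for the lock, so loop until the node we
 * locked is still the current root. If the root is shallower than the depth
 * the caller wants, there's nothing to lock at that depth and the node
 * pointer is left NULL.
 */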
650 static inline int btree_iter_lock_root(struct btree_iter *iter,
651                                        unsigned depth_want)
652 {
653         struct bch_fs *c = iter->c;
654         struct btree *b;
655         enum six_lock_type lock_type;
656         unsigned i;
657
658         EBUG_ON(iter->nodes_locked);
659
660         while (1) {
661                 b = READ_ONCE(c->btree_roots[iter->btree_id].b);
662                 iter->level = READ_ONCE(b->level);
663
664                 if (unlikely(iter->level < depth_want)) {
665                         /*
666                          * the root is at a lower depth than the depth we want:
667                          * we've reached the end of the btree, or we're walking nodes
668                          * greater than some depth and there are no nodes >=
669                          * that depth
670                          */
671                         iter->level = depth_want;
672                         iter->nodes[iter->level] = NULL;
673                         return 0;
674                 }
675
676                 lock_type = btree_lock_want(iter, iter->level);
677                 if (unlikely(!btree_node_lock(b, POS_MAX, iter->level,
678                                               iter, lock_type)))
679                         return -EINTR;
680
681                 if (likely(b == c->btree_roots[iter->btree_id].b &&
682                            b->level == iter->level &&
683                            !race_fault())) {
684                         for (i = 0; i < iter->level; i++)
685                                 iter->nodes[i] = BTREE_ITER_NOT_END;
686                         iter->nodes[iter->level] = b;
687
688                         mark_btree_node_locked(iter, iter->level, lock_type);
689                         btree_iter_node_set(iter, b);
690                         return 0;
691
692                 }
693
694                 six_unlock_type(&b->lock, lock_type);
695         }
696 }
697
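/*
 * Descend one level: copy the key that points to the child node at the
 * current position into a stack buffer, then get and lock the child node and
 * make it the iterator's current node.
 */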
698 static inline int btree_iter_down(struct btree_iter *iter)
699 {
700         struct btree *b;
701         struct bkey_s_c k = __btree_iter_peek(iter);
702         unsigned level = iter->level - 1;
703         enum six_lock_type lock_type = btree_lock_want(iter, level);
704         BKEY_PADDED(k) tmp;
705
706         bkey_reassemble(&tmp.k, k);
707
708         b = bch2_btree_node_get(iter, &tmp.k, level, lock_type);
709         if (unlikely(IS_ERR(b)))
710                 return PTR_ERR(b);
711
712         iter->level = level;
713         mark_btree_node_locked(iter, level, lock_type);
714         btree_iter_node_set(iter, b);
715         return 0;
716 }
717
718 static void btree_iter_up(struct btree_iter *iter)
719 {
720         btree_node_unlock(iter, iter->level++);
721 }
722
723 int __must_check __bch2_btree_iter_traverse(struct btree_iter *);
724
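/*
 * Slow path for traversal errors: unlock everything; on -ENOMEM, wait until
 * we can reserve a btree node to cannibalize; then redo the traversal of
 * every linked iterator in sorted key order, so the retries can't deadlock
 * against each other. -EIO is stashed in the iterator and reported when it's
 * unlocked.
 */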
725 static int btree_iter_traverse_error(struct btree_iter *iter, int ret)
726 {
727         struct bch_fs *c = iter->c;
728         struct btree_iter *linked, *sorted_iters, **i;
729 retry_all:
730         bch2_btree_iter_unlock(iter);
731
732         if (ret != -ENOMEM && ret != -EINTR)
733                 goto io_error;
734
735         if (ret == -ENOMEM) {
736                 struct closure cl;
737
738                 closure_init_stack(&cl);
739
740                 do {
741                         ret = bch2_btree_node_cannibalize_lock(c, &cl);
742                         closure_sync(&cl);
743                 } while (ret);
744         }
745
746         /*
747          * Linked iters are normally a circular singly linked list - break cycle
748          * while we sort them:
749          */
750         linked = iter->next;
751         iter->next = NULL;
752         sorted_iters = NULL;
753
754         while (linked) {
755                 iter = linked;
756                 linked = linked->next;
757
758                 i = &sorted_iters;
759                 while (*i && btree_iter_cmp(iter, *i) > 0)
760                         i = &(*i)->next;
761
762                 iter->next = *i;
763                 *i = iter;
764         }
765
766         /* Make list circular again: */
767         iter = sorted_iters;
768         while (iter->next)
769                 iter = iter->next;
770         iter->next = sorted_iters;
771
772         /* Now, redo traversals in correct order: */
773
774         iter = sorted_iters;
775         do {
776 retry:
777                 ret = __bch2_btree_iter_traverse(iter);
778                 if (unlikely(ret)) {
779                         if (ret == -EINTR)
780                                 goto retry;
781                         goto retry_all;
782                 }
783
784                 iter = iter->next;
785         } while (iter != sorted_iters);
786
787         ret = btree_iter_linked(iter) ? -EINTR : 0;
788 out:
789         bch2_btree_node_cannibalize_unlock(c);
790         return ret;
791 io_error:
792         BUG_ON(ret != -EIO);
793
794         iter->error = ret;
795         iter->nodes[iter->level] = NULL;
796         goto out;
797 }
798
799 /*
800  * This is the main state machine for walking down the btree - walks down to a
801  * specified depth
802  *
803  * Returns 0 on success, -EIO on error (error reading in a btree node).
804  *
805  * On error, caller (peek_node()/peek_key()) must return NULL; the error is
806  * stashed in the iterator and returned from bch2_btree_iter_unlock().
807  */
808 int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
809 {
810         unsigned depth_want = iter->level;
811
812         /* make sure we have all the intent locks we need - ugh */
813         if (unlikely(iter->nodes[iter->level] &&
814                      iter->level + 1 < iter->locks_want)) {
815                 unsigned i;
816
817                 for (i = iter->level + 1;
818                      i < iter->locks_want && iter->nodes[i];
819                      i++)
820                         if (!bch2_btree_node_relock(iter, i)) {
821                                 while (iter->nodes[iter->level] &&
822                                        iter->level + 1 < iter->locks_want)
823                                         btree_iter_up(iter);
824                                 break;
825                         }
826         }
827
828         /*
829          * If the current node isn't locked, go up until we have a locked node
830          * or run out of nodes:
831          */
832         while (iter->nodes[iter->level] &&
833                !(is_btree_node(iter, iter->level) &&
834                  bch2_btree_node_relock(iter, iter->level) &&
835                  btree_iter_pos_cmp(iter->pos,
836                                     &iter->nodes[iter->level]->key.k,
837                                     iter->is_extents)))
838                 btree_iter_up(iter);
839
840         /*
841          * If we've got a btree node locked (i.e. we aren't about to relock the
842          * root) - advance its node iterator if necessary:
843          */
844         if (iter->nodes[iter->level]) {
845                 struct bkey_s_c k;
846
847                 while ((k = __btree_iter_peek_all(iter)).k &&
848                        !btree_iter_pos_cmp(iter->pos, k.k, iter->is_extents))
849                         __btree_iter_advance(iter);
850         }
851
852         /*
853          * Note: iter->nodes[iter->level] may be temporarily NULL here - that
854          * would indicate to other code that we got to the end of the btree,
855          * here it indicates that relocking the root failed - it's critical that
856          * btree_iter_lock_root() comes next and that it can't fail
857          */
858         while (iter->level > depth_want) {
859                 int ret = iter->nodes[iter->level]
860                         ? btree_iter_down(iter)
861                         : btree_iter_lock_root(iter, depth_want);
862                 if (unlikely(ret)) {
863                         iter->level = depth_want;
864                         return ret;
865                 }
866         }
867
868         return 0;
869 }
870
871 int __must_check bch2_btree_iter_traverse(struct btree_iter *iter)
872 {
873         int ret;
874
875         if (unlikely(!iter->nodes[iter->level]))
876                 return 0;
877
878         iter->at_end_of_leaf = false;
879
880         ret = __bch2_btree_iter_traverse(iter);
881         if (unlikely(ret))
882                 ret = btree_iter_traverse_error(iter, ret);
883
884         return ret;
885 }
886
887 /* Iterate across nodes (leaf and interior nodes) */
888
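/*
 * Return the btree node at the iterator's current position and level, or NULL
 * on error or at the end of the btree; the iterator's position is advanced to
 * the node's max key.
 */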
889 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
890 {
891         struct btree *b;
892         int ret;
893
894         EBUG_ON(iter->is_extents);
895
896         ret = bch2_btree_iter_traverse(iter);
897         if (ret)
898                 return NULL;
899
900         b = iter->nodes[iter->level];
901
902         if (b) {
903                 EBUG_ON(bkey_cmp(b->key.k.p, iter->pos) < 0);
904                 iter->pos = b->key.k.p;
905         }
906
907         return b;
908 }
909
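/*
 * Advance to the next btree node at depth @depth: go up a level, and if we
 * haven't exhausted the parent node, bump the position past the node we just
 * covered and walk back down.
 */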
910 struct btree *bch2_btree_iter_next_node(struct btree_iter *iter, unsigned depth)
911 {
912         struct btree *b;
913         int ret;
914
915         EBUG_ON(iter->is_extents);
916
917         btree_iter_up(iter);
918
919         if (!iter->nodes[iter->level])
920                 return NULL;
921
922         /* parent node usually won't be locked: redo traversal if necessary */
923         ret = bch2_btree_iter_traverse(iter);
924         if (ret)
925                 return NULL;
926
927         b = iter->nodes[iter->level];
928         if (!b)
929                 return b;
930
931         if (bkey_cmp(iter->pos, b->key.k.p) < 0) {
932                 /* Haven't gotten to the end of the parent node: */
933
934                 /* ick: */
935                 iter->pos       = iter->btree_id == BTREE_ID_INODES
936                         ? btree_type_successor(iter->btree_id, iter->pos)
937                         : bkey_successor(iter->pos);
938                 iter->level     = depth;
939
940                 ret = bch2_btree_iter_traverse(iter);
941                 if (ret)
942                         return NULL;
943
944                 b = iter->nodes[iter->level];
945         }
946
947         iter->pos = b->key.k.p;
948
949         return b;
950 }
951
952 /* Iterate across keys (in leaf nodes only) */
953
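/*
 * Advance to @new_pos without re-traversing: the new position must not be
 * before the current one, and must still be within the current leaf node.
 * Sets at_end_of_leaf if there is nothing left in the node at or after the
 * new position.
 */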
954 void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_pos)
955 {
956         struct btree *b = iter->nodes[0];
957         struct btree_node_iter *node_iter = &iter->node_iters[0];
958         struct bkey_packed *k;
959
960         EBUG_ON(iter->level != 0);
961         EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0);
962         EBUG_ON(!btree_node_locked(iter, 0));
963         EBUG_ON(bkey_cmp(new_pos, b->key.k.p) > 0);
964
965         while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
966                !btree_iter_pos_cmp_packed(b, &new_pos, k,
967                                           iter->is_extents))
968                 bch2_btree_node_iter_advance(node_iter, b);
969
970         if (!k &&
971             !btree_iter_pos_cmp(new_pos, &b->key.k, iter->is_extents))
972                 iter->at_end_of_leaf = true;
973
974         iter->pos = new_pos;
975 }
976
977 void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
978 {
979         EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0); /* XXX handle this */
980         iter->pos = new_pos;
981 }
982
983 void bch2_btree_iter_advance_pos(struct btree_iter *iter)
984 {
985         /*
986          * We use iter->k instead of iter->pos for extents: iter->pos will be
987          * equal to the start of the extent we returned, but we need to advance
988          * to the end of the extent we returned.
989          */
990         bch2_btree_iter_set_pos(iter,
991                 btree_type_successor(iter->btree_id, iter->k.p));
992 }
993
994 /* XXX: expensive */
995 void bch2_btree_iter_rewind(struct btree_iter *iter, struct bpos pos)
996 {
997         /* incapable of rewinding across nodes: */
998         BUG_ON(bkey_cmp(pos, iter->nodes[iter->level]->data->min_key) < 0);
999
1000         iter->pos = pos;
1001         __btree_iter_init(iter, iter->nodes[iter->level]);
1002 }
1003
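/*
 * Return the first key at or after the iterator's position, walking to the
 * next leaf node as needed. Returns bkey_s_c_null at the end of the btree;
 * errors are returned via bkey_s_c_err().
 */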
1004 struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
1005 {
1006         struct bkey_s_c k;
1007         int ret;
1008
1009         while (1) {
1010                 ret = bch2_btree_iter_traverse(iter);
1011                 if (unlikely(ret)) {
1012                         iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
1013                         return bkey_s_c_err(ret);
1014                 }
1015
1016                 k = __btree_iter_peek(iter);
1017                 if (likely(k.k)) {
1018                         /*
1019                          * iter->pos should always be equal to the key we just
1020                          * returned - except extents can straddle iter->pos:
1021                          */
1022                         if (!iter->is_extents ||
1023                             bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
1024                                 bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
1025                         return k;
1026                 }
1027
1028                 iter->pos = iter->nodes[0]->key.k.p;
1029
1030                 if (!bkey_cmp(iter->pos, POS_MAX)) {
1031                         iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
1032                         bch2_btree_iter_unlock(iter);
1033                         return bkey_s_c_null;
1034                 }
1035
1036                 iter->pos = btree_type_successor(iter->btree_id, iter->pos);
1037         }
1038 }
1039
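/*
 * Like bch2_btree_iter_peek(), but holes are returned explicitly: if there is
 * no key at the iterator's position, a deleted key is synthesized in iter->k -
 * zero size for non-extent btrees, sized to span the hole up to the next key
 * for extent btrees - and returned instead of skipping ahead.
 */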
1040 struct bkey_s_c bch2_btree_iter_peek_with_holes(struct btree_iter *iter)
1041 {
1042         struct bkey_s_c k;
1043         struct bkey n;
1044         int ret;
1045
1046         while (1) {
1047                 ret = bch2_btree_iter_traverse(iter);
1048                 if (unlikely(ret)) {
1049                         iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
1050                         return bkey_s_c_err(ret);
1051                 }
1052
1053                 k = __btree_iter_peek_all(iter);
1054 recheck:
1055                 if (!k.k || bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0) {
1056                         /* hole */
1057                         bkey_init(&n);
1058                         n.p = iter->pos;
1059
1060                         if (iter->is_extents) {
1061                                 if (n.p.offset == KEY_OFFSET_MAX) {
1062                                         iter->pos = bkey_successor(iter->pos);
1063                                         goto recheck;
1064                                 }
1065
1066                                 if (!k.k)
1067                                         k.k = &iter->nodes[0]->key.k;
1068
1069                                 bch2_key_resize(&n,
1070                                        min_t(u64, KEY_SIZE_MAX,
1071                                              (k.k->p.inode == n.p.inode
1072                                               ? bkey_start_offset(k.k)
1073                                               : KEY_OFFSET_MAX) -
1074                                              n.p.offset));
1075
1076                                 EBUG_ON(!n.size);
1077                         }
1078
1079                         iter->k = n;
1080                         return (struct bkey_s_c) { &iter->k, NULL };
1081                 } else if (!bkey_deleted(k.k)) {
1082                         return k;
1083                 } else {
1084                         __btree_iter_advance(iter);
1085                 }
1086         }
1087 }
1088
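/*
 * Initialize an iterator at @pos in @btree_id: @depth is the level to iterate
 * at (0 = leaf keys), and @locks_want is how many levels, from the bottom up,
 * should be intent locked rather than read locked.
 *
 * Illustrative sketch of walking a btree with this interface (assumes a plain
 * read-only leaf iterator; real callers typically use the wrappers declared
 * in btree_iter.h):
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret;
 *
 *	__bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0, 0);
 *
 *	while ((k = bch2_btree_iter_peek(&iter)).k && !IS_ERR(k.k)) {
 *		(use k here)
 *		bch2_btree_iter_advance_pos(&iter);
 *	}
 *
 *	ret = bch2_btree_iter_unlock(&iter);
 */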
1089 void __bch2_btree_iter_init(struct btree_iter *iter, struct bch_fs *c,
1090                            enum btree_id btree_id, struct bpos pos,
1091                            unsigned locks_want, unsigned depth)
1092 {
1093         iter->level                     = depth;
1094         /* bch2_bkey_ops isn't used much, this would be a cache miss */
1095         /* iter->is_extents             = bch2_bkey_ops[btree_id]->is_extents; */
1096         iter->is_extents                = btree_id == BTREE_ID_EXTENTS;
1097         iter->nodes_locked              = 0;
1098         iter->nodes_intent_locked       = 0;
1099         iter->locks_want                = min(locks_want, BTREE_MAX_DEPTH);
1100         iter->btree_id                  = btree_id;
1101         iter->at_end_of_leaf            = 0;
1102         iter->error                     = 0;
1103         iter->c                         = c;
1104         iter->pos                       = pos;
1105         memset(iter->nodes, 0, sizeof(iter->nodes));
1106         iter->nodes[iter->level]        = BTREE_ITER_NOT_END;
1107         iter->next                      = iter;
1108
1109         prefetch(c->btree_roots[btree_id].b);
1110 }
1111
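/*
 * Add @new to @iter's set of linked iterators (a circular singly-linked
 * list). Linked iterators coordinate their locking so they can be used
 * together without deadlocking against each other.
 */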
1112 void bch2_btree_iter_link(struct btree_iter *iter, struct btree_iter *new)
1113 {
1114         BUG_ON(btree_iter_linked(new));
1115
1116         new->next = iter->next;
1117         iter->next = new;
1118
1119         if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
1120                 unsigned nr_iters = 1;
1121
1122                 for_each_linked_btree_iter(iter, new)
1123                         nr_iters++;
1124
1125                 BUG_ON(nr_iters > SIX_LOCK_MAX_RECURSE);
1126         }
1127 }
1128
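/*
 * Copy @src into @dst, preserving @dst's link pointer. @dst's old locks are
 * dropped and it starts out with nothing locked; bch2_btree_node_relock() can
 * re-take locks using the copied lock sequence numbers.
 */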
1129 void bch2_btree_iter_copy(struct btree_iter *dst, struct btree_iter *src)
1130 {
1131         bch2_btree_iter_unlock(dst);
1132         memcpy(dst, src, offsetof(struct btree_iter, next));
1133         dst->nodes_locked = dst->nodes_intent_locked = 0;
1134 }