
#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "debug.h"
#include "extents.h"

#include <linux/prefetch.h>
#include <trace/events/bcachefs.h>

#define BTREE_ITER_NOT_END      ((struct btree *) 1)

static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
{
        return iter->nodes[l] && iter->nodes[l] != BTREE_ITER_NOT_END;
}

/* Btree node locking: */

/*
 * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
 * succeed:
 */
void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
{
        struct btree_iter *linked;

        EBUG_ON(iter->nodes[b->level] != b);
        EBUG_ON(iter->lock_seq[b->level] + 1 != b->lock.state.seq);

        for_each_linked_btree_node(iter, b, linked)
                linked->lock_seq[b->level] += 2;

        iter->lock_seq[b->level] += 2;

        six_unlock_write(&b->lock);
}

void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
{
        struct btree_iter *linked;
        unsigned readers = 0;

        EBUG_ON(iter->nodes[b->level] != b);
        EBUG_ON(iter->lock_seq[b->level] != b->lock.state.seq);

        if (six_trylock_write(&b->lock))
                return;

        for_each_linked_btree_iter(iter, linked)
                if (linked->nodes[b->level] == b &&
                    btree_node_read_locked(linked, b->level))
                        readers++;

        if (likely(!readers)) {
                six_lock_write(&b->lock);
        } else {
                /*
                 * Must drop our read locks before calling six_lock_write() -
                 * six_unlock() won't do wakeups until the reader count
                 * goes to 0, and it's safe because we have the node intent
                 * locked:
                 */
                atomic64_sub(__SIX_VAL(read_lock, readers),
                             &b->lock.state.counter);
                six_lock_write(&b->lock);
                atomic64_add(__SIX_VAL(read_lock, readers),
                             &b->lock.state.counter);
        }
}

bool bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
{
        struct btree_iter *linked;
        struct btree *b = iter->nodes[level];
        enum btree_node_locked_type want = btree_lock_want(iter, level);
        enum btree_node_locked_type have = btree_node_locked_type(iter, level);

        if (want == have)
                return true;

        if (!is_btree_node(iter, level))
                return false;

        if (race_fault())
                return false;

        if (have != BTREE_NODE_UNLOCKED
            ? six_trylock_convert(&b->lock, have, want)
            : six_relock_type(&b->lock, want, iter->lock_seq[level]))
                goto success;

        for_each_linked_btree_iter(iter, linked)
                if (linked->nodes[level] == b &&
                    btree_node_locked_type(linked, level) == want &&
                    iter->lock_seq[level] == b->lock.state.seq) {
                        btree_node_unlock(iter, level);
                        six_lock_increment(&b->lock, want);
                        goto success;
                }

        return false;
success:
        mark_btree_node_unlocked(iter, level);
        mark_btree_node_locked(iter, level, want);
        return true;
}

/* Slowpath: */
bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
                            unsigned level,
                            struct btree_iter *iter,
                            enum six_lock_type type)
{
        struct btree_iter *linked;

        /* Can't have children locked before ancestors: */
        EBUG_ON(iter->nodes_locked && level > __ffs(iter->nodes_locked));

        /*
         * Can't hold any read locks while we block taking an intent lock - see
         * below for reasoning, and we should have already dropped any read
         * locks in the current iterator
         */
        EBUG_ON(type == SIX_LOCK_intent &&
                iter->nodes_locked != iter->nodes_intent_locked);

        for_each_linked_btree_iter(iter, linked)
                if (linked->nodes[level] == b &&
                    btree_node_locked_type(linked, level) == type) {
                        six_lock_increment(&b->lock, type);
                        return true;
                }

        /*
         * Must lock btree nodes in key order - this case happens when locking
         * the prev sibling in btree node merging:
         */
        if (iter->nodes_locked &&
            __ffs(iter->nodes_locked) == level &&
            __btree_iter_cmp(iter->btree_id, pos, iter))
                return false;

        for_each_linked_btree_iter(iter, linked) {
                if (!linked->nodes_locked)
                        continue;

                /*
                 * Can't block taking an intent lock if we have _any_ nodes read
                 * locked:
                 *
                 * - Our read lock blocks another thread with an intent lock on
                 *   the same node from getting a write lock, and thus from
                 *   dropping its intent lock
                 *
                 * - And the other thread may have multiple nodes intent locked:
                 *   both the node we want to intent lock, and the node we
                 *   already have read locked - deadlock:
                 */
                if (type == SIX_LOCK_intent &&
                    linked->nodes_locked != linked->nodes_intent_locked) {
                        linked->locks_want = max_t(unsigned,
                                                   linked->locks_want,
                                                   iter->locks_want);
                        return false;
                }

                /* We have to lock btree nodes in key order: */
                if (__btree_iter_cmp(iter->btree_id, pos, linked) < 0)
                        return false;

                /*
                 * Interior nodes must be locked before their descendants: if
                 * another iterator has possibly locked descendants of the node
                 * we're about to lock, it must have the ancestors locked too:
                 */
                if (linked->btree_id == iter->btree_id &&
                    level > __fls(linked->nodes_locked)) {
                        linked->locks_want = max_t(unsigned,
                                                   linked->locks_want,
                                                   iter->locks_want);
                        return false;
                }
        }

        six_lock_type(&b->lock, type);
        return true;
}

/* Btree iterator locking: */

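/*
 * Drop locks on levels above what the iterator wants; at or below the
 * iterator's current level, intent locks are downgraded to read locks rather
 * than dropped:
 */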
static void btree_iter_drop_extra_locks(struct btree_iter *iter)
{
        unsigned l;

        while (iter->nodes_locked &&
               (l = __fls(iter->nodes_locked)) > iter->locks_want) {
                if (!btree_node_locked(iter, l))
                        panic("l %u nodes_locked %u\n", l, iter->nodes_locked);

                if (l > iter->level) {
                        btree_node_unlock(iter, l);
                } else if (btree_node_intent_locked(iter, l)) {
                        six_lock_downgrade(&iter->nodes[l]->lock);
                        iter->nodes_intent_locked ^= 1 << l;
                }
        }
}

bool __bch2_btree_iter_set_locks_want(struct btree_iter *iter,
                                      unsigned new_locks_want)
{
        struct btree_iter *linked;
        unsigned l;

        /* Drop locks we don't want anymore: */
        if (new_locks_want < iter->locks_want)
                for_each_linked_btree_iter(iter, linked)
                        if (linked->locks_want > new_locks_want) {
                                linked->locks_want = max_t(unsigned, 1,
                                                           new_locks_want);
                                btree_iter_drop_extra_locks(linked);
                        }

        iter->locks_want = new_locks_want;
        btree_iter_drop_extra_locks(iter);

        for (l = iter->level; l < iter->locks_want && iter->nodes[l]; l++)
                if (!bch2_btree_node_relock(iter, l))
                        goto fail;

        return true;
fail:
        /*
         * Just an optimization: ancestor nodes must be locked before child
         * nodes, so set locks_want on iterators that might lock ancestors
         * before us to avoid getting -EINTR later:
         */
        for_each_linked_btree_iter(iter, linked)
                if (linked->btree_id == iter->btree_id &&
                    btree_iter_cmp(linked, iter) <= 0)
                        linked->locks_want = max_t(unsigned, linked->locks_want,
                                                   new_locks_want);
        return false;
}

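/* Unlock all nodes held by @iter; linked iterators are left untouched: */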
static void __bch2_btree_iter_unlock(struct btree_iter *iter)
{
        iter->flags &= ~BTREE_ITER_UPTODATE;

        while (iter->nodes_locked)
                btree_node_unlock(iter, __ffs(iter->nodes_locked));
}

int bch2_btree_iter_unlock(struct btree_iter *iter)
{
        struct btree_iter *linked;

        for_each_linked_btree_iter(iter, linked)
                __bch2_btree_iter_unlock(linked);
        __bch2_btree_iter_unlock(iter);

        return iter->flags & BTREE_ITER_ERROR ? -EIO : 0;
}

/* Btree iterator: */

#ifdef CONFIG_BCACHEFS_DEBUG

static void __bch2_btree_iter_verify(struct btree_iter *iter,
                                     struct btree *b)
{
        struct btree_node_iter *node_iter = &iter->node_iters[b->level];
        struct btree_node_iter tmp = *node_iter;
        struct bkey_packed *k;

        bch2_btree_node_iter_verify(node_iter, b);

        /*
         * For interior nodes, the iterator will have skipped past
         * deleted keys:
         */
        k = b->level
                ? bch2_btree_node_iter_prev(&tmp, b)
                : bch2_btree_node_iter_prev_all(&tmp, b);
        if (k && btree_iter_pos_cmp_packed(b, &iter->pos, k,
                                iter->flags & BTREE_ITER_IS_EXTENTS)) {
                char buf[100];
                struct bkey uk = bkey_unpack_key(b, k);

                bch2_bkey_to_text(buf, sizeof(buf), &uk);
                panic("prev key should be before iter pos:\n%s\n%llu:%llu\n",
                      buf, iter->pos.inode, iter->pos.offset);
        }

        k = bch2_btree_node_iter_peek_all(node_iter, b);
        if (k && !btree_iter_pos_cmp_packed(b, &iter->pos, k,
                                iter->flags & BTREE_ITER_IS_EXTENTS)) {
                char buf[100];
                struct bkey uk = bkey_unpack_key(b, k);

                bch2_bkey_to_text(buf, sizeof(buf), &uk);
                panic("next key should be after iter pos:\n%llu:%llu\n%s\n",
                      iter->pos.inode, iter->pos.offset, buf);
        }
}

void bch2_btree_iter_verify(struct btree_iter *iter, struct btree *b)
{
        struct btree_iter *linked;

        if (iter->nodes[b->level] == b)
                __bch2_btree_iter_verify(iter, b);

        for_each_linked_btree_node(iter, b, linked)
                __bch2_btree_iter_verify(linked, b);
}

#endif

static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
                                       struct btree *b,
                                       struct btree_node_iter *node_iter,
                                       struct bset_tree *t,
                                       struct bkey_packed *where,
                                       unsigned clobber_u64s,
                                       unsigned new_u64s)
{
        const struct bkey_packed *end = btree_bkey_last(b, t);
        struct btree_node_iter_set *set;
        unsigned offset = __btree_node_key_to_offset(b, where);
        int shift = new_u64s - clobber_u64s;
        unsigned old_end = (int) __btree_node_key_to_offset(b, end) - shift;

        btree_node_iter_for_each(node_iter, set)
                if (set->end == old_end)
                        goto found;

        /* didn't find the bset in the iterator - might have to re-add it: */
        if (new_u64s &&
            btree_iter_pos_cmp_packed(b, &iter->pos, where,
                                      iter->flags & BTREE_ITER_IS_EXTENTS))
                bch2_btree_node_iter_push(node_iter, b, where, end);
        return;
found:
        set->end = (int) set->end + shift;

        /* Iterator hasn't gotten to the key that changed yet: */
        if (set->k < offset)
                return;

        if (new_u64s &&
            btree_iter_pos_cmp_packed(b, &iter->pos, where,
                                iter->flags & BTREE_ITER_IS_EXTENTS)) {
                set->k = offset;
                bch2_btree_node_iter_sort(node_iter, b);
        } else if (set->k < offset + clobber_u64s) {
                set->k = offset + new_u64s;
                if (set->k == set->end)
                        *set = node_iter->data[--node_iter->used];
                bch2_btree_node_iter_sort(node_iter, b);
        } else {
                set->k = (int) set->k + shift;
        }

        /*
         * Interior nodes are special because iterators for interior nodes don't
         * obey the usual invariants regarding the iterator position:
         *
         * We may have whiteouts that compare greater than the iterator
         * position, and logically should be in the iterator, but that we
         * skipped past to find the first live key greater than the iterator
         * position. This becomes an issue when we insert a new key that is
         * greater than the current iterator position, but smaller than the
         * whiteouts we've already skipped past - this happens in the course of
         * a btree split.
         *
         * We have to rewind the iterator back to before those whiteouts here,
         * else bch2_btree_node_iter_prev() is not going to work and who knows
         * what else would happen. And we have to do it manually, because here
         * we've already done the insert and the iterator is currently
         * inconsistent:
         *
         * We've got multiple competing invariants here - we have to be careful
         * about rewinding iterators for interior nodes, because they should
         * always point to the key for the child node the btree iterator points
         * to.
         */
        if (b->level && new_u64s && !bkey_deleted(where) &&
            btree_iter_pos_cmp_packed(b, &iter->pos, where,
                                iter->flags & BTREE_ITER_IS_EXTENTS)) {
                struct bset_tree *t;
                struct bkey_packed *k;

                for_each_bset(b, t) {
                        if (bch2_bkey_to_bset(b, where) == t)
                                continue;

                        k = bch2_bkey_prev_all(b, t,
                                bch2_btree_node_iter_bset_pos(node_iter, b, t));
                        if (k &&
                            __btree_node_iter_cmp(node_iter, b,
                                                  k, where) > 0) {
                                struct btree_node_iter_set *set;
                                unsigned offset =
                                        __btree_node_key_to_offset(b, bkey_next(k));

                                btree_node_iter_for_each(node_iter, set)
                                        if (set->k == offset) {
                                                set->k = __btree_node_key_to_offset(b, k);
                                                bch2_btree_node_iter_sort(node_iter, b);
                                                goto next_bset;
                                        }

                                bch2_btree_node_iter_push(node_iter, b, k,
                                                btree_bkey_last(b, t));
                        }
next_bset:
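                        /*
                         * A label can't be the last thing in a block, so we
                         * need a dummy statement after it:
                         */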
                        t = t;
                }
        }
}

void bch2_btree_node_iter_fix(struct btree_iter *iter,
                              struct btree *b,
                              struct btree_node_iter *node_iter,
                              struct bset_tree *t,
                              struct bkey_packed *where,
                              unsigned clobber_u64s,
                              unsigned new_u64s)
{
        struct btree_iter *linked;

        if (node_iter != &iter->node_iters[b->level])
                __bch2_btree_node_iter_fix(iter, b, node_iter, t,
                                           where, clobber_u64s, new_u64s);

        if (iter->nodes[b->level] == b)
                __bch2_btree_node_iter_fix(iter, b,
                                           &iter->node_iters[b->level], t,
                                           where, clobber_u64s, new_u64s);

        for_each_linked_btree_node(iter, b, linked)
                __bch2_btree_node_iter_fix(linked, b,
                                           &linked->node_iters[b->level], t,
                                           where, clobber_u64s, new_u64s);

        /* interior node iterators are... special... */
        if (!b->level)
                bch2_btree_iter_verify(iter, b);
}

/* peek_all() doesn't skip deleted keys */
static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter)
{
        struct btree *b = iter->nodes[iter->level];
        struct bkey_packed *k =
                bch2_btree_node_iter_peek_all(&iter->node_iters[iter->level], b);
        struct bkey_s_c ret;

        EBUG_ON(!btree_node_locked(iter, iter->level));

        if (!k)
                return bkey_s_c_null;

        ret = bkey_disassemble(b, k, &iter->k);

        if (debug_check_bkeys(iter->c))
                bch2_bkey_debugcheck(iter->c, b, ret);

        return ret;
}

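/* Unlike peek_all(), peek() does skip deleted keys: */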
static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter)
{
        struct btree *b = iter->nodes[iter->level];
        struct bkey_packed *k =
                bch2_btree_node_iter_peek(&iter->node_iters[iter->level], b);
        struct bkey_s_c ret;

        EBUG_ON(!btree_node_locked(iter, iter->level));

        if (!k)
                return bkey_s_c_null;

        ret = bkey_disassemble(b, k, &iter->k);

        if (debug_check_bkeys(iter->c))
                bch2_bkey_debugcheck(iter->c, b, ret);

        return ret;
}

static inline void __btree_iter_advance(struct btree_iter *iter)
{
        bch2_btree_node_iter_advance(&iter->node_iters[iter->level],
                                     iter->nodes[iter->level]);
}

/*
 * Verify that iterator for parent node points to child node:
 */
static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
{
        bool parent_locked;
        struct bkey_packed *k;

        if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG) ||
            !iter->nodes[b->level + 1])
                return;

        parent_locked = btree_node_locked(iter, b->level + 1);

        if (!bch2_btree_node_relock(iter, b->level + 1))
                return;

        k = bch2_btree_node_iter_peek_all(&iter->node_iters[b->level + 1],
                                          iter->nodes[b->level + 1]);
        if (!k ||
            bkey_deleted(k) ||
            bkey_cmp_left_packed(iter->nodes[b->level + 1],
                                 k, &b->key.k.p)) {
                char buf[100];
                struct bkey uk = bkey_unpack_key(b, k);

                bch2_bkey_to_text(buf, sizeof(buf), &uk);
                panic("parent iter doesn't point to new node:\n%s\n%llu:%llu\n",
                      buf, b->key.k.p.inode, b->key.k.p.offset);
        }

        if (!parent_locked)
                btree_node_unlock(iter, b->level + 1);
}

static inline void __btree_iter_init(struct btree_iter *iter,
                                     struct btree *b)
{
        bch2_btree_node_iter_init(&iter->node_iters[b->level], b, iter->pos,
                                  iter->flags & BTREE_ITER_IS_EXTENTS,
                                  btree_node_is_extents(b));

        /* Skip to first non-whiteout: */
        if (b->level)
                bch2_btree_node_iter_peek(&iter->node_iters[b->level], b);
}

static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
                                          struct btree *b)
{
        return iter->btree_id == b->btree_id &&
                bkey_cmp(iter->pos, b->data->min_key) >= 0 &&
                btree_iter_pos_cmp(iter->pos, &b->key.k,
                                   iter->flags & BTREE_ITER_IS_EXTENTS);
}

static inline void btree_iter_node_set(struct btree_iter *iter,
                                       struct btree *b)
{
        btree_iter_verify_new_node(iter, b);

        EBUG_ON(!btree_iter_pos_in_node(iter, b));
        EBUG_ON(b->lock.state.seq & 1);

        iter->lock_seq[b->level] = b->lock.state.seq;
        iter->nodes[b->level] = b;
        __btree_iter_init(iter, b);
}

/*
 * A btree node is being replaced - update the iterator to point to the new
 * node:
 */
bool bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
{
        struct btree_iter *linked;

        for_each_linked_btree_iter(iter, linked)
                if (btree_iter_pos_in_node(linked, b)) {
                        /*
                         * bch2_btree_iter_node_drop() has already been called -
                         * the old node we're replacing has already been
                         * unlocked and the pointer invalidated
                         */
                        BUG_ON(btree_node_locked(linked, b->level));

                        /*
                         * If @linked wants this node read locked, we don't want
                         * to actually take the read lock now because it's not
                         * legal to hold read locks on other nodes while we take
                         * write locks, so the journal can make forward
                         * progress...
                         *
                         * Instead, btree_iter_node_set() sets things up so
                         * bch2_btree_node_relock() will succeed:
                         */

                        if (btree_want_intent(linked, b->level)) {
                                six_lock_increment(&b->lock, SIX_LOCK_intent);
                                mark_btree_node_intent_locked(linked, b->level);
                        }

                        btree_iter_node_set(linked, b);
                }

        if (!btree_iter_pos_in_node(iter, b)) {
                six_unlock_intent(&b->lock);
                return false;
        }

        mark_btree_node_intent_locked(iter, b->level);
        btree_iter_node_set(iter, b);
        return true;
}

void bch2_btree_iter_node_drop_linked(struct btree_iter *iter, struct btree *b)
{
        struct btree_iter *linked;

        for_each_linked_btree_iter(iter, linked)
                bch2_btree_iter_node_drop(linked, b);
}

void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
{
        unsigned level = b->level;

        if (iter->nodes[level] == b) {
                iter->flags &= ~BTREE_ITER_UPTODATE;
                btree_node_unlock(iter, level);
                iter->nodes[level] = BTREE_ITER_NOT_END;
        }
}

/*
 * A btree node has been modified in such a way as to invalidate iterators - fix
 * them:
 */
void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
{
        struct btree_iter *linked;

        for_each_linked_btree_node(iter, b, linked)
                __btree_iter_init(linked, b);
        __btree_iter_init(iter, b);
}

static inline int btree_iter_lock_root(struct btree_iter *iter,
                                       unsigned depth_want)
{
        struct bch_fs *c = iter->c;
        struct btree *b;
        enum six_lock_type lock_type;
        unsigned i;

        EBUG_ON(iter->nodes_locked);

        while (1) {
                b = READ_ONCE(c->btree_roots[iter->btree_id].b);
                iter->level = READ_ONCE(b->level);

                if (unlikely(iter->level < depth_want)) {
                        /*
                         * the root is at a lower depth than the depth we want:
                         * got to the end of the btree, or we're walking nodes
                         * greater than some depth and there are no nodes >=
                         * that depth
                         */
                        iter->level = depth_want;
                        iter->nodes[iter->level] = NULL;
                        return 0;
                }

                lock_type = btree_lock_want(iter, iter->level);
                if (unlikely(!btree_node_lock(b, POS_MAX, iter->level,
                                              iter, lock_type)))
                        return -EINTR;

                if (likely(b == c->btree_roots[iter->btree_id].b &&
                           b->level == iter->level &&
                           !race_fault())) {
                        for (i = 0; i < iter->level; i++)
                                iter->nodes[i] = BTREE_ITER_NOT_END;
                        iter->nodes[iter->level] = b;

                        mark_btree_node_locked(iter, iter->level, lock_type);
                        btree_iter_node_set(iter, b);
                        return 0;
                }

                six_unlock_type(&b->lock, lock_type);
        }
}

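/*
 * Read ahead the next few children of the parent node, so they're likely to be
 * in memory by the time the iterator gets to them:
 */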
noinline
static void btree_iter_prefetch(struct btree_iter *iter)
{
        struct btree *b = iter->nodes[iter->level + 1];
        struct btree_node_iter node_iter = iter->node_iters[iter->level + 1];
        struct bkey_packed *k;
        BKEY_PADDED(k) tmp;
        unsigned nr = iter->level ? 1 : 8;
        bool was_locked = btree_node_locked(iter, iter->level + 1);

        while (nr--) {
                if (!bch2_btree_node_relock(iter, iter->level + 1))
                        return;

                bch2_btree_node_iter_advance(&node_iter, b);
                k = bch2_btree_node_iter_peek(&node_iter, b);
                if (!k)
                        break;

                bch2_bkey_unpack(b, &tmp.k, k);
                bch2_btree_node_prefetch(iter->c, &tmp.k,
                                         iter->level, iter->btree_id);
        }

        if (!was_locked)
                btree_node_unlock(iter, iter->level + 1);
}

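/* Descend one level: lock the child node the iterator currently points at: */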
static inline int btree_iter_down(struct btree_iter *iter)
{
        struct btree *b;
        struct bkey_s_c k = __btree_iter_peek(iter);
        unsigned level = iter->level - 1;
        enum six_lock_type lock_type = btree_lock_want(iter, level);
        BKEY_PADDED(k) tmp;

        bkey_reassemble(&tmp.k, k);

        b = bch2_btree_node_get(iter->c, iter, &tmp.k, level, lock_type);
        if (unlikely(IS_ERR(b)))
                return PTR_ERR(b);

        iter->level = level;
        mark_btree_node_locked(iter, level, lock_type);
        btree_iter_node_set(iter, b);

        if (iter->flags & BTREE_ITER_PREFETCH)
                btree_iter_prefetch(iter);

        return 0;
}

static void btree_iter_up(struct btree_iter *iter)
{
        btree_node_unlock(iter, iter->level++);
}

int __must_check __bch2_btree_iter_traverse(struct btree_iter *);

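/*
 * Error path for traversal: -EIO is stashed in the iterator; on -EINTR/-ENOMEM
 * we unlock everything and retraverse all linked iterators in sorted order, to
 * avoid lock ordering deadlocks:
 */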
static int btree_iter_traverse_error(struct btree_iter *iter, int ret)
{
        struct bch_fs *c = iter->c;
        struct btree_iter *linked, *sorted_iters, **i;
retry_all:
        bch2_btree_iter_unlock(iter);

        if (ret != -ENOMEM && ret != -EINTR)
                goto io_error;

        if (ret == -ENOMEM) {
                struct closure cl;

                closure_init_stack(&cl);

                do {
                        ret = bch2_btree_cache_cannibalize_lock(c, &cl);
                        closure_sync(&cl);
                } while (ret);
        }

        /*
         * Linked iters are normally a circular singly linked list - break cycle
         * while we sort them:
         */
        linked = iter->next;
        iter->next = NULL;
        sorted_iters = NULL;

        while (linked) {
                iter = linked;
                linked = linked->next;

                i = &sorted_iters;
                while (*i && btree_iter_cmp(iter, *i) > 0)
                        i = &(*i)->next;

                iter->next = *i;
                *i = iter;
        }

        /* Make list circular again: */
        iter = sorted_iters;
        while (iter->next)
                iter = iter->next;
        iter->next = sorted_iters;

        /* Now, redo traversals in correct order: */

        iter = sorted_iters;
        do {
retry:
                ret = __bch2_btree_iter_traverse(iter);
                if (unlikely(ret)) {
                        if (ret == -EINTR)
                                goto retry;
                        goto retry_all;
                }

                iter = iter->next;
        } while (iter != sorted_iters);

        ret = btree_iter_linked(iter) ? -EINTR : 0;
out:
        bch2_btree_cache_cannibalize_unlock(c);
        return ret;
io_error:
        BUG_ON(ret != -EIO);

        iter->flags |= BTREE_ITER_ERROR;
        iter->nodes[iter->level] = NULL;
        goto out;
}

/*
 * This is the main state machine for walking down the btree - walks down to a
 * specified depth
 *
 * Returns 0 on success, -EIO on error (error reading in a btree node).
 *
 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
 * stashed in the iterator and returned from bch2_btree_iter_unlock().
 */
int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
{
        unsigned depth_want = iter->level;

        if (unlikely(!iter->nodes[iter->level]))
                return 0;

        iter->flags &= ~(BTREE_ITER_UPTODATE|BTREE_ITER_AT_END_OF_LEAF);

        /* make sure we have all the intent locks we need - ugh */
        if (unlikely(iter->nodes[iter->level] &&
                     iter->level + 1 < iter->locks_want)) {
                unsigned i;

                for (i = iter->level + 1;
                     i < iter->locks_want && iter->nodes[i];
                     i++)
                        if (!bch2_btree_node_relock(iter, i)) {
                                while (iter->nodes[iter->level] &&
                                       iter->level + 1 < iter->locks_want)
                                        btree_iter_up(iter);
                                break;
                        }
        }

        /*
         * If the current node isn't locked, go up until we have a locked node
         * or run out of nodes:
         */
        while (iter->nodes[iter->level] &&
               !(is_btree_node(iter, iter->level) &&
                 bch2_btree_node_relock(iter, iter->level) &&
                 btree_iter_pos_cmp(iter->pos,
                                    &iter->nodes[iter->level]->key.k,
                                    iter->flags & BTREE_ITER_IS_EXTENTS)))
                btree_iter_up(iter);

        /*
         * If we've got a btree node locked (i.e. we aren't about to relock the
         * root) - advance its node iterator if necessary:
         */
        if (iter->nodes[iter->level]) {
                struct bkey_s_c k;

                while ((k = __btree_iter_peek_all(iter)).k &&
                       !btree_iter_pos_cmp(iter->pos, k.k,
                                           iter->flags & BTREE_ITER_IS_EXTENTS))
                        __btree_iter_advance(iter);
        }

        /*
         * Note: iter->nodes[iter->level] may be temporarily NULL here - that
         * would indicate to other code that we got to the end of the btree,
         * here it indicates that relocking the root failed - it's critical that
         * btree_iter_lock_root() comes next and that it can't fail
         */
        while (iter->level > depth_want) {
                int ret = iter->nodes[iter->level]
                        ? btree_iter_down(iter)
                        : btree_iter_lock_root(iter, depth_want);
                if (unlikely(ret)) {
                        iter->level = depth_want;
                        iter->nodes[iter->level] = BTREE_ITER_NOT_END;
                        return ret;
                }
        }

        return 0;
}

int __must_check bch2_btree_iter_traverse(struct btree_iter *iter)
{
        int ret;

        ret = __bch2_btree_iter_traverse(iter);
        if (unlikely(ret))
                ret = btree_iter_traverse_error(iter, ret);

        return ret;
}

/* Iterate across nodes (leaf and interior nodes) */

struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
{
        struct btree *b;
        int ret;

        EBUG_ON(iter->flags & BTREE_ITER_IS_EXTENTS);

        ret = bch2_btree_iter_traverse(iter);
        if (ret)
                return ERR_PTR(ret);

        b = iter->nodes[iter->level];

        if (b) {
                EBUG_ON(bkey_cmp(b->key.k.p, iter->pos) < 0);
                iter->pos = b->key.k.p;
        }

        return b;
}

struct btree *bch2_btree_iter_next_node(struct btree_iter *iter, unsigned depth)
{
        struct btree *b;
        int ret;

        EBUG_ON(iter->flags & BTREE_ITER_IS_EXTENTS);

        btree_iter_up(iter);

        if (!iter->nodes[iter->level])
                return NULL;

        /* parent node usually won't be locked: redo traversal if necessary */
        ret = bch2_btree_iter_traverse(iter);
        if (ret)
                return NULL;

        b = iter->nodes[iter->level];
        if (!b)
                return b;

        if (bkey_cmp(iter->pos, b->key.k.p) < 0) {
                /* Haven't gotten to the end of the parent node: */

                /* ick: */
                iter->pos       = iter->btree_id == BTREE_ID_INODES
                        ? btree_type_successor(iter->btree_id, iter->pos)
                        : bkey_successor(iter->pos);
                iter->level     = depth;

                ret = bch2_btree_iter_traverse(iter);
                if (ret)
                        return NULL;

                b = iter->nodes[iter->level];
        }

        iter->pos = b->key.k.p;

        return b;
}

/* Iterate across keys (in leaf nodes only) */

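/* Advance to @new_pos without leaving (or relocking) the current leaf node: */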
void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_pos)
{
        struct btree *b = iter->nodes[0];
        struct btree_node_iter *node_iter = &iter->node_iters[0];
        struct bkey_packed *k;

        EBUG_ON(iter->level != 0);
        EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0);
        EBUG_ON(!btree_node_locked(iter, 0));
        EBUG_ON(bkey_cmp(new_pos, b->key.k.p) > 0);

        while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
               !btree_iter_pos_cmp_packed(b, &new_pos, k,
                                          iter->flags & BTREE_ITER_IS_EXTENTS))
                bch2_btree_node_iter_advance(node_iter, b);

        if (!k &&
            !btree_iter_pos_cmp(new_pos, &b->key.k,
                                iter->flags & BTREE_ITER_IS_EXTENTS))
                iter->flags |= BTREE_ITER_AT_END_OF_LEAF;

        iter->pos = new_pos;
        iter->flags &= ~BTREE_ITER_UPTODATE;
}

void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
        EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0); /* XXX handle this */
        iter->pos = new_pos;
        iter->flags &= ~BTREE_ITER_UPTODATE;
}

void bch2_btree_iter_advance_pos(struct btree_iter *iter)
{
        if (iter->flags & BTREE_ITER_UPTODATE &&
            !(iter->flags & BTREE_ITER_WITH_HOLES)) {
                struct bkey_s_c k;

                __btree_iter_advance(iter);
                k = __btree_iter_peek(iter);
                if (likely(k.k)) {
                        iter->pos = bkey_start_pos(k.k);
                        return;
                }
        }

        /*
         * We use iter->k instead of iter->pos for extents: iter->pos will be
         * equal to the start of the extent we returned, but we need to advance
         * to the end of the extent we returned.
         */
        bch2_btree_iter_set_pos(iter,
                btree_type_successor(iter->btree_id, iter->k.p));
}

/* XXX: expensive */
void bch2_btree_iter_rewind(struct btree_iter *iter, struct bpos pos)
{
        /* incapable of rewinding across nodes: */
        BUG_ON(bkey_cmp(pos, iter->nodes[iter->level]->data->min_key) < 0);

        iter->pos = pos;
        iter->flags &= ~BTREE_ITER_UPTODATE;
        __btree_iter_init(iter, iter->nodes[iter->level]);
}

struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
        struct bkey_s_c k;
        int ret;

        EBUG_ON(!!(iter->flags & BTREE_ITER_IS_EXTENTS) !=
                (iter->btree_id == BTREE_ID_EXTENTS));

        if (iter->flags & BTREE_ITER_UPTODATE) {
                struct btree *b = iter->nodes[0];
                struct bkey_packed *k =
                        __bch2_btree_node_iter_peek_all(&iter->node_iters[0], b);
                struct bkey_s_c ret = {
                        .k = &iter->k,
                        .v = bkeyp_val(&b->format, k)
                };

                EBUG_ON(!btree_node_locked(iter, 0));

                if (debug_check_bkeys(iter->c))
                        bch2_bkey_debugcheck(iter->c, b, ret);
                return ret;
        }

        while (1) {
                ret = bch2_btree_iter_traverse(iter);
                if (unlikely(ret)) {
                        iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
                        return bkey_s_c_err(ret);
                }

                k = __btree_iter_peek(iter);
                if (likely(k.k)) {
                        /*
                         * iter->pos should always be equal to the key we just
                         * returned - except extents can straddle iter->pos:
                         */
                        if (!(iter->flags & BTREE_ITER_IS_EXTENTS) ||
                            bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
                                iter->pos = bkey_start_pos(k.k);

                        iter->flags |= BTREE_ITER_UPTODATE;
                        return k;
                }

                iter->pos = iter->nodes[0]->key.k.p;

                if (!bkey_cmp(iter->pos, POS_MAX)) {
                        iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
                        bch2_btree_iter_unlock(iter);
                        return bkey_s_c_null;
                }

                iter->pos = btree_type_successor(iter->btree_id, iter->pos);
        }
}

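/*
 * Like bch2_btree_iter_peek(), but when there's no key at the iterator's
 * position a fabricated key representing the hole is returned instead:
 */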
struct bkey_s_c bch2_btree_iter_peek_with_holes(struct btree_iter *iter)
{
        struct bkey_s_c k;
        struct bkey n;
        int ret;

        EBUG_ON(!!(iter->flags & BTREE_ITER_IS_EXTENTS) !=
                (iter->btree_id == BTREE_ID_EXTENTS));

        iter->flags &= ~BTREE_ITER_UPTODATE;

        while (1) {
                ret = bch2_btree_iter_traverse(iter);
                if (unlikely(ret)) {
                        iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
                        return bkey_s_c_err(ret);
                }

                k = __btree_iter_peek_all(iter);
recheck:
                if (!k.k || bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0) {
                        /* hole */
                        bkey_init(&n);
                        n.p = iter->pos;

                        if (iter->flags & BTREE_ITER_IS_EXTENTS) {
                                if (n.p.offset == KEY_OFFSET_MAX) {
                                        iter->pos = bkey_successor(iter->pos);
                                        goto recheck;
                                }

                                if (!k.k)
                                        k.k = &iter->nodes[0]->key.k;

                                bch2_key_resize(&n,
                                       min_t(u64, KEY_SIZE_MAX,
                                             (k.k->p.inode == n.p.inode
                                              ? bkey_start_offset(k.k)
                                              : KEY_OFFSET_MAX) -
                                             n.p.offset));

                                EBUG_ON(!n.size);
                        }

                        iter->k = n;
                        return (struct bkey_s_c) { &iter->k, NULL };
                } else if (!bkey_deleted(k.k)) {
                        return k;
                } else {
                        __btree_iter_advance(iter);
                }
        }
}

void __bch2_btree_iter_init(struct btree_iter *iter, struct bch_fs *c,
                            enum btree_id btree_id, struct bpos pos,
                            unsigned locks_want, unsigned depth,
                            unsigned flags)
{
        EBUG_ON(depth >= BTREE_MAX_DEPTH);
        EBUG_ON(locks_want > BTREE_MAX_DEPTH);

        iter->c                         = c;
        iter->pos                       = pos;
        iter->flags                     = flags;
        iter->btree_id                  = btree_id;
        iter->level                     = depth;
        iter->locks_want                = locks_want;
        iter->nodes_locked              = 0;
        iter->nodes_intent_locked       = 0;
        memset(iter->nodes, 0, sizeof(iter->nodes));
        iter->nodes[iter->level]        = BTREE_ITER_NOT_END;
        iter->next                      = iter;

        prefetch(c->btree_roots[btree_id].b);
}

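/* Remove @iter from its list of linked iterators, dropping its locks: */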
void bch2_btree_iter_unlink(struct btree_iter *iter)
{
        struct btree_iter *linked;

        __bch2_btree_iter_unlock(iter);

        if (!btree_iter_linked(iter))
                return;

        for_each_linked_btree_iter(iter, linked) {
                if (linked->next == iter) {
                        linked->next = iter->next;
                        return;
                }
        }

        BUG();
}

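/* Link @new into @iter's list of iterators, so they coordinate their locking: */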
void bch2_btree_iter_link(struct btree_iter *iter, struct btree_iter *new)
{
        BUG_ON(btree_iter_linked(new));

        new->next = iter->next;
        iter->next = new;

        if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
                unsigned nr_iters = 1;

                for_each_linked_btree_iter(iter, new)
                        nr_iters++;

                BUG_ON(nr_iters > SIX_LOCK_MAX_RECURSE);
        }
}

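/*
 * Copy @src's state to @dst; @dst keeps its own place in the linked list and
 * starts out holding no locks:
 */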
void bch2_btree_iter_copy(struct btree_iter *dst, struct btree_iter *src)
{
        __bch2_btree_iter_unlock(dst);
        memcpy(dst, src, offsetof(struct btree_iter, next));
        dst->nodes_locked = dst->nodes_intent_locked = 0;
}