[bcachefs-tools-debian] / libbcachefs / btree_cache.c
#include "bcachefs.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "debug.h"
#include "extents.h"

#include <linux/prefetch.h>
#include <trace/events/bcachefs.h>

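/*
 * Build a table of human readable btree names: DEF_BTREE_ID() is defined just
 * long enough for DEFINE_BCH_BTREE_IDS() to expand each btree ID to its name
 * string, giving a NULL-terminated array indexed by enum btree_id.
 */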
#define DEF_BTREE_ID(kwd, val, name) name,

const char * const bch2_btree_ids[] = {
        DEFINE_BCH_BTREE_IDS()
        NULL
};

#undef DEF_BTREE_ID

void bch2_recalc_btree_reserve(struct bch_fs *c)
{
        unsigned i, reserve = 16;

        if (!c->btree_roots[0].b)
                reserve += 8;

        for (i = 0; i < BTREE_ID_NR; i++)
                if (c->btree_roots[i].b)
                        reserve += min_t(unsigned, 1,
                                         c->btree_roots[i].b->level) * 8;

        c->btree_cache_reserve = reserve;
}

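/*
 * Nodes in excess of the reserve are what the shrinker is allowed to free;
 * the reserve itself exists so that allocating a new btree node (and hence
 * inserting keys and making IO progress) can always succeed.
 */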
#define mca_can_free(c)                                         \
        max_t(int, 0, c->btree_cache_used - c->btree_cache_reserve)

static void __mca_data_free(struct bch_fs *c, struct btree *b)
{
        EBUG_ON(btree_node_write_in_flight(b));

        free_pages((unsigned long) b->data, btree_page_order(c));
        b->data = NULL;
        bch2_btree_keys_free(b);
}

static void mca_data_free(struct bch_fs *c, struct btree *b)
{
        __mca_data_free(c, b);
        c->btree_cache_used--;
        list_move(&b->list, &c->btree_cache_freed);
}

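/*
 * The btree node cache hash table is keyed by a node's first pointer (the
 * struct bch_extent_ptr at key.v): that's what key_offset/key_len below
 * describe, and PTR_HASH() reads those same 64 bits for direct comparisons.
 */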
#define PTR_HASH(_k)    (bkey_i_to_extent_c(_k)->v._data[0])

static const struct rhashtable_params bch_btree_cache_params = {
        .head_offset    = offsetof(struct btree, hash),
        .key_offset     = offsetof(struct btree, key.v),
        .key_len        = sizeof(struct bch_extent_ptr),
};

static void mca_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
{
        unsigned order = ilog2(btree_pages(c));

        b->data = (void *) __get_free_pages(gfp, order);
        if (!b->data)
                goto err;

        if (bch2_btree_keys_alloc(b, order, gfp))
                goto err;

        c->btree_cache_used++;
        list_move(&b->list, &c->btree_cache_freeable);
        return;
err:
        free_pages((unsigned long) b->data, order);
        b->data = NULL;
        list_move(&b->list, &c->btree_cache_freed);
}

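/*
 * Allocate a struct btree plus its data buffer. If the data allocation fails,
 * the struct itself stays on btree_cache_freed (so it can be reused later) but
 * the caller sees NULL.
 */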
static struct btree *mca_bucket_alloc(struct bch_fs *c, gfp_t gfp)
{
        struct btree *b = kzalloc(sizeof(struct btree), gfp);
        if (!b)
                return NULL;

        six_lock_init(&b->lock);
        INIT_LIST_HEAD(&b->list);
        INIT_LIST_HEAD(&b->write_blocked);

        mca_data_alloc(c, b, gfp);
        return b->data ? b : NULL;
}

/* Btree in memory cache - hash table */

void bch2_btree_node_hash_remove(struct bch_fs *c, struct btree *b)
{
        BUG_ON(btree_node_dirty(b));

        b->nsets = 0;

        rhashtable_remove_fast(&c->btree_cache_table, &b->hash,
                               bch_btree_cache_params);

        /* Cause future lookups for this node to fail: */
        bkey_i_to_extent(&b->key)->v._data[0] = 0;
}

int bch2_btree_node_hash_insert(struct bch_fs *c, struct btree *b,
                                unsigned level, enum btree_id id)
{
        int ret;

        b->level        = level;
        b->btree_id     = id;

        ret = rhashtable_lookup_insert_fast(&c->btree_cache_table, &b->hash,
                                            bch_btree_cache_params);
        if (ret)
                return ret;

        mutex_lock(&c->btree_cache_lock);
        list_add(&b->list, &c->btree_cache);
        mutex_unlock(&c->btree_cache_lock);

        return 0;
}

__flatten
static inline struct btree *mca_find(struct bch_fs *c,
                                     const struct bkey_i *k)
{
        return rhashtable_lookup_fast(&c->btree_cache_table, &PTR_HASH(k),
                                      bch_btree_cache_params);
}

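/*
 * Locking contract for mca_reap()/mca_reap_notrace(): on success (0) the
 * caller is left holding the node's intent and write locks; on failure
 * (-ENOMEM, i.e. the node can't be evicted right now) no locks are held.
 */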
/*
 * this version is for btree nodes that have already been freed (we're not
 * reaping a real btree node)
 */
static int mca_reap_notrace(struct bch_fs *c, struct btree *b, bool flush)
{
        lockdep_assert_held(&c->btree_cache_lock);

        if (!six_trylock_intent(&b->lock))
                return -ENOMEM;

        if (!six_trylock_write(&b->lock))
                goto out_unlock_intent;

        if (btree_node_write_error(b) ||
            btree_node_noevict(b))
                goto out_unlock;

        if (!list_empty(&b->write_blocked))
                goto out_unlock;

        if (!flush &&
            (btree_node_dirty(b) ||
             btree_node_write_in_flight(b)))
                goto out_unlock;

        /*
         * Using the underscore version because we don't want to compact bsets
         * after the write, since this node is about to be evicted - unless
         * btree verify mode is enabled, since it runs out of the post write
         * cleanup:
         */
        if (btree_node_dirty(b)) {
                if (verify_btree_ondisk(c))
                        bch2_btree_node_write(c, b, NULL, SIX_LOCK_intent, -1);
                else
                        __bch2_btree_node_write(c, b, NULL, SIX_LOCK_read, -1);
        }

        /* wait for any in flight btree write */
        wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
                       TASK_UNINTERRUPTIBLE);

        return 0;
out_unlock:
        six_unlock_write(&b->lock);
out_unlock_intent:
        six_unlock_intent(&b->lock);
        return -ENOMEM;
}

static int mca_reap(struct bch_fs *c, struct btree *b, bool flush)
{
        int ret = mca_reap_notrace(c, b, flush);

        trace_btree_node_reap(c, b, ret);
        return ret;
}

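/*
 * Shrinker scan callback. sc->nr_to_scan is in pages, so it's converted to a
 * number of btree nodes; we try the freeable list first, then walk the main
 * cache list from the head. When the walk stops partway through, the list head
 * is rotated to just before the last node visited so the next scan resumes
 * from roughly the same position. The return value is again in pages.
 */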
static unsigned long bch2_mca_scan(struct shrinker *shrink,
                                   struct shrink_control *sc)
{
        struct bch_fs *c = container_of(shrink, struct bch_fs,
                                        btree_cache_shrink);
        struct btree *b, *t;
        unsigned long nr = sc->nr_to_scan;
        unsigned long can_free;
        unsigned long touched = 0;
        unsigned long freed = 0;
        unsigned i;

        if (btree_shrinker_disabled(c))
                return SHRINK_STOP;

        if (c->btree_cache_alloc_lock)
                return SHRINK_STOP;

        /* Return -1 if we can't do anything right now */
        if (sc->gfp_mask & __GFP_IO)
                mutex_lock(&c->btree_cache_lock);
        else if (!mutex_trylock(&c->btree_cache_lock))
                return -1;

        /*
         * It's _really_ critical that we don't free too many btree nodes - we
         * have to always leave ourselves a reserve. The reserve is how we
         * guarantee that allocating memory for a new btree node can always
         * succeed, so that inserting keys into the btree can always succeed and
         * IO can always make forward progress:
         */
        nr /= btree_pages(c);
        can_free = mca_can_free(c);
        nr = min_t(unsigned long, nr, can_free);

        i = 0;
        list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
                touched++;

                if (freed >= nr)
                        break;

                if (++i > 3 &&
                    !mca_reap_notrace(c, b, false)) {
                        mca_data_free(c, b);
                        six_unlock_write(&b->lock);
                        six_unlock_intent(&b->lock);
                        freed++;
                }
        }
restart:
        list_for_each_entry_safe(b, t, &c->btree_cache, list) {
                touched++;

                if (freed >= nr) {
                        /* Save position */
                        if (&t->list != &c->btree_cache)
                                list_move_tail(&c->btree_cache, &t->list);
                        break;
                }

                if (!btree_node_accessed(b) &&
                    !mca_reap(c, b, false)) {
                        /* can't call bch2_btree_node_hash_remove under btree_cache_lock */
                        freed++;
                        if (&t->list != &c->btree_cache)
                                list_move_tail(&c->btree_cache, &t->list);

                        mca_data_free(c, b);
                        mutex_unlock(&c->btree_cache_lock);

                        bch2_btree_node_hash_remove(c, b);
                        six_unlock_write(&b->lock);
                        six_unlock_intent(&b->lock);

                        if (freed >= nr)
                                goto out;

                        if (sc->gfp_mask & __GFP_IO)
                                mutex_lock(&c->btree_cache_lock);
                        else if (!mutex_trylock(&c->btree_cache_lock))
                                goto out;
                        goto restart;
                } else
                        clear_btree_node_accessed(b);
        }

        mutex_unlock(&c->btree_cache_lock);
out:
        return (unsigned long) freed * btree_pages(c);
}

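/*
 * Shrinker count callback: report how many pages' worth of btree nodes could
 * currently be freed, i.e. everything above the reserve.
 */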
static unsigned long bch2_mca_count(struct shrinker *shrink,
                                    struct shrink_control *sc)
{
        struct bch_fs *c = container_of(shrink, struct bch_fs,
                                        btree_cache_shrink);

        if (btree_shrinker_disabled(c))
                return 0;

        if (c->btree_cache_alloc_lock)
                return 0;

        return mca_can_free(c) * btree_pages(c);
}

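/*
 * Filesystem teardown: unregister the shrinker (if it was registered), then
 * free every node left on the cache lists. Any node still marked dirty has its
 * write state completed and the dirty bit cleared before its memory is freed.
 */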
void bch2_fs_btree_exit(struct bch_fs *c)
{
        struct btree *b;
        unsigned i;

        if (c->btree_cache_shrink.list.next)
                unregister_shrinker(&c->btree_cache_shrink);

        mutex_lock(&c->btree_cache_lock);

#ifdef CONFIG_BCACHEFS_DEBUG
        if (c->verify_data)
                list_move(&c->verify_data->list, &c->btree_cache);

        free_pages((unsigned long) c->verify_ondisk, ilog2(btree_pages(c)));
#endif

        for (i = 0; i < BTREE_ID_NR; i++)
                if (c->btree_roots[i].b)
                        list_add(&c->btree_roots[i].b->list, &c->btree_cache);

        list_splice(&c->btree_cache_freeable,
                    &c->btree_cache);

        while (!list_empty(&c->btree_cache)) {
                b = list_first_entry(&c->btree_cache, struct btree, list);

                if (btree_node_dirty(b))
                        bch2_btree_complete_write(c, b, btree_current_write(b));
                clear_btree_node_dirty(b);

                mca_data_free(c, b);
        }

        while (!list_empty(&c->btree_cache_freed)) {
                b = list_first_entry(&c->btree_cache_freed,
                                     struct btree, list);
                list_del(&b->list);
                kfree(b);
        }

        mutex_unlock(&c->btree_cache_lock);

        if (c->btree_cache_table_init_done)
                rhashtable_destroy(&c->btree_cache_table);
}

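/*
 * Filesystem startup: initialize the hash table, preallocate the reserve of
 * btree nodes onto the freeable list, and register the shrinker (batching in
 * units of two nodes' worth of pages).
 */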
int bch2_fs_btree_init(struct bch_fs *c)
{
        unsigned i;
        int ret;

        ret = rhashtable_init(&c->btree_cache_table, &bch_btree_cache_params);
        if (ret)
                return ret;

        c->btree_cache_table_init_done = true;

        bch2_recalc_btree_reserve(c);

        for (i = 0; i < c->btree_cache_reserve; i++)
                if (!mca_bucket_alloc(c, GFP_KERNEL))
                        return -ENOMEM;

        list_splice_init(&c->btree_cache,
                         &c->btree_cache_freeable);

#ifdef CONFIG_BCACHEFS_DEBUG
        mutex_init(&c->verify_lock);

        c->verify_ondisk = (void *)
                __get_free_pages(GFP_KERNEL, ilog2(btree_pages(c)));
        if (!c->verify_ondisk)
                return -ENOMEM;

        c->verify_data = mca_bucket_alloc(c, GFP_KERNEL);
        if (!c->verify_data)
                return -ENOMEM;

        list_del_init(&c->verify_data->list);
#endif

        c->btree_cache_shrink.count_objects = bch2_mca_count;
        c->btree_cache_shrink.scan_objects = bch2_mca_scan;
        c->btree_cache_shrink.seeks = 4;
        c->btree_cache_shrink.batch = btree_pages(c) * 2;
        register_shrinker(&c->btree_cache_shrink);

        return 0;
}

/*
 * Only one thread may cannibalize other cached btree nodes at a time, or we'll
 * deadlock. We use an open coded mutex (btree_cache_alloc_lock) to ensure
 * that, taken via bch2_btree_node_cannibalize_lock(). This means every time we
 * unlock the root of the btree, we need to release this lock if we have it
 * held.
 */
void bch2_btree_node_cannibalize_unlock(struct bch_fs *c)
{
        if (c->btree_cache_alloc_lock == current) {
                trace_btree_node_cannibalize_unlock(c);
                c->btree_cache_alloc_lock = NULL;
                closure_wake_up(&c->mca_wait);
        }
}

int bch2_btree_node_cannibalize_lock(struct bch_fs *c, struct closure *cl)
{
        struct task_struct *old;

        old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
        if (old == NULL || old == current)
                goto success;

        if (!cl) {
                trace_btree_node_cannibalize_lock_fail(c);
                return -ENOMEM;
        }

        closure_wait(&c->mca_wait, cl);

        /* Try again, after adding ourselves to waitlist */
        old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
        if (old == NULL || old == current) {
                /* We raced */
                closure_wake_up(&c->mca_wait);
                goto success;
        }

        trace_btree_node_cannibalize_lock_fail(c);
        return -EAGAIN;

success:
        trace_btree_node_cannibalize_lock(c);
        return 0;
}

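/*
 * Steal the memory of another cached node: walk the cache list from the tail,
 * first trying to reap a node without flushing, then retrying with flushing
 * allowed. Only the holder of the cannibalize lock gets here, so busy-waiting
 * until a node can be reaped is safe (if ugly).
 */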
static struct btree *mca_cannibalize(struct bch_fs *c)
{
        struct btree *b;

        list_for_each_entry_reverse(b, &c->btree_cache, list)
                if (!mca_reap(c, b, false))
                        return b;

        while (1) {
                list_for_each_entry_reverse(b, &c->btree_cache, list)
                        if (!mca_reap(c, b, true))
                                return b;

                /*
                 * Rare case: all nodes were intent-locked.
                 * Just busy-wait.
                 */
                WARN_ONCE(1, "btree cache cannibalize failed\n");
                cond_resched();
        }
}

struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c)
{
        struct btree *b;
        u64 start_time = local_clock();

        mutex_lock(&c->btree_cache_lock);

        /*
         * btree_free() doesn't free memory; it sticks the node on the end of
         * the list. Check if there's any freed nodes there:
         */
        list_for_each_entry(b, &c->btree_cache_freeable, list)
                if (!mca_reap_notrace(c, b, false))
                        goto out_unlock;

        /*
         * We never free struct btree itself, just the memory that holds the on
         * disk node. Check the freed list before allocating a new one:
         */
        list_for_each_entry(b, &c->btree_cache_freed, list)
                if (!mca_reap_notrace(c, b, false)) {
                        mca_data_alloc(c, b, __GFP_NOWARN|GFP_NOIO);
                        if (b->data)
                                goto out_unlock;

                        six_unlock_write(&b->lock);
                        six_unlock_intent(&b->lock);
                        goto err;
                }

        b = mca_bucket_alloc(c, __GFP_NOWARN|GFP_NOIO);
        if (!b)
                goto err;

        BUG_ON(!six_trylock_intent(&b->lock));
        BUG_ON(!six_trylock_write(&b->lock));
out_unlock:
        BUG_ON(bkey_extent_is_data(&b->key.k) && PTR_HASH(&b->key));
        BUG_ON(btree_node_write_in_flight(b));

        list_del_init(&b->list);
        mutex_unlock(&c->btree_cache_lock);
out:
        b->flags                = 0;
        b->written              = 0;
        b->nsets                = 0;
        b->sib_u64s[0]          = 0;
        b->sib_u64s[1]          = 0;
        b->whiteout_u64s        = 0;
        b->uncompacted_whiteout_u64s = 0;
        bch2_btree_keys_init(b, &c->expensive_debug_checks);

        bch2_time_stats_update(&c->btree_node_mem_alloc_time, start_time);

        return b;
err:
        /* Try to cannibalize another cached btree node: */
        if (c->btree_cache_alloc_lock == current) {
                b = mca_cannibalize(c);
                list_del_init(&b->list);
                mutex_unlock(&c->btree_cache_lock);

                bch2_btree_node_hash_remove(c, b);

                trace_btree_node_cannibalize(c);
                goto out;
        }

        mutex_unlock(&c->btree_cache_lock);
        return ERR_PTR(-ENOMEM);
}

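/*
 * Read a btree node into the cache: allocate an in-memory node, hash it, drop
 * any read lock still held on the parent, then issue the read. Returns NULL if
 * we raced with another thread filling in the same node (the caller retries),
 * or an ERR_PTR() if allocation failed.
 */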
/* Slowpath, don't want it inlined into btree_iter_traverse() */
static noinline struct btree *bch2_btree_node_fill(struct btree_iter *iter,
                                                   const struct bkey_i *k,
                                                   unsigned level,
                                                   enum six_lock_type lock_type)
{
        struct bch_fs *c = iter->c;
        struct btree *b;

        b = bch2_btree_node_mem_alloc(c);
        if (IS_ERR(b))
                return b;

        bkey_copy(&b->key, k);
        if (bch2_btree_node_hash_insert(c, b, level, iter->btree_id)) {
                /* raced with another fill: */

                /* mark as unhashed... */
                bkey_i_to_extent(&b->key)->v._data[0] = 0;

                mutex_lock(&c->btree_cache_lock);
                list_add(&b->list, &c->btree_cache_freeable);
                mutex_unlock(&c->btree_cache_lock);

                six_unlock_write(&b->lock);
                six_unlock_intent(&b->lock);
                return NULL;
        }

        /*
         * If the btree node wasn't cached, we can't drop our lock on
         * the parent until after it's added to the cache - because
         * otherwise we could race with a btree_split() freeing the node
         * we're trying to lock.
         *
         * But the deadlock described below doesn't exist in this case,
         * so it's safe to not drop the parent lock until here:
         */
        if (btree_node_read_locked(iter, level + 1))
                btree_node_unlock(iter, level + 1);

        bch2_btree_node_read(c, b);
        six_unlock_write(&b->lock);

        if (lock_type == SIX_LOCK_read)
                six_lock_downgrade(&b->lock);

        return b;
}

/**
 * bch2_btree_node_get - find a btree node in the cache and lock it, reading it
 * in from disk if necessary.
 *
 * If IO is necessary and running under generic_make_request, returns -EAGAIN.
 *
 * The btree node will have either a read or an intent lock held, depending on
 * @lock_type.
 */
struct btree *bch2_btree_node_get(struct btree_iter *iter,
                                  const struct bkey_i *k, unsigned level,
                                  enum six_lock_type lock_type)
{
        struct btree *b;
        struct bset_tree *t;

        BUG_ON(level >= BTREE_MAX_DEPTH);
retry:
        rcu_read_lock();
        b = mca_find(iter->c, k);
        rcu_read_unlock();

        if (unlikely(!b)) {
                /*
                 * We must have the parent locked to call bch2_btree_node_fill(),
                 * else we could read in a btree node from disk that's been
                 * freed:
                 */
                b = bch2_btree_node_fill(iter, k, level, lock_type);

                /* We raced and found the btree node in the cache */
                if (!b)
                        goto retry;

                if (IS_ERR(b))
                        return b;
        } else {
                /*
                 * There's a potential deadlock with splits and insertions into
                 * interior nodes we have to avoid:
                 *
                 * The other thread might be holding an intent lock on the node
                 * we want, and they want to update its parent node so they're
                 * going to upgrade their intent lock on the parent node to a
                 * write lock.
                 *
                 * But if we're holding a read lock on the parent, and we're
                 * trying to get the intent lock they're holding, we deadlock.
                 *
                 * So to avoid this we drop the read locks on parent nodes when
                 * we're starting to take intent locks - and handle the race.
                 *
                 * The race is that they might be about to free the node we
                 * want, and dropping our read lock on the parent node lets them
                 * update the parent marking the node we want as freed, and then
                 * free it:
                 *
                 * To guard against this, btree nodes are evicted from the cache
                 * when they're freed - and PTR_HASH() is zeroed out, which we
                 * check for after we lock the node.
                 *
                 * Then, bch2_btree_node_relock() on the parent will fail - because
                 * the parent was modified, when the pointer to the node we want
                 * was removed - and we'll bail out:
                 */
                if (btree_node_read_locked(iter, level + 1))
                        btree_node_unlock(iter, level + 1);

                if (!btree_node_lock(b, k->k.p, level, iter, lock_type))
                        return ERR_PTR(-EINTR);

                if (unlikely(PTR_HASH(&b->key) != PTR_HASH(k) ||
                             b->level != level ||
                             race_fault())) {
                        six_unlock_type(&b->lock, lock_type);
                        if (bch2_btree_node_relock(iter, level + 1))
                                goto retry;

                        return ERR_PTR(-EINTR);
                }
        }

        prefetch(b->aux_data);

        for_each_bset(b, t) {
                void *p = (u64 *) b->aux_data + t->aux_data_offset;

                prefetch(p + L1_CACHE_BYTES * 0);
                prefetch(p + L1_CACHE_BYTES * 1);
                prefetch(p + L1_CACHE_BYTES * 2);
        }

        /* avoid atomic set bit if it's not needed: */
        if (!btree_node_accessed(b))
                set_btree_node_accessed(b);

        if (unlikely(btree_node_read_error(b))) {
                six_unlock_type(&b->lock, lock_type);
                return ERR_PTR(-EIO);
        }

        EBUG_ON(!b->written);
        EBUG_ON(b->btree_id != iter->btree_id ||
                BTREE_NODE_LEVEL(b->data) != level ||
                bkey_cmp(b->data->max_key, k->k.p));

        return b;
}

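/*
 * Debugging/sysfs helper: format a human readable summary of a btree node
 * (its key range, pointers, key format and space usage) into buf.
 */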
int bch2_print_btree_node(struct bch_fs *c, struct btree *b,
                          char *buf, size_t len)
{
        const struct bkey_format *f = &b->format;
        struct bset_stats stats;
        char ptrs[100];

        memset(&stats, 0, sizeof(stats));

        bch2_val_to_text(c, BKEY_TYPE_BTREE, ptrs, sizeof(ptrs),
                         bkey_i_to_s_c(&b->key));
        bch2_btree_keys_stats(b, &stats);

        return scnprintf(buf, len,
                         "l %u %llu:%llu - %llu:%llu:\n"
                         "    ptrs: %s\n"
                         "    format: u64s %u fields %u %u %u %u %u\n"
                         "    unpack fn len: %u\n"
                         "    bytes used %zu/%zu (%zu%% full)\n"
                         "    sib u64s: %u, %u (merge threshold %zu)\n"
                         "    nr packed keys %u\n"
                         "    nr unpacked keys %u\n"
                         "    floats %zu\n"
                         "    failed unpacked %zu\n"
                         "    failed prev %zu\n"
                         "    failed overflow %zu\n",
                         b->level,
                         b->data->min_key.inode,
                         b->data->min_key.offset,
                         b->data->max_key.inode,
                         b->data->max_key.offset,
                         ptrs,
                         f->key_u64s,
                         f->bits_per_field[0],
                         f->bits_per_field[1],
                         f->bits_per_field[2],
                         f->bits_per_field[3],
                         f->bits_per_field[4],
                         b->unpack_fn_len,
                         b->nr.live_u64s * sizeof(u64),
                         btree_bytes(c) - sizeof(struct btree_node),
                         b->nr.live_u64s * 100 / btree_max_u64s(c),
                         b->sib_u64s[0],
                         b->sib_u64s[1],
                         BTREE_FOREGROUND_MERGE_THRESHOLD(c),
                         b->nr.packed_keys,
                         b->nr.unpacked_keys,
                         stats.floats,
                         stats.failed_unpacked,
                         stats.failed_prev,
                         stats.failed_overflow);
}