libbcachefs/alloc_foreground.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright 2012 Google, Inc.
4  *
5  * Foreground allocator code: allocate buckets from freelist, and allocate in
6  * sector granularity from writepoints.
7  *
8  * bch2_bucket_alloc() allocates a single bucket from a specific device.
9  *
10  * bch2_bucket_alloc_set() allocates one or more buckets from different devices
11  * in a given filesystem.
12  */
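
/*
 * An illustrative sketch, not a contract of this file: a caller that wants a
 * whole bucket from one device might do roughly the following (error handling
 * and the closure used for waiting are the caller's choice):
 *
 *	struct open_bucket *ob =
 *		bch2_bucket_alloc(c, ca, RESERVE_none, false, cl);
 *	if (IS_ERR(ob))
 *		return PTR_ERR(ob);
 *	... write to the bucket, insert pointers to it into the btree ...
 *	bch2_open_bucket_put(c, ob);
 */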
13
14 #include "bcachefs.h"
15 #include "alloc_background.h"
16 #include "alloc_foreground.h"
17 #include "backpointers.h"
18 #include "btree_iter.h"
19 #include "btree_update.h"
20 #include "btree_gc.h"
21 #include "buckets.h"
22 #include "buckets_waiting_for_journal.h"
23 #include "clock.h"
24 #include "debug.h"
25 #include "disk_groups.h"
26 #include "ec.h"
27 #include "error.h"
28 #include "io.h"
29 #include "journal.h"
30 #include "movinggc.h"
31 #include "nocow_locking.h"
32
33 #include <linux/math64.h>
34 #include <linux/rculist.h>
35 #include <linux/rcupdate.h>
36 #include <trace/events/bcachefs.h>
37
38 const char * const bch2_alloc_reserves[] = {
39 #define x(t) #t,
40         BCH_ALLOC_RESERVES()
41 #undef x
42         NULL
43 };
44
45 /*
46  * Open buckets represent buckets that are currently being allocated from.  They
47  * serve two purposes:
48  *
49  *  - They track buckets that have been partially allocated, allowing for
50  *    sub-bucket sized allocations - they're used by the sector allocator below
51  *
52  *  - They provide a reference to the buckets they own that mark and sweep GC
53  *    can find, until the new allocation has a pointer to it inserted into the
54  *    btree
55  *
56  * When allocating some space with the sector allocator, the allocation comes
57  * with a reference to an open bucket - the caller is required to put that
58  * reference _after_ doing the index update that makes its allocation reachable.
59  */
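
/*
 * A minimal sketch of that reference rule, assuming the open_buckets helpers
 * declared in alloc_foreground.h (bch2_open_bucket_get() and
 * bch2_open_buckets_put()):
 *
 *	struct open_buckets obs = { .nr = 0 };
 *
 *	bch2_open_bucket_get(c, wp, &obs);	// take refs while wp is locked
 *	bch2_alloc_sectors_done(c, wp);
 *	... btree update that makes the new allocation reachable ...
 *	bch2_open_buckets_put(c, &obs);		// only now drop the refs
 */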
60
61 void bch2_reset_alloc_cursors(struct bch_fs *c)
62 {
63         struct bch_dev *ca;
64         unsigned i;
65
66         rcu_read_lock();
67         for_each_member_device_rcu(ca, c, i, NULL)
68                 ca->alloc_cursor = 0;
69         rcu_read_unlock();
70 }
71
72 static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
73 {
74         open_bucket_idx_t idx = ob - c->open_buckets;
75         open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
76
77         ob->hash = *slot;
78         *slot = idx;
79 }
80
81 static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
82 {
83         open_bucket_idx_t idx = ob - c->open_buckets;
84         open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
85
86         while (*slot != idx) {
87                 BUG_ON(!*slot);
88                 slot = &c->open_buckets[*slot].hash;
89         }
90
91         *slot = ob->hash;
92         ob->hash = 0;
93 }
94
95 void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
96 {
97         struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
98
99         if (ob->ec) {
100                 bch2_ec_bucket_written(c, ob);
101                 return;
102         }
103
104         percpu_down_read(&c->mark_lock);
105         spin_lock(&ob->lock);
106
107         ob->valid = false;
108         ob->data_type = 0;
109
110         spin_unlock(&ob->lock);
111         percpu_up_read(&c->mark_lock);
112
113         spin_lock(&c->freelist_lock);
114         bch2_open_bucket_hash_remove(c, ob);
115
116         ob->freelist = c->open_buckets_freelist;
117         c->open_buckets_freelist = ob - c->open_buckets;
118
119         c->open_buckets_nr_free++;
120         ca->nr_open_buckets--;
121         spin_unlock(&c->freelist_lock);
122
123         closure_wake_up(&c->open_buckets_wait);
124 }
125
126 void bch2_open_bucket_write_error(struct bch_fs *c,
127                                   struct open_buckets *obs,
128                                   unsigned dev)
129 {
130         struct open_bucket *ob;
131         unsigned i;
132
133         open_bucket_for_each(c, obs, ob, i)
134                 if (ob->dev == dev && ob->ec)
135                         bch2_ec_bucket_cancel(c, ob);
136 }
137
138 static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
139 {
140         struct open_bucket *ob;
141
142         BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);
143
144         ob = c->open_buckets + c->open_buckets_freelist;
145         c->open_buckets_freelist = ob->freelist;
146         atomic_set(&ob->pin, 1);
147         ob->data_type = 0;
148
149         c->open_buckets_nr_free--;
150         return ob;
151 }
152
153 static void open_bucket_free_unused(struct bch_fs *c,
154                                     struct write_point *wp,
155                                     struct open_bucket *ob)
156 {
157         struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
158         bool may_realloc = wp->data_type == BCH_DATA_user;
159
160         BUG_ON(ca->open_buckets_partial_nr >
161                ARRAY_SIZE(ca->open_buckets_partial));
162
163         if (ca->open_buckets_partial_nr <
164             ARRAY_SIZE(ca->open_buckets_partial) &&
165             may_realloc) {
166                 spin_lock(&c->freelist_lock);
167                 ob->on_partial_list = true;
168                 ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
169                         ob - c->open_buckets;
170                 spin_unlock(&c->freelist_lock);
171
172                 closure_wake_up(&c->open_buckets_wait);
173                 closure_wake_up(&c->freelist_wait);
174         } else {
175                 bch2_open_bucket_put(c, ob);
176         }
177 }
178
179 /* _only_ for allocating the journal on a new device: */
180 long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
181 {
182         while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
183                 u64 b = ca->new_fs_bucket_idx++;
184
185                 if (!is_superblock_bucket(ca, b) &&
186                     (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))
187                         return b;
188         }
189
190         return -1;
191 }
192
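/*
 * Number of open buckets held back from allocations at the given reserve:
 * btree allocations may use every open bucket, copygc may not take the last
 * quarter of OPEN_BUCKETS_COUNT, and ordinary foreground writes may not take
 * the last half, so internal operations that free up space can always make
 * forward progress.
 */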
193 static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
194 {
195         switch (reserve) {
196         case RESERVE_btree:
197         case RESERVE_btree_movinggc:
198                 return 0;
199         case RESERVE_movinggc:
200                 return OPEN_BUCKETS_COUNT / 4;
201         default:
202                 return OPEN_BUCKETS_COUNT / 2;
203         }
204 }
205
206 static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
207                                               u64 bucket,
208                                               enum alloc_reserve reserve,
209                                               const struct bch_alloc_v4 *a,
210                                               struct bucket_alloc_state *s,
211                                               struct closure *cl)
212 {
213         struct open_bucket *ob;
214
215         if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
216                 s->skipped_nouse++;
217                 return NULL;
218         }
219
220         if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
221                 s->skipped_open++;
222                 return NULL;
223         }
224
225         if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
226                         c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) {
227                 s->skipped_need_journal_commit++;
228                 return NULL;
229         }
230
231         if (bch2_bucket_nocow_is_locked(&c->nocow_locks, POS(ca->dev_idx, bucket))) {
232                 s->skipped_nocow++;
233                 return NULL;
234         }
235
236         spin_lock(&c->freelist_lock);
237
238         if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
239                 if (cl)
240                         closure_wait(&c->open_buckets_wait, cl);
241
242                 if (!c->blocked_allocate_open_bucket)
243                         c->blocked_allocate_open_bucket = local_clock();
244
245                 spin_unlock(&c->freelist_lock);
246                 return ERR_PTR(-BCH_ERR_open_buckets_empty);
247         }
248
249         /* Recheck under lock: */
250         if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
251                 spin_unlock(&c->freelist_lock);
252                 s->skipped_open++;
253                 return NULL;
254         }
255
256         ob = bch2_open_bucket_alloc(c);
257
258         spin_lock(&ob->lock);
259
260         ob->valid       = true;
261         ob->sectors_free = ca->mi.bucket_size;
262         ob->alloc_reserve = reserve;
263         ob->dev         = ca->dev_idx;
264         ob->gen         = a->gen;
265         ob->bucket      = bucket;
266         spin_unlock(&ob->lock);
267
268         ca->nr_open_buckets++;
269         bch2_open_bucket_hash_add(c, ob);
270
271         if (c->blocked_allocate_open_bucket) {
272                 bch2_time_stats_update(
273                         &c->times[BCH_TIME_blocked_allocate_open_bucket],
274                         c->blocked_allocate_open_bucket);
275                 c->blocked_allocate_open_bucket = 0;
276         }
277
278         if (c->blocked_allocate) {
279                 bch2_time_stats_update(
280                         &c->times[BCH_TIME_blocked_allocate],
281                         c->blocked_allocate);
282                 c->blocked_allocate = 0;
283         }
284
285         spin_unlock(&c->freelist_lock);
286         return ob;
287 }
288
289 static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
290                                             enum alloc_reserve reserve, u64 free_entry,
291                                             struct bucket_alloc_state *s,
292                                             struct bkey_s_c freespace_k,
293                                             struct closure *cl)
294 {
295         struct bch_fs *c = trans->c;
296         struct btree_iter iter = { NULL };
297         struct bkey_s_c k;
298         struct open_bucket *ob;
299         struct bch_alloc_v4 a_convert;
300         const struct bch_alloc_v4 *a;
301         u64 b = free_entry & ~(~0ULL << 56);
302         unsigned genbits = free_entry >> 56;
303         struct printbuf buf = PRINTBUF;
304         int ret;
305
306         if (b < ca->mi.first_bucket || b >= ca->mi.nbuckets) {
307                 prt_printf(&buf, "freespace btree has bucket outside allowed range %u-%llu\n"
308                        "  freespace key ",
309                         ca->mi.first_bucket, ca->mi.nbuckets);
310                 bch2_bkey_val_to_text(&buf, c, freespace_k);
311                 bch2_trans_inconsistent(trans, "%s", buf.buf);
312                 ob = ERR_PTR(-EIO);
313                 goto err;
314         }
315
316         bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(ca->dev_idx, b), BTREE_ITER_CACHED);
317         k = bch2_btree_iter_peek_slot(&iter);
318         ret = bkey_err(k);
319         if (ret) {
320                 ob = ERR_PTR(ret);
321                 goto err;
322         }
323
324         a = bch2_alloc_to_v4(k, &a_convert);
325
326         if (a->data_type != BCH_DATA_free) {
327                 if (!test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) {
328                         ob = NULL;
329                         goto err;
330                 }
331
332                 prt_printf(&buf, "non free bucket in freespace btree\n"
333                        "  freespace key ");
334                 bch2_bkey_val_to_text(&buf, c, freespace_k);
335                 prt_printf(&buf, "\n  ");
336                 bch2_bkey_val_to_text(&buf, c, k);
337                 bch2_trans_inconsistent(trans, "%s", buf.buf);
338                 ob = ERR_PTR(-EIO);
339                 goto err;
340         }
341
342         if (genbits != (alloc_freespace_genbits(*a) >> 56) &&
343             test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) {
344                 prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
345                        "  freespace key ",
346                        genbits, alloc_freespace_genbits(*a) >> 56);
347                 bch2_bkey_val_to_text(&buf, c, freespace_k);
348                 prt_printf(&buf, "\n  ");
349                 bch2_bkey_val_to_text(&buf, c, k);
350                 bch2_trans_inconsistent(trans, "%s", buf.buf);
351                 ob = ERR_PTR(-EIO);
352                 goto err;
353
354         }
355
356         if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
357                 struct bch_backpointer bp;
358                 u64 bp_offset = 0;
359
360                 ret = bch2_get_next_backpointer(trans, POS(ca->dev_idx, b), -1,
361                                                 &bp_offset, &bp,
362                                                 BTREE_ITER_NOPRESERVE);
363                 if (ret) {
364                         ob = ERR_PTR(ret);
365                         goto err;
366                 }
367
368                 if (bp_offset != U64_MAX) {
369                         /*
370                          * Bucket may have data in it - we don't call
371                          * bch2_trans_inconsistent() because fsck hasn't
372                          * finished yet
373                          */
374                         ob = NULL;
375                         goto err;
376                 }
377         }
378
379         ob = __try_alloc_bucket(c, ca, b, reserve, a, s, cl);
380         if (!ob)
381                 iter.path->preserve = false;
382 err:
383         set_btree_iter_dontneed(&iter);
384         bch2_trans_iter_exit(trans, &iter);
385         printbuf_exit(&buf);
386         return ob;
387 }
388
389 static struct open_bucket *try_alloc_partial_bucket(struct bch_fs *c, struct bch_dev *ca,
390                                                     enum alloc_reserve reserve)
391 {
392         struct open_bucket *ob;
393         int i;
394
395         spin_lock(&c->freelist_lock);
396
397         for (i = ca->open_buckets_partial_nr - 1; i >= 0; --i) {
398                 ob = c->open_buckets + ca->open_buckets_partial[i];
399
400                 if (reserve <= ob->alloc_reserve) {
401                         array_remove_item(ca->open_buckets_partial,
402                                           ca->open_buckets_partial_nr,
403                                           i);
404                         ob->on_partial_list = false;
405                         ob->alloc_reserve = reserve;
406                         spin_unlock(&c->freelist_lock);
407                         return ob;
408                 }
409         }
410
411         spin_unlock(&c->freelist_lock);
412         return NULL;
413 }
414
415 /*
416  * This path is used before the freespace btree is initialized:
417  *
418  * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock &
419  * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
420  */
421 static noinline struct open_bucket *
422 bch2_bucket_alloc_early(struct btree_trans *trans,
423                         struct bch_dev *ca,
424                         enum alloc_reserve reserve,
425                         struct bucket_alloc_state *s,
426                         struct closure *cl)
427 {
428         struct btree_iter iter;
429         struct bkey_s_c k;
430         struct open_bucket *ob = NULL;
431         u64 alloc_start = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx);
432         u64 alloc_cursor = max(alloc_start, READ_ONCE(ca->alloc_cursor));
433         int ret;
434 again:
435         for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor),
436                            BTREE_ITER_SLOTS, k, ret) {
437                 struct bch_alloc_v4 a_convert;
438                 const struct bch_alloc_v4 *a;
439
440                 if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
441                         break;
442
443                 if (ca->new_fs_bucket_idx &&
444                     is_superblock_bucket(ca, k.k->p.offset))
445                         continue;
446
447                 a = bch2_alloc_to_v4(k, &a_convert);
448
449                 if (a->data_type != BCH_DATA_free)
450                         continue;
451
452                 s->buckets_seen++;
453
454                 ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, a, s, cl);
455                 if (ob)
456                         break;
457         }
458         bch2_trans_iter_exit(trans, &iter);
459
460         ca->alloc_cursor = alloc_cursor;
461
462         if (!ob && ret)
463                 ob = ERR_PTR(ret);
464
465         if (!ob && alloc_cursor > alloc_start) {
466                 alloc_cursor = alloc_start;
467                 goto again;
468         }
469
470         return ob;
471 }
472
473 static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
474                                                    struct bch_dev *ca,
475                                                    enum alloc_reserve reserve,
476                                                    struct bucket_alloc_state *s,
477                                                    struct closure *cl)
478 {
479         struct btree_iter iter;
480         struct bkey_s_c k;
481         struct open_bucket *ob = NULL;
482         u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(ca->alloc_cursor));
483         u64 alloc_cursor = alloc_start;
484         int ret;
485
486         BUG_ON(ca->new_fs_bucket_idx);
487 again:
488         for_each_btree_key_norestart(trans, iter, BTREE_ID_freespace,
489                                      POS(ca->dev_idx, alloc_cursor), 0, k, ret) {
490                 if (k.k->p.inode != ca->dev_idx)
491                         break;
492
493                 for (alloc_cursor = max(alloc_cursor, bkey_start_offset(k.k));
494                      alloc_cursor < k.k->p.offset;
495                      alloc_cursor++) {
496                         ret = btree_trans_too_many_iters(trans);
497                         if (ret) {
498                                 ob = ERR_PTR(ret);
499                                 break;
500                         }
501
502                         s->buckets_seen++;
503
504                         ob = try_alloc_bucket(trans, ca, reserve,
505                                               alloc_cursor, s, k, cl);
506                         if (ob) {
507                                 iter.path->preserve = false;
508                                 break;
509                         }
510                 }
511
512                 if (ob || ret)
513                         break;
514         }
515         bch2_trans_iter_exit(trans, &iter);
516
517         ca->alloc_cursor = alloc_cursor;
518
519         if (!ob && ret)
520                 ob = ERR_PTR(ret);
521
522         if (!ob && alloc_start > ca->mi.first_bucket) {
523                 alloc_cursor = alloc_start = ca->mi.first_bucket;
524                 goto again;
525         }
526
527         return ob;
528 }
529
530 /**
531  * bch2_bucket_alloc_trans - allocate a single bucket from a specific device
532  *
533  * Returns an open_bucket on success, or an ERR_PTR() on failure
534  */
535 static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
536                                       struct bch_dev *ca,
537                                       enum alloc_reserve reserve,
538                                       bool may_alloc_partial,
539                                       struct closure *cl,
540                                       struct bch_dev_usage *usage)
541 {
542         struct bch_fs *c = trans->c;
543         struct open_bucket *ob = NULL;
544         bool freespace = READ_ONCE(ca->mi.freespace_initialized);
545         u64 avail;
546         struct bucket_alloc_state s = { 0 };
547         bool waiting = false;
548 again:
549         bch2_dev_usage_read_fast(ca, usage);
550         avail = dev_buckets_free(ca, *usage, reserve);
551
552         if (usage->d[BCH_DATA_need_discard].buckets > avail)
553                 bch2_do_discards(c);
554
555         if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
556                 bch2_do_gc_gens(c);
557
558         if (should_invalidate_buckets(ca, *usage))
559                 bch2_do_invalidates(c);
560
561         if (!avail) {
562                 if (cl && !waiting) {
563                         closure_wait(&c->freelist_wait, cl);
564                         waiting = true;
565                         goto again;
566                 }
567
568                 if (!c->blocked_allocate)
569                         c->blocked_allocate = local_clock();
570
571                 ob = ERR_PTR(-BCH_ERR_freelist_empty);
572                 goto err;
573         }
574
575         if (waiting)
576                 closure_wake_up(&c->freelist_wait);
577
578         if (may_alloc_partial) {
579                 ob = try_alloc_partial_bucket(c, ca, reserve);
580                 if (ob)
581                         return ob;
582         }
583 alloc:
584         ob = likely(freespace)
585                 ? bch2_bucket_alloc_freelist(trans, ca, reserve, &s, cl)
586                 : bch2_bucket_alloc_early(trans, ca, reserve, &s, cl);
587
588         if (s.skipped_need_journal_commit * 2 > avail)
589                 bch2_journal_flush_async(&c->journal, NULL);
590
591         if (!ob && freespace && !test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) {
592                 freespace = false;
593                 goto alloc;
594         }
595 err:
596         if (!ob)
597                 ob = ERR_PTR(-BCH_ERR_no_buckets_found);
598
599         if (!IS_ERR(ob))
600                 trace_and_count(c, bucket_alloc, ca,
601                                 bch2_alloc_reserves[reserve],
602                                 may_alloc_partial,
603                                 ob->bucket,
604                                 usage->d[BCH_DATA_free].buckets,
605                                 avail,
606                                 bch2_copygc_wait_amount(c),
607                                 c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
608                                 &s,
609                                 cl == NULL,
610                                 "");
611         else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
612                 trace_and_count(c, bucket_alloc_fail, ca,
613                                 bch2_alloc_reserves[reserve],
614                                 may_alloc_partial,
615                                 0,
616                                 usage->d[BCH_DATA_free].buckets,
617                                 avail,
618                                 bch2_copygc_wait_amount(c),
619                                 c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
620                                 &s,
621                                 cl == NULL,
622                                 bch2_err_str(PTR_ERR(ob)));
623
624         return ob;
625 }
626
627 struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
628                                       enum alloc_reserve reserve,
629                                       bool may_alloc_partial,
630                                       struct closure *cl)
631 {
632         struct bch_dev_usage usage;
633         struct open_bucket *ob;
634
635         bch2_trans_do(c, NULL, NULL, 0,
636                       PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, reserve,
637                                                         may_alloc_partial, cl, &usage)));
638         return ob;
639 }
640
641 static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
642                             unsigned l, unsigned r)
643 {
644         return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
645                 (stripe->next_alloc[l] < stripe->next_alloc[r]));
646 }
647
648 #define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)
649
650 struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
651                                           struct dev_stripe_state *stripe,
652                                           struct bch_devs_mask *devs)
653 {
654         struct dev_alloc_list ret = { .nr = 0 };
655         unsigned i;
656
657         for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
658                 ret.devs[ret.nr++] = i;
659
660         bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
661         return ret;
662 }
663
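/*
 * Each device's next_alloc counter is bumped by an amount inversely
 * proportional to its free space (2^48 / free buckets); bch2_dev_alloc_list()
 * sorts devices by these counters, so devices with more free space are chosen
 * more often. All counters are then decayed by a quarter of this device's
 * previous value, which keeps them bounded without reordering them (beyond
 * clamping at zero).
 */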
664 static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
665                                struct dev_stripe_state *stripe,
666                                struct bch_dev_usage *usage)
667 {
668         u64 *v = stripe->next_alloc + ca->dev_idx;
669         u64 free_space = dev_buckets_available(ca, RESERVE_none);
670         u64 free_space_inv = free_space
671                 ? div64_u64(1ULL << 48, free_space)
672                 : 1ULL << 48;
673         u64 scale = *v / 4;
674
675         if (*v + free_space_inv >= *v)
676                 *v += free_space_inv;
677         else
678                 *v = U64_MAX;
679
680         for (v = stripe->next_alloc;
681              v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
682                 *v = *v < scale ? 0 : *v - scale;
683 }
684
685 void bch2_dev_stripe_increment(struct bch_dev *ca,
686                                struct dev_stripe_state *stripe)
687 {
688         struct bch_dev_usage usage;
689
690         bch2_dev_usage_read_fast(ca, &usage);
691         bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
692 }
693
694 #define BUCKET_MAY_ALLOC_PARTIAL        (1 << 0)
695 #define BUCKET_ALLOC_USE_DURABILITY     (1 << 1)
696
697 static void add_new_bucket(struct bch_fs *c,
698                            struct open_buckets *ptrs,
699                            struct bch_devs_mask *devs_may_alloc,
700                            unsigned *nr_effective,
701                            bool *have_cache,
702                            unsigned flags,
703                            struct open_bucket *ob)
704 {
705         unsigned durability =
706                 bch_dev_bkey_exists(c, ob->dev)->mi.durability;
707
708         __clear_bit(ob->dev, devs_may_alloc->d);
709         *nr_effective   += (flags & BUCKET_ALLOC_USE_DURABILITY)
710                 ? durability : 1;
711         *have_cache     |= !durability;
712
713         ob_push(c, ptrs, ob);
714 }
715
716 int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
717                       struct open_buckets *ptrs,
718                       struct dev_stripe_state *stripe,
719                       struct bch_devs_mask *devs_may_alloc,
720                       unsigned nr_replicas,
721                       unsigned *nr_effective,
722                       bool *have_cache,
723                       enum alloc_reserve reserve,
724                       unsigned flags,
725                       struct closure *cl)
726 {
727         struct bch_fs *c = trans->c;
728         struct dev_alloc_list devs_sorted =
729                 bch2_dev_alloc_list(c, stripe, devs_may_alloc);
730         unsigned dev;
731         struct bch_dev *ca;
732         int ret = -BCH_ERR_insufficient_devices;
733         unsigned i;
734
735         BUG_ON(*nr_effective >= nr_replicas);
736
737         for (i = 0; i < devs_sorted.nr; i++) {
738                 struct bch_dev_usage usage;
739                 struct open_bucket *ob;
740
741                 dev = devs_sorted.devs[i];
742
743                 rcu_read_lock();
744                 ca = rcu_dereference(c->devs[dev]);
745                 if (ca)
746                         percpu_ref_get(&ca->ref);
747                 rcu_read_unlock();
748
749                 if (!ca)
750                         continue;
751
752                 if (!ca->mi.durability && *have_cache) {
753                         percpu_ref_put(&ca->ref);
754                         continue;
755                 }
756
757                 ob = bch2_bucket_alloc_trans(trans, ca, reserve,
758                                 flags & BUCKET_MAY_ALLOC_PARTIAL, cl, &usage);
759                 if (!IS_ERR(ob))
760                         bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
761                 percpu_ref_put(&ca->ref);
762
763                 if (IS_ERR(ob)) {
764                         ret = PTR_ERR(ob);
765                         if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || cl)
766                                 break;
767                         continue;
768                 }
769
770                 add_new_bucket(c, ptrs, devs_may_alloc,
771                                nr_effective, have_cache, flags, ob);
772
773                 if (*nr_effective >= nr_replicas) {
774                         ret = 0;
775                         break;
776                 }
777         }
778
779         return ret;
780 }
781
782 /* Allocate from stripes: */
783
784 /*
785  * if we can't allocate a new stripe because there are already too many
786  * partially filled stripes, force allocating from an existing stripe even when
787  * it's to a device we don't want:
788  */
789
790 static int bucket_alloc_from_stripe(struct btree_trans *trans,
791                          struct open_buckets *ptrs,
792                          struct write_point *wp,
793                          struct bch_devs_mask *devs_may_alloc,
794                          u16 target,
795                          unsigned erasure_code,
796                          unsigned nr_replicas,
797                          unsigned *nr_effective,
798                          bool *have_cache,
799                          unsigned flags,
800                          struct closure *cl)
801 {
802         struct bch_fs *c = trans->c;
803         struct dev_alloc_list devs_sorted;
804         struct ec_stripe_head *h;
805         struct open_bucket *ob;
806         struct bch_dev *ca;
807         unsigned i, ec_idx;
808
809         if (!erasure_code)
810                 return 0;
811
812         if (nr_replicas < 2)
813                 return 0;
814
815         if (ec_open_bucket(c, ptrs))
816                 return 0;
817
818         h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1,
819                                     wp == &c->copygc_write_point,
820                                     cl);
821         if (IS_ERR(h))
822                 return PTR_ERR(h);
823         if (!h)
824                 return 0;
825
826         devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);
827
828         for (i = 0; i < devs_sorted.nr; i++)
829                 for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
830                         if (!h->s->blocks[ec_idx])
831                                 continue;
832
833                         ob = c->open_buckets + h->s->blocks[ec_idx];
834                         if (ob->dev == devs_sorted.devs[i] &&
835                             !test_and_set_bit(ec_idx, h->s->blocks_allocated))
836                                 goto got_bucket;
837                 }
838         goto out_put_head;
839 got_bucket:
840         ca = bch_dev_bkey_exists(c, ob->dev);
841
842         ob->ec_idx      = ec_idx;
843         ob->ec          = h->s;
844
845         add_new_bucket(c, ptrs, devs_may_alloc,
846                        nr_effective, have_cache, flags, ob);
847         atomic_inc(&h->s->pin);
848 out_put_head:
849         bch2_ec_stripe_head_put(c, h);
850         return 0;
851 }
852
853 /* Sector allocator */
854
855 static void get_buckets_from_writepoint(struct bch_fs *c,
856                                         struct open_buckets *ptrs,
857                                         struct write_point *wp,
858                                         struct bch_devs_mask *devs_may_alloc,
859                                         unsigned nr_replicas,
860                                         unsigned *nr_effective,
861                                         bool *have_cache,
862                                         unsigned flags,
863                                         bool need_ec)
864 {
865         struct open_buckets ptrs_skip = { .nr = 0 };
866         struct open_bucket *ob;
867         unsigned i;
868
869         open_bucket_for_each(c, &wp->ptrs, ob, i) {
870                 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
871
872                 if (*nr_effective < nr_replicas &&
873                     test_bit(ob->dev, devs_may_alloc->d) &&
874                     (ca->mi.durability ||
875                      (wp->data_type == BCH_DATA_user && !*have_cache)) &&
876                     (ob->ec || !need_ec)) {
877                         add_new_bucket(c, ptrs, devs_may_alloc,
878                                        nr_effective, have_cache,
879                                        flags, ob);
880                 } else {
881                         ob_push(c, &ptrs_skip, ob);
882                 }
883         }
884         wp->ptrs = ptrs_skip;
885 }
886
887 static int open_bucket_add_buckets(struct btree_trans *trans,
888                         struct open_buckets *ptrs,
889                         struct write_point *wp,
890                         struct bch_devs_list *devs_have,
891                         u16 target,
892                         unsigned erasure_code,
893                         unsigned nr_replicas,
894                         unsigned *nr_effective,
895                         bool *have_cache,
896                         enum alloc_reserve reserve,
897                         unsigned flags,
898                         struct closure *_cl)
899 {
900         struct bch_fs *c = trans->c;
901         struct bch_devs_mask devs;
902         struct open_bucket *ob;
903         struct closure *cl = NULL;
904         int ret;
905         unsigned i;
906
907         rcu_read_lock();
908         devs = target_rw_devs(c, wp->data_type, target);
909         rcu_read_unlock();
910
911         /* Don't allocate from devices we already have pointers to: */
912         for (i = 0; i < devs_have->nr; i++)
913                 __clear_bit(devs_have->devs[i], devs.d);
914
915         open_bucket_for_each(c, ptrs, ob, i)
916                 __clear_bit(ob->dev, devs.d);
917
918         if (erasure_code) {
919                 if (!ec_open_bucket(c, ptrs)) {
920                         get_buckets_from_writepoint(c, ptrs, wp, &devs,
921                                                     nr_replicas, nr_effective,
922                                                     have_cache, flags, true);
923                         if (*nr_effective >= nr_replicas)
924                                 return 0;
925                 }
926
927                 if (!ec_open_bucket(c, ptrs)) {
928                         ret = bucket_alloc_from_stripe(trans, ptrs, wp, &devs,
929                                                  target, erasure_code,
930                                                  nr_replicas, nr_effective,
931                                                  have_cache, flags, _cl);
932                         if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
933                             bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
934                             bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
935                                 return ret;
936                         if (*nr_effective >= nr_replicas)
937                                 return 0;
938                 }
939         }
940
941         get_buckets_from_writepoint(c, ptrs, wp, &devs,
942                                     nr_replicas, nr_effective,
943                                     have_cache, flags, false);
944         if (*nr_effective >= nr_replicas)
945                 return 0;
946
947 retry_blocking:
948         /*
949          * Try nonblocking first, so that if one device is full we'll try from
950          * other devices:
951          */
952         ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs,
953                                 nr_replicas, nr_effective, have_cache,
954                                 reserve, flags, cl);
955         if (ret &&
956             !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
957             !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
958             !cl && _cl) {
959                 cl = _cl;
960                 goto retry_blocking;
961         }
962
963         return ret;
964 }
965
966 void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
967                                 struct open_buckets *obs)
968 {
969         struct open_buckets ptrs = { .nr = 0 };
970         struct open_bucket *ob, *ob2;
971         unsigned i, j;
972
973         open_bucket_for_each(c, obs, ob, i) {
974                 bool drop = !ca || ob->dev == ca->dev_idx;
975
976                 if (!drop && ob->ec) {
977                         mutex_lock(&ob->ec->lock);
978                         for (j = 0; j < ob->ec->new_stripe.key.v.nr_blocks; j++) {
979                                 if (!ob->ec->blocks[j])
980                                         continue;
981
982                                 ob2 = c->open_buckets + ob->ec->blocks[j];
983                                 drop |= ob2->dev == ca->dev_idx;
984                         }
985                         mutex_unlock(&ob->ec->lock);
986                 }
987
988                 if (drop)
989                         bch2_open_bucket_put(c, ob);
990                 else
991                         ob_push(c, &ptrs, ob);
992         }
993
994         *obs = ptrs;
995 }
996
997 void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
998                           struct write_point *wp)
999 {
1000         mutex_lock(&wp->lock);
1001         bch2_open_buckets_stop_dev(c, ca, &wp->ptrs);
1002         mutex_unlock(&wp->lock);
1003 }
1004
1005 static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
1006                                                  unsigned long write_point)
1007 {
1008         unsigned hash =
1009                 hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));
1010
1011         return &c->write_points_hash[hash];
1012 }
1013
1014 static struct write_point *__writepoint_find(struct hlist_head *head,
1015                                              unsigned long write_point)
1016 {
1017         struct write_point *wp;
1018
1019         rcu_read_lock();
1020         hlist_for_each_entry_rcu(wp, head, node)
1021                 if (wp->write_point == write_point)
1022                         goto out;
1023         wp = NULL;
1024 out:
1025         rcu_read_unlock();
1026         return wp;
1027 }
1028
1029 static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
1030 {
1031         u64 stranded    = c->write_points_nr * c->bucket_size_max;
1032         u64 free        = bch2_fs_usage_read_short(c).free;
1033
1034         return stranded * factor > free;
1035 }
1036
1037 static bool try_increase_writepoints(struct bch_fs *c)
1038 {
1039         struct write_point *wp;
1040
1041         if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
1042             too_many_writepoints(c, 32))
1043                 return false;
1044
1045         wp = c->write_points + c->write_points_nr++;
1046         hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
1047         return true;
1048 }
1049
1050 static bool try_decrease_writepoints(struct bch_fs *c,
1051                                      unsigned old_nr)
1052 {
1053         struct write_point *wp;
1054
1055         mutex_lock(&c->write_points_hash_lock);
1056         if (c->write_points_nr < old_nr) {
1057                 mutex_unlock(&c->write_points_hash_lock);
1058                 return true;
1059         }
1060
1061         if (c->write_points_nr == 1 ||
1062             !too_many_writepoints(c, 8)) {
1063                 mutex_unlock(&c->write_points_hash_lock);
1064                 return false;
1065         }
1066
1067         wp = c->write_points + --c->write_points_nr;
1068
1069         hlist_del_rcu(&wp->node);
1070         mutex_unlock(&c->write_points_hash_lock);
1071
1072         bch2_writepoint_stop(c, NULL, wp);
1073         return true;
1074 }
1075
1076 static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
1077                                   struct mutex *lock)
1078 {
1079         if (!mutex_trylock(lock)) {
1080                 bch2_trans_unlock(trans);
1081                 mutex_lock(lock);
1082         }
1083 }
1084
1085 static struct write_point *writepoint_find(struct btree_trans *trans,
1086                                            unsigned long write_point)
1087 {
1088         struct bch_fs *c = trans->c;
1089         struct write_point *wp, *oldest;
1090         struct hlist_head *head;
1091
1092         if (!(write_point & 1UL)) {
1093                 wp = (struct write_point *) write_point;
1094                 bch2_trans_mutex_lock_norelock(trans, &wp->lock);
1095                 return wp;
1096         }
1097
1098         head = writepoint_hash(c, write_point);
1099 restart_find:
1100         wp = __writepoint_find(head, write_point);
1101         if (wp) {
1102 lock_wp:
1103                 bch2_trans_mutex_lock_norelock(trans, &wp->lock);
1104                 if (wp->write_point == write_point)
1105                         goto out;
1106                 mutex_unlock(&wp->lock);
1107                 goto restart_find;
1108         }
1109 restart_find_oldest:
1110         oldest = NULL;
1111         for (wp = c->write_points;
1112              wp < c->write_points + c->write_points_nr; wp++)
1113                 if (!oldest || time_before64(wp->last_used, oldest->last_used))
1114                         oldest = wp;
1115
1116         bch2_trans_mutex_lock_norelock(trans, &oldest->lock);
1117         bch2_trans_mutex_lock_norelock(trans, &c->write_points_hash_lock);
1118         if (oldest >= c->write_points + c->write_points_nr ||
1119             try_increase_writepoints(c)) {
1120                 mutex_unlock(&c->write_points_hash_lock);
1121                 mutex_unlock(&oldest->lock);
1122                 goto restart_find_oldest;
1123         }
1124
1125         wp = __writepoint_find(head, write_point);
1126         if (wp && wp != oldest) {
1127                 mutex_unlock(&c->write_points_hash_lock);
1128                 mutex_unlock(&oldest->lock);
1129                 goto lock_wp;
1130         }
1131
1132         wp = oldest;
1133         hlist_del_rcu(&wp->node);
1134         wp->write_point = write_point;
1135         hlist_add_head_rcu(&wp->node, head);
1136         mutex_unlock(&c->write_points_hash_lock);
1137 out:
1138         wp->last_used = local_clock();
1139         return wp;
1140 }
1141
1142 /*
1143  * Get us a write point we can allocate from, and return with it locked:
1144  */
1145 int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
1146                              unsigned target,
1147                              unsigned erasure_code,
1148                              struct write_point_specifier write_point,
1149                              struct bch_devs_list *devs_have,
1150                              unsigned nr_replicas,
1151                              unsigned nr_replicas_required,
1152                              enum alloc_reserve reserve,
1153                              unsigned flags,
1154                              struct closure *cl,
1155                              struct write_point **wp_ret)
1156 {
1157         struct bch_fs *c = trans->c;
1158         struct write_point *wp;
1159         struct open_bucket *ob;
1160         struct open_buckets ptrs;
1161         unsigned nr_effective, write_points_nr;
1162         unsigned ob_flags = 0;
1163         bool have_cache;
1164         int ret;
1165         int i;
1166
1167         if (!(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS))
1168                 ob_flags |= BUCKET_ALLOC_USE_DURABILITY;
1169
1170         BUG_ON(!nr_replicas || !nr_replicas_required);
1171 retry:
1172         ptrs.nr         = 0;
1173         nr_effective    = 0;
1174         write_points_nr = c->write_points_nr;
1175         have_cache      = false;
1176
1177         *wp_ret = wp = writepoint_find(trans, write_point.v);
1178
1179         if (wp->data_type == BCH_DATA_user)
1180                 ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;
1181
1182         /* metadata may not allocate on cache devices: */
1183         if (wp->data_type != BCH_DATA_user)
1184                 have_cache = true;
1185
1186         if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
1187                 ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
1188                                               target, erasure_code,
1189                                               nr_replicas, &nr_effective,
1190                                               &have_cache, reserve,
1191                                               ob_flags, cl);
1192         } else {
1193                 ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
1194                                               target, erasure_code,
1195                                               nr_replicas, &nr_effective,
1196                                               &have_cache, reserve,
1197                                               ob_flags, NULL);
1198                 if (!ret ||
1199                     bch2_err_matches(ret, BCH_ERR_transaction_restart))
1200                         goto alloc_done;
1201
1202                 ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
1203                                               0, erasure_code,
1204                                               nr_replicas, &nr_effective,
1205                                               &have_cache, reserve,
1206                                               ob_flags, cl);
1207         }
1208 alloc_done:
1209         BUG_ON(!ret && nr_effective < nr_replicas);
1210
1211         if (erasure_code && !ec_open_bucket(c, &ptrs))
1212                 pr_debug("failed to get ec bucket: ret %i", ret);
1213
1214         if (ret == -BCH_ERR_insufficient_devices &&
1215             nr_effective >= nr_replicas_required)
1216                 ret = 0;
1217
1218         if (ret)
1219                 goto err;
1220
1221         /* Free buckets we didn't use: */
1222         open_bucket_for_each(c, &wp->ptrs, ob, i)
1223                 open_bucket_free_unused(c, wp, ob);
1224
1225         wp->ptrs = ptrs;
1226
1227         wp->sectors_free = UINT_MAX;
1228
1229         open_bucket_for_each(c, &wp->ptrs, ob, i)
1230                 wp->sectors_free = min(wp->sectors_free, ob->sectors_free);
1231
1232         BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);
1233
1234         return 0;
1235 err:
1236         open_bucket_for_each(c, &wp->ptrs, ob, i)
1237                 if (ptrs.nr < ARRAY_SIZE(ptrs.v))
1238                         ob_push(c, &ptrs, ob);
1239                 else
1240                         open_bucket_free_unused(c, wp, ob);
1241         wp->ptrs = ptrs;
1242
1243         mutex_unlock(&wp->lock);
1244
1245         if (bch2_err_matches(ret, BCH_ERR_freelist_empty) &&
1246             try_decrease_writepoints(c, write_points_nr))
1247                 goto retry;
1248
1249         if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) ||
1250             bch2_err_matches(ret, BCH_ERR_freelist_empty))
1251                 return cl
1252                         ? -BCH_ERR_bucket_alloc_blocked
1253                         : -BCH_ERR_ENOSPC_bucket_alloc;
1254
1255         return ret;
1256 }
1257
1258 struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
1259 {
1260         struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
1261
1262         return (struct bch_extent_ptr) {
1263                 .type   = 1 << BCH_EXTENT_ENTRY_ptr,
1264                 .gen    = ob->gen,
1265                 .dev    = ob->dev,
1266                 .offset = bucket_to_sector(ca, ob->bucket) +
1267                         ca->mi.bucket_size -
1268                         ob->sectors_free,
1269         };
1270 }
1271
1272 void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
1273                                     struct bkey_i *k, unsigned sectors,
1274                                     bool cached)
1275 {
1276         bch2_alloc_sectors_append_ptrs_inlined(c, wp, k, sectors, cached);
1277 }
1278
1279 /*
1280  * Pointers to the space we just allocated are appended to @k by
1281  * bch2_alloc_sectors_append_ptrs() above; this releases the write point.
1282  */
1283 void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
1284 {
1285         bch2_alloc_sectors_done_inlined(c, wp);
1286 }
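
/*
 * A rough sketch of how a write path strings these together; wp_spec,
 * devs_have, the bkey @e being filled in, and the open_buckets helpers
 * bch2_open_bucket_get()/bch2_open_buckets_put() from alloc_foreground.h are
 * assumed to be supplied by the caller (the real user of this sequence is the
 * write path in io.c):
 *
 *	ret = bch2_alloc_sectors_start_trans(trans, target, 0, wp_spec,
 *					     &devs_have, nr_replicas,
 *					     nr_replicas, RESERVE_none, 0,
 *					     cl, &wp);
 *	if (ret)
 *		return ret;
 *
 *	sectors = min(sectors, wp->sectors_free);
 *	bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
 *	bch2_open_bucket_get(c, wp, &obs);	// keep refs past the unlock
 *	bch2_alloc_sectors_done(c, wp);		// releases the write point
 *
 *	... write the data, then do the btree update inserting @e ...
 *	bch2_open_buckets_put(c, &obs);
 */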
1287
1288 static inline void writepoint_init(struct write_point *wp,
1289                                    enum bch_data_type type)
1290 {
1291         mutex_init(&wp->lock);
1292         wp->data_type = type;
1293
1294         INIT_WORK(&wp->index_update_work, bch2_write_point_do_index_updates);
1295         INIT_LIST_HEAD(&wp->writes);
1296         spin_lock_init(&wp->writes_lock);
1297 }
1298
1299 void bch2_fs_allocator_foreground_init(struct bch_fs *c)
1300 {
1301         struct open_bucket *ob;
1302         struct write_point *wp;
1303
1304         mutex_init(&c->write_points_hash_lock);
1305         c->write_points_nr = ARRAY_SIZE(c->write_points);
1306
1307         /* open bucket 0 is a sentinel NULL: */
1308         spin_lock_init(&c->open_buckets[0].lock);
1309
1310         for (ob = c->open_buckets + 1;
1311              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
1312                 spin_lock_init(&ob->lock);
1313                 c->open_buckets_nr_free++;
1314
1315                 ob->freelist = c->open_buckets_freelist;
1316                 c->open_buckets_freelist = ob - c->open_buckets;
1317         }
1318
1319         writepoint_init(&c->btree_write_point,          BCH_DATA_btree);
1320         writepoint_init(&c->rebalance_write_point,      BCH_DATA_user);
1321         writepoint_init(&c->copygc_write_point,         BCH_DATA_user);
1322
1323         for (wp = c->write_points;
1324              wp < c->write_points + c->write_points_nr; wp++) {
1325                 writepoint_init(wp, BCH_DATA_user);
1326
1327                 wp->last_used   = local_clock();
1328                 wp->write_point = (unsigned long) wp;
1329                 hlist_add_head_rcu(&wp->node,
1330                                    writepoint_hash(c, wp->write_point));
1331         }
1332 }
1333
1334 void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
1335 {
1336         struct open_bucket *ob;
1337
1338         for (ob = c->open_buckets;
1339              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1340              ob++) {
1341                 spin_lock(&ob->lock);
1342                 if (ob->valid && !ob->on_partial_list) {
1343                         prt_printf(out, "%zu ref %u type %s %u:%llu:%u\n",
1344                                ob - c->open_buckets,
1345                                atomic_read(&ob->pin),
1346                                bch2_data_types[ob->data_type],
1347                                ob->dev, ob->bucket, ob->gen);
1348                 }
1349                 spin_unlock(&ob->lock);
1350         }
1351 }
1352
1353 static const char * const bch2_write_point_states[] = {
1354 #define x(n)    #n,
1355         WRITE_POINT_STATES()
1356 #undef x
1357         NULL
1358 };
1359
1360 void bch2_write_points_to_text(struct printbuf *out, struct bch_fs *c)
1361 {
1362         struct write_point *wp;
1363         unsigned i;
1364
1365         for (wp = c->write_points;
1366              wp < c->write_points + ARRAY_SIZE(c->write_points);
1367              wp++) {
1368                 prt_printf(out, "%lu: ", wp->write_point);
1369                 prt_human_readable_u64(out, wp->sectors_allocated);
1370
1371                 prt_printf(out, " last wrote: ");
1372                 bch2_pr_time_units(out, sched_clock() - wp->last_used);
1373
1374                 for (i = 0; i < WRITE_POINT_STATE_NR; i++) {
1375                         prt_printf(out, " %s: ", bch2_write_point_states[i]);
1376                         bch2_pr_time_units(out, wp->time[i]);
1377                 }
1378
1379                 prt_newline(out);
1380         }
1381 }