// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2012 Google, Inc.
 *
 * Foreground allocator code: allocate buckets from freelist, and allocate in
 * sector granularity from writepoints.
 *
 * bch2_bucket_alloc() allocates a single bucket from a specific device.
 *
 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
 * in a given filesystem.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_gc.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "clock.h"
#include "debug.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "io.h"
#include "journal.h"

#include <linux/math64.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <trace/events/bcachefs.h>

const char * const bch2_alloc_reserves[] = {
#define x(t) #t,
        BCH_ALLOC_RESERVES()
#undef x
        NULL
};

/*
 * Open buckets represent a bucket that's currently being allocated from.  They
 * serve two purposes:
 *
 *  - They track buckets that have been partially allocated, allowing for
 *    sub-bucket sized allocations - they're used by the sector allocator below
 *
 *  - They provide a reference to the buckets they own that mark and sweep GC
 *    can find, until the new allocation has a pointer to it inserted into the
 *    btree
 *
 * When allocating some space with the sector allocator, the allocation comes
 * with a reference to an open bucket - the caller is required to put that
 * reference _after_ doing the index update that makes its allocation reachable.
 */

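/*
 * Open buckets are kept in a hash table keyed by (device, bucket), so that
 * bch2_bucket_is_open() can cheaply check whether a bucket is already being
 * allocated from:
 */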
static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
{
        open_bucket_idx_t idx = ob - c->open_buckets;
        open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);

        ob->hash = *slot;
        *slot = idx;
}

static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
{
        open_bucket_idx_t idx = ob - c->open_buckets;
        open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);

        while (*slot != idx) {
                BUG_ON(!*slot);
                slot = &c->open_buckets[*slot].hash;
        }

        *slot = ob->hash;
        ob->hash = 0;
}

void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
        struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);

        if (ob->ec) {
                bch2_ec_bucket_written(c, ob);
                return;
        }

        percpu_down_read(&c->mark_lock);
        spin_lock(&ob->lock);

        ob->valid = false;
        ob->data_type = 0;

        spin_unlock(&ob->lock);
        percpu_up_read(&c->mark_lock);

        spin_lock(&c->freelist_lock);
        bch2_open_bucket_hash_remove(c, ob);

        ob->freelist = c->open_buckets_freelist;
        c->open_buckets_freelist = ob - c->open_buckets;

        c->open_buckets_nr_free++;
        ca->nr_open_buckets--;
        spin_unlock(&c->freelist_lock);

        closure_wake_up(&c->open_buckets_wait);
}

void bch2_open_bucket_write_error(struct bch_fs *c,
                                  struct open_buckets *obs,
                                  unsigned dev)
{
        struct open_bucket *ob;
        unsigned i;

        open_bucket_for_each(c, obs, ob, i)
                if (ob->dev == dev && ob->ec)
                        bch2_ec_bucket_cancel(c, ob);
}

static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
{
        struct open_bucket *ob;

        BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);

        ob = c->open_buckets + c->open_buckets_freelist;
        c->open_buckets_freelist = ob->freelist;
        atomic_set(&ob->pin, 1);
        ob->data_type = 0;

        c->open_buckets_nr_free--;
        return ob;
}

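/*
 * Stash a bucket we didn't end up using on the device's partial list, so that
 * a later allocation can pick it back up - but only for user data, which is
 * allowed to reallocate partially written buckets; otherwise just put it:
 */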
static void open_bucket_free_unused(struct bch_fs *c,
                                    struct write_point *wp,
                                    struct open_bucket *ob)
{
        struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
        bool may_realloc = wp->data_type == BCH_DATA_user;

        BUG_ON(ca->open_buckets_partial_nr >
               ARRAY_SIZE(ca->open_buckets_partial));

        if (ca->open_buckets_partial_nr <
            ARRAY_SIZE(ca->open_buckets_partial) &&
            may_realloc) {
                spin_lock(&c->freelist_lock);
                ob->on_partial_list = true;
                ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
                        ob - c->open_buckets;
                spin_unlock(&c->freelist_lock);

                closure_wake_up(&c->open_buckets_wait);
                closure_wake_up(&c->freelist_wait);
        } else {
                bch2_open_bucket_put(c, ob);
        }
}

/* _only_ for allocating the journal on a new device: */
long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
{
        while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
                u64 b = ca->new_fs_bucket_idx++;

                if (!is_superblock_bucket(ca, b) &&
                    (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))
                        return b;
        }

        return -1;
}

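/*
 * Number of open buckets to hold in reserve, per allocation reserve: btree
 * allocations may use every open bucket, copygc gets a larger share than
 * normal foreground writes:
 */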
static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
{
        switch (reserve) {
        case RESERVE_btree:
        case RESERVE_btree_movinggc:
                return 0;
        case RESERVE_movinggc:
                return OPEN_BUCKETS_COUNT / 4;
        default:
                return OPEN_BUCKETS_COUNT / 2;
        }
}

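/*
 * Attempt to turn a bucket described by an alloc key into an open_bucket:
 * skip it if it's marked nouse, already open, or still needs a journal commit
 * before it can safely be reused; otherwise grab a slot off the open bucket
 * freelist and fill it in:
 */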
static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
                                              enum alloc_reserve reserve,
                                              struct bkey_alloc_unpacked a,
                                              size_t *need_journal_commit,
                                              struct closure *cl)
{
        struct open_bucket *ob;

        if (unlikely(ca->buckets_nouse && test_bit(a.bucket, ca->buckets_nouse)))
                return NULL;

        if (bch2_bucket_is_open(c, ca->dev_idx, a.bucket))
                return NULL;

        if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
                        c->journal.flushed_seq_ondisk, ca->dev_idx, a.bucket)) {
                (*need_journal_commit)++;
                return NULL;
        }

        spin_lock(&c->freelist_lock);

        if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
                if (cl)
                        closure_wait(&c->open_buckets_wait, cl);

                if (!c->blocked_allocate_open_bucket)
                        c->blocked_allocate_open_bucket = local_clock();

                spin_unlock(&c->freelist_lock);

                trace_open_bucket_alloc_fail(ca, bch2_alloc_reserves[reserve]);
                return ERR_PTR(-OPEN_BUCKETS_EMPTY);
        }

        /* Recheck under lock: */
        if (bch2_bucket_is_open(c, ca->dev_idx, a.bucket)) {
                spin_unlock(&c->freelist_lock);
                return NULL;
        }

        ob = bch2_open_bucket_alloc(c);

        spin_lock(&ob->lock);

        ob->valid       = true;
        ob->sectors_free = ca->mi.bucket_size;
        ob->alloc_reserve = reserve;
        ob->dev         = ca->dev_idx;
        ob->gen         = a.gen;
        ob->bucket      = a.bucket;
        spin_unlock(&ob->lock);

        ca->nr_open_buckets++;
        bch2_open_bucket_hash_add(c, ob);

        if (c->blocked_allocate_open_bucket) {
                bch2_time_stats_update(
                        &c->times[BCH_TIME_blocked_allocate_open_bucket],
                        c->blocked_allocate_open_bucket);
                c->blocked_allocate_open_bucket = 0;
        }

        if (c->blocked_allocate) {
                bch2_time_stats_update(
                        &c->times[BCH_TIME_blocked_allocate],
                        c->blocked_allocate);
                c->blocked_allocate = 0;
        }

        spin_unlock(&c->freelist_lock);

        trace_bucket_alloc(ca, bch2_alloc_reserves[reserve]);
        return ob;
}

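/*
 * Given an entry from the freespace btree, look up the corresponding alloc
 * key (through the key cache) and sanity check it against the freespace entry
 * before handing it to __try_alloc_bucket():
 */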
static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
                                            enum alloc_reserve reserve, u64 free_entry,
                                            size_t *need_journal_commit,
                                            struct closure *cl)
{
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
        struct bkey_s_c k;
        struct open_bucket *ob;
        struct bkey_alloc_unpacked a;
        u64 b = free_entry & ~(~0ULL << 56);
        unsigned genbits = free_entry >> 56;
        struct printbuf buf = PRINTBUF;
        int ret;

        bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(ca->dev_idx, b), BTREE_ITER_CACHED);
        k = bch2_btree_iter_peek_slot(&iter);
        ret = bkey_err(k);
        if (ret) {
                ob = ERR_PTR(ret);
                goto err;
        }

        a = bch2_alloc_unpack(k);

        if (bch2_fs_inconsistent_on(bucket_state(a) != BUCKET_free, c,
                        "non free bucket in freespace btree (state %s)\n"
                        "  %s\n"
                        "  at %llu (genbits %u)",
                        bch2_bucket_states[bucket_state(a)],
                        (bch2_bkey_val_to_text(&buf, c, k), buf.buf),
                        free_entry, genbits)) {
                ob = ERR_PTR(-EIO);
                goto err;
        }

        if (bch2_fs_inconsistent_on(genbits != (alloc_freespace_genbits(a) >> 56), c,
                        "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
                        "  %s",
                        genbits, alloc_freespace_genbits(a) >> 56,
                        (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
                ob = ERR_PTR(-EIO);
                goto err;
        }

        if (bch2_fs_inconsistent_on(b < ca->mi.first_bucket || b >= ca->mi.nbuckets, c,
                        "freespace btree has bucket outside allowed range (got %llu, valid %u-%llu)",
                        b, ca->mi.first_bucket, ca->mi.nbuckets)) {
                ob = ERR_PTR(-EIO);
                goto err;
        }

        ob = __try_alloc_bucket(c, ca, reserve, a, need_journal_commit, cl);
err:
        bch2_trans_iter_exit(trans, &iter);
        printbuf_exit(&buf);
        return ob;
}

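/*
 * Try to reuse a partially used bucket stashed on the device's partial list
 * by open_bucket_free_unused(), if one is available for this reserve:
 */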
static struct open_bucket *try_alloc_partial_bucket(struct bch_fs *c, struct bch_dev *ca,
                                                    enum alloc_reserve reserve)
{
        struct open_bucket *ob;
        int i;

        spin_lock(&c->freelist_lock);

        for (i = ca->open_buckets_partial_nr - 1; i >= 0; --i) {
                ob = c->open_buckets + ca->open_buckets_partial[i];

                if (reserve <= ob->alloc_reserve) {
                        array_remove_item(ca->open_buckets_partial,
                                          ca->open_buckets_partial_nr,
                                          i);
                        ob->on_partial_list = false;
                        ob->alloc_reserve = reserve;
                        spin_unlock(&c->freelist_lock);
                        return ob;
                }
        }

        spin_unlock(&c->freelist_lock);
        return NULL;
}

/*
 * This path is for before the freespace btree is initialized:
 *
 * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock &
 * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
 */
static noinline struct open_bucket *
bch2_bucket_alloc_trans_early(struct btree_trans *trans,
                              struct bch_dev *ca,
                              enum alloc_reserve reserve,
                              u64 *b,
                              size_t *need_journal_commit,
                              struct closure *cl)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        struct open_bucket *ob = NULL;
        int ret;

        *b = max_t(u64, *b, ca->mi.first_bucket);
        *b = max_t(u64, *b, ca->new_fs_bucket_idx);

        for_each_btree_key(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, *b),
                           BTREE_ITER_SLOTS, k, ret) {
                struct bkey_alloc_unpacked a;

                if (bkey_cmp(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
                        break;

                if (ca->new_fs_bucket_idx &&
                    is_superblock_bucket(ca, k.k->p.offset))
                        continue;

                a = bch2_alloc_unpack(k);

                if (bucket_state(a) != BUCKET_free)
                        continue;

                ob = __try_alloc_bucket(trans->c, ca, reserve, a,
                                        need_journal_commit, cl);
                if (ob)
                        break;
        }
        bch2_trans_iter_exit(trans, &iter);

        *b = iter.pos.offset;

        return ob ?: ERR_PTR(ret ?: -FREELIST_EMPTY);
}

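/*
 * Allocate a bucket from @ca by walking the freespace btree starting from
 * bucket *b, falling back to scanning the alloc btree directly if the
 * freespace btree hasn't been initialized yet:
 */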
static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
                                                   struct bch_dev *ca,
                                                   enum alloc_reserve reserve,
                                                   u64 *b,
                                                   size_t *need_journal_commit,
                                                   struct closure *cl)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        struct open_bucket *ob = NULL;
        int ret;

        if (unlikely(!ca->mi.freespace_initialized))
                return bch2_bucket_alloc_trans_early(trans, ca, reserve, b,
                                                     need_journal_commit, cl);

        BUG_ON(ca->new_fs_bucket_idx);

        for_each_btree_key(trans, iter, BTREE_ID_freespace,
                           POS(ca->dev_idx, *b), 0, k, ret) {
                if (k.k->p.inode != ca->dev_idx)
                        break;

                for (*b = max(*b, bkey_start_offset(k.k));
                     *b != k.k->p.offset && !ob;
                     (*b)++) {
                        if (btree_trans_too_many_iters(trans)) {
                                ob = ERR_PTR(-EINTR);
                                break;
                        }

                        ob = try_alloc_bucket(trans, ca, reserve, *b,
                                              need_journal_commit, cl);
                }
                if (ob)
                        break;
        }
        bch2_trans_iter_exit(trans, &iter);

        return ob ?: ERR_PTR(ret);
}

/**
 * bch2_bucket_alloc - allocate a single bucket from a specific device
 *
 * Returns a pointer to the newly allocated open_bucket on success, or an
 * ERR_PTR() on failure.
 */
struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
                                      enum alloc_reserve reserve,
                                      bool may_alloc_partial,
                                      struct closure *cl)
{
        struct open_bucket *ob = NULL;
        size_t need_journal_commit = 0;
        u64 avail = dev_buckets_available(ca, reserve);
        u64 b = 0;
        int ret;

        if (may_alloc_partial) {
                ob = try_alloc_partial_bucket(c, ca, reserve);
                if (ob)
                        return ob;
        }
again:
        if (!avail) {
                if (cl) {
                        closure_wait(&c->freelist_wait, cl);
                        /* recheck after putting ourself on waitlist */
                        avail = dev_buckets_available(ca, reserve);
                        if (avail) {
                                closure_wake_up(&c->freelist_wait);
                                goto again;
                        }
                }

                if (!c->blocked_allocate)
                        c->blocked_allocate = local_clock();

                ob = ERR_PTR(-FREELIST_EMPTY);
                goto err;
        }

        ret = bch2_trans_do(c, NULL, NULL, 0,
                        PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans,
                                                        ca, reserve, &b,
                                                        &need_journal_commit, cl)));

        if (need_journal_commit * 2 > avail)
                bch2_journal_flush_async(&c->journal, NULL);
err:
        if (!ob)
                ob = ERR_PTR(ret ?: -FREELIST_EMPTY);

        if (ob == ERR_PTR(-FREELIST_EMPTY)) {
                trace_bucket_alloc_fail(ca, bch2_alloc_reserves[reserve], avail,
                                        need_journal_commit, cl == NULL);
                atomic_long_inc(&c->bucket_alloc_fail);
        }

        return ob;
}

static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
                            unsigned l, unsigned r)
{
        return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
                (stripe->next_alloc[l] < stripe->next_alloc[r]));
}

#define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)

struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
                                          struct dev_stripe_state *stripe,
                                          struct bch_devs_mask *devs)
{
        struct dev_alloc_list ret = { .nr = 0 };
        unsigned i;

        for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
                ret.devs[ret.nr++] = i;

        bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
        return ret;
}

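/*
 * Bias future allocations away from this device: bump its counter by an
 * amount inversely proportional to its free space, then shift all counters
 * down so they stay bounded - devices with the smallest counters sort first
 * in bch2_dev_alloc_list():
 */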
void bch2_dev_stripe_increment(struct bch_dev *ca,
                               struct dev_stripe_state *stripe)
{
        u64 *v = stripe->next_alloc + ca->dev_idx;
        u64 free_space = dev_buckets_available(ca, RESERVE_none);
        u64 free_space_inv = free_space
                ? div64_u64(1ULL << 48, free_space)
                : 1ULL << 48;
        u64 scale = *v / 4;

        if (*v + free_space_inv >= *v)
                *v += free_space_inv;
        else
                *v = U64_MAX;

        for (v = stripe->next_alloc;
             v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
                *v = *v < scale ? 0 : *v - scale;
}

#define BUCKET_MAY_ALLOC_PARTIAL        (1 << 0)
#define BUCKET_ALLOC_USE_DURABILITY     (1 << 1)

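/*
 * Account a newly allocated bucket against the current allocation request:
 * clear its device from the set we may still allocate from, and count it
 * towards nr_effective (weighted by durability, if requested):
 */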
static void add_new_bucket(struct bch_fs *c,
                           struct open_buckets *ptrs,
                           struct bch_devs_mask *devs_may_alloc,
                           unsigned *nr_effective,
                           bool *have_cache,
                           unsigned flags,
                           struct open_bucket *ob)
{
        unsigned durability =
                bch_dev_bkey_exists(c, ob->dev)->mi.durability;

        __clear_bit(ob->dev, devs_may_alloc->d);
        *nr_effective   += (flags & BUCKET_ALLOC_USE_DURABILITY)
                ? durability : 1;
        *have_cache     |= !durability;

        ob_push(c, ptrs, ob);
}

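/*
 * Allocate one bucket per device, in the order given by the stripe state
 * (roughly, most free space first), until we have nr_replicas effective
 * replicas or run out of devices:
 */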
int bch2_bucket_alloc_set(struct bch_fs *c,
                      struct open_buckets *ptrs,
                      struct dev_stripe_state *stripe,
                      struct bch_devs_mask *devs_may_alloc,
                      unsigned nr_replicas,
                      unsigned *nr_effective,
                      bool *have_cache,
                      enum alloc_reserve reserve,
                      unsigned flags,
                      struct closure *cl)
{
        struct dev_alloc_list devs_sorted =
                bch2_dev_alloc_list(c, stripe, devs_may_alloc);
        unsigned dev;
        struct bch_dev *ca;
        int ret = -INSUFFICIENT_DEVICES;
        unsigned i;

        BUG_ON(*nr_effective >= nr_replicas);

        for (i = 0; i < devs_sorted.nr; i++) {
                struct open_bucket *ob;

                dev = devs_sorted.devs[i];

                rcu_read_lock();
                ca = rcu_dereference(c->devs[dev]);
                if (ca)
                        percpu_ref_get(&ca->ref);
                rcu_read_unlock();

                if (!ca)
                        continue;

                if (!ca->mi.durability && *have_cache) {
                        percpu_ref_put(&ca->ref);
                        continue;
                }

                ob = bch2_bucket_alloc(c, ca, reserve,
                                flags & BUCKET_MAY_ALLOC_PARTIAL, cl);
                if (!IS_ERR(ob))
                        bch2_dev_stripe_increment(ca, stripe);
                percpu_ref_put(&ca->ref);

                if (IS_ERR(ob)) {
                        ret = PTR_ERR(ob);

                        if (cl)
                                break;
                        continue;
                }

                add_new_bucket(c, ptrs, devs_may_alloc,
                               nr_effective, have_cache, flags, ob);

                if (*nr_effective >= nr_replicas) {
                        ret = 0;
                        break;
                }
        }

        return ret;
}

/* Allocate from stripes: */

/*
 * if we can't allocate a new stripe because there are already too many
 * partially filled stripes, force allocating from an existing stripe even when
 * it's to a device we don't want:
 */

static int bucket_alloc_from_stripe(struct bch_fs *c,
                         struct open_buckets *ptrs,
                         struct write_point *wp,
                         struct bch_devs_mask *devs_may_alloc,
                         u16 target,
                         unsigned erasure_code,
                         unsigned nr_replicas,
                         unsigned *nr_effective,
                         bool *have_cache,
                         unsigned flags,
                         struct closure *cl)
{
        struct dev_alloc_list devs_sorted;
        struct ec_stripe_head *h;
        struct open_bucket *ob;
        struct bch_dev *ca;
        unsigned i, ec_idx;

        if (!erasure_code)
                return 0;

        if (nr_replicas < 2)
                return 0;

        if (ec_open_bucket(c, ptrs))
                return 0;

        h = bch2_ec_stripe_head_get(c, target, 0, nr_replicas - 1,
                                    wp == &c->copygc_write_point,
                                    cl);
        if (IS_ERR(h))
                return -PTR_ERR(h);
        if (!h)
                return 0;

        devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);

        for (i = 0; i < devs_sorted.nr; i++)
                for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
                        if (!h->s->blocks[ec_idx])
                                continue;

                        ob = c->open_buckets + h->s->blocks[ec_idx];
                        if (ob->dev == devs_sorted.devs[i] &&
                            !test_and_set_bit(ec_idx, h->s->blocks_allocated))
                                goto got_bucket;
                }
        goto out_put_head;
got_bucket:
        ca = bch_dev_bkey_exists(c, ob->dev);

        ob->ec_idx      = ec_idx;
        ob->ec          = h->s;

        add_new_bucket(c, ptrs, devs_may_alloc,
                       nr_effective, have_cache, flags, ob);
        atomic_inc(&h->s->pin);
out_put_head:
        bch2_ec_stripe_head_put(c, h);
        return 0;
}

/* Sector allocator */

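/*
 * Take buckets from the write point's existing list that are still usable for
 * this allocation (right device, durability/cache and erasure coding
 * requirements satisfied); the rest stay on the write point:
 */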
static void get_buckets_from_writepoint(struct bch_fs *c,
                                        struct open_buckets *ptrs,
                                        struct write_point *wp,
                                        struct bch_devs_mask *devs_may_alloc,
                                        unsigned nr_replicas,
                                        unsigned *nr_effective,
                                        bool *have_cache,
                                        unsigned flags,
                                        bool need_ec)
{
        struct open_buckets ptrs_skip = { .nr = 0 };
        struct open_bucket *ob;
        unsigned i;

        open_bucket_for_each(c, &wp->ptrs, ob, i) {
                struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);

                if (*nr_effective < nr_replicas &&
                    test_bit(ob->dev, devs_may_alloc->d) &&
                    (ca->mi.durability ||
                     (wp->data_type == BCH_DATA_user && !*have_cache)) &&
                    (ob->ec || !need_ec)) {
                        add_new_bucket(c, ptrs, devs_may_alloc,
                                       nr_effective, have_cache,
                                       flags, ob);
                } else {
                        ob_push(c, &ptrs_skip, ob);
                }
        }
        wp->ptrs = ptrs_skip;
}

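/*
 * Top level bucket allocation for a write point: first reuse what's already
 * on the write point, then allocate from stripes if erasure coding was
 * requested, then allocate new buckets - nonblocking first, so that one full
 * device doesn't stall allocation from the others:
 */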
static int open_bucket_add_buckets(struct bch_fs *c,
                        struct open_buckets *ptrs,
                        struct write_point *wp,
                        struct bch_devs_list *devs_have,
                        u16 target,
                        unsigned erasure_code,
                        unsigned nr_replicas,
                        unsigned *nr_effective,
                        bool *have_cache,
                        enum alloc_reserve reserve,
                        unsigned flags,
                        struct closure *_cl)
{
        struct bch_devs_mask devs;
        struct open_bucket *ob;
        struct closure *cl = NULL;
        int ret;
        unsigned i;

        rcu_read_lock();
        devs = target_rw_devs(c, wp->data_type, target);
        rcu_read_unlock();

        /* Don't allocate from devices we already have pointers to: */
        for (i = 0; i < devs_have->nr; i++)
                __clear_bit(devs_have->devs[i], devs.d);

        open_bucket_for_each(c, ptrs, ob, i)
                __clear_bit(ob->dev, devs.d);

        if (erasure_code) {
                if (!ec_open_bucket(c, ptrs)) {
                        get_buckets_from_writepoint(c, ptrs, wp, &devs,
                                                    nr_replicas, nr_effective,
                                                    have_cache, flags, true);
                        if (*nr_effective >= nr_replicas)
                                return 0;
                }

                if (!ec_open_bucket(c, ptrs)) {
                        ret = bucket_alloc_from_stripe(c, ptrs, wp, &devs,
                                                 target, erasure_code,
                                                 nr_replicas, nr_effective,
                                                 have_cache, flags, _cl);
                        if (ret == -FREELIST_EMPTY ||
                            ret == -OPEN_BUCKETS_EMPTY)
                                return ret;
                        if (*nr_effective >= nr_replicas)
                                return 0;
                }
        }

        get_buckets_from_writepoint(c, ptrs, wp, &devs,
                                    nr_replicas, nr_effective,
                                    have_cache, flags, false);
        if (*nr_effective >= nr_replicas)
                return 0;

retry_blocking:
        /*
         * Try nonblocking first, so that if one device is full we'll try from
         * other devices:
         */
        ret = bch2_bucket_alloc_set(c, ptrs, &wp->stripe, &devs,
                                nr_replicas, nr_effective, have_cache,
                                reserve, flags, cl);
        if (ret && ret != -INSUFFICIENT_DEVICES && !cl && _cl) {
                cl = _cl;
                goto retry_blocking;
        }

        return ret;
}

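/*
 * Drop any open buckets in @obs that point to @ca (or to any device, if @ca
 * is NULL), including buckets belonging to an erasure coded stripe with a
 * block on @ca; the remaining buckets are kept:
 */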
void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
                                struct open_buckets *obs)
{
        struct open_buckets ptrs = { .nr = 0 };
        struct open_bucket *ob, *ob2;
        unsigned i, j;

        open_bucket_for_each(c, obs, ob, i) {
                bool drop = !ca || ob->dev == ca->dev_idx;

                if (!drop && ob->ec) {
                        mutex_lock(&ob->ec->lock);
                        for (j = 0; j < ob->ec->new_stripe.key.v.nr_blocks; j++) {
                                if (!ob->ec->blocks[j])
                                        continue;

                                ob2 = c->open_buckets + ob->ec->blocks[j];
                                drop |= ob2->dev == ca->dev_idx;
                        }
                        mutex_unlock(&ob->ec->lock);
                }

                if (drop)
                        bch2_open_bucket_put(c, ob);
                else
                        ob_push(c, &ptrs, ob);
        }

        *obs = ptrs;
}

void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
                          struct write_point *wp)
{
        mutex_lock(&wp->lock);
        bch2_open_buckets_stop_dev(c, ca, &wp->ptrs);
        mutex_unlock(&wp->lock);
}

static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
                                                 unsigned long write_point)
{
        unsigned hash =
                hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));

        return &c->write_points_hash[hash];
}

static struct write_point *__writepoint_find(struct hlist_head *head,
                                             unsigned long write_point)
{
        struct write_point *wp;

        rcu_read_lock();
        hlist_for_each_entry_rcu(wp, head, node)
                if (wp->write_point == write_point)
                        goto out;
        wp = NULL;
out:
        rcu_read_unlock();
        return wp;
}

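/*
 * Limit the number of write points based on how much space could end up
 * stranded in their partially filled buckets, relative to free space:
 */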
static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
{
        u64 stranded    = c->write_points_nr * c->bucket_size_max;
        u64 free        = bch2_fs_usage_read_short(c).free;

        return stranded * factor > free;
}

static bool try_increase_writepoints(struct bch_fs *c)
{
        struct write_point *wp;

        if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
            too_many_writepoints(c, 32))
                return false;

        wp = c->write_points + c->write_points_nr++;
        hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
        return true;
}

static bool try_decrease_writepoints(struct bch_fs *c,
                                     unsigned old_nr)
{
        struct write_point *wp;

        mutex_lock(&c->write_points_hash_lock);
        if (c->write_points_nr < old_nr) {
                mutex_unlock(&c->write_points_hash_lock);
                return true;
        }

        if (c->write_points_nr == 1 ||
            !too_many_writepoints(c, 8)) {
                mutex_unlock(&c->write_points_hash_lock);
                return false;
        }

        wp = c->write_points + --c->write_points_nr;

        hlist_del_rcu(&wp->node);
        mutex_unlock(&c->write_points_hash_lock);

        bch2_writepoint_stop(c, NULL, wp);
        return true;
}

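/*
 * Find (and lock) the write point for @write_point: either an explicit
 * pointer to a write point, or a hashed identifier - in which case, if no
 * matching write point exists yet, the least recently used one is stolen:
 */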
static struct write_point *writepoint_find(struct bch_fs *c,
                                           unsigned long write_point)
{
        struct write_point *wp, *oldest;
        struct hlist_head *head;

        if (!(write_point & 1UL)) {
                wp = (struct write_point *) write_point;
                mutex_lock(&wp->lock);
                return wp;
        }

        head = writepoint_hash(c, write_point);
restart_find:
        wp = __writepoint_find(head, write_point);
        if (wp) {
lock_wp:
                mutex_lock(&wp->lock);
                if (wp->write_point == write_point)
                        goto out;
                mutex_unlock(&wp->lock);
                goto restart_find;
        }
restart_find_oldest:
        oldest = NULL;
        for (wp = c->write_points;
             wp < c->write_points + c->write_points_nr; wp++)
                if (!oldest || time_before64(wp->last_used, oldest->last_used))
                        oldest = wp;

        mutex_lock(&oldest->lock);
        mutex_lock(&c->write_points_hash_lock);
        if (oldest >= c->write_points + c->write_points_nr ||
            try_increase_writepoints(c)) {
                mutex_unlock(&c->write_points_hash_lock);
                mutex_unlock(&oldest->lock);
                goto restart_find_oldest;
        }

        wp = __writepoint_find(head, write_point);
        if (wp && wp != oldest) {
                mutex_unlock(&c->write_points_hash_lock);
                mutex_unlock(&oldest->lock);
                goto lock_wp;
        }

        wp = oldest;
        hlist_del_rcu(&wp->node);
        wp->write_point = write_point;
        hlist_add_head_rcu(&wp->node, head);
        mutex_unlock(&c->write_points_hash_lock);
out:
        wp->last_used = sched_clock();
        return wp;
}

/*
 * Get us an open_bucket we can allocate from, return with it locked:
 */
struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
                                unsigned target,
                                unsigned erasure_code,
                                struct write_point_specifier write_point,
                                struct bch_devs_list *devs_have,
                                unsigned nr_replicas,
                                unsigned nr_replicas_required,
                                enum alloc_reserve reserve,
                                unsigned flags,
                                struct closure *cl)
{
        struct write_point *wp;
        struct open_bucket *ob;
        struct open_buckets ptrs;
        unsigned nr_effective, write_points_nr;
        unsigned ob_flags = 0;
        bool have_cache;
        int ret;
        int i;

        if (!(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS))
                ob_flags |= BUCKET_ALLOC_USE_DURABILITY;

        BUG_ON(!nr_replicas || !nr_replicas_required);
retry:
        ptrs.nr         = 0;
        nr_effective    = 0;
        write_points_nr = c->write_points_nr;
        have_cache      = false;

        wp = writepoint_find(c, write_point.v);

        if (wp->data_type == BCH_DATA_user)
                ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;

        /* metadata may not allocate on cache devices: */
        if (wp->data_type != BCH_DATA_user)
                have_cache = true;

        if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
                ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
                                              target, erasure_code,
                                              nr_replicas, &nr_effective,
                                              &have_cache, reserve,
                                              ob_flags, cl);
        } else {
                ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
                                              target, erasure_code,
                                              nr_replicas, &nr_effective,
                                              &have_cache, reserve,
                                              ob_flags, NULL);
                if (!ret)
                        goto alloc_done;

                ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
                                              0, erasure_code,
                                              nr_replicas, &nr_effective,
                                              &have_cache, reserve,
                                              ob_flags, cl);
        }
alloc_done:
        BUG_ON(!ret && nr_effective < nr_replicas);

        if (erasure_code && !ec_open_bucket(c, &ptrs))
                pr_debug("failed to get ec bucket: ret %u", ret);

        if (ret == -INSUFFICIENT_DEVICES &&
            nr_effective >= nr_replicas_required)
                ret = 0;

        if (ret)
                goto err;

        /* Free buckets we didn't use: */
        open_bucket_for_each(c, &wp->ptrs, ob, i)
                open_bucket_free_unused(c, wp, ob);

        wp->ptrs = ptrs;

        wp->sectors_free = UINT_MAX;

        open_bucket_for_each(c, &wp->ptrs, ob, i)
                wp->sectors_free = min(wp->sectors_free, ob->sectors_free);

        BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);

        return wp;
err:
        open_bucket_for_each(c, &wp->ptrs, ob, i)
                if (ptrs.nr < ARRAY_SIZE(ptrs.v))
                        ob_push(c, &ptrs, ob);
                else
                        open_bucket_free_unused(c, wp, ob);
        wp->ptrs = ptrs;

        mutex_unlock(&wp->lock);

        if (ret == -FREELIST_EMPTY &&
            try_decrease_writepoints(c, write_points_nr))
                goto retry;

        switch (ret) {
        case -OPEN_BUCKETS_EMPTY:
        case -FREELIST_EMPTY:
                return cl ? ERR_PTR(-EAGAIN) : ERR_PTR(-ENOSPC);
        case -INSUFFICIENT_DEVICES:
                return ERR_PTR(-EROFS);
        default:
                return ERR_PTR(ret);
        }
}

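/*
 * Construct an extent pointer to the space that will be allocated next from
 * @ob, i.e. just past whatever has already been allocated from the bucket:
 */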
struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
{
        struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);

        return (struct bch_extent_ptr) {
                .type   = 1 << BCH_EXTENT_ENTRY_ptr,
                .gen    = ob->gen,
                .dev    = ob->dev,
                .offset = bucket_to_sector(ca, ob->bucket) +
                        ca->mi.bucket_size -
                        ob->sectors_free,
        };
}

/*
 * Append pointers to the space we just allocated to @k, and mark @sectors space
 * as allocated out of @ob
 */
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
                                    struct bkey_i *k, unsigned sectors,
                                    bool cached)
{
        struct open_bucket *ob;
        unsigned i;

        BUG_ON(sectors > wp->sectors_free);
        wp->sectors_free -= sectors;

        open_bucket_for_each(c, &wp->ptrs, ob, i) {
                struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
                struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);

                ptr.cached = cached ||
                        (!ca->mi.durability &&
                         wp->data_type == BCH_DATA_user);

                bch2_bkey_append_ptr(k, ptr);

                BUG_ON(sectors > ob->sectors_free);
                ob->sectors_free -= sectors;
        }
}

/*
 * Release the write point lock, and put any open buckets that are now full -
 * buckets with space remaining stay on the write point for the next
 * allocation:
 */
void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
{
        struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
        struct open_bucket *ob;
        unsigned i;

        open_bucket_for_each(c, &wp->ptrs, ob, i)
                ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
        wp->ptrs = keep;

        mutex_unlock(&wp->lock);

        bch2_open_buckets_put(c, &ptrs);
}

static inline void writepoint_init(struct write_point *wp,
                                   enum bch_data_type type)
{
        mutex_init(&wp->lock);
        wp->data_type = type;
}

void bch2_fs_allocator_foreground_init(struct bch_fs *c)
{
        struct open_bucket *ob;
        struct write_point *wp;

        mutex_init(&c->write_points_hash_lock);
        c->write_points_nr = ARRAY_SIZE(c->write_points);

        /* open bucket 0 is a sentinel NULL: */
        spin_lock_init(&c->open_buckets[0].lock);

        for (ob = c->open_buckets + 1;
             ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
                spin_lock_init(&ob->lock);
                c->open_buckets_nr_free++;

                ob->freelist = c->open_buckets_freelist;
                c->open_buckets_freelist = ob - c->open_buckets;
        }

        writepoint_init(&c->btree_write_point,          BCH_DATA_btree);
        writepoint_init(&c->rebalance_write_point,      BCH_DATA_user);
        writepoint_init(&c->copygc_write_point,         BCH_DATA_user);

        for (wp = c->write_points;
             wp < c->write_points + c->write_points_nr; wp++) {
                writepoint_init(wp, BCH_DATA_user);

                wp->last_used   = sched_clock();
                wp->write_point = (unsigned long) wp;
                hlist_add_head_rcu(&wp->node,
                                   writepoint_hash(c, wp->write_point));
        }
}

void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
{
        struct open_bucket *ob;

        for (ob = c->open_buckets;
             ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
             ob++) {
                spin_lock(&ob->lock);
                if (ob->valid && !ob->on_partial_list) {
                        pr_buf(out, "%zu ref %u type %s\n",
                               ob - c->open_buckets,
                               atomic_read(&ob->pin),
                               bch2_data_types[ob->data_type]);
                }
                spin_unlock(&ob->lock);
        }
}