1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright 2012 Google, Inc.
4  *
5  * Foreground allocator code: allocate buckets from the freelist, and allocate
6  * at sector granularity from write points.
7  *
8  * bch2_bucket_alloc() allocates a single bucket from a specific device.
9  *
10  * bch2_bucket_alloc_set() allocates one or more buckets from different devices
11  * in a given filesystem.
12  */
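/*
 * Illustrative sketch of a hypothetical caller, using the
 * bch2_bucket_alloc_set() signature declared further down (wp, devs and
 * nr_replicas here are assumed locals, error handling elided):
 *
 *      struct open_buckets ptrs = { .nr = 0 };
 *      unsigned nr_effective = 0;
 *      bool have_cache = false;
 *
 *      ret = bch2_bucket_alloc_set(c, &ptrs, &wp->stripe, &devs,
 *                                  nr_replicas, &nr_effective, &have_cache,
 *                                  RESERVE_none, 0, NULL);
 */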
13
14 #include "bcachefs.h"
15 #include "alloc_background.h"
16 #include "alloc_foreground.h"
17 #include "btree_iter.h"
18 #include "btree_update.h"
19 #include "btree_gc.h"
20 #include "buckets.h"
21 #include "buckets_waiting_for_journal.h"
22 #include "clock.h"
23 #include "debug.h"
24 #include "disk_groups.h"
25 #include "ec.h"
26 #include "error.h"
27 #include "io.h"
28 #include "journal.h"
29
30 #include <linux/math64.h>
31 #include <linux/rculist.h>
32 #include <linux/rcupdate.h>
33 #include <trace/events/bcachefs.h>
34
35 const char * const bch2_alloc_reserves[] = {
36 #define x(t) #t,
37         BCH_ALLOC_RESERVES()
38 #undef x
39         NULL
40 };
41
42 /*
43  * Open buckets represent buckets that are currently being allocated from.  They
44  * serve two purposes:
45  *
46  *  - They track buckets that have been partially allocated, allowing for
47  *    sub-bucket sized allocations - they're used by the sector allocator below
48  *
49  *  - They provide a reference to the buckets they own that mark and sweep GC
50  *    can find, until the new allocation has a pointer to it inserted into the
51  *    btree
52  *
53  * When allocating some space with the sector allocator, the allocation comes
54  * with a reference to an open bucket - the caller is required to put that
55  * reference _after_ doing the index update that makes its allocation reachable.
56  */
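/*
 * Illustrative sketch of that ordering for a hypothetical caller allocating a
 * single bucket directly:
 *
 *      ob = bch2_bucket_alloc(c, ca, RESERVE_none, false, NULL);
 *      // write data into the bucket...
 *      // insert the btree key pointing at that data...
 *      bch2_open_bucket_put(c, ob);    // only after the index update
 */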
57
58 static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
59 {
60         open_bucket_idx_t idx = ob - c->open_buckets;
61         open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
62
63         ob->hash = *slot;
64         *slot = idx;
65 }
66
67 static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
68 {
69         open_bucket_idx_t idx = ob - c->open_buckets;
70         open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
71
72         while (*slot != idx) {
73                 BUG_ON(!*slot);
74                 slot = &c->open_buckets[*slot].hash;
75         }
76
77         *slot = ob->hash;
78         ob->hash = 0;
79 }
80
81 void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
82 {
83         struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
84
85         if (ob->ec) {
86                 bch2_ec_bucket_written(c, ob);
87                 return;
88         }
89
90         percpu_down_read(&c->mark_lock);
91         spin_lock(&ob->lock);
92
93         ob->valid = false;
94         ob->data_type = 0;
95
96         spin_unlock(&ob->lock);
97         percpu_up_read(&c->mark_lock);
98
99         spin_lock(&c->freelist_lock);
100         bch2_open_bucket_hash_remove(c, ob);
101
102         ob->freelist = c->open_buckets_freelist;
103         c->open_buckets_freelist = ob - c->open_buckets;
104
105         c->open_buckets_nr_free++;
106         ca->nr_open_buckets--;
107         spin_unlock(&c->freelist_lock);
108
109         closure_wake_up(&c->open_buckets_wait);
110 }
111
112 void bch2_open_bucket_write_error(struct bch_fs *c,
113                                   struct open_buckets *obs,
114                                   unsigned dev)
115 {
116         struct open_bucket *ob;
117         unsigned i;
118
119         open_bucket_for_each(c, obs, ob, i)
120                 if (ob->dev == dev && ob->ec)
121                         bch2_ec_bucket_cancel(c, ob);
122 }
123
124 static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
125 {
126         struct open_bucket *ob;
127
128         BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);
129
130         ob = c->open_buckets + c->open_buckets_freelist;
131         c->open_buckets_freelist = ob->freelist;
132         atomic_set(&ob->pin, 1);
133         ob->data_type = 0;
134
135         c->open_buckets_nr_free--;
136         return ob;
137 }
138
139 static void open_bucket_free_unused(struct bch_fs *c,
140                                     struct write_point *wp,
141                                     struct open_bucket *ob)
142 {
143         struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
144         bool may_realloc = wp->data_type == BCH_DATA_user;
145
146         BUG_ON(ca->open_buckets_partial_nr >
147                ARRAY_SIZE(ca->open_buckets_partial));
148
149         if (ca->open_buckets_partial_nr <
150             ARRAY_SIZE(ca->open_buckets_partial) &&
151             may_realloc) {
152                 spin_lock(&c->freelist_lock);
153                 ob->on_partial_list = true;
154                 ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
155                         ob - c->open_buckets;
156                 spin_unlock(&c->freelist_lock);
157
158                 closure_wake_up(&c->open_buckets_wait);
159                 closure_wake_up(&c->freelist_wait);
160         } else {
161                 bch2_open_bucket_put(c, ob);
162         }
163 }
164
165 /* _only_ for allocating the journal on a new device: */
166 long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
167 {
168         while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
169                 u64 b = ca->new_fs_bucket_idx++;
170
171                 if (!is_superblock_bucket(ca, b) &&
172                     (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))
173                         return b;
174         }
175
176         return -1;
177 }
178
179 static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
180 {
181         switch (reserve) {
182         case RESERVE_btree:
183         case RESERVE_btree_movinggc:
184                 return 0;
185         case RESERVE_movinggc:
186                 return OPEN_BUCKETS_COUNT / 4;
187         default:
188                 return OPEN_BUCKETS_COUNT / 2;
189         }
190 }
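/*
 * Worked example, assuming OPEN_BUCKETS_COUNT is 1024 (its value in bcachefs.h
 * at the time of writing): btree and btree_movinggc allocations may use every
 * open bucket, movinggc allocations stop once 256 or fewer open buckets remain
 * free, and everything else stops once 512 or fewer remain, so the
 * higher-priority reserves always have open buckets left to draw from.
 */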
191
192 static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
193                                               u64 bucket,
194                                               enum alloc_reserve reserve,
195                                               struct bch_alloc_v4 *a,
196                                               u64 *skipped_open,
197                                               u64 *skipped_need_journal_commit,
198                                               u64 *skipped_nouse,
199                                               struct closure *cl)
200 {
201         struct open_bucket *ob;
202
203         if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
204                 (*skipped_nouse)++;
205                 return NULL;
206         }
207
208         if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
209                 (*skipped_open)++;
210                 return NULL;
211         }
212
213         if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
214                         c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) {
215                 (*skipped_need_journal_commit)++;
216                 return NULL;
217         }
218
219         spin_lock(&c->freelist_lock);
220
221         if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
222                 if (cl)
223                         closure_wait(&c->open_buckets_wait, cl);
224
225                 if (!c->blocked_allocate_open_bucket)
226                         c->blocked_allocate_open_bucket = local_clock();
227
228                 spin_unlock(&c->freelist_lock);
229
230                 trace_open_bucket_alloc_fail(ca, bch2_alloc_reserves[reserve]);
231                 return ERR_PTR(-OPEN_BUCKETS_EMPTY);
232         }
233
234         /* Recheck under lock: */
235         if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
236                 spin_unlock(&c->freelist_lock);
237                 (*skipped_open)++;
238                 return NULL;
239         }
240
241         ob = bch2_open_bucket_alloc(c);
242
243         spin_lock(&ob->lock);
244
245         ob->valid       = true;
246         ob->sectors_free = ca->mi.bucket_size;
247         ob->alloc_reserve = reserve;
248         ob->dev         = ca->dev_idx;
249         ob->gen         = a->gen;
250         ob->bucket      = bucket;
251         spin_unlock(&ob->lock);
252
253         ca->nr_open_buckets++;
254         bch2_open_bucket_hash_add(c, ob);
255
256         if (c->blocked_allocate_open_bucket) {
257                 bch2_time_stats_update(
258                         &c->times[BCH_TIME_blocked_allocate_open_bucket],
259                         c->blocked_allocate_open_bucket);
260                 c->blocked_allocate_open_bucket = 0;
261         }
262
263         if (c->blocked_allocate) {
264                 bch2_time_stats_update(
265                         &c->times[BCH_TIME_blocked_allocate],
266                         c->blocked_allocate);
267                 c->blocked_allocate = 0;
268         }
269
270         spin_unlock(&c->freelist_lock);
271
272         trace_bucket_alloc(ca, bch2_alloc_reserves[reserve]);
273         return ob;
274 }
275
276 static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
277                                             enum alloc_reserve reserve, u64 free_entry,
278                                             u64 *skipped_open,
279                                             u64 *skipped_need_journal_commit,
280                                             u64 *skipped_nouse,
281                                             struct closure *cl)
282 {
283         struct bch_fs *c = trans->c;
284         struct btree_iter iter;
285         struct bkey_s_c k;
286         struct open_bucket *ob;
287         struct bch_alloc_v4 a;
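        /*
         * A freespace btree key offset packs the bucket number into the low 56
         * bits and the bucket's generation bits into the high 8:
         */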
288         u64 b = free_entry & ~(~0ULL << 56);
289         unsigned genbits = free_entry >> 56;
290         struct printbuf buf = PRINTBUF;
291         int ret;
292
293         bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(ca->dev_idx, b), BTREE_ITER_CACHED);
294         k = bch2_btree_iter_peek_slot(&iter);
295         ret = bkey_err(k);
296         if (ret) {
297                 ob = ERR_PTR(ret);
298                 goto err;
299         }
300
301         bch2_alloc_to_v4(k, &a);
302
303         if (bch2_fs_inconsistent_on(a.data_type != BCH_DATA_free, c,
304                         "non free bucket in freespace btree (state %s)\n"
305                         "  %s\n"
306                         "  at %llu (genbits %u)",
307                         bch2_data_types[a.data_type],
308                         (bch2_bkey_val_to_text(&buf, c, k), buf.buf),
309                         free_entry, genbits)) {
310                 ob = ERR_PTR(-EIO);
311                 goto err;
312         }
313
314         if (bch2_fs_inconsistent_on(genbits != (alloc_freespace_genbits(a) >> 56), c,
315                         "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
316                         "  %s",
317                         genbits, alloc_freespace_genbits(a) >> 56,
318                         (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
319                 ob = ERR_PTR(-EIO);
320                 goto err;
321         }
322
323         if (bch2_fs_inconsistent_on(b < ca->mi.first_bucket || b >= ca->mi.nbuckets, c,
324                         "freespace btree has bucket outside allowed range (got %llu, valid %u-%llu)",
325                         b, ca->mi.first_bucket, ca->mi.nbuckets)) {
326                 ob = ERR_PTR(-EIO);
327                 goto err;
328         }
329
330         ob = __try_alloc_bucket(c, ca, b, reserve, &a,
331                                 skipped_open,
332                                 skipped_need_journal_commit,
333                                 skipped_nouse,
334                                 cl);
335 err:
336         bch2_trans_iter_exit(trans, &iter);
337         printbuf_exit(&buf);
338         return ob;
339 }
340
341 static struct open_bucket *try_alloc_partial_bucket(struct bch_fs *c, struct bch_dev *ca,
342                                                     enum alloc_reserve reserve)
343 {
344         struct open_bucket *ob;
345         int i;
346
347         spin_lock(&c->freelist_lock);
348
349         for (i = ca->open_buckets_partial_nr - 1; i >= 0; --i) {
350                 ob = c->open_buckets + ca->open_buckets_partial[i];
351
352                 if (reserve <= ob->alloc_reserve) {
353                         array_remove_item(ca->open_buckets_partial,
354                                           ca->open_buckets_partial_nr,
355                                           i);
356                         ob->on_partial_list = false;
357                         ob->alloc_reserve = reserve;
358                         spin_unlock(&c->freelist_lock);
359                         return ob;
360                 }
361         }
362
363         spin_unlock(&c->freelist_lock);
364         return NULL;
365 }
366
367 /*
368  * This path is used before the freespace btree is initialized:
369  *
370  * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock &
371  * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
372  */
373 static noinline struct open_bucket *
374 bch2_bucket_alloc_trans_early(struct btree_trans *trans,
375                               struct bch_dev *ca,
376                               enum alloc_reserve reserve,
377                               u64 *cur_bucket,
378                               u64 *buckets_seen,
379                               u64 *skipped_open,
380                               u64 *skipped_need_journal_commit,
381                               u64 *skipped_nouse,
382                               struct closure *cl)
383 {
384         struct btree_iter iter;
385         struct bkey_s_c k;
386         struct open_bucket *ob = NULL;
387         int ret;
388
389         *cur_bucket = max_t(u64, *cur_bucket, ca->mi.first_bucket);
390         *cur_bucket = max_t(u64, *cur_bucket, ca->new_fs_bucket_idx);
391
392         for_each_btree_key(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, *cur_bucket),
393                            BTREE_ITER_SLOTS, k, ret) {
394                 struct bch_alloc_v4 a;
395
396                 if (bkey_cmp(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
397                         break;
398
399                 if (ca->new_fs_bucket_idx &&
400                     is_superblock_bucket(ca, k.k->p.offset))
401                         continue;
402
403                 bch2_alloc_to_v4(k, &a);
404
405                 if (a.data_type != BCH_DATA_free)
406                         continue;
407
408                 (*buckets_seen)++;
409
410                 ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, &a,
411                                         skipped_open,
412                                         skipped_need_journal_commit,
413                                         skipped_nouse,
414                                         cl);
415                 if (ob)
416                         break;
417         }
418         bch2_trans_iter_exit(trans, &iter);
419
420         *cur_bucket = iter.pos.offset;
421
422         return ob ?: ERR_PTR(ret ?: -FREELIST_EMPTY);
423 }
424
425 static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
426                                                    struct bch_dev *ca,
427                                                    enum alloc_reserve reserve,
428                                                    u64 *cur_bucket,
429                                                    u64 *buckets_seen,
430                                                    u64 *skipped_open,
431                                                    u64 *skipped_need_journal_commit,
432                                                    u64 *skipped_nouse,
433                                                    struct closure *cl)
434 {
435         struct btree_iter iter;
436         struct bkey_s_c k;
437         struct open_bucket *ob = NULL;
438         int ret;
439
440         if (unlikely(!ca->mi.freespace_initialized))
441                 return bch2_bucket_alloc_trans_early(trans, ca, reserve,
442                                                      cur_bucket,
443                                                      buckets_seen,
444                                                      skipped_open,
445                                                      skipped_need_journal_commit,
446                                                      skipped_nouse,
447                                                      cl);
448
449         BUG_ON(ca->new_fs_bucket_idx);
450
451         for_each_btree_key(trans, iter, BTREE_ID_freespace,
452                            POS(ca->dev_idx, *cur_bucket), 0, k, ret) {
453                 if (k.k->p.inode != ca->dev_idx)
454                         break;
455
456                 for (*cur_bucket = max(*cur_bucket, bkey_start_offset(k.k));
457                      *cur_bucket != k.k->p.offset && !ob;
458                      (*cur_bucket)++) {
459                         if (btree_trans_too_many_iters(trans)) {
460                                 ob = ERR_PTR(-EINTR);
461                                 break;
462                         }
463
464                         (*buckets_seen)++;
465
466                         ob = try_alloc_bucket(trans, ca, reserve,
467                                               *cur_bucket,
468                                               skipped_open,
469                                               skipped_need_journal_commit,
470                                               skipped_nouse,
471                                               cl);
472                 }
473                 if (ob)
474                         break;
475         }
476         bch2_trans_iter_exit(trans, &iter);
477
478         return ob ?: ERR_PTR(ret);
479 }
480
481 /**
482  * bch2_bucket_alloc - allocate a single bucket from a specific device
483  *
484  * Returns an open_bucket on success, or an ERR_PTR() on failure
485  */
486 struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
487                                       enum alloc_reserve reserve,
488                                       bool may_alloc_partial,
489                                       struct closure *cl)
490 {
491         struct open_bucket *ob = NULL;
492         struct bch_dev_usage usage;
493         u64 avail;
494         u64 cur_bucket = 0;
495         u64 buckets_seen = 0;
496         u64 skipped_open = 0;
497         u64 skipped_need_journal_commit = 0;
498         u64 skipped_nouse = 0;
499         bool waiting = false;
500         int ret;
501 again:
502         usage = bch2_dev_usage_read(ca);
503         avail = __dev_buckets_available(ca, usage, reserve);
504
505         if (usage.d[BCH_DATA_need_discard].buckets > avail)
506                 bch2_do_discards(c);
507
508         if (usage.d[BCH_DATA_need_gc_gens].buckets > avail)
509                 bch2_do_gc_gens(c);
510
511         if (should_invalidate_buckets(ca, usage))
512                 bch2_do_invalidates(c);
513
514         if (!avail) {
515                 if (cl && !waiting) {
516                         closure_wait(&c->freelist_wait, cl);
517                         waiting = true;
518                         goto again;
519                 }
520
521                 if (!c->blocked_allocate)
522                         c->blocked_allocate = local_clock();
523
524                 ob = ERR_PTR(-FREELIST_EMPTY);
525                 goto err;
526         }
527
528         if (waiting)
529                 closure_wake_up(&c->freelist_wait);
530
531         if (may_alloc_partial) {
532                 ob = try_alloc_partial_bucket(c, ca, reserve);
533                 if (ob)
534                         return ob;
535         }
536
537         ret = bch2_trans_do(c, NULL, NULL, 0,
538                         PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, reserve,
539                                                         &cur_bucket,
540                                                         &buckets_seen,
541                                                         &skipped_open,
542                                                         &skipped_need_journal_commit,
543                                                         &skipped_nouse,
544                                                         cl)));
545
546         if (skipped_need_journal_commit * 2 > avail)
547                 bch2_journal_flush_async(&c->journal, NULL);
548 err:
549         if (!ob)
550                 ob = ERR_PTR(ret ?: -FREELIST_EMPTY);
551
552         if (IS_ERR(ob)) {
553                 trace_bucket_alloc_fail(ca, bch2_alloc_reserves[reserve], avail,
554                                         buckets_seen,
555                                         skipped_open,
556                                         skipped_need_journal_commit,
557                                         skipped_nouse,
558                                         cl == NULL, PTR_ERR(ob));
559                 atomic_long_inc(&c->bucket_alloc_fail);
560         }
561
562         return ob;
563 }
564
565 static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
566                             unsigned l, unsigned r)
567 {
568         return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
569                 (stripe->next_alloc[l] < stripe->next_alloc[r]));
570 }
571
572 #define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)
573
574 struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
575                                           struct dev_stripe_state *stripe,
576                                           struct bch_devs_mask *devs)
577 {
578         struct dev_alloc_list ret = { .nr = 0 };
579         unsigned i;
580
581         for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
582                 ret.devs[ret.nr++] = i;
583
584         bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
585         return ret;
586 }
587
588 void bch2_dev_stripe_increment(struct bch_dev *ca,
589                                struct dev_stripe_state *stripe)
590 {
591         u64 *v = stripe->next_alloc + ca->dev_idx;
592         u64 free_space = dev_buckets_available(ca, RESERVE_none);
593         u64 free_space_inv = free_space
594                 ? div64_u64(1ULL << 48, free_space)
595                 : 1ULL << 48;
596         u64 scale = *v / 4;
597
598         if (*v + free_space_inv >= *v)
599                 *v += free_space_inv;
600         else
601                 *v = U64_MAX;
602
603         for (v = stripe->next_alloc;
604              v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
605                 *v = *v < scale ? 0 : *v - scale;
606 }
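/*
 * Worked example with illustrative numbers: a device with 2^20 free buckets
 * has next_alloc bumped by 2^48 / 2^20 = 2^28 per allocation, while a device
 * with only 2^10 free buckets is bumped by 2^38.  Since bch2_dev_alloc_list()
 * sorts devices by next_alloc, the nearly-full device ends up being picked
 * roughly 1024 times less often, keeping allocations proportional to free
 * space.  The final loop above decays every counter so they never grow
 * without bound.
 */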
607
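/*
 * BUCKET_MAY_ALLOC_PARTIAL allows reusing a partially filled bucket from
 * ca->open_buckets_partial; BUCKET_ALLOC_USE_DURABILITY makes each bucket
 * count for its device's durability when totting up nr_effective replicas
 * (see add_new_bucket()):
 */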
608 #define BUCKET_MAY_ALLOC_PARTIAL        (1 << 0)
609 #define BUCKET_ALLOC_USE_DURABILITY     (1 << 1)
610
611 static void add_new_bucket(struct bch_fs *c,
612                            struct open_buckets *ptrs,
613                            struct bch_devs_mask *devs_may_alloc,
614                            unsigned *nr_effective,
615                            bool *have_cache,
616                            unsigned flags,
617                            struct open_bucket *ob)
618 {
619         unsigned durability =
620                 bch_dev_bkey_exists(c, ob->dev)->mi.durability;
621
622         __clear_bit(ob->dev, devs_may_alloc->d);
623         *nr_effective   += (flags & BUCKET_ALLOC_USE_DURABILITY)
624                 ? durability : 1;
625         *have_cache     |= !durability;
626
627         ob_push(c, ptrs, ob);
628 }
629
630 int bch2_bucket_alloc_set(struct bch_fs *c,
631                       struct open_buckets *ptrs,
632                       struct dev_stripe_state *stripe,
633                       struct bch_devs_mask *devs_may_alloc,
634                       unsigned nr_replicas,
635                       unsigned *nr_effective,
636                       bool *have_cache,
637                       enum alloc_reserve reserve,
638                       unsigned flags,
639                       struct closure *cl)
640 {
641         struct dev_alloc_list devs_sorted =
642                 bch2_dev_alloc_list(c, stripe, devs_may_alloc);
643         unsigned dev;
644         struct bch_dev *ca;
645         int ret = -INSUFFICIENT_DEVICES;
646         unsigned i;
647
648         BUG_ON(*nr_effective >= nr_replicas);
649
650         for (i = 0; i < devs_sorted.nr; i++) {
651                 struct open_bucket *ob;
652
653                 dev = devs_sorted.devs[i];
654
655                 rcu_read_lock();
656                 ca = rcu_dereference(c->devs[dev]);
657                 if (ca)
658                         percpu_ref_get(&ca->ref);
659                 rcu_read_unlock();
660
661                 if (!ca)
662                         continue;
663
664                 if (!ca->mi.durability && *have_cache) {
665                         percpu_ref_put(&ca->ref);
666                         continue;
667                 }
668
669                 ob = bch2_bucket_alloc(c, ca, reserve,
670                                 flags & BUCKET_MAY_ALLOC_PARTIAL, cl);
671                 if (!IS_ERR(ob))
672                         bch2_dev_stripe_increment(ca, stripe);
673                 percpu_ref_put(&ca->ref);
674
675                 if (IS_ERR(ob)) {
676                         ret = PTR_ERR(ob);
677
678                         if (cl)
679                                 break;
680                         continue;
681                 }
682
683                 add_new_bucket(c, ptrs, devs_may_alloc,
684                                nr_effective, have_cache, flags, ob);
685
686                 if (*nr_effective >= nr_replicas) {
687                         ret = 0;
688                         break;
689                 }
690         }
691
692         return ret;
693 }
694
695 /* Allocate from stripes: */
696
697 /*
698  * If we can't allocate a new stripe because there are already too many
699  * partially filled stripes, force allocating from an existing stripe even when
700  * it's to a device we don't want:
701  */
702
703 static int bucket_alloc_from_stripe(struct bch_fs *c,
704                          struct open_buckets *ptrs,
705                          struct write_point *wp,
706                          struct bch_devs_mask *devs_may_alloc,
707                          u16 target,
708                          unsigned erasure_code,
709                          unsigned nr_replicas,
710                          unsigned *nr_effective,
711                          bool *have_cache,
712                          unsigned flags,
713                          struct closure *cl)
714 {
715         struct dev_alloc_list devs_sorted;
716         struct ec_stripe_head *h;
717         struct open_bucket *ob;
718         struct bch_dev *ca;
719         unsigned i, ec_idx;
720
721         if (!erasure_code)
722                 return 0;
723
724         if (nr_replicas < 2)
725                 return 0;
726
727         if (ec_open_bucket(c, ptrs))
728                 return 0;
729
730         h = bch2_ec_stripe_head_get(c, target, 0, nr_replicas - 1,
731                                     wp == &c->copygc_write_point,
732                                     cl);
733         if (IS_ERR(h))
734                 return -PTR_ERR(h);
735         if (!h)
736                 return 0;
737
738         devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);
739
740         for (i = 0; i < devs_sorted.nr; i++)
741                 for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
742                         if (!h->s->blocks[ec_idx])
743                                 continue;
744
745                         ob = c->open_buckets + h->s->blocks[ec_idx];
746                         if (ob->dev == devs_sorted.devs[i] &&
747                             !test_and_set_bit(ec_idx, h->s->blocks_allocated))
748                                 goto got_bucket;
749                 }
750         goto out_put_head;
751 got_bucket:
752         ca = bch_dev_bkey_exists(c, ob->dev);
753
754         ob->ec_idx      = ec_idx;
755         ob->ec          = h->s;
756
757         add_new_bucket(c, ptrs, devs_may_alloc,
758                        nr_effective, have_cache, flags, ob);
759         atomic_inc(&h->s->pin);
760 out_put_head:
761         bch2_ec_stripe_head_put(c, h);
762         return 0;
763 }
764
765 /* Sector allocator */
766
767 static void get_buckets_from_writepoint(struct bch_fs *c,
768                                         struct open_buckets *ptrs,
769                                         struct write_point *wp,
770                                         struct bch_devs_mask *devs_may_alloc,
771                                         unsigned nr_replicas,
772                                         unsigned *nr_effective,
773                                         bool *have_cache,
774                                         unsigned flags,
775                                         bool need_ec)
776 {
777         struct open_buckets ptrs_skip = { .nr = 0 };
778         struct open_bucket *ob;
779         unsigned i;
780
781         open_bucket_for_each(c, &wp->ptrs, ob, i) {
782                 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
783
784                 if (*nr_effective < nr_replicas &&
785                     test_bit(ob->dev, devs_may_alloc->d) &&
786                     (ca->mi.durability ||
787                      (wp->data_type == BCH_DATA_user && !*have_cache)) &&
788                     (ob->ec || !need_ec)) {
789                         add_new_bucket(c, ptrs, devs_may_alloc,
790                                        nr_effective, have_cache,
791                                        flags, ob);
792                 } else {
793                         ob_push(c, &ptrs_skip, ob);
794                 }
795         }
796         wp->ptrs = ptrs_skip;
797 }
798
799 static int open_bucket_add_buckets(struct bch_fs *c,
800                         struct open_buckets *ptrs,
801                         struct write_point *wp,
802                         struct bch_devs_list *devs_have,
803                         u16 target,
804                         unsigned erasure_code,
805                         unsigned nr_replicas,
806                         unsigned *nr_effective,
807                         bool *have_cache,
808                         enum alloc_reserve reserve,
809                         unsigned flags,
810                         struct closure *_cl)
811 {
812         struct bch_devs_mask devs;
813         struct open_bucket *ob;
814         struct closure *cl = NULL;
815         int ret;
816         unsigned i;
817
818         rcu_read_lock();
819         devs = target_rw_devs(c, wp->data_type, target);
820         rcu_read_unlock();
821
822         /* Don't allocate from devices we already have pointers to: */
823         for (i = 0; i < devs_have->nr; i++)
824                 __clear_bit(devs_have->devs[i], devs.d);
825
826         open_bucket_for_each(c, ptrs, ob, i)
827                 __clear_bit(ob->dev, devs.d);
828
829         if (erasure_code) {
830                 if (!ec_open_bucket(c, ptrs)) {
831                         get_buckets_from_writepoint(c, ptrs, wp, &devs,
832                                                     nr_replicas, nr_effective,
833                                                     have_cache, flags, true);
834                         if (*nr_effective >= nr_replicas)
835                                 return 0;
836                 }
837
838                 if (!ec_open_bucket(c, ptrs)) {
839                         ret = bucket_alloc_from_stripe(c, ptrs, wp, &devs,
840                                                  target, erasure_code,
841                                                  nr_replicas, nr_effective,
842                                                  have_cache, flags, _cl);
843                         if (ret == -FREELIST_EMPTY ||
844                             ret == -OPEN_BUCKETS_EMPTY)
845                                 return ret;
846                         if (*nr_effective >= nr_replicas)
847                                 return 0;
848                 }
849         }
850
851         get_buckets_from_writepoint(c, ptrs, wp, &devs,
852                                     nr_replicas, nr_effective,
853                                     have_cache, flags, false);
854         if (*nr_effective >= nr_replicas)
855                 return 0;
856
857 retry_blocking:
858         /*
859          * Try nonblocking first, so that if one device is full we'll try from
860          * other devices:
861          */
862         ret = bch2_bucket_alloc_set(c, ptrs, &wp->stripe, &devs,
863                                 nr_replicas, nr_effective, have_cache,
864                                 reserve, flags, cl);
865         if (ret && ret != -INSUFFICIENT_DEVICES && !cl && _cl) {
866                 cl = _cl;
867                 goto retry_blocking;
868         }
869
870         return ret;
871 }
872
873 void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
874                                 struct open_buckets *obs)
875 {
876         struct open_buckets ptrs = { .nr = 0 };
877         struct open_bucket *ob, *ob2;
878         unsigned i, j;
879
880         open_bucket_for_each(c, obs, ob, i) {
881                 bool drop = !ca || ob->dev == ca->dev_idx;
882
883                 if (!drop && ob->ec) {
884                         mutex_lock(&ob->ec->lock);
885                         for (j = 0; j < ob->ec->new_stripe.key.v.nr_blocks; j++) {
886                                 if (!ob->ec->blocks[j])
887                                         continue;
888
889                                 ob2 = c->open_buckets + ob->ec->blocks[j];
890                                 drop |= ob2->dev == ca->dev_idx;
891                         }
892                         mutex_unlock(&ob->ec->lock);
893                 }
894
895                 if (drop)
896                         bch2_open_bucket_put(c, ob);
897                 else
898                         ob_push(c, &ptrs, ob);
899         }
900
901         *obs = ptrs;
902 }
903
904 void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
905                           struct write_point *wp)
906 {
907         mutex_lock(&wp->lock);
908         bch2_open_buckets_stop_dev(c, ca, &wp->ptrs);
909         mutex_unlock(&wp->lock);
910 }
911
912 static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
913                                                  unsigned long write_point)
914 {
915         unsigned hash =
916                 hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));
917
918         return &c->write_points_hash[hash];
919 }
920
921 static struct write_point *__writepoint_find(struct hlist_head *head,
922                                              unsigned long write_point)
923 {
924         struct write_point *wp;
925
926         rcu_read_lock();
927         hlist_for_each_entry_rcu(wp, head, node)
928                 if (wp->write_point == write_point)
929                         goto out;
930         wp = NULL;
931 out:
932         rcu_read_unlock();
933         return wp;
934 }
935
936 static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
937 {
938         u64 stranded    = c->write_points_nr * c->bucket_size_max;
939         u64 free        = bch2_fs_usage_read_short(c).free;
940
941         return stranded * factor > free;
942 }
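/*
 * Worked example with illustrative numbers: with 32 write points and a
 * maximum bucket size of 1024 sectors, up to 32 * 1024 sectors can be sitting
 * partially filled in write points.  try_increase_writepoints() below only
 * adds a write point while that stranded space times 32 still fits in the
 * filesystem's free space, and try_decrease_writepoints() starts dropping
 * write points once stranded space times 8 exceeds it.
 */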
943
944 static bool try_increase_writepoints(struct bch_fs *c)
945 {
946         struct write_point *wp;
947
948         if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
949             too_many_writepoints(c, 32))
950                 return false;
951
952         wp = c->write_points + c->write_points_nr++;
953         hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
954         return true;
955 }
956
957 static bool try_decrease_writepoints(struct bch_fs *c,
958                                      unsigned old_nr)
959 {
960         struct write_point *wp;
961
962         mutex_lock(&c->write_points_hash_lock);
963         if (c->write_points_nr < old_nr) {
964                 mutex_unlock(&c->write_points_hash_lock);
965                 return true;
966         }
967
968         if (c->write_points_nr == 1 ||
969             !too_many_writepoints(c, 8)) {
970                 mutex_unlock(&c->write_points_hash_lock);
971                 return false;
972         }
973
974         wp = c->write_points + --c->write_points_nr;
975
976         hlist_del_rcu(&wp->node);
977         mutex_unlock(&c->write_points_hash_lock);
978
979         bch2_writepoint_stop(c, NULL, wp);
980         return true;
981 }
982
983 static struct write_point *writepoint_find(struct bch_fs *c,
984                                            unsigned long write_point)
985 {
986         struct write_point *wp, *oldest;
987         struct hlist_head *head;
988
989         if (!(write_point & 1UL)) {
990                 wp = (struct write_point *) write_point;
991                 mutex_lock(&wp->lock);
992                 return wp;
993         }
994
995         head = writepoint_hash(c, write_point);
996 restart_find:
997         wp = __writepoint_find(head, write_point);
998         if (wp) {
999 lock_wp:
1000                 mutex_lock(&wp->lock);
1001                 if (wp->write_point == write_point)
1002                         goto out;
1003                 mutex_unlock(&wp->lock);
1004                 goto restart_find;
1005         }
1006 restart_find_oldest:
1007         oldest = NULL;
1008         for (wp = c->write_points;
1009              wp < c->write_points + c->write_points_nr; wp++)
1010                 if (!oldest || time_before64(wp->last_used, oldest->last_used))
1011                         oldest = wp;
1012
1013         mutex_lock(&oldest->lock);
1014         mutex_lock(&c->write_points_hash_lock);
1015         if (oldest >= c->write_points + c->write_points_nr ||
1016             try_increase_writepoints(c)) {
1017                 mutex_unlock(&c->write_points_hash_lock);
1018                 mutex_unlock(&oldest->lock);
1019                 goto restart_find_oldest;
1020         }
1021
1022         wp = __writepoint_find(head, write_point);
1023         if (wp && wp != oldest) {
1024                 mutex_unlock(&c->write_points_hash_lock);
1025                 mutex_unlock(&oldest->lock);
1026                 goto lock_wp;
1027         }
1028
1029         wp = oldest;
1030         hlist_del_rcu(&wp->node);
1031         wp->write_point = write_point;
1032         hlist_add_head_rcu(&wp->node, head);
1033         mutex_unlock(&c->write_points_hash_lock);
1034 out:
1035         wp->last_used = sched_clock();
1036         return wp;
1037 }
1038
1039 /*
1040  * Get us a write point with open buckets we can allocate from, return with it locked:
1041  */
1042 struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
1043                                 unsigned target,
1044                                 unsigned erasure_code,
1045                                 struct write_point_specifier write_point,
1046                                 struct bch_devs_list *devs_have,
1047                                 unsigned nr_replicas,
1048                                 unsigned nr_replicas_required,
1049                                 enum alloc_reserve reserve,
1050                                 unsigned flags,
1051                                 struct closure *cl)
1052 {
1053         struct write_point *wp;
1054         struct open_bucket *ob;
1055         struct open_buckets ptrs;
1056         unsigned nr_effective, write_points_nr;
1057         unsigned ob_flags = 0;
1058         bool have_cache;
1059         int ret;
1060         int i;
1061
1062         if (!(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS))
1063                 ob_flags |= BUCKET_ALLOC_USE_DURABILITY;
1064
1065         BUG_ON(!nr_replicas || !nr_replicas_required);
1066 retry:
1067         ptrs.nr         = 0;
1068         nr_effective    = 0;
1069         write_points_nr = c->write_points_nr;
1070         have_cache      = false;
1071
1072         wp = writepoint_find(c, write_point.v);
1073
1074         if (wp->data_type == BCH_DATA_user)
1075                 ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;
1076
1077         /* metadata may not allocate on cache devices: */
1078         if (wp->data_type != BCH_DATA_user)
1079                 have_cache = true;
1080
1081         if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
1082                 ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
1083                                               target, erasure_code,
1084                                               nr_replicas, &nr_effective,
1085                                               &have_cache, reserve,
1086                                               ob_flags, cl);
1087         } else {
1088                 ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
1089                                               target, erasure_code,
1090                                               nr_replicas, &nr_effective,
1091                                               &have_cache, reserve,
1092                                               ob_flags, NULL);
1093                 if (!ret)
1094                         goto alloc_done;
1095
1096                 ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
1097                                               0, erasure_code,
1098                                               nr_replicas, &nr_effective,
1099                                               &have_cache, reserve,
1100                                               ob_flags, cl);
1101         }
1102 alloc_done:
1103         BUG_ON(!ret && nr_effective < nr_replicas);
1104
1105         if (erasure_code && !ec_open_bucket(c, &ptrs))
1106                 pr_debug("failed to get ec bucket: ret %u", ret);
1107
1108         if (ret == -INSUFFICIENT_DEVICES &&
1109             nr_effective >= nr_replicas_required)
1110                 ret = 0;
1111
1112         if (ret)
1113                 goto err;
1114
1115         /* Free buckets we didn't use: */
1116         open_bucket_for_each(c, &wp->ptrs, ob, i)
1117                 open_bucket_free_unused(c, wp, ob);
1118
1119         wp->ptrs = ptrs;
1120
1121         wp->sectors_free = UINT_MAX;
1122
1123         open_bucket_for_each(c, &wp->ptrs, ob, i)
1124                 wp->sectors_free = min(wp->sectors_free, ob->sectors_free);
1125
1126         BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);
1127
1128         return wp;
1129 err:
1130         open_bucket_for_each(c, &wp->ptrs, ob, i)
1131                 if (ptrs.nr < ARRAY_SIZE(ptrs.v))
1132                         ob_push(c, &ptrs, ob);
1133                 else
1134                         open_bucket_free_unused(c, wp, ob);
1135         wp->ptrs = ptrs;
1136
1137         mutex_unlock(&wp->lock);
1138
1139         if (ret == -FREELIST_EMPTY &&
1140             try_decrease_writepoints(c, write_points_nr))
1141                 goto retry;
1142
1143         switch (ret) {
1144         case -OPEN_BUCKETS_EMPTY:
1145         case -FREELIST_EMPTY:
1146                 return cl ? ERR_PTR(-EAGAIN) : ERR_PTR(-ENOSPC);
1147         case -INSUFFICIENT_DEVICES:
1148                 return ERR_PTR(-EROFS);
1149         default:
1150                 return ERR_PTR(ret);
1151         }
1152 }
1153
1154 struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
1155 {
1156         struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
1157
1158         return (struct bch_extent_ptr) {
1159                 .type   = 1 << BCH_EXTENT_ENTRY_ptr,
1160                 .gen    = ob->gen,
1161                 .dev    = ob->dev,
1162                 .offset = bucket_to_sector(ca, ob->bucket) +
1163                         ca->mi.bucket_size -
1164                         ob->sectors_free,
1165         };
1166 }
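/*
 * Worked example with illustrative numbers: for a 1024 sector bucket starting
 * at sector 8192 with 256 sectors still free, the pointer above ends up at
 * sector 8192 + 1024 - 256 = 8960, i.e. just past the space handed out by
 * previous allocations from this open bucket.
 */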
1167
1168 /*
1169  * Append pointers to the space we just allocated to @k, and mark @sectors space
1170  * as allocated out of @ob
1171  */
1172 void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
1173                                     struct bkey_i *k, unsigned sectors,
1174                                     bool cached)
1176 {
1177         struct open_bucket *ob;
1178         unsigned i;
1179
1180         BUG_ON(sectors > wp->sectors_free);
1181         wp->sectors_free -= sectors;
1182
1183         open_bucket_for_each(c, &wp->ptrs, ob, i) {
1184                 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
1185                 struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);
1186
1187                 ptr.cached = cached ||
1188                         (!ca->mi.durability &&
1189                          wp->data_type == BCH_DATA_user);
1190
1191                 bch2_bkey_append_ptr(k, ptr);
1192
1193                 BUG_ON(sectors > ob->sectors_free);
1194                 ob->sectors_free -= sectors;
1195         }
1196 }
1197
1198 /*
1199  * Release the open buckets in @wp that have no free sectors left, keep the
1200  * rest for future allocations, and unlock the write point
1201  */
1202 void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
1203 {
1204         struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
1205         struct open_bucket *ob;
1206         unsigned i;
1207
1208         open_bucket_for_each(c, &wp->ptrs, ob, i)
1209                 ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
1210         wp->ptrs = keep;
1211
1212         mutex_unlock(&wp->lock);
1213
1214         bch2_open_buckets_put(c, &ptrs);
1215 }
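/*
 * Illustrative sketch of the sector allocator API above, for a hypothetical
 * caller (request_sectors and the other locals are assumed, error handling
 * elided):
 *
 *      wp = bch2_alloc_sectors_start(c, target, erasure_code, write_point,
 *                                    devs_have, nr_replicas, nr_replicas,
 *                                    RESERVE_none, 0, cl);
 *      sectors = min(request_sectors, wp->sectors_free);
 *      bch2_alloc_sectors_append_ptrs(c, wp, k, sectors, false);
 *      bch2_alloc_sectors_done(c, wp);
 */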
1216
1217 static inline void writepoint_init(struct write_point *wp,
1218                                    enum bch_data_type type)
1219 {
1220         mutex_init(&wp->lock);
1221         wp->data_type = type;
1222 }
1223
1224 void bch2_fs_allocator_foreground_init(struct bch_fs *c)
1225 {
1226         struct open_bucket *ob;
1227         struct write_point *wp;
1228
1229         mutex_init(&c->write_points_hash_lock);
1230         c->write_points_nr = ARRAY_SIZE(c->write_points);
1231
1232         /* open bucket 0 is a sentinel NULL: */
1233         spin_lock_init(&c->open_buckets[0].lock);
1234
1235         for (ob = c->open_buckets + 1;
1236              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
1237                 spin_lock_init(&ob->lock);
1238                 c->open_buckets_nr_free++;
1239
1240                 ob->freelist = c->open_buckets_freelist;
1241                 c->open_buckets_freelist = ob - c->open_buckets;
1242         }
1243
1244         writepoint_init(&c->btree_write_point,          BCH_DATA_btree);
1245         writepoint_init(&c->rebalance_write_point,      BCH_DATA_user);
1246         writepoint_init(&c->copygc_write_point,         BCH_DATA_user);
1247
1248         for (wp = c->write_points;
1249              wp < c->write_points + c->write_points_nr; wp++) {
1250                 writepoint_init(wp, BCH_DATA_user);
1251
1252                 wp->last_used   = sched_clock();
1253                 wp->write_point = (unsigned long) wp;
1254                 hlist_add_head_rcu(&wp->node,
1255                                    writepoint_hash(c, wp->write_point));
1256         }
1257 }
1258
1259 void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
1260 {
1261         struct open_bucket *ob;
1262
1263         for (ob = c->open_buckets;
1264              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1265              ob++) {
1266                 spin_lock(&ob->lock);
1267                 if (ob->valid && !ob->on_partial_list) {
1268                         pr_buf(out, "%zu ref %u type %s %u:%llu:%u\n",
1269                                ob - c->open_buckets,
1270                                atomic_read(&ob->pin),
1271                                bch2_data_types[ob->data_type],
1272                                ob->dev, ob->bucket, ob->gen);
1273                 }
1274                 spin_unlock(&ob->lock);
1275         }
1276 }