libbcachefs/alloc_foreground.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright 2012 Google, Inc.
4  *
5  * Foreground allocator code: allocate buckets from freelist, and allocate at
6  * sector granularity from writepoints.
7  *
8  * bch2_bucket_alloc() allocates a single bucket from a specific device.
9  *
10  * bch2_bucket_alloc_set() allocates one or more buckets from different devices
11  * in a given filesystem.
12  */
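/*
 * A rough sketch of how those two entry points relate (illustrative only: the
 * bch_fs/bch_dev pointers, the write point and the device mask are assumed to
 * have been set up by the caller, and error handling is elided):
 *
 *        struct open_bucket *ob;
 *        struct open_buckets ptrs = { .nr = 0 };
 *        unsigned nr_effective = 0;
 *        bool have_cache = false;
 *
 *        // one bucket from one specific device, nonblocking (cl == NULL):
 *        ob = bch2_bucket_alloc(c, ca, RESERVE_none, false, NULL);
 *        if (!IS_ERR(ob))
 *                bch2_open_bucket_put(c, ob);
 *
 *        // or one bucket per replica, spread across the devices in a mask:
 *        ret = bch2_bucket_alloc_set(c, &ptrs, &wp->stripe, &devs_may_alloc,
 *                                    nr_replicas, &nr_effective, &have_cache,
 *                                    RESERVE_none, 0, NULL);
 */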
13
14 #include "bcachefs.h"
15 #include "alloc_background.h"
16 #include "alloc_foreground.h"
17 #include "btree_iter.h"
18 #include "btree_update.h"
19 #include "btree_gc.h"
20 #include "buckets.h"
21 #include "buckets_waiting_for_journal.h"
22 #include "clock.h"
23 #include "debug.h"
24 #include "disk_groups.h"
25 #include "ec.h"
26 #include "error.h"
27 #include "io.h"
28 #include "journal.h"
29
30 #include <linux/math64.h>
31 #include <linux/rculist.h>
32 #include <linux/rcupdate.h>
33 #include <trace/events/bcachefs.h>
34
35 const char * const bch2_alloc_reserves[] = {
36 #define x(t) #t,
37         BCH_ALLOC_RESERVES()
38 #undef x
39         NULL
40 };
41
42 /*
43  * Open buckets represent buckets that are currently being allocated from.  They
44  * serve two purposes:
45  *
46  *  - They track buckets that have been partially allocated, allowing for
47  *    sub-bucket sized allocations - they're used by the sector allocator below
48  *
49  *  - They provide a reference to the buckets they own that mark and sweep GC
50  *    can find, until the new allocation has a pointer to it inserted into the
51  *    btree
52  *
53  * When allocating some space with the sector allocator, the allocation comes
54  * with a reference to an open bucket - the caller is required to put that
55  * reference _after_ doing the index update that makes its allocation reachable.
56  */
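/*
 * In the write path that reference discipline looks roughly like the sketch
 * below (hedged, not a copy of the real caller; bch2_open_bucket_get() and
 * bch2_open_buckets_put() are the ref helpers from alloc_foreground.h, and
 * the actual data write and btree update are elided):
 *
 *        struct open_buckets held = { .nr = 0 };
 *
 *        wp = bch2_alloc_sectors_start(c, target, 0, write_point, &devs_have,
 *                                      nr_replicas, nr_replicas, RESERVE_none,
 *                                      0, &cl);
 *        bch2_alloc_sectors_append_ptrs(c, wp, k, sectors, false);
 *        bch2_open_bucket_get(c, wp, &held);     // take refs before unlocking wp
 *        bch2_alloc_sectors_done(c, wp);
 *
 *        // ... write the data, then insert the new extent into the btree ...
 *
 *        bch2_open_buckets_put(c, &held);        // only after the index update
 */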
57
58 static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
59 {
60         open_bucket_idx_t idx = ob - c->open_buckets;
61         open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
62
63         ob->hash = *slot;
64         *slot = idx;
65 }
66
67 static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
68 {
69         open_bucket_idx_t idx = ob - c->open_buckets;
70         open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
71
72         while (*slot != idx) {
73                 BUG_ON(!*slot);
74                 slot = &c->open_buckets[*slot].hash;
75         }
76
77         *slot = ob->hash;
78         ob->hash = 0;
79 }
80
81 void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
82 {
83         struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
84
85         if (ob->ec) {
86                 bch2_ec_bucket_written(c, ob);
87                 return;
88         }
89
90         percpu_down_read(&c->mark_lock);
91         spin_lock(&ob->lock);
92
93         ob->valid = false;
94         ob->data_type = 0;
95
96         spin_unlock(&ob->lock);
97         percpu_up_read(&c->mark_lock);
98
99         spin_lock(&c->freelist_lock);
100         bch2_open_bucket_hash_remove(c, ob);
101
102         ob->freelist = c->open_buckets_freelist;
103         c->open_buckets_freelist = ob - c->open_buckets;
104
105         c->open_buckets_nr_free++;
106         ca->nr_open_buckets--;
107         spin_unlock(&c->freelist_lock);
108
109         closure_wake_up(&c->open_buckets_wait);
110 }
111
112 void bch2_open_bucket_write_error(struct bch_fs *c,
113                                   struct open_buckets *obs,
114                                   unsigned dev)
115 {
116         struct open_bucket *ob;
117         unsigned i;
118
119         open_bucket_for_each(c, obs, ob, i)
120                 if (ob->dev == dev && ob->ec)
121                         bch2_ec_bucket_cancel(c, ob);
122 }
123
124 static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
125 {
126         struct open_bucket *ob;
127
128         BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);
129
130         ob = c->open_buckets + c->open_buckets_freelist;
131         c->open_buckets_freelist = ob->freelist;
132         atomic_set(&ob->pin, 1);
133         ob->data_type = 0;
134
135         c->open_buckets_nr_free--;
136         return ob;
137 }
138
139 static void open_bucket_free_unused(struct bch_fs *c,
140                                     struct write_point *wp,
141                                     struct open_bucket *ob)
142 {
143         struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
144         bool may_realloc = wp->data_type == BCH_DATA_user;
145
146         BUG_ON(ca->open_buckets_partial_nr >
147                ARRAY_SIZE(ca->open_buckets_partial));
148
149         if (ca->open_buckets_partial_nr <
150             ARRAY_SIZE(ca->open_buckets_partial) &&
151             may_realloc) {
152                 spin_lock(&c->freelist_lock);
153                 ob->on_partial_list = true;
154                 ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
155                         ob - c->open_buckets;
156                 spin_unlock(&c->freelist_lock);
157
158                 closure_wake_up(&c->open_buckets_wait);
159                 closure_wake_up(&c->freelist_wait);
160         } else {
161                 bch2_open_bucket_put(c, ob);
162         }
163 }
164
165 /* _only_ for allocating the journal on a new device: */
166 long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
167 {
168         while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
169                 u64 b = ca->new_fs_bucket_idx++;
170
171                 if (!is_superblock_bucket(ca, b) &&
172                     (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))
173                         return b;
174         }
175
176         return -1;
177 }
178
179 static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
180 {
181         switch (reserve) {
182         case RESERVE_btree:
183         case RESERVE_btree_movinggc:
184                 return 0;
185         case RESERVE_movinggc:
186                 return OPEN_BUCKETS_COUNT / 4;
187         default:
188                 return OPEN_BUCKETS_COUNT / 2;
189         }
190 }
191
192 static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
193                                               enum alloc_reserve reserve,
194                                               struct bkey_alloc_unpacked a,
195                                               u64 *skipped_open,
196                                               u64 *skipped_need_journal_commit,
197                                               u64 *skipped_nouse,
198                                               struct closure *cl)
199 {
200         struct open_bucket *ob;
201
202         if (unlikely(ca->buckets_nouse && test_bit(a.bucket, ca->buckets_nouse))) {
203                 (*skipped_nouse)++;
204                 return NULL;
205         }
206
207         if (bch2_bucket_is_open(c, ca->dev_idx, a.bucket)) {
208                 (*skipped_open)++;
209                 return NULL;
210         }
211
212         if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
213                         c->journal.flushed_seq_ondisk, ca->dev_idx, a.bucket)) {
214                 (*skipped_need_journal_commit)++;
215                 return NULL;
216         }
217
218         spin_lock(&c->freelist_lock);
219
220         if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
221                 if (cl)
222                         closure_wait(&c->open_buckets_wait, cl);
223
224                 if (!c->blocked_allocate_open_bucket)
225                         c->blocked_allocate_open_bucket = local_clock();
226
227                 spin_unlock(&c->freelist_lock);
228
229                 trace_open_bucket_alloc_fail(ca, bch2_alloc_reserves[reserve]);
230                 return ERR_PTR(-OPEN_BUCKETS_EMPTY);
231         }
232
233         /* Recheck under lock: */
234         if (bch2_bucket_is_open(c, ca->dev_idx, a.bucket)) {
235                 spin_unlock(&c->freelist_lock);
236                 (*skipped_open)++;
237                 return NULL;
238         }
239
240         ob = bch2_open_bucket_alloc(c);
241
242         spin_lock(&ob->lock);
243
244         ob->valid       = true;
245         ob->sectors_free = ca->mi.bucket_size;
246         ob->alloc_reserve = reserve;
247         ob->dev         = ca->dev_idx;
248         ob->gen         = a.gen;
249         ob->bucket      = a.bucket;
250         spin_unlock(&ob->lock);
251
252         ca->nr_open_buckets++;
253         bch2_open_bucket_hash_add(c, ob);
254
255         if (c->blocked_allocate_open_bucket) {
256                 bch2_time_stats_update(
257                         &c->times[BCH_TIME_blocked_allocate_open_bucket],
258                         c->blocked_allocate_open_bucket);
259                 c->blocked_allocate_open_bucket = 0;
260         }
261
262         if (c->blocked_allocate) {
263                 bch2_time_stats_update(
264                         &c->times[BCH_TIME_blocked_allocate],
265                         c->blocked_allocate);
266                 c->blocked_allocate = 0;
267         }
268
269         spin_unlock(&c->freelist_lock);
270
271         trace_bucket_alloc(ca, bch2_alloc_reserves[reserve]);
272         return ob;
273 }
274
275 static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
276                                             enum alloc_reserve reserve, u64 free_entry,
277                                             u64 *skipped_open,
278                                             u64 *skipped_need_journal_commit,
279                                             u64 *skipped_nouse,
280                                             struct closure *cl)
281 {
282         struct bch_fs *c = trans->c;
283         struct btree_iter iter;
284         struct bkey_s_c k;
285         struct open_bucket *ob;
286         struct bkey_alloc_unpacked a;
287         u64 b = free_entry & ~(~0ULL << 56);
288         unsigned genbits = free_entry >> 56;
289         struct printbuf buf = PRINTBUF;
290         int ret;
291
292         bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(ca->dev_idx, b), BTREE_ITER_CACHED);
293         k = bch2_btree_iter_peek_slot(&iter);
294         ret = bkey_err(k);
295         if (ret) {
296                 ob = ERR_PTR(ret);
297                 goto err;
298         }
299
300         a = bch2_alloc_unpack(k);
301
302         if (bch2_fs_inconsistent_on(bucket_state(a) != BUCKET_free, c,
303                         "non free bucket in freespace btree (state %s)\n"
304                         "  %s\n"
305                         "  at %llu (genbits %u)",
306                         bch2_bucket_states[bucket_state(a)],
307                         (bch2_bkey_val_to_text(&buf, c, k), buf.buf),
308                         free_entry, genbits)) {
309                 ob = ERR_PTR(-EIO);
310                 goto err;
311         }
312
313         if (bch2_fs_inconsistent_on(genbits != (alloc_freespace_genbits(a) >> 56), c,
314                         "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
315                         "  %s",
316                         genbits, alloc_freespace_genbits(a) >> 56,
317                         (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
318                 ob = ERR_PTR(-EIO);
319                 goto err;
320         }
321
322         if (bch2_fs_inconsistent_on(b < ca->mi.first_bucket || b >= ca->mi.nbuckets, c,
323                         "freespace btree has bucket outside allowed range (got %llu, valid %u-%llu)",
324                         b, ca->mi.first_bucket, ca->mi.nbuckets)) {
325                 ob = ERR_PTR(-EIO);
326                 goto err;
327         }
328
329         ob = __try_alloc_bucket(c, ca, reserve, a,
330                                 skipped_open,
331                                 skipped_need_journal_commit,
332                                 skipped_nouse,
333                                 cl);
334 err:
335         bch2_trans_iter_exit(trans, &iter);
336         printbuf_exit(&buf);
337         return ob;
338 }
339
340 static struct open_bucket *try_alloc_partial_bucket(struct bch_fs *c, struct bch_dev *ca,
341                                                     enum alloc_reserve reserve)
342 {
343         struct open_bucket *ob;
344         int i;
345
346         spin_lock(&c->freelist_lock);
347
348         for (i = ca->open_buckets_partial_nr - 1; i >= 0; --i) {
349                 ob = c->open_buckets + ca->open_buckets_partial[i];
350
351                 if (reserve <= ob->alloc_reserve) {
352                         array_remove_item(ca->open_buckets_partial,
353                                           ca->open_buckets_partial_nr,
354                                           i);
355                         ob->on_partial_list = false;
356                         ob->alloc_reserve = reserve;
357                         spin_unlock(&c->freelist_lock);
358                         return ob;
359                 }
360         }
361
362         spin_unlock(&c->freelist_lock);
363         return NULL;
364 }
365
366 /*
367  * This path is for before the freespace btree is initialized:
368  *
369  * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock &
370  * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
371  */
372 static noinline struct open_bucket *
373 bch2_bucket_alloc_trans_early(struct btree_trans *trans,
374                               struct bch_dev *ca,
375                               enum alloc_reserve reserve,
376                               u64 *cur_bucket,
377                               u64 *buckets_seen,
378                               u64 *skipped_open,
379                               u64 *skipped_need_journal_commit,
380                               u64 *skipped_nouse,
381                               struct closure *cl)
382 {
383         struct btree_iter iter;
384         struct bkey_s_c k;
385         struct open_bucket *ob = NULL;
386         int ret;
387
388         *cur_bucket = max_t(u64, *cur_bucket, ca->mi.first_bucket);
389         *cur_bucket = max_t(u64, *cur_bucket, ca->new_fs_bucket_idx);
390
391         for_each_btree_key(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, *cur_bucket),
392                            BTREE_ITER_SLOTS, k, ret) {
393                 struct bkey_alloc_unpacked a;
394
395                 if (bkey_cmp(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
396                         break;
397
398                 if (ca->new_fs_bucket_idx &&
399                     is_superblock_bucket(ca, k.k->p.offset))
400                         continue;
401
402                 a = bch2_alloc_unpack(k);
403
404                 if (bucket_state(a) != BUCKET_free)
405                         continue;
406
407                 (*buckets_seen)++;
408
409                 ob = __try_alloc_bucket(trans->c, ca, reserve, a,
410                                         skipped_open,
411                                         skipped_need_journal_commit,
412                                         skipped_nouse,
413                                         cl);
414                 if (ob)
415                         break;
416         }
417         bch2_trans_iter_exit(trans, &iter);
418
419         *cur_bucket = iter.pos.offset;
420
421         return ob ?: ERR_PTR(ret ?: -FREELIST_EMPTY);
422 }
423
424 static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
425                                                    struct bch_dev *ca,
426                                                    enum alloc_reserve reserve,
427                                                    u64 *cur_bucket,
428                                                    u64 *buckets_seen,
429                                                    u64 *skipped_open,
430                                                    u64 *skipped_need_journal_commit,
431                                                    u64 *skipped_nouse,
432                                                    struct closure *cl)
433 {
434         struct btree_iter iter;
435         struct bkey_s_c k;
436         struct open_bucket *ob = NULL;
437         int ret;
438
439         if (unlikely(!ca->mi.freespace_initialized))
440                 return bch2_bucket_alloc_trans_early(trans, ca, reserve,
441                                                      cur_bucket,
442                                                      buckets_seen,
443                                                      skipped_open,
444                                                      skipped_need_journal_commit,
445                                                      skipped_nouse,
446                                                      cl);
447
448         BUG_ON(ca->new_fs_bucket_idx);
449
450         for_each_btree_key(trans, iter, BTREE_ID_freespace,
451                            POS(ca->dev_idx, *cur_bucket), 0, k, ret) {
452                 if (k.k->p.inode != ca->dev_idx)
453                         break;
454
455                 for (*cur_bucket = max(*cur_bucket, bkey_start_offset(k.k));
456                      *cur_bucket != k.k->p.offset && !ob;
457                      (*cur_bucket)++) {
458                         if (btree_trans_too_many_iters(trans)) {
459                                 ob = ERR_PTR(-EINTR);
460                                 break;
461                         }
462
463                         (*buckets_seen)++;
464
465                         ob = try_alloc_bucket(trans, ca, reserve,
466                                               *cur_bucket,
467                                               skipped_open,
468                                               skipped_need_journal_commit,
469                                               skipped_nouse,
470                                               cl);
471                 }
472                 if (ob)
473                         break;
474         }
475         bch2_trans_iter_exit(trans, &iter);
476
477         return ob ?: ERR_PTR(ret);
478 }
479
480 /**
481  * bch2_bucket_alloc - allocate a single bucket from a specific device
482  *
483  * Returns an open_bucket on success, or an ERR_PTR() on failure
484  */
485 struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
486                                       enum alloc_reserve reserve,
487                                       bool may_alloc_partial,
488                                       struct closure *cl)
489 {
490         struct open_bucket *ob = NULL;
491         u64 avail = dev_buckets_available(ca, reserve);
492         u64 cur_bucket = 0;
493         u64 buckets_seen = 0;
494         u64 skipped_open = 0;
495         u64 skipped_need_journal_commit = 0;
496         u64 skipped_nouse = 0;
497         int ret;
498
499         if (may_alloc_partial) {
500                 ob = try_alloc_partial_bucket(c, ca, reserve);
501                 if (ob)
502                         return ob;
503         }
504 again:
505         if (!avail) {
506                 if (cl) {
507                         closure_wait(&c->freelist_wait, cl);
508                         /* recheck after putting ourselves on the waitlist */
509                         avail = dev_buckets_available(ca, reserve);
510                         if (avail) {
511                                 closure_wake_up(&c->freelist_wait);
512                                 goto again;
513                         }
514                 }
515
516                 if (!c->blocked_allocate)
517                         c->blocked_allocate = local_clock();
518
519                 ob = ERR_PTR(-FREELIST_EMPTY);
520                 goto err;
521         }
522
523         ret = bch2_trans_do(c, NULL, NULL, 0,
524                         PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, reserve,
525                                                         &cur_bucket,
526                                                         &buckets_seen,
527                                                         &skipped_open,
528                                                         &skipped_need_journal_commit,
529                                                         &skipped_nouse,
530                                                         cl)));
531
532         if (skipped_need_journal_commit * 2 > avail)
533                 bch2_journal_flush_async(&c->journal, NULL);
534 err:
535         if (!ob)
536                 ob = ERR_PTR(ret ?: -FREELIST_EMPTY);
537
538         if (IS_ERR(ob)) {
539                 trace_bucket_alloc_fail(ca, bch2_alloc_reserves[reserve], avail,
540                                         buckets_seen,
541                                         skipped_open,
542                                         skipped_need_journal_commit,
543                                         skipped_nouse,
544                                         cl == NULL, PTR_ERR(ob));
545                 atomic_long_inc(&c->bucket_alloc_fail);
546         }
547
548         return ob;
549 }
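/*
 * If the device has nothing free this fails with -FREELIST_EMPTY (or
 * -OPEN_BUCKETS_EMPTY) right away when @cl is NULL; with a closure the caller
 * is also parked on the relevant waitlist, so one plausible caller pattern
 * (illustrative sketch, not taken from a real caller) is:
 *
 *        struct closure cl;
 *        struct open_bucket *ob;
 *
 *        closure_init_stack(&cl);
 *
 *        while (1) {
 *                ob = bch2_bucket_alloc(c, ca, RESERVE_none, false, &cl);
 *                if (!IS_ERR(ob) ||
 *                    (PTR_ERR(ob) != -FREELIST_EMPTY &&
 *                     PTR_ERR(ob) != -OPEN_BUCKETS_EMPTY))
 *                        break;
 *                closure_sync(&cl);      // wait for buckets to be freed, then retry
 *        }
 */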
550
551 static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
552                             unsigned l, unsigned r)
553 {
554         return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
555                 (stripe->next_alloc[l] < stripe->next_alloc[r]));
556 }
557
558 #define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)
559
560 struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
561                                           struct dev_stripe_state *stripe,
562                                           struct bch_devs_mask *devs)
563 {
564         struct dev_alloc_list ret = { .nr = 0 };
565         unsigned i;
566
567         for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
568                 ret.devs[ret.nr++] = i;
569
570         bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
571         return ret;
572 }
573
574 void bch2_dev_stripe_increment(struct bch_dev *ca,
575                                struct dev_stripe_state *stripe)
576 {
577         u64 *v = stripe->next_alloc + ca->dev_idx;
578         u64 free_space = dev_buckets_available(ca, RESERVE_none);
579         u64 free_space_inv = free_space
580                 ? div64_u64(1ULL << 48, free_space)
581                 : 1ULL << 48;
582         u64 scale = *v / 4;
583
584         if (*v + free_space_inv >= *v)
585                 *v += free_space_inv;
586         else
587                 *v = U64_MAX;
588
589         for (v = stripe->next_alloc;
590              v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
591                 *v = *v < scale ? 0 : *v - scale;
592 }
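/*
 * Worked example of the weighting above (illustrative numbers): a device with
 * 1000 free buckets has next_alloc bumped by 2^48/1000 ~= 2.8e11 per
 * allocation, one with only 100 free buckets by ~2.8e12, so after sorting by
 * next_alloc in bch2_dev_alloc_list() the fuller device comes up roughly ten
 * times less often; the final "*v - scale" pass decays old history so a
 * device that fell behind can catch up again.
 */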
593
594 #define BUCKET_MAY_ALLOC_PARTIAL        (1 << 0)
595 #define BUCKET_ALLOC_USE_DURABILITY     (1 << 1)
596
597 static void add_new_bucket(struct bch_fs *c,
598                            struct open_buckets *ptrs,
599                            struct bch_devs_mask *devs_may_alloc,
600                            unsigned *nr_effective,
601                            bool *have_cache,
602                            unsigned flags,
603                            struct open_bucket *ob)
604 {
605         unsigned durability =
606                 bch_dev_bkey_exists(c, ob->dev)->mi.durability;
607
608         __clear_bit(ob->dev, devs_may_alloc->d);
609         *nr_effective   += (flags & BUCKET_ALLOC_USE_DURABILITY)
610                 ? durability : 1;
611         *have_cache     |= !durability;
612
613         ob_push(c, ptrs, ob);
614 }
615
616 int bch2_bucket_alloc_set(struct bch_fs *c,
617                       struct open_buckets *ptrs,
618                       struct dev_stripe_state *stripe,
619                       struct bch_devs_mask *devs_may_alloc,
620                       unsigned nr_replicas,
621                       unsigned *nr_effective,
622                       bool *have_cache,
623                       enum alloc_reserve reserve,
624                       unsigned flags,
625                       struct closure *cl)
626 {
627         struct dev_alloc_list devs_sorted =
628                 bch2_dev_alloc_list(c, stripe, devs_may_alloc);
629         unsigned dev;
630         struct bch_dev *ca;
631         int ret = -INSUFFICIENT_DEVICES;
632         unsigned i;
633
634         BUG_ON(*nr_effective >= nr_replicas);
635
636         for (i = 0; i < devs_sorted.nr; i++) {
637                 struct open_bucket *ob;
638
639                 dev = devs_sorted.devs[i];
640
641                 rcu_read_lock();
642                 ca = rcu_dereference(c->devs[dev]);
643                 if (ca)
644                         percpu_ref_get(&ca->ref);
645                 rcu_read_unlock();
646
647                 if (!ca)
648                         continue;
649
650                 if (!ca->mi.durability && *have_cache) {
651                         percpu_ref_put(&ca->ref);
652                         continue;
653                 }
654
655                 ob = bch2_bucket_alloc(c, ca, reserve,
656                                 flags & BUCKET_MAY_ALLOC_PARTIAL, cl);
657                 if (!IS_ERR(ob))
658                         bch2_dev_stripe_increment(ca, stripe);
659                 percpu_ref_put(&ca->ref);
660
661                 if (IS_ERR(ob)) {
662                         ret = PTR_ERR(ob);
663
664                         if (cl)
665                                 break;
666                         continue;
667                 }
668
669                 add_new_bucket(c, ptrs, devs_may_alloc,
670                                nr_effective, have_cache, flags, ob);
671
672                 if (*nr_effective >= nr_replicas) {
673                         ret = 0;
674                         break;
675                 }
676         }
677
678         return ret;
679 }
680
681 /* Allocate from stripes: */
682
683 /*
684  * if we can't allocate a new stripe because there are already too many
685  * partially filled stripes, force allocating from an existing stripe even when
686  * it's to a device we don't want:
687  */
688
689 static int bucket_alloc_from_stripe(struct bch_fs *c,
690                          struct open_buckets *ptrs,
691                          struct write_point *wp,
692                          struct bch_devs_mask *devs_may_alloc,
693                          u16 target,
694                          unsigned erasure_code,
695                          unsigned nr_replicas,
696                          unsigned *nr_effective,
697                          bool *have_cache,
698                          unsigned flags,
699                          struct closure *cl)
700 {
701         struct dev_alloc_list devs_sorted;
702         struct ec_stripe_head *h;
703         struct open_bucket *ob;
704         struct bch_dev *ca;
705         unsigned i, ec_idx;
706
707         if (!erasure_code)
708                 return 0;
709
710         if (nr_replicas < 2)
711                 return 0;
712
713         if (ec_open_bucket(c, ptrs))
714                 return 0;
715
716         h = bch2_ec_stripe_head_get(c, target, 0, nr_replicas - 1,
717                                     wp == &c->copygc_write_point,
718                                     cl);
719         if (IS_ERR(h))
720                 return -PTR_ERR(h);
721         if (!h)
722                 return 0;
723
724         devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);
725
726         for (i = 0; i < devs_sorted.nr; i++)
727                 for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
728                         if (!h->s->blocks[ec_idx])
729                                 continue;
730
731                         ob = c->open_buckets + h->s->blocks[ec_idx];
732                         if (ob->dev == devs_sorted.devs[i] &&
733                             !test_and_set_bit(ec_idx, h->s->blocks_allocated))
734                                 goto got_bucket;
735                 }
736         goto out_put_head;
737 got_bucket:
738         ca = bch_dev_bkey_exists(c, ob->dev);
739
740         ob->ec_idx      = ec_idx;
741         ob->ec          = h->s;
742
743         add_new_bucket(c, ptrs, devs_may_alloc,
744                        nr_effective, have_cache, flags, ob);
745         atomic_inc(&h->s->pin);
746 out_put_head:
747         bch2_ec_stripe_head_put(c, h);
748         return 0;
749 }
750
751 /* Sector allocator */
752
753 static void get_buckets_from_writepoint(struct bch_fs *c,
754                                         struct open_buckets *ptrs,
755                                         struct write_point *wp,
756                                         struct bch_devs_mask *devs_may_alloc,
757                                         unsigned nr_replicas,
758                                         unsigned *nr_effective,
759                                         bool *have_cache,
760                                         unsigned flags,
761                                         bool need_ec)
762 {
763         struct open_buckets ptrs_skip = { .nr = 0 };
764         struct open_bucket *ob;
765         unsigned i;
766
767         open_bucket_for_each(c, &wp->ptrs, ob, i) {
768                 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
769
770                 if (*nr_effective < nr_replicas &&
771                     test_bit(ob->dev, devs_may_alloc->d) &&
772                     (ca->mi.durability ||
773                      (wp->data_type == BCH_DATA_user && !*have_cache)) &&
774                     (ob->ec || !need_ec)) {
775                         add_new_bucket(c, ptrs, devs_may_alloc,
776                                        nr_effective, have_cache,
777                                        flags, ob);
778                 } else {
779                         ob_push(c, &ptrs_skip, ob);
780                 }
781         }
782         wp->ptrs = ptrs_skip;
783 }
784
785 static int open_bucket_add_buckets(struct bch_fs *c,
786                         struct open_buckets *ptrs,
787                         struct write_point *wp,
788                         struct bch_devs_list *devs_have,
789                         u16 target,
790                         unsigned erasure_code,
791                         unsigned nr_replicas,
792                         unsigned *nr_effective,
793                         bool *have_cache,
794                         enum alloc_reserve reserve,
795                         unsigned flags,
796                         struct closure *_cl)
797 {
798         struct bch_devs_mask devs;
799         struct open_bucket *ob;
800         struct closure *cl = NULL;
801         int ret;
802         unsigned i;
803
804         rcu_read_lock();
805         devs = target_rw_devs(c, wp->data_type, target);
806         rcu_read_unlock();
807
808         /* Don't allocate from devices we already have pointers to: */
809         for (i = 0; i < devs_have->nr; i++)
810                 __clear_bit(devs_have->devs[i], devs.d);
811
812         open_bucket_for_each(c, ptrs, ob, i)
813                 __clear_bit(ob->dev, devs.d);
814
815         if (erasure_code) {
816                 if (!ec_open_bucket(c, ptrs)) {
817                         get_buckets_from_writepoint(c, ptrs, wp, &devs,
818                                                     nr_replicas, nr_effective,
819                                                     have_cache, flags, true);
820                         if (*nr_effective >= nr_replicas)
821                                 return 0;
822                 }
823
824                 if (!ec_open_bucket(c, ptrs)) {
825                         ret = bucket_alloc_from_stripe(c, ptrs, wp, &devs,
826                                                  target, erasure_code,
827                                                  nr_replicas, nr_effective,
828                                                  have_cache, flags, _cl);
829                         if (ret == -FREELIST_EMPTY ||
830                             ret == -OPEN_BUCKETS_EMPTY)
831                                 return ret;
832                         if (*nr_effective >= nr_replicas)
833                                 return 0;
834                 }
835         }
836
837         get_buckets_from_writepoint(c, ptrs, wp, &devs,
838                                     nr_replicas, nr_effective,
839                                     have_cache, flags, false);
840         if (*nr_effective >= nr_replicas)
841                 return 0;
842
843 retry_blocking:
844         /*
845          * Try nonblocking first, so that if one device is full we'll try from
846          * other devices:
847          */
848         ret = bch2_bucket_alloc_set(c, ptrs, &wp->stripe, &devs,
849                                 nr_replicas, nr_effective, have_cache,
850                                 reserve, flags, cl);
851         if (ret && ret != -INSUFFICIENT_DEVICES && !cl && _cl) {
852                 cl = _cl;
853                 goto retry_blocking;
854         }
855
856         return ret;
857 }
858
859 void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
860                                 struct open_buckets *obs)
861 {
862         struct open_buckets ptrs = { .nr = 0 };
863         struct open_bucket *ob, *ob2;
864         unsigned i, j;
865
866         open_bucket_for_each(c, obs, ob, i) {
867                 bool drop = !ca || ob->dev == ca->dev_idx;
868
869                 if (!drop && ob->ec) {
870                         mutex_lock(&ob->ec->lock);
871                         for (j = 0; j < ob->ec->new_stripe.key.v.nr_blocks; j++) {
872                                 if (!ob->ec->blocks[j])
873                                         continue;
874
875                                 ob2 = c->open_buckets + ob->ec->blocks[j];
876                                 drop |= ob2->dev == ca->dev_idx;
877                         }
878                         mutex_unlock(&ob->ec->lock);
879                 }
880
881                 if (drop)
882                         bch2_open_bucket_put(c, ob);
883                 else
884                         ob_push(c, &ptrs, ob);
885         }
886
887         *obs = ptrs;
888 }
889
890 void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
891                           struct write_point *wp)
892 {
893         mutex_lock(&wp->lock);
894         bch2_open_buckets_stop_dev(c, ca, &wp->ptrs);
895         mutex_unlock(&wp->lock);
896 }
897
898 static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
899                                                  unsigned long write_point)
900 {
901         unsigned hash =
902                 hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));
903
904         return &c->write_points_hash[hash];
905 }
906
907 static struct write_point *__writepoint_find(struct hlist_head *head,
908                                              unsigned long write_point)
909 {
910         struct write_point *wp;
911
912         rcu_read_lock();
913         hlist_for_each_entry_rcu(wp, head, node)
914                 if (wp->write_point == write_point)
915                         goto out;
916         wp = NULL;
917 out:
918         rcu_read_unlock();
919         return wp;
920 }
921
922 static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
923 {
924         u64 stranded    = c->write_points_nr * c->bucket_size_max;
925         u64 free        = bch2_fs_usage_read_short(c).free;
926
927         return stranded * factor > free;
928 }
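/*
 * For a sense of scale (assuming both quantities are in 512-byte sectors):
 * with 32 write points and 2048-sector (1MiB) buckets, stranded is 65536
 * sectors, so try_increase_writepoints() (factor 32) stops adding write
 * points once free space drops below ~2M sectors (~1GiB), and
 * try_decrease_writepoints() (factor 8) starts reclaiming them below
 * ~512k sectors (~256MiB).
 */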
929
930 static bool try_increase_writepoints(struct bch_fs *c)
931 {
932         struct write_point *wp;
933
934         if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
935             too_many_writepoints(c, 32))
936                 return false;
937
938         wp = c->write_points + c->write_points_nr++;
939         hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
940         return true;
941 }
942
943 static bool try_decrease_writepoints(struct bch_fs *c,
944                                      unsigned old_nr)
945 {
946         struct write_point *wp;
947
948         mutex_lock(&c->write_points_hash_lock);
949         if (c->write_points_nr < old_nr) {
950                 mutex_unlock(&c->write_points_hash_lock);
951                 return true;
952         }
953
954         if (c->write_points_nr == 1 ||
955             !too_many_writepoints(c, 8)) {
956                 mutex_unlock(&c->write_points_hash_lock);
957                 return false;
958         }
959
960         wp = c->write_points + --c->write_points_nr;
961
962         hlist_del_rcu(&wp->node);
963         mutex_unlock(&c->write_points_hash_lock);
964
965         bch2_writepoint_stop(c, NULL, wp);
966         return true;
967 }
968
969 static struct write_point *writepoint_find(struct bch_fs *c,
970                                            unsigned long write_point)
971 {
972         struct write_point *wp, *oldest;
973         struct hlist_head *head;
974
975         if (!(write_point & 1UL)) {
976                 wp = (struct write_point *) write_point;
977                 mutex_lock(&wp->lock);
978                 return wp;
979         }
980
981         head = writepoint_hash(c, write_point);
982 restart_find:
983         wp = __writepoint_find(head, write_point);
984         if (wp) {
985 lock_wp:
986                 mutex_lock(&wp->lock);
987                 if (wp->write_point == write_point)
988                         goto out;
989                 mutex_unlock(&wp->lock);
990                 goto restart_find;
991         }
992 restart_find_oldest:
993         oldest = NULL;
994         for (wp = c->write_points;
995              wp < c->write_points + c->write_points_nr; wp++)
996                 if (!oldest || time_before64(wp->last_used, oldest->last_used))
997                         oldest = wp;
998
999         mutex_lock(&oldest->lock);
1000         mutex_lock(&c->write_points_hash_lock);
1001         if (oldest >= c->write_points + c->write_points_nr ||
1002             try_increase_writepoints(c)) {
1003                 mutex_unlock(&c->write_points_hash_lock);
1004                 mutex_unlock(&oldest->lock);
1005                 goto restart_find_oldest;
1006         }
1007
1008         wp = __writepoint_find(head, write_point);
1009         if (wp && wp != oldest) {
1010                 mutex_unlock(&c->write_points_hash_lock);
1011                 mutex_unlock(&oldest->lock);
1012                 goto lock_wp;
1013         }
1014
1015         wp = oldest;
1016         hlist_del_rcu(&wp->node);
1017         wp->write_point = write_point;
1018         hlist_add_head_rcu(&wp->node, head);
1019         mutex_unlock(&c->write_points_hash_lock);
1020 out:
1021         wp->last_used = sched_clock();
1022         return wp;
1023 }
1024
1025 /*
1026  * Get us a write point we can allocate from, return with it locked:
1027  */
1028 struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
1029                                 unsigned target,
1030                                 unsigned erasure_code,
1031                                 struct write_point_specifier write_point,
1032                                 struct bch_devs_list *devs_have,
1033                                 unsigned nr_replicas,
1034                                 unsigned nr_replicas_required,
1035                                 enum alloc_reserve reserve,
1036                                 unsigned flags,
1037                                 struct closure *cl)
1038 {
1039         struct write_point *wp;
1040         struct open_bucket *ob;
1041         struct open_buckets ptrs;
1042         unsigned nr_effective, write_points_nr;
1043         unsigned ob_flags = 0;
1044         bool have_cache;
1045         int ret;
1046         int i;
1047
1048         if (!(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS))
1049                 ob_flags |= BUCKET_ALLOC_USE_DURABILITY;
1050
1051         BUG_ON(!nr_replicas || !nr_replicas_required);
1052 retry:
1053         ptrs.nr         = 0;
1054         nr_effective    = 0;
1055         write_points_nr = c->write_points_nr;
1056         have_cache      = false;
1057
1058         wp = writepoint_find(c, write_point.v);
1059
1060         if (wp->data_type == BCH_DATA_user)
1061                 ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;
1062
1063         /* metadata may not allocate on cache devices: */
1064         if (wp->data_type != BCH_DATA_user)
1065                 have_cache = true;
1066
1067         if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
1068                 ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
1069                                               target, erasure_code,
1070                                               nr_replicas, &nr_effective,
1071                                               &have_cache, reserve,
1072                                               ob_flags, cl);
1073         } else {
1074                 ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
1075                                               target, erasure_code,
1076                                               nr_replicas, &nr_effective,
1077                                               &have_cache, reserve,
1078                                               ob_flags, NULL);
1079                 if (!ret)
1080                         goto alloc_done;
1081
1082                 ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
1083                                               0, erasure_code,
1084                                               nr_replicas, &nr_effective,
1085                                               &have_cache, reserve,
1086                                               ob_flags, cl);
1087         }
1088 alloc_done:
1089         BUG_ON(!ret && nr_effective < nr_replicas);
1090
1091         if (erasure_code && !ec_open_bucket(c, &ptrs))
1092                 pr_debug("failed to get ec bucket: ret %u", ret);
1093
1094         if (ret == -INSUFFICIENT_DEVICES &&
1095             nr_effective >= nr_replicas_required)
1096                 ret = 0;
1097
1098         if (ret)
1099                 goto err;
1100
1101         /* Free buckets we didn't use: */
1102         open_bucket_for_each(c, &wp->ptrs, ob, i)
1103                 open_bucket_free_unused(c, wp, ob);
1104
1105         wp->ptrs = ptrs;
1106
1107         wp->sectors_free = UINT_MAX;
1108
1109         open_bucket_for_each(c, &wp->ptrs, ob, i)
1110                 wp->sectors_free = min(wp->sectors_free, ob->sectors_free);
1111
1112         BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);
1113
1114         return wp;
1115 err:
1116         open_bucket_for_each(c, &wp->ptrs, ob, i)
1117                 if (ptrs.nr < ARRAY_SIZE(ptrs.v))
1118                         ob_push(c, &ptrs, ob);
1119                 else
1120                         open_bucket_free_unused(c, wp, ob);
1121         wp->ptrs = ptrs;
1122
1123         mutex_unlock(&wp->lock);
1124
1125         if (ret == -FREELIST_EMPTY &&
1126             try_decrease_writepoints(c, write_points_nr))
1127                 goto retry;
1128
1129         switch (ret) {
1130         case -OPEN_BUCKETS_EMPTY:
1131         case -FREELIST_EMPTY:
1132                 return cl ? ERR_PTR(-EAGAIN) : ERR_PTR(-ENOSPC);
1133         case -INSUFFICIENT_DEVICES:
1134                 return ERR_PTR(-EROFS);
1135         default:
1136                 return ERR_PTR(ret);
1137         }
1138 }
1139
1140 struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
1141 {
1142         struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
1143
1144         return (struct bch_extent_ptr) {
1145                 .type   = 1 << BCH_EXTENT_ENTRY_ptr,
1146                 .gen    = ob->gen,
1147                 .dev    = ob->dev,
1148                 .offset = bucket_to_sector(ca, ob->bucket) +
1149                         ca->mi.bucket_size -
1150                         ob->sectors_free,
1151         };
1152 }
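/*
 * The .offset above is the first unwritten sector of the bucket: with
 * (illustrative numbers) 2048-sector buckets, ob->bucket == 100 and 1536
 * sectors still free, the pointer points at sector
 * 100 * 2048 + (2048 - 1536) = 205312.
 */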
1153
1154 /*
1155  * Append pointers to the space we just allocated to @k, and mark @sectors space
1156  * as allocated out of @ob
1157  */
1158 void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
1159                                     struct bkey_i *k, unsigned sectors,
1160                                     bool cached)
1161
1162 {
1163         struct open_bucket *ob;
1164         unsigned i;
1165
1166         BUG_ON(sectors > wp->sectors_free);
1167         wp->sectors_free -= sectors;
1168
1169         open_bucket_for_each(c, &wp->ptrs, ob, i) {
1170                 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
1171                 struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);
1172
1173                 ptr.cached = cached ||
1174                         (!ca->mi.durability &&
1175                          wp->data_type == BCH_DATA_user);
1176
1177                 bch2_bkey_append_ptr(k, ptr);
1178
1179                 BUG_ON(sectors > ob->sectors_free);
1180                 ob->sectors_free -= sectors;
1181         }
1182 }
1183
1184 /*
1185  * Finish the write: unlock the write point, keep the open buckets that still
1186  * have sectors free and put the ones that are now full
1187  */
1188 void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
1189 {
1190         struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
1191         struct open_bucket *ob;
1192         unsigned i;
1193
1194         open_bucket_for_each(c, &wp->ptrs, ob, i)
1195                 ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
1196         wp->ptrs = keep;
1197
1198         mutex_unlock(&wp->lock);
1199
1200         bch2_open_buckets_put(c, &ptrs);
1201 }
1202
1203 static inline void writepoint_init(struct write_point *wp,
1204                                    enum bch_data_type type)
1205 {
1206         mutex_init(&wp->lock);
1207         wp->data_type = type;
1208 }
1209
1210 void bch2_fs_allocator_foreground_init(struct bch_fs *c)
1211 {
1212         struct open_bucket *ob;
1213         struct write_point *wp;
1214
1215         mutex_init(&c->write_points_hash_lock);
1216         c->write_points_nr = ARRAY_SIZE(c->write_points);
1217
1218         /* open bucket 0 is a sentinel NULL: */
1219         spin_lock_init(&c->open_buckets[0].lock);
1220
1221         for (ob = c->open_buckets + 1;
1222              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
1223                 spin_lock_init(&ob->lock);
1224                 c->open_buckets_nr_free++;
1225
1226                 ob->freelist = c->open_buckets_freelist;
1227                 c->open_buckets_freelist = ob - c->open_buckets;
1228         }
1229
1230         writepoint_init(&c->btree_write_point,          BCH_DATA_btree);
1231         writepoint_init(&c->rebalance_write_point,      BCH_DATA_user);
1232         writepoint_init(&c->copygc_write_point,         BCH_DATA_user);
1233
1234         for (wp = c->write_points;
1235              wp < c->write_points + c->write_points_nr; wp++) {
1236                 writepoint_init(wp, BCH_DATA_user);
1237
1238                 wp->last_used   = sched_clock();
1239                 wp->write_point = (unsigned long) wp;
1240                 hlist_add_head_rcu(&wp->node,
1241                                    writepoint_hash(c, wp->write_point));
1242         }
1243 }
1244
1245 void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
1246 {
1247         struct open_bucket *ob;
1248
1249         for (ob = c->open_buckets;
1250              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1251              ob++) {
1252                 spin_lock(&ob->lock);
1253                 if (ob->valid && !ob->on_partial_list) {
1254                         pr_buf(out, "%zu ref %u type %s\n",
1255                                ob - c->open_buckets,
1256                                atomic_read(&ob->pin),
1257                                bch2_data_types[ob->data_type]);
1258                 }
1259                 spin_unlock(&ob->lock);
1260         }
1261
1262 }