libbcachefs/alloc_foreground.c (bcachefs-tools-debian, sources at bcachefs commit 24f7e08cd8)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright 2012 Google, Inc.
4  *
5  * Foreground allocator code: allocate buckets from the freelist, and allocate at
6  * sector granularity from writepoints.
7  *
8  * bch2_bucket_alloc() allocates a single bucket from a specific device.
9  *
10  * bch2_bucket_alloc_set() allocates one or more buckets from different devices
11  * in a given filesystem.
12  */
13
14 #include "bcachefs.h"
15 #include "alloc_background.h"
16 #include "alloc_foreground.h"
17 #include "btree_iter.h"
18 #include "btree_update.h"
19 #include "btree_gc.h"
20 #include "buckets.h"
21 #include "buckets_waiting_for_journal.h"
22 #include "clock.h"
23 #include "debug.h"
24 #include "disk_groups.h"
25 #include "ec.h"
26 #include "error.h"
27 #include "io.h"
28 #include "journal.h"
29
30 #include <linux/math64.h>
31 #include <linux/rculist.h>
32 #include <linux/rcupdate.h>
33 #include <trace/events/bcachefs.h>
34
35 const char * const bch2_alloc_reserves[] = {
36 #define x(t) #t,
37         BCH_ALLOC_RESERVES()
38 #undef x
39         NULL
40 };
41
42 /*
43  * Open buckets represent a bucket that's currently being allocated from.  They
44  * serve two purposes:
45  *
46  *  - They track buckets that have been partially allocated, allowing for
47  *    sub-bucket sized allocations - they're used by the sector allocator below
48  *
49  *  - They provide a reference to the buckets they own that mark and sweep GC
50  *    can find, until the new allocation has a pointer to it inserted into the
51  *    btree
52  *
53  * When allocating some space with the sector allocator, the allocation comes
54  * with a reference to an open bucket - the caller is required to put that
55  * reference _after_ doing the index update that makes its allocation reachable.
56  */
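/*
 * A rough sketch of that contract, using the interfaces defined below (the
 * caller shown here is illustrative, not a verbatim user of the API):
 *
 *	wp = bch2_alloc_sectors_start(c, ...);
 *	bch2_alloc_sectors_append_ptrs(c, wp, k, sectors, false);
 *	(take refs on the open buckets in wp->ptrs, e.g. into an open_buckets obs)
 *	bch2_alloc_sectors_done(c, wp);
 *
 *	... submit the write, then do the btree update that makes the new
 *	extent reachable ...
 *
 *	bch2_open_buckets_put(c, &obs);		<- only now drop the refs
 */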
57
58 static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
59 {
60         open_bucket_idx_t idx = ob - c->open_buckets;
61         open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
62
63         ob->hash = *slot;
64         *slot = idx;
65 }
66
67 static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
68 {
69         open_bucket_idx_t idx = ob - c->open_buckets;
70         open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
71
72         while (*slot != idx) {
73                 BUG_ON(!*slot);
74                 slot = &c->open_buckets[*slot].hash;
75         }
76
77         *slot = ob->hash;
78         ob->hash = 0;
79 }
80
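/*
 * Free an open bucket: mark it invalid, pull it out of the open bucket hash
 * and put it back on the freelist, waking anyone waiting for an open bucket.
 * Erasure coded buckets are instead handed back to the EC code.
 */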
81 void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
82 {
83         struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
84
85         if (ob->ec) {
86                 bch2_ec_bucket_written(c, ob);
87                 return;
88         }
89
90         percpu_down_read(&c->mark_lock);
91         spin_lock(&ob->lock);
92
93         ob->valid = false;
94         ob->data_type = 0;
95
96         spin_unlock(&ob->lock);
97         percpu_up_read(&c->mark_lock);
98
99         spin_lock(&c->freelist_lock);
100         bch2_open_bucket_hash_remove(c, ob);
101
102         ob->freelist = c->open_buckets_freelist;
103         c->open_buckets_freelist = ob - c->open_buckets;
104
105         c->open_buckets_nr_free++;
106         ca->nr_open_buckets--;
107         spin_unlock(&c->freelist_lock);
108
109         closure_wake_up(&c->open_buckets_wait);
110 }
111
112 void bch2_open_bucket_write_error(struct bch_fs *c,
113                                   struct open_buckets *obs,
114                                   unsigned dev)
115 {
116         struct open_bucket *ob;
117         unsigned i;
118
119         open_bucket_for_each(c, obs, ob, i)
120                 if (ob->dev == dev && ob->ec)
121                         bch2_ec_bucket_cancel(c, ob);
122 }
123
124 static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
125 {
126         struct open_bucket *ob;
127
128         BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);
129
130         ob = c->open_buckets + c->open_buckets_freelist;
131         c->open_buckets_freelist = ob->freelist;
132         atomic_set(&ob->pin, 1);
133         ob->data_type = 0;
134
135         c->open_buckets_nr_free--;
136         return ob;
137 }
138
139 static void open_bucket_free_unused(struct bch_fs *c,
140                                     struct write_point *wp,
141                                     struct open_bucket *ob)
142 {
143         struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
144         bool may_realloc = wp->data_type == BCH_DATA_user;
145
146         BUG_ON(ca->open_buckets_partial_nr >
147                ARRAY_SIZE(ca->open_buckets_partial));
148
149         if (ca->open_buckets_partial_nr <
150             ARRAY_SIZE(ca->open_buckets_partial) &&
151             may_realloc) {
152                 spin_lock(&c->freelist_lock);
153                 ob->on_partial_list = true;
154                 ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
155                         ob - c->open_buckets;
156                 spin_unlock(&c->freelist_lock);
157
158                 closure_wake_up(&c->open_buckets_wait);
159                 closure_wake_up(&c->freelist_wait);
160         } else {
161                 bch2_open_bucket_put(c, ob);
162         }
163 }
164
165 /* _only_ for allocating the journal on a new device: */
166 long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
167 {
168         while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
169                 u64 b = ca->new_fs_bucket_idx++;
170
171                 if (!is_superblock_bucket(ca, b) &&
172                     (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))
173                         return b;
174         }
175
176         return -1;
177 }
178
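/*
 * Number of free open buckets that must be left in the pool for an allocation
 * at the given reserve to go ahead: btree allocations may drain the pool
 * completely, copygc must leave a quarter free, everything else must leave
 * half free.
 */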
179 static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
180 {
181         switch (reserve) {
182         case RESERVE_btree:
183         case RESERVE_btree_movinggc:
184                 return 0;
185         case RESERVE_movinggc:
186                 return OPEN_BUCKETS_COUNT / 4;
187         default:
188                 return OPEN_BUCKETS_COUNT / 2;
189         }
190 }
191
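/*
 * Core of allocating a single bucket: skip the candidate if it's marked nouse,
 * already open, or still waiting on a journal commit; otherwise take an
 * open_bucket off the freelist (respecting the reserve's limit), fill it in
 * and add it to the open bucket hash.
 */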
192 static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
193                                               u64 bucket,
194                                               enum alloc_reserve reserve,
195                                               struct bch_alloc_v4 *a,
196                                               u64 *skipped_open,
197                                               u64 *skipped_need_journal_commit,
198                                               u64 *skipped_nouse,
199                                               struct closure *cl)
200 {
201         struct open_bucket *ob;
202
203         if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
204                 (*skipped_nouse)++;
205                 return NULL;
206         }
207
208         if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
209                 (*skipped_open)++;
210                 return NULL;
211         }
212
213         if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
214                         c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) {
215                 (*skipped_need_journal_commit)++;
216                 return NULL;
217         }
218
219         spin_lock(&c->freelist_lock);
220
221         if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
222                 if (cl)
223                         closure_wait(&c->open_buckets_wait, cl);
224
225                 if (!c->blocked_allocate_open_bucket)
226                         c->blocked_allocate_open_bucket = local_clock();
227
228                 spin_unlock(&c->freelist_lock);
229                 return ERR_PTR(-OPEN_BUCKETS_EMPTY);
230         }
231
232         /* Recheck under lock: */
233         if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
234                 spin_unlock(&c->freelist_lock);
235                 (*skipped_open)++;
236                 return NULL;
237         }
238
239         ob = bch2_open_bucket_alloc(c);
240
241         spin_lock(&ob->lock);
242
243         ob->valid       = true;
244         ob->sectors_free = ca->mi.bucket_size;
245         ob->alloc_reserve = reserve;
246         ob->dev         = ca->dev_idx;
247         ob->gen         = a->gen;
248         ob->bucket      = bucket;
249         spin_unlock(&ob->lock);
250
251         ca->nr_open_buckets++;
252         bch2_open_bucket_hash_add(c, ob);
253
254         if (c->blocked_allocate_open_bucket) {
255                 bch2_time_stats_update(
256                         &c->times[BCH_TIME_blocked_allocate_open_bucket],
257                         c->blocked_allocate_open_bucket);
258                 c->blocked_allocate_open_bucket = 0;
259         }
260
261         if (c->blocked_allocate) {
262                 bch2_time_stats_update(
263                         &c->times[BCH_TIME_blocked_allocate],
264                         c->blocked_allocate);
265                 c->blocked_allocate = 0;
266         }
267
268         spin_unlock(&c->freelist_lock);
269
270         trace_bucket_alloc(ca, bch2_alloc_reserves[reserve]);
271         return ob;
272 }
273
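/*
 * Allocate a bucket found via the freespace btree: cross-check the freespace
 * entry against the alloc btree (bucket range, generation bits, data type),
 * flagging an inconsistency if they disagree, before handing the bucket to
 * __try_alloc_bucket().
 */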
274 static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
275                                             enum alloc_reserve reserve, u64 free_entry,
276                                             u64 *skipped_open,
277                                             u64 *skipped_need_journal_commit,
278                                             u64 *skipped_nouse,
279                                             struct bkey_s_c freespace_k,
280                                             struct closure *cl)
281 {
282         struct bch_fs *c = trans->c;
283         struct btree_iter iter = { NULL };
284         struct bkey_s_c k;
285         struct open_bucket *ob;
286         struct bch_alloc_v4 a;
287         u64 b = free_entry & ~(~0ULL << 56);
288         unsigned genbits = free_entry >> 56;
289         struct printbuf buf = PRINTBUF;
290         int ret;
291
292         if (b < ca->mi.first_bucket || b >= ca->mi.nbuckets) {
293                 prt_printf(&buf, "freespace btree has bucket outside allowed range %u-%llu\n"
294                        "  freespace key ",
295                         ca->mi.first_bucket, ca->mi.nbuckets);
296                 bch2_bkey_val_to_text(&buf, c, freespace_k);
297                 bch2_trans_inconsistent(trans, "%s", buf.buf);
298                 ob = ERR_PTR(-EIO);
299                 goto err;
300         }
301
302         bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(ca->dev_idx, b), BTREE_ITER_CACHED);
303         k = bch2_btree_iter_peek_slot(&iter);
304         ret = bkey_err(k);
305         if (ret) {
306                 ob = ERR_PTR(ret);
307                 goto err;
308         }
309
310         bch2_alloc_to_v4(k, &a);
311
312         if (genbits != (alloc_freespace_genbits(a) >> 56)) {
313                 prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
314                        "  freespace key ",
315                        genbits, alloc_freespace_genbits(a) >> 56);
316                 bch2_bkey_val_to_text(&buf, c, freespace_k);
317                 prt_printf(&buf, "\n  ");
318                 bch2_bkey_val_to_text(&buf, c, k);
319                 bch2_trans_inconsistent(trans, "%s", buf.buf);
320                 ob = ERR_PTR(-EIO);
321                 goto err;
322
323         }
324
325         if (a.data_type != BCH_DATA_free) {
326                 prt_printf(&buf, "non free bucket in freespace btree\n"
327                        "  freespace key ");
328                 bch2_bkey_val_to_text(&buf, c, freespace_k);
329                 prt_printf(&buf, "\n  ");
330                 bch2_bkey_val_to_text(&buf, c, k);
331                 bch2_trans_inconsistent(trans, "%s", buf.buf);
332                 ob = ERR_PTR(-EIO);
333                 goto err;
334         }
335
336         ob = __try_alloc_bucket(c, ca, b, reserve, &a,
337                                 skipped_open,
338                                 skipped_need_journal_commit,
339                                 skipped_nouse,
340                                 cl);
341 err:
342         bch2_trans_iter_exit(trans, &iter);
343         printbuf_exit(&buf);
344         return ob;
345 }
346
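/*
 * Try to reuse a partially written bucket from the device's partial list, if
 * one is available at a suitable reserve.
 */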
347 static struct open_bucket *try_alloc_partial_bucket(struct bch_fs *c, struct bch_dev *ca,
348                                                     enum alloc_reserve reserve)
349 {
350         struct open_bucket *ob;
351         int i;
352
353         spin_lock(&c->freelist_lock);
354
355         for (i = ca->open_buckets_partial_nr - 1; i >= 0; --i) {
356                 ob = c->open_buckets + ca->open_buckets_partial[i];
357
358                 if (reserve <= ob->alloc_reserve) {
359                         array_remove_item(ca->open_buckets_partial,
360                                           ca->open_buckets_partial_nr,
361                                           i);
362                         ob->on_partial_list = false;
363                         ob->alloc_reserve = reserve;
364                         spin_unlock(&c->freelist_lock);
365                         return ob;
366                 }
367         }
368
369         spin_unlock(&c->freelist_lock);
370         return NULL;
371 }
372
373 /*
374  * This path is for before the freespace btree is initialized:
375  *
376  * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock &
377  * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
378  */
379 static noinline struct open_bucket *
380 bch2_bucket_alloc_trans_early(struct btree_trans *trans,
381                               struct bch_dev *ca,
382                               enum alloc_reserve reserve,
383                               u64 *cur_bucket,
384                               u64 *buckets_seen,
385                               u64 *skipped_open,
386                               u64 *skipped_need_journal_commit,
387                               u64 *skipped_nouse,
388                               struct closure *cl)
389 {
390         struct btree_iter iter;
391         struct bkey_s_c k;
392         struct open_bucket *ob = NULL;
393         int ret;
394
395         *cur_bucket = max_t(u64, *cur_bucket, ca->mi.first_bucket);
396         *cur_bucket = max_t(u64, *cur_bucket, ca->new_fs_bucket_idx);
397
398         for_each_btree_key(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, *cur_bucket),
399                            BTREE_ITER_SLOTS, k, ret) {
400                 struct bch_alloc_v4 a;
401
402                 if (bkey_cmp(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
403                         break;
404
405                 if (ca->new_fs_bucket_idx &&
406                     is_superblock_bucket(ca, k.k->p.offset))
407                         continue;
408
409                 bch2_alloc_to_v4(k, &a);
410
411                 if (a.data_type != BCH_DATA_free)
412                         continue;
413
414                 (*buckets_seen)++;
415
416                 ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, &a,
417                                         skipped_open,
418                                         skipped_need_journal_commit,
419                                         skipped_nouse,
420                                         cl);
421                 if (ob)
422                         break;
423         }
424         bch2_trans_iter_exit(trans, &iter);
425
426         *cur_bucket = iter.pos.offset;
427
428         return ob ?: ERR_PTR(ret ?: -FREELIST_EMPTY);
429 }
430
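/*
 * Walk the freespace btree for this device, trying each bucket covered by each
 * freespace extent until one can be allocated; before the freespace btree has
 * been initialized we fall back to scanning the alloc btree directly.
 */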
431 static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
432                                                    struct bch_dev *ca,
433                                                    enum alloc_reserve reserve,
434                                                    u64 *cur_bucket,
435                                                    u64 *buckets_seen,
436                                                    u64 *skipped_open,
437                                                    u64 *skipped_need_journal_commit,
438                                                    u64 *skipped_nouse,
439                                                    struct closure *cl)
440 {
441         struct btree_iter iter;
442         struct bkey_s_c k;
443         struct open_bucket *ob = NULL;
444         int ret;
445
446         if (unlikely(!ca->mi.freespace_initialized))
447                 return bch2_bucket_alloc_trans_early(trans, ca, reserve,
448                                                      cur_bucket,
449                                                      buckets_seen,
450                                                      skipped_open,
451                                                      skipped_need_journal_commit,
452                                                      skipped_nouse,
453                                                      cl);
454
455         BUG_ON(ca->new_fs_bucket_idx);
456
457         for_each_btree_key_norestart(trans, iter, BTREE_ID_freespace,
458                                      POS(ca->dev_idx, *cur_bucket), 0, k, ret) {
459                 if (k.k->p.inode != ca->dev_idx)
460                         break;
461
462                 for (*cur_bucket = max(*cur_bucket, bkey_start_offset(k.k));
463                      *cur_bucket < k.k->p.offset && !ob;
464                      (*cur_bucket)++) {
465                         if (btree_trans_too_many_iters(trans)) {
466                                 ob = ERR_PTR(-EINTR);
467                                 break;
468                         }
469
470                         (*buckets_seen)++;
471
472                         ob = try_alloc_bucket(trans, ca, reserve,
473                                               *cur_bucket,
474                                               skipped_open,
475                                               skipped_need_journal_commit,
476                                               skipped_nouse,
477                                               k, cl);
478                 }
479                 if (ob)
480                         break;
481         }
482         bch2_trans_iter_exit(trans, &iter);
483
484         return ob ?: ERR_PTR(ret);
485 }
486
487 /**
488  * bch2_bucket_alloc - allocate a single bucket from a specific device
489  *
490  * Returns a pointer to the newly allocated open_bucket on success, or an ERR_PTR() on failure
491  */
492 struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
493                                       enum alloc_reserve reserve,
494                                       bool may_alloc_partial,
495                                       struct closure *cl)
496 {
497         struct open_bucket *ob = NULL;
498         struct bch_dev_usage usage;
499         u64 avail;
500         u64 cur_bucket = 0;
501         u64 buckets_seen = 0;
502         u64 skipped_open = 0;
503         u64 skipped_need_journal_commit = 0;
504         u64 skipped_nouse = 0;
505         bool waiting = false;
506         int ret;
507 again:
508         usage = bch2_dev_usage_read(ca);
509         avail = __dev_buckets_available(ca, usage, reserve);
510
511         if (usage.d[BCH_DATA_need_discard].buckets > avail)
512                 bch2_do_discards(c);
513
514         if (usage.d[BCH_DATA_need_gc_gens].buckets > avail)
515                 bch2_do_gc_gens(c);
516
517         if (should_invalidate_buckets(ca, usage))
518                 bch2_do_invalidates(c);
519
520         if (!avail) {
521                 if (cl && !waiting) {
522                         closure_wait(&c->freelist_wait, cl);
523                         waiting = true;
524                         goto again;
525                 }
526
527                 if (!c->blocked_allocate)
528                         c->blocked_allocate = local_clock();
529
530                 ob = ERR_PTR(-FREELIST_EMPTY);
531                 goto err;
532         }
533
534         if (waiting)
535                 closure_wake_up(&c->freelist_wait);
536
537         if (may_alloc_partial) {
538                 ob = try_alloc_partial_bucket(c, ca, reserve);
539                 if (ob)
540                         return ob;
541         }
542
543         ret = bch2_trans_do(c, NULL, NULL, 0,
544                         PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, reserve,
545                                                         &cur_bucket,
546                                                         &buckets_seen,
547                                                         &skipped_open,
548                                                         &skipped_need_journal_commit,
549                                                         &skipped_nouse,
550                                                         cl)));
551
552         if (skipped_need_journal_commit * 2 > avail)
553                 bch2_journal_flush_async(&c->journal, NULL);
554 err:
555         if (!ob)
556                 ob = ERR_PTR(ret ?: -FREELIST_EMPTY);
557
558         if (IS_ERR(ob)) {
559                 trace_bucket_alloc_fail(ca, bch2_alloc_reserves[reserve], avail,
560                                         buckets_seen,
561                                         skipped_open,
562                                         skipped_need_journal_commit,
563                                         skipped_nouse,
564                                         cl == NULL, PTR_ERR(ob));
565                 atomic_long_inc(&c->bucket_alloc_fail);
566         }
567
568         return ob;
569 }
570
571 static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
572                             unsigned l, unsigned r)
573 {
574         return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
575                 (stripe->next_alloc[l] < stripe->next_alloc[r]));
576 }
577
578 #define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)
579
580 struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
581                                           struct dev_stripe_state *stripe,
582                                           struct bch_devs_mask *devs)
583 {
584         struct dev_alloc_list ret = { .nr = 0 };
585         unsigned i;
586
587         for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
588                 ret.devs[ret.nr++] = i;
589
590         bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
591         return ret;
592 }
593
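/*
 * After allocating from @ca, penalize it in the stripe state: the penalty is
 * inversely proportional to the device's free space, so emptier devices
 * accumulate penalty more slowly and are preferred by bch2_dev_alloc_list(),
 * which sorts devices by smallest counter first. The counters are then scaled
 * down together so they don't overflow while keeping their relative order.
 */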
594 void bch2_dev_stripe_increment(struct bch_dev *ca,
595                                struct dev_stripe_state *stripe)
596 {
597         u64 *v = stripe->next_alloc + ca->dev_idx;
598         u64 free_space = dev_buckets_available(ca, RESERVE_none);
599         u64 free_space_inv = free_space
600                 ? div64_u64(1ULL << 48, free_space)
601                 : 1ULL << 48;
602         u64 scale = *v / 4;
603
604         if (*v + free_space_inv >= *v)
605                 *v += free_space_inv;
606         else
607                 *v = U64_MAX;
608
609         for (v = stripe->next_alloc;
610              v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
611                 *v = *v < scale ? 0 : *v - scale;
612 }
613
614 #define BUCKET_MAY_ALLOC_PARTIAL        (1 << 0)
615 #define BUCKET_ALLOC_USE_DURABILITY     (1 << 1)
616
617 static void add_new_bucket(struct bch_fs *c,
618                            struct open_buckets *ptrs,
619                            struct bch_devs_mask *devs_may_alloc,
620                            unsigned *nr_effective,
621                            bool *have_cache,
622                            unsigned flags,
623                            struct open_bucket *ob)
624 {
625         unsigned durability =
626                 bch_dev_bkey_exists(c, ob->dev)->mi.durability;
627
628         __clear_bit(ob->dev, devs_may_alloc->d);
629         *nr_effective   += (flags & BUCKET_ALLOC_USE_DURABILITY)
630                 ? durability : 1;
631         *have_cache     |= !durability;
632
633         ob_push(c, ptrs, ob);
634 }
635
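/*
 * Allocate buckets from multiple devices, in the order given by the stripe
 * state (which prefers devices with more free space), until *nr_effective
 * reaches nr_replicas. Returns -INSUFFICIENT_DEVICES if we run out of devices
 * before that, or the allocation error if we were asked to block.
 */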
636 int bch2_bucket_alloc_set(struct bch_fs *c,
637                       struct open_buckets *ptrs,
638                       struct dev_stripe_state *stripe,
639                       struct bch_devs_mask *devs_may_alloc,
640                       unsigned nr_replicas,
641                       unsigned *nr_effective,
642                       bool *have_cache,
643                       enum alloc_reserve reserve,
644                       unsigned flags,
645                       struct closure *cl)
646 {
647         struct dev_alloc_list devs_sorted =
648                 bch2_dev_alloc_list(c, stripe, devs_may_alloc);
649         unsigned dev;
650         struct bch_dev *ca;
651         int ret = -INSUFFICIENT_DEVICES;
652         unsigned i;
653
654         BUG_ON(*nr_effective >= nr_replicas);
655
656         for (i = 0; i < devs_sorted.nr; i++) {
657                 struct open_bucket *ob;
658
659                 dev = devs_sorted.devs[i];
660
661                 rcu_read_lock();
662                 ca = rcu_dereference(c->devs[dev]);
663                 if (ca)
664                         percpu_ref_get(&ca->ref);
665                 rcu_read_unlock();
666
667                 if (!ca)
668                         continue;
669
670                 if (!ca->mi.durability && *have_cache) {
671                         percpu_ref_put(&ca->ref);
672                         continue;
673                 }
674
675                 ob = bch2_bucket_alloc(c, ca, reserve,
676                                 flags & BUCKET_MAY_ALLOC_PARTIAL, cl);
677                 if (!IS_ERR(ob))
678                         bch2_dev_stripe_increment(ca, stripe);
679                 percpu_ref_put(&ca->ref);
680
681                 if (IS_ERR(ob)) {
682                         ret = PTR_ERR(ob);
683
684                         if (cl)
685                                 break;
686                         continue;
687                 }
688
689                 add_new_bucket(c, ptrs, devs_may_alloc,
690                                nr_effective, have_cache, flags, ob);
691
692                 if (*nr_effective >= nr_replicas) {
693                         ret = 0;
694                         break;
695                 }
696         }
697
698         return ret;
699 }
700
701 /* Allocate from stripes: */
702
703 /*
704  * if we can't allocate a new stripe because there are already too many
705  * partially filled stripes, force allocating from an existing stripe even when
706  * it's to a device we don't want:
707  */
708
709 static int bucket_alloc_from_stripe(struct bch_fs *c,
710                          struct open_buckets *ptrs,
711                          struct write_point *wp,
712                          struct bch_devs_mask *devs_may_alloc,
713                          u16 target,
714                          unsigned erasure_code,
715                          unsigned nr_replicas,
716                          unsigned *nr_effective,
717                          bool *have_cache,
718                          unsigned flags,
719                          struct closure *cl)
720 {
721         struct dev_alloc_list devs_sorted;
722         struct ec_stripe_head *h;
723         struct open_bucket *ob;
724         struct bch_dev *ca;
725         unsigned i, ec_idx;
726
727         if (!erasure_code)
728                 return 0;
729
730         if (nr_replicas < 2)
731                 return 0;
732
733         if (ec_open_bucket(c, ptrs))
734                 return 0;
735
736         h = bch2_ec_stripe_head_get(c, target, 0, nr_replicas - 1,
737                                     wp == &c->copygc_write_point,
738                                     cl);
739         if (IS_ERR(h))
740                 return PTR_ERR(h);
741         if (!h)
742                 return 0;
743
744         devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);
745
746         for (i = 0; i < devs_sorted.nr; i++)
747                 for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
748                         if (!h->s->blocks[ec_idx])
749                                 continue;
750
751                         ob = c->open_buckets + h->s->blocks[ec_idx];
752                         if (ob->dev == devs_sorted.devs[i] &&
753                             !test_and_set_bit(ec_idx, h->s->blocks_allocated))
754                                 goto got_bucket;
755                 }
756         goto out_put_head;
757 got_bucket:
758         ca = bch_dev_bkey_exists(c, ob->dev);
759
760         ob->ec_idx      = ec_idx;
761         ob->ec          = h->s;
762
763         add_new_bucket(c, ptrs, devs_may_alloc,
764                        nr_effective, have_cache, flags, ob);
765         atomic_inc(&h->s->pin);
766 out_put_head:
767         bch2_ec_stripe_head_put(c, h);
768         return 0;
769 }
770
771 /* Sector allocator */
772
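/*
 * Move the open buckets already attached to this write point that are still
 * usable for the current allocation (allowed device, durability, erasure
 * coding requirement) over to @ptrs; the rest stay on the write point.
 */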
773 static void get_buckets_from_writepoint(struct bch_fs *c,
774                                         struct open_buckets *ptrs,
775                                         struct write_point *wp,
776                                         struct bch_devs_mask *devs_may_alloc,
777                                         unsigned nr_replicas,
778                                         unsigned *nr_effective,
779                                         bool *have_cache,
780                                         unsigned flags,
781                                         bool need_ec)
782 {
783         struct open_buckets ptrs_skip = { .nr = 0 };
784         struct open_bucket *ob;
785         unsigned i;
786
787         open_bucket_for_each(c, &wp->ptrs, ob, i) {
788                 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
789
790                 if (*nr_effective < nr_replicas &&
791                     test_bit(ob->dev, devs_may_alloc->d) &&
792                     (ca->mi.durability ||
793                      (wp->data_type == BCH_DATA_user && !*have_cache)) &&
794                     (ob->ec || !need_ec)) {
795                         add_new_bucket(c, ptrs, devs_may_alloc,
796                                        nr_effective, have_cache,
797                                        flags, ob);
798                 } else {
799                         ob_push(c, &ptrs_skip, ob);
800                 }
801         }
802         wp->ptrs = ptrs_skip;
803 }
804
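/*
 * Top level of getting buckets for a write point: build the mask of candidate
 * devices, try erasure coded buckets first if requested, then reuse buckets
 * already on the write point, and finally allocate new buckets - nonblocking
 * first, so that one full device doesn't stall the whole allocation.
 */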
805 static int open_bucket_add_buckets(struct bch_fs *c,
806                         struct open_buckets *ptrs,
807                         struct write_point *wp,
808                         struct bch_devs_list *devs_have,
809                         u16 target,
810                         unsigned erasure_code,
811                         unsigned nr_replicas,
812                         unsigned *nr_effective,
813                         bool *have_cache,
814                         enum alloc_reserve reserve,
815                         unsigned flags,
816                         struct closure *_cl)
817 {
818         struct bch_devs_mask devs;
819         struct open_bucket *ob;
820         struct closure *cl = NULL;
821         int ret;
822         unsigned i;
823
824         rcu_read_lock();
825         devs = target_rw_devs(c, wp->data_type, target);
826         rcu_read_unlock();
827
828         /* Don't allocate from devices we already have pointers to: */
829         for (i = 0; i < devs_have->nr; i++)
830                 __clear_bit(devs_have->devs[i], devs.d);
831
832         open_bucket_for_each(c, ptrs, ob, i)
833                 __clear_bit(ob->dev, devs.d);
834
835         if (erasure_code) {
836                 if (!ec_open_bucket(c, ptrs)) {
837                         get_buckets_from_writepoint(c, ptrs, wp, &devs,
838                                                     nr_replicas, nr_effective,
839                                                     have_cache, flags, true);
840                         if (*nr_effective >= nr_replicas)
841                                 return 0;
842                 }
843
844                 if (!ec_open_bucket(c, ptrs)) {
845                         ret = bucket_alloc_from_stripe(c, ptrs, wp, &devs,
846                                                  target, erasure_code,
847                                                  nr_replicas, nr_effective,
848                                                  have_cache, flags, _cl);
849                         if (ret == -FREELIST_EMPTY ||
850                             ret == -OPEN_BUCKETS_EMPTY)
851                                 return ret;
852                         if (*nr_effective >= nr_replicas)
853                                 return 0;
854                 }
855         }
856
857         get_buckets_from_writepoint(c, ptrs, wp, &devs,
858                                     nr_replicas, nr_effective,
859                                     have_cache, flags, false);
860         if (*nr_effective >= nr_replicas)
861                 return 0;
862
863 retry_blocking:
864         /*
865          * Try nonblocking first, so that if one device is full we'll try from
866          * other devices:
867          */
868         ret = bch2_bucket_alloc_set(c, ptrs, &wp->stripe, &devs,
869                                 nr_replicas, nr_effective, have_cache,
870                                 reserve, flags, cl);
871         if (ret && ret != -INSUFFICIENT_DEVICES && !cl && _cl) {
872                 cl = _cl;
873                 goto retry_blocking;
874         }
875
876         return ret;
877 }
878
879 void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
880                                 struct open_buckets *obs)
881 {
882         struct open_buckets ptrs = { .nr = 0 };
883         struct open_bucket *ob, *ob2;
884         unsigned i, j;
885
886         open_bucket_for_each(c, obs, ob, i) {
887                 bool drop = !ca || ob->dev == ca->dev_idx;
888
889                 if (!drop && ob->ec) {
890                         mutex_lock(&ob->ec->lock);
891                         for (j = 0; j < ob->ec->new_stripe.key.v.nr_blocks; j++) {
892                                 if (!ob->ec->blocks[j])
893                                         continue;
894
895                                 ob2 = c->open_buckets + ob->ec->blocks[j];
896                                 drop |= ob2->dev == ca->dev_idx;
897                         }
898                         mutex_unlock(&ob->ec->lock);
899                 }
900
901                 if (drop)
902                         bch2_open_bucket_put(c, ob);
903                 else
904                         ob_push(c, &ptrs, ob);
905         }
906
907         *obs = ptrs;
908 }
909
910 void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
911                           struct write_point *wp)
912 {
913         mutex_lock(&wp->lock);
914         bch2_open_buckets_stop_dev(c, ca, &wp->ptrs);
915         mutex_unlock(&wp->lock);
916 }
917
918 static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
919                                                  unsigned long write_point)
920 {
921         unsigned hash =
922                 hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));
923
924         return &c->write_points_hash[hash];
925 }
926
927 static struct write_point *__writepoint_find(struct hlist_head *head,
928                                              unsigned long write_point)
929 {
930         struct write_point *wp;
931
932         rcu_read_lock();
933         hlist_for_each_entry_rcu(wp, head, node)
934                 if (wp->write_point == write_point)
935                         goto out;
936         wp = NULL;
937 out:
938         rcu_read_unlock();
939         return wp;
940 }
941
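/*
 * Space "stranded" behind write points is space held open but not yet written;
 * if it exceeds 1/factor of the filesystem's free space, we have too many
 * write points.
 */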
942 static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
943 {
944         u64 stranded    = c->write_points_nr * c->bucket_size_max;
945         u64 free        = bch2_fs_usage_read_short(c).free;
946
947         return stranded * factor > free;
948 }
949
950 static bool try_increase_writepoints(struct bch_fs *c)
951 {
952         struct write_point *wp;
953
954         if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
955             too_many_writepoints(c, 32))
956                 return false;
957
958         wp = c->write_points + c->write_points_nr++;
959         hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
960         return true;
961 }
962
963 static bool try_decrease_writepoints(struct bch_fs *c,
964                                      unsigned old_nr)
965 {
966         struct write_point *wp;
967
968         mutex_lock(&c->write_points_hash_lock);
969         if (c->write_points_nr < old_nr) {
970                 mutex_unlock(&c->write_points_hash_lock);
971                 return true;
972         }
973
974         if (c->write_points_nr == 1 ||
975             !too_many_writepoints(c, 8)) {
976                 mutex_unlock(&c->write_points_hash_lock);
977                 return false;
978         }
979
980         wp = c->write_points + --c->write_points_nr;
981
982         hlist_del_rcu(&wp->node);
983         mutex_unlock(&c->write_points_hash_lock);
984
985         bch2_writepoint_stop(c, NULL, wp);
986         return true;
987 }
988
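/*
 * Find the write point for @write_point and return it with its lock held:
 * unhashed (pointer) write points are locked directly; otherwise look it up in
 * the hash, and if it's not there steal the least recently used write point
 * (or grow the array, if stranded space permits).
 */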
989 static struct write_point *writepoint_find(struct bch_fs *c,
990                                            unsigned long write_point)
991 {
992         struct write_point *wp, *oldest;
993         struct hlist_head *head;
994
995         if (!(write_point & 1UL)) {
996                 wp = (struct write_point *) write_point;
997                 mutex_lock(&wp->lock);
998                 return wp;
999         }
1000
1001         head = writepoint_hash(c, write_point);
1002 restart_find:
1003         wp = __writepoint_find(head, write_point);
1004         if (wp) {
1005 lock_wp:
1006                 mutex_lock(&wp->lock);
1007                 if (wp->write_point == write_point)
1008                         goto out;
1009                 mutex_unlock(&wp->lock);
1010                 goto restart_find;
1011         }
1012 restart_find_oldest:
1013         oldest = NULL;
1014         for (wp = c->write_points;
1015              wp < c->write_points + c->write_points_nr; wp++)
1016                 if (!oldest || time_before64(wp->last_used, oldest->last_used))
1017                         oldest = wp;
1018
1019         mutex_lock(&oldest->lock);
1020         mutex_lock(&c->write_points_hash_lock);
1021         if (oldest >= c->write_points + c->write_points_nr ||
1022             try_increase_writepoints(c)) {
1023                 mutex_unlock(&c->write_points_hash_lock);
1024                 mutex_unlock(&oldest->lock);
1025                 goto restart_find_oldest;
1026         }
1027
1028         wp = __writepoint_find(head, write_point);
1029         if (wp && wp != oldest) {
1030                 mutex_unlock(&c->write_points_hash_lock);
1031                 mutex_unlock(&oldest->lock);
1032                 goto lock_wp;
1033         }
1034
1035         wp = oldest;
1036         hlist_del_rcu(&wp->node);
1037         wp->write_point = write_point;
1038         hlist_add_head_rcu(&wp->node, head);
1039         mutex_unlock(&c->write_points_hash_lock);
1040 out:
1041         wp->last_used = sched_clock();
1042         return wp;
1043 }
1044
1045 /*
1046  * Get us an open_bucket we can allocate from, return with it locked:
1047  */
1048 struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
1049                                 unsigned target,
1050                                 unsigned erasure_code,
1051                                 struct write_point_specifier write_point,
1052                                 struct bch_devs_list *devs_have,
1053                                 unsigned nr_replicas,
1054                                 unsigned nr_replicas_required,
1055                                 enum alloc_reserve reserve,
1056                                 unsigned flags,
1057                                 struct closure *cl)
1058 {
1059         struct write_point *wp;
1060         struct open_bucket *ob;
1061         struct open_buckets ptrs;
1062         unsigned nr_effective, write_points_nr;
1063         unsigned ob_flags = 0;
1064         bool have_cache;
1065         int ret;
1066         int i;
1067
1068         if (!(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS))
1069                 ob_flags |= BUCKET_ALLOC_USE_DURABILITY;
1070
1071         BUG_ON(!nr_replicas || !nr_replicas_required);
1072 retry:
1073         ptrs.nr         = 0;
1074         nr_effective    = 0;
1075         write_points_nr = c->write_points_nr;
1076         have_cache      = false;
1077
1078         wp = writepoint_find(c, write_point.v);
1079
1080         if (wp->data_type == BCH_DATA_user)
1081                 ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;
1082
1083         /* metadata may not allocate on cache devices: */
1084         if (wp->data_type != BCH_DATA_user)
1085                 have_cache = true;
1086
1087         if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
1088                 ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
1089                                               target, erasure_code,
1090                                               nr_replicas, &nr_effective,
1091                                               &have_cache, reserve,
1092                                               ob_flags, cl);
1093         } else {
1094                 ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
1095                                               target, erasure_code,
1096                                               nr_replicas, &nr_effective,
1097                                               &have_cache, reserve,
1098                                               ob_flags, NULL);
1099                 if (!ret)
1100                         goto alloc_done;
1101
1102                 ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
1103                                               0, erasure_code,
1104                                               nr_replicas, &nr_effective,
1105                                               &have_cache, reserve,
1106                                               ob_flags, cl);
1107         }
1108 alloc_done:
1109         BUG_ON(!ret && nr_effective < nr_replicas);
1110
1111         if (erasure_code && !ec_open_bucket(c, &ptrs))
1112                 pr_debug("failed to get ec bucket: ret %i", ret);
1113
1114         if (ret == -INSUFFICIENT_DEVICES &&
1115             nr_effective >= nr_replicas_required)
1116                 ret = 0;
1117
1118         if (ret)
1119                 goto err;
1120
1121         /* Free buckets we didn't use: */
1122         open_bucket_for_each(c, &wp->ptrs, ob, i)
1123                 open_bucket_free_unused(c, wp, ob);
1124
1125         wp->ptrs = ptrs;
1126
1127         wp->sectors_free = UINT_MAX;
1128
1129         open_bucket_for_each(c, &wp->ptrs, ob, i)
1130                 wp->sectors_free = min(wp->sectors_free, ob->sectors_free);
1131
1132         BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);
1133
1134         return wp;
1135 err:
1136         open_bucket_for_each(c, &wp->ptrs, ob, i)
1137                 if (ptrs.nr < ARRAY_SIZE(ptrs.v))
1138                         ob_push(c, &ptrs, ob);
1139                 else
1140                         open_bucket_free_unused(c, wp, ob);
1141         wp->ptrs = ptrs;
1142
1143         mutex_unlock(&wp->lock);
1144
1145         if (ret == -FREELIST_EMPTY &&
1146             try_decrease_writepoints(c, write_points_nr))
1147                 goto retry;
1148
1149         switch (ret) {
1150         case -OPEN_BUCKETS_EMPTY:
1151         case -FREELIST_EMPTY:
1152                 return cl ? ERR_PTR(-EAGAIN) : ERR_PTR(-ENOSPC);
1153         case -INSUFFICIENT_DEVICES:
1154                 return ERR_PTR(-EROFS);
1155         default:
1156                 return ERR_PTR(ret);
1157         }
1158 }
1159
1160 struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
1161 {
1162         struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
1163
1164         return (struct bch_extent_ptr) {
1165                 .type   = 1 << BCH_EXTENT_ENTRY_ptr,
1166                 .gen    = ob->gen,
1167                 .dev    = ob->dev,
1168                 .offset = bucket_to_sector(ca, ob->bucket) +
1169                         ca->mi.bucket_size -
1170                         ob->sectors_free,
1171         };
1172 }
1173
1174 /*
1175  * Append pointers to the space we just allocated to @k, and mark @sectors space
1176  * as allocated out of @ob
1177  */
1178 void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
1179                                     struct bkey_i *k, unsigned sectors,
1180                                     bool cached)
1181
1182 {
1183         struct open_bucket *ob;
1184         unsigned i;
1185
1186         BUG_ON(sectors > wp->sectors_free);
1187         wp->sectors_free -= sectors;
1188
1189         open_bucket_for_each(c, &wp->ptrs, ob, i) {
1190                 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
1191                 struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);
1192
1193                 ptr.cached = cached ||
1194                         (!ca->mi.durability &&
1195                          wp->data_type == BCH_DATA_user);
1196
1197                 bch2_bkey_append_ptr(k, ptr);
1198
1199                 BUG_ON(sectors > ob->sectors_free);
1200                 ob->sectors_free -= sectors;
1201         }
1202 }
1203
1204 /*
1205  * Finish the allocation: unlock the write point, and release the open buckets
1206  * we've used up (no sectors remaining), keeping the rest on the write point
1207  */
1208 void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
1209 {
1210         struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
1211         struct open_bucket *ob;
1212         unsigned i;
1213
1214         open_bucket_for_each(c, &wp->ptrs, ob, i)
1215                 ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
1216         wp->ptrs = keep;
1217
1218         mutex_unlock(&wp->lock);
1219
1220         bch2_open_buckets_put(c, &ptrs);
1221 }
1222
1223 static inline void writepoint_init(struct write_point *wp,
1224                                    enum bch_data_type type)
1225 {
1226         mutex_init(&wp->lock);
1227         wp->data_type = type;
1228 }
1229
1230 void bch2_fs_allocator_foreground_init(struct bch_fs *c)
1231 {
1232         struct open_bucket *ob;
1233         struct write_point *wp;
1234
1235         mutex_init(&c->write_points_hash_lock);
1236         c->write_points_nr = ARRAY_SIZE(c->write_points);
1237
1238         /* open bucket 0 is a sentinel NULL: */
1239         spin_lock_init(&c->open_buckets[0].lock);
1240
1241         for (ob = c->open_buckets + 1;
1242              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
1243                 spin_lock_init(&ob->lock);
1244                 c->open_buckets_nr_free++;
1245
1246                 ob->freelist = c->open_buckets_freelist;
1247                 c->open_buckets_freelist = ob - c->open_buckets;
1248         }
1249
1250         writepoint_init(&c->btree_write_point,          BCH_DATA_btree);
1251         writepoint_init(&c->rebalance_write_point,      BCH_DATA_user);
1252         writepoint_init(&c->copygc_write_point,         BCH_DATA_user);
1253
1254         for (wp = c->write_points;
1255              wp < c->write_points + c->write_points_nr; wp++) {
1256                 writepoint_init(wp, BCH_DATA_user);
1257
1258                 wp->last_used   = sched_clock();
1259                 wp->write_point = (unsigned long) wp;
1260                 hlist_add_head_rcu(&wp->node,
1261                                    writepoint_hash(c, wp->write_point));
1262         }
1263 }
1264
1265 void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
1266 {
1267         struct open_bucket *ob;
1268
1269         for (ob = c->open_buckets;
1270              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1271              ob++) {
1272                 spin_lock(&ob->lock);
1273                 if (ob->valid && !ob->on_partial_list) {
1274                         prt_printf(out, "%zu ref %u type %s %u:%llu:%u\n",
1275                                ob - c->open_buckets,
1276                                atomic_read(&ob->pin),
1277                                bch2_data_types[ob->data_type],
1278                                ob->dev, ob->bucket, ob->gen);
1279                 }
1280                 spin_unlock(&ob->lock);
1281         }
1282 }