/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has an associated 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that are
 * important, but the code is named as if it's the priorities) are written in
 * an arbitrary list of buckets on disk, with a pointer to them in the journal
 * header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is a
 * smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, the discard happens when a bucket moves from
 * the free_inc list to the free list.
 *
 * It's important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyway - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch2_bucket_alloc() allocates a single bucket from a specific device.
 *
 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
 * in a given filesystem.
 *
 * invalidate_buckets() drives all the processes described above. It's called
 * from bch2_bucket_alloc() and a few other places that need to make sure free
 * buckets are ready.
 *
 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
 * invalidated, invalidate them, and then stick them on the free_inc list -
 * in either lru or fifo order.
 */
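/*
 * Example of the gen-matching rule above (an illustrative sketch; the real
 * check is ptr_stale(), used by verify_not_stale() below): a pointer whose
 * gen no longer matches its bucket's gen must be treated as dead, since the
 * bucket may have been reused:
 *
 *	if (ptr_stale(ca, &ob->ptr))
 *		// the bucket's gen was bumped after this pointer was
 *		// created; the pointer no longer points at valid data
 */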

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "debug.h"
#include "disk_groups.h"
#include "io.h"

#include <linux/math64.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <trace/events/bcachefs.h>

enum bucket_alloc_ret {
        ALLOC_SUCCESS,
        OPEN_BUCKETS_EMPTY,
        FREELIST_EMPTY,         /* Allocator thread not keeping up */
};

/*
 * Open buckets represent a bucket that's currently being allocated from.  They
 * serve two purposes:
 *
 *  - They track buckets that have been partially allocated, allowing for
 *    sub-bucket sized allocations - they're used by the sector allocator below
 *
 *  - They provide a reference to the buckets they own that mark and sweep GC
 *    can find, until the new allocation has a pointer to it inserted into the
 *    btree
 *
 * When allocating some space with the sector allocator, the allocation comes
 * with a reference to an open bucket - the caller is required to put that
 * reference _after_ doing the index update that makes its allocation reachable.
 */
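/*
 * A minimal sketch of that discipline (illustration only; write_data_to()
 * and bch2_index_update() are hypothetical stand-ins for the caller's I/O
 * and btree update):
 *
 *	ob = bch2_bucket_alloc(c, ca, RESERVE_NONE, false, cl);
 *	if (IS_ERR(ob))
 *		return PTR_ERR(ob);
 *
 *	write_data_to(ob->ptr);		// do the write
 *	bch2_index_update(...);		// make the allocation reachable
 *	bch2_open_bucket_put(c, ob);	// only now drop the GC reference
 */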

void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
        struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

        percpu_down_read_preempt_disable(&c->usage_lock);
        spin_lock(&ob->lock);

        bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr),
                               false, gc_pos_alloc(c, ob), 0);
        ob->valid = false;

        spin_unlock(&ob->lock);
        percpu_up_read_preempt_enable(&c->usage_lock);

        spin_lock(&c->freelist_lock);
        ob->freelist = c->open_buckets_freelist;
        c->open_buckets_freelist = ob - c->open_buckets;
        c->open_buckets_nr_free++;
        spin_unlock(&c->freelist_lock);

        closure_wake_up(&c->open_buckets_wait);
}

static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
{
        struct open_bucket *ob;

        BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);

        ob = c->open_buckets + c->open_buckets_freelist;
        c->open_buckets_freelist = ob->freelist;
        atomic_set(&ob->pin, 1);

        c->open_buckets_nr_free--;
        return ob;
}

static void open_bucket_free_unused(struct bch_fs *c,
                                    struct write_point *wp,
                                    struct open_bucket *ob)
{
        struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

        BUG_ON(ca->open_buckets_partial_nr >=
               ARRAY_SIZE(ca->open_buckets_partial));

        if (wp->type == BCH_DATA_USER) {
                spin_lock(&c->freelist_lock);
                ob->on_partial_list = true;
                ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
                        ob - c->open_buckets;
                spin_unlock(&c->freelist_lock);

                closure_wake_up(&c->open_buckets_wait);
                closure_wake_up(&c->freelist_wait);
        } else {
                bch2_open_bucket_put(c, ob);
        }
}

static void verify_not_stale(struct bch_fs *c, const struct open_buckets *obs)
{
#ifdef CONFIG_BCACHEFS_DEBUG
        struct open_bucket *ob;
        unsigned i;

        open_bucket_for_each(c, obs, ob, i) {
                struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

                BUG_ON(ptr_stale(ca, &ob->ptr));
        }
#endif
}

/* _only_ for allocating the journal on a new device: */
long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
{
        struct bucket_array *buckets;
        ssize_t b;

        rcu_read_lock();
        buckets = bucket_array(ca);

        for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++)
                if (is_available_bucket(buckets->b[b].mark))
                        goto success;
        b = -1;
success:
        rcu_read_unlock();
        return b;
}

static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
{
        switch (reserve) {
        case RESERVE_ALLOC:
                return 0;
        case RESERVE_BTREE:
                return BTREE_NODE_RESERVE / 2;
        default:
                return BTREE_NODE_RESERVE;
        }
}
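/*
 * Worked example (the number is purely illustrative): if BTREE_NODE_RESERVE
 * were 16, ordinary allocations would start failing once 16 or fewer open
 * buckets remain, RESERVE_BTREE allocations once 8 or fewer remain, and
 * RESERVE_ALLOC could drain the open buckets completely - guaranteeing the
 * allocator itself can always make progress.
 */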

/**
 * bch2_bucket_alloc - allocate a single bucket from a specific device
 *
 * Returns a pointer to the newly allocated open_bucket on success, or an
 * ERR_PTR() on failure.
 */
struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
                                      enum alloc_reserve reserve,
                                      bool may_alloc_partial,
                                      struct closure *cl)
{
        struct bucket_array *buckets;
        struct open_bucket *ob;
        long bucket = 0;

        spin_lock(&c->freelist_lock);

        if (may_alloc_partial &&
            ca->open_buckets_partial_nr) {
                ob = c->open_buckets +
                        ca->open_buckets_partial[--ca->open_buckets_partial_nr];
                ob->on_partial_list = false;
                spin_unlock(&c->freelist_lock);
                return ob;
        }

        if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
                if (cl)
                        closure_wait(&c->open_buckets_wait, cl);
                spin_unlock(&c->freelist_lock);
                trace_open_bucket_alloc_fail(ca, reserve);
                return ERR_PTR(-OPEN_BUCKETS_EMPTY);
        }

        if (likely(fifo_pop(&ca->free[RESERVE_NONE], bucket)))
                goto out;

        switch (reserve) {
        case RESERVE_ALLOC:
                if (fifo_pop(&ca->free[RESERVE_BTREE], bucket))
                        goto out;
                break;
        case RESERVE_BTREE:
                if (fifo_used(&ca->free[RESERVE_BTREE]) * 2 >=
                    ca->free[RESERVE_BTREE].size &&
                    fifo_pop(&ca->free[RESERVE_BTREE], bucket))
                        goto out;
                break;
        case RESERVE_MOVINGGC:
                if (fifo_pop(&ca->free[RESERVE_MOVINGGC], bucket))
                        goto out;
                break;
        default:
                break;
        }

        if (cl)
                closure_wait(&c->freelist_wait, cl);

        spin_unlock(&c->freelist_lock);

        trace_bucket_alloc_fail(ca, reserve);
        return ERR_PTR(-FREELIST_EMPTY);
out:
        verify_not_on_freelist(c, ca, bucket);

        ob = bch2_open_bucket_alloc(c);

        spin_lock(&ob->lock);
        buckets = bucket_array(ca);

        ob->valid       = true;
        ob->sectors_free = ca->mi.bucket_size;
        ob->ptr         = (struct bch_extent_ptr) {
                .gen    = buckets->b[bucket].mark.gen,
                .offset = bucket_to_sector(ca, bucket),
                .dev    = ca->dev_idx,
        };

        bucket_io_clock_reset(c, ca, bucket, READ);
        bucket_io_clock_reset(c, ca, bucket, WRITE);
        spin_unlock(&ob->lock);

        spin_unlock(&c->freelist_lock);

        bch2_wake_allocator(ca);

        trace_bucket_alloc(ca, reserve);
        return ob;
}
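/*
 * Note for callers (restating the convention used by bch2_bucket_alloc_set()
 * below): failure is reported as an ERR_PTR() encoding an enum
 * bucket_alloc_ret, not as NULL:
 *
 *	ob = bch2_bucket_alloc(c, ca, reserve, true, cl);
 *	if (IS_ERR(ob)) {
 *		enum bucket_alloc_ret ret = -PTR_ERR(ob);
 *		// OPEN_BUCKETS_EMPTY: wait on c->open_buckets_wait
 *		// FREELIST_EMPTY: allocator thread hasn't kept up
 *	}
 */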

static int __dev_alloc_cmp(struct write_point *wp,
                           unsigned l, unsigned r)
{
        return ((wp->next_alloc[l] > wp->next_alloc[r]) -
                (wp->next_alloc[l] < wp->next_alloc[r]));
}

#define dev_alloc_cmp(l, r) __dev_alloc_cmp(wp, l, r)

struct dev_alloc_list bch2_wp_alloc_list(struct bch_fs *c,
                                         struct write_point *wp,
                                         struct bch_devs_mask *devs)
{
        struct dev_alloc_list ret = { .nr = 0 };
        struct bch_dev *ca;
        unsigned i;

        for_each_member_device_rcu(ca, c, i, devs)
                ret.devs[ret.nr++] = i;

        bubble_sort(ret.devs, ret.nr, dev_alloc_cmp);
        return ret;
}
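/*
 * The list comes back sorted by ascending next_alloc, so the device that has
 * accumulated the least allocation "cost" (see bch2_wp_rescale() below) is
 * tried first. bubble_sort() is acceptable here because the array is bounded
 * by BCH_SB_MEMBERS_MAX, a small constant.
 */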

void bch2_wp_rescale(struct bch_fs *c, struct bch_dev *ca,
                     struct write_point *wp)
{
        u64 *v = wp->next_alloc + ca->dev_idx;
        u64 free_space = dev_buckets_free(c, ca);
        u64 free_space_inv = free_space
                ? div64_u64(1ULL << 48, free_space)
                : 1ULL << 48;
        u64 scale = *v / 4;

        if (*v + free_space_inv >= *v)
                *v += free_space_inv;
        else
                *v = U64_MAX;

        for (v = wp->next_alloc;
             v < wp->next_alloc + ARRAY_SIZE(wp->next_alloc); v++)
                *v = *v < scale ? 0 : *v - scale;
}
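/*
 * Worked example of the weighting above (numbers purely illustrative): if
 * dev 0 has 1000 free buckets and dev 1 has 500, free_space_inv is
 * 2^48/1000 and 2^48/500 respectively, so dev 1's next_alloc grows twice as
 * fast as dev 0's per allocation. Since bch2_wp_alloc_list() tries devices
 * in ascending next_alloc order, roughly two thirds of allocations land on
 * dev 0 - allocation pressure stays proportional to free space.
 */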

static int bch2_bucket_alloc_set(struct bch_fs *c,
                                 struct open_buckets *ptrs,
                                 struct write_point *wp,
                                 struct bch_devs_mask *devs_may_alloc,
                                 unsigned nr_replicas,
                                 unsigned *nr_effective,
                                 bool *have_cache,
                                 enum alloc_reserve reserve,
                                 struct closure *cl)
{
        struct dev_alloc_list devs_sorted =
                bch2_wp_alloc_list(c, wp, devs_may_alloc);
        struct bch_dev *ca;
        bool alloc_failure = false;
        unsigned i;

        BUG_ON(*nr_effective >= nr_replicas);

        for (i = 0; i < devs_sorted.nr; i++) {
                struct open_bucket *ob;

                ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
                if (!ca)
                        continue;

                if (!ca->mi.durability &&
                    (*have_cache ||
                     wp->type != BCH_DATA_USER))
                        continue;

                ob = bch2_bucket_alloc(c, ca, reserve,
                                       wp->type == BCH_DATA_USER, cl);
                if (IS_ERR(ob)) {
                        enum bucket_alloc_ret ret = -PTR_ERR(ob);

                        WARN_ON(reserve == RESERVE_MOVINGGC &&
                                ret != OPEN_BUCKETS_EMPTY);

                        if (cl)
                                return -EAGAIN;
                        if (ret == OPEN_BUCKETS_EMPTY)
                                return -ENOSPC;
                        alloc_failure = true;
                        continue;
                }

                __clear_bit(ca->dev_idx, devs_may_alloc->d);
                *nr_effective   += ca->mi.durability;
                *have_cache     |= !ca->mi.durability;

                ob_push(c, ptrs, ob);

                bch2_wp_rescale(c, ca, wp);

                if (*nr_effective >= nr_replicas)
                        return 0;
        }

        return alloc_failure ? -ENOSPC : -EROFS;
}

/* Sector allocator */

static int get_buckets_from_writepoint(struct bch_fs *c,
                                       struct open_buckets *ptrs,
                                       struct write_point *wp,
                                       struct bch_devs_mask *devs_may_alloc,
                                       unsigned nr_replicas,
                                       unsigned *nr_effective,
                                       bool *have_cache)
{
        struct open_buckets ptrs_skip = { .nr = 0 };
        struct open_bucket *ob;
        unsigned i;

        open_bucket_for_each(c, &wp->ptrs, ob, i) {
                struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

                if (*nr_effective < nr_replicas &&
                    test_bit(ob->ptr.dev, devs_may_alloc->d) &&
                    (ca->mi.durability ||
                     (wp->type == BCH_DATA_USER && !*have_cache))) {
                        __clear_bit(ob->ptr.dev, devs_may_alloc->d);
                        *nr_effective   += ca->mi.durability;
                        *have_cache     |= !ca->mi.durability;

                        ob_push(c, ptrs, ob);
                } else {
                        ob_push(c, &ptrs_skip, ob);
                }
        }
        wp->ptrs = ptrs_skip;

        return *nr_effective < nr_replicas ? -ENOSPC : 0;
}

static int open_bucket_add_buckets(struct bch_fs *c,
                                   struct open_buckets *ptrs,
                                   struct write_point *wp,
                                   struct bch_devs_list *devs_have,
                                   u16 target,
                                   unsigned nr_replicas,
                                   unsigned *nr_effective,
                                   bool *have_cache,
                                   enum alloc_reserve reserve,
                                   struct closure *cl)
{
        struct bch_devs_mask devs;
        const struct bch_devs_mask *t;
        struct open_bucket *ob;
        unsigned i;
        int ret;

        percpu_down_read_preempt_disable(&c->usage_lock);
        rcu_read_lock();

        devs = c->rw_devs[wp->type];

        /* Don't allocate from devices we already have pointers to: */
        for (i = 0; i < devs_have->nr; i++)
                __clear_bit(devs_have->devs[i], devs.d);

        open_bucket_for_each(c, ptrs, ob, i)
                __clear_bit(ob->ptr.dev, devs.d);

        t = bch2_target_to_mask(c, target);
        if (t)
                bitmap_and(devs.d, devs.d, t->d, BCH_SB_MEMBERS_MAX);

        ret = get_buckets_from_writepoint(c, ptrs, wp, &devs,
                                nr_replicas, nr_effective, have_cache);
        if (!ret)
                goto out;

        /*
         * Try nonblocking first, so that if one device is full we'll try from
         * other devices:
         */
        ret = bch2_bucket_alloc_set(c, ptrs, wp, &devs,
                                nr_replicas, nr_effective, have_cache,
                                reserve, NULL);
        if (!ret || ret == -EROFS || !cl)
                goto out;

        ret = bch2_bucket_alloc_set(c, ptrs, wp, &devs,
                                nr_replicas, nr_effective, have_cache,
                                reserve, cl);
out:
        rcu_read_unlock();
        percpu_up_read_preempt_enable(&c->usage_lock);

        return ret;
}

void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
                          struct write_point *wp)
{
        struct open_buckets ptrs = { .nr = 0 };
        struct open_bucket *ob;
        unsigned i;

        mutex_lock(&wp->lock);
        open_bucket_for_each(c, &wp->ptrs, ob, i)
                if (!ca || ob->ptr.dev == ca->dev_idx)
                        open_bucket_free_unused(c, wp, ob);
                else
                        ob_push(c, &ptrs, ob);

        wp->ptrs = ptrs;
        mutex_unlock(&wp->lock);
}

static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
                                                 unsigned long write_point)
{
        unsigned hash =
                hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));

        return &c->write_points_hash[hash];
}

static struct write_point *__writepoint_find(struct hlist_head *head,
                                             unsigned long write_point)
{
        struct write_point *wp;

        hlist_for_each_entry_rcu(wp, head, node)
                if (wp->write_point == write_point)
                        return wp;

        return NULL;
}

static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
{
        u64 stranded    = c->write_points_nr * c->bucket_size_max;
        u64 free        = bch2_fs_sectors_free(c, bch2_fs_usage_read(c));

        return stranded * factor > free;
}
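/*
 * Worked example (numbers purely illustrative): with 32 write points and a
 * maximum bucket size of 1024 sectors, up to 32 * 1024 sectors can be
 * stranded in partially filled open buckets. try_increase_writepoints()
 * below refuses to add a write point once stranded * 32 exceeds free space,
 * and try_decrease_writepoints() starts reclaiming write points once
 * stranded * 8 does.
 */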

static bool try_increase_writepoints(struct bch_fs *c)
{
        struct write_point *wp;

        if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
            too_many_writepoints(c, 32))
                return false;

        wp = c->write_points + c->write_points_nr++;
        hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
        return true;
}

static bool try_decrease_writepoints(struct bch_fs *c,
                                     unsigned old_nr)
{
        struct write_point *wp;

        mutex_lock(&c->write_points_hash_lock);
        if (c->write_points_nr < old_nr) {
                mutex_unlock(&c->write_points_hash_lock);
                return true;
        }

        if (c->write_points_nr == 1 ||
            !too_many_writepoints(c, 8)) {
                mutex_unlock(&c->write_points_hash_lock);
                return false;
        }

        wp = c->write_points + --c->write_points_nr;

        hlist_del_rcu(&wp->node);
        mutex_unlock(&c->write_points_hash_lock);

        bch2_writepoint_stop(c, NULL, wp);
        return true;
}

static struct write_point *writepoint_find(struct bch_fs *c,
                                           unsigned long write_point)
{
        struct write_point *wp, *oldest;
        struct hlist_head *head;

        if (!(write_point & 1UL)) {
                wp = (struct write_point *) write_point;
                mutex_lock(&wp->lock);
                return wp;
        }

        head = writepoint_hash(c, write_point);
restart_find:
        wp = __writepoint_find(head, write_point);
        if (wp) {
lock_wp:
                mutex_lock(&wp->lock);
                if (wp->write_point == write_point)
                        goto out;
                mutex_unlock(&wp->lock);
                goto restart_find;
        }
restart_find_oldest:
        oldest = NULL;
        for (wp = c->write_points;
             wp < c->write_points + c->write_points_nr; wp++)
                if (!oldest || time_before64(wp->last_used, oldest->last_used))
                        oldest = wp;

        mutex_lock(&oldest->lock);
        mutex_lock(&c->write_points_hash_lock);
        if (oldest >= c->write_points + c->write_points_nr ||
            try_increase_writepoints(c)) {
                mutex_unlock(&c->write_points_hash_lock);
                mutex_unlock(&oldest->lock);
                goto restart_find_oldest;
        }

        wp = __writepoint_find(head, write_point);
        if (wp && wp != oldest) {
                mutex_unlock(&c->write_points_hash_lock);
                mutex_unlock(&oldest->lock);
                goto lock_wp;
        }

        wp = oldest;
        hlist_del_rcu(&wp->node);
        wp->write_point = write_point;
        hlist_add_head_rcu(&wp->node, head);
        mutex_unlock(&c->write_points_hash_lock);
out:
        wp->last_used = sched_clock();
        return wp;
}
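/*
 * The low bit of @write_point distinguishes the two kinds of specifier
 * handled above: an even value is a direct pointer to a struct write_point
 * (as with c->btree_write_point), while an odd value is a key hashed into
 * c->write_points_hash. A sketch of both, assuming the writepoint_ptr()/
 * writepoint_hashed() helpers declared in alloc_foreground.h:
 *
 *	// dedicated write point: the specifier is the pointer itself (even)
 *	... = bch2_alloc_sectors_start(c, ...,
 *			writepoint_ptr(&c->btree_write_point), ...);
 *
 *	// shared write point: low bit set, found (or stolen) via the hash
 *	... = bch2_alloc_sectors_start(c, ...,
 *			writepoint_hashed((unsigned long) current), ...);
 */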

/*
 * Get us a write point we can allocate from, and return with it locked:
 */
struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
                                unsigned target,
                                struct write_point_specifier write_point,
                                struct bch_devs_list *devs_have,
                                unsigned nr_replicas,
                                unsigned nr_replicas_required,
                                enum alloc_reserve reserve,
                                unsigned flags,
                                struct closure *cl)
{
        struct write_point *wp;
        struct open_bucket *ob;
        unsigned nr_effective = 0;
        struct open_buckets ptrs = { .nr = 0 };
        bool have_cache = false;
        unsigned write_points_nr;
        int ret = 0, i;

        BUG_ON(!nr_replicas || !nr_replicas_required);
retry:
        write_points_nr = c->write_points_nr;
        wp = writepoint_find(c, write_point.v);

        if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
                ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have, target,
                                              nr_replicas, &nr_effective,
                                              &have_cache, reserve, cl);
        } else {
                ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have, target,
                                              nr_replicas, &nr_effective,
                                              &have_cache, reserve, NULL);
                if (!ret)
                        goto alloc_done;

                ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have, 0,
                                              nr_replicas, &nr_effective,
                                              &have_cache, reserve, cl);
        }
alloc_done:
        BUG_ON(!ret && nr_effective < nr_replicas);

        if (ret == -EROFS &&
            nr_effective >= nr_replicas_required)
                ret = 0;

        if (ret)
                goto err;

        /* Free buckets we didn't use: */
        open_bucket_for_each(c, &wp->ptrs, ob, i)
                open_bucket_free_unused(c, wp, ob);

        wp->ptrs = ptrs;

        wp->sectors_free = UINT_MAX;

        open_bucket_for_each(c, &wp->ptrs, ob, i)
                wp->sectors_free = min(wp->sectors_free, ob->sectors_free);

        BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);

        verify_not_stale(c, &wp->ptrs);

        return wp;
err:
        open_bucket_for_each(c, &wp->ptrs, ob, i)
                if (ptrs.nr < ARRAY_SIZE(ptrs.v))
                        ob_push(c, &ptrs, ob);
                else
                        open_bucket_free_unused(c, wp, ob);
        wp->ptrs = ptrs;

        mutex_unlock(&wp->lock);

        if (ret == -ENOSPC &&
            try_decrease_writepoints(c, write_points_nr))
                goto retry;

        return ERR_PTR(ret);
}
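/*
 * A sketch of the full sector-allocator sequence (illustration only;
 * request_sectors and the extent @e are the caller's, and error handling is
 * elided):
 *
 *	wp = bch2_alloc_sectors_start(c, target, write_point, devs_have,
 *				      nr_replicas, nr_replicas_required,
 *				      reserve, flags, cl);
 *	if (IS_ERR(wp))
 *		return PTR_ERR(wp);
 *
 *	sectors = min(request_sectors, wp->sectors_free);
 *	bch2_alloc_sectors_append_ptrs(c, wp, e, sectors);
 *	// ... submit the write for those sectors ...
 *	bch2_alloc_sectors_done(c, wp);	// unlocks wp, puts full buckets
 */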

/*
 * Append pointers to the space we just allocated to @e, and mark @sectors of
 * that space as allocated out of @wp's open buckets
 */
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
                                    struct bkey_i_extent *e, unsigned sectors)
{
        struct open_bucket *ob;
        unsigned i;

        BUG_ON(sectors > wp->sectors_free);
        wp->sectors_free -= sectors;

        open_bucket_for_each(c, &wp->ptrs, ob, i) {
                struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
                struct bch_extent_ptr tmp = ob->ptr;

                EBUG_ON(bch2_extent_has_device(extent_i_to_s_c(e), ob->ptr.dev));

                tmp.cached = bkey_extent_is_cached(&e->k) ||
                        (!ca->mi.durability && wp->type == BCH_DATA_USER);

                tmp.offset += ca->mi.bucket_size - ob->sectors_free;
                extent_ptr_append(e, tmp);

                BUG_ON(sectors > ob->sectors_free);
                ob->sectors_free -= sectors;
        }
}

/*
 * Release the write point: keep the open buckets that still have space, and
 * put our references to the ones that are now full
 */
void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
{
        struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
        struct open_bucket *ob;
        unsigned i;

        open_bucket_for_each(c, &wp->ptrs, ob, i)
                ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
        wp->ptrs = keep;

        mutex_unlock(&wp->lock);

        bch2_open_buckets_put(c, &ptrs);
}

void bch2_fs_allocator_foreground_init(struct bch_fs *c)
{
        struct open_bucket *ob;
        struct write_point *wp;

        mutex_init(&c->write_points_hash_lock);
        c->write_points_nr = ARRAY_SIZE(c->write_points);

        /* open bucket 0 is a sentinel NULL: */
        spin_lock_init(&c->open_buckets[0].lock);

        for (ob = c->open_buckets + 1;
             ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
                spin_lock_init(&ob->lock);
                c->open_buckets_nr_free++;

                ob->freelist = c->open_buckets_freelist;
                c->open_buckets_freelist = ob - c->open_buckets;
        }

        writepoint_init(&c->btree_write_point, BCH_DATA_BTREE);
        writepoint_init(&c->rebalance_write_point, BCH_DATA_USER);

        for (wp = c->write_points;
             wp < c->write_points + c->write_points_nr; wp++) {
                writepoint_init(wp, BCH_DATA_USER);

                wp->last_used   = sched_clock();
                wp->write_point = (unsigned long) wp;
                hlist_add_head_rcu(&wp->node,
                                   writepoint_hash(c, wp->write_point));
        }
}