bcachefs-tools-debian: libbcachefs/alloc.c
1 /*
2  * Primary bucket allocation code
3  *
4  * Copyright 2012 Google, Inc.
5  *
6  * Allocation in bcache is done in terms of buckets:
7  *
8  * Each bucket has associated an 8 bit gen; this gen corresponds to the gen in
9  * btree pointers - they must match for the pointer to be considered valid.
10  *
11  * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
12  * bucket simply by incrementing its gen.
13  *
14  * The gens (along with the priorities; it's really the gens that are important but
15  * the code is named as if it's the priorities) are written in an arbitrary list
16  * of buckets on disk, with a pointer to them in the journal header.
17  *
18  * When we invalidate a bucket, we have to write its new gen to disk and wait
19  * for that write to complete before we use it - otherwise after a crash we
20  * could have pointers that appeared to be good but pointed to data that had
21  * been overwritten.
22  *
23  * Since the gens and priorities are all stored contiguously on disk, we can
24  * batch this up: We fill up the free_inc list with freshly invalidated buckets,
25  * call prio_write(), and when prio_write() finishes we pull buckets off the
26  * free_inc list and optionally discard them.
27  *
28  * free_inc isn't the only freelist - if it was, we'd often have to sleep while
29  * priorities and gens were being written before we could allocate. c->free is a
30  * smaller freelist, and buckets on that list are always ready to be used.
31  *
32  * If we've got discards enabled, that happens when a bucket moves from the
33  * free_inc list to the free list.
34  *
35  * It's important to ensure that gens don't wrap around - with respect to
36  * either the oldest gen in the btree or the gen on disk. This is quite
37  * difficult to do in practice, but we explicitly guard against it anyway - if
38  * a bucket is in danger of wrapping around we simply skip invalidating it that
39  * time around, and we garbage collect or rewrite the priorities sooner than we
40  * would have otherwise.
41  *
42  * bch2_bucket_alloc() allocates a single bucket from a specific device.
43  *
44  * bch2_bucket_alloc_set() allocates one or more buckets from different devices
45  * in a given filesystem.
46  *
47  * The allocator thread (bch2_allocator_thread()) drives all the processes
48  * described above. It's woken from bch2_bucket_alloc() and a few other places
49  * that need to make sure free buckets are ready.
50  *
51  * find_reclaimable_buckets_(lru|fifo|random)() find buckets that are available
52  * to be invalidated, invalidate them, and stick them on the free_inc list -
53  * in lru, fifo or random order.
54  */
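
/*
 * Illustrative sketch (hypothetical helper, not the actual implementation -
 * in this file staleness is checked via ptr_stale()) of the gen check
 * described above: a pointer into a bucket is only valid while its gen
 * matches the bucket's current gen, so bumping a bucket's gen invalidates
 * every pointer into it at once:
 *
 *	static inline bool sketch_ptr_valid(u8 bucket_gen, u8 ptr_gen)
 *	{
 *		return bucket_gen == ptr_gen;
 *	}
 */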
55
56 #include "bcachefs.h"
57 #include "alloc.h"
58 #include "btree_cache.h"
59 #include "btree_io.h"
60 #include "btree_update.h"
61 #include "btree_update_interior.h"
62 #include "btree_gc.h"
63 #include "buckets.h"
64 #include "checksum.h"
65 #include "clock.h"
66 #include "debug.h"
67 #include "disk_groups.h"
68 #include "error.h"
69 #include "extents.h"
70 #include "io.h"
71 #include "journal.h"
72 #include "journal_io.h"
73 #include "super-io.h"
74
75 #include <linux/blkdev.h>
76 #include <linux/kthread.h>
77 #include <linux/math64.h>
78 #include <linux/random.h>
79 #include <linux/rculist.h>
80 #include <linux/rcupdate.h>
81 #include <linux/sched/task.h>
82 #include <linux/sort.h>
83 #include <trace/events/bcachefs.h>
84
85 static void bch2_recalc_oldest_io(struct bch_fs *, struct bch_dev *, int);
86
87 /* Ratelimiting/PD controllers */
88
89 static void pd_controllers_update(struct work_struct *work)
90 {
91         struct bch_fs *c = container_of(to_delayed_work(work),
92                                            struct bch_fs,
93                                            pd_controllers_update);
94         struct bch_dev *ca;
95         unsigned i;
96
97         for_each_member_device(ca, c, i) {
98                 struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);
99
100                 u64 free = bucket_to_sector(ca,
101                                 __dev_buckets_free(ca, stats)) << 9;
102                 /*
103                  * Bytes of internal fragmentation, which can be
104                  * reclaimed by copy GC
105                  */
106                 s64 fragmented = (bucket_to_sector(ca,
107                                         stats.buckets[BCH_DATA_USER] +
108                                         stats.buckets[BCH_DATA_CACHED]) -
109                                   (stats.sectors[BCH_DATA_USER] +
110                                    stats.sectors[BCH_DATA_CACHED])) << 9;
111
112                 fragmented = max(0LL, fragmented);
113
114                 bch2_pd_controller_update(&ca->copygc_pd,
115                                          free, fragmented, -1);
116         }
117
118         schedule_delayed_work(&c->pd_controllers_update,
119                               c->pd_controllers_update_seconds * HZ);
120 }
121
122 /* Persistent alloc info: */
123
124 static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)
125 {
126         unsigned bytes = offsetof(struct bch_alloc, data);
127
128         if (a->fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
129                 bytes += 2;
130         if (a->fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
131                 bytes += 2;
132
133         return DIV_ROUND_UP(bytes, sizeof(u64));
134 }
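
/*
 * Worked example (a sketch, assuming offsetof(struct bch_alloc, data) is 2
 * bytes, i.e. just the fields and gen members): with both READ_TIME and
 * WRITE_TIME present the value is 2 + 2 + 2 = 6 bytes, which
 * DIV_ROUND_UP(6, sizeof(u64)) rounds up to a single u64.
 */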
135
136 const char *bch2_alloc_invalid(const struct bch_fs *c, struct bkey_s_c k)
137 {
138         if (k.k->p.inode >= c->sb.nr_devices ||
139             !c->devs[k.k->p.inode])
140                 return "invalid device";
141
142         switch (k.k->type) {
143         case BCH_ALLOC: {
144                 struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
145
146                 if (bch_alloc_val_u64s(a.v) != bkey_val_u64s(a.k))
147                         return "incorrect value size";
148                 break;
149         }
150         default:
151                 return "invalid type";
152         }
153
154         return NULL;
155 }
156
157 void bch2_alloc_to_text(struct bch_fs *c, char *buf,
158                         size_t size, struct bkey_s_c k)
159 {
160         buf[0] = '\0';
161
162         switch (k.k->type) {
163         case BCH_ALLOC:
164                 break;
165         }
166 }
167
168 static inline unsigned get_alloc_field(const u8 **p, unsigned bytes)
169 {
170         unsigned v;
171
172         switch (bytes) {
173         case 1:
174                 v = **p;
175                 break;
176         case 2:
177                 v = le16_to_cpup((void *) *p);
178                 break;
179         case 4:
180                 v = le32_to_cpup((void *) *p);
181                 break;
182         default:
183                 BUG();
184         }
185
186         *p += bytes;
187         return v;
188 }
189
190 static inline void put_alloc_field(u8 **p, unsigned bytes, unsigned v)
191 {
192         switch (bytes) {
193         case 1:
194                 **p = v;
195                 break;
196         case 2:
197                 *((__le16 *) *p) = cpu_to_le16(v);
198                 break;
199         case 4:
200                 *((__le32 *) *p) = cpu_to_le32(v);
201                 break;
202         default:
203                 BUG();
204         }
205
206         *p += bytes;
207 }
208
209 static void bch2_alloc_read_key(struct bch_fs *c, struct bkey_s_c k)
210 {
211         struct bch_dev *ca;
212         struct bkey_s_c_alloc a;
213         struct bucket_mark new;
214         struct bucket *g;
215         const u8 *d;
216
217         if (k.k->type != BCH_ALLOC)
218                 return;
219
220         a = bkey_s_c_to_alloc(k);
221         ca = bch_dev_bkey_exists(c, a.k->p.inode);
222
223         if (a.k->p.offset >= ca->mi.nbuckets)
224                 return;
225
226         percpu_down_read_preempt_disable(&c->usage_lock);
227
228         g = bucket(ca, a.k->p.offset);
229         bucket_cmpxchg(g, new, ({
230                 new.gen = a.v->gen;
231                 new.gen_valid = 1;
232         }));
233
234         d = a.v->data;
235         if (a.v->fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
236                 g->io_time[READ] = get_alloc_field(&d, 2);
237         if (a.v->fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
238                 g->io_time[WRITE] = get_alloc_field(&d, 2);
239
240         percpu_up_read_preempt_enable(&c->usage_lock);
241 }
242
243 int bch2_alloc_read(struct bch_fs *c, struct list_head *journal_replay_list)
244 {
245         struct journal_replay *r;
246         struct btree_iter iter;
247         struct bkey_s_c k;
248         struct bch_dev *ca;
249         unsigned i;
250         int ret;
251
252         for_each_btree_key(&iter, c, BTREE_ID_ALLOC, POS_MIN, 0, k) {
253                 bch2_alloc_read_key(c, k);
254                 bch2_btree_iter_cond_resched(&iter);
255         }
256
257         ret = bch2_btree_iter_unlock(&iter);
258         if (ret)
259                 return ret;
260
261         list_for_each_entry(r, journal_replay_list, list) {
262                 struct bkey_i *k, *n;
263                 struct jset_entry *entry;
264
265                 for_each_jset_key(k, n, entry, &r->j)
266                         if (entry->btree_id == BTREE_ID_ALLOC)
267                                 bch2_alloc_read_key(c, bkey_i_to_s_c(k));
268         }
269
270         mutex_lock(&c->bucket_clock[READ].lock);
271         for_each_member_device(ca, c, i) {
272                 down_read(&ca->bucket_lock);
273                 bch2_recalc_oldest_io(c, ca, READ);
274                 up_read(&ca->bucket_lock);
275         }
276         mutex_unlock(&c->bucket_clock[READ].lock);
277
278         mutex_lock(&c->bucket_clock[WRITE].lock);
279         for_each_member_device(ca, c, i) {
280                 down_read(&ca->bucket_lock);
281                 bch2_recalc_oldest_io(c, ca, WRITE);
282                 up_read(&ca->bucket_lock);
283         }
284         mutex_unlock(&c->bucket_clock[WRITE].lock);
285
286         return 0;
287 }
288
289 static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
290                                   size_t b, struct btree_iter *iter,
291                                   u64 *journal_seq, bool nowait)
292 {
293         struct bucket_mark m;
294         __BKEY_PADDED(k, DIV_ROUND_UP(sizeof(struct bch_alloc), 8)) alloc_key;
295         struct bucket *g;
296         struct bkey_i_alloc *a;
297         u8 *d;
298         int ret;
299         unsigned flags = BTREE_INSERT_ATOMIC|
300                 BTREE_INSERT_NOFAIL|
301                 BTREE_INSERT_USE_RESERVE|
302                 BTREE_INSERT_USE_ALLOC_RESERVE;
303
304         if (nowait)
305                 flags |= BTREE_INSERT_NOWAIT;
306
307         bch2_btree_iter_set_pos(iter, POS(ca->dev_idx, b));
308
309         do {
310                 ret = btree_iter_err(bch2_btree_iter_peek_slot(iter));
311                 if (ret)
312                         break;
313
314                 percpu_down_read_preempt_disable(&c->usage_lock);
315                 g = bucket(ca, b);
316
317                 /* read mark under btree node lock: */
318                 m = READ_ONCE(g->mark);
319                 a = bkey_alloc_init(&alloc_key.k);
320                 a->k.p          = iter->pos;
321                 a->v.fields     = 0;
322                 a->v.gen        = m.gen;
323                 set_bkey_val_u64s(&a->k, bch_alloc_val_u64s(&a->v));
324
325                 d = a->v.data;
326                 if (a->v.fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
327                         put_alloc_field(&d, 2, g->io_time[READ]);
328                 if (a->v.fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
329                         put_alloc_field(&d, 2, g->io_time[WRITE]);
330                 percpu_up_read_preempt_enable(&c->usage_lock);
331
332                 ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq, flags,
333                                            BTREE_INSERT_ENTRY(iter, &a->k_i));
334                 bch2_btree_iter_cond_resched(iter);
335         } while (ret == -EINTR);
336
337         return ret;
338 }
339
340 int bch2_alloc_replay_key(struct bch_fs *c, struct bpos pos)
341 {
342         struct bch_dev *ca;
343         struct btree_iter iter;
344         int ret;
345
346         if (pos.inode >= c->sb.nr_devices || !c->devs[pos.inode])
347                 return 0;
348
349         ca = bch_dev_bkey_exists(c, pos.inode);
350
351         if (pos.offset >= ca->mi.nbuckets)
352                 return 0;
353
354         bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
355                              BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
356
357         ret = __bch2_alloc_write_key(c, ca, pos.offset, &iter,
358                                      NULL, false);
359         bch2_btree_iter_unlock(&iter);
360         return ret;
361 }
362
363 int bch2_alloc_write(struct bch_fs *c)
364 {
365         struct bch_dev *ca;
366         unsigned i;
367         int ret = 0;
368
369         for_each_rw_member(ca, c, i) {
370                 struct btree_iter iter;
371                 unsigned long bucket;
372
373                 bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
374                                      BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
375
376                 down_read(&ca->bucket_lock);
377                 for_each_set_bit(bucket, ca->buckets_dirty, ca->mi.nbuckets) {
378                         ret = __bch2_alloc_write_key(c, ca, bucket, &iter,
379                                                      NULL, false);
380                         if (ret)
381                                 break;
382
383                         clear_bit(bucket, ca->buckets_dirty);
384                 }
385                 up_read(&ca->bucket_lock);
386                 bch2_btree_iter_unlock(&iter);
387
388                 if (ret) {
389                         percpu_ref_put(&ca->io_ref);
390                         break;
391                 }
392         }
393
394         return ret;
395 }
396
397 /* Bucket IO clocks: */
398
399 static void bch2_recalc_oldest_io(struct bch_fs *c, struct bch_dev *ca, int rw)
400 {
401         struct bucket_clock *clock = &c->bucket_clock[rw];
402         struct bucket_array *buckets = bucket_array(ca);
403         struct bucket *g;
404         u16 max_last_io = 0;
405         unsigned i;
406
407         lockdep_assert_held(&c->bucket_clock[rw].lock);
408
409         /* Recalculate max_last_io for this device: */
410         for_each_bucket(g, buckets)
411                 max_last_io = max(max_last_io, bucket_last_io(c, g, rw));
412
413         ca->max_last_bucket_io[rw] = max_last_io;
414
415         /* Recalculate global max_last_io: */
416         max_last_io = 0;
417
418         for_each_member_device(ca, c, i)
419                 max_last_io = max(max_last_io, ca->max_last_bucket_io[rw]);
420
421         clock->max_last_io = max_last_io;
422 }
423
424 static void bch2_rescale_bucket_io_times(struct bch_fs *c, int rw)
425 {
426         struct bucket_clock *clock = &c->bucket_clock[rw];
427         struct bucket_array *buckets;
428         struct bch_dev *ca;
429         struct bucket *g;
430         unsigned i;
431
432         trace_rescale_prios(c);
433
434         for_each_member_device(ca, c, i) {
435                 down_read(&ca->bucket_lock);
436                 buckets = bucket_array(ca);
437
438                 for_each_bucket(g, buckets)
439                         g->io_time[rw] = clock->hand -
440                         bucket_last_io(c, g, rw) / 2;
441
442                 bch2_recalc_oldest_io(c, ca, rw);
443
444                 up_read(&ca->bucket_lock);
445         }
446 }
447
448 static void bch2_inc_clock_hand(struct io_timer *timer)
449 {
450         struct bucket_clock *clock = container_of(timer,
451                                                 struct bucket_clock, rescale);
452         struct bch_fs *c = container_of(clock,
453                                         struct bch_fs, bucket_clock[clock->rw]);
454         struct bch_dev *ca;
455         u64 capacity;
456         unsigned i;
457
458         mutex_lock(&clock->lock);
459
460         /* if clock cannot be advanced more, rescale prio */
461         if (clock->max_last_io >= U16_MAX - 2)
462                 bch2_rescale_bucket_io_times(c, clock->rw);
463
464         BUG_ON(clock->max_last_io >= U16_MAX - 2);
465
466         for_each_member_device(ca, c, i)
467                 ca->max_last_bucket_io[clock->rw]++;
468         clock->max_last_io++;
469         clock->hand++;
470
471         mutex_unlock(&clock->lock);
472
473         capacity = READ_ONCE(c->capacity);
474
475         if (!capacity)
476                 return;
477
478         /*
479          * we only advance the clock hand once 0.1% of the filesystem capacity
480          * has been read or written to - this determines how quickly bucket I/O times age
481          *
482          * XXX: we shouldn't really be going off of the capacity of devices in
483          * RW mode (that will be 0 when we're RO, yet we can still service
484          * reads)
485          */
486         timer->expire += capacity >> 10;
487
488         bch2_io_timer_add(&c->io_clock[clock->rw], timer);
489 }
490
491 static void bch2_bucket_clock_init(struct bch_fs *c, int rw)
492 {
493         struct bucket_clock *clock = &c->bucket_clock[rw];
494
495         clock->hand             = 1;
496         clock->rw               = rw;
497         clock->rescale.fn       = bch2_inc_clock_hand;
498         clock->rescale.expire   = c->capacity >> 10;
499         mutex_init(&clock->lock);
500 }
501
502 /* Background allocator thread: */
503
504 /*
505  * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
506  * (marking them as invalidated on disk), then optionally issues discard
507  * commands to the newly free buckets, then puts them on the various freelists.
508  */
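
/*
 * Roughly, one pass of the allocator thread below looks like this (a sketch
 * of the flow, using helpers defined further down in this file):
 *
 *	find_reclaimable_buckets(c, ca);	-- fill free_inc
 *	sort_free_inc(c, ca);
 *	bch2_invalidate_free_inc(c, ca, &journal_seq, SIZE_MAX, true);
 *	bch2_journal_flush_seq(&c->journal, journal_seq);	-- if needed
 *	discard_invalidated_buckets(c, ca);	-- discard, then push onto reserves
 */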
509
510 static void verify_not_on_freelist(struct bch_fs *c, struct bch_dev *ca,
511                                    size_t bucket)
512 {
513         if (expensive_debug_checks(c) &&
514             test_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags)) {
515                 size_t iter;
516                 long i;
517                 unsigned j;
518
519                 for (j = 0; j < RESERVE_NR; j++)
520                         fifo_for_each_entry(i, &ca->free[j], iter)
521                                 BUG_ON(i == bucket);
522                 fifo_for_each_entry(i, &ca->free_inc, iter)
523                         BUG_ON(i == bucket);
524         }
525 }
526
527 #define BUCKET_GC_GEN_MAX       96U
528
529 /**
530  * wait_buckets_available - wait on reclaimable buckets
531  *
532  * If there aren't enough available buckets to fill up free_inc, wait until
533  * there are.
534  */
535 static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
536 {
537         unsigned long gc_count = c->gc_count;
538         int ret = 0;
539
540         while (1) {
541                 set_current_state(TASK_INTERRUPTIBLE);
542                 if (kthread_should_stop()) {
543                         ret = 1;
544                         break;
545                 }
546
547                 if (gc_count != c->gc_count)
548                         ca->inc_gen_really_needs_gc = 0;
549
550                 if ((ssize_t) (dev_buckets_available(c, ca) -
551                                ca->inc_gen_really_needs_gc) >=
552                     (ssize_t) fifo_free(&ca->free_inc))
553                         break;
554
555                 up_read(&c->gc_lock);
556                 schedule();
557                 try_to_freeze();
558                 down_read(&c->gc_lock);
559         }
560
561         __set_current_state(TASK_RUNNING);
562         return ret;
563 }
564
565 static bool bch2_can_invalidate_bucket(struct bch_dev *ca,
566                                        size_t bucket,
567                                        struct bucket_mark mark)
568 {
569         u8 gc_gen;
570
571         if (!is_available_bucket(mark))
572                 return false;
573
574         gc_gen = bucket_gc_gen(ca, bucket);
575
576         if (gc_gen >= BUCKET_GC_GEN_MAX / 2)
577                 ca->inc_gen_needs_gc++;
578
579         if (gc_gen >= BUCKET_GC_GEN_MAX)
580                 ca->inc_gen_really_needs_gc++;
581
582         return gc_gen < BUCKET_GC_GEN_MAX;
583 }
584
585 static void bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
586                                        size_t bucket)
587 {
588         struct bucket_mark m;
589
590         percpu_down_read_preempt_disable(&c->usage_lock);
591         spin_lock(&c->freelist_lock);
592
593         if (!bch2_invalidate_bucket(c, ca, bucket, &m)) {
594                 spin_unlock(&c->freelist_lock);
595                 percpu_up_read_preempt_enable(&c->usage_lock);
596                 return;
597         }
598
599         verify_not_on_freelist(c, ca, bucket);
600         BUG_ON(!fifo_push(&ca->free_inc, bucket));
601
602         spin_unlock(&c->freelist_lock);
603         percpu_up_read_preempt_enable(&c->usage_lock);
604
605         /* gc lock held: */
606         bucket_io_clock_reset(c, ca, bucket, READ);
607         bucket_io_clock_reset(c, ca, bucket, WRITE);
608
609         if (m.cached_sectors) {
610                 ca->allocator_invalidating_data = true;
611         } else if (m.journal_seq_valid) {
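		/*
		 * The bucket mark only stores the low 16 bits of the relevant
		 * journal sequence number; reconstruct the full value relative
		 * to the current journal sequence, stepping back one 16-bit
		 * wrap if that would land in the future:
		 */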
612                 u64 journal_seq = atomic64_read(&c->journal.seq);
613                 u64 bucket_seq  = journal_seq;
614
615                 bucket_seq &= ~((u64) U16_MAX);
616                 bucket_seq |= m.journal_seq;
617
618                 if (bucket_seq > journal_seq)
619                         bucket_seq -= 1 << 16;
620
621                 ca->allocator_journal_seq_flush =
622                         max(ca->allocator_journal_seq_flush, bucket_seq);
623         }
624 }
625
626 /*
627  * Determines the order in which we're going to reuse buckets: smallest bucket_sort_key()
628  * first.
629  *
630  *
631  * - We take into account the read prio of the bucket, which gives us an
632  *   indication of how hot the data is -- we scale the prio so that the prio
633  *   farthest from the clock is worth 1/8th of the closest.
634  *
635  * - The number of sectors of cached data in the bucket, which gives us an
636  *   indication of the cost in cache misses this eviction will cause.
637  *
638  * - If hotness * sectors used compares equal, we pick the bucket with the
639  *   smallest bucket_gc_gen() - since incrementing the same bucket's generation
640  *   number repeatedly forces us to run mark and sweep gc to avoid generation
641  *   number wraparound.
642  */
643
644 static unsigned long bucket_sort_key(struct bch_fs *c, struct bch_dev *ca,
645                                      size_t b, struct bucket_mark m)
646 {
647         unsigned last_io = bucket_last_io(c, bucket(ca, b), READ);
648         unsigned max_last_io = ca->max_last_bucket_io[READ];
649
650         /*
651          * Time since last read, scaled to [0, 8) where larger value indicates
652          * more recently read data:
653          */
654         unsigned long hotness = (max_last_io - last_io) * 7 / max_last_io;
655
656         /* How much we want to keep the data in this bucket: */
657         unsigned long data_wantness =
658                 (hotness + 1) * bucket_sectors_used(m);
659
660         unsigned long needs_journal_commit =
661                 bucket_needs_journal_commit(m, c->journal.last_seq_ondisk);
662
663         return  (data_wantness << 9) |
664                 (needs_journal_commit << 8) |
665                 bucket_gc_gen(ca, b);
666 }
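
/*
 * Worked example of the packing above (illustrative numbers): with
 * max_last_io = 1000 and last_io = 100, hotness = 900 * 7 / 1000 = 6; with
 * 32 cached sectors, data_wantness = (6 + 1) * 32 = 224. The key is then
 * (224 << 9) | (needs_journal_commit << 8) | bucket_gc_gen(ca, b), so the
 * buckets whose data we care least about keeping sort first.
 */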
667
668 static inline int bucket_alloc_cmp(alloc_heap *h,
669                                    struct alloc_heap_entry l,
670                                    struct alloc_heap_entry r)
671 {
672         return (l.key > r.key) - (l.key < r.key) ?:
673                 (l.nr < r.nr)  - (l.nr  > r.nr) ?:
674                 (l.bucket > r.bucket) - (l.bucket < r.bucket);
675 }
676
677 static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
678 {
679         struct bucket_array *buckets;
680         struct alloc_heap_entry e = { 0 };
681         size_t b;
682
683         ca->alloc_heap.used = 0;
684
685         mutex_lock(&c->bucket_clock[READ].lock);
686         down_read(&ca->bucket_lock);
687
688         buckets = bucket_array(ca);
689
690         bch2_recalc_oldest_io(c, ca, READ);
691
692         /*
693          * Find buckets with lowest read priority, by building a maxheap sorted
694          * by read priority and repeatedly replacing the maximum element until
695          * all buckets have been visited.
696          */
697         for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
698                 struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
699                 unsigned long key = bucket_sort_key(c, ca, b, m);
700
701                 if (!bch2_can_invalidate_bucket(ca, b, m))
702                         continue;
703
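		/*
		 * Coalesce runs of adjacent buckets with identical sort keys
		 * into a single heap entry, to keep the heap small:
		 */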
704                 if (e.nr && e.bucket + e.nr == b && e.key == key) {
705                         e.nr++;
706                 } else {
707                         if (e.nr)
708                                 heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);
709
710                         e = (struct alloc_heap_entry) {
711                                 .bucket = b,
712                                 .nr     = 1,
713                                 .key    = key,
714                         };
715                 }
716
717                 cond_resched();
718         }
719
720         if (e.nr)
721                 heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);
722
723         up_read(&ca->bucket_lock);
724         mutex_unlock(&c->bucket_clock[READ].lock);
725
726         heap_resort(&ca->alloc_heap, bucket_alloc_cmp);
727
728         while (heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp)) {
729                 for (b = e.bucket;
730                      b < e.bucket + e.nr;
731                      b++) {
732                         if (fifo_full(&ca->free_inc))
733                                 return;
734
735                         bch2_invalidate_one_bucket(c, ca, b);
736                 }
737         }
738 }
739
740 static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
741 {
742         struct bucket_array *buckets = bucket_array(ca);
743         struct bucket_mark m;
744         size_t b, checked;
745
746         for (checked = 0;
747              checked < ca->mi.nbuckets && !fifo_full(&ca->free_inc);
748              checked++) {
749                 if (ca->fifo_last_bucket <  ca->mi.first_bucket ||
750                     ca->fifo_last_bucket >= ca->mi.nbuckets)
751                         ca->fifo_last_bucket = ca->mi.first_bucket;
752
753                 b = ca->fifo_last_bucket++;
754
755                 m = READ_ONCE(buckets->b[b].mark);
756
757                 if (bch2_can_invalidate_bucket(ca, b, m))
758                         bch2_invalidate_one_bucket(c, ca, b);
759
760                 cond_resched();
761         }
762 }
763
764 static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
765 {
766         struct bucket_array *buckets = bucket_array(ca);
767         struct bucket_mark m;
768         size_t checked;
769
770         for (checked = 0;
771              checked < ca->mi.nbuckets / 2 && !fifo_full(&ca->free_inc);
772              checked++) {
773                 size_t b = bch2_rand_range(ca->mi.nbuckets -
774                                            ca->mi.first_bucket) +
775                         ca->mi.first_bucket;
776
777                 m = READ_ONCE(buckets->b[b].mark);
778
779                 if (bch2_can_invalidate_bucket(ca, b, m))
780                         bch2_invalidate_one_bucket(c, ca, b);
781
782                 cond_resched();
783         }
784 }
785
786 static void find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
787 {
788         ca->inc_gen_needs_gc                    = 0;
789         ca->inc_gen_really_needs_gc             = 0;
790
791         switch (ca->mi.replacement) {
792         case CACHE_REPLACEMENT_LRU:
793                 find_reclaimable_buckets_lru(c, ca);
794                 break;
795         case CACHE_REPLACEMENT_FIFO:
796                 find_reclaimable_buckets_fifo(c, ca);
797                 break;
798         case CACHE_REPLACEMENT_RANDOM:
799                 find_reclaimable_buckets_random(c, ca);
800                 break;
801         }
802 }
803
804 static int size_t_cmp(const void *_l, const void *_r)
805 {
806         const size_t *l = _l, *r = _r;
807
808         return (*l > *r) - (*l < *r);
809 }
810
811 static void sort_free_inc(struct bch_fs *c, struct bch_dev *ca)
812 {
813         BUG_ON(ca->free_inc.front);
814
815         spin_lock(&c->freelist_lock);
816         sort(ca->free_inc.data,
817              ca->free_inc.back,
818              sizeof(ca->free_inc.data[0]),
819              size_t_cmp, NULL);
820         spin_unlock(&c->freelist_lock);
821 }
822
823 static int bch2_invalidate_free_inc(struct bch_fs *c, struct bch_dev *ca,
824                                     u64 *journal_seq, size_t nr,
825                                     bool nowait)
826 {
827         struct btree_iter iter;
828         int ret = 0;
829
830         bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS(ca->dev_idx, 0),
831                              BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
832
833         /* Only use nowait if we've already invalidated at least one bucket: */
834         while (ca->nr_invalidated < min(nr, fifo_used(&ca->free_inc))) {
835                 size_t b = fifo_idx_entry(&ca->free_inc, ca->nr_invalidated);
836
837                 ret = __bch2_alloc_write_key(c, ca, b, &iter, journal_seq,
838                                              nowait && ca->nr_invalidated);
839                 if (ret)
840                         break;
841
842                 ca->nr_invalidated++;
843         }
844
845         bch2_btree_iter_unlock(&iter);
846
847         /* If we used NOWAIT, don't return the error: */
848         return ca->nr_invalidated ? 0 : ret;
849 }
850
851 static bool __push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t bucket)
852 {
853         unsigned i;
854
855         /*
856          * Don't remove from free_inc until after it's added to
857          * freelist, so gc can find it:
858          */
859         spin_lock(&c->freelist_lock);
860         for (i = 0; i < RESERVE_NR; i++)
861                 if (fifo_push(&ca->free[i], bucket)) {
862                         fifo_pop(&ca->free_inc, bucket);
863                         --ca->nr_invalidated;
864                         closure_wake_up(&c->freelist_wait);
865                         spin_unlock(&c->freelist_lock);
866                         return true;
867                 }
868         spin_unlock(&c->freelist_lock);
869
870         return false;
871 }
872
873 static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t bucket)
874 {
875         int ret = 0;
876
877         while (1) {
878                 set_current_state(TASK_INTERRUPTIBLE);
879
880                 if (__push_invalidated_bucket(c, ca, bucket))
881                         break;
882
883                 if ((current->flags & PF_KTHREAD) &&
884                     kthread_should_stop()) {
885                         ret = 1;
886                         break;
887                 }
888
889                 schedule();
890                 try_to_freeze();
891         }
892
893         __set_current_state(TASK_RUNNING);
894         return ret;
895 }
896
897 /*
898  * Given an invalidated, ready to use bucket: issue a discard to it if enabled,
899  * then add it to the freelist, waiting until there's room if necessary:
900  */
901 static int discard_invalidated_buckets(struct bch_fs *c, struct bch_dev *ca)
902 {
903         while (ca->nr_invalidated) {
904                 size_t bucket = fifo_peek(&ca->free_inc);
905
906                 BUG_ON(fifo_empty(&ca->free_inc) || !ca->nr_invalidated);
907
908                 if (ca->mi.discard &&
909                     blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
910                         blkdev_issue_discard(ca->disk_sb.bdev,
911                                              bucket_to_sector(ca, bucket),
912                                              ca->mi.bucket_size, GFP_NOIO, 0);
913
914                 if (push_invalidated_bucket(c, ca, bucket))
915                         return 1;
916         }
917
918         return 0;
919 }
920
921 /**
922  * bch_allocator_thread - move buckets from free_inc to reserves
923  *
924  * The free_inc FIFO is populated by find_reclaimable_buckets(), and
925  * the reserves are depleted by bucket allocation. When we run out
926  * of free_inc, try to invalidate some buckets and write out
927  * prios and gens.
928  */
929 static int bch2_allocator_thread(void *arg)
930 {
931         struct bch_dev *ca = arg;
932         struct bch_fs *c = ca->fs;
933         u64 journal_seq;
934         int ret;
935
936         set_freezable();
937
938         while (1) {
939                 while (1) {
940                         cond_resched();
941
942                         pr_debug("discarding %zu invalidated buckets",
943                                  ca->nr_invalidated);
944
945                         ret = discard_invalidated_buckets(c, ca);
946                         if (ret)
947                                 goto stop;
948
949                         if (fifo_empty(&ca->free_inc))
950                                 break;
951
952                         pr_debug("invalidating %zu buckets",
953                                  fifo_used(&ca->free_inc));
954
955                         journal_seq = 0;
956                         ret = bch2_invalidate_free_inc(c, ca, &journal_seq,
957                                                        SIZE_MAX, true);
958                         if (ret) {
959                                 bch_err(ca, "error invalidating buckets: %i", ret);
960                                 goto stop;
961                         }
962
963                         if (!ca->nr_invalidated) {
964                                 bch_err(ca, "allocator thread unable to make forward progress!");
965                                 goto stop;
966                         }
967
968                         if (ca->allocator_invalidating_data)
969                                 ret = bch2_journal_flush_seq(&c->journal, journal_seq);
970                         else if (ca->allocator_journal_seq_flush)
971                                 ret = bch2_journal_flush_seq(&c->journal,
972                                                        ca->allocator_journal_seq_flush);
973
974                         /*
975                          * journal error - buckets haven't actually been
976                          * invalidated, can't discard them:
977                          */
978                         if (ret) {
979                                 bch_err(ca, "journal error: %i", ret);
980                                 goto stop;
981                         }
982                 }
983
984                 pr_debug("free_inc now empty");
985
986                 /* Reset front/back so we can easily sort fifo entries later: */
987                 ca->free_inc.front = ca->free_inc.back  = 0;
988                 ca->allocator_journal_seq_flush         = 0;
989                 ca->allocator_invalidating_data         = false;
990
991                 down_read(&c->gc_lock);
992                 while (1) {
993                         size_t prev = fifo_used(&ca->free_inc);
994
995                         if (test_bit(BCH_FS_GC_FAILURE, &c->flags)) {
996                                 up_read(&c->gc_lock);
997                                 bch_err(ca, "gc failure");
998                                 goto stop;
999                         }
1000
1001                         /*
1002                          * Find some buckets that we can invalidate, either
1003                          * they're completely unused, or only contain clean data
1004                          * that's been written back to the backing device or
1005                          * another cache tier
1006                          */
1007
1008                         pr_debug("scanning for reclaimable buckets");
1009
1010                         find_reclaimable_buckets(c, ca);
1011
1012                         pr_debug("found %zu buckets (free_inc %zu/%zu)",
1013                                  fifo_used(&ca->free_inc) - prev,
1014                                  fifo_used(&ca->free_inc), ca->free_inc.size);
1015
1016                         trace_alloc_batch(ca, fifo_used(&ca->free_inc),
1017                                           ca->free_inc.size);
1018
1019                         if ((ca->inc_gen_needs_gc >= ca->free_inc.size ||
1020                              (!fifo_full(&ca->free_inc) &&
1021                               ca->inc_gen_really_needs_gc >=
1022                               fifo_free(&ca->free_inc))) &&
1023                             c->gc_thread) {
1024                                 atomic_inc(&c->kick_gc);
1025                                 wake_up_process(c->gc_thread);
1026                         }
1027
1028                         if (fifo_full(&ca->free_inc))
1029                                 break;
1030
1031                         if (!fifo_empty(&ca->free_inc) &&
1032                             !fifo_full(&ca->free[RESERVE_MOVINGGC]))
1033                                 break;
1034
1035                         /*
1036                          * copygc may be waiting until either its reserve fills
1037                          * up, or we can't make forward progress:
1038                          */
1039                         ca->allocator_blocked = true;
1040                         closure_wake_up(&c->freelist_wait);
1041
1042                         ret = wait_buckets_available(c, ca);
1043                         if (ret) {
1044                                 up_read(&c->gc_lock);
1045                                 goto stop;
1046                         }
1047                 }
1048
1049                 ca->allocator_blocked = false;
1050                 up_read(&c->gc_lock);
1051
1052                 pr_debug("free_inc now %zu/%zu",
1053                          fifo_used(&ca->free_inc),
1054                          ca->free_inc.size);
1055
1056                 sort_free_inc(c, ca);
1057
1058                 /*
1059                  * free_inc is now full of newly-invalidated buckets: next,
1060                  * write out the new bucket gens:
1061                  */
1062         }
1063
1064 stop:
1065         pr_debug("alloc thread stopping (ret %i)", ret);
1066         return 0;
1067 }
1068
1069 /* Allocation */
1070
1071 /*
1072  * Open buckets represent a bucket that's currently being allocated from.  They
1073  * serve two purposes:
1074  *
1075  *  - They track buckets that have been partially allocated, allowing for
1076  *    sub-bucket sized allocations - they're used by the sector allocator below
1077  *
1078  *  - They provide a reference to the buckets they own that mark and sweep GC
1079  *    can find, until the new allocation has a pointer to it inserted into the
1080  *    btree
1081  *
1082  * When allocating some space with the sector allocator, the allocation comes
1083  * with a reference to an open bucket - the caller is required to put that
1084  * reference _after_ doing the index update that makes its allocation reachable.
1085  */
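
/*
 * Sketch of the expected ordering for a caller (using names from this file;
 * the real write path lives elsewhere):
 *
 *	1) allocate space:	bch2_alloc_sectors_start()
 *	2) write to that space
 *	3) index update:	insert the extent pointing at the new allocation
 *	4) only then:		drop the reference with bch2_open_bucket_put()
 */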
1086
1087 void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
1088 {
1089         struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
1090
1091         percpu_down_read_preempt_disable(&c->usage_lock);
1092         spin_lock(&ob->lock);
1093
1094         bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr),
1095                                false, gc_pos_alloc(c, ob), 0);
1096         ob->valid = false;
1097
1098         spin_unlock(&ob->lock);
1099         percpu_up_read_preempt_enable(&c->usage_lock);
1100
1101         spin_lock(&c->freelist_lock);
1102         ob->freelist = c->open_buckets_freelist;
1103         c->open_buckets_freelist = ob - c->open_buckets;
1104         c->open_buckets_nr_free++;
1105         spin_unlock(&c->freelist_lock);
1106
1107         closure_wake_up(&c->open_buckets_wait);
1108 }
1109
1110 static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
1111 {
1112         struct open_bucket *ob;
1113
1114         BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);
1115
1116         ob = c->open_buckets + c->open_buckets_freelist;
1117         c->open_buckets_freelist = ob->freelist;
1118         atomic_set(&ob->pin, 1);
1119
1120         c->open_buckets_nr_free--;
1121         return ob;
1122 }
1123
1124 /* _only_ for allocating the journal on a new device: */
1125 long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
1126 {
1127         struct bucket_array *buckets;
1128         ssize_t b;
1129
1130         rcu_read_lock();
1131         buckets = bucket_array(ca);
1132
1133         for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++)
1134                 if (is_available_bucket(buckets->b[b].mark))
1135                         goto success;
1136         b = -1;
1137 success:
1138         rcu_read_unlock();
1139         return b;
1140 }
1141
1142 static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
1143 {
1144         switch (reserve) {
1145         case RESERVE_ALLOC:
1146                 return 0;
1147         case RESERVE_BTREE:
1148                 return BTREE_NODE_RESERVE / 2;
1149         default:
1150                 return BTREE_NODE_RESERVE;
1151         }
1152 }
1153
1154 /**
1155  * bch2_bucket_alloc - allocate a single bucket from a specific device
1156  *
1157  * Returns an index into c->open_buckets on success, or a negative error on failure
1158  */
1159 int bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
1160                       enum alloc_reserve reserve,
1161                       bool may_alloc_partial,
1162                       struct closure *cl)
1163 {
1164         struct bucket_array *buckets;
1165         struct open_bucket *ob;
1166         long bucket;
1167
1168         spin_lock(&c->freelist_lock);
1169
1170         if (may_alloc_partial &&
1171             ca->open_buckets_partial_nr) {
1172                 int ret = ca->open_buckets_partial[--ca->open_buckets_partial_nr];
1173                 c->open_buckets[ret].on_partial_list = false;
1174                 spin_unlock(&c->freelist_lock);
1175                 return ret;
1176         }
1177
1178         if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
1179                 if (cl)
1180                         closure_wait(&c->open_buckets_wait, cl);
1181                 spin_unlock(&c->freelist_lock);
1182                 trace_open_bucket_alloc_fail(ca, reserve);
1183                 return OPEN_BUCKETS_EMPTY;
1184         }
1185
1186         if (likely(fifo_pop(&ca->free[RESERVE_NONE], bucket)))
1187                 goto out;
1188
1189         switch (reserve) {
1190         case RESERVE_ALLOC:
1191                 if (fifo_pop(&ca->free[RESERVE_BTREE], bucket))
1192                         goto out;
1193                 break;
1194         case RESERVE_BTREE:
1195                 if (fifo_used(&ca->free[RESERVE_BTREE]) * 2 >=
1196                     ca->free[RESERVE_BTREE].size &&
1197                     fifo_pop(&ca->free[RESERVE_BTREE], bucket))
1198                         goto out;
1199                 break;
1200         case RESERVE_MOVINGGC:
1201                 if (fifo_pop(&ca->free[RESERVE_MOVINGGC], bucket))
1202                         goto out;
1203                 break;
1204         default:
1205                 break;
1206         }
1207
1208         if (cl)
1209                 closure_wait(&c->freelist_wait, cl);
1210
1211         spin_unlock(&c->freelist_lock);
1212
1213         trace_bucket_alloc_fail(ca, reserve);
1214         return FREELIST_EMPTY;
1215 out:
1216         verify_not_on_freelist(c, ca, bucket);
1217
1218         ob = bch2_open_bucket_alloc(c);
1219
1220         spin_lock(&ob->lock);
1221         buckets = bucket_array(ca);
1222
1223         ob->valid       = true;
1224         ob->sectors_free = ca->mi.bucket_size;
1225         ob->ptr         = (struct bch_extent_ptr) {
1226                 .gen    = buckets->b[bucket].mark.gen,
1227                 .offset = bucket_to_sector(ca, bucket),
1228                 .dev    = ca->dev_idx,
1229         };
1230
1231         bucket_io_clock_reset(c, ca, bucket, READ);
1232         bucket_io_clock_reset(c, ca, bucket, WRITE);
1233         spin_unlock(&ob->lock);
1234
1235         spin_unlock(&c->freelist_lock);
1236
1237         bch2_wake_allocator(ca);
1238
1239         trace_bucket_alloc(ca, reserve);
1240         return ob - c->open_buckets;
1241 }
1242
1243 static int __dev_alloc_cmp(struct write_point *wp,
1244                            unsigned l, unsigned r)
1245 {
1246         return ((wp->next_alloc[l] > wp->next_alloc[r]) -
1247                 (wp->next_alloc[l] < wp->next_alloc[r]));
1248 }
1249
1250 #define dev_alloc_cmp(l, r) __dev_alloc_cmp(wp, l, r)
1251
1252 struct dev_alloc_list bch2_wp_alloc_list(struct bch_fs *c,
1253                                          struct write_point *wp,
1254                                          struct bch_devs_mask *devs)
1255 {
1256         struct dev_alloc_list ret = { .nr = 0 };
1257         struct bch_dev *ca;
1258         unsigned i;
1259
1260         for_each_member_device_rcu(ca, c, i, devs)
1261                 ret.devs[ret.nr++] = i;
1262
1263         bubble_sort(ret.devs, ret.nr, dev_alloc_cmp);
1264         return ret;
1265 }
1266
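/*
 * Writepoint device weighting: each device's next_alloc counter grows by the
 * inverse of its free space (emptier devices grow faster and thus sort later
 * in bch2_wp_alloc_list()), and every counter is then decayed so that old
 * history ages out.
 */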
1267 void bch2_wp_rescale(struct bch_fs *c, struct bch_dev *ca,
1268                      struct write_point *wp)
1269 {
1270         u64 *v = wp->next_alloc + ca->dev_idx;
1271         u64 free_space = dev_buckets_free(c, ca);
1272         u64 free_space_inv = free_space
1273                 ? div64_u64(1ULL << 48, free_space)
1274                 : 1ULL << 48;
1275         u64 scale = *v / 4;
1276
1277         if (*v + free_space_inv >= *v)
1278                 *v += free_space_inv;
1279         else
1280                 *v = U64_MAX;
1281
1282         for (v = wp->next_alloc;
1283              v < wp->next_alloc + ARRAY_SIZE(wp->next_alloc); v++)
1284                 *v = *v < scale ? 0 : *v - scale;
1285 }
1286
1287 static enum bucket_alloc_ret bch2_bucket_alloc_set(struct bch_fs *c,
1288                                         struct write_point *wp,
1289                                         unsigned nr_replicas,
1290                                         enum alloc_reserve reserve,
1291                                         struct bch_devs_mask *devs,
1292                                         struct closure *cl)
1293 {
1294         enum bucket_alloc_ret ret = NO_DEVICES;
1295         struct dev_alloc_list devs_sorted;
1296         struct bch_dev *ca;
1297         unsigned i, nr_ptrs_effective = 0;
1298         bool have_cache_dev = false;
1299
1300         BUG_ON(nr_replicas > ARRAY_SIZE(wp->ptrs));
1301
1302         for (i = wp->first_ptr; i < wp->nr_ptrs; i++) {
1303                 ca = bch_dev_bkey_exists(c, wp->ptrs[i]->ptr.dev);
1304
1305                 nr_ptrs_effective += ca->mi.durability;
1306                 have_cache_dev |= !ca->mi.durability;
1307         }
1308
1309         if (nr_ptrs_effective >= nr_replicas)
1310                 return ALLOC_SUCCESS;
1311
1312         devs_sorted = bch2_wp_alloc_list(c, wp, devs);
1313
1314         for (i = 0; i < devs_sorted.nr; i++) {
1315                 int ob;
1316
1317                 ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
1318                 if (!ca)
1319                         continue;
1320
1321                 if (!ca->mi.durability &&
1322                     (have_cache_dev ||
1323                      wp->type != BCH_DATA_USER))
1324                         continue;
1325
1326                 ob = bch2_bucket_alloc(c, ca, reserve,
1327                                        wp->type == BCH_DATA_USER, cl);
1328                 if (ob < 0) {
1329                         ret = ob;
1330                         if (ret == OPEN_BUCKETS_EMPTY)
1331                                 break;
1332                         continue;
1333                 }
1334
1335                 BUG_ON(ob <= 0 || ob > U8_MAX);
1336                 BUG_ON(wp->nr_ptrs >= ARRAY_SIZE(wp->ptrs));
1337
1338                 wp->ptrs[wp->nr_ptrs++] = c->open_buckets + ob;
1339
1340                 bch2_wp_rescale(c, ca, wp);
1341
1342                 nr_ptrs_effective += ca->mi.durability;
1343                 have_cache_dev |= !ca->mi.durability;
1344
1345                 __clear_bit(ca->dev_idx, devs->d);
1346
1347                 if (nr_ptrs_effective >= nr_replicas) {
1348                         ret = ALLOC_SUCCESS;
1349                         break;
1350                 }
1351         }
1352
1353         EBUG_ON(reserve == RESERVE_MOVINGGC &&
1354                 ret != ALLOC_SUCCESS &&
1355                 ret != OPEN_BUCKETS_EMPTY);
1356
1357         switch (ret) {
1358         case ALLOC_SUCCESS:
1359                 return 0;
1360         case NO_DEVICES:
1361                 return -EROFS;
1362         case FREELIST_EMPTY:
1363         case OPEN_BUCKETS_EMPTY:
1364                 return cl ? -EAGAIN : -ENOSPC;
1365         default:
1366                 BUG();
1367         }
1368 }
1369
1370 /* Sector allocator */
1371
1372 static void writepoint_drop_ptr(struct bch_fs *c,
1373                                 struct write_point *wp,
1374                                 unsigned i)
1375 {
1376         struct open_bucket *ob = wp->ptrs[i];
1377         struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
1378
1379         BUG_ON(ca->open_buckets_partial_nr >=
1380                ARRAY_SIZE(ca->open_buckets_partial));
1381
1382         if (wp->type == BCH_DATA_USER) {
1383                 spin_lock(&c->freelist_lock);
1384                 ob->on_partial_list = true;
1385                 ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
1386                         ob - c->open_buckets;
1387                 spin_unlock(&c->freelist_lock);
1388
1389                 closure_wake_up(&c->open_buckets_wait);
1390                 closure_wake_up(&c->freelist_wait);
1391         } else {
1392                 bch2_open_bucket_put(c, ob);
1393         }
1394
1395         array_remove_item(wp->ptrs, wp->nr_ptrs, i);
1396
1397         if (i < wp->first_ptr)
1398                 wp->first_ptr--;
1399 }
1400
1401 static void writepoint_drop_ptrs(struct bch_fs *c,
1402                                  struct write_point *wp,
1403                                  u16 target, bool in_target)
1404 {
1405         int i;
1406
1407         for (i = wp->first_ptr - 1; i >= 0; --i)
1408                 if (bch2_dev_in_target(c, wp->ptrs[i]->ptr.dev,
1409                                        target) == in_target)
1410                         writepoint_drop_ptr(c, wp, i);
1411 }
1412
1413 static void verify_not_stale(struct bch_fs *c, const struct write_point *wp)
1414 {
1415 #ifdef CONFIG_BCACHEFS_DEBUG
1416         struct open_bucket *ob;
1417         unsigned i;
1418
1419         writepoint_for_each_ptr_all(wp, ob, i) {
1420                 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
1421
1422                 BUG_ON(ptr_stale(ca, &ob->ptr));
1423         }
1424 #endif
1425 }
1426
1427 static int open_bucket_add_buckets(struct bch_fs *c,
1428                                    u16 target,
1429                                    struct write_point *wp,
1430                                    struct bch_devs_list *devs_have,
1431                                    unsigned nr_replicas,
1432                                    enum alloc_reserve reserve,
1433                                    struct closure *cl)
1434 {
1435         struct bch_devs_mask devs = c->rw_devs[wp->type];
1436         const struct bch_devs_mask *t;
1437         struct open_bucket *ob;
1438         unsigned i;
1439         int ret;
1440
1441         percpu_down_read_preempt_disable(&c->usage_lock);
1442         rcu_read_lock();
1443
1444         /* Don't allocate from devices we already have pointers to: */
1445         for (i = 0; i < devs_have->nr; i++)
1446                 __clear_bit(devs_have->devs[i], devs.d);
1447
1448         writepoint_for_each_ptr_all(wp, ob, i)
1449                 __clear_bit(ob->ptr.dev, devs.d);
1450
1451         t = bch2_target_to_mask(c, target);
1452         if (t)
1453                 bitmap_and(devs.d, devs.d, t->d, BCH_SB_MEMBERS_MAX);
1454
1455         ret = bch2_bucket_alloc_set(c, wp, nr_replicas, reserve, &devs, cl);
1456
1457         rcu_read_unlock();
1458         percpu_up_read_preempt_enable(&c->usage_lock);
1459
1460         return ret;
1461 }
1462
1463 static struct write_point *__writepoint_find(struct hlist_head *head,
1464                                              unsigned long write_point)
1465 {
1466         struct write_point *wp;
1467
1468         hlist_for_each_entry_rcu(wp, head, node)
1469                 if (wp->write_point == write_point)
1470                         return wp;
1471
1472         return NULL;
1473 }
1474
1475 static struct hlist_head *writepoint_hash(struct bch_fs *c,
1476                                           unsigned long write_point)
1477 {
1478         unsigned hash =
1479                 hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));
1480
1481         return &c->write_points_hash[hash];
1482 }
1483
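/*
 * Find the write point for a given write_point cookie, returning it locked:
 * cookies with the low bit clear are direct pointers to a write_point; odd
 * cookies are hashed, and if no matching write point is found the least
 * recently used one is stolen and rehashed under write_points_hash_lock.
 */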
1484 static struct write_point *writepoint_find(struct bch_fs *c,
1485                                            unsigned long write_point)
1486 {
1487         struct write_point *wp, *oldest;
1488         struct hlist_head *head;
1489
1490         if (!(write_point & 1UL)) {
1491                 wp = (struct write_point *) write_point;
1492                 mutex_lock(&wp->lock);
1493                 return wp;
1494         }
1495
1496         head = writepoint_hash(c, write_point);
1497 restart_find:
1498         wp = __writepoint_find(head, write_point);
1499         if (wp) {
1500 lock_wp:
1501                 mutex_lock(&wp->lock);
1502                 if (wp->write_point == write_point)
1503                         goto out;
1504                 mutex_unlock(&wp->lock);
1505                 goto restart_find;
1506         }
1507
1508         oldest = NULL;
1509         for (wp = c->write_points;
1510              wp < c->write_points + ARRAY_SIZE(c->write_points);
1511              wp++)
1512                 if (!oldest || time_before64(wp->last_used, oldest->last_used))
1513                         oldest = wp;
1514
1515         mutex_lock(&oldest->lock);
1516         mutex_lock(&c->write_points_hash_lock);
1517         wp = __writepoint_find(head, write_point);
1518         if (wp && wp != oldest) {
1519                 mutex_unlock(&c->write_points_hash_lock);
1520                 mutex_unlock(&oldest->lock);
1521                 goto lock_wp;
1522         }
1523
1524         wp = oldest;
1525         hlist_del_rcu(&wp->node);
1526         wp->write_point = write_point;
1527         hlist_add_head_rcu(&wp->node, head);
1528         mutex_unlock(&c->write_points_hash_lock);
1529 out:
1530         wp->last_used = sched_clock();
1531         return wp;
1532 }
1533
1534 /*
1535  * Get us an open_bucket we can allocate from, return with it locked:
1536  */
1537 struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
1538                                 unsigned target,
1539                                 struct write_point_specifier write_point,
1540                                 struct bch_devs_list *devs_have,
1541                                 unsigned nr_replicas,
1542                                 unsigned nr_replicas_required,
1543                                 enum alloc_reserve reserve,
1544                                 unsigned flags,
1545                                 struct closure *cl)
1546 {
1547         struct write_point *wp;
1548         struct open_bucket *ob;
1549         struct bch_dev *ca;
1550         unsigned nr_ptrs_have, nr_ptrs_effective;
1551         int ret, i, cache_idx = -1;
1552
1553         BUG_ON(!nr_replicas || !nr_replicas_required);
1554
1555         wp = writepoint_find(c, write_point.v);
1556
1557         wp->first_ptr = 0;
1558
1559         /* does writepoint have ptrs we can't use? */
1560         writepoint_for_each_ptr(wp, ob, i)
1561                 if (bch2_dev_list_has_dev(*devs_have, ob->ptr.dev)) {
1562                         swap(wp->ptrs[i], wp->ptrs[wp->first_ptr]);
1563                         wp->first_ptr++;
1564                 }
1565
1566         nr_ptrs_have = wp->first_ptr;
1567
1568         /* does writepoint have ptrs we don't want to use? */
1569         if (target)
1570                 writepoint_for_each_ptr(wp, ob, i)
1571                         if (!bch2_dev_in_target(c, ob->ptr.dev, target)) {
1572                                 swap(wp->ptrs[i], wp->ptrs[wp->first_ptr]);
1573                                 wp->first_ptr++;
1574                         }
1575
1576         if (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS) {
1577                 ret = open_bucket_add_buckets(c, target, wp, devs_have,
1578                                               nr_replicas, reserve, cl);
1579         } else {
1580                 ret = open_bucket_add_buckets(c, target, wp, devs_have,
1581                                               nr_replicas, reserve, NULL);
1582                 if (!ret)
1583                         goto alloc_done;
1584
1585                 wp->first_ptr = nr_ptrs_have;
1586
1587                 ret = open_bucket_add_buckets(c, 0, wp, devs_have,
1588                                               nr_replicas, reserve, cl);
1589         }
1590
1591         if (ret && ret != -EROFS)
1592                 goto err;
1593 alloc_done:
1594         /* check for more than one cache: */
1595         for (i = wp->nr_ptrs - 1; i >= wp->first_ptr; --i) {
1596                 ca = bch_dev_bkey_exists(c, wp->ptrs[i]->ptr.dev);
1597
1598                 if (ca->mi.durability)
1599                         continue;
1600
1601                 /*
1602                  * if we ended up with more than one cache device, prefer the
1603                  * one in the target we want:
1604                  */
1605                 if (cache_idx >= 0) {
1606                         if (!bch2_dev_in_target(c, wp->ptrs[i]->ptr.dev,
1607                                                 target)) {
1608                                 writepoint_drop_ptr(c, wp, i);
1609                         } else {
1610                                 writepoint_drop_ptr(c, wp, cache_idx);
1611                                 cache_idx = i;
1612                         }
1613                 } else {
1614                         cache_idx = i;
1615                 }
1616         }
1617
1618         /* we might have more effective replicas than required: */
1619         nr_ptrs_effective = 0;
1620         writepoint_for_each_ptr(wp, ob, i) {
1621                 ca = bch_dev_bkey_exists(c, ob->ptr.dev);
1622                 nr_ptrs_effective += ca->mi.durability;
1623         }
1624
1625         if (ret == -EROFS &&
1626             nr_ptrs_effective >= nr_replicas_required)
1627                 ret = 0;
1628
1629         if (ret)
1630                 goto err;
1631
1632         if (nr_ptrs_effective > nr_replicas) {
1633                 writepoint_for_each_ptr(wp, ob, i) {
1634                         ca = bch_dev_bkey_exists(c, ob->ptr.dev);
1635
1636                         if (ca->mi.durability &&
1637                             ca->mi.durability <= nr_ptrs_effective - nr_replicas &&
1638                             !bch2_dev_in_target(c, ob->ptr.dev, target)) {
1639                                 swap(wp->ptrs[i], wp->ptrs[wp->first_ptr]);
1640                                 wp->first_ptr++;
1641                                 nr_ptrs_effective -= ca->mi.durability;
1642                         }
1643                 }
1644         }
1645
1646         if (nr_ptrs_effective > nr_replicas) {
1647                 writepoint_for_each_ptr(wp, ob, i) {
1648                         ca = bch_dev_bkey_exists(c, ob->ptr.dev);
1649
1650                         if (ca->mi.durability &&
1651                             ca->mi.durability <= nr_ptrs_effective - nr_replicas) {
1652                                 swap(wp->ptrs[i], wp->ptrs[wp->first_ptr]);
1653                                 wp->first_ptr++;
1654                                 nr_ptrs_effective -= ca->mi.durability;
1655                         }
1656                 }
1657         }
1658
1659         /* Remove pointers we don't want to use: */
1660         if (target)
1661                 writepoint_drop_ptrs(c, wp, target, false);
1662
1663         BUG_ON(wp->first_ptr >= wp->nr_ptrs);
1664         BUG_ON(nr_ptrs_effective < nr_replicas_required);
1665
1666         wp->sectors_free = UINT_MAX;
1667
1668         writepoint_for_each_ptr(wp, ob, i)
1669                 wp->sectors_free = min(wp->sectors_free, ob->sectors_free);
1670
1671         BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);
1672
1673         verify_not_stale(c, wp);
1674
1675         return wp;
1676 err:
1677         mutex_unlock(&wp->lock);
1678         return ERR_PTR(ret);
1679 }
1680
1681 /*
1682  * Append pointers for the space we just allocated to @e, and mark @sectors of
1683  * that space as allocated out of @wp's open buckets
1684  */
1685 void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
1686                                     struct bkey_i_extent *e, unsigned sectors)
1687 {
1688         struct open_bucket *ob;
1689         unsigned i;
1690
1691         BUG_ON(sectors > wp->sectors_free);
1692         wp->sectors_free -= sectors;
1693
1694         writepoint_for_each_ptr(wp, ob, i) {
1695                 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
1696                 struct bch_extent_ptr tmp = ob->ptr;
1697
1698                 EBUG_ON(bch2_extent_has_device(extent_i_to_s_c(e), ob->ptr.dev));
1699
1700                 tmp.cached = bkey_extent_is_cached(&e->k) ||
1701                         (!ca->mi.durability && wp->type == BCH_DATA_USER);
1702
1703                 tmp.offset += ca->mi.bucket_size - ob->sectors_free;
1704                 extent_ptr_append(e, tmp);
1705
1706                 BUG_ON(sectors > ob->sectors_free);
1707                 ob->sectors_free -= sectors;
1708         }
1709 }
1710
1711 /*
1712  * Release @wp: drop open buckets that are out of space, put their references,
1713  * and unlock the write point
1714  */
1715 void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
1716 {
1717         int i;
1718
1719         for (i = wp->nr_ptrs - 1; i >= 0; --i) {
1720                 struct open_bucket *ob = wp->ptrs[i];
1721
1722                 if (!ob->sectors_free) {
1723                         array_remove_item(wp->ptrs, wp->nr_ptrs, i);
1724                         bch2_open_bucket_put(c, ob);
1725                 }
1726         }
1727
1728         mutex_unlock(&wp->lock);
1729 }
1730
1731 /* Startup/shutdown (ro/rw): */
1732
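/*
 * Recompute c->capacity: the usable sectors summed over all rw members,
 * scaled down by gc_reserve_percent and capped so that the per device
 * reserves (the freelist fifos, free_inc, and one bucket per write point)
 * always remain available.
 */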
1733 void bch2_recalc_capacity(struct bch_fs *c)
1734 {
1735         struct bch_dev *ca;
1736         u64 total_capacity, capacity = 0, reserved_sectors = 0;
1737         unsigned long ra_pages = 0;
1738         unsigned i, j;
1739
1740         lockdep_assert_held(&c->state_lock);
1741
1742         for_each_online_member(ca, c, i) {
1743                 struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_bdi;
1744
1745                 ra_pages += bdi->ra_pages;
1746         }
1747
1748         bch2_set_ra_pages(c, ra_pages);
1749
1750         for_each_rw_member(ca, c, i) {
1751                 size_t reserve = 0;
1752
1753                 /*
1754                  * We need to reserve buckets (from the number
1755                  * of currently available buckets) against
1756                  * foreground writes, so that copygc in
1757                  * particular can make forward progress.
1758                  *
1759                  * We need enough to refill the various reserves
1760                  * from scratch - copygc will use its entire
1761                  * reserve all at once, then run again once its
1762                  * reserve has been refilled (from the formerly
1763                  * available buckets).
1764                  *
1765                  * This reserve is only used when deciding whether
1766                  * allocations for foreground writes must wait -
1767                  * it is not part of the -ENOSPC calculations.
1768                  */
1769                 for (j = 0; j < RESERVE_NONE; j++)
1770                         reserve += ca->free[j].size;
1771
1772                 reserve += ca->free_inc.size;
1773
1774                 reserve += ARRAY_SIZE(c->write_points);
1775
1776                 reserve += 1;   /* btree write point */
1777
1778                 reserved_sectors += bucket_to_sector(ca, reserve);
1779
1780                 capacity += bucket_to_sector(ca, ca->mi.nbuckets -
1781                                              ca->mi.first_bucket);
1782         }
1783
1784         total_capacity = capacity;
1785
1786         capacity *= (100 - c->opts.gc_reserve_percent);
1787         capacity = div64_u64(capacity, 100);
1788
1789         BUG_ON(reserved_sectors > total_capacity);
1790
1791         capacity = min(capacity, total_capacity - reserved_sectors);
1792
1793         c->capacity = capacity;
1794
1795         if (c->capacity) {
1796                 bch2_io_timer_add(&c->io_clock[READ],
1797                                  &c->bucket_clock[READ].rescale);
1798                 bch2_io_timer_add(&c->io_clock[WRITE],
1799                                  &c->bucket_clock[WRITE].rescale);
1800         } else {
1801                 bch2_io_timer_del(&c->io_clock[READ],
1802                                  &c->bucket_clock[READ].rescale);
1803                 bch2_io_timer_del(&c->io_clock[WRITE],
1804                                  &c->bucket_clock[WRITE].rescale);
1805         }
1806
1807         /* Wake up in case someone was waiting for buckets */
1808         closure_wake_up(&c->freelist_wait);
1809 }
1810
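/*
 * Drop @ca's pointers from @wp so nothing further is allocated on this device
 * through that write point (used when the device goes read-only).
 */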
1811 static void bch2_stop_write_point(struct bch_fs *c, struct bch_dev *ca,
1812                                   struct write_point *wp)
1813 {
1818         mutex_lock(&wp->lock);
1819         wp->first_ptr = wp->nr_ptrs;
1820         writepoint_drop_ptrs(c, wp, dev_to_target(ca->dev_idx), true);
1821         mutex_unlock(&wp->lock);
1822 }
1823
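/*
 * Returns true if any valid open bucket not on a partial list still points at
 * @ca - i.e. there may still be writes in flight to this device.
 */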
1824 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
1825 {
1826         struct open_bucket *ob;
1827         bool ret = false;
1828
1829         for (ob = c->open_buckets;
1830              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1831              ob++) {
1832                 spin_lock(&ob->lock);
1833                 if (ob->valid && !ob->on_partial_list &&
1834                     ob->ptr.dev == ca->dev_idx)
1835                         ret = true;
1836                 spin_unlock(&ob->lock);
1837         }
1838
1839         return ret;
1840 }
1841
1842 /* device goes ro: */
1843 void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
1844 {
1845         unsigned i;
1846
1847         BUG_ON(ca->alloc_thread);
1848
1849         /* First, remove device from allocation groups: */
1850
1851         for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1852                 clear_bit(ca->dev_idx, c->rw_devs[i].d);
1853
1854         /*
1855          * Capacity is calculated based on the devices in the allocation groups:
1856          */
1857         bch2_recalc_capacity(c);
1858
1859         /* Next, close write points that point to this device... */
1860         for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
1861                 bch2_stop_write_point(c, ca, &c->write_points[i]);
1862
1863         bch2_stop_write_point(c, ca, &ca->copygc_write_point);
1864         bch2_stop_write_point(c, ca, &c->rebalance_write_point);
1865         bch2_stop_write_point(c, ca, &c->btree_write_point);
1866
1867         mutex_lock(&c->btree_reserve_cache_lock);
1868         while (c->btree_reserve_cache_nr) {
1869                 struct btree_alloc *a =
1870                         &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
1871
1872                 bch2_open_bucket_put_refs(c, &a->ob.nr, a->ob.refs);
1873         }
1874         mutex_unlock(&c->btree_reserve_cache_lock);
1875
1876         /*
1877          * Wake up threads blocked on allocation so they can notice this device
1878          * is no longer available for allocation and the capacity has changed:
1879          */
1880         closure_wake_up(&c->freelist_wait);
1881
1882         /*
1883          * journal_res_get() can block waiting for free space in the journal -
1884          * it needs to notice there may not be devices to allocate from anymore:
1885          */
1886         wake_up(&c->journal.wait);
1887
1888         /* Now wait for any in flight writes: */
1889
1890         closure_wait_event(&c->open_buckets_wait,
1891                            !bch2_dev_has_open_write_point(c, ca));
1892 }
1893
1894 /* device goes rw: */
1895 void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
1896 {
1897         unsigned i;
1898
1899         for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1900                 if (ca->mi.data_allowed & (1 << i))
1901                         set_bit(ca->dev_idx, c->rw_devs[i].d);
1902 }
1903
1904 /* stop allocator thread: */
1905 void bch2_dev_allocator_stop(struct bch_dev *ca)
1906 {
1907         struct task_struct *p;
1908
1909         p = rcu_dereference_protected(ca->alloc_thread, 1);
1910         ca->alloc_thread = NULL;
1911
1912         /*
1913          * We need an rcu barrier between setting ca->alloc_thread = NULL and
1914          * the thread shutting down to avoid bch2_wake_allocator() racing:
1915          *
1916          * XXX: it would be better to have the rcu barrier be asynchronous
1917          * instead of blocking us here
1918          */
1919         synchronize_rcu();
1920
1921         if (p) {
1922                 kthread_stop(p);
1923                 put_task_struct(p);
1924         }
1925 }
1926
1927 /* start allocator thread: */
1928 int bch2_dev_allocator_start(struct bch_dev *ca)
1929 {
1930         struct task_struct *p;
1931
1932         /*
1933          * allocator thread already started?
1934          */
1935         if (ca->alloc_thread)
1936                 return 0;
1937
1938         p = kthread_create(bch2_allocator_thread, ca,
1939                            "bch_alloc[%s]", ca->name);
1940         if (IS_ERR(p))
1941                 return PTR_ERR(p);
1942
1943         get_task_struct(p);
1944         rcu_assign_pointer(ca->alloc_thread, p);
1945         wake_up_process(p);
1946         return 0;
1947 }
1948
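/*
 * Issue discards for the buckets at the front of each device's free_inc fifo
 * that have already been invalidated (the first ca->nr_invalidated entries).
 */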
1949 static void allocator_start_issue_discards(struct bch_fs *c)
1950 {
1951         struct bch_dev *ca;
1952         unsigned dev_iter;
1953         size_t i, bu;
1954
1955         for_each_rw_member(ca, c, dev_iter) {
1956                 unsigned done = 0;
1957
1958                 fifo_for_each_entry(bu, &ca->free_inc, i) {
1959                         if (done == ca->nr_invalidated)
1960                                 break;
1961
1962                         blkdev_issue_discard(ca->disk_sb.bdev,
1963                                              bucket_to_sector(ca, bu),
1964                                              ca->mi.bucket_size, GFP_NOIO, 0);
1965                         done++;
1966                 }
1967         }
1968 }
1969
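/*
 * Prime the freelists at startup, before the per device allocator threads are
 * running: first scan the alloc btree for buckets that are already empty; if
 * that doesn't give every device enough for its btree reserve, reclaim and
 * invalidate buckets the slow way - possibly holding off btree writes and
 * flushing the journal before discards can be issued.
 */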
1970 static int __bch2_fs_allocator_start(struct bch_fs *c)
1971 {
1972         struct bch_dev *ca;
1973         size_t bu, i;
1974         unsigned dev_iter;
1975         u64 journal_seq = 0;
1976         bool invalidating_data = false;
1977         int ret = 0;
1978
1979         if (test_bit(BCH_FS_GC_FAILURE, &c->flags))
1980                 return -1;
1981
1982         /* Scan for buckets that are already invalidated: */
1983         for_each_rw_member(ca, c, dev_iter) {
1984                 struct btree_iter iter;
1985                 struct bucket_mark m;
1986                 struct bkey_s_c k;
1987
1988                 for_each_btree_key(&iter, c, BTREE_ID_ALLOC, POS(ca->dev_idx, 0), 0, k) {
1989                         if (k.k->type != BCH_ALLOC)
1990                                 continue;
1991
1992                         bu = k.k->p.offset;
1993                         m = READ_ONCE(bucket(ca, bu)->mark);
1994
1995                         if (!is_available_bucket(m) || m.cached_sectors)
1996                                 continue;
1997
1998                         percpu_down_read_preempt_disable(&c->usage_lock);
1999                         bch2_mark_alloc_bucket(c, ca, bu, true,
2000                                         gc_pos_alloc(c, NULL),
2001                                         BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
2002                                         BCH_BUCKET_MARK_GC_LOCK_HELD);
2003                         percpu_up_read_preempt_enable(&c->usage_lock);
2004
2005                         fifo_push(&ca->free_inc, bu);
2006                         ca->nr_invalidated++;
2007
2008                         if (fifo_full(&ca->free_inc))
2009                                 break;
2010                 }
2011                 bch2_btree_iter_unlock(&iter);
2012         }
2013
2014         /* did we find enough buckets? */
2015         for_each_rw_member(ca, c, dev_iter)
2016                 if (fifo_used(&ca->free_inc) < ca->free[RESERVE_BTREE].size) {
2017                         percpu_ref_put(&ca->io_ref);
2018                         goto not_enough;
2019                 }
2020
2021         return 0;
2022 not_enough:
2023         pr_debug("did not find enough empty buckets; issuing discards");
2024
2025         /* clear out free_inc - find_reclaimable_buckets() assumes it's empty */
2026         for_each_rw_member(ca, c, dev_iter)
2027                 discard_invalidated_buckets(c, ca);
2028
2029         pr_debug("scanning for reclaimable buckets");
2030
2031         for_each_rw_member(ca, c, dev_iter) {
2032                 BUG_ON(!fifo_empty(&ca->free_inc));
2033                 ca->free_inc.front = ca->free_inc.back  = 0;
2034
2035                 find_reclaimable_buckets(c, ca);
2036                 sort_free_inc(c, ca);
2037
2038                 invalidating_data |= ca->allocator_invalidating_data;
2039
2040                 fifo_for_each_entry(bu, &ca->free_inc, i)
2041                         if (!fifo_push(&ca->free[RESERVE_BTREE], bu))
2042                                 break;
2043         }
2044
2045         pr_debug("done scanning for reclaimable buckets");
2046
2047         /*
2048          * We're moving buckets to freelists _before_ they've been marked as
2049          * invalidated on disk - we have to so that we can allocate new btree
2050          * nodes to mark them as invalidated on disk.
2051          *
2052          * However, we can't _write_ to any of these buckets yet - they might
2053          * have cached data in them, which is live until they're marked as
2054          * invalidated on disk:
2055          */
2056         if (invalidating_data) {
2057                 pr_debug("invalidating existing data");
2058                 set_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
2059         } else {
2060                 pr_debug("issuing discards");
2061                 allocator_start_issue_discards(c);
2062         }
2063
2064         /*
2065          * XXX: it's possible for this to deadlock waiting on journal reclaim,
2066          * since we're holding btree writes. What then?
2067          */
2068
2069         for_each_rw_member(ca, c, dev_iter) {
2070                 ret = bch2_invalidate_free_inc(c, ca, &journal_seq,
2071                                                ca->free[RESERVE_BTREE].size,
2072                                                false);
2073                 if (ret) {
2074                         percpu_ref_put(&ca->io_ref);
2075                         return ret;
2076                 }
2077         }
2078
2079         if (invalidating_data) {
2080                 pr_debug("flushing journal");
2081
2082                 ret = bch2_journal_flush_seq(&c->journal, journal_seq);
2083                 if (ret)
2084                         return ret;
2085
2086                 pr_debug("issuing discards");
2087                 allocator_start_issue_discards(c);
2088         }
2089
2090         for_each_rw_member(ca, c, dev_iter)
2091                 while (ca->nr_invalidated) {
2092                         BUG_ON(!fifo_pop(&ca->free_inc, bu));
2093                         ca->nr_invalidated--;
2094                 }
2095
2096         set_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags);
2097
2098         /* now flush dirty btree nodes: */
2099         if (invalidating_data) {
2100                 struct bucket_table *tbl;
2101                 struct rhash_head *pos;
2102                 struct btree *b;
2103                 bool flush_updates;
2104                 size_t nr_pending_updates;
2105
2106                 clear_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
2107 again:
2108                 pr_debug("flushing dirty btree nodes");
2109                 cond_resched();
2110
2111                 flush_updates = false;
2112                 nr_pending_updates = bch2_btree_interior_updates_nr_pending(c);
2113
2115                 rcu_read_lock();
2116                 for_each_cached_btree(b, c, tbl, i, pos)
2117                         if (btree_node_dirty(b) && (!b->written || b->level)) {
2118                                 if (btree_node_may_write(b)) {
2119                                         rcu_read_unlock();
2120                                         btree_node_lock_type(c, b, SIX_LOCK_read);
2121                                         bch2_btree_node_write(c, b, SIX_LOCK_read);
2122                                         six_unlock_read(&b->lock);
2123                                         goto again;
2124                                 } else {
2125                                         flush_updates = true;
2126                                 }
2127                         }
2128                 rcu_read_unlock();
2129
2130                 /*
2131                  * This is ugly, but it's needed to flush btree node writes
2132                  * without spinning...
2133                  */
2134                 if (flush_updates) {
2135                         closure_wait_event(&c->btree_interior_update_wait,
2136                                 bch2_btree_interior_updates_nr_pending(c) <
2137                                 nr_pending_updates);
2138                         goto again;
2139                 }
2140         }
2141
2142         return 0;
2143 }
2144
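/*
 * Fill the initial freelists under gc_lock, start the per device allocator
 * threads, then persist the updated alloc info.
 */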
2145 int bch2_fs_allocator_start(struct bch_fs *c)
2146 {
2147         struct bch_dev *ca;
2148         unsigned i;
2149         int ret;
2150
2151         down_read(&c->gc_lock);
2152         ret = __bch2_fs_allocator_start(c);
2153         up_read(&c->gc_lock);
2154
2155         if (ret)
2156                 return ret;
2157
2158         for_each_rw_member(ca, c, i) {
2159                 ret = bch2_dev_allocator_start(ca);
2160                 if (ret) {
2161                         percpu_ref_put(&ca->io_ref);
2162                         return ret;
2163                 }
2164         }
2165
2166         return bch2_alloc_write(c);
2167 }
2168
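/*
 * One time allocator setup: initialize locks and bucket clocks, thread the
 * open buckets (other than the sentinel at index 0) onto the freelist, and
 * hash each write point under its own address.
 */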
2169 void bch2_fs_allocator_init(struct bch_fs *c)
2170 {
2171         struct open_bucket *ob;
2172         struct write_point *wp;
2173
2174         mutex_init(&c->write_points_hash_lock);
2175         spin_lock_init(&c->freelist_lock);
2176         bch2_bucket_clock_init(c, READ);
2177         bch2_bucket_clock_init(c, WRITE);
2178
2179         /* open bucket 0 is a sentinel NULL: */
2180         spin_lock_init(&c->open_buckets[0].lock);
2181
2182         for (ob = c->open_buckets + 1;
2183              ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
2184                 spin_lock_init(&ob->lock);
2185                 c->open_buckets_nr_free++;
2186
2187                 ob->freelist = c->open_buckets_freelist;
2188                 c->open_buckets_freelist = ob - c->open_buckets;
2189         }
2190
2191         writepoint_init(&c->btree_write_point, BCH_DATA_BTREE);
2192         writepoint_init(&c->rebalance_write_point, BCH_DATA_USER);
2193
2194         for (wp = c->write_points;
2195              wp < c->write_points + ARRAY_SIZE(c->write_points); wp++) {
2196                 writepoint_init(wp, BCH_DATA_USER);
2197
2198                 wp->last_used   = sched_clock();
2199                 wp->write_point = (unsigned long) wp;
2200                 hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
2201         }
2202
2203         c->pd_controllers_update_seconds = 5;
2204         INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update);
2205 }