// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_key_cache.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "replicas.h"
#include "super.h"

#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <trace/events/bcachefs.h>

/* Free space calculations: */

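/*
 * Translate a journal_space_from enum into the corresponding bucket index on
 * this device: discarded, clean-on-disk and clean buckets are tracked by
 * separate indices that all trail cur_idx.
 */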
static unsigned journal_space_from(struct journal_device *ja,
                                   enum journal_space_from from)
{
        switch (from) {
        case journal_space_discarded:
                return ja->discard_idx;
        case journal_space_clean_ondisk:
                return ja->dirty_idx_ondisk;
        case journal_space_clean:
                return ja->dirty_idx;
        default:
                BUG();
        }
}

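/*
 * Count how many buckets on @ja can still be written to: the buckets in ring
 * order from cur_idx forward to the index selected by @from.
 */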
unsigned bch2_journal_dev_buckets_available(struct journal *j,
                                            struct journal_device *ja,
                                            enum journal_space_from from)
{
        unsigned available = (journal_space_from(ja, from) -
                              ja->cur_idx - 1 + ja->nr) % ja->nr;

        /*
         * Don't use the last bucket unless writing the new last_seq
         * will make another bucket available:
         */
        if (available && ja->dirty_idx_ondisk == ja->dirty_idx)
                --available;

        return available;
}

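/*
 * Lockless update of the "remaining" half of j->prereserved: reclaim compares
 * this against the "reserved" half to decide when it must flush pins.
 */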
static void journal_set_remaining(struct journal *j, unsigned u64s_remaining)
{
        union journal_preres_state old, new;
        u64 v = atomic64_read(&j->prereserved.counter);

        do {
                old.v = new.v = v;
                new.remaining = u64s_remaining;
        } while ((v = atomic64_cmpxchg(&j->prereserved.counter,
                                       old.v, new.v)) != old.v);
}

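/*
 * Compute how much journal space a single device can contribute at the given
 * journal_space_from watermark: the size of the next entry it could hold, and
 * the total space remaining.
 */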
static struct journal_space
journal_dev_space_available(struct journal *j, struct bch_dev *ca,
                            enum journal_space_from from)
{
        struct journal_device *ja = &ca->journal;
        unsigned sectors, buckets, unwritten;
        u64 seq;

        if (from == journal_space_total)
                return (struct journal_space) {
                        .next_entry     = ca->mi.bucket_size,
                        .total          = ca->mi.bucket_size * ja->nr,
                };

        buckets = bch2_journal_dev_buckets_available(j, ja, from);
        sectors = ja->sectors_free;

        /*
         * Note that we don't allocate the space for a journal entry
         * until we write it out - thus, account for it here:
         */
        for (seq = journal_last_unwritten_seq(j);
             seq <= journal_cur_seq(j);
             seq++) {
                unwritten = j->buf[seq & JOURNAL_BUF_MASK].sectors;

                if (!unwritten)
                        continue;

                /* entry won't fit on this device, skip: */
                if (unwritten > ca->mi.bucket_size)
                        continue;

                if (unwritten >= sectors) {
                        if (!buckets) {
                                sectors = 0;
                                break;
                        }

                        buckets--;
                        sectors = ca->mi.bucket_size;
                }

                sectors -= unwritten;
        }

        if (sectors < ca->mi.bucket_size && buckets) {
                buckets--;
                sectors = ca->mi.bucket_size;
        }

        return (struct journal_space) {
                .next_entry     = sectors,
                .total          = sectors + buckets * ca->mi.bucket_size,
        };
}

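/*
 * Gather per-device space for every rw journal device, sort largest to
 * smallest, and return the space on the @nr_devs_want'th largest device -
 * i.e. what we can write with the wanted degree of replication.
 */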
static struct journal_space __journal_space_available(struct journal *j, unsigned nr_devs_want,
                            enum journal_space_from from)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bch_dev *ca;
        unsigned i, pos, nr_devs = 0;
        struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX];

        BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space));

        rcu_read_lock();
        for_each_member_device_rcu(ca, c, i,
                                   &c->rw_devs[BCH_DATA_journal]) {
                if (!ca->journal.nr)
                        continue;

                space = journal_dev_space_available(j, ca, from);
                if (!space.next_entry)
                        continue;

                for (pos = 0; pos < nr_devs; pos++)
                        if (space.total > dev_space[pos].total)
                                break;

                array_insert_item(dev_space, nr_devs, pos, space);
        }
        rcu_read_unlock();

        if (nr_devs < nr_devs_want)
                return (struct journal_space) { 0, 0 };

        /*
         * We sorted largest to smallest, and we want the smallest out of the
         * @nr_devs_want largest devices:
         */
        return dev_space[nr_devs_want - 1];
}

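/*
 * Recalculate j->space[], j->cur_entry_sectors and the prereservation limit
 * after journal buckets have been freed, discarded or written to; also
 * decides whether journal writes may skip the flush and whether the journal
 * is stuck or full. Called with j->lock held.
 */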
void bch2_journal_space_available(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bch_dev *ca;
        unsigned clean, clean_ondisk, total;
        s64 u64s_remaining = 0;
        unsigned max_entry_size = min(j->buf[0].buf_size >> 9,
                                      j->buf[1].buf_size >> 9);
        unsigned i, nr_online = 0, nr_devs_want;
        bool can_discard = false;
        int ret = 0;

        lockdep_assert_held(&j->lock);

        rcu_read_lock();
        for_each_member_device_rcu(ca, c, i,
                                   &c->rw_devs[BCH_DATA_journal]) {
                struct journal_device *ja = &ca->journal;

                if (!ja->nr)
                        continue;

                while (ja->dirty_idx != ja->cur_idx &&
                       ja->bucket_seq[ja->dirty_idx] < journal_last_seq(j))
                        ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;

                while (ja->dirty_idx_ondisk != ja->dirty_idx &&
                       ja->bucket_seq[ja->dirty_idx_ondisk] < j->last_seq_ondisk)
                        ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;

                if (ja->discard_idx != ja->dirty_idx_ondisk)
                        can_discard = true;

                max_entry_size = min_t(unsigned, max_entry_size, ca->mi.bucket_size);
                nr_online++;
        }
        rcu_read_unlock();

        j->can_discard = can_discard;

        if (nr_online < c->opts.metadata_replicas_required) {
                ret = JOURNAL_ERR_insufficient_devices;
                goto out;
        }

        nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);

        for (i = 0; i < journal_space_nr; i++)
                j->space[i] = __journal_space_available(j, nr_devs_want, i);

        clean_ondisk    = j->space[journal_space_clean_ondisk].total;
        clean           = j->space[journal_space_clean].total;
        total           = j->space[journal_space_total].total;

        if (!clean_ondisk &&
            journal_cur_seq(j) == j->seq_ondisk) {
                struct printbuf buf = PRINTBUF;

                __bch2_journal_debug_to_text(&buf, j);
                bch_err(c, "journal stuck\n%s", buf.buf);
                printbuf_exit(&buf);

                bch2_fatal_error(c);
                ret = JOURNAL_ERR_journal_stuck;
        } else if (!j->space[journal_space_discarded].next_entry)
                ret = JOURNAL_ERR_journal_full;

        if ((j->space[journal_space_clean_ondisk].next_entry <
             j->space[journal_space_clean_ondisk].total) &&
            (clean - clean_ondisk <= total / 8) &&
            (clean_ondisk * 2 > clean))
                set_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);
        else
                clear_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);

        u64s_remaining  = (u64) clean << 6;
        u64s_remaining -= (u64) total << 3;
        u64s_remaining = max(0LL, u64s_remaining);
        u64s_remaining /= 4;
        u64s_remaining = min_t(u64, u64s_remaining, U32_MAX);
out:
        j->cur_entry_sectors    = !ret ? j->space[journal_space_discarded].next_entry : 0;
        j->cur_entry_error      = ret;
        journal_set_remaining(j, u64s_remaining);
        journal_set_watermark(j);

        if (!ret)
                journal_wake(j);
}

/* Discards - last part of journal reclaim: */

static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
{
        bool ret;

        spin_lock(&j->lock);
        ret = ja->discard_idx != ja->dirty_idx_ondisk;
        spin_unlock(&j->lock);

        return ret;
}

/*
 * Advance ja->discard_idx as long as it points to buckets that are no longer
 * dirty, issuing discards if necessary:
 */
void bch2_journal_do_discards(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bch_dev *ca;
        unsigned iter;

        mutex_lock(&j->discard_lock);

        for_each_rw_member(ca, c, iter) {
                struct journal_device *ja = &ca->journal;

                while (should_discard_bucket(j, ja)) {
                        if (!c->opts.nochanges &&
                            ca->mi.discard &&
                            blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
                                blkdev_issue_discard(ca->disk_sb.bdev,
                                        bucket_to_sector(ca,
                                                ja->buckets[ja->discard_idx]),
                                        ca->mi.bucket_size, GFP_NOIO, 0);

                        spin_lock(&j->lock);
                        ja->discard_idx = (ja->discard_idx + 1) % ja->nr;

                        bch2_journal_space_available(j);
                        spin_unlock(&j->lock);
                }
        }

        mutex_unlock(&j->discard_lock);
}

/*
 * Journal entry pinning - machinery for holding a reference on a given journal
 * entry, holding it open to ensure it gets replayed during recovery:
 */

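/*
 * Called under j->lock; advances the front of the pin fifo and updates the
 * available-space calculation if any entries were freed:
 */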
static void bch2_journal_reclaim_fast(struct journal *j)
{
        struct journal_entry_pin_list temp;
        bool popped = false;

        lockdep_assert_held(&j->lock);

        /*
         * Unpin journal entries whose reference counts reached zero, meaning
         * all btree nodes got written out
         */
        while (!fifo_empty(&j->pin) &&
               !atomic_read(&fifo_peek_front(&j->pin).count)) {
                BUG_ON(!list_empty(&fifo_peek_front(&j->pin).list));
                BUG_ON(!list_empty(&fifo_peek_front(&j->pin).flushed));
                BUG_ON(!fifo_pop(&j->pin, temp));
                popped = true;
        }

        if (popped)
                bch2_journal_space_available(j);
}

void __bch2_journal_pin_put(struct journal *j, u64 seq)
{
        struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

        if (atomic_dec_and_test(&pin_list->count))
                bch2_journal_reclaim_fast(j);
}

void bch2_journal_pin_put(struct journal *j, u64 seq)
{
        struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

        if (atomic_dec_and_test(&pin_list->count)) {
                spin_lock(&j->lock);
                bch2_journal_reclaim_fast(j);
                spin_unlock(&j->lock);
        }
}

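/*
 * Remove @pin from whatever pin list it's on and drop its reference on the
 * journal entry; caller must hold j->lock. bch2_journal_pin_drop() below is
 * the locked wrapper.
 */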
static inline void __journal_pin_drop(struct journal *j,
                                      struct journal_entry_pin *pin)
{
        struct journal_entry_pin_list *pin_list;

        if (!journal_pin_active(pin))
                return;

        if (j->flush_in_progress == pin)
                j->flush_in_progress_dropped = true;

        pin_list = journal_seq_pin(j, pin->seq);
        pin->seq = 0;
        list_del_init(&pin->list);

        /*
         * Unpinning a journal entry may make journal_next_bucket() succeed, if
         * writing a new last_seq will now make another bucket available:
         */
        if (atomic_dec_and_test(&pin_list->count) &&
            pin_list == &fifo_peek_front(&j->pin))
                bch2_journal_reclaim_fast(j);
}

void bch2_journal_pin_drop(struct journal *j,
                           struct journal_entry_pin *pin)
{
        spin_lock(&j->lock);
        __journal_pin_drop(j, pin);
        spin_unlock(&j->lock);
}

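/*
 * (Re)arm @pin so that it holds journal sequence number @seq open, with
 * @flush_fn as the callback reclaim will use to flush it. Key cache pins get
 * their own list so reclaim can prioritize them separately; a NULL flush_fn
 * means the pin only needs to be accounted, not flushed.
 */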
void bch2_journal_pin_set(struct journal *j, u64 seq,
                          struct journal_entry_pin *pin,
                          journal_pin_flush_fn flush_fn)
{
        struct journal_entry_pin_list *pin_list;

        spin_lock(&j->lock);

        if (seq < journal_last_seq(j)) {
                /*
                 * bch2_journal_pin_copy() raced with bch2_journal_pin_drop() on
                 * the src pin - with the pin dropped, the entry to pin might no
                 * longer exist, but that means there's nothing left to copy and
                 * we can bail out here:
                 */
                spin_unlock(&j->lock);
                return;
        }

        pin_list = journal_seq_pin(j, seq);

        __journal_pin_drop(j, pin);

        atomic_inc(&pin_list->count);
        pin->seq        = seq;
        pin->flush      = flush_fn;

        if (flush_fn == bch2_btree_key_cache_journal_flush)
                list_add(&pin->list, &pin_list->key_cache_list);
        else if (flush_fn)
                list_add(&pin->list, &pin_list->list);
        else
                list_add(&pin->list, &pin_list->flushed);
        spin_unlock(&j->lock);

        /*
         * If the journal is currently full, we might want to call flush_fn
         * immediately:
         */
        journal_wake(j);
}

/**
 * bch2_journal_pin_flush: ensure journal pin callback is no longer running
 */
void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
{
        BUG_ON(journal_pin_active(pin));

        wait_event(j->pin_flush_wait, j->flush_in_progress != pin);
}

/*
 * Journal reclaim: flush references to open journal entries to reclaim space in
 * the journal
 *
 * May be done by the journal code in the background as needed to free up space
 * for more journal entries, or as part of doing a clean shutdown, or to migrate
 * data off of a specific device:
 */

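/*
 * Pick the next pin to flush: pins at sequence numbers <= @max_seq are always
 * eligible; @get_any widens the search past @max_seq for any pin, and
 * @get_key_cache for key cache pins only. Returns the pin and sets *seq to
 * the sequence number it belongs to.
 */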
static struct journal_entry_pin *
journal_get_next_pin(struct journal *j,
                     bool get_any,
                     bool get_key_cache,
                     u64 max_seq, u64 *seq)
{
        struct journal_entry_pin_list *pin_list;
        struct journal_entry_pin *ret = NULL;

        fifo_for_each_entry_ptr(pin_list, &j->pin, *seq) {
                if (*seq > max_seq && !get_any && !get_key_cache)
                        break;

                if (*seq <= max_seq || get_any) {
                        ret = list_first_entry_or_null(&pin_list->list,
                                struct journal_entry_pin, list);
                        if (ret)
                                return ret;
                }

                if (*seq <= max_seq || get_any || get_key_cache) {
                        ret = list_first_entry_or_null(&pin_list->key_cache_list,
                                struct journal_entry_pin, list);
                        if (ret)
                                return ret;
                }
        }

        return NULL;
}

/* returns the number of journal pins flushed: */
static size_t journal_flush_pins(struct journal *j, u64 seq_to_flush,
                                 unsigned min_any,
                                 unsigned min_key_cache)
{
        struct journal_entry_pin *pin;
        size_t nr_flushed = 0;
        journal_pin_flush_fn flush_fn;
        u64 seq;
        int err;

        lockdep_assert_held(&j->reclaim_lock);

        while (1) {
                cond_resched();

                j->last_flushed = jiffies;

                spin_lock(&j->lock);
                pin = journal_get_next_pin(j,
                                           min_any != 0,
                                           min_key_cache != 0,
                                           seq_to_flush, &seq);
                if (pin) {
                        BUG_ON(j->flush_in_progress);
                        j->flush_in_progress = pin;
                        j->flush_in_progress_dropped = false;
                        flush_fn = pin->flush;
                }
                spin_unlock(&j->lock);

                if (!pin)
                        break;

                if (min_key_cache && pin->flush == bch2_btree_key_cache_journal_flush)
                        min_key_cache--;

                if (min_any)
                        min_any--;

                err = flush_fn(j, pin, seq);

                spin_lock(&j->lock);
                /* Pin might have been dropped or rearmed: */
                if (likely(!err && !j->flush_in_progress_dropped))
                        list_move(&pin->list, &journal_seq_pin(j, seq)->flushed);
                j->flush_in_progress = NULL;
                j->flush_in_progress_dropped = false;
                spin_unlock(&j->lock);

                wake_up(&j->pin_flush_wait);

                if (err)
                        break;

                nr_flushed++;
        }

        return nr_flushed;
}

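/*
 * Pick a target sequence number for reclaim to flush up to: enough to keep
 * each device's journal no more than half full (plus what prereservations
 * will need), and to keep the pin fifo no more than half full.
 */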
static u64 journal_seq_to_flush(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bch_dev *ca;
        u64 seq_to_flush = 0;
        unsigned iter;

        spin_lock(&j->lock);

        for_each_rw_member(ca, c, iter) {
                struct journal_device *ja = &ca->journal;
                unsigned nr_buckets, bucket_to_flush;

                if (!ja->nr)
                        continue;

                /* Try to keep the journal at most half full: */
                nr_buckets = ja->nr / 2;

                /* And include pre-reservations: */
                nr_buckets += DIV_ROUND_UP(j->prereserved.reserved,
                                           (ca->mi.bucket_size << 6) -
                                           journal_entry_overhead(j));

                nr_buckets = min(nr_buckets, ja->nr);

                bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr;
                seq_to_flush = max(seq_to_flush,
                                   ja->bucket_seq[bucket_to_flush]);
        }

        /* Also flush if the pin fifo is more than half full */
        seq_to_flush = max_t(s64, seq_to_flush,
                             (s64) journal_cur_seq(j) -
                             (j->pin.size >> 1));
        spin_unlock(&j->lock);

        return seq_to_flush;
}

/**
 * bch2_journal_reclaim - free up journal buckets
 *
 * Background journal reclaim writes out btree nodes. It should be run
 * early enough so that we never completely run out of journal buckets.
 *
 * High watermarks for triggering background reclaim:
 * - FIFO has fewer than 512 entries left
 * - fewer than 25% journal buckets free
 *
 * Background reclaim runs until low watermarks are reached:
 * - FIFO has more than 1024 entries left
 * - more than 50% journal buckets free
 *
 * As long as a reclaim can complete in the time it takes to fill up
 * 512 journal entries or 25% of all journal buckets, then
 * journal_next_bucket() should not stall.
 */
static int __bch2_journal_reclaim(struct journal *j, bool direct)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        bool kthread = (current->flags & PF_KTHREAD) != 0;
        u64 seq_to_flush;
        size_t min_nr, min_key_cache, nr_flushed;
        unsigned flags;
        int ret = 0;

        /*
         * We can't invoke memory reclaim while holding the reclaim_lock -
         * journal reclaim is required to make progress for memory reclaim
         * (cleaning the caches), so we can't get stuck in memory reclaim while
         * we're holding the reclaim lock:
         */
        lockdep_assert_held(&j->reclaim_lock);
        flags = memalloc_noreclaim_save();

        do {
                if (kthread && kthread_should_stop())
                        break;

                if (bch2_journal_error(j)) {
                        ret = -EIO;
                        break;
                }

                bch2_journal_do_discards(j);

                seq_to_flush = journal_seq_to_flush(j);
                min_nr = 0;

                /*
                 * If it's been longer than journal_reclaim_delay since we last
                 * flushed, make sure to flush at least one journal pin:
                 */
                if (time_after(jiffies, j->last_flushed +
                               msecs_to_jiffies(c->opts.journal_reclaim_delay)))
                        min_nr = 1;

                if (j->prereserved.reserved * 4 > j->prereserved.remaining)
                        min_nr = 1;

                if (fifo_free(&j->pin) <= 32)
                        min_nr = 1;

                if (atomic_read(&c->btree_cache.dirty) * 2 > c->btree_cache.used)
                        min_nr = 1;

                trace_journal_reclaim_start(c,
                                min_nr,
                                j->prereserved.reserved,
                                j->prereserved.remaining,
                                atomic_read(&c->btree_cache.dirty),
                                c->btree_cache.used,
                                atomic_long_read(&c->btree_key_cache.nr_dirty),
                                atomic_long_read(&c->btree_key_cache.nr_keys));

                min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);

                nr_flushed = journal_flush_pins(j, seq_to_flush,
                                                min_nr, min_key_cache);

                if (direct)
                        j->nr_direct_reclaim += nr_flushed;
                else
                        j->nr_background_reclaim += nr_flushed;
                trace_journal_reclaim_finish(c, nr_flushed);

                if (nr_flushed)
                        wake_up(&j->reclaim_wait);
        } while ((min_nr || min_key_cache) && nr_flushed && !direct);

        memalloc_noreclaim_restore(flags);

        return ret;
}

int bch2_journal_reclaim(struct journal *j)
{
        return __bch2_journal_reclaim(j, true);
}

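/*
 * Background reclaim thread: runs reclaim periodically (every
 * journal_reclaim_delay) or when kicked, sleeping freezably whenever the pin
 * fifo is empty.
 */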
static int bch2_journal_reclaim_thread(void *arg)
{
        struct journal *j = arg;
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        unsigned long delay, now;
        bool journal_empty;
        int ret = 0;

        set_freezable();

        j->last_flushed = jiffies;

        while (!ret && !kthread_should_stop()) {
                j->reclaim_kicked = false;

                mutex_lock(&j->reclaim_lock);
                ret = __bch2_journal_reclaim(j, false);
                mutex_unlock(&j->reclaim_lock);

                now = jiffies;
                delay = msecs_to_jiffies(c->opts.journal_reclaim_delay);
                j->next_reclaim = j->last_flushed + delay;

                if (!time_in_range(j->next_reclaim, now, now + delay))
                        j->next_reclaim = now + delay;

                while (1) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (kthread_should_stop())
                                break;
                        if (j->reclaim_kicked)
                                break;

                        spin_lock(&j->lock);
                        journal_empty = fifo_empty(&j->pin);
                        spin_unlock(&j->lock);

                        if (journal_empty)
                                freezable_schedule();
                        else if (time_after(j->next_reclaim, jiffies))
                                freezable_schedule_timeout(j->next_reclaim - jiffies);
                        else
                                break;
                }
                __set_current_state(TASK_RUNNING);
        }

        return 0;
}

void bch2_journal_reclaim_stop(struct journal *j)
{
        struct task_struct *p = j->reclaim_thread;

        j->reclaim_thread = NULL;

        if (p) {
                kthread_stop(p);
                put_task_struct(p);
        }
}

int bch2_journal_reclaim_start(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct task_struct *p;

        if (j->reclaim_thread)
                return 0;

        p = kthread_create(bch2_journal_reclaim_thread, j,
                           "bch-reclaim/%s", c->name);
        if (IS_ERR(p)) {
                bch_err(c, "error creating journal reclaim thread: %li", PTR_ERR(p));
                return PTR_ERR(p);
        }

        get_task_struct(p);
        j->reclaim_thread = p;
        wake_up_process(p);
        return 0;
}

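/*
 * Condition helper for bch2_journal_flush_pins(): flush what we can and
 * report whether everything up to @seq_to_flush has been flushed (or the
 * journal hit an error).
 */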
static int journal_flush_done(struct journal *j, u64 seq_to_flush,
                              bool *did_work)
{
        int ret;

        ret = bch2_journal_error(j);
        if (ret)
                return ret;

        mutex_lock(&j->reclaim_lock);

        if (journal_flush_pins(j, seq_to_flush, 0, 0))
                *did_work = true;

        spin_lock(&j->lock);
        /*
         * If journal replay hasn't completed, the unreplayed journal entries
         * hold refs on their corresponding sequence numbers
         */
        ret = !test_bit(JOURNAL_REPLAY_DONE, &j->flags) ||
                journal_last_seq(j) > seq_to_flush ||
                !fifo_used(&j->pin);

        spin_unlock(&j->lock);
        mutex_unlock(&j->reclaim_lock);

        return ret;
}

bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
{
        bool did_work = false;

        if (!test_bit(JOURNAL_STARTED, &j->flags))
                return false;

        closure_wait_event(&j->async_wait,
                journal_flush_done(j, seq_to_flush, &did_work));

        return did_work;
}

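/*
 * Flush journal pins for all entries referencing device @dev_idx (or, when
 * @dev_idx is negative, entries with fewer than metadata_replicas copies),
 * then garbage collect the journal replicas accounting so it only reflects
 * the entries that remain.
 */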
int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_entry_pin_list *p;
        u64 iter, seq = 0;
        int ret = 0;

        spin_lock(&j->lock);
        fifo_for_each_entry_ptr(p, &j->pin, iter)
                if (dev_idx >= 0
                    ? bch2_dev_list_has_dev(p->devs, dev_idx)
                    : p->devs.nr < c->opts.metadata_replicas)
                        seq = iter;
        spin_unlock(&j->lock);

        bch2_journal_flush_pins(j, seq);

        ret = bch2_journal_error(j);
        if (ret)
                return ret;

        mutex_lock(&c->replicas_gc_lock);
        bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);

        seq = 0;

        spin_lock(&j->lock);
        while (!ret) {
                struct bch_replicas_padded replicas;

                seq = max(seq, journal_last_seq(j));
                if (seq >= j->pin.back)
                        break;
                bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
                                         journal_seq_pin(j, seq)->devs);
                seq++;

                spin_unlock(&j->lock);
                ret = bch2_mark_replicas(c, &replicas.e);
                spin_lock(&j->lock);
        }
        spin_unlock(&j->lock);

        ret = bch2_replicas_gc_end(c, ret);
        mutex_unlock(&c->replicas_gc_lock);

        return ret;
}