// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "errcode.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "replicas.h"
#include "sb-members.h"
#include "trace.h"

#include <linux/kthread.h>
#include <linux/sched/mm.h>

/* Free space calculations: */

static unsigned journal_space_from(struct journal_device *ja,
                                   enum journal_space_from from)
{
        switch (from) {
        case journal_space_discarded:
                return ja->discard_idx;
        case journal_space_clean_ondisk:
                return ja->dirty_idx_ondisk;
        case journal_space_clean:
                return ja->dirty_idx;
        default:
                BUG();
        }
}

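/*
 * Worked example of the ring arithmetic below: buckets are counted from
 * cur_idx (exclusive) up to the reclaim boundary returned by
 * journal_space_from(); with ja->nr = 8, ja->cur_idx = 5 and a boundary of
 * 2, that's (2 - 5 - 1 + 8) % 8 = 4 available buckets.
 */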
unsigned bch2_journal_dev_buckets_available(struct journal *j,
                                            struct journal_device *ja,
                                            enum journal_space_from from)
{
        unsigned available = (journal_space_from(ja, from) -
                              ja->cur_idx - 1 + ja->nr) % ja->nr;

        /*
         * Don't use the last bucket unless writing the new last_seq
         * will make another bucket available:
         */
        if (available && ja->dirty_idx_ondisk == ja->dirty_idx)
                --available;

        return available;
}

void bch2_journal_set_watermark(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        bool low_on_space = j->space[journal_space_clean].total * 4 <=
                j->space[journal_space_total].total;
        bool low_on_pin = fifo_free(&j->pin) < j->pin.size / 4;
        bool low_on_wb = bch2_btree_write_buffer_must_wait(c);
        unsigned watermark = low_on_space || low_on_pin || low_on_wb
                ? BCH_WATERMARK_reclaim
                : BCH_WATERMARK_stripe;

        if (track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_space], low_on_space) ||
            track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_pin], low_on_pin) ||
            track_event_change(&c->times[BCH_TIME_blocked_write_buffer_full], low_on_wb))
                trace_and_count(c, journal_full, c);

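        /*
         * After the swap, @watermark holds the old value: if the watermark
         * was just lowered, wake anyone blocked waiting on journal space,
         * since writes at lower watermarks can now proceed.
         */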
        swap(watermark, j->watermark);
        if (watermark > j->watermark)
                journal_wake(j);
}

static struct journal_space
journal_dev_space_available(struct journal *j, struct bch_dev *ca,
                            enum journal_space_from from)
{
        struct journal_device *ja = &ca->journal;
        unsigned sectors, buckets, unwritten;
        u64 seq;

        if (from == journal_space_total)
                return (struct journal_space) {
                        .next_entry     = ca->mi.bucket_size,
                        .total          = ca->mi.bucket_size * ja->nr,
                };

        buckets = bch2_journal_dev_buckets_available(j, ja, from);
        sectors = ja->sectors_free;

        /*
         * Note that we don't allocate the space for a journal entry
         * until we write it out - thus, account for it here:
         */
        for (seq = journal_last_unwritten_seq(j);
             seq <= journal_cur_seq(j);
             seq++) {
                unwritten = j->buf[seq & JOURNAL_BUF_MASK].sectors;

                if (!unwritten)
                        continue;

                /* entry won't fit on this device, skip: */
                if (unwritten > ca->mi.bucket_size)
                        continue;

                if (unwritten >= sectors) {
                        if (!buckets) {
                                sectors = 0;
                                break;
                        }

                        buckets--;
                        sectors = ca->mi.bucket_size;
                }

                sectors -= unwritten;
        }

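        /*
         * The next entry can't be larger than what's left of the current
         * bucket: if that's only a partial bucket and we have spare buckets,
         * report a fresh bucket instead:
         */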
        if (sectors < ca->mi.bucket_size && buckets) {
                buckets--;
                sectors = ca->mi.bucket_size;
        }

        return (struct journal_space) {
                .next_entry     = sectors,
                .total          = sectors + buckets * ca->mi.bucket_size,
        };
}

static struct journal_space __journal_space_available(struct journal *j, unsigned nr_devs_want,
                            enum journal_space_from from)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        unsigned pos, nr_devs = 0;
        struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX];

        BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space));

        rcu_read_lock();
        for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
                if (!ca->journal.nr)
                        continue;

                space = journal_dev_space_available(j, ca, from);
                if (!space.next_entry)
                        continue;

                for (pos = 0; pos < nr_devs; pos++)
                        if (space.total > dev_space[pos].total)
                                break;

                array_insert_item(dev_space, nr_devs, pos, space);
        }
        rcu_read_unlock();

        if (nr_devs < nr_devs_want)
                return (struct journal_space) { 0, 0 };

        /*
         * We sorted largest to smallest, and we want the smallest out of the
         * @nr_devs_want largest devices:
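         *
         * E.g.: with totals sorted {100, 80, 60} and nr_devs_want = 2, we
         * return the entry with total 80 - when writing two replicas, the
         * second-largest device is the limiting one.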
         */
        return dev_space[nr_devs_want - 1];
}

void bch2_journal_space_available(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        unsigned clean, clean_ondisk, total;
        unsigned max_entry_size  = min(j->buf[0].buf_size >> 9,
                                       j->buf[1].buf_size >> 9);
        unsigned nr_online = 0, nr_devs_want;
        bool can_discard = false;
        int ret = 0;

        lockdep_assert_held(&j->lock);

        rcu_read_lock();
        for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
                struct journal_device *ja = &ca->journal;

                if (!ja->nr)
                        continue;

                while (ja->dirty_idx != ja->cur_idx &&
                       ja->bucket_seq[ja->dirty_idx] < journal_last_seq(j))
                        ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;

                while (ja->dirty_idx_ondisk != ja->dirty_idx &&
                       ja->bucket_seq[ja->dirty_idx_ondisk] < j->last_seq_ondisk)
                        ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;

                if (ja->discard_idx != ja->dirty_idx_ondisk)
                        can_discard = true;

                max_entry_size = min_t(unsigned, max_entry_size, ca->mi.bucket_size);
                nr_online++;
        }
        rcu_read_unlock();

        j->can_discard = can_discard;

        if (nr_online < metadata_replicas_required(c)) {
                ret = JOURNAL_ERR_insufficient_devices;
                goto out;
        }

        nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);

        for (unsigned i = 0; i < journal_space_nr; i++)
                j->space[i] = __journal_space_available(j, nr_devs_want, i);

        clean_ondisk    = j->space[journal_space_clean_ondisk].total;
        clean           = j->space[journal_space_clean].total;
        total           = j->space[journal_space_total].total;

        if (!j->space[journal_space_discarded].next_entry)
                ret = JOURNAL_ERR_journal_full;

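        /*
         * Non-flush journal writes don't update last_seq on disk, so clean
         * space can't be freed until a flush write happens: only allow
         * skipping flushes while most of our clean space is already clean
         * on disk:
         */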
        if ((j->space[journal_space_clean_ondisk].next_entry <
             j->space[journal_space_clean_ondisk].total) &&
            (clean - clean_ondisk <= total / 8) &&
            (clean_ondisk * 2 > clean))
                set_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);
        else
                clear_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);

        bch2_journal_set_watermark(j);
out:
        j->cur_entry_sectors    = !ret ? j->space[journal_space_discarded].next_entry : 0;
        j->cur_entry_error      = ret;

        if (!ret)
                journal_wake(j);
}

/* Discards - last part of journal reclaim: */

static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
{
        bool ret;

        spin_lock(&j->lock);
        ret = ja->discard_idx != ja->dirty_idx_ondisk;
        spin_unlock(&j->lock);

        return ret;
}

/*
 * Advance ja->discard_idx as long as it points to buckets that are no longer
 * dirty, issuing discards if necessary:
 */
void bch2_journal_do_discards(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);

        mutex_lock(&j->discard_lock);

        for_each_rw_member(c, ca) {
                struct journal_device *ja = &ca->journal;

                while (should_discard_bucket(j, ja)) {
                        if (!c->opts.nochanges &&
                            ca->mi.discard &&
                            bdev_max_discard_sectors(ca->disk_sb.bdev))
                                blkdev_issue_discard(ca->disk_sb.bdev,
                                        bucket_to_sector(ca,
                                                ja->buckets[ja->discard_idx]),
                                        ca->mi.bucket_size, GFP_NOFS);

                        spin_lock(&j->lock);
                        ja->discard_idx = (ja->discard_idx + 1) % ja->nr;

                        bch2_journal_space_available(j);
                        spin_unlock(&j->lock);
                }
        }

        mutex_unlock(&j->discard_lock);
}

/*
 * Journal entry pinning - machinery for holding a reference on a given journal
 * entry, holding it open to ensure it gets replayed during recovery:
 */

void bch2_journal_reclaim_fast(struct journal *j)
{
        bool popped = false;

        lockdep_assert_held(&j->lock);

        /*
         * Unpin journal entries whose reference counts reached zero, meaning
         * all btree nodes got written out
         */
        while (!fifo_empty(&j->pin) &&
               j->pin.front <= j->seq_ondisk &&
               !atomic_read(&fifo_peek_front(&j->pin).count)) {
                j->pin.front++;
                popped = true;
        }

        if (popped)
                bch2_journal_space_available(j);
}

bool __bch2_journal_pin_put(struct journal *j, u64 seq)
{
        struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

        return atomic_dec_and_test(&pin_list->count);
}

void bch2_journal_pin_put(struct journal *j, u64 seq)
{
        if (__bch2_journal_pin_put(j, seq)) {
                spin_lock(&j->lock);
                bch2_journal_reclaim_fast(j);
                spin_unlock(&j->lock);
        }
}

static inline bool __journal_pin_drop(struct journal *j,
                                      struct journal_entry_pin *pin)
{
        struct journal_entry_pin_list *pin_list;

        if (!journal_pin_active(pin))
                return false;

        if (j->flush_in_progress == pin)
                j->flush_in_progress_dropped = true;

        pin_list = journal_seq_pin(j, pin->seq);
        pin->seq = 0;
        list_del_init(&pin->list);

        /*
         * Unpinning a journal entry may make journal_next_bucket() succeed, if
         * writing a new last_seq will now make another bucket available:
         */
        return atomic_dec_and_test(&pin_list->count) &&
                pin_list == &fifo_peek_front(&j->pin);
}

void bch2_journal_pin_drop(struct journal *j,
                           struct journal_entry_pin *pin)
{
        spin_lock(&j->lock);
        if (__journal_pin_drop(j, pin))
                bch2_journal_reclaim_fast(j);
        spin_unlock(&j->lock);
}

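/*
 * Pins are classified by their flush callback so that reclaim can flush them
 * in a sensible order - journal_flush_done() flushes key cache and other
 * pins before btree node pins:
 */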
static enum journal_pin_type journal_pin_type(journal_pin_flush_fn fn)
{
        if (fn == bch2_btree_node_flush0 ||
            fn == bch2_btree_node_flush1)
                return JOURNAL_PIN_btree;
        else if (fn == bch2_btree_key_cache_journal_flush)
                return JOURNAL_PIN_key_cache;
        else
                return JOURNAL_PIN_other;
}

static inline void bch2_journal_pin_set_locked(struct journal *j, u64 seq,
                          struct journal_entry_pin *pin,
                          journal_pin_flush_fn flush_fn,
                          enum journal_pin_type type)
{
        struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

        /*
         * flush_fn is how we identify journal pins in debugfs, so must always
         * exist, even if it doesn't do anything:
         */
        BUG_ON(!flush_fn);

        atomic_inc(&pin_list->count);
        pin->seq        = seq;
        pin->flush      = flush_fn;
        list_add(&pin->list, &pin_list->list[type]);
}

void bch2_journal_pin_copy(struct journal *j,
                           struct journal_entry_pin *dst,
                           struct journal_entry_pin *src,
                           journal_pin_flush_fn flush_fn)
{
        spin_lock(&j->lock);

        u64 seq = READ_ONCE(src->seq);

        if (seq < journal_last_seq(j)) {
                /*
                 * bch2_journal_pin_copy() raced with bch2_journal_pin_drop() on
                 * the src pin - with the pin dropped, the entry to pin might no
                 * longer exist, but that means there's no longer anything to
                 * copy and we can bail out here:
                 */
                spin_unlock(&j->lock);
                return;
        }

        bool reclaim = __journal_pin_drop(j, dst);

        bch2_journal_pin_set_locked(j, seq, dst, flush_fn, journal_pin_type(flush_fn));

        if (reclaim)
                bch2_journal_reclaim_fast(j);

        /*
         * If the journal is currently full, we might want to call flush_fn
         * immediately:
         */
        if (seq == journal_last_seq(j))
                journal_wake(j);
        spin_unlock(&j->lock);
}

void bch2_journal_pin_set(struct journal *j, u64 seq,
                          struct journal_entry_pin *pin,
                          journal_pin_flush_fn flush_fn)
{
        spin_lock(&j->lock);

        BUG_ON(seq < journal_last_seq(j));

        bool reclaim = __journal_pin_drop(j, pin);

        bch2_journal_pin_set_locked(j, seq, pin, flush_fn, journal_pin_type(flush_fn));

        if (reclaim)
                bch2_journal_reclaim_fast(j);
        /*
         * If the journal is currently full, we might want to call flush_fn
         * immediately:
         */
        if (seq == journal_last_seq(j))
                journal_wake(j);

        spin_unlock(&j->lock);
}
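
/*
 * Typical usage sketch (illustrative - my_pin and my_flush_fn are
 * hypothetical names): a subsystem with dirty state pins the sequence
 * number its update landed in, and drops the pin once that state has been
 * written out:
 *
 *      bch2_journal_pin_set(j, seq, &my_pin, my_flush_fn);
 *      ...
 *      bch2_journal_pin_drop(j, &my_pin);
 *
 * If the pin is still held when reclaim needs the space, my_flush_fn() is
 * called to force the dirty state to be written out.
 */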

/**
 * bch2_journal_pin_flush: ensure journal pin callback is no longer running
 * @j:          journal object
 * @pin:        pin to flush
 */
void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
{
        BUG_ON(journal_pin_active(pin));

        wait_event(j->pin_flush_wait, j->flush_in_progress != pin);
}

/*
 * Journal reclaim: flush references to open journal entries to reclaim space in
 * the journal
 *
 * May be done by the journal code in the background as needed to free up space
 * for more journal entries, or as part of doing a clean shutdown, or to migrate
 * data off of a specific device:
 */

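/*
 * @allowed_below_seq and @allowed_above_seq are bitmasks of journal pin
 * types (1U << JOURNAL_PIN_*): pin types in @allowed_below_seq may be
 * flushed for entries at or below @seq_to_flush; types in
 * @allowed_above_seq may be flushed regardless of sequence number.
 */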
static struct journal_entry_pin *
journal_get_next_pin(struct journal *j,
                     u64 seq_to_flush,
                     unsigned allowed_below_seq,
                     unsigned allowed_above_seq,
                     u64 *seq)
{
        struct journal_entry_pin_list *pin_list;
        struct journal_entry_pin *ret = NULL;
        unsigned i;

        fifo_for_each_entry_ptr(pin_list, &j->pin, *seq) {
                if (*seq > seq_to_flush && !allowed_above_seq)
                        break;

                for (i = 0; i < JOURNAL_PIN_NR; i++)
                        if ((((1U << i) & allowed_below_seq) && *seq <= seq_to_flush) ||
                            ((1U << i) & allowed_above_seq)) {
                                ret = list_first_entry_or_null(&pin_list->list[i],
                                        struct journal_entry_pin, list);
                                if (ret)
                                        return ret;
                        }
        }

        return NULL;
}

/* returns the number of pins flushed, i.e. nonzero if we did any work */
static size_t journal_flush_pins(struct journal *j,
                                 u64 seq_to_flush,
                                 unsigned allowed_below_seq,
                                 unsigned allowed_above_seq,
                                 unsigned min_any,
                                 unsigned min_key_cache)
{
        struct journal_entry_pin *pin;
        size_t nr_flushed = 0;
        journal_pin_flush_fn flush_fn;
        u64 seq;
        int err;

        lockdep_assert_held(&j->reclaim_lock);

        while (1) {
                unsigned allowed_above = allowed_above_seq;
                unsigned allowed_below = allowed_below_seq;

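                /*
                 * If we're required to flush a minimum number of pins, widen
                 * the masks so those pin types can be flushed at any
                 * sequence number, not just at or below @seq_to_flush:
                 */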
                if (min_any) {
                        allowed_above |= ~0;
                        allowed_below |= ~0;
                }

                if (min_key_cache) {
                        allowed_above |= 1U << JOURNAL_PIN_key_cache;
                        allowed_below |= 1U << JOURNAL_PIN_key_cache;
                }

                cond_resched();

                j->last_flushed = jiffies;

                spin_lock(&j->lock);
                pin = journal_get_next_pin(j, seq_to_flush, allowed_below, allowed_above, &seq);
                if (pin) {
                        BUG_ON(j->flush_in_progress);
                        j->flush_in_progress = pin;
                        j->flush_in_progress_dropped = false;
                        flush_fn = pin->flush;
                }
                spin_unlock(&j->lock);

                if (!pin)
                        break;

                if (min_key_cache && pin->flush == bch2_btree_key_cache_journal_flush)
                        min_key_cache--;

                if (min_any)
                        min_any--;

                err = flush_fn(j, pin, seq);

                spin_lock(&j->lock);
                /* Pin might have been dropped or rearmed: */
                if (likely(!err && !j->flush_in_progress_dropped))
                        list_move(&pin->list, &journal_seq_pin(j, seq)->flushed);
                j->flush_in_progress = NULL;
                j->flush_in_progress_dropped = false;
                spin_unlock(&j->lock);

                wake_up(&j->pin_flush_wait);

                if (err)
                        break;

                nr_flushed++;
        }

        return nr_flushed;
}

static u64 journal_seq_to_flush(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        u64 seq_to_flush = 0;

        spin_lock(&j->lock);

        for_each_rw_member(c, ca) {
                struct journal_device *ja = &ca->journal;
                unsigned nr_buckets, bucket_to_flush;

                if (!ja->nr)
                        continue;

                /* Try to keep the journal at most half full: */
                nr_buckets = ja->nr / 2;

                nr_buckets = min(nr_buckets, ja->nr);

                bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr;
                seq_to_flush = max(seq_to_flush,
                                   ja->bucket_seq[bucket_to_flush]);
        }

        /* Also flush if the pin fifo is more than half full */
        seq_to_flush = max_t(s64, seq_to_flush,
                             (s64) journal_cur_seq(j) -
                             (j->pin.size >> 1));
        spin_unlock(&j->lock);

        return seq_to_flush;
}

/**
 * __bch2_journal_reclaim - free up journal buckets
 * @j:          journal object
 * @direct:     direct or background reclaim?
 * @kicked:     requested to run since we last ran?
 * Returns:     0 on success, or -EIO if the journal has been shutdown
 *
 * Background journal reclaim writes out btree nodes. It should be run
 * early enough so that we never completely run out of journal buckets.
 *
 * High watermarks for triggering background reclaim:
 * - FIFO has fewer than 512 entries left
 * - fewer than 25% journal buckets free
 *
 * Background reclaim runs until low watermarks are reached:
 * - FIFO has more than 1024 entries left
 * - more than 50% journal buckets free
 *
 * As long as a reclaim can complete in the time it takes to fill up
 * 512 journal entries or 25% of all journal buckets, then
 * journal_next_bucket() should not stall.
 */
static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        bool kthread = (current->flags & PF_KTHREAD) != 0;
        u64 seq_to_flush;
        size_t min_nr, min_key_cache, nr_flushed;
        unsigned flags;
        int ret = 0;

        /*
         * We can't invoke memory reclaim while holding the reclaim_lock -
         * journal reclaim is required to make progress for memory reclaim
         * (cleaning the caches), so we can't get stuck in memory reclaim while
         * we're holding the reclaim lock:
         */
        lockdep_assert_held(&j->reclaim_lock);
        flags = memalloc_noreclaim_save();

        do {
                if (kthread && kthread_should_stop())
                        break;

                if (bch2_journal_error(j)) {
                        ret = -EIO;
                        break;
                }

                bch2_journal_do_discards(j);

                seq_to_flush = journal_seq_to_flush(j);
                min_nr = 0;

                /*
                 * If it's been longer than journal_reclaim_delay since we
                 * last flushed, make sure to flush at least one journal pin:
                 */
                if (time_after(jiffies, j->last_flushed +
                               msecs_to_jiffies(c->opts.journal_reclaim_delay)))
                        min_nr = 1;

                if (j->watermark != BCH_WATERMARK_stripe)
                        min_nr = 1;

                if (atomic_read(&c->btree_cache.dirty) * 2 > c->btree_cache.used)
                        min_nr = 1;

                min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);

                trace_and_count(c, journal_reclaim_start, c,
                                direct, kicked,
                                min_nr, min_key_cache,
                                atomic_read(&c->btree_cache.dirty),
                                c->btree_cache.used,
                                atomic_long_read(&c->btree_key_cache.nr_dirty),
                                atomic_long_read(&c->btree_key_cache.nr_keys));

                nr_flushed = journal_flush_pins(j, seq_to_flush,
                                                ~0, 0,
                                                min_nr, min_key_cache);

                if (direct)
                        j->nr_direct_reclaim += nr_flushed;
                else
                        j->nr_background_reclaim += nr_flushed;
                trace_and_count(c, journal_reclaim_finish, c, nr_flushed);

                if (nr_flushed)
                        wake_up(&j->reclaim_wait);
        } while ((min_nr || min_key_cache) && nr_flushed && !direct);

        memalloc_noreclaim_restore(flags);

        return ret;
}

int bch2_journal_reclaim(struct journal *j)
{
        return __bch2_journal_reclaim(j, true, true);
}

static int bch2_journal_reclaim_thread(void *arg)
{
        struct journal *j = arg;
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        unsigned long delay, now;
        bool journal_empty;
        int ret = 0;

        set_freezable();

        j->last_flushed = jiffies;

        while (!ret && !kthread_should_stop()) {
                bool kicked = j->reclaim_kicked;

                j->reclaim_kicked = false;

                mutex_lock(&j->reclaim_lock);
                ret = __bch2_journal_reclaim(j, false, kicked);
                mutex_unlock(&j->reclaim_lock);

                now = jiffies;
                delay = msecs_to_jiffies(c->opts.journal_reclaim_delay);
                j->next_reclaim = j->last_flushed + delay;

                if (!time_in_range(j->next_reclaim, now, now + delay))
                        j->next_reclaim = now + delay;

                while (1) {
                        set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
                        if (kthread_should_stop())
                                break;
                        if (j->reclaim_kicked)
                                break;

                        spin_lock(&j->lock);
                        journal_empty = fifo_empty(&j->pin);
                        spin_unlock(&j->lock);

                        if (journal_empty)
                                schedule();
                        else if (time_after(j->next_reclaim, jiffies))
                                schedule_timeout(j->next_reclaim - jiffies);
                        else
                                break;
                }
                __set_current_state(TASK_RUNNING);
        }

        return 0;
}

void bch2_journal_reclaim_stop(struct journal *j)
{
        struct task_struct *p = j->reclaim_thread;

        j->reclaim_thread = NULL;

        if (p) {
                kthread_stop(p);
                put_task_struct(p);
        }
}

int bch2_journal_reclaim_start(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct task_struct *p;
        int ret;

        if (j->reclaim_thread)
                return 0;

        p = kthread_create(bch2_journal_reclaim_thread, j,
                           "bch-reclaim/%s", c->name);
        ret = PTR_ERR_OR_ZERO(p);
        bch_err_msg(c, ret, "creating journal reclaim thread");
        if (ret)
                return ret;

        get_task_struct(p);
        j->reclaim_thread = p;
        wake_up_process(p);
        return 0;
}

static int journal_flush_done(struct journal *j, u64 seq_to_flush,
                              bool *did_work)
{
        int ret;

        ret = bch2_journal_error(j);
        if (ret)
                return ret;

        mutex_lock(&j->reclaim_lock);

        if (journal_flush_pins(j, seq_to_flush,
                               (1U << JOURNAL_PIN_key_cache)|
                               (1U << JOURNAL_PIN_other), 0, 0, 0) ||
            journal_flush_pins(j, seq_to_flush,
                               (1U << JOURNAL_PIN_btree), 0, 0, 0))
                *did_work = true;

        if (seq_to_flush > journal_cur_seq(j))
                bch2_journal_entry_close(j);

        spin_lock(&j->lock);
        /*
         * If journal replay hasn't completed, the unreplayed journal entries
         * hold refs on their corresponding sequence numbers
         */
        ret = !test_bit(JOURNAL_REPLAY_DONE, &j->flags) ||
                journal_last_seq(j) > seq_to_flush ||
                !fifo_used(&j->pin);

        spin_unlock(&j->lock);
        mutex_unlock(&j->reclaim_lock);

        return ret;
}

bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
{
        /* time_stats this */
        bool did_work = false;

        if (!test_bit(JOURNAL_STARTED, &j->flags))
                return false;

        closure_wait_event(&j->async_wait,
                journal_flush_done(j, seq_to_flush, &did_work));

        return did_work;
}

int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_entry_pin_list *p;
        u64 iter, seq = 0;
        int ret = 0;

        spin_lock(&j->lock);
        fifo_for_each_entry_ptr(p, &j->pin, iter)
                if (dev_idx >= 0
                    ? bch2_dev_list_has_dev(p->devs, dev_idx)
                    : p->devs.nr < c->opts.metadata_replicas)
                        seq = iter;
        spin_unlock(&j->lock);

        bch2_journal_flush_pins(j, seq);

        ret = bch2_journal_error(j);
        if (ret)
                return ret;

        mutex_lock(&c->replicas_gc_lock);
        bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);

        /*
         * Now that we've populated replicas_gc, write to the journal to mark
         * active journal devices. This handles the case where the journal might
         * be empty. Otherwise we could clear all journal replicas and
         * temporarily put the fs into an unrecoverable state. Journal recovery
         * expects to find devices marked for journal data on unclean mount.
         */
        ret = bch2_journal_meta(&c->journal);
        if (ret)
                goto err;

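        /*
         * Mark the replicas for every journal entry still pinned, so that
         * replicas gc doesn't drop replicas entries the journal still
         * references:
         */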
        seq = 0;
        spin_lock(&j->lock);
        while (!ret) {
                struct bch_replicas_padded replicas;

                seq = max(seq, journal_last_seq(j));
                if (seq >= j->pin.back)
                        break;
                bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
                                         journal_seq_pin(j, seq)->devs);
                seq++;

                if (replicas.e.nr_devs) {
                        spin_unlock(&j->lock);
                        ret = bch2_mark_replicas(c, &replicas.e);
                        spin_lock(&j->lock);
                }
        }
        spin_unlock(&j->lock);
err:
        ret = bch2_replicas_gc_end(c, ret);
        mutex_unlock(&c->replicas_gc_lock);

        return ret;
}