// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "errcode.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "replicas.h"
#include "sb-members.h"
#include "trace.h"

#include <linux/kthread.h>
#include <linux/sched/mm.h>

/* Free space calculations: */

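/*
 * A journal bucket moves through three stages as it's reclaimed: clean (its
 * entries are no longer needed in memory), clean on disk (a journal write
 * with the advanced last_seq has completed), and discarded (ready for
 * reuse). journal_space_from() maps each stage to the corresponding ring
 * index in struct journal_device:
 */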
static unsigned journal_space_from(struct journal_device *ja,
                                   enum journal_space_from from)
{
        switch (from) {
        case journal_space_discarded:
                return ja->discard_idx;
        case journal_space_clean_ondisk:
                return ja->dirty_idx_ondisk;
        case journal_space_clean:
                return ja->dirty_idx;
        default:
                BUG();
        }
}

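/*
 * Counts buckets strictly between cur_idx and the index for the requested
 * reclaim stage, walking forward around the ring of ja->nr buckets. E.g.
 * with nr = 8, cur_idx = 3, discard_idx = 6: (6 - 3 - 1 + 8) % 8 = 2
 * discarded buckets are available to write into.
 */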
unsigned bch2_journal_dev_buckets_available(struct journal *j,
                                            struct journal_device *ja,
                                            enum journal_space_from from)
{
        unsigned available = (journal_space_from(ja, from) -
                              ja->cur_idx - 1 + ja->nr) % ja->nr;

        /*
         * Don't use the last bucket unless writing the new last_seq
         * will make another bucket available:
         */
        if (available && ja->dirty_idx_ondisk == ja->dirty_idx)
                --available;

        return available;
}

void bch2_journal_set_watermark(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        bool low_on_space = j->space[journal_space_clean].total * 4 <=
                j->space[journal_space_total].total;
        bool low_on_pin = fifo_free(&j->pin) < j->pin.size / 4;
        bool low_on_wb = bch2_btree_write_buffer_must_wait(c);
        unsigned watermark = low_on_space || low_on_pin || low_on_wb
                ? BCH_WATERMARK_reclaim
                : BCH_WATERMARK_stripe;

        if (track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_space],
                               &j->low_on_space_start, low_on_space) ||
            track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_pin],
                               &j->low_on_pin_start, low_on_pin) ||
            track_event_change(&c->times[BCH_TIME_blocked_write_buffer_full],
                               &j->write_buffer_full_start, low_on_wb))
                trace_and_count(c, journal_full, c);

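        /*
         * swap() leaves the old watermark in the local variable; if the
         * watermark just dropped back down, reservations that were blocked
         * on the stricter watermark can proceed again, so wake them:
         */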
        swap(watermark, j->watermark);
        if (watermark > j->watermark)
                journal_wake(j);
}

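/*
 * Per-device free space for the given reclaim stage: .next_entry is the
 * contiguous space available for the next journal write on this device,
 * .total the overall free space.
 */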
static struct journal_space
journal_dev_space_available(struct journal *j, struct bch_dev *ca,
                            enum journal_space_from from)
{
        struct journal_device *ja = &ca->journal;
        unsigned sectors, buckets, unwritten;
        u64 seq;

        if (from == journal_space_total)
                return (struct journal_space) {
                        .next_entry     = ca->mi.bucket_size,
                        .total          = ca->mi.bucket_size * ja->nr,
                };

        buckets = bch2_journal_dev_buckets_available(j, ja, from);
        sectors = ja->sectors_free;

        /*
         * Note that we don't allocate the space for a journal entry
         * until we write it out - thus, account for it here:
         */
        for (seq = journal_last_unwritten_seq(j);
             seq <= journal_cur_seq(j);
             seq++) {
                unwritten = j->buf[seq & JOURNAL_BUF_MASK].sectors;

                if (!unwritten)
                        continue;

                /* entry won't fit on this device, skip: */
                if (unwritten > ca->mi.bucket_size)
                        continue;

                if (unwritten >= sectors) {
                        if (!buckets) {
                                sectors = 0;
                                break;
                        }

                        buckets--;
                        sectors = ca->mi.bucket_size;
                }

                sectors -= unwritten;
        }

        if (sectors < ca->mi.bucket_size && buckets) {
                buckets--;
                sectors = ca->mi.bucket_size;
        }

        return (struct journal_space) {
                .next_entry     = sectors,
                .total          = sectors + buckets * ca->mi.bucket_size,
        };
}

134
135 static struct journal_space __journal_space_available(struct journal *j, unsigned nr_devs_want,
136                             enum journal_space_from from)
137 {
138         struct bch_fs *c = container_of(j, struct bch_fs, journal);
139         unsigned pos, nr_devs = 0;
140         struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX];
141
142         BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space));
143
144         rcu_read_lock();
145         for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
146                 if (!ca->journal.nr)
147                         continue;
148
149                 space = journal_dev_space_available(j, ca, from);
150                 if (!space.next_entry)
151                         continue;
152
153                 for (pos = 0; pos < nr_devs; pos++)
154                         if (space.total > dev_space[pos].total)
155                                 break;
156
157                 array_insert_item(dev_space, nr_devs, pos, space);
158         }
159         rcu_read_unlock();
160
161         if (nr_devs < nr_devs_want)
162                 return (struct journal_space) { 0, 0 };
163
164         /*
165          * We sorted largest to smallest, and we want the smallest out of the
166          * @nr_devs_want largest devices:
167          */
168         return dev_space[nr_devs_want - 1];
169 }
170
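/*
 * Recompute j->space[] and everything derived from it (watermark,
 * cur_entry_sectors): called under j->lock whenever the state feeding into
 * the calculation changes - journal writes completing, pins being dropped,
 * buckets being discarded.
 */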
void bch2_journal_space_available(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        unsigned clean, clean_ondisk, total;
        unsigned max_entry_size  = min(j->buf[0].buf_size >> 9,
                                       j->buf[1].buf_size >> 9);
        unsigned nr_online = 0, nr_devs_want;
        bool can_discard = false;
        int ret = 0;

        lockdep_assert_held(&j->lock);

        rcu_read_lock();
        for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
                struct journal_device *ja = &ca->journal;

                if (!ja->nr)
                        continue;

                while (ja->dirty_idx != ja->cur_idx &&
                       ja->bucket_seq[ja->dirty_idx] < journal_last_seq(j))
                        ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;

                while (ja->dirty_idx_ondisk != ja->dirty_idx &&
                       ja->bucket_seq[ja->dirty_idx_ondisk] < j->last_seq_ondisk)
                        ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;

                if (ja->discard_idx != ja->dirty_idx_ondisk)
                        can_discard = true;

                max_entry_size = min_t(unsigned, max_entry_size, ca->mi.bucket_size);
                nr_online++;
        }
        rcu_read_unlock();

        j->can_discard = can_discard;

        if (nr_online < c->opts.metadata_replicas_required) {
                ret = JOURNAL_ERR_insufficient_devices;
                goto out;
        }

        nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);

        for (unsigned i = 0; i < journal_space_nr; i++)
                j->space[i] = __journal_space_available(j, nr_devs_want, i);

        clean_ondisk    = j->space[journal_space_clean_ondisk].total;
        clean           = j->space[journal_space_clean].total;
        total           = j->space[journal_space_total].total;

        if (!j->space[journal_space_discarded].next_entry)
                ret = JOURNAL_ERR_journal_full;

        if ((j->space[journal_space_clean_ondisk].next_entry <
             j->space[journal_space_clean_ondisk].total) &&
            (clean - clean_ondisk <= total / 8) &&
            (clean_ondisk * 2 > clean))
                set_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);
        else
                clear_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);

        bch2_journal_set_watermark(j);
out:
        j->cur_entry_sectors    = !ret ? j->space[journal_space_discarded].next_entry : 0;
        j->cur_entry_error      = ret;

        if (!ret)
                journal_wake(j);
}


/* Discards - last part of journal reclaim: */

static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
{
        bool ret;

        spin_lock(&j->lock);
        ret = ja->discard_idx != ja->dirty_idx_ondisk;
        spin_unlock(&j->lock);

        return ret;
}

/*
 * Advance ja->discard_idx as long as it points to buckets that are no longer
 * dirty, issuing discards if necessary:
 */
void bch2_journal_do_discards(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);

        mutex_lock(&j->discard_lock);

        for_each_rw_member(c, ca) {
                struct journal_device *ja = &ca->journal;

                while (should_discard_bucket(j, ja)) {
                        if (!c->opts.nochanges &&
                            ca->mi.discard &&
                            bdev_max_discard_sectors(ca->disk_sb.bdev))
                                blkdev_issue_discard(ca->disk_sb.bdev,
                                        bucket_to_sector(ca,
                                                ja->buckets[ja->discard_idx]),
                                        ca->mi.bucket_size, GFP_NOFS);

                        spin_lock(&j->lock);
                        ja->discard_idx = (ja->discard_idx + 1) % ja->nr;

                        bch2_journal_space_available(j);
                        spin_unlock(&j->lock);
                }
        }

        mutex_unlock(&j->discard_lock);
}

/*
 * Journal entry pinning - machinery for holding a reference on a given journal
 * entry, holding it open to ensure it gets replayed during recovery:
 */

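/*
 * A pin is taken with bch2_journal_pin_set() (or transferred with
 * bch2_journal_pin_copy()) and released with bch2_journal_pin_drop() or
 * bch2_journal_pin_put(). While any pins on a sequence number remain,
 * journal_last_seq() can't advance past it, so the entry stays replayable.
 */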
void bch2_journal_reclaim_fast(struct journal *j)
{
        bool popped = false;

        lockdep_assert_held(&j->lock);

        /*
         * Unpin journal entries whose reference counts reached zero, meaning
         * all btree nodes got written out
         */
        while (!fifo_empty(&j->pin) &&
               j->pin.front <= j->seq_ondisk &&
               !atomic_read(&fifo_peek_front(&j->pin).count)) {
                j->pin.front++;
                popped = true;
        }

        if (popped)
                bch2_journal_space_available(j);
}

bool __bch2_journal_pin_put(struct journal *j, u64 seq)
{
        struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

        return atomic_dec_and_test(&pin_list->count);
}

void bch2_journal_pin_put(struct journal *j, u64 seq)
{
        if (__bch2_journal_pin_put(j, seq)) {
                spin_lock(&j->lock);
                bch2_journal_reclaim_fast(j);
                spin_unlock(&j->lock);
        }
}

static inline bool __journal_pin_drop(struct journal *j,
                                      struct journal_entry_pin *pin)
{
        struct journal_entry_pin_list *pin_list;

        if (!journal_pin_active(pin))
                return false;

        if (j->flush_in_progress == pin)
                j->flush_in_progress_dropped = true;

        pin_list = journal_seq_pin(j, pin->seq);
        pin->seq = 0;
        list_del_init(&pin->list);

        /*
         * Unpinning a journal entry may make journal_next_bucket() succeed, if
         * writing a new last_seq will now make another bucket available:
         */
        return atomic_dec_and_test(&pin_list->count) &&
                pin_list == &fifo_peek_front(&j->pin);
}

void bch2_journal_pin_drop(struct journal *j,
                           struct journal_entry_pin *pin)
{
        spin_lock(&j->lock);
        if (__journal_pin_drop(j, pin))
                bch2_journal_reclaim_fast(j);
        spin_unlock(&j->lock);
}

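/*
 * Pins are classified by their flush callback - btree node writes, key
 * cache flushes, or anything else - so reclaim can flush one class of pins
 * at a time (see journal_flush_pins()):
 */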
static enum journal_pin_type journal_pin_type(journal_pin_flush_fn fn)
{
        if (fn == bch2_btree_node_flush0 ||
            fn == bch2_btree_node_flush1)
                return JOURNAL_PIN_btree;
        else if (fn == bch2_btree_key_cache_journal_flush)
                return JOURNAL_PIN_key_cache;
        else
                return JOURNAL_PIN_other;
}

static inline void bch2_journal_pin_set_locked(struct journal *j, u64 seq,
                          struct journal_entry_pin *pin,
                          journal_pin_flush_fn flush_fn,
                          enum journal_pin_type type)
{
        struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

        /*
         * flush_fn is how we identify journal pins in debugfs, so must always
         * exist, even if it doesn't do anything:
         */
        BUG_ON(!flush_fn);

        atomic_inc(&pin_list->count);
        pin->seq        = seq;
        pin->flush      = flush_fn;
        list_add(&pin->list, &pin_list->list[type]);
}

void bch2_journal_pin_copy(struct journal *j,
                           struct journal_entry_pin *dst,
                           struct journal_entry_pin *src,
                           journal_pin_flush_fn flush_fn)
{
        bool reclaim;

        spin_lock(&j->lock);

        u64 seq = READ_ONCE(src->seq);

        if (seq < journal_last_seq(j)) {
                /*
                 * bch2_journal_pin_copy() raced with bch2_journal_pin_drop() on
                 * the src pin - with the pin dropped, the entry to pin might no
                 * longer exist, but that means there's no longer anything to
                 * copy and we can bail out here:
                 */
                spin_unlock(&j->lock);
                return;
        }

        reclaim = __journal_pin_drop(j, dst);

        bch2_journal_pin_set_locked(j, seq, dst, flush_fn, journal_pin_type(flush_fn));

        if (reclaim)
                bch2_journal_reclaim_fast(j);
        spin_unlock(&j->lock);

        /*
         * If the journal is currently full, we might want to call flush_fn
         * immediately:
         */
        journal_wake(j);
}

void bch2_journal_pin_set(struct journal *j, u64 seq,
                          struct journal_entry_pin *pin,
                          journal_pin_flush_fn flush_fn)
{
        bool reclaim;

        spin_lock(&j->lock);

        BUG_ON(seq < journal_last_seq(j));

        reclaim = __journal_pin_drop(j, pin);

        bch2_journal_pin_set_locked(j, seq, pin, flush_fn, journal_pin_type(flush_fn));

        if (reclaim)
                bch2_journal_reclaim_fast(j);
        spin_unlock(&j->lock);

        /*
         * If the journal is currently full, we might want to call flush_fn
         * immediately:
         */
        journal_wake(j);
}

/**
 * bch2_journal_pin_flush: ensure journal pin callback is no longer running
 * @j:          journal object
 * @pin:        pin to flush
 */
void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
{
        BUG_ON(journal_pin_active(pin));

        wait_event(j->pin_flush_wait, j->flush_in_progress != pin);
}

/*
 * Journal reclaim: flush references to open journal entries to reclaim space
 * in the journal
 *
 * May be done by the journal code in the background as needed to free up space
 * for more journal entries, or as part of doing a clean shutdown, or to migrate
 * data off of a specific device:
 */

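/*
 * allowed_below_seq and allowed_above_seq are bitmasks of JOURNAL_PIN_*
 * types: a pin qualifies if its entry is at or below seq_to_flush and its
 * type is set in allowed_below_seq, or if its type is set in
 * allowed_above_seq regardless of sequence number.
 */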
static struct journal_entry_pin *
journal_get_next_pin(struct journal *j,
                     u64 seq_to_flush,
                     unsigned allowed_below_seq,
                     unsigned allowed_above_seq,
                     u64 *seq)
{
        struct journal_entry_pin_list *pin_list;
        struct journal_entry_pin *ret = NULL;
        unsigned i;

        fifo_for_each_entry_ptr(pin_list, &j->pin, *seq) {
                if (*seq > seq_to_flush && !allowed_above_seq)
                        break;

                for (i = 0; i < JOURNAL_PIN_NR; i++)
                        if ((((1U << i) & allowed_below_seq) && *seq <= seq_to_flush) ||
                            ((1U << i) & allowed_above_seq)) {
                                ret = list_first_entry_or_null(&pin_list->list[i],
                                        struct journal_entry_pin, list);
                                if (ret)
                                        return ret;
                        }
        }

        return NULL;
}

/* Returns the number of pins flushed (nonzero if we did work): */
static size_t journal_flush_pins(struct journal *j,
                                 u64 seq_to_flush,
                                 unsigned allowed_below_seq,
                                 unsigned allowed_above_seq,
                                 unsigned min_any,
                                 unsigned min_key_cache)
{
        struct journal_entry_pin *pin;
        size_t nr_flushed = 0;
        journal_pin_flush_fn flush_fn;
        u64 seq;
        int err;

        lockdep_assert_held(&j->reclaim_lock);

        while (1) {
                unsigned allowed_above = allowed_above_seq;
                unsigned allowed_below = allowed_below_seq;

                if (min_any) {
                        allowed_above |= ~0;
                        allowed_below |= ~0;
                }

                if (min_key_cache) {
                        allowed_above |= 1U << JOURNAL_PIN_key_cache;
                        allowed_below |= 1U << JOURNAL_PIN_key_cache;
                }

                cond_resched();

                j->last_flushed = jiffies;

                spin_lock(&j->lock);
                pin = journal_get_next_pin(j, seq_to_flush, allowed_below, allowed_above, &seq);
                if (pin) {
                        BUG_ON(j->flush_in_progress);
                        j->flush_in_progress = pin;
                        j->flush_in_progress_dropped = false;
                        flush_fn = pin->flush;
                }
                spin_unlock(&j->lock);

                if (!pin)
                        break;

                if (min_key_cache && pin->flush == bch2_btree_key_cache_journal_flush)
                        min_key_cache--;

                if (min_any)
                        min_any--;

                err = flush_fn(j, pin, seq);

                spin_lock(&j->lock);
                /* Pin might have been dropped or rearmed: */
                if (likely(!err && !j->flush_in_progress_dropped))
                        list_move(&pin->list, &journal_seq_pin(j, seq)->flushed);
                j->flush_in_progress = NULL;
                j->flush_in_progress_dropped = false;
                spin_unlock(&j->lock);

                wake_up(&j->pin_flush_wait);

                if (err)
                        break;

                nr_flushed++;
        }

        return nr_flushed;
}

static u64 journal_seq_to_flush(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        u64 seq_to_flush = 0;

        spin_lock(&j->lock);

        for_each_rw_member(c, ca) {
                struct journal_device *ja = &ca->journal;
                unsigned nr_buckets, bucket_to_flush;

                if (!ja->nr)
                        continue;

                /* Try to keep the journal at most half full: */
                nr_buckets = ja->nr / 2;

                nr_buckets = min(nr_buckets, ja->nr);

                bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr;
                seq_to_flush = max(seq_to_flush,
                                   ja->bucket_seq[bucket_to_flush]);
        }

        /* Also flush if the pin fifo is more than half full */
        seq_to_flush = max_t(s64, seq_to_flush,
                             (s64) journal_cur_seq(j) -
                             (j->pin.size >> 1));
        spin_unlock(&j->lock);

        return seq_to_flush;
}

/**
 * __bch2_journal_reclaim - free up journal buckets
 * @j:          journal object
 * @direct:     direct or background reclaim?
 * @kicked:     requested to run since we last ran?
 * Returns:     0 on success, or -EIO if the journal has been shut down
 *
 * Background journal reclaim writes out btree nodes. It should be run
 * early enough so that we never completely run out of journal buckets.
 *
 * High watermarks for triggering background reclaim:
 * - FIFO has fewer than 512 entries left
 * - fewer than 25% journal buckets free
 *
 * Background reclaim runs until low watermarks are reached:
 * - FIFO has more than 1024 entries left
 * - more than 50% journal buckets free
 *
 * As long as a reclaim can complete in the time it takes to fill up
 * 512 journal entries or 25% of all journal buckets, then
 * journal_next_bucket() should not stall.
 */
static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        bool kthread = (current->flags & PF_KTHREAD) != 0;
        u64 seq_to_flush;
        size_t min_nr, min_key_cache, nr_flushed;
        unsigned flags;
        int ret = 0;

        /*
         * We can't invoke memory reclaim while holding the reclaim_lock -
         * journal reclaim is required to make progress for memory reclaim
         * (cleaning the caches), so we can't get stuck in memory reclaim while
         * we're holding the reclaim lock:
         */
        lockdep_assert_held(&j->reclaim_lock);
        flags = memalloc_noreclaim_save();

        do {
                if (kthread && kthread_should_stop())
                        break;

                if (bch2_journal_error(j)) {
                        ret = -EIO;
                        break;
                }

                bch2_journal_do_discards(j);

                seq_to_flush = journal_seq_to_flush(j);
                min_nr = 0;

                /*
                 * If it's been longer than journal_reclaim_delay since we
                 * last flushed, make sure to flush at least one journal pin:
                 */
                if (time_after(jiffies, j->last_flushed +
                               msecs_to_jiffies(c->opts.journal_reclaim_delay)))
                        min_nr = 1;

                if (j->watermark != BCH_WATERMARK_stripe)
                        min_nr = 1;

                if (atomic_read(&c->btree_cache.dirty) * 2 > c->btree_cache.used)
                        min_nr = 1;

                min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);

                trace_and_count(c, journal_reclaim_start, c,
                                direct, kicked,
                                min_nr, min_key_cache,
                                atomic_read(&c->btree_cache.dirty),
                                c->btree_cache.used,
                                atomic_long_read(&c->btree_key_cache.nr_dirty),
                                atomic_long_read(&c->btree_key_cache.nr_keys));

                nr_flushed = journal_flush_pins(j, seq_to_flush,
                                                ~0, 0,
                                                min_nr, min_key_cache);

                if (direct)
                        j->nr_direct_reclaim += nr_flushed;
                else
                        j->nr_background_reclaim += nr_flushed;
                trace_and_count(c, journal_reclaim_finish, c, nr_flushed);

                if (nr_flushed)
                        wake_up(&j->reclaim_wait);
        } while ((min_nr || min_key_cache) && nr_flushed && !direct);

        memalloc_noreclaim_restore(flags);

        return ret;
}

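/* Synchronous reclaim, run directly by a thread that needs journal space: */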
int bch2_journal_reclaim(struct journal *j)
{
        return __bch2_journal_reclaim(j, true, true);
}

static int bch2_journal_reclaim_thread(void *arg)
{
        struct journal *j = arg;
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        unsigned long delay, now;
        bool journal_empty;
        int ret = 0;

        set_freezable();

        j->last_flushed = jiffies;

        while (!ret && !kthread_should_stop()) {
                bool kicked = j->reclaim_kicked;

                j->reclaim_kicked = false;

                mutex_lock(&j->reclaim_lock);
                ret = __bch2_journal_reclaim(j, false, kicked);
                mutex_unlock(&j->reclaim_lock);

                now = jiffies;
                delay = msecs_to_jiffies(c->opts.journal_reclaim_delay);
                j->next_reclaim = j->last_flushed + delay;

                if (!time_in_range(j->next_reclaim, now, now + delay))
                        j->next_reclaim = now + delay;

                while (1) {
                        set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
                        if (kthread_should_stop())
                                break;
                        if (j->reclaim_kicked)
                                break;

                        spin_lock(&j->lock);
                        journal_empty = fifo_empty(&j->pin);
                        spin_unlock(&j->lock);

                        if (journal_empty)
                                schedule();
                        else if (time_after(j->next_reclaim, jiffies))
                                schedule_timeout(j->next_reclaim - jiffies);
                        else
                                break;
                }
                __set_current_state(TASK_RUNNING);
        }

        return 0;
}

void bch2_journal_reclaim_stop(struct journal *j)
{
        struct task_struct *p = j->reclaim_thread;

        j->reclaim_thread = NULL;

        if (p) {
                kthread_stop(p);
                put_task_struct(p);
        }
}

int bch2_journal_reclaim_start(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct task_struct *p;
        int ret;

        if (j->reclaim_thread)
                return 0;

        p = kthread_create(bch2_journal_reclaim_thread, j,
                           "bch-reclaim/%s", c->name);
        ret = PTR_ERR_OR_ZERO(p);
        bch_err_msg(c, ret, "creating journal reclaim thread");
        if (ret)
                return ret;

        get_task_struct(p);
        j->reclaim_thread = p;
        wake_up_process(p);
        return 0;
}

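/*
 * Flush key cache and "other" pins before btree node pins - flushing them
 * dirties btree nodes, which the second journal_flush_pins() pass then
 * writes out:
 */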
static int journal_flush_done(struct journal *j, u64 seq_to_flush,
                              bool *did_work)
{
        int ret;

        ret = bch2_journal_error(j);
        if (ret)
                return ret;

        mutex_lock(&j->reclaim_lock);

        if (journal_flush_pins(j, seq_to_flush,
                               (1U << JOURNAL_PIN_key_cache)|
                               (1U << JOURNAL_PIN_other), 0, 0, 0) ||
            journal_flush_pins(j, seq_to_flush,
                               (1U << JOURNAL_PIN_btree), 0, 0, 0))
                *did_work = true;

        if (seq_to_flush > journal_cur_seq(j))
                bch2_journal_entry_close(j);

        spin_lock(&j->lock);
        /*
         * If journal replay hasn't completed, the unreplayed journal entries
         * hold refs on their corresponding sequence numbers
         */
        ret = !test_bit(JOURNAL_REPLAY_DONE, &j->flags) ||
                journal_last_seq(j) > seq_to_flush ||
                !fifo_used(&j->pin);

        spin_unlock(&j->lock);
        mutex_unlock(&j->reclaim_lock);

        return ret;
}

bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
{
        /* time_stats this */
        bool did_work = false;

        if (!test_bit(JOURNAL_STARTED, &j->flags))
                return false;

        closure_wait_event(&j->async_wait,
                journal_flush_done(j, seq_to_flush, &did_work));

        return did_work;
}

int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_entry_pin_list *p;
        u64 iter, seq = 0;
        int ret = 0;

        spin_lock(&j->lock);
        fifo_for_each_entry_ptr(p, &j->pin, iter)
                if (dev_idx >= 0
                    ? bch2_dev_list_has_dev(p->devs, dev_idx)
                    : p->devs.nr < c->opts.metadata_replicas)
                        seq = iter;
        spin_unlock(&j->lock);

        bch2_journal_flush_pins(j, seq);

        ret = bch2_journal_error(j);
        if (ret)
                return ret;

        mutex_lock(&c->replicas_gc_lock);
        bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);

        /*
         * Now that we've populated replicas_gc, write to the journal to mark
         * active journal devices. This handles the case where the journal might
         * be empty. Otherwise we could clear all journal replicas and
         * temporarily put the fs into an unrecoverable state. Journal recovery
         * expects to find devices marked for journal data on unclean mount.
         */
        ret = bch2_journal_meta(&c->journal);
        if (ret)
                goto err;

        seq = 0;
        spin_lock(&j->lock);
        while (!ret) {
                struct bch_replicas_padded replicas;

                seq = max(seq, journal_last_seq(j));
                if (seq >= j->pin.back)
                        break;
                bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
                                         journal_seq_pin(j, seq)->devs);
                seq++;

                spin_unlock(&j->lock);
                ret = bch2_mark_replicas(c, &replicas.e);
                spin_lock(&j->lock);
        }
        spin_unlock(&j->lock);
err:
        ret = bch2_replicas_gc_end(c, ret);
        mutex_unlock(&c->replicas_gc_lock);

        return ret;
}