// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_sb.h"
#include "journal_seq_blacklist.h"

#include <trace/events/bcachefs.h>

#define x(n)    #n,
static const char * const bch2_journal_watermarks[] = {
        JOURNAL_WATERMARKS()
        NULL
};

static const char * const bch2_journal_errors[] = {
        JOURNAL_ERRORS()
        NULL
};
#undef x

static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
{
        return seq > j->seq_ondisk;
}

static bool __journal_entry_is_open(union journal_res_state state)
{
        return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}

static inline unsigned nr_unwritten_journal_entries(struct journal *j)
{
        return atomic64_read(&j->seq) - j->seq_ondisk;
}

static bool journal_entry_is_open(struct journal *j)
{
        return __journal_entry_is_open(j->reservations);
}

static inline struct journal_buf *
journal_seq_to_buf(struct journal *j, u64 seq)
{
        struct journal_buf *buf = NULL;

        EBUG_ON(seq > journal_cur_seq(j));

        if (journal_seq_unwritten(j, seq)) {
                buf = j->buf + (seq & JOURNAL_BUF_MASK);
                EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
        }
        return buf;
}

static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
{
        unsigned i;
        for (i = 0; i < ARRAY_SIZE(p->list); i++)
                INIT_LIST_HEAD(&p->list[i]);
        INIT_LIST_HEAD(&p->flushed);
        atomic_set(&p->count, count);
        p->devs.nr = 0;
}

/*
 * Detect stuck journal conditions and trigger shutdown. Technically the journal
 * can end up stuck for a variety of reasons, such as a blocked I/O, journal
 * reservation lockup, etc. Since this is a fatal error with potentially
 * unpredictable characteristics, we want to be fairly conservative before we
 * decide to shut things down.
 *
 * Consider the journal stuck when it appears full with no ability to commit
 * btree transactions, to discard journal buckets, nor acquire priority
 * (reserved watermark) reservation.
 */
static inline bool
journal_error_check_stuck(struct journal *j, int error, unsigned flags)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        bool stuck = false;
        struct printbuf buf = PRINTBUF;

        if (!(error == JOURNAL_ERR_journal_full ||
              error == JOURNAL_ERR_journal_pin_full) ||
            nr_unwritten_journal_entries(j) ||
            (flags & JOURNAL_WATERMARK_MASK) != JOURNAL_WATERMARK_reserved)
                return stuck;

        spin_lock(&j->lock);

        if (j->can_discard) {
                spin_unlock(&j->lock);
                return stuck;
        }

        stuck = true;

        /*
         * The journal shutdown path will set ->err_seq, but do it here first to
         * serialize against concurrent failures and avoid duplicate error
         * reports.
         */
        if (j->err_seq) {
                spin_unlock(&j->lock);
                return stuck;
        }
        j->err_seq = journal_cur_seq(j);
        spin_unlock(&j->lock);

        bch_err(c, "Journal stuck! Have a pre-reservation but journal full (error %s)",
                bch2_journal_errors[error]);
        bch2_journal_debug_to_text(&buf, j);
        bch_err(c, "%s", buf.buf);

        printbuf_reset(&buf);
        bch2_journal_pins_to_text(&buf, j);
        bch_err(c, "Journal pins:\n%s", buf.buf);
        printbuf_exit(&buf);

        bch2_fatal_error(c);
        dump_stack();

        return stuck;
}

/* journal entry close/open: */

void __bch2_journal_buf_put(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);

        closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
}

/*
 * Close the currently open journal entry, if any:
 *
 * We don't close a journal_buf until the next journal_buf is finished writing,
 * and can be opened again - this also initializes the next journal_buf:
 */
static void __journal_entry_close(struct journal *j, unsigned closed_val)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_buf *buf = journal_cur_buf(j);
        union journal_res_state old, new;
        u64 v = atomic64_read(&j->reservations.counter);
        unsigned sectors;

        BUG_ON(closed_val != JOURNAL_ENTRY_CLOSED_VAL &&
               closed_val != JOURNAL_ENTRY_ERROR_VAL);

        lockdep_assert_held(&j->lock);

        do {
                old.v = new.v = v;
                new.cur_entry_offset = closed_val;

                if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ||
                    old.cur_entry_offset == new.cur_entry_offset)
                        return;
        } while ((v = atomic64_cmpxchg(&j->reservations.counter,
                                       old.v, new.v)) != old.v);

        if (!__journal_entry_is_open(old))
                return;

        /* Close out old buffer: */
        buf->data->u64s         = cpu_to_le32(old.cur_entry_offset);

        sectors = vstruct_blocks_plus(buf->data, c->block_bits,
                                      buf->u64s_reserved) << c->block_bits;
        BUG_ON(sectors > buf->sectors);
        buf->sectors = sectors;

        /*
         * We have to set last_seq here, _before_ opening a new journal entry:
         *
         * A thread may replace an old pin with a new pin on its current
         * journal reservation - the expectation being that the journal will
         * contain either what the old pin protected or what the new pin
         * protects.
         *
         * After the old pin is dropped journal_last_seq() won't include the old
         * pin, so we can only write the updated last_seq on the entry that
         * contains whatever the new pin protects.
         *
         * Restated, we can _not_ update last_seq for a given entry if there
         * could be a newer entry open with reservations/pins that have been
         * taken against it.
         *
         * Hence, we want to update/set last_seq on the current journal entry right
         * before we open a new one:
         */
        buf->last_seq           = journal_last_seq(j);
        buf->data->last_seq     = cpu_to_le64(buf->last_seq);
        BUG_ON(buf->last_seq > le64_to_cpu(buf->data->seq));

        __bch2_journal_pin_put(j, le64_to_cpu(buf->data->seq));

        cancel_delayed_work(&j->write_work);

        bch2_journal_space_available(j);

        bch2_journal_buf_put(j, old.idx);
}

void bch2_journal_halt(struct journal *j)
{
        spin_lock(&j->lock);
        __journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL);
        if (!j->err_seq)
                j->err_seq = journal_cur_seq(j);
        journal_wake(j);
        spin_unlock(&j->lock);
}

static bool journal_entry_want_write(struct journal *j)
{
        bool ret = !journal_entry_is_open(j) ||
                journal_cur_seq(j) == journal_last_unwritten_seq(j);

        /* Don't close it yet if we already have a write in flight: */
        if (ret)
                __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
        else if (nr_unwritten_journal_entries(j)) {
                struct journal_buf *buf = journal_cur_buf(j);

                if (!buf->flush_time) {
                        buf->flush_time = local_clock() ?: 1;
                        buf->expires = jiffies;
                }
        }

        return ret;
}

static bool journal_entry_close(struct journal *j)
{
        bool ret;

        spin_lock(&j->lock);
        ret = journal_entry_want_write(j);
        spin_unlock(&j->lock);

        return ret;
}

/*
 * Should _only_ be called from journal_res_get() - when we actually want a
 * journal reservation - journal entry is open means journal is dirty:
 */
static int journal_entry_open(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_buf *buf = j->buf +
                ((journal_cur_seq(j) + 1) & JOURNAL_BUF_MASK);
        union journal_res_state old, new;
        int u64s;
        u64 v;

        lockdep_assert_held(&j->lock);
        BUG_ON(journal_entry_is_open(j));
        BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

        if (j->blocked)
                return JOURNAL_ERR_blocked;

        if (j->cur_entry_error)
                return j->cur_entry_error;

        if (bch2_journal_error(j))
                return JOURNAL_ERR_insufficient_devices; /* -EROFS */

        if (!fifo_free(&j->pin))
                return JOURNAL_ERR_journal_pin_full;

        if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
                return JOURNAL_ERR_max_in_flight;

        BUG_ON(!j->cur_entry_sectors);

        buf->expires            =
                (journal_cur_seq(j) == j->flushed_seq_ondisk
                 ? jiffies
                 : j->last_flush_write) +
                msecs_to_jiffies(c->opts.journal_flush_delay);

        buf->u64s_reserved      = j->entry_u64s_reserved;
        buf->disk_sectors       = j->cur_entry_sectors;
        buf->sectors            = min(buf->disk_sectors, buf->buf_size >> 9);

        u64s = (int) (buf->sectors << 9) / sizeof(u64) -
                journal_entry_overhead(j);
        u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);

        if (u64s <= (ssize_t) j->early_journal_entries.nr)
                return JOURNAL_ERR_journal_full;

        if (fifo_empty(&j->pin) && j->reclaim_thread)
                wake_up_process(j->reclaim_thread);

        /*
         * The fifo_push() needs to happen at the same time as j->seq is
         * incremented for journal_last_seq() to be calculated correctly
         */
        atomic64_inc(&j->seq);
        journal_pin_list_init(fifo_push_ref(&j->pin), 1);

        BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf);

        bkey_extent_init(&buf->key);
        buf->noflush    = false;
        buf->must_flush = false;
        buf->separate_flush = false;
        buf->flush_time = 0;

        memset(buf->data, 0, sizeof(*buf->data));
        buf->data->seq  = cpu_to_le64(journal_cur_seq(j));
        buf->data->u64s = 0;

        if (j->early_journal_entries.nr) {
                memcpy(buf->data->_data, j->early_journal_entries.data,
                       j->early_journal_entries.nr * sizeof(u64));
                le32_add_cpu(&buf->data->u64s, j->early_journal_entries.nr);
        }

        /*
         * Must be set before marking the journal entry as open:
         */
        j->cur_entry_u64s = u64s;

        v = atomic64_read(&j->reservations.counter);
        do {
                old.v = new.v = v;

                BUG_ON(old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL);

                new.idx++;
                BUG_ON(journal_state_count(new, new.idx));
                BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_BUF_MASK));

                journal_state_inc(&new);

                /* Handle any already added entries */
                new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
        } while ((v = atomic64_cmpxchg(&j->reservations.counter,
                                       old.v, new.v)) != old.v);

        if (j->res_get_blocked_start)
                bch2_time_stats_update(j->blocked_time,
                                       j->res_get_blocked_start);
        j->res_get_blocked_start = 0;

        mod_delayed_work(c->io_complete_wq,
                         &j->write_work,
                         msecs_to_jiffies(c->opts.journal_flush_delay));
        journal_wake(j);

        if (j->early_journal_entries.nr)
                darray_exit(&j->early_journal_entries);
        return 0;
}

static bool journal_quiesced(struct journal *j)
{
        bool ret = atomic64_read(&j->seq) == j->seq_ondisk;

        if (!ret)
                journal_entry_close(j);
        return ret;
}

static void journal_quiesce(struct journal *j)
{
        wait_event(j->wait, journal_quiesced(j));
}

static void journal_write_work(struct work_struct *work)
{
        struct journal *j = container_of(work, struct journal, write_work.work);
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        long delta;

        spin_lock(&j->lock);
        if (!__journal_entry_is_open(j->reservations))
                goto unlock;

        delta = journal_cur_buf(j)->expires - jiffies;

        if (delta > 0)
                mod_delayed_work(c->io_complete_wq, &j->write_work, delta);
        else
                __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
unlock:
        spin_unlock(&j->lock);
}

static int __journal_res_get(struct journal *j, struct journal_res *res,
                             unsigned flags)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_buf *buf;
        bool can_discard;
        int ret;
retry:
        if (journal_res_get_fast(j, res, flags))
                return 0;

        if (bch2_journal_error(j))
                return -BCH_ERR_erofs_journal_err;

        spin_lock(&j->lock);

        /* check once more in case somebody else shut things down... */
        if (bch2_journal_error(j)) {
                spin_unlock(&j->lock);
                return -BCH_ERR_erofs_journal_err;
        }

        /*
         * Recheck after taking the lock, so we don't race with another thread
         * that just did journal_entry_open() and call journal_entry_close()
         * unnecessarily
         */
        if (journal_res_get_fast(j, res, flags)) {
                spin_unlock(&j->lock);
                return 0;
        }

        if ((flags & JOURNAL_WATERMARK_MASK) < j->watermark) {
                /*
                 * Don't want to close current journal entry, just need to
                 * invoke reclaim:
                 */
                ret = JOURNAL_ERR_journal_full;
                goto unlock;
        }

        /*
         * If we couldn't get a reservation because the current buf filled up,
         * and we had room for a bigger entry on disk, signal that we want to
         * realloc the journal bufs:
         */
        buf = journal_cur_buf(j);
        if (journal_entry_is_open(j) &&
            buf->buf_size >> 9 < buf->disk_sectors &&
            buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
                j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);

        __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
        ret = journal_entry_open(j);

        if (ret == JOURNAL_ERR_max_in_flight)
                trace_and_count(c, journal_entry_full, c);
unlock:
        if ((ret && ret != JOURNAL_ERR_insufficient_devices) &&
            !j->res_get_blocked_start) {
                j->res_get_blocked_start = local_clock() ?: 1;
                trace_and_count(c, journal_full, c);
        }

        can_discard = j->can_discard;
        spin_unlock(&j->lock);

        if (!ret)
                goto retry;
        if (journal_error_check_stuck(j, ret, flags))
                ret = -BCH_ERR_journal_res_get_blocked;

        /*
         * Journal is full - can't rely on reclaim from work item due to
         * freezing:
         */
        if ((ret == JOURNAL_ERR_journal_full ||
             ret == JOURNAL_ERR_journal_pin_full) &&
            !(flags & JOURNAL_RES_GET_NONBLOCK)) {
                if (can_discard) {
                        bch2_journal_do_discards(j);
                        goto retry;
                }

                if (mutex_trylock(&j->reclaim_lock)) {
                        bch2_journal_reclaim(j);
                        mutex_unlock(&j->reclaim_lock);
                }
        }

        return ret == JOURNAL_ERR_insufficient_devices
                ? -EROFS
                : -BCH_ERR_journal_res_get_blocked;
}

/*
 * Essentially the entry function to the journaling code. When bcachefs is doing
 * a btree insert, it calls this function to get the current journal write.
 * The journal write is the structure used to set up journal writes. The calling
 * function will then add its keys to the structure, queuing them for the next
 * write.
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks.
 */
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
                                  unsigned flags)
{
        int ret;

        closure_wait_event(&j->async_wait,
                   (ret = __journal_res_get(j, res, flags)) !=
                   -BCH_ERR_journal_res_get_blocked ||
                   (flags & JOURNAL_RES_GET_NONBLOCK));
        return ret;
}
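
/*
 * Illustrative sketch (not part of the original file): the typical
 * reservation lifecycle, as bch2_journal_meta() below also demonstrates -
 * get a reservation, fill in keys, then drop it so the entry can be
 * closed and written. "u64s" here is a caller-chosen size:
 *
 *        struct journal_res res = { 0 };
 *        int ret = bch2_journal_res_get(j, &res, jset_u64s(u64s), 0);
 *
 *        if (ret)
 *                return ret;
 *        // ... copy keys into the space reserved in the current buf ...
 *        bch2_journal_res_put(j, &res);
 */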

/* journal_preres: */

static bool journal_preres_available(struct journal *j,
                                     struct journal_preres *res,
                                     unsigned new_u64s,
                                     unsigned flags)
{
        bool ret = bch2_journal_preres_get_fast(j, res, new_u64s, flags, true);

        if (!ret && mutex_trylock(&j->reclaim_lock)) {
                bch2_journal_reclaim(j);
                mutex_unlock(&j->reclaim_lock);
        }

        return ret;
}

int __bch2_journal_preres_get(struct journal *j,
                              struct journal_preres *res,
                              unsigned new_u64s,
                              unsigned flags)
{
        int ret;

        closure_wait_event(&j->preres_wait,
                   (ret = bch2_journal_error(j)) ||
                   journal_preres_available(j, res, new_u64s, flags));
        return ret;
}

/* journal_entry_res: */

void bch2_journal_entry_res_resize(struct journal *j,
                                   struct journal_entry_res *res,
                                   unsigned new_u64s)
{
        union journal_res_state state;
        int d = new_u64s - res->u64s;

        spin_lock(&j->lock);

        j->entry_u64s_reserved += d;
        if (d <= 0)
                goto out;

        j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
        smp_mb();
        state = READ_ONCE(j->reservations);

        if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
            state.cur_entry_offset > j->cur_entry_u64s) {
                j->cur_entry_u64s += d;
                /*
                 * Not enough room in current journal entry, have to flush it:
                 */
                __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
        } else {
                journal_cur_buf(j)->u64s_reserved += d;
        }
out:
        spin_unlock(&j->lock);
        res->u64s += d;
}

/* journal flushing: */

/**
 * bch2_journal_flush_seq_async - wait for a journal entry to be written
 *
 * like bch2_journal_wait_on_seq, except that it triggers a write immediately if
 * necessary
 */
int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
                                 struct closure *parent)
{
        struct journal_buf *buf;
        int ret = 0;

        if (seq <= j->flushed_seq_ondisk)
                return 1;

        spin_lock(&j->lock);

        if (WARN_ONCE(seq > journal_cur_seq(j),
                      "requested to flush journal seq %llu, but currently at %llu",
                      seq, journal_cur_seq(j)))
                goto out;

        /* Recheck under lock: */
        if (j->err_seq && seq >= j->err_seq) {
                ret = -EIO;
                goto out;
        }

        if (seq <= j->flushed_seq_ondisk) {
                ret = 1;
                goto out;
        }

        /* if seq was written, but not flushed - flush a newer one instead */
        seq = max(seq, journal_last_unwritten_seq(j));

recheck_need_open:
        if (seq > journal_cur_seq(j)) {
                struct journal_res res = { 0 };

                if (journal_entry_is_open(j))
                        __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);

                spin_unlock(&j->lock);

                ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
                if (ret)
                        return ret;

                seq = res.seq;
                buf = j->buf + (seq & JOURNAL_BUF_MASK);
                buf->must_flush = true;

                if (!buf->flush_time) {
                        buf->flush_time = local_clock() ?: 1;
                        buf->expires = jiffies;
                }

                if (parent && !closure_wait(&buf->wait, parent))
                        BUG();

                bch2_journal_res_put(j, &res);

                spin_lock(&j->lock);
                goto want_write;
        }

        /*
         * if write was kicked off without a flush, flush the next sequence
         * number instead
         */
        buf = journal_seq_to_buf(j, seq);
        if (buf->noflush) {
                seq++;
                goto recheck_need_open;
        }

        buf->must_flush = true;

        if (parent && !closure_wait(&buf->wait, parent))
                BUG();
want_write:
        if (seq == journal_cur_seq(j))
                journal_entry_want_write(j);
out:
        spin_unlock(&j->lock);
        return ret;
}

int bch2_journal_flush_seq(struct journal *j, u64 seq)
{
        u64 start_time = local_clock();
        int ret, ret2;

        /*
         * Don't update time_stats when @seq is already flushed:
         */
        if (seq <= j->flushed_seq_ondisk)
                return 0;

        ret = wait_event_interruptible(j->wait, (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)));

        if (!ret)
                bch2_time_stats_update(j->flush_seq_time, start_time);

        return ret ?: ret2 < 0 ? ret2 : 0;
}
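
/*
 * Illustrative sketch (hypothetical caller, not from this file): an
 * fsync-style wait - record which sequence number a reservation landed
 * in, then flush that sequence once the reservation has been dropped,
 * exactly as bch2_journal_meta() below does:
 *
 *        u64 seq = res.seq;        // saved while the journal_res was held
 *
 *        bch2_journal_res_put(j, &res);
 *        ret = bch2_journal_flush_seq(j, seq);
 */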

/*
 * bch2_journal_flush_async - if there is an open journal entry, or a journal
 * still being written, write it and wait for the write to complete
 */
void bch2_journal_flush_async(struct journal *j, struct closure *parent)
{
        bch2_journal_flush_seq_async(j, atomic64_read(&j->seq), parent);
}

int bch2_journal_flush(struct journal *j)
{
        return bch2_journal_flush_seq(j, atomic64_read(&j->seq));
}

/*
 * bch2_journal_noflush_seq - tell the journal not to issue any flushes before
 * @seq
 */
bool bch2_journal_noflush_seq(struct journal *j, u64 seq)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        u64 unwritten_seq;
        bool ret = false;

        if (!(c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush)))
                return false;

        if (seq <= c->journal.flushed_seq_ondisk)
                return false;

        spin_lock(&j->lock);
        if (seq <= c->journal.flushed_seq_ondisk)
                goto out;

        for (unwritten_seq = journal_last_unwritten_seq(j);
             unwritten_seq < seq;
             unwritten_seq++) {
                struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq);

                /* journal write is already in flight, and was a flush write: */
                if (unwritten_seq == journal_last_unwritten_seq(j) && !buf->noflush)
                        goto out;

                buf->noflush = true;
        }

        ret = true;
out:
        spin_unlock(&j->lock);
        return ret;
}

int bch2_journal_meta(struct journal *j)
{
        struct journal_buf *buf;
        struct journal_res res;
        int ret;

        memset(&res, 0, sizeof(res));

        ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
        if (ret)
                return ret;

        buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
        buf->must_flush = true;

        if (!buf->flush_time) {
                buf->flush_time = local_clock() ?: 1;
                buf->expires = jiffies;
        }

        bch2_journal_res_put(j, &res);

        return bch2_journal_flush_seq(j, res.seq);
}

/* block/unblock the journal: */

void bch2_journal_unblock(struct journal *j)
{
        spin_lock(&j->lock);
        j->blocked--;
        spin_unlock(&j->lock);

        journal_wake(j);
}

void bch2_journal_block(struct journal *j)
{
        spin_lock(&j->lock);
        j->blocked++;
        spin_unlock(&j->lock);

        journal_quiesce(j);
}
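
/*
 * Illustrative sketch: block/unblock must be paired, bracketing work that
 * can't tolerate new journal entries being opened - the same pattern
 * __bch2_set_nr_journal_buckets() uses below:
 *
 *        bch2_journal_block(&c->journal);
 *        // ... modify journal/device state while no entry can be opened ...
 *        bch2_journal_unblock(&c->journal);
 */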

/* allocate journal on a device: */

static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
                                         bool new_fs, struct closure *cl)
{
        struct bch_fs *c = ca->fs;
        struct journal_device *ja = &ca->journal;
        u64 *new_bucket_seq = NULL, *new_buckets = NULL;
        struct open_bucket **ob = NULL;
        long *bu = NULL;
        unsigned i, pos, nr_got = 0, nr_want = nr - ja->nr;
        int ret = 0;

        BUG_ON(nr <= ja->nr);

        bu              = kcalloc(nr_want, sizeof(*bu), GFP_KERNEL);
        ob              = kcalloc(nr_want, sizeof(*ob), GFP_KERNEL);
        new_buckets     = kcalloc(nr, sizeof(u64), GFP_KERNEL);
        new_bucket_seq  = kcalloc(nr, sizeof(u64), GFP_KERNEL);
        if (!bu || !ob || !new_buckets || !new_bucket_seq) {
                ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
                goto err_free;
        }

        for (nr_got = 0; nr_got < nr_want; nr_got++) {
                if (new_fs) {
                        bu[nr_got] = bch2_bucket_alloc_new_fs(ca);
                        if (bu[nr_got] < 0) {
                                ret = -BCH_ERR_ENOSPC_bucket_alloc;
                                break;
                        }
                } else {
                        ob[nr_got] = bch2_bucket_alloc(c, ca, RESERVE_none, cl);
                        ret = PTR_ERR_OR_ZERO(ob[nr_got]);
                        if (ret)
                                break;

                        ret = bch2_trans_run(c,
                                bch2_trans_mark_metadata_bucket(&trans, ca,
                                                ob[nr_got]->bucket, BCH_DATA_journal,
                                                ca->mi.bucket_size));
                        if (ret) {
                                bch2_open_bucket_put(c, ob[nr_got]);
                                bch_err(c, "error marking new journal buckets: %s", bch2_err_str(ret));
                                break;
                        }

                        bu[nr_got] = ob[nr_got]->bucket;
                }
        }

        if (!nr_got)
                goto err_free;

        /* Don't return an error if we successfully allocated some buckets: */
        ret = 0;

        if (c) {
                bch2_journal_flush_all_pins(&c->journal);
                bch2_journal_block(&c->journal);
                mutex_lock(&c->sb_lock);
        }

        memcpy(new_buckets,     ja->buckets,    ja->nr * sizeof(u64));
        memcpy(new_bucket_seq,  ja->bucket_seq, ja->nr * sizeof(u64));

        BUG_ON(ja->discard_idx > ja->nr);

        pos = ja->discard_idx ?: ja->nr;

        memmove(new_buckets + pos + nr_got,
                new_buckets + pos,
                sizeof(new_buckets[0]) * (ja->nr - pos));
        memmove(new_bucket_seq + pos + nr_got,
                new_bucket_seq + pos,
                sizeof(new_bucket_seq[0]) * (ja->nr - pos));

        for (i = 0; i < nr_got; i++) {
                new_buckets[pos + i] = bu[i];
                new_bucket_seq[pos + i] = 0;
        }

        nr = ja->nr + nr_got;

        ret = bch2_journal_buckets_to_sb(c, ca, new_buckets, nr);
        if (ret)
                goto err_unblock;

        if (!new_fs)
                bch2_write_super(c);

        /* Commit: */
        if (c)
                spin_lock(&c->journal.lock);

        swap(new_buckets,       ja->buckets);
        swap(new_bucket_seq,    ja->bucket_seq);
        ja->nr = nr;

        if (pos <= ja->discard_idx)
                ja->discard_idx = (ja->discard_idx + nr_got) % ja->nr;
        if (pos <= ja->dirty_idx_ondisk)
                ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + nr_got) % ja->nr;
        if (pos <= ja->dirty_idx)
                ja->dirty_idx = (ja->dirty_idx + nr_got) % ja->nr;
        if (pos <= ja->cur_idx)
                ja->cur_idx = (ja->cur_idx + nr_got) % ja->nr;

        if (c)
                spin_unlock(&c->journal.lock);
err_unblock:
        if (c) {
                bch2_journal_unblock(&c->journal);
                mutex_unlock(&c->sb_lock);
        }

        if (ret && !new_fs)
                for (i = 0; i < nr_got; i++)
                        bch2_trans_run(c,
                                bch2_trans_mark_metadata_bucket(&trans, ca,
                                                bu[i], BCH_DATA_free, 0));
err_free:
        if (!new_fs)
                for (i = 0; i < nr_got; i++)
                        bch2_open_bucket_put(c, ob[i]);

        kfree(new_bucket_seq);
        kfree(new_buckets);
        kfree(ob);
        kfree(bu);
        return ret;
}

/*
 * Allocate more journal space at runtime - not currently making use of it, but
 * the code works:
 */
int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
                                unsigned nr)
{
        struct journal_device *ja = &ca->journal;
        struct closure cl;
        int ret = 0;

        closure_init_stack(&cl);

        down_write(&c->state_lock);

        /* don't handle reducing nr of buckets yet: */
        if (nr < ja->nr)
                goto unlock;

        while (ja->nr < nr) {
                struct disk_reservation disk_res = { 0, 0 };

                /*
                 * note: journal buckets aren't really counted as _sectors_ used yet, so
                 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
                 * when space used goes up without a reservation - but we do need the
                 * reservation to ensure we'll actually be able to allocate:
                 *
                 * XXX: that's not right, disk reservations only ensure a
                 * filesystem-wide allocation will succeed, this is a device
                 * specific allocation - we can hang here:
                 */

                ret = bch2_disk_reservation_get(c, &disk_res,
                                                bucket_to_sector(ca, nr - ja->nr), 1, 0);
                if (ret)
                        break;

                ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);

                bch2_disk_reservation_put(c, &disk_res);

                closure_sync(&cl);

                if (ret && ret != -BCH_ERR_bucket_alloc_blocked)
                        break;
        }

        if (ret)
                bch_err(c, "%s: err %s", __func__, bch2_err_str(ret));
unlock:
        up_write(&c->state_lock);
        return ret;
}

int bch2_dev_journal_alloc(struct bch_dev *ca)
{
        unsigned nr;

        if (dynamic_fault("bcachefs:add:journal_alloc"))
                return -BCH_ERR_ENOMEM_set_nr_journal_buckets;

        /* 1/128th of the device by default: */
        nr = ca->mi.nbuckets >> 7;

        /*
         * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
         * is smaller:
         */
        nr = clamp_t(unsigned, nr,
                     BCH_JOURNAL_BUCKETS_MIN,
                     min(1 << 13,
                         (1 << 24) / ca->mi.bucket_size));
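
        /*
         * Worked example (illustrative): with 1 MiB buckets
         * (ca->mi.bucket_size == 2048 sectors), the upper bound is
         * min(8192, (1 << 24) / 2048) = 8192 buckets, i.e. 8 GiB of
         * journal; 1 << 24 sectors * 512 bytes is where the 8 GiB
         * figure above comes from.
         */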

        return __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
}

/* startup/shutdown: */

static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
{
        bool ret = false;
        u64 seq;

        spin_lock(&j->lock);
        for (seq = journal_last_unwritten_seq(j);
             seq <= journal_cur_seq(j) && !ret;
             seq++) {
                struct journal_buf *buf = journal_seq_to_buf(j, seq);

                if (bch2_bkey_has_device_c(bkey_i_to_s_c(&buf->key), dev_idx))
                        ret = true;
        }
        spin_unlock(&j->lock);

        return ret;
}

void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
{
        wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
}

void bch2_fs_journal_stop(struct journal *j)
{
        bch2_journal_reclaim_stop(j);
        bch2_journal_flush_all_pins(j);

        wait_event(j->wait, journal_entry_close(j));

        /*
         * Always write a new journal entry, to make sure the clock hands are up
         * to date (and match the superblock)
         */
        bch2_journal_meta(j);

        journal_quiesce(j);

        BUG_ON(!bch2_journal_error(j) &&
               test_bit(JOURNAL_REPLAY_DONE, &j->flags) &&
               j->last_empty_seq != journal_cur_seq(j));

        cancel_delayed_work_sync(&j->write_work);
}

int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_entry_pin_list *p;
        struct journal_replay *i, **_i;
        struct genradix_iter iter;
        bool had_entries = false;
        unsigned ptr;
        u64 last_seq = cur_seq, nr, seq;

        genradix_for_each_reverse(&c->journal_entries, iter, _i) {
                i = *_i;

                if (!i || i->ignore)
                        continue;

                last_seq = le64_to_cpu(i->j.last_seq);
                break;
        }

        nr = cur_seq - last_seq;

        if (nr + 1 > j->pin.size) {
                free_fifo(&j->pin);
                init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
                if (!j->pin.data) {
                        bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
                        return -BCH_ERR_ENOMEM_journal_pin_fifo;
                }
        }

        j->replay_journal_seq   = last_seq;
        j->replay_journal_seq_end = cur_seq;
        j->last_seq_ondisk      = last_seq;
        j->flushed_seq_ondisk   = cur_seq - 1;
        j->seq_ondisk           = cur_seq - 1;
        j->pin.front            = last_seq;
        j->pin.back             = cur_seq;
        atomic64_set(&j->seq, cur_seq - 1);

        fifo_for_each_entry_ptr(p, &j->pin, seq)
                journal_pin_list_init(p, 1);

        genradix_for_each(&c->journal_entries, iter, _i) {
                i = *_i;

                if (!i || i->ignore)
                        continue;

                seq = le64_to_cpu(i->j.seq);
                BUG_ON(seq >= cur_seq);

                if (seq < last_seq)
                        continue;

                if (journal_entry_empty(&i->j))
                        j->last_empty_seq = le64_to_cpu(i->j.seq);

                p = journal_seq_pin(j, seq);

                p->devs.nr = 0;
                for (ptr = 0; ptr < i->nr_ptrs; ptr++)
                        bch2_dev_list_add_dev(&p->devs, i->ptrs[ptr].dev);

                had_entries = true;
        }

        if (!had_entries)
                j->last_empty_seq = cur_seq;

        spin_lock(&j->lock);

        set_bit(JOURNAL_STARTED, &j->flags);
        j->last_flush_write = jiffies;

        j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
        j->reservations.unwritten_idx++;

        c->last_bucket_seq_cleanup = journal_cur_seq(j);

        bch2_journal_space_available(j);
        spin_unlock(&j->lock);

        return bch2_journal_reclaim_start(j);
}

/* init/exit: */

void bch2_dev_journal_exit(struct bch_dev *ca)
{
        kfree(ca->journal.bio);
        kfree(ca->journal.buckets);
        kfree(ca->journal.bucket_seq);

        ca->journal.bio         = NULL;
        ca->journal.buckets     = NULL;
        ca->journal.bucket_seq  = NULL;
}

int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
        struct journal_device *ja = &ca->journal;
        struct bch_sb_field_journal *journal_buckets =
                bch2_sb_get_journal(sb);
        struct bch_sb_field_journal_v2 *journal_buckets_v2 =
                bch2_sb_get_journal_v2(sb);
        unsigned i, nr_bvecs;

        ja->nr = 0;

        if (journal_buckets_v2) {
                unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);

                for (i = 0; i < nr; i++)
                        ja->nr += le64_to_cpu(journal_buckets_v2->d[i].nr);
        } else if (journal_buckets) {
                ja->nr = bch2_nr_journal_buckets(journal_buckets);
        }

        ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
        if (!ja->bucket_seq)
                return -BCH_ERR_ENOMEM_dev_journal_init;

        nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);

        ca->journal.bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
        if (!ca->journal.bio)
                return -BCH_ERR_ENOMEM_dev_journal_init;

        bio_init(ca->journal.bio, NULL, ca->journal.bio->bi_inline_vecs, nr_bvecs, 0);

        ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
        if (!ja->buckets)
                return -BCH_ERR_ENOMEM_dev_journal_init;

        if (journal_buckets_v2) {
                unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
                unsigned j, dst = 0;

                for (i = 0; i < nr; i++)
                        for (j = 0; j < le64_to_cpu(journal_buckets_v2->d[i].nr); j++)
                                ja->buckets[dst++] =
                                        le64_to_cpu(journal_buckets_v2->d[i].start) + j;
        } else if (journal_buckets) {
                for (i = 0; i < ja->nr; i++)
                        ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
        }

        return 0;
}

void bch2_fs_journal_exit(struct journal *j)
{
        unsigned i;

        darray_exit(&j->early_journal_entries);

        for (i = 0; i < ARRAY_SIZE(j->buf); i++)
                kvpfree(j->buf[i].data, j->buf[i].buf_size);
        free_fifo(&j->pin);
}

int bch2_fs_journal_init(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        static struct lock_class_key res_key;
        unsigned i;
        int ret = 0;

        pr_verbose_init(c->opts, "");

        spin_lock_init(&j->lock);
        spin_lock_init(&j->err_lock);
        init_waitqueue_head(&j->wait);
        INIT_DELAYED_WORK(&j->write_work, journal_write_work);
        init_waitqueue_head(&j->reclaim_wait);
        init_waitqueue_head(&j->pin_flush_wait);
        mutex_init(&j->reclaim_lock);
        mutex_init(&j->discard_lock);

        lockdep_init_map(&j->res_map, "journal res", &res_key, 0);

        atomic64_set(&j->reservations.counter,
                ((union journal_res_state)
                 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);

        if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL))) {
                ret = -BCH_ERR_ENOMEM_journal_pin_fifo;
                goto out;
        }

        for (i = 0; i < ARRAY_SIZE(j->buf); i++) {
                j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
                j->buf[i].data = kvpmalloc(j->buf[i].buf_size, GFP_KERNEL);
                if (!j->buf[i].data) {
                        ret = -BCH_ERR_ENOMEM_journal_buf;
                        goto out;
                }
        }

        j->pin.front = j->pin.back = 1;
out:
        pr_verbose_init(c->opts, "ret %i", ret);
        return ret;
}

/* debug: */

void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        union journal_res_state s;
        struct bch_dev *ca;
        unsigned long now = jiffies;
        u64 seq;
        unsigned i;

        if (!out->nr_tabstops)
                printbuf_tabstop_push(out, 24);
        out->atomic++;

        rcu_read_lock();
        s = READ_ONCE(j->reservations);

        prt_printf(out, "dirty journal entries:\t%llu/%llu\n",  fifo_used(&j->pin), j->pin.size);
        prt_printf(out, "seq:\t\t\t%llu\n",                     journal_cur_seq(j));
        prt_printf(out, "seq_ondisk:\t\t%llu\n",                j->seq_ondisk);
        prt_printf(out, "last_seq:\t\t%llu\n",          journal_last_seq(j));
        prt_printf(out, "last_seq_ondisk:\t%llu\n",             j->last_seq_ondisk);
        prt_printf(out, "flushed_seq_ondisk:\t%llu\n",  j->flushed_seq_ondisk);
        prt_printf(out, "prereserved:\t\t%u/%u\n",              j->prereserved.reserved, j->prereserved.remaining);
        prt_printf(out, "watermark:\t\t%s\n",           bch2_journal_watermarks[j->watermark]);
        prt_printf(out, "each entry reserved:\t%u\n",   j->entry_u64s_reserved);
        prt_printf(out, "nr flush writes:\t%llu\n",             j->nr_flush_writes);
        prt_printf(out, "nr noflush writes:\t%llu\n",   j->nr_noflush_writes);
        prt_printf(out, "nr direct reclaim:\t%llu\n",   j->nr_direct_reclaim);
        prt_printf(out, "nr background reclaim:\t%llu\n",       j->nr_background_reclaim);
        prt_printf(out, "reclaim kicked:\t\t%u\n",              j->reclaim_kicked);
        prt_printf(out, "reclaim runs in:\t%u ms\n",    time_after(j->next_reclaim, now)
               ? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
        prt_printf(out, "current entry sectors:\t%u\n", j->cur_entry_sectors);
        prt_printf(out, "current entry error:\t%s\n",   bch2_journal_errors[j->cur_entry_error]);
        prt_printf(out, "current entry:\t\t");

        switch (s.cur_entry_offset) {
        case JOURNAL_ENTRY_ERROR_VAL:
                prt_printf(out, "error");
                break;
        case JOURNAL_ENTRY_CLOSED_VAL:
                prt_printf(out, "closed");
                break;
        default:
                prt_printf(out, "%u/%u", s.cur_entry_offset, j->cur_entry_u64s);
                break;
        }

        prt_newline(out);

        for (seq = journal_cur_seq(j);
             seq >= journal_last_unwritten_seq(j);
             --seq) {
                i = seq & JOURNAL_BUF_MASK;

                prt_printf(out, "unwritten entry:");
                prt_tab(out);
                prt_printf(out, "%llu", seq);
                prt_newline(out);
                printbuf_indent_add(out, 2);

                prt_printf(out, "refcount:");
                prt_tab(out);
                prt_printf(out, "%u", journal_state_count(s, i));
                prt_newline(out);

                prt_printf(out, "sectors:");
                prt_tab(out);
                prt_printf(out, "%u", j->buf[i].sectors);
                prt_newline(out);

                prt_printf(out, "expires:");
                prt_tab(out);
                prt_printf(out, "%li jiffies", j->buf[i].expires - jiffies);
                prt_newline(out);

                printbuf_indent_sub(out, 2);
        }

        prt_printf(out,
               "replay done:\t\t%i\n",
               test_bit(JOURNAL_REPLAY_DONE,    &j->flags));

        prt_printf(out, "space:\n");
        prt_printf(out, "\tdiscarded\t%u:%u\n",
               j->space[journal_space_discarded].next_entry,
               j->space[journal_space_discarded].total);
        prt_printf(out, "\tclean ondisk\t%u:%u\n",
               j->space[journal_space_clean_ondisk].next_entry,
               j->space[journal_space_clean_ondisk].total);
        prt_printf(out, "\tclean\t\t%u:%u\n",
               j->space[journal_space_clean].next_entry,
               j->space[journal_space_clean].total);
        prt_printf(out, "\ttotal\t\t%u:%u\n",
               j->space[journal_space_total].next_entry,
               j->space[journal_space_total].total);

        for_each_member_device_rcu(ca, c, i,
                                   &c->rw_devs[BCH_DATA_journal]) {
                struct journal_device *ja = &ca->journal;

                if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
                        continue;

                if (!ja->nr)
                        continue;

                prt_printf(out, "dev %u:\n",            i);
                prt_printf(out, "\tnr\t\t%u\n",         ja->nr);
                prt_printf(out, "\tbucket size\t%u\n",  ca->mi.bucket_size);
                prt_printf(out, "\tavailable\t%u:%u\n", bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
                prt_printf(out, "\tdiscard_idx\t%u\n",  ja->discard_idx);
                prt_printf(out, "\tdirty_ondisk\t%u (seq %llu)\n", ja->dirty_idx_ondisk,        ja->bucket_seq[ja->dirty_idx_ondisk]);
                prt_printf(out, "\tdirty_idx\t%u (seq %llu)\n", ja->dirty_idx,          ja->bucket_seq[ja->dirty_idx]);
                prt_printf(out, "\tcur_idx\t\t%u (seq %llu)\n", ja->cur_idx,            ja->bucket_seq[ja->cur_idx]);
        }

        rcu_read_unlock();

        --out->atomic;
}

void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
        spin_lock(&j->lock);
        __bch2_journal_debug_to_text(out, j);
        spin_unlock(&j->lock);
}

bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq)
{
        struct journal_entry_pin_list *pin_list;
        struct journal_entry_pin *pin;
        unsigned i;

        spin_lock(&j->lock);
        *seq = max(*seq, j->pin.front);

        if (*seq >= j->pin.back) {
                spin_unlock(&j->lock);
                return true;
        }

        out->atomic++;

        pin_list = journal_seq_pin(j, *seq);

        prt_printf(out, "%llu: count %u", *seq, atomic_read(&pin_list->count));
        prt_newline(out);
        printbuf_indent_add(out, 2);

        for (i = 0; i < ARRAY_SIZE(pin_list->list); i++)
                list_for_each_entry(pin, &pin_list->list[i], list) {
                        prt_printf(out, "\t%px %ps", pin, pin->flush);
                        prt_newline(out);
                }

        if (!list_empty(&pin_list->flushed)) {
                prt_printf(out, "flushed:");
                prt_newline(out);
        }

        list_for_each_entry(pin, &pin_list->flushed, list) {
                prt_printf(out, "\t%px %ps", pin, pin->flush);
                prt_newline(out);
        }

        printbuf_indent_sub(out, 2);

        --out->atomic;
        spin_unlock(&j->lock);

        return false;
}

void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
{
        u64 seq = 0;

        while (!bch2_journal_seq_pins_to_text(out, j, &seq))
                seq++;
}
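
/*
 * Illustrative sketch: dumping journal pin state into a printbuf, the same
 * pattern journal_error_check_stuck() uses above:
 *
 *        struct printbuf buf = PRINTBUF;
 *
 *        bch2_journal_pins_to_text(&buf, j);
 *        bch_err(c, "Journal pins:\n%s", buf.buf);
 *        printbuf_exit(&buf);
 */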