// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_sb.h"
#include "journal_seq_blacklist.h"
#include "trace.h"

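/*
 * JOURNAL_WATERMARKS() and JOURNAL_ERRORS() are x-macro lists: with x()
 * temporarily defined to stringify its argument, expanding them here generates
 * the corresponding tables of human readable names:
 */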
#define x(n)    #n,
static const char * const bch2_journal_watermarks[] = {
        JOURNAL_WATERMARKS()
        NULL
};

static const char * const bch2_journal_errors[] = {
        JOURNAL_ERRORS()
        NULL
};
#undef x

static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
{
        return seq > j->seq_ondisk;
}

static bool __journal_entry_is_open(union journal_res_state state)
{
        return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}

static inline unsigned nr_unwritten_journal_entries(struct journal *j)
{
        return atomic64_read(&j->seq) - j->seq_ondisk;
}

static bool journal_entry_is_open(struct journal *j)
{
        return __journal_entry_is_open(j->reservations);
}

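/*
 * The journal keeps a small ring of in-flight buffers: a sequence number maps
 * to its buffer via the low bits (seq & JOURNAL_BUF_MASK), and the mapping is
 * only valid while the entry is still unwritten:
 */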
static inline struct journal_buf *
journal_seq_to_buf(struct journal *j, u64 seq)
{
        struct journal_buf *buf = NULL;

        EBUG_ON(seq > journal_cur_seq(j));

        if (journal_seq_unwritten(j, seq)) {
                buf = j->buf + (seq & JOURNAL_BUF_MASK);
                EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
        }
        return buf;
}

static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
{
        unsigned i;
        for (i = 0; i < ARRAY_SIZE(p->list); i++)
                INIT_LIST_HEAD(&p->list[i]);
        INIT_LIST_HEAD(&p->flushed);
        atomic_set(&p->count, count);
        p->devs.nr = 0;
}

/*
 * Detect stuck journal conditions and trigger shutdown. Technically the journal
 * can end up stuck for a variety of reasons, such as a blocked I/O, journal
 * reservation lockup, etc. Since this is a fatal error with potentially
 * unpredictable characteristics, we want to be fairly conservative before we
 * decide to shut things down.
 *
 * Consider the journal stuck when it appears full with no ability to commit
 * btree transactions, discard journal buckets, or acquire a priority
 * (reserved watermark) reservation.
 */
static inline bool
journal_error_check_stuck(struct journal *j, int error, unsigned flags)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        bool stuck = false;
        struct printbuf buf = PRINTBUF;

        if (!(error == JOURNAL_ERR_journal_full ||
              error == JOURNAL_ERR_journal_pin_full) ||
            nr_unwritten_journal_entries(j) ||
            (flags & JOURNAL_WATERMARK_MASK) != JOURNAL_WATERMARK_reserved)
                return stuck;

        spin_lock(&j->lock);

        if (j->can_discard) {
                spin_unlock(&j->lock);
                return stuck;
        }

        stuck = true;

        /*
         * The journal shutdown path will set ->err_seq, but do it here first to
         * serialize against concurrent failures and avoid duplicate error
         * reports.
         */
        if (j->err_seq) {
                spin_unlock(&j->lock);
                return stuck;
        }
        j->err_seq = journal_cur_seq(j);
        spin_unlock(&j->lock);

        bch_err(c, "Journal stuck! Have a pre-reservation but journal full (error %s)",
                bch2_journal_errors[error]);
        bch2_journal_debug_to_text(&buf, j);
        bch_err(c, "%s", buf.buf);

        printbuf_reset(&buf);
        bch2_journal_pins_to_text(&buf, j);
        bch_err(c, "Journal pins:\n%s", buf.buf);
        printbuf_exit(&buf);

        bch2_fatal_error(c);
        dump_stack();

        return stuck;
}

/* journal entry close/open: */

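/*
 * Hand the journal buffer off to the write path: the actual I/O is done by
 * bch2_journal_write(), run out of the journal's closure:
 */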
void __bch2_journal_buf_put(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);

        closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
}

/*
 * Close the currently open journal entry:
 *
 * We don't close a journal_buf until the next journal_buf is finished writing,
 * and can be opened again - this also initializes the next journal_buf:
 */
static void __journal_entry_close(struct journal *j, unsigned closed_val)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_buf *buf = journal_cur_buf(j);
        union journal_res_state old, new;
        u64 v = atomic64_read(&j->reservations.counter);
        unsigned sectors;

        BUG_ON(closed_val != JOURNAL_ENTRY_CLOSED_VAL &&
               closed_val != JOURNAL_ENTRY_ERROR_VAL);

        lockdep_assert_held(&j->lock);

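        /*
         * The whole reservation state is packed into one 64 bit word, so
         * closing the entry is a single cmpxchg: set cur_entry_offset to the
         * closed (or error) sentinel, retrying if a concurrent reservation
         * moved the offset under us:
         */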
        do {
                old.v = new.v = v;
                new.cur_entry_offset = closed_val;

                if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ||
                    old.cur_entry_offset == new.cur_entry_offset)
                        return;
        } while ((v = atomic64_cmpxchg(&j->reservations.counter,
                                       old.v, new.v)) != old.v);

        if (!__journal_entry_is_open(old))
                return;

        /* Close out old buffer: */
        buf->data->u64s         = cpu_to_le32(old.cur_entry_offset);

        sectors = vstruct_blocks_plus(buf->data, c->block_bits,
                                      buf->u64s_reserved) << c->block_bits;
        BUG_ON(sectors > buf->sectors);
        buf->sectors = sectors;

        /*
         * We have to set last_seq here, _before_ opening a new journal entry:
         *
         * A thread may replace an old pin with a new pin on its current
         * journal reservation - the expectation being that the journal will
         * contain either what the old pin protected or what the new pin
         * protects.
         *
         * After the old pin is dropped journal_last_seq() won't include the old
         * pin, so we can only write the updated last_seq on the entry that
         * contains whatever the new pin protects.
         *
         * Restated, we can _not_ update last_seq for a given entry if there
         * could be a newer entry open with reservations/pins that have been
         * taken against it.
         *
         * Hence, we want to update/set last_seq on the current journal entry right
         * before we open a new one:
         */
        buf->last_seq           = journal_last_seq(j);
        buf->data->last_seq     = cpu_to_le64(buf->last_seq);
        BUG_ON(buf->last_seq > le64_to_cpu(buf->data->seq));

        __bch2_journal_pin_put(j, le64_to_cpu(buf->data->seq));

        cancel_delayed_work(&j->write_work);

        bch2_journal_space_available(j);

        bch2_journal_buf_put(j, old.idx);
}

void bch2_journal_halt(struct journal *j)
{
        spin_lock(&j->lock);
        __journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL);
        if (!j->err_seq)
                j->err_seq = journal_cur_seq(j);
        journal_wake(j);
        spin_unlock(&j->lock);
}

static bool journal_entry_want_write(struct journal *j)
{
        bool ret = !journal_entry_is_open(j) ||
                journal_cur_seq(j) == journal_last_unwritten_seq(j);

        /* Don't close it yet if we already have a write in flight: */
        if (ret)
                __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
        else if (nr_unwritten_journal_entries(j)) {
                struct journal_buf *buf = journal_cur_buf(j);

                if (!buf->flush_time) {
                        buf->flush_time = local_clock() ?: 1;
                        buf->expires = jiffies;
                }
        }

        return ret;
}

static bool journal_entry_close(struct journal *j)
{
        bool ret;

        spin_lock(&j->lock);
        ret = journal_entry_want_write(j);
        spin_unlock(&j->lock);

        return ret;
}

/*
 * Should _only_ be called from journal_res_get() - when we actually want a
 * journal reservation - journal entry is open means journal is dirty:
 */
static int journal_entry_open(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_buf *buf = j->buf +
                ((journal_cur_seq(j) + 1) & JOURNAL_BUF_MASK);
        union journal_res_state old, new;
        int u64s;
        u64 v;

        lockdep_assert_held(&j->lock);
        BUG_ON(journal_entry_is_open(j));
        BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

        if (j->blocked)
                return JOURNAL_ERR_blocked;

        if (j->cur_entry_error)
                return j->cur_entry_error;

        if (bch2_journal_error(j))
                return JOURNAL_ERR_insufficient_devices; /* -EROFS */

        if (!fifo_free(&j->pin))
                return JOURNAL_ERR_journal_pin_full;

        if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
                return JOURNAL_ERR_max_in_flight;

        BUG_ON(!j->cur_entry_sectors);

        buf->expires            =
                (journal_cur_seq(j) == j->flushed_seq_ondisk
                 ? jiffies
                 : j->last_flush_write) +
                msecs_to_jiffies(c->opts.journal_flush_delay);

        buf->u64s_reserved      = j->entry_u64s_reserved;
        buf->disk_sectors       = j->cur_entry_sectors;
        buf->sectors            = min(buf->disk_sectors, buf->buf_size >> 9);

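        /*
         * Entry capacity in u64s: the usable bytes in the buffer divided by
         * sizeof(u64), less the fixed per-entry header overhead:
         */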
        u64s = (int) (buf->sectors << 9) / sizeof(u64) -
                journal_entry_overhead(j);
        u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);

        if (u64s <= (ssize_t) j->early_journal_entries.nr)
                return JOURNAL_ERR_journal_full;

        if (fifo_empty(&j->pin) && j->reclaim_thread)
                wake_up_process(j->reclaim_thread);

        /*
         * The fifo_push() needs to happen at the same time as j->seq is
         * incremented for journal_last_seq() to be calculated correctly
         */
        atomic64_inc(&j->seq);
        journal_pin_list_init(fifo_push_ref(&j->pin), 1);

        BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf);

        bkey_extent_init(&buf->key);
        buf->noflush    = false;
        buf->must_flush = false;
        buf->separate_flush = false;
        buf->flush_time = 0;

        memset(buf->data, 0, sizeof(*buf->data));
        buf->data->seq  = cpu_to_le64(journal_cur_seq(j));
        buf->data->u64s = 0;

        if (j->early_journal_entries.nr) {
                memcpy(buf->data->_data, j->early_journal_entries.data,
                       j->early_journal_entries.nr * sizeof(u64));
                le32_add_cpu(&buf->data->u64s, j->early_journal_entries.nr);
        }

        /*
         * Must be set before marking the journal entry as open:
         */
        j->cur_entry_u64s = u64s;

        v = atomic64_read(&j->reservations.counter);
        do {
                old.v = new.v = v;

                BUG_ON(old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL);

                new.idx++;
                BUG_ON(journal_state_count(new, new.idx));
                BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_BUF_MASK));

                journal_state_inc(&new);

                /* Handle any already added entries */
                new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
        } while ((v = atomic64_cmpxchg(&j->reservations.counter,
                                       old.v, new.v)) != old.v);

        if (j->res_get_blocked_start)
                bch2_time_stats_update(j->blocked_time,
                                       j->res_get_blocked_start);
        j->res_get_blocked_start = 0;

        mod_delayed_work(c->io_complete_wq,
                         &j->write_work,
                         msecs_to_jiffies(c->opts.journal_flush_delay));
        journal_wake(j);

        if (j->early_journal_entries.nr)
                darray_exit(&j->early_journal_entries);
        return 0;
}

static bool journal_quiesced(struct journal *j)
{
        bool ret = atomic64_read(&j->seq) == j->seq_ondisk;

        if (!ret)
                journal_entry_close(j);
        return ret;
}

static void journal_quiesce(struct journal *j)
{
        wait_event(j->wait, journal_quiesced(j));
}

static void journal_write_work(struct work_struct *work)
{
        struct journal *j = container_of(work, struct journal, write_work.work);
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        long delta;

        spin_lock(&j->lock);
        if (!__journal_entry_is_open(j->reservations))
                goto unlock;

        delta = journal_cur_buf(j)->expires - jiffies;

        if (delta > 0)
                mod_delayed_work(c->io_complete_wq, &j->write_work, delta);
        else
                __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
unlock:
        spin_unlock(&j->lock);
}

static int __journal_res_get(struct journal *j, struct journal_res *res,
                             unsigned flags)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_buf *buf;
        bool can_discard;
        int ret;
retry:
        if (journal_res_get_fast(j, res, flags))
                return 0;

        if (bch2_journal_error(j))
                return -BCH_ERR_erofs_journal_err;

        spin_lock(&j->lock);

        /* check once more in case somebody else shut things down... */
        if (bch2_journal_error(j)) {
                spin_unlock(&j->lock);
                return -BCH_ERR_erofs_journal_err;
        }

        /*
         * Recheck after taking the lock, so we don't race with another thread
         * that just did journal_entry_open() and call journal_entry_close()
         * unnecessarily
         */
        if (journal_res_get_fast(j, res, flags)) {
                spin_unlock(&j->lock);
                return 0;
        }

        if ((flags & JOURNAL_WATERMARK_MASK) < j->watermark) {
                /*
                 * Don't want to close current journal entry, just need to
                 * invoke reclaim:
                 */
                ret = JOURNAL_ERR_journal_full;
                goto unlock;
        }

        /*
         * If we couldn't get a reservation because the current buf filled up,
         * and we had room for a bigger entry on disk, signal that we want to
         * realloc the journal bufs:
         */
        buf = journal_cur_buf(j);
        if (journal_entry_is_open(j) &&
            buf->buf_size >> 9 < buf->disk_sectors &&
            buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
                j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);

        __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
        ret = journal_entry_open(j);

        if (ret == JOURNAL_ERR_max_in_flight)
                trace_and_count(c, journal_entry_full, c);
unlock:
        if ((ret && ret != JOURNAL_ERR_insufficient_devices) &&
            !j->res_get_blocked_start) {
                j->res_get_blocked_start = local_clock() ?: 1;
                trace_and_count(c, journal_full, c);
        }

        can_discard = j->can_discard;
        spin_unlock(&j->lock);

        if (!ret)
                goto retry;
        if (journal_error_check_stuck(j, ret, flags))
                ret = -BCH_ERR_journal_res_get_blocked;

        /*
         * Journal is full - can't rely on reclaim from work item due to
         * freezing:
         */
        if ((ret == JOURNAL_ERR_journal_full ||
             ret == JOURNAL_ERR_journal_pin_full) &&
            !(flags & JOURNAL_RES_GET_NONBLOCK)) {
                if (can_discard) {
                        bch2_journal_do_discards(j);
                        goto retry;
                }

                if (mutex_trylock(&j->reclaim_lock)) {
                        bch2_journal_reclaim(j);
                        mutex_unlock(&j->reclaim_lock);
                }
        }

        return ret == JOURNAL_ERR_insufficient_devices
                ? -EROFS
                : -BCH_ERR_journal_res_get_blocked;
}

/*
 * Essentially the entry point to the journaling code. When bcachefs is doing
 * a btree insert, it calls this function to get the current journal write.
 * The journal write is the structure used to set up journal writes. The calling
 * function will then add its keys to the structure, queuing them for the next
 * write.
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks.
 */
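/*
 * A sketch of the typical reservation lifecycle, modelled on
 * bch2_journal_meta() below:
 *
 *      struct journal_res res = { 0 };
 *      int ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
 *      if (ret)
 *              return ret;
 *      ...add keys to the open entry via the reservation...
 *      bch2_journal_res_put(j, &res);
 */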
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
                                  unsigned flags)
{
        int ret;

        closure_wait_event(&j->async_wait,
                   (ret = __journal_res_get(j, res, flags)) !=
                   -BCH_ERR_journal_res_get_blocked ||
                   (flags & JOURNAL_RES_GET_NONBLOCK));
        return ret;
}

/* journal_preres: */

static bool journal_preres_available(struct journal *j,
                                     struct journal_preres *res,
                                     unsigned new_u64s,
                                     unsigned flags)
{
        bool ret = bch2_journal_preres_get_fast(j, res, new_u64s, flags, true);

        if (!ret && mutex_trylock(&j->reclaim_lock)) {
                bch2_journal_reclaim(j);
                mutex_unlock(&j->reclaim_lock);
        }

        return ret;
}

int __bch2_journal_preres_get(struct journal *j,
                              struct journal_preres *res,
                              unsigned new_u64s,
                              unsigned flags)
{
        int ret;

        closure_wait_event(&j->preres_wait,
                   (ret = bch2_journal_error(j)) ||
                   journal_preres_available(j, res, new_u64s, flags));
        return ret;
}

/* journal_entry_res: */

void bch2_journal_entry_res_resize(struct journal *j,
                                   struct journal_entry_res *res,
                                   unsigned new_u64s)
{
        union journal_res_state state;
        int d = new_u64s - res->u64s;

        spin_lock(&j->lock);

        j->entry_u64s_reserved += d;
        if (d <= 0)
                goto out;

        j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
        smp_mb();
        state = READ_ONCE(j->reservations);

        if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
            state.cur_entry_offset > j->cur_entry_u64s) {
                j->cur_entry_u64s += d;
                /*
                 * Not enough room in current journal entry, have to flush it:
                 */
                __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
        } else {
                journal_cur_buf(j)->u64s_reserved += d;
        }
out:
        spin_unlock(&j->lock);
        res->u64s += d;
}

/* journal flushing: */

/**
 * bch2_journal_flush_seq_async - wait for a journal entry to be written
 *
 * Like bch2_journal_wait_on_seq, except that it triggers a write immediately if
 * necessary
 */
int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
                                 struct closure *parent)
{
        struct journal_buf *buf;
        int ret = 0;

        if (seq <= j->flushed_seq_ondisk)
                return 1;

        spin_lock(&j->lock);

        if (WARN_ONCE(seq > journal_cur_seq(j),
                      "requested to flush journal seq %llu, but currently at %llu",
                      seq, journal_cur_seq(j)))
                goto out;

        /* Recheck under lock: */
        if (j->err_seq && seq >= j->err_seq) {
                ret = -EIO;
                goto out;
        }

        if (seq <= j->flushed_seq_ondisk) {
                ret = 1;
                goto out;
        }

        /* if seq was written, but not flushed - flush a newer one instead */
        seq = max(seq, journal_last_unwritten_seq(j));

recheck_need_open:
        if (seq > journal_cur_seq(j)) {
                struct journal_res res = { 0 };

                if (journal_entry_is_open(j))
                        __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);

                spin_unlock(&j->lock);

                ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
                if (ret)
                        return ret;

                seq = res.seq;
                buf = j->buf + (seq & JOURNAL_BUF_MASK);
                buf->must_flush = true;

                if (!buf->flush_time) {
                        buf->flush_time = local_clock() ?: 1;
                        buf->expires = jiffies;
                }

                if (parent && !closure_wait(&buf->wait, parent))
                        BUG();

                bch2_journal_res_put(j, &res);

                spin_lock(&j->lock);
                goto want_write;
        }

        /*
         * if write was kicked off without a flush, flush the next sequence
         * number instead
         */
        buf = journal_seq_to_buf(j, seq);
        if (buf->noflush) {
                seq++;
                goto recheck_need_open;
        }

        buf->must_flush = true;

        if (parent && !closure_wait(&buf->wait, parent))
                BUG();
want_write:
        if (seq == journal_cur_seq(j))
                journal_entry_want_write(j);
out:
        spin_unlock(&j->lock);
        return ret;
}

int bch2_journal_flush_seq(struct journal *j, u64 seq)
{
        u64 start_time = local_clock();
        int ret, ret2;

        /*
         * Don't update time_stats when @seq is already flushed:
         */
        if (seq <= j->flushed_seq_ondisk)
                return 0;

        ret = wait_event_interruptible(j->wait, (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)));

        if (!ret)
                bch2_time_stats_update(j->flush_seq_time, start_time);

        return ret ?: ret2 < 0 ? ret2 : 0;
}

/*
 * bch2_journal_flush_async - if there is an open journal entry, or a journal
 * still being written, write it and wait for the write to complete
 */
void bch2_journal_flush_async(struct journal *j, struct closure *parent)
{
        bch2_journal_flush_seq_async(j, atomic64_read(&j->seq), parent);
}

int bch2_journal_flush(struct journal *j)
{
        return bch2_journal_flush_seq(j, atomic64_read(&j->seq));
}

/*
 * bch2_journal_noflush_seq - tell the journal not to issue any flushes before
 * @seq
 */
bool bch2_journal_noflush_seq(struct journal *j, u64 seq)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        u64 unwritten_seq;
        bool ret = false;

        if (!(c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush)))
                return false;

        if (seq <= c->journal.flushed_seq_ondisk)
                return false;

        spin_lock(&j->lock);
        if (seq <= c->journal.flushed_seq_ondisk)
                goto out;

        for (unwritten_seq = journal_last_unwritten_seq(j);
             unwritten_seq < seq;
             unwritten_seq++) {
                struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq);

                /* journal write is already in flight, and was a flush write: */
                if (unwritten_seq == journal_last_unwritten_seq(j) && !buf->noflush)
                        goto out;

                buf->noflush = true;
        }

        ret = true;
out:
        spin_unlock(&j->lock);
        return ret;
}

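/*
 * Force a flush write: take a reservation on the current journal entry without
 * adding anything to it, mark it must_flush, and wait for it to hit disk -
 * used e.g. at shutdown to get the clock hands written out:
 */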
int bch2_journal_meta(struct journal *j)
{
        struct journal_buf *buf;
        struct journal_res res;
        int ret;

        memset(&res, 0, sizeof(res));

        ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
        if (ret)
                return ret;

        buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
        buf->must_flush = true;

        if (!buf->flush_time) {
                buf->flush_time = local_clock() ?: 1;
                buf->expires = jiffies;
        }

        bch2_journal_res_put(j, &res);

        return bch2_journal_flush_seq(j, res.seq);
}

/* block/unblock the journal: */

void bch2_journal_unblock(struct journal *j)
{
        spin_lock(&j->lock);
        j->blocked--;
        spin_unlock(&j->lock);

        journal_wake(j);
}

void bch2_journal_block(struct journal *j)
{
        spin_lock(&j->lock);
        j->blocked++;
        spin_unlock(&j->lock);

        journal_quiesce(j);
}

/* allocate journal on a device: */

static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
                                         bool new_fs, struct closure *cl)
{
        struct bch_fs *c = ca->fs;
        struct journal_device *ja = &ca->journal;
        u64 *new_bucket_seq = NULL, *new_buckets = NULL;
        struct open_bucket **ob = NULL;
        long *bu = NULL;
        unsigned i, pos, nr_got = 0, nr_want = nr - ja->nr;
        int ret = 0;

        BUG_ON(nr <= ja->nr);

        bu              = kcalloc(nr_want, sizeof(*bu), GFP_KERNEL);
        ob              = kcalloc(nr_want, sizeof(*ob), GFP_KERNEL);
        new_buckets     = kcalloc(nr, sizeof(u64), GFP_KERNEL);
        new_bucket_seq  = kcalloc(nr, sizeof(u64), GFP_KERNEL);
        if (!bu || !ob || !new_buckets || !new_bucket_seq) {
                ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
                goto err_free;
        }

        for (nr_got = 0; nr_got < nr_want; nr_got++) {
                if (new_fs) {
                        bu[nr_got] = bch2_bucket_alloc_new_fs(ca);
                        if (bu[nr_got] < 0) {
                                ret = -BCH_ERR_ENOSPC_bucket_alloc;
                                break;
                        }
                } else {
                        ob[nr_got] = bch2_bucket_alloc(c, ca, RESERVE_none, cl);
                        ret = PTR_ERR_OR_ZERO(ob[nr_got]);
                        if (ret)
                                break;

                        ret = bch2_trans_run(c,
                                bch2_trans_mark_metadata_bucket(&trans, ca,
                                                ob[nr_got]->bucket, BCH_DATA_journal,
                                                ca->mi.bucket_size));
                        if (ret) {
                                bch2_open_bucket_put(c, ob[nr_got]);
                                bch_err(c, "error marking new journal buckets: %s", bch2_err_str(ret));
                                break;
                        }

                        bu[nr_got] = ob[nr_got]->bucket;
                }
        }

        if (!nr_got)
                goto err_free;

        /* Don't return an error if we successfully allocated some buckets: */
        ret = 0;

        if (c) {
                bch2_journal_flush_all_pins(&c->journal);
                bch2_journal_block(&c->journal);
                mutex_lock(&c->sb_lock);
        }

        memcpy(new_buckets,     ja->buckets,    ja->nr * sizeof(u64));
        memcpy(new_bucket_seq,  ja->bucket_seq, ja->nr * sizeof(u64));

        BUG_ON(ja->discard_idx > ja->nr);

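        /*
         * Insert the new buckets at the current discard pointer (or at the
         * end, if nothing is waiting to be discarded); the circular indices
         * are shifted below so existing entries keep their meaning:
         */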
        pos = ja->discard_idx ?: ja->nr;

        memmove(new_buckets + pos + nr_got,
                new_buckets + pos,
                sizeof(new_buckets[0]) * (ja->nr - pos));
        memmove(new_bucket_seq + pos + nr_got,
                new_bucket_seq + pos,
                sizeof(new_bucket_seq[0]) * (ja->nr - pos));

        for (i = 0; i < nr_got; i++) {
                new_buckets[pos + i] = bu[i];
                new_bucket_seq[pos + i] = 0;
        }

        nr = ja->nr + nr_got;

        ret = bch2_journal_buckets_to_sb(c, ca, new_buckets, nr);
        if (ret)
                goto err_unblock;

        if (!new_fs)
                bch2_write_super(c);

        /* Commit: */
        if (c)
                spin_lock(&c->journal.lock);

        swap(new_buckets,       ja->buckets);
        swap(new_bucket_seq,    ja->bucket_seq);
        ja->nr = nr;

        if (pos <= ja->discard_idx)
                ja->discard_idx = (ja->discard_idx + nr_got) % ja->nr;
        if (pos <= ja->dirty_idx_ondisk)
                ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + nr_got) % ja->nr;
        if (pos <= ja->dirty_idx)
                ja->dirty_idx = (ja->dirty_idx + nr_got) % ja->nr;
        if (pos <= ja->cur_idx)
                ja->cur_idx = (ja->cur_idx + nr_got) % ja->nr;

        if (c)
                spin_unlock(&c->journal.lock);
err_unblock:
        if (c) {
                bch2_journal_unblock(&c->journal);
                mutex_unlock(&c->sb_lock);
        }

        if (ret && !new_fs)
                for (i = 0; i < nr_got; i++)
                        bch2_trans_run(c,
                                bch2_trans_mark_metadata_bucket(&trans, ca,
                                                bu[i], BCH_DATA_free, 0));
err_free:
        if (!new_fs)
                for (i = 0; i < nr_got; i++)
                        bch2_open_bucket_put(c, ob[i]);

        kfree(new_bucket_seq);
        kfree(new_buckets);
        kfree(ob);
        kfree(bu);
        return ret;
}

/*
 * Allocate more journal space at runtime - not currently making use of it, but
 * the code works:
 */
int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
                                unsigned nr)
{
        struct journal_device *ja = &ca->journal;
        struct closure cl;
        int ret = 0;

        closure_init_stack(&cl);

        down_write(&c->state_lock);

        /* don't handle reducing nr of buckets yet: */
        if (nr < ja->nr)
                goto unlock;

        while (ja->nr < nr) {
                struct disk_reservation disk_res = { 0, 0 };

                /*
                 * note: journal buckets aren't really counted as _sectors_ used yet, so
                 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
                 * when space used goes up without a reservation - but we do need the
                 * reservation to ensure we'll actually be able to allocate:
                 *
                 * XXX: that's not right, disk reservations only ensure a
                 * filesystem-wide allocation will succeed, this is a device
                 * specific allocation - we can hang here:
                 */

                ret = bch2_disk_reservation_get(c, &disk_res,
                                                bucket_to_sector(ca, nr - ja->nr), 1, 0);
                if (ret)
                        break;

                ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);

                bch2_disk_reservation_put(c, &disk_res);

                closure_sync(&cl);

                if (ret && ret != -BCH_ERR_bucket_alloc_blocked)
                        break;
        }

        if (ret)
                bch_err(c, "%s: err %s", __func__, bch2_err_str(ret));
unlock:
        up_write(&c->state_lock);
        return ret;
}

int bch2_dev_journal_alloc(struct bch_dev *ca)
{
        unsigned nr;

        if (dynamic_fault("bcachefs:add:journal_alloc"))
                return -BCH_ERR_ENOMEM_set_nr_journal_buckets;

        /* 1/128th of the device by default: */
        nr = ca->mi.nbuckets >> 7;

        /*
         * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
         * is smaller:
         */
        nr = clamp_t(unsigned, nr,
                     BCH_JOURNAL_BUCKETS_MIN,
                     min(1 << 13,
                         (1 << 24) / ca->mi.bucket_size));

        return __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
}

/* startup/shutdown: */

static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
{
        bool ret = false;
        u64 seq;

        spin_lock(&j->lock);
        for (seq = journal_last_unwritten_seq(j);
             seq <= journal_cur_seq(j) && !ret;
             seq++) {
                struct journal_buf *buf = journal_seq_to_buf(j, seq);

                if (bch2_bkey_has_device_c(bkey_i_to_s_c(&buf->key), dev_idx))
                        ret = true;
        }
        spin_unlock(&j->lock);

        return ret;
}

void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
{
        wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
}

void bch2_fs_journal_stop(struct journal *j)
{
        bch2_journal_reclaim_stop(j);
        bch2_journal_flush_all_pins(j);

        wait_event(j->wait, journal_entry_close(j));

        /*
         * Always write a new journal entry, to make sure the clock hands are up
         * to date (and match the superblock)
         */
        bch2_journal_meta(j);

        journal_quiesce(j);

        BUG_ON(!bch2_journal_error(j) &&
               test_bit(JOURNAL_REPLAY_DONE, &j->flags) &&
               j->last_empty_seq != journal_cur_seq(j));

        cancel_delayed_work_sync(&j->write_work);
}

int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_entry_pin_list *p;
        struct journal_replay *i, **_i;
        struct genradix_iter iter;
        bool had_entries = false;
        unsigned ptr;
        u64 last_seq = cur_seq, nr, seq;

        genradix_for_each_reverse(&c->journal_entries, iter, _i) {
                i = *_i;

                if (!i || i->ignore)
                        continue;

                last_seq = le64_to_cpu(i->j.last_seq);
                break;
        }

        nr = cur_seq - last_seq;

        if (nr + 1 > j->pin.size) {
                free_fifo(&j->pin);
                init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
                if (!j->pin.data) {
                        bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
                        return -BCH_ERR_ENOMEM_journal_pin_fifo;
                }
        }

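        /*
         * Seed the in-memory sequence number state from what recovery found:
         * entries below last_seq have been reclaimed, and everything up to
         * cur_seq - 1 is treated as already written and flushed on disk:
         */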
        j->replay_journal_seq   = last_seq;
        j->replay_journal_seq_end = cur_seq;
        j->last_seq_ondisk      = last_seq;
        j->flushed_seq_ondisk   = cur_seq - 1;
        j->seq_ondisk           = cur_seq - 1;
        j->pin.front            = last_seq;
        j->pin.back             = cur_seq;
        atomic64_set(&j->seq, cur_seq - 1);

        fifo_for_each_entry_ptr(p, &j->pin, seq)
                journal_pin_list_init(p, 1);

        genradix_for_each(&c->journal_entries, iter, _i) {
                i = *_i;

                if (!i || i->ignore)
                        continue;

                seq = le64_to_cpu(i->j.seq);
                BUG_ON(seq >= cur_seq);

                if (seq < last_seq)
                        continue;

                if (journal_entry_empty(&i->j))
                        j->last_empty_seq = le64_to_cpu(i->j.seq);

                p = journal_seq_pin(j, seq);

                p->devs.nr = 0;
                for (ptr = 0; ptr < i->nr_ptrs; ptr++)
                        bch2_dev_list_add_dev(&p->devs, i->ptrs[ptr].dev);

                had_entries = true;
        }

        if (!had_entries)
                j->last_empty_seq = cur_seq;

        spin_lock(&j->lock);

        set_bit(JOURNAL_STARTED, &j->flags);
        j->last_flush_write = jiffies;

        j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
        j->reservations.unwritten_idx++;

        c->last_bucket_seq_cleanup = journal_cur_seq(j);

        bch2_journal_space_available(j);
        spin_unlock(&j->lock);

        return bch2_journal_reclaim_start(j);
}

/* init/exit: */

void bch2_dev_journal_exit(struct bch_dev *ca)
{
        kfree(ca->journal.bio);
        kfree(ca->journal.buckets);
        kfree(ca->journal.bucket_seq);

        ca->journal.bio         = NULL;
        ca->journal.buckets     = NULL;
        ca->journal.bucket_seq  = NULL;
}

int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
        struct journal_device *ja = &ca->journal;
        struct bch_sb_field_journal *journal_buckets =
                bch2_sb_get_journal(sb);
        struct bch_sb_field_journal_v2 *journal_buckets_v2 =
                bch2_sb_get_journal_v2(sb);
        unsigned i, nr_bvecs;

        ja->nr = 0;

        if (journal_buckets_v2) {
                unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);

                for (i = 0; i < nr; i++)
                        ja->nr += le64_to_cpu(journal_buckets_v2->d[i].nr);
        } else if (journal_buckets) {
                ja->nr = bch2_nr_journal_buckets(journal_buckets);
        }

        ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
        if (!ja->bucket_seq)
                return -BCH_ERR_ENOMEM_dev_journal_init;

        nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);

        ca->journal.bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
        if (!ca->journal.bio)
                return -BCH_ERR_ENOMEM_dev_journal_init;

        bio_init(ca->journal.bio, NULL, ca->journal.bio->bi_inline_vecs, nr_bvecs, 0);

        ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
        if (!ja->buckets)
                return -BCH_ERR_ENOMEM_dev_journal_init;

        if (journal_buckets_v2) {
                unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
                unsigned j, dst = 0;

                for (i = 0; i < nr; i++)
                        for (j = 0; j < le64_to_cpu(journal_buckets_v2->d[i].nr); j++)
                                ja->buckets[dst++] =
                                        le64_to_cpu(journal_buckets_v2->d[i].start) + j;
        } else if (journal_buckets) {
                for (i = 0; i < ja->nr; i++)
                        ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
        }

        return 0;
}

void bch2_fs_journal_exit(struct journal *j)
{
        unsigned i;

        darray_exit(&j->early_journal_entries);

        for (i = 0; i < ARRAY_SIZE(j->buf); i++)
                kvpfree(j->buf[i].data, j->buf[i].buf_size);
        free_fifo(&j->pin);
}

int bch2_fs_journal_init(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        static struct lock_class_key res_key;
        unsigned i;
        int ret = 0;

        pr_verbose_init(c->opts, "");

        spin_lock_init(&j->lock);
        spin_lock_init(&j->err_lock);
        init_waitqueue_head(&j->wait);
        INIT_DELAYED_WORK(&j->write_work, journal_write_work);
        init_waitqueue_head(&j->reclaim_wait);
        init_waitqueue_head(&j->pin_flush_wait);
        mutex_init(&j->reclaim_lock);
        mutex_init(&j->discard_lock);

        lockdep_init_map(&j->res_map, "journal res", &res_key, 0);

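        /* The journal starts out closed; journal_entry_open() opens it: */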
        atomic64_set(&j->reservations.counter,
                ((union journal_res_state)
                 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);

        if (!init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) {
                ret = -BCH_ERR_ENOMEM_journal_pin_fifo;
                goto out;
        }

        for (i = 0; i < ARRAY_SIZE(j->buf); i++) {
                j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
                j->buf[i].data = kvpmalloc(j->buf[i].buf_size, GFP_KERNEL);
                if (!j->buf[i].data) {
                        ret = -BCH_ERR_ENOMEM_journal_buf;
                        goto out;
                }
        }

        j->pin.front = j->pin.back = 1;
out:
        pr_verbose_init(c->opts, "ret %i", ret);
        return ret;
}

/* debug: */

void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        union journal_res_state s;
        struct bch_dev *ca;
        unsigned long now = jiffies;
        u64 seq;
        unsigned i;

        if (!out->nr_tabstops)
                printbuf_tabstop_push(out, 24);
        out->atomic++;

        rcu_read_lock();
        s = READ_ONCE(j->reservations);

        prt_printf(out, "dirty journal entries:\t%llu/%llu\n",  fifo_used(&j->pin), j->pin.size);
        prt_printf(out, "seq:\t\t\t%llu\n",                     journal_cur_seq(j));
        prt_printf(out, "seq_ondisk:\t\t%llu\n",                j->seq_ondisk);
        prt_printf(out, "last_seq:\t\t%llu\n",          journal_last_seq(j));
        prt_printf(out, "last_seq_ondisk:\t%llu\n",             j->last_seq_ondisk);
        prt_printf(out, "flushed_seq_ondisk:\t%llu\n",  j->flushed_seq_ondisk);
        prt_printf(out, "prereserved:\t\t%u/%u\n",              j->prereserved.reserved, j->prereserved.remaining);
        prt_printf(out, "watermark:\t\t%s\n",           bch2_journal_watermarks[j->watermark]);
        prt_printf(out, "each entry reserved:\t%u\n",   j->entry_u64s_reserved);
        prt_printf(out, "nr flush writes:\t%llu\n",             j->nr_flush_writes);
        prt_printf(out, "nr noflush writes:\t%llu\n",   j->nr_noflush_writes);
        prt_printf(out, "nr direct reclaim:\t%llu\n",   j->nr_direct_reclaim);
        prt_printf(out, "nr background reclaim:\t%llu\n",       j->nr_background_reclaim);
        prt_printf(out, "reclaim kicked:\t\t%u\n",              j->reclaim_kicked);
        prt_printf(out, "reclaim runs in:\t%u ms\n",    time_after(j->next_reclaim, now)
               ? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
        prt_printf(out, "current entry sectors:\t%u\n", j->cur_entry_sectors);
        prt_printf(out, "current entry error:\t%s\n",   bch2_journal_errors[j->cur_entry_error]);
        prt_printf(out, "current entry:\t\t");

        switch (s.cur_entry_offset) {
        case JOURNAL_ENTRY_ERROR_VAL:
                prt_printf(out, "error");
                break;
        case JOURNAL_ENTRY_CLOSED_VAL:
                prt_printf(out, "closed");
                break;
        default:
                prt_printf(out, "%u/%u", s.cur_entry_offset, j->cur_entry_u64s);
                break;
        }

        prt_newline(out);

        for (seq = journal_cur_seq(j);
             seq >= journal_last_unwritten_seq(j);
             --seq) {
                i = seq & JOURNAL_BUF_MASK;

                prt_printf(out, "unwritten entry:");
                prt_tab(out);
                prt_printf(out, "%llu", seq);
                prt_newline(out);
                printbuf_indent_add(out, 2);

                prt_printf(out, "refcount:");
                prt_tab(out);
                prt_printf(out, "%u", journal_state_count(s, i));
                prt_newline(out);

                prt_printf(out, "sectors:");
                prt_tab(out);
                prt_printf(out, "%u", j->buf[i].sectors);
                prt_newline(out);
1336                 prt_printf(out, "expires");
                prt_tab(out);
                prt_printf(out, "%li jiffies", j->buf[i].expires - jiffies);
                prt_newline(out);

                printbuf_indent_sub(out, 2);
        }

        prt_printf(out,
               "replay done:\t\t%i\n",
               test_bit(JOURNAL_REPLAY_DONE,    &j->flags));

        prt_printf(out, "space:\n");
        prt_printf(out, "\tdiscarded\t%u:%u\n",
               j->space[journal_space_discarded].next_entry,
               j->space[journal_space_discarded].total);
        prt_printf(out, "\tclean ondisk\t%u:%u\n",
               j->space[journal_space_clean_ondisk].next_entry,
               j->space[journal_space_clean_ondisk].total);
        prt_printf(out, "\tclean\t\t%u:%u\n",
               j->space[journal_space_clean].next_entry,
               j->space[journal_space_clean].total);
        prt_printf(out, "\ttotal\t\t%u:%u\n",
               j->space[journal_space_total].next_entry,
               j->space[journal_space_total].total);

        for_each_member_device_rcu(ca, c, i,
                                   &c->rw_devs[BCH_DATA_journal]) {
                struct journal_device *ja = &ca->journal;

                if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
                        continue;

                if (!ja->nr)
                        continue;

                prt_printf(out, "dev %u:\n",            i);
                prt_printf(out, "\tnr\t\t%u\n",         ja->nr);
                prt_printf(out, "\tbucket size\t%u\n",  ca->mi.bucket_size);
                prt_printf(out, "\tavailable\t%u:%u\n", bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
                prt_printf(out, "\tdiscard_idx\t%u\n",  ja->discard_idx);
                prt_printf(out, "\tdirty_ondisk\t%u (seq %llu)\n", ja->dirty_idx_ondisk,        ja->bucket_seq[ja->dirty_idx_ondisk]);
                prt_printf(out, "\tdirty_idx\t%u (seq %llu)\n", ja->dirty_idx,          ja->bucket_seq[ja->dirty_idx]);
                prt_printf(out, "\tcur_idx\t\t%u (seq %llu)\n", ja->cur_idx,            ja->bucket_seq[ja->cur_idx]);
        }

        rcu_read_unlock();

        --out->atomic;
}

void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
        spin_lock(&j->lock);
        __bch2_journal_debug_to_text(out, j);
        spin_unlock(&j->lock);
}

bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq)
{
        struct journal_entry_pin_list *pin_list;
        struct journal_entry_pin *pin;
        unsigned i;

        spin_lock(&j->lock);
        *seq = max(*seq, j->pin.front);

        if (*seq >= j->pin.back) {
                spin_unlock(&j->lock);
                return true;
        }

        out->atomic++;

        pin_list = journal_seq_pin(j, *seq);

        prt_printf(out, "%llu: count %u", *seq, atomic_read(&pin_list->count));
        prt_newline(out);
        printbuf_indent_add(out, 2);

        for (i = 0; i < ARRAY_SIZE(pin_list->list); i++)
                list_for_each_entry(pin, &pin_list->list[i], list) {
                        prt_printf(out, "\t%px %ps", pin, pin->flush);
                        prt_newline(out);
                }

        if (!list_empty(&pin_list->flushed)) {
                prt_printf(out, "flushed:");
                prt_newline(out);
        }

        list_for_each_entry(pin, &pin_list->flushed, list) {
                prt_printf(out, "\t%px %ps", pin, pin->flush);
                prt_newline(out);
        }

        printbuf_indent_sub(out, 2);

        --out->atomic;
        spin_unlock(&j->lock);

        return false;
}

void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
{
        u64 seq = 0;

        while (!bch2_journal_seq_pins_to_text(out, j, &seq))
                seq++;
}