1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * bcachefs journalling code, for btree insertions
4  *
5  * Copyright 2012 Google, Inc.
6  */
7
8 #include "bcachefs.h"
9 #include "alloc_foreground.h"
10 #include "bkey_methods.h"
11 #include "btree_gc.h"
12 #include "btree_update.h"
13 #include "buckets.h"
14 #include "error.h"
15 #include "journal.h"
16 #include "journal_io.h"
17 #include "journal_reclaim.h"
18 #include "journal_sb.h"
19 #include "journal_seq_blacklist.h"
20 #include "trace.h"
21
22 static const char * const bch2_journal_errors[] = {
23 #define x(n)    #n,
24         JOURNAL_ERRORS()
25 #undef x
26         NULL
27 };
28
29 static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
30 {
31         return seq > j->seq_ondisk;
32 }
33
34 static bool __journal_entry_is_open(union journal_res_state state)
35 {
36         return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
37 }
38
39 static inline unsigned nr_unwritten_journal_entries(struct journal *j)
40 {
41         return atomic64_read(&j->seq) - j->seq_ondisk;
42 }
43
44 static bool journal_entry_is_open(struct journal *j)
45 {
46         return __journal_entry_is_open(j->reservations);
47 }
48
49 static inline struct journal_buf *
50 journal_seq_to_buf(struct journal *j, u64 seq)
51 {
52         struct journal_buf *buf = NULL;
53
54         EBUG_ON(seq > journal_cur_seq(j));
55
56         if (journal_seq_unwritten(j, seq)) {
57                 buf = j->buf + (seq & JOURNAL_BUF_MASK);
58                 EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
59         }
60         return buf;
61 }
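/*
 * Illustrative example (editor's sketch, not in the original source): the
 * journal keeps its most recent entries in a small ring of buffers indexed
 * by the low bits of the sequence number. Assuming a 4-buffer ring
 * (JOURNAL_BUF_MASK == 3), with j->seq_ondisk == 10 and j->seq == 12:
 *
 *	seq 11 is unwritten, in j->buf[11 & JOURNAL_BUF_MASK] == j->buf[3]
 *	seq 12 is unwritten, in j->buf[12 & JOURNAL_BUF_MASK] == j->buf[0]
 *	seqs <= 10 are on disk: journal_seq_to_buf() returns NULL for them,
 *	and nr_unwritten_journal_entries() == 2
 */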
62
63 static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
64 {
65         unsigned i;
66
67         for (i = 0; i < ARRAY_SIZE(p->list); i++)
68                 INIT_LIST_HEAD(&p->list[i]);
69         INIT_LIST_HEAD(&p->flushed);
70         atomic_set(&p->count, count);
71         p->devs.nr = 0;
72 }
73
74 /*
75  * Detect stuck journal conditions and trigger shutdown. Technically the journal
76  * can end up stuck for a variety of reasons, such as a blocked I/O, journal
77  * reservation lockup, etc. Since this is a fatal error with potentially
78  * unpredictable characteristics, we want to be fairly conservative before we
79  * decide to shut things down.
80  *
81  * Consider the journal stuck when it appears full with no ability to commit
82  * btree transactions, discard journal buckets, or acquire a priority
83  * (reserved watermark) reservation.
84  */
85 static inline bool
86 journal_error_check_stuck(struct journal *j, int error, unsigned flags)
87 {
88         struct bch_fs *c = container_of(j, struct bch_fs, journal);
89         bool stuck = false;
90         struct printbuf buf = PRINTBUF;
91
92         if (!(error == JOURNAL_ERR_journal_full ||
93               error == JOURNAL_ERR_journal_pin_full) ||
94             nr_unwritten_journal_entries(j) ||
95             (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim)
96                 return stuck;
97
98         spin_lock(&j->lock);
99
100         if (j->can_discard) {
101                 spin_unlock(&j->lock);
102                 return stuck;
103         }
104
105         stuck = true;
106
107         /*
108          * The journal shutdown path will set ->err_seq, but do it here first to
109          * serialize against concurrent failures and avoid duplicate error
110          * reports.
111          */
112         if (j->err_seq) {
113                 spin_unlock(&j->lock);
114                 return stuck;
115         }
116         j->err_seq = journal_cur_seq(j);
117         spin_unlock(&j->lock);
118
119         bch_err(c, "Journal stuck! Have a pre-reservation but journal full (error %s)",
120                 bch2_journal_errors[error]);
121         bch2_journal_debug_to_text(&buf, j);
122         bch_err(c, "%s", buf.buf);
123
124         printbuf_reset(&buf);
125         bch2_journal_pins_to_text(&buf, j);
126         bch_err(c, "Journal pins:\n%s", buf.buf);
127         printbuf_exit(&buf);
128
129         bch2_fatal_error(c);
130         dump_stack();
131
132         return stuck;
133 }
134
135 /*
136  * Final processing when the last reference of a journal buffer has been
137  * dropped. Drop the pin list reference acquired at journal entry open and write
138  * the buffer, if requested.
139  */
140 void bch2_journal_buf_put_final(struct journal *j, u64 seq, bool write)
141 {
142         struct bch_fs *c = container_of(j, struct bch_fs, journal);
143
144         lockdep_assert_held(&j->lock);
145
146         if (__bch2_journal_pin_put(j, seq))
147                 bch2_journal_reclaim_fast(j);
148         if (write)
149                 closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
150 }
151
152 /*
153  * Close the current journal entry (or mark it as an error):
154  *
155  * We don't close a journal_buf until the next journal_buf is finished writing,
156  * and can be opened again - this also initializes the next journal_buf:
157  */
158 static void __journal_entry_close(struct journal *j, unsigned closed_val)
159 {
160         struct bch_fs *c = container_of(j, struct bch_fs, journal);
161         struct journal_buf *buf = journal_cur_buf(j);
162         union journal_res_state old, new;
163         u64 v = atomic64_read(&j->reservations.counter);
164         unsigned sectors;
165
166         BUG_ON(closed_val != JOURNAL_ENTRY_CLOSED_VAL &&
167                closed_val != JOURNAL_ENTRY_ERROR_VAL);
168
169         lockdep_assert_held(&j->lock);
170
171         do {
172                 old.v = new.v = v;
173                 new.cur_entry_offset = closed_val;
174
175                 if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ||
176                     old.cur_entry_offset == new.cur_entry_offset)
177                         return;
178         } while ((v = atomic64_cmpxchg(&j->reservations.counter,
179                                        old.v, new.v)) != old.v);
180
181         if (!__journal_entry_is_open(old))
182                 return;
183
184         /* Close out old buffer: */
185         buf->data->u64s         = cpu_to_le32(old.cur_entry_offset);
186
187         sectors = vstruct_blocks_plus(buf->data, c->block_bits,
188                                       buf->u64s_reserved) << c->block_bits;
189         BUG_ON(sectors > buf->sectors);
190         buf->sectors = sectors;
191
192         /*
193          * We have to set last_seq here, _before_ opening a new journal entry:
194          *
195          * A thread may replace an old pin with a new pin on its current
196          * journal reservation - the expectation being that the journal will
197          * contain either what the old pin protected or what the new pin
198          * protects.
199          *
200          * After the old pin is dropped journal_last_seq() won't include the old
201          * pin, so we can only write the updated last_seq on the entry that
202          * contains whatever the new pin protects.
203          *
204          * Restated, we can _not_ update last_seq for a given entry if there
205          * could be a newer entry open with reservations/pins that have been
206          * taken against it.
207          *
208          * Hence, we want to update/set last_seq on the current journal entry
209          * right before we open a new one (a worked example follows this function):
210          */
211         buf->last_seq           = journal_last_seq(j);
212         buf->data->last_seq     = cpu_to_le64(buf->last_seq);
213         BUG_ON(buf->last_seq > le64_to_cpu(buf->data->seq));
214
215         cancel_delayed_work(&j->write_work);
216
217         bch2_journal_space_available(j);
218
219         __bch2_journal_buf_put(j, old.idx, le64_to_cpu(buf->data->seq));
220 }
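/*
 * Worked example for the last_seq rule above (editor's sketch): a thread
 * holding a pin on seq 5 drops it and re-pins at seq 7, its current (still
 * open) journal reservation. journal_last_seq() no longer includes seq 5,
 * but on disk the journal must contain either what the old pin protected
 * or what the new pin protects - and the latter only exists once entry 7
 * is written. So the advanced last_seq may only go out with entry 7
 * itself; it's latched here as the entry closes, never stamped on an
 * earlier entry while a newer one is still open for reservations.
 */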
221
222 void bch2_journal_halt(struct journal *j)
223 {
224         spin_lock(&j->lock);
225         __journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL);
226         if (!j->err_seq)
227                 j->err_seq = journal_cur_seq(j);
228         journal_wake(j);
229         spin_unlock(&j->lock);
230 }
231
232 static bool journal_entry_want_write(struct journal *j)
233 {
234         bool ret = !journal_entry_is_open(j) ||
235                 journal_cur_seq(j) == journal_last_unwritten_seq(j);
236
237         /* Don't close it yet if we already have a write in flight: */
238         if (ret)
239                 __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
240         else if (nr_unwritten_journal_entries(j)) {
241                 struct journal_buf *buf = journal_cur_buf(j);
242
243                 if (!buf->flush_time) {
244                         buf->flush_time = local_clock() ?: 1;
245                         buf->expires = jiffies;
246                 }
247         }
248
249         return ret;
250 }
251
252 static bool journal_entry_close(struct journal *j)
253 {
254         bool ret;
255
256         spin_lock(&j->lock);
257         ret = journal_entry_want_write(j);
258         spin_unlock(&j->lock);
259
260         return ret;
261 }
262
263 /*
264  * should _only_ be called from journal_res_get() - when we actually want a
265  * journal reservation - journal entry is open means journal is dirty:
266  */
267 static int journal_entry_open(struct journal *j)
268 {
269         struct bch_fs *c = container_of(j, struct bch_fs, journal);
270         struct journal_buf *buf = j->buf +
271                 ((journal_cur_seq(j) + 1) & JOURNAL_BUF_MASK);
272         union journal_res_state old, new;
273         int u64s;
274         u64 v;
275
276         lockdep_assert_held(&j->lock);
277         BUG_ON(journal_entry_is_open(j));
278         BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
279
280         if (j->blocked)
281                 return JOURNAL_ERR_blocked;
282
283         if (j->cur_entry_error)
284                 return j->cur_entry_error;
285
286         if (bch2_journal_error(j))
287                 return JOURNAL_ERR_insufficient_devices; /* -EROFS */
288
289         if (!fifo_free(&j->pin))
290                 return JOURNAL_ERR_journal_pin_full;
291
292         if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
293                 return JOURNAL_ERR_max_in_flight;
294
295         BUG_ON(!j->cur_entry_sectors);
296
297         buf->expires            =
298                 (journal_cur_seq(j) == j->flushed_seq_ondisk
299                  ? jiffies
300                  : j->last_flush_write) +
301                 msecs_to_jiffies(c->opts.journal_flush_delay);
302
303         buf->u64s_reserved      = j->entry_u64s_reserved;
304         buf->disk_sectors       = j->cur_entry_sectors;
305         buf->sectors            = min(buf->disk_sectors, buf->buf_size >> 9);
306
307         u64s = (int) (buf->sectors << 9) / sizeof(u64) -
308                 journal_entry_overhead(j);
309         u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);
310
311         if (u64s <= (ssize_t) j->early_journal_entries.nr)
312                 return JOURNAL_ERR_journal_full;
313
314         if (fifo_empty(&j->pin) && j->reclaim_thread)
315                 wake_up_process(j->reclaim_thread);
316
317         /*
318          * The fifo_push() needs to happen at the same time as j->seq is
319          * incremented for journal_last_seq() to be calculated correctly
320          */
321         atomic64_inc(&j->seq);
322         journal_pin_list_init(fifo_push_ref(&j->pin), 1);
323
324         BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf);
325
326         bkey_extent_init(&buf->key);
327         buf->noflush    = false;
328         buf->must_flush = false;
329         buf->separate_flush = false;
330         buf->flush_time = 0;
331
332         memset(buf->data, 0, sizeof(*buf->data));
333         buf->data->seq  = cpu_to_le64(journal_cur_seq(j));
334         buf->data->u64s = 0;
335
336         if (j->early_journal_entries.nr) {
337                 memcpy(buf->data->_data, j->early_journal_entries.data,
338                        j->early_journal_entries.nr * sizeof(u64));
339                 le32_add_cpu(&buf->data->u64s, j->early_journal_entries.nr);
340         }
341
342         /*
343          * Must be set before marking the journal entry as open:
344          */
345         j->cur_entry_u64s = u64s;
346
347         v = atomic64_read(&j->reservations.counter);
348         do {
349                 old.v = new.v = v;
350
351                 BUG_ON(old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL);
352
353                 new.idx++;
354                 BUG_ON(journal_state_count(new, new.idx));
355                 BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_BUF_MASK));
356
357                 journal_state_inc(&new);
358
359                 /* Handle any already added entries */
360                 new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
361         } while ((v = atomic64_cmpxchg(&j->reservations.counter,
362                                        old.v, new.v)) != old.v);
363
364         if (j->res_get_blocked_start)
365                 bch2_time_stats_update(j->blocked_time,
366                                        j->res_get_blocked_start);
367         j->res_get_blocked_start = 0;
368
369         mod_delayed_work(c->io_complete_wq,
370                          &j->write_work,
371                          msecs_to_jiffies(c->opts.journal_flush_delay));
372         journal_wake(j);
373
374         if (j->early_journal_entries.nr)
375                 darray_exit(&j->early_journal_entries);
376         return 0;
377 }
378
379 static bool journal_quiesced(struct journal *j)
380 {
381         bool ret = atomic64_read(&j->seq) == j->seq_ondisk;
382
383         if (!ret)
384                 journal_entry_close(j);
385         return ret;
386 }
387
388 static void journal_quiesce(struct journal *j)
389 {
390         wait_event(j->wait, journal_quiesced(j));
391 }
392
393 static void journal_write_work(struct work_struct *work)
394 {
395         struct journal *j = container_of(work, struct journal, write_work.work);
396         struct bch_fs *c = container_of(j, struct bch_fs, journal);
397         long delta;
398
399         spin_lock(&j->lock);
400         if (!__journal_entry_is_open(j->reservations))
401                 goto unlock;
402
403         delta = journal_cur_buf(j)->expires - jiffies;
404
405         if (delta > 0)
406                 mod_delayed_work(c->io_complete_wq, &j->write_work, delta);
407         else
408                 __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
409 unlock:
410         spin_unlock(&j->lock);
411 }
412
413 static int __journal_res_get(struct journal *j, struct journal_res *res,
414                              unsigned flags)
415 {
416         struct bch_fs *c = container_of(j, struct bch_fs, journal);
417         struct journal_buf *buf;
418         bool can_discard;
419         int ret;
420 retry:
421         if (journal_res_get_fast(j, res, flags))
422                 return 0;
423
424         if (bch2_journal_error(j))
425                 return -BCH_ERR_erofs_journal_err;
426
427         spin_lock(&j->lock);
428
429         /* check once more in case somebody else shut things down... */
430         if (bch2_journal_error(j)) {
431                 spin_unlock(&j->lock);
432                 return -BCH_ERR_erofs_journal_err;
433         }
434
435         /*
436          * Recheck after taking the lock, so we don't race with another thread
437          * that just did journal_entry_open(), and end up closing the entry
438          * it opened unnecessarily
439          */
440         if (journal_res_get_fast(j, res, flags)) {
441                 spin_unlock(&j->lock);
442                 return 0;
443         }
444
445         if ((flags & BCH_WATERMARK_MASK) < j->watermark) {
446                 /*
447                  * Don't want to close current journal entry, just need to
448                  * invoke reclaim:
449                  */
450                 ret = JOURNAL_ERR_journal_full;
451                 goto unlock;
452         }
453
454         /*
455          * If we couldn't get a reservation because the current buf filled up,
456          * and we had room for a bigger entry on disk, signal that we want to
457          * realloc the journal bufs:
458          */
459         buf = journal_cur_buf(j);
460         if (journal_entry_is_open(j) &&
461             buf->buf_size >> 9 < buf->disk_sectors &&
462             buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
463                 j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);
464
465         __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
466         ret = journal_entry_open(j);
467
468         if (ret == JOURNAL_ERR_max_in_flight)
469                 trace_and_count(c, journal_entry_full, c);
470 unlock:
471         if ((ret && ret != JOURNAL_ERR_insufficient_devices) &&
472             !j->res_get_blocked_start) {
473                 j->res_get_blocked_start = local_clock() ?: 1;
474                 trace_and_count(c, journal_full, c);
475         }
476
477         can_discard = j->can_discard;
478         spin_unlock(&j->lock);
479
480         if (!ret)
481                 goto retry;
482         if (journal_error_check_stuck(j, ret, flags))
483                 ret = -BCH_ERR_journal_res_get_blocked;
484
485         /*
486          * Journal is full - can't rely on reclaim from work item due to
487          * freezing:
488          */
489         if ((ret == JOURNAL_ERR_journal_full ||
490              ret == JOURNAL_ERR_journal_pin_full) &&
491             !(flags & JOURNAL_RES_GET_NONBLOCK)) {
492                 if (can_discard) {
493                         bch2_journal_do_discards(j);
494                         goto retry;
495                 }
496
497                 if (mutex_trylock(&j->reclaim_lock)) {
498                         bch2_journal_reclaim(j);
499                         mutex_unlock(&j->reclaim_lock);
500                 }
501         }
502
503         return ret == JOURNAL_ERR_insufficient_devices
504                 ? -BCH_ERR_erofs_journal_err
505                 : -BCH_ERR_journal_res_get_blocked;
506 }
507
508 /*
509  * Essentially the entry function to the journaling code. When bcachefs is doing
510  * a btree insert, it calls this function to get the current journal write.
511  * The journal write is the structure used to set up journal writes. The calling
512  * function will then add its keys to the structure, queuing them for the next
513  * write.
514  *
515  * To ensure forward progress, the current task must not be holding any
516  * btree node write locks.
517  */
518 int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
519                                   unsigned flags)
520 {
521         int ret;
522
523         closure_wait_event(&j->async_wait,
524                    (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
525                    (flags & JOURNAL_RES_GET_NONBLOCK));
526         return ret;
527 }
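/*
 * Typical caller-side pattern (illustrative sketch - bch2_journal_res_get()
 * is the fast path wrapper in journal.h that falls back to the slowpath
 * above; @k here stands for any struct bkey_i the caller wants journalled):
 *
 *	struct journal_res res = { 0 };
 *	int ret = bch2_journal_res_get(j, &res, jset_u64s(k->k.u64s), 0);
 *	if (ret)
 *		return ret;
 *
 *	... copy @k into the space reserved in the journal buffer ...
 *
 *	bch2_journal_res_put(j, &res);
 */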
528
529 /* journal_preres: */
530
531 static bool journal_preres_available(struct journal *j,
532                                      struct journal_preres *res,
533                                      unsigned new_u64s,
534                                      unsigned flags)
535 {
536         bool ret = bch2_journal_preres_get_fast(j, res, new_u64s, flags, true);
537
538         if (!ret && mutex_trylock(&j->reclaim_lock)) {
539                 bch2_journal_reclaim(j);
540                 mutex_unlock(&j->reclaim_lock);
541         }
542
543         return ret;
544 }
545
546 int __bch2_journal_preres_get(struct journal *j,
547                               struct journal_preres *res,
548                               unsigned new_u64s,
549                               unsigned flags)
550 {
551         int ret;
552
553         closure_wait_event(&j->preres_wait,
554                    (ret = bch2_journal_error(j)) ||
555                    journal_preres_available(j, res, new_u64s, flags));
556         return ret;
557 }
558
559 /* journal_entry_res: */
560
561 void bch2_journal_entry_res_resize(struct journal *j,
562                                    struct journal_entry_res *res,
563                                    unsigned new_u64s)
564 {
565         union journal_res_state state;
566         int d = new_u64s - res->u64s;
567
568         spin_lock(&j->lock);
569
570         j->entry_u64s_reserved += d;
571         if (d <= 0)
572                 goto out;
573
574         j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
575         smp_mb();
576         state = READ_ONCE(j->reservations);
577
578         if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
579             state.cur_entry_offset > j->cur_entry_u64s) {
580                 j->cur_entry_u64s += d;
581                 /*
582                  * Not enough room in current journal entry, have to flush it:
583                  */
584                 __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
585         } else {
586                 journal_cur_buf(j)->u64s_reserved += d;
587         }
588 out:
589         spin_unlock(&j->lock);
590         res->u64s += d;
591 }
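/*
 * Illustrative use (editor's sketch): a subsystem that needs space in every
 * journal entry - e.g. replicas entries, via what is assumed here to be
 * c->replicas_journal_res - registers a struct journal_entry_res and
 * resizes it when its requirements change:
 *
 *	bch2_journal_entry_res_resize(&c->journal, &c->replicas_journal_res,
 *				      new_u64s);
 *
 * Shrinking just returns the reservation; growing may close the current
 * entry if it no longer has room for the larger reservation.
 */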
592
593 /* journal flushing: */
594
595 /**
596  * bch2_journal_flush_seq_async - wait for a journal entry to be written
597  * @j:          journal object
598  * @seq:        seq to flush
599  * @parent:     closure object to wait with
600  * Returns:     1 if @seq has already been flushed, 0 if @seq is being flushed,
601  *              -EIO if @seq will never be flushed
602  *
603  * Like bch2_journal_wait_on_seq, except that it triggers a write immediately if
604  * necessary
605  */
606 int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
607                                  struct closure *parent)
608 {
609         struct journal_buf *buf;
610         int ret = 0;
611
612         if (seq <= j->flushed_seq_ondisk)
613                 return 1;
614
615         spin_lock(&j->lock);
616
617         if (WARN_ONCE(seq > journal_cur_seq(j),
618                       "requested to flush journal seq %llu, but currently at %llu",
619                       seq, journal_cur_seq(j)))
620                 goto out;
621
622         /* Recheck under lock: */
623         if (j->err_seq && seq >= j->err_seq) {
624                 ret = -EIO;
625                 goto out;
626         }
627
628         if (seq <= j->flushed_seq_ondisk) {
629                 ret = 1;
630                 goto out;
631         }
632
633         /* if seq was written, but not flushed - flush a newer one instead */
634         seq = max(seq, journal_last_unwritten_seq(j));
635
636 recheck_need_open:
637         if (seq > journal_cur_seq(j)) {
638                 struct journal_res res = { 0 };
639
640                 if (journal_entry_is_open(j))
641                         __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
642
643                 spin_unlock(&j->lock);
644
645                 ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
646                 if (ret)
647                         return ret;
648
649                 seq = res.seq;
650                 buf = j->buf + (seq & JOURNAL_BUF_MASK);
651                 buf->must_flush = true;
652
653                 if (!buf->flush_time) {
654                         buf->flush_time = local_clock() ?: 1;
655                         buf->expires = jiffies;
656                 }
657
658                 if (parent && !closure_wait(&buf->wait, parent))
659                         BUG();
660
661                 bch2_journal_res_put(j, &res);
662
663                 spin_lock(&j->lock);
664                 goto want_write;
665         }
666
667         /*
668          * if write was kicked off without a flush, flush the next sequence
669          * number instead
670          */
671         buf = journal_seq_to_buf(j, seq);
672         if (buf->noflush) {
673                 seq++;
674                 goto recheck_need_open;
675         }
676
677         buf->must_flush = true;
678
679         if (parent && !closure_wait(&buf->wait, parent))
680                 BUG();
681 want_write:
682         if (seq == journal_cur_seq(j))
683                 journal_entry_want_write(j);
684 out:
685         spin_unlock(&j->lock);
686         return ret;
687 }
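/*
 * Usage sketch (editor's example): waiting for a flush asynchronously via a
 * closure, the way bch2_journal_flush_seq() below waits synchronously on
 * j->wait:
 *
 *	struct closure cl;
 *
 *	closure_init_stack(&cl);
 *	ret = bch2_journal_flush_seq_async(j, seq, &cl);
 *	closure_sync(&cl);
 *
 * A return of 1 means @seq was already flushed and the closure is never
 * waited on; a negative return means @seq will never be flushed.
 */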
688
689 int bch2_journal_flush_seq(struct journal *j, u64 seq)
690 {
691         u64 start_time = local_clock();
692         int ret, ret2;
693
694         /*
695          * Don't update time_stats when @seq is already flushed:
696          */
697         if (seq <= j->flushed_seq_ondisk)
698                 return 0;
699
700         ret = wait_event_interruptible(j->wait, (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)));
701
702         if (!ret)
703                 bch2_time_stats_update(j->flush_seq_time, start_time);
704
705         return ret ?: ret2 < 0 ? ret2 : 0;
706 }
707
708 /*
709  * bch2_journal_flush_async - if there is an open journal entry, or a journal
710  * entry still being written, write it and wait for the write to complete
711  */
712 void bch2_journal_flush_async(struct journal *j, struct closure *parent)
713 {
714         bch2_journal_flush_seq_async(j, atomic64_read(&j->seq), parent);
715 }
716
717 int bch2_journal_flush(struct journal *j)
718 {
719         return bch2_journal_flush_seq(j, atomic64_read(&j->seq));
720 }
721
722 /*
723  * bch2_journal_noflush_seq - tell the journal not to issue any flushes before
724  * @seq
725  */
726 bool bch2_journal_noflush_seq(struct journal *j, u64 seq)
727 {
728         struct bch_fs *c = container_of(j, struct bch_fs, journal);
729         u64 unwritten_seq;
730         bool ret = false;
731
732         if (!(c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush)))
733                 return false;
734
735         if (seq <= c->journal.flushed_seq_ondisk)
736                 return false;
737
738         spin_lock(&j->lock);
739         if (seq <= c->journal.flushed_seq_ondisk)
740                 goto out;
741
742         for (unwritten_seq = journal_last_unwritten_seq(j);
743              unwritten_seq < seq;
744              unwritten_seq++) {
745                 struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq);
746
747                 /* journal write is already in flight, and was a flush write: */
748                 if (unwritten_seq == journal_last_unwritten_seq(j) && !buf->noflush)
749                         goto out;
750
751                 buf->noflush = true;
752         }
753
754         ret = true;
755 out:
756         spin_unlock(&j->lock);
757         return ret;
758 }
759
760 int bch2_journal_meta(struct journal *j)
761 {
762         struct journal_buf *buf;
763         struct journal_res res;
764         int ret;
765
766         memset(&res, 0, sizeof(res));
767
768         ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
769         if (ret)
770                 return ret;
771
772         buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
773         buf->must_flush = true;
774
775         if (!buf->flush_time) {
776                 buf->flush_time = local_clock() ?: 1;
777                 buf->expires = jiffies;
778         }
779
780         bch2_journal_res_put(j, &res);
781
782         return bch2_journal_flush_seq(j, res.seq);
783 }
784
785 /* block/unblock the journal: */
786
787 void bch2_journal_unblock(struct journal *j)
788 {
789         spin_lock(&j->lock);
790         j->blocked--;
791         spin_unlock(&j->lock);
792
793         journal_wake(j);
794 }
795
796 void bch2_journal_block(struct journal *j)
797 {
798         spin_lock(&j->lock);
799         j->blocked++;
800         spin_unlock(&j->lock);
801
802         journal_quiesce(j);
803 }
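/*
 * Example pairing (editor's sketch), as in __bch2_set_nr_journal_buckets()
 * below - quiesce the journal around an operation that must not race with
 * journal writes:
 *
 *	bch2_journal_block(&c->journal);
 *	... resize the journal, update the superblock, etc. ...
 *	bch2_journal_unblock(&c->journal);
 *
 * bch2_journal_block() waits for in-flight journal writes to complete, and
 * journal_entry_open() refuses to open new entries while j->blocked is
 * nonzero.
 */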
804
805 /* allocate journal on a device: */
806
807 static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
808                                          bool new_fs, struct closure *cl)
809 {
810         struct bch_fs *c = ca->fs;
811         struct journal_device *ja = &ca->journal;
812         u64 *new_bucket_seq = NULL, *new_buckets = NULL;
813         struct open_bucket **ob = NULL;
814         long *bu = NULL;
815         unsigned i, pos, nr_got = 0, nr_want = nr - ja->nr;
816         int ret = 0;
817
818         BUG_ON(nr <= ja->nr);
819
820         bu              = kcalloc(nr_want, sizeof(*bu), GFP_KERNEL);
821         ob              = kcalloc(nr_want, sizeof(*ob), GFP_KERNEL);
822         new_buckets     = kcalloc(nr, sizeof(u64), GFP_KERNEL);
823         new_bucket_seq  = kcalloc(nr, sizeof(u64), GFP_KERNEL);
824         if (!bu || !ob || !new_buckets || !new_bucket_seq) {
825                 ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
826                 goto err_free;
827         }
828
829         for (nr_got = 0; nr_got < nr_want; nr_got++) {
830                 if (new_fs) {
831                         bu[nr_got] = bch2_bucket_alloc_new_fs(ca);
832                         if (bu[nr_got] < 0) {
833                                 ret = -BCH_ERR_ENOSPC_bucket_alloc;
834                                 break;
835                         }
836                 } else {
837                         ob[nr_got] = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal, cl);
838                         ret = PTR_ERR_OR_ZERO(ob[nr_got]);
839                         if (ret)
840                                 break;
841
842                         ret = bch2_trans_run(c,
843                                 bch2_trans_mark_metadata_bucket(trans, ca,
844                                                 ob[nr_got]->bucket, BCH_DATA_journal,
845                                                 ca->mi.bucket_size));
846                         if (ret) {
847                                 bch2_open_bucket_put(c, ob[nr_got]);
848                                 bch_err_msg(c, ret, "marking new journal buckets");
849                                 break;
850                         }
851
852                         bu[nr_got] = ob[nr_got]->bucket;
853                 }
854         }
855
856         if (!nr_got)
857                 goto err_free;
858
859         /* Don't return an error if we successfully allocated some buckets: */
860         ret = 0;
861
862         if (c) {
863                 bch2_journal_flush_all_pins(&c->journal);
864                 bch2_journal_block(&c->journal);
865                 mutex_lock(&c->sb_lock);
866         }
867
868         memcpy(new_buckets,     ja->buckets,    ja->nr * sizeof(u64));
869         memcpy(new_bucket_seq,  ja->bucket_seq, ja->nr * sizeof(u64));
870
871         BUG_ON(ja->discard_idx > ja->nr);
872
873         pos = ja->discard_idx ?: ja->nr;
874
875         memmove(new_buckets + pos + nr_got,
876                 new_buckets + pos,
877                 sizeof(new_buckets[0]) * (ja->nr - pos));
878         memmove(new_bucket_seq + pos + nr_got,
879                 new_bucket_seq + pos,
880                 sizeof(new_bucket_seq[0]) * (ja->nr - pos));
881
882         for (i = 0; i < nr_got; i++) {
883                 new_buckets[pos + i] = bu[i];
884                 new_bucket_seq[pos + i] = 0;
885         }
886
887         nr = ja->nr + nr_got;
888
889         ret = bch2_journal_buckets_to_sb(c, ca, new_buckets, nr);
890         if (ret)
891                 goto err_unblock;
892
893         if (!new_fs)
894                 bch2_write_super(c);
895
896         /* Commit: */
897         if (c)
898                 spin_lock(&c->journal.lock);
899
900         swap(new_buckets,       ja->buckets);
901         swap(new_bucket_seq,    ja->bucket_seq);
902         ja->nr = nr;
903
904         if (pos <= ja->discard_idx)
905                 ja->discard_idx = (ja->discard_idx + nr_got) % ja->nr;
906         if (pos <= ja->dirty_idx_ondisk)
907                 ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + nr_got) % ja->nr;
908         if (pos <= ja->dirty_idx)
909                 ja->dirty_idx = (ja->dirty_idx + nr_got) % ja->nr;
910         if (pos <= ja->cur_idx)
911                 ja->cur_idx = (ja->cur_idx + nr_got) % ja->nr;
912
913         if (c)
914                 spin_unlock(&c->journal.lock);
915 err_unblock:
916         if (c) {
917                 bch2_journal_unblock(&c->journal);
918                 mutex_unlock(&c->sb_lock);
919         }
920
921         if (ret && !new_fs)
922                 for (i = 0; i < nr_got; i++)
923                         bch2_trans_run(c,
924                                 bch2_trans_mark_metadata_bucket(trans, ca,
925                                                 bu[i], BCH_DATA_free, 0));
926 err_free:
927         if (!new_fs)
928                 for (i = 0; i < nr_got; i++)
929                         bch2_open_bucket_put(c, ob[i]);
930
931         kfree(new_bucket_seq);
932         kfree(new_buckets);
933         kfree(ob);
934         kfree(bu);
935         return ret;
936 }
937
938 /*
939  * Allocate more journal space at runtime - not currently making use of it, but
940  * the code works:
941  */
942 int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
943                                 unsigned nr)
944 {
945         struct journal_device *ja = &ca->journal;
946         struct closure cl;
947         int ret = 0;
948
949         closure_init_stack(&cl);
950
951         down_write(&c->state_lock);
952
953         /* don't handle reducing nr of buckets yet: */
954         if (nr < ja->nr)
955                 goto unlock;
956
957         while (ja->nr < nr) {
958                 struct disk_reservation disk_res = { 0, 0, 0 };
959
960                 /*
961                  * note: journal buckets aren't really counted as _sectors_ used yet, so
962                  * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
963                  * when space used goes up without a reservation - but we do need the
964                  * reservation to ensure we'll actually be able to allocate:
965                  *
966                  * XXX: that's not right, disk reservations only ensure a
967                  * filesystem-wide allocation will succeed, this is a device
968                  * specific allocation - we can hang here:
969                  */
970
971                 ret = bch2_disk_reservation_get(c, &disk_res,
972                                                 bucket_to_sector(ca, nr - ja->nr), 1, 0);
973                 if (ret)
974                         break;
975
976                 ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);
977
978                 bch2_disk_reservation_put(c, &disk_res);
979
980                 closure_sync(&cl);
981
982                 if (ret && ret != -BCH_ERR_bucket_alloc_blocked)
983                         break;
984         }
985
986         if (ret)
987                 bch_err_fn(c, ret);
988 unlock:
989         up_write(&c->state_lock);
990         return ret;
991 }
992
993 int bch2_dev_journal_alloc(struct bch_dev *ca)
994 {
995         unsigned nr;
996         int ret;
997
998         if (dynamic_fault("bcachefs:add:journal_alloc")) {
999                 ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
1000                 goto err;
1001         }
1002
1003         /* 1/128th of the device by default: */
1004         nr = ca->mi.nbuckets >> 7;
1005
1006         /*
1007          * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
1008          * is smaller:
1009          */
1010         nr = clamp_t(unsigned, nr,
1011                      BCH_JOURNAL_BUCKETS_MIN,
1012                      min(1 << 13,
1013                          (1 << 24) / ca->mi.bucket_size));
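	/*
	 * Worked example (editor's note): a 1 TiB device with 512 KiB buckets
	 * has 2097152 buckets, so nr starts at 2097152 >> 7 = 16384. The
	 * bucket size is 1024 sectors, so (1 << 24) / 1024 = 16384 and the
	 * ceiling is min(8192, 16384) = 8192 buckets - a 4 GiB journal.
	 */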
1014
1015         ret = __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
1016 err:
1017         if (ret)
1018                 bch_err_fn(ca, ret);
1019         return ret;
1020 }
1021
1022 /* startup/shutdown: */
1023
1024 static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
1025 {
1026         bool ret = false;
1027         u64 seq;
1028
1029         spin_lock(&j->lock);
1030         for (seq = journal_last_unwritten_seq(j);
1031              seq <= journal_cur_seq(j) && !ret;
1032              seq++) {
1033                 struct journal_buf *buf = journal_seq_to_buf(j, seq);
1034
1035                 if (bch2_bkey_has_device_c(bkey_i_to_s_c(&buf->key), dev_idx))
1036                         ret = true;
1037         }
1038         spin_unlock(&j->lock);
1039
1040         return ret;
1041 }
1042
1043 void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
1044 {
1045         wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
1046 }
1047
1048 void bch2_fs_journal_stop(struct journal *j)
1049 {
1050         bch2_journal_reclaim_stop(j);
1051         bch2_journal_flush_all_pins(j);
1052
1053         wait_event(j->wait, journal_entry_close(j));
1054
1055         /*
1056          * Always write a new journal entry, to make sure the clock hands are up
1057          * to date (and match the superblock)
1058          */
1059         bch2_journal_meta(j);
1060
1061         journal_quiesce(j);
1062
1063         BUG_ON(!bch2_journal_error(j) &&
1064                test_bit(JOURNAL_REPLAY_DONE, &j->flags) &&
1065                j->last_empty_seq != journal_cur_seq(j));
1066
1067         cancel_delayed_work_sync(&j->write_work);
1068 }
1069
1070 int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
1071 {
1072         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1073         struct journal_entry_pin_list *p;
1074         struct journal_replay *i, **_i;
1075         struct genradix_iter iter;
1076         bool had_entries = false;
1077         unsigned ptr;
1078         u64 last_seq = cur_seq, nr, seq;
1079
1080         genradix_for_each_reverse(&c->journal_entries, iter, _i) {
1081                 i = *_i;
1082
1083                 if (!i || i->ignore)
1084                         continue;
1085
1086                 last_seq = le64_to_cpu(i->j.last_seq);
1087                 break;
1088         }
1089
1090         nr = cur_seq - last_seq;
1091
1092         if (nr + 1 > j->pin.size) {
1093                 free_fifo(&j->pin);
1094                 init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
1095                 if (!j->pin.data) {
1096                         bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
1097                         return -BCH_ERR_ENOMEM_journal_pin_fifo;
1098                 }
1099         }
1100
1101         j->replay_journal_seq   = last_seq;
1102         j->replay_journal_seq_end = cur_seq;
1103         j->last_seq_ondisk      = last_seq;
1104         j->flushed_seq_ondisk   = cur_seq - 1;
1105         j->seq_ondisk           = cur_seq - 1;
1106         j->pin.front            = last_seq;
1107         j->pin.back             = cur_seq;
1108         atomic64_set(&j->seq, cur_seq - 1);
1109
1110         fifo_for_each_entry_ptr(p, &j->pin, seq)
1111                 journal_pin_list_init(p, 1);
1112
1113         genradix_for_each(&c->journal_entries, iter, _i) {
1114                 i = *_i;
1115
1116                 if (!i || i->ignore)
1117                         continue;
1118
1119                 seq = le64_to_cpu(i->j.seq);
1120                 BUG_ON(seq >= cur_seq);
1121
1122                 if (seq < last_seq)
1123                         continue;
1124
1125                 if (journal_entry_empty(&i->j))
1126                         j->last_empty_seq = le64_to_cpu(i->j.seq);
1127
1128                 p = journal_seq_pin(j, seq);
1129
1130                 p->devs.nr = 0;
1131                 for (ptr = 0; ptr < i->nr_ptrs; ptr++)
1132                         bch2_dev_list_add_dev(&p->devs, i->ptrs[ptr].dev);
1133
1134                 had_entries = true;
1135         }
1136
1137         if (!had_entries)
1138                 j->last_empty_seq = cur_seq;
1139
1140         spin_lock(&j->lock);
1141
1142         set_bit(JOURNAL_STARTED, &j->flags);
1143         j->last_flush_write = jiffies;
1144
1145         j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
1146         j->reservations.unwritten_idx++;
1147
1148         c->last_bucket_seq_cleanup = journal_cur_seq(j);
1149
1150         bch2_journal_space_available(j);
1151         spin_unlock(&j->lock);
1152
1153         return bch2_journal_reclaim_start(j);
1154 }
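/*
 * Example of the state set up above (editor's sketch): if replay saw
 * entries up to seq 104 with last_seq == 100, recovery calls
 * bch2_fs_journal_start(j, 105), leaving:
 *
 *	j->pin.front == 100, j->pin.back == 105 (pin lists for seqs 100-104)
 *	j->seq == 104, j->seq_ondisk == 104 (nothing unwritten yet)
 *
 * so the first entry opened for new reservations will be seq 105.
 */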
1155
1156 /* init/exit: */
1157
1158 void bch2_dev_journal_exit(struct bch_dev *ca)
1159 {
1160         kfree(ca->journal.bio);
1161         kfree(ca->journal.buckets);
1162         kfree(ca->journal.bucket_seq);
1163
1164         ca->journal.bio         = NULL;
1165         ca->journal.buckets     = NULL;
1166         ca->journal.bucket_seq  = NULL;
1167 }
1168
1169 int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
1170 {
1171         struct journal_device *ja = &ca->journal;
1172         struct bch_sb_field_journal *journal_buckets =
1173                 bch2_sb_get_journal(sb);
1174         struct bch_sb_field_journal_v2 *journal_buckets_v2 =
1175                 bch2_sb_get_journal_v2(sb);
1176         unsigned i, nr_bvecs;
1177
1178         ja->nr = 0;
1179
1180         if (journal_buckets_v2) {
1181                 unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
1182
1183                 for (i = 0; i < nr; i++)
1184                         ja->nr += le64_to_cpu(journal_buckets_v2->d[i].nr);
1185         } else if (journal_buckets) {
1186                 ja->nr = bch2_nr_journal_buckets(journal_buckets);
1187         }
1188
1189         ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
1190         if (!ja->bucket_seq)
1191                 return -BCH_ERR_ENOMEM_dev_journal_init;
1192
1193         nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);
1194
1195         ca->journal.bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
1196         if (!ca->journal.bio)
1197                 return -BCH_ERR_ENOMEM_dev_journal_init;
1198
1199         bio_init(ca->journal.bio, NULL, ca->journal.bio->bi_inline_vecs, nr_bvecs, 0);
1200
1201         ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
1202         if (!ja->buckets)
1203                 return -BCH_ERR_ENOMEM_dev_journal_init;
1204
1205         if (journal_buckets_v2) {
1206                 unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
1207                 unsigned j, dst = 0;
1208
1209                 for (i = 0; i < nr; i++)
1210                         for (j = 0; j < le64_to_cpu(journal_buckets_v2->d[i].nr); j++)
1211                                 ja->buckets[dst++] =
1212                                         le64_to_cpu(journal_buckets_v2->d[i].start) + j;
1213         } else if (journal_buckets) {
1214                 for (i = 0; i < ja->nr; i++)
1215                         ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
1216         }
1217
1218         return 0;
1219 }
1220
1221 void bch2_fs_journal_exit(struct journal *j)
1222 {
1223         unsigned i;
1224
1225         darray_exit(&j->early_journal_entries);
1226
1227         for (i = 0; i < ARRAY_SIZE(j->buf); i++)
1228                 kvpfree(j->buf[i].data, j->buf[i].buf_size);
1229         free_fifo(&j->pin);
1230 }
1231
1232 int bch2_fs_journal_init(struct journal *j)
1233 {
1234         static struct lock_class_key res_key;
1235         unsigned i;
1236
1237         spin_lock_init(&j->lock);
1238         spin_lock_init(&j->err_lock);
1239         init_waitqueue_head(&j->wait);
1240         INIT_DELAYED_WORK(&j->write_work, journal_write_work);
1241         init_waitqueue_head(&j->reclaim_wait);
1242         init_waitqueue_head(&j->pin_flush_wait);
1243         mutex_init(&j->reclaim_lock);
1244         mutex_init(&j->discard_lock);
1245
1246         lockdep_init_map(&j->res_map, "journal res", &res_key, 0);
1247
1248         atomic64_set(&j->reservations.counter,
1249                 ((union journal_res_state)
1250                  { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);
1251
1252         if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)))
1253                 return -BCH_ERR_ENOMEM_journal_pin_fifo;
1254
1255         for (i = 0; i < ARRAY_SIZE(j->buf); i++) {
1256                 j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
1257                 j->buf[i].data = kvpmalloc(j->buf[i].buf_size, GFP_KERNEL);
1258                 if (!j->buf[i].data)
1259                         return -BCH_ERR_ENOMEM_journal_buf;
1260         }
1261
1262         j->pin.front = j->pin.back = 1;
1263         return 0;
1264 }
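/*
 * Lifecycle sketch (editor's note): the filesystem calls these in order -
 * bch2_fs_journal_init() to allocate buffers and the pin fifo,
 * bch2_fs_journal_start() once recovery knows the starting sequence number,
 * then at shutdown bch2_fs_journal_stop() followed by bch2_fs_journal_exit().
 * Per-device journal state is handled by bch2_dev_journal_init() and
 * bch2_dev_journal_exit() above.
 */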
1265
1266 /* debug: */
1267
1268 void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
1269 {
1270         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1271         union journal_res_state s;
1272         struct bch_dev *ca;
1273         unsigned long now = jiffies;
1274         u64 seq;
1275         unsigned i;
1276
1277         if (!out->nr_tabstops)
1278                 printbuf_tabstop_push(out, 24);
1279         out->atomic++;
1280
1281         rcu_read_lock();
1282         s = READ_ONCE(j->reservations);
1283
1284         prt_printf(out, "dirty journal entries:\t%llu/%llu\n",  fifo_used(&j->pin), j->pin.size);
1285         prt_printf(out, "seq:\t\t\t%llu\n",                     journal_cur_seq(j));
1286         prt_printf(out, "seq_ondisk:\t\t%llu\n",                j->seq_ondisk);
1287         prt_printf(out, "last_seq:\t\t%llu\n",          journal_last_seq(j));
1288         prt_printf(out, "last_seq_ondisk:\t%llu\n",             j->last_seq_ondisk);
1289         prt_printf(out, "flushed_seq_ondisk:\t%llu\n",  j->flushed_seq_ondisk);
1290         prt_printf(out, "prereserved:\t\t%u/%u\n",              j->prereserved.reserved, j->prereserved.remaining);
1291         prt_printf(out, "watermark:\t\t%s\n",           bch2_watermarks[j->watermark]);
1292         prt_printf(out, "each entry reserved:\t%u\n",   j->entry_u64s_reserved);
1293         prt_printf(out, "nr flush writes:\t%llu\n",             j->nr_flush_writes);
1294         prt_printf(out, "nr noflush writes:\t%llu\n",   j->nr_noflush_writes);
1295         prt_printf(out, "nr direct reclaim:\t%llu\n",   j->nr_direct_reclaim);
1296         prt_printf(out, "nr background reclaim:\t%llu\n",       j->nr_background_reclaim);
1297         prt_printf(out, "reclaim kicked:\t\t%u\n",              j->reclaim_kicked);
1298         prt_printf(out, "reclaim runs in:\t%u ms\n",    time_after(j->next_reclaim, now)
1299                ? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
1300         prt_printf(out, "current entry sectors:\t%u\n", j->cur_entry_sectors);
1301         prt_printf(out, "current entry error:\t%s\n",   bch2_journal_errors[j->cur_entry_error]);
1302         prt_printf(out, "current entry:\t\t");
1303
1304         switch (s.cur_entry_offset) {
1305         case JOURNAL_ENTRY_ERROR_VAL:
1306                 prt_printf(out, "error");
1307                 break;
1308         case JOURNAL_ENTRY_CLOSED_VAL:
1309                 prt_printf(out, "closed");
1310                 break;
1311         default:
1312                 prt_printf(out, "%u/%u", s.cur_entry_offset, j->cur_entry_u64s);
1313                 break;
1314         }
1315
1316         prt_newline(out);
1317
1318         for (seq = journal_cur_seq(j);
1319              seq >= journal_last_unwritten_seq(j);
1320              --seq) {
1321                 i = seq & JOURNAL_BUF_MASK;
1322
1323                 prt_printf(out, "unwritten entry:");
1324                 prt_tab(out);
1325                 prt_printf(out, "%llu", seq);
1326                 prt_newline(out);
1327                 printbuf_indent_add(out, 2);
1328
1329                 prt_printf(out, "refcount:");
1330                 prt_tab(out);
1331                 prt_printf(out, "%u", journal_state_count(s, i));
1332                 prt_newline(out);
1333
1334                 prt_printf(out, "sectors:");
1335                 prt_tab(out);
1336                 prt_printf(out, "%u", j->buf[i].sectors);
1337                 prt_newline(out);
1338
1339                 prt_printf(out, "expires:");
1340                 prt_tab(out);
1341                 prt_printf(out, "%li jiffies", j->buf[i].expires - jiffies);
1342                 prt_newline(out);
1343
1344                 printbuf_indent_sub(out, 2);
1345         }
1346
1347         prt_printf(out,
1348                "replay done:\t\t%i\n",
1349                test_bit(JOURNAL_REPLAY_DONE,    &j->flags));
1350
1351         prt_printf(out, "space:\n");
1352         prt_printf(out, "\tdiscarded\t%u:%u\n",
1353                j->space[journal_space_discarded].next_entry,
1354                j->space[journal_space_discarded].total);
1355         prt_printf(out, "\tclean ondisk\t%u:%u\n",
1356                j->space[journal_space_clean_ondisk].next_entry,
1357                j->space[journal_space_clean_ondisk].total);
1358         prt_printf(out, "\tclean\t\t%u:%u\n",
1359                j->space[journal_space_clean].next_entry,
1360                j->space[journal_space_clean].total);
1361         prt_printf(out, "\ttotal\t\t%u:%u\n",
1362                j->space[journal_space_total].next_entry,
1363                j->space[journal_space_total].total);
1364
1365         for_each_member_device_rcu(ca, c, i,
1366                                    &c->rw_devs[BCH_DATA_journal]) {
1367                 struct journal_device *ja = &ca->journal;
1368
1369                 if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
1370                         continue;
1371
1372                 if (!ja->nr)
1373                         continue;
1374
1375                 prt_printf(out, "dev %u:\n",            i);
1376                 prt_printf(out, "\tnr\t\t%u\n",         ja->nr);
1377                 prt_printf(out, "\tbucket size\t%u\n",  ca->mi.bucket_size);
1378                 prt_printf(out, "\tavailable\t%u:%u\n", bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
1379                 prt_printf(out, "\tdiscard_idx\t%u\n",  ja->discard_idx);
1380                 prt_printf(out, "\tdirty_ondisk\t%u (seq %llu)\n", ja->dirty_idx_ondisk,        ja->bucket_seq[ja->dirty_idx_ondisk]);
1381                 prt_printf(out, "\tdirty_idx\t%u (seq %llu)\n", ja->dirty_idx,          ja->bucket_seq[ja->dirty_idx]);
1382                 prt_printf(out, "\tcur_idx\t\t%u (seq %llu)\n", ja->cur_idx,            ja->bucket_seq[ja->cur_idx]);
1383         }
1384
1385         rcu_read_unlock();
1386
1387         --out->atomic;
1388 }
1389
1390 void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
1391 {
1392         spin_lock(&j->lock);
1393         __bch2_journal_debug_to_text(out, j);
1394         spin_unlock(&j->lock);
1395 }
1396
1397 bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq)
1398 {
1399         struct journal_entry_pin_list *pin_list;
1400         struct journal_entry_pin *pin;
1401         unsigned i;
1402
1403         spin_lock(&j->lock);
1404         *seq = max(*seq, j->pin.front);
1405
1406         if (*seq >= j->pin.back) {
1407                 spin_unlock(&j->lock);
1408                 return true;
1409         }
1410
1411         out->atomic++;
1412
1413         pin_list = journal_seq_pin(j, *seq);
1414
1415         prt_printf(out, "%llu: count %u", *seq, atomic_read(&pin_list->count));
1416         prt_newline(out);
1417         printbuf_indent_add(out, 2);
1418
1419         for (i = 0; i < ARRAY_SIZE(pin_list->list); i++)
1420                 list_for_each_entry(pin, &pin_list->list[i], list) {
1421                         prt_printf(out, "\t%px %ps", pin, pin->flush);
1422                         prt_newline(out);
1423                 }
1424
1425         if (!list_empty(&pin_list->flushed)) {
1426                 prt_printf(out, "flushed:");
1427                 prt_newline(out);
1428         }
1429
1430         list_for_each_entry(pin, &pin_list->flushed, list) {
1431                 prt_printf(out, "\t%px %ps", pin, pin->flush);
1432                 prt_newline(out);
1433         }
1434
1435         printbuf_indent_sub(out, 2);
1436
1437         --out->atomic;
1438         spin_unlock(&j->lock);
1439
1440         return false;
1441 }
1442
1443 void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
1444 {
1445         u64 seq = 0;
1446
1447         while (!bch2_journal_seq_pins_to_text(out, j, &seq))
1448                 seq++;
1449 }