// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_sb.h"
#include "journal_seq_blacklist.h"
#include "trace.h"

static const char * const bch2_journal_errors[] = {
#define x(n)	#n,
	JOURNAL_ERRORS()
#undef x
	NULL
};

static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
{
	return seq > j->seq_ondisk;
}

static bool __journal_entry_is_open(union journal_res_state state)
{
	return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}

static inline unsigned nr_unwritten_journal_entries(struct journal *j)
{
	return atomic64_read(&j->seq) - j->seq_ondisk;
}

static bool journal_entry_is_open(struct journal *j)
{
	return __journal_entry_is_open(j->reservations);
}
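
/*
 * Illustrative note (not from the original source): cur_entry_offset counts
 * the u64s reserved so far in the open entry, and values at or above
 * JOURNAL_ENTRY_CLOSED_VAL are sentinels (closed, then error) rather than
 * real offsets - which is why a single compare against
 * JOURNAL_ENTRY_CLOSED_VAL suffices to distinguish an open entry from a
 * closed or errored one.
 */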

static inline struct journal_buf *
journal_seq_to_buf(struct journal *j, u64 seq)
{
	struct journal_buf *buf = NULL;

	EBUG_ON(seq > journal_cur_seq(j));

	if (journal_seq_unwritten(j, seq)) {
		buf = j->buf + (seq & JOURNAL_BUF_MASK);
		EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
	}
	return buf;
}
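
/*
 * Worked example (illustrative, assuming four journal bufs so
 * JOURNAL_BUF_MASK == 3, matching the buf0..buf3 counts in
 * journal_res_state): with seq_ondisk == 100 and journal_cur_seq() == 103,
 * seqs 101..103 are unwritten and map to j->buf[101 & 3] == j->buf[1],
 * j->buf[2] and j->buf[3]; asking for seq 100 returns NULL, since that entry
 * is already on disk and its buffer may have been reused.
 */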

static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(p->list); i++)
		INIT_LIST_HEAD(&p->list[i]);
	INIT_LIST_HEAD(&p->flushed);
	atomic_set(&p->count, count);
	p->devs.nr = 0;
}

/*
 * Detect stuck journal conditions and trigger shutdown. Technically the journal
 * can end up stuck for a variety of reasons, such as a blocked I/O, journal
 * reservation lockup, etc. Since this is a fatal error with potentially
 * unpredictable characteristics, we want to be fairly conservative before we
 * decide to shut things down.
 *
 * Consider the journal stuck when it appears full with no ability to commit
 * btree transactions, to discard journal buckets, nor acquire priority
 * (reserved watermark) reservation.
 */
static inline bool
journal_error_check_stuck(struct journal *j, int error, unsigned flags)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	bool stuck = false;
	struct printbuf buf = PRINTBUF;

	if (!(error == JOURNAL_ERR_journal_full ||
	      error == JOURNAL_ERR_journal_pin_full) ||
	    nr_unwritten_journal_entries(j) ||
	    (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim)
		return stuck;

	spin_lock(&j->lock);

	if (j->can_discard) {
		spin_unlock(&j->lock);
		return stuck;
	}

	stuck = true;

	/*
	 * The journal shutdown path will set ->err_seq, but do it here first to
	 * serialize against concurrent failures and avoid duplicate error
	 * reports.
	 */
	if (j->err_seq) {
		spin_unlock(&j->lock);
		return stuck;
	}
	j->err_seq = journal_cur_seq(j);
	spin_unlock(&j->lock);

	bch_err(c, "Journal stuck! Have a pre-reservation but journal full (error %s)",
		bch2_journal_errors[error]);
	bch2_journal_debug_to_text(&buf, j);
	bch_err(c, "%s", buf.buf);

	printbuf_reset(&buf);
	bch2_journal_pins_to_text(&buf, j);
	bch_err(c, "Journal pins:\n%s", buf.buf);
	printbuf_exit(&buf);

	bch2_fatal_error(c);
	dump_stack();

	return stuck;
}

/*
 * Final processing when the last reference of a journal buffer has been
 * dropped. Drop the pin list reference acquired at journal entry open and write
 * the buffer, if requested.
 */
void bch2_journal_buf_put_final(struct journal *j, u64 seq, bool write)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);

	lockdep_assert_held(&j->lock);

	if (__bch2_journal_pin_put(j, seq))
		bch2_journal_reclaim_fast(j);
	if (write)
		closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
	wake_up(&j->wait);
}

/*
 * We don't close a journal_buf until the next journal_buf is finished writing,
 * and can be opened again - this also initializes the next journal_buf:
 */
static void __journal_entry_close(struct journal *j, unsigned closed_val)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = journal_cur_buf(j);
	union journal_res_state old, new;
	u64 v = atomic64_read(&j->reservations.counter);
	unsigned sectors;

	BUG_ON(closed_val != JOURNAL_ENTRY_CLOSED_VAL &&
	       closed_val != JOURNAL_ENTRY_ERROR_VAL);

	lockdep_assert_held(&j->lock);

	do {
		old.v = new.v = v;
		new.cur_entry_offset = closed_val;

		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ||
		    old.cur_entry_offset == new.cur_entry_offset)
			return;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	if (!__journal_entry_is_open(old))
		return;

	/* Close out old buffer: */
	buf->data->u64s		= cpu_to_le32(old.cur_entry_offset);

	trace_journal_entry_close(c, vstruct_bytes(buf->data));

	sectors = vstruct_blocks_plus(buf->data, c->block_bits,
				      buf->u64s_reserved) << c->block_bits;
	BUG_ON(sectors > buf->sectors);
	buf->sectors = sectors;

	/*
	 * We have to set last_seq here, _before_ opening a new journal entry:
	 *
	 * A thread may replace an old pin with a new pin on their current
	 * journal reservation - the expectation being that the journal will
	 * contain either what the old pin protected or what the new pin
	 * protects.
	 *
	 * After the old pin is dropped journal_last_seq() won't include the old
	 * pin, so we can only write the updated last_seq on the entry that
	 * contains whatever the new pin protects.
	 *
	 * Restated, we can _not_ update last_seq for a given entry if there
	 * could be a newer entry open with reservations/pins that have been
	 * taken against it.
	 *
	 * Hence, we want to update/set last_seq on the current journal entry
	 * right before we open a new one:
	 */
	buf->last_seq		= journal_last_seq(j);
	buf->data->last_seq	= cpu_to_le64(buf->last_seq);
	BUG_ON(buf->last_seq > le64_to_cpu(buf->data->seq));

	cancel_delayed_work(&j->write_work);

	bch2_journal_space_available(j);

	__bch2_journal_buf_put(j, old.idx, le64_to_cpu(buf->data->seq));
}
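
/*
 * Worked example of the last_seq rule above (illustrative): suppose entry 7
 * is being closed while entry 8 is already open, and a thread with a
 * reservation against entry 8 drops its old pin on seq 5 and re-pins at
 * seq 8. journal_last_seq() may now have advanced past 5, but entry 7 does
 * not contain what the new pin protects - so entry 7 must go out with the
 * last_seq captured before entry 8 was opened, and only entry 8 (or a later
 * entry) may advertise the advanced last_seq.
 */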

void bch2_journal_halt(struct journal *j)
{
	spin_lock(&j->lock);
	__journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL);
	if (!j->err_seq)
		j->err_seq = journal_cur_seq(j);
	journal_wake(j);
	spin_unlock(&j->lock);
}

static bool journal_entry_want_write(struct journal *j)
{
	bool ret = !journal_entry_is_open(j) ||
		journal_cur_seq(j) == journal_last_unwritten_seq(j);

	/* Don't close it yet if we already have a write in flight: */
	if (ret)
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
	else if (nr_unwritten_journal_entries(j)) {
		struct journal_buf *buf = journal_cur_buf(j);

		if (!buf->flush_time) {
			buf->flush_time	= local_clock() ?: 1;
			buf->expires = jiffies;
		}
	}

	return ret;
}

static bool journal_entry_close(struct journal *j)
{
	bool ret;

	spin_lock(&j->lock);
	ret = journal_entry_want_write(j);
	spin_unlock(&j->lock);

	return ret;
}

/*
 * This should _only_ be called from journal_res_get() - when we actually want
 * a journal reservation - journal entry is open means journal is dirty:
 */
static int journal_entry_open(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = j->buf +
		((journal_cur_seq(j) + 1) & JOURNAL_BUF_MASK);
	union journal_res_state old, new;
	int u64s;
	u64 v;

	lockdep_assert_held(&j->lock);
	BUG_ON(journal_entry_is_open(j));
	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	if (j->blocked)
		return JOURNAL_ERR_blocked;

	if (j->cur_entry_error)
		return j->cur_entry_error;

	if (bch2_journal_error(j))
		return JOURNAL_ERR_insufficient_devices; /* -EROFS */

	if (!fifo_free(&j->pin))
		return JOURNAL_ERR_journal_pin_full;

	if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
		return JOURNAL_ERR_max_in_flight;

	BUG_ON(!j->cur_entry_sectors);

	buf->expires		=
		(journal_cur_seq(j) == j->flushed_seq_ondisk
		 ? jiffies
		 : j->last_flush_write) +
		msecs_to_jiffies(c->opts.journal_flush_delay);

	buf->u64s_reserved	= j->entry_u64s_reserved;
	buf->disk_sectors	= j->cur_entry_sectors;
	buf->sectors		= min(buf->disk_sectors, buf->buf_size >> 9);

	u64s = (int) (buf->sectors << 9) / sizeof(u64) -
		journal_entry_overhead(j);
	u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);

	if (u64s <= (ssize_t) j->early_journal_entries.nr)
		return JOURNAL_ERR_journal_full;

	if (fifo_empty(&j->pin) && j->reclaim_thread)
		wake_up_process(j->reclaim_thread);

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for journal_last_seq() to be calculated correctly
	 */
	atomic64_inc(&j->seq);
	journal_pin_list_init(fifo_push_ref(&j->pin), 1);

	BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf);

	bkey_extent_init(&buf->key);
	buf->noflush	= false;
	buf->must_flush	= false;
	buf->separate_flush = false;
	buf->flush_time	= 0;
	buf->need_flush_to_write_buffer = true;

	memset(buf->data, 0, sizeof(*buf->data));
	buf->data->seq	= cpu_to_le64(journal_cur_seq(j));
	buf->data->u64s	= 0;

	if (j->early_journal_entries.nr) {
		memcpy(buf->data->_data, j->early_journal_entries.data,
		       j->early_journal_entries.nr * sizeof(u64));
		le32_add_cpu(&buf->data->u64s, j->early_journal_entries.nr);
	}

	/*
	 * Must be set before marking the journal entry as open:
	 */
	j->cur_entry_u64s = u64s;

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;

		BUG_ON(old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL);

		new.idx++;
		BUG_ON(journal_state_count(new, new.idx));
		BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_BUF_MASK));

		journal_state_inc(&new);

		/* Handle any already added entries */
		new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	mod_delayed_work(c->io_complete_wq,
			 &j->write_work,
			 msecs_to_jiffies(c->opts.journal_flush_delay));
	journal_wake(j);

	if (j->early_journal_entries.nr)
		darray_exit(&j->early_journal_entries);
	return 0;
}
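
/*
 * Worked sizing example (illustrative): with buf->sectors == 512 (a 256KiB
 * buffer), the entry budget is (512 << 9) / sizeof(u64) == 32768 u64s minus
 * journal_entry_overhead(j); cur_entry_offset then counts reservations taken
 * out of that budget until the entry is closed.
 */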

static bool journal_quiesced(struct journal *j)
{
	bool ret = atomic64_read(&j->seq) == j->seq_ondisk;

	if (!ret)
		journal_entry_close(j);
	return ret;
}

static void journal_quiesce(struct journal *j)
{
	wait_event(j->wait, journal_quiesced(j));
}

static void journal_write_work(struct work_struct *work)
{
	struct journal *j = container_of(work, struct journal, write_work.work);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	long delta;

	spin_lock(&j->lock);
	if (!__journal_entry_is_open(j->reservations))
		goto unlock;

	delta = journal_cur_buf(j)->expires - jiffies;

	if (delta > 0)
		mod_delayed_work(c->io_complete_wq, &j->write_work, delta);
	else
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
unlock:
	spin_unlock(&j->lock);
}

static int __journal_res_get(struct journal *j, struct journal_res *res,
			     unsigned flags)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf;
	bool can_discard;
	int ret;
retry:
	if (journal_res_get_fast(j, res, flags))
		return 0;

	if (bch2_journal_error(j))
		return -BCH_ERR_erofs_journal_err;

	spin_lock(&j->lock);

	/* check once more in case somebody else shut things down... */
	if (bch2_journal_error(j)) {
		spin_unlock(&j->lock);
		return -BCH_ERR_erofs_journal_err;
	}

	/*
	 * Recheck after taking the lock, so we don't race with another thread
	 * that just did journal_entry_open() and call journal_entry_close()
	 * unnecessarily
	 */
	if (journal_res_get_fast(j, res, flags)) {
		spin_unlock(&j->lock);
		return 0;
	}

	if ((flags & BCH_WATERMARK_MASK) < j->watermark) {
		/*
		 * Don't want to close current journal entry, just need to
		 * invoke reclaim:
		 */
		ret = JOURNAL_ERR_journal_full;
		goto unlock;
	}

	/*
	 * If we couldn't get a reservation because the current buf filled up,
	 * and we had room for a bigger entry on disk, signal that we want to
	 * realloc the journal bufs:
	 */
	buf = journal_cur_buf(j);
	if (journal_entry_is_open(j) &&
	    buf->buf_size >> 9 < buf->disk_sectors &&
	    buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
		j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);

	__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
	ret = journal_entry_open(j);

	if (ret == JOURNAL_ERR_max_in_flight) {
		track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight],
				   &j->max_in_flight_start, true);
		trace_and_count(c, journal_entry_full, c);
	}
unlock:
	can_discard = j->can_discard;
	spin_unlock(&j->lock);

	if (!ret)
		goto retry;
	if (journal_error_check_stuck(j, ret, flags))
		ret = -BCH_ERR_journal_res_get_blocked;

	/*
	 * Journal is full - can't rely on reclaim from work item due to
	 * freezing:
	 */
	if ((ret == JOURNAL_ERR_journal_full ||
	     ret == JOURNAL_ERR_journal_pin_full) &&
	    !(flags & JOURNAL_RES_GET_NONBLOCK)) {
		if (can_discard) {
			bch2_journal_do_discards(j);
			goto retry;
		}

		if (mutex_trylock(&j->reclaim_lock)) {
			bch2_journal_reclaim(j);
			mutex_unlock(&j->reclaim_lock);
		}
	}

	return ret == JOURNAL_ERR_insufficient_devices
		? -BCH_ERR_erofs_journal_err
		: -BCH_ERR_journal_res_get_blocked;
}

/*
 * Essentially the entry function to the journaling code. When bcachefs is
 * doing a btree insert, it calls this function to get the current journal
 * write. The journal write is the structure used to set up journal writes.
 * The calling function will then add its keys to the structure, queuing them
 * for the next write.
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks.
 */
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
				  unsigned flags)
{
	int ret;

	closure_wait_event(&j->async_wait,
		   (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
		   (flags & JOURNAL_RES_GET_NONBLOCK));
	return ret;
}
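
/*
 * Illustrative caller sketch (not from this file; error handling trimmed, and
 * the key-copy step is elided since it lives in the journal.h helpers). The
 * normal path is the journal_res_get_fast() fastpath via
 * bch2_journal_res_get(); the slowpath above is only taken when the current
 * entry can't satisfy the reservation:
 *
 *	struct journal_res res = { 0 };
 *	int ret = bch2_journal_res_get(j, &res, jset_u64s(u64s), 0);
 *	if (ret)
 *		return ret;
 *	// ... copy keys into the space reserved for this res ...
 *	bch2_journal_res_put(j, &res);
 */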

/* journal_entry_res: */

void bch2_journal_entry_res_resize(struct journal *j,
				   struct journal_entry_res *res,
				   unsigned new_u64s)
{
	union journal_res_state state;
	int d = new_u64s - res->u64s;

	spin_lock(&j->lock);

	j->entry_u64s_reserved += d;
	if (d <= 0)
		goto out;

	j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
	smp_mb();
	state = READ_ONCE(j->reservations);

	if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
	    state.cur_entry_offset > j->cur_entry_u64s) {
		j->cur_entry_u64s += d;
		/*
		 * Not enough room in current journal entry, have to flush it:
		 */
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
	} else {
		journal_cur_buf(j)->u64s_reserved += d;
	}
out:
	spin_unlock(&j->lock);
	res->u64s += d;
}

/* journal flushing: */

/**
 * bch2_journal_flush_seq_async - wait for a journal entry to be written
 * @j:		journal object
 * @seq:	seq to flush
 * @parent:	closure object to wait with
 * Returns:	1 if @seq has already been flushed, 0 if @seq is being flushed,
 *		-EIO if @seq will never be flushed
 *
 * Like bch2_journal_wait_on_seq, except that it triggers a write immediately if
 * necessary
 */
int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
				 struct closure *parent)
{
	struct journal_buf *buf;
	int ret = 0;

	if (seq <= j->flushed_seq_ondisk)
		return 1;

	spin_lock(&j->lock);

	if (WARN_ONCE(seq > journal_cur_seq(j),
		      "requested to flush journal seq %llu, but currently at %llu",
		      seq, journal_cur_seq(j)))
		goto out;

	/* Recheck under lock: */
	if (j->err_seq && seq >= j->err_seq) {
		ret = -EIO;
		goto out;
	}

	if (seq <= j->flushed_seq_ondisk) {
		ret = 1;
		goto out;
	}

	/* if seq was written, but not flushed - flush a newer one instead */
	seq = max(seq, journal_last_unwritten_seq(j));

recheck_need_open:
	if (seq > journal_cur_seq(j)) {
		struct journal_res res = { 0 };

		if (journal_entry_is_open(j))
			__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);

		spin_unlock(&j->lock);

		ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
		if (ret)
			return ret;

		seq = res.seq;
		buf = j->buf + (seq & JOURNAL_BUF_MASK);
		buf->must_flush = true;

		if (!buf->flush_time) {
			buf->flush_time	= local_clock() ?: 1;
			buf->expires = jiffies;
		}

		if (parent && !closure_wait(&buf->wait, parent))
			BUG();

		bch2_journal_res_put(j, &res);

		spin_lock(&j->lock);
		goto want_write;
	}

	/*
	 * if write was kicked off without a flush, flush the next sequence
	 * number instead
	 */
	buf = journal_seq_to_buf(j, seq);
	if (buf->noflush) {
		seq++;
		goto recheck_need_open;
	}

	buf->must_flush = true;

	if (parent && !closure_wait(&buf->wait, parent))
		BUG();
want_write:
	if (seq == journal_cur_seq(j))
		journal_entry_want_write(j);
out:
	spin_unlock(&j->lock);
	return ret;
}
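
/*
 * Illustrative caller sketch (not from this file): waiting synchronously on a
 * flush via a stack closure, which is effectively what bch2_journal_flush_seq()
 * below does through the waitqueue:
 *
 *	struct closure cl;
 *
 *	closure_init_stack(&cl);
 *	ret = bch2_journal_flush_seq_async(j, seq, &cl);
 *	closure_sync(&cl);	// no-op if ret says already flushed/errored
 */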

int bch2_journal_flush_seq(struct journal *j, u64 seq)
{
	u64 start_time = local_clock();
	int ret, ret2;

	/*
	 * Don't update time_stats when @seq is already flushed:
	 */
	if (seq <= j->flushed_seq_ondisk)
		return 0;

	ret = wait_event_interruptible(j->wait, (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)));

	if (!ret)
		bch2_time_stats_update(j->flush_seq_time, start_time);

	return ret ?: ret2 < 0 ? ret2 : 0;
}

/*
 * bch2_journal_flush_async - if there is an open journal entry, or a journal
 * still being written, write it and wait for the write to complete
 */
void bch2_journal_flush_async(struct journal *j, struct closure *parent)
{
	bch2_journal_flush_seq_async(j, atomic64_read(&j->seq), parent);
}

int bch2_journal_flush(struct journal *j)
{
	return bch2_journal_flush_seq(j, atomic64_read(&j->seq));
}

/*
 * bch2_journal_noflush_seq - tell the journal not to issue any flushes before
 * @seq
 */
bool bch2_journal_noflush_seq(struct journal *j, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	u64 unwritten_seq;
	bool ret = false;

	if (!(c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush)))
		return false;

	if (seq <= c->journal.flushed_seq_ondisk)
		return false;

	spin_lock(&j->lock);
	if (seq <= c->journal.flushed_seq_ondisk)
		goto out;

	for (unwritten_seq = journal_last_unwritten_seq(j);
	     unwritten_seq < seq;
	     unwritten_seq++) {
		struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq);

		/* journal write is already in flight, and was a flush write: */
		if (unwritten_seq == journal_last_unwritten_seq(j) && !buf->noflush)
			goto out;

		buf->noflush = true;
	}

	ret = true;
out:
	spin_unlock(&j->lock);
	return ret;
}
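
/*
 * Illustrative use (hypothetical caller): a writer whose data will be made
 * redundant anyway - e.g. by a later rewrite - can ask for the unwritten
 * entries before @seq to be marked noflush, letting them go out without a
 * preceding cache flush:
 *
 *	if (bch2_journal_noflush_seq(&c->journal, seq))
 *		// entries before @seq will be written as non-flush writes
 */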

int bch2_journal_meta(struct journal *j)
{
	struct journal_buf *buf;
	struct journal_res res;
	int ret;

	memset(&res, 0, sizeof(res));

	ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
	if (ret)
		return ret;

	buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
	buf->must_flush = true;

	if (!buf->flush_time) {
		buf->flush_time	= local_clock() ?: 1;
		buf->expires = jiffies;
	}

	bch2_journal_res_put(j, &res);

	return bch2_journal_flush_seq(j, res.seq);
}
/* block/unblock the journal: */

void bch2_journal_unblock(struct journal *j)
{
	spin_lock(&j->lock);
	j->blocked--;
	spin_unlock(&j->lock);

	journal_wake(j);
}

void bch2_journal_block(struct journal *j)
{
	spin_lock(&j->lock);
	j->blocked++;
	spin_unlock(&j->lock);

	journal_quiesce(j);
}
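
/*
 * Illustrative pairing (mirrors the use in __bch2_set_nr_journal_buckets()
 * below): block the journal around a structural change, then unblock:
 *
 *	bch2_journal_block(&c->journal);
 *	// ... journal is quiesced; safe to resize/rewrite journal state ...
 *	bch2_journal_unblock(&c->journal);
 */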

/*
 * XXX: ideally this would not be closing the current journal entry, but
 * otherwise we do not have a way to avoid racing with res_get() - j->blocked
 * will race.
 */
static bool journal_reservations_stopped(struct journal *j)
{
	union journal_res_state s;

	journal_entry_close(j);

	s.v = atomic64_read_acquire(&j->reservations.counter);

	return	s.buf0_count == 0 &&
		s.buf1_count == 0 &&
		s.buf2_count == 0 &&
		s.buf3_count == 0;
}

void bch2_journal_block_reservations(struct journal *j)
{
	spin_lock(&j->lock);
	j->blocked++;
	spin_unlock(&j->lock);

	wait_event(j->wait, journal_reservations_stopped(j));
}

static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq)
{
	spin_lock(&j->lock);
	max_seq = min(max_seq, journal_cur_seq(j));

	for (u64 seq = journal_last_unwritten_seq(j);
	     seq <= max_seq;
	     seq++) {
		unsigned idx = seq & JOURNAL_BUF_MASK;
		struct journal_buf *buf = j->buf + idx;
		union journal_res_state s;

		if (!buf->need_flush_to_write_buffer)
			continue;

		if (seq == journal_cur_seq(j))
			__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);

		s.v = atomic64_read_acquire(&j->reservations.counter);

		if (journal_state_count(s, idx)) {
			spin_unlock(&j->lock);
			return ERR_PTR(-EAGAIN);
		}

		spin_unlock(&j->lock);
		return buf;
	}

	spin_unlock(&j->lock);
	return NULL;
}

struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq)
{
	struct journal_buf *ret;

	wait_event(j->wait, (ret = __bch2_next_write_buffer_flush_journal_buf(j, max_seq)) != ERR_PTR(-EAGAIN));
	return ret;
}

/* allocate journal on a device: */

static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
					 bool new_fs, struct closure *cl)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	u64 *new_bucket_seq = NULL, *new_buckets = NULL;
	struct open_bucket **ob = NULL;
	long *bu = NULL;
	unsigned i, pos, nr_got = 0, nr_want = nr - ja->nr;
	int ret = 0;

	BUG_ON(nr <= ja->nr);

	bu		= kcalloc(nr_want, sizeof(*bu), GFP_KERNEL);
	ob		= kcalloc(nr_want, sizeof(*ob), GFP_KERNEL);
	new_buckets	= kcalloc(nr, sizeof(u64), GFP_KERNEL);
	new_bucket_seq	= kcalloc(nr, sizeof(u64), GFP_KERNEL);
	if (!bu || !ob || !new_buckets || !new_bucket_seq) {
		ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
		goto err_free;
	}

	for (nr_got = 0; nr_got < nr_want; nr_got++) {
		if (new_fs) {
			bu[nr_got] = bch2_bucket_alloc_new_fs(ca);
			if (bu[nr_got] < 0) {
				ret = -BCH_ERR_ENOSPC_bucket_alloc;
				break;
			}
		} else {
			ob[nr_got] = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal, cl);
			ret = PTR_ERR_OR_ZERO(ob[nr_got]);
			if (ret)
				break;

			ret = bch2_trans_run(c,
				bch2_trans_mark_metadata_bucket(trans, ca,
						ob[nr_got]->bucket, BCH_DATA_journal,
						ca->mi.bucket_size));
			if (ret) {
				bch2_open_bucket_put(c, ob[nr_got]);
				bch_err_msg(c, ret, "marking new journal buckets");
				break;
			}

			bu[nr_got] = ob[nr_got]->bucket;
		}
	}

	if (!nr_got)
		goto err_free;

	/* Don't return an error if we successfully allocated some buckets: */
	ret = 0;

	if (c) {
		bch2_journal_flush_all_pins(&c->journal);
		bch2_journal_block(&c->journal);
		mutex_lock(&c->sb_lock);
	}

	memcpy(new_buckets,	ja->buckets,	ja->nr * sizeof(u64));
	memcpy(new_bucket_seq,	ja->bucket_seq,	ja->nr * sizeof(u64));

	BUG_ON(ja->discard_idx > ja->nr);

	pos = ja->discard_idx ?: ja->nr;

	memmove(new_buckets + pos + nr_got,
		new_buckets + pos,
		sizeof(new_buckets[0]) * (ja->nr - pos));
	memmove(new_bucket_seq + pos + nr_got,
		new_bucket_seq + pos,
		sizeof(new_bucket_seq[0]) * (ja->nr - pos));

	for (i = 0; i < nr_got; i++) {
		new_buckets[pos + i] = bu[i];
		new_bucket_seq[pos + i] = 0;
	}

	nr = ja->nr + nr_got;

	ret = bch2_journal_buckets_to_sb(c, ca, new_buckets, nr);
	if (ret)
		goto err_unblock;

	if (!new_fs)
		bch2_write_super(c);

	/* Commit: */
	if (c)
		spin_lock(&c->journal.lock);

	swap(new_buckets,	ja->buckets);
	swap(new_bucket_seq,	ja->bucket_seq);
	ja->nr = nr;

	if (pos <= ja->discard_idx)
		ja->discard_idx = (ja->discard_idx + nr_got) % ja->nr;
	if (pos <= ja->dirty_idx_ondisk)
		ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + nr_got) % ja->nr;
	if (pos <= ja->dirty_idx)
		ja->dirty_idx = (ja->dirty_idx + nr_got) % ja->nr;
	if (pos <= ja->cur_idx)
		ja->cur_idx = (ja->cur_idx + nr_got) % ja->nr;

	if (c)
		spin_unlock(&c->journal.lock);
err_unblock:
	if (c) {
		bch2_journal_unblock(&c->journal);
		mutex_unlock(&c->sb_lock);
	}

	if (ret && !new_fs)
		for (i = 0; i < nr_got; i++)
			bch2_trans_run(c,
				bch2_trans_mark_metadata_bucket(trans, ca,
						bu[i], BCH_DATA_free, 0));
err_free:
	if (!new_fs)
		for (i = 0; i < nr_got; i++)
			bch2_open_bucket_put(c, ob[i]);

	kfree(new_bucket_seq);
	kfree(new_buckets);
	kfree(ob);
	kfree(bu);
	return ret;
}

/*
 * Allocate more journal space at runtime - not currently making use of it, but
 * the code works:
 */
int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
				unsigned nr)
{
	struct journal_device *ja = &ca->journal;
	struct closure cl;
	int ret = 0;

	closure_init_stack(&cl);

	down_write(&c->state_lock);

	/* don't handle reducing nr of buckets yet: */
	if (nr < ja->nr)
		goto unlock;

	while (ja->nr < nr) {
		struct disk_reservation disk_res = { 0, 0, 0 };

		/*
		 * note: journal buckets aren't really counted as _sectors_ used yet, so
		 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
		 * when space used goes up without a reservation - but we do need the
		 * reservation to ensure we'll actually be able to allocate:
		 *
		 * XXX: that's not right, disk reservations only ensure a
		 * filesystem-wide allocation will succeed, this is a device
		 * specific allocation - we can hang here:
		 */

		ret = bch2_disk_reservation_get(c, &disk_res,
						bucket_to_sector(ca, nr - ja->nr), 1, 0);
		if (ret)
			break;

		ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);

		bch2_disk_reservation_put(c, &disk_res);

		closure_sync(&cl);

		if (ret && ret != -BCH_ERR_bucket_alloc_blocked)
			break;
	}

	if (ret)
		bch_err_fn(c, ret);
unlock:
	up_write(&c->state_lock);
	return ret;
}

int bch2_dev_journal_alloc(struct bch_dev *ca)
{
	unsigned nr;
	int ret;

	if (dynamic_fault("bcachefs:add:journal_alloc")) {
		ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
		goto err;
	}

	/* 1/128th of the device by default: */
	nr = ca->mi.nbuckets >> 7;

	/*
	 * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
	 * is smaller:
	 */
	nr = clamp_t(unsigned, nr,
		     BCH_JOURNAL_BUCKETS_MIN,
		     min(1 << 13,
			 (1 << 24) / ca->mi.bucket_size));
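
	/*
	 * Worked example (illustrative): a device with 2^20 buckets of 2048
	 * sectors (1MiB) gets nr = 2^20 >> 7 = 8192, and the clamp's upper
	 * bound is min(1 << 13, (1 << 24) / 2048) = 8192 buckets - i.e. the
	 * full 8GB cap.
	 */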

	ret = __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
err:
	if (ret)
		bch_err_fn(ca, ret);
	return ret;
}

int bch2_fs_journal_alloc(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;

	for_each_online_member(ca, c, i) {
		if (ca->journal.nr)
			continue;

		int ret = bch2_dev_journal_alloc(ca);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			return ret;
		}
	}

	return 0;
}

/* startup/shutdown: */

static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
{
	bool ret = false;
	u64 seq;

	spin_lock(&j->lock);
	for (seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j) && !ret;
	     seq++) {
		struct journal_buf *buf = journal_seq_to_buf(j, seq);

		if (bch2_bkey_has_device_c(bkey_i_to_s_c(&buf->key), dev_idx))
			ret = true;
	}
	spin_unlock(&j->lock);

	return ret;
}

void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
{
	wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
}

void bch2_fs_journal_stop(struct journal *j)
{
	bch2_journal_reclaim_stop(j);
	bch2_journal_flush_all_pins(j);

	wait_event(j->wait, journal_entry_close(j));

	/*
	 * Always write a new journal entry, to make sure the clock hands are up
	 * to date (and match the superblock)
	 */
	bch2_journal_meta(j);

	journal_quiesce(j);

	BUG_ON(!bch2_journal_error(j) &&
	       test_bit(JOURNAL_REPLAY_DONE, &j->flags) &&
	       j->last_empty_seq != journal_cur_seq(j));

	cancel_delayed_work_sync(&j->write_work);
}

int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	struct journal_replay *i, **_i;
	struct genradix_iter iter;
	bool had_entries = false;
	unsigned ptr;
	u64 last_seq = cur_seq, nr, seq;

	genradix_for_each_reverse(&c->journal_entries, iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		last_seq = le64_to_cpu(i->j.last_seq);
		break;
	}

	nr = cur_seq - last_seq;

	if (nr + 1 > j->pin.size) {
		free_fifo(&j->pin);
		init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
		if (!j->pin.data) {
			bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
			return -BCH_ERR_ENOMEM_journal_pin_fifo;
		}
	}

	j->replay_journal_seq	= last_seq;
	j->replay_journal_seq_end = cur_seq;
	j->last_seq_ondisk	= last_seq;
	j->flushed_seq_ondisk	= cur_seq - 1;
	j->seq_ondisk		= cur_seq - 1;
	j->pin.front		= last_seq;
	j->pin.back		= cur_seq;
	atomic64_set(&j->seq, cur_seq - 1);

	fifo_for_each_entry_ptr(p, &j->pin, seq)
		journal_pin_list_init(p, 1);

	genradix_for_each(&c->journal_entries, iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		seq = le64_to_cpu(i->j.seq);
		BUG_ON(seq >= cur_seq);

		if (seq < last_seq)
			continue;

		if (journal_entry_empty(&i->j))
			j->last_empty_seq = le64_to_cpu(i->j.seq);

		p = journal_seq_pin(j, seq);

		p->devs.nr = 0;
		for (ptr = 0; ptr < i->nr_ptrs; ptr++)
			bch2_dev_list_add_dev(&p->devs, i->ptrs[ptr].dev);

		had_entries = true;
	}

	if (!had_entries)
		j->last_empty_seq = cur_seq;

	spin_lock(&j->lock);

	set_bit(JOURNAL_STARTED, &j->flags);
	j->last_flush_write = jiffies;

	j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
	j->reservations.unwritten_idx++;

	c->last_bucket_seq_cleanup = journal_cur_seq(j);

	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	return bch2_journal_reclaim_start(j);
}
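
/*
 * Worked example of the sequence-number window set up above (illustrative):
 * if the newest good entry on disk had seq 99 and last_seq 90, replay starts
 * with cur_seq == 100: the pin fifo spans [90, 100), seq_ondisk and
 * flushed_seq_ondisk are initialized to 99, and the first entry opened after
 * replay will be seq 100.
 */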

/* init/exit: */

void bch2_dev_journal_exit(struct bch_dev *ca)
{
	kfree(ca->journal.bio);
	kfree(ca->journal.buckets);
	kfree(ca->journal.bucket_seq);

	ca->journal.bio		= NULL;
	ca->journal.buckets	= NULL;
	ca->journal.bucket_seq	= NULL;
}

int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_field_get(sb, journal);
	struct bch_sb_field_journal_v2 *journal_buckets_v2 =
		bch2_sb_field_get(sb, journal_v2);
	unsigned i, nr_bvecs;

	ja->nr = 0;

	if (journal_buckets_v2) {
		unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);

		for (i = 0; i < nr; i++)
			ja->nr += le64_to_cpu(journal_buckets_v2->d[i].nr);
	} else if (journal_buckets) {
		ja->nr = bch2_nr_journal_buckets(journal_buckets);
	}

	ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->bucket_seq)
		return -BCH_ERR_ENOMEM_dev_journal_init;

	nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);

	ca->journal.bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
	if (!ca->journal.bio)
		return -BCH_ERR_ENOMEM_dev_journal_init;

	bio_init(ca->journal.bio, NULL, ca->journal.bio->bi_inline_vecs, nr_bvecs, 0);

	ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->buckets)
		return -BCH_ERR_ENOMEM_dev_journal_init;

	if (journal_buckets_v2) {
		unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
		unsigned j, dst = 0;

		for (i = 0; i < nr; i++)
			for (j = 0; j < le64_to_cpu(journal_buckets_v2->d[i].nr); j++)
				ja->buckets[dst++] =
					le64_to_cpu(journal_buckets_v2->d[i].start) + j;
	} else if (journal_buckets) {
		for (i = 0; i < ja->nr; i++)
			ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
	}

	return 0;
}

void bch2_fs_journal_exit(struct journal *j)
{
	unsigned i;

	darray_exit(&j->early_journal_entries);

	for (i = 0; i < ARRAY_SIZE(j->buf); i++)
		kvpfree(j->buf[i].data, j->buf[i].buf_size);
	free_fifo(&j->pin);
}

int bch2_fs_journal_init(struct journal *j)
{
	static struct lock_class_key res_key;
	unsigned i;

	mutex_init(&j->buf_lock);
	spin_lock_init(&j->lock);
	spin_lock_init(&j->err_lock);
	init_waitqueue_head(&j->wait);
	INIT_DELAYED_WORK(&j->write_work, journal_write_work);
	init_waitqueue_head(&j->reclaim_wait);
	init_waitqueue_head(&j->pin_flush_wait);
	mutex_init(&j->reclaim_lock);
	mutex_init(&j->discard_lock);

	lockdep_init_map(&j->res_map, "journal res", &res_key, 0);

	atomic64_set(&j->reservations.counter,
		((union journal_res_state)
		 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)))
		return -BCH_ERR_ENOMEM_journal_pin_fifo;

	for (i = 0; i < ARRAY_SIZE(j->buf); i++) {
		j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
		j->buf[i].data = kvpmalloc(j->buf[i].buf_size, GFP_KERNEL);
		if (!j->buf[i].data)
			return -BCH_ERR_ENOMEM_journal_buf;
	}

	j->pin.front = j->pin.back = 1;
	return 0;
}

/* debug: */

void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	union journal_res_state s;
	struct bch_dev *ca;
	unsigned long now = jiffies;
	u64 nr_writes = j->nr_flush_writes + j->nr_noflush_writes;
	u64 seq;
	unsigned i;

	if (!out->nr_tabstops)
		printbuf_tabstop_push(out, 24);
	out->atomic++;

	rcu_read_lock();
	s = READ_ONCE(j->reservations);

	prt_printf(out, "dirty journal entries:\t%llu/%llu\n",	fifo_used(&j->pin), j->pin.size);
	prt_printf(out, "seq:\t\t\t%llu\n",			journal_cur_seq(j));
	prt_printf(out, "seq_ondisk:\t\t%llu\n",		j->seq_ondisk);
	prt_printf(out, "last_seq:\t\t%llu\n",			journal_last_seq(j));
	prt_printf(out, "last_seq_ondisk:\t%llu\n",		j->last_seq_ondisk);
	prt_printf(out, "flushed_seq_ondisk:\t%llu\n",		j->flushed_seq_ondisk);
	prt_printf(out, "watermark:\t\t%s\n",			bch2_watermarks[j->watermark]);
	prt_printf(out, "each entry reserved:\t%u\n",		j->entry_u64s_reserved);
	prt_printf(out, "nr flush writes:\t%llu\n",		j->nr_flush_writes);
	prt_printf(out, "nr noflush writes:\t%llu\n",		j->nr_noflush_writes);
	prt_printf(out, "average write size:\t");
	prt_human_readable_u64(out, nr_writes ? div64_u64(j->entry_bytes_written, nr_writes) : 0);
	prt_newline(out);
	prt_printf(out, "nr direct reclaim:\t%llu\n",		j->nr_direct_reclaim);
	prt_printf(out, "nr background reclaim:\t%llu\n",	j->nr_background_reclaim);
	prt_printf(out, "reclaim kicked:\t\t%u\n",		j->reclaim_kicked);
	prt_printf(out, "reclaim runs in:\t%u ms\n",		time_after(j->next_reclaim, now)
	       ? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
	prt_printf(out, "current entry sectors:\t%u\n",		j->cur_entry_sectors);
	prt_printf(out, "current entry error:\t%s\n",		bch2_journal_errors[j->cur_entry_error]);
	prt_printf(out, "current entry:\t\t");

	switch (s.cur_entry_offset) {
	case JOURNAL_ENTRY_ERROR_VAL:
		prt_printf(out, "error");
		break;
	case JOURNAL_ENTRY_CLOSED_VAL:
		prt_printf(out, "closed");
		break;
	default:
		prt_printf(out, "%u/%u", s.cur_entry_offset, j->cur_entry_u64s);
		break;
	}

	prt_newline(out);

	for (seq = journal_cur_seq(j);
	     seq >= journal_last_unwritten_seq(j);
	     --seq) {
		i = seq & JOURNAL_BUF_MASK;

		prt_printf(out, "unwritten entry:");
		prt_tab(out);
		prt_printf(out, "%llu", seq);
		prt_newline(out);
		printbuf_indent_add(out, 2);

		prt_printf(out, "refcount:");
		prt_tab(out);
		prt_printf(out, "%u", journal_state_count(s, i));
		prt_newline(out);

		prt_printf(out, "sectors:");
		prt_tab(out);
		prt_printf(out, "%u", j->buf[i].sectors);
		prt_newline(out);

		prt_printf(out, "expires:");
		prt_tab(out);
		prt_printf(out, "%li jiffies", j->buf[i].expires - jiffies);
		prt_newline(out);

		printbuf_indent_sub(out, 2);
	}

	prt_printf(out,
	       "replay done:\t\t%i\n",
	       test_bit(JOURNAL_REPLAY_DONE,	&j->flags));

	prt_printf(out, "space:\n");
	prt_printf(out, "\tdiscarded\t%u:%u\n",
	       j->space[journal_space_discarded].next_entry,
	       j->space[journal_space_discarded].total);
	prt_printf(out, "\tclean ondisk\t%u:%u\n",
	       j->space[journal_space_clean_ondisk].next_entry,
	       j->space[journal_space_clean_ondisk].total);
	prt_printf(out, "\tclean\t\t%u:%u\n",
	       j->space[journal_space_clean].next_entry,
	       j->space[journal_space_clean].total);
	prt_printf(out, "\ttotal\t\t%u:%u\n",
	       j->space[journal_space_total].next_entry,
	       j->space[journal_space_total].total);

	for_each_member_device_rcu(ca, c, i,
				   &c->rw_devs[BCH_DATA_journal]) {
		struct journal_device *ja = &ca->journal;

		if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
			continue;

		if (!ja->nr)
			continue;

		prt_printf(out, "dev %u:\n",		i);
		prt_printf(out, "\tnr\t\t%u\n",		ja->nr);
		prt_printf(out, "\tbucket size\t%u\n",	ca->mi.bucket_size);
		prt_printf(out, "\tavailable\t%u:%u\n",	bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
		prt_printf(out, "\tdiscard_idx\t%u\n",	ja->discard_idx);
		prt_printf(out, "\tdirty_ondisk\t%u (seq %llu)\n", ja->dirty_idx_ondisk,	ja->bucket_seq[ja->dirty_idx_ondisk]);
		prt_printf(out, "\tdirty_idx\t%u (seq %llu)\n", ja->dirty_idx,		ja->bucket_seq[ja->dirty_idx]);
		prt_printf(out, "\tcur_idx\t\t%u (seq %llu)\n", ja->cur_idx,		ja->bucket_seq[ja->cur_idx]);
	}

	rcu_read_unlock();

	--out->atomic;
}

void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	spin_lock(&j->lock);
	__bch2_journal_debug_to_text(out, j);
	spin_unlock(&j->lock);
}

bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *pin;
	unsigned i;

	spin_lock(&j->lock);
	*seq = max(*seq, j->pin.front);

	if (*seq >= j->pin.back) {
		spin_unlock(&j->lock);
		return true;
	}

	out->atomic++;

	pin_list = journal_seq_pin(j, *seq);

	prt_printf(out, "%llu: count %u", *seq, atomic_read(&pin_list->count));
	prt_newline(out);
	printbuf_indent_add(out, 2);

	for (i = 0; i < ARRAY_SIZE(pin_list->list); i++)
		list_for_each_entry(pin, &pin_list->list[i], list) {
			prt_printf(out, "\t%px %ps", pin, pin->flush);
			prt_newline(out);
		}

	if (!list_empty(&pin_list->flushed)) {
		prt_printf(out, "flushed:");
		prt_newline(out);
	}

	list_for_each_entry(pin, &pin_list->flushed, list) {
		prt_printf(out, "\t%px %ps", pin, pin->flush);
		prt_newline(out);
	}

	printbuf_indent_sub(out, 2);

	--out->atomic;
	spin_unlock(&j->lock);

	return false;
}

void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
{
	u64 seq = 0;

	while (!bch2_journal_seq_pins_to_text(out, j, &seq))
		seq++;
}