// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_sb.h"
#include "journal_seq_blacklist.h"
#include "trace.h"

static const char * const bch2_journal_errors[] = {
#define x(n)    #n,
        JOURNAL_ERRORS()
#undef x
        NULL
};

static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
{
        return seq > j->seq_ondisk;
}

static bool __journal_entry_is_open(union journal_res_state state)
{
        return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}

static inline unsigned nr_unwritten_journal_entries(struct journal *j)
{
        return atomic64_read(&j->seq) - j->seq_ondisk;
}

static bool journal_entry_is_open(struct journal *j)
{
        return __journal_entry_is_open(j->reservations);
}
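
/*
 * A note on the state encoding (restating the helpers above, not new
 * behaviour): whether the current entry is open is encoded in
 * cur_entry_offset itself. Any value below JOURNAL_ENTRY_CLOSED_VAL means
 * "open, with that many u64s already handed out"; the sentinels
 * JOURNAL_ENTRY_CLOSED_VAL and JOURNAL_ENTRY_ERROR_VAL mean closed and shut
 * down, respectively. An illustrative sketch:
 *
 *      union journal_res_state s = READ_ONCE(j->reservations);
 *
 *      if (s.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL)
 *              ... entry open, s.cur_entry_offset u64s reserved so far ...
 *      else if (s.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL)
 *              ... closed; journal_entry_open() may reopen it ...
 *      else
 *              ... JOURNAL_ENTRY_ERROR_VAL: the journal has been halted ...
 */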

static void bch2_journal_buf_to_text(struct printbuf *out, struct journal *j, u64 seq)
{
        union journal_res_state s = READ_ONCE(j->reservations);
        unsigned i = seq & JOURNAL_BUF_MASK;
        struct journal_buf *buf = j->buf + i;

        prt_printf(out, "seq:");
        prt_tab(out);
        prt_printf(out, "%llu", seq);
        prt_newline(out);
        printbuf_indent_add(out, 2);

        prt_printf(out, "refcount:");
        prt_tab(out);
        prt_printf(out, "%u", journal_state_count(s, i));
        prt_newline(out);

        prt_printf(out, "size:");
        prt_tab(out);
        prt_human_readable_u64(out, vstruct_bytes(buf->data));
        prt_newline(out);

        prt_printf(out, "expires:");
        prt_tab(out);
        prt_printf(out, "%li jiffies", buf->expires - jiffies);
        prt_newline(out);

        if (buf->write_done)
                prt_printf(out, "write done\n");
        else if (buf->write_allocated)
                prt_printf(out, "write allocated\n");
        else if (buf->write_started)
                prt_printf(out, "write started\n");

        printbuf_indent_sub(out, 2);
}

static void bch2_journal_bufs_to_text(struct printbuf *out, struct journal *j)
{
        if (!out->nr_tabstops)
                printbuf_tabstop_push(out, 24);

        for (u64 seq = journal_last_unwritten_seq(j);
             seq <= journal_cur_seq(j);
             seq++)
                bch2_journal_buf_to_text(out, j, seq);
        prt_printf(out, "last buf %s\n", journal_entry_is_open(j) ? "open" : "closed");
}

static inline struct journal_buf *
journal_seq_to_buf(struct journal *j, u64 seq)
{
        struct journal_buf *buf = NULL;

        EBUG_ON(seq > journal_cur_seq(j));

        if (journal_seq_unwritten(j, seq)) {
                buf = j->buf + (seq & JOURNAL_BUF_MASK);
                EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
        }
        return buf;
}

static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
{
        unsigned i;

        for (i = 0; i < ARRAY_SIZE(p->list); i++)
                INIT_LIST_HEAD(&p->list[i]);
        INIT_LIST_HEAD(&p->flushed);
        atomic_set(&p->count, count);
        p->devs.nr = 0;
}

/*
 * Detect stuck journal conditions and trigger shutdown. Technically the journal
 * can end up stuck for a variety of reasons, such as a blocked I/O, journal
 * reservation lockup, etc. Since this is a fatal error with potentially
 * unpredictable characteristics, we want to be fairly conservative before we
 * decide to shut things down.
 *
 * Consider the journal stuck when it appears full with no ability to commit
 * btree transactions, discard journal buckets, or acquire a priority
 * (reserved watermark) reservation.
 */
static inline bool
journal_error_check_stuck(struct journal *j, int error, unsigned flags)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        bool stuck = false;
        struct printbuf buf = PRINTBUF;

        if (!(error == JOURNAL_ERR_journal_full ||
              error == JOURNAL_ERR_journal_pin_full) ||
            nr_unwritten_journal_entries(j) ||
            (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim)
                return stuck;

        spin_lock(&j->lock);

        if (j->can_discard) {
                spin_unlock(&j->lock);
                return stuck;
        }

        stuck = true;

        /*
         * The journal shutdown path will set ->err_seq, but do it here first to
         * serialize against concurrent failures and avoid duplicate error
         * reports.
         */
        if (j->err_seq) {
                spin_unlock(&j->lock);
                return stuck;
        }
        j->err_seq = journal_cur_seq(j);
        spin_unlock(&j->lock);

        bch_err(c, "Journal stuck! Have a pre-reservation but journal full (error %s)",
                bch2_journal_errors[error]);
        bch2_journal_debug_to_text(&buf, j);
        bch_err(c, "%s", buf.buf);

        printbuf_reset(&buf);
        bch2_journal_pins_to_text(&buf, j);
        bch_err(c, "Journal pins:\n%s", buf.buf);
        printbuf_exit(&buf);

        bch2_fatal_error(c);
        dump_stack();

        return stuck;
}

void bch2_journal_do_writes(struct journal *j)
{
        for (u64 seq = journal_last_unwritten_seq(j);
             seq <= journal_cur_seq(j);
             seq++) {
                unsigned idx = seq & JOURNAL_BUF_MASK;
                struct journal_buf *w = j->buf + idx;

                if (w->write_started && !w->write_allocated)
                        break;
                if (w->write_started)
                        continue;

                if (!journal_state_count(j->reservations, idx)) {
                        w->write_started = true;
                        closure_call(&w->io, bch2_journal_write, j->wq, NULL);
                }

                break;
        }
}

/*
 * Final processing when the last reference of a journal buffer has been
 * dropped. Drop the pin list reference acquired at journal entry open and write
 * the buffer, if requested.
 */
void bch2_journal_buf_put_final(struct journal *j, u64 seq)
{
        lockdep_assert_held(&j->lock);

        if (__bch2_journal_pin_put(j, seq))
                bch2_journal_reclaim_fast(j);
        bch2_journal_do_writes(j);
}
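
/*
 * A sketch of the reference lifecycle, pieced together from the code in this
 * file (illustrative summary, not new behaviour): journal_entry_open() takes
 * one reference on the new entry's pin list (journal_pin_list_init(..., 1))
 * and one state reference (journal_state_inc()); each reservation takes and
 * drops further state references via bch2_journal_res_get() /
 * bch2_journal_res_put(). Once the entry is closed and the last state
 * reference is dropped, bch2_journal_buf_put_final() runs: it drops the pin
 * list reference taken at open and kicks off the write via
 * bch2_journal_do_writes().
 */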

/*
 * We don't close a journal_buf until the next journal_buf is finished writing,
 * and can be opened again - this also initializes the next journal_buf:
 */
static void __journal_entry_close(struct journal *j, unsigned closed_val, bool trace)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_buf *buf = journal_cur_buf(j);
        union journal_res_state old, new;
        u64 v = atomic64_read(&j->reservations.counter);
        unsigned sectors;

        BUG_ON(closed_val != JOURNAL_ENTRY_CLOSED_VAL &&
               closed_val != JOURNAL_ENTRY_ERROR_VAL);

        lockdep_assert_held(&j->lock);

        do {
                old.v = new.v = v;
                new.cur_entry_offset = closed_val;

                if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ||
                    old.cur_entry_offset == new.cur_entry_offset)
                        return;
        } while ((v = atomic64_cmpxchg(&j->reservations.counter,
                                       old.v, new.v)) != old.v);

        if (!__journal_entry_is_open(old))
                return;

        /* Close out old buffer: */
        buf->data->u64s         = cpu_to_le32(old.cur_entry_offset);

        if (trace_journal_entry_close_enabled() && trace) {
                struct printbuf pbuf = PRINTBUF;
                pbuf.atomic++;

                prt_str(&pbuf, "entry size: ");
                prt_human_readable_u64(&pbuf, vstruct_bytes(buf->data));
                prt_newline(&pbuf);
                bch2_prt_task_backtrace(&pbuf, current, 1, GFP_NOWAIT);
                trace_journal_entry_close(c, pbuf.buf);
                printbuf_exit(&pbuf);
        }

        sectors = vstruct_blocks_plus(buf->data, c->block_bits,
                                      buf->u64s_reserved) << c->block_bits;
        BUG_ON(sectors > buf->sectors);
        buf->sectors = sectors;

        /*
         * We have to set last_seq here, _before_ opening a new journal entry:
         *
         * A thread may replace an old pin with a new pin on its current
         * journal reservation - the expectation being that the journal will
         * contain either what the old pin protected or what the new pin
         * protects.
         *
         * After the old pin is dropped journal_last_seq() won't include the old
         * pin, so we can only write the updated last_seq on the entry that
         * contains whatever the new pin protects.
         *
         * Restated, we can _not_ update last_seq for a given entry if there
         * could be a newer entry open with reservations/pins that have been
         * taken against it.
         *
         * Hence, we want to update/set last_seq on the current journal entry
         * right before we open a new one:
         */
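        /*
         * Concrete (hypothetical) illustration of the above: a thread holding
         * a reservation against entry 12 drops its pin on entry 10 and re-pins
         * its data at entry 12. journal_last_seq() may now advance past 10,
         * but the advanced value may only be written out with entry 12 or
         * later - if an older entry claiming last_seq > 10 hit disk while
         * entry 12 (which holds the re-pinned data) did not, replay would
         * find neither the old nor the new copy.
         */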
        buf->last_seq           = journal_last_seq(j);
        buf->data->last_seq     = cpu_to_le64(buf->last_seq);
        BUG_ON(buf->last_seq > le64_to_cpu(buf->data->seq));

        cancel_delayed_work(&j->write_work);

        bch2_journal_space_available(j);

        __bch2_journal_buf_put(j, old.idx, le64_to_cpu(buf->data->seq));
}

void bch2_journal_halt(struct journal *j)
{
        spin_lock(&j->lock);
        __journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL, true);
        if (!j->err_seq)
                j->err_seq = journal_cur_seq(j);
        journal_wake(j);
        spin_unlock(&j->lock);
}

static bool journal_entry_want_write(struct journal *j)
{
        bool ret = !journal_entry_is_open(j) ||
                journal_cur_seq(j) == journal_last_unwritten_seq(j);

        /* Don't close it yet if we already have a write in flight: */
        if (ret)
                __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
        else if (nr_unwritten_journal_entries(j)) {
                struct journal_buf *buf = journal_cur_buf(j);

                if (!buf->flush_time) {
                        buf->flush_time = local_clock() ?: 1;
                        buf->expires = jiffies;
                }
        }

        return ret;
}

bool bch2_journal_entry_close(struct journal *j)
{
        bool ret;

        spin_lock(&j->lock);
        ret = journal_entry_want_write(j);
        spin_unlock(&j->lock);

        return ret;
}

/*
 * should _only_ be called from journal_res_get() - when we actually want a
 * journal reservation - journal entry is open means journal is dirty:
 */
static int journal_entry_open(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_buf *buf = j->buf +
                ((journal_cur_seq(j) + 1) & JOURNAL_BUF_MASK);
        union journal_res_state old, new;
        int u64s;
        u64 v;

        lockdep_assert_held(&j->lock);
        BUG_ON(journal_entry_is_open(j));
        BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

        if (j->blocked)
                return JOURNAL_ERR_blocked;

        if (j->cur_entry_error)
                return j->cur_entry_error;

        if (bch2_journal_error(j))
                return JOURNAL_ERR_insufficient_devices; /* -EROFS */

        if (!fifo_free(&j->pin))
                return JOURNAL_ERR_journal_pin_full;

        if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
                return JOURNAL_ERR_max_in_flight;

        BUG_ON(!j->cur_entry_sectors);

        buf->expires            =
                (journal_cur_seq(j) == j->flushed_seq_ondisk
                 ? jiffies
                 : j->last_flush_write) +
                msecs_to_jiffies(c->opts.journal_flush_delay);

        buf->u64s_reserved      = j->entry_u64s_reserved;
        buf->disk_sectors       = j->cur_entry_sectors;
        buf->sectors            = min(buf->disk_sectors, buf->buf_size >> 9);

        u64s = (int) (buf->sectors << 9) / sizeof(u64) -
                journal_entry_overhead(j);
        u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);

        if (u64s <= (ssize_t) j->early_journal_entries.nr)
                return JOURNAL_ERR_journal_full;

        if (fifo_empty(&j->pin) && j->reclaim_thread)
                wake_up_process(j->reclaim_thread);

        /*
         * The fifo_push() needs to happen at the same time as j->seq is
         * incremented for journal_last_seq() to be calculated correctly
         */
        atomic64_inc(&j->seq);
        journal_pin_list_init(fifo_push_ref(&j->pin), 1);

        BUG_ON(j->pin.back - 1 != atomic64_read(&j->seq));

        BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf);

        bkey_extent_init(&buf->key);
        buf->noflush            = false;
        buf->must_flush         = false;
        buf->separate_flush     = false;
        buf->flush_time         = 0;
        buf->need_flush_to_write_buffer = true;
        buf->write_started      = false;
        buf->write_allocated    = false;
        buf->write_done         = false;

        memset(buf->data, 0, sizeof(*buf->data));
        buf->data->seq  = cpu_to_le64(journal_cur_seq(j));
        buf->data->u64s = 0;

        if (j->early_journal_entries.nr) {
                memcpy(buf->data->_data, j->early_journal_entries.data,
                       j->early_journal_entries.nr * sizeof(u64));
                le32_add_cpu(&buf->data->u64s, j->early_journal_entries.nr);
        }

        /*
         * Must be set before marking the journal entry as open:
         */
        j->cur_entry_u64s = u64s;

        v = atomic64_read(&j->reservations.counter);
        do {
                old.v = new.v = v;

                BUG_ON(old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL);

                new.idx++;
                BUG_ON(journal_state_count(new, new.idx));
                BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_BUF_MASK));

                journal_state_inc(&new);

                /* Handle any already added entries */
                new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
        } while ((v = atomic64_cmpxchg(&j->reservations.counter,
                                       old.v, new.v)) != old.v);

        if (nr_unwritten_journal_entries(j) == 1)
                mod_delayed_work(j->wq,
                                 &j->write_work,
                                 msecs_to_jiffies(c->opts.journal_flush_delay));
        journal_wake(j);

        if (j->early_journal_entries.nr)
                darray_exit(&j->early_journal_entries);
        return 0;
}

static bool journal_quiesced(struct journal *j)
{
        bool ret = atomic64_read(&j->seq) == j->seq_ondisk;

        if (!ret)
                bch2_journal_entry_close(j);
        return ret;
}

static void journal_quiesce(struct journal *j)
{
        wait_event(j->wait, journal_quiesced(j));
}

static void journal_write_work(struct work_struct *work)
{
        struct journal *j = container_of(work, struct journal, write_work.work);

        spin_lock(&j->lock);
        if (__journal_entry_is_open(j->reservations)) {
                long delta = journal_cur_buf(j)->expires - jiffies;

                if (delta > 0)
                        mod_delayed_work(j->wq, &j->write_work, delta);
                else
                        __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
        }
        spin_unlock(&j->lock);
}

static int __journal_res_get(struct journal *j, struct journal_res *res,
                             unsigned flags)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_buf *buf;
        bool can_discard;
        int ret;
retry:
        if (journal_res_get_fast(j, res, flags))
                return 0;

        if ((flags & BCH_WATERMARK_MASK) < j->watermark) {
                ret = JOURNAL_ERR_journal_full;
                can_discard = j->can_discard;
                goto out;
        }

        if (j->blocked)
                return -BCH_ERR_journal_res_get_blocked;

        if (bch2_journal_error(j))
                return -BCH_ERR_erofs_journal_err;

        if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf) && !journal_entry_is_open(j)) {
                ret = JOURNAL_ERR_max_in_flight;
                goto out;
        }

        spin_lock(&j->lock);

        /*
         * Recheck after taking the lock, so we don't race with another thread
         * that just did journal_entry_open() and call bch2_journal_entry_close()
         * unnecessarily
         */
        if (journal_res_get_fast(j, res, flags)) {
                ret = 0;
                goto unlock;
        }

        /*
         * If we couldn't get a reservation because the current buf filled up,
         * and we had room for a bigger entry on disk, signal that we want to
         * realloc the journal bufs:
         */
        buf = journal_cur_buf(j);
        if (journal_entry_is_open(j) &&
            buf->buf_size >> 9 < buf->disk_sectors &&
            buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
                j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);

        __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, false);
        ret = journal_entry_open(j) ?: JOURNAL_ERR_retry;
unlock:
        can_discard = j->can_discard;
        spin_unlock(&j->lock);
out:
        if (ret == JOURNAL_ERR_retry)
                goto retry;
        if (!ret)
                return 0;

        if (journal_error_check_stuck(j, ret, flags))
                ret = -BCH_ERR_journal_res_get_blocked;

        if (ret == JOURNAL_ERR_max_in_flight &&
            track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight], true)) {

                struct printbuf buf = PRINTBUF;
                prt_printf(&buf, "seq %llu\n", journal_cur_seq(j));
                bch2_journal_bufs_to_text(&buf, j);
                trace_journal_entry_full(c, buf.buf);
                printbuf_exit(&buf);
                count_event(c, journal_entry_full);
        }

        /*
         * Journal is full - can't rely on reclaim from work item due to
         * freezing:
         */
        if ((ret == JOURNAL_ERR_journal_full ||
             ret == JOURNAL_ERR_journal_pin_full) &&
            !(flags & JOURNAL_RES_GET_NONBLOCK)) {
                if (can_discard) {
                        bch2_journal_do_discards(j);
                        goto retry;
                }

                if (mutex_trylock(&j->reclaim_lock)) {
                        bch2_journal_reclaim(j);
                        mutex_unlock(&j->reclaim_lock);
                }
        }

        return ret == JOURNAL_ERR_insufficient_devices
                ? -BCH_ERR_erofs_journal_err
                : -BCH_ERR_journal_res_get_blocked;
}

/*
 * Essentially the entry point to the journalling code. When bcachefs is doing
 * a btree insert, it calls this function to get a reservation in the current
 * journal write; the journal write is the structure used to set up journal
 * writes. The calling function then adds its keys to the structure, queuing
 * them for the next write.
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks.
 */
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
                                  unsigned flags)
{
        int ret;

        closure_wait_event(&j->async_wait,
                   (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
                   (flags & JOURNAL_RES_GET_NONBLOCK));
        return ret;
}
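
/*
 * A compiled-out, caller-side sketch of the reservation lifecycle. It mirrors
 * what bch2_journal_meta() below actually does; the function name is
 * hypothetical and it is not part of this file's API:
 */
#if 0
static int example_journal_res_usage(struct journal *j)
{
        struct journal_res res = { 0 };
        int ret;

        /* Reserve space; jset_u64s(0) is a minimal, header-only entry size */
        ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
        if (ret)
                return ret;

        /*
         * res.seq now identifies the entry the reservation landed in; a real
         * caller would copy its keys into the journal buffer here.
         */
        bch2_journal_res_put(j, &res);

        /* Optionally wait until that sequence number is durable on disk: */
        return bch2_journal_flush_seq(j, res.seq);
}
#endif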

/* journal_entry_res: */

void bch2_journal_entry_res_resize(struct journal *j,
                                   struct journal_entry_res *res,
                                   unsigned new_u64s)
{
        union journal_res_state state;
        int d = new_u64s - res->u64s;

        spin_lock(&j->lock);

        j->entry_u64s_reserved += d;
        if (d <= 0)
                goto out;

        j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
        smp_mb();
        state = READ_ONCE(j->reservations);

        if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
            state.cur_entry_offset > j->cur_entry_u64s) {
                j->cur_entry_u64s += d;
                /*
                 * Not enough room in current journal entry, have to flush it:
                 */
                __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
        } else {
                journal_cur_buf(j)->u64s_reserved += d;
        }
out:
        spin_unlock(&j->lock);
        res->u64s += d;
}

/* journal flushing: */

/**
 * bch2_journal_flush_seq_async - wait for a journal entry to be written
 * @j:          journal object
 * @seq:        seq to flush
 * @parent:     closure object to wait with
 * Returns:     1 if @seq has already been flushed, 0 if @seq is being flushed,
 *              -EIO if @seq will never be flushed
 *
 * Like bch2_journal_wait_on_seq, except that it triggers a write immediately if
 * necessary
 */
int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
                                 struct closure *parent)
{
        struct journal_buf *buf;
        int ret = 0;

        if (seq <= j->flushed_seq_ondisk)
                return 1;

        spin_lock(&j->lock);

        if (WARN_ONCE(seq > journal_cur_seq(j),
                      "requested to flush journal seq %llu, but currently at %llu",
                      seq, journal_cur_seq(j)))
                goto out;

        /* Recheck under lock: */
        if (j->err_seq && seq >= j->err_seq) {
                ret = -EIO;
                goto out;
        }

        if (seq <= j->flushed_seq_ondisk) {
                ret = 1;
                goto out;
        }

        /* if seq was written, but not flushed - flush a newer one instead */
        seq = max(seq, journal_last_unwritten_seq(j));

recheck_need_open:
        if (seq > journal_cur_seq(j)) {
                struct journal_res res = { 0 };

                if (journal_entry_is_open(j))
                        __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);

                spin_unlock(&j->lock);

                ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
                if (ret)
                        return ret;

                seq = res.seq;
                buf = j->buf + (seq & JOURNAL_BUF_MASK);
                buf->must_flush = true;

                if (!buf->flush_time) {
                        buf->flush_time = local_clock() ?: 1;
                        buf->expires = jiffies;
                }

                if (parent && !closure_wait(&buf->wait, parent))
                        BUG();

                bch2_journal_res_put(j, &res);

                spin_lock(&j->lock);
                goto want_write;
        }

        /*
         * if write was kicked off without a flush, flush the next sequence
         * number instead
         */
        buf = journal_seq_to_buf(j, seq);
        if (buf->noflush) {
                seq++;
                goto recheck_need_open;
        }

        buf->must_flush = true;

        if (parent && !closure_wait(&buf->wait, parent))
                BUG();
want_write:
        if (seq == journal_cur_seq(j))
                journal_entry_want_write(j);
out:
        spin_unlock(&j->lock);
        return ret;
}
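
/*
 * A compiled-out sketch of how the tri-state return above is typically
 * consumed (hypothetical caller, shown for illustration only):
 */
#if 0
static void example_flush_seq_async(struct journal *j, u64 seq)
{
        struct closure cl;
        int ret;

        closure_init_stack(&cl);

        ret = bch2_journal_flush_seq_async(j, seq, &cl);
        if (ret == 1)
                return;                 /* already flushed and durable */
        if (ret < 0)
                return;                 /* e.g. -EIO: @seq will never be flushed */

        /* ret == 0: a flush write is in flight; wait for it to complete */
        closure_sync(&cl);
}
#endif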

int bch2_journal_flush_seq(struct journal *j, u64 seq)
{
        u64 start_time = local_clock();
        int ret, ret2;

        /*
         * Don't update time_stats when @seq is already flushed:
         */
        if (seq <= j->flushed_seq_ondisk)
                return 0;

        ret = wait_event_interruptible(j->wait, (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)));

        if (!ret)
                time_stats_update(j->flush_seq_time, start_time);

        return ret ?: ret2 < 0 ? ret2 : 0;
}

/*
 * bch2_journal_flush_async - if there is an open journal entry, or a journal
 * still being written, write it and wait for the write to complete
 */
void bch2_journal_flush_async(struct journal *j, struct closure *parent)
{
        bch2_journal_flush_seq_async(j, atomic64_read(&j->seq), parent);
}

int bch2_journal_flush(struct journal *j)
{
        return bch2_journal_flush_seq(j, atomic64_read(&j->seq));
}

/*
 * bch2_journal_noflush_seq - tell the journal not to issue any flushes before
 * @seq
 */
bool bch2_journal_noflush_seq(struct journal *j, u64 seq)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        u64 unwritten_seq;
        bool ret = false;

        if (!(c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush)))
                return false;

        if (seq <= c->journal.flushed_seq_ondisk)
                return false;

        spin_lock(&j->lock);
        if (seq <= c->journal.flushed_seq_ondisk)
                goto out;

        for (unwritten_seq = journal_last_unwritten_seq(j);
             unwritten_seq < seq;
             unwritten_seq++) {
                struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq);

                /* journal write is already in flight, and was a flush write: */
                if (unwritten_seq == journal_last_unwritten_seq(j) && !buf->noflush)
                        goto out;

                buf->noflush = true;
        }

        ret = true;
out:
        spin_unlock(&j->lock);
        return ret;
}

int bch2_journal_meta(struct journal *j)
{
        struct journal_buf *buf;
        struct journal_res res;
        int ret;

        memset(&res, 0, sizeof(res));

        ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
        if (ret)
                return ret;

        buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
        buf->must_flush = true;

        if (!buf->flush_time) {
                buf->flush_time = local_clock() ?: 1;
                buf->expires = jiffies;
        }

        bch2_journal_res_put(j, &res);

        return bch2_journal_flush_seq(j, res.seq);
}

/* block/unblock the journal: */

void bch2_journal_unblock(struct journal *j)
{
        spin_lock(&j->lock);
        j->blocked--;
        spin_unlock(&j->lock);

        journal_wake(j);
}

void bch2_journal_block(struct journal *j)
{
        spin_lock(&j->lock);
        j->blocked++;
        spin_unlock(&j->lock);

        journal_quiesce(j);
}
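
/*
 * Usage sketch for the pair above (compiled out; the pattern matches how
 * __bch2_set_nr_journal_buckets() below uses it). j->blocked is a nesting
 * count, so block/unblock calls must be balanced:
 */
#if 0
static void example_journal_block(struct bch_fs *c)
{
        bch2_journal_block(&c->journal);        /* new entries can't open; journal quiesced */

        /* ... modify journal state that must not race with journal writes ... */

        bch2_journal_unblock(&c->journal);      /* reservations may proceed again */
}
#endif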

static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq)
{
        struct journal_buf *ret = NULL;

        mutex_lock(&j->buf_lock);
        spin_lock(&j->lock);
        max_seq = min(max_seq, journal_cur_seq(j));

        for (u64 seq = journal_last_unwritten_seq(j);
             seq <= max_seq;
             seq++) {
                unsigned idx = seq & JOURNAL_BUF_MASK;
                struct journal_buf *buf = j->buf + idx;

                if (buf->need_flush_to_write_buffer) {
                        if (seq == journal_cur_seq(j))
                                __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);

                        union journal_res_state s;
                        s.v = atomic64_read_acquire(&j->reservations.counter);

                        ret = journal_state_count(s, idx)
                                ? ERR_PTR(-EAGAIN)
                                : buf;
                        break;
                }
        }

        spin_unlock(&j->lock);
        if (IS_ERR_OR_NULL(ret))
                mutex_unlock(&j->buf_lock);
        return ret;
}

struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq)
{
        struct journal_buf *ret;

        wait_event(j->wait, (ret = __bch2_next_write_buffer_flush_journal_buf(j, max_seq)) != ERR_PTR(-EAGAIN));
        return ret;
}

/* allocate journal on a device: */

static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
                                         bool new_fs, struct closure *cl)
{
        struct bch_fs *c = ca->fs;
        struct journal_device *ja = &ca->journal;
        u64 *new_bucket_seq = NULL, *new_buckets = NULL;
        struct open_bucket **ob = NULL;
        long *bu = NULL;
        unsigned i, pos, nr_got = 0, nr_want = nr - ja->nr;
        int ret = 0;

        BUG_ON(nr <= ja->nr);

        bu              = kcalloc(nr_want, sizeof(*bu), GFP_KERNEL);
        ob              = kcalloc(nr_want, sizeof(*ob), GFP_KERNEL);
        new_buckets     = kcalloc(nr, sizeof(u64), GFP_KERNEL);
        new_bucket_seq  = kcalloc(nr, sizeof(u64), GFP_KERNEL);
        if (!bu || !ob || !new_buckets || !new_bucket_seq) {
                ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
                goto err_free;
        }

        for (nr_got = 0; nr_got < nr_want; nr_got++) {
                if (new_fs) {
                        bu[nr_got] = bch2_bucket_alloc_new_fs(ca);
                        if (bu[nr_got] < 0) {
                                ret = -BCH_ERR_ENOSPC_bucket_alloc;
                                break;
                        }
                } else {
                        ob[nr_got] = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal, cl);
                        ret = PTR_ERR_OR_ZERO(ob[nr_got]);
                        if (ret)
                                break;

                        ret = bch2_trans_run(c,
                                bch2_trans_mark_metadata_bucket(trans, ca,
                                                ob[nr_got]->bucket, BCH_DATA_journal,
                                                ca->mi.bucket_size));
                        if (ret) {
                                bch2_open_bucket_put(c, ob[nr_got]);
                                bch_err_msg(c, ret, "marking new journal buckets");
                                break;
                        }

                        bu[nr_got] = ob[nr_got]->bucket;
                }
        }

        if (!nr_got)
                goto err_free;

        /* Don't return an error if we successfully allocated some buckets: */
        ret = 0;

        if (c) {
                bch2_journal_flush_all_pins(&c->journal);
                bch2_journal_block(&c->journal);
                mutex_lock(&c->sb_lock);
        }

        memcpy(new_buckets,     ja->buckets,    ja->nr * sizeof(u64));
        memcpy(new_bucket_seq,  ja->bucket_seq, ja->nr * sizeof(u64));

        BUG_ON(ja->discard_idx > ja->nr);

        pos = ja->discard_idx ?: ja->nr;

        memmove(new_buckets + pos + nr_got,
                new_buckets + pos,
                sizeof(new_buckets[0]) * (ja->nr - pos));
        memmove(new_bucket_seq + pos + nr_got,
                new_bucket_seq + pos,
                sizeof(new_bucket_seq[0]) * (ja->nr - pos));

        for (i = 0; i < nr_got; i++) {
                new_buckets[pos + i] = bu[i];
                new_bucket_seq[pos + i] = 0;
        }

        nr = ja->nr + nr_got;

        ret = bch2_journal_buckets_to_sb(c, ca, new_buckets, nr);
        if (ret)
                goto err_unblock;

        if (!new_fs)
                bch2_write_super(c);

        /* Commit: */
        if (c)
                spin_lock(&c->journal.lock);

        swap(new_buckets,       ja->buckets);
        swap(new_bucket_seq,    ja->bucket_seq);
        ja->nr = nr;

        if (pos <= ja->discard_idx)
                ja->discard_idx = (ja->discard_idx + nr_got) % ja->nr;
        if (pos <= ja->dirty_idx_ondisk)
                ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + nr_got) % ja->nr;
        if (pos <= ja->dirty_idx)
                ja->dirty_idx = (ja->dirty_idx + nr_got) % ja->nr;
        if (pos <= ja->cur_idx)
                ja->cur_idx = (ja->cur_idx + nr_got) % ja->nr;

        if (c)
                spin_unlock(&c->journal.lock);
err_unblock:
        if (c) {
                bch2_journal_unblock(&c->journal);
                mutex_unlock(&c->sb_lock);
        }

        if (ret && !new_fs)
                for (i = 0; i < nr_got; i++)
                        bch2_trans_run(c,
                                bch2_trans_mark_metadata_bucket(trans, ca,
                                                bu[i], BCH_DATA_free, 0));
err_free:
        if (!new_fs)
                for (i = 0; i < nr_got; i++)
                        bch2_open_bucket_put(c, ob[i]);

        kfree(new_bucket_seq);
        kfree(new_buckets);
        kfree(ob);
        kfree(bu);
        return ret;
}

/*
 * Allocate more journal space at runtime - not currently making use of it, but
 * the code works:
 */
int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
                                unsigned nr)
{
        struct journal_device *ja = &ca->journal;
        struct closure cl;
        int ret = 0;

        closure_init_stack(&cl);

        down_write(&c->state_lock);

        /* don't handle reducing nr of buckets yet: */
        if (nr < ja->nr)
                goto unlock;

        while (ja->nr < nr) {
                struct disk_reservation disk_res = { 0, 0, 0 };

                /*
                 * note: journal buckets aren't really counted as _sectors_ used yet, so
                 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
                 * when space used goes up without a reservation - but we do need the
                 * reservation to ensure we'll actually be able to allocate:
                 *
                 * XXX: that's not right, disk reservations only ensure a
                 * filesystem-wide allocation will succeed, this is a device
                 * specific allocation - we can hang here:
                 */

                ret = bch2_disk_reservation_get(c, &disk_res,
                                                bucket_to_sector(ca, nr - ja->nr), 1, 0);
                if (ret)
                        break;

                ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);

                bch2_disk_reservation_put(c, &disk_res);

                closure_sync(&cl);

                if (ret && ret != -BCH_ERR_bucket_alloc_blocked)
                        break;
        }

        bch_err_fn(c, ret);
unlock:
        up_write(&c->state_lock);
        return ret;
}

int bch2_dev_journal_alloc(struct bch_dev *ca)
{
        unsigned nr;
        int ret;

        if (dynamic_fault("bcachefs:add:journal_alloc")) {
                ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
                goto err;
        }

        /* 1/128th of the device by default: */
        nr = ca->mi.nbuckets >> 7;

        /*
         * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
         * is smaller:
         */
        nr = clamp_t(unsigned, nr,
                     BCH_JOURNAL_BUCKETS_MIN,
                     min(1 << 13,
                         (1 << 24) / ca->mi.bucket_size));
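
        /*
         * Worked example (hypothetical numbers): ca->mi.bucket_size is in
         * 512-byte sectors, and 1 << 24 sectors = 8GB. With 2MB buckets
         * (4096 sectors) the cap is min(8192, 4096) = 4096 buckets, i.e. the
         * 8GB limit wins; with 1MB buckets both limits coincide at 8192.
         */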

        ret = __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
err:
        bch_err_fn(ca, ret);
        return ret;
}

int bch2_fs_journal_alloc(struct bch_fs *c)
{
        for_each_online_member(c, ca) {
                if (ca->journal.nr)
                        continue;

                int ret = bch2_dev_journal_alloc(ca);
                if (ret) {
                        percpu_ref_put(&ca->io_ref);
                        return ret;
                }
        }

        return 0;
}

/* startup/shutdown: */

static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
{
        bool ret = false;
        u64 seq;

        spin_lock(&j->lock);
        for (seq = journal_last_unwritten_seq(j);
             seq <= journal_cur_seq(j) && !ret;
             seq++) {
                struct journal_buf *buf = journal_seq_to_buf(j, seq);

                if (bch2_bkey_has_device_c(bkey_i_to_s_c(&buf->key), dev_idx))
                        ret = true;
        }
        spin_unlock(&j->lock);

        return ret;
}

void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
{
        wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
}

void bch2_fs_journal_stop(struct journal *j)
{
        bch2_journal_reclaim_stop(j);
        bch2_journal_flush_all_pins(j);

        wait_event(j->wait, bch2_journal_entry_close(j));

        /*
         * Always write a new journal entry, to make sure the clock hands are up
         * to date (and match the superblock)
         */
        bch2_journal_meta(j);

        journal_quiesce(j);

        BUG_ON(!bch2_journal_error(j) &&
               test_bit(JOURNAL_REPLAY_DONE, &j->flags) &&
               j->last_empty_seq != journal_cur_seq(j));

        cancel_delayed_work_sync(&j->write_work);
}

int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_entry_pin_list *p;
        struct journal_replay *i, **_i;
        struct genradix_iter iter;
        bool had_entries = false;
        u64 last_seq = cur_seq, nr, seq;

        genradix_for_each_reverse(&c->journal_entries, iter, _i) {
                i = *_i;

                if (!i || i->ignore)
                        continue;

                last_seq = le64_to_cpu(i->j.last_seq);
                break;
        }

        nr = cur_seq - last_seq;

        if (nr + 1 > j->pin.size) {
                free_fifo(&j->pin);
                init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
                if (!j->pin.data) {
                        bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
                        return -BCH_ERR_ENOMEM_journal_pin_fifo;
                }
        }

        j->replay_journal_seq   = last_seq;
        j->replay_journal_seq_end = cur_seq;
        j->last_seq_ondisk      = last_seq;
        j->flushed_seq_ondisk   = cur_seq - 1;
        j->seq_ondisk           = cur_seq - 1;
        j->pin.front            = last_seq;
        j->pin.back             = cur_seq;
        atomic64_set(&j->seq, cur_seq - 1);

        fifo_for_each_entry_ptr(p, &j->pin, seq)
                journal_pin_list_init(p, 1);

        genradix_for_each(&c->journal_entries, iter, _i) {
                i = *_i;

                if (!i || i->ignore)
                        continue;

                seq = le64_to_cpu(i->j.seq);
                BUG_ON(seq >= cur_seq);

                if (seq < last_seq)
                        continue;

                if (journal_entry_empty(&i->j))
                        j->last_empty_seq = le64_to_cpu(i->j.seq);

                p = journal_seq_pin(j, seq);

                p->devs.nr = 0;
                darray_for_each(i->ptrs, ptr)
                        bch2_dev_list_add_dev(&p->devs, ptr->dev);

                had_entries = true;
        }

        if (!had_entries)
                j->last_empty_seq = cur_seq;

        spin_lock(&j->lock);

        set_bit(JOURNAL_STARTED, &j->flags);
        j->last_flush_write = jiffies;

        j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
        j->reservations.unwritten_idx++;

        c->last_bucket_seq_cleanup = journal_cur_seq(j);

        bch2_journal_space_available(j);
        spin_unlock(&j->lock);

        return bch2_journal_reclaim_start(j);
}

/* init/exit: */

void bch2_dev_journal_exit(struct bch_dev *ca)
{
        struct journal_device *ja = &ca->journal;

        for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
                kfree(ja->bio[i]);
                ja->bio[i] = NULL;
        }

        kfree(ja->buckets);
        kfree(ja->bucket_seq);
        ja->buckets     = NULL;
        ja->bucket_seq  = NULL;
}

int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
        struct journal_device *ja = &ca->journal;
        struct bch_sb_field_journal *journal_buckets =
                bch2_sb_field_get(sb, journal);
        struct bch_sb_field_journal_v2 *journal_buckets_v2 =
                bch2_sb_field_get(sb, journal_v2);

        ja->nr = 0;

        if (journal_buckets_v2) {
                unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);

                for (unsigned i = 0; i < nr; i++)
                        ja->nr += le64_to_cpu(journal_buckets_v2->d[i].nr);
        } else if (journal_buckets) {
                ja->nr = bch2_nr_journal_buckets(journal_buckets);
        }

        ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
        if (!ja->bucket_seq)
                return -BCH_ERR_ENOMEM_dev_journal_init;

        unsigned nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);

        for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
                ja->bio[i] = kmalloc(struct_size(ja->bio[i], bio.bi_inline_vecs,
                                     nr_bvecs), GFP_KERNEL);
                if (!ja->bio[i])
                        return -BCH_ERR_ENOMEM_dev_journal_init;

                ja->bio[i]->ca = ca;
                ja->bio[i]->buf_idx = i;
                bio_init(&ja->bio[i]->bio, NULL, ja->bio[i]->bio.bi_inline_vecs, nr_bvecs, 0);
        }

        ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
        if (!ja->buckets)
                return -BCH_ERR_ENOMEM_dev_journal_init;

        if (journal_buckets_v2) {
                unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
                unsigned dst = 0;

                for (unsigned i = 0; i < nr; i++)
                        for (unsigned j = 0; j < le64_to_cpu(journal_buckets_v2->d[i].nr); j++)
                                ja->buckets[dst++] =
                                        le64_to_cpu(journal_buckets_v2->d[i].start) + j;
        } else if (journal_buckets) {
                for (unsigned i = 0; i < ja->nr; i++)
                        ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
        }

        return 0;
}

void bch2_fs_journal_exit(struct journal *j)
{
        if (j->wq)
                destroy_workqueue(j->wq);

        darray_exit(&j->early_journal_entries);

        for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++)
                kvfree(j->buf[i].data);
        free_fifo(&j->pin);
}

int bch2_fs_journal_init(struct journal *j)
{
        static struct lock_class_key res_key;

        mutex_init(&j->buf_lock);
        spin_lock_init(&j->lock);
        spin_lock_init(&j->err_lock);
        init_waitqueue_head(&j->wait);
        INIT_DELAYED_WORK(&j->write_work, journal_write_work);
        init_waitqueue_head(&j->reclaim_wait);
        init_waitqueue_head(&j->pin_flush_wait);
        mutex_init(&j->reclaim_lock);
        mutex_init(&j->discard_lock);

        lockdep_init_map(&j->res_map, "journal res", &res_key, 0);

        atomic64_set(&j->reservations.counter,
                ((union journal_res_state)
                 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);

        if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)))
                return -BCH_ERR_ENOMEM_journal_pin_fifo;

        for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++) {
                j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
                j->buf[i].data = kvmalloc(j->buf[i].buf_size, GFP_KERNEL);
                if (!j->buf[i].data)
                        return -BCH_ERR_ENOMEM_journal_buf;
                j->buf[i].idx = i;
        }

        j->pin.front = j->pin.back = 1;

        j->wq = alloc_workqueue("bcachefs_journal",
                                WQ_HIGHPRI|WQ_FREEZABLE|WQ_UNBOUND|WQ_MEM_RECLAIM, 512);
        if (!j->wq)
                return -BCH_ERR_ENOMEM_fs_other_alloc;
        return 0;
}

/* debug: */

void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        union journal_res_state s;
        unsigned long now = jiffies;
        u64 nr_writes = j->nr_flush_writes + j->nr_noflush_writes;

        if (!out->nr_tabstops)
                printbuf_tabstop_push(out, 24);
        out->atomic++;

        rcu_read_lock();
        s = READ_ONCE(j->reservations);

        prt_printf(out, "dirty journal entries:\t%llu/%llu\n",  fifo_used(&j->pin), j->pin.size);
        prt_printf(out, "seq:\t\t\t%llu\n",                     journal_cur_seq(j));
        prt_printf(out, "seq_ondisk:\t\t%llu\n",                j->seq_ondisk);
        prt_printf(out, "last_seq:\t\t%llu\n",                  journal_last_seq(j));
        prt_printf(out, "last_seq_ondisk:\t%llu\n",             j->last_seq_ondisk);
        prt_printf(out, "flushed_seq_ondisk:\t%llu\n",          j->flushed_seq_ondisk);
        prt_printf(out, "watermark:\t\t%s\n",                   bch2_watermarks[j->watermark]);
        prt_printf(out, "each entry reserved:\t%u\n",           j->entry_u64s_reserved);
        prt_printf(out, "nr flush writes:\t%llu\n",             j->nr_flush_writes);
        prt_printf(out, "nr noflush writes:\t%llu\n",           j->nr_noflush_writes);
        prt_printf(out, "average write size:\t");
        prt_human_readable_u64(out, nr_writes ? div64_u64(j->entry_bytes_written, nr_writes) : 0);
        prt_newline(out);
        prt_printf(out, "nr direct reclaim:\t%llu\n",           j->nr_direct_reclaim);
        prt_printf(out, "nr background reclaim:\t%llu\n",       j->nr_background_reclaim);
        prt_printf(out, "reclaim kicked:\t\t%u\n",              j->reclaim_kicked);
        prt_printf(out, "reclaim runs in:\t%u ms\n",            time_after(j->next_reclaim, now)
               ? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
        prt_printf(out, "current entry sectors:\t%u\n",         j->cur_entry_sectors);
        prt_printf(out, "current entry error:\t%s\n",           bch2_journal_errors[j->cur_entry_error]);
        prt_printf(out, "current entry:\t\t");

        switch (s.cur_entry_offset) {
        case JOURNAL_ENTRY_ERROR_VAL:
                prt_printf(out, "error");
                break;
        case JOURNAL_ENTRY_CLOSED_VAL:
                prt_printf(out, "closed");
                break;
        default:
                prt_printf(out, "%u/%u", s.cur_entry_offset, j->cur_entry_u64s);
                break;
        }

        prt_newline(out);
        prt_printf(out, "unwritten entries:");
        prt_newline(out);
        bch2_journal_bufs_to_text(out, j);

        prt_printf(out,
               "replay done:\t\t%i\n",
               test_bit(JOURNAL_REPLAY_DONE,    &j->flags));

        prt_printf(out, "space:\n");
        prt_printf(out, "\tdiscarded\t%u:%u\n",
               j->space[journal_space_discarded].next_entry,
               j->space[journal_space_discarded].total);
        prt_printf(out, "\tclean ondisk\t%u:%u\n",
               j->space[journal_space_clean_ondisk].next_entry,
               j->space[journal_space_clean_ondisk].total);
        prt_printf(out, "\tclean\t\t%u:%u\n",
               j->space[journal_space_clean].next_entry,
               j->space[journal_space_clean].total);
        prt_printf(out, "\ttotal\t\t%u:%u\n",
               j->space[journal_space_total].next_entry,
               j->space[journal_space_total].total);

        for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
                struct journal_device *ja = &ca->journal;

                if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
                        continue;

                if (!ja->nr)
                        continue;

                prt_printf(out, "dev %u:\n",            ca->dev_idx);
                prt_printf(out, "\tnr\t\t%u\n",         ja->nr);
                prt_printf(out, "\tbucket size\t%u\n",  ca->mi.bucket_size);
                prt_printf(out, "\tavailable\t%u:%u\n", bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
                prt_printf(out, "\tdiscard_idx\t%u\n",  ja->discard_idx);
                prt_printf(out, "\tdirty_ondisk\t%u (seq %llu)\n", ja->dirty_idx_ondisk,        ja->bucket_seq[ja->dirty_idx_ondisk]);
                prt_printf(out, "\tdirty_idx\t%u (seq %llu)\n", ja->dirty_idx,          ja->bucket_seq[ja->dirty_idx]);
                prt_printf(out, "\tcur_idx\t\t%u (seq %llu)\n", ja->cur_idx,            ja->bucket_seq[ja->cur_idx]);
        }

        rcu_read_unlock();

        --out->atomic;
}

void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
        spin_lock(&j->lock);
        __bch2_journal_debug_to_text(out, j);
        spin_unlock(&j->lock);
}

bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq)
{
        struct journal_entry_pin_list *pin_list;
        struct journal_entry_pin *pin;

        spin_lock(&j->lock);
        *seq = max(*seq, j->pin.front);

        if (*seq >= j->pin.back) {
                spin_unlock(&j->lock);
                return true;
        }

        out->atomic++;

        pin_list = journal_seq_pin(j, *seq);

        prt_printf(out, "%llu: count %u", *seq, atomic_read(&pin_list->count));
        prt_newline(out);
        printbuf_indent_add(out, 2);

        for (unsigned i = 0; i < ARRAY_SIZE(pin_list->list); i++)
                list_for_each_entry(pin, &pin_list->list[i], list) {
                        prt_printf(out, "\t%px %ps", pin, pin->flush);
                        prt_newline(out);
                }

        if (!list_empty(&pin_list->flushed)) {
                prt_printf(out, "flushed:");
                prt_newline(out);
        }

        list_for_each_entry(pin, &pin_list->flushed, list) {
                prt_printf(out, "\t%px %ps", pin, pin->flush);
                prt_newline(out);
        }

        printbuf_indent_sub(out, 2);

        --out->atomic;
        spin_unlock(&j->lock);

        return false;
}

void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
{
        u64 seq = 0;

        while (!bch2_journal_seq_pins_to_text(out, j, &seq))
                seq++;
}