// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "super-io.h"

#include <trace/events/bcachefs.h>

static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
{
	return seq > j->seq_ondisk;
}

static bool __journal_entry_is_open(union journal_res_state state)
{
	return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}

static inline unsigned nr_unwritten_journal_entries(struct journal *j)
{
	return atomic64_read(&j->seq) - j->seq_ondisk;
}

static bool journal_entry_is_open(struct journal *j)
{
	return __journal_entry_is_open(j->reservations);
}

static inline struct journal_buf *
journal_seq_to_buf(struct journal *j, u64 seq)
{
	struct journal_buf *buf = NULL;

	EBUG_ON(seq > journal_cur_seq(j));

	if (journal_seq_unwritten(j, seq)) {
		buf = j->buf + (seq & JOURNAL_BUF_MASK);
		EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
	}
	return buf;
}

static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
{
	INIT_LIST_HEAD(&p->list);
	INIT_LIST_HEAD(&p->key_cache_list);
	INIT_LIST_HEAD(&p->flushed);
	atomic_set(&p->count, count);
	p->devs.nr = 0;
}

/* journal entry close/open: */

void __bch2_journal_buf_put(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);

	closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
}

/*
 * Closes the current journal entry, if open:
 *
 * We don't close a journal_buf until the next journal_buf is finished writing
 * and can be opened again - closing the entry here also initializes the next
 * journal_buf:
 */
static void __journal_entry_close(struct journal *j, unsigned closed_val)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = journal_cur_buf(j);
	union journal_res_state old, new;
	u64 v = atomic64_read(&j->reservations.counter);
	unsigned sectors;

	BUG_ON(closed_val != JOURNAL_ENTRY_CLOSED_VAL &&
	       closed_val != JOURNAL_ENTRY_ERROR_VAL);

	lockdep_assert_held(&j->lock);

	do {
		old.v = new.v = v;
		new.cur_entry_offset = closed_val;

		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ||
		    old.cur_entry_offset == new.cur_entry_offset)
			return;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	if (!__journal_entry_is_open(old))
		return;

	/* Close out old buffer: */
	buf->data->u64s		= cpu_to_le32(old.cur_entry_offset);

	sectors = vstruct_blocks_plus(buf->data, c->block_bits,
				      buf->u64s_reserved) << c->block_bits;
	BUG_ON(sectors > buf->sectors);
	buf->sectors = sectors;

	/*
	 * We have to set last_seq here, _before_ opening a new journal entry:
	 *
	 * A thread may replace an old pin with a new pin on its current
	 * journal reservation - the expectation being that the journal will
	 * contain either what the old pin protected or what the new pin
	 * protects.
	 *
	 * After the old pin is dropped journal_last_seq() won't include the old
	 * pin, so we can only write the updated last_seq on the entry that
	 * contains whatever the new pin protects.
	 *
	 * Restated, we can _not_ update last_seq for a given entry if there
	 * could be a newer entry open with reservations/pins that have been
	 * taken against it.
	 *
	 * Hence, we want to update/set last_seq on the current journal entry
	 * right before we open a new one:
	 */
	buf->last_seq		= journal_last_seq(j);
	buf->data->last_seq	= cpu_to_le64(buf->last_seq);
	BUG_ON(buf->last_seq > le64_to_cpu(buf->data->seq));

	__bch2_journal_pin_put(j, le64_to_cpu(buf->data->seq));

	cancel_delayed_work(&j->write_work);

	bch2_journal_space_available(j);

	bch2_journal_buf_put(j, old.idx);
}
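
/*
 * Illustrative sketch of the pin-replacement pattern described in the
 * last_seq comment above (a hypothetical caller: bch2_journal_pin_add() and
 * bch2_journal_pin_drop() are declared in journal_reclaim.h, everything else
 * here is made up):
 *
 *	// Pin the seq of our current reservation before dropping the old
 *	// pin, so the journal always retains either what the old pin
 *	// protected or what the new pin protects:
 *	bch2_journal_pin_add(j, res.seq, &new_pin, example_flush_fn);
 *	bch2_journal_pin_drop(j, &old_pin);
 */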

void bch2_journal_halt(struct journal *j)
{
	spin_lock(&j->lock);
	__journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL);
	if (!j->err_seq)
		j->err_seq = journal_cur_seq(j);
	spin_unlock(&j->lock);
}

static bool journal_entry_want_write(struct journal *j)
{
	bool ret = !journal_entry_is_open(j) ||
		journal_cur_seq(j) == journal_last_unwritten_seq(j);

	/* Don't close it yet if we already have a write in flight: */
	if (ret)
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
	else if (nr_unwritten_journal_entries(j)) {
		struct journal_buf *buf = journal_cur_buf(j);

		if (!buf->flush_time) {
			buf->flush_time = local_clock() ?: 1;
			buf->expires = jiffies;
		}
	}

	return ret;
}

static bool journal_entry_close(struct journal *j)
{
	bool ret;

	spin_lock(&j->lock);
	ret = journal_entry_want_write(j);
	spin_unlock(&j->lock);

	return ret;
}

/*
 * should _only_ be called from journal_res_get() - when we actually want a
 * journal reservation - an open journal entry means the journal is dirty:
 *
 * returns:
 * 0:		success
 * -ENOSPC:	journal currently full, must invoke reclaim
 * -EAGAIN:	journal blocked, must wait
 * -EROFS:	insufficient rw devices or journal error
 */
static int journal_entry_open(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = j->buf +
		((journal_cur_seq(j) + 1) & JOURNAL_BUF_MASK);
	union journal_res_state old, new;
	int u64s;
	u64 v;

	lockdep_assert_held(&j->lock);
	BUG_ON(journal_entry_is_open(j));
	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	if (j->blocked)
		return cur_entry_blocked;

	if (j->cur_entry_error)
		return j->cur_entry_error;

	if (bch2_journal_error(j))
		return cur_entry_insufficient_devices; /* -EROFS */

	if (!fifo_free(&j->pin))
		return cur_entry_journal_pin_full;

	if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf) - 1)
		return cur_entry_max_in_flight;

	BUG_ON(!j->cur_entry_sectors);

	buf->expires		=
		(journal_cur_seq(j) == j->flushed_seq_ondisk
		 ? jiffies
		 : j->last_flush_write) +
		msecs_to_jiffies(c->opts.journal_flush_delay);

	buf->u64s_reserved	= j->entry_u64s_reserved;
	buf->disk_sectors	= j->cur_entry_sectors;
	buf->sectors		= min(buf->disk_sectors, buf->buf_size >> 9);

	u64s = (int) (buf->sectors << 9) / sizeof(u64) -
		journal_entry_overhead(j);
	u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);

	if (u64s <= 0)
		return cur_entry_journal_full;

	if (fifo_empty(&j->pin) && j->reclaim_thread)
		wake_up_process(j->reclaim_thread);

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for journal_last_seq() to be calculated correctly
	 */
	atomic64_inc(&j->seq);
	journal_pin_list_init(fifo_push_ref(&j->pin), 1);

	BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf);

	bkey_extent_init(&buf->key);
	buf->noflush	= false;
	buf->must_flush	= false;
	buf->separate_flush = false;
	buf->flush_time	= 0;

	memset(buf->data, 0, sizeof(*buf->data));
	buf->data->seq	= cpu_to_le64(journal_cur_seq(j));
	buf->data->u64s	= 0;

	/*
	 * Must be set before marking the journal entry as open:
	 */
	j->cur_entry_u64s = u64s;

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;

		BUG_ON(old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL);

		new.idx++;
		BUG_ON(journal_state_count(new, new.idx));
		BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_BUF_MASK));

		journal_state_inc(&new);
		new.cur_entry_offset = 0;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	if (j->res_get_blocked_start)
		bch2_time_stats_update(j->blocked_time,
				       j->res_get_blocked_start);
	j->res_get_blocked_start = 0;

	mod_delayed_work(c->io_complete_wq,
			 &j->write_work,
			 msecs_to_jiffies(c->opts.journal_flush_delay));
	journal_wake(j);
	return 0;
}

static bool journal_quiesced(struct journal *j)
{
	bool ret = atomic64_read(&j->seq) == j->seq_ondisk;

	if (!ret)
		journal_entry_close(j);
	return ret;
}

static void journal_quiesce(struct journal *j)
{
	wait_event(j->wait, journal_quiesced(j));
}

static void journal_write_work(struct work_struct *work)
{
	struct journal *j = container_of(work, struct journal, write_work.work);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	long delta;

	spin_lock(&j->lock);
	if (!__journal_entry_is_open(j->reservations))
		goto unlock;

	delta = journal_cur_buf(j)->expires - jiffies;

	if (delta > 0)
		mod_delayed_work(c->io_complete_wq, &j->write_work, delta);
	else
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
unlock:
	spin_unlock(&j->lock);
}

static int __journal_res_get(struct journal *j, struct journal_res *res,
			     unsigned flags)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf;
	bool can_discard;
	int ret;
retry:
	if (journal_res_get_fast(j, res, flags))
		return 0;

	if (bch2_journal_error(j))
		return -EROFS;

	spin_lock(&j->lock);

	/*
	 * Recheck after taking the lock, so we don't race with another thread
	 * that just did journal_entry_open() and call journal_entry_close()
	 * unnecessarily
	 */
	if (journal_res_get_fast(j, res, flags)) {
		spin_unlock(&j->lock);
		return 0;
	}

	if (!(flags & JOURNAL_RES_GET_RESERVED) &&
	    !test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags)) {
		/*
		 * Don't want to close current journal entry, just need to
		 * invoke reclaim:
		 */
		ret = cur_entry_journal_full;
		goto unlock;
	}

	/*
	 * If we couldn't get a reservation because the current buf filled up,
	 * and we had room for a bigger entry on disk, signal that we want to
	 * realloc the journal bufs:
	 */
	buf = journal_cur_buf(j);
	if (journal_entry_is_open(j) &&
	    buf->buf_size >> 9 < buf->disk_sectors &&
	    buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
		j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);

	__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
	ret = journal_entry_open(j);

	if (ret == cur_entry_max_in_flight)
		trace_journal_entry_full(c);
unlock:
	if ((ret && ret != cur_entry_insufficient_devices) &&
	    !j->res_get_blocked_start) {
		j->res_get_blocked_start = local_clock() ?: 1;
		trace_journal_full(c);
	}

	can_discard = j->can_discard;
	spin_unlock(&j->lock);

	if (!ret)
		goto retry;

	if ((ret == cur_entry_journal_full ||
	     ret == cur_entry_journal_pin_full) &&
	    !can_discard &&
	    !nr_unwritten_journal_entries(j) &&
	    (flags & JOURNAL_RES_GET_RESERVED)) {
		struct printbuf buf = PRINTBUF;

		bch_err(c, "Journal stuck! Have a pre-reservation but journal full");

		bch2_journal_debug_to_text(&buf, j);
		bch_err(c, "%s", buf.buf);

		printbuf_reset(&buf);
		bch2_journal_pins_to_text(&buf, j);
		bch_err(c, "Journal pins:\n%s", buf.buf);

		printbuf_exit(&buf);
		bch2_fatal_error(c);
		dump_stack();
	}

	/*
	 * Journal is full - can't rely on reclaim from work item due to
	 * freezing:
	 */
	if ((ret == cur_entry_journal_full ||
	     ret == cur_entry_journal_pin_full) &&
	    !(flags & JOURNAL_RES_GET_NONBLOCK)) {
		if (can_discard) {
			bch2_journal_do_discards(j);
			goto retry;
		}

		if (mutex_trylock(&j->reclaim_lock)) {
			bch2_journal_reclaim(j);
			mutex_unlock(&j->reclaim_lock);
		}
	}

	return ret == cur_entry_insufficient_devices ? -EROFS : -EAGAIN;
}

/*
 * Essentially the entry function to the journaling code. When bcachefs is
 * doing a btree insert, it calls this function to get the current journal
 * write. The journal write is the structure used to set up journal writes.
 * The calling function will then add its keys to the structure, queuing them
 * for the next write.
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks.
 */
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
				  unsigned flags)
{
	int ret;

	closure_wait_event(&j->async_wait,
		   (ret = __journal_res_get(j, res, flags)) != -EAGAIN ||
		   (flags & JOURNAL_RES_GET_NONBLOCK));
	return ret;
}
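
/*
 * Illustrative caller pattern (a sketch; nr_key_u64s and the key copy are
 * hypothetical - bch2_journal_log_msg() below follows the same shape):
 *
 *	struct journal_res res = { 0 };
 *	int ret;
 *
 *	ret = bch2_journal_res_get(j, &res, jset_u64s(nr_key_u64s), 0);
 *	if (ret)
 *		return ret;
 *
 *	// copy keys into the space at journal_res_entry(j, &res), then:
 *	bch2_journal_res_put(j, &res);
 */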

/* journal_preres: */

static bool journal_preres_available(struct journal *j,
				     struct journal_preres *res,
				     unsigned new_u64s,
				     unsigned flags)
{
	bool ret = bch2_journal_preres_get_fast(j, res, new_u64s, flags, true);

	if (!ret && mutex_trylock(&j->reclaim_lock)) {
		bch2_journal_reclaim(j);
		mutex_unlock(&j->reclaim_lock);
	}

	return ret;
}

int __bch2_journal_preres_get(struct journal *j,
			      struct journal_preres *res,
			      unsigned new_u64s,
			      unsigned flags)
{
	int ret;

	closure_wait_event(&j->preres_wait,
		   (ret = bch2_journal_error(j)) ||
		   journal_preres_available(j, res, new_u64s, flags));
	return ret;
}

/* journal_entry_res: */

void bch2_journal_entry_res_resize(struct journal *j,
				   struct journal_entry_res *res,
				   unsigned new_u64s)
{
	union journal_res_state state;
	int d = new_u64s - res->u64s;

	spin_lock(&j->lock);

	j->entry_u64s_reserved += d;
	if (d <= 0)
		goto out;

	j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
	smp_mb();
	state = READ_ONCE(j->reservations);

	if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
	    state.cur_entry_offset > j->cur_entry_u64s) {
		j->cur_entry_u64s += d;
		/*
		 * Not enough room in current journal entry, have to flush it:
		 */
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
	} else {
		journal_cur_buf(j)->u64s_reserved += d;
	}
out:
	spin_unlock(&j->lock);
	res->u64s += d;
}

/* journal flushing: */

/**
 * bch2_journal_flush_seq_async - wait for a journal entry to be written
 *
 * like bch2_journal_wait_on_seq, except that it triggers a write immediately if
 * necessary
 */
int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
				 struct closure *parent)
{
	struct journal_buf *buf;
	int ret = 0;

	if (seq <= j->flushed_seq_ondisk)
		return 1;

	spin_lock(&j->lock);

	if (WARN_ONCE(seq > journal_cur_seq(j),
		      "requested to flush journal seq %llu, but currently at %llu",
		      seq, journal_cur_seq(j)))
		goto out;

	/* Recheck under lock: */
	if (j->err_seq && seq >= j->err_seq) {
		ret = -EIO;
		goto out;
	}

	if (seq <= j->flushed_seq_ondisk) {
		ret = 1;
		goto out;
	}

	/* if seq was written, but not flushed - flush a newer one instead */
	seq = max(seq, journal_last_unwritten_seq(j));

recheck_need_open:
	if (seq > journal_cur_seq(j)) {
		struct journal_res res = { 0 };

		if (journal_entry_is_open(j))
			__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);

		spin_unlock(&j->lock);

		ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
		if (ret)
			return ret;

		seq = res.seq;
		buf = j->buf + (seq & JOURNAL_BUF_MASK);
		buf->must_flush = true;

		if (!buf->flush_time) {
			buf->flush_time = local_clock() ?: 1;
			buf->expires = jiffies;
		}

		if (parent && !closure_wait(&buf->wait, parent))
			BUG();

		bch2_journal_res_put(j, &res);

		spin_lock(&j->lock);
		goto want_write;
	}

	/*
	 * if write was kicked off without a flush, flush the next sequence
	 * number instead
	 */
	buf = journal_seq_to_buf(j, seq);
	if (buf->noflush) {
		seq++;
		goto recheck_need_open;
	}

	buf->must_flush = true;

	if (parent && !closure_wait(&buf->wait, parent))
		BUG();
want_write:
	if (seq == journal_cur_seq(j))
		journal_entry_want_write(j);
out:
	spin_unlock(&j->lock);
	return ret;
}
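
/*
 * Example of waiting on the flush with a stack closure (a sketch, assuming
 * the usual closure idiom; closure_sync() returns immediately if nothing was
 * queued on the closure):
 *
 *	struct closure cl;
 *
 *	closure_init_stack(&cl);
 *	bch2_journal_flush_seq_async(j, seq, &cl);
 *	closure_sync(&cl);
 */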

int bch2_journal_flush_seq(struct journal *j, u64 seq)
{
	u64 start_time = local_clock();
	int ret, ret2;

	/*
	 * Don't update time_stats when @seq is already flushed:
	 */
	if (seq <= j->flushed_seq_ondisk)
		return 0;

	ret = wait_event_interruptible(j->wait, (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)));

	if (!ret)
		bch2_time_stats_update(j->flush_seq_time, start_time);

	return ret ?: ret2 < 0 ? ret2 : 0;
}

/*
 * bch2_journal_flush_async - if there is an open journal entry, or a journal
 * still being written, write it and wait for the write to complete
 */
void bch2_journal_flush_async(struct journal *j, struct closure *parent)
{
	bch2_journal_flush_seq_async(j, atomic64_read(&j->seq), parent);
}

int bch2_journal_flush(struct journal *j)
{
	return bch2_journal_flush_seq(j, atomic64_read(&j->seq));
}

/*
 * bch2_journal_noflush_seq - tell the journal not to issue any flushes before
 * @seq
 */
bool bch2_journal_noflush_seq(struct journal *j, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	u64 unwritten_seq;
	bool ret = false;

	if (!(c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush)))
		return false;

	if (seq <= c->journal.flushed_seq_ondisk)
		return false;

	spin_lock(&j->lock);
	if (seq <= c->journal.flushed_seq_ondisk)
		goto out;

	for (unwritten_seq = journal_last_unwritten_seq(j);
	     unwritten_seq < seq;
	     unwritten_seq++) {
		struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq);

		/* journal write is already in flight, and was a flush write: */
		if (unwritten_seq == journal_last_unwritten_seq(j) && !buf->noflush)
			goto out;

		buf->noflush = true;
	}

	ret = true;
out:
	spin_unlock(&j->lock);
	return ret;
}

int bch2_journal_meta(struct journal *j)
{
	struct journal_buf *buf;
	struct journal_res res;
	int ret;

	memset(&res, 0, sizeof(res));

	ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
	if (ret)
		return ret;

	buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
	buf->must_flush = true;

	if (!buf->flush_time) {
		buf->flush_time = local_clock() ?: 1;
		buf->expires = jiffies;
	}

	bch2_journal_res_put(j, &res);

	return bch2_journal_flush_seq(j, res.seq);
}

int bch2_journal_log_msg(struct journal *j, const char *fmt, ...)
{
	struct jset_entry_log *entry;
	struct journal_res res = { 0 };
	unsigned msglen, u64s;
	va_list args;
	int ret;

	va_start(args, fmt);
	msglen = vsnprintf(NULL, 0, fmt, args) + 1;
	va_end(args);

	u64s = jset_u64s(DIV_ROUND_UP(msglen, sizeof(u64)));

	ret = bch2_journal_res_get(j, &res, u64s, 0);
	if (ret)
		return ret;

	entry = container_of(journal_res_entry(j, &res),
			     struct jset_entry_log, entry);
	memset(entry, 0, u64s * sizeof(u64));
	entry->entry.type = BCH_JSET_ENTRY_log;
	entry->entry.u64s = u64s - 1;

	va_start(args, fmt);
	vsnprintf(entry->d, INT_MAX, fmt, args);
	va_end(args);

	bch2_journal_res_put(j, &res);

	return bch2_journal_flush_seq(j, res.seq);
}
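
/*
 * Example use of bch2_journal_log_msg() (illustrative; the message text is
 * made up) - the resulting BCH_JSET_ENTRY_log entry is visible when listing
 * journal entries:
 *
 *	bch2_journal_log_msg(&c->journal, "marking an example event");
 */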

/* block/unblock the journal: */

void bch2_journal_unblock(struct journal *j)
{
	spin_lock(&j->lock);
	j->blocked--;
	spin_unlock(&j->lock);

	journal_wake(j);
}

void bch2_journal_block(struct journal *j)
{
	spin_lock(&j->lock);
	j->blocked++;
	spin_unlock(&j->lock);

	journal_quiesce(j);
}
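
/*
 * Illustrative pairing (a sketch): bch2_journal_block() waits for the journal
 * to quiesce and keeps new entries from being opened until the matching
 * bch2_journal_unblock():
 *
 *	bch2_journal_block(j);
 *	// journal is quiesced; safe to mutate state the write path reads
 *	bch2_journal_unblock(j);
 */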

/* allocate journal on a device: */

static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
					 bool new_fs, struct closure *cl)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets;
	u64 *new_bucket_seq = NULL, *new_buckets = NULL;
	int ret = 0;

	/* don't handle reducing nr of buckets yet: */
	if (nr <= ja->nr)
		return 0;

	new_buckets	= kzalloc(nr * sizeof(u64), GFP_KERNEL);
	new_bucket_seq	= kzalloc(nr * sizeof(u64), GFP_KERNEL);
	if (!new_buckets || !new_bucket_seq) {
		ret = -ENOMEM;
		goto err;
	}

	journal_buckets = bch2_sb_resize_journal(&ca->disk_sb,
					nr + sizeof(*journal_buckets) / sizeof(u64));
	if (!journal_buckets) {
		ret = -ENOSPC;
		goto err;
	}

	/*
	 * We may be called from the device add path, before the new device has
	 * actually been added to the running filesystem:
	 */
	if (!new_fs)
		spin_lock(&c->journal.lock);

	memcpy(new_buckets,	ja->buckets,	ja->nr * sizeof(u64));
	memcpy(new_bucket_seq,	ja->bucket_seq,	ja->nr * sizeof(u64));
	swap(new_buckets,	ja->buckets);
	swap(new_bucket_seq,	ja->bucket_seq);

	if (!new_fs)
		spin_unlock(&c->journal.lock);

	while (ja->nr < nr) {
		struct open_bucket *ob = NULL;
		unsigned pos;
		long b;

		if (new_fs) {
			b = bch2_bucket_alloc_new_fs(ca);
			if (b < 0) {
				ret = -ENOSPC;
				goto err;
			}
		} else {
			rcu_read_lock();
			ob = bch2_bucket_alloc(c, ca, RESERVE_NONE,
					       false, cl);
			rcu_read_unlock();
			if (IS_ERR(ob)) {
				ret = cl ? -EAGAIN : -ENOSPC;
				goto err;
			}

			b = ob->bucket;
		}

		if (c)
			spin_lock(&c->journal.lock);

		/*
		 * XXX
		 * For resize at runtime, we should be writing the new
		 * superblock before inserting into the journal array
		 */

		pos = ja->discard_idx ?: ja->nr;
		__array_insert_item(ja->buckets,		ja->nr, pos);
		__array_insert_item(ja->bucket_seq,		ja->nr, pos);
		__array_insert_item(journal_buckets->buckets,	ja->nr, pos);
		ja->nr++;

		ja->buckets[pos] = b;
		ja->bucket_seq[pos] = 0;
		journal_buckets->buckets[pos] = cpu_to_le64(b);

		if (pos <= ja->discard_idx)
			ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
		if (pos <= ja->dirty_idx_ondisk)
			ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;
		if (pos <= ja->dirty_idx)
			ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;
		if (pos <= ja->cur_idx)
			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

		if (c)
			spin_unlock(&c->journal.lock);

		if (!new_fs) {
			ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_NOFAIL,
				bch2_trans_mark_metadata_bucket(&trans, ca,
						b, BCH_DATA_journal,
						ca->mi.bucket_size));

			bch2_open_bucket_put(c, ob);

			if (ret)
				goto err;
		}
	}
err:
	bch2_sb_resize_journal(&ca->disk_sb,
		ja->nr + sizeof(*journal_buckets) / sizeof(u64));
	kfree(new_bucket_seq);
	kfree(new_buckets);

	return ret;
}

/*
 * Allocate more journal space at runtime - not currently making use of it, but
 * the code works:
 */
int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
				unsigned nr)
{
	struct journal_device *ja = &ca->journal;
	struct closure cl;
	unsigned current_nr;
	int ret;

	closure_init_stack(&cl);

	do {
		struct disk_reservation disk_res = { 0, 0 };

		closure_sync(&cl);

		mutex_lock(&c->sb_lock);
		current_nr = ja->nr;

		/*
		 * note: journal buckets aren't really counted as _sectors_ used yet, so
		 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
		 * when space used goes up without a reservation - but we do need the
		 * reservation to ensure we'll actually be able to allocate:
		 */

		if (bch2_disk_reservation_get(c, &disk_res,
					      bucket_to_sector(ca, nr - ja->nr), 1, 0)) {
			mutex_unlock(&c->sb_lock);
			return -ENOSPC;
		}

		ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);

		bch2_disk_reservation_put(c, &disk_res);

		if (ja->nr != current_nr)
			bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	} while (ret == -EAGAIN);

	return ret;
}

int bch2_dev_journal_alloc(struct bch_dev *ca)
{
	unsigned nr;

	if (dynamic_fault("bcachefs:add:journal_alloc"))
		return -ENOMEM;

	/* 1/128th of the device by default: */
	nr = ca->mi.nbuckets >> 7;

	/*
	 * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
	 * is smaller:
	 */
	nr = clamp_t(unsigned, nr,
		     BCH_JOURNAL_BUCKETS_MIN,
		     min(1 << 13,
			 (1 << 24) / ca->mi.bucket_size));

	return __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
}
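
/*
 * Worked example of the sizing above, for a hypothetical device with 2^21
 * buckets of 1024 sectors (512KB) each:
 *
 *	nr  = 2^21 >> 7 = 16384 buckets (1/128th of the device)
 *	cap = min(1 << 13, (1 << 24) / 1024) = min(8192, 16384) = 8192
 *
 * so nr is clamped to 8192 buckets - 8192 * 512KB = 4GB of journal, under
 * the 8GB (1 << 24 sectors) ceiling.
 */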

/* startup/shutdown: */

static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
{
	bool ret = false;
	u64 seq;

	spin_lock(&j->lock);
	for (seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j) && !ret;
	     seq++) {
		struct journal_buf *buf = journal_seq_to_buf(j, seq);

		if (bch2_bkey_has_device(bkey_i_to_s_c(&buf->key), dev_idx))
			ret = true;
	}
	spin_unlock(&j->lock);

	return ret;
}

void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
{
	wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
}

void bch2_fs_journal_stop(struct journal *j)
{
	bch2_journal_reclaim_stop(j);
	bch2_journal_flush_all_pins(j);

	wait_event(j->wait, journal_entry_close(j));

	/*
	 * Always write a new journal entry, to make sure the clock hands are up
	 * to date (and match the superblock)
	 */
	bch2_journal_meta(j);

	journal_quiesce(j);

	BUG_ON(!bch2_journal_error(j) &&
	       test_bit(JOURNAL_REPLAY_DONE, &j->flags) &&
	       j->last_empty_seq != journal_cur_seq(j));

	cancel_delayed_work_sync(&j->write_work);
}

int bch2_fs_journal_start(struct journal *j, u64 cur_seq,
			  struct list_head *journal_entries)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	struct journal_replay *i;
	u64 last_seq = cur_seq, nr, seq;

	if (!list_empty(journal_entries))
		last_seq = le64_to_cpu(list_last_entry(journal_entries,
				struct journal_replay, list)->j.last_seq);

	nr = cur_seq - last_seq;

	if (nr + 1 > j->pin.size) {
		free_fifo(&j->pin);
		init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
		if (!j->pin.data) {
			bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
			return -ENOMEM;
		}
	}

	j->replay_journal_seq	= last_seq;
	j->replay_journal_seq_end = cur_seq;
	j->last_seq_ondisk	= last_seq;
	j->flushed_seq_ondisk	= cur_seq - 1;
	j->seq_ondisk		= cur_seq - 1;
	j->pin.front		= last_seq;
	j->pin.back		= cur_seq;
	atomic64_set(&j->seq, cur_seq - 1);

	if (list_empty(journal_entries))
		j->last_empty_seq = cur_seq - 1;

	fifo_for_each_entry_ptr(p, &j->pin, seq)
		journal_pin_list_init(p, 1);

	list_for_each_entry(i, journal_entries, list) {
		unsigned ptr;

		seq = le64_to_cpu(i->j.seq);
		BUG_ON(seq >= cur_seq);

		if (seq < last_seq)
			continue;

		if (journal_entry_empty(&i->j))
			j->last_empty_seq = le64_to_cpu(i->j.seq);

		p = journal_seq_pin(j, seq);

		p->devs.nr = 0;
		for (ptr = 0; ptr < i->nr_ptrs; ptr++)
			bch2_dev_list_add_dev(&p->devs, i->ptrs[ptr].dev);
	}

	if (list_empty(journal_entries))
		j->last_empty_seq = cur_seq;

	spin_lock(&j->lock);

	set_bit(JOURNAL_STARTED, &j->flags);
	j->last_flush_write = jiffies;

	j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
	j->reservations.unwritten_idx++;

	c->last_bucket_seq_cleanup = journal_cur_seq(j);

	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	return bch2_journal_reclaim_start(j);
}

/* init/exit: */

void bch2_dev_journal_exit(struct bch_dev *ca)
{
	kfree(ca->journal.bio);
	kfree(ca->journal.buckets);
	kfree(ca->journal.bucket_seq);

	ca->journal.bio		= NULL;
	ca->journal.buckets	= NULL;
	ca->journal.bucket_seq	= NULL;
}

int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_get_journal(sb);
	unsigned i;

	ja->nr = bch2_nr_journal_buckets(journal_buckets);

	ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->bucket_seq)
		return -ENOMEM;

	ca->journal.bio = bio_kmalloc(GFP_KERNEL,
			DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE));
	if (!ca->journal.bio)
		return -ENOMEM;

	ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->buckets)
		return -ENOMEM;

	for (i = 0; i < ja->nr; i++)
		ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);

	return 0;
}

void bch2_fs_journal_exit(struct journal *j)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(j->buf); i++)
		kvpfree(j->buf[i].data, j->buf[i].buf_size);
	free_fifo(&j->pin);
}

int bch2_fs_journal_init(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	static struct lock_class_key res_key;
	unsigned i;
	int ret = 0;

	pr_verbose_init(c->opts, "");

	spin_lock_init(&j->lock);
	spin_lock_init(&j->err_lock);
	init_waitqueue_head(&j->wait);
	INIT_DELAYED_WORK(&j->write_work, journal_write_work);
	init_waitqueue_head(&j->reclaim_wait);
	init_waitqueue_head(&j->pin_flush_wait);
	mutex_init(&j->reclaim_lock);
	mutex_init(&j->discard_lock);

	lockdep_init_map(&j->res_map, "journal res", &res_key, 0);

	atomic64_set(&j->reservations.counter,
		((union journal_res_state)
		 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL))) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < ARRAY_SIZE(j->buf); i++) {
		j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
		j->buf[i].data = kvpmalloc(j->buf[i].buf_size, GFP_KERNEL);
		if (!j->buf[i].data) {
			ret = -ENOMEM;
			goto out;
		}
	}

	j->pin.front = j->pin.back = 1;
out:
	pr_verbose_init(c->opts, "ret %i", ret);
	return ret;
}

/* debug: */

void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	union journal_res_state s;
	struct bch_dev *ca;
	unsigned long now = jiffies;
	u64 seq;
	unsigned i;

	out->atomic++;
	out->tabstops[0] = 24;

	rcu_read_lock();
	s = READ_ONCE(j->reservations);

	pr_buf(out, "dirty journal entries:\t%llu\n",	fifo_used(&j->pin));
	pr_buf(out, "seq:\t\t\t%llu\n",			journal_cur_seq(j));
	pr_buf(out, "seq_ondisk:\t\t%llu\n",		j->seq_ondisk);
	pr_buf(out, "last_seq:\t\t%llu\n",		journal_last_seq(j));
	pr_buf(out, "last_seq_ondisk:\t%llu\n",		j->last_seq_ondisk);
	pr_buf(out, "flushed_seq_ondisk:\t%llu\n",	j->flushed_seq_ondisk);
	pr_buf(out, "prereserved:\t\t%u/%u\n",		j->prereserved.reserved, j->prereserved.remaining);
	pr_buf(out, "each entry reserved:\t%u\n",	j->entry_u64s_reserved);
	pr_buf(out, "nr flush writes:\t%llu\n",		j->nr_flush_writes);
	pr_buf(out, "nr noflush writes:\t%llu\n",	j->nr_noflush_writes);
	pr_buf(out, "nr direct reclaim:\t%llu\n",	j->nr_direct_reclaim);
	pr_buf(out, "nr background reclaim:\t%llu\n",	j->nr_background_reclaim);
	pr_buf(out, "reclaim kicked:\t\t%u\n",		j->reclaim_kicked);
	pr_buf(out, "reclaim runs in:\t%u ms\n",	time_after(j->next_reclaim, now)
	       ? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
	pr_buf(out, "current entry sectors:\t%u\n",	j->cur_entry_sectors);
	pr_buf(out, "current entry error:\t%u\n",	j->cur_entry_error);
	pr_buf(out, "current entry:\t\t");

	switch (s.cur_entry_offset) {
	case JOURNAL_ENTRY_ERROR_VAL:
		pr_buf(out, "error");
		break;
	case JOURNAL_ENTRY_CLOSED_VAL:
		pr_buf(out, "closed");
		break;
	default:
		pr_buf(out, "%u/%u", s.cur_entry_offset, j->cur_entry_u64s);
		break;
	}

	pr_newline(out);

	for (seq = journal_cur_seq(j);
	     seq >= journal_last_unwritten_seq(j);
	     --seq) {
		i = seq & JOURNAL_BUF_MASK;

		pr_buf(out, "unwritten entry:");
		pr_tab(out);
		pr_buf(out, "%llu", seq);
		pr_newline(out);
		pr_indent_push(out, 2);

		pr_buf(out, "refcount:");
		pr_tab(out);
		pr_buf(out, "%u", journal_state_count(s, i));
		pr_newline(out);

		pr_buf(out, "sectors:");
		pr_tab(out);
		pr_buf(out, "%u", j->buf[i].sectors);
		pr_newline(out);

		pr_buf(out, "expires:");
		pr_tab(out);
		pr_buf(out, "%li jiffies", j->buf[i].expires - jiffies);
		pr_newline(out);

		pr_indent_pop(out, 2);
	}

	pr_buf(out,
	       "replay done:\t\t%i\n",
	       test_bit(JOURNAL_REPLAY_DONE,	&j->flags));

	pr_buf(out, "space:\n");
	pr_buf(out, "\tdiscarded\t%u:%u\n",
	       j->space[journal_space_discarded].next_entry,
	       j->space[journal_space_discarded].total);
	pr_buf(out, "\tclean ondisk\t%u:%u\n",
	       j->space[journal_space_clean_ondisk].next_entry,
	       j->space[journal_space_clean_ondisk].total);
	pr_buf(out, "\tclean\t\t%u:%u\n",
	       j->space[journal_space_clean].next_entry,
	       j->space[journal_space_clean].total);
	pr_buf(out, "\ttotal\t\t%u:%u\n",
	       j->space[journal_space_total].next_entry,
	       j->space[journal_space_total].total);

	for_each_member_device_rcu(ca, c, i,
				   &c->rw_devs[BCH_DATA_journal]) {
		struct journal_device *ja = &ca->journal;

		if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
			continue;

		if (!ja->nr)
			continue;

		pr_buf(out, "dev %u:\n",		i);
		pr_buf(out, "\tnr\t\t%u\n",		ja->nr);
		pr_buf(out, "\tbucket size\t%u\n",	ca->mi.bucket_size);
		pr_buf(out, "\tavailable\t%u:%u\n",	bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
		pr_buf(out, "\tdiscard_idx\t%u\n",	ja->discard_idx);
		pr_buf(out, "\tdirty_ondisk\t%u (seq %llu)\n", ja->dirty_idx_ondisk,	ja->bucket_seq[ja->dirty_idx_ondisk]);
		pr_buf(out, "\tdirty_idx\t%u (seq %llu)\n", ja->dirty_idx,		ja->bucket_seq[ja->dirty_idx]);
		pr_buf(out, "\tcur_idx\t\t%u (seq %llu)\n", ja->cur_idx,		ja->bucket_seq[ja->cur_idx]);
	}

	rcu_read_unlock();

	--out->atomic;
}

void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	spin_lock(&j->lock);
	__bch2_journal_debug_to_text(out, j);
	spin_unlock(&j->lock);
}

bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *pin;

	spin_lock(&j->lock);
	*seq = max(*seq, j->pin.front);

	if (*seq >= j->pin.back) {
		spin_unlock(&j->lock);
		return true;
	}

	out->atomic++;

	pin_list = journal_seq_pin(j, *seq);

	pr_buf(out, "%llu: count %u", *seq, atomic_read(&pin_list->count));
	pr_newline(out);
	pr_indent_push(out, 2);

	list_for_each_entry(pin, &pin_list->list, list) {
		pr_buf(out, "\t%px %ps", pin, pin->flush);
		pr_newline(out);
	}

	list_for_each_entry(pin, &pin_list->key_cache_list, list) {
		pr_buf(out, "\t%px %ps", pin, pin->flush);
		pr_newline(out);
	}

	if (!list_empty(&pin_list->flushed)) {
		pr_buf(out, "flushed:");
		pr_newline(out);
	}

	list_for_each_entry(pin, &pin_list->flushed, list) {
		pr_buf(out, "\t%px %ps", pin, pin->flush);
		pr_newline(out);
	}

	pr_indent_pop(out, 2);

	--out->atomic;
	spin_unlock(&j->lock);

	return false;
}

void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
{
	u64 seq = 0;

	while (!bch2_journal_seq_pins_to_text(out, j, &seq))
		seq++;
}