// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "super-io.h"

#include <trace/events/bcachefs.h>

static u64 last_unwritten_seq(struct journal *j)
{
        union journal_res_state s = READ_ONCE(j->reservations);

        lockdep_assert_held(&j->lock);

        return journal_cur_seq(j) - ((s.idx - s.unwritten_idx) & JOURNAL_BUF_MASK);
}

static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
{
        return seq >= last_unwritten_seq(j);
}

static bool __journal_entry_is_open(union journal_res_state state)
{
        return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}

static bool journal_entry_is_open(struct journal *j)
{
        return __journal_entry_is_open(j->reservations);
}

static inline struct journal_buf *
journal_seq_to_buf(struct journal *j, u64 seq)
{
        struct journal_buf *buf = NULL;

        EBUG_ON(seq > journal_cur_seq(j));
        EBUG_ON(seq == journal_cur_seq(j) &&
                j->reservations.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL);

        if (journal_seq_unwritten(j, seq)) {
                buf = j->buf + (seq & JOURNAL_BUF_MASK);
                EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
        }
        return buf;
}
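
/*
 * Illustrative example (not part of the original file; assumes
 * JOURNAL_BUF_NR == 4, i.e. JOURNAL_BUF_MASK == 3): if journal_cur_seq()
 * == 102 with s.idx == 2 and s.unwritten_idx == 0, then (2 - 0) & 3 == 2
 * entries are closed but not yet written, so last_unwritten_seq() == 100;
 * journal_seq_to_buf() then maps seq 100 to &j->buf[100 & 3] == &j->buf[0]
 * and seq 101 to &j->buf[1].
 */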

static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
{
        INIT_LIST_HEAD(&p->list);
        INIT_LIST_HEAD(&p->key_cache_list);
        INIT_LIST_HEAD(&p->flushed);
        atomic_set(&p->count, count);
        p->devs.nr = 0;
}

static void journal_pin_new_entry(struct journal *j)
{
        /*
         * The fifo_push() needs to happen at the same time as j->seq is
         * incremented for journal_last_seq() to be calculated correctly
         */
        atomic64_inc(&j->seq);
        journal_pin_list_init(fifo_push_ref(&j->pin), 1);
}

static void bch2_journal_buf_init(struct journal *j)
{
        struct journal_buf *buf = journal_cur_buf(j);

        bkey_extent_init(&buf->key);
        buf->noflush    = false;
        buf->must_flush = false;
        buf->separate_flush = false;

        memset(buf->data, 0, sizeof(*buf->data));
        buf->data->seq  = cpu_to_le64(journal_cur_seq(j));
        buf->data->u64s = 0;
}

void bch2_journal_halt(struct journal *j)
{
        union journal_res_state old, new;
        u64 v = atomic64_read(&j->reservations.counter);

        do {
                old.v = new.v = v;
                if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
                        return;

                new.cur_entry_offset = JOURNAL_ENTRY_ERROR_VAL;
        } while ((v = atomic64_cmpxchg(&j->reservations.counter,
                                       old.v, new.v)) != old.v);

        /*
         * XXX: we're not using j->lock here because this can be called from
         * interrupt context; this can race with journal_write_done()
         */
        if (!j->err_seq)
                j->err_seq = journal_cur_seq(j);
        journal_wake(j);
        closure_wake_up(&journal_cur_buf(j)->wait);
}

/* journal entry close/open: */

void __bch2_journal_buf_put(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);

        closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
}

/*
 * Returns true if the journal entry is now closed:
 *
 * We don't close a journal_buf until the next journal_buf is finished
 * writing and can be opened again - this also initializes the next
 * journal_buf:
 */
static bool __journal_entry_close(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_buf *buf = journal_cur_buf(j);
        union journal_res_state old, new;
        u64 v = atomic64_read(&j->reservations.counter);
        unsigned sectors;

        lockdep_assert_held(&j->lock);

        do {
                old.v = new.v = v;
                if (old.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL)
                        return true;

                if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL) {
                        /* this entry will never be written: */
                        closure_wake_up(&buf->wait);
                        return true;
                }

                if (!test_bit(JOURNAL_NEED_WRITE, &j->flags)) {
                        set_bit(JOURNAL_NEED_WRITE, &j->flags);
                        j->need_write_time = local_clock();
                }

                new.cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL;
                new.idx++;

                if (new.idx == new.unwritten_idx)
                        return false;

                BUG_ON(journal_state_count(new, new.idx));
        } while ((v = atomic64_cmpxchg(&j->reservations.counter,
                                       old.v, new.v)) != old.v);

        /* Close out old buffer: */
        buf->data->u64s         = cpu_to_le32(old.cur_entry_offset);

        sectors = vstruct_blocks_plus(buf->data, c->block_bits,
                                      buf->u64s_reserved) << c->block_bits;
        BUG_ON(sectors > buf->sectors);
        buf->sectors = sectors;

        /*
         * We have to set last_seq here, _before_ opening a new journal entry:
         *
         * A thread may replace an old pin with a new pin on its current
         * journal reservation - the expectation being that the journal will
         * contain either what the old pin protected or what the new pin
         * protects.
         *
         * After the old pin is dropped journal_last_seq() won't include the old
         * pin, so we can only write the updated last_seq on the entry that
         * contains whatever the new pin protects (e.g. if a pin on seq 8 is
         * moved to the currently open seq 10, only entry 10 may record
         * last_seq > 8: otherwise replay could drop entry 8 without entry 10
         * having made it to disk).
         *
         * Restated, we can _not_ update last_seq for a given entry if there
         * could be a newer entry open with reservations/pins that have been
         * taken against it.
         *
         * Hence, we want to update/set last_seq on the current journal entry
         * right before we open a new one:
         */
        buf->last_seq           = journal_last_seq(j);
        buf->data->last_seq     = cpu_to_le64(buf->last_seq);

        __bch2_journal_pin_put(j, le64_to_cpu(buf->data->seq));

        /* Initialize new buffer: */
        journal_pin_new_entry(j);

        bch2_journal_buf_init(j);

        cancel_delayed_work(&j->write_work);
        clear_bit(JOURNAL_NEED_WRITE, &j->flags);

        bch2_journal_space_available(j);

        bch2_journal_buf_put(j, old.idx);
        return true;
}
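
/*
 * Illustrative walkthrough of the cmpxchg loop above (not part of the
 * original file; assumes JOURNAL_BUF_NR == 4, so idx wraps mod 4): with
 * unwritten_idx == 2 and idx == 1, incrementing idx would make it equal
 * unwritten_idx - every buffer would then be closed but unwritten - so we
 * return false and the entry stays open until a journal write completes.
 */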

static bool journal_entry_want_write(struct journal *j)
{
        union journal_res_state s = READ_ONCE(j->reservations);
        bool ret = false;

        /*
         * Don't close it yet if we already have a write in flight, but do set
         * NEED_WRITE:
         */
        if (s.idx != s.unwritten_idx)
                set_bit(JOURNAL_NEED_WRITE, &j->flags);
        else
                ret = __journal_entry_close(j);

        return ret;
}

static bool journal_entry_close(struct journal *j)
{
        bool ret;

        spin_lock(&j->lock);
        ret = journal_entry_want_write(j);
        spin_unlock(&j->lock);

        return ret;
}

/*
 * should _only_ be called from journal_res_get() - when we actually want a
 * journal reservation - journal entry is open means journal is dirty:
 *
 * returns:
 * 0:           success
 * -ENOSPC:     journal currently full, must invoke reclaim
 * -EAGAIN:     journal blocked, must wait
 * -EROFS:      insufficient rw devices or journal error
 */
static int journal_entry_open(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_buf *buf = journal_cur_buf(j);
        union journal_res_state old, new;
        int u64s;
        u64 v;

        BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

        lockdep_assert_held(&j->lock);
        BUG_ON(journal_entry_is_open(j));

        if (j->blocked)
                return cur_entry_blocked;

        if (j->cur_entry_error)
                return j->cur_entry_error;

        BUG_ON(!j->cur_entry_sectors);

        buf->u64s_reserved      = j->entry_u64s_reserved;
        buf->disk_sectors       = j->cur_entry_sectors;
        buf->sectors            = min(buf->disk_sectors, buf->buf_size >> 9);

        u64s = (int) (buf->sectors << 9) / sizeof(u64) -
                journal_entry_overhead(j);
        u64s  = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);

        if (u64s <= le32_to_cpu(buf->data->u64s))
                return cur_entry_journal_full;

        /*
         * Must be set before marking the journal entry as open:
         */
        j->cur_entry_u64s = u64s;

        v = atomic64_read(&j->reservations.counter);
        do {
                old.v = new.v = v;

                if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
                        return cur_entry_insufficient_devices;

                /* Handle any already added entries */
                new.cur_entry_offset = le32_to_cpu(buf->data->u64s);

                EBUG_ON(journal_state_count(new, new.idx));
                journal_state_inc(&new);
        } while ((v = atomic64_cmpxchg(&j->reservations.counter,
                                       old.v, new.v)) != old.v);

        if (j->res_get_blocked_start)
                bch2_time_stats_update(j->blocked_time,
                                       j->res_get_blocked_start);
        j->res_get_blocked_start = 0;

        mod_delayed_work(c->io_complete_wq,
                         &j->write_work,
                         msecs_to_jiffies(c->opts.journal_flush_delay));
        journal_wake(j);
        return 0;
}
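
/*
 * Worked example for the u64s calculation above (illustrative; the
 * overhead figure is an assumption): with buf->sectors == 8 the buffer
 * holds 8 << 9 == 4096 bytes == 512 u64s; if journal_entry_overhead()
 * came to 20 u64s, the open entry could take 492 u64s of reservations
 * before reporting cur_entry_journal_full.
 */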

static bool journal_quiesced(struct journal *j)
{
        union journal_res_state s = READ_ONCE(j->reservations);
        bool ret = s.idx == s.unwritten_idx && !__journal_entry_is_open(s);

        if (!ret)
                journal_entry_close(j);
        return ret;
}

static void journal_quiesce(struct journal *j)
{
        wait_event(j->wait, journal_quiesced(j));
}

static void journal_write_work(struct work_struct *work)
{
        struct journal *j = container_of(work, struct journal, write_work.work);

        journal_entry_close(j);
}

static int __journal_res_get(struct journal *j, struct journal_res *res,
                             unsigned flags)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_buf *buf;
        bool can_discard;
        int ret;
retry:
        if (journal_res_get_fast(j, res, flags))
                return 0;

        if (bch2_journal_error(j))
                return -EROFS;

        spin_lock(&j->lock);

        /*
         * Recheck after taking the lock, so we don't race with another thread
         * that just did journal_entry_open(), and don't call
         * journal_entry_close() unnecessarily:
         */
        if (journal_res_get_fast(j, res, flags)) {
                spin_unlock(&j->lock);
                return 0;
        }

        if (!(flags & JOURNAL_RES_GET_RESERVED) &&
            !test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags)) {
                /*
                 * Don't want to close current journal entry, just need to
                 * invoke reclaim:
                 */
                ret = cur_entry_journal_full;
                goto unlock;
        }

        /*
         * If we couldn't get a reservation because the current buf filled up,
         * and we had room for a bigger entry on disk, signal that we want to
         * realloc the journal bufs:
         */
        buf = journal_cur_buf(j);
        if (journal_entry_is_open(j) &&
            buf->buf_size >> 9 < buf->disk_sectors &&
            buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
                j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);

        if (journal_entry_is_open(j) &&
            !__journal_entry_close(j)) {
                /*
                 * We failed to get a reservation on the current open journal
                 * entry because it's full, and we can't close it because
                 * there's still a previous one in flight:
                 */
                trace_journal_entry_full(c);
                ret = cur_entry_blocked;
        } else {
                ret = journal_entry_open(j);
        }
unlock:
        if ((ret && ret != cur_entry_insufficient_devices) &&
            !j->res_get_blocked_start) {
                j->res_get_blocked_start = local_clock() ?: 1;
                trace_journal_full(c);
        }

        can_discard = j->can_discard;
        spin_unlock(&j->lock);

        if (!ret)
                goto retry;

        if ((ret == cur_entry_journal_full ||
             ret == cur_entry_journal_pin_full) &&
            !can_discard &&
            j->reservations.idx == j->reservations.unwritten_idx &&
            (flags & JOURNAL_RES_GET_RESERVED)) {
                char *journal_debug_buf = kmalloc(4096, GFP_ATOMIC);

                bch_err(c, "Journal stuck!");
                if (journal_debug_buf) {
                        bch2_journal_debug_to_text(&_PBUF(journal_debug_buf, 4096), j);
                        bch_err(c, "%s", journal_debug_buf);

                        bch2_journal_pins_to_text(&_PBUF(journal_debug_buf, 4096), j);
                        bch_err(c, "Journal pins:\n%s", journal_debug_buf);
                        kfree(journal_debug_buf);
                }

                bch2_fatal_error(c);
                dump_stack();
        }

        /*
         * Journal is full - can't rely on reclaim from work item due to
         * freezing:
         */
        if ((ret == cur_entry_journal_full ||
             ret == cur_entry_journal_pin_full) &&
            !(flags & JOURNAL_RES_GET_NONBLOCK)) {
                if (can_discard) {
                        bch2_journal_do_discards(j);
                        goto retry;
                }

                if (mutex_trylock(&j->reclaim_lock)) {
                        bch2_journal_reclaim(j);
                        mutex_unlock(&j->reclaim_lock);
                }
        }

        return ret == cur_entry_insufficient_devices ? -EROFS : -EAGAIN;
}

/*
 * Essentially the entry point to the journaling code. When bcachefs is doing
 * a btree insert, it calls this function to get the current journal write.
 * The journal_res is the structure used to set up journal writes: the calling
 * function will then add its keys to the structure, queuing them for the next
 * write.
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks.
 */
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
                                  unsigned flags)
{
        int ret;

        closure_wait_event(&j->async_wait,
                   (ret = __journal_res_get(j, res, flags)) != -EAGAIN ||
                   (flags & JOURNAL_RES_GET_NONBLOCK));
        return ret;
}
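
/*
 * Illustrative usage sketch (not part of the original file) - the typical
 * reservation lifecycle, as also seen in bch2_journal_meta() below:
 *
 *      struct journal_res res = { 0 };
 *      int ret = bch2_journal_res_get(j, &res, jset_u64s(u64s), 0);
 *      if (ret)
 *              return ret;
 *      // ... copy keys into the open journal buffer at res.offset ...
 *      bch2_journal_res_put(j, &res);
 */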

/* journal_preres: */

static bool journal_preres_available(struct journal *j,
                                     struct journal_preres *res,
                                     unsigned new_u64s,
                                     unsigned flags)
{
        bool ret = bch2_journal_preres_get_fast(j, res, new_u64s, flags, true);

        if (!ret && mutex_trylock(&j->reclaim_lock)) {
                bch2_journal_reclaim(j);
                mutex_unlock(&j->reclaim_lock);
        }

        return ret;
}

int __bch2_journal_preres_get(struct journal *j,
                              struct journal_preres *res,
                              unsigned new_u64s,
                              unsigned flags)
{
        int ret;

        closure_wait_event(&j->preres_wait,
                   (ret = bch2_journal_error(j)) ||
                   journal_preres_available(j, res, new_u64s, flags));
        return ret;
}

/* journal_entry_res: */

void bch2_journal_entry_res_resize(struct journal *j,
                                   struct journal_entry_res *res,
                                   unsigned new_u64s)
{
        union journal_res_state state;
        int d = new_u64s - res->u64s;

        spin_lock(&j->lock);

        j->entry_u64s_reserved += d;
        if (d <= 0)
                goto out;

        j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
        smp_mb();
        state = READ_ONCE(j->reservations);

        if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
            state.cur_entry_offset > j->cur_entry_u64s) {
                j->cur_entry_u64s += d;
                /*
                 * Not enough room in current journal entry, have to flush it:
                 */
                __journal_entry_close(j);
        } else {
                journal_cur_buf(j)->u64s_reserved += d;
        }
out:
        spin_unlock(&j->lock);
        res->u64s += d;
}

/* journal flushing: */

/**
 * bch2_journal_flush_seq_async - wait for a journal entry to be written
 *
 * Like bch2_journal_wait_on_seq, except that it triggers a write immediately
 * if necessary
 */
int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
                                 struct closure *parent)
{
        struct journal_buf *buf;
        int ret = 0;

        if (seq <= j->flushed_seq_ondisk)
                return 1;

        spin_lock(&j->lock);

        if (WARN_ONCE(seq > journal_cur_seq(j),
                      "requested to flush journal seq %llu, but currently at %llu",
                      seq, journal_cur_seq(j)))
                goto out;

        /* Recheck under lock: */
        if (j->err_seq && seq >= j->err_seq) {
                ret = -EIO;
                goto out;
        }

        if (seq <= j->flushed_seq_ondisk) {
                ret = 1;
                goto out;
        }

        /* if seq was written, but not flushed - flush a newer one instead */
        seq = max(seq, last_unwritten_seq(j));

recheck_need_open:
        if (seq == journal_cur_seq(j) && !journal_entry_is_open(j)) {
                struct journal_res res = { 0 };

                spin_unlock(&j->lock);

                ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
                if (ret)
                        return ret;

                seq = res.seq;
                buf = j->buf + (seq & JOURNAL_BUF_MASK);
                buf->must_flush = true;
                set_bit(JOURNAL_NEED_WRITE, &j->flags);

                if (parent && !closure_wait(&buf->wait, parent))
                        BUG();

                bch2_journal_res_put(j, &res);

                spin_lock(&j->lock);
                goto want_write;
        }

        /*
         * if write was kicked off without a flush, flush the next sequence
         * number instead
         */
        buf = journal_seq_to_buf(j, seq);
        if (buf->noflush) {
                seq++;
                goto recheck_need_open;
        }

        buf->must_flush = true;

        if (parent && !closure_wait(&buf->wait, parent))
                BUG();
want_write:
        if (seq == journal_cur_seq(j))
                journal_entry_want_write(j);
out:
        spin_unlock(&j->lock);
        return ret;
}

int bch2_journal_flush_seq(struct journal *j, u64 seq)
{
        u64 start_time = local_clock();
        int ret, ret2;

        /*
         * Don't update time_stats when @seq is already flushed:
         */
        if (seq <= j->flushed_seq_ondisk)
                return 0;

        ret = wait_event_interruptible(j->wait, (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)));

        if (!ret)
                bch2_time_stats_update(j->flush_seq_time, start_time);

        return ret ?: ret2 < 0 ? ret2 : 0;
}

int bch2_journal_meta(struct journal *j)
{
        struct journal_res res;
        int ret;

        memset(&res, 0, sizeof(res));

        ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
        if (ret)
                return ret;

        bch2_journal_res_put(j, &res);

        return bch2_journal_flush_seq(j, res.seq);
}

/*
 * bch2_journal_flush_async - if there is an open journal entry, or a journal
 * still being written, write it and wait for the write to complete
 */
void bch2_journal_flush_async(struct journal *j, struct closure *parent)
{
        u64 seq, journal_seq;

        spin_lock(&j->lock);
        journal_seq = journal_cur_seq(j);

        if (journal_entry_is_open(j)) {
                seq = journal_seq;
        } else if (journal_seq) {
                seq = journal_seq - 1;
        } else {
                spin_unlock(&j->lock);
                return;
        }
        spin_unlock(&j->lock);

        bch2_journal_flush_seq_async(j, seq, parent);
}

int bch2_journal_flush(struct journal *j)
{
        u64 seq, journal_seq;

        spin_lock(&j->lock);
        journal_seq = journal_cur_seq(j);

        if (journal_entry_is_open(j)) {
                seq = journal_seq;
        } else if (journal_seq) {
                seq = journal_seq - 1;
        } else {
                spin_unlock(&j->lock);
                return 0;
        }
        spin_unlock(&j->lock);

        return bch2_journal_flush_seq(j, seq);
}

/* block/unblock the journal: */

void bch2_journal_unblock(struct journal *j)
{
        spin_lock(&j->lock);
        j->blocked--;
        spin_unlock(&j->lock);

        journal_wake(j);
}

void bch2_journal_block(struct journal *j)
{
        spin_lock(&j->lock);
        j->blocked++;
        spin_unlock(&j->lock);

        journal_quiesce(j);
}
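
/*
 * Illustrative sketch (not part of the original file): blocking the
 * journal around an operation that must not race with new journal entries
 * being opened:
 *
 *      bch2_journal_block(j);
 *      // journal quiesced: journal_entry_open() returns cur_entry_blocked
 *      bch2_journal_unblock(j);
 */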

/* allocate journal on a device: */

static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
                                         bool new_fs, struct closure *cl)
{
        struct bch_fs *c = ca->fs;
        struct journal_device *ja = &ca->journal;
        struct bch_sb_field_journal *journal_buckets;
        u64 *new_bucket_seq = NULL, *new_buckets = NULL;
        int ret = 0;

        /* don't handle reducing nr of buckets yet: */
        if (nr <= ja->nr)
                return 0;

        new_buckets     = kzalloc(nr * sizeof(u64), GFP_KERNEL);
        new_bucket_seq  = kzalloc(nr * sizeof(u64), GFP_KERNEL);
        if (!new_buckets || !new_bucket_seq) {
                ret = -ENOMEM;
                goto err;
        }

        journal_buckets = bch2_sb_resize_journal(&ca->disk_sb,
                                        nr + sizeof(*journal_buckets) / sizeof(u64));
        if (!journal_buckets) {
                ret = -ENOSPC;
                goto err;
        }

        /*
         * We may be called from the device add path, before the new device has
         * actually been added to the running filesystem:
         */
        if (!new_fs)
                spin_lock(&c->journal.lock);

        memcpy(new_buckets,     ja->buckets,    ja->nr * sizeof(u64));
        memcpy(new_bucket_seq,  ja->bucket_seq, ja->nr * sizeof(u64));
        swap(new_buckets,       ja->buckets);
        swap(new_bucket_seq,    ja->bucket_seq);

        if (!new_fs)
                spin_unlock(&c->journal.lock);

        while (ja->nr < nr) {
                struct open_bucket *ob = NULL;
                unsigned pos;
                long b;

                if (new_fs) {
                        if (c)
                                percpu_down_read(&c->mark_lock);
                        b = bch2_bucket_alloc_new_fs(ca);
                        if (b < 0) {
                                if (c)
                                        percpu_up_read(&c->mark_lock);
                                ret = -ENOSPC;
                                goto err;
                        }
                } else {
                        rcu_read_lock();
                        ob = bch2_bucket_alloc(c, ca, RESERVE_NONE,
                                               false, cl);
                        rcu_read_unlock();
                        if (IS_ERR(ob)) {
                                ret = cl ? -EAGAIN : -ENOSPC;
                                goto err;
                        }

                        b = sector_to_bucket(ca, ob->ptr.offset);
                }

                if (c)
                        spin_lock(&c->journal.lock);

                /*
                 * XXX
                 * For resize at runtime, we should be writing the new
                 * superblock before inserting into the journal array
                 */

                pos = ja->nr ? (ja->cur_idx + 1) % ja->nr : 0;
                __array_insert_item(ja->buckets,                ja->nr, pos);
                __array_insert_item(ja->bucket_seq,             ja->nr, pos);
                __array_insert_item(journal_buckets->buckets,   ja->nr, pos);
                ja->nr++;

                ja->buckets[pos] = b;
                ja->bucket_seq[pos] = 0;
                journal_buckets->buckets[pos] = cpu_to_le64(b);

                if (pos <= ja->discard_idx)
                        ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
                if (pos <= ja->dirty_idx_ondisk)
                        ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;
                if (pos <= ja->dirty_idx)
                        ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;
                if (pos <= ja->cur_idx)
                        ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

                if (c)
                        spin_unlock(&c->journal.lock);

                if (new_fs) {
                        bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_journal,
                                                  ca->mi.bucket_size,
                                                  gc_phase(GC_PHASE_SB),
                                                  0);
                        if (c)
                                percpu_up_read(&c->mark_lock);
                } else {
                        ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_NOFAIL,
                                bch2_trans_mark_metadata_bucket(&trans, ca,
                                                b, BCH_DATA_journal,
                                                ca->mi.bucket_size));

                        bch2_open_bucket_put(c, ob);

                        if (ret)
                                goto err;
                }
        }
err:
        bch2_sb_resize_journal(&ca->disk_sb,
                ja->nr + sizeof(*journal_buckets) / sizeof(u64));
        kfree(new_bucket_seq);
        kfree(new_buckets);

        return ret;
}

/*
 * Allocate more journal space at runtime - not currently making use of it, but
 * the code works:
 */
int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
                                unsigned nr)
{
        struct journal_device *ja = &ca->journal;
        struct closure cl;
        unsigned current_nr;
        int ret;

        closure_init_stack(&cl);

        do {
                struct disk_reservation disk_res = { 0, 0 };

                closure_sync(&cl);

                mutex_lock(&c->sb_lock);
                current_nr = ja->nr;

                /*
                 * note: journal buckets aren't really counted as _sectors_ used yet, so
                 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
                 * when space used goes up without a reservation - but we do need the
                 * reservation to ensure we'll actually be able to allocate:
                 */

                if (bch2_disk_reservation_get(c, &disk_res,
                                              bucket_to_sector(ca, nr - ja->nr), 1, 0)) {
                        mutex_unlock(&c->sb_lock);
                        return -ENOSPC;
                }

                ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);

                bch2_disk_reservation_put(c, &disk_res);

                if (ja->nr != current_nr)
                        bch2_write_super(c);
                mutex_unlock(&c->sb_lock);
        } while (ret == -EAGAIN);

        return ret;
}

int bch2_dev_journal_alloc(struct bch_dev *ca)
{
        unsigned nr;

        if (dynamic_fault("bcachefs:add:journal_alloc"))
                return -ENOMEM;

        /* 1/128th of the device by default: */
        nr = ca->mi.nbuckets >> 7;

        /*
         * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
         * is smaller:
         */
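        /*
         * Worked example (illustrative): a device with 2^20 buckets of 2048
         * sectors (1MB each) starts at nr = 2^20 >> 7 = 8192; the cap is
         * min(1 << 13, (1 << 24) / 2048) = min(8192, 8192), so nr stays at
         * 8192 buckets - an 8GB journal.
         */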
        nr = clamp_t(unsigned, nr,
                     BCH_JOURNAL_BUCKETS_MIN,
                     min(1 << 13,
                         (1 << 24) / ca->mi.bucket_size));

        return __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
}

/* startup/shutdown: */

static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
{
        union journal_res_state state;
        bool ret = false;
        unsigned i;

        spin_lock(&j->lock);
        state = READ_ONCE(j->reservations);
        i = state.idx;

        while (i != state.unwritten_idx) {
                i = (i - 1) & JOURNAL_BUF_MASK;
                if (bch2_bkey_has_device(bkey_i_to_s_c(&j->buf[i].key), dev_idx))
                        ret = true;
        }
        spin_unlock(&j->lock);

        return ret;
}

void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
{
        wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
}

void bch2_fs_journal_stop(struct journal *j)
{
        bch2_journal_flush_all_pins(j);

        wait_event(j->wait, journal_entry_close(j));

        /*
         * Always write a new journal entry, to make sure the clock hands are up
         * to date (and match the superblock)
         */
        bch2_journal_meta(j);

        journal_quiesce(j);

        BUG_ON(!bch2_journal_error(j) &&
               test_bit(JOURNAL_REPLAY_DONE, &j->flags) &&
               (journal_entry_is_open(j) ||
                j->last_empty_seq + 1 != journal_cur_seq(j)));

        cancel_delayed_work_sync(&j->write_work);
        bch2_journal_reclaim_stop(j);
}

int bch2_fs_journal_start(struct journal *j, u64 cur_seq,
                          struct list_head *journal_entries)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_entry_pin_list *p;
        struct journal_replay *i;
        u64 last_seq = cur_seq, nr, seq;

        if (!list_empty(journal_entries))
                last_seq = le64_to_cpu(list_last_entry(journal_entries,
                                struct journal_replay, list)->j.last_seq);

        nr = cur_seq - last_seq;

        if (nr + 1 > j->pin.size) {
                free_fifo(&j->pin);
                init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
                if (!j->pin.data) {
                        bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
                        return -ENOMEM;
                }
        }

        j->replay_journal_seq   = last_seq;
        j->replay_journal_seq_end = cur_seq;
        j->last_seq_ondisk      = last_seq;
        j->pin.front            = last_seq;
        j->pin.back             = cur_seq;
        atomic64_set(&j->seq, cur_seq - 1);

        fifo_for_each_entry_ptr(p, &j->pin, seq)
                journal_pin_list_init(p, 1);

        list_for_each_entry(i, journal_entries, list) {
                unsigned ptr;

                seq = le64_to_cpu(i->j.seq);
                BUG_ON(seq >= cur_seq);

                if (seq < last_seq)
                        continue;

                p = journal_seq_pin(j, seq);

                p->devs.nr = 0;
                for (ptr = 0; ptr < i->nr_ptrs; ptr++)
                        bch2_dev_list_add_dev(&p->devs, i->ptrs[ptr].dev);
        }

        spin_lock(&j->lock);

        set_bit(JOURNAL_STARTED, &j->flags);
        j->last_flush_write = jiffies;

        journal_pin_new_entry(j);

        j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);

        bch2_journal_buf_init(j);

        c->last_bucket_seq_cleanup = journal_cur_seq(j);

        bch2_journal_space_available(j);
        spin_unlock(&j->lock);

        return bch2_journal_reclaim_start(j);
}

/* init/exit: */

void bch2_dev_journal_exit(struct bch_dev *ca)
{
        kfree(ca->journal.bio);
        kfree(ca->journal.buckets);
        kfree(ca->journal.bucket_seq);

        ca->journal.bio         = NULL;
        ca->journal.buckets     = NULL;
        ca->journal.bucket_seq  = NULL;
}

int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
        struct journal_device *ja = &ca->journal;
        struct bch_sb_field_journal *journal_buckets =
                bch2_sb_get_journal(sb);
        unsigned i;

        ja->nr = bch2_nr_journal_buckets(journal_buckets);

        ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
        if (!ja->bucket_seq)
                return -ENOMEM;

        ca->journal.bio = bio_kmalloc(GFP_KERNEL,
                        DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE));
        if (!ca->journal.bio)
                return -ENOMEM;

        ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
        if (!ja->buckets)
                return -ENOMEM;

        for (i = 0; i < ja->nr; i++)
                ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);

        return 0;
}

void bch2_fs_journal_exit(struct journal *j)
{
        unsigned i;

        for (i = 0; i < ARRAY_SIZE(j->buf); i++)
                kvpfree(j->buf[i].data, j->buf[i].buf_size);
        free_fifo(&j->pin);
}

int bch2_fs_journal_init(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        static struct lock_class_key res_key;
        unsigned i;
        int ret = 0;

        pr_verbose_init(c->opts, "");

        spin_lock_init(&j->lock);
        spin_lock_init(&j->err_lock);
        init_waitqueue_head(&j->wait);
        INIT_DELAYED_WORK(&j->write_work, journal_write_work);
        init_waitqueue_head(&j->reclaim_wait);
        init_waitqueue_head(&j->pin_flush_wait);
        mutex_init(&j->reclaim_lock);
        mutex_init(&j->discard_lock);

        lockdep_init_map(&j->res_map, "journal res", &res_key, 0);

        atomic64_set(&j->reservations.counter,
                ((union journal_res_state)
                 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);

        if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL))) {
                ret = -ENOMEM;
                goto out;
        }

        for (i = 0; i < ARRAY_SIZE(j->buf); i++) {
                j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
                j->buf[i].data = kvpmalloc(j->buf[i].buf_size, GFP_KERNEL);
                if (!j->buf[i].data) {
                        ret = -ENOMEM;
                        goto out;
                }
        }

        j->pin.front = j->pin.back = 1;
out:
        pr_verbose_init(c->opts, "ret %i", ret);
        return ret;
}
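
/*
 * Lifecycle sketch (illustrative) tying together the hooks in this file:
 *
 *      bch2_fs_journal_init()  - allocate journal bufs and the pin fifo
 *      bch2_fs_journal_start() - after replay: set up seqs and pin lists
 *      bch2_fs_journal_stop()  - flush pins, write a final entry, quiesce
 *      bch2_fs_journal_exit()  - free journal bufs and the pin fifo
 */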

/* debug: */

void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        union journal_res_state s;
        struct bch_dev *ca;
        unsigned long now = jiffies;
        unsigned i;

        rcu_read_lock();
        s = READ_ONCE(j->reservations);

        pr_buf(out, "active journal entries:\t%llu\n",  fifo_used(&j->pin));
        pr_buf(out, "seq:\t\t\t%llu\n",                 journal_cur_seq(j));
        pr_buf(out, "last_seq:\t\t%llu\n",              journal_last_seq(j));
        pr_buf(out, "last_seq_ondisk:\t%llu\n",         j->last_seq_ondisk);
        pr_buf(out, "flushed_seq_ondisk:\t%llu\n",      j->flushed_seq_ondisk);
        pr_buf(out, "prereserved:\t\t%u/%u\n",          j->prereserved.reserved, j->prereserved.remaining);
        pr_buf(out, "each entry reserved:\t%u\n",       j->entry_u64s_reserved);
        pr_buf(out, "nr flush writes:\t%llu\n",         j->nr_flush_writes);
        pr_buf(out, "nr noflush writes:\t%llu\n",       j->nr_noflush_writes);
        pr_buf(out, "nr direct reclaim:\t%llu\n",       j->nr_direct_reclaim);
        pr_buf(out, "nr background reclaim:\t%llu\n",   j->nr_background_reclaim);
        pr_buf(out, "reclaim kicked:\t\t%u\n",          j->reclaim_kicked);
        pr_buf(out, "reclaim runs in:\t%u ms\n",        time_after(j->next_reclaim, now)
               ? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
        pr_buf(out, "current entry sectors:\t%u\n",     j->cur_entry_sectors);
        pr_buf(out, "current entry error:\t%u\n",       j->cur_entry_error);
        pr_buf(out, "current entry:\t\t");

        switch (s.cur_entry_offset) {
        case JOURNAL_ENTRY_ERROR_VAL:
                pr_buf(out, "error\n");
                break;
        case JOURNAL_ENTRY_CLOSED_VAL:
                pr_buf(out, "closed\n");
                break;
        default:
                pr_buf(out, "%u/%u\n", s.cur_entry_offset, j->cur_entry_u64s);
                break;
        }

        pr_buf(out, "current entry:\t\tidx %u refcount %u\n", s.idx, journal_state_count(s, s.idx));

        i = s.idx;
        while (i != s.unwritten_idx) {
                i = (i - 1) & JOURNAL_BUF_MASK;

                pr_buf(out, "unwritten entry:\tidx %u refcount %u sectors %u\n",
                       i, journal_state_count(s, i), j->buf[i].sectors);
        }

        pr_buf(out,
               "need write:\t\t%i\n"
               "replay done:\t\t%i\n",
               test_bit(JOURNAL_NEED_WRITE,     &j->flags),
               test_bit(JOURNAL_REPLAY_DONE,    &j->flags));

        pr_buf(out, "space:\n");
        pr_buf(out, "\tdiscarded\t%u:%u\n",
               j->space[journal_space_discarded].next_entry,
               j->space[journal_space_discarded].total);
        pr_buf(out, "\tclean ondisk\t%u:%u\n",
               j->space[journal_space_clean_ondisk].next_entry,
               j->space[journal_space_clean_ondisk].total);
        pr_buf(out, "\tclean\t\t%u:%u\n",
               j->space[journal_space_clean].next_entry,
               j->space[journal_space_clean].total);
        pr_buf(out, "\ttotal\t\t%u:%u\n",
               j->space[journal_space_total].next_entry,
               j->space[journal_space_total].total);

        for_each_member_device_rcu(ca, c, i,
                                   &c->rw_devs[BCH_DATA_journal]) {
                struct journal_device *ja = &ca->journal;

                if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
                        continue;

                if (!ja->nr)
                        continue;

                pr_buf(out, "dev %u:\n",                i);
                pr_buf(out, "\tnr\t\t%u\n",             ja->nr);
                pr_buf(out, "\tbucket size\t%u\n",      ca->mi.bucket_size);
                pr_buf(out, "\tavailable\t%u:%u\n",     bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
                pr_buf(out, "\tdiscard_idx\t%u\n",      ja->discard_idx);
                pr_buf(out, "\tdirty_ondisk\t%u (seq %llu)\n", ja->dirty_idx_ondisk,    ja->bucket_seq[ja->dirty_idx_ondisk]);
                pr_buf(out, "\tdirty_idx\t%u (seq %llu)\n", ja->dirty_idx,              ja->bucket_seq[ja->dirty_idx]);
                pr_buf(out, "\tcur_idx\t\t%u (seq %llu)\n", ja->cur_idx,                ja->bucket_seq[ja->cur_idx]);
        }

        rcu_read_unlock();
}

void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
        spin_lock(&j->lock);
        __bch2_journal_debug_to_text(out, j);
        spin_unlock(&j->lock);
}

void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
{
        struct journal_entry_pin_list *pin_list;
        struct journal_entry_pin *pin;
        u64 i;

        spin_lock(&j->lock);
        fifo_for_each_entry_ptr(pin_list, &j->pin, i) {
                pr_buf(out, "%llu: count %u\n",
                       i, atomic_read(&pin_list->count));

                list_for_each_entry(pin, &pin_list->list, list)
                        pr_buf(out, "\t%px %ps\n",
                               pin, pin->flush);

                if (!list_empty(&pin_list->flushed))
                        pr_buf(out, "flushed:\n");

                list_for_each_entry(pin, &pin_list->flushed, list)
                        pr_buf(out, "\t%px %ps\n",
                               pin, pin->flush);
        }
        spin_unlock(&j->lock);
}