libbcachefs/journal.c (bcachefs-tools-debian)
Update bcachefs sources to 26409a8f75 bcachefs: Journal updates to dev usage
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * bcachefs journalling code, for btree insertions
4  *
5  * Copyright 2012 Google, Inc.
6  */
7
8 #include "bcachefs.h"
9 #include "alloc_foreground.h"
10 #include "bkey_methods.h"
11 #include "btree_gc.h"
12 #include "btree_update.h"
13 #include "buckets.h"
14 #include "journal.h"
15 #include "journal_io.h"
16 #include "journal_reclaim.h"
17 #include "journal_seq_blacklist.h"
18 #include "super-io.h"
19
20 #include <trace/events/bcachefs.h>
21
22 static u64 last_unwritten_seq(struct journal *j)
23 {
24         union journal_res_state s = READ_ONCE(j->reservations);
25
26         lockdep_assert_held(&j->lock);
27
28         return journal_cur_seq(j) - ((s.idx - s.unwritten_idx) & JOURNAL_BUF_MASK);
29 }
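/*
 * e.g. (assuming JOURNAL_BUF_MASK == 3): with idx == 1 and unwritten_idx == 3,
 * (1 - 3) & 3 == 2, i.e. two closed entries are still in flight and the oldest
 * unwritten sequence number is journal_cur_seq(j) - 2.
 */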
30
31 static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
32 {
33         return seq >= last_unwritten_seq(j);
34 }
35
36 static bool __journal_entry_is_open(union journal_res_state state)
37 {
38         return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
39 }
40
41 static bool journal_entry_is_open(struct journal *j)
42 {
43         return __journal_entry_is_open(j->reservations);
44 }
45
46 static inline struct journal_buf *
47 journal_seq_to_buf(struct journal *j, u64 seq)
48 {
49         struct journal_buf *buf = NULL;
50
51         EBUG_ON(seq > journal_cur_seq(j));
52         EBUG_ON(seq == journal_cur_seq(j) &&
53                 j->reservations.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL);
54
55         if (journal_seq_unwritten(j, seq)) {
56                 buf = j->buf + (seq & JOURNAL_BUF_MASK);
57                 EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
58         }
59         return buf;
60 }
61
62 static void journal_pin_new_entry(struct journal *j, int count)
63 {
64         struct journal_entry_pin_list *p;
65
66         /*
67          * The fifo_push() needs to happen at the same time as j->seq is
68          * incremented for journal_last_seq() to be calculated correctly
69          */
70         atomic64_inc(&j->seq);
71         p = fifo_push_ref(&j->pin);
72
73         INIT_LIST_HEAD(&p->list);
74         INIT_LIST_HEAD(&p->flushed);
75         atomic_set(&p->count, count);
76         p->devs.nr = 0;
77 }
78
79 static void bch2_journal_buf_init(struct journal *j)
80 {
81         struct journal_buf *buf = journal_cur_buf(j);
82
83         bkey_extent_init(&buf->key);
84         buf->noflush    = false;
85         buf->must_flush = false;
86         buf->separate_flush = false;
87
88         memset(buf->has_inode, 0, sizeof(buf->has_inode));
89
90         memset(buf->data, 0, sizeof(*buf->data));
91         buf->data->seq  = cpu_to_le64(journal_cur_seq(j));
92         buf->data->u64s = 0;
93 }
94
95 void bch2_journal_halt(struct journal *j)
96 {
97         union journal_res_state old, new;
98         u64 v = atomic64_read(&j->reservations.counter);
99
100         do {
101                 old.v = new.v = v;
102                 if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
103                         return;
104
105                 new.cur_entry_offset = JOURNAL_ENTRY_ERROR_VAL;
106         } while ((v = atomic64_cmpxchg(&j->reservations.counter,
107                                        old.v, new.v)) != old.v);
108
109         j->err_seq = journal_cur_seq(j);
110         journal_wake(j);
111         closure_wake_up(&journal_cur_buf(j)->wait);
112 }
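/*
 * The loop above is the lockless update pattern used for j->reservations
 * throughout this file: read the packed state, compute the desired new value,
 * and retry atomic64_cmpxchg() until no other CPU modified the state in
 * between.
 */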
113
114 /* journal entry close/open: */
115
116 void __bch2_journal_buf_put(struct journal *j)
117 {
118         closure_call(&j->io, bch2_journal_write, system_highpri_wq, NULL);
119 }
120
121 /*
122  * Returns true if journal entry is now closed:
123  *
124  * We don't close a journal_buf until the next journal_buf is finished writing,
125  * and can be opened again - this also initializes the next journal_buf:
126  */
127 static bool __journal_entry_close(struct journal *j)
128 {
129         struct bch_fs *c = container_of(j, struct bch_fs, journal);
130         struct journal_buf *buf = journal_cur_buf(j);
131         union journal_res_state old, new;
132         u64 v = atomic64_read(&j->reservations.counter);
133         unsigned sectors;
134
135         lockdep_assert_held(&j->lock);
136
137         do {
138                 old.v = new.v = v;
139                 if (old.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL)
140                         return true;
141
142                 if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL) {
143                         /* this entry will never be written: */
144                         closure_wake_up(&buf->wait);
145                         return true;
146                 }
147
148                 if (!test_bit(JOURNAL_NEED_WRITE, &j->flags)) {
149                         set_bit(JOURNAL_NEED_WRITE, &j->flags);
150                         j->need_write_time = local_clock();
151                 }
152
153                 new.cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL;
154                 new.idx++;
155
156                 if (new.idx == new.unwritten_idx)
157                         return false;
158
159                 BUG_ON(journal_state_count(new, new.idx));
160         } while ((v = atomic64_cmpxchg(&j->reservations.counter,
161                                        old.v, new.v)) != old.v);
162
163         /* Close out old buffer: */
164         buf->data->u64s         = cpu_to_le32(old.cur_entry_offset);
165
166         sectors = vstruct_blocks_plus(buf->data, c->block_bits,
167                                       buf->u64s_reserved) << c->block_bits;
168         BUG_ON(sectors > buf->sectors);
169         buf->sectors = sectors;
170
171         /*
172          * We have to set last_seq here, _before_ opening a new journal entry:
173          *
174          * A thread may replace an old pin with a new pin on its current
175          * journal reservation - the expectation being that the journal will
176          * contain either what the old pin protected or what the new pin
177          * protects.
178          *
179          * After the old pin is dropped journal_last_seq() won't include the old
180          * pin, so we can only write the updated last_seq on the entry that
181          * contains whatever the new pin protects.
182          *
183          * Restated, we can _not_ update last_seq for a given entry if there
184          * could be a newer entry open with reservations/pins that have been
185          * taken against it.
186          *
187          * Hence, we want to update/set last_seq on the current journal entry right
188          * before we open a new one:
189          */
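        /*
         * For example: if a thread dropped a pin on seq 7 and re-took it
         * against the still-open entry 10, writing an advanced last_seq into
         * entry 9 would let recovery treat entry 7 as no longer needed even
         * though entry 10, which now protects that data, might never reach
         * disk; so last_seq is only bumped on the entry the current pins were
         * taken against (or a later one).
         */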
190         buf->data->last_seq     = cpu_to_le64(journal_last_seq(j));
191
192         __bch2_journal_pin_put(j, le64_to_cpu(buf->data->seq));
193
194         /* Initialize new buffer: */
195         journal_pin_new_entry(j, 1);
196
197         bch2_journal_buf_init(j);
198
199         cancel_delayed_work(&j->write_work);
200         clear_bit(JOURNAL_NEED_WRITE, &j->flags);
201
202         bch2_journal_space_available(j);
203
204         bch2_journal_buf_put(j, old.idx);
205         return true;
206 }
207
208 static bool journal_entry_want_write(struct journal *j)
209 {
210         union journal_res_state s = READ_ONCE(j->reservations);
211         bool ret = false;
212
213         /*
214          * Don't close it yet if we already have a write in flight, but do set
215          * NEED_WRITE:
216          */
217         if (s.idx != s.unwritten_idx)
218                 set_bit(JOURNAL_NEED_WRITE, &j->flags);
219         else
220                 ret = __journal_entry_close(j);
221
222         return ret;
223 }
224
225 static bool journal_entry_close(struct journal *j)
226 {
227         bool ret;
228
229         spin_lock(&j->lock);
230         ret = journal_entry_want_write(j);
231         spin_unlock(&j->lock);
232
233         return ret;
234 }
235
236 /*
237  * should _only_ be called from journal_res_get() - when we actually want a
238  * journal reservation - journal entry is open means journal is dirty:
239  *
240  * returns:
241  * 0:           success
242  * cur_entry_journal_full:      journal currently full, must invoke reclaim
243  * cur_entry_blocked:           journal blocked, must wait
244  * cur_entry_insufficient_devices: journal error or insufficient rw devices
245  */
246 static int journal_entry_open(struct journal *j)
247 {
248         struct bch_fs *c = container_of(j, struct bch_fs, journal);
249         struct journal_buf *buf = journal_cur_buf(j);
250         union journal_res_state old, new;
251         int u64s;
252         u64 v;
253
254         BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
255
256         lockdep_assert_held(&j->lock);
257         BUG_ON(journal_entry_is_open(j));
258
259         if (j->blocked)
260                 return cur_entry_blocked;
261
262         if (j->cur_entry_error)
263                 return j->cur_entry_error;
264
265         BUG_ON(!j->cur_entry_sectors);
266
267         buf->u64s_reserved      = j->entry_u64s_reserved;
268         buf->disk_sectors       = j->cur_entry_sectors;
269         buf->sectors            = min(buf->disk_sectors, buf->buf_size >> 9);
270
271         u64s = (int) (buf->sectors << 9) / sizeof(u64) -
272                 journal_entry_overhead(j);
273         u64s  = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);
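        /*
         * e.g. an 8 sector (4096 byte) buffer yields 4096 / sizeof(u64) == 512
         * u64s before journal_entry_overhead() is subtracted.
         */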
274
275         if (u64s <= le32_to_cpu(buf->data->u64s))
276                 return cur_entry_journal_full;
277
278         /*
279          * Must be set before marking the journal entry as open:
280          */
281         j->cur_entry_u64s = u64s;
282
283         v = atomic64_read(&j->reservations.counter);
284         do {
285                 old.v = new.v = v;
286
287                 if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
288                         return cur_entry_insufficient_devices;
289
290                 /* Handle any already added entries */
291                 new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
292
293                 EBUG_ON(journal_state_count(new, new.idx));
294                 journal_state_inc(&new);
295         } while ((v = atomic64_cmpxchg(&j->reservations.counter,
296                                        old.v, new.v)) != old.v);
297
298         if (j->res_get_blocked_start)
299                 bch2_time_stats_update(j->blocked_time,
300                                        j->res_get_blocked_start);
301         j->res_get_blocked_start = 0;
302
303         mod_delayed_work(system_freezable_wq,
304                          &j->write_work,
305                          msecs_to_jiffies(j->write_delay_ms));
306         journal_wake(j);
307         return 0;
308 }
309
310 static bool journal_quiesced(struct journal *j)
311 {
312         union journal_res_state s = READ_ONCE(j->reservations);
313         bool ret = s.idx == s.unwritten_idx && !__journal_entry_is_open(s);
314
315         if (!ret)
316                 journal_entry_close(j);
317         return ret;
318 }
319
320 static void journal_quiesce(struct journal *j)
321 {
322         wait_event(j->wait, journal_quiesced(j));
323 }
324
325 static void journal_write_work(struct work_struct *work)
326 {
327         struct journal *j = container_of(work, struct journal, write_work.work);
328
329         journal_entry_close(j);
330 }
331
332 /*
333  * Given an inode number, if that inode number has data in the journal that
334  * hasn't yet been flushed, return the journal sequence number that needs to be
335  * flushed:
336  */
337 u64 bch2_inode_journal_seq(struct journal *j, u64 inode)
338 {
339         size_t h = hash_64(inode, ilog2(sizeof(j->buf[0].has_inode) * 8));
340         union journal_res_state s;
341         unsigned i;
342         u64 seq;
343
344
345         spin_lock(&j->lock);
346         seq = journal_cur_seq(j);
347         s = READ_ONCE(j->reservations);
348         i = s.idx;
349
350         while (1) {
351                 if (test_bit(h, j->buf[i].has_inode))
352                         goto out;
353
354                 if (i == s.unwritten_idx)
355                         break;
356
357                 i = (i - 1) & JOURNAL_BUF_MASK;
358                 seq--;
359         }
360
361         seq = 0;
362 out:
363         spin_unlock(&j->lock);
364
365         return seq;
366 }
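/*
 * has_inode is a small per-buffer hash bitmap: a hash collision can only cause
 * a spurious flush for an unrelated inode, not a missed one, so this lookup
 * stays cheap without being unsafe.
 */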
367
368 void bch2_journal_set_has_inum(struct journal *j, u64 inode, u64 seq)
369 {
370         size_t h = hash_64(inode, ilog2(sizeof(j->buf[0].has_inode) * 8));
371         struct journal_buf *buf;
372
373         spin_lock(&j->lock);
374
375         if ((buf = journal_seq_to_buf(j, seq)))
376                 set_bit(h, buf->has_inode);
377
378         spin_unlock(&j->lock);
379 }
380
381 static int __journal_res_get(struct journal *j, struct journal_res *res,
382                              unsigned flags)
383 {
384         struct bch_fs *c = container_of(j, struct bch_fs, journal);
385         struct journal_buf *buf;
386         bool can_discard;
387         int ret;
388 retry:
389         if (journal_res_get_fast(j, res, flags))
390                 return 0;
391
392         if (bch2_journal_error(j))
393                 return -EROFS;
394
395         spin_lock(&j->lock);
396
397         /*
398          * Recheck after taking the lock, so we don't race with another thread
399          * that just did journal_entry_open(), and then call
400          * journal_entry_close() unnecessarily
401          */
402         if (journal_res_get_fast(j, res, flags)) {
403                 spin_unlock(&j->lock);
404                 return 0;
405         }
406
407         if (!(flags & JOURNAL_RES_GET_RESERVED) &&
408             !test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags)) {
409                 /*
410                  * Don't want to close current journal entry, just need to
411                  * invoke reclaim:
412                  */
413                 ret = cur_entry_journal_full;
414                 goto unlock;
415         }
416
417         /*
418          * If we couldn't get a reservation because the current buf filled up,
419          * and we had room for a bigger entry on disk, signal that we want to
420          * realloc the journal bufs:
421          */
422         buf = journal_cur_buf(j);
423         if (journal_entry_is_open(j) &&
424             buf->buf_size >> 9 < buf->disk_sectors &&
425             buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
426                 j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);
427
428         if (journal_entry_is_open(j) &&
429             !__journal_entry_close(j)) {
430                 /*
431                  * We failed to get a reservation on the current open journal
432                  * entry because it's full, and we can't close it because
433                  * there's still a previous one in flight:
434                  */
435                 trace_journal_entry_full(c);
436                 ret = cur_entry_blocked;
437         } else {
438                 ret = journal_entry_open(j);
439         }
440 unlock:
441         if ((ret && ret != cur_entry_insufficient_devices) &&
442             !j->res_get_blocked_start) {
443                 j->res_get_blocked_start = local_clock() ?: 1;
444                 trace_journal_full(c);
445         }
446
447         can_discard = j->can_discard;
448         spin_unlock(&j->lock);
449
450         if (!ret)
451                 goto retry;
452
453         /*
454          * Journal is full - can't rely on reclaim from work item due to
455          * freezing:
456          */
457         if ((ret == cur_entry_journal_full ||
458              ret == cur_entry_journal_pin_full) &&
459             !(flags & JOURNAL_RES_GET_NONBLOCK)) {
460                 if (can_discard) {
461                         bch2_journal_do_discards(j);
462                         goto retry;
463                 }
464
465                 if (mutex_trylock(&j->reclaim_lock)) {
466                         bch2_journal_reclaim(j);
467                         mutex_unlock(&j->reclaim_lock);
468                 }
469         }
470
471         return ret == cur_entry_insufficient_devices ? -EROFS : -EAGAIN;
472 }
473
474 /*
475  * Essentially the entry function to the journaling code. When bcachefs is doing
476  * a btree insert, it calls this function to get the current journal write.
477  * Journal write is the structure used to set up journal writes. The calling
478  * function will then add its keys to the structure, queuing them for the next
479  * write.
480  *
481  * To ensure forward progress, the current task must not be holding any
482  * btree node write locks.
483  */
484 int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
485                                   unsigned flags)
486 {
487         int ret;
488
489         closure_wait_event(&j->async_wait,
490                    (ret = __journal_res_get(j, res, flags)) != -EAGAIN ||
491                    (flags & JOURNAL_RES_GET_NONBLOCK));
492         return ret;
493 }
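/*
 * Typical usage (sketch only; the real callers live in the btree update path,
 * not in this file):
 *
 *	struct journal_res res = { 0 };
 *	int ret = bch2_journal_res_get(j, &res, jset_u64s(u64s), 0);
 *	if (ret)
 *		return ret;
 *	... copy keys into the space the reservation points at ...
 *	bch2_journal_res_put(j, &res);
 */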
494
495 /* journal_preres: */
496
497 static bool journal_preres_available(struct journal *j,
498                                      struct journal_preres *res,
499                                      unsigned new_u64s,
500                                      unsigned flags)
501 {
502         bool ret = bch2_journal_preres_get_fast(j, res, new_u64s, flags);
503
504         if (!ret && mutex_trylock(&j->reclaim_lock)) {
505                 bch2_journal_reclaim(j);
506                 mutex_unlock(&j->reclaim_lock);
507         }
508
509         return ret;
510 }
511
512 int __bch2_journal_preres_get(struct journal *j,
513                               struct journal_preres *res,
514                               unsigned new_u64s,
515                               unsigned flags)
516 {
517         int ret;
518
519         closure_wait_event(&j->preres_wait,
520                    (ret = bch2_journal_error(j)) ||
521                    journal_preres_available(j, res, new_u64s, flags));
522         return ret;
523 }
524
525 /* journal_entry_res: */
526
527 void bch2_journal_entry_res_resize(struct journal *j,
528                                    struct journal_entry_res *res,
529                                    unsigned new_u64s)
530 {
531         union journal_res_state state;
532         int d = new_u64s - res->u64s;
533
534         spin_lock(&j->lock);
535
536         j->entry_u64s_reserved += d;
537         if (d <= 0)
538                 goto out;
539
540         j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
541         smp_mb();
542         state = READ_ONCE(j->reservations);
543
544         if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
545             state.cur_entry_offset > j->cur_entry_u64s) {
546                 j->cur_entry_u64s += d;
547                 /*
548                  * Not enough room in current journal entry, have to flush it:
549                  */
550                 __journal_entry_close(j);
551         } else {
552                 journal_cur_buf(j)->u64s_reserved += d;
553         }
554 out:
555         spin_unlock(&j->lock);
556         res->u64s += d;
557 }
558
559 /* journal flushing: */
560
561 /**
562  * bch2_journal_flush_seq_async - wait for a journal entry to be written
563  *
564  * like bch2_journal_wait_on_seq, except that it triggers a write immediately if
565  * necessary
566  */
567 int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
568                                  struct closure *parent)
569 {
570         struct journal_buf *buf;
571         int ret = 0;
572
573         if (seq <= j->flushed_seq_ondisk)
574                 return 1;
575
576         spin_lock(&j->lock);
577
578         /* Recheck under lock: */
579         if (j->err_seq && seq >= j->err_seq) {
580                 ret = -EIO;
581                 goto out;
582         }
583
584         if (seq <= j->flushed_seq_ondisk) {
585                 ret = 1;
586                 goto out;
587         }
588
589         /* if seq was written, but not flushed - flush a newer one instead */
590         seq = max(seq, last_unwritten_seq(j));
591
592 recheck_need_open:
593         if (seq == journal_cur_seq(j) && !journal_entry_is_open(j)) {
594                 struct journal_res res = { 0 };
595
596                 spin_unlock(&j->lock);
597
598                 ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
599                 if (ret)
600                         return ret;
601
602                 seq = res.seq;
603                 buf = j->buf + (seq & JOURNAL_BUF_MASK);
604                 buf->must_flush = true;
605                 set_bit(JOURNAL_NEED_WRITE, &j->flags);
606
607                 if (parent && !closure_wait(&buf->wait, parent))
608                         BUG();
609
610                 bch2_journal_res_put(j, &res);
611
612                 spin_lock(&j->lock);
613                 goto want_write;
614         }
615
616         /*
617          * if write was kicked off without a flush, flush the next sequence
618          * number instead
619          */
620         buf = journal_seq_to_buf(j, seq);
621         if (buf->noflush) {
622                 seq++;
623                 goto recheck_need_open;
624         }
625
626         buf->must_flush = true;
627
628         if (parent && !closure_wait(&buf->wait, parent))
629                 BUG();
630 want_write:
631         if (seq == journal_cur_seq(j))
632                 journal_entry_want_write(j);
633 out:
634         spin_unlock(&j->lock);
635         return ret;
636 }
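/*
 * To summarize the returns above: 1 if @seq was already flushed to disk, 0 if
 * a flush is now pending (with @parent, if non NULL, added to the entry's
 * wait list), or a negative error code.
 */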
637
638 int bch2_journal_flush_seq(struct journal *j, u64 seq)
639 {
640         u64 start_time = local_clock();
641         int ret, ret2;
642
643         ret = wait_event_killable(j->wait, (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)));
644
645         bch2_time_stats_update(j->flush_seq_time, start_time);
646
647         return ret ?: ret2 < 0 ? ret2 : 0;
648 }
649
650 int bch2_journal_meta(struct journal *j)
651 {
652         struct journal_res res;
653         int ret;
654
655         memset(&res, 0, sizeof(res));
656
657         ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
658         if (ret)
659                 return ret;
660
661         bch2_journal_res_put(j, &res);
662
663         return bch2_journal_flush_seq(j, res.seq);
664 }
665
666 /*
667  * bch2_journal_flush_async - if there is an open journal entry, or a journal
668  * write still in flight, write it and wait for the write to complete
669  */
670 void bch2_journal_flush_async(struct journal *j, struct closure *parent)
671 {
672         u64 seq, journal_seq;
673
674         spin_lock(&j->lock);
675         journal_seq = journal_cur_seq(j);
676
677         if (journal_entry_is_open(j)) {
678                 seq = journal_seq;
679         } else if (journal_seq) {
680                 seq = journal_seq - 1;
681         } else {
682                 spin_unlock(&j->lock);
683                 return;
684         }
685         spin_unlock(&j->lock);
686
687         bch2_journal_flush_seq_async(j, seq, parent);
688 }
689
690 int bch2_journal_flush(struct journal *j)
691 {
692         u64 seq, journal_seq;
693
694         spin_lock(&j->lock);
695         journal_seq = journal_cur_seq(j);
696
697         if (journal_entry_is_open(j)) {
698                 seq = journal_seq;
699         } else if (journal_seq) {
700                 seq = journal_seq - 1;
701         } else {
702                 spin_unlock(&j->lock);
703                 return 0;
704         }
705         spin_unlock(&j->lock);
706
707         return bch2_journal_flush_seq(j, seq);
708 }
709
710 /* block/unblock the journal: */
711
712 void bch2_journal_unblock(struct journal *j)
713 {
714         spin_lock(&j->lock);
715         j->blocked--;
716         spin_unlock(&j->lock);
717
718         journal_wake(j);
719 }
720
721 void bch2_journal_block(struct journal *j)
722 {
723         spin_lock(&j->lock);
724         j->blocked++;
725         spin_unlock(&j->lock);
726
727         journal_quiesce(j);
728 }
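/*
 * j->blocked is a count, so bch2_journal_block()/bch2_journal_unblock() calls
 * nest; while it is nonzero, journal_entry_open() returns cur_entry_blocked
 * and no new journal entries are opened.
 */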
729
730 /* allocate journal on a device: */
731
732 static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
733                                          bool new_fs, struct closure *cl)
734 {
735         struct bch_fs *c = ca->fs;
736         struct journal_device *ja = &ca->journal;
737         struct bch_sb_field_journal *journal_buckets;
738         u64 *new_bucket_seq = NULL, *new_buckets = NULL;
739         int ret = 0;
740
741         /* don't handle reducing nr of buckets yet: */
742         if (nr <= ja->nr)
743                 return 0;
744
745         new_buckets     = kzalloc(nr * sizeof(u64), GFP_KERNEL);
746         new_bucket_seq  = kzalloc(nr * sizeof(u64), GFP_KERNEL);
747         if (!new_buckets || !new_bucket_seq) {
748                 ret = -ENOMEM;
749                 goto err;
750         }
751
752         journal_buckets = bch2_sb_resize_journal(&ca->disk_sb,
753                                         nr + sizeof(*journal_buckets) / sizeof(u64));
754         if (!journal_buckets) {
755                 ret = -ENOSPC;
756                 goto err;
757         }
758
759         /*
760          * We may be called from the device add path, before the new device has
761          * actually been added to the running filesystem:
762          */
763         if (c)
764                 spin_lock(&c->journal.lock);
765
766         memcpy(new_buckets,     ja->buckets,    ja->nr * sizeof(u64));
767         memcpy(new_bucket_seq,  ja->bucket_seq, ja->nr * sizeof(u64));
768         swap(new_buckets,       ja->buckets);
769         swap(new_bucket_seq,    ja->bucket_seq);
770
771         if (c)
772                 spin_unlock(&c->journal.lock);
773
774         while (ja->nr < nr) {
775                 struct open_bucket *ob = NULL;
776                 unsigned pos;
777                 long bucket;
778
779                 if (new_fs) {
780                         bucket = bch2_bucket_alloc_new_fs(ca);
781                         if (bucket < 0) {
782                                 ret = -ENOSPC;
783                                 goto err;
784                         }
785                 } else {
786                         rcu_read_lock();
787                         ob = bch2_bucket_alloc(c, ca, RESERVE_NONE,
788                                                false, cl);
789                         rcu_read_unlock();
790                         if (IS_ERR(ob)) {
791                                 ret = cl ? -EAGAIN : -ENOSPC;
792                                 goto err;
793                         }
794
795                         bucket = sector_to_bucket(ca, ob->ptr.offset);
796                 }
797
798                 if (c) {
799                         percpu_down_read(&c->mark_lock);
800                         spin_lock(&c->journal.lock);
801                 }
802
803                 /*
804                  * XXX
805                  * For resize at runtime, we should be writing the new
806                  * superblock before inserting into the journal array
807                  */
808
809                 pos = ja->nr ? (ja->cur_idx + 1) % ja->nr : 0;
810                 __array_insert_item(ja->buckets,                ja->nr, pos);
811                 __array_insert_item(ja->bucket_seq,             ja->nr, pos);
812                 __array_insert_item(journal_buckets->buckets,   ja->nr, pos);
813                 ja->nr++;
814
815                 ja->buckets[pos] = bucket;
816                 ja->bucket_seq[pos] = 0;
817                 journal_buckets->buckets[pos] = cpu_to_le64(bucket);
818
819                 if (pos <= ja->discard_idx)
820                         ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
821                 if (pos <= ja->dirty_idx_ondisk)
822                         ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;
823                 if (pos <= ja->dirty_idx)
824                         ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;
825                 if (pos <= ja->cur_idx)
826                         ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
827
828                 if (!c || new_fs)
829                         bch2_mark_metadata_bucket(c, ca, bucket, BCH_DATA_journal,
830                                                   ca->mi.bucket_size,
831                                                   gc_phase(GC_PHASE_SB),
832                                                   0);
833
834                 if (c) {
835                         spin_unlock(&c->journal.lock);
836                         percpu_up_read(&c->mark_lock);
837                 }
838
839                 if (c && !new_fs)
840                         ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_NOFAIL,
841                                 bch2_trans_mark_metadata_bucket(&trans, NULL, ca,
842                                                 bucket, BCH_DATA_journal,
843                                                 ca->mi.bucket_size));
844
845                 if (!new_fs)
846                         bch2_open_bucket_put(c, ob);
847
848                 if (ret)
849                         goto err;
850         }
851 err:
852         bch2_sb_resize_journal(&ca->disk_sb,
853                 ja->nr + sizeof(*journal_buckets) / sizeof(u64));
854         kfree(new_bucket_seq);
855         kfree(new_buckets);
856
857         return ret;
858 }
859
860 /*
861  * Allocate more journal space at runtime - not currently making use of it, but
862  * the code works:
863  */
864 int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
865                                 unsigned nr)
866 {
867         struct journal_device *ja = &ca->journal;
868         struct closure cl;
869         unsigned current_nr;
870         int ret;
871
872         closure_init_stack(&cl);
873
874         do {
875                 struct disk_reservation disk_res = { 0, 0 };
876
877                 closure_sync(&cl);
878
879                 mutex_lock(&c->sb_lock);
880                 current_nr = ja->nr;
881
882                 /*
883                  * note: journal buckets aren't really counted as _sectors_ used yet, so
884                  * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
885                  * when space used goes up without a reservation - but we do need the
886                  * reservation to ensure we'll actually be able to allocate:
887                  */
888
889                 if (bch2_disk_reservation_get(c, &disk_res,
890                                               bucket_to_sector(ca, nr - ja->nr), 1, 0)) {
891                         mutex_unlock(&c->sb_lock);
892                         return -ENOSPC;
893                 }
894
895                 ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);
896
897                 bch2_disk_reservation_put(c, &disk_res);
898
899                 if (ja->nr != current_nr)
900                         bch2_write_super(c);
901                 mutex_unlock(&c->sb_lock);
902         } while (ret == -EAGAIN);
903
904         return ret;
905 }
906
907 int bch2_dev_journal_alloc(struct bch_dev *ca)
908 {
909         unsigned nr;
910
911         if (dynamic_fault("bcachefs:add:journal_alloc"))
912                 return -ENOMEM;
913
914         /*
915          * default to 1/256th of the device, clamped to at least BCH_JOURNAL_BUCKETS_MIN
916          * and to at most 1024 buckets or 512MB (in sectors), whichever is smaller:
917          */
918         nr = clamp_t(unsigned, ca->mi.nbuckets >> 8,
919                      BCH_JOURNAL_BUCKETS_MIN,
920                      min(1 << 10,
921                          (1 << 20) / ca->mi.bucket_size));
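        /*
         * e.g. with 2048 sector (1MB) buckets, (1 << 20) / 2048 == 512, so the
         * journal is capped at 512 buckets (512MB) rather than at the 1024
         * bucket limit.
         */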
922
923         return __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
924 }
925
926 /* startup/shutdown: */
927
928 static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
929 {
930         union journal_res_state state;
931         bool ret = false;
932         unsigned i;
933
934         spin_lock(&j->lock);
935         state = READ_ONCE(j->reservations);
936         i = state.idx;
937
938         while (i != state.unwritten_idx) {
939                 i = (i - 1) & JOURNAL_BUF_MASK;
940                 if (bch2_bkey_has_device(bkey_i_to_s_c(&j->buf[i].key), dev_idx))
941                         ret = true;
942         }
943         spin_unlock(&j->lock);
944
945         return ret;
946 }
947
948 void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
949 {
950         wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
951 }
952
953 void bch2_fs_journal_stop(struct journal *j)
954 {
955         bch2_journal_flush_all_pins(j);
956
957         wait_event(j->wait, journal_entry_close(j));
958
959         /*
960          * Always write a new journal entry, to make sure the clock hands are up
961          * to date (and match the superblock)
962          */
963         bch2_journal_meta(j);
964
965         journal_quiesce(j);
966
967         BUG_ON(!bch2_journal_error(j) &&
968                test_bit(JOURNAL_REPLAY_DONE, &j->flags) &&
969                (journal_entry_is_open(j) ||
970                 j->last_empty_seq + 1 != journal_cur_seq(j)));
971
972         cancel_delayed_work_sync(&j->write_work);
973         bch2_journal_reclaim_stop(j);
974 }
975
976 int bch2_fs_journal_start(struct journal *j, u64 cur_seq,
977                           struct list_head *journal_entries)
978 {
979         struct bch_fs *c = container_of(j, struct bch_fs, journal);
980         struct journal_entry_pin_list *p;
981         struct journal_replay *i;
982         u64 last_seq = cur_seq, nr, seq;
983
984         if (!list_empty(journal_entries))
985                 last_seq = le64_to_cpu(list_last_entry(journal_entries,
986                                 struct journal_replay, list)->j.last_seq);
987
988         nr = cur_seq - last_seq;
989
990         if (nr + 1 > j->pin.size) {
991                 free_fifo(&j->pin);
992                 init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
993                 if (!j->pin.data) {
994                         bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
995                         return -ENOMEM;
996                 }
997         }
998
999         j->replay_journal_seq   = last_seq;
1000         j->replay_journal_seq_end = cur_seq;
1001         j->last_seq_ondisk      = last_seq;
1002         j->pin.front            = last_seq;
1003         j->pin.back             = cur_seq;
1004         atomic64_set(&j->seq, cur_seq - 1);
1005
1006         fifo_for_each_entry_ptr(p, &j->pin, seq) {
1007                 INIT_LIST_HEAD(&p->list);
1008                 INIT_LIST_HEAD(&p->flushed);
1009                 atomic_set(&p->count, 1);
1010                 p->devs.nr = 0;
1011         }
1012
1013         list_for_each_entry(i, journal_entries, list) {
1014                 unsigned ptr;
1015
1016                 seq = le64_to_cpu(i->j.seq);
1017                 BUG_ON(seq >= cur_seq);
1018
1019                 if (seq < last_seq)
1020                         continue;
1021
1022                 p = journal_seq_pin(j, seq);
1023
1024                 p->devs.nr = 0;
1025                 for (ptr = 0; ptr < i->nr_ptrs; ptr++)
1026                         bch2_dev_list_add_dev(&p->devs, i->ptrs[ptr].dev);
1027         }
1028
1029         spin_lock(&j->lock);
1030
1031         set_bit(JOURNAL_STARTED, &j->flags);
1032         j->last_flush_write = jiffies;
1033
1034         journal_pin_new_entry(j, 1);
1035
1036         j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
1037
1038         bch2_journal_buf_init(j);
1039
1040         c->last_bucket_seq_cleanup = journal_cur_seq(j);
1041
1042         bch2_journal_space_available(j);
1043         spin_unlock(&j->lock);
1044
1045         return 0;
1046 }
1047
1048 /* init/exit: */
1049
1050 void bch2_dev_journal_exit(struct bch_dev *ca)
1051 {
1052         kfree(ca->journal.bio);
1053         kfree(ca->journal.buckets);
1054         kfree(ca->journal.bucket_seq);
1055
1056         ca->journal.bio         = NULL;
1057         ca->journal.buckets     = NULL;
1058         ca->journal.bucket_seq  = NULL;
1059 }
1060
1061 int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
1062 {
1063         struct journal_device *ja = &ca->journal;
1064         struct bch_sb_field_journal *journal_buckets =
1065                 bch2_sb_get_journal(sb);
1066         unsigned i;
1067
1068         ja->nr = bch2_nr_journal_buckets(journal_buckets);
1069
1070         ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
1071         if (!ja->bucket_seq)
1072                 return -ENOMEM;
1073
1074         ca->journal.bio = bio_kmalloc(GFP_KERNEL,
1075                         DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE));
1076         if (!ca->journal.bio)
1077                 return -ENOMEM;
1078
1079         ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
1080         if (!ja->buckets)
1081                 return -ENOMEM;
1082
1083         for (i = 0; i < ja->nr; i++)
1084                 ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
1085
1086         return 0;
1087 }
1088
1089 void bch2_fs_journal_exit(struct journal *j)
1090 {
1091         unsigned i;
1092
1093         for (i = 0; i < ARRAY_SIZE(j->buf); i++)
1094                 kvpfree(j->buf[i].data, j->buf[i].buf_size);
1095         free_fifo(&j->pin);
1096 }
1097
1098 int bch2_fs_journal_init(struct journal *j)
1099 {
1100         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1101         static struct lock_class_key res_key;
1102         unsigned i;
1103         int ret = 0;
1104
1105         pr_verbose_init(c->opts, "");
1106
1107         spin_lock_init(&j->lock);
1108         spin_lock_init(&j->err_lock);
1109         init_waitqueue_head(&j->wait);
1110         INIT_DELAYED_WORK(&j->write_work, journal_write_work);
1111         init_waitqueue_head(&j->pin_flush_wait);
1112         mutex_init(&j->reclaim_lock);
1113         mutex_init(&j->discard_lock);
1114
1115         lockdep_init_map(&j->res_map, "journal res", &res_key, 0);
1116
1117         j->write_delay_ms       = 1000;
1118         j->reclaim_delay_ms     = 100;
1119
1120         /* Btree roots: */
1121         j->entry_u64s_reserved +=
1122                 BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_BTREE_PTR_U64s_MAX);
1123
1124         j->entry_u64s_reserved +=
1125                 2 * (sizeof(struct jset_entry_clock) / sizeof(u64));
1126
1127         atomic64_set(&j->reservations.counter,
1128                 ((union journal_res_state)
1129                  { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);
1130
1131         if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL))) {
1132                 ret = -ENOMEM;
1133                 goto out;
1134         }
1135
1136         for (i = 0; i < ARRAY_SIZE(j->buf); i++) {
1137                 j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
1138                 j->buf[i].data = kvpmalloc(j->buf[i].buf_size, GFP_KERNEL);
1139                 if (!j->buf[i].data) {
1140                         ret = -ENOMEM;
1141                         goto out;
1142                 }
1143         }
1144
1145         j->pin.front = j->pin.back = 1;
1146 out:
1147         pr_verbose_init(c->opts, "ret %i", ret);
1148         return ret;
1149 }
1150
1151 /* debug: */
1152
1153 void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
1154 {
1155         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1156         union journal_res_state s;
1157         struct bch_dev *ca;
1158         unsigned i;
1159
1160         rcu_read_lock();
1161         s = READ_ONCE(j->reservations);
1162
1163         pr_buf(out,
1164                "active journal entries:\t%llu\n"
1165                "seq:\t\t\t%llu\n"
1166                "last_seq:\t\t%llu\n"
1167                "last_seq_ondisk:\t%llu\n"
1168                "prereserved:\t\t%u/%u\n"
1169                "nr flush writes:\t%llu\n"
1170                "nr noflush writes:\t%llu\n"
1171                "nr direct reclaim:\t%llu\n"
1172                "nr background reclaim:\t%llu\n"
1173                "current entry sectors:\t%u\n"
1174                "current entry error:\t%u\n"
1175                "current entry:\t\t",
1176                fifo_used(&j->pin),
1177                journal_cur_seq(j),
1178                journal_last_seq(j),
1179                j->last_seq_ondisk,
1180                j->prereserved.reserved,
1181                j->prereserved.remaining,
1182                j->nr_flush_writes,
1183                j->nr_noflush_writes,
1184                j->nr_direct_reclaim,
1185                j->nr_background_reclaim,
1186                j->cur_entry_sectors,
1187                j->cur_entry_error);
1188
1189         switch (s.cur_entry_offset) {
1190         case JOURNAL_ENTRY_ERROR_VAL:
1191                 pr_buf(out, "error\n");
1192                 break;
1193         case JOURNAL_ENTRY_CLOSED_VAL:
1194                 pr_buf(out, "closed\n");
1195                 break;
1196         default:
1197                 pr_buf(out, "%u/%u\n",
1198                        s.cur_entry_offset,
1199                        j->cur_entry_u64s);
1200                 break;
1201         }
1202
1203         pr_buf(out,
1204                "current entry:\t\tidx %u refcount %u\n",
1205                s.idx, journal_state_count(s, s.idx));
1206
1207         i = s.idx;
1208         while (i != s.unwritten_idx) {
1209                 i = (i - 1) & JOURNAL_BUF_MASK;
1210
1211                 pr_buf(out, "unwritten entry:\tidx %u refcount %u sectors %u\n",
1212                        i, journal_state_count(s, i), j->buf[i].sectors);
1213         }
1214
1215         pr_buf(out,
1216                "need write:\t\t%i\n"
1217                "replay done:\t\t%i\n",
1218                test_bit(JOURNAL_NEED_WRITE,     &j->flags),
1219                test_bit(JOURNAL_REPLAY_DONE,    &j->flags));
1220
1221         pr_buf(out, "space:\n");
1222         pr_buf(out, "\tdiscarded\t%u:%u\n",
1223                j->space[journal_space_discarded].next_entry,
1224                j->space[journal_space_discarded].total);
1225         pr_buf(out, "\tclean ondisk\t%u:%u\n",
1226                j->space[journal_space_clean_ondisk].next_entry,
1227                j->space[journal_space_clean_ondisk].total);
1228         pr_buf(out, "\tclean\t\t%u:%u\n",
1229                j->space[journal_space_clean].next_entry,
1230                j->space[journal_space_clean].total);
1231         pr_buf(out, "\ttotal\t\t%u:%u\n",
1232                j->space[journal_space_total].next_entry,
1233                j->space[journal_space_total].total);
1234
1235         for_each_member_device_rcu(ca, c, i,
1236                                    &c->rw_devs[BCH_DATA_journal]) {
1237                 struct journal_device *ja = &ca->journal;
1238
1239                 if (!ja->nr)
1240                         continue;
1241
1242                 pr_buf(out,
1243                        "dev %u:\n"
1244                        "\tnr\t\t%u\n"
1245                        "\tbucket size\t%u\n"
1246                        "\tavailable\t%u:%u\n"
1247                        "\tdiscard_idx\t%u\n"
1248                        "\tdirty_ondisk\t%u (seq %llu)\n"
1249                        "\tdirty_idx\t%u (seq %llu)\n"
1250                        "\tcur_idx\t\t%u (seq %llu)\n",
1251                        i, ja->nr, ca->mi.bucket_size,
1252                        bch2_journal_dev_buckets_available(j, ja, journal_space_discarded),
1253                        ja->sectors_free,
1254                        ja->discard_idx,
1255                        ja->dirty_idx_ondisk,    ja->bucket_seq[ja->dirty_idx_ondisk],
1256                        ja->dirty_idx,           ja->bucket_seq[ja->dirty_idx],
1257                        ja->cur_idx,             ja->bucket_seq[ja->cur_idx]);
1258         }
1259
1260         rcu_read_unlock();
1261 }
1262
1263 void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
1264 {
1265         spin_lock(&j->lock);
1266         __bch2_journal_debug_to_text(out, j);
1267         spin_unlock(&j->lock);
1268 }
1269
1270 void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
1271 {
1272         struct journal_entry_pin_list *pin_list;
1273         struct journal_entry_pin *pin;
1274         u64 i;
1275
1276         spin_lock(&j->lock);
1277         fifo_for_each_entry_ptr(pin_list, &j->pin, i) {
1278                 pr_buf(out, "%llu: count %u\n",
1279                        i, atomic_read(&pin_list->count));
1280
1281                 list_for_each_entry(pin, &pin_list->list, list)
1282                         pr_buf(out, "\t%px %ps\n",
1283                                pin, pin->flush);
1284
1285                 if (!list_empty(&pin_list->flushed))
1286                         pr_buf(out, "flushed:\n");
1287
1288                 list_for_each_entry(pin, &pin_list->flushed, list)
1289                         pr_buf(out, "\t%px %ps\n",
1290                                pin, pin->flush);
1291         }
1292         spin_unlock(&j->lock);
1293 }