libbcachefs/journal.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * bcachefs journalling code, for btree insertions
4  *
5  * Copyright 2012 Google, Inc.
6  */
7
8 #include "bcachefs.h"
9 #include "alloc_foreground.h"
10 #include "bkey_methods.h"
11 #include "btree_gc.h"
12 #include "btree_update.h"
13 #include "buckets.h"
14 #include "error.h"
15 #include "journal.h"
16 #include "journal_io.h"
17 #include "journal_reclaim.h"
18 #include "journal_seq_blacklist.h"
19 #include "super-io.h"
20
21 #include <trace/events/bcachefs.h>
22
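/*
 * last_unwritten_seq - oldest journal sequence number that has not yet been
 * written out; sequence numbers from here up to journal_cur_seq() still have
 * in-memory journal_bufs.
 */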
23 static u64 last_unwritten_seq(struct journal *j)
24 {
25         union journal_res_state s = READ_ONCE(j->reservations);
26
27         lockdep_assert_held(&j->lock);
28
29         return journal_cur_seq(j) - ((s.idx - s.unwritten_idx) & JOURNAL_BUF_MASK);
30 }
31
32 static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
33 {
34         return seq >= last_unwritten_seq(j);
35 }
36
37 static bool __journal_entry_is_open(union journal_res_state state)
38 {
39         return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
40 }
41
42 static bool journal_entry_is_open(struct journal *j)
43 {
44         return __journal_entry_is_open(j->reservations);
45 }
46
47 static inline struct journal_buf *
48 journal_seq_to_buf(struct journal *j, u64 seq)
49 {
50         struct journal_buf *buf = NULL;
51
52         EBUG_ON(seq > journal_cur_seq(j));
53         EBUG_ON(seq == journal_cur_seq(j) &&
54                 j->reservations.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL);
55
56         if (journal_seq_unwritten(j, seq)) {
57                 buf = j->buf + (seq & JOURNAL_BUF_MASK);
58                 EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
59         }
60         return buf;
61 }
62
63 static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
64 {
65         INIT_LIST_HEAD(&p->list);
66         INIT_LIST_HEAD(&p->key_cache_list);
67         INIT_LIST_HEAD(&p->flushed);
68         atomic_set(&p->count, count);
69         p->devs.nr = 0;
70 }
71
72 static void journal_pin_new_entry(struct journal *j)
73 {
74         /*
75          * The fifo_push() needs to happen at the same time as j->seq is
76          * incremented for journal_last_seq() to be calculated correctly
77          */
78         atomic64_inc(&j->seq);
79         journal_pin_list_init(fifo_push_ref(&j->pin), 1);
80 }
81
82 static void bch2_journal_buf_init(struct journal *j)
83 {
84         struct journal_buf *buf = journal_cur_buf(j);
85
86         bkey_extent_init(&buf->key);
87         buf->noflush    = false;
88         buf->must_flush = false;
89         buf->separate_flush = false;
90
91         memset(buf->has_inode, 0, sizeof(buf->has_inode));
92
93         memset(buf->data, 0, sizeof(*buf->data));
94         buf->data->seq  = cpu_to_le64(journal_cur_seq(j));
95         buf->data->u64s = 0;
96 }
97
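/*
 * bch2_journal_halt - put the journal into an error state: marks the current
 * entry as JOURNAL_ENTRY_ERROR_VAL so no new reservations can be taken,
 * records err_seq, and wakes up anyone waiting on the journal.
 */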
98 void bch2_journal_halt(struct journal *j)
99 {
100         union journal_res_state old, new;
101         u64 v = atomic64_read(&j->reservations.counter);
102
103         do {
104                 old.v = new.v = v;
105                 if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
106                         return;
107
108                 new.cur_entry_offset = JOURNAL_ENTRY_ERROR_VAL;
109         } while ((v = atomic64_cmpxchg(&j->reservations.counter,
110                                        old.v, new.v)) != old.v);
111
112         j->err_seq = journal_cur_seq(j);
113         journal_wake(j);
114         closure_wake_up(&journal_cur_buf(j)->wait);
115 }
116
117 /* journal entry close/open: */
118
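/*
 * __bch2_journal_buf_put - kick off the actual write of a journal buffer;
 * called once the buffer's last reference has been dropped.
 */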
119 void __bch2_journal_buf_put(struct journal *j)
120 {
121         closure_call(&j->io, bch2_journal_write, system_highpri_wq, NULL);
122 }
123
124 /*
125  * Returns true if journal entry is now closed:
126  *
127  * We don't close a journal_buf until the next journal_buf is finished writing,
128  * and can be opened again - this also initializes the next journal_buf:
129  */
130 static bool __journal_entry_close(struct journal *j)
131 {
132         struct bch_fs *c = container_of(j, struct bch_fs, journal);
133         struct journal_buf *buf = journal_cur_buf(j);
134         union journal_res_state old, new;
135         u64 v = atomic64_read(&j->reservations.counter);
136         unsigned sectors;
137
138         lockdep_assert_held(&j->lock);
139
140         do {
141                 old.v = new.v = v;
142                 if (old.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL)
143                         return true;
144
145                 if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL) {
146                         /* this entry will never be written: */
147                         closure_wake_up(&buf->wait);
148                         return true;
149                 }
150
151                 if (!test_bit(JOURNAL_NEED_WRITE, &j->flags)) {
152                         set_bit(JOURNAL_NEED_WRITE, &j->flags);
153                         j->need_write_time = local_clock();
154                 }
155
156                 new.cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL;
157                 new.idx++;
158
159                 if (new.idx == new.unwritten_idx)
160                         return false;
161
162                 BUG_ON(journal_state_count(new, new.idx));
163         } while ((v = atomic64_cmpxchg(&j->reservations.counter,
164                                        old.v, new.v)) != old.v);
165
166         /* Close out old buffer: */
167         buf->data->u64s         = cpu_to_le32(old.cur_entry_offset);
168
169         sectors = vstruct_blocks_plus(buf->data, c->block_bits,
170                                       buf->u64s_reserved) << c->block_bits;
171         BUG_ON(sectors > buf->sectors);
172         buf->sectors = sectors;
173
174         /*
175          * We have to set last_seq here, _before_ opening a new journal entry:
176          *
177          * A thread may replace an old pin with a new pin on its current
178          * journal reservation - the expectation being that the journal will
179          * contain either what the old pin protected or what the new pin
180          * protects.
181          *
182          * After the old pin is dropped journal_last_seq() won't include the old
183          * pin, so we can only write the updated last_seq on the entry that
184          * contains whatever the new pin protects.
185          *
186          * Restated, we can _not_ update last_seq for a given entry if there
187          * could be a newer entry open with reservations/pins that have been
188          * taken against it.
189          *
190          * Hence, we want to update/set last_seq on the current journal entry right
191          * before we open a new one:
192          */
193         buf->data->last_seq     = cpu_to_le64(journal_last_seq(j));
194
195         __bch2_journal_pin_put(j, le64_to_cpu(buf->data->seq));
196
197         /* Initialize new buffer: */
198         journal_pin_new_entry(j);
199
200         bch2_journal_buf_init(j);
201
202         cancel_delayed_work(&j->write_work);
203         clear_bit(JOURNAL_NEED_WRITE, &j->flags);
204
205         bch2_journal_space_available(j);
206
207         bch2_journal_buf_put(j, old.idx);
208         return true;
209 }
210
211 static bool journal_entry_want_write(struct journal *j)
212 {
213         union journal_res_state s = READ_ONCE(j->reservations);
214         bool ret = false;
215
216         /*
217          * Don't close it yet if we already have a write in flight, but do set
218          * NEED_WRITE:
219          */
220         if (s.idx != s.unwritten_idx)
221                 set_bit(JOURNAL_NEED_WRITE, &j->flags);
222         else
223                 ret = __journal_entry_close(j);
224
225         return ret;
226 }
227
228 static bool journal_entry_close(struct journal *j)
229 {
230         bool ret;
231
232         spin_lock(&j->lock);
233         ret = journal_entry_want_write(j);
234         spin_unlock(&j->lock);
235
236         return ret;
237 }
238
239 /*
240  * should _only_ be called from journal_res_get() - when we actually want a
241  * journal reservation - an open journal entry means the journal is dirty:
242  *
243  * returns:
244  * 0:           success
245  * -ENOSPC:     journal currently full, must invoke reclaim
246  * -EAGAIN:     journal blocked, must wait
247  * -EROFS:      insufficient rw devices or journal error
248  */
249 static int journal_entry_open(struct journal *j)
250 {
251         struct bch_fs *c = container_of(j, struct bch_fs, journal);
252         struct journal_buf *buf = journal_cur_buf(j);
253         union journal_res_state old, new;
254         int u64s;
255         u64 v;
256
257         BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
258
259         lockdep_assert_held(&j->lock);
260         BUG_ON(journal_entry_is_open(j));
261
262         if (j->blocked)
263                 return cur_entry_blocked;
264
265         if (j->cur_entry_error)
266                 return j->cur_entry_error;
267
268         BUG_ON(!j->cur_entry_sectors);
269
270         buf->u64s_reserved      = j->entry_u64s_reserved;
271         buf->disk_sectors       = j->cur_entry_sectors;
272         buf->sectors            = min(buf->disk_sectors, buf->buf_size >> 9);
273
274         u64s = (int) (buf->sectors << 9) / sizeof(u64) -
275                 journal_entry_overhead(j);
276         u64s  = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);
277
278         if (u64s <= le32_to_cpu(buf->data->u64s))
279                 return cur_entry_journal_full;
280
281         /*
282          * Must be set before marking the journal entry as open:
283          */
284         j->cur_entry_u64s = u64s;
285
286         v = atomic64_read(&j->reservations.counter);
287         do {
288                 old.v = new.v = v;
289
290                 if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
291                         return cur_entry_insufficient_devices;
292
293                 /* Handle any already added entries */
294                 new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
295
296                 EBUG_ON(journal_state_count(new, new.idx));
297                 journal_state_inc(&new);
298         } while ((v = atomic64_cmpxchg(&j->reservations.counter,
299                                        old.v, new.v)) != old.v);
300
301         if (j->res_get_blocked_start)
302                 bch2_time_stats_update(j->blocked_time,
303                                        j->res_get_blocked_start);
304         j->res_get_blocked_start = 0;
305
306         mod_delayed_work(system_freezable_wq,
307                          &j->write_work,
308                          msecs_to_jiffies(j->write_delay_ms));
309         journal_wake(j);
310         return 0;
311 }
312
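/*
 * journal_quiesced - true if there are no journal writes in flight and no
 * open journal entry; if not, kick off a close so that journal_quiesce()
 * eventually makes progress.
 */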
313 static bool journal_quiesced(struct journal *j)
314 {
315         union journal_res_state s = READ_ONCE(j->reservations);
316         bool ret = s.idx == s.unwritten_idx && !__journal_entry_is_open(s);
317
318         if (!ret)
319                 journal_entry_close(j);
320         return ret;
321 }
322
323 static void journal_quiesce(struct journal *j)
324 {
325         wait_event(j->wait, journal_quiesced(j));
326 }
327
328 static void journal_write_work(struct work_struct *work)
329 {
330         struct journal *j = container_of(work, struct journal, write_work.work);
331
332         journal_entry_close(j);
333 }
334
335 /*
336  * Given an inode number, if that inode number has data in the journal that
337  * hasn't yet been flushed, return the journal sequence number that needs to be
338  * flushed:
339  */
340 u64 bch2_inode_journal_seq(struct journal *j, u64 inode)
341 {
342         size_t h = hash_64(inode, ilog2(sizeof(j->buf[0].has_inode) * 8));
343         union journal_res_state s;
344         unsigned i;
345         u64 seq;
346
347
348         spin_lock(&j->lock);
349         seq = journal_cur_seq(j);
350         s = READ_ONCE(j->reservations);
351         i = s.idx;
352
353         while (1) {
354                 if (test_bit(h, j->buf[i].has_inode))
355                         goto out;
356
357                 if (i == s.unwritten_idx)
358                         break;
359
360                 i = (i - 1) & JOURNAL_BUF_MASK;
361                 seq--;
362         }
363
364         seq = 0;
365 out:
366         spin_unlock(&j->lock);
367
368         return seq;
369 }
370
371 void bch2_journal_set_has_inum(struct journal *j, u64 inode, u64 seq)
372 {
373         size_t h = hash_64(inode, ilog2(sizeof(j->buf[0].has_inode) * 8));
374         struct journal_buf *buf;
375
376         spin_lock(&j->lock);
377
378         if ((buf = journal_seq_to_buf(j, seq)))
379                 set_bit(h, buf->has_inode);
380
381         spin_unlock(&j->lock);
382 }
383
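/*
 * __journal_res_get - slow path for getting a journal reservation: may close
 * the current entry and open a new one, and may run discards or direct
 * reclaim when the journal is full. Returns 0 on success, -EROFS on journal
 * error or insufficient devices, -EAGAIN if the caller needs to retry or wait.
 */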
384 static int __journal_res_get(struct journal *j, struct journal_res *res,
385                              unsigned flags)
386 {
387         struct bch_fs *c = container_of(j, struct bch_fs, journal);
388         struct journal_buf *buf;
389         bool can_discard;
390         int ret;
391 retry:
392         if (journal_res_get_fast(j, res, flags))
393                 return 0;
394
395         if (bch2_journal_error(j))
396                 return -EROFS;
397
398         spin_lock(&j->lock);
399
400         /*
401          * Recheck after taking the lock, so we don't race with another thread
402          * that just did journal_entry_open(), and end up calling
403          * journal_entry_close() unnecessarily
404          */
405         if (journal_res_get_fast(j, res, flags)) {
406                 spin_unlock(&j->lock);
407                 return 0;
408         }
409
410         if (!(flags & JOURNAL_RES_GET_RESERVED) &&
411             !test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags)) {
412                 /*
413                  * Don't want to close current journal entry, just need to
414                  * invoke reclaim:
415                  */
416                 ret = cur_entry_journal_full;
417                 goto unlock;
418         }
419
420         /*
421          * If we couldn't get a reservation because the current buf filled up,
422          * and we had room for a bigger entry on disk, signal that we want to
423          * realloc the journal bufs:
424          */
425         buf = journal_cur_buf(j);
426         if (journal_entry_is_open(j) &&
427             buf->buf_size >> 9 < buf->disk_sectors &&
428             buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
429                 j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);
430
431         if (journal_entry_is_open(j) &&
432             !__journal_entry_close(j)) {
433                 /*
434                  * We failed to get a reservation on the current open journal
435                  * entry because it's full, and we can't close it because
436                  * there's still a previous one in flight:
437                  */
438                 trace_journal_entry_full(c);
439                 ret = cur_entry_blocked;
440         } else {
441                 ret = journal_entry_open(j);
442         }
443 unlock:
444         if ((ret && ret != cur_entry_insufficient_devices) &&
445             !j->res_get_blocked_start) {
446                 j->res_get_blocked_start = local_clock() ?: 1;
447                 trace_journal_full(c);
448         }
449
450         can_discard = j->can_discard;
451         spin_unlock(&j->lock);
452
453         if (!ret)
454                 goto retry;
455
456         if ((ret == cur_entry_journal_full ||
457              ret == cur_entry_journal_pin_full) &&
458             !can_discard &&
459             j->reservations.idx == j->reservations.unwritten_idx &&
460             (flags & JOURNAL_RES_GET_RESERVED)) {
461                 char *journal_debug_buf = kmalloc(4096, GFP_ATOMIC);
462
463                 bch_err(c, "Journal stuck!");
464                 if (journal_debug_buf) {
465                         bch2_journal_debug_to_text(&_PBUF(journal_debug_buf, 4096), j);
466                         bch_err(c, "%s", journal_debug_buf);
467
468                         bch2_journal_pins_to_text(&_PBUF(journal_debug_buf, 4096), j);
469                         bch_err(c, "Journal pins:\n%s", journal_debug_buf);
470                         kfree(journal_debug_buf);
471                 }
472
473                 bch2_fatal_error(c);
474                 dump_stack();
475         }
476
477         /*
478          * Journal is full - can't rely on reclaim from work item due to
479          * freezing:
480          */
481         if ((ret == cur_entry_journal_full ||
482              ret == cur_entry_journal_pin_full) &&
483             !(flags & JOURNAL_RES_GET_NONBLOCK)) {
484                 if (can_discard) {
485                         bch2_journal_do_discards(j);
486                         goto retry;
487                 }
488
489                 if (mutex_trylock(&j->reclaim_lock)) {
490                         bch2_journal_reclaim(j);
491                         mutex_unlock(&j->reclaim_lock);
492                 }
493         }
494
495         return ret == cur_entry_insufficient_devices ? -EROFS : -EAGAIN;
496 }
497
498 /*
499  * Essentially the entry function to the journaling code. When bcachefs is doing
500  * a btree insert, it calls this function to get the current journal write.
501  * Journal write is the structure used to set up journal writes. The calling
502  * function will then add its keys to the structure, queuing them for the next
503  * write.
504  *
505  * To ensure forward progress, the current task must not be holding any
506  * btree node write locks.
507  */
508 int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
509                                   unsigned flags)
510 {
511         int ret;
512
513         closure_wait_event(&j->async_wait,
514                    (ret = __journal_res_get(j, res, flags)) != -EAGAIN ||
515                    (flags & JOURNAL_RES_GET_NONBLOCK));
516         return ret;
517 }
518
519 /* journal_preres: */
520
521 static bool journal_preres_available(struct journal *j,
522                                      struct journal_preres *res,
523                                      unsigned new_u64s,
524                                      unsigned flags)
525 {
526         bool ret = bch2_journal_preres_get_fast(j, res, new_u64s, flags, true);
527
528         if (!ret && mutex_trylock(&j->reclaim_lock)) {
529                 bch2_journal_reclaim(j);
530                 mutex_unlock(&j->reclaim_lock);
531         }
532
533         return ret;
534 }
535
536 int __bch2_journal_preres_get(struct journal *j,
537                               struct journal_preres *res,
538                               unsigned new_u64s,
539                               unsigned flags)
540 {
541         int ret;
542
543         closure_wait_event(&j->preres_wait,
544                    (ret = bch2_journal_error(j)) ||
545                    journal_preres_available(j, res, new_u64s, flags));
546         return ret;
547 }
548
549 /* journal_entry_res: */
550
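/*
 * bch2_journal_entry_res_resize - change how much space is reserved in every
 * journal entry for a given consumer; if the currently open entry no longer
 * has room for the larger reservation, it is closed.
 */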
551 void bch2_journal_entry_res_resize(struct journal *j,
552                                    struct journal_entry_res *res,
553                                    unsigned new_u64s)
554 {
555         union journal_res_state state;
556         int d = new_u64s - res->u64s;
557
558         spin_lock(&j->lock);
559
560         j->entry_u64s_reserved += d;
561         if (d <= 0)
562                 goto out;
563
564         j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
565         smp_mb();
566         state = READ_ONCE(j->reservations);
567
568         if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
569             state.cur_entry_offset > j->cur_entry_u64s) {
570                 j->cur_entry_u64s += d;
571                 /*
572                  * Not enough room in current journal entry, have to flush it:
573                  */
574                 __journal_entry_close(j);
575         } else {
576                 journal_cur_buf(j)->u64s_reserved += d;
577         }
578 out:
579         spin_unlock(&j->lock);
580         res->u64s += d;
581 }
582
583 /* journal flushing: */
584
585 /**
586  * bch2_journal_flush_seq_async - wait for a journal entry to be written
587  *
588  * Like bch2_journal_wait_on_seq, except that it triggers a write immediately if
589  * necessary
590  */
591 int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
592                                  struct closure *parent)
593 {
594         struct journal_buf *buf;
595         int ret = 0;
596
597         if (seq <= j->flushed_seq_ondisk)
598                 return 1;
599
600         spin_lock(&j->lock);
601
602         BUG_ON(seq > journal_cur_seq(j));
603
604         /* Recheck under lock: */
605         if (j->err_seq && seq >= j->err_seq) {
606                 ret = -EIO;
607                 goto out;
608         }
609
610         if (seq <= j->flushed_seq_ondisk) {
611                 ret = 1;
612                 goto out;
613         }
614
615         /* if seq was written, but not flushed - flush a newer one instead */
616         seq = max(seq, last_unwritten_seq(j));
617
618 recheck_need_open:
619         if (seq == journal_cur_seq(j) && !journal_entry_is_open(j)) {
620                 struct journal_res res = { 0 };
621
622                 spin_unlock(&j->lock);
623
624                 ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
625                 if (ret)
626                         return ret;
627
628                 seq = res.seq;
629                 buf = j->buf + (seq & JOURNAL_BUF_MASK);
630                 buf->must_flush = true;
631                 set_bit(JOURNAL_NEED_WRITE, &j->flags);
632
633                 if (parent && !closure_wait(&buf->wait, parent))
634                         BUG();
635
636                 bch2_journal_res_put(j, &res);
637
638                 spin_lock(&j->lock);
639                 goto want_write;
640         }
641
642         /*
643          * if write was kicked off without a flush, flush the next sequence
644          * number instead
645          */
646         buf = journal_seq_to_buf(j, seq);
647         if (buf->noflush) {
648                 seq++;
649                 goto recheck_need_open;
650         }
651
652         buf->must_flush = true;
653
654         if (parent && !closure_wait(&buf->wait, parent))
655                 BUG();
656 want_write:
657         if (seq == journal_cur_seq(j))
658                 journal_entry_want_write(j);
659 out:
660         spin_unlock(&j->lock);
661         return ret;
662 }
663
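/*
 * bch2_journal_flush_seq - synchronously wait for journal sequence number
 * @seq to be flushed to disk, recording the time spent in flush_seq_time.
 */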
664 int bch2_journal_flush_seq(struct journal *j, u64 seq)
665 {
666         u64 start_time = local_clock();
667         int ret, ret2;
668
669         ret = wait_event_interruptible(j->wait, (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)));
670
671         if (!ret)
672                 bch2_time_stats_update(j->flush_seq_time, start_time);
673
674         return ret ?: ret2 < 0 ? ret2 : 0;
675 }
676
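/*
 * bch2_journal_meta - take and immediately release an empty journal
 * reservation, then flush it: forces a new journal entry to be written out.
 */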
677 int bch2_journal_meta(struct journal *j)
678 {
679         struct journal_res res;
680         int ret;
681
682         memset(&res, 0, sizeof(res));
683
684         ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
685         if (ret)
686                 return ret;
687
688         bch2_journal_res_put(j, &res);
689
690         return bch2_journal_flush_seq(j, res.seq);
691 }
692
693 /*
694  * bch2_journal_flush_async - if there is an open journal entry, or a journal
695  * still being written, write it and wait for the write to complete
696  */
697 void bch2_journal_flush_async(struct journal *j, struct closure *parent)
698 {
699         u64 seq, journal_seq;
700
701         spin_lock(&j->lock);
702         journal_seq = journal_cur_seq(j);
703
704         if (journal_entry_is_open(j)) {
705                 seq = journal_seq;
706         } else if (journal_seq) {
707                 seq = journal_seq - 1;
708         } else {
709                 spin_unlock(&j->lock);
710                 return;
711         }
712         spin_unlock(&j->lock);
713
714         bch2_journal_flush_seq_async(j, seq, parent);
715 }
716
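/*
 * bch2_journal_flush - like bch2_journal_flush_async(), but waits
 * synchronously for the flush to complete.
 */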
717 int bch2_journal_flush(struct journal *j)
718 {
719         u64 seq, journal_seq;
720
721         spin_lock(&j->lock);
722         journal_seq = journal_cur_seq(j);
723
724         if (journal_entry_is_open(j)) {
725                 seq = journal_seq;
726         } else if (journal_seq) {
727                 seq = journal_seq - 1;
728         } else {
729                 spin_unlock(&j->lock);
730                 return 0;
731         }
732         spin_unlock(&j->lock);
733
734         return bch2_journal_flush_seq(j, seq);
735 }
736
737 /* block/unblock the journal: */
738
739 void bch2_journal_unblock(struct journal *j)
740 {
741         spin_lock(&j->lock);
742         j->blocked--;
743         spin_unlock(&j->lock);
744
745         journal_wake(j);
746 }
747
748 void bch2_journal_block(struct journal *j)
749 {
750         spin_lock(&j->lock);
751         j->blocked++;
752         spin_unlock(&j->lock);
753
754         journal_quiesce(j);
755 }
756
757 /* allocate journal on a device: */
758
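/*
 * __bch2_set_nr_journal_buckets - grow a device's journal to @nr buckets,
 * allocating the new buckets and adding them to both the superblock journal
 * field and the in-memory journal_device arrays; shrinking isn't supported.
 */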
759 static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
760                                          bool new_fs, struct closure *cl)
761 {
762         struct bch_fs *c = ca->fs;
763         struct journal_device *ja = &ca->journal;
764         struct bch_sb_field_journal *journal_buckets;
765         u64 *new_bucket_seq = NULL, *new_buckets = NULL;
766         int ret = 0;
767
768         /* don't handle reducing nr of buckets yet: */
769         if (nr <= ja->nr)
770                 return 0;
771
772         new_buckets     = kzalloc(nr * sizeof(u64), GFP_KERNEL);
773         new_bucket_seq  = kzalloc(nr * sizeof(u64), GFP_KERNEL);
774         if (!new_buckets || !new_bucket_seq) {
775                 ret = -ENOMEM;
776                 goto err;
777         }
778
779         journal_buckets = bch2_sb_resize_journal(&ca->disk_sb,
780                                         nr + sizeof(*journal_buckets) / sizeof(u64));
781         if (!journal_buckets) {
782                 ret = -ENOSPC;
783                 goto err;
784         }
785
786         /*
787          * We may be called from the device add path, before the new device has
788          * actually been added to the running filesystem:
789          */
790         if (c)
791                 spin_lock(&c->journal.lock);
792
793         memcpy(new_buckets,     ja->buckets,    ja->nr * sizeof(u64));
794         memcpy(new_bucket_seq,  ja->bucket_seq, ja->nr * sizeof(u64));
795         swap(new_buckets,       ja->buckets);
796         swap(new_bucket_seq,    ja->bucket_seq);
797
798         if (c)
799                 spin_unlock(&c->journal.lock);
800
801         while (ja->nr < nr) {
802                 struct open_bucket *ob = NULL;
803                 unsigned pos;
804                 long bucket;
805
806                 if (new_fs) {
807                         bucket = bch2_bucket_alloc_new_fs(ca);
808                         if (bucket < 0) {
809                                 ret = -ENOSPC;
810                                 goto err;
811                         }
812                 } else {
813                         rcu_read_lock();
814                         ob = bch2_bucket_alloc(c, ca, RESERVE_NONE,
815                                                false, cl);
816                         rcu_read_unlock();
817                         if (IS_ERR(ob)) {
818                                 ret = cl ? -EAGAIN : -ENOSPC;
819                                 goto err;
820                         }
821
822                         bucket = sector_to_bucket(ca, ob->ptr.offset);
823                 }
824
825                 if (c) {
826                         percpu_down_read(&c->mark_lock);
827                         spin_lock(&c->journal.lock);
828                 }
829
830                 /*
831                  * XXX
832                  * For resize at runtime, we should be writing the new
833                  * superblock before inserting into the journal array
834                  */
835
836                 pos = ja->nr ? (ja->cur_idx + 1) % ja->nr : 0;
837                 __array_insert_item(ja->buckets,                ja->nr, pos);
838                 __array_insert_item(ja->bucket_seq,             ja->nr, pos);
839                 __array_insert_item(journal_buckets->buckets,   ja->nr, pos);
840                 ja->nr++;
841
842                 ja->buckets[pos] = bucket;
843                 ja->bucket_seq[pos] = 0;
844                 journal_buckets->buckets[pos] = cpu_to_le64(bucket);
845
846                 if (pos <= ja->discard_idx)
847                         ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
848                 if (pos <= ja->dirty_idx_ondisk)
849                         ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;
850                 if (pos <= ja->dirty_idx)
851                         ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;
852                 if (pos <= ja->cur_idx)
853                         ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
854
855                 if (!c || new_fs)
856                         bch2_mark_metadata_bucket(c, ca, bucket, BCH_DATA_journal,
857                                                   ca->mi.bucket_size,
858                                                   gc_phase(GC_PHASE_SB),
859                                                   0);
860
861                 if (c) {
862                         spin_unlock(&c->journal.lock);
863                         percpu_up_read(&c->mark_lock);
864                 }
865
866                 if (c && !new_fs)
867                         ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_NOFAIL,
868                                 bch2_trans_mark_metadata_bucket(&trans, NULL, ca,
869                                                 bucket, BCH_DATA_journal,
870                                                 ca->mi.bucket_size));
871
872                 if (!new_fs)
873                         bch2_open_bucket_put(c, ob);
874
875                 if (ret)
876                         goto err;
877         }
878 err:
879         bch2_sb_resize_journal(&ca->disk_sb,
880                 ja->nr + sizeof(*journal_buckets) / sizeof(u64));
881         kfree(new_bucket_seq);
882         kfree(new_buckets);
883
884         return ret;
885 }
886
887 /*
888  * Allocate more journal space at runtime - not currently making use of it, but
889  * the code works:
890  */
891 int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
892                                 unsigned nr)
893 {
894         struct journal_device *ja = &ca->journal;
895         struct closure cl;
896         unsigned current_nr;
897         int ret;
898
899         closure_init_stack(&cl);
900
901         do {
902                 struct disk_reservation disk_res = { 0, 0 };
903
904                 closure_sync(&cl);
905
906                 mutex_lock(&c->sb_lock);
907                 current_nr = ja->nr;
908
909                 /*
910                  * note: journal buckets aren't really counted as _sectors_ used yet, so
911                  * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
912                  * when space used goes up without a reservation - but we do need the
913                  * reservation to ensure we'll actually be able to allocate:
914                  */
915
916                 if (bch2_disk_reservation_get(c, &disk_res,
917                                               bucket_to_sector(ca, nr - ja->nr), 1, 0)) {
918                         mutex_unlock(&c->sb_lock);
919                         return -ENOSPC;
920                 }
921
922                 ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);
923
924                 bch2_disk_reservation_put(c, &disk_res);
925
926                 if (ja->nr != current_nr)
927                         bch2_write_super(c);
928                 mutex_unlock(&c->sb_lock);
929         } while (ret == -EAGAIN);
930
931         return ret;
932 }
933
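/*
 * bch2_dev_journal_alloc - allocate a device's journal at format time:
 * roughly 1/128th of the device, clamped as described below.
 */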
934 int bch2_dev_journal_alloc(struct bch_dev *ca)
935 {
936         unsigned nr;
937
938         if (dynamic_fault("bcachefs:add:journal_alloc"))
939                 return -ENOMEM;
940
941         /* 1/128th of the device by default: */
942         nr = ca->mi.nbuckets >> 7;
943
944         /*
945          * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
946          * is smaller:
947          */
948         nr = clamp_t(unsigned, nr,
949                      BCH_JOURNAL_BUCKETS_MIN,
950                      min(1 << 13,
951                          (1 << 24) / ca->mi.bucket_size));
952
953         return __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
954 }
955
956 /* startup/shutdown: */
957
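/*
 * bch2_journal_writing_to_device - true if any journal buffer that hasn't
 * finished writing has a pointer to the given device; used by
 * bch2_dev_journal_stop() to wait for in-flight writes to that device.
 */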
958 static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
959 {
960         union journal_res_state state;
961         bool ret = false;
962         unsigned i;
963
964         spin_lock(&j->lock);
965         state = READ_ONCE(j->reservations);
966         i = state.idx;
967
968         while (i != state.unwritten_idx) {
969                 i = (i - 1) & JOURNAL_BUF_MASK;
970                 if (bch2_bkey_has_device(bkey_i_to_s_c(&j->buf[i].key), dev_idx))
971                         ret = true;
972         }
973         spin_unlock(&j->lock);
974
975         return ret;
976 }
977
978 void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
979 {
980         wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
981 }
982
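/*
 * bch2_fs_journal_stop - flush all journal pins, write one final journal
 * entry, then wait for the journal to quiesce and stop the write and reclaim
 * workers.
 */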
983 void bch2_fs_journal_stop(struct journal *j)
984 {
985         bch2_journal_flush_all_pins(j);
986
987         wait_event(j->wait, journal_entry_close(j));
988
989         /*
990          * Always write a new journal entry, to make sure the clock hands are up
991          * to date (and match the superblock)
992          */
993         bch2_journal_meta(j);
994
995         journal_quiesce(j);
996
997         BUG_ON(!bch2_journal_error(j) &&
998                test_bit(JOURNAL_REPLAY_DONE, &j->flags) &&
999                (journal_entry_is_open(j) ||
1000                 j->last_empty_seq + 1 != journal_cur_seq(j)));
1001
1002         cancel_delayed_work_sync(&j->write_work);
1003         bch2_journal_reclaim_stop(j);
1004 }
1005
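/*
 * bch2_fs_journal_start - initialize journal state from the entries read
 * during recovery: set up the pin fifo, the sequence number counters, and the
 * first journal buffer.
 */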
1006 int bch2_fs_journal_start(struct journal *j, u64 cur_seq,
1007                           struct list_head *journal_entries)
1008 {
1009         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1010         struct journal_entry_pin_list *p;
1011         struct journal_replay *i;
1012         u64 last_seq = cur_seq, nr, seq;
1013
1014         if (!list_empty(journal_entries))
1015                 last_seq = le64_to_cpu(list_last_entry(journal_entries,
1016                                 struct journal_replay, list)->j.last_seq);
1017
1018         nr = cur_seq - last_seq;
1019
1020         if (nr + 1 > j->pin.size) {
1021                 free_fifo(&j->pin);
1022                 init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
1023                 if (!j->pin.data) {
1024                         bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
1025                         return -ENOMEM;
1026                 }
1027         }
1028
1029         j->replay_journal_seq   = last_seq;
1030         j->replay_journal_seq_end = cur_seq;
1031         j->last_seq_ondisk      = last_seq;
1032         j->pin.front            = last_seq;
1033         j->pin.back             = cur_seq;
1034         atomic64_set(&j->seq, cur_seq - 1);
1035
1036         fifo_for_each_entry_ptr(p, &j->pin, seq)
1037                 journal_pin_list_init(p, 1);
1038
1039         list_for_each_entry(i, journal_entries, list) {
1040                 unsigned ptr;
1041
1042                 seq = le64_to_cpu(i->j.seq);
1043                 BUG_ON(seq >= cur_seq);
1044
1045                 if (seq < last_seq)
1046                         continue;
1047
1048                 p = journal_seq_pin(j, seq);
1049
1050                 p->devs.nr = 0;
1051                 for (ptr = 0; ptr < i->nr_ptrs; ptr++)
1052                         bch2_dev_list_add_dev(&p->devs, i->ptrs[ptr].dev);
1053         }
1054
1055         spin_lock(&j->lock);
1056
1057         set_bit(JOURNAL_STARTED, &j->flags);
1058         j->last_flush_write = jiffies;
1059
1060         journal_pin_new_entry(j);
1061
1062         j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
1063
1064         bch2_journal_buf_init(j);
1065
1066         c->last_bucket_seq_cleanup = journal_cur_seq(j);
1067
1068         bch2_journal_space_available(j);
1069         spin_unlock(&j->lock);
1070
1071         return 0;
1072 }
1073
1074 /* init/exit: */
1075
1076 void bch2_dev_journal_exit(struct bch_dev *ca)
1077 {
1078         kfree(ca->journal.bio);
1079         kfree(ca->journal.buckets);
1080         kfree(ca->journal.bucket_seq);
1081
1082         ca->journal.bio         = NULL;
1083         ca->journal.buckets     = NULL;
1084         ca->journal.bucket_seq  = NULL;
1085 }
1086
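/*
 * bch2_dev_journal_init - read a device's journal bucket list from its
 * superblock and allocate the per-device journal state.
 */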
1087 int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
1088 {
1089         struct journal_device *ja = &ca->journal;
1090         struct bch_sb_field_journal *journal_buckets =
1091                 bch2_sb_get_journal(sb);
1092         unsigned i;
1093
1094         ja->nr = bch2_nr_journal_buckets(journal_buckets);
1095
1096         ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
1097         if (!ja->bucket_seq)
1098                 return -ENOMEM;
1099
1100         ca->journal.bio = bio_kmalloc(GFP_KERNEL,
1101                         DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE));
1102         if (!ca->journal.bio)
1103                 return -ENOMEM;
1104
1105         ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
1106         if (!ja->buckets)
1107                 return -ENOMEM;
1108
1109         for (i = 0; i < ja->nr; i++)
1110                 ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
1111
1112         return 0;
1113 }
1114
1115 void bch2_fs_journal_exit(struct journal *j)
1116 {
1117         unsigned i;
1118
1119         for (i = 0; i < ARRAY_SIZE(j->buf); i++)
1120                 kvpfree(j->buf[i].data, j->buf[i].buf_size);
1121         free_fifo(&j->pin);
1122 }
1123
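/*
 * bch2_fs_journal_init - allocate and initialize the in-memory journal:
 * locks, wait queues, the pin fifo and the journal buffers, with the initial
 * state set to CLOSED.
 */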
1124 int bch2_fs_journal_init(struct journal *j)
1125 {
1126         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1127         static struct lock_class_key res_key;
1128         unsigned i;
1129         int ret = 0;
1130
1131         pr_verbose_init(c->opts, "");
1132
1133         spin_lock_init(&j->lock);
1134         spin_lock_init(&j->err_lock);
1135         init_waitqueue_head(&j->wait);
1136         INIT_DELAYED_WORK(&j->write_work, journal_write_work);
1137         init_waitqueue_head(&j->reclaim_wait);
1138         init_waitqueue_head(&j->pin_flush_wait);
1139         mutex_init(&j->reclaim_lock);
1140         mutex_init(&j->discard_lock);
1141
1142         lockdep_init_map(&j->res_map, "journal res", &res_key, 0);
1143
1144         j->write_delay_ms       = 1000;
1145         j->reclaim_delay_ms     = 100;
1146
1147         atomic64_set(&j->reservations.counter,
1148                 ((union journal_res_state)
1149                  { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);
1150
1151         if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL))) {
1152                 ret = -ENOMEM;
1153                 goto out;
1154         }
1155
1156         for (i = 0; i < ARRAY_SIZE(j->buf); i++) {
1157                 j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
1158                 j->buf[i].data = kvpmalloc(j->buf[i].buf_size, GFP_KERNEL);
1159                 if (!j->buf[i].data) {
1160                         ret = -ENOMEM;
1161                         goto out;
1162                 }
1163         }
1164
1165         j->pin.front = j->pin.back = 1;
1166 out:
1167         pr_verbose_init(c->opts, "ret %i", ret);
1168         return ret;
1169 }
1170
1171 /* debug: */
1172
1173 void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
1174 {
1175         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1176         union journal_res_state s;
1177         struct bch_dev *ca;
1178         unsigned i;
1179
1180         rcu_read_lock();
1181         s = READ_ONCE(j->reservations);
1182
1183         pr_buf(out,
1184                "active journal entries:\t%llu\n"
1185                "seq:\t\t\t%llu\n"
1186                "last_seq:\t\t%llu\n"
1187                "last_seq_ondisk:\t%llu\n"
1188                "flushed_seq_ondisk:\t%llu\n"
1189                "prereserved:\t\t%u/%u\n"
1190                "each entry reserved:\t%u\n"
1191                "nr flush writes:\t%llu\n"
1192                "nr noflush writes:\t%llu\n"
1193                "nr direct reclaim:\t%llu\n"
1194                "nr background reclaim:\t%llu\n"
1195                "current entry sectors:\t%u\n"
1196                "current entry error:\t%u\n"
1197                "current entry:\t\t",
1198                fifo_used(&j->pin),
1199                journal_cur_seq(j),
1200                journal_last_seq(j),
1201                j->last_seq_ondisk,
1202                j->flushed_seq_ondisk,
1203                j->prereserved.reserved,
1204                j->prereserved.remaining,
1205                j->entry_u64s_reserved,
1206                j->nr_flush_writes,
1207                j->nr_noflush_writes,
1208                j->nr_direct_reclaim,
1209                j->nr_background_reclaim,
1210                j->cur_entry_sectors,
1211                j->cur_entry_error);
1212
1213         switch (s.cur_entry_offset) {
1214         case JOURNAL_ENTRY_ERROR_VAL:
1215                 pr_buf(out, "error\n");
1216                 break;
1217         case JOURNAL_ENTRY_CLOSED_VAL:
1218                 pr_buf(out, "closed\n");
1219                 break;
1220         default:
1221                 pr_buf(out, "%u/%u\n",
1222                        s.cur_entry_offset,
1223                        j->cur_entry_u64s);
1224                 break;
1225         }
1226
1227         pr_buf(out,
1228                "current entry:\t\tidx %u refcount %u\n",
1229                s.idx, journal_state_count(s, s.idx));
1230
1231         i = s.idx;
1232         while (i != s.unwritten_idx) {
1233                 i = (i - 1) & JOURNAL_BUF_MASK;
1234
1235                 pr_buf(out, "unwritten entry:\tidx %u refcount %u sectors %u\n",
1236                        i, journal_state_count(s, i), j->buf[i].sectors);
1237         }
1238
1239         pr_buf(out,
1240                "need write:\t\t%i\n"
1241                "replay done:\t\t%i\n",
1242                test_bit(JOURNAL_NEED_WRITE,     &j->flags),
1243                test_bit(JOURNAL_REPLAY_DONE,    &j->flags));
1244
1245         pr_buf(out, "space:\n");
1246         pr_buf(out, "\tdiscarded\t%u:%u\n",
1247                j->space[journal_space_discarded].next_entry,
1248                j->space[journal_space_discarded].total);
1249         pr_buf(out, "\tclean ondisk\t%u:%u\n",
1250                j->space[journal_space_clean_ondisk].next_entry,
1251                j->space[journal_space_clean_ondisk].total);
1252         pr_buf(out, "\tclean\t\t%u:%u\n",
1253                j->space[journal_space_clean].next_entry,
1254                j->space[journal_space_clean].total);
1255         pr_buf(out, "\ttotal\t\t%u:%u\n",
1256                j->space[journal_space_total].next_entry,
1257                j->space[journal_space_total].total);
1258
1259         for_each_member_device_rcu(ca, c, i,
1260                                    &c->rw_devs[BCH_DATA_journal]) {
1261                 struct journal_device *ja = &ca->journal;
1262
1263                 if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
1264                         continue;
1265
1266                 if (!ja->nr)
1267                         continue;
1268
1269                 pr_buf(out,
1270                        "dev %u:\n"
1271                        "\tnr\t\t%u\n"
1272                        "\tbucket size\t%u\n"
1273                        "\tavailable\t%u:%u\n"
1274                        "\tdiscard_idx\t%u\n"
1275                        "\tdirty_ondisk\t%u (seq %llu)\n"
1276                        "\tdirty_idx\t%u (seq %llu)\n"
1277                        "\tcur_idx\t\t%u (seq %llu)\n",
1278                        i, ja->nr, ca->mi.bucket_size,
1279                        bch2_journal_dev_buckets_available(j, ja, journal_space_discarded),
1280                        ja->sectors_free,
1281                        ja->discard_idx,
1282                        ja->dirty_idx_ondisk,    ja->bucket_seq[ja->dirty_idx_ondisk],
1283                        ja->dirty_idx,           ja->bucket_seq[ja->dirty_idx],
1284                        ja->cur_idx,             ja->bucket_seq[ja->cur_idx]);
1285         }
1286
1287         rcu_read_unlock();
1288 }
1289
1290 void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
1291 {
1292         spin_lock(&j->lock);
1293         __bch2_journal_debug_to_text(out, j);
1294         spin_unlock(&j->lock);
1295 }
1296
1297 void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
1298 {
1299         struct journal_entry_pin_list *pin_list;
1300         struct journal_entry_pin *pin;
1301         u64 i;
1302
1303         spin_lock(&j->lock);
1304         fifo_for_each_entry_ptr(pin_list, &j->pin, i) {
1305                 pr_buf(out, "%llu: count %u\n",
1306                        i, atomic_read(&pin_list->count));
1307
1308                 list_for_each_entry(pin, &pin_list->list, list)
1309                         pr_buf(out, "\t%px %ps\n",
1310                                pin, pin->flush);
1311
1312                 if (!list_empty(&pin_list->flushed))
1313                         pr_buf(out, "flushed:\n");
1314
1315                 list_for_each_entry(pin, &pin_list->flushed, list)
1316                         pr_buf(out, "\t%px %ps\n",
1317                                pin, pin->flush);
1318         }
1319         spin_unlock(&j->lock);
1320 }