// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "buckets.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "super-io.h"

#include <trace/events/bcachefs.h>

static bool __journal_entry_is_open(union journal_res_state state)
{
        return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}

static bool journal_entry_is_open(struct journal *j)
{
        return __journal_entry_is_open(j->reservations);
}
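
/*
 * Note: cur_entry_offset doubles as a state field - values below
 * JOURNAL_ENTRY_CLOSED_VAL are the current entry's fill level in u64s,
 * while JOURNAL_ENTRY_CLOSED_VAL and JOURNAL_ENTRY_ERROR_VAL are
 * sentinels for "no entry open" and "journal in error state".
 */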

static void journal_pin_new_entry(struct journal *j, int count)
{
        struct journal_entry_pin_list *p;

        /*
         * The fifo_push() needs to happen at the same time as j->seq is
         * incremented for journal_last_seq() to be calculated correctly
         */
        atomic64_inc(&j->seq);
        p = fifo_push_ref(&j->pin);

        INIT_LIST_HEAD(&p->list);
        INIT_LIST_HEAD(&p->flushed);
        atomic_set(&p->count, count);
        p->devs.nr = 0;
}

static void bch2_journal_buf_init(struct journal *j)
{
        struct journal_buf *buf = journal_cur_buf(j);

        memset(buf->has_inode, 0, sizeof(buf->has_inode));

        memset(buf->data, 0, sizeof(*buf->data));
        buf->data->seq  = cpu_to_le64(journal_cur_seq(j));
        buf->data->u64s = 0;
}

void bch2_journal_halt(struct journal *j)
{
        union journal_res_state old, new;
        u64 v = atomic64_read(&j->reservations.counter);

        do {
                old.v = new.v = v;
                if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
                        return;

                new.cur_entry_offset = JOURNAL_ENTRY_ERROR_VAL;
        } while ((v = atomic64_cmpxchg(&j->reservations.counter,
                                       old.v, new.v)) != old.v);

        journal_wake(j);
        closure_wake_up(&journal_cur_buf(j)->wait);
}
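
/*
 * Most state changes to j->reservations in this file follow the same
 * lockless pattern seen above: snapshot the atomic counter, compute the
 * new state, and retry the atomic64_cmpxchg() until it succeeds with the
 * state unchanged underneath us.
 */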

/* journal entry close/open: */

void __bch2_journal_buf_put(struct journal *j, bool need_write_just_set)
{
        if (!need_write_just_set &&
            test_bit(JOURNAL_NEED_WRITE, &j->flags))
                bch2_time_stats_update(j->delay_time,
                                       j->need_write_time);

        clear_bit(JOURNAL_NEED_WRITE, &j->flags);

        closure_call(&j->io, bch2_journal_write, system_highpri_wq, NULL);
}

/*
 * Returns true if journal entry is now closed:
 */
static bool __journal_entry_close(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_buf *buf = journal_cur_buf(j);
        union journal_res_state old, new;
        u64 v = atomic64_read(&j->reservations.counter);
        bool set_need_write = false;
        unsigned sectors;

        lockdep_assert_held(&j->lock);

        do {
                old.v = new.v = v;
                if (old.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL)
                        return true;

                if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL) {
                        /* this entry will never be written: */
                        closure_wake_up(&buf->wait);
                        return true;
                }

                if (!test_bit(JOURNAL_NEED_WRITE, &j->flags)) {
                        set_bit(JOURNAL_NEED_WRITE, &j->flags);
                        j->need_write_time = local_clock();
                        set_need_write = true;
                }

                if (new.prev_buf_unwritten)
                        return false;

                new.cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL;
                new.idx++;
                new.prev_buf_unwritten = 1;

                BUG_ON(journal_state_count(new, new.idx));
        } while ((v = atomic64_cmpxchg(&j->reservations.counter,
                                       old.v, new.v)) != old.v);

        buf->data->u64s         = cpu_to_le32(old.cur_entry_offset);

        sectors = vstruct_blocks_plus(buf->data, c->block_bits,
                                      buf->u64s_reserved) << c->block_bits;
        BUG_ON(sectors > buf->sectors);
        buf->sectors = sectors;

        bkey_extent_init(&buf->key);

        /*
         * We have to set last_seq here, _before_ opening a new journal entry:
         *
         * A thread may replace an old pin with a new pin on its current
         * journal reservation - the expectation being that the journal will
         * contain either what the old pin protected or what the new pin
         * protects.
         *
         * After the old pin is dropped journal_last_seq() won't include the old
         * pin, so we can only write the updated last_seq on the entry that
         * contains whatever the new pin protects.
         *
         * Restated, we can _not_ update last_seq for a given entry if there
         * could be a newer entry open with reservations/pins that have been
         * taken against it.
         *
         * Hence, we want to update/set last_seq on the current journal entry
         * right before we open a new one:
         */
        buf->data->last_seq     = cpu_to_le64(journal_last_seq(j));

        if (journal_entry_empty(buf->data))
                clear_bit(JOURNAL_NOT_EMPTY, &j->flags);
        else
                set_bit(JOURNAL_NOT_EMPTY, &j->flags);

        journal_pin_new_entry(j, 1);

        bch2_journal_buf_init(j);

        cancel_delayed_work(&j->write_work);

        bch2_journal_space_available(j);

        bch2_journal_buf_put(j, old.idx, set_need_write);
        return true;
}
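
/*
 * The journal double buffers writes (j->buf[0]/j->buf[1]):
 * __journal_entry_close() advances state.idx to the other buffer and
 * leaves prev_buf_unwritten set until the write of the old buffer
 * completes.
 */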

static bool journal_entry_close(struct journal *j)
{
        bool ret;

        spin_lock(&j->lock);
        ret = __journal_entry_close(j);
        spin_unlock(&j->lock);

        return ret;
}

/*
 * should _only_ be called from journal_res_get() - when we actually want a
 * journal reservation - an open journal entry means the journal is dirty:
 *
 * returns:
 * 0:           success
 * -ENOSPC:     journal currently full, must invoke reclaim
 * -EAGAIN:     journal blocked, must wait
 * -EROFS:      insufficient rw devices or journal error
 */
static int journal_entry_open(struct journal *j)
{
        struct journal_buf *buf = journal_cur_buf(j);
        union journal_res_state old, new;
        int u64s;
        u64 v;

        lockdep_assert_held(&j->lock);
        BUG_ON(journal_entry_is_open(j));

        if (j->blocked)
                return -EAGAIN;

        if (j->cur_entry_error)
                return j->cur_entry_error;

        BUG_ON(!j->cur_entry_sectors);

        buf->u64s_reserved      = j->entry_u64s_reserved;
        buf->disk_sectors       = j->cur_entry_sectors;
        buf->sectors            = min(buf->disk_sectors, buf->buf_size >> 9);

        u64s = (int) (buf->sectors << 9) / sizeof(u64) -
                journal_entry_overhead(j);
        u64s  = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);

        if (u64s <= le32_to_cpu(buf->data->u64s))
                return -ENOSPC;

        /*
         * Must be set before marking the journal entry as open:
         */
        j->cur_entry_u64s = u64s;

        v = atomic64_read(&j->reservations.counter);
        do {
                old.v = new.v = v;

                if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
                        return -EROFS;

                /* Handle any already added entries */
                new.cur_entry_offset = le32_to_cpu(buf->data->u64s);

                EBUG_ON(journal_state_count(new, new.idx));
                journal_state_inc(&new);
        } while ((v = atomic64_cmpxchg(&j->reservations.counter,
                                       old.v, new.v)) != old.v);

        if (j->res_get_blocked_start)
                bch2_time_stats_update(j->blocked_time,
                                       j->res_get_blocked_start);
        j->res_get_blocked_start = 0;

        mod_delayed_work(system_freezable_wq,
                         &j->write_work,
                         msecs_to_jiffies(j->write_delay_ms));
        journal_wake(j);
        return 0;
}

static bool journal_quiesced(struct journal *j)
{
        union journal_res_state state = READ_ONCE(j->reservations);
        bool ret = !state.prev_buf_unwritten && !__journal_entry_is_open(state);

        if (!ret)
                journal_entry_close(j);
        return ret;
}

static void journal_quiesce(struct journal *j)
{
        wait_event(j->wait, journal_quiesced(j));
}

static void journal_write_work(struct work_struct *work)
{
        struct journal *j = container_of(work, struct journal, write_work.work);

        journal_entry_close(j);
}

/*
 * Given an inode number, if that inode number has data in the journal that
 * hasn't yet been flushed, return the journal sequence number that needs to be
 * flushed:
 */
u64 bch2_inode_journal_seq(struct journal *j, u64 inode)
{
        size_t h = hash_64(inode, ilog2(sizeof(j->buf[0].has_inode) * 8));
        u64 seq = 0;

        if (!test_bit(h, j->buf[0].has_inode) &&
            !test_bit(h, j->buf[1].has_inode))
                return 0;

        spin_lock(&j->lock);
        if (test_bit(h, journal_cur_buf(j)->has_inode))
                seq = journal_cur_seq(j);
        else if (test_bit(h, journal_prev_buf(j)->has_inode))
                seq = journal_cur_seq(j) - 1;
        spin_unlock(&j->lock);

        return seq;
}
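
/*
 * Note that has_inode is effectively a small bloom filter: a hash
 * collision can produce a spurious nonzero return here, which is
 * harmless - the caller just flushes a journal entry it didn't strictly
 * need to.
 */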

static int __journal_res_get(struct journal *j, struct journal_res *res,
                             unsigned flags)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_buf *buf;
        bool can_discard;
        int ret;
retry:
        if (journal_res_get_fast(j, res, flags))
                return 0;

        if (bch2_journal_error(j))
                return -EROFS;

        spin_lock(&j->lock);

        /*
         * Recheck after taking the lock, so we don't race with another thread
         * that just did journal_entry_open() and call journal_entry_close()
         * unnecessarily
         */
        if (journal_res_get_fast(j, res, flags)) {
                spin_unlock(&j->lock);
                return 0;
        }

        if (!(flags & JOURNAL_RES_GET_RESERVED) &&
            !test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags)) {
                /*
                 * Don't want to close current journal entry, just need to
                 * invoke reclaim:
                 */
                ret = -ENOSPC;
                goto unlock;
        }

        /*
         * If we couldn't get a reservation because the current buf filled up,
         * and we had room for a bigger entry on disk, signal that we want to
         * realloc the journal bufs:
         */
        buf = journal_cur_buf(j);
        if (journal_entry_is_open(j) &&
            buf->buf_size >> 9 < buf->disk_sectors &&
            buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
                j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);

        if (journal_entry_is_open(j) &&
            !__journal_entry_close(j)) {
                /*
                 * We failed to get a reservation on the current open journal
                 * entry because it's full, and we can't close it because
                 * there's still a previous one in flight:
                 */
                trace_journal_entry_full(c);
                ret = -EAGAIN;
        } else {
                ret = journal_entry_open(j);
        }
unlock:
        if ((ret == -EAGAIN || ret == -ENOSPC) &&
            !j->res_get_blocked_start)
                j->res_get_blocked_start = local_clock() ?: 1;

        can_discard = j->can_discard;
        spin_unlock(&j->lock);

        if (!ret)
                goto retry;

        if (ret == -ENOSPC) {
                BUG_ON(!can_discard && (flags & JOURNAL_RES_GET_RESERVED));

                /*
                 * Journal is full - can't rely on reclaim from work item due to
                 * freezing:
                 */
                trace_journal_full(c);

                if (!(flags & JOURNAL_RES_GET_NONBLOCK)) {
                        if (can_discard) {
                                bch2_journal_do_discards(j);
                                goto retry;
                        }

                        if (mutex_trylock(&j->reclaim_lock)) {
                                bch2_journal_reclaim(j);
                                mutex_unlock(&j->reclaim_lock);
                        }
                }

                ret = -EAGAIN;
        }

        return ret;
}

/*
 * Essentially the entry function to the journalling code. When bcachefs is
 * doing a btree insert, it calls this function to get the current journal
 * write. The journal write is the structure used to set up journal writes;
 * the calling function will then add its keys to the structure, queuing them
 * for the next write.
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks.
 */
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
                                  unsigned flags)
{
        int ret;

        closure_wait_event(&j->async_wait,
                   (ret = __journal_res_get(j, res, flags)) != -EAGAIN ||
                   (flags & JOURNAL_RES_GET_NONBLOCK));
        return ret;
}
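
/*
 * Minimal usage sketch (this mirrors bch2_journal_meta(), below):
 * reserve space, emit keys into the in-memory journal entry, then drop
 * the reservation:
 *
 *        struct journal_res res;
 *
 *        memset(&res, 0, sizeof(res));
 *        ret = bch2_journal_res_get(j, &res, jset_u64s(u64s), 0);
 *        if (ret)
 *                return ret;
 *        ... copy keys into the entry ...
 *        bch2_journal_res_put(j, &res);
 */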

/* journal_preres: */

static bool journal_preres_available(struct journal *j,
                                     struct journal_preres *res,
                                     unsigned new_u64s)
{
        bool ret = bch2_journal_preres_get_fast(j, res, new_u64s);

        if (!ret)
                bch2_journal_reclaim_work(&j->reclaim_work.work);

        return ret;
}

int __bch2_journal_preres_get(struct journal *j,
                              struct journal_preres *res,
                              unsigned new_u64s)
{
        int ret;

        closure_wait_event(&j->preres_wait,
                   (ret = bch2_journal_error(j)) ||
                   journal_preres_available(j, res, new_u64s));
        return ret;
}

/* journal_entry_res: */

void bch2_journal_entry_res_resize(struct journal *j,
                                   struct journal_entry_res *res,
                                   unsigned new_u64s)
{
        union journal_res_state state;
        int d = new_u64s - res->u64s;

        spin_lock(&j->lock);

        j->entry_u64s_reserved += d;
        if (d <= 0)
                goto out;

        j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
        smp_mb();
        state = READ_ONCE(j->reservations);

        if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
            state.cur_entry_offset > j->cur_entry_u64s) {
                j->cur_entry_u64s += d;
                /*
                 * Not enough room in current journal entry, have to flush it:
                 */
                __journal_entry_close(j);
        } else {
                journal_cur_buf(j)->u64s_reserved += d;
        }
out:
        spin_unlock(&j->lock);
        res->u64s += d;
}

/* journal flushing: */

u64 bch2_journal_last_unwritten_seq(struct journal *j)
{
        u64 seq;

        spin_lock(&j->lock);
        seq = journal_cur_seq(j);
        if (j->reservations.prev_buf_unwritten)
                seq--;
        spin_unlock(&j->lock);

        return seq;
}

/**
 * bch2_journal_open_seq_async - try to open a new journal entry if @seq isn't
 * open yet, or wait if we cannot
 *
 * used by the btree interior update machinery, when it needs to write a new
 * btree root - every journal entry contains the roots of all the btrees, so it
 * doesn't need to bother with getting a journal reservation
 */
int bch2_journal_open_seq_async(struct journal *j, u64 seq, struct closure *cl)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        int ret;

        spin_lock(&j->lock);

        /*
         * Can't try to open more than one sequence number ahead:
         */
        BUG_ON(journal_cur_seq(j) < seq && !journal_entry_is_open(j));

        if (journal_cur_seq(j) > seq ||
            journal_entry_is_open(j)) {
                spin_unlock(&j->lock);
                return 0;
        }

        if (journal_cur_seq(j) < seq &&
            !__journal_entry_close(j)) {
                /* haven't finished writing out the previous one: */
                trace_journal_entry_full(c);
                ret = -EAGAIN;
        } else {
                BUG_ON(journal_cur_seq(j) != seq);

                ret = journal_entry_open(j);
        }

        if ((ret == -EAGAIN || ret == -ENOSPC) &&
            !j->res_get_blocked_start)
                j->res_get_blocked_start = local_clock() ?: 1;

        if (ret == -EAGAIN || ret == -ENOSPC)
                closure_wait(&j->async_wait, cl);

        spin_unlock(&j->lock);

        if (ret == -ENOSPC) {
                trace_journal_full(c);
                bch2_journal_reclaim_work(&j->reclaim_work.work);
                ret = -EAGAIN;
        }

        return ret;
}

static int journal_seq_error(struct journal *j, u64 seq)
{
        union journal_res_state state = READ_ONCE(j->reservations);

        if (seq == journal_cur_seq(j))
                return bch2_journal_error(j);

        if (seq + 1 == journal_cur_seq(j) &&
            !state.prev_buf_unwritten &&
            seq > j->seq_ondisk)
                return -EIO;

        return 0;
}

static inline struct journal_buf *
journal_seq_to_buf(struct journal *j, u64 seq)
{
        /* seq should be for a journal entry that has been opened: */
        BUG_ON(seq > journal_cur_seq(j));
        BUG_ON(seq == journal_cur_seq(j) &&
               j->reservations.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL);

        if (seq == journal_cur_seq(j))
                return journal_cur_buf(j);
        if (seq + 1 == journal_cur_seq(j) &&
            j->reservations.prev_buf_unwritten)
                return journal_prev_buf(j);
        return NULL;
}

/**
 * bch2_journal_wait_on_seq - wait for a journal entry to be written
 *
 * does _not_ cause @seq to be written immediately - if there is no other
 * activity to cause the relevant journal entry to be filled up or flushed it
 * can wait for an arbitrary amount of time (up to @j->write_delay_ms, which is
 * configurable).
 */
void bch2_journal_wait_on_seq(struct journal *j, u64 seq,
                              struct closure *parent)
{
        struct journal_buf *buf;

        spin_lock(&j->lock);

        if ((buf = journal_seq_to_buf(j, seq))) {
                if (!closure_wait(&buf->wait, parent))
                        BUG();

                if (seq == journal_cur_seq(j)) {
                        smp_mb();
                        if (bch2_journal_error(j))
                                closure_wake_up(&buf->wait);
                }
        }

        spin_unlock(&j->lock);
}

/**
 * bch2_journal_flush_seq_async - wait for a journal entry to be written
 *
 * like bch2_journal_wait_on_seq, except that it triggers a write immediately if
 * necessary
 */
void bch2_journal_flush_seq_async(struct journal *j, u64 seq,
                                  struct closure *parent)
{
        struct journal_buf *buf;

        spin_lock(&j->lock);

        if (parent &&
            (buf = journal_seq_to_buf(j, seq)))
                if (!closure_wait(&buf->wait, parent))
                        BUG();

        if (seq == journal_cur_seq(j))
                __journal_entry_close(j);
        spin_unlock(&j->lock);
}

static int journal_seq_flushed(struct journal *j, u64 seq)
{
        int ret;

        spin_lock(&j->lock);
        ret = seq <= j->seq_ondisk ? 1 : journal_seq_error(j, seq);

        if (seq == journal_cur_seq(j))
                __journal_entry_close(j);
        spin_unlock(&j->lock);

        return ret;
}

int bch2_journal_flush_seq(struct journal *j, u64 seq)
{
        u64 start_time = local_clock();
        int ret, ret2;

        ret = wait_event_killable(j->wait, (ret2 = journal_seq_flushed(j, seq)));

        bch2_time_stats_update(j->flush_seq_time, start_time);

        return ret ?: ret2 < 0 ? ret2 : 0;
}

/**
 * bch2_journal_meta_async - force a journal entry to be written
 */
void bch2_journal_meta_async(struct journal *j, struct closure *parent)
{
        struct journal_res res;

        memset(&res, 0, sizeof(res));

        bch2_journal_res_get(j, &res, jset_u64s(0), 0);
        bch2_journal_res_put(j, &res);

        bch2_journal_flush_seq_async(j, res.seq, parent);
}

int bch2_journal_meta(struct journal *j)
{
        struct journal_res res;
        int ret;

        memset(&res, 0, sizeof(res));

        ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
        if (ret)
                return ret;

        bch2_journal_res_put(j, &res);

        return bch2_journal_flush_seq(j, res.seq);
}

/*
 * bch2_journal_flush_async - if there is an open journal entry, or a journal
 * still being written, write it and wait for the write to complete
 */
void bch2_journal_flush_async(struct journal *j, struct closure *parent)
{
        u64 seq, journal_seq;

        spin_lock(&j->lock);
        journal_seq = journal_cur_seq(j);

        if (journal_entry_is_open(j)) {
                seq = journal_seq;
        } else if (journal_seq) {
                seq = journal_seq - 1;
        } else {
                spin_unlock(&j->lock);
                return;
        }
        spin_unlock(&j->lock);

        bch2_journal_flush_seq_async(j, seq, parent);
}

int bch2_journal_flush(struct journal *j)
{
        u64 seq, journal_seq;

        spin_lock(&j->lock);
        journal_seq = journal_cur_seq(j);

        if (journal_entry_is_open(j)) {
                seq = journal_seq;
        } else if (journal_seq) {
                seq = journal_seq - 1;
        } else {
                spin_unlock(&j->lock);
                return 0;
        }
        spin_unlock(&j->lock);

        return bch2_journal_flush_seq(j, seq);
}

/* block/unblock the journal: */

void bch2_journal_unblock(struct journal *j)
{
        spin_lock(&j->lock);
        j->blocked--;
        spin_unlock(&j->lock);

        journal_wake(j);
}

void bch2_journal_block(struct journal *j)
{
        spin_lock(&j->lock);
        j->blocked++;
        spin_unlock(&j->lock);

        journal_quiesce(j);
}

/* allocate journal on a device: */

static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
                                         bool new_fs, struct closure *cl)
{
        struct bch_fs *c = ca->fs;
        struct journal_device *ja = &ca->journal;
        struct bch_sb_field_journal *journal_buckets;
        u64 *new_bucket_seq = NULL, *new_buckets = NULL;
        int ret = 0;

        /* don't handle reducing nr of buckets yet: */
        if (nr <= ja->nr)
                return 0;

        ret = -ENOMEM;
        new_buckets     = kzalloc(nr * sizeof(u64), GFP_KERNEL);
        new_bucket_seq  = kzalloc(nr * sizeof(u64), GFP_KERNEL);
        if (!new_buckets || !new_bucket_seq)
                goto err;

        journal_buckets = bch2_sb_resize_journal(&ca->disk_sb,
                                                 nr + sizeof(*journal_buckets) / sizeof(u64));
        if (!journal_buckets)
                goto err;

        /*
         * We may be called from the device add path, before the new device has
         * actually been added to the running filesystem:
         */
        if (c)
                spin_lock(&c->journal.lock);

        memcpy(new_buckets,     ja->buckets,    ja->nr * sizeof(u64));
        memcpy(new_bucket_seq,  ja->bucket_seq, ja->nr * sizeof(u64));
        swap(new_buckets,       ja->buckets);
        swap(new_bucket_seq,    ja->bucket_seq);

        if (c)
                spin_unlock(&c->journal.lock);

        while (ja->nr < nr) {
                struct open_bucket *ob = NULL;
                unsigned pos;
                long bucket;

                if (new_fs) {
                        bucket = bch2_bucket_alloc_new_fs(ca);
                        if (bucket < 0) {
                                ret = -ENOSPC;
                                goto err;
                        }
                } else {
                        ob = bch2_bucket_alloc(c, ca, RESERVE_ALLOC,
                                               false, cl);
                        if (IS_ERR(ob)) {
                                ret = cl ? -EAGAIN : -ENOSPC;
                                goto err;
                        }

                        bucket = sector_to_bucket(ca, ob->ptr.offset);
                }

                if (c) {
                        percpu_down_read(&c->mark_lock);
                        spin_lock(&c->journal.lock);
                }

                pos = ja->nr ? (ja->cur_idx + 1) % ja->nr : 0;
                __array_insert_item(ja->buckets,                ja->nr, pos);
                __array_insert_item(ja->bucket_seq,             ja->nr, pos);
                __array_insert_item(journal_buckets->buckets,   ja->nr, pos);
                ja->nr++;

                ja->buckets[pos] = bucket;
                ja->bucket_seq[pos] = 0;
                journal_buckets->buckets[pos] = cpu_to_le64(bucket);

                if (pos <= ja->discard_idx)
                        ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
                if (pos <= ja->dirty_idx_ondisk)
                        ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;
                if (pos <= ja->dirty_idx)
                        ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;
                if (pos <= ja->cur_idx)
                        ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

                bch2_mark_metadata_bucket(c, ca, bucket, BCH_DATA_JOURNAL,
                                          ca->mi.bucket_size,
                                          gc_phase(GC_PHASE_SB),
                                          0);

                if (c) {
                        spin_unlock(&c->journal.lock);
                        percpu_up_read(&c->mark_lock);
                }

                if (!new_fs)
                        bch2_open_bucket_put(c, ob);
        }

        ret = 0;
err:
        kfree(new_bucket_seq);
        kfree(new_buckets);

        return ret;
}

/*
 * Allocate more journal space at runtime - not currently making use of it,
 * but the code works:
 */
int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
                                unsigned nr)
{
        struct journal_device *ja = &ca->journal;
        struct closure cl;
        unsigned current_nr;
        int ret;

        closure_init_stack(&cl);

        do {
                struct disk_reservation disk_res = { 0, 0 };

                closure_sync(&cl);

                mutex_lock(&c->sb_lock);
                current_nr = ja->nr;

                /*
                 * note: journal buckets aren't really counted as _sectors_ used yet, so
                 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
                 * when space used goes up without a reservation - but we do need the
                 * reservation to ensure we'll actually be able to allocate:
                 */

                if (bch2_disk_reservation_get(c, &disk_res,
                                              bucket_to_sector(ca, nr - ja->nr), 1, 0)) {
                        mutex_unlock(&c->sb_lock);
                        return -ENOSPC;
                }

                ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);

                bch2_disk_reservation_put(c, &disk_res);

                if (ja->nr != current_nr)
                        bch2_write_super(c);
                mutex_unlock(&c->sb_lock);
        } while (ret == -EAGAIN);

        return ret;
}

int bch2_dev_journal_alloc(struct bch_dev *ca)
{
        unsigned nr;

        if (dynamic_fault("bcachefs:add:journal_alloc"))
                return -ENOMEM;

        /*
         * clamp journal size to 1024 buckets or 512MB (in sectors), whichever
         * is smaller:
         */
        nr = clamp_t(unsigned, ca->mi.nbuckets >> 8,
                     BCH_JOURNAL_BUCKETS_MIN,
                     min(1 << 10,
                         (1 << 20) / ca->mi.bucket_size));
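        /*
         * e.g. with 1MB buckets (bucket_size == 2048 sectors),
         * (1 << 20) / 2048 == 512, so the journal is capped at 512 buckets;
         * a device with 2^20 buckets would otherwise ask for
         * 2^20 >> 8 == 4096 of them.
         */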

        return __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
}

/* startup/shutdown: */

static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
{
        union journal_res_state state;
        struct journal_buf *w;
        bool ret;

        spin_lock(&j->lock);
        state = READ_ONCE(j->reservations);
        w = j->buf + !state.idx;

        ret = state.prev_buf_unwritten &&
                bch2_bkey_has_device(bkey_i_to_s_c(&w->key), dev_idx);
        spin_unlock(&j->lock);

        return ret;
}

void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
{
        wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
}

void bch2_fs_journal_stop(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);

        bch2_journal_flush_all_pins(j);

        wait_event(j->wait, journal_entry_close(j));

        /* do we need to write another journal entry? */
        if (test_bit(JOURNAL_NOT_EMPTY, &j->flags) ||
            c->btree_roots_dirty)
                bch2_journal_meta(j);

        journal_quiesce(j);

        BUG_ON(!bch2_journal_error(j) &&
               test_bit(JOURNAL_NOT_EMPTY, &j->flags));

        cancel_delayed_work_sync(&j->write_work);
        cancel_delayed_work_sync(&j->reclaim_work);
}

int bch2_fs_journal_start(struct journal *j, u64 cur_seq,
                          struct list_head *journal_entries)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_entry_pin_list *p;
        struct journal_replay *i;
        u64 last_seq = cur_seq, nr, seq;

        if (!list_empty(journal_entries))
                last_seq = le64_to_cpu(list_first_entry(journal_entries,
                                                        struct journal_replay,
                                                        list)->j.seq);

        nr = cur_seq - last_seq;

        if (nr + 1 > j->pin.size) {
                free_fifo(&j->pin);
                init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
                if (!j->pin.data) {
                        bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
                        return -ENOMEM;
                }
        }

        j->replay_journal_seq   = last_seq;
        j->replay_journal_seq_end = cur_seq;
        j->last_seq_ondisk      = last_seq;
        j->pin.front            = last_seq;
        j->pin.back             = cur_seq;
        atomic64_set(&j->seq, cur_seq - 1);
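
        /*
         * With front/back set to last_seq/cur_seq above, the pin fifo is
         * indexed directly by journal sequence number, which is what
         * journal_seq_pin() relies on:
         */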

        fifo_for_each_entry_ptr(p, &j->pin, seq) {
                INIT_LIST_HEAD(&p->list);
                INIT_LIST_HEAD(&p->flushed);
                atomic_set(&p->count, 1);
                p->devs.nr = 0;
        }

        list_for_each_entry(i, journal_entries, list) {
                seq = le64_to_cpu(i->j.seq);

                BUG_ON(seq < last_seq || seq >= cur_seq);

                journal_seq_pin(j, seq)->devs = i->devs;
        }

        spin_lock(&j->lock);

        set_bit(JOURNAL_STARTED, &j->flags);

        journal_pin_new_entry(j, 1);
        bch2_journal_buf_init(j);

        c->last_bucket_seq_cleanup = journal_cur_seq(j);

        bch2_journal_space_available(j);
        spin_unlock(&j->lock);

        return 0;
}

/* init/exit: */

void bch2_dev_journal_exit(struct bch_dev *ca)
{
        kfree(ca->journal.bio);
        kfree(ca->journal.buckets);
        kfree(ca->journal.bucket_seq);

        ca->journal.bio         = NULL;
        ca->journal.buckets     = NULL;
        ca->journal.bucket_seq  = NULL;
}

int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
        struct journal_device *ja = &ca->journal;
        struct bch_sb_field_journal *journal_buckets =
                bch2_sb_get_journal(sb);
        unsigned i;

        ja->nr = bch2_nr_journal_buckets(journal_buckets);

        ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
        if (!ja->bucket_seq)
                return -ENOMEM;

        ca->journal.bio = bio_kmalloc(GFP_KERNEL,
                        DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE));
        if (!ca->journal.bio)
                return -ENOMEM;

        ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
        if (!ja->buckets)
                return -ENOMEM;

        for (i = 0; i < ja->nr; i++)
                ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);

        return 0;
}

void bch2_fs_journal_exit(struct journal *j)
{
        kvpfree(j->buf[1].data, j->buf[1].buf_size);
        kvpfree(j->buf[0].data, j->buf[0].buf_size);
        free_fifo(&j->pin);
}

int bch2_fs_journal_init(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        static struct lock_class_key res_key;
        int ret = 0;

        pr_verbose_init(c->opts, "");

        spin_lock_init(&j->lock);
        spin_lock_init(&j->err_lock);
        init_waitqueue_head(&j->wait);
        INIT_DELAYED_WORK(&j->write_work, journal_write_work);
        INIT_DELAYED_WORK(&j->reclaim_work, bch2_journal_reclaim_work);
        init_waitqueue_head(&j->pin_flush_wait);
        mutex_init(&j->reclaim_lock);
        mutex_init(&j->discard_lock);

        lockdep_init_map(&j->res_map, "journal res", &res_key, 0);

        j->buf[0].buf_size      = JOURNAL_ENTRY_SIZE_MIN;
        j->buf[1].buf_size      = JOURNAL_ENTRY_SIZE_MIN;
        j->write_delay_ms       = 1000;
        j->reclaim_delay_ms     = 100;

        /* Btree roots: */
        j->entry_u64s_reserved +=
                BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_EXTENT_U64s_MAX);

        atomic64_set(&j->reservations.counter,
                ((union journal_res_state)
                 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);

        if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
            !(j->buf[0].data = kvpmalloc(j->buf[0].buf_size, GFP_KERNEL)) ||
            !(j->buf[1].data = kvpmalloc(j->buf[1].buf_size, GFP_KERNEL))) {
                ret = -ENOMEM;
                goto out;
        }

        j->pin.front = j->pin.back = 1;
out:
        pr_verbose_init(c->opts, "ret %i", ret);
        return ret;
}

/* debug: */

ssize_t bch2_journal_print_debug(struct journal *j, char *buf)
{
        struct printbuf out = _PBUF(buf, PAGE_SIZE);
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        union journal_res_state s;
        struct bch_dev *ca;
        unsigned iter;

        rcu_read_lock();
        spin_lock(&j->lock);
        s = READ_ONCE(j->reservations);

        pr_buf(&out,
               "active journal entries:\t%llu\n"
               "seq:\t\t\t%llu\n"
               "last_seq:\t\t%llu\n"
               "last_seq_ondisk:\t%llu\n"
               "prereserved:\t\t%u/%u\n"
               "current entry sectors:\t%u\n"
               "current entry:\t\t",
               fifo_used(&j->pin),
               journal_cur_seq(j),
               journal_last_seq(j),
               j->last_seq_ondisk,
               j->prereserved.reserved,
               j->prereserved.remaining,
               j->cur_entry_sectors);

        switch (s.cur_entry_offset) {
        case JOURNAL_ENTRY_ERROR_VAL:
                pr_buf(&out, "error\n");
                break;
        case JOURNAL_ENTRY_CLOSED_VAL:
                pr_buf(&out, "closed\n");
                break;
        default:
                pr_buf(&out, "%u/%u\n",
                       s.cur_entry_offset,
                       j->cur_entry_u64s);
                break;
        }

        pr_buf(&out,
               "current entry refs:\t%u\n"
               "prev entry unwritten:\t",
               journal_state_count(s, s.idx));

        if (s.prev_buf_unwritten)
                pr_buf(&out, "yes, ref %u sectors %u\n",
                       journal_state_count(s, !s.idx),
                       journal_prev_buf(j)->sectors);
        else
                pr_buf(&out, "no\n");

        pr_buf(&out,
               "need write:\t\t%i\n"
               "replay done:\t\t%i\n",
               test_bit(JOURNAL_NEED_WRITE,     &j->flags),
               test_bit(JOURNAL_REPLAY_DONE,    &j->flags));

        for_each_member_device_rcu(ca, c, iter,
                                   &c->rw_devs[BCH_DATA_JOURNAL]) {
                struct journal_device *ja = &ca->journal;

                if (!ja->nr)
                        continue;

                pr_buf(&out,
                       "dev %u:\n"
                       "\tnr\t\t%u\n"
                       "\tavailable\t%u:%u\n"
                       "\tdiscard_idx\t\t%u\n"
                       "\tdirty_idx_ondisk\t%u (seq %llu)\n"
                       "\tdirty_idx\t\t%u (seq %llu)\n"
                       "\tcur_idx\t\t%u (seq %llu)\n",
                       iter, ja->nr,
                       bch2_journal_dev_buckets_available(j, ja, journal_space_discarded),
                       ja->sectors_free,
                       ja->discard_idx,
                       ja->dirty_idx_ondisk,    ja->bucket_seq[ja->dirty_idx_ondisk],
                       ja->dirty_idx,           ja->bucket_seq[ja->dirty_idx],
                       ja->cur_idx,             ja->bucket_seq[ja->cur_idx]);
        }

        spin_unlock(&j->lock);
        rcu_read_unlock();

        return out.pos - buf;
}

ssize_t bch2_journal_print_pins(struct journal *j, char *buf)
{
        struct printbuf out = _PBUF(buf, PAGE_SIZE);
        struct journal_entry_pin_list *pin_list;
        struct journal_entry_pin *pin;
        u64 i;

        spin_lock(&j->lock);
        fifo_for_each_entry_ptr(pin_list, &j->pin, i) {
                pr_buf(&out, "%llu: count %u\n",
                       i, atomic_read(&pin_list->count));

                list_for_each_entry(pin, &pin_list->list, list)
                        pr_buf(&out, "\t%p %pf\n",
                               pin, pin->flush);

                if (!list_empty(&pin_list->flushed))
                        pr_buf(&out, "flushed:\n");

                list_for_each_entry(pin, &pin_list->flushed, list)
                        pr_buf(&out, "\t%p %pf\n",
                               pin, pin->flush);
        }
        spin_unlock(&j->lock);

        return out.pos - buf;
}