libbcachefs/journal.c
1 /*
2  * bcachefs journalling code, for btree insertions
3  *
4  * Copyright 2012 Google, Inc.
5  */
6
7 #include "bcachefs.h"
8 #include "alloc_foreground.h"
9 #include "bkey_methods.h"
10 #include "btree_gc.h"
11 #include "buckets.h"
12 #include "journal.h"
13 #include "journal_io.h"
14 #include "journal_reclaim.h"
15 #include "journal_seq_blacklist.h"
16 #include "super-io.h"
17
18 #include <trace/events/bcachefs.h>
19
20 static bool __journal_entry_is_open(union journal_res_state state)
21 {
22         return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
23 }
24
25 static bool journal_entry_is_open(struct journal *j)
26 {
27         return __journal_entry_is_open(j->reservations);
28 }
29
30 static void journal_pin_new_entry(struct journal *j, int count)
31 {
32         struct journal_entry_pin_list *p;
33
34         /*
35          * The fifo_push() needs to happen at the same time as j->seq is
36          * incremented for journal_last_seq() to be calculated correctly
37          */
38         atomic64_inc(&j->seq);
39         p = fifo_push_ref(&j->pin);
40
41         INIT_LIST_HEAD(&p->list);
42         INIT_LIST_HEAD(&p->flushed);
43         atomic_set(&p->count, count);
44         p->devs.nr = 0;
45 }
46
47 static void bch2_journal_buf_init(struct journal *j)
48 {
49         struct journal_buf *buf = journal_cur_buf(j);
50
51         memset(buf->has_inode, 0, sizeof(buf->has_inode));
52
53         memset(buf->data, 0, sizeof(*buf->data));
54         buf->data->seq  = cpu_to_le64(journal_cur_seq(j));
55         buf->data->u64s = 0;
56 }
57
58 void bch2_journal_halt(struct journal *j)
59 {
60         union journal_res_state old, new;
61         u64 v = atomic64_read(&j->reservations.counter);
62
63         do {
64                 old.v = new.v = v;
65                 if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
66                         return;
67
68                 new.cur_entry_offset = JOURNAL_ENTRY_ERROR_VAL;
69         } while ((v = atomic64_cmpxchg(&j->reservations.counter,
70                                        old.v, new.v)) != old.v);
71
72         journal_wake(j);
73         closure_wake_up(&journal_cur_buf(j)->wait);
74 }
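/*
 * Note: bch2_journal_halt() above, and __journal_entry_close() and
 * journal_entry_open() below, all update the packed journal state the same
 * way - snapshot j->reservations.counter, compute the desired new state, and
 * retry with atomic64_cmpxchg() until no other thread has raced with us.
 * A minimal sketch of the idiom:
 *
 *	v = atomic64_read(&j->reservations.counter);
 *	do {
 *		old.v = new.v = v;
 *		... modify new ...
 *	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
 *				       old.v, new.v)) != old.v);
 */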
75
76 /* journal entry close/open: */
77
78 void __bch2_journal_buf_put(struct journal *j, bool need_write_just_set)
79 {
80         if (!need_write_just_set &&
81             test_bit(JOURNAL_NEED_WRITE, &j->flags))
82                 bch2_time_stats_update(j->delay_time,
83                                        j->need_write_time);
84
85         clear_bit(JOURNAL_NEED_WRITE, &j->flags);
86
87         closure_call(&j->io, bch2_journal_write, system_highpri_wq, NULL);
88 }
89
90 /*
91  * Returns true if journal entry is now closed:
92  */
93 static bool __journal_entry_close(struct journal *j)
94 {
95         struct bch_fs *c = container_of(j, struct bch_fs, journal);
96         struct journal_buf *buf = journal_cur_buf(j);
97         union journal_res_state old, new;
98         u64 v = atomic64_read(&j->reservations.counter);
99         bool set_need_write = false;
100         unsigned sectors;
101
102         lockdep_assert_held(&j->lock);
103
104         do {
105                 old.v = new.v = v;
106                 if (old.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL)
107                         return true;
108
109                 if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL) {
110                         /* this entry will never be written: */
111                         closure_wake_up(&buf->wait);
112                         return true;
113                 }
114
115                 if (!test_bit(JOURNAL_NEED_WRITE, &j->flags)) {
116                         set_bit(JOURNAL_NEED_WRITE, &j->flags);
117                         j->need_write_time = local_clock();
118                         set_need_write = true;
119                 }
120
121                 if (new.prev_buf_unwritten)
122                         return false;
123
124                 new.cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL;
125                 new.idx++;
126                 new.prev_buf_unwritten = 1;
127
128                 BUG_ON(journal_state_count(new, new.idx));
129         } while ((v = atomic64_cmpxchg(&j->reservations.counter,
130                                        old.v, new.v)) != old.v);
131
132         buf->data->u64s         = cpu_to_le32(old.cur_entry_offset);
133
134         sectors = vstruct_blocks_plus(buf->data, c->block_bits,
135                                       buf->u64s_reserved) << c->block_bits;
136         BUG_ON(sectors > buf->sectors);
137         buf->sectors = sectors;
138
139         bkey_extent_init(&buf->key);
140
141         /*
142          * We have to set last_seq here, _before_ opening a new journal entry:
143          *
144          * A thread may replace an old pin with a new pin on its current
145          * journal reservation - the expectation being that the journal will
146          * contain either what the old pin protected or what the new pin
147          * protects.
148          *
149          * After the old pin is dropped journal_last_seq() won't include the old
150          * pin, so we can only write the updated last_seq on the entry that
151          * contains whatever the new pin protects.
152          *
153          * Restated, we can _not_ update last_seq for a given entry if there
154          * could be a newer entry open with reservations/pins that have been
155          * taken against it.
156          *
157          * Hence, we want to update/set last_seq on the current journal entry right
158          * before we open a new one:
159          */
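	/*
	 * Illustrative example: say the entry being closed here is seq 7 and
	 * seq 8 will be opened next. Once 8 is open, a thread with a
	 * reservation in it may re-pin its data there and drop a pin on, say,
	 * seq 3 - journal_last_seq() then advances past 3, but the data that
	 * replaces it only exists in entry 8, so that newer last_seq may only
	 * be written into entry 8's header, never entry 7's. Sampling
	 * last_seq here, before entry 8 can be opened, guarantees that.
	 */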
160         buf->data->last_seq     = cpu_to_le64(journal_last_seq(j));
161
162         if (journal_entry_empty(buf->data))
163                 clear_bit(JOURNAL_NOT_EMPTY, &j->flags);
164         else
165                 set_bit(JOURNAL_NOT_EMPTY, &j->flags);
166
167         journal_pin_new_entry(j, 1);
168
169         bch2_journal_buf_init(j);
170
171         cancel_delayed_work(&j->write_work);
172
173         bch2_journal_space_available(j);
174
175         bch2_journal_buf_put(j, old.idx, set_need_write);
176         return true;
177 }
178
179 static bool journal_entry_close(struct journal *j)
180 {
181         bool ret;
182
183         spin_lock(&j->lock);
184         ret = __journal_entry_close(j);
185         spin_unlock(&j->lock);
186
187         return ret;
188 }
189
190 /*
191  * should _only_ be called from journal_res_get() - when we actually want a
192  * journal reservation - an open journal entry means the journal is dirty:
193  *
194  * returns:
195  * 0:           success
196  * -ENOSPC:     journal currently full, must invoke reclaim
197  * -EAGAIN:     journal blocked, must wait
198  * -EROFS:      insufficient rw devices or journal error
199  */
200 static int journal_entry_open(struct journal *j)
201 {
202         struct journal_buf *buf = journal_cur_buf(j);
203         union journal_res_state old, new;
204         int u64s;
205         u64 v;
206
207         lockdep_assert_held(&j->lock);
208         BUG_ON(journal_entry_is_open(j));
209
210         if (j->blocked)
211                 return -EAGAIN;
212
213         if (j->cur_entry_error)
214                 return j->cur_entry_error;
215
216         BUG_ON(!j->cur_entry_sectors);
217
218         buf->u64s_reserved      = j->entry_u64s_reserved;
219         buf->disk_sectors       = j->cur_entry_sectors;
220         buf->sectors            = min(buf->disk_sectors, buf->buf_size >> 9);
221
222         u64s = (int) (buf->sectors << 9) / sizeof(u64) -
223                 journal_entry_overhead(j);
224         u64s  = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);
225
226         if (u64s <= le32_to_cpu(buf->data->u64s))
227                 return -ENOSPC;
228
229         /*
230          * Must be set before marking the journal entry as open:
231          */
232         j->cur_entry_u64s = u64s;
233
234         v = atomic64_read(&j->reservations.counter);
235         do {
236                 old.v = new.v = v;
237
238                 if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
239                         return -EROFS;
240
241                 /* Handle any already added entries */
242                 new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
243
244                 EBUG_ON(journal_state_count(new, new.idx));
245                 journal_state_inc(&new);
246         } while ((v = atomic64_cmpxchg(&j->reservations.counter,
247                                        old.v, new.v)) != old.v);
248
249         if (j->res_get_blocked_start)
250                 bch2_time_stats_update(j->blocked_time,
251                                        j->res_get_blocked_start);
252         j->res_get_blocked_start = 0;
253
254         mod_delayed_work(system_freezable_wq,
255                          &j->write_work,
256                          msecs_to_jiffies(j->write_delay_ms));
257         journal_wake(j);
258         return 0;
259 }
260
261 static bool journal_quiesced(struct journal *j)
262 {
263         union journal_res_state state = READ_ONCE(j->reservations);
264         bool ret = !state.prev_buf_unwritten && !__journal_entry_is_open(state);
265
266         if (!ret)
267                 journal_entry_close(j);
268         return ret;
269 }
270
271 static void journal_quiesce(struct journal *j)
272 {
273         wait_event(j->wait, journal_quiesced(j));
274 }
275
276 static void journal_write_work(struct work_struct *work)
277 {
278         struct journal *j = container_of(work, struct journal, write_work.work);
279
280         journal_entry_close(j);
281 }
282
283 /*
284  * Given an inode number, if that inode number has data in the journal that
285  * hasn't yet been flushed, return the journal sequence number that needs to be
286  * flushed:
287  */
288 u64 bch2_inode_journal_seq(struct journal *j, u64 inode)
289 {
290         size_t h = hash_64(inode, ilog2(sizeof(j->buf[0].has_inode) * 8));
291         u64 seq = 0;
292
293         if (!test_bit(h, j->buf[0].has_inode) &&
294             !test_bit(h, j->buf[1].has_inode))
295                 return 0;
296
297         spin_lock(&j->lock);
298         if (test_bit(h, journal_cur_buf(j)->has_inode))
299                 seq = journal_cur_seq(j);
300         else if (test_bit(h, journal_prev_buf(j)->has_inode))
301                 seq = journal_cur_seq(j) - 1;
302         spin_unlock(&j->lock);
303
304         return seq;
305 }
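/*
 * Note that has_inode is a small hash bitmap, not an exact index: hash_64()
 * collisions mean this may report a sequence number for an inode with nothing
 * actually in the journal (at worst causing a redundant flush), but it won't
 * miss one that does. Typical caller pattern (illustrative):
 *
 *	seq = bch2_inode_journal_seq(j, inum);
 *	if (seq)
 *		ret = bch2_journal_flush_seq(j, seq);
 */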
306
307 static int __journal_res_get(struct journal *j, struct journal_res *res,
308                              unsigned flags)
309 {
310         struct bch_fs *c = container_of(j, struct bch_fs, journal);
311         struct journal_buf *buf;
312         bool can_discard;
313         int ret;
314 retry:
315         if (journal_res_get_fast(j, res, flags))
316                 return 0;
317
318         if (bch2_journal_error(j))
319                 return -EROFS;
320
321         spin_lock(&j->lock);
322
323         /*
324          * Recheck after taking the lock, so we don't race with another thread
325          * that just did journal_entry_open(), and don't call journal_entry_close()
326          * unnecessarily
327          */
328         if (journal_res_get_fast(j, res, flags)) {
329                 spin_unlock(&j->lock);
330                 return 0;
331         }
332
333         if (!(flags & JOURNAL_RES_GET_RESERVED) &&
334             !test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags)) {
335                 /*
336                  * Don't want to close current journal entry, just need to
337                  * invoke reclaim:
338                  */
339                 ret = -ENOSPC;
340                 goto unlock;
341         }
342
343         /*
344          * If we couldn't get a reservation because the current buf filled up,
345          * and we had room for a bigger entry on disk, signal that we want to
346          * realloc the journal bufs:
347          */
348         buf = journal_cur_buf(j);
349         if (journal_entry_is_open(j) &&
350             buf->buf_size >> 9 < buf->disk_sectors &&
351             buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
352                 j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);
353
354         if (journal_entry_is_open(j) &&
355             !__journal_entry_close(j)) {
356                 /*
357                  * We failed to get a reservation on the current open journal
358                  * entry because it's full, and we can't close it because
359                  * there's still a previous one in flight:
360                  */
361                 trace_journal_entry_full(c);
362                 ret = -EAGAIN;
363         } else {
364                 ret = journal_entry_open(j);
365         }
366 unlock:
367         if ((ret == -EAGAIN || ret == -ENOSPC) &&
368             !j->res_get_blocked_start)
369                 j->res_get_blocked_start = local_clock() ?: 1;
370
371         can_discard = j->can_discard;
372         spin_unlock(&j->lock);
373
374         if (!ret)
375                 goto retry;
376
377         if (ret == -ENOSPC) {
378                 BUG_ON(!can_discard && (flags & JOURNAL_RES_GET_RESERVED));
379
380                 /*
381                  * Journal is full - can't rely on reclaim from work item due to
382                  * freezing:
383                  */
384                 trace_journal_full(c);
385
386                 if (!(flags & JOURNAL_RES_GET_NONBLOCK)) {
387                         if (can_discard) {
388                                 bch2_journal_do_discards(j);
389                                 goto retry;
390                         }
391
392                         if (mutex_trylock(&j->reclaim_lock)) {
393                                 bch2_journal_reclaim(j);
394                                 mutex_unlock(&j->reclaim_lock);
395                         }
396                 }
397
398                 ret = -EAGAIN;
399         }
400
401         return ret;
402 }
403
404 /*
405  * Essentially the entry point to the journaling code. When bcachefs is doing
406  * a btree insert, it calls this function to get the current journal write.
407  * The journal write is the structure used to set up journal writes. The calling
408  * function will then add its keys to the structure, queuing them for the next
409  * write.
410  *
411  * To ensure forward progress, the current task must not be holding any
412  * btree node write locks.
413  */
414 int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
415                                   unsigned flags)
416 {
417         int ret;
418
419         closure_wait_event(&j->async_wait,
420                    (ret = __journal_res_get(j, res, flags)) != -EAGAIN ||
421                    (flags & JOURNAL_RES_GET_NONBLOCK));
422         return ret;
423 }
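/*
 * The usual caller pattern - take a reservation, copy keys into it, release
 * it, optionally flush - can be seen in bch2_journal_meta() below; a minimal
 * sketch (the u64s and flags values are illustrative only):
 *
 *	struct journal_res res;
 *
 *	memset(&res, 0, sizeof(res));
 *	ret = bch2_journal_res_get(j, &res, jset_u64s(u64s), 0);
 *	if (ret)
 *		return ret;
 *	... copy the reserved keys into the open journal entry ...
 *	bch2_journal_res_put(j, &res);
 */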
424
425 /* journal_preres: */
426
427 static bool journal_preres_available(struct journal *j,
428                                      struct journal_preres *res,
429                                      unsigned new_u64s)
430 {
431         bool ret = bch2_journal_preres_get_fast(j, res, new_u64s);
432
433         if (!ret)
434                 bch2_journal_reclaim_work(&j->reclaim_work.work);
435
436         return ret;
437 }
438
439 int __bch2_journal_preres_get(struct journal *j,
440                               struct journal_preres *res,
441                               unsigned new_u64s)
442 {
443         int ret;
444
445         closure_wait_event(&j->preres_wait,
446                    (ret = bch2_journal_error(j)) ||
447                    journal_preres_available(j, res, new_u64s));
448         return ret;
449 }
450
451 /* journal_entry_res: */
452
453 void bch2_journal_entry_res_resize(struct journal *j,
454                                    struct journal_entry_res *res,
455                                    unsigned new_u64s)
456 {
457         union journal_res_state state;
458         int d = new_u64s - res->u64s;
459
460         spin_lock(&j->lock);
461
462         j->entry_u64s_reserved += d;
463         if (d <= 0)
464                 goto out;
465
466         j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
467         smp_mb();
468         state = READ_ONCE(j->reservations);
469
470         if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
471             state.cur_entry_offset > j->cur_entry_u64s) {
472                 j->cur_entry_u64s += d;
473                 /*
474                  * Not enough room in current journal entry, have to flush it:
475                  */
476                 __journal_entry_close(j);
477         } else {
478                 journal_cur_buf(j)->u64s_reserved += d;
479         }
480 out:
481         spin_unlock(&j->lock);
482         res->u64s += d;
483 }
484
485 /* journal flushing: */
486
487 u64 bch2_journal_last_unwritten_seq(struct journal *j)
488 {
489         u64 seq;
490
491         spin_lock(&j->lock);
492         seq = journal_cur_seq(j);
493         if (j->reservations.prev_buf_unwritten)
494                 seq--;
495         spin_unlock(&j->lock);
496
497         return seq;
498 }
499
500 /**
501  * bch2_journal_open_seq_async - try to open a new journal entry if @seq isn't
502  * open yet, or wait if we cannot
503  *
504  * used by the btree interior update machinery, when it needs to write a new
505  * btree root - every journal entry contains the roots of all the btrees, so it
506  * doesn't need to bother with getting a journal reservation
507  */
508 int bch2_journal_open_seq_async(struct journal *j, u64 seq, struct closure *cl)
509 {
510         struct bch_fs *c = container_of(j, struct bch_fs, journal);
511         int ret;
512
513         spin_lock(&j->lock);
514
515         /*
516          * Can't try to open more than one sequence number ahead:
517          */
518         BUG_ON(journal_cur_seq(j) < seq && !journal_entry_is_open(j));
519
520         if (journal_cur_seq(j) > seq ||
521             journal_entry_is_open(j)) {
522                 spin_unlock(&j->lock);
523                 return 0;
524         }
525
526         if (journal_cur_seq(j) < seq &&
527             !__journal_entry_close(j)) {
528                 /* haven't finished writing out the previous one: */
529                 trace_journal_entry_full(c);
530                 ret = -EAGAIN;
531         } else {
532                 BUG_ON(journal_cur_seq(j) != seq);
533
534                 ret = journal_entry_open(j);
535         }
536
537         if ((ret == -EAGAIN || ret == -ENOSPC) &&
538             !j->res_get_blocked_start)
539                 j->res_get_blocked_start = local_clock() ?: 1;
540
541         if (ret == -EAGAIN || ret == -ENOSPC)
542                 closure_wait(&j->async_wait, cl);
543
544         spin_unlock(&j->lock);
545
546         if (ret == -ENOSPC) {
547                 trace_journal_full(c);
548                 bch2_journal_reclaim_work(&j->reclaim_work.work);
549                 ret = -EAGAIN;
550         }
551
552         return ret;
553 }
554
555 static int journal_seq_error(struct journal *j, u64 seq)
556 {
557         union journal_res_state state = READ_ONCE(j->reservations);
558
559         if (seq == journal_cur_seq(j))
560                 return bch2_journal_error(j);
561
562         if (seq + 1 == journal_cur_seq(j) &&
563             !state.prev_buf_unwritten &&
564             seq > j->seq_ondisk)
565                 return -EIO;
566
567         return 0;
568 }
569
570 static inline struct journal_buf *
571 journal_seq_to_buf(struct journal *j, u64 seq)
572 {
573         /* seq should be for a journal entry that has been opened: */
574         BUG_ON(seq > journal_cur_seq(j));
575         BUG_ON(seq == journal_cur_seq(j) &&
576                j->reservations.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL);
577
578         if (seq == journal_cur_seq(j))
579                 return journal_cur_buf(j);
580         if (seq + 1 == journal_cur_seq(j) &&
581             j->reservations.prev_buf_unwritten)
582                 return journal_prev_buf(j);
583         return NULL;
584 }
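/*
 * The journal double-buffers writes: j->buf[] has two entries, with
 * reservations.idx selecting the currently open one and prev_buf_unwritten
 * tracking whether the other is still in flight. journal_seq_to_buf() above
 * can therefore only resolve the current sequence number or, while a write is
 * outstanding, the one just before it; anything older is no longer buffered
 * in memory and returns NULL.
 */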
585
586 /**
587  * bch2_journal_wait_on_seq - wait for a journal entry to be written
588  *
589  * does _not_ cause @seq to be written immediately - if there is no other
590  * activity to cause the relevant journal entry to be filled up or flushed, it
591  * can wait for an arbitrary amount of time (up to @j->write_delay_ms, which is
592  * configurable).
593  */
594 void bch2_journal_wait_on_seq(struct journal *j, u64 seq,
595                               struct closure *parent)
596 {
597         struct journal_buf *buf;
598
599         spin_lock(&j->lock);
600
601         if ((buf = journal_seq_to_buf(j, seq))) {
602                 if (!closure_wait(&buf->wait, parent))
603                         BUG();
604
605                 if (seq == journal_cur_seq(j)) {
606                         smp_mb();
607                         if (bch2_journal_error(j))
608                                 closure_wake_up(&buf->wait);
609                 }
610         }
611
612         spin_unlock(&j->lock);
613 }
614
615 /**
616  * bch2_journal_flush_seq_async - wait for a journal entry to be written
617  *
618  * like bch2_journal_wait_on_seq, except that it triggers a write immediately if
619  * necessary
620  */
621 void bch2_journal_flush_seq_async(struct journal *j, u64 seq,
622                                   struct closure *parent)
623 {
624         struct journal_buf *buf;
625
626         spin_lock(&j->lock);
627
628         if (parent &&
629             (buf = journal_seq_to_buf(j, seq)))
630                 if (!closure_wait(&buf->wait, parent))
631                         BUG();
632
633         if (seq == journal_cur_seq(j))
634                 __journal_entry_close(j);
635         spin_unlock(&j->lock);
636 }
637
638 static int journal_seq_flushed(struct journal *j, u64 seq)
639 {
640         int ret;
641
642         spin_lock(&j->lock);
643         ret = seq <= j->seq_ondisk ? 1 : journal_seq_error(j, seq);
644
645         if (seq == journal_cur_seq(j))
646                 __journal_entry_close(j);
647         spin_unlock(&j->lock);
648
649         return ret;
650 }
651
652 int bch2_journal_flush_seq(struct journal *j, u64 seq)
653 {
654         u64 start_time = local_clock();
655         int ret, ret2;
656
657         ret = wait_event_killable(j->wait, (ret2 = journal_seq_flushed(j, seq)));
658
659         bch2_time_stats_update(j->flush_seq_time, start_time);
660
661         return ret ?: ret2 < 0 ? ret2 : 0;
662 }
663
664 /**
665  * bch2_journal_meta_async - force a journal entry to be written
666  */
667 void bch2_journal_meta_async(struct journal *j, struct closure *parent)
668 {
669         struct journal_res res;
670
671         memset(&res, 0, sizeof(res));
672
673         bch2_journal_res_get(j, &res, jset_u64s(0), 0);
674         bch2_journal_res_put(j, &res);
675
676         bch2_journal_flush_seq_async(j, res.seq, parent);
677 }
678
679 int bch2_journal_meta(struct journal *j)
680 {
681         struct journal_res res;
682         int ret;
683
684         memset(&res, 0, sizeof(res));
685
686         ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
687         if (ret)
688                 return ret;
689
690         bch2_journal_res_put(j, &res);
691
692         return bch2_journal_flush_seq(j, res.seq);
693 }
694
695 /*
696  * bch2_journal_flush_async - if there is an open journal entry, or a journal
697  * entry still being written, write it and wait for the write to complete
698  */
699 void bch2_journal_flush_async(struct journal *j, struct closure *parent)
700 {
701         u64 seq, journal_seq;
702
703         spin_lock(&j->lock);
704         journal_seq = journal_cur_seq(j);
705
706         if (journal_entry_is_open(j)) {
707                 seq = journal_seq;
708         } else if (journal_seq) {
709                 seq = journal_seq - 1;
710         } else {
711                 spin_unlock(&j->lock);
712                 return;
713         }
714         spin_unlock(&j->lock);
715
716         bch2_journal_flush_seq_async(j, seq, parent);
717 }
718
719 int bch2_journal_flush(struct journal *j)
720 {
721         u64 seq, journal_seq;
722
723         spin_lock(&j->lock);
724         journal_seq = journal_cur_seq(j);
725
726         if (journal_entry_is_open(j)) {
727                 seq = journal_seq;
728         } else if (journal_seq) {
729                 seq = journal_seq - 1;
730         } else {
731                 spin_unlock(&j->lock);
732                 return 0;
733         }
734         spin_unlock(&j->lock);
735
736         return bch2_journal_flush_seq(j, seq);
737 }
738
739 /* block/unblock the journal: */
740
741 void bch2_journal_unblock(struct journal *j)
742 {
743         spin_lock(&j->lock);
744         j->blocked--;
745         spin_unlock(&j->lock);
746
747         journal_wake(j);
748 }
749
750 void bch2_journal_block(struct journal *j)
751 {
752         spin_lock(&j->lock);
753         j->blocked++;
754         spin_unlock(&j->lock);
755
756         journal_quiesce(j);
757 }
758
759 /* allocate journal on a device: */
760
761 static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
762                                          bool new_fs, struct closure *cl)
763 {
764         struct bch_fs *c = ca->fs;
765         struct journal_device *ja = &ca->journal;
766         struct bch_sb_field_journal *journal_buckets;
767         u64 *new_bucket_seq = NULL, *new_buckets = NULL;
768         int ret = 0;
769
770         /* don't handle reducing nr of buckets yet: */
771         if (nr <= ja->nr)
772                 return 0;
773
774         ret = -ENOMEM;
775         new_buckets     = kzalloc(nr * sizeof(u64), GFP_KERNEL);
776         new_bucket_seq  = kzalloc(nr * sizeof(u64), GFP_KERNEL);
777         if (!new_buckets || !new_bucket_seq)
778                 goto err;
779
780         journal_buckets = bch2_sb_resize_journal(&ca->disk_sb,
781                                                  nr + sizeof(*journal_buckets) / sizeof(u64));
782         if (!journal_buckets)
783                 goto err;
784
785         /*
786          * We may be called from the device add path, before the new device has
787          * actually been added to the running filesystem:
788          */
789         if (c)
790                 spin_lock(&c->journal.lock);
791
792         memcpy(new_buckets,     ja->buckets,    ja->nr * sizeof(u64));
793         memcpy(new_bucket_seq,  ja->bucket_seq, ja->nr * sizeof(u64));
794         swap(new_buckets,       ja->buckets);
795         swap(new_bucket_seq,    ja->bucket_seq);
796
797         if (c)
798                 spin_unlock(&c->journal.lock);
799
800         while (ja->nr < nr) {
801                 struct open_bucket *ob = NULL;
802                 unsigned pos;
803                 long bucket;
804
805                 if (new_fs) {
806                         bucket = bch2_bucket_alloc_new_fs(ca);
807                         if (bucket < 0) {
808                                 ret = -ENOSPC;
809                                 goto err;
810                         }
811                 } else {
812                         ob = bch2_bucket_alloc(c, ca, RESERVE_ALLOC,
813                                                false, cl);
814                         if (IS_ERR(ob)) {
815                                 ret = cl ? -EAGAIN : -ENOSPC;
816                                 goto err;
817                         }
818
819                         bucket = sector_to_bucket(ca, ob->ptr.offset);
820                 }
821
822                 if (c) {
823                         percpu_down_read_preempt_disable(&c->mark_lock);
824                         spin_lock(&c->journal.lock);
825                 } else {
826                         preempt_disable();
827                 }
828
829                 pos = ja->nr ? (ja->cur_idx + 1) % ja->nr : 0;
830                 __array_insert_item(ja->buckets,                ja->nr, pos);
831                 __array_insert_item(ja->bucket_seq,             ja->nr, pos);
832                 __array_insert_item(journal_buckets->buckets,   ja->nr, pos);
833                 ja->nr++;
834
835                 ja->buckets[pos] = bucket;
836                 ja->bucket_seq[pos] = 0;
837                 journal_buckets->buckets[pos] = cpu_to_le64(bucket);
838
839                 if (pos <= ja->discard_idx)
840                         ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
841                 if (pos <= ja->dirty_idx_ondisk)
842                         ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;
843                 if (pos <= ja->dirty_idx)
844                         ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;
845                 if (pos <= ja->cur_idx)
846                         ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
847
848                 bch2_mark_metadata_bucket(c, ca, bucket, BCH_DATA_JOURNAL,
849                                           ca->mi.bucket_size,
850                                           gc_phase(GC_PHASE_SB),
851                                           0);
852
853                 if (c) {
854                         spin_unlock(&c->journal.lock);
855                         percpu_up_read_preempt_enable(&c->mark_lock);
856                 } else {
857                         preempt_enable();
858                 }
859
860                 if (!new_fs)
861                         bch2_open_bucket_put(c, ob);
862         }
863
864         ret = 0;
865 err:
866         kfree(new_bucket_seq);
867         kfree(new_buckets);
868
869         return ret;
870 }
871
872 /*
873  * Allocate more journal space at runtime - not currently making use of it, but
874  * the code works:
875  */
876 int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
877                                 unsigned nr)
878 {
879         struct journal_device *ja = &ca->journal;
880         struct closure cl;
881         unsigned current_nr;
882         int ret;
883
884         closure_init_stack(&cl);
885
886         do {
887                 struct disk_reservation disk_res = { 0, 0 };
888
889                 closure_sync(&cl);
890
891                 mutex_lock(&c->sb_lock);
892                 current_nr = ja->nr;
893
894                 /*
895                  * note: journal buckets aren't really counted as _sectors_ used yet, so
896                  * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
897                  * when space used goes up without a reservation - but we do need the
898                  * reservation to ensure we'll actually be able to allocate:
899                  */
900
901                 if (bch2_disk_reservation_get(c, &disk_res,
902                                               bucket_to_sector(ca, nr - ja->nr), 1, 0)) {
903                         mutex_unlock(&c->sb_lock);
904                         return -ENOSPC;
905                 }
906
907                 ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);
908
909                 bch2_disk_reservation_put(c, &disk_res);
910
911                 if (ja->nr != current_nr)
912                         bch2_write_super(c);
913                 mutex_unlock(&c->sb_lock);
914         } while (ret == -EAGAIN);
915
916         return ret;
917 }
918
919 int bch2_dev_journal_alloc(struct bch_dev *ca)
920 {
921         unsigned nr;
922
923         if (dynamic_fault("bcachefs:add:journal_alloc"))
924                 return -ENOMEM;
925
926         /*
927          * clamp journal size to 1024 buckets or 512MB (in sectors), whichever
928          * is smaller:
929          */
930         nr = clamp_t(unsigned, ca->mi.nbuckets >> 8,
931                      BCH_JOURNAL_BUCKETS_MIN,
932                      min(1 << 10,
933                          (1 << 20) / ca->mi.bucket_size));
934
935         return __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
936 }
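/*
 * Worked example of the sizing above (numbers illustrative): with 2048-sector
 * (1MB) buckets, (1 << 20) / 2048 = 512 buckets, so the 512MB cap wins over
 * the 1024-bucket cap; a device with nbuckets = 1 << 20 would want
 * nbuckets >> 8 = 4096 buckets and gets clamped down to those 512, while a
 * very small device is rounded up to BCH_JOURNAL_BUCKETS_MIN.
 */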
937
938 /* startup/shutdown: */
939
940 static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
941 {
942         union journal_res_state state;
943         struct journal_buf *w;
944         bool ret;
945
946         spin_lock(&j->lock);
947         state = READ_ONCE(j->reservations);
948         w = j->buf + !state.idx;
949
950         ret = state.prev_buf_unwritten &&
951                 bch2_extent_has_device(bkey_i_to_s_c_extent(&w->key), dev_idx);
952         spin_unlock(&j->lock);
953
954         return ret;
955 }
956
957 void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
958 {
959         wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
960 }
961
962 void bch2_fs_journal_stop(struct journal *j)
963 {
964         struct bch_fs *c = container_of(j, struct bch_fs, journal);
965
966         wait_event(j->wait, journal_entry_close(j));
967
968         /* do we need to write another journal entry? */
969         if (test_bit(JOURNAL_NOT_EMPTY, &j->flags) ||
970             c->btree_roots_dirty)
971                 bch2_journal_meta(j);
972
973         journal_quiesce(j);
974
975         BUG_ON(!bch2_journal_error(j) &&
976                test_bit(JOURNAL_NOT_EMPTY, &j->flags));
977
978         cancel_delayed_work_sync(&j->write_work);
979         cancel_delayed_work_sync(&j->reclaim_work);
980 }
981
982 int bch2_fs_journal_start(struct journal *j, u64 cur_seq,
983                           struct list_head *journal_entries)
984 {
985         struct bch_fs *c = container_of(j, struct bch_fs, journal);
986         struct journal_entry_pin_list *p;
987         struct journal_replay *i;
988         u64 last_seq = cur_seq, nr, seq;
989
990         if (!list_empty(journal_entries))
991                 last_seq = le64_to_cpu(list_first_entry(journal_entries,
992                                                         struct journal_replay,
993                                                         list)->j.seq);
994
995         nr = cur_seq - last_seq;
996
997         if (nr + 1 > j->pin.size) {
998                 free_fifo(&j->pin);
999                 init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
1000                 if (!j->pin.data) {
1001                         bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
1002                         return -ENOMEM;
1003                 }
1004         }
1005
1006         j->replay_journal_seq   = last_seq;
1007         j->replay_journal_seq_end = cur_seq;
1008         j->last_seq_ondisk      = last_seq;
1009         j->pin.front            = last_seq;
1010         j->pin.back             = cur_seq;
1011         atomic64_set(&j->seq, cur_seq - 1);
1012
1013         fifo_for_each_entry_ptr(p, &j->pin, seq) {
1014                 INIT_LIST_HEAD(&p->list);
1015                 INIT_LIST_HEAD(&p->flushed);
1016                 atomic_set(&p->count, 1);
1017                 p->devs.nr = 0;
1018         }
1019
1020         list_for_each_entry(i, journal_entries, list) {
1021                 seq = le64_to_cpu(i->j.seq);
1022
1023                 BUG_ON(seq < last_seq || seq >= cur_seq);
1024
1025                 journal_seq_pin(j, seq)->devs = i->devs;
1026         }
1027
1028         spin_lock(&j->lock);
1029
1030         set_bit(JOURNAL_STARTED, &j->flags);
1031
1032         journal_pin_new_entry(j, 1);
1033         bch2_journal_buf_init(j);
1034
1035         c->last_bucket_seq_cleanup = journal_cur_seq(j);
1036
1037         bch2_journal_space_available(j);
1038         spin_unlock(&j->lock);
1039
1040         return 0;
1041 }
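/*
 * Concretely (sequence numbers illustrative): if the oldest entry that still
 * needs replaying has seq 10 (last_seq) and replay ends at cur_seq = 13, the
 * pin fifo above covers seqs 10..12, each starting with a pin count of 1 held
 * until replay is done with it, and the first entry written by the running
 * filesystem will be seq 13.
 */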
1042
1043 /* init/exit: */
1044
1045 void bch2_dev_journal_exit(struct bch_dev *ca)
1046 {
1047         kfree(ca->journal.bio);
1048         kfree(ca->journal.buckets);
1049         kfree(ca->journal.bucket_seq);
1050
1051         ca->journal.bio         = NULL;
1052         ca->journal.buckets     = NULL;
1053         ca->journal.bucket_seq  = NULL;
1054 }
1055
1056 int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
1057 {
1058         struct journal_device *ja = &ca->journal;
1059         struct bch_sb_field_journal *journal_buckets =
1060                 bch2_sb_get_journal(sb);
1061         unsigned i;
1062
1063         ja->nr = bch2_nr_journal_buckets(journal_buckets);
1064
1065         ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
1066         if (!ja->bucket_seq)
1067                 return -ENOMEM;
1068
1069         ca->journal.bio = bio_kmalloc(GFP_KERNEL,
1070                         DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE));
1071         if (!ca->journal.bio)
1072                 return -ENOMEM;
1073
1074         ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
1075         if (!ja->buckets)
1076                 return -ENOMEM;
1077
1078         for (i = 0; i < ja->nr; i++)
1079                 ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
1080
1081         return 0;
1082 }
1083
1084 void bch2_fs_journal_exit(struct journal *j)
1085 {
1086         kvpfree(j->buf[1].data, j->buf[1].buf_size);
1087         kvpfree(j->buf[0].data, j->buf[0].buf_size);
1088         free_fifo(&j->pin);
1089 }
1090
1091 int bch2_fs_journal_init(struct journal *j)
1092 {
1093         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1094         static struct lock_class_key res_key;
1095         int ret = 0;
1096
1097         pr_verbose_init(c->opts, "");
1098
1099         spin_lock_init(&j->lock);
1100         spin_lock_init(&j->err_lock);
1101         init_waitqueue_head(&j->wait);
1102         INIT_DELAYED_WORK(&j->write_work, journal_write_work);
1103         INIT_DELAYED_WORK(&j->reclaim_work, bch2_journal_reclaim_work);
1104         init_waitqueue_head(&j->pin_flush_wait);
1105         mutex_init(&j->reclaim_lock);
1106         mutex_init(&j->discard_lock);
1107
1108         lockdep_init_map(&j->res_map, "journal res", &res_key, 0);
1109
1110         j->buf[0].buf_size      = JOURNAL_ENTRY_SIZE_MIN;
1111         j->buf[1].buf_size      = JOURNAL_ENTRY_SIZE_MIN;
1112         j->write_delay_ms       = 1000;
1113         j->reclaim_delay_ms     = 100;
1114
1115         /* Btree roots: */
1116         j->entry_u64s_reserved +=
1117                 BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_EXTENT_U64s_MAX);
1118
1119         atomic64_set(&j->reservations.counter,
1120                 ((union journal_res_state)
1121                  { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);
1122
1123         if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
1124             !(j->buf[0].data = kvpmalloc(j->buf[0].buf_size, GFP_KERNEL)) ||
1125             !(j->buf[1].data = kvpmalloc(j->buf[1].buf_size, GFP_KERNEL))) {
1126                 ret = -ENOMEM;
1127                 goto out;
1128         }
1129
1130         j->pin.front = j->pin.back = 1;
1131 out:
1132         pr_verbose_init(c->opts, "ret %i", ret);
1133         return ret;
1134 }
1135
1136 /* debug: */
1137
1138 ssize_t bch2_journal_print_debug(struct journal *j, char *buf)
1139 {
1140         struct printbuf out = _PBUF(buf, PAGE_SIZE);
1141         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1142         union journal_res_state s;
1143         struct bch_dev *ca;
1144         unsigned iter;
1145
1146         rcu_read_lock();
1147         spin_lock(&j->lock);
1148         s = READ_ONCE(j->reservations);
1149
1150         pr_buf(&out,
1151                "active journal entries:\t%llu\n"
1152                "seq:\t\t\t%llu\n"
1153                "last_seq:\t\t%llu\n"
1154                "last_seq_ondisk:\t%llu\n"
1155                "prereserved:\t\t%u/%u\n"
1156                "current entry sectors:\t%u\n"
1157                "current entry:\t\t",
1158                fifo_used(&j->pin),
1159                journal_cur_seq(j),
1160                journal_last_seq(j),
1161                j->last_seq_ondisk,
1162                j->prereserved.reserved,
1163                j->prereserved.remaining,
1164                j->cur_entry_sectors);
1165
1166         switch (s.cur_entry_offset) {
1167         case JOURNAL_ENTRY_ERROR_VAL:
1168                 pr_buf(&out, "error\n");
1169                 break;
1170         case JOURNAL_ENTRY_CLOSED_VAL:
1171                 pr_buf(&out, "closed\n");
1172                 break;
1173         default:
1174                 pr_buf(&out, "%u/%u\n",
1175                        s.cur_entry_offset,
1176                        j->cur_entry_u64s);
1177                 break;
1178         }
1179
1180         pr_buf(&out,
1181                "current entry refs:\t%u\n"
1182                "prev entry unwritten:\t",
1183                journal_state_count(s, s.idx));
1184
1185         if (s.prev_buf_unwritten)
1186                 pr_buf(&out, "yes, ref %u sectors %u\n",
1187                        journal_state_count(s, !s.idx),
1188                        journal_prev_buf(j)->sectors);
1189         else
1190                 pr_buf(&out, "no\n");
1191
1192         pr_buf(&out,
1193                "need write:\t\t%i\n"
1194                "replay done:\t\t%i\n",
1195                test_bit(JOURNAL_NEED_WRITE,     &j->flags),
1196                test_bit(JOURNAL_REPLAY_DONE,    &j->flags));
1197
1198         for_each_member_device_rcu(ca, c, iter,
1199                                    &c->rw_devs[BCH_DATA_JOURNAL]) {
1200                 struct journal_device *ja = &ca->journal;
1201
1202                 if (!ja->nr)
1203                         continue;
1204
1205                 pr_buf(&out,
1206                        "dev %u:\n"
1207                        "\tnr\t\t%u\n"
1208                        "\tavailable\t%u:%u\n"
1209                        "\tdiscard_idx\t\t%u\n"
1210                        "\tdirty_idx_ondisk\t%u (seq %llu)\n"
1211                        "\tdirty_idx\t\t%u (seq %llu)\n"
1212                        "\tcur_idx\t\t%u (seq %llu)\n",
1213                        iter, ja->nr,
1214                        bch2_journal_dev_buckets_available(j, ja, journal_space_discarded),
1215                        ja->sectors_free,
1216                        ja->discard_idx,
1217                        ja->dirty_idx_ondisk,    ja->bucket_seq[ja->dirty_idx_ondisk],
1218                        ja->dirty_idx,           ja->bucket_seq[ja->dirty_idx],
1219                        ja->cur_idx,             ja->bucket_seq[ja->cur_idx]);
1220         }
1221
1222         spin_unlock(&j->lock);
1223         rcu_read_unlock();
1224
1225         return out.pos - buf;
1226 }
1227
1228 ssize_t bch2_journal_print_pins(struct journal *j, char *buf)
1229 {
1230         struct printbuf out = _PBUF(buf, PAGE_SIZE);
1231         struct journal_entry_pin_list *pin_list;
1232         struct journal_entry_pin *pin;
1233         u64 i;
1234
1235         spin_lock(&j->lock);
1236         fifo_for_each_entry_ptr(pin_list, &j->pin, i) {
1237                 pr_buf(&out, "%llu: count %u\n",
1238                        i, atomic_read(&pin_list->count));
1239
1240                 list_for_each_entry(pin, &pin_list->list, list)
1241                         pr_buf(&out, "\t%p %pf\n",
1242                                pin, pin->flush);
1243
1244                 if (!list_empty(&pin_list->flushed))
1245                         pr_buf(&out, "flushed:\n");
1246
1247                 list_for_each_entry(pin, &pin_list->flushed, list)
1248                         pr_buf(&out, "\t%p %pf\n",
1249                                pin, pin->flush);
1250         }
1251         spin_unlock(&j->lock);
1252
1253         return out.pos - buf;
1254 }