libbcachefs/journal.c (bcachefs-tools)
Update bcachefs sources to d83b992f65 bcachefs: Rewrite journal_seq_blacklist machinery
1 /*
2  * bcachefs journalling code, for btree insertions
3  *
4  * Copyright 2012 Google, Inc.
5  */
6
7 #include "bcachefs.h"
8 #include "alloc_foreground.h"
9 #include "bkey_methods.h"
10 #include "btree_gc.h"
11 #include "buckets.h"
12 #include "journal.h"
13 #include "journal_io.h"
14 #include "journal_reclaim.h"
15 #include "journal_seq_blacklist.h"
16 #include "super-io.h"
17
18 #include <trace/events/bcachefs.h>
19
20 static bool __journal_entry_is_open(union journal_res_state state)
21 {
22         return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
23 }
24
25 static bool journal_entry_is_open(struct journal *j)
26 {
27         return __journal_entry_is_open(j->reservations);
28 }
29
30 static void journal_pin_new_entry(struct journal *j, int count)
31 {
32         struct journal_entry_pin_list *p;
33
34         /*
35          * The fifo_push() needs to happen at the same time as j->seq is
36          * incremented for journal_last_seq() to be calculated correctly
37          */
38         atomic64_inc(&j->seq);
39         p = fifo_push_ref(&j->pin);
40
41         INIT_LIST_HEAD(&p->list);
42         INIT_LIST_HEAD(&p->flushed);
43         atomic_set(&p->count, count);
44         p->devs.nr = 0;
45 }
46
47 static void bch2_journal_buf_init(struct journal *j)
48 {
49         struct journal_buf *buf = journal_cur_buf(j);
50
51         memset(buf->has_inode, 0, sizeof(buf->has_inode));
52
53         memset(buf->data, 0, sizeof(*buf->data));
54         buf->data->seq  = cpu_to_le64(journal_cur_seq(j));
55         buf->data->u64s = 0;
56 }
57
58 static inline bool journal_entry_empty(struct jset *j)
59 {
60         struct jset_entry *i;
61
62         if (j->seq != j->last_seq)
63                 return false;
64
65         vstruct_for_each(j, i)
66                 if (i->type || i->u64s)
67                         return false;
68         return true;
69 }
70
71 void bch2_journal_halt(struct journal *j)
72 {
73         union journal_res_state old, new;
74         u64 v = atomic64_read(&j->reservations.counter);
75
76         do {
77                 old.v = new.v = v;
78                 if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
79                         return;
80
81                 new.cur_entry_offset = JOURNAL_ENTRY_ERROR_VAL;
82         } while ((v = atomic64_cmpxchg(&j->reservations.counter,
83                                        old.v, new.v)) != old.v);
84
85         journal_wake(j);
86         closure_wake_up(&journal_cur_buf(j)->wait);
87 }
88
89 /* journal entry close/open: */
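
/*
 * Roughly how open/close works (a sketch summarizing the functions below):
 * the interesting state - cur_entry_offset, the current buffer idx,
 * prev_buf_unwritten and the per-buffer refcounts - lives packed in the
 * single 64 bit word j->reservations, so every transition is a lockless
 * read/modify/cmpxchg loop of the form
 *
 *	u64 v = atomic64_read(&j->reservations.counter);
 *	do {
 *		old.v = new.v = v;
 *		<inspect old, bail out early, or modify new>
 *	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
 *				       old.v, new.v)) != old.v);
 *
 * cur_entry_offset doubles as the open/closed/error state:
 * JOURNAL_ENTRY_CLOSED_VAL and JOURNAL_ENTRY_ERROR_VAL are sentinels, and any
 * smaller value means the entry is open and is the offset at which the next
 * reservation will be made.
 */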
90
91 void __bch2_journal_buf_put(struct journal *j, bool need_write_just_set)
92 {
93         if (!need_write_just_set &&
94             test_bit(JOURNAL_NEED_WRITE, &j->flags))
95                 bch2_time_stats_update(j->delay_time,
96                                        j->need_write_time);
97
98         clear_bit(JOURNAL_NEED_WRITE, &j->flags);
99
100         closure_call(&j->io, bch2_journal_write, system_highpri_wq, NULL);
101 }
102
103 /*
104  * Returns true if journal entry is now closed:
105  */
106 static bool __journal_entry_close(struct journal *j)
107 {
108         struct bch_fs *c = container_of(j, struct bch_fs, journal);
109         struct journal_buf *buf = journal_cur_buf(j);
110         union journal_res_state old, new;
111         u64 v = atomic64_read(&j->reservations.counter);
112         bool set_need_write = false;
113         unsigned sectors;
114
115         lockdep_assert_held(&j->lock);
116
117         do {
118                 old.v = new.v = v;
119                 if (old.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL)
120                         return true;
121
122                 if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL) {
123                         /* this entry will never be written: */
124                         closure_wake_up(&buf->wait);
125                         return true;
126                 }
127
128                 if (!test_bit(JOURNAL_NEED_WRITE, &j->flags)) {
129                         set_bit(JOURNAL_NEED_WRITE, &j->flags);
130                         j->need_write_time = local_clock();
131                         set_need_write = true;
132                 }
133
134                 if (new.prev_buf_unwritten)
135                         return false;
136
137                 new.cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL;
138                 new.idx++;
139                 new.prev_buf_unwritten = 1;
140
141                 BUG_ON(journal_state_count(new, new.idx));
142         } while ((v = atomic64_cmpxchg(&j->reservations.counter,
143                                        old.v, new.v)) != old.v);
144
145         buf->data->u64s         = cpu_to_le32(old.cur_entry_offset);
146
147         sectors = vstruct_blocks_plus(buf->data, c->block_bits,
148                                       buf->u64s_reserved) << c->block_bits;
149         BUG_ON(sectors > buf->sectors);
150         buf->sectors = sectors;
151
152         bkey_extent_init(&buf->key);
153
154         /*
155          * We have to set last_seq here, _before_ opening a new journal entry:
156          *
157          * A thread may replace an old pin with a new pin on its current
158          * journal reservation - the expectation being that the journal will
159          * contain either what the old pin protected or what the new pin
160          * protects.
161          *
162          * After the old pin is dropped journal_last_seq() won't include the old
163          * pin, so we can only write the updated last_seq on the entry that
164          * contains whatever the new pin protects.
165          *
166          * Restated, we can _not_ update last_seq for a given entry if there
167          * could be a newer entry open with reservations/pins that have been
168          * taken against it.
169          *
170          * Hence, we want to update/set last_seq on the current journal entry right
171          * before we open a new one:
172          */
173         buf->data->last_seq     = cpu_to_le64(journal_last_seq(j));
174
175         if (journal_entry_empty(buf->data))
176                 clear_bit(JOURNAL_NOT_EMPTY, &j->flags);
177         else
178                 set_bit(JOURNAL_NOT_EMPTY, &j->flags);
179
180         journal_pin_new_entry(j, 1);
181
182         bch2_journal_buf_init(j);
183
184         cancel_delayed_work(&j->write_work);
185
186         bch2_journal_space_available(j);
187
188         bch2_journal_buf_put(j, old.idx, set_need_write);
189         return true;
190 }
191
192 static bool journal_entry_close(struct journal *j)
193 {
194         bool ret;
195
196         spin_lock(&j->lock);
197         ret = __journal_entry_close(j);
198         spin_unlock(&j->lock);
199
200         return ret;
201 }
202
203 /*
204  * should _only_ be called from journal_res_get() - when we actually want a
205  * journal reservation - journal entry is open means journal is dirty:
206  *
207  * returns:
208  * 0:           success
209  * -ENOSPC:     journal currently full, must invoke reclaim
210  * -EAGAIN:     journal blocked, must wait
211  * -EROFS:      insufficient rw devices or journal error
212  */
213 static int journal_entry_open(struct journal *j)
214 {
215         struct journal_buf *buf = journal_cur_buf(j);
216         union journal_res_state old, new;
217         int u64s;
218         u64 v;
219
220         lockdep_assert_held(&j->lock);
221         BUG_ON(journal_entry_is_open(j));
222
223         if (j->blocked)
224                 return -EAGAIN;
225
226         if (j->cur_entry_error)
227                 return j->cur_entry_error;
228
229         BUG_ON(!j->cur_entry_sectors);
230
231         buf->u64s_reserved      = j->entry_u64s_reserved;
232         buf->disk_sectors       = j->cur_entry_sectors;
233         buf->sectors            = min(buf->disk_sectors, buf->buf_size >> 9);
234
235         u64s = (int) (buf->sectors << 9) / sizeof(u64) -
236                 journal_entry_overhead(j);
237         u64s  = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);
238
239         if (u64s <= le32_to_cpu(buf->data->u64s))
240                 return -ENOSPC;
241
242         /*
243          * Must be set before marking the journal entry as open:
244          */
245         j->cur_entry_u64s = u64s;
246
247         v = atomic64_read(&j->reservations.counter);
248         do {
249                 old.v = new.v = v;
250
251                 if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
252                         return -EROFS;
253
254                 /* Handle any already added entries */
255                 new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
256
257                 EBUG_ON(journal_state_count(new, new.idx));
258                 journal_state_inc(&new);
259         } while ((v = atomic64_cmpxchg(&j->reservations.counter,
260                                        old.v, new.v)) != old.v);
261
262         if (j->res_get_blocked_start)
263                 bch2_time_stats_update(j->blocked_time,
264                                        j->res_get_blocked_start);
265         j->res_get_blocked_start = 0;
266
267         mod_delayed_work(system_freezable_wq,
268                          &j->write_work,
269                          msecs_to_jiffies(j->write_delay_ms));
270         journal_wake(j);
271         return 0;
272 }
273
274 static bool journal_quiesced(struct journal *j)
275 {
276         union journal_res_state state = READ_ONCE(j->reservations);
277         bool ret = !state.prev_buf_unwritten && !__journal_entry_is_open(state);
278
279         if (!ret)
280                 journal_entry_close(j);
281         return ret;
282 }
283
284 static void journal_quiesce(struct journal *j)
285 {
286         wait_event(j->wait, journal_quiesced(j));
287 }
288
289 static void journal_write_work(struct work_struct *work)
290 {
291         struct journal *j = container_of(work, struct journal, write_work.work);
292
293         journal_entry_close(j);
294 }
295
296 /*
297  * Given an inode number, if that inode number has data in the journal that
298  * hasn't yet been flushed, return the journal sequence number that needs to be
299  * flushed:
300  */
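/*
 * The fsync path is the expected caller: it looks up the sequence number for
 * the inode and then flushes just that entry, roughly (a sketch, not the
 * verbatim caller):
 *
 *	u64 seq = bch2_inode_journal_seq(&c->journal, inode_nr);
 *	if (seq)
 *		ret = bch2_journal_flush_seq(&c->journal, seq);
 */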
301 u64 bch2_inode_journal_seq(struct journal *j, u64 inode)
302 {
303         size_t h = hash_64(inode, ilog2(sizeof(j->buf[0].has_inode) * 8));
304         u64 seq = 0;
305
306         if (!test_bit(h, j->buf[0].has_inode) &&
307             !test_bit(h, j->buf[1].has_inode))
308                 return 0;
309
310         spin_lock(&j->lock);
311         if (test_bit(h, journal_cur_buf(j)->has_inode))
312                 seq = journal_cur_seq(j);
313         else if (test_bit(h, journal_prev_buf(j)->has_inode))
314                 seq = journal_cur_seq(j) - 1;
315         spin_unlock(&j->lock);
316
317         return seq;
318 }
319
320 static int __journal_res_get(struct journal *j, struct journal_res *res,
321                              unsigned flags)
322 {
323         struct bch_fs *c = container_of(j, struct bch_fs, journal);
324         struct journal_buf *buf;
325         bool can_discard;
326         int ret;
327 retry:
328         if (journal_res_get_fast(j, res, flags))
329                 return 0;
330
331         if (bch2_journal_error(j))
332                 return -EROFS;
333
334         spin_lock(&j->lock);
335
336         /*
337          * Recheck after taking the lock, so we don't race with another thread
338          * that just did journal_entry_open() and call journal_entry_close()
339          * unnecessarily
340          */
341         if (journal_res_get_fast(j, res, flags)) {
342                 spin_unlock(&j->lock);
343                 return 0;
344         }
345
346         if (!(flags & JOURNAL_RES_GET_RESERVED) &&
347             !test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags)) {
348                 /*
349                  * Don't want to close current journal entry, just need to
350                  * invoke reclaim:
351                  */
352                 ret = -ENOSPC;
353                 goto unlock;
354         }
355
356         /*
357          * If we couldn't get a reservation because the current buf filled up,
358          * and we had room for a bigger entry on disk, signal that we want to
359          * realloc the journal bufs:
360          */
361         buf = journal_cur_buf(j);
362         if (journal_entry_is_open(j) &&
363             buf->buf_size >> 9 < buf->disk_sectors &&
364             buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
365                 j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);
366
367         if (journal_entry_is_open(j) &&
368             !__journal_entry_close(j)) {
369                 /*
370                  * We failed to get a reservation on the current open journal
371                  * entry because it's full, and we can't close it because
372                  * there's still a previous one in flight:
373                  */
374                 trace_journal_entry_full(c);
375                 ret = -EAGAIN;
376         } else {
377                 ret = journal_entry_open(j);
378         }
379 unlock:
380         if ((ret == -EAGAIN || ret == -ENOSPC) &&
381             !j->res_get_blocked_start)
382                 j->res_get_blocked_start = local_clock() ?: 1;
383
384         can_discard = j->can_discard;
385         spin_unlock(&j->lock);
386
387         if (!ret)
388                 goto retry;
389
390         if (ret == -ENOSPC) {
391                 BUG_ON(!can_discard && (flags & JOURNAL_RES_GET_RESERVED));
392
393                 /*
394                  * Journal is full - can't rely on reclaim from work item due to
395                  * freezing:
396                  */
397                 trace_journal_full(c);
398
399                 if (!(flags & JOURNAL_RES_GET_NONBLOCK)) {
400                         if (can_discard) {
401                                 bch2_journal_do_discards(j);
402                                 goto retry;
403                         }
404
405                         if (mutex_trylock(&j->reclaim_lock)) {
406                                 bch2_journal_reclaim(j);
407                                 mutex_unlock(&j->reclaim_lock);
408                         }
409                 }
410
411                 ret = -EAGAIN;
412         }
413
414         return ret;
415 }
416
417 /*
418  * Essentially the entry function to the journaling code. When bcachefs is doing
419  * a btree insert, it calls this function to get the current journal write.
420  * The journal write is the structure used to set up journal writes. The calling
421  * function will then add its keys to the structure, queuing them for the next
422  * write.
423  *
424  * To ensure forward progress, the current task must not be holding any
425  * btree node write locks.
426  */
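/*
 * A typical reservation lifecycle from the caller's side (a sketch; the
 * bch2_journal_res_get()/bch2_journal_res_put() wrappers in journal.h handle
 * the fast path and fall back to this slowpath):
 *
 *	struct journal_res res = { 0 };
 *
 *	ret = bch2_journal_res_get(j, &res, jset_u64s(nr_u64s), 0);
 *	if (ret)
 *		return ret;
 *	<copy keys into the space the reservation points at>
 *	bch2_journal_res_put(j, &res);
 *
 * Callers that need durability then wait on res.seq, e.g. with
 * bch2_journal_flush_seq().
 */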
427 int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
428                                   unsigned flags)
429 {
430         int ret;
431
432         closure_wait_event(&j->async_wait,
433                    (ret = __journal_res_get(j, res, flags)) != -EAGAIN ||
434                    (flags & JOURNAL_RES_GET_NONBLOCK));
435         return ret;
436 }
437
438 /* journal_preres: */
439
440 static bool journal_preres_available(struct journal *j,
441                                      struct journal_preres *res,
442                                      unsigned new_u64s)
443 {
444         bool ret = bch2_journal_preres_get_fast(j, res, new_u64s);
445
446         if (!ret)
447                 bch2_journal_reclaim_work(&j->reclaim_work.work);
448
449         return ret;
450 }
451
452 int __bch2_journal_preres_get(struct journal *j,
453                               struct journal_preres *res,
454                               unsigned new_u64s)
455 {
456         int ret;
457
458         closure_wait_event(&j->preres_wait,
459                    (ret = bch2_journal_error(j)) ||
460                    journal_preres_available(j, res, new_u64s));
461         return ret;
462 }
463
464 /* journal_entry_res: */
465
466 void bch2_journal_entry_res_resize(struct journal *j,
467                                    struct journal_entry_res *res,
468                                    unsigned new_u64s)
469 {
470         union journal_res_state state;
471         int d = new_u64s - res->u64s;
472
473         spin_lock(&j->lock);
474
475         j->entry_u64s_reserved += d;
476         if (d <= 0)
477                 goto out;
478
479         j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
480         smp_mb();
481         state = READ_ONCE(j->reservations);
482
483         if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
484             state.cur_entry_offset > j->cur_entry_u64s) {
485                 j->cur_entry_u64s += d;
486                 /*
487                  * Not enough room in current journal entry, have to flush it:
488                  */
489                 __journal_entry_close(j);
490         } else {
491                 journal_cur_buf(j)->u64s_reserved += d;
492         }
493 out:
494         spin_unlock(&j->lock);
495         res->u64s += d;
496 }
497
498 /* journal flushing: */
499
500 u64 bch2_journal_last_unwritten_seq(struct journal *j)
501 {
502         u64 seq;
503
504         spin_lock(&j->lock);
505         seq = journal_cur_seq(j);
506         if (j->reservations.prev_buf_unwritten)
507                 seq--;
508         spin_unlock(&j->lock);
509
510         return seq;
511 }
512
513 /**
514  * bch2_journal_open_seq_async - try to open a new journal entry if @seq isn't
515  * open yet, or put @cl on a waitlist until we can
516  *
517  * used by the btree interior update machinery, when it needs to write a new
518  * btree root - every journal entry contains the roots of all the btrees, so it
519  * doesn't need to bother with getting a journal reservation
520  */
521 int bch2_journal_open_seq_async(struct journal *j, u64 seq, struct closure *cl)
522 {
523         struct bch_fs *c = container_of(j, struct bch_fs, journal);
524         int ret;
525
526         spin_lock(&j->lock);
527
528         /*
529          * Can't try to open more than one sequence number ahead:
530          */
531         BUG_ON(journal_cur_seq(j) < seq && !journal_entry_is_open(j));
532
533         if (journal_cur_seq(j) > seq ||
534             journal_entry_is_open(j)) {
535                 spin_unlock(&j->lock);
536                 return 0;
537         }
538
539         if (journal_cur_seq(j) < seq &&
540             !__journal_entry_close(j)) {
541                 /* haven't finished writing out the previous one: */
542                 trace_journal_entry_full(c);
543                 ret = -EAGAIN;
544         } else {
545                 BUG_ON(journal_cur_seq(j) != seq);
546
547                 ret = journal_entry_open(j);
548         }
549
550         if ((ret == -EAGAIN || ret == -ENOSPC) &&
551             !j->res_get_blocked_start)
552                 j->res_get_blocked_start = local_clock() ?: 1;
553
554         if (ret == -EAGAIN || ret == -ENOSPC)
555                 closure_wait(&j->async_wait, cl);
556
557         spin_unlock(&j->lock);
558
559         if (ret == -ENOSPC) {
560                 trace_journal_full(c);
561                 bch2_journal_reclaim_work(&j->reclaim_work.work);
562                 ret = -EAGAIN;
563         }
564
565         return ret;
566 }
567
568 static int journal_seq_error(struct journal *j, u64 seq)
569 {
570         union journal_res_state state = READ_ONCE(j->reservations);
571
572         if (seq == journal_cur_seq(j))
573                 return bch2_journal_error(j);
574
575         if (seq + 1 == journal_cur_seq(j) &&
576             !state.prev_buf_unwritten &&
577             seq > j->seq_ondisk)
578                 return -EIO;
579
580         return 0;
581 }
582
583 static inline struct journal_buf *
584 journal_seq_to_buf(struct journal *j, u64 seq)
585 {
586         /* seq should be for a journal entry that has been opened: */
587         BUG_ON(seq > journal_cur_seq(j));
588         BUG_ON(seq == journal_cur_seq(j) &&
589                j->reservations.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL);
590
591         if (seq == journal_cur_seq(j))
592                 return journal_cur_buf(j);
593         if (seq + 1 == journal_cur_seq(j) &&
594             j->reservations.prev_buf_unwritten)
595                 return journal_prev_buf(j);
596         return NULL;
597 }
598
599 /**
600  * bch2_journal_wait_on_seq - wait for a journal entry to be written
601  *
602  * does _not_ cause @seq to be written immediately - if there is no other
603  * activity to cause the relevant journal entry to be filled up or flushed, it
604  * can wait for an arbitrary amount of time (up to @j->write_delay_ms, which is
605  * configurable).
606  */
607 void bch2_journal_wait_on_seq(struct journal *j, u64 seq,
608                               struct closure *parent)
609 {
610         struct journal_buf *buf;
611
612         spin_lock(&j->lock);
613
614         if ((buf = journal_seq_to_buf(j, seq))) {
615                 if (!closure_wait(&buf->wait, parent))
616                         BUG();
617
618                 if (seq == journal_cur_seq(j)) {
619                         smp_mb();
620                         if (bch2_journal_error(j))
621                                 closure_wake_up(&buf->wait);
622                 }
623         }
624
625         spin_unlock(&j->lock);
626 }
627
628 /**
629  * bch2_journal_flush_seq_async - wait for a journal entry to be written
630  *
631  * like bch2_journal_wait_on_seq, except that it triggers a write immediately if
632  * necessary
633  */
634 void bch2_journal_flush_seq_async(struct journal *j, u64 seq,
635                                   struct closure *parent)
636 {
637         struct journal_buf *buf;
638
639         spin_lock(&j->lock);
640
641         if (parent &&
642             (buf = journal_seq_to_buf(j, seq)))
643                 if (!closure_wait(&buf->wait, parent))
644                         BUG();
645
646         if (seq == journal_cur_seq(j))
647                 __journal_entry_close(j);
648         spin_unlock(&j->lock);
649 }
650
651 static int journal_seq_flushed(struct journal *j, u64 seq)
652 {
653         int ret;
654
655         spin_lock(&j->lock);
656         ret = seq <= j->seq_ondisk ? 1 : journal_seq_error(j, seq);
657
658         if (seq == journal_cur_seq(j))
659                 __journal_entry_close(j);
660         spin_unlock(&j->lock);
661
662         return ret;
663 }
664
665 int bch2_journal_flush_seq(struct journal *j, u64 seq)
666 {
667         u64 start_time = local_clock();
668         int ret, ret2;
669
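        /*
         * ret is -ERESTARTSYS if we were interrupted by a fatal signal; ret2
         * is positive once the entry has hit disk, or negative if the journal
         * hit an error first:
         */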
670         ret = wait_event_killable(j->wait, (ret2 = journal_seq_flushed(j, seq)));
671
672         bch2_time_stats_update(j->flush_seq_time, start_time);
673
674         return ret ?: ret2 < 0 ? ret2 : 0;
675 }
676
677 /**
678  * bch2_journal_meta_async - force a journal entry to be written
679  */
680 void bch2_journal_meta_async(struct journal *j, struct closure *parent)
681 {
682         struct journal_res res;
683
684         memset(&res, 0, sizeof(res));
685
686         bch2_journal_res_get(j, &res, jset_u64s(0), 0);
687         bch2_journal_res_put(j, &res);
688
689         bch2_journal_flush_seq_async(j, res.seq, parent);
690 }
691
692 int bch2_journal_meta(struct journal *j)
693 {
694         struct journal_res res;
695         int ret;
696
697         memset(&res, 0, sizeof(res));
698
699         ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
700         if (ret)
701                 return ret;
702
703         bch2_journal_res_put(j, &res);
704
705         return bch2_journal_flush_seq(j, res.seq);
706 }
707
708 /*
709  * bch2_journal_flush_async - if there is an open journal entry, or a journal
710  * still being written, write it and wait for the write to complete
711  */
712 void bch2_journal_flush_async(struct journal *j, struct closure *parent)
713 {
714         u64 seq, journal_seq;
715
716         spin_lock(&j->lock);
717         journal_seq = journal_cur_seq(j);
718
719         if (journal_entry_is_open(j)) {
720                 seq = journal_seq;
721         } else if (journal_seq) {
722                 seq = journal_seq - 1;
723         } else {
724                 spin_unlock(&j->lock);
725                 return;
726         }
727         spin_unlock(&j->lock);
728
729         bch2_journal_flush_seq_async(j, seq, parent);
730 }
731
732 int bch2_journal_flush(struct journal *j)
733 {
734         u64 seq, journal_seq;
735
736         spin_lock(&j->lock);
737         journal_seq = journal_cur_seq(j);
738
739         if (journal_entry_is_open(j)) {
740                 seq = journal_seq;
741         } else if (journal_seq) {
742                 seq = journal_seq - 1;
743         } else {
744                 spin_unlock(&j->lock);
745                 return 0;
746         }
747         spin_unlock(&j->lock);
748
749         return bch2_journal_flush_seq(j, seq);
750 }
751
752 /* block/unlock the journal: */
753
754 void bch2_journal_unblock(struct journal *j)
755 {
756         spin_lock(&j->lock);
757         j->blocked--;
758         spin_unlock(&j->lock);
759
760         journal_wake(j);
761 }
762
763 void bch2_journal_block(struct journal *j)
764 {
765         spin_lock(&j->lock);
766         j->blocked++;
767         spin_unlock(&j->lock);
768
769         journal_quiesce(j);
770 }
771
772 /* allocate journal on a device: */
773
774 static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
775                                          bool new_fs, struct closure *cl)
776 {
777         struct bch_fs *c = ca->fs;
778         struct journal_device *ja = &ca->journal;
779         struct bch_sb_field_journal *journal_buckets;
780         u64 *new_bucket_seq = NULL, *new_buckets = NULL;
781         int ret = 0;
782
783         /* don't handle reducing nr of buckets yet: */
784         if (nr <= ja->nr)
785                 return 0;
786
787         ret = -ENOMEM;
788         new_buckets     = kzalloc(nr * sizeof(u64), GFP_KERNEL);
789         new_bucket_seq  = kzalloc(nr * sizeof(u64), GFP_KERNEL);
790         if (!new_buckets || !new_bucket_seq)
791                 goto err;
792
793         journal_buckets = bch2_sb_resize_journal(&ca->disk_sb,
794                                                  nr + sizeof(*journal_buckets) / sizeof(u64));
795         if (!journal_buckets)
796                 goto err;
797
798         /*
799          * We may be called from the device add path, before the new device has
800          * actually been added to the running filesystem:
801          */
802         if (c)
803                 spin_lock(&c->journal.lock);
804
805         memcpy(new_buckets,     ja->buckets,    ja->nr * sizeof(u64));
806         memcpy(new_bucket_seq,  ja->bucket_seq, ja->nr * sizeof(u64));
807         swap(new_buckets,       ja->buckets);
808         swap(new_bucket_seq,    ja->bucket_seq);
809
810         if (c)
811                 spin_unlock(&c->journal.lock);
812
813         while (ja->nr < nr) {
814                 struct open_bucket *ob = NULL;
815                 unsigned pos;
816                 long bucket;
817
818                 if (new_fs) {
819                         bucket = bch2_bucket_alloc_new_fs(ca);
820                         if (bucket < 0) {
821                                 ret = -ENOSPC;
822                                 goto err;
823                         }
824                 } else {
825                         ob = bch2_bucket_alloc(c, ca, RESERVE_ALLOC,
826                                                false, cl);
827                         if (IS_ERR(ob)) {
828                                 ret = cl ? -EAGAIN : -ENOSPC;
829                                 goto err;
830                         }
831
832                         bucket = sector_to_bucket(ca, ob->ptr.offset);
833                 }
834
835                 if (c) {
836                         percpu_down_read_preempt_disable(&c->mark_lock);
837                         spin_lock(&c->journal.lock);
838                 } else {
839                         preempt_disable();
840                 }
841
842                 pos = ja->nr ? (ja->cur_idx + 1) % ja->nr : 0;
843                 __array_insert_item(ja->buckets,                ja->nr, pos);
844                 __array_insert_item(ja->bucket_seq,             ja->nr, pos);
845                 __array_insert_item(journal_buckets->buckets,   ja->nr, pos);
846                 ja->nr++;
847
848                 ja->buckets[pos] = bucket;
849                 ja->bucket_seq[pos] = 0;
850                 journal_buckets->buckets[pos] = cpu_to_le64(bucket);
851
852                 if (pos <= ja->discard_idx)
853                         ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
854                 if (pos <= ja->dirty_idx_ondisk)
855                         ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;
856                 if (pos <= ja->dirty_idx)
857                         ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;
858                 if (pos <= ja->cur_idx)
859                         ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
860
861                 bch2_mark_metadata_bucket(c, ca, bucket, BCH_DATA_JOURNAL,
862                                           ca->mi.bucket_size,
863                                           gc_phase(GC_PHASE_SB),
864                                           0);
865
866                 if (c) {
867                         spin_unlock(&c->journal.lock);
868                         percpu_up_read_preempt_enable(&c->mark_lock);
869                 } else {
870                         preempt_enable();
871                 }
872
873                 if (!new_fs)
874                         bch2_open_bucket_put(c, ob);
875         }
876
877         ret = 0;
878 err:
879         kfree(new_bucket_seq);
880         kfree(new_buckets);
881
882         return ret;
883 }
884
885 /*
886  * Allocate more journal space at runtime - not currently making use of it, but
887  * the code works:
888  */
889 int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
890                                 unsigned nr)
891 {
892         struct journal_device *ja = &ca->journal;
893         struct closure cl;
894         unsigned current_nr;
895         int ret;
896
897         closure_init_stack(&cl);
898
899         do {
900                 struct disk_reservation disk_res = { 0, 0 };
901
902                 closure_sync(&cl);
903
904                 mutex_lock(&c->sb_lock);
905                 current_nr = ja->nr;
906
907                 /*
908                  * note: journal buckets aren't really counted as _sectors_ used yet, so
909                  * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
910                  * when space used goes up without a reservation - but we do need the
911                  * reservation to ensure we'll actually be able to allocate:
912                  */
913
914                 if (bch2_disk_reservation_get(c, &disk_res,
915                                               bucket_to_sector(ca, nr - ja->nr), 1, 0)) {
916                         mutex_unlock(&c->sb_lock);
917                         return -ENOSPC;
918                 }
919
920                 ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);
921
922                 bch2_disk_reservation_put(c, &disk_res);
923
924                 if (ja->nr != current_nr)
925                         bch2_write_super(c);
926                 mutex_unlock(&c->sb_lock);
927         } while (ret == -EAGAIN);
928
929         return ret;
930 }
931
932 int bch2_dev_journal_alloc(struct bch_dev *ca)
933 {
934         unsigned nr;
935
936         if (dynamic_fault("bcachefs:add:journal_alloc"))
937                 return -ENOMEM;
938
939         /*
940          * clamp journal size to 1024 buckets or 512MB (in sectors), whichever
941          * is smaller:
942          */
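        /*
         * e.g. with 1MB buckets (2048 sectors), (1 << 20) / 2048 = 512, so
         * the cap is min(1024, 512) = 512 buckets; a device with 10000
         * buckets then asks for 10000 >> 8 = 39, clamped to at least
         * BCH_JOURNAL_BUCKETS_MIN:
         */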
943         nr = clamp_t(unsigned, ca->mi.nbuckets >> 8,
944                      BCH_JOURNAL_BUCKETS_MIN,
945                      min(1 << 10,
946                          (1 << 20) / ca->mi.bucket_size));
947
948         return __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
949 }
950
951 /* startup/shutdown: */
952
953 static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
954 {
955         union journal_res_state state;
956         struct journal_buf *w;
957         bool ret;
958
959         spin_lock(&j->lock);
960         state = READ_ONCE(j->reservations);
961         w = j->buf + !state.idx;
962
963         ret = state.prev_buf_unwritten &&
964                 bch2_extent_has_device(bkey_i_to_s_c_extent(&w->key), dev_idx);
965         spin_unlock(&j->lock);
966
967         return ret;
968 }
969
970 void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
971 {
972         wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
973 }
974
975 void bch2_fs_journal_stop(struct journal *j)
976 {
977         struct bch_fs *c = container_of(j, struct bch_fs, journal);
978
979         wait_event(j->wait, journal_entry_close(j));
980
981         /* do we need to write another journal entry? */
982         if (test_bit(JOURNAL_NOT_EMPTY, &j->flags) ||
983             c->btree_roots_dirty)
984                 bch2_journal_meta(j);
985
986         journal_quiesce(j);
987
988         BUG_ON(!bch2_journal_error(j) &&
989                test_bit(JOURNAL_NOT_EMPTY, &j->flags));
990
991         cancel_delayed_work_sync(&j->write_work);
992         cancel_delayed_work_sync(&j->reclaim_work);
993 }
994
995 int bch2_fs_journal_start(struct journal *j, u64 cur_seq,
996                           struct list_head *journal_entries)
997 {
998         struct bch_fs *c = container_of(j, struct bch_fs, journal);
999         struct journal_entry_pin_list *p;
1000         struct journal_replay *i;
1001         u64 last_seq = cur_seq, nr, seq;
1002
1003         if (!list_empty(journal_entries))
1004                 last_seq = le64_to_cpu(list_last_entry(journal_entries,
1005                                                        struct journal_replay,
1006                                                        list)->j.last_seq);
1007
1008         nr = cur_seq - last_seq;
1009
1010         if (nr + 1 > j->pin.size) {
1011                 free_fifo(&j->pin);
1012                 init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
1013                 if (!j->pin.data) {
1014                         bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
1015                         return -ENOMEM;
1016                 }
1017         }
1018
1019         j->last_seq_ondisk      = last_seq;
1020         j->pin.front            = last_seq;
1021         j->pin.back             = cur_seq;
1022         atomic64_set(&j->seq, cur_seq - 1);
1023
1024         fifo_for_each_entry_ptr(p, &j->pin, seq) {
1025                 INIT_LIST_HEAD(&p->list);
1026                 INIT_LIST_HEAD(&p->flushed);
1027                 atomic_set(&p->count, 0);
1028                 p->devs.nr = 0;
1029         }
1030
1031         list_for_each_entry(i, journal_entries, list) {
1032                 seq = le64_to_cpu(i->j.seq);
1033
1034                 BUG_ON(seq < last_seq || seq >= cur_seq);
1035
1036                 p = journal_seq_pin(j, seq);
1037
1038                 atomic_set(&p->count, 1);
1039                 p->devs = i->devs;
1040         }
1041
1042         spin_lock(&j->lock);
1043
1044         set_bit(JOURNAL_STARTED, &j->flags);
1045
1046         journal_pin_new_entry(j, 1);
1047         bch2_journal_buf_init(j);
1048
1049         c->last_bucket_seq_cleanup = journal_cur_seq(j);
1050
1051         bch2_journal_space_available(j);
1052         spin_unlock(&j->lock);
1053
1054         return 0;
1055 }
1056
1057 /* init/exit: */
1058
1059 void bch2_dev_journal_exit(struct bch_dev *ca)
1060 {
1061         kfree(ca->journal.bio);
1062         kfree(ca->journal.buckets);
1063         kfree(ca->journal.bucket_seq);
1064
1065         ca->journal.bio         = NULL;
1066         ca->journal.buckets     = NULL;
1067         ca->journal.bucket_seq  = NULL;
1068 }
1069
1070 int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
1071 {
1072         struct journal_device *ja = &ca->journal;
1073         struct bch_sb_field_journal *journal_buckets =
1074                 bch2_sb_get_journal(sb);
1075         unsigned i;
1076
1077         ja->nr = bch2_nr_journal_buckets(journal_buckets);
1078
1079         ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
1080         if (!ja->bucket_seq)
1081                 return -ENOMEM;
1082
1083         ca->journal.bio = bio_kmalloc(GFP_KERNEL,
1084                         DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE));
1085         if (!ca->journal.bio)
1086                 return -ENOMEM;
1087
1088         ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
1089         if (!ja->buckets)
1090                 return -ENOMEM;
1091
1092         for (i = 0; i < ja->nr; i++)
1093                 ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
1094
1095         return 0;
1096 }
1097
1098 void bch2_fs_journal_exit(struct journal *j)
1099 {
1100         kvpfree(j->buf[1].data, j->buf[1].buf_size);
1101         kvpfree(j->buf[0].data, j->buf[0].buf_size);
1102         free_fifo(&j->pin);
1103 }
1104
1105 int bch2_fs_journal_init(struct journal *j)
1106 {
1107         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1108         static struct lock_class_key res_key;
1109         int ret = 0;
1110
1111         pr_verbose_init(c->opts, "");
1112
1113         spin_lock_init(&j->lock);
1114         spin_lock_init(&j->err_lock);
1115         init_waitqueue_head(&j->wait);
1116         INIT_DELAYED_WORK(&j->write_work, journal_write_work);
1117         INIT_DELAYED_WORK(&j->reclaim_work, bch2_journal_reclaim_work);
1118         init_waitqueue_head(&j->pin_flush_wait);
1119         mutex_init(&j->reclaim_lock);
1120         mutex_init(&j->discard_lock);
1121
1122         lockdep_init_map(&j->res_map, "journal res", &res_key, 0);
1123
1124         j->buf[0].buf_size      = JOURNAL_ENTRY_SIZE_MIN;
1125         j->buf[1].buf_size      = JOURNAL_ENTRY_SIZE_MIN;
1126         j->write_delay_ms       = 1000;
1127         j->reclaim_delay_ms     = 100;
1128
1129         /* Btree roots: */
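        /*
         * Every journal write carries the current root for each btree (see
         * bch2_journal_open_seq_async()), so reserve worst case space for
         * BTREE_ID_NR root keys in every entry:
         */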
1130         j->entry_u64s_reserved +=
1131                 BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_EXTENT_U64s_MAX);
1132
1133         atomic64_set(&j->reservations.counter,
1134                 ((union journal_res_state)
1135                  { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);
1136
1137         if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
1138             !(j->buf[0].data = kvpmalloc(j->buf[0].buf_size, GFP_KERNEL)) ||
1139             !(j->buf[1].data = kvpmalloc(j->buf[1].buf_size, GFP_KERNEL))) {
1140                 ret = -ENOMEM;
1141                 goto out;
1142         }
1143
1144         j->pin.front = j->pin.back = 1;
1145 out:
1146         pr_verbose_init(c->opts, "ret %i", ret);
1147         return ret;
1148 }
1149
1150 /* debug: */
1151
1152 ssize_t bch2_journal_print_debug(struct journal *j, char *buf)
1153 {
1154         struct printbuf out = _PBUF(buf, PAGE_SIZE);
1155         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1156         union journal_res_state s;
1157         struct bch_dev *ca;
1158         unsigned iter;
1159
1160         rcu_read_lock();
1161         spin_lock(&j->lock);
1162         s = READ_ONCE(j->reservations);
1163
1164         pr_buf(&out,
1165                "active journal entries:\t%llu\n"
1166                "seq:\t\t\t%llu\n"
1167                "last_seq:\t\t%llu\n"
1168                "last_seq_ondisk:\t%llu\n"
1169                "prereserved:\t\t%u/%u\n"
1170                "current entry sectors:\t%u\n"
1171                "current entry:\t\t",
1172                fifo_used(&j->pin),
1173                journal_cur_seq(j),
1174                journal_last_seq(j),
1175                j->last_seq_ondisk,
1176                j->prereserved.reserved,
1177                j->prereserved.remaining,
1178                j->cur_entry_sectors);
1179
1180         switch (s.cur_entry_offset) {
1181         case JOURNAL_ENTRY_ERROR_VAL:
1182                 pr_buf(&out, "error\n");
1183                 break;
1184         case JOURNAL_ENTRY_CLOSED_VAL:
1185                 pr_buf(&out, "closed\n");
1186                 break;
1187         default:
1188                 pr_buf(&out, "%u/%u\n",
1189                        s.cur_entry_offset,
1190                        j->cur_entry_u64s);
1191                 break;
1192         }
1193
1194         pr_buf(&out,
1195                "current entry refs:\t%u\n"
1196                "prev entry unwritten:\t",
1197                journal_state_count(s, s.idx));
1198
1199         if (s.prev_buf_unwritten)
1200                 pr_buf(&out, "yes, ref %u sectors %u\n",
1201                        journal_state_count(s, !s.idx),
1202                        journal_prev_buf(j)->sectors);
1203         else
1204                 pr_buf(&out, "no\n");
1205
1206         pr_buf(&out,
1207                "need write:\t\t%i\n"
1208                "replay done:\t\t%i\n",
1209                test_bit(JOURNAL_NEED_WRITE,     &j->flags),
1210                test_bit(JOURNAL_REPLAY_DONE,    &j->flags));
1211
1212         for_each_member_device_rcu(ca, c, iter,
1213                                    &c->rw_devs[BCH_DATA_JOURNAL]) {
1214                 struct journal_device *ja = &ca->journal;
1215
1216                 if (!ja->nr)
1217                         continue;
1218
1219                 pr_buf(&out,
1220                        "dev %u:\n"
1221                        "\tnr\t\t%u\n"
1222                        "\tavailable\t%u:%u\n"
1223                        "\tdiscard_idx\t\t%u\n"
1224                        "\tdirty_idx_ondisk\t%u (seq %llu)\n"
1225                        "\tdirty_idx\t\t%u (seq %llu)\n"
1226                        "\tcur_idx\t\t%u (seq %llu)\n",
1227                        iter, ja->nr,
1228                        bch2_journal_dev_buckets_available(j, ja, journal_space_discarded),
1229                        ja->sectors_free,
1230                        ja->discard_idx,
1231                        ja->dirty_idx_ondisk,    ja->bucket_seq[ja->dirty_idx_ondisk],
1232                        ja->dirty_idx,           ja->bucket_seq[ja->dirty_idx],
1233                        ja->cur_idx,             ja->bucket_seq[ja->cur_idx]);
1234         }
1235
1236         spin_unlock(&j->lock);
1237         rcu_read_unlock();
1238
1239         return out.pos - buf;
1240 }
1241
1242 ssize_t bch2_journal_print_pins(struct journal *j, char *buf)
1243 {
1244         struct printbuf out = _PBUF(buf, PAGE_SIZE);
1245         struct journal_entry_pin_list *pin_list;
1246         struct journal_entry_pin *pin;
1247         u64 i;
1248
1249         spin_lock(&j->lock);
1250         fifo_for_each_entry_ptr(pin_list, &j->pin, i) {
1251                 pr_buf(&out, "%llu: count %u\n",
1252                        i, atomic_read(&pin_list->count));
1253
1254                 list_for_each_entry(pin, &pin_list->list, list)
1255                         pr_buf(&out, "\t%p %pf\n",
1256                                pin, pin->flush);
1257
1258                 if (!list_empty(&pin_list->flushed))
1259                         pr_buf(&out, "flushed:\n");
1260
1261                 list_for_each_entry(pin, &pin_list->flushed, list)
1262                         pr_buf(&out, "\t%p %pf\n",
1263                                pin, pin->flush);
1264         }
1265         spin_unlock(&j->lock);
1266
1267         return out.pos - buf;
1268 }