// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_io.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "error.h"
#include "io.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "replicas.h"

#include <trace/events/bcachefs.h>

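/*
 * Journal entries read during recovery live on a list of struct
 * journal_replay until replay finishes; journal_replay_free() marks an entry
 * as ignored, and only actually frees it if we aren't keeping the entire
 * journal in memory (the read_entire_journal option):
 */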
static void __journal_replay_free(struct journal_replay *i)
{
	list_del(&i->list);
	kvpfree(i, offsetof(struct journal_replay, j) +
		vstruct_bytes(&i->j));
}

static void journal_replay_free(struct bch_fs *c, struct journal_replay *i)
{
	i->ignore = true;

	if (!c->opts.read_entire_journal)
		__journal_replay_free(i);
}

struct journal_list {
	struct closure		cl;
	struct mutex		lock;
	struct list_head	*head;
	int			ret;
};

#define JOURNAL_ENTRY_ADD_OK		0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE	5

/*
 * Given a journal entry we just read, add it to the list of journal entries to
 * be replayed:
 */
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
			     struct journal_ptr entry_ptr,
			     struct journal_list *jlist, struct jset *j,
			     bool bad)
{
	struct journal_replay *i, *pos, *dup = NULL;
	struct journal_ptr *ptr;
	struct list_head *where;
	size_t bytes = vstruct_bytes(j);
	u64 last_seq = 0;
	int ret = JOURNAL_ENTRY_ADD_OK;

	list_for_each_entry_reverse(i, jlist->head, list) {
		if (!JSET_NO_FLUSH(&i->j)) {
			last_seq = le64_to_cpu(i->j.last_seq);
			break;
		}
	}

	/* Is this entry older than the range we need? */
	if (!c->opts.read_entire_journal &&
	    le64_to_cpu(j->seq) < last_seq) {
		ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
		goto out;
	}

	/* Drop entries we don't need anymore */
	if (!JSET_NO_FLUSH(j)) {
		list_for_each_entry_safe(i, pos, jlist->head, list) {
			if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
				break;
			journal_replay_free(c, i);
		}
	}

	list_for_each_entry_reverse(i, jlist->head, list) {
		if (le64_to_cpu(j->seq) > le64_to_cpu(i->j.seq)) {
			where = &i->list;
			goto add;
		}
	}

	where = jlist->head;
add:
	dup = where->next != jlist->head
		? container_of(where->next, struct journal_replay, list)
		: NULL;

	if (dup && le64_to_cpu(j->seq) != le64_to_cpu(dup->j.seq))
		dup = NULL;

	/*
	 * Duplicate journal entries? If so we want the one that didn't have a
	 * checksum error:
	 */
	if (dup) {
		if (dup->bad) {
			/* we'll replace @dup: */
		} else if (bad) {
			i = dup;
			goto found;
		} else {
			fsck_err_on(bytes != vstruct_bytes(&dup->j) ||
				    memcmp(j, &dup->j, bytes), c,
				    "found duplicate but non identical journal entries (seq %llu)",
				    le64_to_cpu(j->seq));
			i = dup;
			goto found;
		}
	}

	i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
	if (!i) {
		ret = -ENOMEM;
		goto out;
	}

	i->nr_ptrs	= 0;
	i->bad		= bad;
	i->ignore	= false;
	memcpy(&i->j, j, bytes);

	if (dup) {
		i->nr_ptrs = dup->nr_ptrs;
		memcpy(i->ptrs, dup->ptrs, sizeof(dup->ptrs));
		__journal_replay_free(dup);
	}

	list_add(&i->list, where);
found:
	for (ptr = i->ptrs; ptr < i->ptrs + i->nr_ptrs; ptr++) {
		if (ptr->dev == ca->dev_idx) {
			bch_err(c, "duplicate journal entry %llu on same device",
				le64_to_cpu(i->j.seq));
			goto out;
		}
	}

	if (i->nr_ptrs >= ARRAY_SIZE(i->ptrs)) {
		bch_err(c, "found too many copies of journal entry %llu",
			le64_to_cpu(i->j.seq));
		goto out;
	}

	i->ptrs[i->nr_ptrs++] = entry_ptr;
out:
fsck_err:
	return ret;
}

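/* Journal entry nonces are derived from the entry's sequence number: */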
static struct nonce journal_nonce(const struct jset *jset)
{
	return (struct nonce) {{
		[0] = 0,
		[1] = ((__le32 *) &jset->seq)[0],
		[2] = ((__le32 *) &jset->seq)[1],
		[3] = BCH_NONCE_JOURNAL,
	}};
}

/* this fills in a range with empty jset_entries: */
static void journal_entry_null_range(void *start, void *end)
{
	struct jset_entry *entry;

	for (entry = start; entry != end; entry = vstruct_next(entry))
		memset(entry, 0, sizeof(*entry));
}

#define JOURNAL_ENTRY_REREAD	5
#define JOURNAL_ENTRY_NONE	6
#define JOURNAL_ENTRY_BAD	7

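/*
 * When validating on read, errors are fsck errors that we try to repair; when
 * validating just prior to a write, an error means in-memory state has been
 * corrupted and we can't safely write out the journal entry:
 */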
#define journal_entry_err(c, msg, ...)					\
({									\
	switch (write) {						\
	case READ:							\
		mustfix_fsck_err(c, msg, ##__VA_ARGS__);		\
		break;							\
	case WRITE:							\
		bch_err(c, "corrupt metadata before write:\n"		\
			msg, ##__VA_ARGS__);				\
		if (bch2_fs_inconsistent(c)) {				\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		}							\
		break;							\
	}								\
	true;								\
})

#define journal_entry_err_on(cond, c, msg, ...)				\
	((cond) ? journal_entry_err(c, msg, ##__VA_ARGS__) : false)

#define FSCK_DELETED_KEY	5

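/*
 * Validate a single bkey within a journal entry: bad keys are deleted by
 * shifting the rest of the entry down over them and nulling out the freed
 * space, and the caller is told via FSCK_DELETED_KEY not to advance to the
 * next key:
 */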
static int journal_validate_key(struct bch_fs *c, const char *where,
				struct jset_entry *entry,
				unsigned level, enum btree_id btree_id,
				struct bkey_i *k, const char *type,
				unsigned version, int big_endian, int write)
{
	void *next = vstruct_next(entry);
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	if (journal_entry_err_on(!k->k.u64s, c,
			"invalid %s in %s entry offset %zi/%u: k->u64s 0",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s))) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on((void *) bkey_next(k) >
				(void *) vstruct_next(entry), c,
			"invalid %s in %s entry offset %zi/%u: extends past end of journal entry",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s))) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, c,
			"invalid %s in %s entry offset %zi/%u: bad format %u",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s),
			k->k.format)) {
		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (!write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));

	if (bch2_bkey_invalid(c, bkey_i_to_s_c(k),
			      __btree_node_type(level, btree_id), write, &buf)) {
		printbuf_reset(&buf);
		pr_buf(&buf, "invalid %s in %s entry offset %zi/%u:",
		       type, where,
		       (u64 *) k - entry->_data,
		       le16_to_cpu(entry->u64s));
		pr_newline(&buf);
		pr_indent_push(&buf, 2);

		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
		pr_newline(&buf);
		bch2_bkey_invalid(c, bkey_i_to_s_c(k),
				  __btree_node_type(level, btree_id), write, &buf);

		mustfix_fsck_err(c, "%s", buf.buf);

		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);

		printbuf_exit(&buf);
		return FSCK_DELETED_KEY;
	}

	if (write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

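/* Validate each key in a btree keys entry, deleting any that fail: */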
static int journal_entry_btree_keys_validate(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct bkey_i *k = entry->start;

	while (k != vstruct_last(entry)) {
		int ret = journal_validate_key(c, where, entry,
					       entry->level,
					       entry->btree_id,
					       k, "key", version, big_endian, write);
		if (ret == FSCK_DELETED_KEY)
			continue;

		k = bkey_next(k);
	}

	return 0;
}

static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	struct bkey_i *k;
	bool first = true;

	vstruct_for_each(entry, k) {
		if (!first) {
			pr_newline(out);
			pr_buf(out, "%s: ", bch2_jset_entry_types[entry->type]);
		}
		pr_buf(out, "btree=%s l=%u ", bch2_btree_ids[entry->btree_id], entry->level);
		bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
		first = false;
	}
}

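/* A btree root entry must contain exactly one key: */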
static int journal_entry_btree_root_validate(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct bkey_i *k = entry->start;
	int ret = 0;

	if (journal_entry_err_on(!entry->u64s ||
				 le16_to_cpu(entry->u64s) != k->k.u64s, c,
				 "invalid btree root journal entry: wrong number of keys")) {
		void *next = vstruct_next(entry);
		/*
		 * we don't want to null out this jset_entry,
		 * just the contents, so that later we can tell
		 * we were _supposed_ to have a btree root
		 */
		entry->u64s = 0;
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	return journal_validate_key(c, where, entry, 1, entry->btree_id, k,
				    "btree root", version, big_endian, write);
fsck_err:
	return ret;
}

static void journal_entry_btree_root_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	journal_entry_btree_keys_to_text(out, c, entry);
}

static int journal_entry_prio_ptrs_validate(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	/* obsolete, don't care: */
	return 0;
}

static void journal_entry_prio_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
}

static int journal_entry_blacklist_validate(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, c,
		"invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
fsck_err:
	return ret;
}

static void journal_entry_blacklist_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
	struct jset_entry_blacklist *bl =
		container_of(entry, struct jset_entry_blacklist, entry);

	pr_buf(out, "seq=%llu", le64_to_cpu(bl->seq));
}

static int journal_entry_blacklist_v2_validate(struct bch_fs *c,
					       const char *where,
					       struct jset_entry *entry,
					       unsigned version, int big_endian, int write)
{
	struct jset_entry_blacklist_v2 *bl_entry;
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, c,
		"invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		goto out;
	}

	bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);

	if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
				 le64_to_cpu(bl_entry->end), c,
		"invalid journal seq blacklist entry: start > end")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
out:
fsck_err:
	return ret;
}

static void journal_entry_blacklist_v2_to_text(struct printbuf *out, struct bch_fs *c,
					       struct jset_entry *entry)
{
	struct jset_entry_blacklist_v2 *bl =
		container_of(entry, struct jset_entry_blacklist_v2, entry);

	pr_buf(out, "start=%llu end=%llu",
	       le64_to_cpu(bl->start),
	       le64_to_cpu(bl->end));
}

static int journal_entry_usage_validate(struct bch_fs *c,
					const char *where,
					struct jset_entry *entry,
					unsigned version, int big_endian, int write)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u),
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_usage_to_text(struct printbuf *out, struct bch_fs *c,
					struct jset_entry *entry)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);

	pr_buf(out, "type=%s v=%llu",
	       bch2_fs_usage_types[u->entry.btree_id],
	       le64_to_cpu(u->v));
}

static int journal_entry_data_usage_validate(struct bch_fs *c,
					const char *where,
					struct jset_entry *entry,
					unsigned version, int big_endian, int write)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u) ||
				 bytes < sizeof(*u) + u->r.nr_devs,
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_data_usage_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);

	bch2_replicas_entry_to_text(out, &u->r);
	pr_buf(out, "=%llu", le64_to_cpu(u->v));
}

static int journal_entry_clock_validate(struct bch_fs *c,
					const char *where,
					struct jset_entry *entry,
					unsigned version, int big_endian, int write)
{
	struct jset_entry_clock *clock =
		container_of(entry, struct jset_entry_clock, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes != sizeof(*clock),
				 c, "invalid journal entry clock: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(clock->rw > 1,
				 c, "invalid journal entry clock: bad rw")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_clock_to_text(struct printbuf *out, struct bch_fs *c,
					struct jset_entry *entry)
{
	struct jset_entry_clock *clock =
		container_of(entry, struct jset_entry_clock, entry);

	pr_buf(out, "%s=%llu", clock->rw ? "write" : "read", le64_to_cpu(clock->time));
}

static int journal_entry_dev_usage_validate(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	struct jset_entry_dev_usage *u =
		container_of(entry, struct jset_entry_dev_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	unsigned expected = sizeof(*u);
	unsigned dev;
	int ret = 0;

	if (journal_entry_err_on(bytes < expected,
				 c, "invalid journal entry dev usage: bad size (%u < %u)",
				 bytes, expected)) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	dev = le32_to_cpu(u->dev);

	if (journal_entry_err_on(!bch2_dev_exists2(c, dev),
				 c, "invalid journal entry dev usage: bad dev")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(u->pad,
				 c, "invalid journal entry dev usage: bad pad")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
	struct jset_entry_dev_usage *u =
		container_of(entry, struct jset_entry_dev_usage, entry);
	unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

	pr_buf(out, "dev=%u", le32_to_cpu(u->dev));

	for (i = 0; i < nr_types; i++) {
		if (i < BCH_DATA_NR)
			pr_buf(out, " %s", bch2_data_types[i]);
		else
			pr_buf(out, " (unknown data type %u)", i);
		pr_buf(out, ": buckets=%llu sectors=%llu fragmented=%llu",
		       le64_to_cpu(u->d[i].buckets),
		       le64_to_cpu(u->d[i].sectors),
		       le64_to_cpu(u->d[i].fragmented));
	}

	pr_buf(out, " buckets_ec: %llu", le64_to_cpu(u->buckets_ec));
}

static int journal_entry_log_validate(struct bch_fs *c,
				      const char *where,
				      struct jset_entry *entry,
				      unsigned version, int big_endian, int write)
{
	return 0;
}

static void journal_entry_log_to_text(struct printbuf *out, struct bch_fs *c,
				      struct jset_entry *entry)
{
	struct jset_entry_log *l = container_of(entry, struct jset_entry_log, entry);
	unsigned bytes = vstruct_bytes(entry) - offsetof(struct jset_entry_log, d);

	pr_buf(out, "%.*s", bytes, l->d);
}

struct jset_entry_ops {
	int (*validate)(struct bch_fs *, const char *,
			struct jset_entry *, unsigned, int, int);
	void (*to_text)(struct printbuf *, struct bch_fs *, struct jset_entry *);
};

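/*
 * Table of handlers for each journal entry type, built from the
 * BCH_JSET_ENTRY_TYPES() x-macro: each x(f, nr) expands to an initializer
 * wiring up journal_entry_f_validate() and journal_entry_f_to_text() above:
 */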
static const struct jset_entry_ops bch2_jset_entry_ops[] = {
#define x(f, nr)						\
	[BCH_JSET_ENTRY_##f]	= (struct jset_entry_ops) {	\
		.validate	= journal_entry_##f##_validate,	\
		.to_text	= journal_entry_##f##_to_text,	\
	},
	BCH_JSET_ENTRY_TYPES()
#undef x
};

int bch2_journal_entry_validate(struct bch_fs *c, const char *where,
				struct jset_entry *entry,
				unsigned version, int big_endian, int write)
{
	return entry->type < BCH_JSET_ENTRY_NR
		? bch2_jset_entry_ops[entry->type].validate(c, where, entry,
				version, big_endian, write)
		: 0;
}

void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c,
				struct jset_entry *entry)
{
	if (entry->type < BCH_JSET_ENTRY_NR) {
		pr_buf(out, "%s: ", bch2_jset_entry_types[entry->type]);
		bch2_jset_entry_ops[entry->type].to_text(out, c, entry);
	} else {
		pr_buf(out, "(unknown type %u)", entry->type);
	}
}

static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
				 int write)
{
	char buf[100];
	struct jset_entry *entry;
	int ret = 0;

	vstruct_for_each(jset, entry) {
		scnprintf(buf, sizeof(buf), "jset %llu entry offset %zi/%u",
			  le64_to_cpu(jset->seq),
			  (u64 *) entry - jset->_data,
			  le32_to_cpu(jset->u64s));

		if (journal_entry_err_on(vstruct_next(entry) >
					 vstruct_last(jset), c,
				"journal entry extends past end of jset")) {
			jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
			break;
		}

		ret = bch2_journal_entry_validate(c, buf, entry,
					le32_to_cpu(jset->version),
					JSET_BIG_ENDIAN(jset), write);
		if (ret)
			break;
	}
fsck_err:
	return ret;
}

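/*
 * Validate a single jset: check the magic, version, size and checksum, and
 * decrypt the payload; the entries themselves are validated separately, by
 * jset_validate_entries():
 */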
static int jset_validate(struct bch_fs *c,
			 struct bch_dev *ca,
			 struct jset *jset, u64 sector,
			 unsigned bucket_sectors_left,
			 unsigned sectors_read,
			 int write)
{
	size_t bytes = vstruct_bytes(jset);
	struct bch_csum csum;
	unsigned version;
	int ret = 0;

	if (le64_to_cpu(jset->magic) != jset_magic(c))
		return JOURNAL_ENTRY_NONE;

	version = le32_to_cpu(jset->version);
	if (journal_entry_err_on((version != BCH_JSET_VERSION_OLD &&
				  version < bcachefs_metadata_version_min) ||
				 version >= bcachefs_metadata_version_max, c,
			"%s sector %llu seq %llu: unknown journal entry version %u",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			version)) {
		/* don't try to continue: */
		return EINVAL;
	}

	if (bytes > (sectors_read << 9) &&
	    sectors_read < bucket_sectors_left)
		return JOURNAL_ENTRY_REREAD;

	if (journal_entry_err_on(bytes > bucket_sectors_left << 9, c,
			"%s sector %llu seq %llu: journal entry too big (%zu bytes)",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq), bytes)) {
		ret = JOURNAL_ENTRY_BAD;
		le32_add_cpu(&jset->u64s,
			     -((bytes - (bucket_sectors_left << 9)) / 8));
	}

	if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), c,
			"%s sector %llu seq %llu: journal entry with unknown csum type %llu",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			JSET_CSUM_TYPE(jset))) {
		ret = JOURNAL_ENTRY_BAD;
		goto csum_done;
	}

	if (write)
		goto csum_done;

	csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset);
	if (journal_entry_err_on(bch2_crc_cmp(csum, jset->csum), c,
				 "%s sector %llu seq %llu: journal checksum bad",
				 ca ? ca->name : c->name,
				 sector, le64_to_cpu(jset->seq)))
		ret = JOURNAL_ENTRY_BAD;

	ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		     jset->encrypted_start,
		     vstruct_end(jset) - (void *) jset->encrypted_start);
	bch2_fs_fatal_err_on(ret, c,
			"error decrypting journal entry: %i", ret);
csum_done:
	/* last_seq is ignored when JSET_NO_FLUSH is true */
	if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
				 le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), c,
				 "invalid journal entry: last_seq > seq (%llu > %llu)",
				 le64_to_cpu(jset->last_seq),
				 le64_to_cpu(jset->seq))) {
		jset->last_seq = jset->seq;
		return JOURNAL_ENTRY_BAD;
	}
fsck_err:
	return ret;
}

static int jset_validate_for_write(struct bch_fs *c, struct jset *jset)
{
	unsigned sectors = vstruct_sectors(jset, c->block_bits);

	return jset_validate(c, NULL, jset, 0, sectors, sectors, WRITE) ?:
		jset_validate_entries(c, jset, WRITE);
}

struct journal_read_buf {
	void		*data;
	size_t		size;
};

static int journal_read_buf_realloc(struct journal_read_buf *b,
				    size_t new_size)
{
	void *n;

	/* the bios are sized for this many pages, max: */
	if (new_size > JOURNAL_ENTRY_SIZE_MAX)
		return -ENOMEM;

	new_size = roundup_pow_of_two(new_size);
	n = kvpmalloc(new_size, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	kvpfree(b->data, b->size);
	b->data = n;
	b->size = new_size;
	return 0;
}

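/*
 * Read and validate every journal entry in one journal bucket: entries that
 * fail to validate don't abort the scan; we skip ahead to the next block
 * boundary and keep looking, since good entries may follow:
 */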
static int journal_read_bucket(struct bch_dev *ca,
			       struct journal_read_buf *buf,
			       struct journal_list *jlist,
			       unsigned bucket)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct jset *j = NULL;
	unsigned sectors, sectors_read = 0;
	u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
	    end = offset + ca->mi.bucket_size;
	bool saw_bad = false;
	int ret = 0;

	pr_debug("reading %u", bucket);

	while (offset < end) {
		if (!sectors_read) {
			struct bio *bio;
reread:
			sectors_read = min_t(unsigned,
				end - offset, buf->size >> 9);

			bio = bio_kmalloc(GFP_KERNEL,
					  buf_pages(buf->data,
						    sectors_read << 9));
			bio_set_dev(bio, ca->disk_sb.bdev);
			bio->bi_iter.bi_sector	= offset;
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			bch2_bio_map(bio, buf->data, sectors_read << 9);

			ret = submit_bio_wait(bio);
			bio_put(bio);

			if (bch2_dev_io_err_on(ret, ca,
					       "journal read error: sector %llu",
					       offset) ||
			    bch2_meta_read_fault("journal")) {
				/*
				 * We don't error out of the recovery process
				 * here, since the relevant journal entry may be
				 * found on a different device, and missing or
				 * no journal entries will be handled later
				 */
				return 0;
			}

			j = buf->data;
		}

		ret = jset_validate(c, ca, j, offset,
				    end - offset, sectors_read,
				    READ);
		switch (ret) {
		case BCH_FSCK_OK:
			sectors = vstruct_sectors(j, c->block_bits);
			break;
		case JOURNAL_ENTRY_REREAD:
			if (vstruct_bytes(j) > buf->size) {
				ret = journal_read_buf_realloc(buf,
							vstruct_bytes(j));
				if (ret)
					return ret;
			}
			goto reread;
		case JOURNAL_ENTRY_NONE:
			if (!saw_bad)
				return 0;
			sectors = block_sectors(c);
			goto next_block;
		case JOURNAL_ENTRY_BAD:
			saw_bad = true;
			/*
			 * On checksum error we don't really trust the size
			 * field of the journal entry we read, so try reading
			 * again at next block boundary:
			 */
			sectors = block_sectors(c);
			break;
		default:
			return ret;
		}

		/*
		 * This happens sometimes if we don't have discards on -
		 * when we've partially overwritten a bucket with new
		 * journal entries. We don't need the rest of the
		 * bucket:
		 */
		if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
			return 0;

		ja->bucket_seq[bucket] = le64_to_cpu(j->seq);

		mutex_lock(&jlist->lock);
		ret = journal_entry_add(c, ca, (struct journal_ptr) {
					.dev		= ca->dev_idx,
					.bucket		= bucket,
					.bucket_offset	= offset -
						bucket_to_sector(ca, ja->buckets[bucket]),
					.sector		= offset,
					}, jlist, j, ret != 0);
		mutex_unlock(&jlist->lock);

		switch (ret) {
		case JOURNAL_ENTRY_ADD_OK:
			break;
		case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
			break;
		default:
			return ret;
		}
next_block:
		pr_debug("next");
		offset		+= sectors;
		sectors_read	-= sectors;
		j = ((void *) j) + (sectors << 9);
	}

	return 0;
}

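/*
 * Per device journal read, run as a closure for each device: reads every
 * journal bucket, then works out where journal writes should resume
 * (ja->cur_idx, ja->sectors_free) from the bucket sequence numbers found:
 */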
static void bch2_journal_read_device(struct closure *cl)
{
	struct journal_device *ja =
		container_of(cl, struct journal_device, read);
	struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
	struct bch_fs *c = ca->fs;
	struct journal_list *jlist =
		container_of(cl->parent, struct journal_list, cl);
	struct journal_replay *r;
	struct journal_read_buf buf = { NULL, 0 };
	u64 min_seq = U64_MAX;
	unsigned i;
	int ret = 0;

	if (!ja->nr)
		goto out;

	ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
	if (ret)
		goto err;

	pr_debug("%u journal buckets", ja->nr);

	for (i = 0; i < ja->nr; i++) {
		ret = journal_read_bucket(ca, &buf, jlist, i);
		if (ret)
			goto err;
	}

	/* Find the journal bucket with the highest sequence number: */
	for (i = 0; i < ja->nr; i++) {
		if (ja->bucket_seq[i] > ja->bucket_seq[ja->cur_idx])
			ja->cur_idx = i;

		min_seq = min(ja->bucket_seq[i], min_seq);
	}

	/*
	 * If there's duplicate journal entries in multiple buckets (which
	 * definitely isn't supposed to happen, but...) - make sure to start
	 * cur_idx at the last of those buckets, so we don't deadlock trying to
	 * allocate
	 */
	while (ja->bucket_seq[ja->cur_idx] > min_seq &&
	       ja->bucket_seq[ja->cur_idx] ==
	       ja->bucket_seq[(ja->cur_idx + 1) % ja->nr])
		ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

	ja->sectors_free = ca->mi.bucket_size;

	mutex_lock(&jlist->lock);
	list_for_each_entry(r, jlist->head, list) {
		for (i = 0; i < r->nr_ptrs; i++) {
			if (r->ptrs[i].dev == ca->dev_idx &&
			    sector_to_bucket(ca, r->ptrs[i].sector) == ja->buckets[ja->cur_idx]) {
				unsigned wrote = (r->ptrs[i].sector % ca->mi.bucket_size) +
					vstruct_sectors(&r->j, c->block_bits);

				ja->sectors_free = min(ja->sectors_free,
						       ca->mi.bucket_size - wrote);
			}
		}
	}
	mutex_unlock(&jlist->lock);

	if (ja->bucket_seq[ja->cur_idx] &&
	    ja->sectors_free == ca->mi.bucket_size) {
		bch_err(c, "ja->sectors_free == ca->mi.bucket_size");
		bch_err(c, "cur_idx %u/%u", ja->cur_idx, ja->nr);
		for (i = 0; i < 3; i++) {
			/* wrap around, so we don't index out of bounds when
			 * cur_idx is the first or last bucket: */
			unsigned idx = (ja->cur_idx + ja->nr - 1 + i) % ja->nr;

			bch_err(c, "bucket_seq[%u] = %llu", idx, ja->bucket_seq[idx]);
		}
		ja->sectors_free = 0;
	}

	/*
	 * Set dirty_idx to indicate the entire journal is full and needs to be
	 * reclaimed - journal reclaim will immediately reclaim whatever isn't
	 * pinned when it first runs:
	 */
	ja->discard_idx = ja->dirty_idx_ondisk =
		ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
out:
	bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
	kvpfree(buf.data, buf.size);
	percpu_ref_put(&ca->io_ref);
	closure_return(cl);
	return;
err:
	mutex_lock(&jlist->lock);
	jlist->ret = ret;
	mutex_unlock(&jlist->lock);
	goto out;
}

void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
			       struct journal_replay *j)
{
	unsigned i;

	for (i = 0; i < j->nr_ptrs; i++) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, j->ptrs[i].dev);
		u64 offset;

		div64_u64_rem(j->ptrs[i].sector, ca->mi.bucket_size, &offset);

		if (i)
			pr_buf(out, " ");
		pr_buf(out, "%u:%u:%u (sector %llu)",
		       j->ptrs[i].dev,
		       j->ptrs[i].bucket,
		       j->ptrs[i].bucket_offset,
		       j->ptrs[i].sector);
	}
}

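/*
 * Read the journal from all devices, collecting entries onto @list: find the
 * newest flush entry, drop blacklisted and too-old entries, flag gaps in the
 * sequence numbers, and make sure the replicas each entry was found on are
 * marked in the superblock:
 */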
int bch2_journal_read(struct bch_fs *c, struct list_head *list,
		      u64 *blacklist_seq, u64 *start_seq)
{
	struct journal_list jlist;
	struct journal_replay *i, *t;
	struct bch_dev *ca;
	unsigned iter;
	struct printbuf buf = PRINTBUF;
	size_t keys = 0, entries = 0;
	bool degraded = false;
	u64 seq, last_seq = 0;
	int ret = 0;

	closure_init_stack(&jlist.cl);
	mutex_init(&jlist.lock);
	jlist.head = list;
	jlist.ret = 0;

	for_each_member_device(ca, c, iter) {
		if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
		    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
			continue;

		if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
		     ca->mi.state == BCH_MEMBER_STATE_ro) &&
		    percpu_ref_tryget(&ca->io_ref))
			closure_call(&ca->journal.read,
				     bch2_journal_read_device,
				     system_unbound_wq,
				     &jlist.cl);
		else
			degraded = true;
	}

	closure_sync(&jlist.cl);

	if (jlist.ret)
		return jlist.ret;

	if (list_empty(list)) {
		bch_info(c, "journal read done, but no entries found");
		return 0;
	}

	i = list_last_entry(list, struct journal_replay, list);
	*start_seq = le64_to_cpu(i->j.seq) + 1;

	/*
	 * Find most recent flush entry, and ignore newer non flush entries -
	 * those entries will be blacklisted:
	 */
	list_for_each_entry_safe_reverse(i, t, list, list) {
		if (i->ignore)
			continue;

		if (!JSET_NO_FLUSH(&i->j)) {
			last_seq	= le64_to_cpu(i->j.last_seq);
			*blacklist_seq	= le64_to_cpu(i->j.seq) + 1;
			break;
		}

		journal_replay_free(c, i);
	}

	if (!last_seq) {
		fsck_err(c, "journal read done, but no entries found after dropping non-flushes");
		ret = -1;
		goto err;
	}

	/* Drop blacklisted entries and entries older than last_seq: */
	list_for_each_entry_safe(i, t, list, list) {
		if (i->ignore)
			continue;

		seq = le64_to_cpu(i->j.seq);
		if (seq < last_seq) {
			journal_replay_free(c, i);
			continue;
		}

		if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
			fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
				    "found blacklisted journal entry %llu", seq);

			journal_replay_free(c, i);
		}
	}

	/* Check for missing entries: */
	seq = last_seq;
	list_for_each_entry(i, list, list) {
		if (i->ignore)
			continue;

		BUG_ON(seq > le64_to_cpu(i->j.seq));

		while (seq < le64_to_cpu(i->j.seq)) {
			u64 missing_start, missing_end;
			struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

			while (seq < le64_to_cpu(i->j.seq) &&
			       bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (seq == le64_to_cpu(i->j.seq))
				break;

			missing_start = seq;

			while (seq < le64_to_cpu(i->j.seq) &&
			       !bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (i->list.prev != list) {
				struct journal_replay *p = list_prev_entry(i, list);

				bch2_journal_ptrs_to_text(&buf1, c, p);
				pr_buf(&buf1, " size %zu", vstruct_sectors(&p->j, c->block_bits));
			} else
				pr_buf(&buf1, "(none)");
			bch2_journal_ptrs_to_text(&buf2, c, i);

			missing_end = seq - 1;
			fsck_err(c, "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
				 "  prev at %s\n"
				 "  next at %s",
				 missing_start, missing_end,
				 last_seq, *blacklist_seq - 1,
				 buf1.buf, buf2.buf);

			printbuf_exit(&buf1);
			printbuf_exit(&buf2);
		}

		seq++;
	}

	list_for_each_entry(i, list, list) {
		struct jset_entry *entry;
		struct bkey_i *k, *_n;
		struct bch_replicas_padded replicas = {
			.e.data_type = BCH_DATA_journal,
			.e.nr_required = 1,
		};
		unsigned ptr;

		if (i->ignore)
			continue;

		ret = jset_validate_entries(c, &i->j, READ);
		if (ret)
			goto err;

		for (ptr = 0; ptr < i->nr_ptrs; ptr++)
			replicas.e.devs[replicas.e.nr_devs++] = i->ptrs[ptr].dev;

		bch2_replicas_entry_sort(&replicas.e);

		/*
		 * If we're mounting in degraded mode - if we didn't read all
		 * the devices - this is wrong:
		 */

		printbuf_reset(&buf);
		bch2_replicas_entry_to_text(&buf, &replicas.e);

		if (!degraded &&
		    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
		     fsck_err_on(!bch2_replicas_marked(c, &replicas.e), c,
				 "superblock not marked as containing replicas %s",
				 buf.buf))) {
			ret = bch2_mark_replicas(c, &replicas.e);
			if (ret)
				goto err;
		}

		for_each_jset_key(k, _n, entry, &i->j)
			keys++;
		entries++;
	}

	bch_info(c, "journal read done, %zu keys in %zu entries, seq %llu",
		 keys, entries, *start_seq);

	if (*start_seq != *blacklist_seq)
		bch_info(c, "dropped unflushed entries %llu-%llu",
			 *blacklist_seq, *start_seq - 1);
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

/* journal write: */

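/*
 * Append pointers to @w->key for devices in @devs_sorted that have room for
 * @sectors in their current journal bucket, stopping once the write has
 * @replicas_want replicas:
 */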
static void __journal_write_alloc(struct journal *j,
				  struct journal_buf *w,
				  struct dev_alloc_list *devs_sorted,
				  unsigned sectors,
				  unsigned *replicas,
				  unsigned replicas_want)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_device *ja;
	struct bch_dev *ca;
	unsigned i;

	if (*replicas >= replicas_want)
		return;

	for (i = 0; i < devs_sorted->nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		/*
		 * Check that we can use this device, and aren't already using
		 * it:
		 */
		if (!ca->mi.durability ||
		    ca->mi.state != BCH_MEMBER_STATE_rw ||
		    !ja->nr ||
		    bch2_bkey_has_device(bkey_i_to_s_c(&w->key),
					 ca->dev_idx) ||
		    sectors > ja->sectors_free)
			continue;

		bch2_dev_stripe_increment(ca, &j->wp.stripe);

		bch2_bkey_append_ptr(&w->key,
			(struct bch_extent_ptr) {
				  .offset = bucket_to_sector(ca,
					ja->buckets[ja->cur_idx]) +
					ca->mi.bucket_size -
					ja->sectors_free,
				  .dev = ca->dev_idx,
		});

		ja->sectors_free -= sectors;
		ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);

		*replicas += ca->mi.durability;

		if (*replicas >= replicas_want)
			break;
	}
}

/**
 * journal_write_alloc - allocate space in the journal for a write, moving on
 * to the next journal bucket if possible
 */
static int journal_write_alloc(struct journal *j, struct journal_buf *w,
			       unsigned sectors)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_devs_mask devs;
	struct journal_device *ja;
	struct bch_dev *ca;
	struct dev_alloc_list devs_sorted;
	unsigned target = c->opts.metadata_target ?:
		c->opts.foreground_target;
	unsigned i, replicas = 0, replicas_want =
		READ_ONCE(c->opts.metadata_replicas);

	rcu_read_lock();
retry:
	devs = target_rw_devs(c, BCH_DATA_journal, target);

	devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs);

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas >= replicas_want)
		goto done;

	for (i = 0; i < devs_sorted.nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		if (sectors > ja->sectors_free &&
		    sectors <= ca->mi.bucket_size &&
		    bch2_journal_dev_buckets_available(j, ja,
					journal_space_discarded)) {
			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
			ja->sectors_free = ca->mi.bucket_size;

			/*
			 * ja->bucket_seq[ja->cur_idx] must always have
			 * something sensible:
			 */
			ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
		}
	}

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas < replicas_want && target) {
		/* Retry from all devices: */
		target = 0;
		goto retry;
	}
done:
	rcu_read_unlock();

	BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);

	return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
}

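/*
 * If a larger journal buffer was requested (j->buf_size_want), try to grow
 * this one before writing to it; allocation failure is fine, we just keep the
 * old size. GFP_NOIO presumably because this runs in the journal write path,
 * where recursing into reclaim could deadlock:
 */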
static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
{
	/* we aren't holding j->lock: */
	unsigned new_size = READ_ONCE(j->buf_size_want);
	void *new_buf;

	if (buf->buf_size >= new_size)
		return;

	new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
	if (!new_buf)
		return;

	memcpy(new_buf, buf->data, buf->buf_size);

	spin_lock(&j->lock);
	swap(buf->data,		new_buf);
	swap(buf->buf_size,	new_size);
	spin_unlock(&j->lock);

	kvpfree(new_buf, new_size);
}

static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
{
	return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK);
}

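/*
 * Journal write completion: update the on disk sequence numbers, release the
 * buffer slot, and kick off the next journal write if one is already waiting:
 */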
1371 static void journal_write_done(struct closure *cl)
1372 {
1373         struct journal *j = container_of(cl, struct journal, io);
1374         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1375         struct journal_buf *w = journal_last_unwritten_buf(j);
1376         struct bch_replicas_padded replicas;
1377         union journal_res_state old, new;
1378         u64 v, seq;
1379         int err = 0;
1380
1381         bch2_time_stats_update(!JSET_NO_FLUSH(w->data)
1382                                ? j->flush_write_time
1383                                : j->noflush_write_time, j->write_start_time);
1384
1385         if (!w->devs_written.nr) {
1386                 bch_err(c, "unable to write journal to sufficient devices");
1387                 err = -EIO;
1388         } else {
1389                 bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
1390                                          w->devs_written);
1391                 if (bch2_mark_replicas(c, &replicas.e))
1392                         err = -EIO;
1393         }
1394
1395         if (err)
1396                 bch2_fatal_error(c);
1397
1398         spin_lock(&j->lock);
1399         seq = le64_to_cpu(w->data->seq);
1400
1401         if (seq >= j->pin.front)
1402                 journal_seq_pin(j, seq)->devs = w->devs_written;
1403
1404         if (!err) {
1405                 if (!JSET_NO_FLUSH(w->data)) {
1406                         j->flushed_seq_ondisk = seq;
1407                         j->last_seq_ondisk = w->last_seq;
1408
1409                         bch2_do_discards(c);
1410                         closure_wake_up(&c->freelist_wait);
1411                 }
1412         } else if (!j->err_seq || seq < j->err_seq)
1413                 j->err_seq      = seq;
1414
1415         j->seq_ondisk           = seq;
1416
1417         /*
1418          * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
1419          * more buckets:
1420          *
1421          * Must come before signaling write completion, for
1422          * bch2_fs_journal_stop():
1423          */
1424         journal_reclaim_kick(&c->journal);
1425
1426         /* also must come before signaling write completion: */
1427         closure_debug_destroy(cl);
1428
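             /*
              * Advance unwritten_idx with a cmpxchg loop on the packed
              * reservations word; the BUG_ON() asserts that nothing still
              * holds a count on the buffer being retired:
              */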
1429         v = atomic64_read(&j->reservations.counter);
1430         do {
1431                 old.v = new.v = v;
1432                 BUG_ON(journal_state_count(new, new.unwritten_idx));
1433
1434                 new.unwritten_idx++;
1435         } while ((v = atomic64_cmpxchg(&j->reservations.counter,
1436                                        old.v, new.v)) != old.v);
1437
1438         bch2_journal_space_available(j);
1439
1440         closure_wake_up(&w->wait);
1441         journal_wake(j);
1442
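             /*
              * If the next unwritten entry is already closed (no outstanding
              * reservations) and there's something to write, start the next
              * write immediately; otherwise, if only the currently open
              * entry remains, rearm the delayed write work:
              */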
1443         if (!journal_state_count(new, new.unwritten_idx) &&
1444             journal_last_unwritten_seq(j) <= journal_cur_seq(j)) {
1445                 closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
1446         } else if (journal_last_unwritten_seq(j) == journal_cur_seq(j) &&
1447                    new.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL) {
1448                 struct journal_buf *buf = journal_cur_buf(j);
1449                 long delta = buf->expires - jiffies;
1450
1451                 /*
1452          * We don't close a journal entry to write it while there are
1453                  * previous entries still in flight - the current journal entry
1454                  * might want to be written now:
1455                  */
1456
1457                 mod_delayed_work(c->io_complete_wq, &j->write_work, max(0L, delta));
1458         }
1459
1460         spin_unlock(&j->lock);
1461 }
1462
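     /*
      * Per-device bio completion; may run in interrupt context. On an I/O
      * error the device is dropped from devs_written, so that
      * journal_write_done() only marks replicas that actually reached disk.
      */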
1463 static void journal_write_endio(struct bio *bio)
1464 {
1465         struct bch_dev *ca = bio->bi_private;
1466         struct journal *j = &ca->fs->journal;
1467         struct journal_buf *w = journal_last_unwritten_buf(j);
1468         unsigned long flags;
1469
1470         if (bch2_dev_io_err_on(bio->bi_status, ca, "error writing journal entry %llu: %s",
1471                                le64_to_cpu(w->data->seq),
1472                                bch2_blk_status_to_str(bio->bi_status)) ||
1473             bch2_meta_write_fault("journal")) {
1474                 spin_lock_irqsave(&j->err_lock, flags);
1475                 bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx);
1476                 spin_unlock_irqrestore(&j->err_lock, flags);
1477         }
1478
1479         closure_put(&j->io);
1480         percpu_ref_put(&ca->io_ref);
1481 }
1482
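     /*
      * Submit the journal write proper: one bio per pointer in w->key,
      * reusing each device's preallocated journal bio:
      */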
1483 static void do_journal_write(struct closure *cl)
1484 {
1485         struct journal *j = container_of(cl, struct journal, io);
1486         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1487         struct bch_dev *ca;
1488         struct journal_buf *w = journal_last_unwritten_buf(j);
1489         struct bch_extent_ptr *ptr;
1490         struct bio *bio;
1491         unsigned sectors = vstruct_sectors(w->data, c->block_bits);
1492
1493         extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
1494                 ca = bch_dev_bkey_exists(c, ptr->dev);
1495                 if (!percpu_ref_tryget(&ca->io_ref)) {
1496                         /* XXX: fix this */
1497                 bch_err(c, "missing device for journal write");
1498                         continue;
1499                 }
1500
1501                 this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
1502                              sectors);
1503
1504                 bio = ca->journal.bio;
1505                 bio_reset(bio);
1506                 bio_set_dev(bio, ca->disk_sb.bdev);
1507                 bio->bi_iter.bi_sector  = ptr->offset;
1508                 bio->bi_end_io          = journal_write_endio;
1509                 bio->bi_private         = ca;
1510                 bio->bi_opf             = REQ_OP_WRITE|REQ_SYNC|REQ_META;
1511
1512                 BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
1513                 ca->prev_journal_sector = bio->bi_iter.bi_sector;
1514
1515                 if (!JSET_NO_FLUSH(w->data))
1516                         bio->bi_opf    |= REQ_FUA;
1517                 if (!JSET_NO_FLUSH(w->data) && !w->separate_flush)
1518                         bio->bi_opf    |= REQ_PREFLUSH;
1519
1520                 bch2_bio_map(bio, w->data, sectors << 9);
1521
1522                 trace_journal_write(bio);
1523                 closure_bio_submit(bio, cl);
1524
1525                 ca->journal.bucket_seq[ca->journal.cur_idx] =
1526                         le64_to_cpu(w->data->seq);
1527         }
1528
1529         continue_at(cl, journal_write_done, c->io_complete_wq);
1530         return;
1531 }
1532
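     /*
      * Top of the journal write path: decide whether this is a flush or
      * noflush write, append btree roots and superblock entries, checksum
      * (and possibly encrypt) the entry, allocate space on each device,
      * then hand off to do_journal_write():
      */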
1533 void bch2_journal_write(struct closure *cl)
1534 {
1535         struct journal *j = container_of(cl, struct journal, io);
1536         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1537         struct bch_dev *ca;
1538         struct journal_buf *w = journal_last_unwritten_buf(j);
1539         struct jset_entry *start, *end;
1540         struct jset *jset;
1541         struct bio *bio;
1542         struct printbuf journal_debug_buf = PRINTBUF;
1543         bool validate_before_checksum = false;
1544         unsigned i, sectors, bytes, u64s, nr_rw_members = 0;
1545         int ret;
1546
1547         BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
1548
1549         journal_buf_realloc(j, w);
1550         jset = w->data;
1551
1552         j->write_start_time = local_clock();
1553
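             /*
              * An entry is demoted to a noflush write when the journal is in
              * an error state, or when nothing requires a flush,
              * journal_flush_delay hasn't elapsed since the last flush, and
              * JOURNAL_MAY_SKIP_FLUSH is set. Noflush entries don't update
              * last_seq:
              */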
1554         spin_lock(&j->lock);
1555         if (bch2_journal_error(j) ||
1556             w->noflush ||
1557             (!w->must_flush &&
1558              (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
1559              test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags))) {
1560                 w->noflush = true;
1561                 SET_JSET_NO_FLUSH(jset, true);
1562                 jset->last_seq  = 0;
1563                 w->last_seq     = 0;
1564
1565                 j->nr_noflush_writes++;
1566         } else {
1567                 j->last_flush_write = jiffies;
1568                 j->nr_flush_writes++;
1569         }
1570         spin_unlock(&j->lock);
1571
1572         /*
1573          * New btree roots are set by journalling them; when the journal entry
1574          * gets written we have to propagate them to c->btree_roots
1575          *
1576          * But, every journal entry we write has to contain all the btree roots
1577          * (at least for now); so after we copy btree roots to c->btree_roots we
1578          * have to get any missing btree roots and add them to this journal
1579          * entry:
1580          */
1581
1582         bch2_journal_entries_to_btree_roots(c, jset);
1583
1584         start = end = vstruct_last(jset);
1585
1586         end     = bch2_btree_roots_to_journal_entries(c, jset->start, end);
1587
1588         bch2_journal_super_entries_add_common(c, &end,
1589                                 le64_to_cpu(jset->seq));
1590         u64s    = (u64 *) end - (u64 *) start;
1591         BUG_ON(u64s > j->entry_u64s_reserved);
1592
1593         le32_add_cpu(&jset->u64s, u64s);
1594         BUG_ON(vstruct_sectors(jset, c->block_bits) > w->sectors);
1595
1596         jset->magic             = cpu_to_le64(jset_magic(c));
1597         jset->version           = c->sb.version < bcachefs_metadata_version_bkey_renumber
1598                 ? cpu_to_le32(BCH_JSET_VERSION_OLD)
1599                 : cpu_to_le32(c->sb.version);
1600
1601         SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
1602         SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));
1603
1604         if (!JSET_NO_FLUSH(jset) && journal_entry_empty(jset))
1605                 j->last_empty_seq = le64_to_cpu(jset->seq);
1606
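             /*
              * Entries that will be encrypted, or that use an on-disk
              * version older than the current one, are validated before the
              * checksum is taken; everything else is validated afterwards:
              */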
1607         if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
1608                 validate_before_checksum = true;
1609
1610         if (le32_to_cpu(jset->version) < bcachefs_metadata_version_current)
1611                 validate_before_checksum = true;
1612
1613         if (validate_before_checksum &&
1614             jset_validate_for_write(c, jset))
1615                 goto err;
1616
1617         ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
1618                     jset->encrypted_start,
1619                     vstruct_end(jset) - (void *) jset->encrypted_start);
1620         if (bch2_fs_fatal_err_on(ret, c,
1621                         "error encrypting journal entry: %i", ret))
1622                 goto err;
1623
1624         jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
1625                                   journal_nonce(jset), jset);
1626
1627         if (!validate_before_checksum &&
1628             jset_validate_for_write(c, jset))
1629                 goto err;
1630
1631         sectors = vstruct_sectors(jset, c->block_bits);
1632         BUG_ON(sectors > w->sectors);
1633
1634         bytes = vstruct_bytes(jset);
1635         memset((void *) jset + bytes, 0, (sectors << 9) - bytes);
1636
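             /*
              * Allocate on-disk space for the write; if that fails but
              * buckets are awaiting discard, discard them and retry:
              */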
1637 retry_alloc:
1638         spin_lock(&j->lock);
1639         ret = journal_write_alloc(j, w, sectors);
1640
1641         if (ret && j->can_discard) {
1642                 spin_unlock(&j->lock);
1643                 bch2_journal_do_discards(j);
1644                 goto retry_alloc;
1645         }
1646
1647         if (ret)
1648                 __bch2_journal_debug_to_text(&journal_debug_buf, j);
1649
1650         /*
1651          * the write is allocated; we no longer need to account for it in
1652          * bch2_journal_space_available():
1653          */
1654         w->sectors = 0;
1655
1656         /*
1657          * journal entry has been compacted and allocated, recalculate space
1658          * available:
1659          */
1660         bch2_journal_space_available(j);
1661         spin_unlock(&j->lock);
1662
1663         if (ret) {
1664                 bch_err(c, "Unable to allocate journal write:\n%s",
1665                         journal_debug_buf.buf);
1666                 printbuf_exit(&journal_debug_buf);
1667                 bch2_fatal_error(c);
1668                 continue_at(cl, journal_write_done, c->io_complete_wq);
1669                 return;
1670         }
1671
1672         w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));
1673
1674         if (c->opts.nochanges)
1675                 goto no_io;
1676
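     /*
      * With more than one rw device, a separate flush is issued to every rw
      * device up front instead of making the journal write itself
      * REQ_PREFLUSH - presumably so that devices not receiving this
      * particular entry get flushed as well:
      */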
1677         for_each_rw_member(ca, c, i)
1678                 nr_rw_members++;
1679
1680         if (nr_rw_members > 1)
1681                 w->separate_flush = true;
1682
1683         if (!JSET_NO_FLUSH(jset) && w->separate_flush) {
1684                 for_each_rw_member(ca, c, i) {
1685                         percpu_ref_get(&ca->io_ref);
1686
1687                         bio = ca->journal.bio;
1688                         bio_reset(bio);
1689                         bio_set_dev(bio, ca->disk_sb.bdev);
1690                         bio->bi_opf             = REQ_OP_FLUSH;
1691                         bio->bi_end_io          = journal_write_endio;
1692                         bio->bi_private         = ca;
1693                         closure_bio_submit(bio, cl);
1694                 }
1695         }
1696
1697         continue_at(cl, do_journal_write, c->io_complete_wq);
1698         return;
1699 no_io:
1700         continue_at(cl, journal_write_done, c->io_complete_wq);
1701         return;
1702 err:
1703         bch2_fatal_error(c);
1704         continue_at(cl, journal_write_done, c->io_complete_wq);
1705 }