1 // SPDX-License-Identifier: GPL-2.0
2 #include "bcachefs.h"
3 #include "alloc_background.h"
4 #include "alloc_foreground.h"
5 #include "btree_io.h"
6 #include "btree_update_interior.h"
7 #include "buckets.h"
8 #include "checksum.h"
9 #include "disk_groups.h"
10 #include "error.h"
11 #include "io.h"
12 #include "journal.h"
13 #include "journal_io.h"
14 #include "journal_reclaim.h"
15 #include "journal_seq_blacklist.h"
16 #include "replicas.h"
17
18 #include <trace/events/bcachefs.h>
19
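/*
 * The nonce for a journal entry is built from its sequence number: word 0 is
 * zero, words 1-2 hold the low and high 32 bits of the little-endian seq, and
 * word 3 is BCH_NONCE_JOURNAL, which keeps journal nonces distinct from the
 * nonces used for other metadata types.
 */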
20 static struct nonce journal_nonce(const struct jset *jset)
21 {
22         return (struct nonce) {{
23                 [0] = 0,
24                 [1] = ((__le32 *) &jset->seq)[0],
25                 [2] = ((__le32 *) &jset->seq)[1],
26                 [3] = BCH_NONCE_JOURNAL,
27         }};
28 }
29
30 static bool jset_csum_good(struct bch_fs *c, struct jset *j)
31 {
32         return bch2_checksum_type_valid(c, JSET_CSUM_TYPE(j)) &&
33                 !bch2_crc_cmp(j->csum,
34                               csum_vstruct(c, JSET_CSUM_TYPE(j), journal_nonce(j), j));
35 }
36
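/*
 * Map a journal sequence number to an index in the journal_entries genradix:
 * genradix indices are unsigned longs, not u64s, so we offset by the base seq
 * recorded when the first entry was read and mask down to 31 bits. For
 * example (hypothetical numbers), with journal_entries_base_seq == 1000,
 * seq 1005 maps to index 5.
 */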
37 static inline u32 journal_entry_radix_idx(struct bch_fs *c, u64 seq)
38 {
39         return (seq - c->journal_entries_base_seq) & (~0U >> 1);
40 }
41
42 static void __journal_replay_free(struct bch_fs *c,
43                                   struct journal_replay *i)
44 {
45         struct journal_replay **p =
46                 genradix_ptr(&c->journal_entries,
47                              journal_entry_radix_idx(c, le64_to_cpu(i->j.seq)));
48
49         BUG_ON(*p != i);
50         *p = NULL;
51         kvpfree(i, offsetof(struct journal_replay, j) +
52                 vstruct_bytes(&i->j));
53 }
54
55 static void journal_replay_free(struct bch_fs *c, struct journal_replay *i)
56 {
57         i->ignore = true;
58
59         if (!c->opts.read_entire_journal)
60                 __journal_replay_free(c, i);
61 }
62
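/*
 * State shared by the per-device journal read closures: last_seq tracks the
 * oldest sequence number we still care about (entries older than this can be
 * dropped), and ret records an error if any device hit one.
 */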
63 struct journal_list {
64         struct closure          cl;
65         u64                     last_seq;
66         struct mutex            lock;
67         int                     ret;
68 };
69
70 #define JOURNAL_ENTRY_ADD_OK            0
71 #define JOURNAL_ENTRY_ADD_OUT_OF_RANGE  5
72
73 /*
74  * Given a journal entry we just read, add it to the list of journal entries to
75  * be replayed:
76  */
77 static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
78                              struct journal_ptr entry_ptr,
79                              struct journal_list *jlist, struct jset *j)
80 {
81         struct genradix_iter iter;
82         struct journal_replay **_i, *i, *dup;
83         struct journal_ptr *ptr;
84         size_t bytes = vstruct_bytes(j);
85         u64 last_seq = !JSET_NO_FLUSH(j) ? le64_to_cpu(j->last_seq) : 0;
86         int ret = JOURNAL_ENTRY_ADD_OK;
87
88         /* Is this entry older than the range we need? */
89         if (!c->opts.read_entire_journal &&
90             le64_to_cpu(j->seq) < jlist->last_seq)
91                 return JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
92
93         /*
94          * genradixes are indexed by a ulong, not a u64, so we can't index them
95          * by sequence number directly: Assume instead that they will all fall
96          * within the range of +-2 billion of the first one we find.
97          */
98         if (!c->journal_entries_base_seq)
99                 c->journal_entries_base_seq = max_t(s64, 1, le64_to_cpu(j->seq) - S32_MAX);
100
101         /* Drop entries we don't need anymore */
102         if (last_seq > jlist->last_seq && !c->opts.read_entire_journal) {
103                 genradix_for_each_from(&c->journal_entries, iter, _i,
104                                        journal_entry_radix_idx(c, jlist->last_seq)) {
105                         i = *_i;
106
107                         if (!i || i->ignore)
108                                 continue;
109
110                         if (le64_to_cpu(i->j.seq) >= last_seq)
111                                 break;
112                         journal_replay_free(c, i);
113                 }
114         }
115
116         jlist->last_seq = max(jlist->last_seq, last_seq);
117
118         _i = genradix_ptr_alloc(&c->journal_entries,
119                                 journal_entry_radix_idx(c, le64_to_cpu(j->seq)),
120                                 GFP_KERNEL);
121         if (!_i)
122                 return -ENOMEM;
123
124         /*
125          * Duplicate journal entries? If so we want the one that didn't have a
126          * checksum error:
127          */
128         dup = *_i;
129         if (dup) {
130                 if (bytes == vstruct_bytes(&dup->j) &&
131                     !memcmp(j, &dup->j, bytes)) {
132                         i = dup;
133                         goto found;
134                 }
135
136                 if (!entry_ptr.csum_good) {
137                         i = dup;
138                         goto found;
139                 }
140
141                 if (!dup->csum_good)
142                         goto replace;
143
144                 fsck_err(c, "found duplicate but non-identical journal entries (seq %llu)",
145                          le64_to_cpu(j->seq));
146                 i = dup;
147                 goto found;
148         }
149 replace:
150         i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
151         if (!i)
152                 return -ENOMEM;
153
154         i->nr_ptrs      = 0;
155         i->csum_good    = entry_ptr.csum_good;
156         i->ignore       = false;
157         memcpy(&i->j, j, bytes);
158         i->ptrs[i->nr_ptrs++] = entry_ptr;
159
160         if (dup) {
161                 if (dup->nr_ptrs >= ARRAY_SIZE(dup->ptrs)) {
162                         bch_err(c, "found too many copies of journal entry %llu",
163                                 le64_to_cpu(i->j.seq));
164                         dup->nr_ptrs = ARRAY_SIZE(dup->ptrs) - 1;
165                 }
166
167                 /* The first ptr should represent the jset we kept: */
168                 memcpy(i->ptrs + i->nr_ptrs,
169                        dup->ptrs,
170                        sizeof(dup->ptrs[0]) * dup->nr_ptrs);
171                 i->nr_ptrs += dup->nr_ptrs;
172                 __journal_replay_free(c, dup);
173         }
174
175         *_i = i;
176         return 0;
177 found:
178         for (ptr = i->ptrs; ptr < i->ptrs + i->nr_ptrs; ptr++) {
179                 if (ptr->dev == ca->dev_idx) {
180                         bch_err(c, "duplicate journal entry %llu on same device",
181                                 le64_to_cpu(i->j.seq));
182                         goto out;
183                 }
184         }
185
186         if (i->nr_ptrs >= ARRAY_SIZE(i->ptrs)) {
187                 bch_err(c, "found too many copies of journal entry %llu",
188                         le64_to_cpu(i->j.seq));
189                 goto out;
190         }
191
192         i->ptrs[i->nr_ptrs++] = entry_ptr;
193 out:
194 fsck_err:
195         return ret;
196 }
197
198 /* this fills in a range with empty jset_entries: */
199 static void journal_entry_null_range(void *start, void *end)
200 {
201         struct jset_entry *entry;
202
203         for (entry = start; entry != end; entry = vstruct_next(entry))
204                 memset(entry, 0, sizeof(*entry));
205 }
206
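/*
 * Internal return codes used while reading/validating journal entries:
 * REREAD - the entry extends past what we've read so far; retry with a
 *          larger read
 * NONE   - no journal entry at this offset (magic doesn't match)
 * BAD    - the entry failed validation (e.g. unknown checksum type, or
 *          last_seq > seq)
 */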
207 #define JOURNAL_ENTRY_REREAD    5
208 #define JOURNAL_ENTRY_NONE      6
209 #define JOURNAL_ENTRY_BAD       7
210
211 static void journal_entry_err_msg(struct printbuf *out,
212                                   struct jset *jset,
213                                   struct jset_entry *entry)
214 {
215         prt_str(out, "invalid journal entry ");
216         if (entry)
217                 prt_printf(out, "%s ", bch2_jset_entry_types[entry->type]);
218
219         if (!jset)
220                 prt_printf(out, "in superblock");
221         else if (!entry)
222                 prt_printf(out, "at seq %llu", le64_to_cpu(jset->seq));
223         else
224                 prt_printf(out, "at offset %zi/%u seq %llu",
225                            (u64 *) entry - jset->_data,
226                            le32_to_cpu(jset->u64s),
227                            le64_to_cpu(jset->seq));
228         prt_str(out, ": ");
229 }
230
231 #define journal_entry_err(c, jset, entry, msg, ...)                     \
232 ({                                                                      \
233         struct printbuf buf = PRINTBUF;                                 \
234                                                                         \
235         journal_entry_err_msg(&buf, jset, entry);                       \
236         prt_printf(&buf, msg, ##__VA_ARGS__);                           \
237                                                                         \
238         switch (write) {                                                \
239         case READ:                                                      \
240                 mustfix_fsck_err(c, "%s", buf.buf);                     \
241                 break;                                                  \
242         case WRITE:                                                     \
243                 bch_err(c, "corrupt metadata before write: %s\n", buf.buf);\
244                 if (bch2_fs_inconsistent(c)) {                          \
245                         ret = -BCH_ERR_fsck_errors_not_fixed;           \
246                         goto fsck_err;                                  \
247                 }                                                       \
248                 break;                                                  \
249         }                                                               \
250                                                                         \
251         printbuf_exit(&buf);                                            \
252         true;                                                           \
253 })
254
255 #define journal_entry_err_on(cond, c, jset, entry, msg, ...)            \
256         ((cond) ? journal_entry_err(c, jset, entry, msg, ##__VA_ARGS__) : false)
257
258 #define FSCK_DELETED_KEY        5
259
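/*
 * Validate a single bkey within a journal entry. Keys that fail validation
 * are cut out of the entry (truncating it or shifting later keys down, then
 * nulling the freed range), and FSCK_DELETED_KEY is returned so the caller
 * knows not to advance past the deleted key.
 */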
260 static int journal_validate_key(struct bch_fs *c,
261                                 struct jset *jset,
262                                 struct jset_entry *entry,
263                                 unsigned level, enum btree_id btree_id,
264                                 struct bkey_i *k,
265                                 unsigned version, int big_endian, int write)
266 {
267         void *next = vstruct_next(entry);
268         struct printbuf buf = PRINTBUF;
269         int ret = 0;
270
271         if (journal_entry_err_on(!k->k.u64s, c, jset, entry, "k->u64s 0")) {
272                 entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
273                 journal_entry_null_range(vstruct_next(entry), next);
274                 return FSCK_DELETED_KEY;
275         }
276
277         if (journal_entry_err_on((void *) bkey_next(k) >
278                                  (void *) vstruct_next(entry),
279                                  c, jset, entry,
280                                  "extends past end of journal entry")) {
281                 entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
282                 journal_entry_null_range(vstruct_next(entry), next);
283                 return FSCK_DELETED_KEY;
284         }
285
286         if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT,
287                                  c, jset, entry,
288                                  "bad format %u", k->k.format)) {
289                 le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
290                 memmove(k, bkey_next(k), next - (void *) bkey_next(k));
291                 journal_entry_null_range(vstruct_next(entry), next);
292                 return FSCK_DELETED_KEY;
293         }
294
295         if (!write)
296                 bch2_bkey_compat(level, btree_id, version, big_endian,
297                                  write, NULL, bkey_to_packed(k));
298
299         if (bch2_bkey_invalid(c, bkey_i_to_s_c(k),
300                               __btree_node_type(level, btree_id), write, &buf)) {
301                 printbuf_reset(&buf);
302                 prt_printf(&buf, "invalid journal entry %s at offset %zi/%u seq %llu:",
303                            bch2_jset_entry_types[entry->type],
304                            (u64 *) entry - jset->_data,
305                            le32_to_cpu(jset->u64s),
306                            le64_to_cpu(jset->seq));
307                 prt_newline(&buf);
308                 printbuf_indent_add(&buf, 2);
309
310                 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
311                 prt_newline(&buf);
312                 bch2_bkey_invalid(c, bkey_i_to_s_c(k),
313                                   __btree_node_type(level, btree_id), write, &buf);
314
315                 mustfix_fsck_err(c, "%s", buf.buf);
316
317                 le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
318                 memmove(k, bkey_next(k), next - (void *) bkey_next(k));
319                 journal_entry_null_range(vstruct_next(entry), next);
320
321                 printbuf_exit(&buf);
322                 return FSCK_DELETED_KEY;
323         }
324
325         if (write)
326                 bch2_bkey_compat(level, btree_id, version, big_endian,
327                                  write, NULL, bkey_to_packed(k));
328 fsck_err:
329         printbuf_exit(&buf);
330         return ret;
331 }
332
333 static int journal_entry_btree_keys_validate(struct bch_fs *c,
334                                              struct jset *jset,
335                                              struct jset_entry *entry,
336                                              unsigned version, int big_endian, int write)
337 {
338         struct bkey_i *k = entry->start;
339
340         while (k != vstruct_last(entry)) {
341                 int ret = journal_validate_key(c, jset, entry,
342                                                entry->level,
343                                                entry->btree_id,
344                                                k, version, big_endian, write);
345                 if (ret == FSCK_DELETED_KEY)
346                         continue;
347
348                 k = bkey_next(k);
349         }
350
351         return 0;
352 }
353
354 static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs *c,
355                                              struct jset_entry *entry)
356 {
357         struct bkey_i *k;
358         bool first = true;
359
360         vstruct_for_each(entry, k) {
361                 if (!first) {
362                         prt_newline(out);
363                         prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);
364                 }
365                 prt_printf(out, "btree=%s l=%u ", bch2_btree_ids[entry->btree_id], entry->level);
366                 bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
367                 first = false;
368         }
369 }
370
371 static int journal_entry_btree_root_validate(struct bch_fs *c,
372                                              struct jset *jset,
373                                              struct jset_entry *entry,
374                                              unsigned version, int big_endian, int write)
375 {
376         struct bkey_i *k = entry->start;
377         int ret = 0;
378
379         if (journal_entry_err_on(!entry->u64s ||
380                                  le16_to_cpu(entry->u64s) != k->k.u64s,
381                                  c, jset, entry,
382                                  "invalid btree root journal entry: wrong number of keys")) {
383                 void *next = vstruct_next(entry);
384                 /*
385                  * we don't want to null out this jset_entry,
386                  * just the contents, so that later we can tell
387                  * we were _supposed_ to have a btree root
388                  */
389                 entry->u64s = 0;
390                 journal_entry_null_range(vstruct_next(entry), next);
391                 return 0;
392         }
393
394         return journal_validate_key(c, jset, entry, 1, entry->btree_id, k,
395                                     version, big_endian, write);
396 fsck_err:
397         return ret;
398 }
399
400 static void journal_entry_btree_root_to_text(struct printbuf *out, struct bch_fs *c,
401                                              struct jset_entry *entry)
402 {
403         journal_entry_btree_keys_to_text(out, c, entry);
404 }
405
406 static int journal_entry_prio_ptrs_validate(struct bch_fs *c,
407                                             struct jset *jset,
408                                             struct jset_entry *entry,
409                                             unsigned version, int big_endian, int write)
410 {
411         /* obsolete, don't care: */
412         return 0;
413 }
414
415 static void journal_entry_prio_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
416                                             struct jset_entry *entry)
417 {
418 }
419
420 static int journal_entry_blacklist_validate(struct bch_fs *c,
421                                             struct jset *jset,
422                                             struct jset_entry *entry,
423                                             unsigned version, int big_endian, int write)
424 {
425         int ret = 0;
426
427         if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1,
428                                  c, jset, entry,
429                 "invalid journal seq blacklist entry: bad size")) {
430                 journal_entry_null_range(entry, vstruct_next(entry));
431         }
432 fsck_err:
433         return ret;
434 }
435
436 static void journal_entry_blacklist_to_text(struct printbuf *out, struct bch_fs *c,
437                                             struct jset_entry *entry)
438 {
439         struct jset_entry_blacklist *bl =
440                 container_of(entry, struct jset_entry_blacklist, entry);
441
442         prt_printf(out, "seq=%llu", le64_to_cpu(bl->seq));
443 }
444
445 static int journal_entry_blacklist_v2_validate(struct bch_fs *c,
446                                                struct jset *jset,
447                                                struct jset_entry *entry,
448                                                unsigned version, int big_endian, int write)
449 {
450         struct jset_entry_blacklist_v2 *bl_entry;
451         int ret = 0;
452
453         if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2,
454                                  c, jset, entry,
455                 "invalid journal seq blacklist entry: bad size")) {
456                 journal_entry_null_range(entry, vstruct_next(entry));
457                 goto out;
458         }
459
460         bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);
461
462         if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
463                                  le64_to_cpu(bl_entry->end),
464                                  c, jset, entry,
465                 "invalid journal seq blacklist entry: start > end")) {
466                 journal_entry_null_range(entry, vstruct_next(entry));
467         }
468 out:
469 fsck_err:
470         return ret;
471 }
472
473 static void journal_entry_blacklist_v2_to_text(struct printbuf *out, struct bch_fs *c,
474                                                struct jset_entry *entry)
475 {
476         struct jset_entry_blacklist_v2 *bl =
477                 container_of(entry, struct jset_entry_blacklist_v2, entry);
478
479         prt_printf(out, "start=%llu end=%llu",
480                le64_to_cpu(bl->start),
481                le64_to_cpu(bl->end));
482 }
483
484 static int journal_entry_usage_validate(struct bch_fs *c,
485                                         struct jset *jset,
486                                         struct jset_entry *entry,
487                                         unsigned version, int big_endian, int write)
488 {
489         struct jset_entry_usage *u =
490                 container_of(entry, struct jset_entry_usage, entry);
491         unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
492         int ret = 0;
493
494         if (journal_entry_err_on(bytes < sizeof(*u),
495                                  c, jset, entry,
496                                  "invalid journal entry usage: bad size")) {
497                 journal_entry_null_range(entry, vstruct_next(entry));
498                 return ret;
499         }
500
501 fsck_err:
502         return ret;
503 }
504
505 static void journal_entry_usage_to_text(struct printbuf *out, struct bch_fs *c,
506                                         struct jset_entry *entry)
507 {
508         struct jset_entry_usage *u =
509                 container_of(entry, struct jset_entry_usage, entry);
510
511         prt_printf(out, "type=%s v=%llu",
512                bch2_fs_usage_types[u->entry.btree_id],
513                le64_to_cpu(u->v));
514 }
515
516 static int journal_entry_data_usage_validate(struct bch_fs *c,
517                                         struct jset *jset,
518                                         struct jset_entry *entry,
519                                         unsigned version, int big_endian, int write)
520 {
521         struct jset_entry_data_usage *u =
522                 container_of(entry, struct jset_entry_data_usage, entry);
523         unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
524         int ret = 0;
525
526         if (journal_entry_err_on(bytes < sizeof(*u) ||
527                                  bytes < sizeof(*u) + u->r.nr_devs,
528                                  c, jset, entry,
529                                  "invalid journal entry usage: bad size")) {
530                 journal_entry_null_range(entry, vstruct_next(entry));
531                 return ret;
532         }
533
534 fsck_err:
535         return ret;
536 }
537
538 static void journal_entry_data_usage_to_text(struct printbuf *out, struct bch_fs *c,
539                                              struct jset_entry *entry)
540 {
541         struct jset_entry_data_usage *u =
542                 container_of(entry, struct jset_entry_data_usage, entry);
543
544         bch2_replicas_entry_to_text(out, &u->r);
545         prt_printf(out, "=%llu", le64_to_cpu(u->v));
546 }
547
548 static int journal_entry_clock_validate(struct bch_fs *c,
549                                         struct jset *jset,
550                                         struct jset_entry *entry,
551                                         unsigned version, int big_endian, int write)
552 {
553         struct jset_entry_clock *clock =
554                 container_of(entry, struct jset_entry_clock, entry);
555         unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
556         int ret = 0;
557
558         if (journal_entry_err_on(bytes != sizeof(*clock),
559                                  c, jset, entry, "bad size")) {
560                 journal_entry_null_range(entry, vstruct_next(entry));
561                 return ret;
562         }
563
564         if (journal_entry_err_on(clock->rw > 1,
565                                  c, jset, entry, "bad rw")) {
566                 journal_entry_null_range(entry, vstruct_next(entry));
567                 return ret;
568         }
569
570 fsck_err:
571         return ret;
572 }
573
574 static void journal_entry_clock_to_text(struct printbuf *out, struct bch_fs *c,
575                                         struct jset_entry *entry)
576 {
577         struct jset_entry_clock *clock =
578                 container_of(entry, struct jset_entry_clock, entry);
579
580         prt_printf(out, "%s=%llu", clock->rw ? "write" : "read", le64_to_cpu(clock->time));
581 }
582
583 static int journal_entry_dev_usage_validate(struct bch_fs *c,
584                                             struct jset *jset,
585                                             struct jset_entry *entry,
586                                             unsigned version, int big_endian, int write)
587 {
588         struct jset_entry_dev_usage *u =
589                 container_of(entry, struct jset_entry_dev_usage, entry);
590         unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
591         unsigned expected = sizeof(*u);
592         unsigned dev;
593         int ret = 0;
594
595         if (journal_entry_err_on(bytes < expected,
596                                  c, jset, entry, "bad size (%u < %u)",
597                                  bytes, expected)) {
598                 journal_entry_null_range(entry, vstruct_next(entry));
599                 return ret;
600         }
601
602         dev = le32_to_cpu(u->dev);
603
604         if (journal_entry_err_on(!bch2_dev_exists2(c, dev),
605                                  c, jset, entry, "bad dev")) {
606                 journal_entry_null_range(entry, vstruct_next(entry));
607                 return ret;
608         }
609
610         if (journal_entry_err_on(u->pad,
611                                  c, jset, entry, "bad pad")) {
612                 journal_entry_null_range(entry, vstruct_next(entry));
613                 return ret;
614         }
615
616 fsck_err:
617         return ret;
618 }
619
620 static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs *c,
621                                             struct jset_entry *entry)
622 {
623         struct jset_entry_dev_usage *u =
624                 container_of(entry, struct jset_entry_dev_usage, entry);
625         unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);
626
627         prt_printf(out, "dev=%u", le32_to_cpu(u->dev));
628
629         for (i = 0; i < nr_types; i++) {
630                 if (i < BCH_DATA_NR)
631                         prt_printf(out, " %s", bch2_data_types[i]);
632                 else
633                         prt_printf(out, " (unknown data type %u)", i);
634                 prt_printf(out, ": buckets=%llu sectors=%llu fragmented=%llu",
635                        le64_to_cpu(u->d[i].buckets),
636                        le64_to_cpu(u->d[i].sectors),
637                        le64_to_cpu(u->d[i].fragmented));
638         }
639
640         prt_printf(out, " buckets_ec: %llu", le64_to_cpu(u->buckets_ec));
641 }
642
643 static int journal_entry_log_validate(struct bch_fs *c,
644                                       struct jset *jset,
645                                       struct jset_entry *entry,
646                                       unsigned version, int big_endian, int write)
647 {
648         return 0;
649 }
650
651 static void journal_entry_log_to_text(struct printbuf *out, struct bch_fs *c,
652                                       struct jset_entry *entry)
653 {
654         struct jset_entry_log *l = container_of(entry, struct jset_entry_log, entry);
655         unsigned bytes = vstruct_bytes(entry) - offsetof(struct jset_entry_log, d);
656
657         prt_printf(out, "%.*s", bytes, l->d);
658 }
659
660 static int journal_entry_overwrite_validate(struct bch_fs *c,
661                                       struct jset *jset,
662                                       struct jset_entry *entry,
663                                       unsigned version, int big_endian, int write)
664 {
665         return journal_entry_btree_keys_validate(c, jset, entry, version, big_endian, write);
666 }
667
668 static void journal_entry_overwrite_to_text(struct printbuf *out, struct bch_fs *c,
669                                             struct jset_entry *entry)
670 {
671         journal_entry_btree_keys_to_text(out, c, entry);
672 }
673
674 struct jset_entry_ops {
675         int (*validate)(struct bch_fs *, struct jset *,
676                         struct jset_entry *, unsigned, int, int);
677         void (*to_text)(struct printbuf *, struct bch_fs *, struct jset_entry *);
678 };
679
680 static const struct jset_entry_ops bch2_jset_entry_ops[] = {
681 #define x(f, nr)                                                \
682         [BCH_JSET_ENTRY_##f]    = (struct jset_entry_ops) {     \
683                 .validate       = journal_entry_##f##_validate, \
684                 .to_text        = journal_entry_##f##_to_text,  \
685         },
686         BCH_JSET_ENTRY_TYPES()
687 #undef x
688 };
689
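/*
 * Validate a single jset_entry by dispatching to its type's validate op;
 * entry types this version doesn't know about are accepted unchanged.
 */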
690 int bch2_journal_entry_validate(struct bch_fs *c,
691                                 struct jset *jset,
692                                 struct jset_entry *entry,
693                                 unsigned version, int big_endian, int write)
694 {
695         return entry->type < BCH_JSET_ENTRY_NR
696                 ? bch2_jset_entry_ops[entry->type].validate(c, jset, entry,
697                                 version, big_endian, write)
698                 : 0;
699 }
700
701 void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c,
702                                 struct jset_entry *entry)
703 {
704         if (entry->type < BCH_JSET_ENTRY_NR) {
705                 prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);
706                 bch2_jset_entry_ops[entry->type].to_text(out, c, entry);
707         } else {
708                 prt_printf(out, "(unknown type %u)", entry->type);
709         }
710 }
711
712 static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
713                                  int write)
714 {
715         struct jset_entry *entry;
716         int ret = 0;
717
718         vstruct_for_each(jset, entry) {
719                 if (journal_entry_err_on(vstruct_next(entry) >
720                                          vstruct_last(jset), c, jset, entry,
721                                 "journal entry extends past end of jset")) {
722                         jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
723                         break;
724                 }
725
726                 ret = bch2_journal_entry_validate(c, jset, entry,
727                                         le32_to_cpu(jset->version),
728                                         JSET_BIG_ENDIAN(jset), write);
729                 if (ret)
730                         break;
731         }
732 fsck_err:
733         return ret;
734 }
735
736 static int jset_validate(struct bch_fs *c,
737                          struct bch_dev *ca,
738                          struct jset *jset, u64 sector,
739                          int write)
740 {
741         unsigned version;
742         int ret = 0;
743
744         if (le64_to_cpu(jset->magic) != jset_magic(c))
745                 return JOURNAL_ENTRY_NONE;
746
747         version = le32_to_cpu(jset->version);
748         if (journal_entry_err_on((version != BCH_JSET_VERSION_OLD &&
749                                   version < bcachefs_metadata_version_min) ||
750                                  version >= bcachefs_metadata_version_max,
751                                  c, jset, NULL,
752                         "%s sector %llu seq %llu: unknown journal entry version %u",
753                         ca ? ca->name : c->name,
754                         sector, le64_to_cpu(jset->seq),
755                         version)) {
756                 /* don't try to continue: */
757                 return -EINVAL;
758         }
759
760         if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)),
761                                  c, jset, NULL,
762                         "%s sector %llu seq %llu: journal entry with unknown csum type %llu",
763                         ca ? ca->name : c->name,
764                         sector, le64_to_cpu(jset->seq),
765                         JSET_CSUM_TYPE(jset)))
766                 ret = JOURNAL_ENTRY_BAD;
767
768         /* last_seq is ignored when JSET_NO_FLUSH is true */
769         if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
770                                  le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq),
771                                  c, jset, NULL,
772                                  "invalid journal entry: last_seq > seq (%llu > %llu)",
773                                  le64_to_cpu(jset->last_seq),
774                                  le64_to_cpu(jset->seq))) {
775                 jset->last_seq = jset->seq;
776                 return JOURNAL_ENTRY_BAD;
777         }
778
779         ret = jset_validate_entries(c, jset, write);
780 fsck_err:
781         return ret;
782 }
783
784 static int jset_validate_early(struct bch_fs *c,
785                          struct bch_dev *ca,
786                          struct jset *jset, u64 sector,
787                          unsigned bucket_sectors_left,
788                          unsigned sectors_read)
789 {
790         size_t bytes = vstruct_bytes(jset);
791         unsigned version;
792         int write = READ;
793         int ret = 0;
794
795         if (le64_to_cpu(jset->magic) != jset_magic(c))
796                 return JOURNAL_ENTRY_NONE;
797
798         version = le32_to_cpu(jset->version);
799         if (journal_entry_err_on((version != BCH_JSET_VERSION_OLD &&
800                                   version < bcachefs_metadata_version_min) ||
801                                  version >= bcachefs_metadata_version_max,
802                                  c, jset, NULL,
803                         "%s sector %llu seq %llu: unknown journal entry version %u",
804                         ca ? ca->name : c->name,
805                         sector, le64_to_cpu(jset->seq),
806                         version)) {
807                 /* don't try to continue: */
808                 return -EINVAL;
809         }
810
811         if (bytes > (sectors_read << 9) &&
812             sectors_read < bucket_sectors_left)
813                 return JOURNAL_ENTRY_REREAD;
814
815         if (journal_entry_err_on(bytes > bucket_sectors_left << 9,
816                                  c, jset, NULL,
817                         "%s sector %llu seq %llu: journal entry too big (%zu bytes)",
818                         ca ? ca->name : c->name,
819                         sector, le64_to_cpu(jset->seq), bytes))
820                 le32_add_cpu(&jset->u64s,
821                              -((bytes - (bucket_sectors_left << 9)) / 8));
822 fsck_err:
823         return ret;
824 }
825
826 struct journal_read_buf {
827         void            *data;
828         size_t          size;
829 };
830
831 static int journal_read_buf_realloc(struct journal_read_buf *b,
832                                     size_t new_size)
833 {
834         void *n;
835
836         /* the bios are sized for this many pages, max: */
837         if (new_size > JOURNAL_ENTRY_SIZE_MAX)
838                 return -ENOMEM;
839
840         new_size = roundup_pow_of_two(new_size);
841         n = kvpmalloc(new_size, GFP_KERNEL);
842         if (!n)
843                 return -ENOMEM;
844
845         kvpfree(b->data, b->size);
846         b->data = n;
847         b->size = new_size;
848         return 0;
849 }
850
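/*
 * Read every journal entry in one journal bucket: entries larger than the
 * current read buffer trigger a buffer realloc and reread, checksum failures
 * are noted but we keep scanning, and each entry found is handed to
 * journal_entry_add() under the journal_list lock.
 */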
851 static int journal_read_bucket(struct bch_dev *ca,
852                                struct journal_read_buf *buf,
853                                struct journal_list *jlist,
854                                unsigned bucket)
855 {
856         struct bch_fs *c = ca->fs;
857         struct journal_device *ja = &ca->journal;
858         struct jset *j = NULL;
859         unsigned sectors, sectors_read = 0;
860         u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
861             end = offset + ca->mi.bucket_size;
862         bool saw_bad = false, csum_good;
863         int ret = 0;
864
865         pr_debug("reading %u", bucket);
866
867         while (offset < end) {
868                 if (!sectors_read) {
869                         struct bio *bio;
870                         unsigned nr_bvecs;
871 reread:
872                         sectors_read = min_t(unsigned,
873                                 end - offset, buf->size >> 9);
874                         nr_bvecs = buf_pages(buf->data, sectors_read << 9);
875
876                         bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
877                         bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ);
878
879                         bio->bi_iter.bi_sector = offset;
880                         bch2_bio_map(bio, buf->data, sectors_read << 9);
881
882                         ret = submit_bio_wait(bio);
883                         kfree(bio);
884
885                         if (bch2_dev_io_err_on(ret, ca,
886                                                "journal read error: sector %llu",
887                                                offset) ||
888                             bch2_meta_read_fault("journal")) {
889                                 /*
890                                  * We don't error out of the recovery process
891                                  * here, since the relevant journal entry may be
892                                  * found on a different device, and missing or
893                                  * no journal entries will be handled later
894                                  */
895                                 return 0;
896                         }
897
898                         j = buf->data;
899                 }
900
901                 ret = jset_validate_early(c, ca, j, offset,
902                                     end - offset, sectors_read);
903                 switch (ret) {
904                 case 0:
905                         sectors = vstruct_sectors(j, c->block_bits);
906                         break;
907                 case JOURNAL_ENTRY_REREAD:
908                         if (vstruct_bytes(j) > buf->size) {
909                                 ret = journal_read_buf_realloc(buf,
910                                                         vstruct_bytes(j));
911                                 if (ret)
912                                         return ret;
913                         }
914                         goto reread;
915                 case JOURNAL_ENTRY_NONE:
916                         if (!saw_bad)
917                                 return 0;
918                         /*
919                          * On checksum error we don't really trust the size
920                          * field of the journal entry we read, so try reading
921                          * again at next block boundary:
922                          */
923                         sectors = block_sectors(c);
924                         goto next_block;
925                 default:
926                         return ret;
927                 }
928
929                 /*
930                  * This happens sometimes if we don't have discards on -
931                  * when we've partially overwritten a bucket with new
932                  * journal entries. We don't need the rest of the
933                  * bucket:
934                  */
935                 if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
936                         return 0;
937
938                 ja->bucket_seq[bucket] = le64_to_cpu(j->seq);
939
940                 csum_good = jset_csum_good(c, j);
941                 if (!csum_good)
942                         saw_bad = true;
943
944                 ret = bch2_encrypt(c, JSET_CSUM_TYPE(j), journal_nonce(j),
945                              j->encrypted_start,
946                              vstruct_end(j) - (void *) j->encrypted_start);
947                 bch2_fs_fatal_err_on(ret, c,
948                                 "error decrypting journal entry: %i", ret);
949
950                 mutex_lock(&jlist->lock);
951                 ret = journal_entry_add(c, ca, (struct journal_ptr) {
952                                         .csum_good      = csum_good,
953                                         .dev            = ca->dev_idx,
954                                         .bucket         = bucket,
955                                         .bucket_offset  = offset -
956                                                 bucket_to_sector(ca, ja->buckets[bucket]),
957                                         .sector         = offset,
958                                         }, jlist, j);
959                 mutex_unlock(&jlist->lock);
960
961                 switch (ret) {
962                 case JOURNAL_ENTRY_ADD_OK:
963                         break;
964                 case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
965                         break;
966                 default:
967                         return ret;
968                 }
969 next_block:
970                 pr_debug("next");
971                 offset          += sectors;
972                 sectors_read    -= sectors;
973                 j = ((void *) j) + (sectors << 9);
974         }
975
976         return 0;
977 }
978
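/*
 * Per-device closure callback: read every journal bucket on this device, then
 * work out which bucket we'll continue writing to (cur_idx) and how many
 * sectors remain unwritten in it (sectors_free).
 */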
979 static void bch2_journal_read_device(struct closure *cl)
980 {
981         struct journal_device *ja =
982                 container_of(cl, struct journal_device, read);
983         struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
984         struct bch_fs *c = ca->fs;
985         struct journal_list *jlist =
986                 container_of(cl->parent, struct journal_list, cl);
987         struct journal_replay *r, **_r;
988         struct genradix_iter iter;
989         struct journal_read_buf buf = { NULL, 0 };
990         u64 min_seq = U64_MAX;
991         unsigned i;
992         int ret = 0;
993
994         if (!ja->nr)
995                 goto out;
996
997         ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
998         if (ret)
999                 goto err;
1000
1001         pr_debug("%u journal buckets", ja->nr);
1002
1003         for (i = 0; i < ja->nr; i++) {
1004                 ret = journal_read_bucket(ca, &buf, jlist, i);
1005                 if (ret)
1006                         goto err;
1007         }
1008
1009         /* Find the journal bucket with the highest sequence number: */
1010         for (i = 0; i < ja->nr; i++) {
1011                 if (ja->bucket_seq[i] > ja->bucket_seq[ja->cur_idx])
1012                         ja->cur_idx = i;
1013
1014                 min_seq = min(ja->bucket_seq[i], min_seq);
1015         }
1016
1017         /*
1018          * If there are duplicate journal entries in multiple buckets (which
1019          * definitely isn't supposed to happen, but...) - make sure to start
1020          * cur_idx at the last of those buckets, so we don't deadlock trying to
1021          * allocate
1022          */
1023         while (ja->bucket_seq[ja->cur_idx] > min_seq &&
1024                ja->bucket_seq[ja->cur_idx] ==
1025                ja->bucket_seq[(ja->cur_idx + 1) % ja->nr])
1026                 ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
1027
1028         ja->sectors_free = ca->mi.bucket_size;
1029
1030         mutex_lock(&jlist->lock);
1031         genradix_for_each(&c->journal_entries, iter, _r) {
1032                 r = *_r;
1033
1034                 if (!r)
1035                         continue;
1036
1037                 for (i = 0; i < r->nr_ptrs; i++) {
1038                         if (r->ptrs[i].dev == ca->dev_idx &&
1039                             sector_to_bucket(ca, r->ptrs[i].sector) == ja->buckets[ja->cur_idx]) {
1040                                 unsigned wrote = bucket_remainder(ca, r->ptrs[i].sector) +
1041                                         vstruct_sectors(&r->j, c->block_bits);
1042
1043                                 ja->sectors_free = min(ja->sectors_free,
1044                                                        ca->mi.bucket_size - wrote);
1045                         }
1046                 }
1047         }
1048         mutex_unlock(&jlist->lock);
1049
1050         if (ja->bucket_seq[ja->cur_idx] &&
1051             ja->sectors_free == ca->mi.bucket_size) {
1052                 bch_err(c, "ja->sectors_free == ca->mi.bucket_size");
1053                 bch_err(c, "cur_idx %u/%u", ja->cur_idx, ja->nr);
1054                 for (i = 0; i < 3; i++) {
1055                         unsigned idx = (ja->cur_idx + ja->nr - 1 + i) % ja->nr;
1056                         bch_err(c, "bucket_seq[%u] = %llu", idx, ja->bucket_seq[idx]);
1057                 }
1058                 ja->sectors_free = 0;
1059         }
1060
1061         /*
1062          * Set dirty_idx to indicate the entire journal is full and needs to be
1063          * reclaimed - journal reclaim will immediately reclaim whatever isn't
1064          * pinned when it first runs:
1065          */
1066         ja->discard_idx = ja->dirty_idx_ondisk =
1067                 ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
1068 out:
1069         bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
1070         kvpfree(buf.data, buf.size);
1071         percpu_ref_put(&ca->io_ref);
1072         closure_return(cl);
1073         return;
1074 err:
1075         mutex_lock(&jlist->lock);
1076         jlist->ret = ret;
1077         mutex_unlock(&jlist->lock);
1078         goto out;
1079 }
1080
1081 void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
1082                                struct journal_replay *j)
1083 {
1084         unsigned i;
1085
1086         for (i = 0; i < j->nr_ptrs; i++) {
1087                 struct bch_dev *ca = bch_dev_bkey_exists(c, j->ptrs[i].dev);
1088                 u64 offset;
1089
1090                 div64_u64_rem(j->ptrs[i].sector, ca->mi.bucket_size, &offset);
1091
1092                 if (i)
1093                         prt_printf(out, " ");
1094                 prt_printf(out, "%u:%u:%u (sector %llu)",
1095                        j->ptrs[i].dev,
1096                        j->ptrs[i].bucket,
1097                        j->ptrs[i].bucket_offset,
1098                        j->ptrs[i].sector);
1099         }
1100 }
1101
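/*
 * Read the journal from every device: kick off one read closure per device,
 * then walk the collected entries to find the newest flush entry, drop
 * blacklisted and stale entries, warn about missing sequence numbers, and
 * verify that each remaining entry's replicas are marked in the superblock.
 */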
1102 int bch2_journal_read(struct bch_fs *c, u64 *blacklist_seq, u64 *start_seq)
1103 {
1104         struct journal_list jlist;
1105         struct journal_replay *i, **_i, *prev = NULL;
1106         struct genradix_iter radix_iter;
1107         struct bch_dev *ca;
1108         unsigned iter;
1109         struct printbuf buf = PRINTBUF;
1110         size_t keys = 0, entries = 0;
1111         bool degraded = false;
1112         u64 seq, last_seq = 0;
1113         int ret = 0;
1114
1115         closure_init_stack(&jlist.cl);
1116         mutex_init(&jlist.lock);
1117         jlist.last_seq = 0;
1118         jlist.ret = 0;
1119
1120         for_each_member_device(ca, c, iter) {
1121                 if (!c->opts.fsck &&
1122                     !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
1123                         continue;
1124
1125                 if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
1126                      ca->mi.state == BCH_MEMBER_STATE_ro) &&
1127                     percpu_ref_tryget(&ca->io_ref))
1128                         closure_call(&ca->journal.read,
1129                                      bch2_journal_read_device,
1130                                      system_unbound_wq,
1131                                      &jlist.cl);
1132                 else
1133                         degraded = true;
1134         }
1135
1136         closure_sync(&jlist.cl);
1137
1138         if (jlist.ret)
1139                 return jlist.ret;
1140
1141         *start_seq = 0;
1142
1143         /*
1144          * Find most recent flush entry, and ignore newer non flush entries -
1145          * those entries will be blacklisted:
1146          */
1147         genradix_for_each_reverse(&c->journal_entries, radix_iter, _i) {
1148                 i = *_i;
1149
1150                 if (!i || i->ignore)
1151                         continue;
1152
1153                 if (!*start_seq)
1154                         *start_seq = le64_to_cpu(i->j.seq) + 1;
1155
1156                 if (!JSET_NO_FLUSH(&i->j)) {
1157                         int write = READ;
1158                         if (journal_entry_err_on(le64_to_cpu(i->j.last_seq) > le64_to_cpu(i->j.seq),
1159                                                  c, &i->j, NULL,
1160                                                  "invalid journal entry: last_seq > seq (%llu > %llu)",
1161                                                  le64_to_cpu(i->j.last_seq),
1162                                                  le64_to_cpu(i->j.seq)))
1163                                 i->j.last_seq = i->j.seq;
1164
1165                         last_seq        = le64_to_cpu(i->j.last_seq);
1166                         *blacklist_seq  = le64_to_cpu(i->j.seq) + 1;
1167                         break;
1168                 }
1169
1170                 journal_replay_free(c, i);
1171         }
1172
1173         if (!*start_seq) {
1174                 bch_info(c, "journal read done, but no entries found");
1175                 return 0;
1176         }
1177
1178         if (!last_seq) {
1179                 fsck_err(c, "journal read done, but no entries found after dropping non-flushes");
1180                 ret = -1;
1181                 goto err;
1182         }
1183
1184         /* Drop blacklisted entries and entries older than last_seq: */
1185         genradix_for_each(&c->journal_entries, radix_iter, _i) {
1186                 i = *_i;
1187
1188                 if (!i || i->ignore)
1189                         continue;
1190
1191                 seq = le64_to_cpu(i->j.seq);
1192                 if (seq < last_seq) {
1193                         journal_replay_free(c, i);
1194                         continue;
1195                 }
1196
1197                 if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
1198                         fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
1199                                     "found blacklisted journal entry %llu", seq);
1200
1201                         journal_replay_free(c, i);
1202                 }
1203         }
1204
1205         /* Check for missing entries: */
1206         seq = last_seq;
1207         genradix_for_each(&c->journal_entries, radix_iter, _i) {
1208                 i = *_i;
1209
1210                 if (!i || i->ignore)
1211                         continue;
1212
1213                 BUG_ON(seq > le64_to_cpu(i->j.seq));
1214
1215                 while (seq < le64_to_cpu(i->j.seq)) {
1216                         u64 missing_start, missing_end;
1217                         struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
1218
1219                         while (seq < le64_to_cpu(i->j.seq) &&
1220                                bch2_journal_seq_is_blacklisted(c, seq, false))
1221                                 seq++;
1222
1223                         if (seq == le64_to_cpu(i->j.seq))
1224                                 break;
1225
1226                         missing_start = seq;
1227
1228                         while (seq < le64_to_cpu(i->j.seq) &&
1229                                !bch2_journal_seq_is_blacklisted(c, seq, false))
1230                                 seq++;
1231
1232                         if (prev) {
1233                                 bch2_journal_ptrs_to_text(&buf1, c, prev);
1234                                 prt_printf(&buf1, " size %zu", vstruct_sectors(&prev->j, c->block_bits));
1235                         } else
1236                                 prt_printf(&buf1, "(none)");
1237                         bch2_journal_ptrs_to_text(&buf2, c, i);
1238
1239                         missing_end = seq - 1;
1240                         fsck_err(c, "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
1241                                  "  prev at %s\n"
1242                                  "  next at %s",
1243                                  missing_start, missing_end,
1244                                  last_seq, *blacklist_seq - 1,
1245                                  buf1.buf, buf2.buf);
1246
1247                         printbuf_exit(&buf1);
1248                         printbuf_exit(&buf2);
1249                 }
1250
1251                 prev = i;
1252                 seq++;
1253         }
1254
1255         genradix_for_each(&c->journal_entries, radix_iter, _i) {
1256                 struct jset_entry *entry;
1257                 struct bkey_i *k, *_n;
1258                 struct bch_replicas_padded replicas = {
1259                         .e.data_type = BCH_DATA_journal,
1260                         .e.nr_required = 1,
1261                 };
1262                 unsigned ptr;
1263
1264                 i = *_i;
1265                 if (!i || i->ignore)
1266                         continue;
1267
1268                 for (ptr = 0; ptr < i->nr_ptrs; ptr++) {
1269                         struct bch_dev *ca = bch_dev_bkey_exists(c, i->ptrs[ptr].dev);
1270
1271                         if (!i->ptrs[ptr].csum_good)
1272                                 printk(KERN_ERR "bcachefs (%s) sector %llu: invalid journal checksum, seq %llu%s\n",
1273                                        ca->name, i->ptrs[ptr].sector,
1274                                        le64_to_cpu(i->j.seq),
1275                                        i->csum_good ? " (had good copy on another device)" : "");
1276                 }
1277
1278                 ret = jset_validate(c,
1279                                     bch_dev_bkey_exists(c, i->ptrs[0].dev),
1280                                     &i->j,
1281                                     i->ptrs[0].sector,
1282                                     READ);
1283                 if (ret)
1284                         goto err;
1285
1286                 for (ptr = 0; ptr < i->nr_ptrs; ptr++)
1287                         replicas.e.devs[replicas.e.nr_devs++] = i->ptrs[ptr].dev;
1288
1289                 bch2_replicas_entry_sort(&replicas.e);
1290
1291                 /*
1292                  * If we're mounting in degraded mode - if we didn't read all
1293                  * the devices - this is wrong:
1294                  */
1295
1296                 printbuf_reset(&buf);
1297                 bch2_replicas_entry_to_text(&buf, &replicas.e);
1298
1299                 if (!degraded &&
1300                     fsck_err_on(!bch2_replicas_marked(c, &replicas.e), c,
1301                                 "superblock not marked as containing replicas %s",
1302                                 buf.buf)) {
1303                         ret = bch2_mark_replicas(c, &replicas.e);
1304                         if (ret)
1305                                 goto err;
1306                 }
1307
1308                 for_each_jset_key(k, _n, entry, &i->j)
1309                         keys++;
1310                 entries++;
1311         }
1312
1313         bch_info(c, "journal read done, %zu keys in %zu entries, seq %llu",
1314                  keys, entries, *start_seq);
1315
1316         if (*start_seq != *blacklist_seq)
1317                 bch_info(c, "dropped unflushed entries %llu-%llu",
1318                          *blacklist_seq, *start_seq - 1);
1319 err:
1320 fsck_err:
1321         printbuf_exit(&buf);
1322         return ret;
1323 }
1324
1325 /* journal write: */
1326
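/*
 * Add pointers to the journal write key for up to replicas_want replicas,
 * walking devs_sorted in order and skipping devices that are read-only, have
 * no durability, have no journal, are already in the key, or don't have
 * enough sectors free for this write.
 */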
1327 static void __journal_write_alloc(struct journal *j,
1328                                   struct journal_buf *w,
1329                                   struct dev_alloc_list *devs_sorted,
1330                                   unsigned sectors,
1331                                   unsigned *replicas,
1332                                   unsigned replicas_want)
1333 {
1334         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1335         struct journal_device *ja;
1336         struct bch_dev *ca;
1337         unsigned i;
1338
1339         if (*replicas >= replicas_want)
1340                 return;
1341
1342         for (i = 0; i < devs_sorted->nr; i++) {
1343                 ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
1344                 if (!ca)
1345                         continue;
1346
1347                 ja = &ca->journal;
1348
1349                 /*
1350                  * Check that we can use this device, and aren't already using
1351                  * it:
1352                  */
1353                 if (!ca->mi.durability ||
1354                     ca->mi.state != BCH_MEMBER_STATE_rw ||
1355                     !ja->nr ||
1356                     bch2_bkey_has_device(bkey_i_to_s_c(&w->key),
1357                                          ca->dev_idx) ||
1358                     sectors > ja->sectors_free)
1359                         continue;
1360
1361                 bch2_dev_stripe_increment(ca, &j->wp.stripe);
1362
1363                 bch2_bkey_append_ptr(&w->key,
1364                         (struct bch_extent_ptr) {
1365                                   .offset = bucket_to_sector(ca,
1366                                         ja->buckets[ja->cur_idx]) +
1367                                         ca->mi.bucket_size -
1368                                         ja->sectors_free,
1369                                   .dev = ca->dev_idx,
1370                 });
1371
1372                 ja->sectors_free -= sectors;
1373                 ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
1374
1375                 *replicas += ca->mi.durability;
1376
1377                 if (*replicas >= replicas_want)
1378                         break;
1379         }
1380 }
1381
1382 /**
1383  * journal_write_alloc - decide where to write the next journal entry
1384  */
1385 static int journal_write_alloc(struct journal *j, struct journal_buf *w,
1386                                unsigned sectors)
1387 {
1388         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1389         struct bch_devs_mask devs;
1390         struct journal_device *ja;
1391         struct bch_dev *ca;
1392         struct dev_alloc_list devs_sorted;
1393         unsigned target = c->opts.metadata_target ?:
1394                 c->opts.foreground_target;
1395         unsigned i, replicas = 0, replicas_want =
1396                 READ_ONCE(c->opts.metadata_replicas);
1397
1398         rcu_read_lock();
1399 retry:
1400         devs = target_rw_devs(c, BCH_DATA_journal, target);
1401
1402         devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs);
1403
1404         __journal_write_alloc(j, w, &devs_sorted,
1405                               sectors, &replicas, replicas_want);
1406
1407         if (replicas >= replicas_want)
1408                 goto done;
1409
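        /*
         * Didn't get enough replicas from the devices' current buckets:
         * advance each device whose current bucket is too full and that still
         * has buckets available, then retry the allocation below.
         */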
1410         for (i = 0; i < devs_sorted.nr; i++) {
1411                 ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
1412                 if (!ca)
1413                         continue;
1414
1415                 ja = &ca->journal;
1416
1417                 if (sectors > ja->sectors_free &&
1418                     sectors <= ca->mi.bucket_size &&
1419                     bch2_journal_dev_buckets_available(j, ja,
1420                                         journal_space_discarded)) {
1421                         ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
1422                         ja->sectors_free = ca->mi.bucket_size;
1423
1424                         /*
1425                          * ja->bucket_seq[ja->cur_idx] must always have
1426                          * something sensible:
1427                          */
1428                         ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
1429                 }
1430         }
1431
1432         __journal_write_alloc(j, w, &devs_sorted,
1433                               sectors, &replicas, replicas_want);
1434
1435         if (replicas < replicas_want && target) {
1436                 /* Retry from all devices: */
1437                 target = 0;
1438                 goto retry;
1439         }
1440 done:
1441         rcu_read_unlock();
1442
1443         BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);
1444
1445         return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
1446 }
1447
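/*
 * Grow the journal buffer to the size the journal has been asking for
 * (j->buf_size_want): the allocation and copy happen outside j->lock, only
 * the pointer swap is done under the lock.
 */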
1448 static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
1449 {
1450         /* we aren't holding j->lock: */
1451         unsigned new_size = READ_ONCE(j->buf_size_want);
1452         void *new_buf;
1453
1454         if (buf->buf_size >= new_size)
1455                 return;
1456
1457         new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
1458         if (!new_buf)
1459                 return;
1460
1461         memcpy(new_buf, buf->data, buf->buf_size);
1462
1463         spin_lock(&j->lock);
1464         swap(buf->data,         new_buf);
1465         swap(buf->buf_size,     new_size);
1466         spin_unlock(&j->lock);
1467
1468         kvpfree(new_buf, new_size);
1469 }
1470
1471 static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
1472 {
1473         return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK);
1474 }
1475
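/*
 * Completion for an entire journal write: called once every bio has finished.
 * Marks the replicas the entry landed on, updates the on-disk sequence
 * numbers, advances unwritten_idx, and kicks off journal reclaim and/or the
 * next journal write.
 */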
1476 static void journal_write_done(struct closure *cl)
1477 {
1478         struct journal *j = container_of(cl, struct journal, io);
1479         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1480         struct journal_buf *w = journal_last_unwritten_buf(j);
1481         struct bch_replicas_padded replicas;
1482         union journal_res_state old, new;
1483         u64 v, seq;
1484         int err = 0;
1485
1486         bch2_time_stats_update(!JSET_NO_FLUSH(w->data)
1487                                ? j->flush_write_time
1488                                : j->noflush_write_time, j->write_start_time);
1489
1490         if (!w->devs_written.nr) {
1491                 bch_err(c, "unable to write journal to sufficient devices");
1492                 err = -EIO;
1493         } else {
1494                 bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
1495                                          w->devs_written);
1496                 if (bch2_mark_replicas(c, &replicas.e))
1497                         err = -EIO;
1498         }
1499
1500         if (err)
1501                 bch2_fatal_error(c);
1502
1503         spin_lock(&j->lock);
1504         seq = le64_to_cpu(w->data->seq);
1505
1506         if (seq >= j->pin.front)
1507                 journal_seq_pin(j, seq)->devs = w->devs_written;
1508
1509         if (!err) {
1510                 if (!JSET_NO_FLUSH(w->data)) {
1511                         j->flushed_seq_ondisk = seq;
1512                         j->last_seq_ondisk = w->last_seq;
1513
1514                         bch2_do_discards(c);
1515                         closure_wake_up(&c->freelist_wait);
1516                 }
1517         } else if (!j->err_seq || seq < j->err_seq)
1518                 j->err_seq      = seq;
1519
1520         j->seq_ondisk           = seq;
1521
1522         /*
1523          * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
1524          * more buckets:
1525          *
1526          * Must come before signaling write completion, for
1527          * bch2_fs_journal_stop():
1528          */
1529         if (j->watermark)
1530                 journal_reclaim_kick(&c->journal);
1531
1532         /* also must come before signalling write completion: */
1533         closure_debug_destroy(cl);
1534
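        /* Atomically advance unwritten_idx past the buffer we just wrote: */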
1535         v = atomic64_read(&j->reservations.counter);
1536         do {
1537                 old.v = new.v = v;
1538                 BUG_ON(journal_state_count(new, new.unwritten_idx));
1539
1540                 new.unwritten_idx++;
1541         } while ((v = atomic64_cmpxchg(&j->reservations.counter,
1542                                        old.v, new.v)) != old.v);
1543
1544         bch2_journal_space_available(j);
1545
1546         closure_wake_up(&w->wait);
1547         journal_wake(j);
1548
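        /*
         * If the next unwritten journal entry has no outstanding reservations
         * (i.e. it's already been closed), start writing it now; otherwise,
         * if the current entry is still open, (re)schedule the delayed write:
         */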
1549         if (!journal_state_count(new, new.unwritten_idx) &&
1550             journal_last_unwritten_seq(j) <= journal_cur_seq(j)) {
1551                 closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
1552         } else if (journal_last_unwritten_seq(j) == journal_cur_seq(j) &&
1553                    new.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL) {
1554                 struct journal_buf *buf = journal_cur_buf(j);
1555                 long delta = buf->expires - jiffies;
1556
1557                 /*
1558                  * We don't close a journal entry to write it while there are
1559                  * previous entries still in flight - the current journal entry
1560                  * might want to be written now:
1561                  */
1562
1563                 mod_delayed_work(c->io_complete_wq, &j->write_work, max(0L, delta));
1564         }
1565
1566         spin_unlock(&j->lock);
1567 }
1568
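/*
 * Per-device completion for a journal write bio: on an IO error, drop the
 * device from devs_written so journal_write_done() can tell whether enough
 * copies actually made it to disk.
 */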
1569 static void journal_write_endio(struct bio *bio)
1570 {
1571         struct bch_dev *ca = bio->bi_private;
1572         struct journal *j = &ca->fs->journal;
1573         struct journal_buf *w = journal_last_unwritten_buf(j);
1574         unsigned long flags;
1575
1576         if (bch2_dev_io_err_on(bio->bi_status, ca, "error writing journal entry %llu: %s",
1577                                le64_to_cpu(w->data->seq),
1578                                bch2_blk_status_to_str(bio->bi_status)) ||
1579             bch2_meta_write_fault("journal")) {
1580                 spin_lock_irqsave(&j->err_lock, flags);
1581                 bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx);
1582                 spin_unlock_irqrestore(&j->err_lock, flags);
1583         }
1584
1585         closure_put(&j->io);
1586         percpu_ref_put(&ca->io_ref);
1587 }
1588
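/*
 * Submit the journal write bio to every device we allocated a pointer on,
 * then continue to journal_write_done() once they all complete.
 */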
1589 static void do_journal_write(struct closure *cl)
1590 {
1591         struct journal *j = container_of(cl, struct journal, io);
1592         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1593         struct bch_dev *ca;
1594         struct journal_buf *w = journal_last_unwritten_buf(j);
1595         struct bch_extent_ptr *ptr;
1596         struct bio *bio;
1597         unsigned sectors = vstruct_sectors(w->data, c->block_bits);
1598
1599         extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
1600                 ca = bch_dev_bkey_exists(c, ptr->dev);
1601                 if (!percpu_ref_tryget(&ca->io_ref)) {
1602                         /* XXX: fix this */
1603                         bch_err(c, "missing device for journal write");
1604                         continue;
1605                 }
1606
1607                 this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
1608                              sectors);
1609
1610                 bio = ca->journal.bio;
1611                 bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
1612                 bio->bi_iter.bi_sector  = ptr->offset;
1613                 bio->bi_end_io          = journal_write_endio;
1614                 bio->bi_private         = ca;
1615
1616                 BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
1617                 ca->prev_journal_sector = bio->bi_iter.bi_sector;
1618
1619                 if (!JSET_NO_FLUSH(w->data))
1620                         bio->bi_opf    |= REQ_FUA;
1621                 if (!JSET_NO_FLUSH(w->data) && !w->separate_flush)
1622                         bio->bi_opf    |= REQ_PREFLUSH;
1623
1624                 bch2_bio_map(bio, w->data, sectors << 9);
1625
1626                 trace_and_count(c, journal_write, bio);
1627                 closure_bio_submit(bio, cl);
1628
1629                 ca->journal.bucket_seq[ca->journal.cur_idx] =
1630                         le64_to_cpu(w->data->seq);
1631         }
1632
1633         continue_at(cl, journal_write_done, c->io_complete_wq);
1634         return;
1635 }
1636
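/*
 * Write out the last unwritten journal buffer: decide whether this is a flush
 * or noflush write, append btree roots and superblock entries, checksum and
 * (if enabled) encrypt the jset, allocate space on journal devices, then
 * issue any separate flushes before handing off to do_journal_write().
 */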
1637 void bch2_journal_write(struct closure *cl)
1638 {
1639         struct journal *j = container_of(cl, struct journal, io);
1640         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1641         struct bch_dev *ca;
1642         struct journal_buf *w = journal_last_unwritten_buf(j);
1643         struct jset_entry *start, *end;
1644         struct jset *jset;
1645         struct bio *bio;
1646         struct printbuf journal_debug_buf = PRINTBUF;
1647         bool validate_before_checksum = false;
1648         unsigned i, sectors, bytes, u64s, nr_rw_members = 0;
1649         int ret;
1650
1651         BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
1652
1653         journal_buf_realloc(j, w);
1654         jset = w->data;
1655
1656         j->write_start_time = local_clock();
1657
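        /*
         * Decide whether this can be a noflush write: if the journal is in an
         * error state, this entry was already marked noflush, or no flush is
         * required and we've flushed recently enough, skip FLUSH/FUA - such an
         * entry doesn't advance last_seq:
         */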
1658         spin_lock(&j->lock);
1659         if (bch2_journal_error(j) ||
1660             w->noflush ||
1661             (!w->must_flush &&
1662              (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
1663              test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags))) {
1664                 w->noflush = true;
1665                 SET_JSET_NO_FLUSH(jset, true);
1666                 jset->last_seq  = 0;
1667                 w->last_seq     = 0;
1668
1669                 j->nr_noflush_writes++;
1670         } else {
1671                 j->last_flush_write = jiffies;
1672                 j->nr_flush_writes++;
1673         }
1674         spin_unlock(&j->lock);
1675
1676         /*
1677          * New btree roots are set by journalling them; when the journal entry
1678          * gets written we have to propagate them to c->btree_roots
1679          *
1680          * But, every journal entry we write has to contain all the btree roots
1681          * (at least for now); so after we copy btree roots to c->btree_roots we
1682          * have to get any missing btree roots and add them to this journal
1683          * entry:
1684          */
1685
1686         bch2_journal_entries_to_btree_roots(c, jset);
1687
1688         start = end = vstruct_last(jset);
1689
1690         end     = bch2_btree_roots_to_journal_entries(c, jset->start, end);
1691
1692         bch2_journal_super_entries_add_common(c, &end,
1693                                 le64_to_cpu(jset->seq));
1694         u64s    = (u64 *) end - (u64 *) start;
1695         BUG_ON(u64s > j->entry_u64s_reserved);
1696
1697         le32_add_cpu(&jset->u64s, u64s);
1698         BUG_ON(vstruct_sectors(jset, c->block_bits) > w->sectors);
1699
1700         jset->magic             = cpu_to_le64(jset_magic(c));
1701         jset->version           = c->sb.version < bcachefs_metadata_version_bkey_renumber
1702                 ? cpu_to_le32(BCH_JSET_VERSION_OLD)
1703                 : cpu_to_le32(c->sb.version);
1704
1705         SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
1706         SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));
1707
1708         if (!JSET_NO_FLUSH(jset) && journal_entry_empty(jset))
1709                 j->last_empty_seq = le64_to_cpu(jset->seq);
1710
1711         if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
1712                 validate_before_checksum = true;
1713
1714         if (le32_to_cpu(jset->version) < bcachefs_metadata_version_current)
1715                 validate_before_checksum = true;
1716
1717         if (validate_before_checksum &&
1718             jset_validate(c, NULL, jset, 0, WRITE))
1719                 goto err;
1720
1721         ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
1722                     jset->encrypted_start,
1723                     vstruct_end(jset) - (void *) jset->encrypted_start);
1724         if (bch2_fs_fatal_err_on(ret, c,
1725                         "error encrypting journal entry: %i", ret))
1726                 goto err;
1727
1728         jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
1729                                   journal_nonce(jset), jset);
1730
1731         if (!validate_before_checksum &&
1732             jset_validate(c, NULL, jset, 0, WRITE))
1733                 goto err;
1734
1735         sectors = vstruct_sectors(jset, c->block_bits);
1736         BUG_ON(sectors > w->sectors);
1737
1738         bytes = vstruct_bytes(jset);
1739         memset((void *) jset + bytes, 0, (sectors << 9) - bytes);
1740
1741 retry_alloc:
1742         spin_lock(&j->lock);
1743         ret = journal_write_alloc(j, w, sectors);
1744
1745         if (ret && j->can_discard) {
1746                 spin_unlock(&j->lock);
1747                 bch2_journal_do_discards(j);
1748                 goto retry_alloc;
1749         }
1750
1751         if (ret)
1752                 __bch2_journal_debug_to_text(&journal_debug_buf, j);
1753
1754         /*
1755          * write is allocated, no longer need to account for it in
1756          * bch2_journal_space_available():
1757          */
1758         w->sectors = 0;
1759
1760         /*
1761          * journal entry has been compacted and allocated, recalculate space
1762          * available:
1763          */
1764         bch2_journal_space_available(j);
1765         spin_unlock(&j->lock);
1766
1767         if (ret) {
1768                 bch_err(c, "Unable to allocate journal write:\n%s",
1769                         journal_debug_buf.buf);
1770                 printbuf_exit(&journal_debug_buf);
1771                 bch2_fatal_error(c);
1772                 continue_at(cl, journal_write_done, c->io_complete_wq);
1773                 return;
1774         }
1775
1776         w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));
1777
1778         if (c->opts.nochanges)
1779                 goto no_io;
1780
1781         for_each_rw_member(ca, c, i)
1782                 nr_rw_members++;
1783
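        /*
         * With more than one rw device, issue explicit flush bios to every rw
         * device up front rather than relying on REQ_PREFLUSH on the journal
         * write itself:
         */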
1784         if (nr_rw_members > 1)
1785                 w->separate_flush = true;
1786
1787         if (!JSET_NO_FLUSH(jset) && w->separate_flush) {
1788                 for_each_rw_member(ca, c, i) {
1789                         percpu_ref_get(&ca->io_ref);
1790
1791                         bio = ca->journal.bio;
1792                         bio_reset(bio, ca->disk_sb.bdev, REQ_OP_FLUSH);
1793                         bio->bi_end_io          = journal_write_endio;
1794                         bio->bi_private         = ca;
1795                         closure_bio_submit(bio, cl);
1796                 }
1797         }
1798
1799         continue_at(cl, do_journal_write, c->io_complete_wq);
1800         return;
1801 no_io:
1802         continue_at(cl, journal_write_done, c->io_complete_wq);
1803         return;
1804 err:
1805         bch2_fatal_error(c);
1806         continue_at(cl, journal_write_done, c->io_complete_wq);
1807 }