1 #include "bcachefs.h"
2 #include "alloc.h"
3 #include "btree_gc.h"
4 #include "btree_update.h"
5 #include "buckets.h"
6 #include "checksum.h"
7 #include "error.h"
8 #include "journal.h"
9 #include "journal_io.h"
10 #include "journal_reclaim.h"
11 #include "journal_seq_blacklist.h"
12 #include "replicas.h"
13
14 #include <trace/events/bcachefs.h>
15
16 static struct jset_entry *bch2_journal_find_entry(struct jset *j, unsigned type,
17                                                  enum btree_id id)
18 {
19         struct jset_entry *entry;
20
21         for_each_jset_entry_type(entry, j, type)
22                 if (entry->btree_id == id)
23                         return entry;
24
25         return NULL;
26 }
27
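/*
 * Look up the btree root entry for @id in journal entry @j: returns the root
 * key and sets *level, NULL if no root was journalled, or an ERR_PTR if the
 * entry is present but empty.
 */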
28 struct bkey_i *bch2_journal_find_btree_root(struct bch_fs *c, struct jset *j,
29                                            enum btree_id id, unsigned *level)
30 {
31         struct bkey_i *k;
32         struct jset_entry *entry =
33                 bch2_journal_find_entry(j, BCH_JSET_ENTRY_btree_root, id);
34
35         if (!entry)
36                 return NULL;
37
38         if (!entry->u64s)
39                 return ERR_PTR(-EINVAL);
40
41         k = entry->start;
42         *level = entry->level;
44         return k;
45 }
46
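/*
 * State shared by all devices while reading the journal: the list of entries
 * found so far, protected by @lock.
 */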
47 struct journal_list {
48         struct closure          cl;
49         struct mutex            lock;
50         struct list_head        *head;
51         int                     ret;
52 };
53
54 #define JOURNAL_ENTRY_ADD_OK            0
55 #define JOURNAL_ENTRY_ADD_OUT_OF_RANGE  5
56
57 /*
58  * Given a journal entry we just read, add it to the list of journal entries to
59  * be replayed:
60  */
61 static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
62                              struct journal_list *jlist, struct jset *j)
63 {
64         struct journal_replay *i, *pos;
65         struct list_head *where;
66         size_t bytes = vstruct_bytes(j);
67         __le64 last_seq;
68         int ret;
69
70         last_seq = !list_empty(jlist->head)
71                 ? list_last_entry(jlist->head, struct journal_replay,
72                                   list)->j.last_seq
73                 : 0;
74
75         /* Is this entry older than the range we need? */
76         if (le64_to_cpu(j->seq) < le64_to_cpu(last_seq)) {
77                 ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
78                 goto out;
79         }
80
81         /* Drop entries we don't need anymore */
82         list_for_each_entry_safe(i, pos, jlist->head, list) {
83                 if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
84                         break;
85                 list_del(&i->list);
86                 kvpfree(i, offsetof(struct journal_replay, j) +
87                         vstruct_bytes(&i->j));
88         }
89
90         list_for_each_entry_reverse(i, jlist->head, list) {
91                 /* Duplicate? */
92                 if (le64_to_cpu(j->seq) == le64_to_cpu(i->j.seq)) {
93                         fsck_err_on(bytes != vstruct_bytes(&i->j) ||
94                                     memcmp(j, &i->j, bytes), c,
95                                     "found duplicate but non-identical journal entries (seq %llu)",
96                                     le64_to_cpu(j->seq));
97                         goto found;
98                 }
99
100                 if (le64_to_cpu(j->seq) > le64_to_cpu(i->j.seq)) {
101                         where = &i->list;
102                         goto add;
103                 }
104         }
105
106         where = jlist->head;
107 add:
108         i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
109         if (!i) {
110                 ret = -ENOMEM;
111                 goto out;
112         }
113
114         list_add(&i->list, where);
115         i->devs.nr = 0;
116         memcpy(&i->j, j, bytes);
117 found:
118         if (!bch2_dev_list_has_dev(i->devs, ca->dev_idx))
119                 bch2_dev_list_add_dev(&i->devs, ca->dev_idx);
120         else
121                 fsck_err_on(1, c, "duplicate journal entries on same device");
122         ret = JOURNAL_ENTRY_ADD_OK;
123 out:
124 fsck_err:
125         return ret;
126 }
127
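/* Nonce for journal checksums/encryption, derived from the entry's sequence number: */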
128 static struct nonce journal_nonce(const struct jset *jset)
129 {
130         return (struct nonce) {{
131                 [0] = 0,
132                 [1] = ((__le32 *) &jset->seq)[0],
133                 [2] = ((__le32 *) &jset->seq)[1],
134                 [3] = BCH_NONCE_JOURNAL,
135         }};
136 }
137
138 /* this fills in a range with empty jset_entries: */
139 static void journal_entry_null_range(void *start, void *end)
140 {
141         struct jset_entry *entry;
142
143         for (entry = start; entry != end; entry = vstruct_next(entry))
144                 memset(entry, 0, sizeof(*entry));
145 }
146
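/* Additional return codes from jset_validate(), beyond the BCH_FSCK_* codes: */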
147 #define JOURNAL_ENTRY_REREAD    5
148 #define JOURNAL_ENTRY_NONE      6
149 #define JOURNAL_ENTRY_BAD       7
150
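/*
 * During a read, validation errors are treated as fixable fsck errors; when
 * validating an entry we're about to write, they indicate corrupt in-memory
 * metadata and abort the write instead.
 */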
151 #define journal_entry_err(c, msg, ...)                                  \
152 ({                                                                      \
153         switch (write) {                                                \
154         case READ:                                                      \
155                 mustfix_fsck_err(c, msg, ##__VA_ARGS__);                \
156                 break;                                                  \
157         case WRITE:                                                     \
158                 bch_err(c, "corrupt metadata before write:\n"           \
159                         msg, ##__VA_ARGS__);                            \
160                 if (bch2_fs_inconsistent(c)) {                          \
161                         ret = BCH_FSCK_ERRORS_NOT_FIXED;                \
162                         goto fsck_err;                                  \
163                 }                                                       \
164                 break;                                                  \
165         }                                                               \
166         true;                                                           \
167 })
168
169 #define journal_entry_err_on(cond, c, msg, ...)                         \
170         ((cond) ? journal_entry_err(c, msg, ##__VA_ARGS__) : false)
171
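/*
 * Validate a single key within a journal entry: fixable problems (zero u64s,
 * key overrunning the entry, bad format, invalid key) are repaired by dropping
 * or nulling out the key so that replay can still proceed.
 */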
172 static int journal_validate_key(struct bch_fs *c, struct jset *jset,
173                                 struct jset_entry *entry,
174                                 struct bkey_i *k, enum bkey_type key_type,
175                                 const char *type, int write)
176 {
177         void *next = vstruct_next(entry);
178         const char *invalid;
179         char buf[160];
180         int ret = 0;
181
182         if (journal_entry_err_on(!k->k.u64s, c,
183                         "invalid %s in journal: k->u64s 0", type)) {
184                 entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
185                 journal_entry_null_range(vstruct_next(entry), next);
186                 return 0;
187         }
188
189         if (journal_entry_err_on((void *) bkey_next(k) >
190                                 (void *) vstruct_next(entry), c,
191                         "invalid %s in journal: extends past end of journal entry",
192                         type)) {
193                 entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
194                 journal_entry_null_range(vstruct_next(entry), next);
195                 return 0;
196         }
197
198         if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, c,
199                         "invalid %s in journal: bad format %u",
200                         type, k->k.format)) {
201                 le16_add_cpu(&entry->u64s, -k->k.u64s);
202                 memmove(k, bkey_next(k), next - (void *) bkey_next(k));
203                 journal_entry_null_range(vstruct_next(entry), next);
204                 return 0;
205         }
206
207         if (JSET_BIG_ENDIAN(jset) != CPU_BIG_ENDIAN)
208                 bch2_bkey_swab(key_type, NULL, bkey_to_packed(k));
209
210         invalid = bch2_bkey_invalid(c, key_type, bkey_i_to_s_c(k));
211         if (invalid) {
212                 bch2_bkey_val_to_text(c, key_type, buf, sizeof(buf),
213                                      bkey_i_to_s_c(k));
214                 mustfix_fsck_err(c, "invalid %s in journal: %s\n%s",
215                                  type, invalid, buf);
216
217                 le16_add_cpu(&entry->u64s, -k->k.u64s);
218                 memmove(k, bkey_next(k), next - (void *) bkey_next(k));
219                 journal_entry_null_range(vstruct_next(entry), next);
220                 return 0;
221         }
222 fsck_err:
223         return ret;
224 }
225
226 static int journal_entry_validate_btree_keys(struct bch_fs *c,
227                                              struct jset *jset,
228                                              struct jset_entry *entry,
229                                              int write)
230 {
231         struct bkey_i *k;
232
233         vstruct_for_each(entry, k) {
234                 int ret = journal_validate_key(c, jset, entry, k,
235                                 bkey_type(entry->level,
236                                           entry->btree_id),
237                                 "key", write);
238                 if (ret)
239                         return ret;
240         }
241
242         return 0;
243 }
244
245 static int journal_entry_validate_btree_root(struct bch_fs *c,
246                                              struct jset *jset,
247                                              struct jset_entry *entry,
248                                              int write)
249 {
250         struct bkey_i *k = entry->start;
251         int ret = 0;
252
253         if (journal_entry_err_on(!entry->u64s ||
254                                  le16_to_cpu(entry->u64s) != k->k.u64s, c,
255                                  "invalid btree root journal entry: wrong number of keys")) {
256                 void *next = vstruct_next(entry);
257                 /*
258                  * we don't want to null out this jset_entry,
259                  * just the contents, so that later we can tell
260                  * we were _supposed_ to have a btree root
261                  */
262                 entry->u64s = 0;
263                 journal_entry_null_range(vstruct_next(entry), next);
264                 return 0;
265         }
266
267         return journal_validate_key(c, jset, entry, k, BKEY_TYPE_BTREE,
268                                     "btree root", write);
269 fsck_err:
270         return ret;
271 }
272
273 static int journal_entry_validate_prio_ptrs(struct bch_fs *c,
274                                             struct jset *jset,
275                                             struct jset_entry *entry,
276                                             int write)
277 {
278         /* obsolete, don't care: */
279         return 0;
280 }
281
282 static int journal_entry_validate_blacklist(struct bch_fs *c,
283                                             struct jset *jset,
284                                             struct jset_entry *entry,
285                                             int write)
286 {
287         int ret = 0;
288
289         if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, c,
290                 "invalid journal seq blacklist entry: bad size")) {
291                 journal_entry_null_range(entry, vstruct_next(entry));
292         }
293 fsck_err:
294         return ret;
295 }
296
297 static int journal_entry_validate_blacklist_v2(struct bch_fs *c,
298                                                struct jset *jset,
299                                                struct jset_entry *entry,
300                                                int write)
301 {
302         struct jset_entry_blacklist_v2 *bl_entry;
303         int ret = 0;
304
305         if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, c,
306                 "invalid journal seq blacklist entry: bad size")) {
307                 journal_entry_null_range(entry, vstruct_next(entry));
308         }
309
310         bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);
311
312         if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
313                                  le64_to_cpu(bl_entry->end), c,
314                 "invalid journal seq blacklist entry: start > end")) {
315                 journal_entry_null_range(entry, vstruct_next(entry));
316         }
317
318 fsck_err:
319         return ret;
320 }
321
322 struct jset_entry_ops {
323         int (*validate)(struct bch_fs *, struct jset *,
324                         struct jset_entry *, int);
325 };
326
327 const struct jset_entry_ops bch2_jset_entry_ops[] = {
328 #define x(f, nr)                                                \
329         [BCH_JSET_ENTRY_##f]    = (struct jset_entry_ops) {     \
330                 .validate       = journal_entry_validate_##f,   \
331         },
332         BCH_JSET_ENTRY_TYPES()
333 #undef x
334 };
335
336 static int journal_entry_validate(struct bch_fs *c, struct jset *jset,
337                                   struct jset_entry *entry, int write)
338 {
339         int ret = 0;
340
341         if (entry->type >= BCH_JSET_ENTRY_NR) {
342                 journal_entry_err(c, "invalid journal entry type %u",
343                                   entry->type);
344                 journal_entry_null_range(entry, vstruct_next(entry));
345                 return 0;
346         }
347
348         ret = bch2_jset_entry_ops[entry->type].validate(c, jset, entry, write);
349 fsck_err:
350         return ret;
351 }
352
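/* Validate each entry in a jset, truncating the jset if an entry overruns it: */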
353 static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
354                                  int write)
355 {
356         struct jset_entry *entry;
357         int ret = 0;
358
359         vstruct_for_each(jset, entry) {
360                 if (journal_entry_err_on(vstruct_next(entry) >
361                                          vstruct_last(jset), c,
362                                 "journal entry extends past end of jset")) {
363                         jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
364                         break;
365                 }
366
367                 ret = journal_entry_validate(c, jset, entry, write);
368                 if (ret)
369                         break;
370         }
371 fsck_err:
372         return ret;
373 }
374
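/*
 * Basic validation of a journal entry read from disk: check magic, version,
 * size and checksum, then decrypt the payload. Returns 0, one of the
 * JOURNAL_ENTRY_* codes above, or a BCH_FSCK_* error.
 */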
375 static int jset_validate(struct bch_fs *c,
376                          struct jset *jset, u64 sector,
377                          unsigned bucket_sectors_left,
378                          unsigned sectors_read,
379                          int write)
380 {
381         size_t bytes = vstruct_bytes(jset);
382         struct bch_csum csum;
383         int ret = 0;
384
385         if (le64_to_cpu(jset->magic) != jset_magic(c))
386                 return JOURNAL_ENTRY_NONE;
387
388         if (le32_to_cpu(jset->version) != BCACHE_JSET_VERSION) {
389                 bch_err(c, "unknown journal entry version %u",
390                         le32_to_cpu(jset->version));
391                 return BCH_FSCK_UNKNOWN_VERSION;
392         }
393
394         if (journal_entry_err_on(bytes > bucket_sectors_left << 9, c,
395                                  "journal entry too big (%zu bytes), sector %llu",
396                                  bytes, sector)) {
397                 /* XXX: note we might have missing journal entries */
398                 return JOURNAL_ENTRY_BAD;
399         }
400
401         if (bytes > sectors_read << 9)
402                 return JOURNAL_ENTRY_REREAD;
403
404         if (fsck_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), c,
405                         "journal entry with unknown csum type %llu, sector %llu",
406                         JSET_CSUM_TYPE(jset), sector))
407                 return JOURNAL_ENTRY_BAD;
408
409         csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset);
410         if (journal_entry_err_on(bch2_crc_cmp(csum, jset->csum), c,
411                                  "journal checksum bad, sector %llu", sector)) {
412                 /* XXX: retry IO, when we start retrying checksum errors */
413                 /* XXX: note we might have missing journal entries */
414                 return JOURNAL_ENTRY_BAD;
415         }
416
417         bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
418                      jset->encrypted_start,
419                      vstruct_end(jset) - (void *) jset->encrypted_start);
420
421         if (journal_entry_err_on(le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), c,
422                                  "invalid journal entry: last_seq > seq"))
423                 jset->last_seq = jset->seq;
424
425         return 0;
426 fsck_err:
427         return ret;
428 }
429
430 struct journal_read_buf {
431         void            *data;
432         size_t          size;
433 };
434
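/* Grow the journal read buffer to at least @new_size, capped at JOURNAL_ENTRY_SIZE_MAX: */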
435 static int journal_read_buf_realloc(struct journal_read_buf *b,
436                                     size_t new_size)
437 {
438         void *n;
439
440         /* the bios are sized for this many pages, max: */
441         if (new_size > JOURNAL_ENTRY_SIZE_MAX)
442                 return -ENOMEM;
443
444         new_size = roundup_pow_of_two(new_size);
445         n = kvpmalloc(new_size, GFP_KERNEL);
446         if (!n)
447                 return -ENOMEM;
448
449         kvpfree(b->data, b->size);
450         b->data = n;
451         b->size = new_size;
452         return 0;
453 }
454
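/*
 * Read a single journal bucket, validating each entry found and adding the
 * good ones to the replay list; *seq is updated to the highest sequence number
 * seen and *entries_found is set if anything was added.
 */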
455 static int journal_read_bucket(struct bch_dev *ca,
456                                struct journal_read_buf *buf,
457                                struct journal_list *jlist,
458                                unsigned bucket, u64 *seq, bool *entries_found)
459 {
460         struct bch_fs *c = ca->fs;
461         struct journal_device *ja = &ca->journal;
462         struct bio *bio = ja->bio;
463         struct jset *j = NULL;
464         unsigned sectors, sectors_read = 0;
465         u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
466             end = offset + ca->mi.bucket_size;
467         bool saw_bad = false;
468         int ret = 0;
469
470         pr_debug("reading %u", bucket);
471
472         while (offset < end) {
473                 if (!sectors_read) {
474 reread:                 sectors_read = min_t(unsigned,
475                                 end - offset, buf->size >> 9);
476
477                         bio_reset(bio);
478                         bio_set_dev(bio, ca->disk_sb.bdev);
479                         bio->bi_iter.bi_sector  = offset;
480                         bio->bi_iter.bi_size    = sectors_read << 9;
481                         bio_set_op_attrs(bio, REQ_OP_READ, 0);
482                         bch2_bio_map(bio, buf->data);
483
484                         ret = submit_bio_wait(bio);
485
486                         if (bch2_dev_io_err_on(ret, ca,
487                                                "journal read from sector %llu",
488                                                offset) ||
489                             bch2_meta_read_fault("journal"))
490                                 return -EIO;
491
492                         j = buf->data;
493                 }
494
495                 ret = jset_validate(c, j, offset,
496                                     end - offset, sectors_read,
497                                     READ);
498                 switch (ret) {
499                 case BCH_FSCK_OK:
500                         break;
501                 case JOURNAL_ENTRY_REREAD:
502                         if (vstruct_bytes(j) > buf->size) {
503                                 ret = journal_read_buf_realloc(buf,
504                                                         vstruct_bytes(j));
505                                 if (ret)
506                                         return ret;
507                         }
508                         goto reread;
509                 case JOURNAL_ENTRY_NONE:
510                         if (!saw_bad)
511                                 return 0;
512                         sectors = c->opts.block_size;
513                         goto next_block;
514                 case JOURNAL_ENTRY_BAD:
515                         saw_bad = true;
516                         sectors = c->opts.block_size;
517                         goto next_block;
518                 default:
519                         return ret;
520                 }
521
522                 /*
523                  * This happens sometimes if we don't have discards on -
524                  * when we've partially overwritten a bucket with new
525                  * journal entries. We don't need the rest of the
526                  * bucket:
527                  */
528                 if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
529                         return 0;
530
531                 ja->bucket_seq[bucket] = le64_to_cpu(j->seq);
532
533                 mutex_lock(&jlist->lock);
534                 ret = journal_entry_add(c, ca, jlist, j);
535                 mutex_unlock(&jlist->lock);
536
537                 switch (ret) {
538                 case JOURNAL_ENTRY_ADD_OK:
539                         *entries_found = true;
540                         break;
541                 case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
542                         break;
543                 default:
544                         return ret;
545                 }
546
547                 if (le64_to_cpu(j->seq) > *seq)
548                         *seq = le64_to_cpu(j->seq);
549
550                 sectors = vstruct_sectors(j, c->block_bits);
551 next_block:
552                 pr_debug("next");
553                 offset          += sectors;
554                 sectors_read    -= sectors;
555                 j = ((void *) j) + (sectors << 9);
556         }
557
558         return 0;
559 }
560
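/*
 * Per-device journal read, run as a closure: scan the device's journal buckets
 * for entries, recording the highest sequence number seen in each bucket, then
 * set ja->cur_idx/last_idx from the bucket containing the newest entry.
 */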
561 static void bch2_journal_read_device(struct closure *cl)
562 {
563 #define read_bucket(b)                                                  \
564         ({                                                              \
565                 bool entries_found = false;                             \
566                 ret = journal_read_bucket(ca, &buf, jlist, b, &seq,     \
567                                           &entries_found);              \
568                 if (ret)                                                \
569                         goto err;                                       \
570                 __set_bit(b, bitmap);                                   \
571                 entries_found;                                          \
572          })
573
574         struct journal_device *ja =
575                 container_of(cl, struct journal_device, read);
576         struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
577         struct journal_list *jlist =
578                 container_of(cl->parent, struct journal_list, cl);
579         struct request_queue *q = bdev_get_queue(ca->disk_sb.bdev);
580         struct journal_read_buf buf = { NULL, 0 };
581
582         DECLARE_BITMAP(bitmap, ja->nr);
583         unsigned i, l, r;
584         u64 seq = 0;
585         int ret;
586
587         if (!ja->nr)
588                 goto out;
589
590         bitmap_zero(bitmap, ja->nr);
591         ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
592         if (ret)
593                 goto err;
594
595         pr_debug("%u journal buckets", ja->nr);
596
597         /*
598          * If the device supports discard but not secure discard, we can't do
599          * the fancy Fibonacci hash/binary search because the live journal
600          * entries might not form a contiguous range:
601          */
602         for (i = 0; i < ja->nr; i++)
603                 read_bucket(i);
604         goto search_done;
605
606         if (!blk_queue_nonrot(q))
607                 goto linear_scan;
608
609         /*
610          * Read journal buckets ordered by golden ratio hash to quickly
611          * find a sequence of buckets with valid journal entries
612          */
613         for (i = 0; i < ja->nr; i++) {
614                 l = (i * 2654435769U) % ja->nr;
615
616                 if (test_bit(l, bitmap))
617                         break;
618
619                 if (read_bucket(l))
620                         goto bsearch;
621         }
622
623         /*
624          * If that fails, check all the buckets we haven't checked
625          * already
626          */
627         pr_debug("falling back to linear search");
628 linear_scan:
629         for (l = find_first_zero_bit(bitmap, ja->nr);
630              l < ja->nr;
631              l = find_next_zero_bit(bitmap, ja->nr, l + 1))
632                 if (read_bucket(l))
633                         goto bsearch;
634
635         /* no journal entries on this device? */
636         if (l == ja->nr)
637                 goto out;
638 bsearch:
639         /* Binary search */
640         r = find_next_bit(bitmap, ja->nr, l + 1);
641         pr_debug("starting binary search, l %u r %u", l, r);
642
643         while (l + 1 < r) {
644                 unsigned m = (l + r) >> 1;
645                 u64 cur_seq = seq;
646
647                 read_bucket(m);
648
649                 if (cur_seq != seq)
650                         l = m;
651                 else
652                         r = m;
653         }
654
655 search_done:
656         /*
657          * Find the journal bucket with the highest sequence number:
658          *
659          * If there are duplicate journal entries in multiple buckets (which
660          * definitely isn't supposed to happen, but...) - make sure to start
661          * cur_idx at the last of those buckets, so we don't deadlock trying to
662          * allocate
663          */
664         seq = 0;
665
666         for (i = 0; i < ja->nr; i++)
667                 if (ja->bucket_seq[i] >= seq &&
668                     ja->bucket_seq[i] != ja->bucket_seq[(i + 1) % ja->nr]) {
669                         /*
670                          * When journal_next_bucket() goes to allocate for
671                          * the first time, it'll use the bucket after
672                          * ja->cur_idx
673                          */
674                         ja->cur_idx = i;
675                         seq = ja->bucket_seq[i];
676                 }
677
678         /*
679          * Set last_idx to indicate the entire journal is full and needs to be
680          * reclaimed - journal reclaim will immediately reclaim whatever isn't
681          * pinned when it first runs:
682          */
683         ja->last_idx = (ja->cur_idx + 1) % ja->nr;
684
685         /*
686          * Read buckets in reverse order until we stop finding more journal
687          * entries:
688          */
689         for (i = (ja->cur_idx + ja->nr - 1) % ja->nr;
690              i != ja->cur_idx;
691              i = (i + ja->nr - 1) % ja->nr)
692                 if (!test_bit(i, bitmap) &&
693                     !read_bucket(i))
694                         break;
695 out:
696         kvpfree(buf.data, buf.size);
697         percpu_ref_put(&ca->io_ref);
698         closure_return(cl);
699 err:
700         mutex_lock(&jlist->lock);
701         jlist->ret = ret;
702         mutex_unlock(&jlist->lock);
703         goto out;
704 #undef read_bucket
705 }
706
707 void bch2_journal_entries_free(struct list_head *list)
708 {
709
710         while (!list_empty(list)) {
711                 struct journal_replay *i =
712                         list_first_entry(list, struct journal_replay, list);
713                 list_del(&i->list);
714                 kvpfree(i, offsetof(struct journal_replay, j) +
715                         vstruct_bytes(&i->j));
716         }
717 }
718
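/* True if any entry in the list of journal entries to replay contains keys: */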
719 static inline bool journal_has_keys(struct list_head *list)
720 {
721         struct journal_replay *i;
722         struct jset_entry *entry;
723         struct bkey_i *k, *_n;
724
725         list_for_each_entry(i, list, list)
726                 for_each_jset_key(k, _n, entry, &i->j)
727                         return true;
728
729         return false;
730 }
731
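/*
 * Read the journal from all devices, validate what was found, initialize the
 * journal pin FIFO and sequence numbers from the newest entry, and check for
 * blacklisted or missing sequence numbers.
 */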
732 int bch2_journal_read(struct bch_fs *c, struct list_head *list)
733 {
734         struct journal *j = &c->journal;
735         struct journal_list jlist;
736         struct journal_replay *i;
737         struct journal_entry_pin_list *p;
738         struct bch_dev *ca;
739         u64 cur_seq, end_seq, seq;
740         unsigned iter, keys = 0, entries = 0;
741         size_t nr;
742         bool degraded = false;
743         int ret = 0;
744
745         closure_init_stack(&jlist.cl);
746         mutex_init(&jlist.lock);
747         jlist.head = list;
748         jlist.ret = 0;
749
750         for_each_member_device(ca, c, iter) {
751                 if (!(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_JOURNAL)))
752                         continue;
753
754                 if ((ca->mi.state == BCH_MEMBER_STATE_RW ||
755                      ca->mi.state == BCH_MEMBER_STATE_RO) &&
756                     percpu_ref_tryget(&ca->io_ref))
757                         closure_call(&ca->journal.read,
758                                      bch2_journal_read_device,
759                                      system_unbound_wq,
760                                      &jlist.cl);
761                 else
762                         degraded = true;
763         }
764
765         closure_sync(&jlist.cl);
766
767         if (jlist.ret)
768                 return jlist.ret;
769
770         if (list_empty(list)) {
771                 bch_err(c, "no journal entries found");
772                 return BCH_FSCK_REPAIR_IMPOSSIBLE;
773         }
774
775         fsck_err_on(c->sb.clean && journal_has_keys(list), c,
776                     "filesystem marked clean but journal has keys to replay");
777
778         list_for_each_entry(i, list, list) {
779                 ret = jset_validate_entries(c, &i->j, READ);
780                 if (ret)
781                         goto fsck_err;
782
783                 /*
784                  * If we're mounting in degraded mode - if we didn't read all
785                  * the devices - this is wrong:
786                  */
787
788                 if (!degraded &&
789                     (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
790                      fsck_err_on(!bch2_replicas_marked(c, BCH_DATA_JOURNAL,
791                                                        i->devs), c,
792                                  "superblock not marked as containing replicas (type %u)",
793                                  BCH_DATA_JOURNAL))) {
794                         ret = bch2_mark_replicas(c, BCH_DATA_JOURNAL, i->devs);
795                         if (ret)
796                                 return ret;
797                 }
798         }
799
800         i = list_last_entry(list, struct journal_replay, list);
801
802         nr = le64_to_cpu(i->j.seq) - le64_to_cpu(i->j.last_seq) + 1;
803
804         if (nr > j->pin.size) {
805                 free_fifo(&j->pin);
806                 init_fifo(&j->pin, roundup_pow_of_two(nr), GFP_KERNEL);
807                 if (!j->pin.data) {
808                         bch_err(c, "error reallocating journal fifo (%zu open entries)", nr);
809                         return -ENOMEM;
810                 }
811         }
812
813         atomic64_set(&j->seq, le64_to_cpu(i->j.seq));
814         j->last_seq_ondisk = le64_to_cpu(i->j.last_seq);
815
816         j->pin.front    = le64_to_cpu(i->j.last_seq);
817         j->pin.back     = le64_to_cpu(i->j.seq) + 1;
818
819         fifo_for_each_entry_ptr(p, &j->pin, seq) {
820                 INIT_LIST_HEAD(&p->list);
821                 INIT_LIST_HEAD(&p->flushed);
822                 atomic_set(&p->count, 0);
823                 p->devs.nr = 0;
824         }
825
826         mutex_lock(&j->blacklist_lock);
827
828         list_for_each_entry(i, list, list) {
829                 p = journal_seq_pin(j, le64_to_cpu(i->j.seq));
830
831                 atomic_set(&p->count, 1);
832                 p->devs = i->devs;
833
834                 if (bch2_journal_seq_blacklist_read(j, i)) {
835                         mutex_unlock(&j->blacklist_lock);
836                         return -ENOMEM;
837                 }
838         }
839
840         mutex_unlock(&j->blacklist_lock);
841
842         cur_seq = journal_last_seq(j);
843         end_seq = le64_to_cpu(list_last_entry(list,
844                                 struct journal_replay, list)->j.seq);
845
846         list_for_each_entry(i, list, list) {
847                 struct jset_entry *entry;
848                 struct bkey_i *k, *_n;
849                 bool blacklisted;
850
851                 mutex_lock(&j->blacklist_lock);
852                 while (cur_seq < le64_to_cpu(i->j.seq) &&
853                        bch2_journal_seq_blacklist_find(j, cur_seq))
854                         cur_seq++;
855
856                 blacklisted = bch2_journal_seq_blacklist_find(j,
857                                                          le64_to_cpu(i->j.seq));
858                 mutex_unlock(&j->blacklist_lock);
859
860                 fsck_err_on(blacklisted, c,
861                             "found blacklisted journal entry %llu",
862                             le64_to_cpu(i->j.seq));
863
864                 fsck_err_on(le64_to_cpu(i->j.seq) != cur_seq, c,
865                         "journal entries %llu-%llu missing! (replaying %llu-%llu)",
866                         cur_seq, le64_to_cpu(i->j.seq) - 1,
867                         journal_last_seq(j), end_seq);
868
869                 cur_seq = le64_to_cpu(i->j.seq) + 1;
870
871                 for_each_jset_key(k, _n, entry, &i->j)
872                         keys++;
873                 entries++;
874         }
875
876         bch_info(c, "journal read done, %u keys in %u entries, seq %llu",
877                  keys, entries, journal_cur_seq(j));
878 fsck_err:
879         return ret;
880 }
881
882 /* journal replay: */
883
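/*
 * Mark pointers in journalled keys (for bucket accounting) before the keys are
 * replayed into the btree:
 */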
884 int bch2_journal_mark(struct bch_fs *c, struct list_head *list)
885 {
886         struct bkey_i *k, *n;
887         struct jset_entry *j;
888         struct journal_replay *r;
889         int ret;
890
891         list_for_each_entry(r, list, list)
892                 for_each_jset_key(k, n, j, &r->j) {
893                         enum bkey_type type = bkey_type(j->level, j->btree_id);
894                         struct bkey_s_c k_s_c = bkey_i_to_s_c(k);
895
896                         if (btree_type_has_ptrs(type)) {
897                                 ret = bch2_btree_mark_key_initial(c, type, k_s_c);
898                                 if (ret)
899                                         return ret;
900                         }
901                 }
902
903         return 0;
904 }
905
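/*
 * Replay keys from the journal into the btree: BTREE_ID_ALLOC keys go through
 * the allocator, everything else through bch2_btree_insert(); the journal pin
 * for each entry is dropped once its keys have been inserted.
 */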
906 int bch2_journal_replay(struct bch_fs *c, struct list_head *list)
907 {
908         struct journal *j = &c->journal;
909         struct journal_entry_pin_list *pin_list;
910         struct bkey_i *k, *_n;
911         struct jset_entry *entry;
912         struct journal_replay *i, *n;
913         int ret = 0;
914
915         list_for_each_entry_safe(i, n, list, list) {
916
917                 j->replay_journal_seq = le64_to_cpu(i->j.seq);
918
919                 for_each_jset_key(k, _n, entry, &i->j) {
920
921                         if (entry->btree_id == BTREE_ID_ALLOC) {
922                                 /*
923                                  * allocation code handles replay for
924                                  * BTREE_ID_ALLOC keys:
925                                  */
926                                 ret = bch2_alloc_replay_key(c, k->k.p);
927                         } else {
928                                 /*
929                                  * We might cause compressed extents to be
930                                  * split, so we need to pass in a
931                                  * disk_reservation:
932                                  */
933                                 struct disk_reservation disk_res =
934                                         bch2_disk_reservation_init(c, 0);
935
936                                 ret = bch2_btree_insert(c, entry->btree_id, k,
937                                                         &disk_res, NULL, NULL,
938                                                         BTREE_INSERT_NOFAIL|
939                                                         BTREE_INSERT_JOURNAL_REPLAY);
940                         }
941
942                         if (ret) {
943                                 bch_err(c, "journal replay: error %d while replaying key",
944                                         ret);
945                                 goto err;
946                         }
947
948                         cond_resched();
949                 }
950
951                 pin_list = journal_seq_pin(j, j->replay_journal_seq);
952
953                 if (atomic_dec_and_test(&pin_list->count))
954                         journal_wake(j);
955         }
956
957         j->replay_journal_seq = 0;
958
959         bch2_journal_set_replay_done(j);
960         ret = bch2_journal_flush_all_pins(j);
961 err:
962         bch2_journal_entries_free(list);
963         return ret;
964 }
965
966 /* journal write: */
967
968 static void bch2_journal_add_btree_root(struct journal_buf *buf,
969                                        enum btree_id id, struct bkey_i *k,
970                                        unsigned level)
971 {
972         struct jset_entry *entry;
973
974         entry = bch2_journal_add_entry_noreservation(buf, k->k.u64s);
975         entry->type     = BCH_JSET_ENTRY_btree_root;
976         entry->btree_id = id;
977         entry->level    = level;
978         memcpy_u64s(entry->_data, k, k->k.u64s);
979 }
980
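/*
 * Number of journal buckets on @ca available for the next journal write, after
 * the reserves described below:
 */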
981 static unsigned journal_dev_buckets_available(struct journal *j,
982                                               struct bch_dev *ca)
983 {
984         struct journal_device *ja = &ca->journal;
985         unsigned next = (ja->cur_idx + 1) % ja->nr;
986         unsigned available = (ja->last_idx + ja->nr - next) % ja->nr;
987
988         /*
989          * Hack to avoid a deadlock during journal replay:
990          * journal replay might require setting a new btree
991          * root, which requires writing another journal entry -
992          * thus, if the journal is full (and this happens when
993          * replaying the first journal bucket's entries) we're
994          * screwed.
995          *
996          * So don't let the journal fill up unless we're in
997          * replay:
998          */
999         if (test_bit(JOURNAL_REPLAY_DONE, &j->flags))
1000                 available = max((int) available - 2, 0);
1001
1002         /*
1003          * Don't use the last bucket unless writing the new last_seq
1004          * will make another bucket available:
1005          */
1006         if (ja->bucket_seq[ja->last_idx] >= journal_last_seq(j))
1007                 available = max((int) available - 1, 0);
1008
1009         return available;
1010 }
1011
1012 /* returns number of sectors available for next journal entry: */
1013 int bch2_journal_entry_sectors(struct journal *j)
1014 {
1015         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1016         struct bch_dev *ca;
1017         struct bkey_s_extent e = bkey_i_to_s_extent(&j->key);
1018         unsigned sectors_available = UINT_MAX;
1019         unsigned i, nr_online = 0, nr_devs = 0;
1020
1021         lockdep_assert_held(&j->lock);
1022
1023         rcu_read_lock();
1024         for_each_member_device_rcu(ca, c, i,
1025                                    &c->rw_devs[BCH_DATA_JOURNAL]) {
1026                 struct journal_device *ja = &ca->journal;
1027                 unsigned buckets_required = 0;
1028
1029                 if (!ja->nr)
1030                         continue;
1031
1032                 sectors_available = min_t(unsigned, sectors_available,
1033                                           ca->mi.bucket_size);
1034
1035                 /*
1036                  * Note that we don't allocate the space for a journal entry
1037                  * until we write it out - thus, if we haven't started the write
1038                  * for the previous entry we have to make sure we have space for
1039                  * it too:
1040                  */
1041                 if (bch2_extent_has_device(e.c, ca->dev_idx)) {
1042                         if (j->prev_buf_sectors > ja->sectors_free)
1043                                 buckets_required++;
1044
1045                         if (j->prev_buf_sectors + sectors_available >
1046                             ja->sectors_free)
1047                                 buckets_required++;
1048                 } else {
1049                         if (j->prev_buf_sectors + sectors_available >
1050                             ca->mi.bucket_size)
1051                                 buckets_required++;
1052
1053                         buckets_required++;
1054                 }
1055
1056                 if (journal_dev_buckets_available(j, ca) >= buckets_required)
1057                         nr_devs++;
1058                 nr_online++;
1059         }
1060         rcu_read_unlock();
1061
1062         if (nr_online < c->opts.metadata_replicas_required)
1063                 return -EROFS;
1064
1065         if (nr_devs < min_t(unsigned, nr_online, c->opts.metadata_replicas))
1066                 return 0;
1067
1068         return sectors_available;
1069 }
1070
1071 /**
1072  * journal_write_alloc - allocate journal buckets for the next journal write
1073  */
1074 static int journal_write_alloc(struct journal *j, struct journal_buf *w,
1075                                unsigned sectors)
1076 {
1077         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1078         struct bkey_s_extent e;
1079         struct bch_extent_ptr *ptr;
1080         struct journal_device *ja;
1081         struct bch_dev *ca;
1082         struct dev_alloc_list devs_sorted;
1083         unsigned i, replicas, replicas_want =
1084                 READ_ONCE(c->opts.metadata_replicas);
1085
1086         spin_lock(&j->lock);
1087         e = bkey_i_to_s_extent(&j->key);
1088
1089         /*
1090          * Drop any pointers to devices that have been removed, are no longer
1091          * empty, or filled up their current journal bucket:
1092          *
1093          * Note that a device may have had a small amount of free space (perhaps
1094          * one sector) that wasn't enough for the smallest possible journal
1095          * entry - that's why we drop pointers to devices <= current free space,
1096          * i.e. whichever device was limiting the current journal entry size.
1097          */
1098         extent_for_each_ptr_backwards(e, ptr) {
1099                 ca = bch_dev_bkey_exists(c, ptr->dev);
1100
1101                 if (ca->mi.state != BCH_MEMBER_STATE_RW ||
1102                     ca->journal.sectors_free <= sectors)
1103                         __bch2_extent_drop_ptr(e, ptr);
1104                 else
1105                         ca->journal.sectors_free -= sectors;
1106         }
1107
1108         replicas = bch2_extent_nr_ptrs(e.c);
1109
1110         rcu_read_lock();
1111         devs_sorted = bch2_wp_alloc_list(c, &j->wp,
1112                                          &c->rw_devs[BCH_DATA_JOURNAL]);
1113
1114         for (i = 0; i < devs_sorted.nr; i++) {
1115                 ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
1116                 if (!ca)
1117                         continue;
1118
1119                 if (!ca->mi.durability)
1120                         continue;
1121
1122                 ja = &ca->journal;
1123                 if (!ja->nr)
1124                         continue;
1125
1126                 if (replicas >= replicas_want)
1127                         break;
1128
1129                 /*
1130                  * Check that we can use this device, and aren't already using
1131                  * it:
1132                  */
1133                 if (bch2_extent_has_device(e.c, ca->dev_idx) ||
1134                     !journal_dev_buckets_available(j, ca) ||
1135                     sectors > ca->mi.bucket_size)
1136                         continue;
1137
1138                 j->wp.next_alloc[ca->dev_idx] += U32_MAX;
1139                 bch2_wp_rescale(c, ca, &j->wp);
1140
1141                 ja->sectors_free = ca->mi.bucket_size - sectors;
1142                 ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
1143                 ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
1144
1145                 extent_ptr_append(bkey_i_to_extent(&j->key),
1146                         (struct bch_extent_ptr) {
1147                                   .offset = bucket_to_sector(ca,
1148                                         ja->buckets[ja->cur_idx]),
1149                                   .dev = ca->dev_idx,
1150                 });
1151
1152                 replicas += ca->mi.durability;
1153         }
1154         rcu_read_unlock();
1155
1156         j->prev_buf_sectors = 0;
1157
1158         bkey_copy(&w->key, &j->key);
1159         spin_unlock(&j->lock);
1160
1161         if (replicas < c->opts.metadata_replicas_required)
1162                 return -EROFS;
1163
1164         BUG_ON(!replicas);
1165
1166         return 0;
1167 }
1168
1169 static void journal_write_compact(struct jset *jset)
1170 {
1171         struct jset_entry *i, *next, *prev = NULL;
1172
1173         /*
1174          * Simple compaction, dropping empty jset_entries (from journal
1175          * reservations that weren't fully used) and merging jset_entries that
1176          * can be.
1177          *
1178          * If we wanted to be really fancy here, we could sort all the keys in
1179          * the jset and drop keys that were overwritten - probably not worth it:
1180          */
1181         vstruct_for_each_safe(jset, i, next) {
1182                 unsigned u64s = le16_to_cpu(i->u64s);
1183
1184                 /* Empty entry: */
1185                 if (!u64s)
1186                         continue;
1187
1188                 /* Can we merge with previous entry? */
1189                 if (prev &&
1190                     i->btree_id == prev->btree_id &&
1191                     i->level    == prev->level &&
1192                     i->type     == prev->type &&
1193                     i->type     == BCH_JSET_ENTRY_btree_keys &&
1194                     le16_to_cpu(prev->u64s) + u64s <= U16_MAX) {
1195                         memmove_u64s_down(vstruct_next(prev),
1196                                           i->_data,
1197                                           u64s);
1198                         le16_add_cpu(&prev->u64s, u64s);
1199                         continue;
1200                 }
1201
1202                 /* Couldn't merge, move i into new position (after prev): */
1203                 prev = prev ? vstruct_next(prev) : jset->start;
1204                 if (i != prev)
1205                         memmove_u64s_down(prev, i, jset_u64s(u64s));
1206         }
1207
1208         prev = prev ? vstruct_next(prev) : jset->start;
1209         jset->u64s = cpu_to_le32((u64 *) prev - jset->_data);
1210 }
1211
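/* Best-effort resize of the journal buffer toward j->buf_size_want: */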
1212 static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
1213 {
1214         /* we aren't holding j->lock: */
1215         unsigned new_size = READ_ONCE(j->buf_size_want);
1216         void *new_buf;
1217
1218         if (buf->size >= new_size)
1219                 return;
1220
1221         new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
1222         if (!new_buf)
1223                 return;
1224
1225         memcpy(new_buf, buf->data, buf->size);
1226         kvpfree(buf->data, buf->size);
1227         buf->data       = new_buf;
1228         buf->size       = new_size;
1229 }
1230
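/*
 * Journal write completion: record which devices the entry made it to, update
 * last_seq_ondisk, kick journal reclaim and wake up anyone waiting on this
 * write.
 */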
1231 static void journal_write_done(struct closure *cl)
1232 {
1233         struct journal *j = container_of(cl, struct journal, io);
1234         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1235         struct journal_buf *w = journal_prev_buf(j);
1236         struct bch_devs_list devs =
1237                 bch2_extent_devs(bkey_i_to_s_c_extent(&w->key));
1238         u64 seq = le64_to_cpu(w->data->seq);
1239
1240         if (!devs.nr) {
1241                 bch_err(c, "unable to write journal to sufficient devices");
1242                 goto err;
1243         }
1244
1245         if (bch2_mark_replicas(c, BCH_DATA_JOURNAL, devs))
1246                 goto err;
1247 out:
1248         bch2_time_stats_update(j->write_time, j->write_start_time);
1249
1250         spin_lock(&j->lock);
1251         j->last_seq_ondisk = seq;
1252         if (seq >= j->pin.front)
1253                 journal_seq_pin(j, seq)->devs = devs;
1254
1255         /*
1256          * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
1257          * more buckets:
1258          *
1259          * Must come before signaling write completion, for
1260          * bch2_fs_journal_stop():
1261          */
1262         mod_delayed_work(system_freezable_wq, &j->reclaim_work, 0);
1263
1264         /* also must come before signalling write completion: */
1265         closure_debug_destroy(cl);
1266
1267         BUG_ON(!j->reservations.prev_buf_unwritten);
1268         atomic64_sub(((union journal_res_state) { .prev_buf_unwritten = 1 }).v,
1269                      &j->reservations.counter);
1270
1271         closure_wake_up(&w->wait);
1272         journal_wake(j);
1273
1274         if (test_bit(JOURNAL_NEED_WRITE, &j->flags))
1275                 mod_delayed_work(system_freezable_wq, &j->write_work, 0);
1276         spin_unlock(&j->lock);
1277         return;
1278 err:
1279         bch2_fatal_error(c);
1280         bch2_journal_halt(j);
1281         goto out;
1282 }
1283
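/*
 * Per-device endio: on I/O error, drop the failed device from the write's key
 * so that only successful replicas are recorded by journal_write_done().
 */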
1284 static void journal_write_endio(struct bio *bio)
1285 {
1286         struct bch_dev *ca = bio->bi_private;
1287         struct journal *j = &ca->fs->journal;
1288
1289         if (bch2_dev_io_err_on(bio->bi_status, ca, "journal write") ||
1290             bch2_meta_write_fault("journal")) {
1291                 struct journal_buf *w = journal_prev_buf(j);
1292                 unsigned long flags;
1293
1294                 spin_lock_irqsave(&j->err_lock, flags);
1295                 bch2_extent_drop_device(bkey_i_to_s_extent(&w->key), ca->dev_idx);
1296                 spin_unlock_irqrestore(&j->err_lock, flags);
1297         }
1298
1299         closure_put(&j->io);
1300         percpu_ref_put(&ca->io_ref);
1301 }
1302
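/*
 * Main journal write path: add dirty btree roots, compact the entry, checksum
 * and encrypt it, allocate journal buckets, then submit the writes (plus cache
 * flushes to devices the entry isn't being written to).
 */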
1303 void bch2_journal_write(struct closure *cl)
1304 {
1305         struct journal *j = container_of(cl, struct journal, io);
1306         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1307         struct bch_dev *ca;
1308         struct journal_buf *w = journal_prev_buf(j);
1309         struct jset *jset;
1310         struct bio *bio;
1311         struct bch_extent_ptr *ptr;
1312         unsigned i, sectors, bytes;
1313
1314         journal_buf_realloc(j, w);
1315         jset = w->data;
1316
1317         j->write_start_time = local_clock();
1318         mutex_lock(&c->btree_root_lock);
1319         for (i = 0; i < BTREE_ID_NR; i++) {
1320                 struct btree_root *r = &c->btree_roots[i];
1321
1322                 if (r->alive)
1323                         bch2_journal_add_btree_root(w, i, &r->key, r->level);
1324         }
1325         c->btree_roots_dirty = false;
1326         mutex_unlock(&c->btree_root_lock);
1327
1328         journal_write_compact(jset);
1329
1330         jset->read_clock        = cpu_to_le16(c->bucket_clock[READ].hand);
1331         jset->write_clock       = cpu_to_le16(c->bucket_clock[WRITE].hand);
1332         jset->magic             = cpu_to_le64(jset_magic(c));
1333         jset->version           = cpu_to_le32(BCACHE_JSET_VERSION);
1334
1335         SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
1336         SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));
1337
1338         if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)) &&
1339             jset_validate_entries(c, jset, WRITE))
1340                 goto err;
1341
1342         bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
1343                     jset->encrypted_start,
1344                     vstruct_end(jset) - (void *) jset->encrypted_start);
1345
1346         jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
1347                                   journal_nonce(jset), jset);
1348
1349         if (!bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)) &&
1350             jset_validate_entries(c, jset, WRITE))
1351                 goto err;
1352
1353         sectors = vstruct_sectors(jset, c->block_bits);
1354         BUG_ON(sectors > j->prev_buf_sectors);
1355
1356         bytes = vstruct_bytes(w->data);
1357         memset((void *) w->data + bytes, 0, (sectors << 9) - bytes);
1358
1359         if (journal_write_alloc(j, w, sectors)) {
1360                 bch2_journal_halt(j);
1361                 bch_err(c, "Unable to allocate journal write");
1362                 bch2_fatal_error(c);
1363                 continue_at(cl, journal_write_done, system_highpri_wq);
1364         }
1365
1366         /*
1367          * XXX: we really should just disable the entire journal in nochanges
1368          * mode
1369          */
1370         if (c->opts.nochanges)
1371                 goto no_io;
1372
1373         extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
1374                 ca = bch_dev_bkey_exists(c, ptr->dev);
1375                 if (!percpu_ref_tryget(&ca->io_ref)) {
1376                         /* XXX: fix this */
1377                         bch_err(c, "missing device for journal write");
1378                         continue;
1379                 }
1380
1381                 this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_JOURNAL],
1382                              sectors);
1383
1384                 bio = ca->journal.bio;
1385                 bio_reset(bio);
1386                 bio_set_dev(bio, ca->disk_sb.bdev);
1387                 bio->bi_iter.bi_sector  = ptr->offset;
1388                 bio->bi_iter.bi_size    = sectors << 9;
1389                 bio->bi_end_io          = journal_write_endio;
1390                 bio->bi_private         = ca;
1391                 bio_set_op_attrs(bio, REQ_OP_WRITE,
1392                                  REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
1393                 bch2_bio_map(bio, jset);
1394
1395                 trace_journal_write(bio);
1396                 closure_bio_submit(bio, cl);
1397
1398                 ca->journal.bucket_seq[ca->journal.cur_idx] = le64_to_cpu(w->data->seq);
1399         }
1400
1401         for_each_rw_member(ca, c, i)
1402                 if (journal_flushes_device(ca) &&
1403                     !bch2_extent_has_device(bkey_i_to_s_c_extent(&w->key), i)) {
1404                         percpu_ref_get(&ca->io_ref);
1405
1406                         bio = ca->journal.bio;
1407                         bio_reset(bio);
1408                         bio_set_dev(bio, ca->disk_sb.bdev);
1409                         bio->bi_opf             = REQ_OP_FLUSH;
1410                         bio->bi_end_io          = journal_write_endio;
1411                         bio->bi_private         = ca;
1412                         closure_bio_submit(bio, cl);
1413                 }
1414
1415 no_io:
1416         extent_for_each_ptr(bkey_i_to_s_extent(&j->key), ptr)
1417                 ptr->offset += sectors;
1418
1419         continue_at(cl, journal_write_done, system_highpri_wq);
1420 err:
1421         bch2_inconsistent_error(c);
1422         continue_at(cl, journal_write_done, system_highpri_wq);
1423 }