// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "error.h"
#include "journal_io.h"
#include "replicas.h"
#include "sb-clean.h"
#include "super-io.h"

/*
 * BCH_SB_FIELD_clean:
 *
 * Btree roots, and a few other things, are recovered from the journal after an
 * unclean shutdown - but after a clean shutdown, to avoid having to read the
 * journal, we can store them in the superblock.
 *
 * bch_sb_field_clean simply contains a list of journal entries, stored exactly
 * as they would be in the journal:
 */
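
/*
 * Illustrative shape of the section, as consumed by the code below (not an
 * exhaustive field list - see bch2_sb_clean_to_text()):
 *
 *   struct bch_sb_field_clean
 *     flags
 *     journal_seq     seq of the last journal entry written before shutdown
 *     start[]         jset entries packed back to back, walked with
 *                     vstruct_next(): btree roots, usage and clock entries
 */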

int bch2_sb_clean_validate_late(struct bch_fs *c, struct bch_sb_field_clean *clean,
                                int write)
{
        struct jset_entry *entry;
        int ret;

        for (entry = clean->start;
             entry < (struct jset_entry *) vstruct_end(&clean->field);
             entry = vstruct_next(entry)) {
                ret = bch2_journal_entry_validate(c, NULL, entry,
                                                  le16_to_cpu(c->disk_sb.sb->version),
                                                  BCH_SB_BIG_ENDIAN(c->disk_sb.sb),
                                                  write);
                if (ret)
                        return ret;
        }

        return 0;
}

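/*
 * Scan either the clean section (if @clean is non-NULL) or the journal entry
 * set @j for the btree_root entry for btree @id: returns the root key and
 * sets *level, returns NULL if no root entry is present, or an ERR_PTR if
 * the entry is empty.
 */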
static struct bkey_i *btree_root_find(struct bch_fs *c,
                                      struct bch_sb_field_clean *clean,
                                      struct jset *j,
                                      enum btree_id id, unsigned *level)
{
        struct bkey_i *k;
        struct jset_entry *entry, *start, *end;

        if (clean) {
                start = clean->start;
                end = vstruct_end(&clean->field);
        } else {
                start = j->start;
                end = vstruct_last(j);
        }

        for (entry = start; entry < end; entry = vstruct_next(entry))
                if (entry->type == BCH_JSET_ENTRY_btree_root &&
                    entry->btree_id == id)
                        goto found;

        return NULL;
found:
        if (!entry->u64s)
                return ERR_PTR(-EINVAL);

        k = entry->start;
        *level = entry->level;
        return k;
}

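/*
 * Cross-check the clean section against the most recent journal entry: the
 * journal sequence numbers must match, and every btree root recorded in the
 * superblock must be identical to the one in the journal. On a sequence
 * mismatch the clean section is dropped (*cleanp is set to NULL) so recovery
 * falls back to the journal; other mismatches are surfaced as fsck errors.
 */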
int bch2_verify_superblock_clean(struct bch_fs *c,
                                 struct bch_sb_field_clean **cleanp,
                                 struct jset *j)
{
        unsigned i;
        struct bch_sb_field_clean *clean = *cleanp;
        struct printbuf buf1 = PRINTBUF;
        struct printbuf buf2 = PRINTBUF;
        int ret = 0;

        if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c,
                        sb_clean_journal_seq_mismatch,
                        "superblock journal seq (%llu) doesn't match journal (%llu) after clean shutdown",
                        le64_to_cpu(clean->journal_seq),
                        le64_to_cpu(j->seq))) {
                kfree(clean);
                *cleanp = NULL;
                return 0;
        }

        for (i = 0; i < BTREE_ID_NR; i++) {
                struct bkey_i *k1, *k2;
                unsigned l1 = 0, l2 = 0;

                k1 = btree_root_find(c, clean, NULL, i, &l1);
                k2 = btree_root_find(c, NULL, j, i, &l2);

                if (!k1 && !k2)
                        continue;

                printbuf_reset(&buf1);
                printbuf_reset(&buf2);

                if (k1)
                        bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(k1));
                else
                        prt_printf(&buf1, "(none)");

                if (k2)
                        bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(k2));
                else
                        prt_printf(&buf2, "(none)");

                mustfix_fsck_err_on(!k1 || !k2 ||
                                    IS_ERR(k1) ||
                                    IS_ERR(k2) ||
                                    k1->k.u64s != k2->k.u64s ||
                                    memcmp(k1, k2, bkey_bytes(&k1->k)) ||
                                    l1 != l2, c,
                        sb_clean_btree_root_mismatch,
                        "superblock btree root %u doesn't match journal after clean shutdown\n"
                        "sb:      l=%u %s\n"
                        "journal: l=%u %s\n", i,
                        l1, buf1.buf,
                        l2, buf2.buf);
        }
fsck_err:
        printbuf_exit(&buf2);
        printbuf_exit(&buf1);
        return ret;
}

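/*
 * Return a kmemdup()ed copy of the superblock's clean section, validated the
 * same way a journal entry would be. Returns NULL (and clears the clean flag)
 * if the superblock claims to be clean but the section is missing, or an
 * ERR_PTR on allocation or validation failure.
 */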
struct bch_sb_field_clean *bch2_read_superblock_clean(struct bch_fs *c)
{
        struct bch_sb_field_clean *clean, *sb_clean;
        int ret;

        mutex_lock(&c->sb_lock);
        sb_clean = bch2_sb_field_get(c->disk_sb.sb, clean);

        if (fsck_err_on(!sb_clean, c,
                        sb_clean_missing,
                        "superblock marked clean but clean section not present")) {
                SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
                c->sb.clean = false;
                mutex_unlock(&c->sb_lock);
                return NULL;
        }

        clean = kmemdup(sb_clean, vstruct_bytes(&sb_clean->field),
                        GFP_KERNEL);
        if (!clean) {
                mutex_unlock(&c->sb_lock);
                return ERR_PTR(-BCH_ERR_ENOMEM_read_superblock_clean);
        }

        ret = bch2_sb_clean_validate_late(c, clean, READ);
        if (ret) {
                /* don't leak the copy we just made on a validate error: */
                kfree(clean);
                mutex_unlock(&c->sb_lock);
                return ERR_PTR(ret);
        }

        mutex_unlock(&c->sb_lock);

        return clean;
fsck_err:
        mutex_unlock(&c->sb_lock);
        return ERR_PTR(ret);
}

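/*
 * Carve a zeroed jset_entry of at least @size bytes out of the buffer at
 * *end, and advance *end past it.
 */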
static struct jset_entry *jset_entry_init(struct jset_entry **end, size_t size)
{
        struct jset_entry *entry = *end;
        unsigned u64s = DIV_ROUND_UP(size, sizeof(u64));

        memset(entry, 0, u64s * sizeof(u64));
        /*
         * The u64s field counts from the start of data, ignoring the shared
         * fields.
         */
        entry->u64s = cpu_to_le16(u64s - 1);

        *end = vstruct_next(*end);
        return entry;
}

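/*
 * Emit the jset entries shared by journal writes and the superblock clean
 * section: filesystem usage (inode count, key version, persistent reserves),
 * per-replicas data usage, per-device usage, and the read/write IO clocks.
 * @journal_seq selects which usage accumulator gets folded into the base
 * counters first; 0 (as used at clean shutdown) means fold all of them.
 */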
void bch2_journal_super_entries_add_common(struct bch_fs *c,
                                           struct jset_entry **end,
                                           u64 journal_seq)
{
        struct bch_dev *ca;
        unsigned i, dev;

        percpu_down_read(&c->mark_lock);

        if (!journal_seq) {
                for (i = 0; i < ARRAY_SIZE(c->usage); i++)
                        bch2_fs_usage_acc_to_base(c, i);
        } else {
                bch2_fs_usage_acc_to_base(c, journal_seq & JOURNAL_BUF_MASK);
        }

        {
                struct jset_entry_usage *u =
                        container_of(jset_entry_init(end, sizeof(*u)),
                                     struct jset_entry_usage, entry);

                u->entry.type   = BCH_JSET_ENTRY_usage;
                u->entry.btree_id = BCH_FS_USAGE_inodes;
                u->v            = cpu_to_le64(c->usage_base->nr_inodes);
        }

        {
                struct jset_entry_usage *u =
                        container_of(jset_entry_init(end, sizeof(*u)),
                                     struct jset_entry_usage, entry);

                u->entry.type   = BCH_JSET_ENTRY_usage;
                u->entry.btree_id = BCH_FS_USAGE_key_version;
                u->v            = cpu_to_le64(atomic64_read(&c->key_version));
        }

        for (i = 0; i < BCH_REPLICAS_MAX; i++) {
                struct jset_entry_usage *u =
                        container_of(jset_entry_init(end, sizeof(*u)),
                                     struct jset_entry_usage, entry);

                u->entry.type   = BCH_JSET_ENTRY_usage;
                u->entry.btree_id = BCH_FS_USAGE_reserved;
                u->entry.level  = i;
                u->v            = cpu_to_le64(c->usage_base->persistent_reserved[i]);
        }

        for (i = 0; i < c->replicas.nr; i++) {
                struct bch_replicas_entry *e =
                        cpu_replicas_entry(&c->replicas, i);
                struct jset_entry_data_usage *u =
                        container_of(jset_entry_init(end, sizeof(*u) + e->nr_devs),
                                     struct jset_entry_data_usage, entry);

                u->entry.type   = BCH_JSET_ENTRY_data_usage;
                u->v            = cpu_to_le64(c->usage_base->replicas[i]);
                unsafe_memcpy(&u->r, e, replicas_entry_bytes(e),
                              "embedded variable length struct");
        }

        for_each_member_device(ca, c, dev) {
                unsigned b = sizeof(struct jset_entry_dev_usage) +
                        sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR;
                struct jset_entry_dev_usage *u =
                        container_of(jset_entry_init(end, b),
                                     struct jset_entry_dev_usage, entry);

                u->entry.type = BCH_JSET_ENTRY_dev_usage;
                u->dev = cpu_to_le32(dev);
                u->buckets_ec           = cpu_to_le64(ca->usage_base->buckets_ec);

                for (i = 0; i < BCH_DATA_NR; i++) {
                        u->d[i].buckets = cpu_to_le64(ca->usage_base->d[i].buckets);
                        u->d[i].sectors = cpu_to_le64(ca->usage_base->d[i].sectors);
                        u->d[i].fragmented = cpu_to_le64(ca->usage_base->d[i].fragmented);
                }
        }

        percpu_up_read(&c->mark_lock);

        for (i = 0; i < 2; i++) {
                struct jset_entry_clock *clock =
                        container_of(jset_entry_init(end, sizeof(*clock)),
                                     struct jset_entry_clock, entry);

                clock->entry.type = BCH_JSET_ENTRY_clock;
                clock->rw       = i;
                clock->time     = cpu_to_le64(atomic64_read(&c->io_clock[i].now));
        }
}

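/*
 * .validate hook for the clean section: only the minimum size is checked
 * here; the journal entries it contains are validated separately by
 * bch2_sb_clean_validate_late(), which needs a struct bch_fs.
 */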
static int bch2_sb_clean_validate(struct bch_sb *sb,
                                  struct bch_sb_field *f,
                                  struct printbuf *err)
{
        struct bch_sb_field_clean *clean = field_to_type(f, clean);

        if (vstruct_bytes(&clean->field) < sizeof(*clean)) {
                prt_printf(err, "wrong size (got %zu should be %zu)",
                       vstruct_bytes(&clean->field), sizeof(*clean));
                return -BCH_ERR_invalid_sb_clean;
        }

        return 0;
}

static void bch2_sb_clean_to_text(struct printbuf *out, struct bch_sb *sb,
                                  struct bch_sb_field *f)
{
        struct bch_sb_field_clean *clean = field_to_type(f, clean);
        struct jset_entry *entry;

        prt_printf(out, "flags:          %x",   le32_to_cpu(clean->flags));
        prt_newline(out);
        prt_printf(out, "journal_seq:    %llu", le64_to_cpu(clean->journal_seq));
        prt_newline(out);

        for (entry = clean->start;
             entry != vstruct_end(&clean->field);
             entry = vstruct_next(entry)) {
                if (entry->type == BCH_JSET_ENTRY_btree_keys &&
                    !entry->u64s)
                        continue;

                bch2_journal_entry_to_text(out, NULL, entry);
                prt_newline(out);
        }
}

const struct bch_sb_field_ops bch_sb_field_ops_clean = {
        .validate       = bch2_sb_clean_validate,
        .to_text        = bch2_sb_clean_to_text,
};

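/*
 * Called before going read-write: clear the clean flag, apply any pending
 * superblock version downgrade, and write the superblock immediately so that
 * an unexpectedly changed superblock is caught before we start writing.
 */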
int bch2_fs_mark_dirty(struct bch_fs *c)
{
        int ret;

        /*
         * Unconditionally write superblock, to verify it hasn't changed before
         * we go rw:
         */

        mutex_lock(&c->sb_lock);
        SET_BCH_SB_CLEAN(c->disk_sb.sb, false);

        bch2_sb_maybe_downgrade(c);
        c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALWAYS);

        ret = bch2_write_super(c);
        mutex_unlock(&c->sb_lock);

        return ret;
}

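/*
 * Called at clean shutdown: set the clean flag and populate the clean section
 * with the usage/clock entries and current btree roots, so the next mount can
 * recover them from the superblock instead of replaying the journal.
 */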
void bch2_fs_mark_clean(struct bch_fs *c)
{
        struct bch_sb_field_clean *sb_clean;
        struct jset_entry *entry;
        unsigned u64s;
        int ret;

        mutex_lock(&c->sb_lock);
        if (BCH_SB_CLEAN(c->disk_sb.sb))
                goto out;

        SET_BCH_SB_CLEAN(c->disk_sb.sb, true);

        c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
        c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_metadata);
        c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_extents_above_btree_updates));
        c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_btree_updates_journalled));

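        /*
         * entry_u64s_reserved is the space the journal keeps reserved for the
         * same entries we emit below (btree roots, usage, clocks), so the
         * field header plus that reservation bounds the size we need here.
         */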
        u64s = sizeof(*sb_clean) / sizeof(u64) + c->journal.entry_u64s_reserved;

        sb_clean = bch2_sb_field_resize(&c->disk_sb, clean, u64s);
        if (!sb_clean) {
                bch_err(c, "error resizing superblock while setting filesystem clean");
                goto out;
        }

        sb_clean->flags         = 0;
        sb_clean->journal_seq   = cpu_to_le64(atomic64_read(&c->journal.seq));

        /* Trying to catch outstanding bug: */
        BUG_ON(le64_to_cpu(sb_clean->journal_seq) > S64_MAX);

        entry = sb_clean->start;
        bch2_journal_super_entries_add_common(c, &entry, 0);
        entry = bch2_btree_roots_to_journal_entries(c, entry, 0);
        BUG_ON((void *) entry > vstruct_end(&sb_clean->field));

        memset(entry, 0,
               vstruct_end(&sb_clean->field) - (void *) entry);

        /*
         * this should be in the write path, and we should be validating every
         * superblock section:
         */
        ret = bch2_sb_clean_validate_late(c, sb_clean, WRITE);
        if (ret) {
                bch_err(c, "error writing marking filesystem clean: validate error");
                goto out;
        }

        bch2_write_super(c);
out:
        mutex_unlock(&c->sb_lock);
}