// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "error.h"
#include "journal_io.h"
#include "replicas.h"
#include "sb-clean.h"
#include "super-io.h"

/*
 * Btree roots, and a few other things, are recovered from the journal after an
 * unclean shutdown - but after a clean shutdown, to avoid having to read the
 * journal, we can store them in the superblock.
 *
 * bch_sb_field_clean simply contains a list of journal entries, stored exactly
 * as they would be in the journal:
 */

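/*
 * Validate every journal entry stored in the clean section, using the same
 * checks applied to entries read from the journal itself:
 */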
int bch2_sb_clean_validate_late(struct bch_fs *c, struct bch_sb_field_clean *clean,
                                int write)
{
        struct jset_entry *entry;
        int ret;

        for (entry = clean->start;
             entry < (struct jset_entry *) vstruct_end(&clean->field);
             entry = vstruct_next(entry)) {
                ret = bch2_journal_entry_validate(c, NULL, entry,
                                                  le16_to_cpu(c->disk_sb.sb->version),
                                                  BCH_SB_BIG_ENDIAN(c->disk_sb.sb),
                                                  write);
                if (ret)
                        return ret;
        }

        return 0;
}

static struct bkey_i *btree_root_find(struct bch_fs *c,
                                      struct bch_sb_field_clean *clean,
                                      struct jset *j,
                                      enum btree_id id, unsigned *level)
{
        struct bkey_i *k;
        struct jset_entry *entry, *start, *end;

        if (clean) {
                start = clean->start;
                end = vstruct_end(&clean->field);
        } else {
                start = j->start;
                end = vstruct_last(j);
        }

        for (entry = start; entry < end; entry = vstruct_next(entry))
                if (entry->type == BCH_JSET_ENTRY_btree_root &&
                    entry->btree_id == id)
                        goto found;

        return NULL;
found:
        if (!entry->u64s)
                return ERR_PTR(-EINVAL);

        k = entry->start;
        *level = entry->level;
        return k;
}

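/*
 * Cross-check the clean section against the most recent journal entry: the
 * journal sequence number and the btree roots must match. On a sequence
 * mismatch the clean section is discarded and recovery proceeds from the
 * journal instead:
 */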
int bch2_verify_superblock_clean(struct bch_fs *c,
                                 struct bch_sb_field_clean **cleanp,
                                 struct jset *j)
{
        unsigned i;
        struct bch_sb_field_clean *clean = *cleanp;
        struct printbuf buf1 = PRINTBUF;
        struct printbuf buf2 = PRINTBUF;
        int ret = 0;

        if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c,
                        "superblock journal seq (%llu) doesn't match journal (%llu) after clean shutdown",
                        le64_to_cpu(clean->journal_seq),
                        le64_to_cpu(j->seq))) {
                kfree(clean);
                *cleanp = NULL;
                return 0;
        }

        for (i = 0; i < BTREE_ID_NR; i++) {
                struct bkey_i *k1, *k2;
                unsigned l1 = 0, l2 = 0;

                k1 = btree_root_find(c, clean, NULL, i, &l1);
                k2 = btree_root_find(c, NULL, j, i, &l2);

                if (!k1 && !k2)
                        continue;

                printbuf_reset(&buf1);
                printbuf_reset(&buf2);

                if (k1)
                        bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(k1));
                else
                        prt_printf(&buf1, "(none)");

                if (k2)
                        bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(k2));
                else
                        prt_printf(&buf2, "(none)");

                mustfix_fsck_err_on(!k1 || !k2 ||
                                    IS_ERR(k1) ||
                                    IS_ERR(k2) ||
                                    k1->k.u64s != k2->k.u64s ||
                                    memcmp(k1, k2, bkey_bytes(&k1->k)) ||
                                    l1 != l2, c,
                        "superblock btree root %u doesn't match journal after clean shutdown\n"
                        "sb:      l=%u %s\n"
                        "journal: l=%u %s\n", i,
                        l1, buf1.buf,
                        l2, buf2.buf);
        }
fsck_err:
        printbuf_exit(&buf2);
        printbuf_exit(&buf1);
        return ret;
}

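/*
 * Read a copy of the clean section from the superblock, validating it before
 * handing it to recovery:
 */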
struct bch_sb_field_clean *bch2_read_superblock_clean(struct bch_fs *c)
{
        struct bch_sb_field_clean *clean, *sb_clean;
        int ret;

        mutex_lock(&c->sb_lock);
        sb_clean = bch2_sb_field_get(c->disk_sb.sb, clean);

        if (fsck_err_on(!sb_clean, c,
                        "superblock marked clean but clean section not present")) {
                SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
                c->sb.clean = false;
                mutex_unlock(&c->sb_lock);
                return NULL;
        }

        clean = kmemdup(sb_clean, vstruct_bytes(&sb_clean->field),
                        GFP_KERNEL);
        if (!clean) {
                mutex_unlock(&c->sb_lock);
                return ERR_PTR(-BCH_ERR_ENOMEM_read_superblock_clean);
        }

        ret = bch2_sb_clean_validate_late(c, clean, READ);
        if (ret) {
                mutex_unlock(&c->sb_lock);
                return ERR_PTR(ret);
        }

        mutex_unlock(&c->sb_lock);

        return clean;
fsck_err:
        mutex_unlock(&c->sb_lock);
        return ERR_PTR(ret);
}

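/* Append a zeroed journal entry of the given size at *end, advancing *end: */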
static struct jset_entry *jset_entry_init(struct jset_entry **end, size_t size)
{
        struct jset_entry *entry = *end;
        unsigned u64s = DIV_ROUND_UP(size, sizeof(u64));

        memset(entry, 0, u64s * sizeof(u64));
        /*
         * The u64s field counts from the start of data, ignoring the shared
         * fields.
         */
        entry->u64s = cpu_to_le16(u64s - 1);

        *end = vstruct_next(*end);
        return entry;
}

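/*
 * Emit the journal entries describing filesystem usage, per-device usage and
 * the IO clocks - written with journal entries and, via bch2_fs_mark_clean(),
 * into the superblock's clean section:
 */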
void bch2_journal_super_entries_add_common(struct bch_fs *c,
                                           struct jset_entry **end,
                                           u64 journal_seq)
{
        struct bch_dev *ca;
        unsigned i, dev;

        percpu_down_read(&c->mark_lock);

        if (!journal_seq) {
                for (i = 0; i < ARRAY_SIZE(c->usage); i++)
                        bch2_fs_usage_acc_to_base(c, i);
        } else {
                bch2_fs_usage_acc_to_base(c, journal_seq & JOURNAL_BUF_MASK);
        }

        {
                struct jset_entry_usage *u =
                        container_of(jset_entry_init(end, sizeof(*u)),
                                     struct jset_entry_usage, entry);

                u->entry.type = BCH_JSET_ENTRY_usage;
                u->entry.btree_id = BCH_FS_USAGE_inodes;
                u->v = cpu_to_le64(c->usage_base->nr_inodes);
        }

        {
                struct jset_entry_usage *u =
                        container_of(jset_entry_init(end, sizeof(*u)),
                                     struct jset_entry_usage, entry);

                u->entry.type = BCH_JSET_ENTRY_usage;
                u->entry.btree_id = BCH_FS_USAGE_key_version;
                u->v = cpu_to_le64(atomic64_read(&c->key_version));
        }

        for (i = 0; i < BCH_REPLICAS_MAX; i++) {
                struct jset_entry_usage *u =
                        container_of(jset_entry_init(end, sizeof(*u)),
                                     struct jset_entry_usage, entry);

                u->entry.type = BCH_JSET_ENTRY_usage;
                u->entry.btree_id = BCH_FS_USAGE_reserved;
                u->entry.level = i;
                u->v = cpu_to_le64(c->usage_base->persistent_reserved[i]);
        }

        for (i = 0; i < c->replicas.nr; i++) {
                struct bch_replicas_entry *e =
                        cpu_replicas_entry(&c->replicas, i);
                struct jset_entry_data_usage *u =
                        container_of(jset_entry_init(end, sizeof(*u) + e->nr_devs),
                                     struct jset_entry_data_usage, entry);

                u->entry.type = BCH_JSET_ENTRY_data_usage;
                u->v = cpu_to_le64(c->usage_base->replicas[i]);
                unsafe_memcpy(&u->r, e, replicas_entry_bytes(e),
                              "embedded variable length struct");
        }

        for_each_member_device(ca, c, dev) {
                unsigned b = sizeof(struct jset_entry_dev_usage) +
                        sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR;
                struct jset_entry_dev_usage *u =
                        container_of(jset_entry_init(end, b),
                                     struct jset_entry_dev_usage, entry);

                u->entry.type = BCH_JSET_ENTRY_dev_usage;
                u->dev = cpu_to_le32(dev);
                u->buckets_ec = cpu_to_le64(ca->usage_base->buckets_ec);

                for (i = 0; i < BCH_DATA_NR; i++) {
                        u->d[i].buckets = cpu_to_le64(ca->usage_base->d[i].buckets);
                        u->d[i].sectors = cpu_to_le64(ca->usage_base->d[i].sectors);
                        u->d[i].fragmented = cpu_to_le64(ca->usage_base->d[i].fragmented);
                }
        }

        percpu_up_read(&c->mark_lock);

        for (i = 0; i < 2; i++) {
                struct jset_entry_clock *clock =
                        container_of(jset_entry_init(end, sizeof(*clock)),
                                     struct jset_entry_clock, entry);

                clock->entry.type = BCH_JSET_ENTRY_clock;
                clock->rw = i;
                clock->time = cpu_to_le64(atomic64_read(&c->io_clock[i].now));
        }
}

static int bch2_sb_clean_validate(struct bch_sb *sb,
                                  struct bch_sb_field *f,
                                  struct printbuf *err)
{
        struct bch_sb_field_clean *clean = field_to_type(f, clean);

        if (vstruct_bytes(&clean->field) < sizeof(*clean)) {
                prt_printf(err, "wrong size (got %zu should be %zu)",
                           vstruct_bytes(&clean->field), sizeof(*clean));
                return -BCH_ERR_invalid_sb_clean;
        }

        return 0;
}

static void bch2_sb_clean_to_text(struct printbuf *out, struct bch_sb *sb,
                                  struct bch_sb_field *f)
{
        struct bch_sb_field_clean *clean = field_to_type(f, clean);
        struct jset_entry *entry;

        prt_printf(out, "flags: %x", le32_to_cpu(clean->flags));
        prt_newline(out);
        prt_printf(out, "journal_seq: %llu", le64_to_cpu(clean->journal_seq));
        prt_newline(out);

        for (entry = clean->start;
             entry != vstruct_end(&clean->field);
             entry = vstruct_next(entry)) {
                if (entry->type == BCH_JSET_ENTRY_btree_keys &&
                    !entry->u64s)
                        continue;

                bch2_journal_entry_to_text(out, NULL, entry);
                prt_newline(out);
        }
}

const struct bch_sb_field_ops bch_sb_field_ops_clean = {
        .validate       = bch2_sb_clean_validate,
        .to_text        = bch2_sb_clean_to_text,
};

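/* Clear the clean flag before the filesystem goes read-write: */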
int bch2_fs_mark_dirty(struct bch_fs *c)
{
        int ret;

        /*
         * Unconditionally write superblock, to verify it hasn't changed before
         * we go rw:
         */

        mutex_lock(&c->sb_lock);
        SET_BCH_SB_CLEAN(c->disk_sb.sb, false);

        bch2_sb_maybe_downgrade(c);
        c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALWAYS);

        ret = bch2_write_super(c);
        mutex_unlock(&c->sb_lock);

        return ret;
}

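/*
 * At clean shutdown: set the clean flag and write the current btree roots and
 * accounting into the superblock's clean section, so the journal doesn't have
 * to be read on the next mount:
 */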
void bch2_fs_mark_clean(struct bch_fs *c)
{
        struct bch_sb_field_clean *sb_clean;
        struct jset_entry *entry;
        unsigned u64s;
        int ret;

        mutex_lock(&c->sb_lock);
        if (BCH_SB_CLEAN(c->disk_sb.sb))
                goto out;

        SET_BCH_SB_CLEAN(c->disk_sb.sb, true);

        c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
        c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_metadata);
        c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_extents_above_btree_updates));
        c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_btree_updates_journalled));

        u64s = sizeof(*sb_clean) / sizeof(u64) + c->journal.entry_u64s_reserved;

        sb_clean = bch2_sb_field_resize(&c->disk_sb, clean, u64s);
        if (!sb_clean) {
                bch_err(c, "error resizing superblock while setting filesystem clean");
                goto out;
        }

        sb_clean->flags         = 0;
        sb_clean->journal_seq   = cpu_to_le64(atomic64_read(&c->journal.seq));

        /* Trying to catch outstanding bug: */
        BUG_ON(le64_to_cpu(sb_clean->journal_seq) > S64_MAX);

        entry = sb_clean->start;
        bch2_journal_super_entries_add_common(c, &entry, 0);
        entry = bch2_btree_roots_to_journal_entries(c, entry, entry);
        BUG_ON((void *) entry > vstruct_end(&sb_clean->field));

        memset(entry, 0,
               vstruct_end(&sb_clean->field) - (void *) entry);

        /*
         * this should be in the write path, and we should be validating every
         * superblock section:
         */
        ret = bch2_sb_clean_validate_late(c, sb_clean, WRITE);
        if (ret) {
                bch_err(c, "error writing marking filesystem clean: validate error");
                goto out;
        }

        bch2_write_super(c);
out:
        mutex_unlock(&c->sb_lock);
}