1 // SPDX-License-Identifier: GPL-2.0
4 #include "btree_iter.h"
6 #include "journal_seq_blacklist.h"
10 * journal_seq_blacklist machinery:
12 * To guarantee order of btree updates after a crash, we need to detect when a
13 * btree node entry (bset) is newer than the newest journal entry that was
14 * successfully written, and ignore it - effectively ignoring any btree updates
15 * that didn't make it into the journal.
17 * If we didn't do this, we might have two btree nodes, a and b, both with
18 * updates that weren't written to the journal yet: if b was updated after a,
19 * but b was flushed and not a - oops; on recovery we'll find that the updates
20 * to b happened, but not the updates to a that happened before it.
22 * Ignoring bsets that are newer than the newest journal entry is always safe,
23 * because everything they contain will also have been journalled - and must
24 * still be present in the journal on disk until a journal entry has been
25 * written _after_ that bset was written.
27 * To accomplish this, bsets record the newest journal sequence number they
28 * contain updates for; then, on startup, the btree code queries the journal
29 * code to ask "Is this sequence number newer than the newest journal entry? If
32 * When this happens, we must blacklist that journal sequence number: the
33 * journal must not write any entries with that sequence number, and it must
34 * record that it was blacklisted so that a) on recovery we don't think we have
35 * missing journal entries and b) the btree code continues to ignore
36 * that bset, until that btree node is rewritten.
/*
 * sb_blacklist_u64s() - size, in u64 units, of the on-disk superblock
 * blacklist field needed to hold @nr entries: the field header plus
 * @nr start/end pairs, converted to sizeof(u64) units.
 * NOTE(review): braces/blank lines appear elided from this view.
 */
39 static unsigned sb_blacklist_u64s(unsigned nr)
41 struct bch_sb_field_journal_seq_blacklist *bl;
43 return (sizeof(*bl) + sizeof(bl->start[0]) * nr) / sizeof(u64);
/*
 * blacklist_entry_try_merge() - coalesce adjacent blacklist entries.
 *
 * If entry i's range reaches (or overlaps) entry i + 1's start, fold
 * entry i + 1 into entry i, compact the array, and shrink the
 * superblock field by one entry.  Returns the (possibly reallocated)
 * field pointer.
 * NOTE(review): lines are elided from this view — the index parameter
 * (presumably "unsigned i") and parts of the body are not visible;
 * comments below are hedged accordingly.
 */
46 static struct bch_sb_field_journal_seq_blacklist *
47 blacklist_entry_try_merge(struct bch_fs *c,
48 struct bch_sb_field_journal_seq_blacklist *bl,
51 unsigned nr = blacklist_nr_entries(bl);
/* ranges overlap or abut: entry i's end reaches entry i + 1's start */
53 if (le64_to_cpu(bl->start[i].end) >=
54 le64_to_cpu(bl->start[i + 1].start)) {
/* absorb i + 1: extend entry i to cover both ranges */
55 bl->start[i].end = bl->start[i + 1].end;
/* compact the array over the absorbed entry (overlapping copy → memmove) */
57 memmove(&bl->start[i],
59 sizeof(bl->start[0]) * (nr - i));
/* presumably nr was decremented on an elided line — shrink the sb field */
61 bl = bch2_sb_resize_journal_seq_blacklist(&c->disk_sb,
62 sb_blacklist_u64s(nr));
/*
 * bch2_journal_seq_blacklist_add() - record journal sequence numbers
 * @start..@end as blacklisted in the superblock.
 *
 * Under sb_lock: if an identical entry already exists, nothing to do; if
 * the new range covers an existing entry, widen that entry in place and
 * try to merge it with its neighbours; otherwise append a new entry.
 * Sets the journal_seq_blacklist_v3 feature bit and persists the
 * superblock.  Returns the bch2_write_super() result (0 on success).
 * NOTE(review): several lines are elided from this view (declarations of
 * nr/i/ret, loop break/goto statements, the resize-failure path) —
 * comments below are hedged accordingly.
 */
69 int bch2_journal_seq_blacklist_add(struct bch_fs *c, u64 start, u64 end)
71 struct bch_sb_field_journal_seq_blacklist *bl;
/* all superblock field manipulation happens under sb_lock */
75 mutex_lock(&c->sb_lock);
76 bl = bch2_sb_get_journal_seq_blacklist(c->disk_sb.sb);
77 nr = blacklist_nr_entries(bl);
/* scan existing entries for a duplicate or a containable entry */
80 for (i = 0; i < nr; i++) {
81 struct journal_seq_blacklist_entry *e =
/* exact match: range already recorded (presumably followed by an elided exit) */
84 if (start == le64_to_cpu(e->start) &&
85 end == le64_to_cpu(e->end))
/* new range covers this entry: widen it in place instead of appending */
88 if (start <= le64_to_cpu(e->start) &&
89 end >= le64_to_cpu(e->end)) {
90 e->start = cpu_to_le64(start);
91 e->end = cpu_to_le64(end);
/* after widening, the entry may now touch its neighbours — merge both sides */
94 bl = blacklist_entry_try_merge(c,
97 bl = blacklist_entry_try_merge(c,
/* no existing entry matched: grow the field by one and append */
104 bl = bch2_sb_resize_journal_seq_blacklist(&c->disk_sb,
105 sb_blacklist_u64s(nr + 1));
111 bl->start[nr].start = cpu_to_le64(start);
112 bl->start[nr].end = cpu_to_le64(end);
/* advertise that this filesystem carries v3 blacklist entries */
114 c->disk_sb.sb->features[0] |=
115 1ULL << BCH_FEATURE_journal_seq_blacklist_v3;
117 ret = bch2_write_super(c);
119 mutex_unlock(&c->sb_lock);
/*
 * Comparison callback for the eytzinger-ordered in-memory blacklist
 * table: orders entries by start sequence number (via cmp_int, so the
 * result is -1/0/1 without overflow).
 */
124 static int journal_seq_blacklist_table_cmp(const void *_l,
125 const void *_r, size_t size)
127 const struct journal_seq_blacklist_table_entry *l = _l;
128 const struct journal_seq_blacklist_table_entry *r = _r;
130 return cmp_int(l->start, r->start);
/*
 * bch2_journal_seq_is_blacklisted() - test whether @seq falls inside a
 * blacklisted range.
 *
 * Finds the greatest table entry whose start is <= @seq via eytzinger
 * search; @seq is blacklisted iff it lies below that entry's end.
 * NOTE(review): a trailing parameter and the return statements are
 * elided from this view — it appears to control whether the matching
 * entry is marked dirty (consumed by bch2_blacklist_entries_gc());
 * confirm against the full source.
 */
133 bool bch2_journal_seq_is_blacklisted(struct bch_fs *c, u64 seq,
136 struct journal_seq_blacklist_table *t = c->journal_seq_blacklist_table;
137 struct journal_seq_blacklist_table_entry search = { .start = seq };
/* largest entry with start <= seq, if any */
143 idx = eytzinger0_find_le(t->entries, t->nr,
144 sizeof(t->entries[0]),
145 journal_seq_blacklist_table_cmp,
/* find_le postcondition: the found entry cannot start after seq */
150 BUG_ON(t->entries[idx].start > seq);
/* seq at or past the entry's end → not inside this range */
152 if (seq >= t->entries[idx].end)
/* remember this entry is still referenced, so gc won't drop it */
156 t->entries[idx].dirty = true;
/*
 * bch2_blacklist_table_initialize() - build the in-memory blacklist
 * lookup table from the superblock field.
 *
 * Allocates a flat table of (start, end) pairs, copies entries out of
 * the superblock (converting from little endian to host order), sorts
 * them into eytzinger layout for fast lookup, and installs the table in
 * @c.  Must only be called once (BUG_ON enforces no existing table).
 * NOTE(review): the kzalloc-failure return and the early-out when the
 * sb field is absent are elided from this view.
 */
160 int bch2_blacklist_table_initialize(struct bch_fs *c)
162 struct bch_sb_field_journal_seq_blacklist *bl =
163 bch2_sb_get_journal_seq_blacklist(c->disk_sb.sb);
164 struct journal_seq_blacklist_table *t;
165 unsigned i, nr = blacklist_nr_entries(bl);
167 BUG_ON(c->journal_seq_blacklist_table);
/* table header plus nr fixed-size entries, zero-initialized */
172 t = kzalloc(sizeof(*t) + sizeof(t->entries[0]) * nr,
/* copy sb entries, converting from on-disk little endian */
179 for (i = 0; i < nr; i++) {
180 t->entries[i].start = le64_to_cpu(bl->start[i].start);
181 t->entries[i].end = le64_to_cpu(bl->start[i].end);
/* eytzinger layout enables cache-friendly binary search in lookups */
184 eytzinger0_sort(t->entries,
186 sizeof(t->entries[0]),
187 journal_seq_blacklist_table_cmp,
190 c->journal_seq_blacklist_table = t;
/*
 * Superblock field validation: every entry must have start < end, and
 * entries must be sorted with non-overlapping ranges.  Returns an error
 * string describing the first violation (the success return and the
 * function's return-type line are elided from this view — presumably
 * NULL on success, per the sb_field_ops validate convention).
 */
195 bch2_sb_journal_seq_blacklist_validate(struct bch_sb *sb,
196 struct bch_sb_field *f)
198 struct bch_sb_field_journal_seq_blacklist *bl =
199 field_to_type(f, journal_seq_blacklist);
200 struct journal_seq_blacklist_entry *i;
201 unsigned nr = blacklist_nr_entries(bl);
203 for (i = bl->start; i < bl->start + nr; i++) {
/* each range must be non-empty: start strictly before end */
204 if (le64_to_cpu(i->start) >=
206 return "entry start >= end";
/* consecutive entries must not overlap (sorted, disjoint ranges) */
208 if (i + 1 < bl->start + nr &&
209 le64_to_cpu(i[0].end) >
210 le64_to_cpu(i[1].start))
211 return "entries out of order";
/*
 * Human-readable dump of the superblock blacklist field: prints each
 * entry as "start-end" (an entry separator appears to be on a line
 * elided from this view).
 */
217 static void bch2_sb_journal_seq_blacklist_to_text(struct printbuf *out,
219 struct bch_sb_field *f)
221 struct bch_sb_field_journal_seq_blacklist *bl =
222 field_to_type(f, journal_seq_blacklist);
223 struct journal_seq_blacklist_entry *i;
224 unsigned nr = blacklist_nr_entries(bl);
226 for (i = bl->start; i < bl->start + nr; i++) {
230 pr_buf(out, "%llu-%llu",
231 le64_to_cpu(i->start),
232 le64_to_cpu(i->end));
/*
 * Dispatch table for the journal_seq_blacklist superblock field type:
 * wires up the validate and to_text hooks defined above.
 */
236 const struct bch_sb_field_ops bch_sb_field_ops_journal_seq_blacklist = {
237 .validate = bch2_sb_journal_seq_blacklist_validate,
238 .to_text = bch2_sb_journal_seq_blacklist_to_text
/*
 * bch2_blacklist_entries_gc() - worker that drops blacklist entries no
 * longer referenced by any btree node.
 *
 * First walks every node of every btree; reading nodes presumably
 * consults the blacklist (via bch2_journal_seq_is_blacklisted(), which
 * sets entry->dirty for entries still in use) — TODO confirm the exact
 * marking path against the full source.  Then, under sb_lock, entries
 * whose table slot was not marked dirty are dropped from the superblock
 * field; when none remain the feature bit is cleared.
 * NOTE(review): many lines are elided from this view (ret declaration,
 * the dst copy in the compaction loop, early-out paths, write_super) —
 * comments below are hedged accordingly.
 */
241 void bch2_blacklist_entries_gc(struct work_struct *work)
243 struct bch_fs *c = container_of(work, struct bch_fs,
244 journal_seq_blacklist_gc_work);
245 struct journal_seq_blacklist_table *t;
246 struct bch_sb_field_journal_seq_blacklist *bl;
247 struct journal_seq_blacklist_entry *src, *dst;
248 struct btree_trans trans;
249 unsigned i, nr, new_nr;
252 bch2_trans_init(&trans, c, 0, 0);
/* walk every btree so in-use blacklist entries get marked dirty */
254 for (i = 0; i < BTREE_ID_NR; i++) {
255 struct btree_iter *iter;
258 for_each_btree_node(&trans, iter, i, POS_MIN,
259 BTREE_ITER_PREFETCH, b)
/* filesystem shutting down: abandon the walk (an elided return follows) */
260 if (test_bit(BCH_FS_STOPPING, &c->flags)) {
261 bch2_trans_exit(&trans);
264 bch2_trans_iter_free(&trans, iter);
267 ret = bch2_trans_exit(&trans);
/* superblock field and feature bit are modified under sb_lock */
271 mutex_lock(&c->sb_lock);
272 bl = bch2_sb_get_journal_seq_blacklist(c->disk_sb.sb);
276 nr = blacklist_nr_entries(bl);
279 t = c->journal_seq_blacklist_table;
/*
 * Walk the sb entries (sorted) in lockstep with the eytzinger table
 * (in-order traversal yields the same sorted order); BUG_ONs check the
 * in-memory table still mirrors the sb field exactly.
 */
282 for (src = bl->start, i = eytzinger0_first(t->nr);
283 src < bl->start + nr;
284 src++, i = eytzinger0_next(i, nr)) {
285 BUG_ON(t->entries[i].start != le64_to_cpu(src->start));
286 BUG_ON(t->entries[i].end != le64_to_cpu(src->end));
/* dirty ⇒ still referenced by some btree node: keep (copy to dst, elided) */
288 if (t->entries[i].dirty)
292 new_nr = dst - bl->start;
294 bch_info(c, "nr blacklist entries was %u, now %u", nr, new_nr);
/* shrink the field to the surviving entries; 0 u64s deletes it entirely */
297 bl = bch2_sb_resize_journal_seq_blacklist(&c->disk_sb,
298 new_nr ? sb_blacklist_u64s(new_nr) : 0);
/* shrinking cannot fail while entries remain */
299 BUG_ON(new_nr && !bl);
/* no entries left: the feature bit no longer applies */
302 c->disk_sb.sb->features[0] &=
303 ~(1ULL << BCH_FEATURE_journal_seq_blacklist_v3);
308 mutex_unlock(&c->sb_lock);