// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_iter.h"
#include "eytzinger.h"
#include "journal_seq_blacklist.h"
#include "super-io.h"

/*
 * journal_seq_blacklist machinery:
 *
 * To guarantee order of btree updates after a crash, we need to detect when a
 * btree node entry (bset) is newer than the newest journal entry that was
 * successfully written, and ignore it - effectively ignoring any btree updates
 * that didn't make it into the journal.
 *
 * If we didn't do this, we might have two btree nodes, a and b, both with
 * updates that weren't written to the journal yet: if b was updated after a,
 * but b was flushed and not a - oops; on recovery we'll find that the updates
 * to b happened, but not the updates to a that happened before it.
 *
 * Ignoring bsets that are newer than the newest journal entry is always safe,
 * because everything they contain will also have been journalled - and must
 * still be present in the journal on disk until a journal entry has been
 * written _after_ that bset was written.
 *
 * To accomplish this, bsets record the newest journal sequence number they
 * contain updates for; then, on startup, the btree code queries the journal
 * code to ask "Is this sequence number newer than the newest journal entry? If
 * so, ignore it."
 *
 * When this happens, we must blacklist that journal sequence number: the
 * journal must not write any entries with that sequence number, and it must
 * record that it was blacklisted so that a) on recovery we don't think we have
 * missing journal entries and b) so that the btree code continues to ignore
 * that bset, until that btree node is rewritten.
 */
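
/*
 * Illustrative sketch (an editor's addition, not part of the original file):
 * how the two entry points below fit together. journal_seq_blacklist_example()
 * is hypothetical; bch2_journal_seq_blacklist_add() and
 * bch2_journal_seq_is_blacklisted() are the real functions defined in this
 * file. Ranges are half-open: @end is the first sequence number that is *not*
 * blacklisted.
 */
static int __maybe_unused journal_seq_blacklist_example(struct bch_fs *c,
							u64 bset_seq)
{
	/*
	 * Blacklist the single sequence number @bset_seq; this also rebuilds
	 * the in-memory lookup table:
	 */
	int ret = bch2_journal_seq_blacklist_add(c, bset_seq, bset_seq + 1);
	if (ret)
		return ret;

	/* Readers will now ignore bsets with this journal seq: */
	BUG_ON(!bch2_journal_seq_is_blacklisted(c, bset_seq, false));
	return 0;
}
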
static unsigned sb_blacklist_u64s(unsigned nr)
{
	struct bch_sb_field_journal_seq_blacklist *bl;

	return (sizeof(*bl) + sizeof(bl->start[0]) * nr) / sizeof(u64);
}

static struct bch_sb_field_journal_seq_blacklist *
blacklist_entry_try_merge(struct bch_fs *c,
			  struct bch_sb_field_journal_seq_blacklist *bl,
			  unsigned i)
{
	unsigned nr = blacklist_nr_entries(bl);

	if (le64_to_cpu(bl->start[i].end) >=
	    le64_to_cpu(bl->start[i + 1].start)) {
		/*
		 * Entries i and i + 1 are contiguous or overlapping: store
		 * their union in slot i + 1, then shift the tail down one
		 * slot so the merged entry lands in slot i:
		 */
		bl->start[i + 1].start	= bl->start[i].start;
		bl->start[i + 1].end	= cpu_to_le64(max(le64_to_cpu(bl->start[i].end),
							  le64_to_cpu(bl->start[i + 1].end)));
		--nr;
		memmove(&bl->start[i],
			&bl->start[i + 1],
			sizeof(bl->start[0]) * (nr - i));

		bl = bch2_sb_resize_journal_seq_blacklist(&c->disk_sb,
							sb_blacklist_u64s(nr));
		BUG_ON(!bl);
	}

	return bl;
}

static bool bl_entry_contig_or_overlaps(struct journal_seq_blacklist_entry *e,
					u64 start, u64 end)
{
	return !(end < le64_to_cpu(e->start) || le64_to_cpu(e->end) < start);
}

int bch2_journal_seq_blacklist_add(struct bch_fs *c, u64 start, u64 end)
{
	struct bch_sb_field_journal_seq_blacklist *bl;
	unsigned i, nr;
	int ret = 0;

	mutex_lock(&c->sb_lock);
	bl = bch2_sb_get_journal_seq_blacklist(c->disk_sb.sb);
	nr = blacklist_nr_entries(bl);

	for (i = 0; i < nr; i++) {
		struct journal_seq_blacklist_entry *e =
			bl->start + i;

		if (bl_entry_contig_or_overlaps(e, start, end)) {
			e->start = cpu_to_le64(min(start, le64_to_cpu(e->start)));
			e->end	= cpu_to_le64(max(end, le64_to_cpu(e->end)));

			if (i + 1 < nr)
				bl = blacklist_entry_try_merge(c, bl, i);
			if (i)
				bl = blacklist_entry_try_merge(c, bl, i - 1);
			goto out_write_sb;
		}
	}

	bl = bch2_sb_resize_journal_seq_blacklist(&c->disk_sb,
					sb_blacklist_u64s(nr + 1));
	if (!bl) {
		ret = -ENOMEM;
		goto out;
	}

	bl->start[nr].start	= cpu_to_le64(start);
	bl->start[nr].end	= cpu_to_le64(end);
out_write_sb:
	c->disk_sb.sb->features[0] |= cpu_to_le64(1ULL << BCH_FEATURE_journal_seq_blacklist_v3);

	ret = bch2_write_super(c);
out:
	mutex_unlock(&c->sb_lock);

	return ret ?: bch2_blacklist_table_initialize(c);
}

static int journal_seq_blacklist_table_cmp(const void *_l,
					   const void *_r, size_t size)
{
	const struct journal_seq_blacklist_table_entry *l = _l;
	const struct journal_seq_blacklist_table_entry *r = _r;

	return cmp_int(l->start, r->start);
}

/*
 * Returns true if @seq falls in a blacklisted range - i.e. bsets with that
 * journal sequence number must be ignored. If @dirty, also mark the matching
 * entry as still referenced, so bch2_blacklist_entries_gc() won't drop it:
 */
bool bch2_journal_seq_is_blacklisted(struct bch_fs *c, u64 seq,
				     bool dirty)
{
	struct journal_seq_blacklist_table *t = c->journal_seq_blacklist_table;
	struct journal_seq_blacklist_table_entry search = { .start = seq };
	int idx;

	if (!t)
		return false;

	idx = eytzinger0_find_le(t->entries, t->nr,
				 sizeof(t->entries[0]),
				 journal_seq_blacklist_table_cmp,
				 &search);
	if (idx < 0)
		return false;

	BUG_ON(t->entries[idx].start > seq);

	if (seq >= t->entries[idx].end)
		return false;

	if (dirty)
		t->entries[idx].dirty = true;

	return true;
}

int bch2_blacklist_table_initialize(struct bch_fs *c)
{
	struct bch_sb_field_journal_seq_blacklist *bl =
		bch2_sb_get_journal_seq_blacklist(c->disk_sb.sb);
	struct journal_seq_blacklist_table *t;
	unsigned i, nr = blacklist_nr_entries(bl);

	if (!bl)
		return 0;

	t = kzalloc(sizeof(*t) + sizeof(t->entries[0]) * nr,
		    GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	t->nr = nr;

	for (i = 0; i < nr; i++) {
		t->entries[i].start	= le64_to_cpu(bl->start[i].start);
		t->entries[i].end	= le64_to_cpu(bl->start[i].end);
	}

	/* Sort into eytzinger (cache-friendly binary search) order: */
	eytzinger0_sort(t->entries,
			t->nr,
			sizeof(t->entries[0]),
			journal_seq_blacklist_table_cmp,
			NULL);

	kfree(c->journal_seq_blacklist_table);
	c->journal_seq_blacklist_table = t;
	return 0;
}

static int bch2_sb_journal_seq_blacklist_validate(struct bch_sb *sb,
						  struct bch_sb_field *f,
						  struct printbuf *err)
{
	struct bch_sb_field_journal_seq_blacklist *bl =
		field_to_type(f, journal_seq_blacklist);
	unsigned i, nr = blacklist_nr_entries(bl);

	for (i = 0; i < nr; i++) {
		struct journal_seq_blacklist_entry *e = bl->start + i;

		if (le64_to_cpu(e->start) >=
		    le64_to_cpu(e->end)) {
			prt_printf(err, "entry %u start >= end (%llu >= %llu)",
				   i, le64_to_cpu(e->start), le64_to_cpu(e->end));
			return -BCH_ERR_invalid_sb_journal_seq_blacklist;
		}

		if (i + 1 < nr &&
		    le64_to_cpu(e[0].end) >
		    le64_to_cpu(e[1].start)) {
			prt_printf(err, "entry %u out of order with next entry (%llu > %llu)",
				   i + 1, le64_to_cpu(e[0].end), le64_to_cpu(e[1].start));
			return -BCH_ERR_invalid_sb_journal_seq_blacklist;
		}
	}

	return 0;
}

static void bch2_sb_journal_seq_blacklist_to_text(struct printbuf *out,
						  struct bch_sb *sb,
						  struct bch_sb_field *f)
{
	struct bch_sb_field_journal_seq_blacklist *bl =
		field_to_type(f, journal_seq_blacklist);
	struct journal_seq_blacklist_entry *i;
	unsigned nr = blacklist_nr_entries(bl);

	for (i = bl->start; i < bl->start + nr; i++) {
		if (i != bl->start)
			prt_printf(out, " ");

		prt_printf(out, "%llu-%llu",
			   le64_to_cpu(i->start),
			   le64_to_cpu(i->end));
	}
	prt_newline(out);
}

const struct bch_sb_field_ops bch_sb_field_ops_journal_seq_blacklist = {
	.validate	= bch2_sb_journal_seq_blacklist_validate,
	.to_text	= bch2_sb_journal_seq_blacklist_to_text
};

void bch2_blacklist_entries_gc(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs,
					journal_seq_blacklist_gc_work);
	struct journal_seq_blacklist_table *t;
	struct bch_sb_field_journal_seq_blacklist *bl;
	struct journal_seq_blacklist_entry *src, *dst;
	struct btree_trans trans;
	unsigned i, nr, new_nr;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	/*
	 * Walk every btree node: reading a node marks the blacklist table
	 * entries it still references as dirty, via
	 * bch2_journal_seq_is_blacklisted():
	 */
	for (i = 0; i < BTREE_ID_NR; i++) {
		struct btree_iter iter;
		struct btree *b;

		bch2_trans_node_iter_init(&trans, &iter, i, POS_MIN,
					  0, 0, BTREE_ITER_PREFETCH);
retry:
		bch2_trans_begin(&trans);

		b = bch2_btree_iter_peek_node(&iter);

		while (!(ret = PTR_ERR_OR_ZERO(b)) &&
		       b &&
		       !test_bit(BCH_FS_STOPPING, &c->flags))
			b = bch2_btree_iter_next_node(&iter);

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			goto retry;

		bch2_trans_iter_exit(&trans, &iter);
	}

	bch2_trans_exit(&trans);
	if (ret)
		return;

	mutex_lock(&c->sb_lock);
	bl = bch2_sb_get_journal_seq_blacklist(c->disk_sb.sb);
	if (!bl)
		goto out;

	nr = blacklist_nr_entries(bl);
	dst = bl->start;

	t = c->journal_seq_blacklist_table;
	BUG_ON(nr != t->nr);

	/*
	 * Keep only the entries some btree node still references; the
	 * eytzinger in-order traversal visits table entries in the same
	 * sorted order as the superblock field:
	 */
	for (src = bl->start, i = eytzinger0_first(t->nr);
	     src < bl->start + nr;
	     src++, i = eytzinger0_next(i, nr)) {
		BUG_ON(t->entries[i].start	!= le64_to_cpu(src->start));
		BUG_ON(t->entries[i].end	!= le64_to_cpu(src->end));

		if (t->entries[i].dirty)
			*dst++ = *src;
	}

	new_nr = dst - bl->start;

	bch_info(c, "nr blacklist entries was %u, now %u", nr, new_nr);

	if (new_nr != nr) {
		bl = bch2_sb_resize_journal_seq_blacklist(&c->disk_sb,
				new_nr ? sb_blacklist_u64s(new_nr) : 0);
		BUG_ON(new_nr && !bl);

		if (!new_nr)
			c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_journal_seq_blacklist_v3));

		bch2_write_super(c);
	}
out:
	mutex_unlock(&c->sb_lock);
}