// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_iter.h"
#include "journal_seq_blacklist.h"
#include "super-io.h"

#include <linux/eytzinger.h>

/*
 * journal_seq_blacklist machinery:
 *
 * To guarantee the order of btree updates after a crash, we need to detect
 * when a btree node entry (bset) is newer than the newest journal entry that
 * was successfully written, and ignore it - effectively ignoring any btree
 * updates that didn't make it into the journal.
 *
 * If we didn't do this, we might have two btree nodes, a and b, both with
 * updates that weren't written to the journal yet: if b was updated after a,
 * but only b was flushed and not a - oops; on recovery we'll find that the
 * updates to b happened, but not the updates to a that happened before it.
 *
 * Ignoring bsets that are newer than the newest journal entry is always safe,
 * because everything they contain will also have been journalled - and must
 * still be present in the journal on disk until a journal entry has been
 * written _after_ that bset was written.
 *
 * To accomplish this, bsets record the newest journal sequence number they
 * contain updates for; then, on startup, the btree code queries the journal
 * code to ask "Is this sequence number newer than the newest journal entry?
 * If so, ignore it."
 *
 * When this happens, we must blacklist that journal sequence number: the
 * journal must not write any entries with that sequence number, and it must
 * record that the number was blacklisted so that a) on recovery we don't
 * think we have missing journal entries, and b) the btree code continues to
 * ignore that bset until the btree node containing it is rewritten.
 */
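
/*
 * A rough sketch of that flow (the locals here are illustrative, not the
 * actual names used by the btree/recovery code): when a bset's journal
 * sequence number turns out to be newer than the last journal entry that was
 * actually written, recovery blacklists it, and later reads of that btree
 * node keep skipping it:
 *
 *	u64 bset_seq = ...;	// journal seq recorded in the bset (hypothetical)
 *
 *	if (bset_seq > last_written_journal_seq)
 *		ret = bch2_journal_seq_blacklist_add(c, bset_seq, bset_seq + 1);
 *
 *	// later, in the btree node read path:
 *	if (bch2_journal_seq_is_blacklisted(c, bset_seq, true))
 *		;	// ignore this bset's updates
 */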

static unsigned sb_blacklist_u64s(unsigned nr)
{
	struct bch_sb_field_journal_seq_blacklist *bl;

	return (sizeof(*bl) + sizeof(bl->start[0]) * nr) / sizeof(u64);
}
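
/*
 * For example, assuming the current on-disk layout (an 8-byte bch_sb_field
 * header plus 16 bytes per blacklist entry), sb_blacklist_u64s(3) works out
 * to (8 + 16 * 3) / 8 = 7 u64s.
 */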

/*
 * Attempt to merge blacklist entry i with the following entry: if the two
 * ranges touch or overlap, entry i absorbs entry i + 1, which is then
 * deleted, and the superblock field is shrunk by one entry.
 */
static struct bch_sb_field_journal_seq_blacklist *
blacklist_entry_try_merge(struct bch_fs *c,
			  struct bch_sb_field_journal_seq_blacklist *bl,
			  unsigned i)
{
	unsigned nr = blacklist_nr_entries(bl);

	if (le64_to_cpu(bl->start[i].end) >=
	    le64_to_cpu(bl->start[i + 1].start)) {
		bl->start[i].end = bl->start[i + 1].end;
		--nr;

		/* Delete entry i + 1 by shifting down everything after it: */
		memmove(&bl->start[i + 1],
			&bl->start[i + 2],
			sizeof(bl->start[0]) * (nr - i - 1));

		bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
					  sb_blacklist_u64s(nr));
		BUG_ON(!bl);
	}

	return bl;
}

static bool bl_entry_contig_or_overlaps(struct journal_seq_blacklist_entry *e,
					u64 start, u64 end)
{
	return !(end < le64_to_cpu(e->start) || le64_to_cpu(e->end) < start);
}
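
/*
 * E.g. for an entry covering [10, 20): the ranges [5, 10) and [20, 25) are
 * contiguous with it (they touch its endpoints) and [12, 15) overlaps it, so
 * all three satisfy this predicate, while [0, 9) and [21, 30) do not
 * (illustrative values).
 */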

int bch2_journal_seq_blacklist_add(struct bch_fs *c, u64 start, u64 end)
{
	struct bch_sb_field_journal_seq_blacklist *bl;
	unsigned i, nr;
	int ret = 0;

	mutex_lock(&c->sb_lock);
	bl = bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
	nr = blacklist_nr_entries(bl);

	for (i = 0; i < nr; i++) {
		struct journal_seq_blacklist_entry *e =
			bl->start + i;

		if (bl_entry_contig_or_overlaps(e, start, end)) {
			e->start = cpu_to_le64(min(start, le64_to_cpu(e->start)));
			e->end	 = cpu_to_le64(max(end, le64_to_cpu(e->end)));

			if (i + 1 < nr)
				bl = blacklist_entry_try_merge(c, bl, i);
			if (i)
				bl = blacklist_entry_try_merge(c, bl, i - 1);
			goto out_write_sb;
		}
	}

	bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
				  sb_blacklist_u64s(nr + 1));
	if (!bl) {
		ret = -BCH_ERR_ENOSPC_sb_journal_seq_blacklist;
		goto out;
	}

	bl->start[nr].start	= cpu_to_le64(start);
	bl->start[nr].end	= cpu_to_le64(end);
out_write_sb:
	c->disk_sb.sb->features[0] |= cpu_to_le64(1ULL << BCH_FEATURE_journal_seq_blacklist_v3);

	ret = bch2_write_super(c);
out:
	mutex_unlock(&c->sb_lock);

	return ret ?: bch2_blacklist_table_initialize(c);
}
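
/*
 * Sketch of how merging behaves (values illustrative): with existing entries
 * [10, 20) and [40, 50), a call
 *
 *	ret = bch2_journal_seq_blacklist_add(c, 15, 30);
 *
 * extends the first entry to [10, 30) and leaves [40, 50) alone; adding
 * [30, 40) afterwards would then merge everything into a single [10, 50)
 * entry via blacklist_entry_try_merge().
 */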

static int journal_seq_blacklist_table_cmp(const void *_l, const void *_r)
{
	const struct journal_seq_blacklist_table_entry *l = _l;
	const struct journal_seq_blacklist_table_entry *r = _r;

	return cmp_int(l->start, r->start);
}

bool bch2_journal_seq_is_blacklisted(struct bch_fs *c, u64 seq,
				     bool dirty)
{
	struct journal_seq_blacklist_table *t = c->journal_seq_blacklist_table;
	struct journal_seq_blacklist_table_entry search = { .start = seq };
	int idx;

	if (!t)
		return false;

	idx = eytzinger0_find_le(t->entries, t->nr,
				 sizeof(t->entries[0]),
				 journal_seq_blacklist_table_cmp,
				 &search);
	if (idx < 0)
		return false;

	BUG_ON(t->entries[idx].start > seq);

	if (seq >= t->entries[idx].end)
		return false;

	if (dirty)
		t->entries[idx].dirty = true;
	return true;
}
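
/*
 * The lookup above finds the entry with the greatest start <= seq, then
 * checks whether seq falls inside that entry's half-open range. E.g. with
 * entries [10, 20) and [40, 50): seq 15 and seq 40 are blacklisted, while
 * seq 20 and seq 5 are not (illustrative values).
 */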

int bch2_blacklist_table_initialize(struct bch_fs *c)
{
	struct bch_sb_field_journal_seq_blacklist *bl =
		bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
	struct journal_seq_blacklist_table *t;
	unsigned i, nr = blacklist_nr_entries(bl);

	if (!bl)
		return 0;

	t = kzalloc(sizeof(*t) + sizeof(t->entries[0]) * nr,
		    GFP_KERNEL);
	if (!t)
		return -BCH_ERR_ENOMEM_blacklist_table_init;

	t->nr = nr;

	for (i = 0; i < nr; i++) {
		t->entries[i].start	= le64_to_cpu(bl->start[i].start);
		t->entries[i].end	= le64_to_cpu(bl->start[i].end);
	}

	eytzinger0_sort(t->entries,
			t->nr,
			sizeof(t->entries[0]),
			journal_seq_blacklist_table_cmp,
			NULL);

	kfree(c->journal_seq_blacklist_table);
	c->journal_seq_blacklist_table = t;
	return 0;
}
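
/*
 * Note on layout: eytzinger0_sort() leaves the entries in implicit
 * binary-search-tree order rather than plain sorted order; e.g. for three
 * entries with starts s0 < s1 < s2, the array ends up as { s1, s0, s2 }
 * (root first, children of index i at 2i+1 and 2i+2), which is the order
 * eytzinger0_find_le() above expects.
 */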

static int bch2_sb_journal_seq_blacklist_validate(struct bch_sb *sb,
						  struct bch_sb_field *f,
						  struct printbuf *err)
{
	struct bch_sb_field_journal_seq_blacklist *bl =
		field_to_type(f, journal_seq_blacklist);
	unsigned i, nr = blacklist_nr_entries(bl);

	for (i = 0; i < nr; i++) {
		struct journal_seq_blacklist_entry *e = bl->start + i;

		if (le64_to_cpu(e->start) >=
		    le64_to_cpu(e->end)) {
			prt_printf(err, "entry %u start >= end (%llu >= %llu)",
				   i, le64_to_cpu(e->start), le64_to_cpu(e->end));
			return -BCH_ERR_invalid_sb_journal_seq_blacklist;
		}

		if (i + 1 < nr &&
		    le64_to_cpu(e[0].end) >
		    le64_to_cpu(e[1].start)) {
			prt_printf(err, "entry %u out of order with next entry (%llu > %llu)",
				   i + 1, le64_to_cpu(e[0].end), le64_to_cpu(e[1].start));
			return -BCH_ERR_invalid_sb_journal_seq_blacklist;
		}
	}

	return 0;
}

static void bch2_sb_journal_seq_blacklist_to_text(struct printbuf *out,
						  struct bch_sb *sb,
						  struct bch_sb_field *f)
{
	struct bch_sb_field_journal_seq_blacklist *bl =
		field_to_type(f, journal_seq_blacklist);
	struct journal_seq_blacklist_entry *i;
	unsigned nr = blacklist_nr_entries(bl);

	for (i = bl->start; i < bl->start + nr; i++) {
		if (i != bl->start)
			prt_printf(out, " ");

		prt_printf(out, "%llu-%llu",
			   le64_to_cpu(i->start),
			   le64_to_cpu(i->end));
	}
	prt_newline(out);
}
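
/*
 * E.g. two entries covering [1, 2) and [5, 7) are printed as "1-2 5-7"
 * followed by a newline.
 */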

const struct bch_sb_field_ops bch_sb_field_ops_journal_seq_blacklist = {
	.validate	= bch2_sb_journal_seq_blacklist_validate,
	.to_text	= bch2_sb_journal_seq_blacklist_to_text
};
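
/*
 * Garbage collect blacklist entries that are no longer referenced: walk
 * every btree node, so that by the end of the walk every blacklisted
 * sequence number still present in some bset has had its table entry marked
 * dirty (via bch2_journal_seq_is_blacklisted(..., true) in the node read
 * path); then drop every entry that was never marked dirty and shrink the
 * superblock field to match.
 */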
void bch2_blacklist_entries_gc(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs,
					journal_seq_blacklist_gc_work);
	struct journal_seq_blacklist_table *t;
	struct bch_sb_field_journal_seq_blacklist *bl;
	struct journal_seq_blacklist_entry *src, *dst;
	struct btree_trans *trans = bch2_trans_get(c);
	unsigned i, nr, new_nr;
	int ret;

	for (i = 0; i < BTREE_ID_NR; i++) {
		struct btree_iter iter;
		struct btree *b;

		bch2_trans_node_iter_init(trans, &iter, i, POS_MIN,
					  0, 0, BTREE_ITER_PREFETCH);
retry:
		bch2_trans_begin(trans);

		b = bch2_btree_iter_peek_node(&iter);

		while (!(ret = PTR_ERR_OR_ZERO(b)) &&
		       b &&
		       !test_bit(BCH_FS_stopping, &c->flags))
			b = bch2_btree_iter_next_node(&iter);

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			goto retry;

		bch2_trans_iter_exit(trans, &iter);
	}

	bch2_trans_put(trans);
	if (ret)
		return;

	mutex_lock(&c->sb_lock);
	bl = bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
	if (!bl)
		goto out;

	nr = blacklist_nr_entries(bl);
	dst = bl->start;

	t = c->journal_seq_blacklist_table;
	BUG_ON(nr != t->nr);

	for (src = bl->start, i = eytzinger0_first(t->nr);
	     src < bl->start + nr;
	     src++, i = eytzinger0_next(i, nr)) {
		BUG_ON(t->entries[i].start	!= le64_to_cpu(src->start));
		BUG_ON(t->entries[i].end	!= le64_to_cpu(src->end));

		/* Keep only the entries some bset still refers to: */
		if (t->entries[i].dirty)
			*dst++ = *src;
	}

	new_nr = dst - bl->start;

	bch_info(c, "nr blacklist entries was %u, now %u", nr, new_nr);

	if (new_nr != nr) {
		bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
				new_nr ? sb_blacklist_u64s(new_nr) : 0);
		BUG_ON(new_nr && !bl);

		if (!new_nr)
			c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_journal_seq_blacklist_v3));

		bch2_write_super(c);
	}
out:
	mutex_unlock(&c->sb_lock);
}