// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_iter.h"
#include "eytzinger.h"
#include "journal_seq_blacklist.h"
#include "super-io.h"

/*
 * journal_seq_blacklist machinery:
 *
 * To guarantee order of btree updates after a crash, we need to detect when a
 * btree node entry (bset) is newer than the newest journal entry that was
 * successfully written, and ignore it - effectively ignoring any btree updates
 * that didn't make it into the journal.
 *
 * If we didn't do this, we might have two btree nodes, a and b, both with
 * updates that weren't written to the journal yet: if b was updated after a,
 * but b was flushed and not a - oops; on recovery we'll find that the updates
 * to b happened, but not the updates to a that happened before it.
 *
 * Ignoring bsets that are newer than the newest journal entry is always safe,
 * because everything they contain will also have been journalled - and must
 * still be present in the journal on disk until a journal entry has been
 * written _after_ that bset was written.
 *
 * To accomplish this, bsets record the newest journal sequence number they
 * contain updates for; then, on startup, the btree code queries the journal
 * code to ask "Is this sequence number newer than the newest journal entry? If
 * so, ignore it."
 *
 * When this happens, we must blacklist that journal sequence number: the
 * journal must not write any entries with that sequence number, and it must
 * record that it was blacklisted so that a) on recovery we don't think we have
 * missing journal entries and b) so that the btree code continues to ignore
 * that bset, until that btree node is rewritten.
 */

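/*
 * A sketch of how the btree node read path might use this machinery (the
 * real caller lives in the btree read code, not in this file):
 *
 *	if (bch2_journal_seq_is_blacklisted(c,
 *			le64_to_cpu(bset->journal_seq), true))
 *		drop this bset instead of applying it;
 */

/* Size, in u64s, of the superblock field needed to hold @nr entries: */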
static unsigned sb_blacklist_u64s(unsigned nr)
{
        struct bch_sb_field_journal_seq_blacklist *bl;

        return (sizeof(*bl) + sizeof(bl->start[0]) * nr) / sizeof(u64);
}

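/*
 * If entries @i and @i + 1 overlap or touch, merge them into entry @i and
 * delete entry @i + 1. The superblock field only shrinks here, so the resize
 * must succeed. Caller must hold sb_lock.
 */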
static struct bch_sb_field_journal_seq_blacklist *
blacklist_entry_try_merge(struct bch_fs *c,
                          struct bch_sb_field_journal_seq_blacklist *bl,
                          unsigned i)
{
        unsigned nr = blacklist_nr_entries(bl);

        if (le64_to_cpu(bl->start[i].end) >=
            le64_to_cpu(bl->start[i + 1].start)) {
                bl->start[i].end = bl->start[i + 1].end;
                --nr;
                /* delete entry i + 1, which was just merged into entry i: */
                memmove(&bl->start[i + 1],
                        &bl->start[i + 2],
                        sizeof(bl->start[0]) * (nr - (i + 1)));

                bl = bch2_sb_resize_journal_seq_blacklist(&c->disk_sb,
                                                        sb_blacklist_u64s(nr));
                BUG_ON(!bl);
        }

        return bl;
}

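/*
 * Add the half-open range [@start, @end) to the blacklist in the superblock
 * and write out the superblock; returns 0 on success or -ENOMEM. A range that
 * duplicates or fully contains an existing entry is merged with it; a
 * partially overlapping range is not merged here, just appended as a new
 * entry, so callers presumably add ranges in increasing order.
 */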
int bch2_journal_seq_blacklist_add(struct bch_fs *c, u64 start, u64 end)
{
        struct bch_sb_field_journal_seq_blacklist *bl;
        unsigned i, nr;
        int ret = 0;

        mutex_lock(&c->sb_lock);
        bl = bch2_sb_get_journal_seq_blacklist(c->disk_sb.sb);
        nr = blacklist_nr_entries(bl);

        if (bl) {
                for (i = 0; i < nr; i++) {
                        struct journal_seq_blacklist_entry *e =
                                bl->start + i;

                        if (start == le64_to_cpu(e->start) &&
                            end   == le64_to_cpu(e->end))
                                goto out;

                        if (start <= le64_to_cpu(e->start) &&
                            end   >= le64_to_cpu(e->end)) {
                                e->start = cpu_to_le64(start);
                                e->end   = cpu_to_le64(end);

                                if (i + 1 < nr)
                                        bl = blacklist_entry_try_merge(c,
                                                                bl, i);
                                if (i)
                                        bl = blacklist_entry_try_merge(c,
                                                                bl, i - 1);
                                goto out_write_sb;
                        }
                }
        }

        bl = bch2_sb_resize_journal_seq_blacklist(&c->disk_sb,
                                        sb_blacklist_u64s(nr + 1));
        if (!bl) {
                ret = -ENOMEM;
                goto out;
        }

        bl->start[nr].start     = cpu_to_le64(start);
        bl->start[nr].end       = cpu_to_le64(end);
out_write_sb:
        c->disk_sb.sb->features[0] |=
                1ULL << BCH_FEATURE_journal_seq_blacklist_v3;

        ret = bch2_write_super(c);
out:
        mutex_unlock(&c->sb_lock);

        return ret;
}

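/*
 * Comparator for the in-memory lookup table; the size argument is part of
 * the eytzinger comparison callback signature but is unused here.
 */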
static int journal_seq_blacklist_table_cmp(const void *_l,
                                           const void *_r, size_t size)
{
        const struct journal_seq_blacklist_table_entry *l = _l;
        const struct journal_seq_blacklist_table_entry *r = _r;

        return cmp_int(l->start, r->start);
}

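/*
 * Return true if @seq falls inside a blacklisted range: find the last entry
 * with start <= @seq, then check @seq against its (exclusive) end. If @dirty
 * is set, also mark the matching entry as still referenced so that
 * bch2_blacklist_entries_gc() will keep it.
 */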
bool bch2_journal_seq_is_blacklisted(struct bch_fs *c, u64 seq,
                                     bool dirty)
{
        struct journal_seq_blacklist_table *t = c->journal_seq_blacklist_table;
        struct journal_seq_blacklist_table_entry search = { .start = seq };
        int idx;

        if (!t)
                return false;

        idx = eytzinger0_find_le(t->entries, t->nr,
                                 sizeof(t->entries[0]),
                                 journal_seq_blacklist_table_cmp,
                                 &search);
        if (idx < 0)
                return false;

        BUG_ON(t->entries[idx].start > seq);

        if (seq >= t->entries[idx].end)
                return false;

        if (dirty)
                t->entries[idx].dirty = true;
        return true;
}

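/*
 * Build the in-memory lookup table from the superblock field, sorted into
 * eytzinger (cache-friendly complete binary tree) order; called at startup,
 * before the table is queried.
 */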
int bch2_blacklist_table_initialize(struct bch_fs *c)
{
        struct bch_sb_field_journal_seq_blacklist *bl =
                bch2_sb_get_journal_seq_blacklist(c->disk_sb.sb);
        struct journal_seq_blacklist_table *t;
        unsigned i, nr = blacklist_nr_entries(bl);

        BUG_ON(c->journal_seq_blacklist_table);

        if (!bl)
                return 0;

        t = kzalloc(sizeof(*t) + sizeof(t->entries[0]) * nr,
                    GFP_KERNEL);
        if (!t)
                return -ENOMEM;

        t->nr = nr;

        for (i = 0; i < nr; i++) {
                t->entries[i].start     = le64_to_cpu(bl->start[i].start);
                t->entries[i].end       = le64_to_cpu(bl->start[i].end);
        }

        eytzinger0_sort(t->entries,
                        t->nr,
                        sizeof(t->entries[0]),
                        journal_seq_blacklist_table_cmp,
                        NULL);

        c->journal_seq_blacklist_table = t;
        return 0;
}

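/*
 * Superblock field validation: every entry must be a nonempty range, and
 * entries must be sorted and non-overlapping; returns an error string, or
 * NULL if the field is valid.
 */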
static const char *
bch2_sb_journal_seq_blacklist_validate(struct bch_sb *sb,
                                       struct bch_sb_field *f)
{
        struct bch_sb_field_journal_seq_blacklist *bl =
                field_to_type(f, journal_seq_blacklist);
        struct journal_seq_blacklist_entry *i;
        unsigned nr = blacklist_nr_entries(bl);

        for (i = bl->start; i < bl->start + nr; i++) {
                if (le64_to_cpu(i->start) >=
                    le64_to_cpu(i->end))
                        return "entry start >= end";

                if (i + 1 < bl->start + nr &&
                    le64_to_cpu(i[0].end) >
                    le64_to_cpu(i[1].start))
                        return "entries out of order";
        }

        return NULL;
}

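/* Print the blacklist as a space-separated list of "start-end" ranges: */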
static void bch2_sb_journal_seq_blacklist_to_text(struct printbuf *out,
                                                  struct bch_sb *sb,
                                                  struct bch_sb_field *f)
{
        struct bch_sb_field_journal_seq_blacklist *bl =
                field_to_type(f, journal_seq_blacklist);
        struct journal_seq_blacklist_entry *i;
        unsigned nr = blacklist_nr_entries(bl);

        for (i = bl->start; i < bl->start + nr; i++) {
                if (i != bl->start)
                        pr_buf(out, " ");

                pr_buf(out, "%llu-%llu",
                       le64_to_cpu(i->start),
                       le64_to_cpu(i->end));
        }
}

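/* Hooks this field type into the generic superblock field code: */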
const struct bch_sb_field_ops bch_sb_field_ops_journal_seq_blacklist = {
        .validate       = bch2_sb_journal_seq_blacklist_validate,
        .to_text        = bch2_sb_journal_seq_blacklist_to_text
};

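/*
 * Garbage collect blacklist entries that are no longer needed: walk every
 * btree, so that reading each node marks the blacklist entries it still
 * references as dirty (via bch2_journal_seq_is_blacklisted() in the node
 * read path); then drop all entries left clean, shrinking or deleting the
 * superblock field to match.
 */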
void bch2_blacklist_entries_gc(struct work_struct *work)
{
        struct bch_fs *c = container_of(work, struct bch_fs,
                                        journal_seq_blacklist_gc_work);
        struct journal_seq_blacklist_table *t;
        struct bch_sb_field_journal_seq_blacklist *bl;
        struct journal_seq_blacklist_entry *src, *dst;
        struct btree_trans trans;
        unsigned i, nr, new_nr;
        int ret;

        bch2_trans_init(&trans, c, 0, 0);

        for (i = 0; i < BTREE_ID_NR; i++) {
                struct btree_iter *iter;
                struct btree *b;

                for_each_btree_node(&trans, iter, i, POS_MIN,
                                    BTREE_ITER_PREFETCH, b)
                        if (test_bit(BCH_FS_STOPPING, &c->flags)) {
                                bch2_trans_exit(&trans);
                                return;
                        }
                bch2_trans_iter_free(&trans, iter);
        }

        ret = bch2_trans_exit(&trans);
        if (ret)
                return;

        mutex_lock(&c->sb_lock);
        bl = bch2_sb_get_journal_seq_blacklist(c->disk_sb.sb);
        if (!bl)
                goto out;

        nr = blacklist_nr_entries(bl);
        dst = bl->start;

        t = c->journal_seq_blacklist_table;
        BUG_ON(nr != t->nr);

        for (src = bl->start, i = eytzinger0_first(t->nr);
             src < bl->start + nr;
             src++, i = eytzinger0_next(i, nr)) {
                BUG_ON(t->entries[i].start      != le64_to_cpu(src->start));
                BUG_ON(t->entries[i].end        != le64_to_cpu(src->end));

                if (t->entries[i].dirty)
                        *dst++ = *src;
        }

        new_nr = dst - bl->start;

        bch_info(c, "nr blacklist entries was %u, now %u", nr, new_nr);

        if (new_nr != nr) {
                bl = bch2_sb_resize_journal_seq_blacklist(&c->disk_sb,
                                new_nr ? sb_blacklist_u64s(new_nr) : 0);
                BUG_ON(new_nr && !bl);

                if (!new_nr)
                        c->disk_sb.sb->features[0] &=
                                ~(1ULL << BCH_FEATURE_journal_seq_blacklist_v3);

                bch2_write_super(c);
        }
out:
        mutex_unlock(&c->sb_lock);
}