// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "error.h"
#include "journal.h"
#include "journal_reclaim.h"

#include <linux/sort.h>

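/*
 * Sort order for the fast path flush: group keys by btree and position so
 * that redundant updates to the same key end up adjacent, with journal
 * seq/offset as tiebreakers so the newest version sorts last.
 */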
static int btree_write_buffered_key_cmp(const void *_l, const void *_r)
{
	const struct btree_write_buffered_key *l = _l;
	const struct btree_write_buffered_key *r = _r;

	return  cmp_int(l->btree, r->btree) ?:
		bpos_cmp(l->k.k.p, r->k.k.p) ?:
		cmp_int(l->journal_seq, r->journal_seq) ?:
		cmp_int(l->journal_offset, r->journal_offset);
}

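/*
 * Sort order for the slow path flush: keys are flushed in journal seq order
 * so the journal pin can be advanced as we go.
 */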
static int btree_write_buffered_journal_cmp(const void *_l, const void *_r)
{
	const struct btree_write_buffered_key *l = _l;
	const struct btree_write_buffered_key *r = _r;

	return  cmp_int(l->journal_seq, r->journal_seq);
}

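/*
 * Fast path for flushing a single buffered key: take (and keep holding) the
 * leaf node write lock and insert the key directly. If the key doesn't fit
 * in the node, fall back to a regular transaction commit for just this key.
 */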
static int bch2_btree_write_buffer_flush_one(struct btree_trans *trans,
					     struct btree_iter *iter,
					     struct btree_write_buffered_key *wb,
					     unsigned commit_flags,
					     bool *write_locked,
					     size_t *fast)
{
	struct bch_fs *c = trans->c;
	struct btree_path *path;
	int ret;

	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return ret;

	path = iter->path;

	if (!*write_locked) {
		ret = bch2_btree_node_lock_write(trans, path, &path->l[0].b->c);
		if (ret)
			return ret;

		bch2_btree_node_prep_for_write(trans, path, path->l[0].b);
		*write_locked = true;
	}

	if (!bch2_btree_node_insert_fits(c, path->l[0].b, wb->k.k.u64s)) {
		bch2_btree_node_unlock_write(trans, path, path->l[0].b);
		*write_locked = false;
		goto trans_commit;
	}

	bch2_btree_insert_key_leaf(trans, path, &wb->k, wb->journal_seq);
	(*fast)++;

	if (path->ref > 1) {
		/*
		 * We can't clone a path that has write locks: if the path is
		 * shared, unlock before set_pos(), traverse():
		 */
		bch2_btree_node_unlock_write(trans, path, path->l[0].b);
		*write_locked = false;
	}
	return 0;
trans_commit:
	return  bch2_trans_update_seq(trans, wb->journal_seq, iter, &wb->k, 0) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  commit_flags|
				  BTREE_INSERT_NOCHECK_RW|
				  BTREE_INSERT_NOFAIL|
				  BTREE_INSERT_JOURNAL_RECLAIM);
}

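/*
 * Atomically switch the active buffer: reset the key count and flip the
 * buffer index, then wait for writers still holding a reference to the old
 * buffer to finish copying their keys in before it is flushed.
 */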
static union btree_write_buffer_state btree_write_buffer_switch(struct btree_write_buffer *wb)
{
	union btree_write_buffer_state old, new;
	u64 v = READ_ONCE(wb->state.v);

	do {
		old.v = new.v = v;

		new.nr = 0;
		new.idx++;
	} while ((v = atomic64_cmpxchg_acquire(&wb->state.counter, old.v, new.v)) != old.v);

	while (old.idx == 0 ? wb->state.ref0 : wb->state.ref1)
		cpu_relax();

	smp_mb();

	return old;
}

/*
 * Update a btree with a write buffered key using the journal seq of the
 * original write buffer insert.
 *
 * It is not safe to rejournal the key once it has been inserted into the write
 * buffer because that may break recovery ordering. For example, the key may
 * have already been modified in the active write buffer in a seq that comes
 * before the current transaction. If we were to journal this key again and
 * crash, recovery would process updates in the wrong order.
 */
static int
btree_write_buffered_insert(struct btree_trans *trans,
			  struct btree_write_buffered_key *wb)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k),
			     BTREE_ITER_CACHED|BTREE_ITER_INTENT);

	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update_seq(trans, wb->journal_seq, &iter, &wb->k, 0);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

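/*
 * Flush the write buffer: sort the buffered keys and insert them into their
 * btrees, using the direct leaf-insert fast path where possible and falling
 * back to full transaction commits, in journal seq order, for keys that
 * would otherwise deadlock journal reclaim. If @locked is false and the
 * flush lock is contended, returns 0 without flushing.
 */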
int __bch2_btree_write_buffer_flush(struct btree_trans *trans, unsigned commit_flags,
				    bool locked)
{
	struct bch_fs *c = trans->c;
	struct journal *j = &c->journal;
	struct btree_write_buffer *wb = &c->btree_write_buffer;
	struct journal_entry_pin pin;
	struct btree_write_buffered_key *i, *keys;
	struct btree_iter iter = { NULL };
	size_t nr = 0, skipped = 0, fast = 0, slowpath = 0;
	bool write_locked = false;
	union btree_write_buffer_state s;
	int ret = 0;

	memset(&pin, 0, sizeof(pin));

	if (!locked && !mutex_trylock(&wb->flush_lock))
		return 0;

	bch2_journal_pin_copy(j, &pin, &wb->journal_pin, NULL);
	bch2_journal_pin_drop(j, &wb->journal_pin);

	s = btree_write_buffer_switch(wb);
	keys = wb->keys[s.idx];
	nr = s.nr;

	if (race_fault())
		goto slowpath;

	/*
	 * We first sort so that we can detect and skip redundant updates, and
	 * then we attempt to flush in sorted btree order, as this is most
	 * efficient.
	 *
	 * However, since we're not flushing in the order they appear in the
	 * journal we won't be able to drop our journal pin until everything is
	 * flushed - which means this could deadlock the journal if we weren't
	 * passing BTREE_INSERT_JOURNAL_RECLAIM. This causes the update to fail
	 * if it would block taking a journal reservation.
	 *
	 * If that happens, simply skip the key so we can optimistically insert
	 * as many keys as possible in the fast path.
	 */
	sort(keys, nr, sizeof(keys[0]),
	     btree_write_buffered_key_cmp, NULL);

	for (i = keys; i < keys + nr; i++) {
		if (i + 1 < keys + nr &&
		    i[0].btree == i[1].btree &&
		    bpos_eq(i[0].k.k.p, i[1].k.k.p)) {
			skipped++;
			i->journal_seq = 0;
			continue;
		}

		if (write_locked &&
		    (iter.path->btree_id != i->btree ||
		     bpos_gt(i->k.k.p, iter.path->l[0].b->key.k.p))) {
			bch2_btree_node_unlock_write(trans, iter.path, iter.path->l[0].b);
			write_locked = false;
		}

		if (!iter.path || iter.path->btree_id != i->btree) {
			bch2_trans_iter_exit(trans, &iter);
			bch2_trans_iter_init(trans, &iter, i->btree, i->k.k.p, BTREE_ITER_INTENT);
		}

		bch2_btree_iter_set_pos(&iter, i->k.k.p);
		iter.path->preserve = false;

		do {
			ret = bch2_btree_write_buffer_flush_one(trans, &iter, i,
						commit_flags, &write_locked, &fast);
			if (!write_locked)
				bch2_trans_begin(trans);
		} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));

		if (ret == -BCH_ERR_journal_reclaim_would_deadlock) {
			slowpath++;
			continue;
		}
		if (ret)
			break;

		i->journal_seq = 0;
	}

	if (write_locked)
		bch2_btree_node_unlock_write(trans, iter.path, iter.path->l[0].b);
	bch2_trans_iter_exit(trans, &iter);

	trace_write_buffer_flush(trans, nr, skipped, fast, wb->size);

	if (slowpath)
		goto slowpath;

	bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret));
out:
	bch2_journal_pin_drop(j, &pin);
	mutex_unlock(&wb->flush_lock);
	return ret;
slowpath:
	trace_write_buffer_flush_slowpath(trans, i - keys, nr);

	/*
	 * Now sort the rest by journal seq and bump the journal pin as we go.
	 * The fast path zapped the seq of keys that were successfully flushed,
	 * so we can skip those here.
	 */
	sort(keys, nr, sizeof(keys[0]),
	     btree_write_buffered_journal_cmp,
	     NULL);

	commit_flags &= ~BCH_WATERMARK_MASK;
	commit_flags |= BCH_WATERMARK_reclaim;

	for (i = keys; i < keys + nr; i++) {
		if (!i->journal_seq)
			continue;

		if (i->journal_seq > pin.seq) {
			struct journal_entry_pin pin2;

			memset(&pin2, 0, sizeof(pin2));

			bch2_journal_pin_add(j, i->journal_seq, &pin2, NULL);
			bch2_journal_pin_drop(j, &pin);
			bch2_journal_pin_copy(j, &pin, &pin2, NULL);
			bch2_journal_pin_drop(j, &pin2);
		}

		ret = commit_do(trans, NULL, NULL,
				commit_flags|
				BTREE_INSERT_NOFAIL|
				BTREE_INSERT_JOURNAL_RECLAIM,
				btree_write_buffered_insert(trans, i));
		if (bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret)))
			break;
	}

	goto out;
}

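/*
 * Blocking flush: drop transaction locks, wait for the flush lock and flush.
 */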
int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans)
{
	bch2_trans_unlock(trans);
	mutex_lock(&trans->c->btree_write_buffer.flush_lock);
	return __bch2_btree_write_buffer_flush(trans, 0, true);
}

int bch2_btree_write_buffer_flush(struct btree_trans *trans)
{
	return __bch2_btree_write_buffer_flush(trans, 0, false);
}

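/*
 * Journal pin flush callback, invoked by journal reclaim when the entry the
 * write buffer is pinning needs to be freed up.
 */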
static int bch2_btree_write_buffer_journal_flush(struct journal *j,
				struct journal_entry_pin *_pin, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	mutex_lock(&wb->flush_lock);

	return bch2_trans_run(c,
			__bch2_btree_write_buffer_flush(&trans, BTREE_INSERT_NOCHECK_RW, true));
}

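/*
 * Value to add to the packed state word to take a reference on buffer @idx.
 */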
static inline u64 btree_write_buffer_ref(int idx)
{
	return ((union btree_write_buffer_state) {
		.ref0 = idx == 0,
		.ref1 = idx == 1,
	}).v;
}

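/*
 * Stage this transaction's write buffered keys: tag them with the current
 * journal reservation, atomically reserve space in the active buffer (taking
 * a ref so a concurrent buffer switch waits for the copy to finish), copy
 * the keys in, and pin the journal seq so the buffer is flushed before that
 * entry is reclaimed. Fails if the buffer needs to be flushed first.
 */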
int bch2_btree_insert_keys_write_buffer(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_write_buffer *wb = &c->btree_write_buffer;
	struct btree_write_buffered_key *i;
	union btree_write_buffer_state old, new;
	int ret = 0;
	u64 v;

	trans_for_each_wb_update(trans, i) {
		EBUG_ON(i->k.k.u64s > BTREE_WRITE_BUFERED_U64s_MAX);

		i->journal_seq		= trans->journal_res.seq;
		i->journal_offset	= trans->journal_res.offset;
	}

	preempt_disable();
	v = READ_ONCE(wb->state.v);
	do {
		old.v = new.v = v;

		new.v += btree_write_buffer_ref(new.idx);
		new.nr += trans->nr_wb_updates;
		if (new.nr > wb->size) {
			ret = -BCH_ERR_btree_insert_need_flush_buffer;
			goto out;
		}
	} while ((v = atomic64_cmpxchg_acquire(&wb->state.counter, old.v, new.v)) != old.v);

	memcpy(wb->keys[new.idx] + old.nr,
	       trans->wb_updates,
	       sizeof(trans->wb_updates[0]) * trans->nr_wb_updates);

	bch2_journal_pin_add(&c->journal, trans->journal_res.seq, &wb->journal_pin,
			     bch2_btree_write_buffer_journal_flush);

	atomic64_sub_return_release(btree_write_buffer_ref(new.idx), &wb->state.counter);
out:
	preempt_enable();
	return ret;
}

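/*
 * Free both key buffers; unflushed keys are only expected here if the
 * journal is in an error state.
 */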
void bch2_fs_btree_write_buffer_exit(struct bch_fs *c)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	BUG_ON(wb->state.nr && !bch2_journal_error(&c->journal));

	kvfree(wb->keys[1]);
	kvfree(wb->keys[0]);
}

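/*
 * Allocate the two key buffers, sized by the btree_write_buffer_size option.
 */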
int bch2_fs_btree_write_buffer_init(struct bch_fs *c)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	mutex_init(&wb->flush_lock);
	wb->size = c->opts.btree_write_buffer_size;

	wb->keys[0] = kvmalloc_array(wb->size, sizeof(*wb->keys[0]), GFP_KERNEL);
	wb->keys[1] = kvmalloc_array(wb->size, sizeof(*wb->keys[1]), GFP_KERNEL);
	if (!wb->keys[0] || !wb->keys[1])
		return -BCH_ERR_ENOMEM_fs_btree_write_buffer_init;

	return 0;
}