// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "error.h"
#include "journal.h"
#include "journal_reclaim.h"

#include <linux/sort.h>

static int bch2_btree_write_buffer_journal_flush(struct journal *,
				struct journal_entry_pin *, u64);

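/*
 * Sort order for flushing: btree id, then key position, then journal
 * seq/offset. For multiple updates to the same key this puts the newest one
 * last, which the flush path relies on when it skips all but the final
 * update.
 */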
static int btree_write_buffered_key_cmp(const void *_l, const void *_r)
{
	const struct btree_write_buffered_key *l = _l;
	const struct btree_write_buffered_key *r = _r;

	return  cmp_int(l->btree, r->btree) ?:
		bpos_cmp(l->k.k.p, r->k.k.p) ?:
		cmp_int(l->journal_seq, r->journal_seq) ?:
		cmp_int(l->journal_offset, r->journal_offset);
}

static int btree_write_buffered_journal_cmp(const void *_l, const void *_r)
{
	const struct btree_write_buffered_key *l = _l;
	const struct btree_write_buffered_key *r = _r;

	return  cmp_int(l->journal_seq, r->journal_seq);
}

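/*
 * Flush a single write buffered key. Fast path: take the node write lock and
 * insert directly into the leaf if the key fits. Otherwise drop the write
 * lock and fall back to a full transaction commit, reusing the journal seq of
 * the original write buffer insert.
 */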
static int bch2_btree_write_buffer_flush_one(struct btree_trans *trans,
					     struct btree_iter *iter,
					     struct btree_write_buffered_key *wb,
					     unsigned commit_flags,
					     bool *write_locked,
					     size_t *fast)
{
	struct bch_fs *c = trans->c;
	struct btree_path *path;
	int ret;

	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return ret;

	/*
	 * We can't clone a path that has write locks: unshare it now, before
	 * set_pos and traverse():
	 */
	if (iter->path->ref > 1)
		iter->path = __bch2_btree_path_make_mut(trans, iter->path, true, _THIS_IP_);

	path = iter->path;

	if (!*write_locked) {
		ret = bch2_btree_node_lock_write(trans, path, &path->l[0].b->c);
		if (ret)
			return ret;

		bch2_btree_node_prep_for_write(trans, path, path->l[0].b);
		*write_locked = true;
	}

	if (!bch2_btree_node_insert_fits(c, path->l[0].b, wb->k.k.u64s)) {
		bch2_btree_node_unlock_write(trans, path, path->l[0].b);
		*write_locked = false;
		goto trans_commit;
	}

	bch2_btree_insert_key_leaf(trans, path, &wb->k, wb->journal_seq);
	(*fast)++;
	return 0;
trans_commit:
	trans->journal_res.seq = wb->journal_seq;

	return  bch2_trans_update(trans, iter, &wb->k,
				  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  commit_flags|
				  BCH_TRANS_COMMIT_no_check_rw|
				  BCH_TRANS_COMMIT_no_enospc|
				  BCH_TRANS_COMMIT_no_journal_res|
				  BCH_TRANS_COMMIT_journal_reclaim);
}

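/*
 * Atomically switch the active buffer: bump the index and reset the key
 * count, then wait for any writers still holding a ref on the old buffer to
 * drain so its contents are stable before they're flushed.
 */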
static union btree_write_buffer_state btree_write_buffer_switch(struct btree_write_buffer *wb)
{
	union btree_write_buffer_state old, new;
	u64 v = READ_ONCE(wb->state.v);

	do {
		old.v = new.v = v;

		new.nr = 0;
		new.idx++;
	} while ((v = atomic64_cmpxchg_acquire(&wb->state.counter, old.v, new.v)) != old.v);

	while (old.idx == 0 ? wb->state.ref0 : wb->state.ref1)
		cpu_relax();

	smp_mb();

	return old;
}

/*
 * Update a btree with a write buffered key using the journal seq of the
 * original write buffer insert.
 *
 * It is not safe to rejournal the key once it has been inserted into the write
 * buffer because that may break recovery ordering. For example, the key may
 * have already been modified in the active write buffer in a seq that comes
 * before the current transaction. If we were to journal this key again and
 * crash, recovery would process updates in the wrong order.
 */
static int
btree_write_buffered_insert(struct btree_trans *trans,
			  struct btree_write_buffered_key *wb)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k),
			     BTREE_ITER_CACHED|BTREE_ITER_INTENT);

	trans->journal_res.seq = wb->journal_seq;

	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(trans, &iter, &wb->k,
				  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

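/*
 * Flush the previously active write buffer to the btrees: switch buffers,
 * sort the keys and try the fast path insert for each one; keys that would
 * deadlock journal reclaim are retried in journal order in the slowpath
 * below.
 */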
int __bch2_btree_write_buffer_flush(struct btree_trans *trans, unsigned commit_flags,
				    bool locked)
{
	struct bch_fs *c = trans->c;
	struct journal *j = &c->journal;
	struct btree_write_buffer *wb = &c->btree_write_buffer;
	struct journal_entry_pin pin;
	struct btree_write_buffered_key *i, *keys;
	struct btree_iter iter = { NULL };
	size_t nr = 0, skipped = 0, fast = 0, slowpath = 0;
	bool write_locked = false;
	union btree_write_buffer_state s;
	int ret = 0;

	memset(&pin, 0, sizeof(pin));

	if (!locked && !mutex_trylock(&wb->flush_lock))
		return 0;

	bch2_journal_pin_copy(j, &pin, &wb->journal_pin,
			      bch2_btree_write_buffer_journal_flush);
	bch2_journal_pin_drop(j, &wb->journal_pin);

	s = btree_write_buffer_switch(wb);
	keys = wb->keys[s.idx];
	nr = s.nr;

	if (race_fault())
		goto slowpath;

	/*
	 * We first sort so that we can detect and skip redundant updates, and
	 * then we attempt to flush in sorted btree order, as this is most
	 * efficient.
	 *
	 * However, since we're not flushing in the order they appear in the
	 * journal we won't be able to drop our journal pin until everything is
	 * flushed - which means this could deadlock the journal if we weren't
	 * passing BCH_TRANS_COMMIT_journal_reclaim. This causes the update to fail
	 * if it would block taking a journal reservation.
	 *
	 * If that happens, simply skip the key so we can optimistically insert
	 * as many keys as possible in the fast path.
	 */
	sort(keys, nr, sizeof(keys[0]),
	     btree_write_buffered_key_cmp, NULL);

	for (i = keys; i < keys + nr; i++) {
		if (i + 1 < keys + nr &&
		    i[0].btree == i[1].btree &&
		    bpos_eq(i[0].k.k.p, i[1].k.k.p)) {
			skipped++;
			i->journal_seq = 0;
			continue;
		}

		if (write_locked &&
		    (iter.path->btree_id != i->btree ||
		     bpos_gt(i->k.k.p, iter.path->l[0].b->key.k.p))) {
			bch2_btree_node_unlock_write(trans, iter.path, iter.path->l[0].b);
			write_locked = false;
		}

		if (!iter.path || iter.path->btree_id != i->btree) {
			bch2_trans_iter_exit(trans, &iter);
			bch2_trans_iter_init(trans, &iter, i->btree, i->k.k.p,
					     BTREE_ITER_INTENT|BTREE_ITER_ALL_SNAPSHOTS);
		}

		bch2_btree_iter_set_pos(&iter, i->k.k.p);
		iter.path->preserve = false;

		do {
			ret = bch2_btree_write_buffer_flush_one(trans, &iter, i,
						commit_flags, &write_locked, &fast);
			if (!write_locked)
				bch2_trans_begin(trans);
		} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));

		if (ret == -BCH_ERR_journal_reclaim_would_deadlock) {
			slowpath++;
			continue;
		}
		if (ret)
			break;

		i->journal_seq = 0;
	}

	if (write_locked)
		bch2_btree_node_unlock_write(trans, iter.path, iter.path->l[0].b);
	bch2_trans_iter_exit(trans, &iter);

	trace_write_buffer_flush(trans, nr, skipped, fast, wb->size);

	if (slowpath)
		goto slowpath;

	bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret));
out:
	bch2_journal_pin_drop(j, &pin);
	mutex_unlock(&wb->flush_lock);
	return ret;
slowpath:
	trace_write_buffer_flush_slowpath(trans, i - keys, nr);

	/*
	 * Now sort the rest by journal seq and bump the journal pin as we go.
	 * The fast path zapped the seq of keys that were successfully flushed
	 * so we can skip those here.
	 */
	sort(keys, nr, sizeof(keys[0]),
	     btree_write_buffered_journal_cmp,
	     NULL);

	commit_flags &= ~BCH_WATERMARK_MASK;
	commit_flags |= BCH_WATERMARK_reclaim;

	for (i = keys; i < keys + nr; i++) {
		if (!i->journal_seq)
			continue;

		bch2_journal_pin_update(j, i->journal_seq, &pin,
			      bch2_btree_write_buffer_journal_flush);

		ret = commit_do(trans, NULL, NULL,
				commit_flags|
				BCH_TRANS_COMMIT_no_enospc|
				BCH_TRANS_COMMIT_no_journal_res|
				BCH_TRANS_COMMIT_journal_reclaim,
				btree_write_buffered_insert(trans, i));
		if (bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret)))
			break;
	}

	goto out;
}

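/*
 * Synchronous flush: drop transaction locks and take flush_lock directly, so
 * the flush can't be skipped the way the trylock in the nonblocking variant
 * allows.
 */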
int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans)
{
	bch2_trans_unlock(trans);
	mutex_lock(&trans->c->btree_write_buffer.flush_lock);
	return __bch2_btree_write_buffer_flush(trans, 0, true);
}

int bch2_btree_write_buffer_flush(struct btree_trans *trans)
{
	return __bch2_btree_write_buffer_flush(trans, 0, false);
}

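/*
 * Journal pin flush callback, called from journal reclaim: take the flush
 * lock and flush the write buffer in a fresh transaction.
 */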
static int bch2_btree_write_buffer_journal_flush(struct journal *j,
				struct journal_entry_pin *_pin, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	mutex_lock(&wb->flush_lock);

	return bch2_trans_run(c,
			__bch2_btree_write_buffer_flush(trans, BCH_TRANS_COMMIT_no_check_rw, true));
}

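/*
 * State word increment that takes a reference on the given buffer index; the
 * buffer switch path waits for these refs to drain before flushing.
 */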
static inline u64 btree_write_buffer_ref(int idx)
{
	return ((union btree_write_buffer_state) {
		.ref0 = idx == 0,
		.ref1 = idx == 1,
	}).v;
}

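/*
 * Called at transaction commit time: stamp each buffered update with the
 * transaction's journal seq/offset, reserve space in the active buffer
 * (taking a ref so a concurrent buffer switch waits for the copy), memcpy the
 * keys in, and add a journal pin so the journal entries can't be reclaimed
 * before the keys are flushed. Returns
 * -BCH_ERR_btree_insert_need_flush_buffer if the buffer is full.
 */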
int bch2_btree_insert_keys_write_buffer(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_write_buffer *wb = &c->btree_write_buffer;
	struct btree_write_buffered_key *i;
	union btree_write_buffer_state old, new;
	int ret = 0;
	u64 v;

	trans_for_each_wb_update(trans, i) {
		EBUG_ON(i->k.k.u64s > BTREE_WRITE_BUFERED_U64s_MAX);

		i->journal_seq		= trans->journal_res.seq;
		i->journal_offset	= trans->journal_res.offset;
	}

	preempt_disable();
	v = READ_ONCE(wb->state.v);
	do {
		old.v = new.v = v;

		new.v += btree_write_buffer_ref(new.idx);
		new.nr += trans->nr_wb_updates;
		if (new.nr > wb->size) {
			ret = -BCH_ERR_btree_insert_need_flush_buffer;
			goto out;
		}
	} while ((v = atomic64_cmpxchg_acquire(&wb->state.counter, old.v, new.v)) != old.v);

	memcpy(wb->keys[new.idx] + old.nr,
	       trans->wb_updates,
	       sizeof(trans->wb_updates[0]) * trans->nr_wb_updates);

	bch2_journal_pin_add(&c->journal, trans->journal_res.seq, &wb->journal_pin,
			     bch2_btree_write_buffer_journal_flush);

	atomic64_sub_return_release(btree_write_buffer_ref(new.idx), &wb->state.counter);
out:
	preempt_enable();
	return ret;
}

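/*
 * At shutdown the buffer should be empty: keys still present would be lost,
 * which is only expected if the journal has already hit an error.
 */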
void bch2_fs_btree_write_buffer_exit(struct bch_fs *c)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	BUG_ON(wb->state.nr && !bch2_journal_error(&c->journal));

	kvfree(wb->keys[1]);
	kvfree(wb->keys[0]);
}

int bch2_fs_btree_write_buffer_init(struct bch_fs *c)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	mutex_init(&wb->flush_lock);
	wb->size = c->opts.btree_write_buffer_size;

	wb->keys[0] = kvmalloc_array(wb->size, sizeof(*wb->keys[0]), GFP_KERNEL);
	wb->keys[1] = kvmalloc_array(wb->size, sizeof(*wb->keys[1]), GFP_KERNEL);
	if (!wb->keys[0] || !wb->keys[1])
		return -BCH_ERR_ENOMEM_fs_btree_write_buffer_init;

	return 0;
}