libbcachefs/btree_write_buffer.c
btree_write_buffer: ensure atomic64_sub_return_release availability

// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "error.h"
#include "journal.h"
#include "journal_reclaim.h"

#include <linux/atomic.h>
#include <linux/sort.h>

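/*
 * Write buffered keys sort by btree, then key position, then the journal seq
 * and offset they were buffered at: updates to the same position end up
 * adjacent with the newest last, so older duplicates can be detected and
 * skipped at flush time.
 */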
static int btree_write_buffered_key_cmp(const void *_l, const void *_r)
{
	const struct btree_write_buffered_key *l = _l;
	const struct btree_write_buffered_key *r = _r;

	return  cmp_int(l->btree, r->btree) ?:
		bpos_cmp(l->k.k.p, r->k.k.p) ?:
		cmp_int(l->journal_seq, r->journal_seq) ?:
		cmp_int(l->journal_offset, r->journal_offset);
}

static int btree_write_buffered_journal_cmp(const void *_l, const void *_r)
{
	const struct btree_write_buffered_key *l = _l;
	const struct btree_write_buffered_key *r = _r;

	return  cmp_int(l->journal_seq, r->journal_seq);
}

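/*
 * Fast path flush of a single write buffered key: traverse to the leaf, take
 * the node write lock, and insert directly if the key fits in the node;
 * otherwise fall back to a regular transaction commit that preserves the
 * key's original journal seq.
 */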
static int bch2_btree_write_buffer_flush_one(struct btree_trans *trans,
					     struct btree_iter *iter,
					     struct btree_write_buffered_key *wb,
					     unsigned commit_flags,
					     bool *write_locked,
					     size_t *fast)
{
	struct bch_fs *c = trans->c;
	struct btree_path *path;
	int ret;

	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return ret;

	path = iter->path;

	if (!*write_locked) {
		ret = bch2_btree_node_lock_write(trans, path, &path->l[0].b->c);
		if (ret)
			return ret;

		bch2_btree_node_prep_for_write(trans, path, path->l[0].b);
		*write_locked = true;
	}

	if (!bch2_btree_node_insert_fits(c, path->l[0].b, wb->k.k.u64s)) {
		bch2_btree_node_unlock_write(trans, path, path->l[0].b);
		*write_locked = false;
		goto trans_commit;
	}

	bch2_btree_insert_key_leaf(trans, path, &wb->k, wb->journal_seq);
	(*fast)++;

	if (path->ref > 1) {
		/*
		 * We can't clone a path that has write locks: if the path is
		 * shared, unlock it before the next set_pos()/traverse():
		 */
		bch2_btree_node_unlock_write(trans, path, path->l[0].b);
		*write_locked = false;
	}
	return 0;
trans_commit:
	return  bch2_trans_update_seq(trans, wb->journal_seq, iter, &wb->k,
				      BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  commit_flags|
				  BTREE_INSERT_NOCHECK_RW|
				  BTREE_INSERT_NOFAIL|
				  BTREE_INSERT_JOURNAL_RECLAIM);
}

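/*
 * Atomically switch which of the two key buffers new updates go to: bump the
 * buffer index and reset the key count, then wait for writers still holding a
 * ref on the old buffer to finish copying their keys in. Returns the old
 * state, so the caller knows which buffer to flush and how many keys it holds.
 */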
static union btree_write_buffer_state btree_write_buffer_switch(struct btree_write_buffer *wb)
{
	union btree_write_buffer_state old, new;
	u64 v = READ_ONCE(wb->state.v);

	do {
		old.v = new.v = v;

		new.nr = 0;
		new.idx++;
	} while ((v = atomic64_cmpxchg_acquire(&wb->state.counter, old.v, new.v)) != old.v);

	while (old.idx == 0 ? wb->state.ref0 : wb->state.ref1)
		cpu_relax();

	smp_mb();

	return old;
}

/*
 * Update a btree with a write buffered key using the journal seq of the
 * original write buffer insert.
 *
 * It is not safe to rejournal the key once it has been inserted into the write
 * buffer because that may break recovery ordering. For example, the key may
 * have already been modified in the active write buffer in a seq that comes
 * before the current transaction. If we were to journal this key again and
 * crash, recovery would process updates in the wrong order.
 */
static int
btree_write_buffered_insert(struct btree_trans *trans,
			  struct btree_write_buffered_key *wb)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k),
			     BTREE_ITER_CACHED|BTREE_ITER_INTENT);

	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update_seq(trans, wb->journal_seq, &iter, &wb->k,
				      BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

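/*
 * Flush the write buffer: take over the write buffer's journal pin (so the
 * journal stays pinned while we flush), switch which buffer new updates go
 * to, sort the old buffer's keys, and flush them via the fast path above.
 * Keys that would deadlock journal reclaim are retried in journal order
 * through the slowpath below.
 */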
int __bch2_btree_write_buffer_flush(struct btree_trans *trans, unsigned commit_flags,
				    bool locked)
{
	struct bch_fs *c = trans->c;
	struct journal *j = &c->journal;
	struct btree_write_buffer *wb = &c->btree_write_buffer;
	struct journal_entry_pin pin;
	struct btree_write_buffered_key *i, *keys;
	struct btree_iter iter = { NULL };
	size_t nr = 0, skipped = 0, fast = 0, slowpath = 0;
	bool write_locked = false;
	union btree_write_buffer_state s;
	int ret = 0;

	memset(&pin, 0, sizeof(pin));

	if (!locked && !mutex_trylock(&wb->flush_lock))
		return 0;

	bch2_journal_pin_copy(j, &pin, &wb->journal_pin, NULL);
	bch2_journal_pin_drop(j, &wb->journal_pin);

	s = btree_write_buffer_switch(wb);
	keys = wb->keys[s.idx];
	nr = s.nr;

	if (race_fault())
		goto slowpath;

	/*
	 * We first sort so that we can detect and skip redundant updates, and
	 * then we attempt to flush in sorted btree order, as this is most
	 * efficient.
	 *
	 * However, since we're not flushing in the order they appear in the
	 * journal we won't be able to drop our journal pin until everything is
	 * flushed - which means this could deadlock the journal if we weren't
	 * passing BTREE_INSERT_JOURNAL_RECLAIM. This causes the update to fail
	 * if it would block taking a journal reservation.
	 *
	 * If that happens, simply skip the key so we can optimistically insert
	 * as many keys as possible in the fast path.
	 */
	sort(keys, nr, sizeof(keys[0]),
	     btree_write_buffered_key_cmp, NULL);

	for (i = keys; i < keys + nr; i++) {
		if (i + 1 < keys + nr &&
		    i[0].btree == i[1].btree &&
		    bpos_eq(i[0].k.k.p, i[1].k.k.p)) {
			skipped++;
			i->journal_seq = 0;
			continue;
		}

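		/*
		 * If we've moved past the node we have write locked, or on to
		 * a different btree, drop the node write lock before
		 * repositioning the iterator:
		 */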
		if (write_locked &&
		    (iter.path->btree_id != i->btree ||
		     bpos_gt(i->k.k.p, iter.path->l[0].b->key.k.p))) {
			bch2_btree_node_unlock_write(trans, iter.path, iter.path->l[0].b);
			write_locked = false;
		}

		if (!iter.path || iter.path->btree_id != i->btree) {
			bch2_trans_iter_exit(trans, &iter);
			bch2_trans_iter_init(trans, &iter, i->btree, i->k.k.p,
					     BTREE_ITER_INTENT|BTREE_ITER_ALL_SNAPSHOTS);
		}

		bch2_btree_iter_set_pos(&iter, i->k.k.p);
		iter.path->preserve = false;

		do {
			ret = bch2_btree_write_buffer_flush_one(trans, &iter, i,
						commit_flags, &write_locked, &fast);
			if (!write_locked)
				bch2_trans_begin(trans);
		} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));

		if (ret == -BCH_ERR_journal_reclaim_would_deadlock) {
			slowpath++;
			continue;
		}
		if (ret)
			break;

		i->journal_seq = 0;
	}

	if (write_locked)
		bch2_btree_node_unlock_write(trans, iter.path, iter.path->l[0].b);
	bch2_trans_iter_exit(trans, &iter);

	trace_write_buffer_flush(trans, nr, skipped, fast, wb->size);

	if (slowpath)
		goto slowpath;

	bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret));
out:
	bch2_journal_pin_drop(j, &pin);
	mutex_unlock(&wb->flush_lock);
	return ret;
slowpath:
	trace_write_buffer_flush_slowpath(trans, i - keys, nr);

	/*
	 * Now sort the rest by journal seq and bump the journal pin as we go.
	 * The fast path above zapped the seq of keys that were successfully
	 * flushed (or skipped as redundant), so we can skip those here.
	 */
	sort(keys, nr, sizeof(keys[0]),
	     btree_write_buffered_journal_cmp,
	     NULL);

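	/*
	 * These commits may be needed for journal reclaim to make forward
	 * progress, so commit with the reclaim watermark:
	 */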
	commit_flags &= ~BCH_WATERMARK_MASK;
	commit_flags |= BCH_WATERMARK_reclaim;

	for (i = keys; i < keys + nr; i++) {
		if (!i->journal_seq)
			continue;

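		/*
		 * Advance our journal pin to this key's seq without ever
		 * leaving the journal unpinned: take a second pin at the new
		 * seq before dropping the old one.
		 */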
		if (i->journal_seq > pin.seq) {
			struct journal_entry_pin pin2;

			memset(&pin2, 0, sizeof(pin2));

			bch2_journal_pin_add(j, i->journal_seq, &pin2, NULL);
			bch2_journal_pin_drop(j, &pin);
			bch2_journal_pin_copy(j, &pin, &pin2, NULL);
			bch2_journal_pin_drop(j, &pin2);
		}

		ret = commit_do(trans, NULL, NULL,
				commit_flags|
				BTREE_INSERT_NOFAIL|
				BTREE_INSERT_JOURNAL_RECLAIM,
				btree_write_buffered_insert(trans, i));
		if (bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret)))
			break;
	}

	goto out;
}

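/*
 * Synchronous flush: drop btree locks before blocking on the flush lock, then
 * flush with the lock already held.
 */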
int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans)
{
	bch2_trans_unlock(trans);
	mutex_lock(&trans->c->btree_write_buffer.flush_lock);
	return __bch2_btree_write_buffer_flush(trans, 0, true);
}

int bch2_btree_write_buffer_flush(struct btree_trans *trans)
{
	return __bch2_btree_write_buffer_flush(trans, 0, false);
}

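/*
 * Journal pin flush callback: called by journal reclaim when it needs the
 * write buffer's oldest pinned journal entry to be released.
 */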
static int bch2_btree_write_buffer_journal_flush(struct journal *j,
				struct journal_entry_pin *_pin, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	mutex_lock(&wb->flush_lock);

	return bch2_trans_run(c,
			__bch2_btree_write_buffer_flush(trans, BTREE_INSERT_NOCHECK_RW, true));
}

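/*
 * State value with a single reference taken on the given buffer index, for
 * use with 64-bit atomic add/sub on the packed state word:
 */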
static inline u64 btree_write_buffer_ref(int idx)
{
	return ((union btree_write_buffer_state) {
		.ref0 = idx == 0,
		.ref1 = idx == 1,
	}).v;
}

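/*
 * Copy a transaction's pending write buffered updates into the currently
 * active buffer: stamp each key with the transaction's journal seq and
 * offset, reserve space and take a ref on the buffer with a single atomic
 * cmpxchg (so a concurrent buffer switch waits for the copy to finish), then
 * pin the journal entry until the buffer is flushed. Fails with
 * -BCH_ERR_btree_insert_need_flush_buffer if the buffer is full.
 */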
int bch2_btree_insert_keys_write_buffer(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_write_buffer *wb = &c->btree_write_buffer;
	struct btree_write_buffered_key *i;
	union btree_write_buffer_state old, new;
	int ret = 0;
	u64 v;

	trans_for_each_wb_update(trans, i) {
		EBUG_ON(i->k.k.u64s > BTREE_WRITE_BUFERED_U64s_MAX);

		i->journal_seq		= trans->journal_res.seq;
		i->journal_offset	= trans->journal_res.offset;
	}

	preempt_disable();
	v = READ_ONCE(wb->state.v);
	do {
		old.v = new.v = v;

		new.v += btree_write_buffer_ref(new.idx);
		new.nr += trans->nr_wb_updates;
		if (new.nr > wb->size) {
			ret = -BCH_ERR_btree_insert_need_flush_buffer;
			goto out;
		}
	} while ((v = atomic64_cmpxchg_acquire(&wb->state.counter, old.v, new.v)) != old.v);

	memcpy(wb->keys[new.idx] + old.nr,
	       trans->wb_updates,
	       sizeof(trans->wb_updates[0]) * trans->nr_wb_updates);

	bch2_journal_pin_add(&c->journal, trans->journal_res.seq, &wb->journal_pin,
			     bch2_btree_write_buffer_journal_flush);

	atomic64_sub_return_release(btree_write_buffer_ref(new.idx), &wb->state.counter);
out:
	preempt_enable();
	return ret;
}

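/* The buffers must be empty at shutdown unless the journal is in an error state: */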
void bch2_fs_btree_write_buffer_exit(struct bch_fs *c)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	BUG_ON(wb->state.nr && !bch2_journal_error(&c->journal));

	kvfree(wb->keys[1]);
	kvfree(wb->keys[0]);
}

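/* Allocate the two key buffers, each sized by the btree_write_buffer_size option: */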
int bch2_fs_btree_write_buffer_init(struct bch_fs *c)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	mutex_init(&wb->flush_lock);
	wb->size = c->opts.btree_write_buffer_size;

	wb->keys[0] = kvmalloc_array(wb->size, sizeof(*wb->keys[0]), GFP_KERNEL);
	wb->keys[1] = kvmalloc_array(wb->size, sizeof(*wb->keys[1]), GFP_KERNEL);
	if (!wb->keys[0] || !wb->keys[1])
		return -BCH_ERR_ENOMEM_fs_btree_write_buffer_init;

	return 0;
}