// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "error.h"
#include "journal.h"
#include "journal_reclaim.h"

#include <linux/sort.h>

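/*
 * Sort buffered keys by (btree, pos), with journal sequence number and offset
 * as tiebreakers: after sorting, all updates to the same position are
 * adjacent and in journal order, so the flush loop can skip all but the
 * newest update to a given position and flush the rest in btree order.
 */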
static int btree_write_buffered_key_cmp(const void *_l, const void *_r)
{
        const struct btree_write_buffered_key *l = _l;
        const struct btree_write_buffered_key *r = _r;

        return  cmp_int(l->btree, r->btree) ?:
                bpos_cmp(l->k.k.p, r->k.k.p) ?:
                cmp_int(l->journal_seq, r->journal_seq) ?:
                cmp_int(l->journal_offset, r->journal_offset);
}

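/*
 * Sort by journal sequence number alone, for the slowpath: flushing in
 * journal order lets us release journal pins incrementally as we go.
 */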
static int btree_write_buffered_journal_cmp(const void *_l, const void *_r)
{
        const struct btree_write_buffered_key *l = _l;
        const struct btree_write_buffered_key *r = _r;

        return  cmp_int(l->journal_seq, r->journal_seq);
}

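/*
 * Flush one buffered key: while we hold the leaf node's write lock, keys
 * that fit in the node are inserted directly (the fast path); when a key
 * doesn't fit, we drop the write lock and fall back to a full transaction
 * commit, which can split the node as needed.
 */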
static int bch2_btree_write_buffer_flush_one(struct btree_trans *trans,
                                             struct btree_iter *iter,
                                             struct btree_write_buffered_key *wb,
                                             unsigned commit_flags,
                                             bool *write_locked,
                                             size_t *fast)
{
        struct bch_fs *c = trans->c;
        struct btree_path *path;
        int ret;

        ret = bch2_btree_iter_traverse(iter);
        if (ret)
                return ret;

        path = iter->path;

        if (!*write_locked) {
                ret = bch2_btree_node_lock_write(trans, path, &path->l[0].b->c);
                if (ret)
                        return ret;

                bch2_btree_node_prep_for_write(trans, path, path->l[0].b);
                *write_locked = true;
        }

        if (!bch2_btree_node_insert_fits(c, path->l[0].b, wb->k.k.u64s)) {
                bch2_btree_node_unlock_write(trans, path, path->l[0].b);
                *write_locked = false;
                goto trans_commit;
        }

        bch2_btree_insert_key_leaf(trans, path, &wb->k, wb->journal_seq);
        (*fast)++;
        return 0;
trans_commit:
        return  bch2_trans_update(trans, iter, &wb->k, 0) ?:
                bch2_trans_commit(trans, NULL, NULL,
                                  commit_flags|
                                  BTREE_INSERT_NOFAIL|
                                  BTREE_INSERT_JOURNAL_RECLAIM);
}

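/*
 * Atomically switch which of the two key buffers incoming updates are
 * appended to: bump the buffer index and reset the count in the packed
 * state word, then spin until the references writers took on the old
 * buffer have drained. After the smp_mb(), the old buffer's contents are
 * fully visible and safe for us to flush.
 */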
static union btree_write_buffer_state btree_write_buffer_switch(struct btree_write_buffer *wb)
{
        union btree_write_buffer_state old, new;
        u64 v = READ_ONCE(wb->state.v);

        do {
                old.v = new.v = v;

                new.nr = 0;
                new.idx++;
        } while ((v = atomic64_cmpxchg_acquire(&wb->state.counter, old.v, new.v)) != old.v);

        while (old.idx == 0 ? wb->state.ref0 : wb->state.ref1)
                cpu_relax();

        smp_mb();

        return old;
}

int __bch2_btree_write_buffer_flush(struct btree_trans *trans, unsigned commit_flags,
                                    bool locked)
{
        struct bch_fs *c = trans->c;
        struct journal *j = &c->journal;
        struct btree_write_buffer *wb = &c->btree_write_buffer;
        struct journal_entry_pin pin;
        struct btree_write_buffered_key *i, *dst, *keys;
        struct btree_iter iter = { NULL };
        size_t nr = 0, skipped = 0, fast = 0;
        bool write_locked = false;
        union btree_write_buffer_state s;
        int ret = 0;

        memset(&pin, 0, sizeof(pin));

        if (!locked && !mutex_trylock(&wb->flush_lock))
                return 0;

        bch2_journal_pin_copy(j, &pin, &wb->journal_pin, NULL);
        bch2_journal_pin_drop(j, &wb->journal_pin);

        s = btree_write_buffer_switch(wb);
        keys = wb->keys[s.idx];
        nr = s.nr;

        /*
         * We first sort so that we can detect and skip redundant updates, and
         * then we attempt to flush in sorted btree order, as this is most
         * efficient.
         *
         * However, since we're not flushing in the order they appear in the
         * journal we won't be able to drop our journal pin until everything is
         * flushed - which means this could deadlock the journal if we weren't
         * passing BTREE_INSERT_JOURNAL_RECLAIM. This causes the update to fail
         * if it would block taking a journal reservation.
         *
         * If that happens, we sort them by the order they appeared in the
         * journal - after dropping redundant entries - and then restart
         * flushing, this time dropping journal pins as we go.
         */

        sort(keys, nr, sizeof(keys[0]),
             btree_write_buffered_key_cmp, NULL);

        for (i = keys; i < keys + nr; i++) {
                if (i + 1 < keys + nr &&
                    i[0].btree == i[1].btree &&
                    bpos_eq(i[0].k.k.p, i[1].k.k.p)) {
                        skipped++;
                        continue;
                }

                if (write_locked &&
                    (iter.path->btree_id != i->btree ||
                     bpos_gt(i->k.k.p, iter.path->l[0].b->key.k.p))) {
                        bch2_btree_node_unlock_write(trans, iter.path, iter.path->l[0].b);
                        write_locked = false;
                }

                if (!iter.path || iter.path->btree_id != i->btree) {
                        bch2_trans_iter_exit(trans, &iter);
                        bch2_trans_iter_init(trans, &iter, i->btree, i->k.k.p, BTREE_ITER_INTENT);
                }

                bch2_btree_iter_set_pos(&iter, i->k.k.p);
                iter.path->preserve = false;

                do {
                        ret = bch2_btree_write_buffer_flush_one(trans, &iter, i,
                                                commit_flags, &write_locked, &fast);
                        if (!write_locked)
                                bch2_trans_begin(trans);
                } while (bch2_err_matches(ret, BCH_ERR_transaction_restart));

                if (ret)
                        break;
        }

        if (write_locked)
                bch2_btree_node_unlock_write(trans, iter.path, iter.path->l[0].b);
        bch2_trans_iter_exit(trans, &iter);

        trace_write_buffer_flush(trans, nr, skipped, fast, wb->size);

        if (ret == -BCH_ERR_journal_reclaim_would_deadlock)
                goto slowpath;

        bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret));
out:
        bch2_journal_pin_drop(j, &pin);
        mutex_unlock(&wb->flush_lock);
        return ret;
slowpath:
        trace_write_buffer_flush_slowpath(trans, i - keys, nr);

        dst = keys;
        for (; i < keys + nr; i++) {
                if (i + 1 < keys + nr &&
                    i[0].btree == i[1].btree &&
                    bpos_eq(i[0].k.k.p, i[1].k.k.p))
                        continue;

                *dst = *i;
                dst++;
        }
        nr = dst - keys;

        sort(keys, nr, sizeof(keys[0]),
             btree_write_buffered_journal_cmp,
             NULL);

        for (i = keys; i < keys + nr; i++) {
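                /*
                 * Advance our journal pin to this key's sequence number
                 * without ever being fully unpinned: pin the new sequence
                 * number via pin2 first, then transfer it into @pin.
                 */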
                if (i->journal_seq > pin.seq) {
                        struct journal_entry_pin pin2;

                        memset(&pin2, 0, sizeof(pin2));

                        bch2_journal_pin_add(j, i->journal_seq, &pin2, NULL);
                        bch2_journal_pin_drop(j, &pin);
                        bch2_journal_pin_copy(j, &pin, &pin2, NULL);
                        bch2_journal_pin_drop(j, &pin2);
                }

                ret = commit_do(trans, NULL, NULL,
                                commit_flags|
                                BTREE_INSERT_NOFAIL|
                                BTREE_INSERT_JOURNAL_RECLAIM|
                                JOURNAL_WATERMARK_reserved,
                                __bch2_btree_insert(trans, i->btree, &i->k, 0));
                if (bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret)))
                        break;
        }

        goto out;
}

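/*
 * Synchronous flush: unlike bch2_btree_write_buffer_flush(), which gives up
 * if another thread already holds the flush lock, this version blocks until
 * it can take the lock and is guaranteed to flush. The transaction is
 * unlocked first so we don't wait on the mutex while holding btree locks.
 */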
int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans)
{
        bch2_trans_unlock(trans);
        mutex_lock(&trans->c->btree_write_buffer.flush_lock);
        return __bch2_btree_write_buffer_flush(trans, 0, true);
}

int bch2_btree_write_buffer_flush(struct btree_trans *trans)
{
        return __bch2_btree_write_buffer_flush(trans, 0, false);
}

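/*
 * Journal reclaim calls this when it needs the entries pinned by the write
 * buffer to be released: take the flush lock and flush everything. Note
 * that __bch2_btree_write_buffer_flush() drops flush_lock before returning.
 */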
static int bch2_btree_write_buffer_journal_flush(struct journal *j,
                                struct journal_entry_pin *_pin, u64 seq)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct btree_write_buffer *wb = &c->btree_write_buffer;

        mutex_lock(&wb->flush_lock);

        return bch2_trans_run(c,
                        __bch2_btree_write_buffer_flush(&trans, BTREE_INSERT_NOCHECK_RW, true));
}

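/*
 * Value to add to the packed state word to take (or, via atomic64_sub,
 * release) a reference on buffer @idx: ref0 and ref1 are bitfields in the
 * same 64-bit word as nr and idx.
 */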
static inline u64 btree_write_buffer_ref(int idx)
{
        return ((union btree_write_buffer_state) {
                .ref0 = idx == 0,
                .ref1 = idx == 1,
        }).v;
}

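/*
 * Append this transaction's buffered updates to the currently active key
 * buffer, locklessly: a single cmpxchg loop both reserves our slots (by
 * bumping nr) and takes a reference on the active buffer, which keeps
 * btree_write_buffer_switch() from handing the buffer to a flush until our
 * memcpy of the keys has completed.
 */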
int bch2_btree_insert_keys_write_buffer(struct btree_trans *trans)
{
        struct bch_fs *c = trans->c;
        struct btree_write_buffer *wb = &c->btree_write_buffer;
        struct btree_write_buffered_key *i;
        union btree_write_buffer_state old, new;
        int ret = 0;
        u64 v;

        trans_for_each_wb_update(trans, i) {
                EBUG_ON(i->k.k.u64s > BTREE_WRITE_BUFERED_U64s_MAX);

                i->journal_seq          = trans->journal_res.seq;
                i->journal_offset       = trans->journal_res.offset;
        }

        preempt_disable();
        v = READ_ONCE(wb->state.v);
        do {
                old.v = new.v = v;

                new.v += btree_write_buffer_ref(new.idx);
                new.nr += trans->nr_wb_updates;
                if (new.nr > wb->size) {
                        ret = -BCH_ERR_btree_insert_need_flush_buffer;
                        goto out;
                }
        } while ((v = atomic64_cmpxchg_acquire(&wb->state.counter, old.v, new.v)) != old.v);

        memcpy(wb->keys[new.idx] + old.nr,
               trans->wb_updates,
               sizeof(trans->wb_updates[0]) * trans->nr_wb_updates);

        bch2_journal_pin_add(&c->journal, trans->journal_res.seq, &wb->journal_pin,
                             bch2_btree_write_buffer_journal_flush);

        atomic64_sub_return_release(btree_write_buffer_ref(new.idx), &wb->state.counter);
out:
        preempt_enable();
        return ret;
}

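/*
 * On shutdown, no buffered keys may remain unflushed unless the journal is
 * already in an error state (i.e. we went emergency read-only).
 */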
void bch2_fs_btree_write_buffer_exit(struct bch_fs *c)
{
        struct btree_write_buffer *wb = &c->btree_write_buffer;

        BUG_ON(wb->state.nr && !bch2_journal_error(&c->journal));

        kvfree(wb->keys[1]);
        kvfree(wb->keys[0]);
}

int bch2_fs_btree_write_buffer_init(struct bch_fs *c)
{
        struct btree_write_buffer *wb = &c->btree_write_buffer;

        mutex_init(&wb->flush_lock);
        wb->size = c->opts.btree_write_buffer_size;

        wb->keys[0] = kvmalloc_array(wb->size, sizeof(*wb->keys[0]), GFP_KERNEL);
        wb->keys[1] = kvmalloc_array(wb->size, sizeof(*wb->keys[1]), GFP_KERNEL);
        if (!wb->keys[0] || !wb->keys[1])
                return -ENOMEM;

        return 0;
}