libbcachefs/btree_write_buffer.c
// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"

#include <linux/prefetch.h>

static int bch2_btree_write_buffer_journal_flush(struct journal *,
                                struct journal_entry_pin *, u64);

static int bch2_journal_keys_to_write_buffer(struct bch_fs *, struct journal_buf *);

static inline bool __wb_key_ref_cmp(const struct wb_key_ref *l, const struct wb_key_ref *r)
{
        return (cmp_int(l->hi, r->hi) ?:
                cmp_int(l->mi, r->mi) ?:
                cmp_int(l->lo, r->lo)) >= 0;
}

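/*
 * 192-bit unsigned compare of the packed (hi, mi, lo) key, so that keys sort
 * by btree id and position (idx, in the low 24 bits, only breaks ties).
 * The x86-64 version does a three-word sub/sbb and reads the "above or equal"
 * flag, matching __wb_key_ref_cmp() above.
 */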
static inline bool wb_key_ref_cmp(const struct wb_key_ref *l, const struct wb_key_ref *r)
{
#ifdef CONFIG_X86_64
        int cmp;

        asm("mov   (%[l]), %%rax;"
            "sub   (%[r]), %%rax;"
            "mov  8(%[l]), %%rax;"
            "sbb  8(%[r]), %%rax;"
            "mov 16(%[l]), %%rax;"
            "sbb 16(%[r]), %%rax;"
            : "=@ccae" (cmp)
            : [l] "r" (l), [r] "r" (r)
            : "rax", "cc");

        EBUG_ON(cmp != __wb_key_ref_cmp(l, r));
        return cmp;
#else
        return __wb_key_ref_cmp(l, r);
#endif
}

/* Compare excluding idx, the low 24 bits: */
static inline bool wb_key_eq(const void *_l, const void *_r)
{
        const struct wb_key_ref *l = _l;
        const struct wb_key_ref *r = _r;

        return !((l->hi ^ r->hi)|
                 (l->mi ^ r->mi)|
                 ((l->lo >> 24) ^ (r->lo >> 24)));
}

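/*
 * Bottom-up heapsort of the wb_key_ref array; see the comment in the
 * sift-down loop below for why this variant is used.
 */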
static noinline void wb_sort(struct wb_key_ref *base, size_t num)
{
        size_t n = num, a = num / 2;

        if (!a)         /* num < 2 || size == 0 */
                return;

        for (;;) {
                size_t b, c, d;

                if (a)                  /* Building heap: sift down --a */
                        --a;
                else if (--n)           /* Sorting: Extract root to --n */
                        swap(base[0], base[n]);
                else                    /* Sort complete */
                        break;

                /*
                 * Sift element at "a" down into heap.  This is the
                 * "bottom-up" variant, which significantly reduces
                 * calls to wb_key_ref_cmp(): we find the sift-down path all
                 * the way to the leaves (one compare per level), then
                 * backtrack to find where to insert the target element.
                 *
                 * Because elements tend to sift down close to the leaves,
                 * this uses fewer compares than doing two per level
                 * on the way down.  (A bit more than half as many on
                 * average, 3/4 worst-case.)
                 */
                for (b = a; c = 2*b + 1, (d = c + 1) < n;)
                        b = wb_key_ref_cmp(base + c, base + d) ? c : d;
                if (d == n)             /* Special case last leaf with no sibling */
                        b = c;

                /* Now backtrack from "b" to the correct location for "a" */
                while (b != a && wb_key_ref_cmp(base + a, base + b))
                        b = (b - 1) / 2;
                c = b;                  /* Where "a" belongs */
                while (b != a) {        /* Shift it into place */
                        b = (b - 1) / 2;
                        swap(base[b], base[c]);
                }
        }
}

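/*
 * Slow path for flushing a single write buffered key: the key didn't fit in
 * the leaf node, so drop the node write lock and go through a full
 * transaction commit (which can split the node), preserving the key's
 * original journal seq.
 */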
static noinline int wb_flush_one_slowpath(struct btree_trans *trans,
                                          struct btree_iter *iter,
                                          struct btree_write_buffered_key *wb)
{
        struct btree_path *path = btree_iter_path(trans, iter);

        bch2_btree_node_unlock_write(trans, path, path->l[0].b);

        trans->journal_res.seq = wb->journal_seq;

        return bch2_trans_update(trans, iter, &wb->k,
                                 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
                bch2_trans_commit(trans, NULL, NULL,
                                  BCH_TRANS_COMMIT_no_enospc|
                                  BCH_TRANS_COMMIT_no_check_rw|
                                  BCH_TRANS_COMMIT_no_journal_res|
                                  BCH_TRANS_COMMIT_journal_reclaim);
}

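/*
 * Fast path for flushing a single write buffered key: take a write lock on
 * the leaf node and insert directly, bypassing the transaction commit path.
 * Falls back to wb_flush_one_slowpath() if the key doesn't fit in the node.
 */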
static inline int wb_flush_one(struct btree_trans *trans, struct btree_iter *iter,
                               struct btree_write_buffered_key *wb,
                               bool *write_locked, size_t *fast)
{
        struct btree_path *path;
        int ret;

        EBUG_ON(!wb->journal_seq);
        EBUG_ON(!trans->c->btree_write_buffer.flushing.pin.seq);
        EBUG_ON(trans->c->btree_write_buffer.flushing.pin.seq > wb->journal_seq);

        ret = bch2_btree_iter_traverse(iter);
        if (ret)
                return ret;

        /*
         * We can't clone a path that has write locks: unshare it now, before
         * set_pos and traverse():
         */
        if (btree_iter_path(trans, iter)->ref > 1)
                iter->path = __bch2_btree_path_make_mut(trans, iter->path, true, _THIS_IP_);

        path = btree_iter_path(trans, iter);

        if (!*write_locked) {
                ret = bch2_btree_node_lock_write(trans, path, &path->l[0].b->c);
                if (ret)
                        return ret;

                bch2_btree_node_prep_for_write(trans, path, path->l[0].b);
                *write_locked = true;
        }

        if (unlikely(!bch2_btree_node_insert_fits(path->l[0].b, wb->k.k.u64s))) {
                *write_locked = false;
                return wb_flush_one_slowpath(trans, iter, wb);
        }

        bch2_btree_insert_key_leaf(trans, path, &wb->k, wb->journal_seq);
        (*fast)++;
        return 0;
}

/*
 * Update a btree with a write buffered key using the journal seq of the
 * original write buffer insert.
 *
 * It is not safe to rejournal the key once it has been inserted into the write
 * buffer because that may break recovery ordering. For example, the key may
 * have already been modified in the active write buffer in a seq that comes
 * before the current transaction. If we were to journal this key again and
 * crash, recovery would process updates in the wrong order.
 */
static int
btree_write_buffered_insert(struct btree_trans *trans,
                          struct btree_write_buffered_key *wb)
{
        struct btree_iter iter;
        int ret;

        bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k),
                             BTREE_ITER_CACHED|BTREE_ITER_INTENT);

        trans->journal_res.seq = wb->journal_seq;

        ret   = bch2_btree_iter_traverse(&iter) ?:
                bch2_trans_update(trans, &iter, &wb->k,
                                  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

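/*
 * Move as many keys as will fit from the incoming buffer (wb->inc) to the
 * flushing buffer, transferring the journal pin to flushing.pin and updating
 * or dropping inc.pin accordingly. Callers hold both wb->inc.lock and
 * wb->flushing.lock.
 */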
static void move_keys_from_inc_to_flushing(struct btree_write_buffer *wb)
{
        struct bch_fs *c = container_of(wb, struct bch_fs, btree_write_buffer);
        struct journal *j = &c->journal;

        if (!wb->inc.keys.nr)
                return;

        bch2_journal_pin_add(j, wb->inc.keys.data[0].journal_seq, &wb->flushing.pin,
                             bch2_btree_write_buffer_journal_flush);

        darray_resize(&wb->flushing.keys, min_t(size_t, 1U << 20, wb->flushing.keys.nr + wb->inc.keys.nr));
        darray_resize(&wb->sorted, wb->flushing.keys.size);

        if (!wb->flushing.keys.nr && wb->sorted.size >= wb->inc.keys.nr) {
                swap(wb->flushing.keys, wb->inc.keys);
                goto out;
        }

        size_t nr = min(darray_room(wb->flushing.keys),
                        wb->sorted.size - wb->flushing.keys.nr);
        nr = min(nr, wb->inc.keys.nr);

        memcpy(&darray_top(wb->flushing.keys),
               wb->inc.keys.data,
               sizeof(wb->inc.keys.data[0]) * nr);

        memmove(wb->inc.keys.data,
                wb->inc.keys.data + nr,
                sizeof(wb->inc.keys.data[0]) * (wb->inc.keys.nr - nr));

        wb->flushing.keys.nr    += nr;
        wb->inc.keys.nr         -= nr;
out:
        if (!wb->inc.keys.nr)
                bch2_journal_pin_drop(j, &wb->inc.pin);
        else
                bch2_journal_pin_update(j, wb->inc.keys.data[0].journal_seq, &wb->inc.pin,
                                        bch2_btree_write_buffer_journal_flush);

        if (j->watermark) {
                spin_lock(&j->lock);
                bch2_journal_set_watermark(j);
                spin_unlock(&j->lock);
        }

        BUG_ON(wb->sorted.size < wb->flushing.keys.nr);
}

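/*
 * Flush the write buffer: sort the pending keys, skip all but the newest
 * update to each position, then insert the remainder into their btrees.
 * Keys that can't be flushed without blocking on a journal reservation are
 * retried afterwards in journal order, so the journal pin can be released.
 * Caller holds wb->flushing.lock.
 */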
static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
{
        struct bch_fs *c = trans->c;
        struct journal *j = &c->journal;
        struct btree_write_buffer *wb = &c->btree_write_buffer;
        struct btree_iter iter = { NULL };
        size_t skipped = 0, fast = 0, slowpath = 0;
        bool write_locked = false;
        int ret = 0;

        bch2_trans_unlock(trans);
        bch2_trans_begin(trans);

        mutex_lock(&wb->inc.lock);
        move_keys_from_inc_to_flushing(wb);
        mutex_unlock(&wb->inc.lock);

        for (size_t i = 0; i < wb->flushing.keys.nr; i++) {
                wb->sorted.data[i].idx = i;
                wb->sorted.data[i].btree = wb->flushing.keys.data[i].btree;
                memcpy(&wb->sorted.data[i].pos, &wb->flushing.keys.data[i].k.k.p, sizeof(struct bpos));
        }
        wb->sorted.nr = wb->flushing.keys.nr;

        /*
         * We first sort so that we can detect and skip redundant updates, and
         * then we attempt to flush in sorted btree order, as this is most
         * efficient.
         *
         * However, since we're not flushing in the order they appear in the
         * journal we won't be able to drop our journal pin until everything is
         * flushed - which means this could deadlock the journal if we weren't
         * passing BCH_TRANS_COMMIT_journal_reclaim. This causes the update to fail
         * if it would block taking a journal reservation.
         *
         * If that happens, simply skip the key so we can optimistically insert
         * as many keys as possible in the fast path.
         */
        wb_sort(wb->sorted.data, wb->sorted.nr);

        darray_for_each(wb->sorted, i) {
                struct btree_write_buffered_key *k = &wb->flushing.keys.data[i->idx];

                for (struct wb_key_ref *n = i + 1; n < min(i + 4, &darray_top(wb->sorted)); n++)
                        prefetch(&wb->flushing.keys.data[n->idx]);

                BUG_ON(!k->journal_seq);

                if (i + 1 < &darray_top(wb->sorted) &&
                    wb_key_eq(i, i + 1)) {
                        struct btree_write_buffered_key *n = &wb->flushing.keys.data[i[1].idx];

                        skipped++;
                        n->journal_seq = min_t(u64, n->journal_seq, k->journal_seq);
                        k->journal_seq = 0;
                        continue;
                }

                if (write_locked) {
                        struct btree_path *path = btree_iter_path(trans, &iter);

                        if (path->btree_id != i->btree ||
                            bpos_gt(k->k.k.p, path->l[0].b->key.k.p)) {
                                bch2_btree_node_unlock_write(trans, path, path->l[0].b);
                                write_locked = false;
                        }
                }

                if (!iter.path || iter.btree_id != k->btree) {
                        bch2_trans_iter_exit(trans, &iter);
                        bch2_trans_iter_init(trans, &iter, k->btree, k->k.k.p,
                                             BTREE_ITER_INTENT|BTREE_ITER_ALL_SNAPSHOTS);
                }

                bch2_btree_iter_set_pos(&iter, k->k.k.p);
                btree_iter_path(trans, &iter)->preserve = false;

                do {
                        if (race_fault()) {
                                ret = -BCH_ERR_journal_reclaim_would_deadlock;
                                break;
                        }

                        ret = wb_flush_one(trans, &iter, k, &write_locked, &fast);
                        if (!write_locked)
                                bch2_trans_begin(trans);
                } while (bch2_err_matches(ret, BCH_ERR_transaction_restart));

                if (!ret) {
                        k->journal_seq = 0;
                } else if (ret == -BCH_ERR_journal_reclaim_would_deadlock) {
                        slowpath++;
                        ret = 0;
                } else
                        break;
        }

        if (write_locked) {
                struct btree_path *path = btree_iter_path(trans, &iter);
                bch2_btree_node_unlock_write(trans, path, path->l[0].b);
        }
        bch2_trans_iter_exit(trans, &iter);

        if (ret)
                goto err;

        if (slowpath) {
                /*
                 * Now flush the keys we couldn't insert above, in the order
                 * they were present in the journal, so that we can release
                 * journal pins as we go. The fast path zeroed journal_seq on
                 * the keys it flushed successfully, so those are skipped here.
                 */
                trace_and_count(c, write_buffer_flush_slowpath, trans, slowpath, wb->flushing.keys.nr);

                darray_for_each(wb->flushing.keys, i) {
                        if (!i->journal_seq)
                                continue;

                        bch2_journal_pin_update(j, i->journal_seq, &wb->flushing.pin,
                                                bch2_btree_write_buffer_journal_flush);

                        bch2_trans_begin(trans);

                        ret = commit_do(trans, NULL, NULL,
                                        BCH_WATERMARK_reclaim|
                                        BCH_TRANS_COMMIT_no_check_rw|
                                        BCH_TRANS_COMMIT_no_enospc|
                                        BCH_TRANS_COMMIT_no_journal_res|
                                        BCH_TRANS_COMMIT_journal_reclaim,
                                        btree_write_buffered_insert(trans, i));
                        if (ret)
                                goto err;
                }
        }
err:
        bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret));
        trace_write_buffer_flush(trans, wb->flushing.keys.nr, skipped, fast, 0);
        bch2_journal_pin_drop(j, &wb->flushing.pin);
        wb->flushing.keys.nr = 0;
        return ret;
}

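/*
 * Copy write buffer keys out of any journal buffers, up to @seq, that haven't
 * yet had them moved into the write buffer.
 */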
static int fetch_wb_keys_from_journal(struct bch_fs *c, u64 seq)
{
        struct journal *j = &c->journal;
        struct journal_buf *buf;
        int ret = 0;

        while (!ret && (buf = bch2_next_write_buffer_flush_journal_buf(j, seq))) {
                ret = bch2_journal_keys_to_write_buffer(c, buf);
                mutex_unlock(&j->buf_lock);
        }

        return ret;
}

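/*
 * Flush the write buffer repeatedly until nothing in it pins a journal
 * sequence number <= @seq (or an error occurs).
 */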
static int btree_write_buffer_flush_seq(struct btree_trans *trans, u64 seq)
{
        struct bch_fs *c = trans->c;
        struct btree_write_buffer *wb = &c->btree_write_buffer;
        int ret = 0, fetch_from_journal_err;

        do {
                bch2_trans_unlock(trans);

                fetch_from_journal_err = fetch_wb_keys_from_journal(c, seq);

                /*
                 * On memory allocation failure, bch2_btree_write_buffer_flush_locked()
                 * is not guaranteed to empty wb->inc:
                 */
                mutex_lock(&wb->flushing.lock);
                ret = bch2_btree_write_buffer_flush_locked(trans);
                mutex_unlock(&wb->flushing.lock);
        } while (!ret &&
                 (fetch_from_journal_err ||
                  (wb->inc.pin.seq && wb->inc.pin.seq <= seq) ||
                  (wb->flushing.pin.seq && wb->flushing.pin.seq <= seq)));

        return ret;
}

static int bch2_btree_write_buffer_journal_flush(struct journal *j,
                                struct journal_entry_pin *_pin, u64 seq)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);

        return bch2_trans_run(c, btree_write_buffer_flush_seq(trans, seq));
}

int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans)
{
        struct bch_fs *c = trans->c;

        trace_and_count(c, write_buffer_flush_sync, trans, _RET_IP_);

        return btree_write_buffer_flush_seq(trans, journal_cur_seq(&c->journal));
}

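/*
 * Best-effort flush: only runs if wb->flushing.lock can be taken without
 * blocking. This variant doesn't take a write ref;
 * bch2_btree_write_buffer_tryflush() below wraps it with one.
 */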
int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *trans)
{
        struct bch_fs *c = trans->c;
        struct btree_write_buffer *wb = &c->btree_write_buffer;
        int ret = 0;

        if (mutex_trylock(&wb->flushing.lock)) {
                ret = bch2_btree_write_buffer_flush_locked(trans);
                mutex_unlock(&wb->flushing.lock);
        }

        return ret;
}

int bch2_btree_write_buffer_tryflush(struct btree_trans *trans)
{
        struct bch_fs *c = trans->c;

        if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_write_buffer))
                return -BCH_ERR_erofs_no_writes;

        int ret = bch2_btree_write_buffer_flush_nocheck_rw(trans);
        bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer);
        return ret;
}

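/*
 * Background flush, queued from bch2_journal_keys_to_write_buffer_end() when
 * the write buffer is getting full; the queuer takes the write ref that we
 * drop here.
 */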
static void bch2_btree_write_buffer_flush_work(struct work_struct *work)
{
        struct bch_fs *c = container_of(work, struct bch_fs, btree_write_buffer.flush_work);
        struct btree_write_buffer *wb = &c->btree_write_buffer;
        int ret;

        mutex_lock(&wb->flushing.lock);
        do {
                ret = bch2_trans_run(c, bch2_btree_write_buffer_flush_locked(trans));
        } while (!ret && bch2_btree_write_buffer_should_flush(c));
        mutex_unlock(&wb->flushing.lock);

        bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer);
}

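/*
 * Slow path for adding a journal key to the write buffer: the destination
 * darray is full, so grow it. If allocation fails while we're targeting
 * wb->flushing, fall back to wb->inc (adding a journal pin for it) rather
 * than failing the whole operation.
 */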
int bch2_journal_key_to_wb_slowpath(struct bch_fs *c,
                             struct journal_keys_to_wb *dst,
                             enum btree_id btree, struct bkey_i *k)
{
        struct btree_write_buffer *wb = &c->btree_write_buffer;
        int ret;
retry:
        ret = darray_make_room_gfp(&dst->wb->keys, 1, GFP_KERNEL);
        if (!ret && dst->wb == &wb->flushing)
                ret = darray_resize(&wb->sorted, wb->flushing.keys.size);

        if (unlikely(ret)) {
                if (dst->wb == &c->btree_write_buffer.flushing) {
                        mutex_unlock(&dst->wb->lock);
                        dst->wb = &c->btree_write_buffer.inc;
                        bch2_journal_pin_add(&c->journal, dst->seq, &dst->wb->pin,
                                             bch2_btree_write_buffer_journal_flush);
                        goto retry;
                }

                return ret;
        }

        dst->room = darray_room(dst->wb->keys);
        if (dst->wb == &wb->flushing)
                dst->room = min(dst->room, wb->sorted.size - wb->flushing.keys.nr);
        BUG_ON(!dst->room);
        BUG_ON(!dst->seq);

        struct btree_write_buffered_key *wb_k = &darray_top(dst->wb->keys);
        wb_k->journal_seq       = dst->seq;
        wb_k->btree             = btree;
        bkey_copy(&wb_k->k, k);
        dst->wb->keys.nr++;
        dst->room--;
        return 0;
}

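/*
 * Begin copying keys from a journal buffer into the write buffer: pick a
 * destination (wb->flushing directly if we can get its lock and wb->inc is
 * empty, otherwise wb->inc), take the corresponding locks, and pin @seq.
 * Must be paired with bch2_journal_keys_to_write_buffer_end().
 */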
void bch2_journal_keys_to_write_buffer_start(struct bch_fs *c, struct journal_keys_to_wb *dst, u64 seq)
{
        struct btree_write_buffer *wb = &c->btree_write_buffer;

        if (mutex_trylock(&wb->flushing.lock)) {
                mutex_lock(&wb->inc.lock);
                move_keys_from_inc_to_flushing(wb);

                /*
                 * Attempt to skip wb->inc, and add keys directly to
                 * wb->flushing, saving us a copy later:
                 */

                if (!wb->inc.keys.nr) {
                        dst->wb = &wb->flushing;
                } else {
                        mutex_unlock(&wb->flushing.lock);
                        dst->wb = &wb->inc;
                }
        } else {
                mutex_lock(&wb->inc.lock);
                dst->wb = &wb->inc;
        }

        dst->room = darray_room(dst->wb->keys);
        if (dst->wb == &wb->flushing)
                dst->room = min(dst->room, wb->sorted.size - wb->flushing.keys.nr);
        dst->seq = seq;

        bch2_journal_pin_add(&c->journal, seq, &dst->wb->pin,
                             bch2_btree_write_buffer_journal_flush);
}

void bch2_journal_keys_to_write_buffer_end(struct bch_fs *c, struct journal_keys_to_wb *dst)
{
        struct btree_write_buffer *wb = &c->btree_write_buffer;

        if (!dst->wb->keys.nr)
                bch2_journal_pin_drop(&c->journal, &dst->wb->pin);

        if (bch2_btree_write_buffer_should_flush(c) &&
            __bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_write_buffer) &&
            !queue_work(system_unbound_wq, &c->btree_write_buffer.flush_work))
                bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer);

        if (dst->wb == &wb->flushing)
                mutex_unlock(&wb->flushing.lock);
        mutex_unlock(&wb->inc.lock);
}

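/*
 * Move all write buffer keys in a journal buffer into the write buffer,
 * retagging the journal entries as ordinary btree key entries once copied.
 */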
static int bch2_journal_keys_to_write_buffer(struct bch_fs *c, struct journal_buf *buf)
{
        struct journal_keys_to_wb dst;
        struct jset_entry *entry;
        struct bkey_i *k;
        int ret = 0;

        bch2_journal_keys_to_write_buffer_start(c, &dst, le64_to_cpu(buf->data->seq));

        for_each_jset_entry_type(entry, buf->data, BCH_JSET_ENTRY_write_buffer_keys) {
                jset_entry_for_each_key(entry, k) {
                        ret = bch2_journal_key_to_wb(c, &dst, entry->btree_id, k);
                        if (ret)
                                goto out;
                }

                entry->type = BCH_JSET_ENTRY_btree_keys;
        }

        spin_lock(&c->journal.lock);
        buf->need_flush_to_write_buffer = false;
        spin_unlock(&c->journal.lock);
out:
        bch2_journal_keys_to_write_buffer_end(c, &dst);
        return ret;
}

static int wb_keys_resize(struct btree_write_buffer_keys *wb, size_t new_size)
{
        if (wb->keys.size >= new_size)
                return 0;

        if (!mutex_trylock(&wb->lock))
                return -EINTR;

        int ret = darray_resize(&wb->keys, new_size);
        mutex_unlock(&wb->lock);
        return ret;
}

int bch2_btree_write_buffer_resize(struct bch_fs *c, size_t new_size)
{
        struct btree_write_buffer *wb = &c->btree_write_buffer;

        return wb_keys_resize(&wb->flushing, new_size) ?:
                wb_keys_resize(&wb->inc, new_size);
}

void bch2_fs_btree_write_buffer_exit(struct bch_fs *c)
{
        struct btree_write_buffer *wb = &c->btree_write_buffer;

        BUG_ON((wb->inc.keys.nr || wb->flushing.keys.nr) &&
               !bch2_journal_error(&c->journal));

        darray_exit(&wb->sorted);
        darray_exit(&wb->flushing.keys);
        darray_exit(&wb->inc.keys);
}

int bch2_fs_btree_write_buffer_init(struct bch_fs *c)
{
        struct btree_write_buffer *wb = &c->btree_write_buffer;

        mutex_init(&wb->inc.lock);
        mutex_init(&wb->flushing.lock);
        INIT_WORK(&wb->flush_work, bch2_btree_write_buffer_flush_work);

        /* Will be resized by journal as needed: */
        unsigned initial_size = 1 << 16;

        return  darray_make_room(&wb->inc.keys, initial_size) ?:
                darray_make_room(&wb->flushing.keys, initial_size) ?:
                darray_make_room(&wb->sorted, initial_size);
}