// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"

#include <linux/prefetch.h>

static int bch2_btree_write_buffer_journal_flush(struct journal *,
				struct journal_entry_pin *, u64);

static int bch2_journal_keys_to_write_buffer(struct bch_fs *, struct journal_buf *);

static inline bool __wb_key_cmp(const struct wb_key_ref *l, const struct wb_key_ref *r)
{
	return (cmp_int(l->hi, r->hi) ?:
		cmp_int(l->mi, r->mi) ?:
		cmp_int(l->lo, r->lo)) >= 0;
}
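
/*
 * Note on the x86-64 fastpath below: the mov/sub/sbb chain subtracts the three
 * u64 words of *r from *l, least significant word first with the borrow
 * propagating upwards, and "=@ccae" reads back the CPU's "above or equal"
 * condition - an unsigned 192-bit l >= r, matching __wb_key_cmp().
 */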

static inline bool wb_key_cmp(const struct wb_key_ref *l, const struct wb_key_ref *r)
{
#ifdef CONFIG_X86_64
	int cmp;

	asm(".intel_syntax noprefix;"
	    "mov rax, [%[l]];"
	    "sub rax, [%[r]];"
	    "mov rax, [%[l] + 8];"
	    "sbb rax, [%[r] + 8];"
	    "mov rax, [%[l] + 16];"
	    "sbb rax, [%[r] + 16];"
	    ".att_syntax prefix;"
	    : "=@ccae" (cmp)
	    : [l] "r" (l), [r] "r" (r)
	    : "rax", "cc");

	EBUG_ON(cmp != __wb_key_cmp(l, r));
	return cmp;
#else
	return __wb_key_cmp(l, r);
#endif
}

/* Compare excluding idx, the low 24 bits: */
static inline bool wb_key_eq(const void *_l, const void *_r)
{
	const struct wb_key_ref *l = _l;
	const struct wb_key_ref *r = _r;

	return !((l->hi ^ r->hi)|
		 (l->mi ^ r->mi)|
		 ((l->lo >> 24) ^ (r->lo >> 24)));
}
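
/*
 * A wb_key_ref packs the btree id and key position into three u64s (hi, mi,
 * lo), with the key's index in the flushing array in the low 24 bits of lo:
 * sorting refs orders keys by position with ties broken by order of insertion,
 * and wb_key_eq() flags consecutive updates to the same position so the older
 * one can be dropped.
 */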

static noinline void wb_sort(struct wb_key_ref *base, size_t num)
{
	size_t n = num, a = num / 2;

	if (!a)		/* num < 2 || size == 0 */
		return;

	for (;;) {
		size_t b, c, d;

		if (a)			/* Building heap: sift down --a */
			--a;
		else if (--n)		/* Sorting: Extract root to --n */
			swap(base[0], base[n]);
		else			/* Sort complete */
			break;

		/*
		 * Sift element at "a" down into heap. This is the
		 * "bottom-up" variant, which significantly reduces
		 * calls to cmp_func(): we find the sift-down path all
		 * the way to the leaves (one compare per level), then
		 * backtrack to find where to insert the target element.
		 *
		 * Because elements tend to sift down close to the leaves,
		 * this uses fewer compares than doing two per level
		 * on the way down. (A bit more than half as many on
		 * average, 3/4 worst-case.)
		 */
		for (b = a; c = 2*b + 1, (d = c + 1) < n;)
			b = wb_key_cmp(base + c, base + d) ? c : d;
		if (d == n)	/* Special case last leaf with no sibling */
			b = c;

		/* Now backtrack from "b" to the correct location for "a" */
		while (b != a && wb_key_cmp(base + a, base + b))
			b = (b - 1) / 2;
		c = b;			/* Where "a" belongs */
		while (b != a) {	/* Shift it into place */
			b = (b - 1) / 2;
			swap(base[b], base[c]);
		}
	}
}
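
/*
 * Indexing above: in the 0-based implicit heap, node b has children 2*b + 1
 * and 2*b + 2 and parent (b - 1) / 2 - hence the sift-down loop descending via
 * c = 2*b + 1 and both backtracking loops ascending via b = (b - 1) / 2.
 */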

static noinline int wb_flush_one_slowpath(struct btree_trans *trans,
					  struct btree_iter *iter,
					  struct btree_write_buffered_key *wb)
{
	bch2_btree_node_unlock_write(trans, iter->path, iter->path->l[0].b);

	trans->journal_res.seq = wb->journal_seq;

	return bch2_trans_update(trans, iter, &wb->k,
				 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BCH_TRANS_COMMIT_no_enospc|
				  BCH_TRANS_COMMIT_no_check_rw|
				  BCH_TRANS_COMMIT_no_journal_res|
				  BCH_TRANS_COMMIT_journal_reclaim);
}
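
/*
 * Fastpath for flushing a single write buffered key: insert directly into the
 * write locked leaf, bypassing the transaction commit path; if the key doesn't
 * fit in the current leaf we fall back to wb_flush_one_slowpath(), a full
 * transaction commit that reuses the key's original journal seq.
 */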

static inline int wb_flush_one(struct btree_trans *trans, struct btree_iter *iter,
			       struct btree_write_buffered_key *wb,
			       bool *write_locked, size_t *fast)
{
	struct bch_fs *c = trans->c;
	struct btree_path *path;
	int ret;

	EBUG_ON(!wb->journal_seq);
	EBUG_ON(!c->btree_write_buffer.flushing.pin.seq);
	EBUG_ON(c->btree_write_buffer.flushing.pin.seq > wb->journal_seq);

	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return ret;

	/*
	 * We can't clone a path that has write locks: unshare it now, before
	 * set_pos and traverse():
	 */
	if (iter->path->ref > 1)
		iter->path = __bch2_btree_path_make_mut(trans, iter->path, true, _THIS_IP_);

	path = iter->path;

	if (!*write_locked) {
		ret = bch2_btree_node_lock_write(trans, path, &path->l[0].b->c);
		if (ret)
			return ret;

		bch2_btree_node_prep_for_write(trans, path, path->l[0].b);
		*write_locked = true;
	}

	if (unlikely(!bch2_btree_node_insert_fits(c, path->l[0].b, wb->k.k.u64s))) {
		*write_locked = false;
		return wb_flush_one_slowpath(trans, iter, wb);
	}

	bch2_btree_insert_key_leaf(trans, path, &wb->k, wb->journal_seq);
	(*fast)++;
	return 0;
}

/*
 * Update a btree with a write buffered key using the journal seq of the
 * original write buffer insert.
 *
 * It is not safe to rejournal the key once it has been inserted into the write
 * buffer because that may break recovery ordering. For example, the key may
 * have already been modified in the active write buffer in a seq that comes
 * before the current transaction. If we were to journal this key again and
 * crash, recovery would process updates in the wrong order.
 */
static int
btree_write_buffered_insert(struct btree_trans *trans,
			    struct btree_write_buffered_key *wb)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k),
			     BTREE_ITER_CACHED|BTREE_ITER_INTENT);

	trans->journal_res.seq = wb->journal_seq;

	ret = bch2_btree_iter_traverse(&iter) ?:
	      bch2_trans_update(trans, &iter, &wb->k,
				BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
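
/*
 * The write buffer is split in two: new keys land in wb->inc, while
 * wb->flushing is the set currently being sorted and flushed. This helper tops
 * flushing up from inc, transferring responsibility for the affected journal
 * entries from the inc pin to the flushing pin:
 */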

static void move_keys_from_inc_to_flushing(struct btree_write_buffer *wb)
{
	struct bch_fs *c = container_of(wb, struct bch_fs, btree_write_buffer);
	struct journal *j = &c->journal;

	if (!wb->inc.keys.nr)
		return;

	bch2_journal_pin_add(j, wb->inc.keys.data[0].journal_seq, &wb->flushing.pin,
			     bch2_btree_write_buffer_journal_flush);

	darray_resize(&wb->flushing.keys, min_t(size_t, 1U << 20, wb->flushing.keys.nr + wb->inc.keys.nr));
	darray_resize(&wb->sorted, wb->flushing.keys.size);

	if (!wb->flushing.keys.nr && wb->sorted.size >= wb->inc.keys.nr) {
		swap(wb->flushing.keys, wb->inc.keys);
		goto out;
	}

	size_t nr = min(darray_room(wb->flushing.keys),
			wb->sorted.size - wb->flushing.keys.nr);
	nr = min(nr, wb->inc.keys.nr);

	memcpy(&darray_top(wb->flushing.keys),
	       wb->inc.keys.data,
	       sizeof(wb->inc.keys.data[0]) * nr);

	memmove(wb->inc.keys.data,
		wb->inc.keys.data + nr,
		sizeof(wb->inc.keys.data[0]) * (wb->inc.keys.nr - nr));

	wb->flushing.keys.nr += nr;
	wb->inc.keys.nr -= nr;
out:
	if (!wb->inc.keys.nr)
		bch2_journal_pin_drop(j, &wb->inc.pin);
	else
		bch2_journal_pin_update(j, wb->inc.keys.data[0].journal_seq, &wb->inc.pin,
					bch2_btree_write_buffer_journal_flush);

	if (j->watermark) {
		spin_lock(&j->lock);
		bch2_journal_set_watermark(j);
		spin_unlock(&j->lock);
	}

	BUG_ON(wb->sorted.size < wb->flushing.keys.nr);
}

static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct journal *j = &c->journal;
	struct btree_write_buffer *wb = &c->btree_write_buffer;
	struct wb_key_ref *i;
	struct btree_iter iter = { NULL };
	size_t skipped = 0, fast = 0, slowpath = 0;
	bool write_locked = false;
	int ret = 0;

	bch2_trans_unlock(trans);
	bch2_trans_begin(trans);

	mutex_lock(&wb->inc.lock);
	move_keys_from_inc_to_flushing(wb);
	mutex_unlock(&wb->inc.lock);

	for (size_t i = 0; i < wb->flushing.keys.nr; i++) {
		wb->sorted.data[i].idx = i;
		wb->sorted.data[i].btree = wb->flushing.keys.data[i].btree;
		memcpy(&wb->sorted.data[i].pos, &wb->flushing.keys.data[i].k.k.p, sizeof(struct bpos));
	}
	wb->sorted.nr = wb->flushing.keys.nr;
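
	/*
	 * wb->sorted holds compact three-u64 refs (btree, pos, idx) into
	 * wb->flushing.keys, so the sort below swaps small fixed size
	 * elements; the full keys are only touched through idx, with a little
	 * prefetching ahead of the sorted order.
	 */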

	/*
	 * We first sort so that we can detect and skip redundant updates, and
	 * then we attempt to flush in sorted btree order, as this is most
	 * efficient.
	 *
	 * However, since we're not flushing in the order they appear in the
	 * journal we won't be able to drop our journal pin until everything is
	 * flushed - which means this could deadlock the journal if we weren't
	 * passing BCH_TRANS_COMMIT_journal_reclaim. This causes the update to fail
	 * if it would block taking a journal reservation.
	 *
	 * If that happens, simply skip the key so we can optimistically insert
	 * as many keys as possible in the fast path.
	 */
	wb_sort(wb->sorted.data, wb->sorted.nr);

	darray_for_each(wb->sorted, i) {
		struct btree_write_buffered_key *k = &wb->flushing.keys.data[i->idx];

		for (struct wb_key_ref *n = i + 1; n < min(i + 4, &darray_top(wb->sorted)); n++)
			prefetch(&wb->flushing.keys.data[n->idx]);

		BUG_ON(!k->journal_seq);
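
		/*
		 * Refs sort by position, ties broken by idx - i.e. by order of
		 * insertion - so if the next sorted entry is equal excluding
		 * idx it's a newer update to the same position: skip this one,
		 * but carry the older journal_seq over so the journal pin
		 * still covers the surviving update.
		 */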
		if (i + 1 < &darray_top(wb->sorted) &&
		    wb_key_eq(i, i + 1)) {
			struct btree_write_buffered_key *n = &wb->flushing.keys.data[i[1].idx];

			skipped++;
			n->journal_seq = min_t(u64, n->journal_seq, k->journal_seq);
			k->journal_seq = 0;
			continue;
		}

		if (write_locked &&
		    (iter.path->btree_id != k->btree ||
		     bpos_gt(k->k.k.p, iter.path->l[0].b->key.k.p))) {
			bch2_btree_node_unlock_write(trans, iter.path, iter.path->l[0].b);
			write_locked = false;
		}

		if (!iter.path || iter.path->btree_id != k->btree) {
			bch2_trans_iter_exit(trans, &iter);
			bch2_trans_iter_init(trans, &iter, k->btree, k->k.k.p,
					     BTREE_ITER_INTENT|BTREE_ITER_ALL_SNAPSHOTS);
		}

		bch2_btree_iter_set_pos(&iter, k->k.k.p);
		iter.path->preserve = false;

		do {
			if (race_fault()) {
				ret = -BCH_ERR_journal_reclaim_would_deadlock;
				break;
			}

			ret = wb_flush_one(trans, &iter, k, &write_locked, &fast);
			if (!write_locked)
				bch2_trans_begin(trans);
		} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));

		if (!ret) {
			k->journal_seq = 0;
		} else if (ret == -BCH_ERR_journal_reclaim_would_deadlock) {
			slowpath++;
			ret = 0;
		} else
			break;
	}

	if (write_locked)
		bch2_btree_node_unlock_write(trans, iter.path, iter.path->l[0].b);
	bch2_trans_iter_exit(trans, &iter);

	if (ret)
		goto err;

	if (slowpath) {
		/*
		 * Flush in the order they were present in the journal, so that
		 * we can release journal pins:
		 * The fastpath zapped the seq of keys that were successfully flushed so
		 * we can skip those here.
		 */
		trace_write_buffer_flush_slowpath(trans, slowpath, wb->flushing.keys.nr);

		struct btree_write_buffered_key *i;
		darray_for_each(wb->flushing.keys, i) {
			if (!i->journal_seq)
				continue;

			bch2_journal_pin_update(j, i->journal_seq, &wb->flushing.pin,
						bch2_btree_write_buffer_journal_flush);

			bch2_trans_begin(trans);

			ret = commit_do(trans, NULL, NULL,
					BCH_WATERMARK_reclaim|
					BCH_TRANS_COMMIT_no_check_rw|
					BCH_TRANS_COMMIT_no_enospc|
					BCH_TRANS_COMMIT_no_journal_res|
					BCH_TRANS_COMMIT_journal_reclaim,
					btree_write_buffered_insert(trans, i));
			if (ret)
				goto err;
		}
	}
err:
	bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret));
	trace_write_buffer_flush(trans, wb->flushing.keys.nr, skipped, fast, 0);
	bch2_journal_pin_drop(j, &wb->flushing.pin);
	wb->flushing.keys.nr = 0;
	return ret;
}
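
/*
 * Copy keys destined for the write buffer out of any journal buffers, up to
 * and including @seq, that haven't been copied yet:
 */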

static int fetch_wb_keys_from_journal(struct bch_fs *c, u64 seq)
{
	struct journal *j = &c->journal;
	struct journal_buf *buf;
	int ret = 0;

	mutex_lock(&j->buf_lock);
	while ((buf = bch2_next_write_buffer_flush_journal_buf(j, seq)))
		if (bch2_journal_keys_to_write_buffer(c, buf)) {
			ret = -ENOMEM;
			break;
		}
	mutex_unlock(&j->buf_lock);

	return ret;
}

int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_write_buffer *wb = &c->btree_write_buffer;
	int ret = 0, fetch_from_journal_err;

	trace_write_buffer_flush_sync(trans, _RET_IP_);
retry:
	bch2_trans_unlock(trans);

	bch2_journal_block_reservations(&c->journal);
	fetch_from_journal_err = fetch_wb_keys_from_journal(c, U64_MAX);
	bch2_journal_unblock(&c->journal);

	/*
	 * On memory allocation failure, bch2_btree_write_buffer_flush_locked()
	 * is not guaranteed to empty wb->inc:
	 */
	mutex_lock(&wb->flushing.lock);
	while (!ret &&
	       (wb->flushing.keys.nr || wb->inc.keys.nr))
		ret = bch2_btree_write_buffer_flush_locked(trans);
	mutex_unlock(&wb->flushing.lock);

	if (!ret && fetch_from_journal_err)
		goto retry;

	return ret;
}

int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_write_buffer *wb = &c->btree_write_buffer;
	int ret = 0;

	if (mutex_trylock(&wb->flushing.lock)) {
		ret = bch2_btree_write_buffer_flush_locked(trans);
		mutex_unlock(&wb->flushing.lock);
	}

	return ret;
}

int bch2_btree_write_buffer_tryflush(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_write_buffer))
		return -BCH_ERR_erofs_no_writes;

	int ret = bch2_btree_write_buffer_flush_nocheck_rw(trans);
	bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer);
	return ret;
}
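
/*
 * Journal pin flush callback: journal reclaim is asking us to release journal
 * pins at or before @seq, so keep flushing until both the flushing and inc
 * pins have advanced past @seq (or we hit an error):
 */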

static int bch2_btree_write_buffer_journal_flush(struct journal *j,
				struct journal_entry_pin *_pin, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct btree_write_buffer *wb = &c->btree_write_buffer;
	int ret, fetch_from_journal_err;

	do {
		fetch_from_journal_err = fetch_wb_keys_from_journal(c, seq);

		mutex_lock(&wb->flushing.lock);
		ret = bch2_trans_run(c, bch2_btree_write_buffer_flush_locked(trans));
		mutex_unlock(&wb->flushing.lock);
	} while (!ret &&
		 (fetch_from_journal_err ||
		  (wb->flushing.pin.seq && wb->flushing.pin.seq <= seq) ||
		  (wb->inc.pin.seq && wb->inc.pin.seq <= seq)));

	return ret;
}

static void bch2_btree_write_buffer_flush_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, btree_write_buffer.flush_work);
	struct btree_write_buffer *wb = &c->btree_write_buffer;
	int ret;

	mutex_lock(&wb->flushing.lock);
	do {
		ret = bch2_trans_run(c, bch2_btree_write_buffer_flush_locked(trans));
	} while (!ret && bch2_btree_write_buffer_should_flush(c));
	mutex_unlock(&wb->flushing.lock);

	bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer);
}
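
/*
 * Slowpath for bch2_journal_key_to_wb(): called when the caller's room
 * reservation (dst->room) is exhausted and the destination darray must be
 * grown; if growing wb->flushing (and wb->sorted alongside it) fails, we
 * switch mid stream to appending to wb->inc instead.
 */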

int __bch2_journal_key_to_wb(struct bch_fs *c,
			     struct journal_keys_to_wb *dst,
			     enum btree_id btree, struct bkey_i *k)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;
	int ret;
retry:
	ret = darray_make_room_gfp(&dst->wb->keys, 1, GFP_KERNEL);
	if (!ret && dst->wb == &wb->flushing)
		ret = darray_resize(&wb->sorted, wb->flushing.keys.size);

	if (unlikely(ret)) {
		if (dst->wb == &c->btree_write_buffer.flushing) {
			mutex_unlock(&dst->wb->lock);
			dst->wb = &c->btree_write_buffer.inc;
			bch2_journal_pin_add(&c->journal, dst->seq, &dst->wb->pin,
					     bch2_btree_write_buffer_journal_flush);
			goto retry;
		}

		return ret;
	}

	dst->room = darray_room(dst->wb->keys);
	if (dst->wb == &wb->flushing)
		dst->room = min(dst->room, wb->sorted.size - wb->flushing.keys.nr);
	BUG_ON(!dst->room);
	BUG_ON(!dst->seq);

	struct btree_write_buffered_key *wb_k = &darray_top(dst->wb->keys);
	wb_k->journal_seq = dst->seq;
	wb_k->btree = btree;
	bkey_copy(&wb_k->k, k);
	dst->wb->keys.nr++;
	dst->room--;
	return 0;
}

void bch2_journal_keys_to_write_buffer_start(struct bch_fs *c, struct journal_keys_to_wb *dst, u64 seq)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	if (mutex_trylock(&wb->flushing.lock)) {
		mutex_lock(&wb->inc.lock);
		move_keys_from_inc_to_flushing(wb);

		/*
		 * Attempt to skip wb->inc, and add keys directly to
		 * wb->flushing, saving us a copy later:
		 */

		if (!wb->inc.keys.nr) {
			dst->wb = &wb->flushing;
		} else {
			mutex_unlock(&wb->flushing.lock);
			dst->wb = &wb->inc;
		}
	} else {
		mutex_lock(&wb->inc.lock);
		dst->wb = &wb->inc;
	}

	dst->room = darray_room(dst->wb->keys);
	if (dst->wb == &wb->flushing)
		dst->room = min(dst->room, wb->sorted.size - wb->flushing.keys.nr);
	dst->seq = seq;

	bch2_journal_pin_add(&c->journal, seq, &dst->wb->pin,
			     bch2_btree_write_buffer_journal_flush);
}
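
/*
 * On return from bch2_journal_keys_to_write_buffer_start(), wb->inc.lock is
 * always held and wb->flushing.lock is additionally held iff dst->wb points at
 * wb->flushing; bch2_journal_keys_to_write_buffer_end() drops exactly those
 * locks.
 */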

void bch2_journal_keys_to_write_buffer_end(struct bch_fs *c, struct journal_keys_to_wb *dst)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	if (!dst->wb->keys.nr)
		bch2_journal_pin_drop(&c->journal, &dst->wb->pin);

	if (bch2_btree_write_buffer_should_flush(c) &&
	    __bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_write_buffer) &&
	    !queue_work(system_unbound_wq, &c->btree_write_buffer.flush_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer);

	if (dst->wb == &wb->flushing)
		mutex_unlock(&wb->flushing.lock);
	mutex_unlock(&wb->inc.lock);
}

static int bch2_journal_keys_to_write_buffer(struct bch_fs *c, struct journal_buf *buf)
{
	struct journal_keys_to_wb dst;
	struct jset_entry *entry;
	struct bkey_i *k;
	int ret = 0;

	bch2_journal_keys_to_write_buffer_start(c, &dst, le64_to_cpu(buf->data->seq));

	for_each_jset_entry_type(entry, buf->data, BCH_JSET_ENTRY_write_buffer_keys) {
		jset_entry_for_each_key(entry, k) {
			ret = bch2_journal_key_to_wb(c, &dst, entry->btree_id, k);
			if (ret)
				goto out;
		}

		entry->type = BCH_JSET_ENTRY_btree_keys;
	}

	buf->need_flush_to_write_buffer = false;
out:
	bch2_journal_keys_to_write_buffer_end(c, &dst);
	return ret;
}

static int wb_keys_resize(struct btree_write_buffer_keys *wb, size_t new_size)
{
	if (wb->keys.size >= new_size)
		return 0;

	if (!mutex_trylock(&wb->lock))
		return -EINTR;

	int ret = darray_resize(&wb->keys, new_size);
	mutex_unlock(&wb->lock);
	return ret;
}

int bch2_btree_write_buffer_resize(struct bch_fs *c, size_t new_size)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	return wb_keys_resize(&wb->flushing, new_size) ?:
	       wb_keys_resize(&wb->inc, new_size);
}

void bch2_fs_btree_write_buffer_exit(struct bch_fs *c)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	BUG_ON((wb->inc.keys.nr || wb->flushing.keys.nr) &&
	       !bch2_journal_error(&c->journal));

	darray_exit(&wb->sorted);
	darray_exit(&wb->flushing.keys);
	darray_exit(&wb->inc.keys);
}

int bch2_fs_btree_write_buffer_init(struct bch_fs *c)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	mutex_init(&wb->inc.lock);
	mutex_init(&wb->flushing.lock);
	INIT_WORK(&wb->flush_work, bch2_btree_write_buffer_flush_work);

	/* Will be resized by journal as needed: */
	unsigned initial_size = 1 << 16;

	return darray_make_room(&wb->inc.keys, initial_size) ?:
	       darray_make_room(&wb->flushing.keys, initial_size) ?:
	       darray_make_room(&wb->sorted, initial_size);
}