// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "error.h"
#include "journal.h"
#include "journal_reclaim.h"

#include <linux/sort.h>

static int bch2_btree_write_buffer_journal_flush(struct journal *,
				struct journal_entry_pin *, u64);
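
/*
 * Sort order for the flush fastpath: group updates by btree and position
 * so that redundant updates to the same key are adjacent, ordered oldest
 * to newest in the journal so that the last one wins.
 */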
static int btree_write_buffered_key_cmp(const void *_l, const void *_r)
{
	const struct btree_write_buffered_key *l = _l;
	const struct btree_write_buffered_key *r = _r;

	return  cmp_int(l->btree, r->btree) ?:
		bpos_cmp(l->k.k.p, r->k.k.p) ?:
		cmp_int(l->journal_seq, r->journal_seq) ?:
		cmp_int(l->journal_offset, r->journal_offset);
}
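
/* Sort order for the slowpath: strictly by journal sequence. */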
static int btree_write_buffered_journal_cmp(const void *_l, const void *_r)
{
	const struct btree_write_buffered_key *l = _l;
	const struct btree_write_buffered_key *r = _r;

	return  cmp_int(l->journal_seq, r->journal_seq);
}

static int bch2_btree_write_buffer_flush_one(struct btree_trans *trans,
					     struct btree_iter *iter,
					     struct btree_write_buffered_key *wb,
					     unsigned commit_flags,
					     bool *write_locked,
					     size_t *fast)
{
	struct bch_fs *c = trans->c;
	struct btree_path *path;
	int ret;

	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return ret;

	/*
	 * We can't clone a path that has write locks: unshare it now, before
	 * set_pos and traverse():
	 */
	if (iter->path->ref > 1)
		iter->path = __bch2_btree_path_make_mut(trans, iter->path, true, _THIS_IP_);

	path = iter->path;

	if (!*write_locked) {
		ret = bch2_btree_node_lock_write(trans, path, &path->l[0].b->c);
		if (ret)
			return ret;

		bch2_btree_node_prep_for_write(trans, path, path->l[0].b);
		*write_locked = true;
	}

	if (!bch2_btree_node_insert_fits(c, path->l[0].b, wb->k.k.u64s)) {
		bch2_btree_node_unlock_write(trans, path, path->l[0].b);
		*write_locked = false;
		goto trans_commit;
	}

	bch2_btree_insert_key_leaf(trans, path, &wb->k, wb->journal_seq);
	(*fast)++;
	return 0;
trans_commit:
	trans->journal_res.seq = wb->journal_seq;

	return  bch2_trans_update(trans, iter, &wb->k,
				  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  commit_flags|
				  BCH_TRANS_COMMIT_no_check_rw|
				  BCH_TRANS_COMMIT_no_enospc|
				  BCH_TRANS_COMMIT_no_journal_res|
				  BCH_TRANS_COMMIT_journal_reclaim);
}
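
/*
 * Atomically switch which of the two key buffers is active, so that new
 * updates go to the other buffer while we flush this one; returns the old
 * state (buffer index and number of buffered keys) once all writers to
 * the old buffer have dropped their refs.
 */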
static union btree_write_buffer_state btree_write_buffer_switch(struct btree_write_buffer *wb)
{
	union btree_write_buffer_state old, new;
	u64 v = READ_ONCE(wb->state.v);

	do {
		old.v = new.v = v;

		new.nr = 0;
		new.idx++;
	} while ((v = atomic64_cmpxchg_acquire(&wb->state.counter, old.v, new.v)) != old.v);

	while (old.idx == 0 ? wb->state.ref0 : wb->state.ref1)
		cpu_relax();

	smp_mb();

	return old;
}

/*
 * Update a btree with a write buffered key using the journal seq of the
 * original write buffer insert.
 *
 * It is not safe to rejournal the key once it has been inserted into the write
 * buffer because that may break recovery ordering. For example, the key may
 * have already been modified in the active write buffer in a seq that comes
 * before the current transaction. If we were to journal this key again and
 * crash, recovery would process updates in the wrong order.
 */
static int
btree_write_buffered_insert(struct btree_trans *trans,
			  struct btree_write_buffered_key *wb)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k),
			     BTREE_ITER_CACHED|BTREE_ITER_INTENT);

	trans->journal_res.seq = wb->journal_seq;

	ret =   bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(trans, &iter, &wb->k,
				  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
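
/*
 * Flush all buffered keys to their btrees; caller must hold
 * btree_write_buffer.flush_lock.
 */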
int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct journal *j = &c->journal;
	struct btree_write_buffer *wb = &c->btree_write_buffer;
	struct journal_entry_pin pin;
	struct btree_write_buffered_key *i, *keys;
	struct btree_iter iter = { NULL };
	size_t nr = 0, skipped = 0, fast = 0, slowpath = 0;
	bool write_locked = false;
	union btree_write_buffer_state s;
	int ret = 0;

	memset(&pin, 0, sizeof(pin));

	bch2_journal_pin_copy(j, &pin, &wb->journal_pin,
			      bch2_btree_write_buffer_journal_flush);
	bch2_journal_pin_drop(j, &wb->journal_pin);

	s = btree_write_buffer_switch(wb);
	keys = wb->keys[s.idx];
	nr = s.nr;

	if (race_fault())
		trace_and_count(c, write_buffer_flush_fault, trans);

	/*
	 * We first sort so that we can detect and skip redundant updates, and
	 * then we attempt to flush in sorted btree order, as this is most
	 * efficient.
	 *
	 * However, since we're not flushing in the order they appear in the
	 * journal we won't be able to drop our journal pin until everything is
	 * flushed - which means this could deadlock the journal if we weren't
	 * passing BCH_TRANS_COMMIT_journal_reclaim. This causes the update to fail
	 * if it would block taking a journal reservation.
	 *
	 * If that happens, simply skip the key so we can optimistically insert
	 * as many keys as possible in the fast path.
	 */
	sort(keys, nr, sizeof(keys[0]),
	     btree_write_buffered_key_cmp, NULL);

	for (i = keys; i < keys + nr; i++) {
		if (i + 1 < keys + nr &&
		    i[0].btree == i[1].btree &&
		    bpos_eq(i[0].k.k.p, i[1].k.k.p)) {
			skipped++;
			i->journal_seq = 0;
			continue;
		}

		if (write_locked &&
		    (iter.path->btree_id != i->btree ||
		     bpos_gt(i->k.k.p, iter.path->l[0].b->key.k.p))) {
			bch2_btree_node_unlock_write(trans, iter.path, iter.path->l[0].b);
			write_locked = false;
		}

		if (!iter.path || iter.path->btree_id != i->btree) {
			bch2_trans_iter_exit(trans, &iter);
			bch2_trans_iter_init(trans, &iter, i->btree, i->k.k.p,
					     BTREE_ITER_INTENT|BTREE_ITER_ALL_SNAPSHOTS);
		}

		bch2_btree_iter_set_pos(&iter, i->k.k.p);
		iter.path->preserve = false;

		do {
			ret = bch2_btree_write_buffer_flush_one(trans, &iter, i, 0,
								&write_locked, &fast);
			if (!write_locked)
				bch2_trans_begin(trans);
		} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));

		if (ret == -BCH_ERR_journal_reclaim_would_deadlock) {
			slowpath++;
			continue;
		}
		if (ret)
			break;

		i->journal_seq = 0;
	}

	if (write_locked)
		bch2_btree_node_unlock_write(trans, iter.path, iter.path->l[0].b);
	bch2_trans_iter_exit(trans, &iter);

	trace_write_buffer_flush(trans, nr, skipped, fast, wb->size);

	if (slowpath)
		goto slowpath;

	bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret));
out:
	bch2_journal_pin_drop(j, &pin);
	return ret;
slowpath:
	trace_and_count(c, write_buffer_flush_slowpath, trans, slowpath, nr);

	/*
	 * Now sort the rest by journal seq and bump the journal pin as we go.
	 * The fastpath zapped the seq of keys that were successfully flushed so
	 * we can skip those here.
	 */
	sort(keys, nr, sizeof(keys[0]),
	     btree_write_buffered_journal_cmp,
	     NULL);

	for (i = keys; i < keys + nr; i++) {
		if (!i->journal_seq)
			continue;

		bch2_journal_pin_update(j, i->journal_seq, &pin,
					bch2_btree_write_buffer_journal_flush);

		ret = commit_do(trans, NULL, NULL,
				BCH_WATERMARK_reclaim|
				BCH_TRANS_COMMIT_no_check_rw|
				BCH_TRANS_COMMIT_no_enospc|
				BCH_TRANS_COMMIT_no_journal_res|
				BCH_TRANS_COMMIT_journal_reclaim,
				btree_write_buffered_insert(trans, i));
		if (bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret)))
			break;
	}

	goto out;
}
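
/*
 * Flush the write buffer synchronously: we have to drop this transaction's
 * btree locks before blocking on the flush lock.
 */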
int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_write_buffer))
		return -BCH_ERR_erofs_no_writes;

	trace_and_count(c, write_buffer_flush_sync, trans, _RET_IP_);

	bch2_trans_unlock(trans);
	mutex_lock(&c->btree_write_buffer.flush_lock);
	int ret = bch2_btree_write_buffer_flush_locked(trans);
	mutex_unlock(&c->btree_write_buffer.flush_lock);
	bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer);
	return ret;
}
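
/*
 * Opportunistic flush: doesn't take a write ref (hence nocheck_rw), and
 * only flushes if the flush lock can be taken without blocking.
 */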
int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_write_buffer *wb = &c->btree_write_buffer;
	int ret = 0;

	if (mutex_trylock(&wb->flush_lock)) {
		ret = bch2_btree_write_buffer_flush_locked(trans);
		mutex_unlock(&wb->flush_lock);
	}

	return ret;
}

int bch2_btree_write_buffer_tryflush(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_write_buffer))
		return -BCH_ERR_erofs_no_writes;

	int ret = bch2_btree_write_buffer_flush_nocheck_rw(trans);
	bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer);
	return ret;
}
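
/*
 * Journal pin flush callback: called by journal reclaim when it needs
 * entries up to @seq released - flushing the write buffer is what drops
 * our pin.
 */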
static int bch2_btree_write_buffer_journal_flush(struct journal *j,
				struct journal_entry_pin *_pin, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	mutex_lock(&wb->flush_lock);
	int ret = bch2_trans_run(c, bch2_btree_write_buffer_flush_locked(trans));
	mutex_unlock(&wb->flush_lock);

	return ret;
}

static inline u64 btree_write_buffer_ref(int idx)
{
	return ((union btree_write_buffer_state) {
		.ref0 = idx == 0,
		.ref1 = idx == 1,
	}).v;
}
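
/*
 * Move this transaction's buffered updates into the active write buffer:
 * space is reserved with a lockless cmpxchg loop, holding a ref on the
 * buffer so that a concurrent buffer switch waits for us to finish
 * copying.
 */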
int bch2_btree_insert_keys_write_buffer(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_write_buffer *wb = &c->btree_write_buffer;
	struct btree_write_buffered_key *i;
	union btree_write_buffer_state old, new;
	int ret = 0;
	u64 v;

	trans_for_each_wb_update(trans, i) {
		EBUG_ON(i->k.k.u64s > BTREE_WRITE_BUFERED_U64s_MAX);

		i->journal_seq		= trans->journal_res.seq;
		i->journal_offset	= trans->journal_res.offset;
	}

	preempt_disable();
	v = READ_ONCE(wb->state.v);
	do {
		old.v = new.v = v;

		new.v += btree_write_buffer_ref(new.idx);
		new.nr += trans->nr_wb_updates;
		if (new.nr > wb->size) {
			ret = -BCH_ERR_btree_insert_need_flush_buffer;
			goto out;
		}
	} while ((v = atomic64_cmpxchg_acquire(&wb->state.counter, old.v, new.v)) != old.v);

	memcpy(wb->keys[new.idx] + old.nr,
	       trans->wb_updates,
	       sizeof(trans->wb_updates[0]) * trans->nr_wb_updates);

	bch2_journal_pin_add(&c->journal, trans->journal_res.seq, &wb->journal_pin,
			     bch2_btree_write_buffer_journal_flush);

	atomic64_sub_return_release(btree_write_buffer_ref(new.idx), &wb->state.counter);
out:
	preempt_enable();
	return ret;
}

void bch2_fs_btree_write_buffer_exit(struct bch_fs *c)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	BUG_ON(wb->state.nr && !bch2_journal_error(&c->journal));

	kvfree(wb->keys[1]);
	kvfree(wb->keys[0]);
}

int bch2_fs_btree_write_buffer_init(struct bch_fs *c)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	mutex_init(&wb->flush_lock);
	wb->size = c->opts.btree_write_buffer_size;

	wb->keys[0] = kvmalloc_array(wb->size, sizeof(*wb->keys[0]), GFP_KERNEL);
	wb->keys[1] = kvmalloc_array(wb->size, sizeof(*wb->keys[1]), GFP_KERNEL);
	if (!wb->keys[0] || !wb->keys[1])
		return -BCH_ERR_ENOMEM_fs_btree_write_buffer_init;

	return 0;
}