// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "error.h"
#include "journal.h"
#include "journal_reclaim.h"

#include <linux/sort.h>

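/*
 * Sort order for the fast path flush: group keys by btree and position so
 * that redundant updates to the same key end up adjacent, with journal
 * seq/offset as tiebreakers so that the newest version sorts last.
 */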
static int btree_write_buffered_key_cmp(const void *_l, const void *_r)
{
	const struct btree_write_buffered_key *l = _l;
	const struct btree_write_buffered_key *r = _r;

	return  cmp_int(l->btree, r->btree) ?:
		bpos_cmp(l->k.k.p, r->k.k.p) ?:
		cmp_int(l->journal_seq, r->journal_seq) ?:
		cmp_int(l->journal_offset, r->journal_offset);
}

static int btree_write_buffered_journal_cmp(const void *_l, const void *_r)
{
	const struct btree_write_buffered_key *l = _l;
	const struct btree_write_buffered_key *r = _r;

	return cmp_int(l->journal_seq, r->journal_seq);
}

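/*
 * Fast path for a single buffered key: take a write lock on the destination
 * leaf node and insert directly, falling back to a full transaction commit
 * when the node doesn't have room.
 */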
static int bch2_btree_write_buffer_flush_one(struct btree_trans *trans,
					     struct btree_iter *iter,
					     struct btree_write_buffered_key *wb,
					     unsigned commit_flags,
					     bool *write_locked,
					     size_t *fast)
{
	struct bch_fs *c = trans->c;
	struct btree_path *path;
	int ret;

	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return ret;

	path = iter->path;

	if (!*write_locked) {
		ret = bch2_btree_node_lock_write(trans, path, &path->l[0].b->c);
		if (ret)
			return ret;

		bch2_btree_node_prep_for_write(trans, path, path->l[0].b);
		*write_locked = true;
	}

	if (!bch2_btree_node_insert_fits(c, path->l[0].b, wb->k.k.u64s)) {
		bch2_btree_node_unlock_write(trans, path, path->l[0].b);
		*write_locked = false;
		goto trans_commit;
	}

	bch2_btree_insert_key_leaf(trans, path, &wb->k, wb->journal_seq);
	(*fast)++;

	if (path->ref > 1) {
		/*
		 * We can't clone a path that has write locks: if the path is
		 * shared, unlock before set_pos(), traverse():
		 */
		bch2_btree_node_unlock_write(trans, path, path->l[0].b);
		*write_locked = false;
	}
	return 0;
trans_commit:
	return  bch2_trans_update_seq(trans, wb->journal_seq, iter, &wb->k, 0) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  commit_flags|
				  BTREE_INSERT_NOCHECK_RW|
				  BTREE_INSERT_NOFAIL|
				  BTREE_INSERT_JOURNAL_RECLAIM);
}

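/*
 * Swap the active buffer: the state word packs the buffer index, key count
 * and per-buffer refcounts into a single 64-bit value, so the switch is a
 * cmpxchg loop followed by waiting for writers still referencing the old
 * buffer to drain.
 */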
static union btree_write_buffer_state btree_write_buffer_switch(struct btree_write_buffer *wb)
{
	union btree_write_buffer_state old, new;
	u64 v = READ_ONCE(wb->state.v);

	do {
		old.v = new.v = v;

		new.nr = 0;
		new.idx++;
	} while ((v = atomic64_cmpxchg_acquire(&wb->state.counter, old.v, new.v)) != old.v);

	while (old.idx == 0 ? wb->state.ref0 : wb->state.ref1)
		cpu_relax();

	smp_mb();

	return old;
}

/*
 * Update a btree with a write buffered key using the journal seq of the
 * original write buffer insert.
 *
 * It is not safe to rejournal the key once it has been inserted into the write
 * buffer because that may break recovery ordering. For example, the key may
 * have already been modified in the active write buffer in a seq that comes
 * before the current transaction. If we were to journal this key again and
 * crash, recovery would process updates in the wrong order.
 */
static int
btree_write_buffered_insert(struct btree_trans *trans,
			  struct btree_write_buffered_key *wb)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k),
			     BTREE_ITER_CACHED|BTREE_ITER_INTENT);

	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update_seq(trans, wb->journal_seq, &iter, &wb->k, 0);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

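/*
 * Flush the write buffer: swap out the active buffer, then move its keys
 * into the appropriate btrees - in sorted btree order via direct leaf
 * inserts where possible, falling back to journal-seq order for keys that
 * can't be flushed without blocking on journal reclaim.
 */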
int __bch2_btree_write_buffer_flush(struct btree_trans *trans, unsigned commit_flags,
				    bool locked)
{
	struct bch_fs *c = trans->c;
	struct journal *j = &c->journal;
	struct btree_write_buffer *wb = &c->btree_write_buffer;
	struct journal_entry_pin pin;
	struct btree_write_buffered_key *i, *keys;
	struct btree_iter iter = { NULL };
	size_t nr = 0, skipped = 0, fast = 0, slowpath = 0;
	bool write_locked = false;
	union btree_write_buffer_state s;
	int ret = 0;

	memset(&pin, 0, sizeof(pin));

	if (!locked && !mutex_trylock(&wb->flush_lock))
		return 0;

	bch2_journal_pin_copy(j, &pin, &wb->journal_pin, NULL);
	bch2_journal_pin_drop(j, &wb->journal_pin);

	s = btree_write_buffer_switch(wb);
	keys = wb->keys[s.idx];
	nr = s.nr;

	if (race_fault())
		goto slowpath;

	/*
	 * We first sort so that we can detect and skip redundant updates, and
	 * then we attempt to flush in sorted btree order, as this is most
	 * efficient.
	 *
	 * However, since we're not flushing in the order they appear in the
	 * journal we won't be able to drop our journal pin until everything is
	 * flushed - which means this could deadlock the journal if we weren't
	 * passing BTREE_INSERT_JOURNAL_RECLAIM. This causes the update to fail
	 * if it would block taking a journal reservation.
	 *
	 * If that happens, simply skip the key so we can optimistically insert
	 * as many keys as possible in the fast path.
	 */
	sort(keys, nr, sizeof(keys[0]),
	     btree_write_buffered_key_cmp, NULL);

	for (i = keys; i < keys + nr; i++) {
		if (i + 1 < keys + nr &&
		    i[0].btree == i[1].btree &&
		    bpos_eq(i[0].k.k.p, i[1].k.k.p)) {
			skipped++;
			i->journal_seq = 0;
			continue;
		}

		if (write_locked &&
		    (iter.path->btree_id != i->btree ||
		     bpos_gt(i->k.k.p, iter.path->l[0].b->key.k.p))) {
			bch2_btree_node_unlock_write(trans, iter.path, iter.path->l[0].b);
			write_locked = false;
		}

		if (!iter.path || iter.path->btree_id != i->btree) {
			bch2_trans_iter_exit(trans, &iter);
			bch2_trans_iter_init(trans, &iter, i->btree, i->k.k.p, BTREE_ITER_INTENT);
		}

		bch2_btree_iter_set_pos(&iter, i->k.k.p);
		iter.path->preserve = false;

		do {
			ret = bch2_btree_write_buffer_flush_one(trans, &iter, i,
						commit_flags, &write_locked, &fast);
			if (!write_locked)
				bch2_trans_begin(trans);
		} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));

		if (ret == -BCH_ERR_journal_reclaim_would_deadlock) {
			slowpath++;
			continue;
		}
		if (ret)
			break;

		i->journal_seq = 0;
	}

	if (write_locked)
		bch2_btree_node_unlock_write(trans, iter.path, iter.path->l[0].b);
	bch2_trans_iter_exit(trans, &iter);

	trace_write_buffer_flush(trans, nr, skipped, fast, wb->size);

	if (slowpath)
		goto slowpath;

	bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret));
out:
	bch2_journal_pin_drop(j, &pin);
	mutex_unlock(&wb->flush_lock);
	return ret;
slowpath:
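	/*
	 * Slow path: flush the remaining keys one at a time via full
	 * transaction commits, under the reclaim watermark since we still
	 * hold the journal pin for everything not yet flushed.
	 */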
	trace_write_buffer_flush_slowpath(trans, i - keys, nr);

	/*
	 * Now sort the rest by journal seq and bump the journal pin as we go.
	 * The fast path zapped the seq of keys that were successfully flushed
	 * so we can skip those here.
	 */
	sort(keys, nr, sizeof(keys[0]),
	     btree_write_buffered_journal_cmp,
	     NULL);

	commit_flags &= ~BCH_WATERMARK_MASK;
	commit_flags |= BCH_WATERMARK_reclaim;

	for (i = keys; i < keys + nr; i++) {
		if (!i->journal_seq)
			continue;

		if (i->journal_seq > pin.seq) {
			struct journal_entry_pin pin2;

			memset(&pin2, 0, sizeof(pin2));

			bch2_journal_pin_add(j, i->journal_seq, &pin2, NULL);
			bch2_journal_pin_drop(j, &pin);
			bch2_journal_pin_copy(j, &pin, &pin2, NULL);
			bch2_journal_pin_drop(j, &pin2);
		}

		ret = commit_do(trans, NULL, NULL,
				commit_flags|
				BTREE_INSERT_NOFAIL|
				BTREE_INSERT_JOURNAL_RECLAIM,
				btree_write_buffered_insert(trans, i));
		if (bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret)))
			break;
	}

	goto out;
}

int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans)
{
	bch2_trans_unlock(trans);
	mutex_lock(&trans->c->btree_write_buffer.flush_lock);
	return __bch2_btree_write_buffer_flush(trans, 0, true);
}

int bch2_btree_write_buffer_flush(struct btree_trans *trans)
{
	return __bch2_btree_write_buffer_flush(trans, 0, false);
}

static int bch2_btree_write_buffer_journal_flush(struct journal *j,
				struct journal_entry_pin *_pin, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	mutex_lock(&wb->flush_lock);

	return bch2_trans_run(c,
			__bch2_btree_write_buffer_flush(&trans, BTREE_INSERT_NOCHECK_RW, true));
}

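/*
 * Value to add to/subtract from the state word to take or release a
 * reference on one of the two buffers; writers hold a ref on the buffer
 * they're copying into so btree_write_buffer_switch() can wait for them.
 */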
static inline u64 btree_write_buffer_ref(int idx)
{
	return ((union btree_write_buffer_state) {
		.ref0 = idx == 0,
		.ref1 = idx == 1,
	}).v;
}

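/*
 * Called at transaction commit time: stamp each buffered key with the
 * journal seq/offset of this commit, then reserve space in the active
 * buffer with a cmpxchg loop and copy the keys in under the buffer ref.
 */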
int bch2_btree_insert_keys_write_buffer(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_write_buffer *wb = &c->btree_write_buffer;
	struct btree_write_buffered_key *i;
	union btree_write_buffer_state old, new;
	int ret = 0;
	u64 v;

	trans_for_each_wb_update(trans, i) {
		EBUG_ON(i->k.k.u64s > BTREE_WRITE_BUFERED_U64s_MAX);

		i->journal_seq		= trans->journal_res.seq;
		i->journal_offset	= trans->journal_res.offset;
	}

	preempt_disable();
	v = READ_ONCE(wb->state.v);
	do {
		old.v = new.v = v;

		new.v += btree_write_buffer_ref(new.idx);
		new.nr += trans->nr_wb_updates;
		if (new.nr > wb->size) {
			ret = -BCH_ERR_btree_insert_need_flush_buffer;
			goto out;
		}
	} while ((v = atomic64_cmpxchg_acquire(&wb->state.counter, old.v, new.v)) != old.v);

	memcpy(wb->keys[new.idx] + old.nr,
	       trans->wb_updates,
	       sizeof(trans->wb_updates[0]) * trans->nr_wb_updates);

	bch2_journal_pin_add(&c->journal, trans->journal_res.seq, &wb->journal_pin,
			     bch2_btree_write_buffer_journal_flush);

	atomic64_sub_return_release(btree_write_buffer_ref(new.idx), &wb->state.counter);
out:
	preempt_enable();
	return ret;
}

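/*
 * Shutdown: the buffer must have been flushed (and thus be empty) unless
 * the journal is already in an error state.
 */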
void bch2_fs_btree_write_buffer_exit(struct bch_fs *c)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	BUG_ON(wb->state.nr && !bch2_journal_error(&c->journal));

	kvfree(wb->keys[1]);
	kvfree(wb->keys[0]);
}

int bch2_fs_btree_write_buffer_init(struct bch_fs *c)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	mutex_init(&wb->flush_lock);
	wb->size = c->opts.btree_write_buffer_size;

	wb->keys[0] = kvmalloc_array(wb->size, sizeof(*wb->keys[0]), GFP_KERNEL);
	wb->keys[1] = kvmalloc_array(wb->size, sizeof(*wb->keys[1]), GFP_KERNEL);
	if (!wb->keys[0] || !wb->keys[1])
		return -BCH_ERR_ENOMEM_fs_btree_write_buffer_init;

	return 0;
}