// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "error.h"
#include "journal.h"
#include "journal_reclaim.h"

#include <linux/sort.h>

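/*
 * Buffered keys are sorted by (btree, pos) first, so that updates to the same
 * position end up adjacent, then by (journal_seq, journal_offset), so that the
 * newest version of a key sorts last:
 */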
static int btree_write_buffered_key_cmp(const void *_l, const void *_r)
{
	const struct btree_write_buffered_key *l = _l;
	const struct btree_write_buffered_key *r = _r;

	return  cmp_int(l->btree, r->btree) ?:
		bpos_cmp(l->k.k.p, r->k.k.p) ?:
		cmp_int(l->journal_seq, r->journal_seq) ?:
		cmp_int(l->journal_offset, r->journal_offset);
}

static int btree_write_buffered_journal_cmp(const void *_l, const void *_r)
{
	const struct btree_write_buffered_key *l = _l;
	const struct btree_write_buffered_key *r = _r;

	return  cmp_int(l->journal_seq, r->journal_seq);
}

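/*
 * Fast path for flushing a single key: take a write lock on the leaf node and
 * insert directly, bypassing the transaction commit path; fall back to a full
 * transaction commit if the key doesn't fit in the leaf:
 */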
static int bch2_btree_write_buffer_flush_one(struct btree_trans *trans,
					     struct btree_iter *iter,
					     struct btree_write_buffered_key *wb,
					     unsigned commit_flags,
					     bool *write_locked,
					     size_t *fast)
{
	struct bch_fs *c = trans->c;
	struct btree_path *path;
	int ret;

	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return ret;

	path = iter->path;

	if (!*write_locked) {
		ret = bch2_btree_node_lock_write(trans, path, &path->l[0].b->c);
		if (ret)
			return ret;

		bch2_btree_node_prep_for_write(trans, path, path->l[0].b);
		*write_locked = true;
	}

	if (!bch2_btree_node_insert_fits(c, path->l[0].b, wb->k.k.u64s)) {
		bch2_btree_node_unlock_write(trans, path, path->l[0].b);
		*write_locked = false;
		goto trans_commit;
	}

	bch2_btree_insert_key_leaf(trans, path, &wb->k, wb->journal_seq);
	(*fast)++;

	if (path->ref > 1) {
		/*
		 * We can't clone a path that has write locks: if the path is
		 * shared, unlock before set_pos(), traverse():
		 */
		bch2_btree_node_unlock_write(trans, path, path->l[0].b);
		*write_locked = false;
	}
	return 0;
trans_commit:
	return  bch2_trans_update(trans, iter, &wb->k, 0) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  commit_flags|
				  BTREE_INSERT_NOFAIL|
				  BTREE_INSERT_JOURNAL_RECLAIM);
}

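/*
 * Atomically switch which of the two buffers new keys are appended to, then
 * wait for outstanding references on the old buffer to drain so its keys can
 * be read safely:
 */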
static union btree_write_buffer_state btree_write_buffer_switch(struct btree_write_buffer *wb)
{
	union btree_write_buffer_state old, new;
	u64 v = READ_ONCE(wb->state.v);

	do {
		old.v = new.v = v;

		new.nr = 0;
		new.idx++;
	} while ((v = atomic64_cmpxchg_acquire(&wb->state.counter, old.v, new.v)) != old.v);

	while (old.idx == 0 ? wb->state.ref0 : wb->state.ref1)
		cpu_relax();

	smp_mb();

	return old;
}

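/*
 * Flush buffered keys to their home btrees: take (or trylock) flush_lock,
 * switch to the other buffer so new updates aren't blocked, then flush each
 * key via the fast path. Keys that would deadlock journal reclaim are
 * rewritten on the slow path, in journal order:
 */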
int __bch2_btree_write_buffer_flush(struct btree_trans *trans, unsigned commit_flags,
				    bool locked)
{
	struct bch_fs *c = trans->c;
	struct journal *j = &c->journal;
	struct btree_write_buffer *wb = &c->btree_write_buffer;
	struct journal_entry_pin pin;
	struct btree_write_buffered_key *i, *keys;
	struct btree_iter iter = { NULL };
	size_t nr = 0, skipped = 0, fast = 0, slowpath = 0;
	bool write_locked = false;
	union btree_write_buffer_state s;
	int ret = 0;

	memset(&pin, 0, sizeof(pin));

	if (!locked && !mutex_trylock(&wb->flush_lock))
		return 0;

	bch2_journal_pin_copy(j, &pin, &wb->journal_pin, NULL);
	bch2_journal_pin_drop(j, &wb->journal_pin);

	s = btree_write_buffer_switch(wb);
	keys = wb->keys[s.idx];
	nr = s.nr;

	/*
	 * We first sort so that we can detect and skip redundant updates, and
	 * then we attempt to flush in sorted btree order, as this is most
	 * efficient.
	 *
	 * However, since we're not flushing in the order they appear in the
	 * journal we won't be able to drop our journal pin until everything is
	 * flushed - which means this could deadlock the journal if we weren't
	 * passing BTREE_INSERT_JOURNAL_RECLAIM. This causes the update to fail
	 * if it would block taking a journal reservation.
	 *
	 * If that happens, simply skip the key so we can optimistically insert
	 * as many keys as possible in the fast path.
	 */
	sort(keys, nr, sizeof(keys[0]),
	     btree_write_buffered_key_cmp, NULL);

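	/*
	 * The sort put duplicate updates to the same position adjacent, newest
	 * last - everything but that final update is redundant and can be
	 * zapped:
	 */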
	for (i = keys; i < keys + nr; i++) {
		if (i + 1 < keys + nr &&
		    i[0].btree == i[1].btree &&
		    bpos_eq(i[0].k.k.p, i[1].k.k.p)) {
			skipped++;
			i->journal_seq = 0;
			continue;
		}

		if (write_locked &&
		    (iter.path->btree_id != i->btree ||
		     bpos_gt(i->k.k.p, iter.path->l[0].b->key.k.p))) {
			bch2_btree_node_unlock_write(trans, iter.path, iter.path->l[0].b);
			write_locked = false;
		}

		if (!iter.path || iter.path->btree_id != i->btree) {
			bch2_trans_iter_exit(trans, &iter);
			bch2_trans_iter_init(trans, &iter, i->btree, i->k.k.p, BTREE_ITER_INTENT);
		}

		bch2_btree_iter_set_pos(&iter, i->k.k.p);
		iter.path->preserve = false;

		do {
			ret = bch2_btree_write_buffer_flush_one(trans, &iter, i,
							commit_flags, &write_locked, &fast);
			if (!write_locked)
				bch2_trans_begin(trans);
		} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));

		if (ret == -BCH_ERR_journal_reclaim_would_deadlock) {
			slowpath++;
			continue;
		}
		if (ret)
			break;

		i->journal_seq = 0;
	}

	if (write_locked)
		bch2_btree_node_unlock_write(trans, iter.path, iter.path->l[0].b);
	bch2_trans_iter_exit(trans, &iter);

	trace_write_buffer_flush(trans, nr, skipped, fast, wb->size);

	if (slowpath)
		goto slowpath;

	bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret));
out:
	bch2_journal_pin_drop(j, &pin);
	mutex_unlock(&wb->flush_lock);
	return ret;
slowpath:
	trace_write_buffer_flush_slowpath(trans, i - keys, nr);

	/*
	 * Now sort the rest by journal seq and bump the journal pin as we go.
	 * The fast path zapped the seq of keys that were successfully flushed
	 * so we can skip those here.
	 */
	sort(keys, nr, sizeof(keys[0]),
	     btree_write_buffered_journal_cmp,
	     NULL);

	for (i = keys; i < keys + nr; i++) {
		if (!i->journal_seq)
			continue;

		if (i->journal_seq > pin.seq) {
			struct journal_entry_pin pin2;

			memset(&pin2, 0, sizeof(pin2));

			bch2_journal_pin_add(j, i->journal_seq, &pin2, NULL);
			bch2_journal_pin_drop(j, &pin);
			bch2_journal_pin_copy(j, &pin, &pin2, NULL);
			bch2_journal_pin_drop(j, &pin2);
		}

		ret = commit_do(trans, NULL, NULL,
				commit_flags|
				BTREE_INSERT_NOFAIL|
				BTREE_INSERT_JOURNAL_RECLAIM|
				JOURNAL_WATERMARK_reserved,
				__bch2_btree_insert(trans, i->btree, &i->k, 0));
		if (bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret)))
			break;
	}

	goto out;
}

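/*
 * Synchronous flush: drop trans locks before blocking on flush_lock, since the
 * current lock holder may itself need btree locks to make progress:
 */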
int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans)
{
	bch2_trans_unlock(trans);
	mutex_lock(&trans->c->btree_write_buffer.flush_lock);
	return __bch2_btree_write_buffer_flush(trans, 0, true);
}

int bch2_btree_write_buffer_flush(struct btree_trans *trans)
{
	return __bch2_btree_write_buffer_flush(trans, 0, false);
}

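/*
 * Journal pin flush callback, called from journal reclaim when the write
 * buffer's pin is holding up reclaim - forces a flush:
 */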
static int bch2_btree_write_buffer_journal_flush(struct journal *j,
				struct journal_entry_pin *_pin, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	mutex_lock(&wb->flush_lock);

	return bch2_trans_run(c,
			__bch2_btree_write_buffer_flush(&trans, BTREE_INSERT_NOCHECK_RW, true));
}

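/*
 * Packed state delta for taking or releasing a reference on buffer @idx:
 */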
static inline u64 btree_write_buffer_ref(int idx)
{
	return ((union btree_write_buffer_state) {
		.ref0 = idx == 0,
		.ref1 = idx == 1,
	}).v;
}

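/*
 * Queue a transaction's keys in the write buffer instead of updating the
 * btrees directly: a cmpxchg loop on the packed state reserves space and takes
 * a ref on the current buffer, so a concurrent flush can't switch buffers
 * until the copy below completes:
 */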
int bch2_btree_insert_keys_write_buffer(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_write_buffer *wb = &c->btree_write_buffer;
	struct btree_write_buffered_key *i;
	union btree_write_buffer_state old, new;
	int ret = 0;
	u64 v;

	trans_for_each_wb_update(trans, i) {
		EBUG_ON(i->k.k.u64s > BTREE_WRITE_BUFERED_U64s_MAX);

		i->journal_seq		= trans->journal_res.seq;
		i->journal_offset	= trans->journal_res.offset;
	}

	preempt_disable();
	v = READ_ONCE(wb->state.v);
	do {
		old.v = new.v = v;

		new.v += btree_write_buffer_ref(new.idx);
		new.nr += trans->nr_wb_updates;
		if (new.nr > wb->size) {
			ret = -BCH_ERR_btree_insert_need_flush_buffer;
			goto out;
		}
	} while ((v = atomic64_cmpxchg_acquire(&wb->state.counter, old.v, new.v)) != old.v);

	memcpy(wb->keys[new.idx] + old.nr,
	       trans->wb_updates,
	       sizeof(trans->wb_updates[0]) * trans->nr_wb_updates);

	bch2_journal_pin_add(&c->journal, trans->journal_res.seq, &wb->journal_pin,
			     bch2_btree_write_buffer_journal_flush);

	atomic64_sub_return_release(btree_write_buffer_ref(new.idx), &wb->state.counter);
out:
	preempt_enable();
	return ret;
}

void bch2_fs_btree_write_buffer_exit(struct bch_fs *c)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	BUG_ON(wb->state.nr && !bch2_journal_error(&c->journal));

	kvfree(wb->keys[1]);
	kvfree(wb->keys[0]);
}

int bch2_fs_btree_write_buffer_init(struct bch_fs *c)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	mutex_init(&wb->flush_lock);
	wb->size = c->opts.btree_write_buffer_size;

	wb->keys[0] = kvmalloc_array(wb->size, sizeof(*wb->keys[0]), GFP_KERNEL);
	wb->keys[1] = kvmalloc_array(wb->size, sizeof(*wb->keys[1]), GFP_KERNEL);
	if (!wb->keys[0] || !wb->keys[1])
		return -BCH_ERR_ENOMEM_fs_btree_write_buffer_init;

	return 0;
}