#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "journal_reclaim.h"

#include <linux/sort.h>
#include <trace/events/bcachefs.h>

/* Inserting into a given leaf node (last stage of insert): */

/* Handle overwrites and do insert, for non extents: */
bool bch2_btree_bset_insert_key(struct btree_iter *iter,
				struct btree *b,
				struct btree_node_iter *node_iter,
				struct bkey_i *insert)
{
	const struct bkey_format *f = &b->format;
	struct bkey_packed *k;
	struct bset_tree *t;
	unsigned clobber_u64s;

	EBUG_ON(btree_node_just_written(b));
	EBUG_ON(bset_written(b, btree_bset_last(b)));
	EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
	EBUG_ON(bkey_cmp(bkey_start_pos(&insert->k), b->data->min_key) < 0 ||
		bkey_cmp(insert->k.p, b->data->max_key) > 0);

	k = bch2_btree_node_iter_peek_all(node_iter, b);
	if (k && !bkey_cmp_packed(b, k, &insert->k)) {
		/* We're overwriting an existing key: */
		BUG_ON(bkey_whiteout(k));

		t = bch2_bkey_to_bset(b, k);

		/*
		 * Fast path: if the bset is still unwritten and the value is
		 * the same size, overwrite the old value in place:
		 */
		if (bset_unwritten(b, bset(b, t)) &&
		    bkey_val_u64s(&insert->k) == bkeyp_val_u64s(f, k) &&
		    !bkey_whiteout(&insert->k)) {
			k->type = insert->k.type;
			memcpy_u64s(bkeyp_val(f, k), &insert->v,
				    bkey_val_u64s(&insert->k));
			return true;
		}

		insert->k.needs_whiteout = k->needs_whiteout;

		btree_keys_account_key_drop(&b->nr, t - b->set, k);

		if (t == bset_tree_last(b)) {
			clobber_u64s = k->u64s;

			/*
			 * If we're deleting, and the key we're deleting doesn't
			 * need a whiteout (it wasn't overwriting a key that had
			 * been written to disk) - just delete it:
			 */
			if (bkey_whiteout(&insert->k) && !k->needs_whiteout) {
				bch2_bset_delete(b, k, clobber_u64s);
				bch2_btree_node_iter_fix(iter, b, node_iter, t,
							 k, clobber_u64s, 0);
				return true;
			}

			goto overwrite;
		}

		k->type = KEY_TYPE_DELETED;
		bch2_btree_node_iter_fix(iter, b, node_iter, t, k,
					 k->u64s, k->u64s);

		if (bkey_whiteout(&insert->k)) {
			reserve_whiteout(b, t, k);
			return true;
		} else {
			k->needs_whiteout = false;
		}
	} else {
		/*
		 * Deleting, but the key to delete wasn't found - nothing to do:
		 */
		if (bkey_whiteout(&insert->k))
			return false;

		insert->k.needs_whiteout = false;
	}

	t = bset_tree_last(b);
	k = bch2_btree_node_iter_bset_pos(node_iter, b, t);
	clobber_u64s = 0;
overwrite:
	bch2_bset_insert(b, node_iter, k, insert, clobber_u64s);
	if (k->u64s != clobber_u64s || bkey_whiteout(&insert->k))
		bch2_btree_node_iter_fix(iter, b, node_iter, t, k,
					 clobber_u64s, k->u64s);
	return true;
}

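/*
 * Whiteouts, roughly: if the key being deleted or overwritten lives in a bset
 * that has already been written to disk, we can't simply drop it - a whiteout
 * has to remain in the node until the node is rewritten, so that on read the
 * old on-disk key is known to be dead. That's what the needs_whiteout and
 * reserve_whiteout() handling in bch2_btree_bset_insert_key() is for; the
 * function returns true if it modified the node (i.e. the caller must journal
 * the key), false if the update was a no-op.
 */
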
static void __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
			       unsigned i, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct btree_write *w = container_of(pin, struct btree_write, journal);
	struct btree *b = container_of(w, struct btree, writes[i]);

	btree_node_lock_type(c, b, SIX_LOCK_read);
	bch2_btree_node_write_cond(c, b,
			(btree_current_write(b) == w &&
			 w->journal.pin_list == journal_seq_pin(j, seq)));
	six_unlock_read(&b->lock);
}

static void btree_node_flush0(struct journal *j, struct journal_entry_pin *pin, u64 seq)
{
	return __btree_node_flush(j, pin, 0, seq);
}

static void btree_node_flush1(struct journal *j, struct journal_entry_pin *pin, u64 seq)
{
	return __btree_node_flush(j, pin, 1, seq);
}

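/*
 * Each btree node has two btree_write structures (writes[0] and writes[1]),
 * so one write can be in flight while the next one is being built up;
 * btree_node_flush0/btree_node_flush1 are the journal pin flush callbacks for
 * the corresponding slot.
 */
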
void bch2_btree_journal_key(struct btree_insert *trans,
			    struct btree_iter *iter,
			    struct bkey_i *insert)
{
	struct bch_fs *c = trans->c;
	struct journal *j = &c->journal;
	struct btree *b = iter->l[0].b;
	struct btree_write *w = btree_current_write(b);

	EBUG_ON(iter->level || b->level);
	EBUG_ON(trans->journal_res.ref !=
		!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY));

	if (likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))) {
		u64 seq = trans->journal_res.seq;
		bool needs_whiteout = insert->k.needs_whiteout;

		/*
		 * needs_whiteout is in-memory state that shouldn't be
		 * journalled - clear it for the journal copy, then restore it:
		 */
		insert->k.needs_whiteout = false;
		bch2_journal_add_keys(j, &trans->journal_res,
				      iter->btree_id, insert);
		insert->k.needs_whiteout = needs_whiteout;

		bch2_journal_set_has_inode(j, &trans->journal_res,
					   insert->k.p.inode);

		if (trans->journal_seq)
			*trans->journal_seq = seq;
		btree_bset_last(b)->journal_seq = cpu_to_le64(seq);
	}

	if (unlikely(!journal_pin_active(&w->journal))) {
		u64 seq = likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
			? trans->journal_res.seq
			: j->replay_journal_seq;

		bch2_journal_pin_add(j, seq, &w->journal,
				     btree_node_write_idx(b) == 0
				     ? btree_node_flush0
				     : btree_node_flush1);
	}

	if (unlikely(!btree_node_dirty(b)))
		set_btree_node_dirty(b);
}

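/*
 * Note: during journal replay there's no journal reservation, so the pin
 * above is taken against j->replay_journal_seq - the sequence number of the
 * entry currently being replayed - instead of a new reservation's seq.
 */
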
static enum btree_insert_ret
bch2_insert_fixup_key(struct btree_insert *trans,
		      struct btree_insert_entry *insert)
{
	struct btree_iter *iter = insert->iter;
	struct btree_iter_level *l = &iter->l[0];

	EBUG_ON(iter->level);
	EBUG_ON(insert->k->k.u64s >
		bch_btree_keys_u64s_remaining(trans->c, l->b));

	if (bch2_btree_bset_insert_key(iter, l->b, &l->iter,
				       insert->k))
		bch2_btree_journal_key(trans, iter, insert->k);

	trans->did_work = true;
	return BTREE_INSERT_OK;
}

/**
 * btree_insert_key_leaf - insert a key into a leaf node
 */
static enum btree_insert_ret
btree_insert_key_leaf(struct btree_insert *trans,
		      struct btree_insert_entry *insert)
{
	struct bch_fs *c = trans->c;
	struct btree_iter *iter = insert->iter;
	struct btree *b = iter->l[0].b;
	enum btree_insert_ret ret;
	int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
	int old_live_u64s = b->nr.live_u64s;
	int live_u64s_added, u64s_added;

	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);

	ret = !btree_node_is_extents(b)
		? bch2_insert_fixup_key(trans, insert)
		: bch2_insert_fixup_extent(trans, insert);

	live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
	u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;

	/* sib_u64s caches sizes for sibling merge decisions; U16_MAX = unknown: */
	if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
		b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
	if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0)
		b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);

	if (u64s_added > live_u64s_added &&
	    bch2_maybe_compact_whiteouts(c, b))
		bch2_btree_iter_reinit_node(iter, b);

	trace_btree_insert_key(c, b, insert->k);
	return ret;
}

#define trans_for_each_entry(trans, i)					\
	for ((i) = (trans)->entries; (i) < (trans)->entries + (trans)->nr; (i)++)

/*
 * We sort transaction entries so that if multiple iterators point to the same
 * leaf node they'll be adjacent:
 */
static bool same_leaf_as_prev(struct btree_insert *trans,
			      struct btree_insert_entry *i)
{
	return i != trans->entries &&
		i[0].iter->l[0].b == i[-1].iter->l[0].b;
}

static inline struct btree_insert_entry *trans_next_leaf(struct btree_insert *trans,
							 struct btree_insert_entry *i)
{
	struct btree *b = i->iter->l[0].b;

	do {
		i++;
	} while (i < trans->entries + trans->nr && b == i->iter->l[0].b);

	return i;
}

#define trans_for_each_leaf(trans, i)					\
	for ((i) = (trans)->entries;					\
	     (i) < (trans)->entries + (trans)->nr;			\
	     (i) = trans_next_leaf(trans, i))

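/*
 * e.g. with entries sorted so that iterators into the same leaf are adjacent
 * (leaves A, A, B, C), trans_for_each_leaf() visits the first entry for each
 * of A, B and C - one iteration per distinct leaf node.
 */
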
inline void bch2_btree_node_lock_for_insert(struct bch_fs *c, struct btree *b,
					    struct btree_iter *iter)
{
	bch2_btree_node_lock_write(b, iter);

	if (btree_node_just_written(b) &&
	    bch2_btree_post_write_cleanup(c, b))
		bch2_btree_iter_reinit_node(iter, b);

	/*
	 * If the last bset has been written, or if it's gotten too big - start
	 * a new bset to insert into:
	 */
	if (want_new_bset(c, b))
		bch2_btree_init_next(c, b, iter);
}

static void multi_lock_write(struct bch_fs *c, struct btree_insert *trans)
{
	struct btree_insert_entry *i;

	trans_for_each_leaf(trans, i)
		bch2_btree_node_lock_for_insert(c, i->iter->l[0].b, i->iter);
}

static void multi_unlock_write(struct btree_insert *trans)
{
	struct btree_insert_entry *i;

	trans_for_each_leaf(trans, i)
		bch2_btree_node_unlock_write(i->iter->l[0].b, i->iter);
}

static inline int btree_trans_cmp(struct btree_insert_entry l,
				  struct btree_insert_entry r)
{
	return btree_iter_cmp(l.iter, r.iter);
}

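/*
 * bubble sort is fine for sorting the entries: trans->nr is small (usually
 * just one or two entries), and it sorts in place without allocating.
 */
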
/* Normal update interface: */

/*
 * Get journal reservation, take write locks, and attempt to do btree update(s):
 */
static inline int do_btree_insert_at(struct btree_insert *trans,
				     struct btree_iter **split,
				     bool *cycle_gc_lock)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;
	unsigned u64s;
	int ret;

	trans_for_each_entry(trans, i)
		BUG_ON(i->done);

	u64s = 0;
	trans_for_each_entry(trans, i)
		u64s += jset_u64s(i->k->k.u64s + i->extra_res);

	memset(&trans->journal_res, 0, sizeof(trans->journal_res));

	ret = !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY)
		? bch2_journal_res_get(&c->journal,
				       &trans->journal_res,
				       u64s, u64s)
		: 0;
	if (ret)
		return ret;

	multi_lock_write(c, trans);

	if (race_fault()) {
		ret = -EINTR;
		goto out;
	}

	u64s = 0;
	trans_for_each_entry(trans, i) {
		/* Multiple inserts might go to same leaf: */
		if (!same_leaf_as_prev(trans, i))
			u64s = 0;

		/*
		 * bch2_btree_node_insert_fits() must be called under write lock:
		 * with only an intent lock, another thread can still call
		 * bch2_btree_node_write(), converting an unwritten bset to a
		 * written one:
		 */
		u64s += i->k->k.u64s + i->extra_res;
		if (!bch2_btree_node_insert_fits(c,
				i->iter->l[0].b, u64s)) {
			ret = -EINTR;
			*split = i->iter;
			goto out;
		}
	}

	if (journal_seq_verify(c) &&
	    !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
		trans_for_each_entry(trans, i)
			i->k->k.version.lo = trans->journal_res.seq;

	trans_for_each_entry(trans, i) {
		switch (btree_insert_key_leaf(trans, i)) {
		case BTREE_INSERT_OK:
			i->done = true;
			break;
		case BTREE_INSERT_JOURNAL_RES_FULL:
		case BTREE_INSERT_NEED_TRAVERSE:
		case BTREE_INSERT_NEED_RESCHED:
			ret = -EINTR;
			break;
		case BTREE_INSERT_BTREE_NODE_FULL:
			ret = -EINTR;
			*split = i->iter;
			break;
		case BTREE_INSERT_ENOSPC:
			ret = -ENOSPC;
			break;
		case BTREE_INSERT_NEED_GC_LOCK:
			ret = -EINTR;
			*cycle_gc_lock = true;
			break;
		default:
			BUG();
		}

		/*
		 * If we did some work (i.e. inserted part of an extent),
		 * we have to do all the other updates as well:
		 */
		if (!trans->did_work && (ret || *split))
			break;
	}
out:
	multi_unlock_write(trans);
	bch2_journal_res_put(&c->journal, &trans->journal_res);

	return ret;
}

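/*
 * On failure, do_btree_insert_at() communicates what to do next via its
 * return value and out parameters: -EINTR with *split set means a leaf node
 * must be split and the update retried; -EINTR with *cycle_gc_lock set means
 * we raced with gc and must cycle c->gc_lock; plain -EINTR means relock or
 * retraverse and retry. __bch2_btree_insert_at() below acts on these in its
 * err path.
 */
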
/**
 * __bch2_btree_insert_at - insert keys at given iterator positions
 *
 * This is the main entry point for btree updates.
 *
 * Return values:
 * -EINTR: locking changed, this function should be called again. Only returned
 *  if passed BTREE_INSERT_ATOMIC.
 * -EROFS: filesystem read only
 * -EIO: journal or btree node IO error
 */
int __bch2_btree_insert_at(struct btree_insert *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;
	struct btree_iter *linked, *split = NULL;
	bool cycle_gc_lock = false;
	unsigned flags;
	int ret;

	for_each_btree_iter(trans->entries[0].iter, linked)
		bch2_btree_iter_verify_locks(linked);

	/* for the sake of sanity: */
	BUG_ON(trans->nr > 1 && !(trans->flags & BTREE_INSERT_ATOMIC));

	trans_for_each_entry(trans, i) {
		BUG_ON(i->iter->level);
		BUG_ON(bkey_cmp(bkey_start_pos(&i->k->k), i->iter->pos));
		BUG_ON(debug_check_bkeys(c) &&
		       bch2_bkey_invalid(c, i->iter->btree_id,
					 bkey_i_to_s_c(i->k)));
		BUG_ON(i->iter->uptodate == BTREE_ITER_END);
	}

	bubble_sort(trans->entries, trans->nr, btree_trans_cmp);

	if (unlikely(!percpu_ref_tryget(&c->writes)))
		return -EROFS;
retry:
	split = NULL;
	cycle_gc_lock = false;

	trans_for_each_entry(trans, i) {
		if (!bch2_btree_iter_upgrade(i->iter, 1)) {
			ret = -EINTR;
			goto err;
		}

		if (i->iter->flags & BTREE_ITER_ERROR) {
			ret = -EIO;
			goto err;
		}
	}

	ret = do_btree_insert_at(trans, &split, &cycle_gc_lock);
	if (unlikely(ret))
		goto err;

	trans_for_each_leaf(trans, i)
		bch2_foreground_maybe_merge(c, i->iter, 0, trans->flags);

	trans_for_each_entry(trans, i)
		bch2_btree_iter_downgrade(i->iter);
out:
	percpu_ref_put(&c->writes);

	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
		/* make sure we didn't drop or screw up locks: */
		for_each_btree_iter(trans->entries[0].iter, linked) {
			bch2_btree_iter_verify_locks(linked);
			BUG_ON((trans->flags & BTREE_INSERT_NOUNLOCK) &&
			       trans->did_work &&
			       linked->uptodate >= BTREE_ITER_NEED_RELOCK);
		}

		/* make sure we didn't lose an error: */
		if (!ret)
			trans_for_each_entry(trans, i)
				BUG_ON(!i->done);
	}

	BUG_ON(!(trans->flags & BTREE_INSERT_ATOMIC) && ret == -EINTR);

	return ret;
err:
	flags = trans->flags;

	/*
	 * BTREE_INSERT_NOUNLOCK means don't unlock _after_ successful btree
	 * update; if we haven't done anything yet it doesn't apply
	 */
	if (!trans->did_work)
		flags &= ~BTREE_INSERT_NOUNLOCK;

	if (split) {
		ret = bch2_btree_split_leaf(c, split, flags);

		/*
		 * if the split succeeded without dropping locks the insert will
		 * still be atomic (in the BTREE_INSERT_ATOMIC sense, what the
		 * caller peeked() and is overwriting won't have changed)
		 */
#if 0
		/*
		 * XXX:
		 * split -> btree node merging (of parent node) might still drop
		 * locks when we're not passing it BTREE_INSERT_NOUNLOCK
		 */
		if (!ret && !trans->did_work)
			goto retry;
#endif

		/*
		 * don't care if we got ENOSPC because we told split it
		 * couldn't block:
		 */
		if (!ret || (flags & BTREE_INSERT_NOUNLOCK))
			ret = -EINTR;
	}

	if (cycle_gc_lock) {
		if (!down_read_trylock(&c->gc_lock)) {
			if (flags & BTREE_INSERT_NOUNLOCK)
				goto out;

			bch2_btree_iter_unlock(trans->entries[0].iter);
			down_read(&c->gc_lock);
		}
		up_read(&c->gc_lock);
	}

	if (ret == -EINTR) {
		if (flags & BTREE_INSERT_NOUNLOCK)
			goto out;

		trans_for_each_entry(trans, i) {
			int ret2 = bch2_btree_iter_traverse(i->iter);
			if (ret2) {
				ret = ret2;
				goto out;
			}

			BUG_ON(i->iter->uptodate > BTREE_ITER_NEED_PEEK);
		}

		/*
		 * BTREE_INSERT_ATOMIC means we have to return -EINTR if we
		 * dropped locks:
		 */
		if (!(flags & BTREE_INSERT_ATOMIC))
			goto retry;
	}

	goto out;
}

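/*
 * Usage sketch (illustrative only, not part of the original file): updating
 * two keys atomically via the bch2_btree_insert_at() wrapper. iter1/iter2 and
 * k1/k2 are hypothetical; multiple entries require BTREE_INSERT_ATOMIC, and
 * with that flag the caller must handle -EINTR itself:
 */
#if 0
	ret = bch2_btree_insert_at(c, NULL, NULL, &journal_seq,
				   BTREE_INSERT_ATOMIC,
				   BTREE_INSERT_ENTRY(&iter1, &k1),
				   BTREE_INSERT_ENTRY(&iter2, &k2));
	if (ret == -EINTR) {
		/* locking changed: retraverse the iterators and retry */
	}
#endif
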
int bch2_btree_delete_at(struct btree_iter *iter, unsigned flags)
{
	struct bkey_i k;

	bkey_init(&k.k);
	k.k.p = iter->pos;

	return bch2_btree_insert_at(iter->c, NULL, NULL, NULL,
				    BTREE_INSERT_NOFAIL|
				    BTREE_INSERT_USE_RESERVE|flags,
				    BTREE_INSERT_ENTRY(iter, &k));
}

int bch2_btree_insert_list_at(struct btree_iter *iter,
			      struct keylist *keys,
			      struct disk_reservation *disk_res,
			      struct extent_insert_hook *hook,
			      u64 *journal_seq, unsigned flags)
{
	BUG_ON(flags & BTREE_INSERT_ATOMIC);
	BUG_ON(bch2_keylist_empty(keys));
	bch2_verify_keylist_sorted(keys);

	while (!bch2_keylist_empty(keys)) {
		int ret = bch2_btree_insert_at(iter->c, disk_res, hook,
				journal_seq, flags,
				BTREE_INSERT_ENTRY(iter, bch2_keylist_front(keys)));
		if (ret)
			return ret;

		bch2_keylist_pop_front(keys);
	}

	return 0;
}

/**
 * bch2_btree_insert - insert a key into the given btree
 * @c:			pointer to struct bch_fs
 * @id:			btree to insert into
 * @k:			key to insert
 * @hook:		insert callback
 */
int bch2_btree_insert(struct bch_fs *c, enum btree_id id,
		      struct bkey_i *k,
		      struct disk_reservation *disk_res,
		      struct extent_insert_hook *hook,
		      u64 *journal_seq, int flags)
{
	struct btree_iter iter;
	int ret;

	bch2_btree_iter_init(&iter, c, id, bkey_start_pos(&k->k),
			     BTREE_ITER_INTENT);
	ret = bch2_btree_insert_at(c, disk_res, hook, journal_seq, flags,
				   BTREE_INSERT_ENTRY(&iter, k));
	bch2_btree_iter_unlock(&iter);

	return ret;
}

/*
 * bch2_btree_delete_range - delete everything within a given range
 *
 * Range is a half open interval - [start, end)
 */
int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
			    struct bpos start,
			    struct bpos end,
			    struct bversion version,
			    struct disk_reservation *disk_res,
			    struct extent_insert_hook *hook,
			    u64 *journal_seq)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_btree_iter_init(&iter, c, id, start,
			     BTREE_ITER_INTENT);

	while ((k = bch2_btree_iter_peek(&iter)).k &&
	       !(ret = btree_iter_err(k))) {
		unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
		/* really shouldn't be using a bare, unpadded bkey_i */
		struct bkey_i delete;

		if (bkey_cmp(iter.pos, end) >= 0)
			break;

		if (k.k->type == KEY_TYPE_DISCARD) {
			bch2_btree_iter_next(&iter);
			continue;
		}

		bkey_init(&delete.k);

		/*
		 * For extents, iter.pos won't necessarily be the same as
		 * bkey_start_pos(k.k) (for non extents they always will be the
		 * same). It's important that we delete starting from iter.pos
		 * because the range we want to delete could start in the middle
		 * of k.
		 *
		 * (bch2_btree_iter_peek() does guarantee that iter.pos >=
		 * bkey_start_pos(k.k)).
		 */
		delete.k.p = iter.pos;
		delete.k.version = version;

		if (iter.flags & BTREE_ITER_IS_EXTENTS) {
			/*
			 * The extents btree is special - KEY_TYPE_DISCARD is
			 * used for deletions, not KEY_TYPE_DELETED. This is an
			 * internal implementation detail that probably
			 * shouldn't be exposed (internally, KEY_TYPE_DELETED is
			 * used as a proxy for k->size == 0):
			 */
			delete.k.type = KEY_TYPE_DISCARD;

			/* create the biggest key we can */
			bch2_key_resize(&delete.k, max_sectors);
			bch2_cut_back(end, &delete.k);
		}

		ret = bch2_btree_insert_at(c, disk_res, hook, journal_seq,
					   BTREE_INSERT_NOFAIL,
					   BTREE_INSERT_ENTRY(&iter, &delete));
		if (ret)
			break;

		bch2_btree_iter_cond_resched(&iter);
	}

	bch2_btree_iter_unlock(&iter);
	return ret;
}

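/*
 * Usage sketch (illustrative only): deleting every key belonging to a given
 * inode in the extents btree. POS() and ZERO_VERSION as defined in
 * bcachefs_format.h; c, inum and ret are hypothetical:
 */
#if 0
	ret = bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
				      POS(inum, 0), POS(inum + 1, 0),
				      ZERO_VERSION, NULL, NULL, NULL);
#endif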