#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_iter.h"
#include "btree_locking.h"

#include <linux/sort.h>
#include <trace/events/bcachefs.h>
/* Inserting into a given leaf node (last stage of insert): */

/* Handle overwrites and do insert, for non extents: */
bool bch2_btree_bset_insert_key(struct btree_iter *iter,
                                struct btree *b,
                                struct btree_node_iter *node_iter,
                                struct bkey_i *insert)
{
        const struct bkey_format *f = &b->format;
        struct bkey_packed *k;
        struct bset_tree *t;
        unsigned clobber_u64s;

        EBUG_ON(btree_node_just_written(b));
        EBUG_ON(bset_written(b, btree_bset_last(b)));
        EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
        EBUG_ON(bkey_cmp(bkey_start_pos(&insert->k), b->data->min_key) < 0 ||
                bkey_cmp(insert->k.p, b->data->max_key) > 0);
        k = bch2_btree_node_iter_peek_all(node_iter, b);
        if (k && !bkey_cmp_packed(b, k, &insert->k)) {
                BUG_ON(bkey_whiteout(k));

                t = bch2_bkey_to_bset(b, k);

                if (bset_unwritten(b, bset(b, t)) &&
                    bkey_val_u64s(&insert->k) == bkeyp_val_u64s(f, k) &&
                    !bkey_whiteout(&insert->k)) {
                        k->type = insert->k.type;
                        memcpy_u64s(bkeyp_val(f, k), &insert->v,
                                    bkey_val_u64s(&insert->k));
                        return true;
                }

                insert->k.needs_whiteout = k->needs_whiteout;

                btree_keys_account_key_drop(&b->nr, t - b->set, k);

                if (t == bset_tree_last(b)) {
                        clobber_u64s = k->u64s;

                        /*
                         * If we're deleting, and the key we're deleting doesn't
                         * need a whiteout (it wasn't overwriting a key that had
                         * been written to disk) - just delete it:
                         */
                        if (bkey_whiteout(&insert->k) && !k->needs_whiteout) {
                                bch2_bset_delete(b, k, clobber_u64s);
                                bch2_btree_node_iter_fix(iter, b, node_iter, t,
                                                         k, clobber_u64s, 0);
                                return true;
                        }

                        goto overwrite;
                }

                k->type = KEY_TYPE_DELETED;
                bch2_btree_node_iter_fix(iter, b, node_iter, t, k,
                                         k->u64s, k->u64s);

                if (bkey_whiteout(&insert->k)) {
                        reserve_whiteout(b, t, k);
                        return true;
                } else {
                        k->needs_whiteout = false;
                }
        } else {
                /*
                 * Deleting, but the key to delete wasn't found - nothing to do:
                 */
                if (bkey_whiteout(&insert->k))
                        return false;

                insert->k.needs_whiteout = false;
        }

        t = bset_tree_last(b);
        k = bch2_btree_node_iter_bset_pos(node_iter, b, t);
        clobber_u64s = 0;
overwrite:
        bch2_bset_insert(b, node_iter, k, insert, clobber_u64s);
        if (k->u64s != clobber_u64s || bkey_whiteout(&insert->k))
                bch2_btree_node_iter_fix(iter, b, node_iter, t, k,
                                         clobber_u64s, k->u64s);
        return true;
}
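
/*
 * Journal pin flush callback for one of a btree node's two write buffers
 * (writes[0]/writes[1]): write the node out if the pin being flushed still
 * belongs to the node's current write.
 */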
static void __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
                               unsigned i, u64 seq)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct btree_write *w = container_of(pin, struct btree_write, journal);
        struct btree *b = container_of(w, struct btree, writes[i]);

        six_lock_read(&b->lock);
        bch2_btree_node_write_cond(c, b,
                (btree_current_write(b) == w &&
                 w->journal.pin_list == journal_seq_pin(j, seq)));
        six_unlock_read(&b->lock);
}
static void btree_node_flush0(struct journal *j, struct journal_entry_pin *pin, u64 seq)
{
        return __btree_node_flush(j, pin, 0, seq);
}

static void btree_node_flush1(struct journal *j, struct journal_entry_pin *pin, u64 seq)
{
        return __btree_node_flush(j, pin, 1, seq);
}
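
/*
 * Add @insert to the journal (unless we're replaying the journal), report the
 * sequence number it landed in via *trans->journal_seq, pin that journal entry
 * on the node's current write so the node is written out before the journal
 * entry can be reclaimed, and mark the node dirty.
 */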
void bch2_btree_journal_key(struct btree_insert *trans,
                            struct btree_iter *iter,
                            struct bkey_i *insert)
{
        struct bch_fs *c = trans->c;
        struct journal *j = &c->journal;
        struct btree *b = iter->l[0].b;
        struct btree_write *w = btree_current_write(b);

        EBUG_ON(iter->level || b->level);
        EBUG_ON(trans->journal_res.ref !=
                !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY));

        if (likely(trans->journal_res.ref)) {
                u64 seq = trans->journal_res.seq;
                bool needs_whiteout = insert->k.needs_whiteout;

                insert->k.needs_whiteout = false;
                bch2_journal_add_keys(j, &trans->journal_res,
                                      iter->btree_id, insert);
                insert->k.needs_whiteout = needs_whiteout;

                bch2_journal_set_has_inode(j, &trans->journal_res,
                                           insert->k.p.inode);

                if (trans->journal_seq)
                        *trans->journal_seq = seq;
                btree_bset_last(b)->journal_seq = cpu_to_le64(seq);
        }

        if (unlikely(!journal_pin_active(&w->journal)))
                bch2_journal_pin_add(j, &trans->journal_res,
                                     &w->journal,
                                     btree_node_write_idx(b) == 0
                                     ? btree_node_flush0
                                     : btree_node_flush1);

        if (unlikely(!btree_node_dirty(b)))
                set_btree_node_dirty(b);
}
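
/*
 * Non-extent insert path: do the insert into the leaf node, then journal the
 * key if the insert modified the btree. The caller has already checked that
 * the key fits in the node.
 */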
static enum btree_insert_ret
bch2_insert_fixup_key(struct btree_insert *trans,
                      struct btree_insert_entry *insert)
{
        struct btree_iter *iter = insert->iter;
        struct btree_iter_level *l = &iter->l[0];

        EBUG_ON(iter->level);
        EBUG_ON(insert->k->k.u64s >
                bch_btree_keys_u64s_remaining(trans->c, l->b));

        if (bch2_btree_bset_insert_key(iter, l->b, &l->iter,
                                       insert->k))
                bch2_btree_journal_key(trans, iter, insert->k);

        trans->did_work = true;
        return BTREE_INSERT_OK;
}
/**
 * btree_insert_key_leaf - insert one key into a leaf node
 */
static enum btree_insert_ret
btree_insert_key_leaf(struct btree_insert *trans,
                      struct btree_insert_entry *insert)
{
        struct bch_fs *c = trans->c;
        struct btree_iter *iter = insert->iter;
        struct btree *b = iter->l[0].b;
        enum btree_insert_ret ret;
        int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
        int old_live_u64s = b->nr.live_u64s;
        int live_u64s_added, u64s_added;

        btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);

        ret = !btree_node_is_extents(b)
                ? bch2_insert_fixup_key(trans, insert)
                : bch2_insert_fixup_extent(trans, insert);

        live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
        u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;

        if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
                b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
        if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0)
                b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);

        if (u64s_added > live_u64s_added &&
            bch2_maybe_compact_whiteouts(c, b))
                bch2_btree_iter_reinit_node(iter, b);

        trace_btree_insert_key(c, b, insert->k);

        return ret;
}
static bool same_leaf_as_prev(struct btree_insert *trans,
                              struct btree_insert_entry *i)
{
        /*
         * Because we sorted the transaction entries, if multiple iterators
         * point to the same leaf node they'll always be adjacent now:
         */
        return i != trans->entries &&
                i[0].iter->l[0].b == i[-1].iter->l[0].b;
}
#define trans_for_each_entry(trans, i)                                  \
        for ((i) = (trans)->entries; (i) < (trans)->entries + (trans)->nr; (i)++)
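
/*
 * Take the write lock on @b for an insert: if the node was just written, do
 * post-write cleanup first, and start a new bset if the current one has been
 * written out or has grown too big.
 */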
inline void bch2_btree_node_lock_for_insert(struct bch_fs *c, struct btree *b,
                                            struct btree_iter *iter)
{
        bch2_btree_node_lock_write(b, iter);

        if (btree_node_just_written(b) &&
            bch2_btree_post_write_cleanup(c, b))
                bch2_btree_iter_reinit_node(iter, b);

        /*
         * If the last bset has been written, or if it's gotten too big - start
         * a new bset to insert into:
         */
        if (want_new_bset(c, b))
                bch2_btree_init_next(c, b, iter);
}
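
/*
 * Write lock (and, below, unlock) the leaf node of every entry in the
 * transaction, taking each distinct leaf's lock only once:
 */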
static void multi_lock_write(struct bch_fs *c, struct btree_insert *trans)
{
        struct btree_insert_entry *i;

        trans_for_each_entry(trans, i)
                if (!same_leaf_as_prev(trans, i))
                        bch2_btree_node_lock_for_insert(c, i->iter->l[0].b,
                                                        i->iter);
}

static void multi_unlock_write(struct btree_insert *trans)
{
        struct btree_insert_entry *i;

        trans_for_each_entry(trans, i)
                if (!same_leaf_as_prev(trans, i))
                        bch2_btree_node_unlock_write(i->iter->l[0].b, i->iter);
}
static inline int btree_trans_cmp(struct btree_insert_entry l,
                                  struct btree_insert_entry r)
{
        return btree_iter_cmp(l.iter, r.iter);
}
/* Normal update interface: */

/**
 * __bch2_btree_insert_at - insert keys at given iterator positions
 *
 * This is the main entry point for btree updates.
 *
 * Return values:
 * -EINTR: locking changed, this function should be called again. Only returned
 *  if passed BTREE_INSERT_ATOMIC.
 * -EROFS: filesystem read only
 * -EIO: journal or btree node IO error
 */
int __bch2_btree_insert_at(struct btree_insert *trans)
{
        struct bch_fs *c = trans->c;
        struct btree_insert_entry *i;
        struct btree_iter *split = NULL;
        bool cycle_gc_lock = false;
        unsigned u64s;
        int ret;

        trans_for_each_entry(trans, i) {
                BUG_ON(i->iter->level);
                BUG_ON(bkey_cmp(bkey_start_pos(&i->k->k), i->iter->pos));
                BUG_ON(debug_check_bkeys(c) &&
                       bch2_bkey_invalid(c, i->iter->btree_id,
                                         bkey_i_to_s_c(i->k)));
        }

        bubble_sort(trans->entries, trans->nr, btree_trans_cmp);

        if (unlikely(!percpu_ref_tryget(&c->writes)))
                return -EROFS;

        trans_for_each_entry(trans, i) {
                if (!bch2_btree_iter_set_locks_want(i->iter, 1))

                if (i->iter->uptodate == BTREE_ITER_NEED_TRAVERSE) {
                        ret = bch2_btree_iter_traverse(i->iter);

        trans->did_work = false;

        trans_for_each_entry(trans, i)

                u64s += jset_u64s(i->k->k.u64s + i->extra_res);

        memset(&trans->journal_res, 0, sizeof(trans->journal_res));

        ret = !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY)
                ? bch2_journal_res_get(&c->journal,

        multi_lock_write(c, trans);

        trans_for_each_entry(trans, i) {
                /* Multiple inserts might go to same leaf: */
                if (!same_leaf_as_prev(trans, i))
                        u64s = 0;

                /*
                 * bch2_btree_node_insert_fits() must be called under write lock:
                 * with only an intent lock, another thread can still call
                 * bch2_btree_node_write(), converting an unwritten bset to a
                 * written one
                 */
                u64s += i->k->k.u64s + i->extra_res;
                if (!bch2_btree_node_insert_fits(c,
                                i->iter->l[0].b, u64s)) {

        cycle_gc_lock = false;

        trans_for_each_entry(trans, i) {

                switch (btree_insert_key_leaf(trans, i)) {
                case BTREE_INSERT_OK:

                case BTREE_INSERT_JOURNAL_RES_FULL:
                case BTREE_INSERT_NEED_TRAVERSE:

                case BTREE_INSERT_NEED_RESCHED:

                case BTREE_INSERT_BTREE_NODE_FULL:

                case BTREE_INSERT_ENOSPC:

                case BTREE_INSERT_NEED_GC_LOCK:
                        cycle_gc_lock = true;

        if (!trans->did_work && (ret || split))

        multi_unlock_write(trans);
        bch2_journal_res_put(&c->journal, &trans->journal_res);

        trans_for_each_entry(trans, i)
                if (i->iter->flags & BTREE_ITER_AT_END_OF_LEAF)

        trans_for_each_entry(trans, i) {
                /*
                 * iterators are inconsistent when they hit end of leaf, until
                 * traversed again:
                 */
                if (i->iter->uptodate < BTREE_ITER_NEED_TRAVERSE &&
                    !same_leaf_as_prev(trans, i))
                        bch2_foreground_maybe_merge(c, i->iter, 0);
        }

        /* make sure we didn't lose an error: */
        if (!ret && IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
                trans_for_each_entry(trans, i)

        percpu_ref_put(&c->writes);

        /*
         * have to drop journal res before splitting, because splitting means
         * allocating new btree nodes, and holding a journal reservation
         * potentially blocks the allocator:
         */
        ret = bch2_btree_split_leaf(c, split, trans->flags);

        /*
         * if the split didn't have to drop locks the insert will still be
         * atomic (in the BTREE_INSERT_ATOMIC sense, what the caller peeked()
         * and is overwriting won't have changed)
         */

        down_read(&c->gc_lock);
        up_read(&c->gc_lock);

        trans_for_each_entry(trans, i) {
                int ret2 = bch2_btree_iter_traverse(i->iter);

        /*
         * BTREE_INSERT_ATOMIC means we have to return -EINTR if we
         * dropped locks:
         */
        if (!(trans->flags & BTREE_INSERT_ATOMIC))
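
/*
 * Example (sketch): a caller passing BTREE_INSERT_ATOMIC is expected to retry
 * on -EINTR, re-deriving its key from whatever it re-peeks before calling
 * again; here c, iter and new_key are assumed to have been set up by the
 * caller:
 *
 *      do {
 *              ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
 *                                         BTREE_INSERT_ATOMIC,
 *                                         BTREE_INSERT_ENTRY(&iter, &new_key));
 *      } while (ret == -EINTR);
 */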
int bch2_btree_delete_at(struct btree_iter *iter, unsigned flags)
{
        struct bkey_i k;

        bkey_init(&k.k);
        k.k.p = iter->pos;

        return bch2_btree_insert_at(iter->c, NULL, NULL, NULL,
                                    BTREE_INSERT_USE_RESERVE|flags,
                                    BTREE_INSERT_ENTRY(iter, &k));
}
int bch2_btree_insert_list_at(struct btree_iter *iter,
                              struct keylist *keys,
                              struct disk_reservation *disk_res,
                              struct extent_insert_hook *hook,
                              u64 *journal_seq, unsigned flags)
{
        BUG_ON(flags & BTREE_INSERT_ATOMIC);
        BUG_ON(bch2_keylist_empty(keys));
        bch2_verify_keylist_sorted(keys);

        while (!bch2_keylist_empty(keys)) {
                int ret = bch2_btree_insert_at(iter->c, disk_res, hook,
                                journal_seq, flags,
                                BTREE_INSERT_ENTRY(iter, bch2_keylist_front(keys)));
                if (ret)
                        return ret;

                bch2_keylist_pop_front(keys);
        }

        return 0;
}
/**
 * bch2_btree_insert - insert a key into a given btree
 * @c:          pointer to struct bch_fs
 * @id:         btree to insert into
 * @k:          key to insert
 * @hook:       insert callback
 */
int bch2_btree_insert(struct bch_fs *c, enum btree_id id,
                      struct bkey_i *k,
                      struct disk_reservation *disk_res,
                      struct extent_insert_hook *hook,
                      u64 *journal_seq, int flags)
{
        struct btree_iter iter;
        int ret;

        bch2_btree_iter_init(&iter, c, id, bkey_start_pos(&k->k),
                             BTREE_ITER_INTENT);
        ret = bch2_btree_insert_at(c, disk_res, hook, journal_seq, flags,
                                   BTREE_INSERT_ENTRY(&iter, k));
        bch2_btree_iter_unlock(&iter);

        return ret;
}
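
/*
 * Example (sketch; c, id and k are assumed to have been set up by the caller;
 * no disk reservation, insert hook or journal_seq):
 *
 *      int ret = bch2_btree_insert(c, id, k, NULL, NULL, NULL, 0);
 */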
/**
 * bch2_btree_delete_range - delete everything within a given range
 *
 * Range is a half open interval - [start, end)
 */
int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
                            struct bpos start,
                            struct bpos end,
                            struct bversion version,
                            struct disk_reservation *disk_res,
                            struct extent_insert_hook *hook,
                            u64 *journal_seq)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret = 0;

        bch2_btree_iter_init(&iter, c, id, start,
                             BTREE_ITER_INTENT);

        while ((k = bch2_btree_iter_peek(&iter)).k &&
               !(ret = btree_iter_err(k))) {
                unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
                /* really shouldn't be using a bare, unpadded bkey_i */
                struct bkey_i delete;

                if (bkey_cmp(iter.pos, end) >= 0)
                        break;

                bkey_init(&delete.k);

                /*
                 * For extents, iter.pos won't necessarily be the same as
                 * bkey_start_pos(k.k) (for non extents they always will be the
                 * same). It's important that we delete starting from iter.pos
                 * because the range we want to delete could start in the middle
                 * of k.
                 *
                 * (bch2_btree_iter_peek() does guarantee that iter.pos >=
                 * bkey_start_pos(k.k)).
                 */
                delete.k.p = iter.pos;
                delete.k.version = version;

                if (iter.flags & BTREE_ITER_IS_EXTENTS) {
                        /*
                         * The extents btree is special - KEY_TYPE_DISCARD is
                         * used for deletions, not KEY_TYPE_DELETED. This is an
                         * internal implementation detail that probably
                         * shouldn't be exposed (internally, KEY_TYPE_DELETED is
                         * used as a proxy for k->size == 0):
                         */
                        delete.k.type = KEY_TYPE_DISCARD;

                        /* create the biggest key we can */
                        bch2_key_resize(&delete.k, max_sectors);
                        bch2_cut_back(end, &delete.k);
                }

                ret = bch2_btree_insert_at(c, disk_res, hook, journal_seq,
                                           BTREE_INSERT_ENTRY(&iter, &delete));
                if (ret)
                        break;

                bch2_btree_iter_cond_resched(&iter);
        }

        bch2_btree_iter_unlock(&iter);
        return ret;
}
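
/*
 * Example (sketch; c, id, start, end and version are assumed to have been set
 * up by the caller; no disk reservation or insert hook):
 *
 *      u64 journal_seq = 0;
 *      int ret = bch2_btree_delete_range(c, id, start, end, version,
 *                                        NULL, NULL, &journal_seq);
 */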