X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fextent_update.c;h=21af6fb8cecff150908724c238f434bc54a9dd6d;hb=54b6beabf05b6cf62092f98f0c06395e4242b064;hp=5c43678e94a3173299ea62f5523d9407abd1d802;hpb=9fce394ca6d0082ced3612a627cd16e06d84244a;p=bcachefs-tools-debian

diff --git a/libbcachefs/extent_update.c b/libbcachefs/extent_update.c
index 5c43678..21af6fb 100644
--- a/libbcachefs/extent_update.c
+++ b/libbcachefs/extent_update.c
@@ -15,17 +15,26 @@ static unsigned bch2_bkey_nr_alloc_ptrs(struct bkey_s_c k)
 {
 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 	const union bch_extent_entry *entry;
-	unsigned ret = 0;
+	unsigned ret = 0, lru = 0;
 
 	bkey_extent_entry_for_each(ptrs, entry) {
 		switch (__extent_entry_type(entry)) {
 		case BCH_EXTENT_ENTRY_ptr:
+			/* Might also be updating LRU btree */
+			if (entry->ptr.cached)
+				lru++;
+
+			fallthrough;
 		case BCH_EXTENT_ENTRY_stripe_ptr:
 			ret++;
 		}
 	}
 
-	return ret;
+	/*
+	 * Updating keys in the alloc btree may also update keys in the
+	 * freespace or discard btrees:
+	 */
+	return lru + ret * 2;
 }
 
 static int count_iters_for_insert(struct btree_trans *trans,
@@ -58,14 +67,13 @@ static int count_iters_for_insert(struct btree_trans *trans,
 			u64 idx = le64_to_cpu(p.v->idx);
 			unsigned sectors = bpos_min(*end, p.k->p).offset -
 				bkey_start_offset(p.k);
-			struct btree_iter *iter;
+			struct btree_iter iter;
 			struct bkey_s_c r_k;
 
-			for_each_btree_key(trans, iter,
-					   BTREE_ID_REFLINK, POS(0, idx + offset),
+			for_each_btree_key_norestart(trans, iter,
+					   BTREE_ID_reflink, POS(0, idx + offset),
 					   BTREE_ITER_SLOTS, r_k, ret2) {
-				if (bkey_cmp(bkey_start_pos(r_k.k),
-					     POS(0, idx + sectors)) >= 0)
+				if (bkey_ge(bkey_start_pos(r_k.k), POS(0, idx + sectors)))
 					break;
 
 				/* extent_update_to_keys(), for the reflink_v update */
@@ -83,8 +91,8 @@ static int count_iters_for_insert(struct btree_trans *trans,
 					break;
 				}
 			}
+			bch2_trans_iter_exit(trans, &iter);
 
-			bch2_trans_iter_put(trans, iter);
 			break;
 		}
 	}
@@ -94,29 +102,21 @@ static int count_iters_for_insert(struct btree_trans *trans,
 
 #define EXTENT_ITERS_MAX	(BTREE_ITER_MAX / 3)
 
-int bch2_extent_atomic_end(struct btree_iter *iter,
+int bch2_extent_atomic_end(struct btree_trans *trans,
+			   struct btree_iter *iter,
 			   struct bkey_i *insert,
 			   struct bpos *end)
 {
-	struct btree_trans *trans = iter->trans;
-	struct btree		*b;
-	struct btree_node_iter	node_iter;
-	struct bkey_packed	*_k;
-	unsigned		nr_iters = 0;
+	struct btree_iter copy;
+	struct bkey_s_c k;
+	unsigned nr_iters = 0;
 	int ret;
 
 	ret = bch2_btree_iter_traverse(iter);
 	if (ret)
 		return ret;
 
-	b = iter->l[0].b;
-	node_iter = iter->l[0].iter;
-
-	BUG_ON(bkey_cmp(b->data->min_key, POS_MIN) &&
-	       bkey_cmp(bkey_start_pos(&insert->k),
-			bkey_predecessor(b->data->min_key)) < 0);
-
-	*end = bpos_min(insert->k.p, b->key.k.p);
+	*end = insert->k.p;
 
 	/* extent_update_to_keys(): */
 	nr_iters += 1;
@@ -126,16 +126,12 @@ int bch2_extent_atomic_end(struct btree_iter *iter,
 	if (ret < 0)
 		return ret;
 
-	while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
-		struct bkey	unpacked;
-		struct bkey_s_c	k = bkey_disassemble(b, _k, &unpacked);
-		unsigned	offset = 0;
+	bch2_trans_copy_iter(&copy, iter);
 
-		if (bkey_cmp(bkey_start_pos(k.k), *end) >= 0)
-			break;
+	for_each_btree_key_upto_continue_norestart(copy, insert->k.p, 0, k, ret) {
+		unsigned offset = 0;
 
-		if (bkey_cmp(bkey_start_pos(&insert->k),
-			     bkey_start_pos(k.k)) > 0)
+		if (bkey_gt(bkey_start_pos(&insert->k), bkey_start_pos(k.k)))
 			offset = bkey_start_offset(&insert->k) -
 				bkey_start_offset(k.k);
 
@@ -155,69 +151,23 @@ int bch2_extent_atomic_end(struct btree_iter *iter,
 					     &nr_iters, EXTENT_ITERS_MAX);
 		if (ret)
 			break;
-
-		bch2_btree_node_iter_advance(&node_iter, b);
 	}
+	bch2_trans_iter_exit(trans, &copy);
 
 	return ret < 0 ? ret : 0;
 }
 
-int bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
+int bch2_extent_trim_atomic(struct btree_trans *trans,
+			    struct btree_iter *iter,
+			    struct bkey_i *k)
 {
 	struct bpos end;
 	int ret;
 
-	ret = bch2_extent_atomic_end(iter, k, &end);
+	ret = bch2_extent_atomic_end(trans, iter, k, &end);
 	if (ret)
 		return ret;
 
 	bch2_cut_back(end, k);
 	return 0;
 }
-
-int bch2_extent_is_atomic(struct bkey_i *k, struct btree_iter *iter)
-{
-	struct bpos end;
-	int ret;
-
-	ret = bch2_extent_atomic_end(iter, k, &end);
-	if (ret)
-		return ret;
-
-	return !bkey_cmp(end, k->k.p);
-}
-
-enum btree_insert_ret
-bch2_extent_can_insert(struct btree_trans *trans,
-		       struct btree_iter *iter,
-		       struct bkey_i *insert)
-{
-	struct bkey_s_c k;
-	int ret, sectors;
-
-	k = bch2_btree_iter_peek_slot(iter);
-	ret = bkey_err(k);
-	if (ret)
-		return ret;
-
-	/* Check if we're splitting a compressed extent: */
-
-	if (bkey_cmp(bkey_start_pos(&insert->k), bkey_start_pos(k.k)) > 0 &&
-	    bkey_cmp(insert->k.p, k.k->p) < 0 &&
-	    (sectors = bch2_bkey_sectors_compressed(k))) {
-		int flags = trans->flags & BTREE_INSERT_NOFAIL
-			? BCH_DISK_RESERVATION_NOFAIL : 0;
-
-		switch (bch2_disk_reservation_add(trans->c, trans->disk_res,
-						  sectors, flags)) {
-		case 0:
-			break;
-		case -ENOSPC:
-			return BTREE_INSERT_ENOSPC;
-		default:
-			BUG();
-		}
-	}
-
-	return BTREE_INSERT_OK;
-}
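
Notes (reviewer annotations, not part of the patch):

1) The new return value of bch2_bkey_nr_alloc_ptrs() budgets btree
   iterators; my reading of the arithmetic is as follows. ret counts
   pointer and stripe-pointer entries, and each of those implies an
   alloc-btree update which, per the added comment, may also touch the
   freespace or discard btrees: hence ret * 2. Cached pointers may
   additionally need an LRU-btree update: hence the extra lru term.
   Worked example: an extent with two non-cached pointers and one
   cached pointer gives ret = 3 and lru = 1, so the function now
   reserves 3 * 2 + 1 = 7 iterators where it previously reserved 3.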
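
2) The comparison changes follow one pattern throughout: open-coded
   bkey_cmp() tests are replaced with named helpers. As used in this
   patch, the mapping is:

	bkey_cmp(a, b) >= 0   becomes   bkey_ge(a, b)
	bkey_cmp(a, b) >  0   becomes   bkey_gt(a, b)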
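
3) The iterator changes also follow one pattern: trans-allocated
   btree_iter pointers released with bch2_trans_iter_put() become
   on-stack structs released with bch2_trans_iter_exit(), and the
   loops use the _norestart variants, which return transaction
   restarts through the ret argument instead of retrying internally.
   A minimal sketch of the new lifecycle, assuming the in-tree
   bcachefs headers; the helper name walk_reflink_slots() is
   hypothetical, everything else is taken from the patch above:

	/*
	 * Sketch only: iterate slots of the reflink btree from idx,
	 * using the on-stack iterator pattern this patch converts to.
	 */
	static int walk_reflink_slots(struct btree_trans *trans, u64 idx)
	{
		struct btree_iter iter;	/* on stack, not trans-allocated */
		struct bkey_s_c k;
		int ret = 0;

		/* The loop macro initializes iter itself: */
		for_each_btree_key_norestart(trans, iter, BTREE_ID_reflink,
					     POS(0, idx), BTREE_ITER_SLOTS,
					     k, ret) {
			/* ... inspect k ... */
		}

		/* Every iterator must be paired with an exit: */
		bch2_trans_iter_exit(trans, &iter);

		/* A transaction restart is reported here via ret */
		return ret;
	}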