// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "bkey_on_stack.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "debug.h"
#include "extents.h"
#include "extent_update.h"
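
/*
 * Helpers for keeping extent btree updates atomic: an extent insert can
 * overwrite many existing extents, each of which may in turn need updates
 * to the alloc & ec btrees. These helpers bound that work, trimming the
 * insert so the whole update fits in a single btree transaction.
 */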

/*
 * This counts the number of iterators to the alloc & ec btrees we'll need
 * when inserting/removing this extent:
 */
static unsigned bch2_bkey_nr_alloc_ptrs(struct bkey_s_c k)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        unsigned ret = 0;

        bkey_extent_entry_for_each(ptrs, entry) {
                switch (__extent_entry_type(entry)) {
                case BCH_EXTENT_ENTRY_ptr:
                case BCH_EXTENT_ENTRY_stripe_ptr:
                        /* One alloc/ec btree update per replica: */
                        ret++;
                }
        }

        return ret;
}
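
/*
 * Count the btree iterators needed to process one key in the extent update
 * path; trims *end and returns nonzero (a negative error, or 1 once *end has
 * been clamped) as soon as the budget @max_iters would be exceeded, so the
 * caller can stop scanning early:
 */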
static int count_iters_for_insert(struct btree_trans *trans,
                                  struct bkey_s_c k,
                                  unsigned offset,
                                  struct bpos *end,
                                  unsigned *nr_iters,
                                  unsigned max_iters,
                                  bool overwrite)
{
        int ret = 0;

        /*
         * The extent update path requires an _additional_ iterator for each
         * extent we're inserting and overwriting:
         */
        *nr_iters += 1;

        switch (k.k->type) {
        case KEY_TYPE_extent:
        case KEY_TYPE_reflink_v:
                *nr_iters += bch2_bkey_nr_alloc_ptrs(k);

                if (*nr_iters >= max_iters) {
                        /* Out of budget: clamp *end so the update stops here: */
                        *end = bpos_min(*end, k.k->p);
                        ret = 1;
                }

                break;
        case KEY_TYPE_reflink_p: {
                struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
                u64 idx = le64_to_cpu(p.v->idx);
                unsigned sectors = bpos_min(*end, p.k->p).offset -
                        bkey_start_offset(p.k);
                struct btree_iter *iter;
                struct bkey_s_c r_k;
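
                /*
                 * A reflink pointer refers to a range of indirect extents in
                 * the reflink btree; walk that range, since updating each
                 * indirect extent we touch needs its own alloc/ec iterators:
                 */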
                for_each_btree_key(trans, iter,
                                   BTREE_ID_REFLINK, POS(0, idx + offset),
                                   BTREE_ITER_SLOTS, r_k, ret) {
                        if (bkey_cmp(bkey_start_pos(r_k.k),
                                     POS(0, idx + sectors)) >= 0)
                                break;

                        *nr_iters += 1 + bch2_bkey_nr_alloc_ptrs(r_k);

                        if (*nr_iters >= max_iters) {
                                struct bpos pos = bkey_start_pos(k.k);
                                pos.offset += r_k.k->p.offset - idx;

                                *end = bpos_min(*end, pos);
                                ret = 1;
                                break;
                        }
                }

                bch2_trans_iter_put(trans, iter);
                break;
        }
        }

        return ret;
}

#define EXTENT_ITERS_MAX        (BTREE_ITER_MAX / 3)
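
/*
 * Compute the farthest position *end such that inserting @insert, trimmed to
 * *end, stays within the iterator budget and within the current leaf node.
 * The insert itself is budgeted against half of EXTENT_ITERS_MAX, leaving
 * headroom for the keys it overwrites:
 */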
int bch2_extent_atomic_end(struct btree_iter *iter,
                           struct bkey_i *insert,
                           struct bpos *end)
{
        struct btree_trans *trans = iter->trans;
        struct btree *b;
        struct btree_node_iter node_iter;
        struct bkey_packed *_k;
        unsigned nr_iters = 0;
        int ret;

        ret = bch2_btree_iter_traverse(iter);
        if (ret)
                return ret;

        b = iter->l[0].b;
        node_iter = iter->l[0].iter;

        BUG_ON(bkey_cmp(bkey_start_pos(&insert->k), b->data->min_key) < 0);

        /* Never extend past the end of the btree node: */
        *end = bpos_min(insert->k.p, b->key.k.p);

        ret = count_iters_for_insert(trans, bkey_i_to_s_c(insert), 0, end,
                                     &nr_iters, EXTENT_ITERS_MAX / 2, false);
        if (ret < 0)
                return ret;
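
        /*
         * Walk the existing keys that the insert overlaps, counting the
         * iterators each overwrite needs; count_iters_for_insert() trims
         * *end and returns nonzero once the budget is exhausted:
         */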
        while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
                                                      KEY_TYPE_discard))) {
                struct bkey unpacked;
                struct bkey_s_c k = bkey_disassemble(b, _k, &unpacked);
                unsigned offset = 0;

                if (bkey_cmp(bkey_start_pos(k.k), *end) >= 0)
                        break;

                if (bkey_cmp(bkey_start_pos(&insert->k),
                             bkey_start_pos(k.k)) > 0)
                        offset = bkey_start_offset(&insert->k) -
                                bkey_start_offset(k.k);

                ret = count_iters_for_insert(trans, k, offset, end,
                                             &nr_iters, EXTENT_ITERS_MAX, true);
                if (ret)
                        break;

                bch2_btree_node_iter_advance(&node_iter, b);
        }

        return ret < 0 ? ret : 0;
}
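
/*
 * Trim @k so that inserting it generates no more work than can be done in a
 * single atomic btree transaction:
 */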
int bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
{
        struct bpos end;
        int ret;

        ret = bch2_extent_atomic_end(iter, k, &end);
        if (ret)
                return ret;

        bch2_cut_back(end, k);
        return 0;
}
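
/*
 * Check whether @k can be inserted as-is: true if the atomic end position is
 * already k's end position, i.e. no trimming would be required:
 */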
int bch2_extent_is_atomic(struct bkey_i *k, struct btree_iter *iter)
{
        struct bpos end;
        int ret;

        ret = bch2_extent_atomic_end(iter, k, &end);
        if (ret)
                return ret;

        return !bkey_cmp(end, k->k.p);
}

enum btree_insert_ret
bch2_extent_can_insert(struct btree_trans *trans,
                       struct btree_iter *iter,
                       struct bkey_i *insert)
{
        struct btree_iter_level *l = &iter->l[0];
        struct btree_node_iter node_iter = l->iter;
        struct bkey_packed *_k;
        struct bkey_s_c k;
        struct bkey unpacked;
        int sectors;

        _k = bch2_btree_node_iter_peek_filter(&node_iter, l->b,
                                              KEY_TYPE_discard);
        if (!_k)
                return BTREE_INSERT_OK;

        k = bkey_disassemble(l->b, _k, &unpacked);

        /* Check if we're splitting a compressed extent: */
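        /*
         * Each half of the split will reference all of the compressed
         * extent's sectors, so splitting increases the on-disk footprint by
         * the compressed size; take a disk reservation for it up front:
         */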
        if (bkey_cmp(bkey_start_pos(&insert->k), bkey_start_pos(k.k)) > 0 &&
            bkey_cmp(insert->k.p, k.k->p) < 0 &&
            (sectors = bch2_bkey_sectors_compressed(k))) {
                int flags = trans->flags & BTREE_INSERT_NOFAIL
                        ? BCH_DISK_RESERVATION_NOFAIL : 0;

                switch (bch2_disk_reservation_add(trans->c, trans->disk_res,
                                                  sectors, flags)) {
                case 0:
                        break;
                case -ENOSPC:
                        return BTREE_INSERT_ENOSPC;
                default:
                        BUG();
                }
        }

        return BTREE_INSERT_OK;
}