// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "bkey_on_stack.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "debug.h"
#include "extents.h"
#include "extent_update.h"

/*
 * This counts the number of iterators to the alloc & ec btrees we'll need
 * for inserting/removing this extent:
 */
static unsigned bch2_bkey_nr_alloc_ptrs(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	unsigned ret = 0;

	bkey_extent_entry_for_each(ptrs, entry) {
		switch (__extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
		case BCH_EXTENT_ENTRY_stripe_ptr:
			ret++;
		}
	}

	return ret;
}

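/*
 * Count the btree iterators that inserting or overwriting @k will need,
 * accumulating into *nr_iters; once *nr_iters reaches @max_iters, trim
 * *end so the update stays within budget and return 1.  @offset is how
 * far into @k the insert begins.
 */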
static int count_iters_for_insert(struct btree_trans *trans,
				  struct bkey_s_c k,
				  unsigned offset,
				  struct bpos *end,
				  unsigned *nr_iters,
				  unsigned max_iters)
{
	int ret = 0, ret2 = 0;

	if (*nr_iters >= max_iters) {
		*end = bpos_min(*end, k.k->p);
		ret = 1;
	}

	switch (k.k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		*nr_iters += bch2_bkey_nr_alloc_ptrs(k);

		if (*nr_iters >= max_iters) {
			*end = bpos_min(*end, k.k->p);
			ret = 1;
		}

		break;
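	/*
	 * Reflink pointers also need iterators for the indirect extents
	 * they reference, since the refcounts on those get updated along
	 * with this key:
	 */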
	case KEY_TYPE_reflink_p: {
		struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
		u64 idx = le64_to_cpu(p.v->idx);
		unsigned sectors = bpos_min(*end, p.k->p).offset -
			bkey_start_offset(p.k);
		struct btree_iter *iter;
		struct bkey_s_c r_k;

		for_each_btree_key(trans, iter,
				   BTREE_ID_REFLINK, POS(0, idx + offset),
				   BTREE_ITER_SLOTS, r_k, ret2) {
			if (bkey_cmp(bkey_start_pos(r_k.k),
				     POS(0, idx + sectors)) >= 0)
				break;

			/* extent_update_to_keys(), for the reflink_v update */
			*nr_iters += 1;

			*nr_iters += 1 + bch2_bkey_nr_alloc_ptrs(r_k);

			if (*nr_iters >= max_iters) {
				struct bpos pos = bkey_start_pos(k.k);
				pos.offset += min_t(u64, k.k->size,
						    r_k.k->p.offset - idx);

				*end = bpos_min(*end, pos);
				ret = 1;
				break;
			}
		}

		bch2_trans_iter_put(trans, iter);
		break;
	}
	}

	return ret2 ?: ret;
}

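/*
 * Iterator budget for a single extent update - a fraction of
 * BTREE_ITER_MAX, presumably to leave headroom for the other iterators a
 * transaction holds:
 */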
#define EXTENT_ITERS_MAX	(BTREE_ITER_MAX / 3)

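/*
 * Compute the farthest position @insert can extend to - at most its own
 * end position, and never past the end of the btree node - such that the
 * update stays within the iterator budget; the result is stored in *end.
 */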
int bch2_extent_atomic_end(struct btree_iter *iter,
			   struct bkey_i *insert,
			   struct bpos *end)
{
	struct btree_trans *trans = iter->trans;
	struct btree *b;
	struct btree_node_iter	node_iter;
	struct bkey_packed	*_k;
	unsigned		nr_iters = 0;
	int ret;

	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return ret;

	b = iter->l[0].b;
	node_iter = iter->l[0].iter;

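	/* The insert must not start before the range this node covers: */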
	BUG_ON(bkey_cmp(b->data->min_key, POS_MIN) &&
	       bkey_cmp(bkey_start_pos(&insert->k),
			bkey_predecessor(b->data->min_key)) < 0);

	*end = bpos_min(insert->k.p, b->key.k.p);

	/* extent_update_to_keys(): */
	nr_iters += 1;

	ret = count_iters_for_insert(trans, bkey_i_to_s_c(insert), 0, end,
				     &nr_iters, EXTENT_ITERS_MAX / 2);
	if (ret < 0)
		return ret;

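	/*
	 * Walk the existing extents that @insert overwrites, counting the
	 * updates each overlap generates, stopping (and trimming *end)
	 * once the iterator budget is spent:
	 */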
	while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
		struct bkey	unpacked;
		struct bkey_s_c k = bkey_disassemble(b, _k, &unpacked);
		unsigned offset = 0;

		if (bkey_cmp(bkey_start_pos(k.k), *end) >= 0)
			break;

		if (bkey_cmp(bkey_start_pos(&insert->k),
			     bkey_start_pos(k.k)) > 0)
			offset = bkey_start_offset(&insert->k) -
				bkey_start_offset(k.k);

		/*
		 * extent_handle_overwrites(): overwriting all of @k, or
		 * just its front, takes one update; overwriting the back
		 * or middle takes two, since extents are keyed by their
		 * end position - trimming the back moves the key, and a
		 * middle overwrite splits @k in two:
		 */
		switch (bch2_extent_overlap(&insert->k, k.k)) {
		case BCH_EXTENT_OVERLAP_ALL:
		case BCH_EXTENT_OVERLAP_FRONT:
			nr_iters += 1;
			break;
		case BCH_EXTENT_OVERLAP_BACK:
		case BCH_EXTENT_OVERLAP_MIDDLE:
			nr_iters += 2;
			break;
		}

		ret = count_iters_for_insert(trans, k, offset, end,
					&nr_iters, EXTENT_ITERS_MAX);
		if (ret)
			break;

		bch2_btree_node_iter_advance(&node_iter, b);
	}

	return ret < 0 ? ret : 0;
}

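/*
 * Trim @k so that inserting it stays within the iterator budget; callers
 * insert the trimmed extent, then loop to process the remainder.
 */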
int bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
{
	struct bpos end;
	int ret;

	ret = bch2_extent_atomic_end(iter, k, &end);
	if (ret)
		return ret;

	bch2_cut_back(end, k);
	return 0;
}

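/*
 * Returns 1 if @k can be inserted atomically as is (i.e.
 * bch2_extent_atomic_end() wouldn't trim it), 0 if not, or a negative
 * error code.
 */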
int bch2_extent_is_atomic(struct bkey_i *k, struct btree_iter *iter)
{
	struct bpos end;
	int ret;

	ret = bch2_extent_atomic_end(iter, k, &end);
	if (ret)
		return ret;

	return !bkey_cmp(end, k->k.p);
}

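/*
 * If @insert splits an existing compressed extent in two, both halves
 * will reference the full compressed payload, so on-disk accounting
 * grows by the extent's compressed size: take a disk reservation for
 * those sectors, which may fail with -ENOSPC.
 */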
enum btree_insert_ret
bch2_extent_can_insert(struct btree_trans *trans,
		       struct btree_iter *iter,
		       struct bkey_i *insert)
{
	struct btree_iter_level *l = &iter->l[0];
	struct btree_node_iter node_iter = l->iter;
	struct bkey_packed *_k;
	struct bkey_s_c k;
	struct bkey unpacked;
	int sectors;

	_k = bch2_btree_node_iter_peek(&node_iter, l->b);
	if (!_k)
		return BTREE_INSERT_OK;

	k = bkey_disassemble(l->b, _k, &unpacked);

	/* Check if we're splitting a compressed extent: */

	if (bkey_cmp(bkey_start_pos(&insert->k), bkey_start_pos(k.k)) > 0 &&
	    bkey_cmp(insert->k.p, k.k->p) < 0 &&
	    (sectors = bch2_bkey_sectors_compressed(k))) {
		int flags = trans->flags & BTREE_INSERT_NOFAIL
			? BCH_DISK_RESERVATION_NOFAIL : 0;

		switch (bch2_disk_reservation_add(trans->c, trans->disk_res,
						  sectors, flags)) {
		case 0:
			break;
		case -ENOSPC:
			return BTREE_INSERT_ENOSPC;
		default:
			BUG();
		}
	}

	return BTREE_INSERT_OK;
}