]> git.sesse.net Git - bcachefs-tools-debian/blob - libbcachefs/extent_update.c
Update bcachefs sources to d9bb516b2d bcachefs: Move extent overwrite handling out...
[bcachefs-tools-debian] / libbcachefs / extent_update.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include "bcachefs.h"
3 #include "bkey_on_stack.h"
4 #include "btree_update.h"
5 #include "btree_update_interior.h"
6 #include "buckets.h"
7 #include "debug.h"
8 #include "extents.h"
9 #include "extent_update.h"
10
11 /*
12  * This counts the number of iterators to the alloc & ec btrees we'll need
13  * inserting/removing this extent:
14  */
15 static unsigned bch2_bkey_nr_alloc_ptrs(struct bkey_s_c k)
16 {
17         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
18         const union bch_extent_entry *entry;
19         unsigned ret = 0;
20
21         bkey_extent_entry_for_each(ptrs, entry) {
22                 switch (__extent_entry_type(entry)) {
23                 case BCH_EXTENT_ENTRY_ptr:
24                 case BCH_EXTENT_ENTRY_stripe_ptr:
25                         ret++;
26                 }
27         }
28
29         return ret;
30 }
31
/*
 * Account the btree iterators needed to insert or overwrite @k, updating
 * *nr_iters; if the running total reaches @max_iters, trim *end so the
 * insert stops before @k (or partway through it, for indirect extents).
 *
 * @offset:	sectors into @k already consumed, used as the starting offset
 *		when walking an indirect extent's reflink keys
 * @end:	in/out: current atomic end position, shrunk when the iterator
 *		budget is exhausted
 * @max_iters:	iterator budget for this update
 * @overwrite:	unused in this body - NOTE(review): possibly kept for
 *		interface symmetry with callers; confirm before removing
 *
 * Returns 1 when *end was trimmed (caller should stop scanning), 0 to
 * continue, or a negative error from btree iteration.
 */
static int count_iters_for_insert(struct btree_trans *trans,
                                  struct bkey_s_c k,
                                  unsigned offset,
                                  struct bpos *end,
                                  unsigned *nr_iters,
                                  unsigned max_iters,
                                  bool overwrite)
{
        int ret = 0;

        /*
         * The extent update path requires an _additional_ iterator for each
         * extent we're inserting and overwriting:
         */
        *nr_iters += 1;

        switch (k.k->type) {
        case KEY_TYPE_extent:
        case KEY_TYPE_reflink_v:
                /* One iterator per (stripe) pointer in the extent: */
                *nr_iters += bch2_bkey_nr_alloc_ptrs(k);

                if (*nr_iters >= max_iters) {
                        *end = bpos_min(*end, k.k->p);
                        ret = 1;
                }

                break;
        case KEY_TYPE_reflink_p: {
                struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
                u64 idx = le64_to_cpu(p.v->idx);
                /* Sectors of the indirect extent that fall before *end: */
                unsigned sectors = bpos_min(*end, p.k->p).offset -
                        bkey_start_offset(p.k);
                struct btree_iter *iter;
                struct bkey_s_c r_k;

                /*
                 * Indirect extent: updating it may dirty the keys it points
                 * to, so walk them in the reflink btree and count their
                 * pointers as well:
                 */
                for_each_btree_key(trans, iter,
                                   BTREE_ID_REFLINK, POS(0, idx + offset),
                                   BTREE_ITER_SLOTS, r_k, ret) {
                        if (bkey_cmp(bkey_start_pos(r_k.k),
                                     POS(0, idx + sectors)) >= 0)
                                break;

                        *nr_iters += 1 + bch2_bkey_nr_alloc_ptrs(r_k);

                        if (*nr_iters >= max_iters) {
                                /*
                                 * Budget ran out partway through the
                                 * indirect extent: translate the reflink
                                 * btree position back into a position within
                                 * the extent being inserted:
                                 */
                                struct bpos pos = bkey_start_pos(k.k);
                                pos.offset += r_k.k->p.offset - idx;

                                *end = bpos_min(*end, pos);
                                ret = 1;
                                break;
                        }
                }

                /* Release the iterator whether the walk completed or not: */
                bch2_trans_iter_put(trans, iter);
                break;
        }
        }

        return ret;
}
93
94 #define EXTENT_ITERS_MAX        (BTREE_ITER_MAX / 3)
95
/*
 * Compute the furthest position *end up to which @insert can be applied as a
 * single atomic update, bounded by the EXTENT_ITERS_MAX iterator budget.
 *
 * Starts from the smaller of the insert's end and the current leaf node's
 * end, then walks the existing keys the insert overlaps, accumulating the
 * iterators needed for the insert itself and each overwritten extent;
 * count_iters_for_insert() trims *end when the budget would be exceeded.
 *
 * Returns 0 on success (with *end set), or a negative error code from
 * traversal/iteration.
 */
int bch2_extent_atomic_end(struct btree_iter *iter,
                           struct bkey_i *insert,
                           struct bpos *end)
{
        struct btree_trans *trans = iter->trans;
        struct btree *b;
        struct btree_node_iter  node_iter;
        struct bkey_packed      *_k;
        unsigned                nr_iters = 0;
        int ret;

        ret = bch2_btree_iter_traverse(iter);
        if (ret)
                return ret;

        /* Take a private copy of the node iterator so we don't advance it: */
        b = iter->l[0].b;
        node_iter = iter->l[0].iter;

        BUG_ON(bkey_cmp(bkey_start_pos(&insert->k), b->data->min_key) < 0);

        /* Can't update past the end of the current leaf node: */
        *end = bpos_min(insert->k.p, b->key.k.p);

        /*
         * The insert itself gets only half the budget, leaving room for the
         * extents it overwrites below:
         */
        ret = count_iters_for_insert(trans, bkey_i_to_s_c(insert), 0, end,
                                     &nr_iters, EXTENT_ITERS_MAX / 2, false);
        if (ret < 0)
                return ret;

        while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
                                                      KEY_TYPE_discard))) {
                struct bkey     unpacked;
                struct bkey_s_c k = bkey_disassemble(b, _k, &unpacked);
                unsigned offset = 0;

                /* Stop once we're past the (possibly already trimmed) end: */
                if (bkey_cmp(bkey_start_pos(k.k), *end) >= 0)
                        break;

                /*
                 * If the insert starts inside k, only the tail of k from the
                 * insert's start onward is relevant:
                 */
                if (bkey_cmp(bkey_start_pos(&insert->k),
                             bkey_start_pos(k.k)) > 0)
                        offset = bkey_start_offset(&insert->k) -
                                bkey_start_offset(k.k);

                ret = count_iters_for_insert(trans, k, offset, end,
                                        &nr_iters, EXTENT_ITERS_MAX, true);
                if (ret)
                        break;

                bch2_btree_node_iter_advance(&node_iter, b);
        }

        /* ret == 1 just means *end was trimmed - not an error: */
        return ret < 0 ? ret : 0;
}
147
148 int bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
149 {
150         struct bpos end;
151         int ret;
152
153         ret = bch2_extent_atomic_end(iter, k, &end);
154         if (ret)
155                 return ret;
156
157         bch2_cut_back(end, k);
158         return 0;
159 }
160
161 int bch2_extent_is_atomic(struct bkey_i *k, struct btree_iter *iter)
162 {
163         struct bpos end;
164         int ret;
165
166         ret = bch2_extent_atomic_end(iter, k, &end);
167         if (ret)
168                 return ret;
169
170         return !bkey_cmp(end, k->k.p);
171 }
172
/*
 * Check whether @insert can go ahead: if it would split an existing
 * compressed extent in the middle, the split duplicates the compressed data
 * on disk, so reserve the extra sectors first.
 *
 * Returns BTREE_INSERT_OK, or BTREE_INSERT_ENOSPC if the disk reservation
 * for the split cannot be made.
 */
enum btree_insert_ret
bch2_extent_can_insert(struct btree_trans *trans,
                       struct btree_iter *iter,
                       struct bkey_i *insert)
{
        struct btree_iter_level *l = &iter->l[0];
        /* Copy so we don't advance the iterator's own node iterator: */
        struct btree_node_iter node_iter = l->iter;
        struct bkey_packed *_k;
        struct bkey_s_c k;
        struct bkey unpacked;
        int sectors;

        _k = bch2_btree_node_iter_peek_filter(&node_iter, l->b,
                                              KEY_TYPE_discard);
        if (!_k)
                return BTREE_INSERT_OK;

        k = bkey_disassemble(l->b, _k, &unpacked);

        /* Check if we're splitting a compressed extent: */

        /*
         * A split happens when the insert starts strictly after k's start
         * and ends strictly before k's end; the reservation is only needed
         * when k actually holds compressed data (sectors != 0):
         */
        if (bkey_cmp(bkey_start_pos(&insert->k), bkey_start_pos(k.k)) > 0 &&
            bkey_cmp(insert->k.p, k.k->p) < 0 &&
            (sectors = bch2_bkey_sectors_compressed(k))) {
                int flags = trans->flags & BTREE_INSERT_NOFAIL
                        ? BCH_DISK_RESERVATION_NOFAIL : 0;

                switch (bch2_disk_reservation_add(trans->c, trans->disk_res,
                                                  sectors, flags)) {
                case 0:
                        break;
                case -ENOSPC:
                        return BTREE_INSERT_ENOSPC;
                default:
                        /* No other error is expected from the reservation: */
                        BUG();
                }
        }

        return BTREE_INSERT_OK;
}
212 }