libbcachefs/extent_update.c
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "bkey_on_stack.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "debug.h"
#include "extents.h"
#include "extent_update.h"

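/*
 * Helpers for keeping extent updates within the transaction's btree
 * iterator budget: a single extent insert can overwrite many existing
 * extents, and each key touched may need further iterators for the alloc,
 * ec and reflink btrees. The functions below bound how far an insert may
 * extend in one go and trim it back to that point.
 */
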
/*
 * This counts the number of iterators to the alloc & ec btrees we'll need
 * when inserting/removing this extent:
 */
static unsigned bch2_bkey_nr_alloc_ptrs(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	unsigned ret = 0;

	bkey_extent_entry_for_each(ptrs, entry) {
		switch (__extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
		case BCH_EXTENT_ENTRY_stripe_ptr:
			ret++;
		}
	}

	return ret;
}

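/*
 * Add the iterators that inserting or overwriting @k will require to
 * *nr_iters; once the running total reaches @max_iters, clamp *end to the
 * key's position and return nonzero so the caller stops there. For reflink
 * pointers this also walks the indirect extents they point to, since each
 * of those needs iterators of its own.
 */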
static int count_iters_for_insert(struct btree_trans *trans,
				  struct bkey_s_c k,
				  unsigned offset,
				  struct bpos *end,
				  unsigned *nr_iters,
				  unsigned max_iters,
				  bool overwrite)
{
	int ret = 0;

	/*
	 * The extent update path requires an _additional_ iterator for each
	 * extent we're inserting and overwriting:
	 */
	*nr_iters += 1;
	if (*nr_iters >= max_iters) {
		*end = bpos_min(*end, k.k->p);
		ret = 1;
	}

	switch (k.k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		*nr_iters += bch2_bkey_nr_alloc_ptrs(k);

		if (*nr_iters >= max_iters) {
			*end = bpos_min(*end, k.k->p);
			ret = 1;
		}

		break;
	case KEY_TYPE_reflink_p: {
		struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
		u64 idx = le64_to_cpu(p.v->idx);
		unsigned sectors = bpos_min(*end, p.k->p).offset -
			bkey_start_offset(p.k);
		struct btree_iter *iter;
		struct bkey_s_c r_k;

		for_each_btree_key(trans, iter,
				   BTREE_ID_REFLINK, POS(0, idx + offset),
				   BTREE_ITER_SLOTS, r_k, ret) {
			if (bkey_cmp(bkey_start_pos(r_k.k),
				     POS(0, idx + sectors)) >= 0)
				break;

			*nr_iters += 1 + bch2_bkey_nr_alloc_ptrs(r_k);

			if (*nr_iters >= max_iters) {
				struct bpos pos = bkey_start_pos(k.k);
				pos.offset += r_k.k->p.offset - idx;

				*end = bpos_min(*end, pos);
				ret = 1;
				break;
			}
		}

		bch2_trans_iter_put(trans, iter);
		break;
	}
	}

	return ret;
}

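/*
 * Cap on the number of iterators a single atomic extent update may
 * consume; a third of BTREE_ITER_MAX presumably leaves headroom for the
 * other iterators the transaction needs.
 */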
#define EXTENT_ITERS_MAX	(BTREE_ITER_MAX / 3)

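/*
 * Compute the farthest position @insert can extend to while staying within
 * the EXTENT_ITERS_MAX iterator budget: start from the end of the insert
 * (clamped to the end of the btree node), then walk the existing keys it
 * overlaps and pull *end back as soon as the budget is exhausted.
 */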
int bch2_extent_atomic_end(struct btree_iter *iter,
			   struct bkey_i *insert,
			   struct bpos *end)
{
	struct btree_trans *trans = iter->trans;
	struct btree *b;
	struct btree_node_iter	node_iter;
	struct bkey_packed	*_k;
	unsigned		nr_iters = 0;
	int ret;

	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return ret;

	b = iter->l[0].b;
	node_iter = iter->l[0].iter;

	BUG_ON(bkey_cmp(bkey_start_pos(&insert->k), b->data->min_key) < 0);

	*end = bpos_min(insert->k.p, b->key.k.p);

	ret = count_iters_for_insert(trans, bkey_i_to_s_c(insert), 0, end,
				     &nr_iters, EXTENT_ITERS_MAX / 2, false);
	if (ret < 0)
		return ret;

	while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
		struct bkey	unpacked;
		struct bkey_s_c k = bkey_disassemble(b, _k, &unpacked);
		unsigned offset = 0;

		if (bkey_cmp(bkey_start_pos(k.k), *end) >= 0)
			break;

		if (bkey_cmp(bkey_start_pos(&insert->k),
			     bkey_start_pos(k.k)) > 0)
			offset = bkey_start_offset(&insert->k) -
				bkey_start_offset(k.k);

		ret = count_iters_for_insert(trans, k, offset, end,
					&nr_iters, EXTENT_ITERS_MAX, true);
		if (ret)
			break;

		bch2_btree_node_iter_advance(&node_iter, b);
	}

	return ret < 0 ? ret : 0;
}

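/*
 * Trim @k so that inserting it stays within the iterator budget computed
 * by bch2_extent_atomic_end(). Illustrative use by a hypothetical caller
 * (not a call site in this file):
 *
 *	ret = bch2_extent_trim_atomic(k, iter);
 *	if (ret)
 *		return ret;
 *	// commit the trimmed key, then loop to handle whatever was cut off
 */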
int bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
{
	struct bpos end;
	int ret;

	ret = bch2_extent_atomic_end(iter, k, &end);
	if (ret)
		return ret;

	bch2_cut_back(end, k);
	return 0;
}

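/*
 * Returns 1 if @k can be inserted without trimming, i.e.
 * bch2_extent_atomic_end() leaves its end position untouched, 0 if it
 * would have to be trimmed, or a negative error code on failure.
 */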
int bch2_extent_is_atomic(struct bkey_i *k, struct btree_iter *iter)
{
	struct bpos end;
	int ret;

	ret = bch2_extent_atomic_end(iter, k, &end);
	if (ret)
		return ret;

	return !bkey_cmp(end, k->k.p);
}

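/*
 * Pre-commit check for extent inserts: if @insert lands strictly inside an
 * existing compressed extent, splitting that extent presumably requires
 * extra space in the accounting, so reserve the compressed size up front
 * and return BTREE_INSERT_ENOSPC if the reservation can't be made.
 */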
enum btree_insert_ret
bch2_extent_can_insert(struct btree_trans *trans,
		       struct btree_iter *iter,
		       struct bkey_i *insert)
{
	struct btree_iter_level *l = &iter->l[0];
	struct btree_node_iter node_iter = l->iter;
	struct bkey_packed *_k;
	struct bkey_s_c k;
	struct bkey unpacked;
	int sectors;

	_k = bch2_btree_node_iter_peek(&node_iter, l->b);
	if (!_k)
		return BTREE_INSERT_OK;

	k = bkey_disassemble(l->b, _k, &unpacked);

	/* Check if we're splitting a compressed extent: */

	if (bkey_cmp(bkey_start_pos(&insert->k), bkey_start_pos(k.k)) > 0 &&
	    bkey_cmp(insert->k.p, k.k->p) < 0 &&
	    (sectors = bch2_bkey_sectors_compressed(k))) {
		int flags = trans->flags & BTREE_INSERT_NOFAIL
			? BCH_DISK_RESERVATION_NOFAIL : 0;

		switch (bch2_disk_reservation_add(trans->c, trans->disk_res,
						  sectors, flags)) {
		case 0:
			break;
		case -ENOSPC:
			return BTREE_INSERT_ENOSPC;
		default:
			BUG();
		}
	}

	return BTREE_INSERT_OK;
}