]> git.sesse.net Git - bcachefs-tools-debian/blob - libbcachefs/ec.h
Update bcachefs sources to 4b5917839c bcachefs: Fix a null ptr deref in check_xattr()
[bcachefs-tools-debian] / libbcachefs / ec.h
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_EC_H
3 #define _BCACHEFS_EC_H
4
5 #include "ec_types.h"
6 #include "buckets_types.h"
7 #include "extents_types.h"
8
enum bkey_invalid_flags;

/* Validate a stripe key's value; returns nonzero and fills *err on failure. */
int bch2_stripe_invalid(const struct bch_fs *, struct bkey_s_c,
                        enum bkey_invalid_flags, struct printbuf *);
/* Pretty-print a stripe key for debugfs/error messages. */
void bch2_stripe_to_text(struct printbuf *, struct bch_fs *,
                         struct bkey_s_c);
15
/*
 * Method table for KEY_TYPE_stripe keys: validation, printing, endian
 * swabbing and the (transactional and gc/atomic) triggers that keep
 * bucket accounting in sync with stripe keys.
 */
#define bch2_bkey_ops_stripe ((struct bkey_ops) {       \
        .key_invalid    = bch2_stripe_invalid,          \
        .val_to_text    = bch2_stripe_to_text,          \
        .swab           = bch2_ptr_swab,                \
        .trans_trigger  = bch2_trans_mark_stripe,       \
        .atomic_trigger = bch2_mark_stripe,             \
        .min_val_size   = 8,                            \
})
24
25 static inline unsigned stripe_csums_per_device(const struct bch_stripe *s)
26 {
27         return DIV_ROUND_UP(le16_to_cpu(s->sectors),
28                             1 << s->csum_granularity_bits);
29 }
30
31 static inline unsigned stripe_csum_offset(const struct bch_stripe *s,
32                                           unsigned dev, unsigned csum_idx)
33 {
34         unsigned csum_bytes = bch_crc_bytes[s->csum_type];
35
36         return sizeof(struct bch_stripe) +
37                 sizeof(struct bch_extent_ptr) * s->nr_blocks +
38                 (dev * stripe_csums_per_device(s) + csum_idx) * csum_bytes;
39 }
40
41 static inline unsigned stripe_blockcount_offset(const struct bch_stripe *s,
42                                                 unsigned idx)
43 {
44         return stripe_csum_offset(s, s->nr_blocks, 0) +
45                 sizeof(u16) * idx;
46 }
47
/* Read the (little-endian, unaligned-packed) sector count for block @idx. */
static inline unsigned stripe_blockcount_get(const struct bch_stripe *s,
                                             unsigned idx)
{
	const void *p = (const void *) s + stripe_blockcount_offset(s, idx);

	return le16_to_cpup(p);
}
53
54 static inline void stripe_blockcount_set(struct bch_stripe *s,
55                                          unsigned idx, unsigned v)
56 {
57         __le16 *p = (void *) s + stripe_blockcount_offset(s, idx);
58
59         *p = cpu_to_le16(v);
60 }
61
62 static inline unsigned stripe_val_u64s(const struct bch_stripe *s)
63 {
64         return DIV_ROUND_UP(stripe_blockcount_offset(s, s->nr_blocks),
65                             sizeof(u64));
66 }
67
68 static inline void *stripe_csum(struct bch_stripe *s,
69                                 unsigned block, unsigned csum_idx)
70 {
71         EBUG_ON(block >= s->nr_blocks);
72         EBUG_ON(csum_idx >= stripe_csums_per_device(s));
73
74         return (void *) s + stripe_csum_offset(s, block, csum_idx);
75 }
76
77 static inline struct bch_csum stripe_csum_get(struct bch_stripe *s,
78                                    unsigned block, unsigned csum_idx)
79 {
80         struct bch_csum csum = { 0 };
81
82         memcpy(&csum, stripe_csum(s, block, csum_idx), bch_crc_bytes[s->csum_type]);
83         return csum;
84 }
85
86 static inline void stripe_csum_set(struct bch_stripe *s,
87                                    unsigned block, unsigned csum_idx,
88                                    struct bch_csum csum)
89 {
90         memcpy(stripe_csum(s, block, csum_idx), &csum, bch_crc_bytes[s->csum_type]);
91 }
92
93 static inline bool __bch2_ptr_matches_stripe(const struct bch_extent_ptr *stripe_ptr,
94                                              const struct bch_extent_ptr *data_ptr,
95                                              unsigned sectors)
96 {
97         return  data_ptr->dev    == stripe_ptr->dev &&
98                 data_ptr->gen    == stripe_ptr->gen &&
99                 data_ptr->offset >= stripe_ptr->offset &&
100                 data_ptr->offset  < stripe_ptr->offset + sectors;
101 }
102
103 static inline bool bch2_ptr_matches_stripe(const struct bch_stripe *s,
104                                            struct extent_ptr_decoded p)
105 {
106         unsigned nr_data = s->nr_blocks - s->nr_redundant;
107
108         BUG_ON(!p.has_ec);
109
110         if (p.ec.block >= nr_data)
111                 return false;
112
113         return __bch2_ptr_matches_stripe(&s->ptrs[p.ec.block], &p.ptr,
114                                          le16_to_cpu(s->sectors));
115 }
116
117 static inline bool bch2_ptr_matches_stripe_m(const struct gc_stripe *m,
118                                              struct extent_ptr_decoded p)
119 {
120         unsigned nr_data = m->nr_blocks - m->nr_redundant;
121
122         BUG_ON(!p.has_ec);
123
124         if (p.ec.block >= nr_data)
125                 return false;
126
127         return __bch2_ptr_matches_stripe(&m->ptrs[p.ec.block], &p.ptr,
128                                          m->sectors);
129 }
130
131 struct bch_read_bio;
132
/* In-memory buffer for reading/writing (part of) a stripe's data blocks. */
struct ec_stripe_buf {
        /* might not be buffering the entire stripe: */
        unsigned                offset;
        unsigned                size;
        /* bitmap of blocks whose data[] buffer currently holds valid data */
        unsigned long           valid[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];

        /* one buffer per stripe block (data + parity) */
        void                    *data[BCH_BKEY_PTRS_MAX];

        union {
                struct bkey_i_stripe    key;
                /* pad presumably sizes the union for a max-size stripe key — TODO confirm */
                u64                     pad[255];
        };
};
146
147 struct ec_stripe_head;
148
/*
 * Two separate refcounts on an ec_stripe_new: dropping the last io ref
 * kicks off stripe creation, dropping the last stripe ref frees it
 * (see ec_stripe_new_put()).
 */
enum ec_stripe_ref {
        STRIPE_REF_io,
        STRIPE_REF_stripe,
        STRIPE_REF_NR
};
154
/* A stripe being assembled: buckets allocated, data written, key created. */
struct ec_stripe_new {
        struct bch_fs           *c;
        struct ec_stripe_head   *h;
        struct mutex            lock;
        struct list_head        list;

        struct hlist_node       hash;
        u64                     idx;

        struct closure          iodone;

        /* indexed by enum ec_stripe_ref */
        atomic_t                ref[STRIPE_REF_NR];

        int                     err;

        u8                      nr_data;
        u8                      nr_parity;
        bool                    allocated;
        bool                    pending;
        /* reusing an existing stripe's buckets instead of a fresh allocation */
        bool                    have_existing_stripe;

        /* per-block progress bitmaps (bucket obtained / bucket allocated) */
        unsigned long           blocks_gotten[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];
        unsigned long           blocks_allocated[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];
        open_bucket_idx_t       blocks[BCH_BKEY_PTRS_MAX];
        struct disk_reservation res;

        struct ec_stripe_buf    new_stripe;
        struct ec_stripe_buf    existing_stripe;
};
184
/*
 * Per-(target, algo, redundancy, watermark) context for building new
 * stripes; owns the ec_stripe_new currently being filled, if any.
 */
struct ec_stripe_head {
        struct list_head        list;
        struct mutex            lock;

        unsigned                target;
        unsigned                algo;
        unsigned                redundancy;
        enum bch_watermark      watermark;

        /* devices eligible for this head's stripes */
        struct bch_devs_mask    devs;
        unsigned                nr_active_devs;

        unsigned                blocksize;

        /* round-robin allocation state for data and parity blocks */
        struct dev_stripe_state block_stripe;
        struct dev_stripe_state parity_stripe;

        /* stripe currently being built, or NULL */
        struct ec_stripe_new    *s;
};
204
/* Reconstruct-read an extent via its stripe's parity. */
int bch2_ec_read_extent(struct bch_fs *, struct bch_read_bio *);

/* Returns the ec buffer to write through for this write point, if any. */
void *bch2_writepoint_ec_buf(struct bch_fs *, struct write_point *);

void bch2_ec_bucket_cancel(struct bch_fs *, struct open_bucket *);

int bch2_ec_stripe_new_alloc(struct bch_fs *, struct ec_stripe_head *);

void bch2_ec_stripe_head_put(struct bch_fs *, struct ec_stripe_head *);
/* Look up or create the stripe head for (target, algo, redundancy, watermark). */
struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *,
                        unsigned, unsigned, unsigned,
                        enum bch_watermark, struct closure *);

/* Stripes heap: tracks stripes by free space, for reuse/deletion. */
void bch2_stripes_heap_update(struct bch_fs *, struct stripe *, size_t);
void bch2_stripes_heap_del(struct bch_fs *, struct stripe *, size_t);
void bch2_stripes_heap_insert(struct bch_fs *, struct stripe *, size_t);

void bch2_do_stripe_deletes(struct bch_fs *);
void bch2_ec_do_stripe_creates(struct bch_fs *);
void bch2_ec_stripe_new_free(struct bch_fs *, struct ec_stripe_new *);
225
/* Take a reference of kind @ref on @s. */
static inline void ec_stripe_new_get(struct ec_stripe_new *s,
                                     enum ec_stripe_ref ref)
{
        atomic_inc(&s->ref[ref]);
}
231
232 static inline void ec_stripe_new_put(struct bch_fs *c, struct ec_stripe_new *s,
233                                      enum ec_stripe_ref ref)
234 {
235         BUG_ON(atomic_read(&s->ref[ref]) <= 0);
236
237         if (atomic_dec_and_test(&s->ref[ref]))
238                 switch (ref) {
239                 case STRIPE_REF_stripe:
240                         bch2_ec_stripe_new_free(c, s);
241                         break;
242                 case STRIPE_REF_io:
243                         bch2_ec_do_stripe_creates(c);
244                         break;
245                 default:
246                         unreachable();
247                 }
248 }
249
/* Stop using @ca for new stripes (device removal/ro). */
void bch2_ec_stop_dev(struct bch_fs *, struct bch_dev *);
void bch2_fs_ec_stop(struct bch_fs *);
void bch2_fs_ec_flush(struct bch_fs *);

int bch2_stripes_read(struct bch_fs *);

/* debugfs/sysfs dumps */
void bch2_stripes_heap_to_text(struct printbuf *, struct bch_fs *);
void bch2_new_stripes_to_text(struct printbuf *, struct bch_fs *);

/* fs lifecycle hooks */
void bch2_fs_ec_exit(struct bch_fs *);
void bch2_fs_ec_init_early(struct bch_fs *);
int bch2_fs_ec_init(struct bch_fs *);
262
263 #endif /* _BCACHEFS_EC_H */