/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_EC_H
#define _BCACHEFS_EC_H

#include "ec_types.h"
#include "buckets_types.h"
#include "extents_types.h"

int bch2_stripe_invalid(const struct bch_fs *, struct bkey_s_c,
                        unsigned, struct printbuf *);
void bch2_stripe_to_text(struct printbuf *, struct bch_fs *,
                         struct bkey_s_c);

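/* bkey_ops for KEY_TYPE_stripe keys: */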
#define bch2_bkey_ops_stripe ((struct bkey_ops) {       \
        .key_invalid    = bch2_stripe_invalid,          \
        .val_to_text    = bch2_stripe_to_text,          \
        .swab           = bch2_ptr_swab,                \
        .trans_trigger  = bch2_trans_mark_stripe,       \
        .atomic_trigger = bch2_mark_stripe,             \
})

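/*
 * On disk, a bch_stripe value is the fixed size header followed by nr_blocks
 * extent pointers, then the per-block checksums, then nr_blocks 16 bit block
 * sector counts; the helpers below compute offsets into, and read and write,
 * that layout:
 */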
static inline unsigned stripe_csums_per_device(const struct bch_stripe *s)
{
        return DIV_ROUND_UP(le16_to_cpu(s->sectors),
                            1 << s->csum_granularity_bits);
}

static inline unsigned stripe_csum_offset(const struct bch_stripe *s,
                                          unsigned dev, unsigned csum_idx)
{
        unsigned csum_bytes = bch_crc_bytes[s->csum_type];

        return sizeof(struct bch_stripe) +
                sizeof(struct bch_extent_ptr) * s->nr_blocks +
                (dev * stripe_csums_per_device(s) + csum_idx) * csum_bytes;
}

static inline unsigned stripe_blockcount_offset(const struct bch_stripe *s,
                                                unsigned idx)
{
        return stripe_csum_offset(s, s->nr_blocks, 0) +
                sizeof(u16) * idx;
}

static inline unsigned stripe_blockcount_get(const struct bch_stripe *s,
                                             unsigned idx)
{
        return le16_to_cpup((void *) s + stripe_blockcount_offset(s, idx));
}

static inline void stripe_blockcount_set(struct bch_stripe *s,
                                         unsigned idx, unsigned v)
{
        __le16 *p = (void *) s + stripe_blockcount_offset(s, idx);

        *p = cpu_to_le16(v);
}

static inline unsigned stripe_val_u64s(const struct bch_stripe *s)
{
        return DIV_ROUND_UP(stripe_blockcount_offset(s, s->nr_blocks),
                            sizeof(u64));
}

static inline void *stripe_csum(struct bch_stripe *s,
                                unsigned block, unsigned csum_idx)
{
        EBUG_ON(block >= s->nr_blocks);
        EBUG_ON(csum_idx >= stripe_csums_per_device(s));

        return (void *) s + stripe_csum_offset(s, block, csum_idx);
}

static inline struct bch_csum stripe_csum_get(struct bch_stripe *s,
                                   unsigned block, unsigned csum_idx)
{
        struct bch_csum csum = { 0 };

        memcpy(&csum, stripe_csum(s, block, csum_idx), bch_crc_bytes[s->csum_type]);
        return csum;
}

static inline void stripe_csum_set(struct bch_stripe *s,
                                   unsigned block, unsigned csum_idx,
                                   struct bch_csum csum)
{
        memcpy(stripe_csum(s, block, csum_idx), &csum, bch_crc_bytes[s->csum_type]);
}

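/*
 * Returns true if @data_ptr lies within the range covered by the stripe block
 * pointer @stripe_ptr: same device and generation, with an offset inside
 * [stripe_ptr->offset, stripe_ptr->offset + sectors):
 */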
static inline bool __bch2_ptr_matches_stripe(const struct bch_extent_ptr *stripe_ptr,
                                             const struct bch_extent_ptr *data_ptr,
                                             unsigned sectors)
{
        return  data_ptr->dev    == stripe_ptr->dev &&
                data_ptr->gen    == stripe_ptr->gen &&
                data_ptr->offset >= stripe_ptr->offset &&
                data_ptr->offset  < stripe_ptr->offset + sectors;
}

static inline bool bch2_ptr_matches_stripe(const struct bch_stripe *s,
                                           struct extent_ptr_decoded p)
{
        unsigned nr_data = s->nr_blocks - s->nr_redundant;

        BUG_ON(!p.has_ec);

        if (p.ec.block >= nr_data)
                return false;

        return __bch2_ptr_matches_stripe(&s->ptrs[p.ec.block], &p.ptr,
                                         le16_to_cpu(s->sectors));
}

static inline bool bch2_ptr_matches_stripe_m(const struct gc_stripe *m,
                                             struct extent_ptr_decoded p)
{
        unsigned nr_data = m->nr_blocks - m->nr_redundant;

        BUG_ON(!p.has_ec);

        if (p.ec.block >= nr_data)
                return false;

        return __bch2_ptr_matches_stripe(&m->ptrs[p.ec.block], &p.ptr,
                                         m->sectors);
}

struct bch_read_bio;

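/* In-memory buffer for (some or all of) a stripe's blocks, plus its key: */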
struct ec_stripe_buf {
        /* might not be buffering the entire stripe: */
        unsigned                offset;
        unsigned                size;
        unsigned long           valid[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];

        void                    *data[BCH_BKEY_PTRS_MAX];

        union {
                struct bkey_i_stripe    key;
                u64                     pad[255];
        };
};

struct ec_stripe_head;

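/*
 * Reference counts held on an ec_stripe_new: dropping the last io ref kicks
 * off stripe creation, dropping the last stripe ref frees the ec_stripe_new
 * (see ec_stripe_new_put()):
 */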
enum ec_stripe_ref {
        STRIPE_REF_io,
        STRIPE_REF_stripe,
        STRIPE_REF_NR
};

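/* A new erasure coded stripe in the process of being created: */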
struct ec_stripe_new {
        struct bch_fs           *c;
        struct ec_stripe_head   *h;
        struct mutex            lock;
        struct list_head        list;

        struct hlist_node       hash;
        u64                     idx;

        struct closure          iodone;

        atomic_t                ref[STRIPE_REF_NR];

        int                     err;

        u8                      nr_data;
        u8                      nr_parity;
        bool                    allocated;
        bool                    pending;
        bool                    have_existing_stripe;

        unsigned long           blocks_gotten[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];
        unsigned long           blocks_allocated[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];
        open_bucket_idx_t       blocks[BCH_BKEY_PTRS_MAX];
        struct disk_reservation res;

        struct ec_stripe_buf    new_stripe;
        struct ec_stripe_buf    existing_stripe;
};

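/*
 * State for building new stripes with a given target, algorithm, redundancy
 * and reserve; obtained via bch2_ec_stripe_head_get():
 */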
struct ec_stripe_head {
        struct list_head        list;
        struct mutex            lock;

        unsigned                target;
        unsigned                algo;
        unsigned                redundancy;
        enum alloc_reserve      reserve;

        struct bch_devs_mask    devs;
        unsigned                nr_active_devs;

        unsigned                blocksize;

        struct dev_stripe_state block_stripe;
        struct dev_stripe_state parity_stripe;

        struct ec_stripe_new    *s;
};

int bch2_ec_read_extent(struct bch_fs *, struct bch_read_bio *);

void *bch2_writepoint_ec_buf(struct bch_fs *, struct write_point *);

void bch2_ec_bucket_cancel(struct bch_fs *, struct open_bucket *);

int bch2_ec_stripe_new_alloc(struct bch_fs *, struct ec_stripe_head *);

void bch2_ec_stripe_head_put(struct bch_fs *, struct ec_stripe_head *);
struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *,
                        unsigned, unsigned, unsigned,
                        enum alloc_reserve, struct closure *);

void bch2_stripes_heap_update(struct bch_fs *, struct stripe *, size_t);
void bch2_stripes_heap_del(struct bch_fs *, struct stripe *, size_t);
void bch2_stripes_heap_insert(struct bch_fs *, struct stripe *, size_t);

void bch2_do_stripe_deletes(struct bch_fs *);
void bch2_ec_do_stripe_creates(struct bch_fs *);
void bch2_ec_stripe_new_free(struct bch_fs *, struct ec_stripe_new *);

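/* Refcounting helpers for ec_stripe_new, one count per enum ec_stripe_ref: */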
static inline void ec_stripe_new_get(struct ec_stripe_new *s,
                                     enum ec_stripe_ref ref)
{
        atomic_inc(&s->ref[ref]);
}

static inline void ec_stripe_new_put(struct bch_fs *c, struct ec_stripe_new *s,
                                     enum ec_stripe_ref ref)
{
        BUG_ON(atomic_read(&s->ref[ref]) <= 0);

        if (atomic_dec_and_test(&s->ref[ref]))
                switch (ref) {
                case STRIPE_REF_stripe:
                        bch2_ec_stripe_new_free(c, s);
                        break;
                case STRIPE_REF_io:
                        bch2_ec_do_stripe_creates(c);
                        break;
                default:
                        unreachable();
                }
}

void bch2_ec_stop_dev(struct bch_fs *, struct bch_dev *);
void bch2_fs_ec_stop(struct bch_fs *);
void bch2_fs_ec_flush(struct bch_fs *);

int bch2_stripes_read(struct bch_fs *);

void bch2_stripes_heap_to_text(struct printbuf *, struct bch_fs *);
void bch2_new_stripes_to_text(struct printbuf *, struct bch_fs *);

void bch2_fs_ec_exit(struct bch_fs *);
void bch2_fs_ec_init_early(struct bch_fs *);
int bch2_fs_ec_init(struct bch_fs *);

#endif /* _BCACHEFS_EC_H */