1 #ifndef _BCACHE_EXTENTS_H
2 #define _BCACHE_EXTENTS_H
6 #include <linux/bcache.h>
/*
 * NOTE(review): this header was extracted lossily — several prototypes in
 * this file are missing parameter/continuation lines; confirm against the
 * original source.
 */
struct bch_replace_info;
struct btree_insert_entry;

/* Sort/merge helpers used when (re)building btree node bsets. */
struct btree_nr_keys bch_key_sort_fix_overlapping(struct bset *,
					struct btree_node_iter *);
struct btree_nr_keys bch_extent_sort_fix_overlapping(struct cache_set *c,
					struct btree_node_iter *);

/* bkey operation tables: btree-pointer keys vs. data extent keys. */
extern const struct bkey_ops bch_bkey_btree_ops;
extern const struct bkey_ops bch_bkey_extent_ops;
/*
 * Result of choosing which replica of an extent to read: the chosen device
 * pointer plus its checksum/compression info, normalized to the 64-bit crc
 * format.
 * NOTE(review): truncated — the closing brace (and possibly further
 * members) is missing; restore from the original source.
 */
struct extent_pick_ptr {
	struct bch_extent_crc64	crc;
	struct bch_extent_ptr	ptr;
/* Pick which pointer of a btree node key to read from. */
struct extent_pick_ptr
bch_btree_pick_ptr(struct cache_set *, const struct btree *);

/* As bch_extent_pick_ptr(), but skip the given device if possible. */
void bch_extent_pick_ptr_avoiding(struct cache_set *, struct bkey_s_c,
				  struct cache *, struct extent_pick_ptr *);
41 bch_extent_pick_ptr(struct cache_set *c, struct bkey_s_c k,
42 struct extent_pick_ptr *ret)
44 bch_extent_pick_ptr_avoiding(c, k, NULL, ret);
/* Compare-and-exchange insert hook for extents (used by data move/migrate). */
enum extent_insert_hook_ret
bch_extent_cmpxchg(struct extent_insert_hook *, struct bpos, struct bpos,
		   struct bkey_s_c, const struct bkey_i *);

/*
 * NOTE(review): the return-type line of this prototype was lost in
 * extraction; restore from the original source.
 */
bch_insert_fixup_extent(struct btree_insert *,
			struct btree_insert_entry *);
/* Drop stale/cached pointers; returns true if the key should be deleted. */
bool bch_extent_normalize(struct cache_set *, struct bkey_s);

/* Count pointer entries, from a given entry / from the start. */
unsigned bch_extent_nr_ptrs_from(struct bkey_s_c_extent,
				 const struct bch_extent_ptr *);
unsigned bch_extent_nr_ptrs(struct bkey_s_c_extent);
61 static inline bool bkey_extent_is_data(const struct bkey *k)
65 case BCH_EXTENT_CACHED:
/*
 * NOTE(review): truncated — the switch body is mostly missing. Presumably
 * returns true for key types that account as allocated space (likely
 * BCH_EXTENT, BCH_EXTENT_CACHED, possibly reservations); restore from the
 * original source.
 */
static inline bool bkey_extent_is_allocation(const struct bkey *k)
	case BCH_EXTENT_CACHED:
84 static inline bool bkey_extent_is_cached(const struct bkey *k)
86 return k->type == BCH_EXTENT_CACHED;
89 static inline void bkey_extent_set_cached(struct bkey *k, bool cached)
91 EBUG_ON(k->type != BCH_EXTENT &&
92 k->type != BCH_EXTENT_CACHED);
94 k->type = cached ? BCH_EXTENT_CACHED : BCH_EXTENT;
97 static inline unsigned
98 __extent_entry_type(const union bch_extent_entry *e)
100 return e->type ? __ffs(e->type) : BCH_EXTENT_ENTRY_MAX;
103 static inline enum bch_extent_entry_type
104 extent_entry_type(const union bch_extent_entry *e)
106 int ret = __ffs(e->type);
108 EBUG_ON(ret < 0 || ret >= BCH_EXTENT_ENTRY_MAX);
113 static inline size_t extent_entry_bytes(const union bch_extent_entry *entry)
115 switch (extent_entry_type(entry)) {
116 case BCH_EXTENT_ENTRY_crc32:
117 return sizeof(struct bch_extent_crc32);
118 case BCH_EXTENT_ENTRY_crc64:
119 return sizeof(struct bch_extent_crc64);
120 case BCH_EXTENT_ENTRY_ptr:
121 return sizeof(struct bch_extent_ptr);
127 static inline size_t extent_entry_u64s(const union bch_extent_entry *entry)
129 return extent_entry_bytes(entry) / sizeof(u64);
132 static inline bool extent_entry_is_ptr(const union bch_extent_entry *e)
134 return extent_entry_type(e) == BCH_EXTENT_ENTRY_ptr;
137 static inline bool extent_entry_is_crc(const union bch_extent_entry *e)
139 return !extent_entry_is_ptr(e);
/*
 * Overlay of the two on-disk crc entry formats.
 * NOTE(review): truncated — at least one member line and the closing brace
 * are missing here (the original likely also carried a common type field);
 * restore from the original source.
 */
union bch_extent_crc {
	struct bch_extent_crc32	crc32;
	struct bch_extent_crc64	crc64;
/*
 * Downcast a crc-union or ptr pointer to the generic entry type, preserving
 * constness. (Reconstructed: the statement-expression braces were lost in
 * extraction.)
 */
#define to_entry(_entry)						\
({									\
	BUILD_BUG_ON(!type_is(_entry, union bch_extent_crc *) &&	\
		     !type_is(_entry, struct bch_extent_ptr *));	\
									\
	__builtin_choose_expr(						\
		(type_is_exact(_entry, const union bch_extent_crc *) ||	\
		 type_is_exact(_entry, const struct bch_extent_ptr *)),	\
		(const union bch_extent_entry *) (_entry),		\
		(union bch_extent_entry *) (_entry));			\
})
/* Unchecked cast from a generic entry to the crc union, preserving const. */
#define __entry_to_crc(_entry)						\
	__builtin_choose_expr(						\
		type_is_exact(_entry, const union bch_extent_entry *),	\
		(const union bch_extent_crc *) (_entry),		\
		(union bch_extent_crc *) (_entry))
/*
 * Checked cast: the entry must be a crc entry (or NULL). (Reconstructed:
 * the statement-expression braces were lost in extraction.)
 */
#define entry_to_crc(_entry)						\
({									\
	EBUG_ON((_entry) && !extent_entry_is_crc(_entry));		\
									\
	__entry_to_crc(_entry);						\
})
/*
 * Checked cast: the entry must be a pointer entry (or NULL); preserves
 * constness. (Reconstructed: the statement-expression braces were lost in
 * extraction.)
 */
#define entry_to_ptr(_entry)						\
({									\
	EBUG_ON((_entry) && !extent_entry_is_ptr(_entry));		\
									\
	__builtin_choose_expr(						\
		type_is_exact(_entry, const union bch_extent_entry *),	\
		(const struct bch_extent_ptr *) (_entry),		\
		(struct bch_extent_ptr *) (_entry));			\
})
/*
 * Which crc format an entry carries; NONE means no checksum info at all.
 * (Reconstructed: the enumerator lines were lost in extraction; the three
 * names are grounded by the switches in extent_crc_type()/crc_to_64() etc.)
 */
enum bch_extent_crc_type {
	BCH_EXTENT_CRC_NONE,
	BCH_EXTENT_CRC32,
	BCH_EXTENT_CRC64,
};
190 static inline enum bch_extent_crc_type
191 extent_crc_type(const union bch_extent_crc *crc)
194 return BCH_EXTENT_CRC_NONE;
196 switch (extent_entry_type(to_entry(crc))) {
197 case BCH_EXTENT_ENTRY_crc32:
198 return BCH_EXTENT_CRC32;
199 case BCH_EXTENT_ENTRY_crc64:
200 return BCH_EXTENT_CRC64;
/* Advance to the next entry — entries are variable-sized and packed. */
#define extent_entry_next(_entry)					\
	((typeof(_entry)) ((void *) (_entry) + extent_entry_bytes(_entry)))

/* One past the last entry in the extent's value. */
#define extent_entry_last(_e)						\
	bkey_idx((_e).v, bkey_val_u64s((_e).k))
/* Iterate over all entries (both crc and ptr entries): */

#define extent_for_each_entry_from(_e, _entry, _start)			\
	for ((_entry) = _start;						\
	     (_entry) < extent_entry_last(_e);				\
	     (_entry) = extent_entry_next(_entry))

#define extent_for_each_entry(_e, _entry)				\
	extent_for_each_entry_from(_e, _entry, (_e).v->start)
/* Iterate over crcs only: */

/*
 * Next crc entry at or after _p, or NULL past the end. (Reconstructed: the
 * statement-expression braces were lost in extraction.)
 */
#define extent_crc_next(_e, _p)						\
({									\
	typeof(&(_e).v->start[0]) _entry = _p;				\
									\
	while ((_entry) < extent_entry_last(_e) &&			\
	       !extent_entry_is_crc(_entry))				\
		(_entry) = extent_entry_next(_entry);			\
									\
	entry_to_crc(_entry < extent_entry_last(_e) ? _entry : NULL);	\
})
/*
 * Visit every crc entry, skipping pointers. (Reconstructed: the loop
 * condition line was lost in extraction; it can only be the NULL test on
 * _crc, matching extent_crc_next()'s NULL-at-end contract.)
 */
#define extent_for_each_crc(_e, _crc)					\
	for ((_crc) = extent_crc_next(_e, (_e).v->start);		\
	     (_crc);							\
	     (_crc) = extent_crc_next(_e, extent_entry_next(to_entry(_crc))))
/* Iterate over pointers, with crcs: */

/*
 * NOTE(review): truncated — this statement-expression macro lost its
 * opening/closing lines and the filter/termination logic; only the entry
 * walk is visible. It appears to walk entries from _ptr, tracking the most
 * recent crc in _crc and yielding the next pointer passing _filter (or
 * NULL) — restore from the original source before use.
 */
#define extent_ptr_crc_next_filter(_e, _crc, _ptr, _filter)		\
	typeof(&(_e).v->start[0]) _entry;				\
									\
	extent_for_each_entry_from(_e, _entry, to_entry(_ptr))		\
		if (extent_entry_is_crc(_entry)) {			\
			(_crc) = entry_to_crc(_entry);			\
			_ptr = entry_to_ptr(_entry);			\
/*
 * Visit each pointer passing _filter, with _crc tracking the crc entry
 * governing it. (Reconstructed: the for-loop increment line was lost in
 * extraction; stepping _ptr past the yielded entry is the only increment
 * consistent with extent_ptr_crc_next_filter() resuming from _ptr.)
 */
#define extent_for_each_ptr_crc_filter(_e, _ptr, _crc, _filter)		\
	for ((_crc) = NULL,						\
	     (_ptr) = &(_e).v->start->ptr;				\
	     ((_ptr) = extent_ptr_crc_next_filter(_e, _crc, _ptr, _filter));\
	     (_ptr)++)
/* As above with no filter: visits every pointer. */
#define extent_for_each_ptr_crc(_e, _ptr, _crc)				\
	extent_for_each_ptr_crc_filter(_e, _ptr, _crc, true)

/* Visit only pointers whose device is present; _ca is set per pointer. */
#define extent_for_each_online_device_crc(_c, _e, _crc, _ptr, _ca)	\
	extent_for_each_ptr_crc_filter(_e, _ptr, _crc,			\
				       ((_ca) = PTR_CACHE(_c, _ptr)))
/* Iterate over pointers only, and from a given position: */

/*
 * Next pointer passing _filter, discarding crc info. (Reconstructed: the
 * statement-expression braces were lost in extraction.)
 */
#define extent_ptr_next_filter(_e, _ptr, _filter)			\
({									\
	typeof(__entry_to_crc(&(_e).v->start[0])) _crc;			\
									\
	extent_ptr_crc_next_filter(_e, _crc, _ptr, _filter);		\
})
/* Next pointer entry at or after _ptr, unfiltered. */
#define extent_ptr_next(_e, _ptr)					\
	extent_ptr_next_filter(_e, _ptr, true)
/*
 * Visit each pointer passing _filter, starting from _start.
 * (Reconstructed: the for-loop increment line was lost in extraction;
 * stepping _ptr past the yielded entry matches the _crc variant above.)
 */
#define extent_for_each_ptr_from_filter(_e, _ptr, _start, _filter)	\
	for ((_ptr) = (_start);						\
	     ((_ptr) = extent_ptr_next_filter(_e, _ptr, _filter));	\
	     (_ptr)++)
/* Unfiltered pointer iteration from a given position / from the start. */
#define extent_for_each_ptr_from(_e, _ptr, _start)			\
	extent_for_each_ptr_from_filter(_e, _ptr, _start, true)

#define extent_for_each_ptr(_e, _ptr)					\
	extent_for_each_ptr_from_filter(_e, _ptr, &(_e).v->start->ptr, true)

/* Visit only pointers whose device is present; _ca is set per pointer. */
#define extent_for_each_online_device(_c, _e, _ptr, _ca)		\
	extent_for_each_ptr_from_filter(_e, _ptr, &(_e).v->start->ptr,	\
					((_ca) = PTR_CACHE(_c, _ptr)))
/*
 * Pointer entry preceding _ptr — or the last pointer when _ptr is NULL,
 * NULL when _ptr is the first. O(n) per call. (Reconstructed: the loop
 * body and statement-expression wrapper were lost in extraction; the
 * forward-scan-and-remember shape is the only one consistent with the
 * declared _p/_prev locals and extent_for_each_ptr_backwards() below —
 * confirm against the original source.)
 */
#define extent_ptr_prev(_e, _ptr)					\
({									\
	typeof(&(_e).v->start->ptr) _p;					\
	typeof(&(_e).v->start->ptr) _prev = NULL;			\
									\
	extent_for_each_ptr(_e, _p) {					\
		if (_p == (_ptr))					\
			break;						\
		_prev = _p;						\
	}								\
									\
	_prev;								\
})
/*
 * Use this when you'll be dropping pointers as you iterate. Quadratic,
 * since each step rescans from the front via extent_ptr_prev().
 * (Reconstructed: the loop condition line was lost in extraction; it can
 * only be the NULL test on _ptr.)
 */
#define extent_for_each_ptr_backwards(_e, _ptr)				\
	for ((_ptr) = extent_ptr_prev(_e, NULL);			\
	     (_ptr);							\
	     (_ptr) = extent_ptr_prev(_e, _ptr))
/* Append an entry / a crc entry to an extent being built. */
void bch_extent_entry_append(struct bkey_i_extent *, union bch_extent_entry *);
void bch_extent_crc_append(struct bkey_i_extent *, unsigned, unsigned,
			   unsigned, u64, unsigned);
328 static inline void extent_ptr_append(struct bkey_i_extent *e,
329 struct bch_extent_ptr ptr)
331 ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
332 bch_extent_entry_append(e, to_entry(&ptr));
335 /* XXX: inefficient */
336 static inline bool bch_extent_ptr_is_dirty(const struct cache_set *c,
337 struct bkey_s_c_extent e,
338 const struct bch_extent_ptr *ptr)
340 if (bkey_extent_is_cached(e.k))
343 /* Dirty pointers come last */
344 return bch_extent_nr_ptrs_from(e, ptr) <= c->opts.data_replicas;
/* Presumably checksum size per csum_type — see its definition. */
extern const unsigned bch_crc_size[];
349 static inline struct bch_extent_crc64 crc_to_64(const struct bkey *k,
350 const union bch_extent_crc *crc)
352 switch (extent_crc_type(crc)) {
353 case BCH_EXTENT_CRC_NONE:
354 return (struct bch_extent_crc64) {
355 .compressed_size = k->size,
356 .uncompressed_size = k->size,
358 case BCH_EXTENT_CRC32:
359 return (struct bch_extent_crc64) {
360 .compressed_size = crc->crc32.compressed_size,
361 .uncompressed_size = crc->crc32.uncompressed_size,
362 .offset = crc->crc32.offset,
363 .csum_type = crc->crc32.csum_type,
364 .compression_type = crc->crc32.compression_type,
365 .csum = crc->crc32.csum,
367 case BCH_EXTENT_CRC64:
374 static inline unsigned crc_compressed_size(const struct bkey *k,
375 const union bch_extent_crc *crc)
377 return crc_to_64(k, crc).compressed_size;
380 static inline unsigned crc_uncompressed_size(const struct bkey *k,
381 const union bch_extent_crc *crc)
383 return crc_to_64(k, crc).uncompressed_size;
386 static inline unsigned crc_offset(const union bch_extent_crc *crc)
388 switch (extent_crc_type(crc)) {
389 case BCH_EXTENT_CRC_NONE:
391 case BCH_EXTENT_CRC32:
392 return crc->crc32.offset;
393 case BCH_EXTENT_CRC64:
394 return crc->crc64.offset;
400 static inline unsigned crc_csum_type(const union bch_extent_crc *crc)
402 switch (extent_crc_type(crc)) {
403 case BCH_EXTENT_CRC_NONE:
405 case BCH_EXTENT_CRC32:
406 return crc->crc32.csum_type;
407 case BCH_EXTENT_CRC64:
408 return crc->crc64.csum_type;
414 static inline unsigned crc_compression_type(const union bch_extent_crc *crc)
416 switch (extent_crc_type(crc)) {
417 case BCH_EXTENT_CRC_NONE:
419 case BCH_EXTENT_CRC32:
420 return crc->crc32.compression_type;
421 case BCH_EXTENT_CRC64:
422 return crc->crc64.compression_type;
428 static inline u64 crc_csum(const union bch_extent_crc *crc)
430 switch (extent_crc_type(crc)) {
431 case BCH_EXTENT_CRC_NONE:
433 case BCH_EXTENT_CRC32:
434 return crc->crc32.csum;
435 case BCH_EXTENT_CRC64:
436 return crc->crc64.csum;
442 static inline unsigned bkey_extent_is_compressed(struct cache_set *c,
445 struct bkey_s_c_extent e;
446 const struct bch_extent_ptr *ptr;
447 const union bch_extent_crc *crc;
452 case BCH_EXTENT_CACHED:
453 e = bkey_s_c_to_extent(k);
455 extent_for_each_ptr_crc(e, ptr, crc)
456 if (bch_extent_ptr_is_dirty(c, e, ptr) &&
457 crc_compression_type(crc) != BCH_COMPRESSION_NONE &&
458 crc_compressed_size(e.k, crc) < k.k->size)
459 ret = max_t(unsigned, ret,
460 crc_compressed_size(e.k, crc));
/* Narrow/merge checksum entries, and drop crcs no pointer refers to. */
void bch_extent_narrow_crcs(struct bkey_s_extent);
void bch_extent_drop_redundant_crcs(struct bkey_s_extent);
469 /* Doesn't cleanup redundant crcs */
470 static inline void __bch_extent_drop_ptr(struct bkey_s_extent e,
471 struct bch_extent_ptr *ptr)
473 EBUG_ON(ptr < &e.v->start->ptr ||
474 ptr >= &extent_entry_last(e)->ptr);
475 EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);
476 memmove_u64s_down(ptr, ptr + 1,
477 (u64 *) extent_entry_last(e) - (u64 *) (ptr + 1));
478 e.k->u64s -= sizeof(*ptr) / sizeof(u64);
481 static inline void bch_extent_drop_ptr(struct bkey_s_extent e,
482 struct bch_extent_ptr *ptr)
484 __bch_extent_drop_ptr(e, ptr);
485 bch_extent_drop_redundant_crcs(e);
/* True if the extent has a pointer to the given device index. */
bool bch_extent_has_device(struct bkey_s_c_extent, unsigned);

/* Presumably trim a key to start/end at a position, resize it — confirm. */
bool bch_cut_front(struct bpos, struct bkey_i *);
bool bch_cut_back(struct bpos, struct bkey *);
void bch_key_resize(struct bkey *, unsigned);
494 #endif /* _BCACHE_EXTENTS_H */