1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_BACKPOINTERS_BACKGROUND_H
3 #define _BCACHEFS_BACKPOINTERS_BACKGROUND_H
5 #include "btree_iter.h"
6 #include "btree_update.h"
10 static inline u64 swab40(u64 x)
12 return (((x & 0x00000000ffULL) << 32)|
13 ((x & 0x000000ff00ULL) << 16)|
14 ((x & 0x0000ff0000ULL) >> 0)|
15 ((x & 0x00ff000000ULL) >> 16)|
16 ((x & 0xff00000000ULL) >> 32));
19 int bch2_backpointer_invalid(struct bch_fs *, struct bkey_s_c k,
20 enum bkey_invalid_flags, struct printbuf *);
21 void bch2_backpointer_to_text(struct printbuf *, const struct bch_backpointer *);
22 void bch2_backpointer_k_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
23 void bch2_backpointer_swab(struct bkey_s);
/* bkey_ops table entry for KEY_TYPE_backpointer keys: */
#define bch2_bkey_ops_backpointer ((struct bkey_ops) {	\
	.key_invalid	= bch2_backpointer_invalid,	\
	.val_to_text	= bch2_backpointer_k_to_text,	\
	.swab		= bch2_backpointer_swab,	\
	.min_val_size	= 32,				\
})
32 #define MAX_EXTENT_COMPRESS_RATIO_SHIFT 10
35 * Convert from pos in backpointer btree to pos of corresponding bucket in alloc
38 static inline struct bpos bp_pos_to_bucket(const struct bch_fs *c,
41 struct bch_dev *ca = bch_dev_bkey_exists(c, bp_pos.inode);
42 u64 bucket_sector = bp_pos.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT;
44 return POS(bp_pos.inode, sector_to_bucket(ca, bucket_sector));
48 * Convert from pos in alloc btree + bucket offset to pos in backpointer btree:
50 static inline struct bpos bucket_pos_to_bp(const struct bch_fs *c,
54 struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
57 ret = POS(bucket.inode,
58 (bucket_to_sector(ca, bucket.offset) <<
59 MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);
61 EBUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(c, ret)));
66 int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *, struct bpos bucket,
67 struct bch_backpointer, struct bkey_s_c, bool);
69 static inline int bch2_bucket_backpointer_mod(struct btree_trans *trans,
71 struct bch_backpointer bp,
72 struct bkey_s_c orig_k,
75 if (unlikely(bch2_backpointers_no_use_write_buffer))
76 return bch2_bucket_backpointer_mod_nowritebuffer(trans, bucket, bp, orig_k, insert);
78 struct bkey_i_backpointer bp_k;
80 bkey_backpointer_init(&bp_k.k_i);
81 bp_k.k.p = bucket_pos_to_bp(trans->c, bucket, bp.bucket_offset);
85 bp_k.k.type = KEY_TYPE_deleted;
86 set_bkey_val_u64s(&bp_k.k, 0);
89 return bch2_trans_update_buffered(trans, BTREE_ID_backpointers, &bp_k.k_i);
92 static inline enum bch_data_type bkey_ptr_data_type(enum btree_id btree_id, unsigned level,
93 struct bkey_s_c k, struct extent_ptr_decoded p)
95 return level ? BCH_DATA_btree :
96 p.has_ec ? BCH_DATA_stripe :
100 static inline void bch2_extent_ptr_to_bp(struct bch_fs *c,
101 enum btree_id btree_id, unsigned level,
102 struct bkey_s_c k, struct extent_ptr_decoded p,
103 struct bpos *bucket_pos, struct bch_backpointer *bp)
105 enum bch_data_type data_type = bkey_ptr_data_type(btree_id, level, k, p);
106 s64 sectors = level ? btree_sectors(c) : k.k->size;
109 *bucket_pos = PTR_BUCKET_POS_OFFSET(c, &p.ptr, &bucket_offset);
110 *bp = (struct bch_backpointer) {
111 .btree_id = btree_id,
113 .data_type = data_type,
114 .bucket_offset = ((u64) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) +
116 .bucket_len = ptr_disk_sectors(sectors, p),
121 int bch2_get_next_backpointer(struct btree_trans *, struct bpos, int,
122 struct bpos *, struct bch_backpointer *, unsigned);
123 struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *, struct btree_iter *,
124 struct bpos, struct bch_backpointer,
126 struct btree *bch2_backpointer_get_node(struct btree_trans *, struct btree_iter *,
127 struct bpos, struct bch_backpointer);
129 int bch2_check_btree_backpointers(struct bch_fs *);
130 int bch2_check_extents_to_backpointers(struct bch_fs *);
131 int bch2_check_backpointers_to_extents(struct bch_fs *);
133 #endif /* _BCACHEFS_BACKPOINTERS_BACKGROUND_H */