x(alloc_v3, 24) \
x(set, 25) \
x(lru, 26) \
- x(alloc_v4, 27)
+ x(alloc_v4, 27) \
+ x(backpointer, 28)
enum bch_bkey_type {
#define x(name, nr) KEY_TYPE_##name = nr,
x(stripe, 32) \
x(stripe_redundancy, 8)
+/*
+ * Generate BCH_ALLOC_FIELD_V1_* ordinals from the BCH_ALLOC_FIELDS_V1()
+ * x-macro list: one enumerator per listed field, numbered in list order
+ * starting at 0 (each field's _bits width is ignored here).
+ */
+enum {
+#define x(name, _bits) BCH_ALLOC_FIELD_V1_##name,
+	BCH_ALLOC_FIELDS_V1()
+#undef x
+};
+
/*
 * Allocation info, v2: variable-length on-disk encoding. nr_fields counts
 * how many fields are packed into the trailing data[] bytes (field order
 * is presumably given by a BCH_ALLOC_FIELDS_* x-macro list — confirm).
 * NOTE(review): members may be elided from this view of the file; verify
 * the full layout against the complete header before relying on it.
 */
struct bch_alloc_v2 {
struct bch_val v;
__u8 nr_fields;
__u8 data[];
} __attribute__((packed, aligned(8)));
+/*
+ * Flag bits in bch_alloc_v3's little-endian flags field (LE32_BITMASK
+ * presumably generates get/set helpers for the given [lo, hi) bit range
+ * — confirm against the LE32_BITMASK definition):
+ * NEED_DISCARD / NEED_INC_GEN look like "bucket needs discard" and
+ * "bucket generation needs incrementing" — TODO confirm semantics.
+ */
+LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags, 0, 1)
+LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags, 1, 2)
+
/*
 * Allocation info, v4: fixed-width fields (replacing the v1/v2 packed
 * variable-length encoding).
 * NOTE(review): this view is elided — the BITMASK accessors below
 * reference a flags member not visible here; consult the full header.
 */
struct bch_alloc_v4 {
struct bch_val v;
/* journal_seq: presumably journal sequence of the last update — confirm */
__u64 journal_seq;
/* io_time[2]: presumably last read/write access times — confirm indexing */
__u64 io_time[2];
/* stripe: presumably the erasure-coded stripe this bucket belongs to — confirm */
__u32 stripe;
__u32 nr_external_backpointers;
/*
 * NOTE(review): unresolved diff marker below — the patch appears to drop
 * the zero-length backpointers[] array (inline backpointers presumably
 * located via BCH_ALLOC_V4_BACKPOINTERS_START instead — confirm).
 */
- struct bpos backpointers[0];
} __attribute__((packed, aligned(8)));
-LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags, 0, 1)
-LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags, 1, 2)
+/* Size, in 64-bit words, of the original (pre-backpointers) v4 layout: */
+#define BCH_ALLOC_V4_U64s_V0 6
+/*
+ * Current bch_alloc_v4 size in 64-bit words. Use the uapi-safe __u64 in
+ * the sizeof: this on-disk-format header uses __u64/__u32 throughout
+ * (see struct bch_alloc_v4), and the kernel-internal u64 typedef is not
+ * available to userspace consumers of the header.
+ */
+#define BCH_ALLOC_V4_U64s (sizeof(struct bch_alloc_v4) / sizeof(__u64))
/*
 * Bit ranges packed into bch_alloc_v4's flags word (BITMASK presumably
 * generates get/set helpers for the [lo, hi) bit range — confirm against
 * the BITMASK definition). BACKPOINTERS_START (6 bits) and
 * NR_BACKPOINTERS (6 bits) appear to locate/count inline backpointers
 * stored after the fixed fields — TODO confirm.
 */
BITMASK(BCH_ALLOC_V4_NEED_DISCARD, struct bch_alloc_v4, flags, 0, 1)
BITMASK(BCH_ALLOC_V4_NEED_INC_GEN, struct bch_alloc_v4, flags, 1, 2)
BITMASK(BCH_ALLOC_V4_BACKPOINTERS_START,struct bch_alloc_v4, flags, 2, 8)
BITMASK(BCH_ALLOC_V4_NR_BACKPOINTERS, struct bch_alloc_v4, flags, 8, 14)
-enum {
-#define x(name, _bits) BCH_ALLOC_FIELD_V1_##name,
- BCH_ALLOC_FIELDS_V1()
-#undef x
-};
+#define BCH_ALLOC_V4_NR_BACKPOINTERS_MAX 40
+
+/*
+ * Backpointer: per-bucket record describing an extent that points into
+ * the bucket, so the bucket's data can be found again (e.g. for moving/
+ * evacuating data) — TODO confirm intended use against the design docs.
+ * On-disk format: packed, 8-byte aligned.
+ */
+struct bch_backpointer {
+ struct bch_val v;
+ __u8 btree_id; /* presumably the btree holding the pointing key — confirm */
+ __u8 level; /* presumably btree node level of that key — confirm */
+ __u8 data_type; /* presumably a BCH_DATA_* type code — confirm */
+ __u64 bucket_offset:40; /* offset within the bucket; 40-bit bitfield */
+ __u32 bucket_len; /* presumably length, in sectors, of the pointed-to data — confirm units */
+ struct bpos pos; /* position of the key that points into this bucket — confirm */
+} __attribute__((packed, aligned(8)));
/* Quotas: */
x(io_read, 0) \
x(io_write, 1) \
x(io_move, 2) \
- x(bucket_invalidate, 3)
+ x(bucket_invalidate, 3) \
+ x(bucket_discard, 4)
enum bch_persistent_counters {
#define x(t, n, ...) BCH_COUNTER_##t,
x(inode_v2, 18) \
x(freespace, 19) \
x(alloc_v4, 20) \
- x(new_data_types, 21)
+ x(new_data_types, 21) \
+ x(backpointers, 22)
enum bcachefs_metadata_version {
bcachefs_metadata_version_min = 9,