X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fbcachefs_format.h;h=8312018e1ed5a5199709446dde6375ec7bef8455;hb=790ca9522a96efe321aae36fb0d7f4e437110b0f;hp=a78988e3ded7347df61c3d13c074438339d15e40;hpb=8bcd38555cb224b7da722bcdeca5c2d15f3ef284;p=bcachefs-tools-debian diff --git a/libbcachefs/bcachefs_format.h b/libbcachefs/bcachefs_format.h index a78988e..8312018 100644 --- a/libbcachefs/bcachefs_format.h +++ b/libbcachefs/bcachefs_format.h @@ -76,6 +76,22 @@ #include #include #include +#include "vstructs.h" + +#define BITMASK(name, type, field, offset, end) \ +static const unsigned name##_OFFSET = offset; \ +static const unsigned name##_BITS = (end - offset); \ + \ +static inline __u64 name(const type *k) \ +{ \ + return (k->field >> offset) & ~(~0ULL << (end - offset)); \ +} \ + \ +static inline void SET_##name(type *k, __u64 v) \ +{ \ + k->field &= ~(~(~0ULL << (end - offset)) << offset); \ + k->field |= (v & ~(~0ULL << (end - offset))) << offset; \ +} #define LE_BITMASK(_bits, name, type, field, offset, end) \ static const unsigned name##_OFFSET = offset; \ @@ -138,19 +154,19 @@ struct bpos { #define KEY_SNAPSHOT_MAX ((__u32)~0U) #define KEY_SIZE_MAX ((__u32)~0U) -static inline struct bpos POS(__u64 inode, __u64 offset) +static inline struct bpos SPOS(__u64 inode, __u64 offset, __u32 snapshot) { - struct bpos ret; - - ret.inode = inode; - ret.offset = offset; - ret.snapshot = 0; - - return ret; + return (struct bpos) { + .inode = inode, + .offset = offset, + .snapshot = snapshot, + }; } -#define POS_MIN POS(0, 0) -#define POS_MAX POS(KEY_INODE_MAX, KEY_OFFSET_MAX) +#define POS_MIN SPOS(0, 0, 0) +#define POS_MAX SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, 0) +#define SPOS_MAX SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, KEY_SNAPSHOT_MAX) +#define POS(_inode, _offset) SPOS(_inode, _offset, 0) /* Empty placeholder struct, for container_of() */ struct bch_val { @@ -323,10 +339,10 @@ static inline void bkey_init(struct bkey *k) */ #define BCH_BKEY_TYPES() \ x(deleted, 0) \ - x(discard, 1) \ + x(whiteout, 1) \ x(error, 2) \ x(cookie, 3) \ - x(whiteout, 4) \ + x(hash_whiteout, 4) \ x(btree_ptr, 5) \ x(extent, 6) \ x(reservation, 7) \ @@ -340,7 +356,16 @@ static inline void bkey_init(struct bkey *k) x(reflink_p, 15) \ x(reflink_v, 16) \ x(inline_data, 17) \ - x(btree_ptr_v2, 18) + x(btree_ptr_v2, 18) \ + x(indirect_inline_data, 19) \ + x(alloc_v2, 20) \ + x(subvolume, 21) \ + x(snapshot, 22) \ + x(inode_v2, 23) \ + x(alloc_v3, 24) \ + x(set, 25) \ + x(lru, 26) \ + x(alloc_v4, 27) enum bch_bkey_type { #define x(name, nr) KEY_TYPE_##name = nr, @@ -349,11 +374,31 @@ enum bch_bkey_type { KEY_TYPE_MAX, }; +struct bch_deleted { + struct bch_val v; +}; + +struct bch_whiteout { + struct bch_val v; +}; + +struct bch_error { + struct bch_val v; +}; + struct bch_cookie { struct bch_val v; __le64 cookie; }; +struct bch_hash_whiteout { + struct bch_val v; +}; + +struct bch_set { + struct bch_val v; +}; + /* Extents */ /* @@ -550,9 +595,11 @@ struct bch_extent_stripe_ptr { #if defined(__LITTLE_ENDIAN_BITFIELD) __u64 type:5, block:8, - idx:51; + redundancy:4, + idx:47; #elif defined (__BIG_ENDIAN_BITFIELD) - __u64 idx:51, + __u64 idx:47, + redundancy:4, block:8, type:5; #endif @@ -602,13 +649,14 @@ struct bch_btree_ptr_v2 { __u64 mem_ptr; __le64 seq; __le16 sectors_written; - /* In case we ever decide to do variable size btree nodes: */ - __le16 sectors; + __le16 flags; struct bpos min_key; struct bch_extent_ptr start[0]; __u64 _data[0]; } __attribute__((packed, aligned(8))); 
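
Several hunks below declare flag accessors via these macros. The new BITMASK() is the native-endian counterpart of the existing LE_BITMASK(): it generates a getter/setter pair packing a value into bits [offset, end) of a host-endian field (bch_alloc_v4, further down, stores plain __u64/__u32 rather than __le64, so it cannot use the LE variants). As a rough sketch, the invocation BITMASK(BCH_ALLOC_V4_NEED_DISCARD, struct bch_alloc_v4, flags, 0, 1) that appears later in this diff expands to approximately:

static const unsigned BCH_ALLOC_V4_NEED_DISCARD_OFFSET = 0;
static const unsigned BCH_ALLOC_V4_NEED_DISCARD_BITS   = (1 - 0);

static inline __u64 BCH_ALLOC_V4_NEED_DISCARD(const struct bch_alloc_v4 *k)
{
	/* shift the field down, then mask to the field's width */
	return (k->flags >> 0) & ~(~0ULL << (1 - 0));
}

static inline void SET_BCH_ALLOC_V4_NEED_DISCARD(struct bch_alloc_v4 *k, __u64 v)
{
	/* clear the old bits, then OR in the new value, truncated to width */
	k->flags &= ~(~(~0ULL << (1 - 0)) << 0);
	k->flags |= (v & ~(~0ULL << (1 - 0))) << 0;
}

Note that the setter silently truncates v to the field width rather than asserting, matching the LE_BITMASK behaviour.
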
+LE16_BITMASK(BTREE_PTR_RANGE_UPDATED, struct bch_btree_ptr_v2, flags, 0, 1); + struct bch_extent { struct bch_val v; @@ -633,8 +681,6 @@ struct bch_reservation { #define BKEY_EXTENT_VAL_U64s_MAX \ (1 + BKEY_EXTENT_PTR_U64s_MAX * (BCH_REPLICAS_MAX + 1)) -#define BKEY_PADDED(key) __BKEY_PADDED(key, BKEY_EXTENT_VAL_U64s_MAX) - /* * Maximum possible size of an entire extent, key + value: */ #define BKEY_EXTENT_U64s_MAX (BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX) @@ -660,6 +706,16 @@ struct bch_inode { __u8 fields[0]; } __attribute__((packed, aligned(8))); +struct bch_inode_v2 { + struct bch_val v; + + __le64 bi_journal_seq; + __le64 bi_hash_seed; + __le64 bi_flags; + __le16 bi_mode; + __u8 fields[0]; +} __attribute__((packed, aligned(8))); + struct bch_inode_generation { struct bch_val v; @@ -667,11 +723,15 @@ struct bch_inode_generation { __le32 pad; } __attribute__((packed, aligned(8))); +/* + * bi_subvol and bi_parent_subvol are only set for subvolume roots: + */ + #define BCH_INODE_FIELDS() \ - x(bi_atime, 64) \ - x(bi_ctime, 64) \ - x(bi_mtime, 64) \ - x(bi_otime, 64) \ + x(bi_atime, 96) \ + x(bi_ctime, 96) \ + x(bi_mtime, 96) \ + x(bi_otime, 96) \ x(bi_size, 64) \ x(bi_sectors, 64) \ x(bi_uid, 32) \ @@ -688,7 +748,11 @@ struct bch_inode_generation { x(bi_foreground_target, 16) \ x(bi_background_target, 16) \ x(bi_erasure_code, 16) \ - x(bi_fields_set, 16) + x(bi_fields_set, 16) \ + x(bi_dir, 64) \ + x(bi_dir_offset, 64) \ + x(bi_subvol, 32) \ + x(bi_parent_subvol, 32) /* subset of BCH_INODE_FIELDS */ #define BCH_INODE_OPTS() \ @@ -724,6 +788,7 @@ enum { __BCH_INODE_I_SIZE_DIRTY= 5, __BCH_INODE_I_SECTORS_DIRTY= 6, __BCH_INODE_UNLINKED = 7, + __BCH_INODE_BACKPTR_UNTRUSTED = 8, /* bits 20+ reserved for packed fields below: */ }; @@ -736,9 +801,14 @@ enum { #define BCH_INODE_I_SIZE_DIRTY (1 << __BCH_INODE_I_SIZE_DIRTY) #define BCH_INODE_I_SECTORS_DIRTY (1 << __BCH_INODE_I_SECTORS_DIRTY) #define BCH_INODE_UNLINKED (1 << __BCH_INODE_UNLINKED) +#define BCH_INODE_BACKPTR_UNTRUSTED (1 << __BCH_INODE_BACKPTR_UNTRUSTED) LE32_BITMASK(INODE_STR_HASH, struct bch_inode, bi_flags, 20, 24); -LE32_BITMASK(INODE_NR_FIELDS, struct bch_inode, bi_flags, 24, 32); +LE32_BITMASK(INODE_NR_FIELDS, struct bch_inode, bi_flags, 24, 31); +LE32_BITMASK(INODE_NEW_VARINT, struct bch_inode, bi_flags, 31, 32); + +LE64_BITMASK(INODEv2_STR_HASH, struct bch_inode_v2, bi_flags, 20, 24); +LE64_BITMASK(INODEv2_NR_FIELDS, struct bch_inode_v2, bi_flags, 24, 31); /* Dirents */ @@ -757,7 +827,13 @@ struct bch_dirent { struct bch_val v; /* Target inode number: */ + union { __le64 d_inum; + struct { /* DT_SUBVOL */ + __le32 d_child_subvol; + __le32 d_parent_subvol; + }; + }; /* * Copy of mode bits 12-15 from the target inode - so userspace can get @@ -768,6 +844,9 @@ struct bch_dirent { __u8 d_name[]; } __attribute__((packed, aligned(8))); +#define DT_SUBVOL 16 +#define BCH_DT_MAX 17 + #define BCH_NAME_MAX (U8_MAX * sizeof(u64) - \ sizeof(struct bkey) - \ offsetof(struct bch_dirent, d_name)) @@ -798,34 +877,73 @@ struct bch_alloc { __u8 data[]; } __attribute__((packed, aligned(8))); -#define BCH_ALLOC_FIELDS() \ +#define BCH_ALLOC_FIELDS_V1() \ x(read_time, 16) \ x(write_time, 16) \ x(data_type, 8) \ x(dirty_sectors, 16) \ x(cached_sectors, 16) \ - x(oldest_gen, 8) + x(oldest_gen, 8) \ + x(stripe, 32) \ + x(stripe_redundancy, 8) -enum { -#define x(name, bytes) BCH_ALLOC_FIELD_##name, - BCH_ALLOC_FIELDS() -#undef x - BCH_ALLOC_FIELD_NR -}; +struct bch_alloc_v2 { + struct bch_val v; + __u8 nr_fields; + __u8 gen; + __u8 oldest_gen; + __u8 
data_type; + __u8 data[]; +} __attribute__((packed, aligned(8))); -static const unsigned BCH_ALLOC_FIELD_BYTES[] = { -#define x(name, bits) [BCH_ALLOC_FIELD_##name] = bits / 8, - BCH_ALLOC_FIELDS() -#undef x -}; +#define BCH_ALLOC_FIELDS_V2() \ + x(read_time, 64) \ + x(write_time, 64) \ + x(dirty_sectors, 32) \ + x(cached_sectors, 32) \ + x(stripe, 32) \ + x(stripe_redundancy, 8) -#define x(name, bits) + (bits / 8) -static const unsigned BKEY_ALLOC_VAL_U64s_MAX = - DIV_ROUND_UP(offsetof(struct bch_alloc, data) - BCH_ALLOC_FIELDS(), sizeof(u64)); -#undef x +struct bch_alloc_v3 { + struct bch_val v; + __le64 journal_seq; + __le32 flags; + __u8 nr_fields; + __u8 gen; + __u8 oldest_gen; + __u8 data_type; + __u8 data[]; +} __attribute__((packed, aligned(8))); + +struct bch_alloc_v4 { + struct bch_val v; + __u64 journal_seq; + __u32 flags; + __u8 gen; + __u8 oldest_gen; + __u8 data_type; + __u8 stripe_redundancy; + __u32 dirty_sectors; + __u32 cached_sectors; + __u64 io_time[2]; + __u32 stripe; + __u32 nr_external_backpointers; + struct bpos backpointers[0]; +} __attribute__((packed, aligned(8))); -#define BKEY_ALLOC_U64s_MAX (BKEY_U64s + BKEY_ALLOC_VAL_U64s_MAX) +LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags, 0, 1) +LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags, 1, 2) + +BITMASK(BCH_ALLOC_V4_NEED_DISCARD, struct bch_alloc_v4, flags, 0, 1) +BITMASK(BCH_ALLOC_V4_NEED_INC_GEN, struct bch_alloc_v4, flags, 1, 2) +BITMASK(BCH_ALLOC_V4_BACKPOINTERS_START,struct bch_alloc_v4, flags, 2, 8) +BITMASK(BCH_ALLOC_V4_NR_BACKPOINTERS, struct bch_alloc_v4, flags, 8, 14) + +enum { +#define x(name, _bits) BCH_ALLOC_FIELD_V1_##name, + BCH_ALLOC_FIELDS_V1() +#undef x +}; /* Quotas: */ @@ -873,17 +991,29 @@ struct bch_stripe { struct bch_reflink_p { struct bch_val v; __le64 idx; - - __le32 reservation_generation; - __u8 nr_replicas; - __u8 pad[3]; -}; + /* + * A reflink pointer might point to an indirect extent which is then + * later split (by copygc or rebalance). If we only pointed to part of + * the original indirect extent, and then one of the fragments is + * outside the range we point to, we'd leak a refcount: so when creating + * reflink pointers, we need to store pad values to remember the full + * range we were taking a reference on. 
+ */ + __le32 front_pad; + __le32 back_pad; +} __attribute__((packed, aligned(8))); struct bch_reflink_v { struct bch_val v; __le64 refcount; union bch_extent_entry start[0]; __u64 _data[0]; +} __attribute__((packed, aligned(8))); + +struct bch_indirect_inline_data { + struct bch_val v; + __le64 refcount; + u8 data[0]; }; /* Inline data */ @@ -893,6 +1023,52 @@ struct bch_inline_data { u8 data[0]; }; +/* Subvolumes: */ + +#define SUBVOL_POS_MIN POS(0, 1) +#define SUBVOL_POS_MAX POS(0, S32_MAX) +#define BCACHEFS_ROOT_SUBVOL 1 + +struct bch_subvolume { + struct bch_val v; + __le32 flags; + __le32 snapshot; + __le64 inode; +}; + +LE32_BITMASK(BCH_SUBVOLUME_RO, struct bch_subvolume, flags, 0, 1) +/* + * We need to know whether a subvolume is a snapshot so we can know whether we + * can delete it (or whether it should just be rm -rf'd) + */ +LE32_BITMASK(BCH_SUBVOLUME_SNAP, struct bch_subvolume, flags, 1, 2) +LE32_BITMASK(BCH_SUBVOLUME_UNLINKED, struct bch_subvolume, flags, 2, 3) + +/* Snapshots */ + +struct bch_snapshot { + struct bch_val v; + __le32 flags; + __le32 parent; + __le32 children[2]; + __le32 subvol; + __le32 pad; +}; + +LE32_BITMASK(BCH_SNAPSHOT_DELETED, struct bch_snapshot, flags, 0, 1) + +/* True if a subvolume points to this snapshot node: */ +LE32_BITMASK(BCH_SNAPSHOT_SUBVOL, struct bch_snapshot, flags, 1, 2) + +/* LRU btree: */ + +struct bch_lru { + struct bch_val v; + __le64 idx; +} __attribute__((packed, aligned(8))); + +#define LRU_ID_STRIPES (1U << 16) + /* Optional/variable size superblock sections: */ struct bch_sb_field { @@ -901,16 +1077,17 @@ struct bch_sb_field { __le32 type; }; -#define BCH_SB_FIELDS() \ - x(journal, 0) \ - x(members, 1) \ - x(crypt, 2) \ - x(replicas_v0, 3) \ - x(quota, 4) \ - x(disk_groups, 5) \ - x(clean, 6) \ - x(replicas, 7) \ - x(journal_seq_blacklist, 8) +#define BCH_SB_FIELDS() \ + x(journal, 0) \ + x(members, 1) \ + x(crypt, 2) \ + x(replicas_v0, 3) \ + x(quota, 4) \ + x(disk_groups, 5) \ + x(clean, 6) \ + x(replicas, 7) \ + x(journal_seq_blacklist, 8) \ + x(journal_v2, 9) enum bch_sb_field_type { #define x(f, nr) BCH_SB_FIELD_##f = nr, @@ -919,6 +1096,14 @@ enum bch_sb_field_type { BCH_SB_FIELD_NR }; +/* + * Most superblock fields are replicated in all device's superblocks - a few are + * not: + */ +#define BCH_SINGLE_DEVICE_SB_FIELDS \ + ((1U << BCH_SB_FIELD_journal)| \ + (1U << BCH_SB_FIELD_journal_v2)) + /* BCH_SB_FIELD_journal: */ struct bch_sb_field_journal { @@ -926,6 +1111,15 @@ struct bch_sb_field_journal { __le64 buckets[0]; }; +struct bch_sb_field_journal_v2 { + struct bch_sb_field field; + + struct bch_sb_field_journal_v2_entry { + __le64 start; + __le64 nr; + } d[0]; +}; + /* BCH_SB_FIELD_members: */ #define BCH_MIN_NR_NBUCKETS (1 << 6) @@ -942,33 +1136,30 @@ struct bch_member { }; LE64_BITMASK(BCH_MEMBER_STATE, struct bch_member, flags[0], 0, 4) -/* 4-10 unused, was TIER, HAS_(META)DATA */ -LE64_BITMASK(BCH_MEMBER_REPLACEMENT, struct bch_member, flags[0], 10, 14) +/* 4-14 unused, was TIER, HAS_(META)DATA, REPLACEMENT */ LE64_BITMASK(BCH_MEMBER_DISCARD, struct bch_member, flags[0], 14, 15) LE64_BITMASK(BCH_MEMBER_DATA_ALLOWED, struct bch_member, flags[0], 15, 20) LE64_BITMASK(BCH_MEMBER_GROUP, struct bch_member, flags[0], 20, 28) LE64_BITMASK(BCH_MEMBER_DURABILITY, struct bch_member, flags[0], 28, 30) - -#define BCH_TIER_MAX 4U +LE64_BITMASK(BCH_MEMBER_FREESPACE_INITIALIZED, + struct bch_member, flags[0], 30, 31) #if 0 LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS, struct bch_member, flags[1], 0, 20); 
LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40); #endif -enum bch_member_state { - BCH_MEMBER_STATE_RW = 0, - BCH_MEMBER_STATE_RO = 1, - BCH_MEMBER_STATE_FAILED = 2, - BCH_MEMBER_STATE_SPARE = 3, - BCH_MEMBER_STATE_NR = 4, -}; +#define BCH_MEMBER_STATES() \ + x(rw, 0) \ + x(ro, 1) \ + x(failed, 2) \ + x(spare, 3) -enum cache_replacement { - CACHE_REPLACEMENT_LRU = 0, - CACHE_REPLACEMENT_FIFO = 1, - CACHE_REPLACEMENT_RANDOM = 2, - CACHE_REPLACEMENT_NR = 3, +enum bch_member_state { +#define x(t, n) BCH_MEMBER_STATE_##t = n, + BCH_MEMBER_STATES() +#undef x + BCH_MEMBER_STATE_NR }; struct bch_sb_field_members { @@ -1026,14 +1217,20 @@ LE64_BITMASK(BCH_KDF_SCRYPT_P, struct bch_sb_field_crypt, kdf_flags, 32, 48); /* BCH_SB_FIELD_replicas: */ +#define BCH_DATA_TYPES() \ + x(none, 0) \ + x(sb, 1) \ + x(journal, 2) \ + x(btree, 3) \ + x(user, 4) \ + x(cached, 5) \ + x(parity, 6) + enum bch_data_type { - BCH_DATA_NONE = 0, - BCH_DATA_SB = 1, - BCH_DATA_JOURNAL = 2, - BCH_DATA_BTREE = 3, - BCH_DATA_USER = 4, - BCH_DATA_CACHED = 5, - BCH_DATA_NR = 6, +#define x(t, n) BCH_DATA_##t, + BCH_DATA_TYPES() +#undef x + BCH_DATA_NR }; struct bch_replicas_entry_v0 { @@ -1118,8 +1315,8 @@ struct bch_sb_field_clean { struct bch_sb_field field; __le32 flags; - __le16 read_clock; - __le16 write_clock; + __le16 _read_clock; /* no longer used */ + __le16 _write_clock; __le64 journal_seq; union { @@ -1152,11 +1349,25 @@ struct bch_sb_field_journal_seq_blacklist { #define BCH_JSET_VERSION_OLD 2 #define BCH_BSET_VERSION_OLD 3 +#define BCH_METADATA_VERSIONS() \ + x(bkey_renumber, 10) \ + x(inode_btree_change, 11) \ + x(snapshot, 12) \ + x(inode_backpointers, 13) \ + x(btree_ptr_sectors_written, 14) \ + x(snapshot_2, 15) \ + x(reflink_p_fix, 16) \ + x(subvol_dirent, 17) \ + x(inode_v2, 18) \ + x(freespace, 19) \ + x(alloc_v4, 20) + enum bcachefs_metadata_version { - bcachefs_metadata_version_min = 9, - bcachefs_metadata_version_new_versioning = 10, - bcachefs_metadata_version_bkey_renumber = 10, - bcachefs_metadata_version_max = 11, + bcachefs_metadata_version_min = 9, +#define x(t, n) bcachefs_metadata_version_##t = n, + BCH_METADATA_VERSIONS() +#undef x + bcachefs_metadata_version_max }; #define bcachefs_metadata_version_current (bcachefs_metadata_version_max - 1) @@ -1260,8 +1471,9 @@ LE64_BITMASK(BCH_SB_GRPQUOTA, struct bch_sb, flags[0], 58, 59); LE64_BITMASK(BCH_SB_PRJQUOTA, struct bch_sb, flags[0], 59, 60); LE64_BITMASK(BCH_SB_HAS_ERRORS, struct bch_sb, flags[0], 60, 61); +LE64_BITMASK(BCH_SB_HAS_TOPOLOGY_ERRORS,struct bch_sb, flags[0], 61, 62); -/* 61-64 unused */ +LE64_BITMASK(BCH_SB_BIG_ENDIAN, struct bch_sb, flags[0], 62, 63); LE64_BITMASK(BCH_SB_STR_HASH_TYPE, struct bch_sb, flags[1], 0, 4); LE64_BITMASK(BCH_SB_COMPRESSION_TYPE, struct bch_sb, flags[1], 4, 8); @@ -1289,6 +1501,13 @@ LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE, LE64_BITMASK(BCH_SB_GC_RESERVE_BYTES, struct bch_sb, flags[2], 4, 64); LE64_BITMASK(BCH_SB_ERASURE_CODE, struct bch_sb, flags[3], 0, 16); +LE64_BITMASK(BCH_SB_METADATA_TARGET, struct bch_sb, flags[3], 16, 28); +LE64_BITMASK(BCH_SB_SHARD_INUMS, struct bch_sb, flags[3], 28, 29); +LE64_BITMASK(BCH_SB_INODES_USE_KEY_CACHE,struct bch_sb, flags[3], 29, 30); +LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DELAY,struct bch_sb, flags[3], 30, 62); +LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DISABLED,struct bch_sb, flags[3], 62, 63); +LE64_BITMASK(BCH_SB_JOURNAL_RECLAIM_DELAY,struct bch_sb, flags[4], 0, 32); +LE64_BITMASK(BCH_SB_JOURNAL_TRANSACTION_NAMES,struct bch_sb, flags[4], 32, 33); 
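
A recurring change in this diff is replacing hand-written enums (member states, data types, error actions, checksum and compression types, metadata versions) with x-macro lists, so that a single list can generate the enum and any auxiliary tables. As a minimal sketch of the pattern — data_type_names is an illustrative name, not something this header defines — the BCH_DATA_TYPES() list above can also stamp out a string table for pretty-printing:

enum bch_data_type {
#define x(t, n) BCH_DATA_##t,
	BCH_DATA_TYPES()	/* BCH_DATA_none = 0 ... BCH_DATA_parity = 6 */
#undef x
	BCH_DATA_NR		/* = 7 */
};

/* illustrative: expanding the same list a second time as designated
 * initializers keeps names and values in sync automatically */
static const char * const data_type_names[] = {
#define x(t, n) [n] = #t,
	BCH_DATA_TYPES()
#undef x
};

The same trick explains the BCH_METADATA_VERSIONS() hunk: the unassigned bcachefs_metadata_version_max enumerator lands one past the newest listed version (21, with alloc_v4 = 20), so bcachefs_metadata_version_current, defined as max - 1, automatically tracks the last entry in the list.
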
/* * Features: @@ -1296,7 +1515,7 @@ LE64_BITMASK(BCH_SB_ERASURE_CODE, struct bch_sb, flags[3], 0, 16); * journal_seq_blacklist_v3: gates BCH_SB_FIELD_journal_seq_blacklist * reflink: gates KEY_TYPE_reflink * inline_data: gates KEY_TYPE_inline_data - * new_siphash: gates BCH_STR_HASH_SIPHASH + * new_siphash: gates BCH_STR_HASH_siphash * new_extent_overwrite: gates BTREE_NODE_NEW_EXTENT_OVERWRITE */ #define BCH_SB_FEATURES() \ @@ -1313,13 +1532,26 @@ LE64_BITMASK(BCH_SB_ERASURE_CODE, struct bch_sb, flags[3], 0, 16); x(incompressible, 10) \ x(btree_ptr_v2, 11) \ x(extents_above_btree_updates, 12) \ - x(btree_updates_journalled, 13) + x(btree_updates_journalled, 13) \ + x(reflink_inline_data, 14) \ + x(new_varint, 15) \ + x(journal_no_flush, 16) \ + x(alloc_v2, 17) \ + x(extents_across_btree_nodes, 18) + +#define BCH_SB_FEATURES_ALWAYS \ + ((1ULL << BCH_FEATURE_new_extent_overwrite)| \ + (1ULL << BCH_FEATURE_extents_above_btree_updates)|\ + (1ULL << BCH_FEATURE_btree_updates_journalled)|\ + (1ULL << BCH_FEATURE_alloc_v2)|\ + (1ULL << BCH_FEATURE_extents_across_btree_nodes)) #define BCH_SB_FEATURES_ALL \ - ((1ULL << BCH_FEATURE_new_siphash)| \ - (1ULL << BCH_FEATURE_new_extent_overwrite)| \ + (BCH_SB_FEATURES_ALWAYS| \ + (1ULL << BCH_FEATURE_new_siphash)| \ (1ULL << BCH_FEATURE_btree_ptr_v2)| \ - (1ULL << BCH_FEATURE_extents_above_btree_updates)) + (1ULL << BCH_FEATURE_new_varint)| \ + (1ULL << BCH_FEATURE_journal_no_flush)) enum bch_sb_feature { #define x(f, n) BCH_FEATURE_##f, @@ -1328,74 +1560,112 @@ enum bch_sb_feature { BCH_FEATURE_NR, }; +#define BCH_SB_COMPAT() \ + x(alloc_info, 0) \ + x(alloc_metadata, 1) \ + x(extents_above_btree_updates_done, 2) \ + x(bformat_overflow_done, 3) + enum bch_sb_compat { - BCH_COMPAT_FEAT_ALLOC_INFO = 0, - BCH_COMPAT_FEAT_ALLOC_METADATA = 1, +#define x(f, n) BCH_COMPAT_##f, + BCH_SB_COMPAT() +#undef x + BCH_COMPAT_NR, }; /* options: */ #define BCH_REPLICAS_MAX 4U +#define BCH_BKEY_PTRS_MAX 16U + +#define BCH_ERROR_ACTIONS() \ + x(continue, 0) \ + x(ro, 1) \ + x(panic, 2) + enum bch_error_actions { - BCH_ON_ERROR_CONTINUE = 0, - BCH_ON_ERROR_RO = 1, - BCH_ON_ERROR_PANIC = 2, - BCH_NR_ERROR_ACTIONS = 3, +#define x(t, n) BCH_ON_ERROR_##t = n, + BCH_ERROR_ACTIONS() +#undef x + BCH_ON_ERROR_NR }; +#define BCH_STR_HASH_TYPES() \ + x(crc32c, 0) \ + x(crc64, 1) \ + x(siphash_old, 2) \ + x(siphash, 3) + enum bch_str_hash_type { - BCH_STR_HASH_CRC32C = 0, - BCH_STR_HASH_CRC64 = 1, - BCH_STR_HASH_SIPHASH_OLD = 2, - BCH_STR_HASH_SIPHASH = 3, - BCH_STR_HASH_NR = 4, +#define x(t, n) BCH_STR_HASH_##t = n, + BCH_STR_HASH_TYPES() +#undef x + BCH_STR_HASH_NR }; +#define BCH_STR_HASH_OPTS() \ + x(crc32c, 0) \ + x(crc64, 1) \ + x(siphash, 2) + enum bch_str_hash_opts { - BCH_STR_HASH_OPT_CRC32C = 0, - BCH_STR_HASH_OPT_CRC64 = 1, - BCH_STR_HASH_OPT_SIPHASH = 2, - BCH_STR_HASH_OPT_NR = 3, +#define x(t, n) BCH_STR_HASH_OPT_##t = n, + BCH_STR_HASH_OPTS() +#undef x + BCH_STR_HASH_OPT_NR }; +#define BCH_CSUM_TYPES() \ + x(none, 0) \ + x(crc32c_nonzero, 1) \ + x(crc64_nonzero, 2) \ + x(chacha20_poly1305_80, 3) \ + x(chacha20_poly1305_128, 4) \ + x(crc32c, 5) \ + x(crc64, 6) \ + x(xxhash, 7) + enum bch_csum_type { - BCH_CSUM_NONE = 0, - BCH_CSUM_CRC32C_NONZERO = 1, - BCH_CSUM_CRC64_NONZERO = 2, - BCH_CSUM_CHACHA20_POLY1305_80 = 3, - BCH_CSUM_CHACHA20_POLY1305_128 = 4, - BCH_CSUM_CRC32C = 5, - BCH_CSUM_CRC64 = 6, - BCH_CSUM_NR = 7, +#define x(t, n) BCH_CSUM_##t = n, + BCH_CSUM_TYPES() +#undef x + BCH_CSUM_NR }; static const unsigned bch_crc_bytes[] = { - [BCH_CSUM_NONE] = 0, - 
[BCH_CSUM_CRC32C_NONZERO] = 4, - [BCH_CSUM_CRC32C] = 4, - [BCH_CSUM_CRC64_NONZERO] = 8, - [BCH_CSUM_CRC64] = 8, - [BCH_CSUM_CHACHA20_POLY1305_80] = 10, - [BCH_CSUM_CHACHA20_POLY1305_128] = 16, + [BCH_CSUM_none] = 0, + [BCH_CSUM_crc32c_nonzero] = 4, + [BCH_CSUM_crc32c] = 4, + [BCH_CSUM_crc64_nonzero] = 8, + [BCH_CSUM_crc64] = 8, + [BCH_CSUM_xxhash] = 8, + [BCH_CSUM_chacha20_poly1305_80] = 10, + [BCH_CSUM_chacha20_poly1305_128] = 16, }; static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type) { switch (type) { - case BCH_CSUM_CHACHA20_POLY1305_80: - case BCH_CSUM_CHACHA20_POLY1305_128: + case BCH_CSUM_chacha20_poly1305_80: + case BCH_CSUM_chacha20_poly1305_128: return true; default: return false; } } +#define BCH_CSUM_OPTS() \ + x(none, 0) \ + x(crc32c, 1) \ + x(crc64, 2) \ + x(xxhash, 3) + enum bch_csum_opts { - BCH_CSUM_OPT_NONE = 0, - BCH_CSUM_OPT_CRC32C = 1, - BCH_CSUM_OPT_CRC64 = 2, - BCH_CSUM_OPT_NR = 3, +#define x(t, n) BCH_CSUM_OPT_##t = n, + BCH_CSUM_OPTS() +#undef x + BCH_CSUM_OPT_NR }; #define BCH_COMPRESSION_TYPES() \ @@ -1407,7 +1677,7 @@ enum bch_csum_opts { x(incompressible, 5) enum bch_compression_type { -#define x(t, n) BCH_COMPRESSION_TYPE_##t, +#define x(t, n) BCH_COMPRESSION_TYPE_##t = n, BCH_COMPRESSION_TYPES() #undef x BCH_COMPRESSION_TYPE_NR @@ -1420,7 +1690,7 @@ enum bch_compression_type { x(zstd, 3) enum bch_compression_opts { -#define x(t, n) BCH_COMPRESSION_OPT_##t, +#define x(t, n) BCH_COMPRESSION_OPT_##t = n, BCH_COMPRESSION_OPTS() #undef x BCH_COMPRESSION_OPT_NR @@ -1470,7 +1740,10 @@ static inline __u64 __bset_magic(struct bch_sb *sb) x(blacklist, 3) \ x(blacklist_v2, 4) \ x(usage, 5) \ - x(data_usage, 6) + x(data_usage, 6) \ + x(clock, 7) \ + x(dev_usage, 8) \ + x(log, 9) enum { #define x(f, nr) BCH_JSET_ENTRY_##f = nr, @@ -1500,11 +1773,16 @@ struct jset_entry_blacklist_v2 { __le64 end; }; +#define BCH_FS_USAGE_TYPES() \ + x(reserved, 0) \ + x(inodes, 1) \ + x(key_version, 2) + enum { - FS_USAGE_RESERVED = 0, - FS_USAGE_INODES = 1, - FS_USAGE_KEY_VERSION = 2, - FS_USAGE_NR = 3 +#define x(f, nr) BCH_FS_USAGE_##f = nr, + BCH_FS_USAGE_TYPES() +#undef x + BCH_FS_USAGE_NR }; struct jset_entry_usage { @@ -1518,6 +1796,41 @@ struct jset_entry_data_usage { struct bch_replicas_entry r; } __attribute__((packed)); +struct jset_entry_clock { + struct jset_entry entry; + __u8 rw; + __u8 pad[7]; + __le64 time; +} __attribute__((packed)); + +struct jset_entry_dev_usage_type { + __le64 buckets; + __le64 sectors; + __le64 fragmented; +} __attribute__((packed)); + +struct jset_entry_dev_usage { + struct jset_entry entry; + __le32 dev; + __u32 pad; + + __le64 buckets_ec; + __le64 buckets_unavailable; + + struct jset_entry_dev_usage_type d[]; +} __attribute__((packed)); + +static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u) +{ + return (vstruct_bytes(&u->entry) - sizeof(struct jset_entry_dev_usage)) / + sizeof(struct jset_entry_dev_usage_type); +} + +struct jset_entry_log { + struct jset_entry entry; + u8 d[]; +} __attribute__((packed)); + /* * On disk format for a journal entry: * seq is monotonically increasing; every journal entry has its own unique @@ -1540,8 +1853,8 @@ struct jset { __u8 encrypted_start[0]; - __le16 read_clock; - __le16 write_clock; + __le16 _read_clock; /* no longer used */ + __le16 _write_clock; /* Sequence number of oldest dirty journal entry */ __le64 last_seq; @@ -1555,23 +1868,30 @@ struct jset { LE32_BITMASK(JSET_CSUM_TYPE, struct jset, flags, 0, 4); LE32_BITMASK(JSET_BIG_ENDIAN, struct jset, flags, 
4, 5); +LE32_BITMASK(JSET_NO_FLUSH, struct jset, flags, 5, 6); #define BCH_JOURNAL_BUCKETS_MIN 8 /* Btree: */ -#define BCH_BTREE_IDS() \ - x(EXTENTS, 0, "extents") \ - x(INODES, 1, "inodes") \ - x(DIRENTS, 2, "dirents") \ - x(XATTRS, 3, "xattrs") \ - x(ALLOC, 4, "alloc") \ - x(QUOTAS, 5, "quotas") \ - x(EC, 6, "stripes") \ - x(REFLINK, 7, "reflink") +#define BCH_BTREE_IDS() \ + x(extents, 0) \ + x(inodes, 1) \ + x(dirents, 2) \ + x(xattrs, 3) \ + x(alloc, 4) \ + x(quotas, 5) \ + x(stripes, 6) \ + x(reflink, 7) \ + x(subvolumes, 8) \ + x(snapshots, 9) \ + x(lru, 10) \ + x(freespace, 11) \ + x(need_discard, 12) \ + x(backpointers, 13) enum btree_id { -#define x(kwd, val, name) BTREE_ID_##kwd = val, +#define x(kwd, val) BTREE_ID_##kwd = val, BCH_BTREE_IDS() #undef x BTREE_ID_NR @@ -1615,6 +1935,9 @@ LE32_BITMASK(BSET_BIG_ENDIAN, struct bset, flags, 4, 5); LE32_BITMASK(BSET_SEPARATE_WHITEOUTS, struct bset, flags, 5, 6); +/* Sector offset within the btree node: */ +LE32_BITMASK(BSET_OFFSET, struct bset, flags, 16, 32); + struct btree_node { struct bch_csum csum; __le64 magic; @@ -1625,7 +1948,7 @@ struct btree_node { /* Closed interval: */ struct bpos min_key; struct bpos max_key; - struct bch_extent_ptr ptr; + struct bch_extent_ptr _ptr; /* not used anymore */ struct bkey_format format; union {
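
The BCH_BTREE_IDS() rework above also drops the old third column — x(EXTENTS, 0, "extents") becomes x(extents, 0) — because with lowercase identifiers, stringizing the keyword yields the display name for free. A hypothetical name table (btree_id_names is an assumed name, for illustration only) could be generated directly from the list:

/* illustrative sketch: #kwd replaces the old explicit name column */
static const char * const btree_id_names[] = {
#define x(kwd, val) [val] = #kwd,
	BCH_BTREE_IDS()
#undef x
};

Here btree_id_names[BTREE_ID_backpointers] is "backpointers", with no separate string to keep in sync as new btrees (subvolumes, snapshots, lru, freespace, need_discard, backpointers) are added to the list.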