/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_FORMAT_H
#define _BCACHEFS_FORMAT_H
/*
 * bcachefs on disk data structures
 *
 * There are three main types of on disk data structures in bcachefs (this is
 * reduced from 5 in bcache)
 *
 * The btree is the primary structure; most metadata exists as keys in the
 * various btrees. There are only a small number of btrees, they're not
 * sharded - we have one btree for extents, another for inodes, et cetera.
 *
 * The superblock contains the location of the journal, the list of devices in
 * the filesystem, and in general any metadata we need in order to decide
 * whether we can start a filesystem or that we need prior to reading the
 * journal/btree roots.
 *
 * The superblock is extensible, and most of the contents of the superblock are
 * in variable length, type tagged fields; see struct bch_sb_field.
 *
 * Backup superblocks do not reside in a fixed location; also, superblocks do
 * not have a fixed size. To locate backup superblocks we have struct
 * bch_sb_layout; we store a copy of this inside every superblock, and also
 * before the first superblock.
 *
 * The journal primarily records btree updates in the order they occurred;
 * journal replay consists of just iterating over all the keys in the open
 * journal entries and re-inserting them into the btrees.
 *
 * The journal also contains entry types for the btree roots, and blacklisted
 * journal sequence numbers (see journal_seq_blacklist.c).
 *
 * BTREE NODES:
 *
 * bcachefs btrees are copy on write b+ trees, where nodes are big (typically
 * 256k-512k) and log structured. We use struct btree_node for writing the
 * first entry in a given node (offset 0), and struct btree_node_entry for all
 * subsequent writes.
 *
 * After the header, btree node entries contain a list of keys in sorted order.
 * Values are stored inline with the keys; since values are variable length (and
 * keys effectively are variable length too, due to packing) we can't do random
 * access without building up additional in memory tables in the btree node read
 * path.
 *
 * BTREE KEYS (struct bkey):
 *
 * The various btrees share a common format for the key - so as to avoid
 * switching in fastpath lookup/comparison code - but define their own
 * structures for the key values.
 *
 * The size of a key/value pair is stored as a u8 in units of u64s, so the max
 * size is just under 2k. The common part also contains a type tag for the
 * value, and a format field indicating whether the key is packed or not (and
 * also meant to allow adding new key fields in the future, if desired).
 *
 * bkeys, when stored within a btree node, may also be packed. In that case, the
 * bkey_format in that node is used to unpack it. Packed bkeys mean that we can
 * be generous with field sizes in the common part of the key format (64 bit
 * inode number, 64 bit offset, 96 bit version field, etc.) for negligible cost.
 */
#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/uuid.h>
#define LE_BITMASK(_bits, name, type, field, offset, end)		\
static const unsigned	name##_OFFSET = offset;				\
static const unsigned	name##_BITS = (end - offset);			\
static const __u##_bits	name##_MAX = (1ULL << (end - offset)) - 1;	\
									\
static inline __u64 name(const type *k)				\
{									\
	return (__le##_bits##_to_cpu(k->field) >> offset) &		\
		~(~0ULL << (end - offset));				\
}									\
									\
static inline void SET_##name(type *k, __u64 v)			\
{									\
	__u##_bits new = __le##_bits##_to_cpu(k->field);		\
									\
	new &= ~(~(~0ULL << (end - offset)) << offset);			\
	new |= (v & ~(~0ULL << (end - offset))) << offset;		\
	k->field = __cpu_to_le##_bits(new);				\
}
#define LE16_BITMASK(n, t, f, o, e)	LE_BITMASK(16, n, t, f, o, e)
#define LE32_BITMASK(n, t, f, o, e)	LE_BITMASK(32, n, t, f, o, e)
#define LE64_BITMASK(n, t, f, o, e)	LE_BITMASK(64, n, t, f, o, e)
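/*
 * For example, a declaration later in this file,
 *
 *	LE64_BITMASK(BCH_SB_CLEAN, struct bch_sb, flags[0], 1, 2);
 *
 * generates BCH_SB_CLEAN()/SET_BCH_SB_CLEAN() accessors that read and write
 * bit 1 of the little endian field flags[0], converting to/from CPU byte
 * order on every access:
 *
 *	if (BCH_SB_CLEAN(sb)) { ... }
 *	SET_BCH_SB_CLEAN(sb, 0);
 */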
struct bkey_format {
	__u8		key_u64s;
	__u8		nr_fields;
	/* One unused slot for now: */
	__u8		bits_per_field[6];
	__le64		field_offset[6];
};
/* Btree keys - all units are in sectors */

struct bpos {
	/*
	 * Word order matches machine byte order - btree code treats a bpos as a
	 * single large integer, for search/comparison purposes
	 *
	 * Note that wherever a bpos is embedded in another on disk data
	 * structure, it has to be byte swabbed when reading in metadata that
	 * wasn't written in native endian order:
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u32		snapshot;
	__u64		offset;
	__u64		inode;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	__u64		inode;
	__u64		offset;		/* Points to end of extent - sectors */
	__u32		snapshot;
#else
#error edit for your odd byteorder.
#endif
} __attribute__((packed, aligned(4)));
#define KEY_INODE_MAX			((__u64)~0ULL)
#define KEY_OFFSET_MAX			((__u64)~0ULL)
#define KEY_SNAPSHOT_MAX		((__u32)~0U)
#define KEY_SIZE_MAX			((__u32)~0U)
static inline struct bpos SPOS(__u64 inode, __u64 offset, __u32 snapshot)
{
	return (struct bpos) {
		.inode		= inode,
		.offset		= offset,
		.snapshot	= snapshot,
	};
}
#define POS_MIN				SPOS(0, 0, 0)
#define POS_MAX				SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, 0)
#define SPOS_MAX			SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, KEY_SNAPSHOT_MAX)
#define POS(_inode, _offset)		SPOS(_inode, _offset, 0)
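/*
 * A minimal sketch of the "single large integer" comparison described in the
 * struct bpos comment above (illustrative only - the filesystem's real
 * comparison helpers also handle packed keys):
 */
static inline int bpos_cmp_sketch(struct bpos l, struct bpos r)
{
	if (l.inode != r.inode)
		return l.inode < r.inode ? -1 : 1;
	if (l.offset != r.offset)
		return l.offset < r.offset ? -1 : 1;
	if (l.snapshot != r.snapshot)
		return l.snapshot < r.snapshot ? -1 : 1;
	return 0;
}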
/* Empty placeholder struct, for container_of() */
struct bch_val {
	__u64		__nothing[0];
};

struct bversion {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u64		lo;
	__u32		hi;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	__u32		hi;
	__u64		lo;
#endif
} __attribute__((packed, aligned(4)));
struct bkey {
	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#else
#error edit for your odd byteorder.
#endif

	/* Type of the value */
	__u8		type;

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u8		pad[1];

	struct bversion	version;
	__u32		size;		/* extent size, in sectors */
	struct bpos	p;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	struct bpos	p;
	__u32		size;		/* extent size, in sectors */
	struct bversion	version;

	__u8		pad[1];
#endif
} __attribute__((packed, aligned(8)));
struct bkey_packed {
	__u64		_data[0];

	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/*
	 * Format of key (0 for format local to btree node)
	 *
	 * XXX: next incompat on disk format change, switch format and
	 * needs_whiteout - bkey_packed() will be cheaper if format is the high
	 * bits of the bitfield
	 */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#endif

	/* Type of the value */
	__u8		type;
	__u8		key_start[0];

	/*
	 * We copy bkeys with struct assignment in various places, and while
	 * that shouldn't be done with packed bkeys we can't disallow it in C,
	 * and it's legal to cast a bkey to a bkey_packed - so padding it out
	 * to the same size as struct bkey should hopefully be safest.
	 */
	__u8		pad[sizeof(struct bkey) - 3];
} __attribute__((packed, aligned(8)));
#define BKEY_U64s			(sizeof(struct bkey) / sizeof(__u64))
#define BKEY_U64s_MAX			U8_MAX
#define BKEY_VAL_U64s_MAX		(BKEY_U64s_MAX - BKEY_U64s)

#define KEY_PACKED_BITS_START		24

#define KEY_FORMAT_LOCAL_BTREE		0
#define KEY_FORMAT_CURRENT		1
enum bch_bkey_fields {
	BKEY_FIELD_INODE,
	BKEY_FIELD_OFFSET,
	BKEY_FIELD_SNAPSHOT,
	BKEY_FIELD_SIZE,
	BKEY_FIELD_VERSION_HI,
	BKEY_FIELD_VERSION_LO,
	BKEY_NR_FIELDS,
};
#define bkey_format_field(name, field)					\
	[BKEY_FIELD_##name] = (sizeof(((struct bkey *) NULL)->field) * 8)

#define BKEY_FORMAT_CURRENT						\
((struct bkey_format) {							\
	.key_u64s	= BKEY_U64s,					\
	.nr_fields	= BKEY_NR_FIELDS,				\
	.bits_per_field = {						\
		bkey_format_field(INODE,	p.inode),		\
		bkey_format_field(OFFSET,	p.offset),		\
		bkey_format_field(SNAPSHOT,	p.snapshot),		\
		bkey_format_field(SIZE,		size),			\
		bkey_format_field(VERSION_HI,	version.hi),		\
		bkey_format_field(VERSION_LO,	version.lo),		\
	},								\
})
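/*
 * Example (sketch): a format local to a btree node can shrink or omit fields.
 * A node whose keys all share one inode number could use
 * bits_per_field[BKEY_FIELD_INODE] == 0 with field_offset[BKEY_FIELD_INODE]
 * set to that inode number - packed keys then store no inode bits at all, and
 * unpacking adds the field_offset back in.
 */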
/* bkey with inline value */
struct bkey_i {
	__u64			_data[0];

	union {
	struct {
		/* Size of combined key and value, in u64s */
		__u8	u64s;
	};
	struct {
		struct bkey	k;
		struct bch_val	v;
	};
	};
};

#define KEY(_inode, _offset, _size)					\
((struct bkey) {							\
	.u64s		= BKEY_U64s,					\
	.format		= KEY_FORMAT_CURRENT,				\
	.p		= POS(_inode, _offset),				\
	.size		= _size,					\
})

static inline void bkey_init(struct bkey *k)
{
	*k = KEY(0, 0, 0);
}
#define bkey_bytes(_k)		((_k)->u64s * sizeof(__u64))

#define __BKEY_PADDED(key, pad)					\
	struct { struct bkey_i key; __u64 key ## _pad[pad]; }
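/*
 * Usage sketch (illustrative): this is how an on-stack key with room for a
 * value is typically declared -
 *
 *	__BKEY_PADDED(k, BKEY_EXTENT_VAL_U64s_MAX) tmp;
 *	bkey_init(&tmp.k.k);
 */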
/*
 * - DELETED keys are used internally to mark keys that should be ignored but
 *   override keys in composition order. Their version number is ignored.
 *
 * - DISCARDED keys indicate that the data is all 0s because it has been
 *   discarded. DISCARDs may have a version; if the version is nonzero the key
 *   will be persistent, otherwise the key will be dropped whenever the btree
 *   node is rewritten (like DELETED keys).
 *
 * - ERROR: any read of the data returns a read error, as the data was lost due
 *   to a failing device. Like DISCARDED keys, they can be removed (overridden)
 *   by new writes or cluster-wide GC. Node repair can also overwrite them with
 *   the same or a more recent version number, but not with an older version
 *   number.
 *
 * - WHITEOUT: for hash table btrees
 */
#define BCH_BKEY_TYPES()				\
	x(hash_whiteout,	4)			\
	x(inode_generation,	9)			\
	x(btree_ptr_v2,		18)			\
	x(indirect_inline_data, 19)			\
	x(alloc_v2,		20)

enum bch_bkey_type {
#define x(name, nr) KEY_TYPE_##name = nr,
	BCH_BKEY_TYPES()
#undef x
	KEY_TYPE_MAX,
};

struct bch_hash_whiteout {
	struct bch_val		v;
};
/*
 * In extent bkeys, the value is a list of pointers (bch_extent_ptr), optionally
 * preceded by checksum/compression information (bch_extent_crc32 or
 * bch_extent_crc64).
 *
 * One major determining factor in the format of extents is how we handle and
 * represent extents that have been partially overwritten and thus trimmed:
 *
 * If an extent is not checksummed or compressed, when the extent is trimmed we
 * don't have to remember the extent we originally allocated and wrote: we can
 * merely adjust ptr->offset to point to the start of the data that is currently
 * live. The size field in struct bkey records the current (live) size of the
 * extent, and is also used to mean "size of region on disk that we point to" in
 * this case.
 *
 * Thus an extent that is not checksummed or compressed will consist only of a
 * list of bch_extent_ptrs, with none of the fields in
 * bch_extent_crc32/bch_extent_crc64.
 *
 * When an extent is checksummed or compressed, it's not possible to read only
 * the data that is currently live: we have to read the entire extent that was
 * originally written, and then return only the part of the extent that is
 * currently live.
 *
 * Thus, in addition to the current size of the extent in struct bkey, we need
 * to store the size of the originally allocated space - this is the
 * compressed_size and uncompressed_size fields in bch_extent_crc32/64. Also,
 * when the extent is trimmed, instead of modifying the offset field of the
 * pointer, we keep a second smaller offset field - "offset into the original
 * extent of the currently live region".
 *
 * The other major determining factor is replication and data migration:
 *
 * Each pointer may have its own bch_extent_crc32/64. When doing a replicated
 * write, we will initially write all the replicas in the same format, with the
 * same checksum type and compression format - however, when copygc runs later (or
 * tiering/cache promotion, anything that moves data), it is not in general
 * going to rewrite all the pointers at once - one of the replicas may be in a
 * bucket on one device that has very little fragmentation while another lives
 * in a bucket that has become heavily fragmented, and thus is being rewritten
 * sooner than the rest.
 *
 * Thus it will only move a subset of the pointers (or in the case of
 * tiering/cache promotion perhaps add a single pointer without dropping any
 * current pointers), and if the extent has been partially overwritten it must
 * write only the currently live portion (or copygc would not be able to reduce
 * fragmentation!) - which necessitates a different bch_extent_crc format for
 * the new pointer.
 *
 * But in the interests of space efficiency, we don't want to store one
 * bch_extent_crc for each pointer if we don't have to.
 *
 * Thus, a bch_extent consists of bch_extent_crc32s, bch_extent_crc64s, and
 * bch_extent_ptrs appended arbitrarily one after the other. We determine the
 * type of a given entry with a scheme similar to utf8 (except we're encoding a
 * type, not a size), encoding the type in the position of the first set bit:
 *
 * bch_extent_crc32	- 0b1
 * bch_extent_ptr	- 0b10
 * bch_extent_crc64	- 0b100
 *
 * We do it this way because bch_extent_crc32 is _very_ constrained on bits (and
 * bch_extent_crc64 is the least constrained).
 *
 * Then, each bch_extent_crc32/64 applies to the pointers that follow after it,
 * until the next bch_extent_crc32/64.
 *
 * If there are no bch_extent_crcs preceding a bch_extent_ptr, then that pointer
 * is neither checksummed nor compressed.
 */
/* 128 bits, sufficient for cryptographic MACs: */
struct bch_csum {
	__le64			lo;
	__le64			hi;
} __attribute__((packed, aligned(8)));

#define BCH_EXTENT_ENTRY_TYPES()		\
	x(ptr,			0)		\
	x(crc32,		1)		\
	x(crc64,		2)		\
	x(crc128,		3)		\
	x(stripe_ptr,		4)
#define BCH_EXTENT_ENTRY_MAX	5

enum bch_extent_entry_type {
#define x(f, n) BCH_EXTENT_ENTRY_##f = n,
	BCH_EXTENT_ENTRY_TYPES()
#undef x
};
/* Compressed/uncompressed size are stored biased by 1: */
struct bch_extent_crc32 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u32			type:2,
				_compressed_size:7,
				_uncompressed_size:7,
				offset:7,
				_unused:1,
				csum_type:4,
				compression_type:4;
	__u32			csum;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u32			csum;
	__u32			compression_type:4,
				csum_type:4,
				_unused:1,
				offset:7,
				_uncompressed_size:7,
				_compressed_size:7,
				type:2;
#endif
} __attribute__((packed, aligned(8)));
#define CRC32_SIZE_MAX		(1U << 7)
#define CRC32_NONCE_MAX		0
struct bch_extent_crc64 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:3,
				_compressed_size:9,
				_uncompressed_size:9,
				offset:9,
				nonce:10,
				csum_type:4,
				compression_type:4,
				csum_hi:16;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			csum_hi:16,
				compression_type:4,
				csum_type:4,
				nonce:10,
				offset:9,
				_uncompressed_size:9,
				_compressed_size:9,
				type:3;
#endif
	__u64			csum_lo;
} __attribute__((packed, aligned(8)));
#define CRC64_SIZE_MAX		(1U << 9)
#define CRC64_NONCE_MAX		((1U << 10) - 1)
struct bch_extent_crc128 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:4,
				_compressed_size:13,
				_uncompressed_size:13,
				offset:13,
				nonce:13,
				csum_type:4,
				compression_type:4;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			compression_type:4,
				csum_type:4,
				nonce:13,
				offset:13,
				_uncompressed_size:13,
				_compressed_size:13,
				type:4;
#endif
	struct bch_csum		csum;
} __attribute__((packed, aligned(8)));
#define CRC128_SIZE_MAX		(1U << 13)
#define CRC128_NONCE_MAX	((1U << 13) - 1)
/*
 * @reservation - pointer hasn't been written to, just reserved
 */
struct bch_extent_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:1,
				cached:1,
				unused:1,
				reservation:1,
				offset:44, /* 8 petabytes */
				dev:8,
				gen:8;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			gen:8,
				dev:8,
				offset:44,
				reservation:1,
				unused:1,
				cached:1,
				type:1;
#endif
} __attribute__((packed, aligned(8)));
struct bch_extent_stripe_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:5,
				block:8,
				redundancy:4,
				idx:47;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			idx:47,
				redundancy:4,
				block:8,
				type:5;
#endif
};
struct bch_extent_reservation {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:6,
				unused:22,
				replicas:4,
				generation:32;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			generation:32,
				replicas:4,
				unused:22,
				type:6;
#endif
};
union bch_extent_entry {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ || __BITS_PER_LONG == 64
	unsigned long			type;
#elif __BITS_PER_LONG == 32
	struct {
		unsigned long		pad;
		unsigned long		type;
	};
#else
#error edit for your odd byteorder.
#endif

#define x(f, n) struct bch_extent_##f	f;
	BCH_EXTENT_ENTRY_TYPES()
#undef x
};
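/*
 * A minimal sketch of the first-set-bit decoding described in the comment
 * above (illustrative only; the real helpers live with the extent code):
 */
static inline unsigned extent_entry_type_sketch(const union bch_extent_entry *e)
{
	/* position of the lowest set bit in the first word encodes the type */
	return e->type ? __builtin_ctzl(e->type) : BCH_EXTENT_ENTRY_MAX;
}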
struct bch_btree_ptr {
	struct bch_val		v;

	struct bch_extent_ptr	start[0];
	__u64			_data[0];
} __attribute__((packed, aligned(8)));
struct bch_btree_ptr_v2 {
	struct bch_val		v;

	__u64			mem_ptr;
	__le64			seq;
	__le16			sectors_written;
	__le16			flags;
	struct bpos		min_key;
	struct bch_extent_ptr	start[0];
	__u64			_data[0];
} __attribute__((packed, aligned(8)));

LE16_BITMASK(BTREE_PTR_RANGE_UPDATED,	struct bch_btree_ptr_v2, flags, 0, 1);
struct bch_extent {
	struct bch_val		v;

	union bch_extent_entry	start[0];
	__u64			_data[0];
} __attribute__((packed, aligned(8)));

struct bch_reservation {
	struct bch_val		v;

	__le32			generation;
	__u8			nr_replicas;
	__u8			pad[3];
} __attribute__((packed, aligned(8)));
/* Maximum size (in u64s) a single pointer could be: */
#define BKEY_EXTENT_PTR_U64s_MAX				\
	((sizeof(struct bch_extent_crc128) +			\
	  sizeof(struct bch_extent_ptr)) / sizeof(u64))

/* Maximum possible size of an entire extent value: */
#define BKEY_EXTENT_VAL_U64s_MAX				\
	(1 + BKEY_EXTENT_PTR_U64s_MAX * (BCH_REPLICAS_MAX + 1))

/* Maximum possible size of an entire extent, key + value: */
#define BKEY_EXTENT_U64s_MAX		(BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)

/* Btree pointers don't carry around checksums: */
#define BKEY_BTREE_PTR_VAL_U64s_MAX				\
	((sizeof(struct bch_btree_ptr_v2) +			\
	  sizeof(struct bch_extent_ptr) * BCH_REPLICAS_MAX) / sizeof(u64))
#define BKEY_BTREE_PTR_U64s_MAX					\
	(BKEY_U64s + BKEY_BTREE_PTR_VAL_U64s_MAX)
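/*
 * Worked out with the struct sizes above (a sketch of the arithmetic):
 * bch_extent_crc128 is 24 bytes and bch_extent_ptr is 8 bytes, so
 * BKEY_EXTENT_PTR_U64s_MAX is 4, and with BCH_REPLICAS_MAX == 4 an extent
 * value is at most 1 + 4 * (4 + 1) = 21 u64s.
 */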
#define BLOCKDEV_INODE_MAX	4096

#define BCACHEFS_ROOT_INO	4096

struct bch_inode {
	struct bch_val		v;

	__le64			bi_hash_seed;
	__le32			bi_flags;
	__le16			bi_mode;
	__u8			fields[0];
} __attribute__((packed, aligned(8)));
struct bch_inode_generation {
	struct bch_val		v;

	__le32			bi_generation;
	__le32			pad;
} __attribute__((packed, aligned(8)));
#define BCH_INODE_FIELDS()			\
	x(bi_generation,		32)	\
	x(bi_data_checksum,		8)	\
	x(bi_compression,		8)	\
	x(bi_background_compression,	8)	\
	x(bi_data_replicas,		8)	\
	x(bi_promote_target,		16)	\
	x(bi_foreground_target,		16)	\
	x(bi_background_target,		16)	\
	x(bi_erasure_code,		16)	\
	x(bi_fields_set,		16)
/* subset of BCH_INODE_FIELDS */
#define BCH_INODE_OPTS()			\
	x(data_checksum,		8)	\
	x(background_compression,	8)	\
	x(data_replicas,		8)	\
	x(promote_target,		16)	\
	x(foreground_target,		16)	\
	x(background_target,		16)	\
	x(erasure_code,			16)
enum inode_opt_id {
#define x(name, ...)				\
	Inode_opt_##name,
	BCH_INODE_OPTS()
#undef x
	Inode_opt_nr,
};
enum {
	/*
	 * User flags (get/settable with FS_IOC_*FLAGS, correspond to FS_*_FL
	 * flags)
	 */
	__BCH_INODE_SYNC		= 0,
	__BCH_INODE_IMMUTABLE		= 1,
	__BCH_INODE_APPEND		= 2,
	__BCH_INODE_NODUMP		= 3,
	__BCH_INODE_NOATIME		= 4,

	__BCH_INODE_I_SIZE_DIRTY	= 5,
	__BCH_INODE_I_SECTORS_DIRTY	= 6,
	__BCH_INODE_UNLINKED		= 7,
	__BCH_INODE_BACKPTR_UNTRUSTED	= 8,

	/* bits 20+ reserved for packed fields below: */
};
#define BCH_INODE_SYNC		(1 << __BCH_INODE_SYNC)
#define BCH_INODE_IMMUTABLE	(1 << __BCH_INODE_IMMUTABLE)
#define BCH_INODE_APPEND	(1 << __BCH_INODE_APPEND)
#define BCH_INODE_NODUMP	(1 << __BCH_INODE_NODUMP)
#define BCH_INODE_NOATIME	(1 << __BCH_INODE_NOATIME)
#define BCH_INODE_I_SIZE_DIRTY	(1 << __BCH_INODE_I_SIZE_DIRTY)
#define BCH_INODE_I_SECTORS_DIRTY (1 << __BCH_INODE_I_SECTORS_DIRTY)
#define BCH_INODE_UNLINKED	(1 << __BCH_INODE_UNLINKED)
#define BCH_INODE_BACKPTR_UNTRUSTED (1 << __BCH_INODE_BACKPTR_UNTRUSTED)
LE32_BITMASK(INODE_STR_HASH,	struct bch_inode, bi_flags, 20, 24);
LE32_BITMASK(INODE_NR_FIELDS,	struct bch_inode, bi_flags, 24, 31);
LE32_BITMASK(INODE_NEW_VARINT,	struct bch_inode, bi_flags, 31, 32);
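/*
 * e.g. (sketch): the accessors generated above pack enum values into the
 * reserved high bits of bi_flags -
 *
 *	SET_INODE_STR_HASH(&inode, BCH_STR_HASH_SIPHASH);
 *	unsigned nr = INODE_NR_FIELDS(&inode);
 */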
/*
 * Dirents (and xattrs) have to implement string lookups; since our b-tree
 * doesn't support arbitrary length strings for the key, we instead index by a
 * 64 bit hash (currently truncated sha1) of the string, stored in the offset
 * field of the key - using linear probing to resolve hash collisions. This also
 * provides us with the readdir cookie posix requires.
 *
 * Linear probing requires us to use whiteouts for deletions, in the event of a
 * collision:
 */
struct bch_dirent {
	struct bch_val		v;

	/* Target inode number: */
	__le64			d_inum;

	/*
	 * Copy of mode bits 12-15 from the target inode - so userspace can get
	 * the filetype without having to do a stat()
	 */
	__u8			d_type;

	__u8			d_name[];
} __attribute__((packed, aligned(8)));
#define BCH_NAME_MAX	(U8_MAX * sizeof(u64) -				\
			 sizeof(struct bkey) -				\
			 offsetof(struct bch_dirent, d_name))
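/*
 * Worked out (assuming the struct layouts above): struct bkey is 5 u64s, and
 * d_name starts 9 bytes into the value, so BCH_NAME_MAX is
 * 255 * 8 - 40 - 9 = 1991 bytes.
 */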
/* Xattrs */

#define KEY_TYPE_XATTR_INDEX_USER		0
#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS	1
#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT	2
#define KEY_TYPE_XATTR_INDEX_TRUSTED		3
#define KEY_TYPE_XATTR_INDEX_SECURITY		4

struct bch_xattr {
	struct bch_val		v;
	__u8			x_type;
	__u8			x_name_len;
	__le16			x_val_len;
	__u8			x_name[];
} __attribute__((packed, aligned(8)));
/* Bucket/allocation information: */

struct bch_alloc {
	struct bch_val		v;
	__u8			fields;
	__u8			gen;
	__u8			data[];
} __attribute__((packed, aligned(8)));
#define BCH_ALLOC_FIELDS_V1()			\
	x(dirty_sectors,	16)		\
	x(cached_sectors,	16)		\
	x(stripe_redundancy,	8)
struct bch_alloc_v2 {
	struct bch_val		v;
	__u8			nr_fields;
	__u8			gen;
	__u8			oldest_gen;
	__u8			data_type;
	__u8			data[];
} __attribute__((packed, aligned(8)));
#define BCH_ALLOC_FIELDS_V2()			\
	x(dirty_sectors,	16)		\
	x(cached_sectors,	16)		\
	x(stripe_redundancy,	8)
enum {
#define x(name, _bits) BCH_ALLOC_FIELD_V1_##name,
	BCH_ALLOC_FIELDS_V1()
#undef x
	BCH_ALLOC_FIELD_NR
};
enum quota_types {
	QTYP_USR		= 0,
	QTYP_GRP		= 1,
	QTYP_PRJ		= 2,
	QTYP_NR			= 3,
};

enum quota_counters {
	Q_SPC			= 0,
	Q_INO			= 1,
	Q_COUNTERS		= 2,
};

struct bch_quota_counter {
	__le64			hardlimit;
	__le64			softlimit;
};

struct bch_quota {
	struct bch_val		v;
	struct bch_quota_counter c[Q_COUNTERS];
} __attribute__((packed, aligned(8)));
struct bch_stripe {
	struct bch_val		v;
	__le16			sectors;
	__u8			algorithm;
	__u8			nr_blocks;
	__u8			nr_redundant;

	__u8			csum_granularity_bits;
	__u8			csum_type;
	__u8			pad;

	struct bch_extent_ptr	ptrs[0];
} __attribute__((packed, aligned(8)));
struct bch_reflink_p {
	struct bch_val		v;
	__le64			idx;

	__le32			reservation_generation;
	__u8			nr_replicas;
	__u8			pad[3];
};
struct bch_reflink_v {
	struct bch_val		v;
	__le64			refcount;
	union bch_extent_entry	start[0];
	__u64			_data[0];
};

struct bch_indirect_inline_data {
	struct bch_val		v;
	__le64			refcount;
	u8			data[0];
};

struct bch_inline_data {
	struct bch_val		v;
	u8			data[0];
};
/* Optional/variable size superblock sections: */

struct bch_sb_field {
	__u64			_data[0];
	__le32			u64s;
	__le32			type;
};
#define BCH_SB_FIELDS()				\
	x(journal_seq_blacklist, 8)
enum bch_sb_field_type {
#define x(f, nr)	BCH_SB_FIELD_##f = nr,
	BCH_SB_FIELDS()
#undef x
	BCH_SB_FIELD_NR
};
/* BCH_SB_FIELD_journal: */

struct bch_sb_field_journal {
	struct bch_sb_field	field;
	__le64			buckets[0];
};
/* BCH_SB_FIELD_members: */

#define BCH_MIN_NR_NBUCKETS	(1 << 6)

struct bch_member {
	uuid_le			uuid;
	__le64			nbuckets;	/* device size */
	__le16			first_bucket;	/* index of first bucket used */
	__le16			bucket_size;	/* sectors */
	__le32			pad;
	__le64			last_mount;	/* time_t */

	__le64			flags[2];
};
LE64_BITMASK(BCH_MEMBER_STATE,		struct bch_member, flags[0],  0,  4)
/* 4-10 unused, was TIER, HAS_(META)DATA */
LE64_BITMASK(BCH_MEMBER_REPLACEMENT,	struct bch_member, flags[0], 10, 14)
LE64_BITMASK(BCH_MEMBER_DISCARD,	struct bch_member, flags[0], 14, 15)
LE64_BITMASK(BCH_MEMBER_DATA_ALLOWED,	struct bch_member, flags[0], 15, 20)
LE64_BITMASK(BCH_MEMBER_GROUP,		struct bch_member, flags[0], 20, 28)
LE64_BITMASK(BCH_MEMBER_DURABILITY,	struct bch_member, flags[0], 28, 30)

#define BCH_TIER_MAX			4U

LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS,	struct bch_member, flags[1],  0, 20);
LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
#define BCH_MEMBER_STATES()			\
	x(rw,		0)			\
	x(ro,		1)			\
	x(failed,	2)			\
	x(spare,	3)

enum bch_member_state {
#define x(t, n) BCH_MEMBER_STATE_##t = n,
	BCH_MEMBER_STATES()
#undef x
	BCH_MEMBER_STATE_NR
};

#define BCH_CACHE_REPLACEMENT_POLICIES()	\
	x(lru,		0)			\
	x(fifo,		1)			\
	x(random,	2)

enum bch_cache_replacement_policies {
#define x(t, n) BCH_CACHE_REPLACEMENT_##t = n,
	BCH_CACHE_REPLACEMENT_POLICIES()
#undef x
	BCH_CACHE_REPLACEMENT_NR
};
struct bch_sb_field_members {
	struct bch_sb_field	field;
	struct bch_member	members[0];
};
/* BCH_SB_FIELD_crypt: */

#define BCH_KEY_MAGIC					\
	(((u64) 'b' <<  0)|((u64) 'c' <<  8)|		\
	 ((u64) 'h' << 16)|((u64) '*' << 24)|		\
	 ((u64) '*' << 32)|((u64) 'k' << 40)|		\
	 ((u64) 'e' << 48)|((u64) 'y' << 56))
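/* i.e. the bytes "bch**key", read as a little endian u64 */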
struct bch_encrypted_key {
	__le64			magic;
	struct bch_key		key;
};
/*
 * If this field is present in the superblock, it stores an encryption key which
 * is used to encrypt all other data/metadata. The key will normally be encrypted
 * with the key userspace provides, but if encryption has been turned off we'll
 * just store the master key unencrypted in the superblock so we can access the
 * previously encrypted data.
 */
struct bch_sb_field_crypt {
	struct bch_sb_field	field;

	__le64			flags;
	__le64			kdf_flags;
	struct bch_encrypted_key key;
};
LE64_BITMASK(BCH_CRYPT_KDF_TYPE,	struct bch_sb_field_crypt, flags, 0, 4);

enum bch_kdf_types {
	BCH_KDF_SCRYPT		= 0,
	BCH_KDF_NR		= 1,
};

/* stored as base 2 log of scrypt params: */
LE64_BITMASK(BCH_KDF_SCRYPT_N,	struct bch_sb_field_crypt, kdf_flags,  0, 16);
LE64_BITMASK(BCH_KDF_SCRYPT_R,	struct bch_sb_field_crypt, kdf_flags, 16, 32);
LE64_BITMASK(BCH_KDF_SCRYPT_P,	struct bch_sb_field_crypt, kdf_flags, 32, 48);
/* BCH_SB_FIELD_replicas: */

#define BCH_DATA_TYPES()		\
	x(none,		0)		\
	x(sb,		1)		\
	x(journal,	2)		\
	x(btree,	3)		\
	x(user,		4)		\
	x(cached,	5)		\
	x(parity,	6)

enum bch_data_type {
#define x(t, n) BCH_DATA_##t,
	BCH_DATA_TYPES()
#undef x
	BCH_DATA_NR
};

struct bch_replicas_entry_v0 {
	__u8			data_type;
	__u8			nr_devs;
	__u8			devs[];
} __attribute__((packed));
struct bch_sb_field_replicas_v0 {
	struct bch_sb_field	field;
	struct bch_replicas_entry_v0 entries[0];
} __attribute__((packed, aligned(8)));
struct bch_replicas_entry {
	__u8			data_type;
	__u8			nr_devs;
	__u8			nr_required;
	__u8			devs[];
} __attribute__((packed));
#define replicas_entry_bytes(_i)					\
	(offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
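/*
 * e.g. (sketch): with the layout above, an entry describing two devices is
 * offsetof(struct bch_replicas_entry, devs) + 2 == 5 bytes.
 */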
struct bch_sb_field_replicas {
	struct bch_sb_field	field;
	struct bch_replicas_entry entries[0];
} __attribute__((packed, aligned(8)));
/* BCH_SB_FIELD_quota: */

struct bch_sb_quota_counter {
	__le32				timelimit;
	__le32				warnlimit;
};

struct bch_sb_quota_type {
	__le64				flags;
	struct bch_sb_quota_counter	c[Q_COUNTERS];
};

struct bch_sb_field_quota {
	struct bch_sb_field		field;
	struct bch_sb_quota_type	q[QTYP_NR];
} __attribute__((packed, aligned(8)));
/* BCH_SB_FIELD_disk_groups: */

#define BCH_SB_LABEL_SIZE		32

struct bch_disk_group {
	__u8			label[BCH_SB_LABEL_SIZE];
	__le64			flags[2];
} __attribute__((packed, aligned(8)));
LE64_BITMASK(BCH_GROUP_DELETED,		struct bch_disk_group, flags[0], 0,  1)
LE64_BITMASK(BCH_GROUP_DATA_ALLOWED,	struct bch_disk_group, flags[0], 1,  6)
LE64_BITMASK(BCH_GROUP_PARENT,		struct bch_disk_group, flags[0], 6, 24)

struct bch_sb_field_disk_groups {
	struct bch_sb_field	field;
	struct bch_disk_group	entries[0];
} __attribute__((packed, aligned(8)));
/*
 * On clean shutdown, store btree roots and current journal sequence number in
 * the superblock:
 */
struct jset_entry {
	__le16			u64s;
	__u8			btree_id;
	__u8			level;
	__u8			type; /* designates what this jset holds */
	__u8			pad[3];

	union {
		struct bkey_i	start[0];
		__u64		_data[0];
	};
};

struct bch_sb_field_clean {
	struct bch_sb_field	field;

	__le32			flags;
	__le16			_read_clock; /* no longer used */
	__le16			_write_clock;
	__le64			journal_seq;

	union {
		struct jset_entry start[0];
		__u64		_data[0];
	};
};
struct journal_seq_blacklist_entry {
	__le64			start;
	__le64			end;
};

struct bch_sb_field_journal_seq_blacklist {
	struct bch_sb_field	field;

	union {
		struct journal_seq_blacklist_entry start[0];
		__u64		_data[0];
	};
};
/*
 * New versioning scheme:
 * One common version number for all on disk data structures - superblock, btree
 * nodes, journal entries
 */
#define BCH_JSET_VERSION_OLD			2
#define BCH_BSET_VERSION_OLD			3
enum bcachefs_metadata_version {
	bcachefs_metadata_version_min			= 9,
	bcachefs_metadata_version_new_versioning	= 10,
	bcachefs_metadata_version_bkey_renumber		= 10,
	bcachefs_metadata_version_inode_btree_change	= 11,
	bcachefs_metadata_version_snapshot		= 12,
	bcachefs_metadata_version_inode_backpointers	= 13,
	bcachefs_metadata_version_max			= 14,
};
#define bcachefs_metadata_version_current	(bcachefs_metadata_version_max - 1)
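/* i.e., per the enum above, currently bcachefs_metadata_version_inode_backpointers (13) */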
#define BCH_SB_SECTOR			8
#define BCH_SB_MEMBERS_MAX		64 /* XXX kill */
struct bch_sb_layout {
	uuid_le			magic;	/* bcachefs superblock UUID */
	__u8			layout_type;
	__u8			sb_max_size_bits; /* base 2 of 512 byte sectors */
	__u8			nr_superblocks;
	__u8			pad[5];
	__le64			sb_offset[61];
} __attribute__((packed, aligned(8)));
#define BCH_SB_LAYOUT_SECTOR	7
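/*
 * The layout fits in a single 512 byte sector (assuming the field sizes above:
 * 16 + 1 + 1 + 1 + 5 + 61 * 8 == 512 bytes), stored at sector 7 immediately
 * before the first superblock at BCH_SB_SECTOR - and, per the comment at the
 * top of this file, a copy is also stored inside every superblock.
 */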
/*
 * @offset	- sector where this sb was written
 * @version	- on disk format version
 * @version_min	- Oldest metadata version this filesystem contains; so we can
 *		  safely drop compatibility code and refuse to mount filesystems
 *		  we'd have to fix
 * @magic	- identifies as a bcachefs superblock (BCACHE_MAGIC)
 * @uuid	- used for generating various magic numbers and identifying
 *		  member devices, never changes
 * @user_uuid	- user visible UUID, may be changed
 * @label	- filesystem label
 * @seq		- identifies most recent superblock, incremented each time
 *		  superblock is written
 * @features	- enabled incompatible features
 */
struct bch_sb {
	struct bch_csum		csum;
	__le16			version;
	__le16			version_min;
	__le16			pad[2];
	uuid_le			magic;
	uuid_le			uuid;
	uuid_le			user_uuid;
	__u8			label[BCH_SB_LABEL_SIZE];
	__le64			offset;
	__le64			seq;

	__le16			block_size;
	__u8			dev_idx;
	__u8			nr_devices;
	__le32			u64s;

	__le64			time_base_lo;
	__le32			time_base_hi;
	__le32			time_precision;

	__le64			flags[8];
	__le64			features[2];
	__le64			compat[2];

	struct bch_sb_layout	layout;

	union {
		struct bch_sb_field start[0];
		__le64		_data[0];
	};
} __attribute__((packed, aligned(8)));
/*
 * Flags:
 * BCH_SB_INITIALIZED	- set on first mount
 * BCH_SB_CLEAN		- did we shut down cleanly? Just a hint, doesn't affect
 *			  behaviour of mount/recovery path:
 * BCH_SB_INODE_32BIT	- limit inode numbers to 32 bits
 * BCH_SB_128_BIT_MACS	- 128 bit macs instead of 80
 * BCH_SB_ENCRYPTION_TYPE - if nonzero encryption is enabled; overrides
 *			   DATA/META_CSUM_TYPE. Also indicates encryption
 *			   algorithm in use, if/when we get more than one
 */
LE16_BITMASK(BCH_SB_BLOCK_SIZE,		struct bch_sb, block_size, 0, 16);

LE64_BITMASK(BCH_SB_INITIALIZED,	struct bch_sb, flags[0],  0,  1);
LE64_BITMASK(BCH_SB_CLEAN,		struct bch_sb, flags[0],  1,  2);
LE64_BITMASK(BCH_SB_CSUM_TYPE,		struct bch_sb, flags[0],  2,  8);
LE64_BITMASK(BCH_SB_ERROR_ACTION,	struct bch_sb, flags[0],  8, 12);

LE64_BITMASK(BCH_SB_BTREE_NODE_SIZE,	struct bch_sb, flags[0], 12, 28);

LE64_BITMASK(BCH_SB_GC_RESERVE,		struct bch_sb, flags[0], 28, 33);
LE64_BITMASK(BCH_SB_ROOT_RESERVE,	struct bch_sb, flags[0], 33, 40);

LE64_BITMASK(BCH_SB_META_CSUM_TYPE,	struct bch_sb, flags[0], 40, 44);
LE64_BITMASK(BCH_SB_DATA_CSUM_TYPE,	struct bch_sb, flags[0], 44, 48);

LE64_BITMASK(BCH_SB_META_REPLICAS_WANT,	struct bch_sb, flags[0], 48, 52);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_WANT,	struct bch_sb, flags[0], 52, 56);

LE64_BITMASK(BCH_SB_POSIX_ACL,		struct bch_sb, flags[0], 56, 57);
LE64_BITMASK(BCH_SB_USRQUOTA,		struct bch_sb, flags[0], 57, 58);
LE64_BITMASK(BCH_SB_GRPQUOTA,		struct bch_sb, flags[0], 58, 59);
LE64_BITMASK(BCH_SB_PRJQUOTA,		struct bch_sb, flags[0], 59, 60);

LE64_BITMASK(BCH_SB_HAS_ERRORS,		struct bch_sb, flags[0], 60, 61);
LE64_BITMASK(BCH_SB_HAS_TOPOLOGY_ERRORS,struct bch_sb, flags[0], 61, 62);

LE64_BITMASK(BCH_SB_BIG_ENDIAN,		struct bch_sb, flags[0], 62, 63);

LE64_BITMASK(BCH_SB_STR_HASH_TYPE,	struct bch_sb, flags[1],  0,  4);
LE64_BITMASK(BCH_SB_COMPRESSION_TYPE,	struct bch_sb, flags[1],  4,  8);
LE64_BITMASK(BCH_SB_INODE_32BIT,	struct bch_sb, flags[1],  8,  9);

LE64_BITMASK(BCH_SB_128_BIT_MACS,	struct bch_sb, flags[1],  9, 10);
LE64_BITMASK(BCH_SB_ENCRYPTION_TYPE,	struct bch_sb, flags[1], 10, 14);
/*
 * Max size of an extent that may require bouncing to read or write
 * (checksummed, compressed): 64k
 */
LE64_BITMASK(BCH_SB_ENCODED_EXTENT_MAX_BITS,
					struct bch_sb, flags[1], 14, 20);
LE64_BITMASK(BCH_SB_META_REPLICAS_REQ,	struct bch_sb, flags[1], 20, 24);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_REQ,	struct bch_sb, flags[1], 24, 28);

LE64_BITMASK(BCH_SB_PROMOTE_TARGET,	struct bch_sb, flags[1], 28, 40);
LE64_BITMASK(BCH_SB_FOREGROUND_TARGET,	struct bch_sb, flags[1], 40, 52);
LE64_BITMASK(BCH_SB_BACKGROUND_TARGET,	struct bch_sb, flags[1], 52, 64);

LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE,
					struct bch_sb, flags[2],  0,  4);
LE64_BITMASK(BCH_SB_GC_RESERVE_BYTES,	struct bch_sb, flags[2],  4, 64);

LE64_BITMASK(BCH_SB_ERASURE_CODE,	struct bch_sb, flags[3],  0, 16);
LE64_BITMASK(BCH_SB_METADATA_TARGET,	struct bch_sb, flags[3], 16, 28);
LE64_BITMASK(BCH_SB_SHARD_INUMS,	struct bch_sb, flags[3], 28, 29);
/*
 * Features:
 *
 * journal_seq_blacklist_v3:	gates BCH_SB_FIELD_journal_seq_blacklist
 * reflink:			gates KEY_TYPE_reflink
 * inline_data:			gates KEY_TYPE_inline_data
 * new_siphash:			gates BCH_STR_HASH_SIPHASH
 * new_extent_overwrite:	gates BTREE_NODE_NEW_EXTENT_OVERWRITE
 */
#define BCH_SB_FEATURES()			\
	x(lz4,				0)	\
	x(gzip,				1)	\
	x(zstd,				2)	\
	x(atomic_nlink,			3)	\
	x(ec,				4)	\
	x(journal_seq_blacklist_v3,	5)	\
	x(reflink,			6)	\
	x(new_siphash,			7)	\
	x(inline_data,			8)	\
	x(new_extent_overwrite,		9)	\
	x(incompressible,		10)	\
	x(btree_ptr_v2,			11)	\
	x(extents_above_btree_updates,	12)	\
	x(btree_updates_journalled,	13)	\
	x(reflink_inline_data,		14)	\
	x(new_varint,			15)	\
	x(journal_no_flush,		16)	\
	x(alloc_v2,			17)	\
	x(extents_across_btree_nodes,	18)
#define BCH_SB_FEATURES_ALWAYS				\
	((1ULL << BCH_FEATURE_new_extent_overwrite)|	\
	 (1ULL << BCH_FEATURE_extents_above_btree_updates)|\
	 (1ULL << BCH_FEATURE_btree_updates_journalled)|\
	 (1ULL << BCH_FEATURE_alloc_v2)|\
	 (1ULL << BCH_FEATURE_extents_across_btree_nodes))

#define BCH_SB_FEATURES_ALL				\
	(BCH_SB_FEATURES_ALWAYS|			\
	 (1ULL << BCH_FEATURE_new_siphash)|		\
	 (1ULL << BCH_FEATURE_btree_ptr_v2)|		\
	 (1ULL << BCH_FEATURE_new_varint)|		\
	 (1ULL << BCH_FEATURE_journal_no_flush))
enum bch_sb_feature {
#define x(f, n) BCH_FEATURE_##f,
	BCH_SB_FEATURES()
#undef x
	BCH_FEATURE_NR,
};
#define BCH_SB_COMPAT()					\
	x(alloc_info,				0)	\
	x(alloc_metadata,			1)	\
	x(extents_above_btree_updates_done,	2)	\
	x(bformat_overflow_done,		3)

enum bch_sb_compat {
#define x(f, n) BCH_COMPAT_##f,
	BCH_SB_COMPAT()
#undef x
	BCH_COMPAT_NR,
};
#define BCH_REPLICAS_MAX		4U

#define BCH_BKEY_PTRS_MAX		16U
#define BCH_ERROR_ACTIONS()		\
	x(continue,		0)	\
	x(ro,			1)	\
	x(panic,		2)

enum bch_error_actions {
#define x(t, n) BCH_ON_ERROR_##t = n,
	BCH_ERROR_ACTIONS()
#undef x
	BCH_ON_ERROR_NR
};
enum bch_str_hash_type {
	BCH_STR_HASH_CRC32C		= 0,
	BCH_STR_HASH_CRC64		= 1,
	BCH_STR_HASH_SIPHASH_OLD	= 2,
	BCH_STR_HASH_SIPHASH		= 3,
	BCH_STR_HASH_NR			= 4,
};
#define BCH_STR_HASH_OPTS()		\
	x(crc32c,		0)	\
	x(crc64,		1)	\
	x(siphash,		2)

enum bch_str_hash_opts {
#define x(t, n) BCH_STR_HASH_OPT_##t = n,
	BCH_STR_HASH_OPTS()
#undef x
	BCH_STR_HASH_OPT_NR
};
enum bch_csum_type {
	BCH_CSUM_NONE				= 0,
	BCH_CSUM_CRC32C_NONZERO			= 1,
	BCH_CSUM_CRC64_NONZERO			= 2,
	BCH_CSUM_CHACHA20_POLY1305_80		= 3,
	BCH_CSUM_CHACHA20_POLY1305_128		= 4,
	BCH_CSUM_CRC32C				= 5,
	BCH_CSUM_CRC64				= 6,
	BCH_CSUM_XXHASH				= 7,
	BCH_CSUM_NR				= 8,
};
static const unsigned bch_crc_bytes[] = {
	[BCH_CSUM_NONE]				= 0,
	[BCH_CSUM_CRC32C_NONZERO]		= 4,
	[BCH_CSUM_CRC32C]			= 4,
	[BCH_CSUM_CRC64_NONZERO]		= 8,
	[BCH_CSUM_CRC64]			= 8,
	[BCH_CSUM_XXHASH]			= 8,
	[BCH_CSUM_CHACHA20_POLY1305_80]		= 10,
	[BCH_CSUM_CHACHA20_POLY1305_128]	= 16,
};
static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type)
{
	switch (type) {
	case BCH_CSUM_CHACHA20_POLY1305_80:
	case BCH_CSUM_CHACHA20_POLY1305_128:
		return true;
	default:
		return false;
	}
}
#define BCH_CSUM_OPTS()			\
	x(none,			0)	\
	x(crc32c,		1)	\
	x(crc64,		2)	\
	x(xxhash,		3)

enum bch_csum_opts {
#define x(t, n) BCH_CSUM_OPT_##t = n,
	BCH_CSUM_OPTS()
#undef x
	BCH_CSUM_OPT_NR
};
#define BCH_COMPRESSION_TYPES()		\
	x(none,			0)	\
	x(lz4_old,		1)	\
	x(gzip,			2)	\
	x(lz4,			3)	\
	x(zstd,			4)	\
	x(incompressible,	5)

enum bch_compression_type {
#define x(t, n) BCH_COMPRESSION_TYPE_##t = n,
	BCH_COMPRESSION_TYPES()
#undef x
	BCH_COMPRESSION_TYPE_NR
};
#define BCH_COMPRESSION_OPTS()		\
	x(none,		0)		\
	x(lz4,		1)		\
	x(gzip,		2)		\
	x(zstd,		3)

enum bch_compression_opts {
#define x(t, n) BCH_COMPRESSION_OPT_##t = n,
	BCH_COMPRESSION_OPTS()
#undef x
	BCH_COMPRESSION_OPT_NR
};
/*
 * Magic numbers
 *
 * The various other data structures have their own magic numbers, which are
 * xored with the first part of the cache set's UUID
 */

#define BCACHE_MAGIC							\
	UUID_LE(0xf67385c6, 0x1a4e, 0xca45,				\
		0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)
#define BCACHEFS_STATFS_MAGIC		0xca451a4e

#define JSET_MAGIC		__cpu_to_le64(0x245235c1a3625032ULL)
#define BSET_MAGIC		__cpu_to_le64(0x90135c78b99e07f5ULL)
static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
{
	__le64 ret;

	memcpy(&ret, &sb->uuid, sizeof(ret));
	return ret;
}

static inline __u64 __jset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch2_sb_magic(sb) ^ JSET_MAGIC);
}

static inline __u64 __bset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch2_sb_magic(sb) ^ BSET_MAGIC);
}
/* Journal */

#define JSET_KEYS_U64s	(sizeof(struct jset_entry) / sizeof(__u64))

#define BCH_JSET_ENTRY_TYPES()			\
	x(btree_keys,		0)		\
	x(btree_root,		1)		\
	x(prio_ptrs,		2)		\
	x(blacklist,		3)		\
	x(blacklist_v2,		4)		\
	x(usage,		5)		\
	x(data_usage,		6)		\
	x(clock,		7)		\
	x(dev_usage,		8)

enum {
#define x(f, nr)	BCH_JSET_ENTRY_##f	= nr,
	BCH_JSET_ENTRY_TYPES()
#undef x
	BCH_JSET_ENTRY_NR
};
/*
 * Journal sequence numbers can be blacklisted: bsets record the max sequence
 * number of all the journal entries they contain updates for, so that on
 * recovery we can ignore those bsets that contain index updates newer than what
 * made it into the journal.
 *
 * This means that we can't reuse that journal_seq - we have to skip it, and
 * then record that we skipped it so that the next time we crash and recover we
 * don't think there was a missing journal entry.
 */
struct jset_entry_blacklist {
	struct jset_entry	entry;
	__le64			seq;
};

struct jset_entry_blacklist_v2 {
	struct jset_entry	entry;
	__le64			start;
	__le64			end;
};
enum {
	FS_USAGE_RESERVED	= 0,
	FS_USAGE_INODES		= 1,
	FS_USAGE_KEY_VERSION	= 2,
	FS_USAGE_NR		= 3
};
struct jset_entry_usage {
	struct jset_entry	entry;
	__le64			v;
} __attribute__((packed));

struct jset_entry_data_usage {
	struct jset_entry	entry;
	__le64			v;
	struct bch_replicas_entry r;
} __attribute__((packed));
struct jset_entry_clock {
	struct jset_entry	entry;
	__u8			rw;
	__u8			pad[7];
	__le64			time;
} __attribute__((packed));

struct jset_entry_dev_usage_type {
	__le64			buckets;
	__le64			sectors;
	__le64			fragmented;
} __attribute__((packed));
struct jset_entry_dev_usage {
	struct jset_entry	entry;
	__le32			dev;
	__u32			pad;

	__le64			buckets_ec;
	__le64			buckets_unavailable;

	struct jset_entry_dev_usage_type d[];
} __attribute__((packed));
/*
 * On disk format for a journal entry:
 * seq is monotonically increasing; every journal entry has its own unique
 * sequence number.
 *
 * last_seq is the oldest journal entry that still has keys the btree hasn't
 * flushed to disk yet.
 *
 * version is for on disk format changes.
 */
struct jset {
	struct bch_csum		csum;

	__le64			magic;
	__le64			seq;
	__le32			version;
	__le32			flags;

	__le32			u64s; /* size of d[] in u64s */

	__u8			encrypted_start[0];

	__le16			_read_clock; /* no longer used */
	__le16			_write_clock;

	/* Sequence number of oldest dirty journal entry */
	__le64			last_seq;

	union {
		struct jset_entry start[0];
		__u64		_data[0];
	};
} __attribute__((packed, aligned(8)));
LE32_BITMASK(JSET_CSUM_TYPE,	struct jset, flags, 0, 4);
LE32_BITMASK(JSET_BIG_ENDIAN,	struct jset, flags, 4, 5);
LE32_BITMASK(JSET_NO_FLUSH,	struct jset, flags, 5, 6);

#define BCH_JOURNAL_BUCKETS_MIN		8
/* Btree: */

#define BCH_BTREE_IDS()				\
	x(extents,	0)			\
	x(inodes,	1)			\
	x(dirents,	2)			\
	x(xattrs,	3)			\
	x(alloc,	4)			\
	x(quotas,	5)			\
	x(stripes,	6)			\
	x(reflink,	7)

enum btree_id {
#define x(kwd, val) BTREE_ID_##kwd = val,
	BCH_BTREE_IDS()
#undef x
	BTREE_ID_NR
};

#define BTREE_MAX_DEPTH		4U
/*
 * Btree nodes
 *
 * On disk a btree node is a list/log of these; within each set the keys are
 * sorted
 */
struct bset {
	__le64			seq;

	/*
	 * Highest journal entry this bset contains keys for.
	 * If on recovery we don't see that journal entry, this bset is ignored:
	 * this allows us to preserve the order of all index updates after a
	 * crash, since the journal records a total order of all index updates
	 * and anything that didn't make it to the journal doesn't get used.
	 */
	__le64			journal_seq;

	__le32			flags;
	__le16			version;
	__le16			u64s; /* count of d[] in u64s */

	union {
		struct bkey_packed start[0];
		__u64		_data[0];
	};
} __attribute__((packed, aligned(8)));
LE32_BITMASK(BSET_CSUM_TYPE,	struct bset, flags, 0, 4);

LE32_BITMASK(BSET_BIG_ENDIAN,	struct bset, flags, 4, 5);
LE32_BITMASK(BSET_SEPARATE_WHITEOUTS,
				struct bset, flags, 5, 6);
struct btree_node {
	struct bch_csum		csum;
	__le64			magic;

	/* this flags field is encrypted, unlike bset->flags: */
	__le64			flags;

	/* Closed interval: */
	struct bpos		min_key;
	struct bpos		max_key;
	struct bch_extent_ptr	_ptr; /* not used anymore */
	struct bkey_format	format;

	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];

	};
	};
} __attribute__((packed, aligned(8)));
LE64_BITMASK(BTREE_NODE_ID,	struct btree_node, flags,  0,  4);
LE64_BITMASK(BTREE_NODE_LEVEL,	struct btree_node, flags,  4,  8);
LE64_BITMASK(BTREE_NODE_NEW_EXTENT_OVERWRITE,
				struct btree_node, flags,  8,  9);
LE64_BITMASK(BTREE_NODE_SEQ,	struct btree_node, flags, 32, 64);
struct btree_node_entry {
	struct bch_csum		csum;

	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];

	};
	};
} __attribute__((packed, aligned(8)));
#endif /* _BCACHEFS_FORMAT_H */