/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_FORMAT_H
#define _BCACHEFS_FORMAT_H
/*
 * bcachefs on disk data structures
 *
 * There are three main types of on disk data structures in bcachefs (this is
 * reduced from 5 in bcache)
 *
 * The btree is the primary structure; most metadata exists as keys in the
 * various btrees. There are only a small number of btrees; they're not
 * sharded - we have one btree for extents, another for inodes, et cetera.
 *
 * SUPERBLOCK:
 *
 * The superblock contains the location of the journal, the list of devices in
 * the filesystem, and in general any metadata we need in order to decide
 * whether we can start a filesystem or prior to reading the journal/btree
 * roots.
 *
 * The superblock is extensible, and most of the contents of the superblock are
 * in variable length, type tagged fields; see struct bch_sb_field.
 *
 * Backup superblocks do not reside in a fixed location; also, superblocks do
 * not have a fixed size. To locate backup superblocks we have struct
 * bch_sb_layout; we store a copy of this inside every superblock, and also
 * before the first superblock.
 *
 * JOURNAL:
 *
 * The journal primarily records btree updates in the order they occurred;
 * journal replay consists of just iterating over all the keys in the open
 * journal entries and re-inserting them into the btrees.
 *
 * The journal also contains entry types for the btree roots, and blacklisted
 * journal sequence numbers (see journal_seq_blacklist.c).
 *
 * BTREE:
 *
 * bcachefs btrees are copy on write b+ trees, where nodes are big (typically
 * 128k-256k) and log structured. We use struct btree_node for writing the first
 * entry in a given node (offset 0), and struct btree_node_entry for all
 * subsequent writes.
 *
 * After the header, btree node entries contain a list of keys in sorted order.
 * Values are stored inline with the keys; since values are variable length (and
 * keys effectively are variable length too, due to packing) we can't do random
 * access without building up additional in memory tables in the btree node read
 * path.
 *
 * BTREE KEYS (struct bkey):
 *
 * The various btrees share a common format for the key - so as to avoid
 * switching in fastpath lookup/comparison code - but define their own
 * structures for the key values.
 *
 * The size of a key/value pair is stored as a u8 in units of u64s, so the max
 * size is just under 2k. The common part also contains a type tag for the
 * value, and a format field indicating whether the key is packed or not (and
 * also meant to allow adding new key fields in the future, if desired).
 *
 * bkeys, when stored within a btree node, may also be packed. In that case, the
 * bkey_format in that node is used to unpack it. Packed bkeys mean that we can
 * be generous with field sizes in the common part of the key format (64 bit
 * inode number, 64 bit offset, 96 bit version field, etc.) for negligible cost.
 */
#include <linux/stddef.h>
#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/uuid.h>
#define LE_BITMASK(_bits, name, type, field, offset, end)		\
static const unsigned	name##_OFFSET = offset;				\
static const unsigned	name##_BITS = (end - offset);			\
static const __u##_bits	name##_MAX = (1ULL << (end - offset)) - 1;	\
									\
static inline __u64 name(const type *k)				\
{									\
	return (__le##_bits##_to_cpu(k->field) >> offset) &		\
		~(~0ULL << (end - offset));				\
}									\
									\
static inline void SET_##name(type *k, __u64 v)			\
{									\
	__u##_bits new = __le##_bits##_to_cpu(k->field);		\
									\
	new &= ~(~(~0ULL << (end - offset)) << offset);			\
	new |= (v & ~(~0ULL << (end - offset))) << offset;		\
	k->field = __cpu_to_le##_bits(new);				\
}

#define LE16_BITMASK(n, t, f, o, e)	LE_BITMASK(16, n, t, f, o, e)
#define LE32_BITMASK(n, t, f, o, e)	LE_BITMASK(32, n, t, f, o, e)
#define LE64_BITMASK(n, t, f, o, e)	LE_BITMASK(64, n, t, f, o, e)
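/*
 * Example (illustrative only, not part of the on-disk format; the struct and
 * names below are ours): instantiating LE_BITMASK generates a getter name()
 * and a setter SET_name() for a bitfield packed into a little-endian word:
 */
#if 0
struct example { __le64 flags; };

LE64_BITMASK(EXAMPLE_FIELD, struct example, flags, 0, 4);

static inline void example_usage(struct example *e)
{
	__u64 v = EXAMPLE_FIELD(e);	/* read bits 0-3 of e->flags */

	SET_EXAMPLE_FIELD(e, v + 1);	/* store them back, endian-safely */
}
#endif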
struct bkey_format {
	__u8		key_u64s;
	__u8		nr_fields;
	/* One unused slot for now: */
	__u8		bits_per_field[6];
	__le64		field_offset[6];
};
/* Btree keys - all units are in sectors */

struct bpos {
	/*
	 * Word order matches machine byte order - btree code treats a bpos as a
	 * single large integer, for search/comparison purposes
	 *
	 * Note that wherever a bpos is embedded in another on disk data
	 * structure, it has to be byte swabbed when reading in metadata that
	 * wasn't written in native endian order:
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u32		snapshot;
	__u64		offset;
	__u64		inode;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	__u64		inode;
	__u64		offset;		/* Points to end of extent - sectors */
	__u32		snapshot;
#else
#error edit for your odd byteorder.
#endif
} __attribute__((packed, aligned(4)));
#define KEY_INODE_MAX			((__u64)~0ULL)
#define KEY_OFFSET_MAX			((__u64)~0ULL)
#define KEY_SNAPSHOT_MAX		((__u32)~0U)
#define KEY_SIZE_MAX			((__u32)~0U)
static inline struct bpos POS(__u64 inode, __u64 offset)
{
	struct bpos ret;

	ret.inode	= inode;
	ret.offset	= offset;
	ret.snapshot	= 0;

	return ret;
}

#define POS_MIN		POS(0, 0)
#define POS_MAX		POS(KEY_INODE_MAX, KEY_OFFSET_MAX)
/* Empty placeholder struct, for container_of() */
struct bch_val {
	__u64		__nothing[0];
};

struct bversion {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u64		lo;
	__u32		hi;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	__u32		hi;
	__u64		lo;
#endif
} __attribute__((packed, aligned(4)));
struct bkey {
	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#else
#error edit for your odd byteorder.
#endif

	/* Type of the value */
	__u8		type;

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u8		pad[1];

	struct bversion	version;
	__u32		size;		/* extent size, in sectors */
	struct bpos	p;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	struct bpos	p;
	__u32		size;		/* extent size, in sectors */
	struct bversion	version;

	__u8		pad[1];
#endif
} __attribute__((packed, aligned(8)));
struct bkey_packed {
	__u64		_data[0];

	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */

	/*
	 * XXX: next incompat on disk format change, switch format and
	 * needs_whiteout - bkey_packed() will be cheaper if format is the high
	 * bits of the bitfield
	 */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#endif

	/* Type of the value */
	__u8		type;
	__u8		key_start[0];

	/*
	 * We copy bkeys with struct assignment in various places, and while
	 * that shouldn't be done with packed bkeys we can't disallow it in C,
	 * and it's legal to cast a bkey to a bkey_packed - so padding it out
	 * to the same size as struct bkey should hopefully be safest.
	 */
	__u8		pad[sizeof(struct bkey) - 3];
} __attribute__((packed, aligned(8)));
#define BKEY_U64s			(sizeof(struct bkey) / sizeof(__u64))
#define BKEY_U64s_MAX			U8_MAX
#define BKEY_VAL_U64s_MAX		(BKEY_U64s_MAX - BKEY_U64s)

#define KEY_PACKED_BITS_START		24

#define KEY_FORMAT_LOCAL_BTREE		0
#define KEY_FORMAT_CURRENT		1
enum bch_bkey_fields {
	BKEY_FIELD_INODE,
	BKEY_FIELD_OFFSET,
	BKEY_FIELD_SNAPSHOT,
	BKEY_FIELD_SIZE,
	BKEY_FIELD_VERSION_HI,
	BKEY_FIELD_VERSION_LO,
	BKEY_NR_FIELDS,
};
#define bkey_format_field(name, field)					\
	[BKEY_FIELD_##name] = (sizeof(((struct bkey *) NULL)->field) * 8)

#define BKEY_FORMAT_CURRENT						\
((struct bkey_format) {						\
	.key_u64s	= BKEY_U64s,					\
	.nr_fields	= BKEY_NR_FIELDS,				\
	.bits_per_field = {						\
		bkey_format_field(INODE,	p.inode),		\
		bkey_format_field(OFFSET,	p.offset),		\
		bkey_format_field(SNAPSHOT,	p.snapshot),		\
		bkey_format_field(SIZE,		size),			\
		bkey_format_field(VERSION_HI,	version.hi),		\
		bkey_format_field(VERSION_LO,	version.lo),		\
	},								\
})
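/*
 * Illustrative sketch (helper and name are ours, not bcachefs's): the packed
 * size of a key under a given format is driven by the per-field bit counts,
 * e.g. summing them gives the total key bits before rounding up to u64s:
 */
static inline unsigned bkey_format_key_bits_sketch(const struct bkey_format *f)
{
	unsigned bits = 0, i;

	for (i = 0; i < f->nr_fields; i++)
		bits += f->bits_per_field[i];

	return bits;
}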
/* bkey with inline value */
struct bkey_i {
	__u64			_data[0];

	union {
	struct {
		/* Size of combined key and value, in u64s */
		__u8	u64s;
	};
	struct {
		struct bkey	k;
		struct bch_val	v;
	};
	};
};

#define KEY(_inode, _offset, _size)					\
((struct bkey) {							\
	.u64s		= BKEY_U64s,					\
	.format		= KEY_FORMAT_CURRENT,				\
	.p		= POS(_inode, _offset),				\
	.size		= _size,					\
})

static inline void bkey_init(struct bkey *k)
{
	*k = KEY(0, 0, 0);
}

#define bkey_bytes(_k)		((_k)->u64s * sizeof(__u64))

#define __BKEY_PADDED(key, pad)					\
	struct { struct bkey_i key; __u64 key ## _pad[pad]; }
/*
 * - DELETED keys are used internally to mark keys that should be ignored but
 *   override keys in composition order. Their version number is ignored.
 *
 * - DISCARDED keys indicate that the data is all 0s because it has been
 *   discarded. DISCARDs may have a version; if the version is nonzero the key
 *   will be persistent, otherwise the key will be dropped whenever the btree
 *   node is rewritten (like DELETED keys).
 *
 * - ERROR: any read of the data returns a read error, as the data was lost due
 *   to a failing device. Like DISCARDED keys, they can be removed (overridden)
 *   by new writes or cluster-wide GC. Node repair can also overwrite them with
 *   the same or a more recent version number, but not with an older version
 *   number.
 *
 * - WHITEOUT: for hash table btrees
 */
#define BCH_BKEY_TYPES()			\
	x(deleted,		0)		\
	x(discard,		1)		\
	x(error,		2)		\
	x(cookie,		3)		\
	x(whiteout,		4)		\
	x(btree_ptr,		5)		\
	x(extent,		6)		\
	x(reservation,		7)		\
	x(inode,		8)		\
	x(inode_generation,	9)		\
	x(dirent,		10)		\
	x(xattr,		11)		\
	x(alloc,		12)		\
	x(quota,		13)		\
	x(stripe,		14)		\
	x(reflink_p,		15)		\
	x(reflink_v,		16)		\
	x(inline_data,		17)		\
	x(btree_ptr_v2,		18)

enum bch_bkey_type {
#define x(name, nr) KEY_TYPE_##name = nr,
	BCH_BKEY_TYPES()
#undef x
	KEY_TYPE_MAX,
};
/* Extents */

/*
 * In extent bkeys, the value is a list of pointers (bch_extent_ptr), optionally
 * preceded by checksum/compression information (bch_extent_crc32 or
 * bch_extent_crc64).
 *
 * One major determining factor in the format of extents is how we handle and
 * represent extents that have been partially overwritten and thus trimmed:
 *
 * If an extent is not checksummed or compressed, when the extent is trimmed we
 * don't have to remember the extent we originally allocated and wrote: we can
 * merely adjust ptr->offset to point to the start of the data that is currently
 * live. The size field in struct bkey records the current (live) size of the
 * extent, and is also used to mean "size of region on disk that we point to" in
 * this case.
 *
 * Thus an extent that is not checksummed or compressed will consist only of a
 * list of bch_extent_ptrs, with none of the fields in
 * bch_extent_crc32/bch_extent_crc64.
 *
 * When an extent is checksummed or compressed, it's not possible to read only
 * the data that is currently live: we have to read the entire extent that was
 * originally written, and then return only the part of the extent that is
 * currently live.
 *
 * Thus, in addition to the current size of the extent in struct bkey, we need
 * to store the size of the originally allocated space - this is the
 * compressed_size and uncompressed_size fields in bch_extent_crc32/64. Also,
 * when the extent is trimmed, instead of modifying the offset field of the
 * pointer, we keep a second smaller offset field - "offset into the original
 * extent of the currently live region".
 *
 * The other major determining factor is replication and data migration:
 *
 * Each pointer may have its own bch_extent_crc32/64. When doing a replicated
 * write, we will initially write all the replicas in the same format, with the
 * same checksum type and compression format - however, when copygc runs later (or
 * tiering/cache promotion, anything that moves data), it is not in general
 * going to rewrite all the pointers at once - one of the replicas may be in a
 * bucket on one device that has very little fragmentation while another lives
 * in a bucket that has become heavily fragmented, and thus is being rewritten
 * sooner than the rest.
 *
 * Thus it will only move a subset of the pointers (or in the case of
 * tiering/cache promotion perhaps add a single pointer without dropping any
 * current pointers), and if the extent has been partially overwritten it must
 * write only the currently live portion (or copygc would not be able to reduce
 * fragmentation!) - which necessitates a different bch_extent_crc format for
 * the new pointer.
 *
 * But in the interests of space efficiency, we don't want to store one
 * bch_extent_crc for each pointer if we don't have to.
 *
 * Thus, a bch_extent consists of bch_extent_crc32s, bch_extent_crc64s, and
 * bch_extent_ptrs appended arbitrarily one after the other. We determine the
 * type of a given entry with a scheme similar to utf8 (except we're encoding a
 * type, not a size), encoding the type in the position of the first set bit
 * (see BCH_EXTENT_ENTRY_TYPES() below):
 *
 * bch_extent_ptr	- 0b1
 * bch_extent_crc32	- 0b10
 * bch_extent_crc64	- 0b100
 *
 * We do it this way because bch_extent_crc32 is _very_ constrained on bits (and
 * bch_extent_crc64 is the least constrained).
 *
 * Then, each bch_extent_crc32/64 applies to the pointers that follow after it,
 * until the next bch_extent_crc32/64.
 *
 * If there are no bch_extent_crcs preceding a bch_extent_ptr, then that pointer
 * is neither checksummed nor compressed.
 */
/* 128 bits, sufficient for cryptographic MACs: */
struct bch_csum {
	__le64			lo;
	__le64			hi;
} __attribute__((packed, aligned(8)));
#define BCH_EXTENT_ENTRY_TYPES()		\
	x(ptr,			0)		\
	x(crc32,		1)		\
	x(crc64,		2)		\
	x(crc128,		3)		\
	x(stripe_ptr,		4)
#define BCH_EXTENT_ENTRY_MAX	5

enum bch_extent_entry_type {
#define x(f, n) BCH_EXTENT_ENTRY_##f = n,
	BCH_EXTENT_ENTRY_TYPES()
#undef x
};
/* Compressed/uncompressed size are stored biased by 1: */
struct bch_extent_crc32 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u32			type:2,
				_compressed_size:7,
				_uncompressed_size:7,
				offset:7,
				_unused:1,
				csum_type:4,
				compression_type:4;
	__u32			csum;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u32			csum;
	__u32			compression_type:4,
				csum_type:4,
				_unused:1,
				offset:7,
				_uncompressed_size:7,
				_compressed_size:7,
				type:2;
#endif
} __attribute__((packed, aligned(8)));
#define CRC32_SIZE_MAX		(1U << 7)
#define CRC32_NONCE_MAX		0
struct bch_extent_crc64 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:3,
				_compressed_size:9,
				_uncompressed_size:9,
				offset:9,
				nonce:10,
				csum_type:4,
				compression_type:4,
				csum_hi:16;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			csum_hi:16,
				compression_type:4,
				csum_type:4,
				nonce:10,
				offset:9,
				_uncompressed_size:9,
				_compressed_size:9,
				type:3;
#endif
	__u64			csum_lo;
} __attribute__((packed, aligned(8)));
#define CRC64_SIZE_MAX		(1U << 9)
#define CRC64_NONCE_MAX		((1U << 10) - 1)
struct bch_extent_crc128 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:4,
				_compressed_size:13,
				_uncompressed_size:13,
				offset:13,
				nonce:13,
				csum_type:4,
				compression_type:4;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			compression_type:4,
				csum_type:4,
				nonce:13,
				offset:13,
				_uncompressed_size:13,
				_compressed_size:13,
				type:4;
#endif
	struct bch_csum		csum;
} __attribute__((packed, aligned(8)));
#define CRC128_SIZE_MAX		(1U << 13)
#define CRC128_NONCE_MAX	((1U << 13) - 1)
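/*
 * Illustrative sketch (helper name is ours): since sizes are stored biased by
 * 1, the 7-bit fields in bch_extent_crc32 represent 1-128 sectors (hence
 * CRC32_SIZE_MAX above), and decoding just adds the bias back:
 */
static inline unsigned crc32_uncompressed_size_sketch(const struct bch_extent_crc32 *crc)
{
	return crc->_uncompressed_size + 1;
}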
/*
 * @reservation - pointer hasn't been written to, just reserved
 */
struct bch_extent_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:1,
				cached:1,
				unused:1,
				reservation:1,
				offset:44, /* 8 petabytes */
				dev:8,
				gen:8;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			gen:8,
				dev:8,
				offset:44,
				reservation:1,
				unused:1,
				cached:1,
				type:1;
#endif
} __attribute__((packed, aligned(8)));
struct bch_extent_stripe_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:5,
				block:8,
				idx:51;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			idx:51,
				block:8,
				type:5;
#endif
};

struct bch_extent_reservation {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:6,
				unused:22,
				replicas:4,
				generation:32;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			generation:32,
				replicas:4,
				unused:22,
				type:6;
#endif
};
union bch_extent_entry {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ || __BITS_PER_LONG == 64
	unsigned long			type;
#elif __BITS_PER_LONG == 32
	struct {
		unsigned long		pad;
		unsigned long		type;
	};
#else
#error edit for your odd byteorder.
#endif

#define x(f, n) struct bch_extent_##f	f;
	BCH_EXTENT_ENTRY_TYPES()
#undef x
};
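/*
 * Illustrative sketch (helper name is ours; the real decoder lives with the
 * extent code): per the comment above, an entry's type is encoded in the
 * position of its first set bit, so decoding is a find-first-set. Assumes at
 * least one type bit is set:
 */
static inline enum bch_extent_entry_type
extent_entry_type_sketch(const union bch_extent_entry *e)
{
	return __builtin_ctzll(e->type);
}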
struct bch_btree_ptr {
	struct bch_val		v;

	struct bch_extent_ptr	start[0];
	__u64			_data[0];
} __attribute__((packed, aligned(8)));
struct bch_btree_ptr_v2 {
	struct bch_val		v;

	__u64			mem_ptr;
	__le64			seq;
	__le16			sectors_written;
	/* In case we ever decide to do variable size btree nodes: */
	__le16			sectors;
	struct bpos		min_key;
	struct bch_extent_ptr	start[0];
	__u64			_data[0];
} __attribute__((packed, aligned(8)));
struct bch_extent {
	struct bch_val		v;

	union bch_extent_entry	start[0];
	__u64			_data[0];
} __attribute__((packed, aligned(8)));

struct bch_reservation {
	struct bch_val		v;

	__le32			generation;
	__u8			nr_replicas;
	__u8			pad[3];
} __attribute__((packed, aligned(8)));
/* Maximum size (in u64s) a single pointer could be: */
#define BKEY_EXTENT_PTR_U64s_MAX				\
	((sizeof(struct bch_extent_crc128) +			\
	  sizeof(struct bch_extent_ptr)) / sizeof(u64))

/* Maximum possible size of an entire extent value: */
#define BKEY_EXTENT_VAL_U64s_MAX				\
	(1 + BKEY_EXTENT_PTR_U64s_MAX * (BCH_REPLICAS_MAX + 1))

#define BKEY_PADDED(key)	__BKEY_PADDED(key, BKEY_EXTENT_VAL_U64s_MAX)

/* Maximum possible size of an entire extent, key + value: */
#define BKEY_EXTENT_U64s_MAX		(BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)

/* Btree pointers don't carry around checksums: */
#define BKEY_BTREE_PTR_VAL_U64s_MAX				\
	((sizeof(struct bch_btree_ptr_v2) +			\
	  sizeof(struct bch_extent_ptr) * BCH_REPLICAS_MAX) / sizeof(u64))
#define BKEY_BTREE_PTR_U64s_MAX					\
	(BKEY_U64s + BKEY_BTREE_PTR_VAL_U64s_MAX)
/* Inodes */

#define BLOCKDEV_INODE_MAX	4096

#define BCACHEFS_ROOT_INO	4096

struct bch_inode {
	struct bch_val		v;

	__le64			bi_hash_seed;
	__le32			bi_flags;
	__le16			bi_mode;
	__u8			fields[0];
} __attribute__((packed, aligned(8)));

struct bch_inode_generation {
	struct bch_val		v;

	__le32			bi_generation;
	__le32			pad;
} __attribute__((packed, aligned(8)));
#define BCH_INODE_FIELDS()			\
	x(bi_atime,			64)	\
	x(bi_ctime,			64)	\
	x(bi_mtime,			64)	\
	x(bi_otime,			64)	\
	x(bi_size,			64)	\
	x(bi_sectors,			64)	\
	x(bi_uid,			32)	\
	x(bi_gid,			32)	\
	x(bi_nlink,			32)	\
	x(bi_generation,		32)	\
	x(bi_dev,			32)	\
	x(bi_data_checksum,		8)	\
	x(bi_compression,		8)	\
	x(bi_project,			32)	\
	x(bi_background_compression,	8)	\
	x(bi_data_replicas,		8)	\
	x(bi_promote_target,		16)	\
	x(bi_foreground_target,		16)	\
	x(bi_background_target,		16)	\
	x(bi_erasure_code,		16)

/* subset of BCH_INODE_FIELDS */
#define BCH_INODE_OPTS()			\
	x(data_checksum,		8)	\
	x(compression,			8)	\
	x(project,			32)	\
	x(background_compression,	8)	\
	x(data_replicas,		8)	\
	x(promote_target,		16)	\
	x(foreground_target,		16)	\
	x(background_target,		16)	\
	x(erasure_code,			16)
enum inode_opt_id {
#define x(name, ...)				\
	Inode_opt_##name,
	BCH_INODE_OPTS()
#undef x
	Inode_opt_nr,
};
enum {
	/*
	 * User flags (get/settable with FS_IOC_*FLAGS, correspond to FS_*_FL
	 * flags)
	 */
	__BCH_INODE_SYNC	= 0,
	__BCH_INODE_IMMUTABLE	= 1,
	__BCH_INODE_APPEND	= 2,
	__BCH_INODE_NODUMP	= 3,
	__BCH_INODE_NOATIME	= 4,

	__BCH_INODE_I_SIZE_DIRTY= 5,
	__BCH_INODE_I_SECTORS_DIRTY= 6,
	__BCH_INODE_UNLINKED	= 7,

	/* bits 20+ reserved for packed fields below: */
};
#define BCH_INODE_SYNC		(1 << __BCH_INODE_SYNC)
#define BCH_INODE_IMMUTABLE	(1 << __BCH_INODE_IMMUTABLE)
#define BCH_INODE_APPEND	(1 << __BCH_INODE_APPEND)
#define BCH_INODE_NODUMP	(1 << __BCH_INODE_NODUMP)
#define BCH_INODE_NOATIME	(1 << __BCH_INODE_NOATIME)
#define BCH_INODE_I_SIZE_DIRTY	(1 << __BCH_INODE_I_SIZE_DIRTY)
#define BCH_INODE_I_SECTORS_DIRTY (1 << __BCH_INODE_I_SECTORS_DIRTY)
#define BCH_INODE_UNLINKED	(1 << __BCH_INODE_UNLINKED)

LE32_BITMASK(INODE_STR_HASH,	struct bch_inode, bi_flags, 20, 24);
LE32_BITMASK(INODE_NR_FIELDS,	struct bch_inode, bi_flags, 24, 32);
/* Dirents */

/*
 * Dirents (and xattrs) have to implement string lookups; since our b-tree
 * doesn't support arbitrary length strings for the key, we instead index by a
 * 64 bit hash (currently truncated sha1) of the string, stored in the offset
 * field of the key - using linear probing to resolve hash collisions. This also
 * provides us with the readdir cookie posix requires.
 *
 * Linear probing requires us to use whiteouts for deletions, in the event of a
 * collision:
 */

struct bch_dirent {
	struct bch_val		v;

	/* Target inode number: */
	__le64			d_inum;

	/*
	 * Copy of mode bits 12-15 from the target inode - so userspace can get
	 * the filetype without having to do a stat()
	 */
	__u8			d_type;

	__u8			d_name[];
} __attribute__((packed, aligned(8)));

#define BCH_NAME_MAX	(U8_MAX * sizeof(u64) -				\
			 sizeof(struct bkey) -				\
			 offsetof(struct bch_dirent, d_name))
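/*
 * Illustrative sketch (helper name is ours): dirents are indexed by
 * POS(directory inode, hash of name). The hash value here is a stand-in -
 * the real lookup code hashes with the type selected by BCH_SB_STR_HASH_TYPE:
 */
static inline struct bpos dirent_pos_sketch(__u64 dir_inum, __u64 name_hash)
{
	return POS(dir_inum, name_hash);
}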
/* Xattrs */

#define KEY_TYPE_XATTR_INDEX_USER		0
#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS	1
#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT	2
#define KEY_TYPE_XATTR_INDEX_TRUSTED		3
#define KEY_TYPE_XATTR_INDEX_SECURITY		4

struct bch_xattr {
	struct bch_val		v;
	__u8			x_type;
	__u8			x_name_len;
	__le16			x_val_len;
	__u8			x_name[];
} __attribute__((packed, aligned(8)));
/* Bucket/allocation information: */

struct bch_alloc {
	struct bch_val		v;
	__u8			fields;
	__u8			gen;
	__u8			data[];
} __attribute__((packed, aligned(8)));
#define BCH_ALLOC_FIELDS()			\
	x(read_time,		16)		\
	x(write_time,		16)		\
	x(data_type,		8)		\
	x(dirty_sectors,	16)		\
	x(cached_sectors,	16)		\
	x(oldest_gen,		8)

enum {
#define x(name, bytes) BCH_ALLOC_FIELD_##name,
	BCH_ALLOC_FIELDS()
#undef x
	BCH_ALLOC_FIELD_NR
};

static const unsigned BCH_ALLOC_FIELD_BYTES[] = {
#define x(name, bits) [BCH_ALLOC_FIELD_##name] = bits / 8,
	BCH_ALLOC_FIELDS()
#undef x
};

/* here x() expands to "+ (bits / 8)", summing the field sizes onto the offsetof: */
#define x(name, bits) + (bits / 8)
static const unsigned BKEY_ALLOC_VAL_U64s_MAX =
	DIV_ROUND_UP(offsetof(struct bch_alloc, data)
		     BCH_ALLOC_FIELDS(), sizeof(u64));
#undef x

#define BKEY_ALLOC_U64s_MAX	(BKEY_U64s + BKEY_ALLOC_VAL_U64s_MAX)
/* Quotas: */

enum quota_types {
	QTYP_USR		= 0,
	QTYP_GRP		= 1,
	QTYP_PRJ		= 2,
	QTYP_NR			= 3,
};

enum quota_counters {
	Q_SPC			= 0,
	Q_INO			= 1,
	Q_COUNTERS		= 2,
};

struct bch_quota_counter {
	__le64			hardlimit;
	__le64			softlimit;
};

struct bch_quota {
	struct bch_val		v;
	struct bch_quota_counter c[Q_COUNTERS];
} __attribute__((packed, aligned(8)));
/* Erasure coding */

struct bch_stripe {
	struct bch_val		v;
	__le16			sectors;
	__u8			algorithm;
	__u8			nr_blocks;
	__u8			nr_redundant;

	__u8			csum_granularity_bits;
	__u8			csum_type;
	__u8			pad;

	struct bch_extent_ptr	ptrs[0];
} __attribute__((packed, aligned(8)));
/* Reflink: */

struct bch_reflink_p {
	struct bch_val		v;
	__le64			idx;

	__le32			reservation_generation;
	__u8			nr_replicas;
	__u8			pad[3];
};

struct bch_reflink_v {
	struct bch_val		v;
	__le64			refcount;
	union bch_extent_entry	start[0];
	__u64			_data[0];
};

/* Inline data */

struct bch_inline_data {
	struct bch_val		v;
	__u8			data[0];
};
/* Optional/variable size superblock sections: */

struct bch_sb_field {
	__u64			_data[0];
	__le32			u64s;
	__le32			type;
};

#define BCH_SB_FIELDS()				\
	x(journal,			0)	\
	x(members,			1)	\
	x(crypt,			2)	\
	x(replicas_v0,			3)	\
	x(quota,			4)	\
	x(disk_groups,			5)	\
	x(clean,			6)	\
	x(replicas,			7)	\
	x(journal_seq_blacklist,	8)

enum bch_sb_field_type {
#define x(f, nr)	BCH_SB_FIELD_##f = nr,
	BCH_SB_FIELDS()
#undef x
	BCH_SB_FIELD_NR
};
/* BCH_SB_FIELD_journal: */

struct bch_sb_field_journal {
	struct bch_sb_field	field;
	__le64			buckets[0];
};
/* BCH_SB_FIELD_members: */

#define BCH_MIN_NR_NBUCKETS	(1 << 6)

struct bch_member {
	uuid_le			uuid;
	__le64			nbuckets;	/* device size */
	__le16			first_bucket;	/* index of first bucket used */
	__le16			bucket_size;	/* sectors */
	__le32			pad;
	__le64			last_mount;	/* time_t */

	__le64			flags[2];
};
LE64_BITMASK(BCH_MEMBER_STATE,		struct bch_member, flags[0],  0,  4)
/* 4-10 unused, was TIER, HAS_(META)DATA */
LE64_BITMASK(BCH_MEMBER_REPLACEMENT,	struct bch_member, flags[0], 10, 14)
LE64_BITMASK(BCH_MEMBER_DISCARD,	struct bch_member, flags[0], 14, 15)
LE64_BITMASK(BCH_MEMBER_DATA_ALLOWED,	struct bch_member, flags[0], 15, 20)
LE64_BITMASK(BCH_MEMBER_GROUP,		struct bch_member, flags[0], 20, 28)
LE64_BITMASK(BCH_MEMBER_DURABILITY,	struct bch_member, flags[0], 28, 30)

#define BCH_TIER_MAX			4U

#if 0
LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS,	struct bch_member, flags[1],  0, 20);
LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
#endif
enum bch_member_state {
	BCH_MEMBER_STATE_RW		= 0,
	BCH_MEMBER_STATE_RO		= 1,
	BCH_MEMBER_STATE_FAILED		= 2,
	BCH_MEMBER_STATE_SPARE		= 3,
	BCH_MEMBER_STATE_NR		= 4,
};

enum cache_replacement {
	CACHE_REPLACEMENT_LRU		= 0,
	CACHE_REPLACEMENT_FIFO		= 1,
	CACHE_REPLACEMENT_RANDOM	= 2,
	CACHE_REPLACEMENT_NR		= 3,
};

struct bch_sb_field_members {
	struct bch_sb_field	field;
	struct bch_member	members[0];
};
/* BCH_SB_FIELD_crypt: */

struct nonce {
	__le32			d[4];
};

struct bch_key {
	__le64			key[4];
};

#define BCH_KEY_MAGIC					\
	(((u64) 'b' <<  0)|((u64) 'c' <<  8)|		\
	 ((u64) 'h' << 16)|((u64) '*' << 24)|		\
	 ((u64) '*' << 32)|((u64) 'k' << 40)|		\
	 ((u64) 'e' << 48)|((u64) 'y' << 56))

struct bch_encrypted_key {
	__le64			magic;
	struct bch_key		key;
};
/*
 * If this field is present in the superblock, it stores an encryption key
 * which is used to encrypt all other data/metadata. The key will normally be
 * encrypted with the key userspace provides, but if encryption has been turned
 * off we'll just store the master key unencrypted in the superblock so we can
 * access the previously encrypted data.
 */
struct bch_sb_field_crypt {
	struct bch_sb_field	field;

	__le64			flags;
	__le64			kdf_flags;
	struct bch_encrypted_key key;
};
LE64_BITMASK(BCH_CRYPT_KDF_TYPE,	struct bch_sb_field_crypt, flags, 0, 4);

enum bch_kdf_types {
	BCH_KDF_SCRYPT		= 0,
	BCH_KDF_NR		= 1,
};

/* stored as base 2 log of scrypt params: */
LE64_BITMASK(BCH_KDF_SCRYPT_N,	struct bch_sb_field_crypt, kdf_flags,  0, 16);
LE64_BITMASK(BCH_KDF_SCRYPT_R,	struct bch_sb_field_crypt, kdf_flags, 16, 32);
LE64_BITMASK(BCH_KDF_SCRYPT_P,	struct bch_sb_field_crypt, kdf_flags, 32, 48);
/* BCH_SB_FIELD_replicas: */

enum bch_data_type {
	BCH_DATA_NONE		= 0,
	BCH_DATA_SB		= 1,
	BCH_DATA_JOURNAL	= 2,
	BCH_DATA_BTREE		= 3,
	BCH_DATA_USER		= 4,
	BCH_DATA_CACHED		= 5,
	BCH_DATA_NR		= 6,
};

struct bch_replicas_entry_v0 {
	__u8			data_type;
	__u8			nr_devs;
	__u8			devs[0];
} __attribute__((packed));

struct bch_sb_field_replicas_v0 {
	struct bch_sb_field	field;
	struct bch_replicas_entry_v0 entries[0];
} __attribute__((packed, aligned(8)));
struct bch_replicas_entry {
	__u8			data_type;
	__u8			nr_devs;
	__u8			nr_required;
	__u8			devs[0];
} __attribute__((packed));

#define replicas_entry_bytes(_i)			\
	(offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)

struct bch_sb_field_replicas {
	struct bch_sb_field	field;
	struct bch_replicas_entry entries[0];
} __attribute__((packed, aligned(8)));
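/*
 * Illustrative sketch (macro name is ours): since entries are variable
 * length, walking a replicas field means advancing by replicas_entry_bytes()
 * at each step:
 */
#define replicas_entry_next_sketch(_i)				\
	((typeof(_i)) ((void *) (_i) + replicas_entry_bytes(_i)))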
/* BCH_SB_FIELD_quota: */

struct bch_sb_quota_counter {
	__le32				timelimit;
	__le32				warnlimit;
};

struct bch_sb_quota_type {
	__le64				flags;
	struct bch_sb_quota_counter	c[Q_COUNTERS];
};

struct bch_sb_field_quota {
	struct bch_sb_field		field;
	struct bch_sb_quota_type	q[QTYP_NR];
} __attribute__((packed, aligned(8)));
/* BCH_SB_FIELD_disk_groups: */

#define BCH_SB_LABEL_SIZE		32

struct bch_disk_group {
	__u8			label[BCH_SB_LABEL_SIZE];
	__le64			flags[2];
} __attribute__((packed, aligned(8)));

LE64_BITMASK(BCH_GROUP_DELETED,		struct bch_disk_group, flags[0], 0,  1)
LE64_BITMASK(BCH_GROUP_DATA_ALLOWED,	struct bch_disk_group, flags[0], 1,  6)
LE64_BITMASK(BCH_GROUP_PARENT,		struct bch_disk_group, flags[0], 6, 24)

struct bch_sb_field_disk_groups {
	struct bch_sb_field	field;
	struct bch_disk_group	entries[0];
} __attribute__((packed, aligned(8)));
/*
 * On clean shutdown, store btree roots and current journal sequence number in
 * the superblock:
 */

struct jset_entry {
	__le16			u64s;
	__u8			btree_id;
	__u8			level;
	__u8			type; /* designates what this jset holds */
	__u8			pad[3];

	union {
		struct bkey_i	start[0];
		__u64		_data[0];
	};
};

struct bch_sb_field_clean {
	struct bch_sb_field	field;

	__le32			flags;
	__le16			read_clock;
	__le16			write_clock;
	__le64			journal_seq;

	union {
		struct jset_entry start[0];
		__u64		_data[0];
	};
};
struct journal_seq_blacklist_entry {
	__le64			start;
	__le64			end;
};

struct bch_sb_field_journal_seq_blacklist {
	struct bch_sb_field	field;

	union {
		struct journal_seq_blacklist_entry start[0];
		__u64		_data[0];
	};
};
/* Superblock: */

/*
 * New versioning scheme:
 * One common version number for all on disk data structures - superblock, btree
 * nodes, journal entries
 */
#define BCH_JSET_VERSION_OLD			2
#define BCH_BSET_VERSION_OLD			3

enum bcachefs_metadata_version {
	bcachefs_metadata_version_min			= 9,
	bcachefs_metadata_version_new_versioning	= 10,
	bcachefs_metadata_version_bkey_renumber		= 10,
	bcachefs_metadata_version_inode_btree_change	= 11,
	bcachefs_metadata_version_max			= 12,
};

#define bcachefs_metadata_version_current	(bcachefs_metadata_version_max - 1)
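/*
 * Illustrative sketch (helper name is ours): a filesystem is readable when
 * its on disk version falls in [bcachefs_metadata_version_min,
 * bcachefs_metadata_version_current]:
 */
static inline _Bool bch2_version_readable_sketch(unsigned version)
{
	return version >= bcachefs_metadata_version_min &&
	       version <= bcachefs_metadata_version_current;
}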
#define BCH_SB_SECTOR			8
#define BCH_SB_MEMBERS_MAX		64 /* XXX kill */

struct bch_sb_layout {
	uuid_le			magic;	/* bcachefs superblock UUID */
	__u8			layout_type;
	__u8			sb_max_size_bits; /* base 2 of 512 byte sectors */
	__u8			nr_superblocks;
	__u8			pad[5];
	__le64			sb_offset[61];
} __attribute__((packed, aligned(8)));

#define BCH_SB_LAYOUT_SECTOR	7
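/*
 * Illustrative sketch (helper name is ours): per the field comment above,
 * sb_max_size_bits is the base 2 log of the superblock's maximum size in
 * 512 byte sectors:
 */
static inline __u64 sb_max_size_bytes_sketch(const struct bch_sb_layout *l)
{
	return 512ULL << l->sb_max_size_bits;
}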
/*
 * @offset	- sector where this sb was written
 * @version	- on disk format version
 * @version_min	- Oldest metadata version this filesystem contains; so we can
 *		  safely drop compatibility code and refuse to mount filesystems
 *		  we'd need it for
 * @magic	- identifies as a bcachefs superblock (BCACHE_MAGIC)
 * @seq		- incremented each time superblock is written
 * @uuid	- used for generating various magic numbers and identifying
 *		  member devices, never changes
 * @user_uuid	- user visible UUID, may be changed
 * @label	- filesystem label
 * @seq		- identifies most recent superblock, incremented each time
 *		  superblock is written
 * @features	- enabled incompatible features
 */
struct bch_sb {
	struct bch_csum		csum;
	__le16			version;
	__le16			version_min;
	__le16			pad[2];
	uuid_le			magic;
	uuid_le			uuid;
	uuid_le			user_uuid;
	__u8			label[BCH_SB_LABEL_SIZE];
	__le64			offset;
	__le64			seq;

	__le16			block_size;
	__u8			dev_idx;
	__u8			nr_devices;
	__le32			u64s;

	__le64			time_base_lo;
	__le32			time_base_hi;
	__le32			time_precision;

	__le64			flags[8];
	__le64			features[2];
	__le64			compat[2];

	struct bch_sb_layout	layout;

	union {
		struct bch_sb_field start[0];
		__le64		_data[0];
	};
} __attribute__((packed, aligned(8)));
/*
 * Flags:
 * BCH_SB_INITIALIZED	- set on first mount
 * BCH_SB_CLEAN		- did we shut down cleanly? Just a hint, doesn't affect
 *			  behaviour of mount/recovery path:
 * BCH_SB_INODE_32BIT	- limit inode numbers to 32 bits
 * BCH_SB_128_BIT_MACS	- 128 bit macs instead of 80
 * BCH_SB_ENCRYPTION_TYPE - if nonzero encryption is enabled; overrides
 *			  DATA/META_CSUM_TYPE. Also indicates encryption
 *			  algorithm in use, if/when we get more than one
 */
LE16_BITMASK(BCH_SB_BLOCK_SIZE,		struct bch_sb, block_size, 0, 16);

LE64_BITMASK(BCH_SB_INITIALIZED,	struct bch_sb, flags[0],  0,  1);
LE64_BITMASK(BCH_SB_CLEAN,		struct bch_sb, flags[0],  1,  2);
LE64_BITMASK(BCH_SB_CSUM_TYPE,		struct bch_sb, flags[0],  2,  8);
LE64_BITMASK(BCH_SB_ERROR_ACTION,	struct bch_sb, flags[0],  8, 12);

LE64_BITMASK(BCH_SB_BTREE_NODE_SIZE,	struct bch_sb, flags[0], 12, 28);

LE64_BITMASK(BCH_SB_GC_RESERVE,		struct bch_sb, flags[0], 28, 33);
LE64_BITMASK(BCH_SB_ROOT_RESERVE,	struct bch_sb, flags[0], 33, 40);

LE64_BITMASK(BCH_SB_META_CSUM_TYPE,	struct bch_sb, flags[0], 40, 44);
LE64_BITMASK(BCH_SB_DATA_CSUM_TYPE,	struct bch_sb, flags[0], 44, 48);

LE64_BITMASK(BCH_SB_META_REPLICAS_WANT,	struct bch_sb, flags[0], 48, 52);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_WANT,	struct bch_sb, flags[0], 52, 56);

LE64_BITMASK(BCH_SB_POSIX_ACL,		struct bch_sb, flags[0], 56, 57);
LE64_BITMASK(BCH_SB_USRQUOTA,		struct bch_sb, flags[0], 57, 58);
LE64_BITMASK(BCH_SB_GRPQUOTA,		struct bch_sb, flags[0], 58, 59);
LE64_BITMASK(BCH_SB_PRJQUOTA,		struct bch_sb, flags[0], 59, 60);

LE64_BITMASK(BCH_SB_HAS_ERRORS,		struct bch_sb, flags[0], 60, 61);

LE64_BITMASK(BCH_SB_STR_HASH_TYPE,	struct bch_sb, flags[1],  0,  4);
LE64_BITMASK(BCH_SB_COMPRESSION_TYPE,	struct bch_sb, flags[1],  4,  8);
LE64_BITMASK(BCH_SB_INODE_32BIT,	struct bch_sb, flags[1],  8,  9);

LE64_BITMASK(BCH_SB_128_BIT_MACS,	struct bch_sb, flags[1],  9, 10);
LE64_BITMASK(BCH_SB_ENCRYPTION_TYPE,	struct bch_sb, flags[1], 10, 14);

/*
 * Max size of an extent that may require bouncing to read or write
 * (checksummed, compressed): 64k
 */
LE64_BITMASK(BCH_SB_ENCODED_EXTENT_MAX_BITS,
					struct bch_sb, flags[1], 14, 20);

LE64_BITMASK(BCH_SB_META_REPLICAS_REQ,	struct bch_sb, flags[1], 20, 24);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_REQ,	struct bch_sb, flags[1], 24, 28);

LE64_BITMASK(BCH_SB_PROMOTE_TARGET,	struct bch_sb, flags[1], 28, 40);
LE64_BITMASK(BCH_SB_FOREGROUND_TARGET,	struct bch_sb, flags[1], 40, 52);
LE64_BITMASK(BCH_SB_BACKGROUND_TARGET,	struct bch_sb, flags[1], 52, 64);

LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE,
					struct bch_sb, flags[2],  0,  4);
LE64_BITMASK(BCH_SB_GC_RESERVE_BYTES,	struct bch_sb, flags[2],  4, 64);

LE64_BITMASK(BCH_SB_ERASURE_CODE,	struct bch_sb, flags[3],  0, 16);
/*
 * Features:
 *
 * journal_seq_blacklist_v3:	gates BCH_SB_FIELD_journal_seq_blacklist
 * reflink:			gates KEY_TYPE_reflink
 * inline_data:			gates KEY_TYPE_inline_data
 * new_siphash:			gates BCH_STR_HASH_SIPHASH
 * new_extent_overwrite:	gates BTREE_NODE_NEW_EXTENT_OVERWRITE
 */
#define BCH_SB_FEATURES()			\
	x(lz4,				0)	\
	x(gzip,				1)	\
	x(zstd,				2)	\
	x(atomic_nlink,			3)	\
	x(ec,				4)	\
	x(journal_seq_blacklist_v3,	5)	\
	x(reflink,			6)	\
	x(new_siphash,			7)	\
	x(inline_data,			8)	\
	x(new_extent_overwrite,		9)	\
	x(incompressible,		10)	\
	x(btree_ptr_v2,			11)	\
	x(extents_above_btree_updates,	12)	\
	x(btree_updates_journalled,	13)
#define BCH_SB_FEATURES_ALL				\
	((1ULL << BCH_FEATURE_new_siphash)|		\
	 (1ULL << BCH_FEATURE_new_extent_overwrite)|	\
	 (1ULL << BCH_FEATURE_btree_ptr_v2)|		\
	 (1ULL << BCH_FEATURE_extents_above_btree_updates))
enum bch_sb_feature {
#define x(f, n) BCH_FEATURE_##f,
	BCH_SB_FEATURES()
#undef x
	BCH_FEATURE_NR,
};
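/*
 * Illustrative sketch (helper name is ours): feature bits live in
 * sb->features; testing one is a shift and mask, the same way
 * BCH_SB_FEATURES_ALL above is built:
 */
static inline _Bool bch2_sb_has_feature_sketch(const struct bch_sb *sb,
					       enum bch_sb_feature f)
{
	return __le64_to_cpu(sb->features[0]) & (1ULL << f);
}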
enum bch_sb_compat {
	BCH_COMPAT_FEAT_ALLOC_INFO	= 0,
	BCH_COMPAT_FEAT_ALLOC_METADATA	= 1,
};

/* options: */

#define BCH_REPLICAS_MAX		4U
enum bch_error_actions {
	BCH_ON_ERROR_CONTINUE		= 0,
	BCH_ON_ERROR_RO			= 1,
	BCH_ON_ERROR_PANIC		= 2,
	BCH_NR_ERROR_ACTIONS		= 3,
};

enum bch_str_hash_type {
	BCH_STR_HASH_CRC32C		= 0,
	BCH_STR_HASH_CRC64		= 1,
	BCH_STR_HASH_SIPHASH_OLD	= 2,
	BCH_STR_HASH_SIPHASH		= 3,
	BCH_STR_HASH_NR			= 4,
};

enum bch_str_hash_opts {
	BCH_STR_HASH_OPT_CRC32C		= 0,
	BCH_STR_HASH_OPT_CRC64		= 1,
	BCH_STR_HASH_OPT_SIPHASH	= 2,
	BCH_STR_HASH_OPT_NR		= 3,
};
enum bch_csum_type {
	BCH_CSUM_NONE			= 0,
	BCH_CSUM_CRC32C_NONZERO		= 1,
	BCH_CSUM_CRC64_NONZERO		= 2,
	BCH_CSUM_CHACHA20_POLY1305_80	= 3,
	BCH_CSUM_CHACHA20_POLY1305_128	= 4,
	BCH_CSUM_CRC32C			= 5,
	BCH_CSUM_CRC64			= 6,
	BCH_CSUM_NR			= 7,
};

static const unsigned bch_crc_bytes[] = {
	[BCH_CSUM_NONE]				= 0,
	[BCH_CSUM_CRC32C_NONZERO]		= 4,
	[BCH_CSUM_CRC32C]			= 4,
	[BCH_CSUM_CRC64_NONZERO]		= 8,
	[BCH_CSUM_CRC64]			= 8,
	[BCH_CSUM_CHACHA20_POLY1305_80]		= 10,
	[BCH_CSUM_CHACHA20_POLY1305_128]	= 16,
};
static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type)
{
	switch (type) {
	case BCH_CSUM_CHACHA20_POLY1305_80:
	case BCH_CSUM_CHACHA20_POLY1305_128:
		return true;
	default:
		return false;
	}
}
enum bch_csum_opts {
	BCH_CSUM_OPT_NONE		= 0,
	BCH_CSUM_OPT_CRC32C		= 1,
	BCH_CSUM_OPT_CRC64		= 2,
	BCH_CSUM_OPT_NR			= 3,
};

#define BCH_COMPRESSION_TYPES()		\
	x(none,			0)	\
	x(lz4_old,		1)	\
	x(gzip,			2)	\
	x(lz4,			3)	\
	x(zstd,			4)	\
	x(incompressible,	5)

enum bch_compression_type {
#define x(t, n) BCH_COMPRESSION_TYPE_##t,
	BCH_COMPRESSION_TYPES()
#undef x
	BCH_COMPRESSION_TYPE_NR
};

#define BCH_COMPRESSION_OPTS()		\
	x(none,		0)		\
	x(lz4,		1)		\
	x(gzip,		2)		\
	x(zstd,		3)

enum bch_compression_opts {
#define x(t, n) BCH_COMPRESSION_OPT_##t,
	BCH_COMPRESSION_OPTS()
#undef x
	BCH_COMPRESSION_OPT_NR
};
/*
 * Magic numbers
 *
 * The various other data structures have their own magic numbers, which are
 * xored with the first part of the cache set's UUID
 */

#define BCACHE_MAGIC							\
	UUID_LE(0xf67385c6, 0x1a4e, 0xca45,				\
		0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)

#define BCACHEFS_STATFS_MAGIC		0xca451a4e

#define JSET_MAGIC		__cpu_to_le64(0x245235c1a3625032ULL)
#define BSET_MAGIC		__cpu_to_le64(0x90135c78b99e07f5ULL)
static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
{
	__le64 ret;

	memcpy(&ret, &sb->uuid, sizeof(ret));
	return ret;
}

static inline __u64 __jset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch2_sb_magic(sb) ^ JSET_MAGIC);
}

static inline __u64 __bset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch2_sb_magic(sb) ^ BSET_MAGIC);
}
/* Journal */

#define JSET_KEYS_U64s	(sizeof(struct jset_entry) / sizeof(__u64))

#define BCH_JSET_ENTRY_TYPES()			\
	x(btree_keys,		0)		\
	x(btree_root,		1)		\
	x(prio_ptrs,		2)		\
	x(blacklist,		3)		\
	x(blacklist_v2,		4)		\
	x(usage,		5)		\
	x(data_usage,		6)

enum {
#define x(f, nr)	BCH_JSET_ENTRY_##f	= nr,
	BCH_JSET_ENTRY_TYPES()
#undef x
	BCH_JSET_ENTRY_NR
};
/*
 * Journal sequence numbers can be blacklisted: bsets record the max sequence
 * number of all the journal entries they contain updates for, so that on
 * recovery we can ignore those bsets that contain index updates newer than
 * what made it into the journal.
 *
 * This means that we can't reuse that journal_seq - we have to skip it, and
 * then record that we skipped it so that the next time we crash and recover we
 * don't think there was a missing journal entry.
 */
struct jset_entry_blacklist {
	struct jset_entry	entry;
	__le64			seq;
};

struct jset_entry_blacklist_v2 {
	struct jset_entry	entry;
	__le64			start;
	__le64			end;
};
enum {
	FS_USAGE_RESERVED		= 0,
	FS_USAGE_INODES			= 1,
	FS_USAGE_KEY_VERSION		= 2,
	FS_USAGE_NR			= 3
};

struct jset_entry_usage {
	struct jset_entry	entry;
	__le64			v;
} __attribute__((packed));

struct jset_entry_data_usage {
	struct jset_entry	entry;
	__le64			v;
	struct bch_replicas_entry r;
} __attribute__((packed));
/*
 * On disk format for a journal entry:
 * seq is monotonically increasing; every journal entry has its own unique
 * sequence number.
 *
 * last_seq is the oldest journal entry that still has keys the btree hasn't
 * flushed to disk yet.
 *
 * version is for on disk format changes.
 */
struct jset {
	struct bch_csum		csum;

	__le64			magic;
	__le64			seq;
	__le32			version;
	__le32			flags;

	__le32			u64s; /* size of d[] in u64s */

	__u8			encrypted_start[0];

	__le16			read_clock;
	__le16			write_clock;

	/* Sequence number of oldest dirty journal entry */
	__le64			last_seq;

	union {
		struct jset_entry start[0];
		__u64		_data[0];
	};
} __attribute__((packed, aligned(8)));
LE32_BITMASK(JSET_CSUM_TYPE,	struct jset, flags, 0, 4);
LE32_BITMASK(JSET_BIG_ENDIAN,	struct jset, flags, 4, 5);
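/*
 * Illustrative sketch (helper name is ours): everything in [last_seq, seq]
 * is still dirty when this entry is written, so the count of dirty journal
 * entries is:
 */
static inline __u64 jset_nr_dirty_sketch(const struct jset *j)
{
	return __le64_to_cpu(j->seq) - __le64_to_cpu(j->last_seq) + 1;
}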
#define BCH_JOURNAL_BUCKETS_MIN		8
/* Btree: */

#define BCH_BTREE_IDS()					\
	x(EXTENTS,	0, "extents")			\
	x(INODES,	1, "inodes")			\
	x(DIRENTS,	2, "dirents")			\
	x(XATTRS,	3, "xattrs")			\
	x(ALLOC,	4, "alloc")			\
	x(QUOTAS,	5, "quotas")			\
	x(EC,		6, "stripes")			\
	x(REFLINK,	7, "reflink")

enum btree_id {
#define x(kwd, val, name) BTREE_ID_##kwd = val,
	BCH_BTREE_IDS()
#undef x
	BTREE_ID_NR
};

#define BTREE_MAX_DEPTH		4U
/* Btree nodes */

/*
 * On disk a btree node is a list/log of these; within each set the keys are
 * sorted
 */
struct bset {
	__le64			seq;

	/*
	 * Highest journal entry this bset contains keys for.
	 * If on recovery we don't see that journal entry, this bset is ignored:
	 * this allows us to preserve the order of all index updates after a
	 * crash, since the journal records a total order of all index updates
	 * and anything that didn't make it to the journal doesn't get used.
	 */
	__le64			journal_seq;

	__le32			flags;
	__le16			version;
	__le16			u64s; /* count of d[] in u64s */

	union {
		struct bkey_packed start[0];
		__u64		_data[0];
	};
} __attribute__((packed, aligned(8)));
LE32_BITMASK(BSET_CSUM_TYPE,	struct bset, flags, 0, 4);

LE32_BITMASK(BSET_BIG_ENDIAN,	struct bset, flags, 4, 5);
LE32_BITMASK(BSET_SEPARATE_WHITEOUTS,
				struct bset, flags, 5, 6);
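/*
 * Illustrative sketch (helper name is ours): per the comment in struct bset,
 * a bset is only used on recovery if its journal_seq made it into the
 * journal (and wasn't blacklisted):
 */
static inline _Bool bset_recoverable_sketch(const struct bset *i,
					    __u64 newest_journal_seq)
{
	return __le64_to_cpu(i->journal_seq) <= newest_journal_seq;
}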
struct btree_node {
	struct bch_csum		csum;
	__le64			magic;

	/* this flags field is encrypted, unlike bset->flags: */
	__le64			flags;

	/* Closed interval: */
	struct bpos		min_key;
	struct bpos		max_key;
	struct bch_extent_ptr	ptr;
	struct bkey_format	format;

	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];
	};
	};
} __attribute__((packed, aligned(8)));
LE64_BITMASK(BTREE_NODE_ID,	struct btree_node, flags,  0,  4);
LE64_BITMASK(BTREE_NODE_LEVEL,	struct btree_node, flags,  4,  8);
LE64_BITMASK(BTREE_NODE_NEW_EXTENT_OVERWRITE,
				struct btree_node, flags,  8,  9);
LE64_BITMASK(BTREE_NODE_SEQ,	struct btree_node, flags, 32, 64);
struct btree_node_entry {
	struct bch_csum		csum;

	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];
	};
	};
} __attribute__((packed, aligned(8)));
#endif /* _BCACHEFS_FORMAT_H */