#ifndef _BCACHEFS_FORMAT_H
#define _BCACHEFS_FORMAT_H

/*
 * bcachefs on disk data structures
 */

#include <asm/byteorder.h>
#include <linux/uuid.h>

#define LE32_BITMASK(name, type, field, offset, end)			\
static const unsigned	name##_OFFSET = offset;				\
static const unsigned	name##_BITS = (end - offset);			\
static const __u64	name##_MAX = (1ULL << (end - offset)) - 1;	\
									\
static inline __u64 name(const type *k)				\
{									\
	return (__le32_to_cpu(k->field) >> offset) &			\
		~(~0ULL << (end - offset));				\
}									\
									\
static inline void SET_##name(type *k, __u64 v)			\
{									\
	__u64 new = __le32_to_cpu(k->field);				\
									\
	new &= ~(~(~0ULL << (end - offset)) << offset);		\
	new |= (v & ~(~0ULL << (end - offset))) << offset;		\
	k->field = __cpu_to_le32(new);					\
}

#define LE64_BITMASK(name, type, field, offset, end)			\
static const unsigned	name##_OFFSET = offset;				\
static const unsigned	name##_BITS = (end - offset);			\
static const __u64	name##_MAX = (1ULL << (end - offset)) - 1;	\
									\
static inline __u64 name(const type *k)				\
{									\
	return (__le64_to_cpu(k->field) >> offset) &			\
		~(~0ULL << (end - offset));				\
}									\
									\
static inline void SET_##name(type *k, __u64 v)			\
{									\
	__u64 new = __le64_to_cpu(k->field);				\
									\
	new &= ~(~(~0ULL << (end - offset)) << offset);		\
	new |= (v & ~(~0ULL << (end - offset))) << offset;		\
	k->field = __cpu_to_le64(new);					\
}

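/*
 * Usage sketch (illustrative, using declarations that appear later in this
 * file): LE64_BITMASK(BCH_SB_CLEAN, struct bch_sb, flags[0], 1, 2) expands
 * to the constants BCH_SB_CLEAN_OFFSET (1), BCH_SB_CLEAN_BITS (1) and
 * BCH_SB_CLEAN_MAX (1), plus a getter and a setter:
 *
 *	static inline __u64 BCH_SB_CLEAN(const struct bch_sb *k);
 *	static inline void SET_BCH_SB_CLEAN(struct bch_sb *k, __u64 v);
 *
 * so callers can read and update a range of bits within a little endian
 * field without doing the byte swapping and masking by hand:
 *
 *	if (!BCH_SB_CLEAN(sb))
 *		SET_BCH_SB_CLEAN(sb, 1);
 */
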
struct bkey_format {
	__u8		key_u64s;
	__u8		nr_fields;
	/* One unused slot for now: */
	__u8		bits_per_field[6];
	__le64		field_offset[6];
};

/* Btree keys - all units are in sectors */

struct bpos {
	/* Word order matches machine byte order */
#if defined(__LITTLE_ENDIAN)
	__u32		snapshot;
	__u64		offset;
	__u64		inode;
#elif defined(__BIG_ENDIAN)
	__u64		inode;
	__u64		offset;		/* Points to end of extent - sectors */
	__u32		snapshot;
#else
#error edit for your odd byteorder.
#endif
} __attribute__((packed, aligned(4)));

#define KEY_INODE_MAX			((__u64)~0ULL)
#define KEY_OFFSET_MAX			((__u64)~0ULL)
#define KEY_SNAPSHOT_MAX		((__u32)~0U)

static inline struct bpos POS(__u64 inode, __u64 offset)
{
	struct bpos ret;

	ret.inode	= inode;
	ret.offset	= offset;
	ret.snapshot	= 0;

	return ret;
}

#define POS_MIN				POS(0, 0)
#define POS_MAX				POS(KEY_INODE_MAX, KEY_OFFSET_MAX)

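/*
 * Note on the field order above (illustrative): on little endian the least
 * significant field (snapshot) comes first, so the in-memory words of a
 * bpos - or of a packed key containing one - sort in (inode, offset,
 * snapshot) order when compared as unsigned words, most significant word
 * first, with no per-field unpacking. A sketch of such a comparison (an
 * assumed helper, not code from this file):
 *
 *	static inline int words_cmp(const __u64 *l, const __u64 *r, unsigned n)
 *	{
 *		while (n--)
 *			if (l[n] != r[n])
 *				return l[n] < r[n] ? -1 : 1;
 *		return 0;
 *	}
 */
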
/* Empty placeholder struct, for container_of() */
struct bch_val {
	__u64		__nothing[0];
};

struct bversion {
#if defined(__LITTLE_ENDIAN)
	__u64		lo;
	__u32		hi;
#elif defined(__BIG_ENDIAN)
	__u32		hi;
	__u64		lo;
#endif
} __attribute__((packed, aligned(4)));

struct bkey {
	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#else
#error edit for your odd byteorder.
#endif

	/* Type of the value */
	__u8		type;

#if defined(__LITTLE_ENDIAN)
	__u8		pad[1];

	struct bversion	version;
	__u32		size;		/* extent size, in sectors */
	struct bpos	p;
#elif defined(__BIG_ENDIAN)
	struct bpos	p;
	__u32		size;		/* extent size, in sectors */
	struct bversion	version;

	__u8		pad[1];
#endif
} __attribute__((packed, aligned(8)));

struct bkey_packed {
	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */

	/*
	 * XXX: next incompat on disk format change, switch format and
	 * needs_whiteout - bkey_packed() will be cheaper if format is the high
	 * bits of the bitfield
	 */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#endif

	/* Type of the value */
	__u8		type;

	/*
	 * We copy bkeys with struct assignment in various places, and while
	 * that shouldn't be done with packed bkeys we can't disallow it in C,
	 * and it's legal to cast a bkey to a bkey_packed - so padding it out
	 * to the same size as struct bkey should hopefully be safest.
	 */
	__u8		pad[sizeof(struct bkey) - 3];
} __attribute__((packed, aligned(8)));

#define BKEY_U64s			(sizeof(struct bkey) / sizeof(__u64))
#define KEY_PACKED_BITS_START		24

#define KEY_SIZE_MAX			((__u32)~0U)

#define KEY_FORMAT_LOCAL_BTREE		0
#define KEY_FORMAT_CURRENT		1

enum bch_bkey_fields {
	BKEY_FIELD_INODE,
	BKEY_FIELD_OFFSET,
	BKEY_FIELD_SNAPSHOT,
	BKEY_FIELD_SIZE,
	BKEY_FIELD_VERSION_HI,
	BKEY_FIELD_VERSION_LO,
	BKEY_NR_FIELDS,
};

#define bkey_format_field(name, field)					\
	[BKEY_FIELD_##name] = (sizeof(((struct bkey *) NULL)->field) * 8)

#define BKEY_FORMAT_CURRENT						\
((struct bkey_format) {							\
	.key_u64s	= BKEY_U64s,					\
	.nr_fields	= BKEY_NR_FIELDS,				\
	.bits_per_field	= {						\
		bkey_format_field(INODE,	p.inode),		\
		bkey_format_field(OFFSET,	p.offset),		\
		bkey_format_field(SNAPSHOT,	p.snapshot),		\
		bkey_format_field(SIZE,		size),			\
		bkey_format_field(VERSION_HI,	version.hi),		\
		bkey_format_field(VERSION_LO,	version.lo),		\
	},								\
})

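/*
 * Illustrative note (assumed semantics of the fields above): a btree node
 * whose keys all share a single inode could use a local format with
 * bits_per_field[BKEY_FIELD_INODE] = 0 and field_offset[BKEY_FIELD_INODE]
 * set to that inode number, so packed keys in that node spend no bits on
 * the inode field. BKEY_FORMAT_CURRENT is the degenerate format in which
 * every field gets its full width and a zero offset.
 */
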
/* bkey with inline value */
struct bkey_i {
	union {
	struct {
		/* Size of combined key and value, in u64s */
		__u8		u64s;
	};
	struct {
		struct bkey	k;
		struct bch_val	v;
	};
	};
};

#define KEY(_inode, _offset, _size)					\
((struct bkey) {							\
	.u64s		= BKEY_U64s,					\
	.format		= KEY_FORMAT_CURRENT,				\
	.p		= POS(_inode, _offset),				\
	.size		= _size,					\
})

static inline void bkey_init(struct bkey *k)
{
	*k = KEY(0, 0, 0);
}

#define bkey_bytes(_k)		((_k)->u64s * sizeof(__u64))

#define __BKEY_PADDED(key, pad)					\
	struct { struct bkey_i key; __u64 key ## _pad[pad]; }

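/*
 * Usage sketch (illustrative): stack allocate a bkey with room for a
 * maximum size extent value (BKEY_EXTENT_VAL_U64s_MAX is defined further
 * down in this file):
 *
 *	__BKEY_PADDED(k, BKEY_EXTENT_VAL_U64s_MAX) tmp;
 *
 *	bkey_init(&tmp.k.k);
 */
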
#define BKEY_VAL_TYPE(name, nr)					\
struct bkey_i_##name {						\
	union {							\
		struct bkey		k;			\
		struct bkey_i		k_i;			\
	};							\
	struct bch_##name	v;				\
};

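/*
 * e.g. BKEY_VAL_TYPE(cookie, KEY_TYPE_COOKIE), used below, defines
 * struct bkey_i_cookie: a bkey_i whose value is accessed as a typed
 * struct bch_cookie through the v member.
 */
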
/*
 * - DELETED keys are used internally to mark keys that should be ignored but
 *   override keys in composition order. Their version number is ignored.
 *
 * - DISCARDED keys indicate that the data is all 0s because it has been
 *   discarded. DISCARDs may have a version; if the version is nonzero the key
 *   will be persistent, otherwise the key will be dropped whenever the btree
 *   node is rewritten (like DELETED keys).
 *
 * - ERROR: any read of the data returns a read error, as the data was lost due
 *   to a failing device. Like DISCARDED keys, they can be removed (overridden)
 *   by new writes or cluster-wide GC. Node repair can also overwrite them with
 *   the same or a more recent version number, but not with an older version
 *   number.
 */
#define KEY_TYPE_DELETED		0
#define KEY_TYPE_DISCARD		1
#define KEY_TYPE_ERROR			2
#define KEY_TYPE_COOKIE			3
#define KEY_TYPE_PERSISTENT_DISCARD	4
#define KEY_TYPE_GENERIC_NR		128

struct bch_cookie {
	struct bch_val		v;
	__le64			cookie;
};
BKEY_VAL_TYPE(cookie, KEY_TYPE_COOKIE);

/* Extents */

/*
 * In extent bkeys, the value is a list of pointers (bch_extent_ptr), optionally
 * preceded by checksum/compression information (bch_extent_crc32 or
 * bch_extent_crc64).
 *
 * One major determining factor in the format of extents is how we handle and
 * represent extents that have been partially overwritten and thus trimmed:
 *
 * If an extent is not checksummed or compressed, when the extent is trimmed we
 * don't have to remember the extent we originally allocated and wrote: we can
 * merely adjust ptr->offset to point to the start of the data that is
 * currently live. The size field in struct bkey records the current (live)
 * size of the extent, and is also used to mean "size of region on disk that we
 * point to" in this case.
 *
 * Thus an extent that is not checksummed or compressed will consist only of a
 * list of bch_extent_ptrs, with none of the fields in
 * bch_extent_crc32/bch_extent_crc64.
 *
 * When an extent is checksummed or compressed, it's not possible to read only
 * the data that is currently live: we have to read the entire extent that was
 * originally written, and then return only the part of the extent that is
 * currently live.
 *
 * Thus, in addition to the current size of the extent in struct bkey, we need
 * to store the size of the originally allocated space - this is the
 * compressed_size and uncompressed_size fields in bch_extent_crc32/64. Also,
 * when the extent is trimmed, instead of modifying the offset field of the
 * pointer, we keep a second smaller offset field - "offset into the original
 * extent of the currently live region".
 *
 * The other major determining factor is replication and data migration:
 *
 * Each pointer may have its own bch_extent_crc32/64. When doing a replicated
 * write, we will initially write all the replicas in the same format, with the
 * same checksum type and compression format - however, when copygc runs later
 * (or tiering/cache promotion, anything that moves data), it is not in general
 * going to rewrite all the pointers at once - one of the replicas may be in a
 * bucket on one device that has very little fragmentation while another lives
 * in a bucket that has become heavily fragmented, and thus is being rewritten
 * sooner than the rest.
 *
 * Thus it will only move a subset of the pointers (or in the case of
 * tiering/cache promotion perhaps add a single pointer without dropping any
 * current pointers), and if the extent has been partially overwritten it must
 * write only the currently live portion (or copygc would not be able to reduce
 * fragmentation!) - which necessitates a different bch_extent_crc format for
 * the new pointer.
 *
 * But in the interests of space efficiency, we don't want to store one
 * bch_extent_crc for each pointer if we don't have to.
 *
 * Thus, a bch_extent consists of bch_extent_crc32s, bch_extent_crc64s, and
 * bch_extent_ptrs appended arbitrarily one after the other. We determine the
 * type of a given entry with a scheme similar to utf8 (except we're encoding a
 * type, not a size), encoding the type in the position of the first set bit:
 *
 * bch_extent_crc32	- 0b1
 * bch_extent_ptr	- 0b10
 * bch_extent_crc64	- 0b100
 *
 * We do it this way because bch_extent_crc32 is _very_ constrained on bits (and
 * bch_extent_crc64 is the least constrained).
 *
 * Then, each bch_extent_crc32/64 applies to the pointers that follow after it,
 * until the next bch_extent_crc32/64.
 *
 * If there are no bch_extent_crcs preceding a bch_extent_ptr, then that pointer
 * is neither checksummed nor compressed.
 */

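/*
 * Decoding sketch (illustrative; an assumed helper, not necessarily the one
 * the rest of the tree uses): since the type lives in the position of the
 * first set bit, the type of the next entry can be recovered from its low
 * bits alone, and the entry's size then follows from its type, so a value
 * can be walked entry by entry:
 *
 *	static inline unsigned extent_entry_type_bit(const void *entry)
 *	{
 *		return __builtin_ctzll(*(const __u64 *) entry);
 *	}
 */
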
/* 128 bits, sufficient for cryptographic MACs: */
struct bch_csum {
	__le64			lo;
	__le64			hi;
} __attribute__((packed, aligned(8)));

#define BCH_CSUM_NONE			0U
#define BCH_CSUM_CRC32C			1U
#define BCH_CSUM_CRC64			2U
#define BCH_CSUM_CHACHA20_POLY1305_80	3U
#define BCH_CSUM_CHACHA20_POLY1305_128	4U
#define BCH_CSUM_NR			5U

static inline _Bool bch2_csum_type_is_encryption(unsigned type)
{
	switch (type) {
	case BCH_CSUM_CHACHA20_POLY1305_80:
	case BCH_CSUM_CHACHA20_POLY1305_128:
		return 1;
	default:
		return 0;
	}
}

enum bch_extent_entry_type {
	BCH_EXTENT_ENTRY_ptr		= 0,
	BCH_EXTENT_ENTRY_crc32		= 1,
	BCH_EXTENT_ENTRY_crc64		= 2,
	BCH_EXTENT_ENTRY_crc128		= 3,
};

#define BCH_EXTENT_ENTRY_MAX		4

/* Compressed/uncompressed size are stored biased by 1: */
struct bch_extent_crc32 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u32			type:2,
				_compressed_size:7,
				_uncompressed_size:7,
				offset:7,
				_unused:1,
				csum_type:4,
				compression_type:4;
	__u32			csum;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u32			csum;
	__u32			compression_type:4,
				csum_type:4,
				_unused:1,
				offset:7,
				_uncompressed_size:7,
				_compressed_size:7,
				type:2;
#endif
} __attribute__((packed, aligned(8)));

#define CRC32_SIZE_MAX		(1U << 7)
#define CRC32_NONCE_MAX		0

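/*
 * Sketch of the bias (illustrative): a stored _compressed_size of 0 means
 * 1 sector, so the 7 bit field covers 1..128 sectors - which is why
 * CRC32_SIZE_MAX above is 1U << 7 rather than (1U << 7) - 1:
 *
 *	crc->_compressed_size	= sectors - 1;
 *	sectors			= crc->_compressed_size + 1;
 */
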
struct bch_extent_crc64 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:3,
				_compressed_size:9,
				_uncompressed_size:9,
				offset:9,
				nonce:10,
				csum_type:4,
				compression_type:4,
				csum_hi:16;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			csum_hi:16,
				compression_type:4,
				csum_type:4,
				nonce:10,
				offset:9,
				_uncompressed_size:9,
				_compressed_size:9,
				type:3;
#endif
	__u64			csum_lo;
} __attribute__((packed, aligned(8)));

#define CRC64_SIZE_MAX		(1U << 9)
#define CRC64_NONCE_MAX		((1U << 10) - 1)

struct bch_extent_crc128 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:4,
				_compressed_size:13,
				_uncompressed_size:13,
				offset:13,
				nonce:13,
				csum_type:4,
				compression_type:4;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			compression_type:4,
				csum_type:4,
				nonce:13,
				offset:13,
				_uncompressed_size:13,
				_compressed_size:13,
				type:4;
#endif
	struct bch_csum		csum;
} __attribute__((packed, aligned(8)));

#define CRC128_SIZE_MAX		(1U << 13)
#define CRC128_NONCE_MAX	((1U << 13) - 1)

/*
 * Max size of an extent that may require bouncing to read or write
 * (checksummed, compressed): 64k (128 sectors of 512 bytes)
 */
#define BCH_ENCODED_EXTENT_MAX	128U

/*
 * @reservation - pointer hasn't been written to, just reserved
 */
struct bch_extent_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:1,
				cached:1,
				erasure_coded:1,
				reservation:1,
				offset:44, /* 8 petabytes */
				dev:8,
				gen:8;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			gen:8,
				dev:8,
				offset:44,
				reservation:1,
				erasure_coded:1,
				cached:1,
				type:1;
#endif
} __attribute__((packed, aligned(8)));

struct bch_extent_reservation {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:5,
				unused:23,
				replicas:4,
				generation:32;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			generation:32,
				replicas:4,
				unused:23,
				type:5;
#endif
};

union bch_extent_entry {
#if defined(__LITTLE_ENDIAN) || __BITS_PER_LONG == 64
	unsigned long			type;
#elif __BITS_PER_LONG == 32
	struct {
		unsigned long		pad;
		unsigned long		type;
	};
#else
#error edit for your odd byteorder.
#endif
	struct bch_extent_crc32		crc32;
	struct bch_extent_crc64		crc64;
	struct bch_extent_crc128	crc128;
	struct bch_extent_ptr		ptr;
};

enum {
	BCH_EXTENT		= 128,

	/*
	 * This is kind of a hack, we're overloading the type for a boolean that
	 * really should be part of the value - BCH_EXTENT and BCH_EXTENT_CACHED
	 * have the same value type:
	 */
	BCH_EXTENT_CACHED	= 129,

	/*
	 * Persistent reservation:
	 */
	BCH_RESERVATION		= 130,
};

struct bch_extent {
	struct bch_val		v;

	union bch_extent_entry	start[0];
	__u64			_data[0];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(extent, BCH_EXTENT);

struct bch_reservation {
	struct bch_val		v;

	__le32			generation;
	__u8			nr_replicas;
	__u8			pad[3];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(reservation, BCH_RESERVATION);

/* Maximum size (in u64s) a single pointer could be: */
#define BKEY_EXTENT_PTR_U64s_MAX				\
	((sizeof(struct bch_extent_crc128) +			\
	  sizeof(struct bch_extent_ptr)) / sizeof(u64))

/* Maximum possible size of an entire extent value: */
/* There's a hack in the keylist code that needs to be fixed.. */
#define BKEY_EXTENT_VAL_U64s_MAX				\
	(BKEY_EXTENT_PTR_U64s_MAX * BCH_REPLICAS_MAX)

/* Maximum possible size of an entire extent, key + value: */
#define BKEY_EXTENT_U64s_MAX		(BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)

/* Btree pointers don't carry around checksums: */
#define BKEY_BTREE_PTR_VAL_U64s_MAX				\
	((sizeof(struct bch_extent_ptr)) / sizeof(u64) * BCH_REPLICAS_MAX)
#define BKEY_BTREE_PTR_U64s_MAX					\
	(BKEY_U64s + BKEY_BTREE_PTR_VAL_U64s_MAX)

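/*
 * Worked example (assuming the struct layouts above, on a 64 bit build):
 * struct bch_extent_crc128 is 24 bytes and struct bch_extent_ptr 8 bytes,
 * so BKEY_EXTENT_PTR_U64s_MAX is (24 + 8) / 8 = 4; with BCH_REPLICAS_MAX
 * of 4 that gives a 16 u64 maximum value, and BKEY_U64s of 5 makes
 * BKEY_EXTENT_U64s_MAX = 21. Btree pointers carry no crc entries, so
 * BKEY_BTREE_PTR_U64s_MAX is only 5 + 4 = 9.
 */
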
/* Inodes */

#define BLOCKDEV_INODE_MAX	4096

#define BCACHE_ROOT_INO		4096

enum bch_inode_types {
	BCH_INODE_FS		= 128,
	BCH_INODE_BLOCKDEV	= 129,
};

struct bch_inode {
	struct bch_val		v;

	__le64			i_hash_seed;
	__le32			i_flags;
	__le16			i_mode;
	__u8			fields[0];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(inode, BCH_INODE_FS);

#define BCH_INODE_FIELDS()				\
	BCH_INODE_FIELD(i_atime,	64)		\
	BCH_INODE_FIELD(i_ctime,	64)		\
	BCH_INODE_FIELD(i_mtime,	64)		\
	BCH_INODE_FIELD(i_otime,	64)		\
	BCH_INODE_FIELD(i_size,		64)		\
	BCH_INODE_FIELD(i_sectors,	64)		\
	BCH_INODE_FIELD(i_uid,		32)		\
	BCH_INODE_FIELD(i_gid,		32)		\
	BCH_INODE_FIELD(i_nlink,	32)		\
	BCH_INODE_FIELD(i_generation,	32)		\
	BCH_INODE_FIELD(i_dev,		32)

enum {
	/*
	 * User flags (get/settable with FS_IOC_*FLAGS, correspond to FS_*_FL
	 * flags)
	 */
	__BCH_INODE_SYNC		= 0,
	__BCH_INODE_IMMUTABLE		= 1,
	__BCH_INODE_APPEND		= 2,
	__BCH_INODE_NODUMP		= 3,
	__BCH_INODE_NOATIME		= 4,

	__BCH_INODE_I_SIZE_DIRTY	= 5,
	__BCH_INODE_I_SECTORS_DIRTY	= 6,

	/* not implemented yet: */
	__BCH_INODE_HAS_XATTRS		= 7, /* has xattrs in xattr btree */

	/* bits 20+ reserved for packed fields below: */
};

#define BCH_INODE_SYNC		(1 << __BCH_INODE_SYNC)
#define BCH_INODE_IMMUTABLE	(1 << __BCH_INODE_IMMUTABLE)
#define BCH_INODE_APPEND	(1 << __BCH_INODE_APPEND)
#define BCH_INODE_NODUMP	(1 << __BCH_INODE_NODUMP)
#define BCH_INODE_NOATIME	(1 << __BCH_INODE_NOATIME)
#define BCH_INODE_I_SIZE_DIRTY	(1 << __BCH_INODE_I_SIZE_DIRTY)
#define BCH_INODE_I_SECTORS_DIRTY (1 << __BCH_INODE_I_SECTORS_DIRTY)
#define BCH_INODE_HAS_XATTRS	(1 << __BCH_INODE_HAS_XATTRS)

LE32_BITMASK(INODE_STR_HASH,	struct bch_inode, i_flags, 20, 24);
LE32_BITMASK(INODE_NR_FIELDS,	struct bch_inode, i_flags, 24, 32);

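/*
 * Illustrative note (assumed semantics): the BCH_INODE_FIELDS() list above
 * names what can live in the variable length fields[] area of bch_inode,
 * giving each field's maximum width in bits; INODE_NR_FIELDS records how
 * many of those fields a given on disk inode actually carries, so new
 * fields can be appended to the list without an incompatible format change.
 */
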
struct bch_inode_blockdev {
	struct bch_val		v;

	__le64			i_size;
	__le64			i_flags;

	/* Seconds: */
	__le64			i_ctime;
	__le64			i_mtime;

	uuid_le			i_uuid;
	__u8			i_label[32];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(inode_blockdev, BCH_INODE_BLOCKDEV);

/* Thin provisioned volume, or cache for another block device? */
LE64_BITMASK(CACHED_DEV,	struct bch_inode_blockdev, i_flags, 0, 1);

/* Dirents */

/*
 * Dirents (and xattrs) have to implement string lookups; since our b-tree
 * doesn't support arbitrary length strings for the key, we instead index by a
 * 64 bit hash (currently truncated sha1) of the string, stored in the offset
 * field of the key - using linear probing to resolve hash collisions. This also
 * provides us with the readdir cookie posix requires.
 *
 * Linear probing requires us to use whiteouts for deletions, in the event of a
 * collision:
 */

enum {
	BCH_DIRENT		= 128,
	BCH_DIRENT_WHITEOUT	= 129,
};

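/*
 * Lookup sketch (illustrative pseudocode for the scheme described above):
 *
 *	hash = dirent_hash(name);
 *	for each key in the DIRENTS btree from POS(dir_inum, hash):
 *		if (slot empty)				-> not found
 *		if (key type == BCH_DIRENT_WHITEOUT)	-> keep probing
 *		if (key type == BCH_DIRENT &&
 *		    name matches)			-> found
 *
 * Deletions must leave a BCH_DIRENT_WHITEOUT behind: simply removing the
 * key would end the probe sequence early and hide any entry that had been
 * inserted past the deleted slot.
 */
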
struct bch_dirent {
	struct bch_val		v;

	/* Target inode number: */
	__le64			d_inum;

	/*
	 * Copy of mode bits 12-15 from the target inode - so userspace can get
	 * the filetype without having to do a stat()
	 */
	__u8			d_type;

	__u8			d_name[];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(dirent, BCH_DIRENT);

/* Xattrs */

enum {
	BCH_XATTR		= 128,
	BCH_XATTR_WHITEOUT	= 129,
};

#define BCH_XATTR_INDEX_USER			0
#define BCH_XATTR_INDEX_POSIX_ACL_ACCESS	1
#define BCH_XATTR_INDEX_POSIX_ACL_DEFAULT	2
#define BCH_XATTR_INDEX_TRUSTED			3
#define BCH_XATTR_INDEX_SECURITY		4

struct bch_xattr {
	struct bch_val		v;
	__u8			x_type;
	__u8			x_name_len;
	__le16			x_val_len;
	__u8			x_name[];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(xattr, BCH_XATTR);

/*
 * Version 0: Cache device
 * Version 1: Backing device
 * Version 2: Seed pointer into btree node checksum
 * Version 3: Cache device with new UUID format
 * Version 4: Backing device with data offset
 * Version 5: All the incompat changes
 * Version 6: Cache device UUIDs all in superblock, another incompat bset change
 * Version 7: Encryption (expanded checksum fields), other random things
 */
#define BCACHE_SB_VERSION_CDEV_V0	0
#define BCACHE_SB_VERSION_BDEV		1
#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3
#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4
#define BCACHE_SB_VERSION_CDEV_V2	5
#define BCACHE_SB_VERSION_CDEV_V3	6
#define BCACHE_SB_VERSION_CDEV_V4	7
#define BCACHE_SB_VERSION_CDEV		7
#define BCACHE_SB_MAX_VERSION		7

#define BCH_SB_SECTOR			8
#define BCH_SB_LABEL_SIZE		32
#define BCH_SB_MEMBERS_MAX		64 /* XXX kill */

struct bch_member {
	uuid_le			uuid;
	__le64			nbuckets;	/* device size */
	__le16			first_bucket;	/* index of first bucket used */
	__le16			bucket_size;	/* sectors */
	__le32			pad;
	__le64			last_mount;	/* time_t */

	__le64			flags[2];
};

LE64_BITMASK(BCH_MEMBER_STATE,		struct bch_member, flags[0],  0,  4);
LE64_BITMASK(BCH_MEMBER_TIER,		struct bch_member, flags[0],  4,  8);
LE64_BITMASK(BCH_MEMBER_HAS_METADATA,	struct bch_member, flags[0],  8,  9);
LE64_BITMASK(BCH_MEMBER_HAS_DATA,	struct bch_member, flags[0],  9, 10);
LE64_BITMASK(BCH_MEMBER_REPLACEMENT,	struct bch_member, flags[0], 10, 14);
LE64_BITMASK(BCH_MEMBER_DISCARD,	struct bch_member, flags[0], 14, 15);

LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS,	struct bch_member, flags[1],  0, 20);
LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS, struct bch_member, flags[1], 20, 40);

enum bch_member_state {
	BCH_MEMBER_STATE_RW		= 0,
	BCH_MEMBER_STATE_RO		= 1,
	BCH_MEMBER_STATE_FAILED		= 2,
	BCH_MEMBER_STATE_SPARE		= 3,
	BCH_MEMBER_STATE_NR		= 4,
};

#define BCH_TIER_MAX			4U

enum cache_replacement {
	CACHE_REPLACEMENT_LRU		= 0,
	CACHE_REPLACEMENT_FIFO		= 1,
	CACHE_REPLACEMENT_RANDOM	= 2,
	CACHE_REPLACEMENT_NR		= 3,
};

struct bch_sb_layout {
	uuid_le			magic;	/* bcachefs superblock UUID */
	__u8			layout_type;
	__u8			sb_max_size_bits; /* base 2 of 512 byte sectors */
	__u8			nr_superblocks;
	__u8			pad[5];
	__u64			sb_offset[61];
} __attribute__((packed, aligned(8)));

#define BCH_SB_LAYOUT_SECTOR	7

struct bch_sb_field {
	__u64			_data[0];
	__le32			u64s;
	__le32			type;
};

enum bch_sb_field_type {
	BCH_SB_FIELD_journal	= 0,
	BCH_SB_FIELD_members	= 1,
	BCH_SB_FIELD_crypt	= 2,
	BCH_SB_FIELD_replication = 3,
	BCH_SB_FIELD_NR		= 4,
};

struct bch_sb_field_journal {
	struct bch_sb_field	field;
	__le64			buckets[0];
};

struct bch_sb_field_members {
	struct bch_sb_field	field;
	struct bch_member	members[0];
};

#define BCH_KEY_MAGIC					\
	(((u64) 'b' <<  0)|((u64) 'c' <<  8)|		\
	 ((u64) 'h' << 16)|((u64) '*' << 24)|		\
	 ((u64) '*' << 32)|((u64) 'k' << 40)|		\
	 ((u64) 'e' << 48)|((u64) 'y' << 56))
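/* (the bytes "bch**key", read as a little endian integer) */
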
struct bch_encrypted_key {
	__le64			magic;
	__u8			key[32];
};

/*
 * If this field is present in the superblock, it stores an encryption key which
 * is used to encrypt all other data/metadata. The key will normally be encrypted
 * with the key userspace provides, but if encryption has been turned off we'll
 * just store the master key unencrypted in the superblock so we can access the
 * previously encrypted data.
 */
struct bch_sb_field_crypt {
	struct bch_sb_field	field;

	__le64			flags;
	__le64			kdf_flags;
	struct bch_encrypted_key key;
};

LE64_BITMASK(BCH_CRYPT_KDF_TYPE,	struct bch_sb_field_crypt, flags, 0, 4);

/* stored as base 2 log of scrypt params: */
LE64_BITMASK(BCH_KDF_SCRYPT_N,	struct bch_sb_field_crypt, kdf_flags,  0, 16);
LE64_BITMASK(BCH_KDF_SCRYPT_R,	struct bch_sb_field_crypt, kdf_flags, 16, 32);
LE64_BITMASK(BCH_KDF_SCRYPT_P,	struct bch_sb_field_crypt, kdf_flags, 32, 48);

struct bch_sb_field_replication {
	struct bch_sb_field	field;
};

/*
 * @offset	- sector where this sb was written
 * @version	- on disk format version
 * @magic	- identifies as a bcachefs superblock (BCACHE_MAGIC)
 * @uuid	- used for generating various magic numbers and identifying
 *		  member devices, never changes
 * @user_uuid	- user visible UUID, may be changed
 * @label	- filesystem label
 * @seq		- identifies most recent superblock, incremented each time
 *		  superblock is written
 * @features	- enabled incompatible features
 */
struct bch_sb {
	struct bch_csum		csum;
	__le64			version;
	uuid_le			magic;
	uuid_le			uuid;
	uuid_le			user_uuid;
	__u8			label[BCH_SB_LABEL_SIZE];
	__le64			offset;
	__le64			seq;

	__le16			block_size;
	__u8			dev_idx;
	__u8			nr_devices;
	__le32			u64s;

	__le64			time_base_lo;
	__le32			time_base_hi;
	__le32			time_precision;

	__le64			flags[8];
	__le64			features[2];
	__le64			compat[2];

	struct bch_sb_layout	layout;

	union {
		struct bch_sb_field start[0];
		__le64		_data[0];
	};
} __attribute__((packed, aligned(8)));

/*
 * Flags:
 * BCH_SB_INITIALIZED	- set on first mount
 * BCH_SB_CLEAN		- did we shut down cleanly? Just a hint, doesn't affect
 *			  behaviour of mount/recovery path:
 * BCH_SB_INODE_32BIT	- limit inode numbers to 32 bits
 * BCH_SB_128_BIT_MACS	- 128 bit macs instead of 80
 * BCH_SB_ENCRYPTION_TYPE - if nonzero encryption is enabled; overrides
 *			  DATA/META_CSUM_TYPE. Also indicates encryption
 *			  algorithm in use, if/when we get more than one
 */
LE64_BITMASK(BCH_SB_INITIALIZED,	struct bch_sb, flags[0],  0,  1);
LE64_BITMASK(BCH_SB_CLEAN,		struct bch_sb, flags[0],  1,  2);
LE64_BITMASK(BCH_SB_CSUM_TYPE,		struct bch_sb, flags[0],  2,  8);
LE64_BITMASK(BCH_SB_ERROR_ACTION,	struct bch_sb, flags[0],  8, 12);

LE64_BITMASK(BCH_SB_BTREE_NODE_SIZE,	struct bch_sb, flags[0], 12, 28);

LE64_BITMASK(BCH_SB_GC_RESERVE,		struct bch_sb, flags[0], 28, 33);
LE64_BITMASK(BCH_SB_ROOT_RESERVE,	struct bch_sb, flags[0], 33, 40);

LE64_BITMASK(BCH_SB_META_CSUM_TYPE,	struct bch_sb, flags[0], 40, 44);
LE64_BITMASK(BCH_SB_DATA_CSUM_TYPE,	struct bch_sb, flags[0], 44, 48);

LE64_BITMASK(BCH_SB_META_REPLICAS_WANT,	struct bch_sb, flags[0], 48, 52);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_WANT,	struct bch_sb, flags[0], 52, 56);

LE64_BITMASK(BCH_SB_META_REPLICAS_HAVE,	struct bch_sb, flags[0], 56, 60);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_HAVE,	struct bch_sb, flags[0], 60, 64);

LE64_BITMASK(BCH_SB_STR_HASH_TYPE,	struct bch_sb, flags[1],  0,  4);
LE64_BITMASK(BCH_SB_COMPRESSION_TYPE,	struct bch_sb, flags[1],  4,  8);
LE64_BITMASK(BCH_SB_INODE_32BIT,	struct bch_sb, flags[1],  8,  9);

LE64_BITMASK(BCH_SB_128_BIT_MACS,	struct bch_sb, flags[1],  9, 10);
LE64_BITMASK(BCH_SB_ENCRYPTION_TYPE,	struct bch_sb, flags[1], 10, 14);
/* 14-20 unused, was JOURNAL_ENTRY_SIZE */

LE64_BITMASK(BCH_SB_META_REPLICAS_REQ,	struct bch_sb, flags[1], 20, 24);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_REQ,	struct bch_sb, flags[1], 24, 28);

enum bch_sb_features {
	BCH_FEATURE_LZ4		= 0,
	BCH_FEATURE_GZIP	= 1,
};

#define BCH_REPLICAS_MAX		4U

#define BCH_ERROR_ACTIONS()					\
	x(BCH_ON_ERROR_CONTINUE,	0, "continue")		\
	x(BCH_ON_ERROR_RO,		1, "remount-ro")	\
	x(BCH_ON_ERROR_PANIC,		2, "panic")		\
	x(BCH_NR_ERROR_ACTIONS,		3, NULL)

enum bch_error_actions {
#define x(_opt, _nr, _str) _opt = _nr,
	BCH_ERROR_ACTIONS()
#undef x
};

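/*
 * Illustrative sketch (not necessarily defined elsewhere in this tree):
 * the same x-macro list can stamp out a matching name table, keeping the
 * enum and the strings in sync by construction:
 *
 *	static const char * const bch_error_action_strs[] = {
 *	#define x(_opt, _nr, _str) [_nr] = _str,
 *		BCH_ERROR_ACTIONS()
 *	#undef x
 *	};
 */
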
enum bch_csum_opts {
	BCH_CSUM_OPT_NONE		= 0,
	BCH_CSUM_OPT_CRC32C		= 1,
	BCH_CSUM_OPT_CRC64		= 2,
	BCH_CSUM_OPT_NR			= 3,
};

enum bch_str_hash_opts {
	BCH_STR_HASH_CRC32C	= 0,
	BCH_STR_HASH_CRC64	= 1,
	BCH_STR_HASH_SIPHASH	= 2,
	BCH_STR_HASH_NR		= 3,
};

enum bch_compression_opts {
	BCH_COMPRESSION_NONE	= 0,
	BCH_COMPRESSION_LZ4	= 1,
	BCH_COMPRESSION_GZIP	= 2,
	BCH_COMPRESSION_NR	= 3,
};

/* backing device specific stuff: */

struct backingdev_sb {
	__le64			csum;
	__le64			offset;	/* sector where this sb was written */
	__le64			version; /* of on disk format */

	uuid_le			magic;	/* bcachefs superblock UUID */

	uuid_le			disk_uuid;

	/*
	 * Internal cache set UUID - xored with various magic numbers and thus
	 * must never change:
	 */
	union {
		uuid_le		set_uuid;
		__le64		set_magic;
	};
	__u8			label[BCH_SB_LABEL_SIZE];

	__le64			flags;

	/* Incremented each time superblock is written: */
	__le64			seq;

	/*
	 * User visible UUID for identifying the cache set the user is allowed
	 * to change:
	 */
	uuid_le			user_uuid;
	__le64			pad1[6];
	__le64			data_offset;

	__le16			block_size;	/* sectors */
	__le16			pad2[3];

	__le32			last_mount;	/* time_t */
	__le16			pad3;
	/* size of variable length portion - always 0 for backingdev superblock */
	__le16			u64s;
	__u64			_data[0];
};

LE64_BITMASK(BDEV_CACHE_MODE,		struct backingdev_sb, flags, 0, 4);
#define CACHE_MODE_WRITETHROUGH		0U
#define CACHE_MODE_WRITEBACK		1U
#define CACHE_MODE_WRITEAROUND		2U
#define CACHE_MODE_NONE			3U

LE64_BITMASK(BDEV_STATE,		struct backingdev_sb, flags, 61, 63);
#define BDEV_STATE_NONE			0U
#define BDEV_STATE_CLEAN		1U
#define BDEV_STATE_DIRTY		2U
#define BDEV_STATE_STALE		3U

#define BDEV_DATA_START_DEFAULT		16	/* sectors */

static inline _Bool __SB_IS_BDEV(__u64 version)
{
	return version == BCACHE_SB_VERSION_BDEV
		|| version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET;
}

static inline _Bool SB_IS_BDEV(const struct bch_sb *sb)
{
	return __SB_IS_BDEV(sb->version);
}

/*
 * The various other data structures have their own magic numbers, which are
 * xored with the first part of the cache set's UUID
 */

#define BCACHE_MAGIC							\
	UUID_LE(0xf67385c6, 0x1a4e, 0xca45,				\
		0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)

#define BCACHE_STATFS_MAGIC		0xca451a4e

#define JSET_MAGIC		__cpu_to_le64(0x245235c1a3625032ULL)
#define PSET_MAGIC		__cpu_to_le64(0x6750e15f87337f91ULL)
#define BSET_MAGIC		__cpu_to_le64(0x90135c78b99e07f5ULL)

static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
{
	__le64 ret;

	memcpy(&ret, &sb->uuid, sizeof(ret));
	return ret;
}

static inline __u64 __jset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch2_sb_magic(sb) ^ JSET_MAGIC);
}

static inline __u64 __pset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch2_sb_magic(sb) ^ PSET_MAGIC);
}

static inline __u64 __bset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch2_sb_magic(sb) ^ BSET_MAGIC);
}

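/*
 * Verification sketch (illustrative): a structure read off disk belongs to
 * this filesystem iff its magic matches the per filesystem value, e.g. for
 * a journal entry:
 *
 *	if (__le64_to_cpu(j->magic) != __jset_magic(sb))
 *		return -EINVAL;		// not ours, or garbage
 *
 * so stale metadata from a previous filesystem on the same device can't be
 * mistaken for live metadata.
 */
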
#define BCACHE_JSET_VERSION_UUIDv1	1
#define BCACHE_JSET_VERSION_UUID	1	/* Always latest UUID format */
#define BCACHE_JSET_VERSION_JKEYS	2
#define BCACHE_JSET_VERSION		2

struct jset_entry {
	__le16			u64s;
	__u8			btree_id;
	__u8			level;
	__le32			flags;	/* designates what this jset holds */

	union {
		struct bkey_i	start[0];
		__u64		_data[0];
	};
};

#define JSET_KEYS_U64s	(sizeof(struct jset_entry) / sizeof(__u64))

LE32_BITMASK(JOURNAL_ENTRY_TYPE,	struct jset_entry, flags, 0, 8);

enum {
	JOURNAL_ENTRY_BTREE_KEYS	= 0,
	JOURNAL_ENTRY_BTREE_ROOT	= 1,
	JOURNAL_ENTRY_PRIO_PTRS		= 2,

	/*
	 * Journal sequence numbers can be blacklisted: bsets record the max
	 * sequence number of all the journal entries they contain updates for,
	 * so that on recovery we can ignore those bsets that contain index
	 * updates newer than what made it into the journal.
	 *
	 * This means that we can't reuse that journal_seq - we have to skip it,
	 * and then record that we skipped it so that the next time we crash and
	 * recover we don't think there was a missing journal entry.
	 */
	JOURNAL_ENTRY_JOURNAL_SEQ_BLACKLISTED = 3,
};

/*
 * On disk format for a journal entry:
 * seq is monotonically increasing; every journal entry has its own unique
 * sequence number.
 *
 * last_seq is the oldest journal entry that still has keys the btree hasn't
 * flushed to disk yet.
 *
 * version is for on disk format changes.
 */
struct jset {
	struct bch_csum		csum;

	__le64			magic;
	__le64			seq;
	__le32			version;
	__le32			flags;

	__le32			u64s; /* size of d[] in u64s */

	__u8			encrypted_start[0];

	__le16			read_clock;
	__le16			write_clock;

	/* Sequence number of oldest dirty journal entry */
	__le64			last_seq;

	union {
		struct jset_entry start[0];
		__u64		_data[0];
	};
} __attribute__((packed, aligned(8)));

LE32_BITMASK(JSET_CSUM_TYPE,	struct jset, flags, 0, 4);
LE32_BITMASK(JSET_BIG_ENDIAN,	struct jset, flags, 4, 5);

#define BCH_JOURNAL_BUCKETS_MIN		20

/* Bucket prios/gens */

struct prio_set {
	struct bch_csum		csum;

	__le64			magic;
	__le32			nonce[3];
	__le16			version;
	__le16			flags;

	__u8			encrypted_start[0];

	__le64			next_bucket;

	struct bucket_disk {
		__le16		prio[2];
		__u8		gen;
	} __attribute__((packed)) data[];
} __attribute__((packed, aligned(8)));

LE32_BITMASK(PSET_CSUM_TYPE,	struct prio_set, flags, 0, 4);

/* Btree: */

#define DEFINE_BCH_BTREE_IDS()					\
	DEF_BTREE_ID(EXTENTS, 0, "extents")			\
	DEF_BTREE_ID(INODES,  1, "inodes")			\
	DEF_BTREE_ID(DIRENTS, 2, "dirents")			\
	DEF_BTREE_ID(XATTRS,  3, "xattrs")

#define DEF_BTREE_ID(kwd, val, name) BTREE_ID_##kwd = val,

enum btree_id {
	DEFINE_BCH_BTREE_IDS()
	BTREE_ID_NR
};

#undef DEF_BTREE_ID

#define BTREE_MAX_DEPTH		4U

/* Version 1: Seed pointer into btree node checksum */
#define BCACHE_BSET_CSUM		1
#define BCACHE_BSET_KEY_v1		2
#define BCACHE_BSET_JOURNAL_SEQ		3
#define BCACHE_BSET_VERSION		3

/*
 * On disk a btree node is a list/log of these; within each set the keys are
 * sorted
 */
struct bset {
	__le64			seq;

	/*
	 * Highest journal entry this bset contains keys for.
	 * If on recovery we don't see that journal entry, this bset is ignored:
	 * this allows us to preserve the order of all index updates after a
	 * crash, since the journal records a total order of all index updates
	 * and anything that didn't make it to the journal doesn't get used.
	 */
	__le64			journal_seq;

	__le32			flags;
	__le16			version;
	__le16			u64s; /* count of d[] in u64s */

	union {
		struct bkey_packed start[0];
		__u64		_data[0];
	};
} __attribute__((packed, aligned(8)));

LE32_BITMASK(BSET_CSUM_TYPE,	struct bset, flags, 0, 4);

LE32_BITMASK(BSET_BIG_ENDIAN,	struct bset, flags, 4, 5);
LE32_BITMASK(BSET_SEPARATE_WHITEOUTS,
				struct bset, flags, 5, 6);

struct btree_node {
	struct bch_csum		csum;
	__le64			magic;

	/* this flags field is encrypted, unlike bset->flags: */
	__le64			flags;

	/* Closed interval: */
	struct bpos		min_key;
	struct bpos		max_key;
	struct bch_extent_ptr	ptr;
	struct bkey_format	format;

	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];
	};
	};
} __attribute__((packed, aligned(8)));

LE64_BITMASK(BTREE_NODE_ID,	struct btree_node, flags, 0, 4);
LE64_BITMASK(BTREE_NODE_LEVEL,	struct btree_node, flags, 4, 8);

struct btree_node_entry {
	struct bch_csum		csum;

	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];
	};
	};
} __attribute__((packed, aligned(8)));

#endif /* _BCACHEFS_FORMAT_H */