#ifndef _BCACHEFS_FORMAT_H
#define _BCACHEFS_FORMAT_H
/*
 * bcachefs on disk data structures
 *
 * There are three main types of on disk data structures in bcachefs (this is
 * reduced from 5 in bcache)
 *
 *  - superblock
 *  - journal
 *  - btree
 *
 * The btree is the primary structure; most metadata exists as keys in the
 * various btrees. There are only a small number of btrees, and they're not
 * sharded - we have one btree for extents, another for inodes, et cetera.
 *
 * SUPERBLOCK:
 *
 * The superblock contains the location of the journal, the list of devices in
 * the filesystem, and in general any metadata we need in order to decide
 * whether we can start a filesystem, or that we need prior to reading the
 * journal/btree roots.
 *
 * The superblock is extensible, and most of the contents of the superblock are
 * in variable length, type tagged fields; see struct bch_sb_field.
 *
 * Backup superblocks do not reside in a fixed location; also, superblocks do
 * not have a fixed size. To locate backup superblocks we have struct
 * bch_sb_layout; we store a copy of this inside every superblock, and also
 * before the first superblock (see the sketch near struct bch_sb_layout,
 * below).
 *
 * JOURNAL:
 *
 * The journal primarily records btree updates in the order they occurred;
 * journal replay consists of just iterating over all the keys in the open
 * journal entries and re-inserting them into the btrees.
 *
 * The journal also contains entry types for the btree roots, and blacklisted
 * journal sequence numbers (see journal_seq_blacklist.c).
 *
 * BTREE NODES:
 *
 * bcachefs btrees are copy on write b+ trees, where nodes are big (typically
 * 128k-256k) and log structured. We use struct btree_node for writing the
 * first entry in a given node (offset 0), and struct btree_node_entry for all
 * subsequent writes.
 *
 * After the header, btree node entries contain a list of keys in sorted order.
 * Values are stored inline with the keys; since values are variable length
 * (and keys effectively are variable length too, due to packing) we can't do
 * random access without building up additional in memory tables in the btree
 * node read path.
 *
 * BTREE KEYS (struct bkey):
 *
 * The various btrees share a common format for the key - so as to avoid
 * switching in fastpath lookup/comparison code - but define their own
 * structures for the key values.
 *
 * The size of a key/value pair is stored as a u8 in units of u64s, so the max
 * size is just under 2k. The common part also contains a type tag for the
 * value, and a format field indicating whether the key is packed or not (and
 * also meant to allow adding new key fields in the future, if desired).
 *
 * bkeys, when stored within a btree node, may also be packed. In that case, the
 * bkey_format in that node is used to unpack it. Packed bkeys mean that we can
 * be generous with field sizes in the common part of the key format (64 bit
 * inode number, 64 bit offset, 96 bit version field, etc.) for negligible cost.
 */
#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/uuid.h>
#define LE_BITMASK(_bits, name, type, field, offset, end)		\
static const unsigned	name##_OFFSET = offset;				\
static const unsigned	name##_BITS = (end - offset);			\
static const __u##_bits	name##_MAX = (1ULL << (end - offset)) - 1;	\
									\
static inline __u64 name(const type *k)				\
{									\
	return (__le##_bits##_to_cpu(k->field) >> offset) &		\
		~(~0ULL << (end - offset));				\
}									\
									\
static inline void SET_##name(type *k, __u64 v)				\
{									\
	__u##_bits new = __le##_bits##_to_cpu(k->field);		\
									\
	new &= ~(~(~0ULL << (end - offset)) << offset);			\
	new |= (v & ~(~0ULL << (end - offset))) << offset;		\
	k->field = __cpu_to_le##_bits(new);				\
}

#define LE16_BITMASK(n, t, f, o, e)	LE_BITMASK(16, n, t, f, o, e)
#define LE32_BITMASK(n, t, f, o, e)	LE_BITMASK(32, n, t, f, o, e)
#define LE64_BITMASK(n, t, f, o, e)	LE_BITMASK(64, n, t, f, o, e)
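/*
 * Illustrative sketch (guarded out; not part of the on disk format): the
 * struct and field names below are hypothetical, chosen only to show what
 * LE_BITMASK generates - typed accessors, so callers never shift/mask or
 * byteswap by hand:
 */
#if 0
struct example { __le64 flags; };
LE64_BITMASK(EXAMPLE_FIELD, struct example, flags, 0, 4)

static inline void example_usage(struct example *e)
{
	SET_EXAMPLE_FIELD(e, 7);	/* store 7 in bits 0..3 of e->flags */
	/* EXAMPLE_FIELD(e) now returns 7; endianness handled internally */
}
#endif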
struct bkey_format {
	__u8		key_u64s;
	__u8		nr_fields;
	/* One unused slot for now: */
	__u8		bits_per_field[6];
	__le64		field_offset[6];
};
/* Btree keys - all units are in sectors */

struct bpos {
	/*
	 * Word order matches machine byte order - btree code treats a bpos as a
	 * single large integer, for search/comparison purposes
	 *
	 * Note that wherever a bpos is embedded in another on disk data
	 * structure, it has to be byte swabbed when reading in metadata that
	 * wasn't written in native endian order:
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u32		snapshot;
	__u64		offset;
	__u64		inode;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	__u64		inode;
	__u64		offset;		/* Points to end of extent - sectors */
	__u32		snapshot;
#else
#error edit for your odd byteorder.
#endif
} __attribute__((packed, aligned(4)));
#define KEY_INODE_MAX			((__u64)~0ULL)
#define KEY_OFFSET_MAX			((__u64)~0ULL)
#define KEY_SNAPSHOT_MAX		((__u32)~0U)
#define KEY_SIZE_MAX			((__u32)~0U)
static inline struct bpos POS(__u64 inode, __u64 offset)
{
	struct bpos ret;

	ret.inode	= inode;
	ret.offset	= offset;
	ret.snapshot	= 0;

	return ret;
}

#define POS_MIN			POS(0, 0)
#define POS_MAX			POS(KEY_INODE_MAX, KEY_OFFSET_MAX)
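/*
 * Illustrative sketch (guarded out): "treats a bpos as a single large
 * integer" means comparison is lexicographic, inode most significant,
 * snapshot least - equivalent to:
 */
#if 0
static inline int example_bpos_cmp(struct bpos l, struct bpos r)
{
	if (l.inode != r.inode)
		return l.inode < r.inode ? -1 : 1;
	if (l.offset != r.offset)
		return l.offset < r.offset ? -1 : 1;
	if (l.snapshot != r.snapshot)
		return l.snapshot < r.snapshot ? -1 : 1;
	return 0;
}
#endif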
/* Empty placeholder struct, for container_of() */
struct bch_val {
	__u64		__nothing[0];
};

struct bversion {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u64		lo;
	__u32		hi;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	__u32		hi;
	__u64		lo;
#endif
} __attribute__((packed, aligned(4)));
struct bkey {
	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#else
#error edit for your odd byteorder.
#endif

	/* Type of the value */
	__u8		type;

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u8		pad[1];

	struct bversion	version;
	__u32		size;		/* extent size, in sectors */
	struct bpos	p;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	struct bpos	p;
	__u32		size;		/* extent size, in sectors */
	struct bversion	version;

	__u8		pad[1];
#endif
} __attribute__((packed, aligned(8)));
struct bkey_packed {
	__u64		_data[0];

	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */
	/*
	 * XXX: next incompat on disk format change, switch format and
	 * needs_whiteout - bkey_packed() will be cheaper if format is the high
	 * bits of the bitfield
	 */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#endif

	/* Type of the value */
	__u8		type;
	__u8		key_start[0];

	/*
	 * We copy bkeys with struct assignment in various places, and while
	 * that shouldn't be done with packed bkeys we can't disallow it in C,
	 * and it's legal to cast a bkey to a bkey_packed - so padding it out
	 * to the same size as struct bkey should hopefully be safest.
	 */
	__u8		pad[sizeof(struct bkey) - 3];
} __attribute__((packed, aligned(8)));
#define BKEY_U64s			(sizeof(struct bkey) / sizeof(__u64))
#define KEY_PACKED_BITS_START		24

#define KEY_FORMAT_LOCAL_BTREE		0
#define KEY_FORMAT_CURRENT		1
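/*
 * Illustrative sketch (guarded out): the format field is how code tells
 * packed keys from unpacked ones - anything not tagged with the current
 * format must be unpacked via the btree node's bkey_format:
 */
#if 0
static inline _Bool example_bkey_is_packed(const struct bkey *k)
{
	return k->format != KEY_FORMAT_CURRENT;
}
#endif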
enum bch_bkey_fields {
	BKEY_FIELD_INODE,
	BKEY_FIELD_OFFSET,
	BKEY_FIELD_SNAPSHOT,
	BKEY_FIELD_SIZE,
	BKEY_FIELD_VERSION_HI,
	BKEY_FIELD_VERSION_LO,
	BKEY_NR_FIELDS,
};
#define bkey_format_field(name, field)					\
	[BKEY_FIELD_##name] = (sizeof(((struct bkey *) NULL)->field) * 8)

#define BKEY_FORMAT_CURRENT						\
((struct bkey_format) {							\
	.key_u64s	= BKEY_U64s,					\
	.nr_fields	= BKEY_NR_FIELDS,				\
	.bits_per_field = {						\
		bkey_format_field(INODE,	p.inode),		\
		bkey_format_field(OFFSET,	p.offset),		\
		bkey_format_field(SNAPSHOT,	p.snapshot),		\
		bkey_format_field(SIZE,		size),			\
		bkey_format_field(VERSION_HI,	version.hi),		\
		bkey_format_field(VERSION_LO,	version.lo),		\
	},								\
})
/* bkey with inline value */
struct bkey_i {
	__u64			_data[0];

	union {
	struct {
		/* Size of combined key and value, in u64s */
		__u8	u64s;
	};
	struct {
		struct bkey	k;
		struct bch_val	v;
	};
	};
};
#define KEY(_inode, _offset, _size)					\
((struct bkey) {							\
	.u64s		= BKEY_U64s,					\
	.format		= KEY_FORMAT_CURRENT,				\
	.p		= POS(_inode, _offset),				\
	.size		= _size,					\
})

static inline void bkey_init(struct bkey *k)
{
	*k = KEY(0, 0, 0);
}
#define bkey_bytes(_k)		((_k)->u64s * sizeof(__u64))

#define __BKEY_PADDED(key, pad)					\
	struct { struct bkey_i key; __u64 key ## _pad[pad]; }

#define BKEY_VAL_TYPE(name, nr)						\
struct bkey_i_##name {							\
	union {								\
		struct bkey		k;				\
		struct bkey_i		k_i;				\
	};								\
	struct bch_##name		v;				\
};
/*
 * - DELETED keys are used internally to mark keys that should be ignored but
 *   override keys in composition order. Their version number is ignored.
 *
 * - DISCARDED keys indicate that the data is all 0s because it has been
 *   discarded. DISCARDs may have a version; if the version is nonzero the key
 *   will be persistent, otherwise the key will be dropped whenever the btree
 *   node is rewritten (like DELETED keys).
 *
 * - ERROR: any read of the data returns a read error, as the data was lost due
 *   to a failing device. Like DISCARDED keys, they can be removed (overridden)
 *   by new writes or cluster-wide GC. Node repair can also overwrite them with
 *   the same or a more recent version number, but not with an older version
 *   number.
 */
#define KEY_TYPE_DELETED		0
#define KEY_TYPE_DISCARD		1
#define KEY_TYPE_ERROR			2
#define KEY_TYPE_COOKIE			3
#define KEY_TYPE_PERSISTENT_DISCARD	4
#define KEY_TYPE_GENERIC_NR		128

struct bch_cookie {
	struct bch_val		v;
	__le64			cookie;
};
BKEY_VAL_TYPE(cookie, KEY_TYPE_COOKIE);
/* Extents */

/*
 * In extent bkeys, the value is a list of pointers (bch_extent_ptr), optionally
 * preceded by checksum/compression information (bch_extent_crc32 or
 * bch_extent_crc64).
 *
 * One major determining factor in the format of extents is how we handle and
 * represent extents that have been partially overwritten and thus trimmed:
 *
 * If an extent is not checksummed or compressed, when the extent is trimmed we
 * don't have to remember the extent we originally allocated and wrote: we can
 * merely adjust ptr->offset to point to the start of the data that is currently
 * live. The size field in struct bkey records the current (live) size of the
 * extent, and is also used to mean "size of region on disk that we point to" in
 * this case.
 *
 * Thus an extent that is not checksummed or compressed will consist only of a
 * list of bch_extent_ptrs, with none of the fields in
 * bch_extent_crc32/bch_extent_crc64.
 *
 * When an extent is checksummed or compressed, it's not possible to read only
 * the data that is currently live: we have to read the entire extent that was
 * originally written, and then return only the part of the extent that is
 * currently live.
 *
 * Thus, in addition to the current size of the extent in struct bkey, we need
 * to store the size of the originally allocated space - this is the
 * compressed_size and uncompressed_size fields in bch_extent_crc32/64. Also,
 * when the extent is trimmed, instead of modifying the offset field of the
 * pointer, we keep a second smaller offset field - "offset into the original
 * extent of the currently live region".
 *
 * The other major determining factor is replication and data migration:
 *
 * Each pointer may have its own bch_extent_crc32/64. When doing a replicated
 * write, we will initially write all the replicas in the same format, with the
 * same checksum type and compression format - however, when copygc runs later
 * (or tiering/cache promotion, anything that moves data), it is not in general
 * going to rewrite all the pointers at once - one of the replicas may be in a
 * bucket on one device that has very little fragmentation while another lives
 * in a bucket that has become heavily fragmented, and thus is being rewritten
 * sooner than the rest.
 *
 * Thus it will only move a subset of the pointers (or in the case of
 * tiering/cache promotion perhaps add a single pointer without dropping any
 * current pointers), and if the extent has been partially overwritten it must
 * write only the currently live portion (or copygc would not be able to reduce
 * fragmentation!) - which necessitates a different bch_extent_crc format for
 * the new pointer.
 *
 * But in the interests of space efficiency, we don't want to store one
 * bch_extent_crc for each pointer if we don't have to.
 *
 * Thus, a bch_extent consists of bch_extent_crc32s, bch_extent_crc64s, and
 * bch_extent_ptrs appended arbitrarily one after the other. We determine the
 * type of a given entry with a scheme similar to utf8 (except we're encoding a
 * type, not a size), encoding the type in the position of the first set bit:
 *
 * bch_extent_ptr    - 0b1
 * bch_extent_crc32  - 0b10
 * bch_extent_crc64  - 0b100
 *
 * We do it this way because bch_extent_crc32 is _very_ constrained on bits (and
 * bch_extent_crc64 is the least constrained); a decoding sketch follows union
 * bch_extent_entry, below.
 *
 * Then, each bch_extent_crc32/64 applies to the pointers that follow after it,
 * until the next bch_extent_crc32/64.
 *
 * If there are no bch_extent_crcs preceding a bch_extent_ptr, then that pointer
 * is neither checksummed nor compressed.
 */
/* 128 bits, sufficient for cryptographic MACs: */
struct bch_csum {
	__le64			lo;
	__le64			hi;
} __attribute__((packed, aligned(8)));

enum bch_csum_type {
	BCH_CSUM_NONE			= 0,
	BCH_CSUM_CRC32C_NONZERO		= 1,
	BCH_CSUM_CRC64_NONZERO		= 2,
	BCH_CSUM_CHACHA20_POLY1305_80	= 3,
	BCH_CSUM_CHACHA20_POLY1305_128	= 4,
	BCH_CSUM_CRC32C			= 5,
	BCH_CSUM_CRC64			= 6,
	BCH_CSUM_NR			= 7,
};

static const unsigned bch_crc_bytes[] = {
	[BCH_CSUM_NONE]				= 0,
	[BCH_CSUM_CRC32C_NONZERO]		= 4,
	[BCH_CSUM_CRC32C]			= 4,
	[BCH_CSUM_CRC64_NONZERO]		= 8,
	[BCH_CSUM_CRC64]			= 8,
	[BCH_CSUM_CHACHA20_POLY1305_80]		= 10,
	[BCH_CSUM_CHACHA20_POLY1305_128]	= 16,
};

static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type)
{
	switch (type) {
	case BCH_CSUM_CHACHA20_POLY1305_80:
	case BCH_CSUM_CHACHA20_POLY1305_128:
		return true;
	default:
		return false;
	}
}
enum bch_compression_type {
	BCH_COMPRESSION_NONE		= 0,
	BCH_COMPRESSION_LZ4_OLD		= 1,
	BCH_COMPRESSION_GZIP		= 2,
	BCH_COMPRESSION_LZ4		= 3,
	BCH_COMPRESSION_ZSTD		= 4,
	BCH_COMPRESSION_NR		= 5,
};

enum bch_extent_entry_type {
	BCH_EXTENT_ENTRY_ptr		= 0,
	BCH_EXTENT_ENTRY_crc32		= 1,
	BCH_EXTENT_ENTRY_crc64		= 2,
	BCH_EXTENT_ENTRY_crc128		= 3,
};

#define BCH_EXTENT_ENTRY_MAX	4
/* Compressed/uncompressed size are stored biased by 1: */
struct bch_extent_crc32 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u32			type:2,
				_compressed_size:7,
				_uncompressed_size:7,
				offset:7,
				_unused:1,
				csum_type:4,
				compression_type:4;
	__u32			csum;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u32			csum;
	__u32			compression_type:4,
				csum_type:4,
				_unused:1,
				offset:7,
				_uncompressed_size:7,
				_compressed_size:7,
				type:2;
#endif
} __attribute__((packed, aligned(8)));

#define CRC32_SIZE_MAX		(1U << 7)
#define CRC32_NONCE_MAX		0

struct bch_extent_crc64 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:3,
				_compressed_size:9,
				_uncompressed_size:9,
				offset:9,
				nonce:10,
				csum_type:4,
				compression_type:4,
				csum_hi:16;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			csum_hi:16,
				compression_type:4,
				csum_type:4,
				nonce:10,
				offset:9,
				_uncompressed_size:9,
				_compressed_size:9,
				type:3;
#endif
	__u64			csum_lo;
} __attribute__((packed, aligned(8)));

#define CRC64_SIZE_MAX		(1U << 9)
#define CRC64_NONCE_MAX		((1U << 10) - 1)
struct bch_extent_crc128 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:4,
				_compressed_size:13,
				_uncompressed_size:13,
				offset:13,
				nonce:13,
				csum_type:4,
				compression_type:4;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			compression_type:4,
				csum_type:4,
				nonce:13,
				offset:13,
				_uncompressed_size:13,
				_compressed_size:13,
				type:4;
#endif
	struct bch_csum		csum;
} __attribute__((packed, aligned(8)));

#define CRC128_SIZE_MAX		(1U << 13)
#define CRC128_NONCE_MAX	((1U << 13) - 1)
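/*
 * Illustrative sketch (guarded out): a zero sector extent cannot exist,
 * so storing sizes biased by 1 buys one extra sector of range out of the
 * bitfields above:
 */
#if 0
static inline unsigned
example_crc128_uncompressed_size(const struct bch_extent_crc128 *crc)
{
	return crc->_uncompressed_size + 1;	/* 0 on disk means 1 sector */
}
#endif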
/*
 * @reservation - pointer hasn't been written to, just reserved
 */
struct bch_extent_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:1,
				cached:1,
				erasure_coded:1,
				reservation:1,
				offset:44, /* 8 petabytes */
				dev:8,
				gen:8;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			gen:8,
				dev:8,
				offset:44,
				reservation:1,
				erasure_coded:1,
				cached:1,
				type:1;
#endif
} __attribute__((packed, aligned(8)));
struct bch_extent_reservation {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:6,
				unused:22,
				replicas:4,
				generation:32;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			generation:32,
				replicas:4,
				unused:22,
				type:6;
#endif
};
union bch_extent_entry {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ || __BITS_PER_LONG == 64
	unsigned long			type;
#elif __BITS_PER_LONG == 32
	struct {
		unsigned long		pad;
		unsigned long		type;
	};
#else
#error edit for your odd byteorder.
#endif
	struct bch_extent_crc32		crc32;
	struct bch_extent_crc64		crc64;
	struct bch_extent_crc128	crc128;
	struct bch_extent_ptr		ptr;
};
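/*
 * Illustrative sketch (guarded out): decoding the utf8-style tag
 * described in the comment block above - the entry type is the position
 * of the first set bit, which matches enum bch_extent_entry_type:
 */
#if 0
static inline enum bch_extent_entry_type
example_extent_entry_type(const union bch_extent_entry *e)
{
	unsigned bit = 0;

	while (!((e->type >> bit) & 1))
		bit++;		/* first set bit at position n -> type n */

	return (enum bch_extent_entry_type) bit;
}
#endif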
enum {
	BCH_EXTENT		= 128,

	/*
	 * This is kind of a hack, we're overloading the type for a boolean that
	 * really should be part of the value - BCH_EXTENT and BCH_EXTENT_CACHED
	 * have the same value type:
	 */
	BCH_EXTENT_CACHED	= 129,

	/*
	 * Persistent reservation:
	 */
	BCH_RESERVATION		= 130,
};

struct bch_extent {
	struct bch_val		v;

	union bch_extent_entry	start[0];
	__u64			_data[0];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(extent, BCH_EXTENT);

struct bch_reservation {
	struct bch_val		v;

	__le32			generation;
	__u8			replicas;
	__u8			pad[3];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(reservation, BCH_RESERVATION);
/* Maximum size (in u64s) a single pointer could be: */
#define BKEY_EXTENT_PTR_U64s_MAX				\
	((sizeof(struct bch_extent_crc128) +			\
	  sizeof(struct bch_extent_ptr)) / sizeof(u64))

/* Maximum possible size of an entire extent value: */
#define BKEY_EXTENT_VAL_U64s_MAX				\
	(BKEY_EXTENT_PTR_U64s_MAX * (BCH_REPLICAS_MAX + 1))

#define BKEY_PADDED(key)	__BKEY_PADDED(key, BKEY_EXTENT_VAL_U64s_MAX)

/* Maximum possible size of an entire extent, key + value: */
#define BKEY_EXTENT_U64s_MAX		(BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)

/* Btree pointers don't carry around checksums: */
#define BKEY_BTREE_PTR_VAL_U64s_MAX				\
	((sizeof(struct bch_extent_ptr)) / sizeof(u64) * BCH_REPLICAS_MAX)
#define BKEY_BTREE_PTR_U64s_MAX					\
	(BKEY_U64s + BKEY_BTREE_PTR_VAL_U64s_MAX)
/* Inodes */

#define BLOCKDEV_INODE_MAX	4096

#define BCACHEFS_ROOT_INO	4096

enum bch_inode_types {
	BCH_INODE_FS		= 128,
	BCH_INODE_BLOCKDEV	= 129,
	BCH_INODE_GENERATION	= 130,
};

struct bch_inode {
	struct bch_val		v;

	__le64			bi_hash_seed;
	__le32			bi_flags;
	__le16			bi_mode;
	__u8			fields[0];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(inode, BCH_INODE_FS);

struct bch_inode_generation {
	struct bch_val		v;

	__le32			bi_generation;
	__le32			pad;
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(inode_generation, BCH_INODE_GENERATION);
#define BCH_INODE_FIELDS()				\
	BCH_INODE_FIELD(bi_atime, 64)			\
	BCH_INODE_FIELD(bi_ctime, 64)			\
	BCH_INODE_FIELD(bi_mtime, 64)			\
	BCH_INODE_FIELD(bi_otime, 64)			\
	BCH_INODE_FIELD(bi_size, 64)			\
	BCH_INODE_FIELD(bi_sectors, 64)			\
	BCH_INODE_FIELD(bi_uid, 32)			\
	BCH_INODE_FIELD(bi_gid, 32)			\
	BCH_INODE_FIELD(bi_nlink, 32)			\
	BCH_INODE_FIELD(bi_generation, 32)		\
	BCH_INODE_FIELD(bi_dev, 32)			\
	BCH_INODE_FIELD(bi_data_checksum, 8)		\
	BCH_INODE_FIELD(bi_compression, 8)		\
	BCH_INODE_FIELD(bi_project, 32)			\
	BCH_INODE_FIELD(bi_background_compression, 8)	\
	BCH_INODE_FIELD(bi_data_replicas, 8)		\
	BCH_INODE_FIELD(bi_promote_target, 16)		\
	BCH_INODE_FIELD(bi_foreground_target, 16)	\
	BCH_INODE_FIELD(bi_background_target, 16)

#define BCH_INODE_FIELDS_INHERIT()			\
	BCH_INODE_FIELD(bi_data_checksum)		\
	BCH_INODE_FIELD(bi_compression)			\
	BCH_INODE_FIELD(bi_project)			\
	BCH_INODE_FIELD(bi_background_compression)	\
	BCH_INODE_FIELD(bi_data_replicas)		\
	BCH_INODE_FIELD(bi_promote_target)		\
	BCH_INODE_FIELD(bi_foreground_target)		\
	BCH_INODE_FIELD(bi_background_target)
enum {
	/*
	 * User flags (get/settable with FS_IOC_*FLAGS, correspond to FS_*_FL
	 * flags)
	 */
	__BCH_INODE_SYNC		= 0,
	__BCH_INODE_IMMUTABLE		= 1,
	__BCH_INODE_APPEND		= 2,
	__BCH_INODE_NODUMP		= 3,
	__BCH_INODE_NOATIME		= 4,

	/* internal flags */
	__BCH_INODE_I_SIZE_DIRTY	= 5,
	__BCH_INODE_I_SECTORS_DIRTY	= 6,
	__BCH_INODE_UNLINKED		= 7,

	/* bits 20+ reserved for packed fields below: */
};
#define BCH_INODE_SYNC		(1 << __BCH_INODE_SYNC)
#define BCH_INODE_IMMUTABLE	(1 << __BCH_INODE_IMMUTABLE)
#define BCH_INODE_APPEND	(1 << __BCH_INODE_APPEND)
#define BCH_INODE_NODUMP	(1 << __BCH_INODE_NODUMP)
#define BCH_INODE_NOATIME	(1 << __BCH_INODE_NOATIME)
#define BCH_INODE_I_SIZE_DIRTY	(1 << __BCH_INODE_I_SIZE_DIRTY)
#define BCH_INODE_I_SECTORS_DIRTY (1 << __BCH_INODE_I_SECTORS_DIRTY)
#define BCH_INODE_UNLINKED	(1 << __BCH_INODE_UNLINKED)

LE32_BITMASK(INODE_STR_HASH,	struct bch_inode, bi_flags, 20, 24);
LE32_BITMASK(INODE_NR_FIELDS,	struct bch_inode, bi_flags, 24, 32);
struct bch_inode_blockdev {
	struct bch_val		v;

	__le64			i_size;
	__le64			i_flags;

	/* Seconds: */
	__le64			i_ctime;
	__le64			i_mtime;

	uuid_le			i_uuid;
	__u8			i_label[32];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(inode_blockdev, BCH_INODE_BLOCKDEV);

/* Thin provisioned volume, or cache for another block device? */
LE64_BITMASK(CACHED_DEV,	struct bch_inode_blockdev, i_flags, 0, 1)
/* Dirents */

/*
 * Dirents (and xattrs) have to implement string lookups; since our b-tree
 * doesn't support arbitrary length strings for the key, we instead index by a
 * 64 bit hash (currently truncated sha1) of the string, stored in the offset
 * field of the key - using linear probing to resolve hash collisions. This also
 * provides us with the readdir cookie POSIX requires.
 *
 * Linear probing requires us to use whiteouts for deletions, in the event of a
 * hash collision:
 */

enum {
	BCH_DIRENT		= 128,
	BCH_DIRENT_WHITEOUT	= 129,
};

struct bch_dirent {
	struct bch_val		v;

	/* Target inode number: */
	__le64			d_inum;

	/*
	 * Copy of mode bits 12-15 from the target inode - so userspace can get
	 * the filetype without having to do a stat()
	 */
	__u8			d_type;

	__u8			d_name[];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(dirent, BCH_DIRENT);

#define BCH_NAME_MAX	(U8_MAX * sizeof(u64) -			\
			 sizeof(struct bkey) -			\
			 offsetof(struct bch_dirent, d_name))
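/*
 * Illustrative sketch (guarded out): a dirent lookup hashes the name and
 * searches the dirents btree at (directory inode, hash), probing forward
 * on collision; the hash itself comes from the str_hash code, not shown:
 */
#if 0
static inline struct bpos example_dirent_pos(__u64 dir_inum, __u64 name_hash)
{
	/* inode field = containing directory, offset field = hash of name: */
	return POS(dir_inum, name_hash);
}
#endif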
/* Xattrs */

enum {
	BCH_XATTR		= 128,
	BCH_XATTR_WHITEOUT	= 129,
};

#define BCH_XATTR_INDEX_USER			0
#define BCH_XATTR_INDEX_POSIX_ACL_ACCESS	1
#define BCH_XATTR_INDEX_POSIX_ACL_DEFAULT	2
#define BCH_XATTR_INDEX_TRUSTED			3
#define BCH_XATTR_INDEX_SECURITY		4

struct bch_xattr {
	struct bch_val		v;
	__u8			x_type;
	__u8			x_name_len;
	__le16			x_val_len;
	__u8			x_name[];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(xattr, BCH_XATTR);
/* Bucket/allocation information: */

enum {
	BCH_ALLOC		= 128,
};

enum {
	BCH_ALLOC_FIELD_READ_TIME	= 0,
	BCH_ALLOC_FIELD_WRITE_TIME	= 1,
};

struct bch_alloc {
	struct bch_val		v;
	__u8			fields;
	__u8			gen;
	__u8			data[];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(alloc, BCH_ALLOC);
/* Quotas: */

enum {
	BCH_QUOTA		= 128,
};

enum quota_types {
	QTYP_USR		= 0,
	QTYP_GRP		= 1,
	QTYP_PRJ		= 2,
	QTYP_NR			= 3,
};

enum quota_counters {
	Q_SPC			= 0,
	Q_INO			= 1,
	Q_COUNTERS		= 2,
};

struct bch_quota_counter {
	__le64			hardlimit;
	__le64			softlimit;
};

struct bch_quota {
	struct bch_val		v;
	struct bch_quota_counter c[Q_COUNTERS];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(quota, BCH_QUOTA);
/* Optional/variable size superblock sections: */

struct bch_sb_field {
	__u64			_data[0];
	__le32			u64s;
	__le32			type;
};

#define BCH_SB_FIELDS()		\
	x(journal,	0)	\
	x(members,	1)	\
	x(crypt,	2)	\
	x(replicas,	3)	\
	x(quota,	4)	\
	x(disk_groups,	5)	\
	x(clean,	6)

enum bch_sb_field_type {
#define x(f, nr)	BCH_SB_FIELD_##f = nr,
	BCH_SB_FIELDS()
#undef x
	BCH_SB_FIELD_NR
};
/* BCH_SB_FIELD_journal: */

struct bch_sb_field_journal {
	struct bch_sb_field	field;
	__le64			buckets[0];
};
/* BCH_SB_FIELD_members: */

struct bch_member {
	uuid_le			uuid;
	__le64			nbuckets;	/* device size */
	__le16			first_bucket;	/* index of first bucket used */
	__le16			bucket_size;	/* sectors */
	__le32			pad;
	__le64			last_mount;	/* time_t */

	__le64			flags[2];
};

LE64_BITMASK(BCH_MEMBER_STATE,		struct bch_member, flags[0],  0,  4)
/* 4-10 unused, was TIER, HAS_(META)DATA */
LE64_BITMASK(BCH_MEMBER_REPLACEMENT,	struct bch_member, flags[0], 10, 14)
LE64_BITMASK(BCH_MEMBER_DISCARD,	struct bch_member, flags[0], 14, 15)
LE64_BITMASK(BCH_MEMBER_DATA_ALLOWED,	struct bch_member, flags[0], 15, 20)
LE64_BITMASK(BCH_MEMBER_GROUP,		struct bch_member, flags[0], 20, 28)
LE64_BITMASK(BCH_MEMBER_DURABILITY,	struct bch_member, flags[0], 28, 30)

#define BCH_TIER_MAX			4U

#if 0
LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS,	 struct bch_member, flags[1],  0, 20);
LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS, struct bch_member, flags[1], 20, 40);
#endif
enum bch_member_state {
	BCH_MEMBER_STATE_RW		= 0,
	BCH_MEMBER_STATE_RO		= 1,
	BCH_MEMBER_STATE_FAILED		= 2,
	BCH_MEMBER_STATE_SPARE		= 3,
	BCH_MEMBER_STATE_NR		= 4,
};

enum cache_replacement {
	CACHE_REPLACEMENT_LRU		= 0,
	CACHE_REPLACEMENT_FIFO		= 1,
	CACHE_REPLACEMENT_RANDOM	= 2,
	CACHE_REPLACEMENT_NR		= 3,
};

struct bch_sb_field_members {
	struct bch_sb_field	field;
	struct bch_member	members[0];
};
/* BCH_SB_FIELD_crypt: */

struct bch_key {
	__le64			key[4];
};

#define BCH_KEY_MAGIC					\
	(((u64) 'b' <<  0)|((u64) 'c' <<  8)|		\
	 ((u64) 'h' << 16)|((u64) '*' << 24)|		\
	 ((u64) '*' << 32)|((u64) 'k' << 40)|		\
	 ((u64) 'e' << 48)|((u64) 'y' << 56))

struct bch_encrypted_key {
	__le64			magic;
	struct bch_key		key;
};

/*
 * If this field is present in the superblock, it stores an encryption key which
 * is used to encrypt all other data/metadata. The key will normally be
 * encrypted with the key userspace provides, but if encryption has been turned
 * off we'll just store the master key unencrypted in the superblock so we can
 * access the previously encrypted data.
 */
struct bch_sb_field_crypt {
	struct bch_sb_field	field;

	__le64			flags;
	__le64			kdf_flags;
	struct bch_encrypted_key key;
};
LE64_BITMASK(BCH_CRYPT_KDF_TYPE,	struct bch_sb_field_crypt, flags, 0, 4);

/* stored as base 2 log of scrypt params: */
LE64_BITMASK(BCH_KDF_SCRYPT_N,	struct bch_sb_field_crypt, kdf_flags,  0, 16);
LE64_BITMASK(BCH_KDF_SCRYPT_R,	struct bch_sb_field_crypt, kdf_flags, 16, 32);
LE64_BITMASK(BCH_KDF_SCRYPT_P,	struct bch_sb_field_crypt, kdf_flags, 32, 48);
/* BCH_SB_FIELD_replicas: */

enum bch_data_type {
	BCH_DATA_NONE		= 0,
	BCH_DATA_SB		= 1,
	BCH_DATA_JOURNAL	= 2,
	BCH_DATA_BTREE		= 3,
	BCH_DATA_USER		= 4,
	BCH_DATA_CACHED		= 5,
	BCH_DATA_NR		= 6,
};

struct bch_replicas_entry {
	u8			data_type;
	u8			nr;
	u8			devs[0];
};

struct bch_sb_field_replicas {
	struct bch_sb_field	field;
	struct bch_replicas_entry entries[0];
};
/* BCH_SB_FIELD_quota: */

struct bch_sb_quota_counter {
	__le32				timelimit;
	__le32				warnlimit;
};

struct bch_sb_quota_type {
	__le64				flags;
	struct bch_sb_quota_counter	c[Q_COUNTERS];
};

struct bch_sb_field_quota {
	struct bch_sb_field		field;
	struct bch_sb_quota_type	q[QTYP_NR];
} __attribute__((packed, aligned(8)));
/* BCH_SB_FIELD_disk_groups: */

#define BCH_SB_LABEL_SIZE		32

struct bch_disk_group {
	__u8			label[BCH_SB_LABEL_SIZE];
	__le64			flags[2];
};

LE64_BITMASK(BCH_GROUP_DELETED,		struct bch_disk_group, flags[0], 0,  1)
LE64_BITMASK(BCH_GROUP_DATA_ALLOWED,	struct bch_disk_group, flags[0], 1,  6)
LE64_BITMASK(BCH_GROUP_PARENT,		struct bch_disk_group, flags[0], 6, 24)

struct bch_sb_field_disk_groups {
	struct bch_sb_field	field;
	struct bch_disk_group	entries[0];
};
/*
 * On clean shutdown, store btree roots and current journal sequence number in
 * the superblock:
 */

struct jset_entry {
	__le16			u64s;
	__u8			btree_id;
	__u8			level;
	__u8			type; /* designates what this jset holds */
	__u8			pad[3];

	union {
		struct bkey_i	start[0];
		__u64		_data[0];
	};
};

struct bch_sb_field_clean {
	struct bch_sb_field	field;

	__le32			flags;
	__le16			read_clock;
	__le16			write_clock;
	__le64			journal_seq;

	union {
		struct jset_entry start[0];
		__u64		_data[0];
	};
};
/* Superblock: */

/*
 * Version 8:	BCH_SB_ENCODED_EXTENT_MAX_BITS
 *		BCH_MEMBER_DATA_ALLOWED
 * Version 9:	incompatible extent nonce change
 */

#define BCH_SB_VERSION_MIN		7
#define BCH_SB_VERSION_EXTENT_MAX	8
#define BCH_SB_VERSION_EXTENT_NONCE_V1	9
#define BCH_SB_VERSION_MAX		9

#define BCH_SB_SECTOR			8
#define BCH_SB_MEMBERS_MAX		64 /* XXX kill */
struct bch_sb_layout {
	uuid_le			magic;	/* bcachefs superblock UUID */
	__u8			layout_type;
	__u8			sb_max_size_bits; /* base 2 of 512 byte sectors */
	__u8			nr_superblocks;
	__u8			pad[5];
	__le64			sb_offset[61];
} __attribute__((packed, aligned(8)));

#define BCH_SB_LAYOUT_SECTOR	7
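/*
 * Illustrative sketch (guarded out): since backup superblocks have no
 * fixed location, every copy is found by walking sb_offset[] in the
 * layout:
 */
#if 0
static inline __u64 example_nth_sb_sector(const struct bch_sb_layout *l,
					   unsigned n)
{
	/* valid for n < l->nr_superblocks: */
	return __le64_to_cpu(l->sb_offset[n]);
}
#endif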
/*
 * @offset	- sector where this sb was written
 * @version	- on disk format version
 * @magic	- identifies as a bcachefs superblock (BCACHE_MAGIC)
 * @seq		- identifies most recent superblock, incremented each time
 *		  superblock is written
 * @uuid	- used for generating various magic numbers and identifying
 *		  member devices, never changes
 * @user_uuid	- user visible UUID, may be changed
 * @label	- filesystem label
 * @features	- enabled incompatible features
 */
struct bch_sb {
	struct bch_csum		csum;
	__le64			version;
	uuid_le			magic;
	uuid_le			uuid;
	uuid_le			user_uuid;
	__u8			label[BCH_SB_LABEL_SIZE];
	__le64			offset;
	__le64			seq;

	__le16			block_size;
	__u8			dev_idx;
	__u8			nr_devices;
	__le32			u64s;

	__le64			time_base_lo;
	__le32			time_base_hi;
	__le32			time_precision;

	__le64			flags[8];
	__le64			features[2];
	__le64			compat[2];

	struct bch_sb_layout	layout;

	union {
		struct bch_sb_field start[0];
		__le64		_data[0];
	};
} __attribute__((packed, aligned(8)));
/*
 * Flags:
 * BCH_SB_INITIALIZED	- set on first mount
 * BCH_SB_CLEAN		- did we shut down cleanly? Just a hint, doesn't affect
 *			  behaviour of mount/recovery path:
 * BCH_SB_INODE_32BIT	- limit inode numbers to 32 bits
 * BCH_SB_128_BIT_MACS	- 128 bit macs instead of 80
 * BCH_SB_ENCRYPTION_TYPE - if nonzero encryption is enabled; overrides
 *			  DATA/META_CSUM_TYPE. Also indicates encryption
 *			  algorithm in use, if/when we get more than one
 */
LE16_BITMASK(BCH_SB_BLOCK_SIZE,		struct bch_sb, block_size, 0, 16);

LE64_BITMASK(BCH_SB_INITIALIZED,	struct bch_sb, flags[0],  0,  1);
LE64_BITMASK(BCH_SB_CLEAN,		struct bch_sb, flags[0],  1,  2);
LE64_BITMASK(BCH_SB_CSUM_TYPE,		struct bch_sb, flags[0],  2,  8);
LE64_BITMASK(BCH_SB_ERROR_ACTION,	struct bch_sb, flags[0],  8, 12);

LE64_BITMASK(BCH_SB_BTREE_NODE_SIZE,	struct bch_sb, flags[0], 12, 28);

LE64_BITMASK(BCH_SB_GC_RESERVE,		struct bch_sb, flags[0], 28, 33);
LE64_BITMASK(BCH_SB_ROOT_RESERVE,	struct bch_sb, flags[0], 33, 40);

LE64_BITMASK(BCH_SB_META_CSUM_TYPE,	struct bch_sb, flags[0], 40, 44);
LE64_BITMASK(BCH_SB_DATA_CSUM_TYPE,	struct bch_sb, flags[0], 44, 48);

LE64_BITMASK(BCH_SB_META_REPLICAS_WANT,	struct bch_sb, flags[0], 48, 52);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_WANT,	struct bch_sb, flags[0], 52, 56);

LE64_BITMASK(BCH_SB_POSIX_ACL,		struct bch_sb, flags[0], 56, 57);
LE64_BITMASK(BCH_SB_USRQUOTA,		struct bch_sb, flags[0], 57, 58);
LE64_BITMASK(BCH_SB_GRPQUOTA,		struct bch_sb, flags[0], 58, 59);
LE64_BITMASK(BCH_SB_PRJQUOTA,		struct bch_sb, flags[0], 59, 60);

LE64_BITMASK(BCH_SB_STR_HASH_TYPE,	struct bch_sb, flags[1],  0,  4);
LE64_BITMASK(BCH_SB_COMPRESSION_TYPE,	struct bch_sb, flags[1],  4,  8);
LE64_BITMASK(BCH_SB_INODE_32BIT,	struct bch_sb, flags[1],  8,  9);

LE64_BITMASK(BCH_SB_128_BIT_MACS,	struct bch_sb, flags[1],  9, 10);
LE64_BITMASK(BCH_SB_ENCRYPTION_TYPE,	struct bch_sb, flags[1], 10, 14);

/*
 * Max size of an extent that may require bouncing to read or write
 * (checksummed, compressed): 64k
 */
LE64_BITMASK(BCH_SB_ENCODED_EXTENT_MAX_BITS,
					struct bch_sb, flags[1], 14, 20);

LE64_BITMASK(BCH_SB_META_REPLICAS_REQ,	struct bch_sb, flags[1], 20, 24);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_REQ,	struct bch_sb, flags[1], 24, 28);

LE64_BITMASK(BCH_SB_PROMOTE_TARGET,	struct bch_sb, flags[1], 28, 40);
LE64_BITMASK(BCH_SB_FOREGROUND_TARGET,	struct bch_sb, flags[1], 40, 52);
LE64_BITMASK(BCH_SB_BACKGROUND_TARGET,	struct bch_sb, flags[1], 52, 64);

LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE,
					struct bch_sb, flags[2],  0,  4);
/* Features: */
enum bch_sb_features {
	BCH_FEATURE_LZ4			= 0,
	BCH_FEATURE_GZIP		= 1,
	BCH_FEATURE_ZSTD		= 2,
	BCH_FEATURE_ATOMIC_NLINK	= 3,
	BCH_FEATURE_NR,
};
/* options: */

#define BCH_REPLICAS_MAX		4U

enum bch_error_actions {
	BCH_ON_ERROR_CONTINUE		= 0,
	BCH_ON_ERROR_RO			= 1,
	BCH_ON_ERROR_PANIC		= 2,
	BCH_NR_ERROR_ACTIONS		= 3,
};

enum bch_csum_opts {
	BCH_CSUM_OPT_NONE		= 0,
	BCH_CSUM_OPT_CRC32C		= 1,
	BCH_CSUM_OPT_CRC64		= 2,
	BCH_CSUM_OPT_NR			= 3,
};

enum bch_str_hash_opts {
	BCH_STR_HASH_CRC32C		= 0,
	BCH_STR_HASH_CRC64		= 1,
	BCH_STR_HASH_SIPHASH		= 2,
	BCH_STR_HASH_NR			= 3,
};

#define BCH_COMPRESSION_TYPES()		\
	x(NONE)				\
	x(LZ4)				\
	x(GZIP)				\
	x(ZSTD)

enum bch_compression_opts {
#define x(t) BCH_COMPRESSION_OPT_##t,
	BCH_COMPRESSION_TYPES()
#undef x
	BCH_COMPRESSION_OPT_NR
};
/*
 * Magic numbers
 *
 * The various other data structures have their own magic numbers, which are
 * xored with the first part of the cache set's UUID
 */

#define BCACHE_MAGIC							\
	UUID_LE(0xf67385c6, 0x1a4e, 0xca45,				\
		0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)

#define BCACHEFS_STATFS_MAGIC		0xca451a4e

#define JSET_MAGIC		__cpu_to_le64(0x245235c1a3625032ULL)
#define BSET_MAGIC		__cpu_to_le64(0x90135c78b99e07f5ULL)

static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
{
	__le64 ret;
	memcpy(&ret, &sb->uuid, sizeof(ret));
	return ret;
}

static inline __u64 __jset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch2_sb_magic(sb) ^ JSET_MAGIC);
}

static inline __u64 __bset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch2_sb_magic(sb) ^ BSET_MAGIC);
}
/* Journal */

#define BCACHE_JSET_VERSION_UUIDv1	1
#define BCACHE_JSET_VERSION_UUID	1	/* Always latest UUID format */
#define BCACHE_JSET_VERSION_JKEYS	2
#define BCACHE_JSET_VERSION		2

#define JSET_KEYS_U64s	(sizeof(struct jset_entry) / sizeof(__u64))

#define BCH_JSET_ENTRY_TYPES()			\
	x(btree_keys,		0)		\
	x(btree_root,		1)		\
	x(prio_ptrs,		2)		\
	x(blacklist,		3)		\
	x(blacklist_v2,		4)

enum {
#define x(f, nr)	BCH_JSET_ENTRY_##f	= nr,
	BCH_JSET_ENTRY_TYPES()
#undef x
	BCH_JSET_ENTRY_NR
};
/*
 * Journal sequence numbers can be blacklisted: bsets record the max sequence
 * number of all the journal entries they contain updates for, so that on
 * recovery we can ignore those bsets that contain index updates newer than what
 * made it into the journal.
 *
 * This means that we can't reuse that journal_seq - we have to skip it, and
 * then record that we skipped it so that the next time we crash and recover we
 * don't think there was a missing journal entry.
 */
struct jset_entry_blacklist {
	struct jset_entry	entry;
	__le64			seq;
};

struct jset_entry_blacklist_v2 {
	struct jset_entry	entry;
	__le64			start;
	__le64			end;
};
/*
 * On disk format for a journal entry:
 * seq is monotonically increasing; every journal entry has its own unique
 * sequence number.
 *
 * last_seq is the oldest journal entry that still has keys the btree hasn't
 * flushed to disk yet.
 *
 * version is for on disk format changes.
 */
struct jset {
	struct bch_csum		csum;

	__le64			magic;
	__le64			seq;
	__le32			version;
	__le32			flags;

	__le32			u64s; /* size of d[] in u64s */

	__u8			encrypted_start[0];

	__le16			read_clock;
	__le16			write_clock;

	/* Sequence number of oldest dirty journal entry */
	__le64			last_seq;

	union {
		struct jset_entry start[0];
		__u64		_data[0];
	};
} __attribute__((packed, aligned(8)));
LE32_BITMASK(JSET_CSUM_TYPE,	struct jset, flags, 0, 4);
LE32_BITMASK(JSET_BIG_ENDIAN,	struct jset, flags, 4, 5);

#define BCH_JOURNAL_BUCKETS_MIN		20
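/*
 * Illustrative sketch (guarded out): journal replay, as described at the
 * top of this file, walks the entries in each jset and reinserts their
 * keys; the real code also dispatches on entry type and handles errors:
 */
#if 0
static inline void example_walk_jset(struct jset *j)
{
	struct jset_entry *entry = j->start;
	__u64 *end = (__u64 *) j->start + __le32_to_cpu(j->u64s);

	while ((__u64 *) entry < end) {
		/*
		 * entry->start[] holds entry->u64s u64s worth of struct
		 * bkey_i keys destined for btree entry->btree_id:
		 */
		entry = (struct jset_entry *)
			((__u64 *) (entry + 1) + __le16_to_cpu(entry->u64s));
	}
}
#endif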
/* Btree: */

#define DEFINE_BCH_BTREE_IDS()				\
	DEF_BTREE_ID(EXTENTS,	0, "extents")		\
	DEF_BTREE_ID(INODES,	1, "inodes")		\
	DEF_BTREE_ID(DIRENTS,	2, "dirents")		\
	DEF_BTREE_ID(XATTRS,	3, "xattrs")		\
	DEF_BTREE_ID(ALLOC,	4, "alloc")		\
	DEF_BTREE_ID(QUOTAS,	5, "quotas")

#define DEF_BTREE_ID(kwd, val, name) BTREE_ID_##kwd = val,

enum btree_id {
	DEFINE_BCH_BTREE_IDS()
	BTREE_ID_NR
};

#undef DEF_BTREE_ID

#define BTREE_MAX_DEPTH		4U
/* Btree nodes */

/* Version 1: seed pointer into btree node checksum */
#define BCACHE_BSET_CSUM		1
#define BCACHE_BSET_KEY_v1		2
#define BCACHE_BSET_JOURNAL_SEQ		3
#define BCACHE_BSET_VERSION		3
/*
 * On disk a btree node is a list/log of these; within each set the keys are
 * sorted
 */
struct bset {
	__le64			seq;

	/*
	 * Highest journal entry this bset contains keys for.
	 * If on recovery we don't see that journal entry, this bset is ignored:
	 * this allows us to preserve the order of all index updates after a
	 * crash, since the journal records a total order of all index updates
	 * and anything that didn't make it to the journal doesn't get used.
	 */
	__le64			journal_seq;

	__le32			flags;
	__le16			version;
	__le16			u64s; /* count of d[] in u64s */

	union {
		struct bkey_packed start[0];
		__u64		_data[0];
	};
} __attribute__((packed, aligned(8)));
LE32_BITMASK(BSET_CSUM_TYPE,	struct bset, flags, 0, 4);

LE32_BITMASK(BSET_BIG_ENDIAN,	struct bset, flags, 4, 5);
LE32_BITMASK(BSET_SEPARATE_WHITEOUTS,
				struct bset, flags, 5, 6);
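/*
 * Illustrative sketch (guarded out): keys in a bset are laid out end to
 * end, each self-describing via its u64s field, so iteration is pure
 * pointer arithmetic - this is the "no random access" mentioned at the
 * top of this file:
 */
#if 0
static inline void example_walk_bset(struct bset *i)
{
	struct bkey_packed *k = i->start;
	__u64 *end = (__u64 *) i->start + __le16_to_cpu(i->u64s);

	while ((__u64 *) k < end)
		k = (struct bkey_packed *) ((__u64 *) k + k->u64s);
}
#endif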
struct btree_node {
	struct bch_csum		csum;
	__le64			magic;

	/* this flags field is encrypted, unlike bset->flags: */
	__le64			flags;

	/* Closed interval: */
	struct bpos		min_key;
	struct bpos		max_key;
	struct bch_extent_ptr	ptr;
	struct bkey_format	format;

	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];
	};
	};
} __attribute__((packed, aligned(8)));
LE64_BITMASK(BTREE_NODE_ID,	struct btree_node, flags,  0,  4);
LE64_BITMASK(BTREE_NODE_LEVEL,	struct btree_node, flags,  4,  8);
LE64_BITMASK(BTREE_NODE_SEQ,	struct btree_node, flags, 32, 64);
struct btree_node_entry {
	struct bch_csum		csum;

	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];
	};
	};
} __attribute__((packed, aligned(8)));
#endif /* _BCACHEFS_FORMAT_H */