/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_FORMAT_H
#define _BCACHEFS_FORMAT_H
/*
 * bcachefs on disk data structures
 *
 * There are three main types of on disk data structures in bcachefs (this is
 * reduced from 5 in bcache)
 *
 *  - superblock
 *  - journal
 *  - btree
 *
 * The btree is the primary structure; most metadata exists as keys in the
 * various btrees. There are only a small number of btrees, they're not
 * sharded - we have one btree for extents, another for inodes, et cetera.
 *
 * SUPERBLOCK:
 *
 * The superblock contains the location of the journal, the list of devices in
 * the filesystem, and in general any metadata we need in order to decide
 * whether we can start a filesystem or prior to reading the journal/btree
 * roots.
 *
 * The superblock is extensible, and most of the contents of the superblock are
 * in variable length, type tagged fields; see struct bch_sb_field.
 *
 * Backup superblocks do not reside in a fixed location; also, superblocks do
 * not have a fixed size. To locate backup superblocks we have struct
 * bch_sb_layout; we store a copy of this inside every superblock, and also
 * before the first superblock.
 *
 * JOURNAL:
 *
 * The journal primarily records btree updates in the order they occurred;
 * journal replay consists of just iterating over all the keys in the open
 * journal entries and re-inserting them into the btrees.
 *
 * The journal also contains entry types for the btree roots, and blacklisted
 * journal sequence numbers (see journal_seq_blacklist.c).
 *
 * BTREE:
 *
 * bcachefs btrees are copy on write b+ trees, where nodes are big (typically
 * 128k-256k) and log structured. We use struct btree_node for writing the first
 * entry in a given node (offset 0), and struct btree_node_entry for all
 * subsequent writes.
 *
 * After the header, btree node entries contain a list of keys in sorted order.
 * Values are stored inline with the keys; since values are variable length (and
 * keys effectively are variable length too, due to packing) we can't do random
 * access without building up additional in memory tables in the btree node read
 * path.
 *
 * BTREE KEYS (struct bkey):
 *
 * The various btrees share a common format for the key - so as to avoid
 * switching in fastpath lookup/comparison code - but define their own
 * structures for the key values.
 *
 * The size of a key/value pair is stored as a u8 in units of u64s, so the max
 * size is just under 2k. The common part also contains a type tag for the
 * value, and a format field indicating whether the key is packed or not (and
 * also meant to allow adding new key fields in the future, if desired).
 *
 * bkeys, when stored within a btree node, may also be packed. In that case, the
 * bkey_format in that node is used to unpack it. Packed bkeys mean that we can
 * be generous with field sizes in the common part of the key format (64 bit
 * inode number, 64 bit offset, 96 bit version field, etc.) for negligible cost.
 */
#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/uuid.h>
#include "vstructs.h"
#define BITMASK(name, type, field, offset, end)				\
static const unsigned	name##_OFFSET = offset;				\
static const unsigned	name##_BITS = (end - offset);			\
									\
static inline __u64 name(const type *k)				\
{									\
	return (k->field >> offset) & ~(~0ULL << (end - offset));	\
}									\
									\
static inline void SET_##name(type *k, __u64 v)			\
{									\
	k->field &= ~(~(~0ULL << (end - offset)) << offset);		\
	k->field |= (v & ~(~0ULL << (end - offset))) << offset;	\
}

#define LE_BITMASK(_bits, name, type, field, offset, end)		\
static const unsigned	name##_OFFSET = offset;				\
static const unsigned	name##_BITS = (end - offset);			\
static const __u##_bits	name##_MAX = (1ULL << (end - offset)) - 1;	\
									\
static inline __u64 name(const type *k)				\
{									\
	return (__le##_bits##_to_cpu(k->field) >> offset) &		\
		~(~0ULL << (end - offset));				\
}									\
									\
static inline void SET_##name(type *k, __u64 v)			\
{									\
	__u##_bits new = __le##_bits##_to_cpu(k->field);		\
									\
	new &= ~(~(~0ULL << (end - offset)) << offset);			\
	new |= (v & ~(~0ULL << (end - offset))) << offset;		\
	k->field = __cpu_to_le##_bits(new);				\
}

#define LE16_BITMASK(n, t, f, o, e)	LE_BITMASK(16, n, t, f, o, e)
#define LE32_BITMASK(n, t, f, o, e)	LE_BITMASK(32, n, t, f, o, e)
#define LE64_BITMASK(n, t, f, o, e)	LE_BITMASK(64, n, t, f, o, e)
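/*
 * For example, the invocation later in this file
 *
 *	LE64_BITMASK(BCH_SB_CLEAN, struct bch_sb, flags[0], 1, 2);
 *
 * generates BCH_SB_CLEAN(sb) and SET_BCH_SB_CLEAN(sb, v) accessors for bit 1
 * of the little-endian field flags[0] (for a struct bch_sb *sb), plus the
 * constants BCH_SB_CLEAN_OFFSET, BCH_SB_CLEAN_BITS and BCH_SB_CLEAN_MAX.
 */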
struct bkey_format {
	__u8		key_u64s;
	__u8		nr_fields;
	/* One unused slot for now: */
	__u8		bits_per_field[6];
	__le64		field_offset[6];
};
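/*
 * Packing sketch (illustrative): for each field, a packed bkey stores
 * (field - field_offset[i]) in bits_per_field[i] bits, so a format whose
 * field_offset biases sit near the keys actually present in a node can shrink
 * e.g. the 64 bit inode field down to a handful of bits.
 */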
/* Btree keys - all units are in sectors */

struct bpos {
	/*
	 * Word order matches machine byte order - btree code treats a bpos as a
	 * single large integer, for search/comparison purposes
	 *
	 * Note that wherever a bpos is embedded in another on disk data
	 * structure, it has to be byte swabbed when reading in metadata that
	 * wasn't written in native endian order:
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u32		snapshot;
	__u64		offset;
	__u64		inode;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	__u64		inode;
	__u64		offset;		/* Points to end of extent - sectors */
	__u32		snapshot;
#else
#error edit for your odd byteorder.
#endif
} __attribute__((packed, aligned(4)));
#define KEY_INODE_MAX		((__u64)~0ULL)
#define KEY_OFFSET_MAX		((__u64)~0ULL)
#define KEY_SNAPSHOT_MAX	((__u32)~0U)
#define KEY_SIZE_MAX		((__u32)~0U)
static inline struct bpos SPOS(__u64 inode, __u64 offset, __u32 snapshot)
{
	return (struct bpos) {
		.inode		= inode,
		.offset		= offset,
		.snapshot	= snapshot,
	};
}
#define POS_MIN			SPOS(0, 0, 0)
#define POS_MAX			SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, 0)
#define SPOS_MAX		SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, KEY_SNAPSHOT_MAX)
#define POS(_inode, _offset)	SPOS(_inode, _offset, 0)
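/*
 * Illustrative only: since the btree compares a bpos as one large
 * (inode, offset, snapshot) integer,
 *
 *	POS(1, 0) < POS(1, 1) < SPOS(1, 1, 1) < POS(2, 0)
 *
 * and SPOS_MAX is the greatest possible position.
 */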
/* Empty placeholder struct, for container_of() */
struct bch_val {
	__u64		__nothing[0];
};

struct bversion {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u64		lo;
	__u32		hi;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	__u32		hi;
	__u64		lo;
#endif
} __attribute__((packed, aligned(4)));
struct bkey {
	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#else
#error edit for your odd byteorder.
#endif

	/* Type of the value */
	__u8		type;

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u8		pad[1];

	struct bversion	version;
	__u32		size;		/* extent size, in sectors */
	struct bpos	p;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	struct bpos	p;
	__u32		size;		/* extent size, in sectors */
	struct bversion	version;

	__u8		pad[1];
#endif
} __attribute__((packed, aligned(8)));
struct bkey_packed {
	__u64		_data[0];

	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */

	/*
	 * XXX: next incompat on disk format change, switch format and
	 * needs_whiteout - bkey_packed() will be cheaper if format is the high
	 * bits of the bitfield
	 */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#endif

	/* Type of the value */
	__u8		type;
	__u8		key_start[0];

	/*
	 * We copy bkeys with struct assignment in various places, and while
	 * that shouldn't be done with packed bkeys we can't disallow it in C,
	 * and it's legal to cast a bkey to a bkey_packed - so padding it out
	 * to the same size as struct bkey should hopefully be safest.
	 */
	__u8		pad[sizeof(struct bkey) - 3];
} __attribute__((packed, aligned(8)));
#define BKEY_U64s		(sizeof(struct bkey) / sizeof(__u64))
#define BKEY_U64s_MAX		U8_MAX
#define BKEY_VAL_U64s_MAX	(BKEY_U64s_MAX - BKEY_U64s)
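/*
 * Since u64s is a __u8, a key plus value is limited to U8_MAX u64s =
 * 2040 bytes - the "just under 2k" mentioned in the header comment.
 */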
#define KEY_PACKED_BITS_START	24

#define KEY_FORMAT_LOCAL_BTREE	0
#define KEY_FORMAT_CURRENT	1
enum bch_bkey_fields {
	BKEY_FIELD_INODE,
	BKEY_FIELD_OFFSET,
	BKEY_FIELD_SNAPSHOT,
	BKEY_FIELD_SIZE,
	BKEY_FIELD_VERSION_HI,
	BKEY_FIELD_VERSION_LO,
	BKEY_NR_FIELDS,
};
#define bkey_format_field(name, field)					\
	[BKEY_FIELD_##name] = (sizeof(((struct bkey *) NULL)->field) * 8)

#define BKEY_FORMAT_CURRENT						\
((struct bkey_format) {						\
	.key_u64s	= BKEY_U64s,					\
	.nr_fields	= BKEY_NR_FIELDS,				\
	.bits_per_field = {						\
		bkey_format_field(INODE,	p.inode),		\
		bkey_format_field(OFFSET,	p.offset),		\
		bkey_format_field(SNAPSHOT,	p.snapshot),		\
		bkey_format_field(SIZE,		size),			\
		bkey_format_field(VERSION_HI,	version.hi),		\
		bkey_format_field(VERSION_LO,	version.lo),		\
	},								\
})
/* bkey with inline value */
struct bkey_i {
	__u64			_data[0];

	union {
	struct {
		/* Size of combined key and value, in u64s */
		__u8	u64s;
	};
	struct {
		struct bkey	k;
		struct bch_val	v;
	};
	};
};

#define KEY(_inode, _offset, _size)					\
((struct bkey) {							\
	.u64s		= BKEY_U64s,					\
	.format		= KEY_FORMAT_CURRENT,				\
	.p		= POS(_inode, _offset),				\
	.size		= _size,					\
})

static inline void bkey_init(struct bkey *k)
{
	*k = KEY(0, 0, 0);
}
#define bkey_bytes(_k)		((_k)->u64s * sizeof(__u64))

#define __BKEY_PADDED(key, pad)					\
	struct { struct bkey_i key; __u64 key ## _pad[pad]; }
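/*
 * Hypothetical usage sketch: reserve stack space for a key plus up to 8 u64s
 * of value:
 *
 *	__BKEY_PADDED(k, 8) tmp;
 *
 *	bkey_init(&tmp.k.k);
 *	tmp.k.k.p = POS(inode_nr, offset);
 */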
/*
 * - DELETED keys are used internally to mark keys that should be ignored but
 *   override keys in composition order. Their version number is ignored.
 *
 * - DISCARDED keys indicate that the data is all 0s because it has been
 *   discarded. DISCARDs may have a version; if the version is nonzero the key
 *   will be persistent, otherwise the key will be dropped whenever the btree
 *   node is rewritten (like DELETED keys).
 *
 * - ERROR: any read of the data returns a read error, as the data was lost due
 *   to a failing device. Like DISCARDED keys, they can be removed (overridden)
 *   by new writes or cluster-wide GC. Node repair can also overwrite them with
 *   the same or a more recent version number, but not with an older version
 *   number.
 *
 * - WHITEOUT: for hash table btrees
 */
#define BCH_BKEY_TYPES()			\
	x(deleted,		0)		\
	x(whiteout,		1)		\
	x(error,		2)		\
	x(cookie,		3)		\
	x(hash_whiteout,	4)		\
	x(btree_ptr,		5)		\
	x(extent,		6)		\
	x(reservation,		7)		\
	x(inode,		8)		\
	x(inode_generation,	9)		\
	x(dirent,		10)		\
	x(xattr,		11)		\
	x(alloc,		12)		\
	x(quota,		13)		\
	x(stripe,		14)		\
	x(reflink_p,		15)		\
	x(reflink_v,		16)		\
	x(inline_data,		17)		\
	x(btree_ptr_v2,		18)		\
	x(indirect_inline_data,	19)		\
	x(alloc_v2,		20)		\
	x(subvolume,		21)		\
	x(snapshot,		22)		\
	x(inode_v2,		23)		\
	x(alloc_v3,		24)		\
	x(set,			25)		\
	x(lru,			26)		\
	x(alloc_v4,		27)		\
	x(backpointer,		28)

enum bch_bkey_type {
#define x(name, nr) KEY_TYPE_##name	= nr,
	BCH_BKEY_TYPES()
#undef x
	KEY_TYPE_MAX,
};
struct bch_deleted {
	struct bch_val		v;
};

struct bch_whiteout {
	struct bch_val		v;
};

struct bch_error {
	struct bch_val		v;
};

struct bch_cookie {
	struct bch_val		v;
	__le64			cookie;
};

struct bch_hash_whiteout {
	struct bch_val		v;
};
/* Extents */

/*
 * In extent bkeys, the value is a list of pointers (bch_extent_ptr), optionally
 * preceded by checksum/compression information (bch_extent_crc32 or
 * bch_extent_crc64).
 *
 * One major determining factor in the format of extents is how we handle and
 * represent extents that have been partially overwritten and thus trimmed:
 *
 * If an extent is not checksummed or compressed, when the extent is trimmed we
 * don't have to remember the extent we originally allocated and wrote: we can
 * merely adjust ptr->offset to point to the start of the data that is currently
 * live. The size field in struct bkey records the current (live) size of the
 * extent, and is also used to mean "size of region on disk that we point to" in
 * this case.
 *
 * Thus an extent that is not checksummed or compressed will consist only of a
 * list of bch_extent_ptrs, with none of the fields in
 * bch_extent_crc32/bch_extent_crc64.
 *
 * When an extent is checksummed or compressed, it's not possible to read only
 * the data that is currently live: we have to read the entire extent that was
 * originally written, and then return only the part of the extent that is
 * currently live.
 *
 * Thus, in addition to the current size of the extent in struct bkey, we need
 * to store the size of the originally allocated space - this is the
 * compressed_size and uncompressed_size fields in bch_extent_crc32/64. Also,
 * when the extent is trimmed, instead of modifying the offset field of the
 * pointer, we keep a second smaller offset field - "offset into the original
 * extent of the currently live region".
 *
 * The other major determining factor is replication and data migration:
 *
 * Each pointer may have its own bch_extent_crc32/64. When doing a replicated
 * write, we will initially write all the replicas in the same format, with the
 * same checksum type and compression format - however, when copygc runs later (or
 * tiering/cache promotion, anything that moves data), it is not in general
 * going to rewrite all the pointers at once - one of the replicas may be in a
 * bucket on one device that has very little fragmentation while another lives
 * in a bucket that has become heavily fragmented, and thus is being rewritten
 * sooner than the rest.
 *
 * Thus it will only move a subset of the pointers (or in the case of
 * tiering/cache promotion perhaps add a single pointer without dropping any
 * current pointers), and if the extent has been partially overwritten it must
 * write only the currently live portion (or copygc would not be able to reduce
 * fragmentation!) - which necessitates a different bch_extent_crc format for
 * the new pointer.
 *
 * But in the interests of space efficiency, we don't want to store one
 * bch_extent_crc for each pointer if we don't have to.
 *
 * Thus, a bch_extent consists of bch_extent_crc32s, bch_extent_crc64s, and
 * bch_extent_ptrs appended arbitrarily one after the other. We determine the
 * type of a given entry with a scheme similar to utf8 (except we're encoding a
 * type, not a size), encoding the type in the position of the first set bit:
 *
 * bch_extent_ptr	- 0b1
 * bch_extent_crc32	- 0b10
 * bch_extent_crc64	- 0b100
 *
 * We do it this way because bch_extent_crc32 is _very_ constrained on bits (and
 * bch_extent_crc64 is the least constrained).
 *
 * Then, each bch_extent_crc32/64 applies to the pointers that follow after it,
 * until the next bch_extent_crc32/64.
 *
 * If there are no bch_extent_crcs preceding a bch_extent_ptr, then that pointer
 * is neither checksummed nor compressed.
 */
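/*
 * Decoding sketch (illustrative, not the in-tree helper): because the type
 * lives in the position of the first set bit, it can be recovered with
 * __ffs() on the entry's first byte:
 *
 *	unsigned entry_type(const union bch_extent_entry *e)
 *	{
 *		return __ffs(*(const __u8 *) e);
 *	}
 *
 * which maps 0b1 -> 0, 0b10 -> 1, 0b100 -> 2, matching the numbering in
 * BCH_EXTENT_ENTRY_TYPES() below.
 */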
/* 128 bits, sufficient for cryptographic MACs: */
struct bch_csum {
	__le64			lo;
	__le64			hi;
} __attribute__((packed, aligned(8)));
#define BCH_EXTENT_ENTRY_TYPES()		\
	x(ptr,			0)		\
	x(crc32,		1)		\
	x(crc64,		2)		\
	x(crc128,		3)		\
	x(stripe_ptr,		4)
#define BCH_EXTENT_ENTRY_MAX	5

enum bch_extent_entry_type {
#define x(f, n) BCH_EXTENT_ENTRY_##f = n,
	BCH_EXTENT_ENTRY_TYPES()
#undef x
};
/* Compressed/uncompressed size are stored biased by 1: */
struct bch_extent_crc32 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u32			type:2,
				_compressed_size:7,
				_uncompressed_size:7,
				offset:7,
				_unused:1,
				csum_type:4,
				compression_type:4;
	__u32			csum;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u32			csum;
	__u32			compression_type:4,
				csum_type:4,
				_unused:1,
				offset:7,
				_uncompressed_size:7,
				_compressed_size:7,
				type:2;
#endif
} __attribute__((packed, aligned(8)));

#define CRC32_SIZE_MAX		(1U << 7)
#define CRC32_NONCE_MAX		0
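/*
 * Example of the bias: a size of 0 sectors can't occur, so a 128 sector
 * (CRC32_SIZE_MAX) extent stores 127 in the 7 bit
 * _compressed_size/_uncompressed_size fields - storing size - 1 buys one
 * extra sector of range.
 */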
struct bch_extent_crc64 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:3,
				_compressed_size:9,
				_uncompressed_size:9,
				offset:9,
				nonce:10,
				csum_type:4,
				compression_type:4,
				csum_hi:16;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			csum_hi:16,
				compression_type:4,
				csum_type:4,
				nonce:10,
				offset:9,
				_uncompressed_size:9,
				_compressed_size:9,
				type:3;
#endif
	__u64			csum_lo;
} __attribute__((packed, aligned(8)));

#define CRC64_SIZE_MAX		(1U << 9)
#define CRC64_NONCE_MAX		((1U << 10) - 1)
struct bch_extent_crc128 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:4,
				_compressed_size:13,
				_uncompressed_size:13,
				offset:13,
				nonce:13,
				csum_type:4,
				compression_type:4;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			compression_type:4,
				csum_type:4,
				nonce:13,
				offset:13,
				_uncompressed_size:13,
				_compressed_size:13,
				type:4;
#endif
	struct bch_csum		csum;
} __attribute__((packed, aligned(8)));

#define CRC128_SIZE_MAX		(1U << 13)
#define CRC128_NONCE_MAX	((1U << 13) - 1)
/*
 * @reservation - pointer hasn't been written to, just reserved
 */
struct bch_extent_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:1,
				cached:1,
				unused:1,
				reservation:1,
				offset:44, /* 8 petabytes */
				dev:8,
				gen:8;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			gen:8,
				dev:8,
				offset:44,
				reservation:1,
				unused:1,
				cached:1,
				type:1;
#endif
} __attribute__((packed, aligned(8)));

struct bch_extent_stripe_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:5,
				block:8,
				redundancy:4,
				idx:47;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			idx:47,
				redundancy:4,
				block:8,
				type:5;
#endif
};

struct bch_extent_reservation {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:6,
				unused:22,
				replicas:4,
				generation:32;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			generation:32,
				replicas:4,
				unused:22,
				type:6;
#endif
};

union bch_extent_entry {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ || __BITS_PER_LONG == 64
	unsigned long			type;
#elif __BITS_PER_LONG == 32
	struct {
		unsigned long		pad;
		unsigned long		type;
	};
#else
#error edit for your odd byteorder.
#endif

#define x(f, n) struct bch_extent_##f	f;
	BCH_EXTENT_ENTRY_TYPES()
#undef x
};
struct bch_btree_ptr {
	struct bch_val		v;

	__u64			_data[0];
	struct bch_extent_ptr	start[];
} __attribute__((packed, aligned(8)));

struct bch_btree_ptr_v2 {
	struct bch_val		v;

	__u64			mem_ptr;
	__le64			seq;
	__le16			sectors_written;
	__le16			flags;
	struct bpos		min_key;
	__u64			_data[0];
	struct bch_extent_ptr	start[];
} __attribute__((packed, aligned(8)));

LE16_BITMASK(BTREE_PTR_RANGE_UPDATED,	struct bch_btree_ptr_v2, flags, 0, 1);

struct bch_extent {
	struct bch_val		v;

	__u64			_data[0];
	union bch_extent_entry	start[];
} __attribute__((packed, aligned(8)));

struct bch_reservation {
	struct bch_val		v;

	__le32			generation;
	__u8			replicas;
	__u8			pad[3];
} __attribute__((packed, aligned(8)));
/* Maximum size (in u64s) a single pointer could be: */
#define BKEY_EXTENT_PTR_U64s_MAX				\
	((sizeof(struct bch_extent_crc128) +			\
	  sizeof(struct bch_extent_ptr)) / sizeof(u64))

/* Maximum possible size of an entire extent value: */
#define BKEY_EXTENT_VAL_U64s_MAX				\
	(1 + BKEY_EXTENT_PTR_U64s_MAX * (BCH_REPLICAS_MAX + 1))

/* Maximum possible size of an entire extent, key + value: */
#define BKEY_EXTENT_U64s_MAX	(BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)

/* Btree pointers don't carry around checksums: */
#define BKEY_BTREE_PTR_VAL_U64s_MAX				\
	((sizeof(struct bch_btree_ptr_v2) +			\
	  sizeof(struct bch_extent_ptr) * BCH_REPLICAS_MAX) / sizeof(u64))
#define BKEY_BTREE_PTR_U64s_MAX					\
	(BKEY_U64s + BKEY_BTREE_PTR_VAL_U64s_MAX)
/* Inodes */

#define BLOCKDEV_INODE_MAX	4096

#define BCACHEFS_ROOT_INO	4096

struct bch_inode {
	struct bch_val		v;

	__le64			bi_hash_seed;
	__le32			bi_flags;
	__le16			bi_mode;
	__u8			fields[0];
} __attribute__((packed, aligned(8)));

struct bch_inode_v2 {
	struct bch_val		v;

	__le64			bi_journal_seq;
	__le64			bi_hash_seed;
	__le64			bi_flags;
	__le16			bi_mode;
	__u8			fields[0];
} __attribute__((packed, aligned(8)));

struct bch_inode_generation {
	struct bch_val		v;

	__le32			bi_generation;
	__le32			pad;
} __attribute__((packed, aligned(8)));
/*
 * bi_subvol and bi_parent_subvol are only set for subvolume roots:
 */

#define BCH_INODE_FIELDS()			\
	x(bi_atime,			96)	\
	x(bi_ctime,			96)	\
	x(bi_mtime,			96)	\
	x(bi_otime,			96)	\
	x(bi_size,			64)	\
	x(bi_sectors,			64)	\
	x(bi_uid,			32)	\
	x(bi_gid,			32)	\
	x(bi_nlink,			32)	\
	x(bi_generation,		32)	\
	x(bi_dev,			32)	\
	x(bi_data_checksum,		8)	\
	x(bi_compression,		8)	\
	x(bi_project,			32)	\
	x(bi_background_compression,	8)	\
	x(bi_data_replicas,		8)	\
	x(bi_promote_target,		16)	\
	x(bi_foreground_target,		16)	\
	x(bi_background_target,		16)	\
	x(bi_erasure_code,		16)	\
	x(bi_fields_set,		16)	\
	x(bi_dir,			64)	\
	x(bi_dir_offset,		64)	\
	x(bi_subvol,			32)	\
	x(bi_parent_subvol,		32)
/* subset of BCH_INODE_FIELDS */
#define BCH_INODE_OPTS()			\
	x(data_checksum,		8)	\
	x(compression,			8)	\
	x(project,			32)	\
	x(background_compression,	8)	\
	x(data_replicas,		8)	\
	x(promote_target,		16)	\
	x(foreground_target,		16)	\
	x(background_target,		16)	\
	x(erasure_code,			16)

enum inode_opt_id {
#define x(name, ...)				\
	Inode_opt_##name,
	BCH_INODE_OPTS()
#undef x
	Inode_opt_nr,
};

enum {
	/*
	 * User flags (get/settable with FS_IOC_*FLAGS, correspond to FS_*_FL
	 * flags)
	 */
	__BCH_INODE_SYNC	= 0,
	__BCH_INODE_IMMUTABLE	= 1,
	__BCH_INODE_APPEND	= 2,
	__BCH_INODE_NODUMP	= 3,
	__BCH_INODE_NOATIME	= 4,

	__BCH_INODE_I_SIZE_DIRTY	= 5,
	__BCH_INODE_I_SECTORS_DIRTY	= 6,
	__BCH_INODE_UNLINKED		= 7,
	__BCH_INODE_BACKPTR_UNTRUSTED	= 8,

	/* bits 20+ reserved for packed fields below: */
};
#define BCH_INODE_SYNC		(1 << __BCH_INODE_SYNC)
#define BCH_INODE_IMMUTABLE	(1 << __BCH_INODE_IMMUTABLE)
#define BCH_INODE_APPEND	(1 << __BCH_INODE_APPEND)
#define BCH_INODE_NODUMP	(1 << __BCH_INODE_NODUMP)
#define BCH_INODE_NOATIME	(1 << __BCH_INODE_NOATIME)
#define BCH_INODE_I_SIZE_DIRTY	(1 << __BCH_INODE_I_SIZE_DIRTY)
#define BCH_INODE_I_SECTORS_DIRTY (1 << __BCH_INODE_I_SECTORS_DIRTY)
#define BCH_INODE_UNLINKED	(1 << __BCH_INODE_UNLINKED)
#define BCH_INODE_BACKPTR_UNTRUSTED (1 << __BCH_INODE_BACKPTR_UNTRUSTED)

LE32_BITMASK(INODE_STR_HASH,	struct bch_inode, bi_flags, 20, 24);
LE32_BITMASK(INODE_NR_FIELDS,	struct bch_inode, bi_flags, 24, 31);
LE32_BITMASK(INODE_NEW_VARINT,	struct bch_inode, bi_flags, 31, 32);

LE64_BITMASK(INODEv2_STR_HASH,	struct bch_inode_v2, bi_flags, 20, 24);
LE64_BITMASK(INODEv2_NR_FIELDS,	struct bch_inode_v2, bi_flags, 24, 31);
/* Dirents */

/*
 * Dirents (and xattrs) have to implement string lookups; since our b-tree
 * doesn't support arbitrary length strings for the key, we instead index by a
 * 64 bit hash (currently truncated SHA-1) of the string, stored in the offset
 * field of the key - using linear probing to resolve hash collisions. This also
 * provides us with the readdir cookie POSIX requires.
 *
 * Linear probing requires us to use whiteouts for deletions, in the event of a
 * collision:
 */

struct bch_dirent {
	struct bch_val		v;

	/* Target inode number: */
	union {
	__le64			d_inum;
	struct {		/* DT_SUBVOL */
	__le32			d_child_subvol;
	__le32			d_parent_subvol;
	};
	};

	/*
	 * Copy of mode bits 12-15 from the target inode - so userspace can get
	 * the filetype without having to do a stat()
	 */
	__u8			d_type;

	__u8			d_name[];
} __attribute__((packed, aligned(8)));

#define DT_SUBVOL	16
#define BCH_DT_MAX	17

#define BCH_NAME_MAX	((unsigned) (U8_MAX * sizeof(u64) -	\
			 sizeof(struct bkey) -			\
			 offsetof(struct bch_dirent, d_name)))
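/*
 * Lookup sketch (illustrative): a dirent for "name" in directory dir_inum
 * lives at POS(dir_inum, hash("name")); on a hash collision the search
 * probes forward to the next offset until the matching d_name or an empty
 * slot is found.
 */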
/* Xattrs */

#define KEY_TYPE_XATTR_INDEX_USER		0
#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS	1
#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT	2
#define KEY_TYPE_XATTR_INDEX_TRUSTED		3
#define KEY_TYPE_XATTR_INDEX_SECURITY		4

struct bch_xattr {
	struct bch_val		v;
	__u8			x_type;
	__u8			x_name_len;
	__le16			x_val_len;
	__u8			x_name[];
} __attribute__((packed, aligned(8)));

/* Bucket/allocation information: */

struct bch_alloc {
	struct bch_val		v;
	__u8			fields;
	__u8			gen;
	__u8			data[];
} __attribute__((packed, aligned(8)));
#define BCH_ALLOC_FIELDS_V1()			\
	x(read_time,		16)		\
	x(write_time,		16)		\
	x(data_type,		8)		\
	x(dirty_sectors,	16)		\
	x(cached_sectors,	16)		\
	x(oldest_gen,		8)		\
	x(stripe,		32)		\
	x(stripe_redundancy,	8)

enum {
#define x(name, _bits) BCH_ALLOC_FIELD_V1_##name,
	BCH_ALLOC_FIELDS_V1()
#undef x
};

struct bch_alloc_v2 {
	struct bch_val		v;
	__u8			nr_fields;
	__u8			gen;
	__u8			oldest_gen;
	__u8			data_type;
	__u8			data[];
} __attribute__((packed, aligned(8)));

#define BCH_ALLOC_FIELDS_V2()			\
	x(read_time,		64)		\
	x(write_time,		64)		\
	x(dirty_sectors,	32)		\
	x(cached_sectors,	32)		\
	x(stripe,		32)		\
	x(stripe_redundancy,	8)

struct bch_alloc_v3 {
	struct bch_val		v;
	__le64			journal_seq;
	__le32			flags;
	__u8			nr_fields;
	__u8			gen;
	__u8			oldest_gen;
	__u8			data_type;
	__u8			data[];
} __attribute__((packed, aligned(8)));

LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags,  0,  1)
LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags,  1,  2)
struct bch_alloc_v4 {
	struct bch_val		v;
	__u64			journal_seq;
	__u32			flags;
	__u8			gen;
	__u8			oldest_gen;
	__u8			data_type;
	__u8			stripe_redundancy;
	__u32			dirty_sectors;
	__u32			cached_sectors;
	__u64			io_time[2];
	__u32			stripe;
	__u32			nr_external_backpointers;
} __attribute__((packed, aligned(8)));

#define BCH_ALLOC_V4_U64s_V0	6
#define BCH_ALLOC_V4_U64s	(sizeof(struct bch_alloc_v4) / sizeof(u64))

BITMASK(BCH_ALLOC_V4_NEED_DISCARD,	struct bch_alloc_v4, flags,  0,  1)
BITMASK(BCH_ALLOC_V4_NEED_INC_GEN,	struct bch_alloc_v4, flags,  1,  2)
BITMASK(BCH_ALLOC_V4_BACKPOINTERS_START,struct bch_alloc_v4, flags,  2,  8)
BITMASK(BCH_ALLOC_V4_NR_BACKPOINTERS,	struct bch_alloc_v4, flags,  8, 14)

#define BCH_ALLOC_V4_NR_BACKPOINTERS_MAX	40
struct bch_backpointer {
	struct bch_val		v;
	__u8			btree_id;
	__u8			level;
	__u8			data_type;
	__u64			bucket_offset:40;
	__u32			bucket_len;
	struct bpos		pos;
} __attribute__((packed, aligned(8)));

/* Quotas: */

enum quota_types {
	QTYP_USR		= 0,
	QTYP_GRP		= 1,
	QTYP_PRJ		= 2,
	QTYP_NR			= 3,
};

enum quota_counters {
	Q_SPC			= 0,
	Q_INO			= 1,
	Q_COUNTERS		= 2,
};

struct bch_quota_counter {
	__le64			hardlimit;
	__le64			softlimit;
};

struct bch_quota {
	struct bch_val		v;
	struct bch_quota_counter c[Q_COUNTERS];
} __attribute__((packed, aligned(8)));
/* Erasure coding */

struct bch_stripe {
	struct bch_val		v;
	__le16			sectors;
	__u8			algorithm;
	__u8			nr_blocks;
	__u8			nr_redundant;

	__u8			csum_granularity_bits;
	__u8			csum_type;
	__u8			pad;

	struct bch_extent_ptr	ptrs[];
} __attribute__((packed, aligned(8)));

/* Reflink: */

struct bch_reflink_p {
	struct bch_val		v;
	__le64			idx;
	/*
	 * A reflink pointer might point to an indirect extent which is then
	 * later split (by copygc or rebalance). If we only pointed to part of
	 * the original indirect extent, and then one of the fragments is
	 * outside the range we point to, we'd leak a refcount: so when creating
	 * reflink pointers, we need to store pad values to remember the full
	 * range we were taking a reference on.
	 */
	__le32			front_pad;
	__le32			back_pad;
} __attribute__((packed, aligned(8)));

struct bch_reflink_v {
	struct bch_val		v;
	__le64			refcount;
	union bch_extent_entry start[0];
	__u64			_data[0];
} __attribute__((packed, aligned(8)));

struct bch_indirect_inline_data {
	struct bch_val		v;
	__le64			refcount;
	u8			data[0];
};

/* Inline data */

struct bch_inline_data {
	struct bch_val		v;
	u8			data[0];
};
/* Subvolumes: */

#define SUBVOL_POS_MIN		POS(0, 1)
#define SUBVOL_POS_MAX		POS(0, S32_MAX)
#define BCACHEFS_ROOT_SUBVOL	1

struct bch_subvolume {
	struct bch_val		v;
	__le32			flags;
	__le32			snapshot;
	__le64			inode;
};

LE32_BITMASK(BCH_SUBVOLUME_RO,		struct bch_subvolume, flags,  0,  1)
/*
 * We need to know whether a subvolume is a snapshot so we can know whether we
 * can delete it (or whether it should just be rm -rf'd)
 */
LE32_BITMASK(BCH_SUBVOLUME_SNAP,	struct bch_subvolume, flags,  1,  2)
LE32_BITMASK(BCH_SUBVOLUME_UNLINKED,	struct bch_subvolume, flags,  2,  3)

/* Snapshots */

struct bch_snapshot {
	struct bch_val		v;
	__le32			flags;
	__le32			parent;
	__le32			children[2];
	__le32			subvol;
	__le32			pad;
};

LE32_BITMASK(BCH_SNAPSHOT_DELETED,	struct bch_snapshot, flags,  0,  1)

/* True if a subvolume points to this snapshot node: */
LE32_BITMASK(BCH_SNAPSHOT_SUBVOL,	struct bch_snapshot, flags,  1,  2)

/* LRU btree: */

struct bch_lru {
	struct bch_val		v;
	__le64			idx;
} __attribute__((packed, aligned(8)));

#define LRU_ID_STRIPES		(1U << 16)
/* Optional/variable size superblock sections: */

struct bch_sb_field {
	__u64			_data[0];
	__le32			u64s;
	__le32			type;
};

#define BCH_SB_FIELDS()				\
	x(journal,	0)			\
	x(members,	1)			\
	x(crypt,	2)			\
	x(replicas_v0,	3)			\
	x(quota,	4)			\
	x(disk_groups,	5)			\
	x(clean,	6)			\
	x(replicas,	7)			\
	x(journal_seq_blacklist, 8)		\
	x(journal_v2,	9)			\
	x(counters,	10)

enum bch_sb_field_type {
#define x(f, nr)	BCH_SB_FIELD_##f = nr,
	BCH_SB_FIELDS()
#undef x
	BCH_SB_FIELD_NR
};

/*
 * Most superblock fields are replicated in all devices' superblocks - a few are
 * not:
 */
#define BCH_SINGLE_DEVICE_SB_FIELDS		\
	((1U << BCH_SB_FIELD_journal)|		\
	 (1U << BCH_SB_FIELD_journal_v2))

/* BCH_SB_FIELD_journal: */

struct bch_sb_field_journal {
	struct bch_sb_field	field;
	__le64			buckets[0];
};

struct bch_sb_field_journal_v2 {
	struct bch_sb_field	field;

	struct bch_sb_field_journal_v2_entry {
		__le64		start;
		__le64		nr;
	} d[0];
};
/* BCH_SB_FIELD_members: */

#define BCH_MIN_NR_NBUCKETS	(1 << 6)

struct bch_member {
	uuid_le			uuid;
	__le64			nbuckets;	/* device size */
	__le16			first_bucket;	/* index of first bucket used */
	__le16			bucket_size;	/* sectors */
	__le32			pad;
	__le64			last_mount;	/* time_t */

	__le64			flags[2];
};

LE64_BITMASK(BCH_MEMBER_STATE,		struct bch_member, flags[0],  0,  4)
/* 4-14 unused, was TIER, HAS_(META)DATA, REPLACEMENT */
LE64_BITMASK(BCH_MEMBER_DISCARD,	struct bch_member, flags[0], 14, 15)
LE64_BITMASK(BCH_MEMBER_DATA_ALLOWED,	struct bch_member, flags[0], 15, 20)
LE64_BITMASK(BCH_MEMBER_GROUP,		struct bch_member, flags[0], 20, 28)
LE64_BITMASK(BCH_MEMBER_DURABILITY,	struct bch_member, flags[0], 28, 30)
LE64_BITMASK(BCH_MEMBER_FREESPACE_INITIALIZED,
					struct bch_member, flags[0], 30, 31)

#if 0
LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS,	struct bch_member, flags[1],  0, 20);
LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
#endif

#define BCH_MEMBER_STATES()		\
	x(rw,		0)		\
	x(ro,		1)		\
	x(failed,	2)		\
	x(spare,	3)

enum bch_member_state {
#define x(t, n) BCH_MEMBER_STATE_##t = n,
	BCH_MEMBER_STATES()
#undef x
	BCH_MEMBER_STATE_NR
};

struct bch_sb_field_members {
	struct bch_sb_field	field;
	struct bch_member	members[0];
};
/* BCH_SB_FIELD_crypt: */

struct nonce {
	__le32			d[4];
};

struct bch_key {
	__le64			key[4];
};

#define BCH_KEY_MAGIC					\
	(((u64) 'b' <<  0)|((u64) 'c' <<  8)|		\
	 ((u64) 'h' << 16)|((u64) '*' << 24)|		\
	 ((u64) '*' << 32)|((u64) 'k' << 40)|		\
	 ((u64) 'e' << 48)|((u64) 'y' << 56))
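/* i.e. the ASCII bytes "bch**key", read as a little-endian __u64 */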
struct bch_encrypted_key {
	__le64			magic;
	struct bch_key		key;
};

/*
 * If this field is present in the superblock, it stores an encryption key which
 * is used to encrypt all other data/metadata. The key will normally be encrypted
 * with the key userspace provides, but if encryption has been turned off we'll
 * just store the master key unencrypted in the superblock so we can access the
 * previously encrypted data.
 */
struct bch_sb_field_crypt {
	struct bch_sb_field	field;

	__le64			flags;
	__le64			kdf_flags;
	struct bch_encrypted_key key;
};

LE64_BITMASK(BCH_CRYPT_KDF_TYPE,	struct bch_sb_field_crypt, flags, 0, 4);

enum bch_kdf_types {
	BCH_KDF_SCRYPT		= 0,
	BCH_KDF_NR		= 1,
};

/* stored as base 2 log of scrypt params: */
LE64_BITMASK(BCH_KDF_SCRYPT_N,	struct bch_sb_field_crypt, kdf_flags,  0, 16);
LE64_BITMASK(BCH_KDF_SCRYPT_R,	struct bch_sb_field_crypt, kdf_flags, 16, 32);
LE64_BITMASK(BCH_KDF_SCRYPT_P,	struct bch_sb_field_crypt, kdf_flags, 32, 48);
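/* e.g. scrypt N = 16384 (1 << 14) would be stored here as 14 */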
/* BCH_SB_FIELD_replicas: */

#define BCH_DATA_TYPES()		\
	x(free,		0)		\
	x(sb,		1)		\
	x(journal,	2)		\
	x(btree,	3)		\
	x(user,		4)		\
	x(cached,	5)		\
	x(parity,	6)		\
	x(stripe,	7)		\
	x(need_gc_gens,	8)		\
	x(need_discard,	9)

enum bch_data_type {
#define x(t, n) BCH_DATA_##t,
	BCH_DATA_TYPES()
#undef x
	BCH_DATA_NR
};

static inline bool data_type_is_empty(enum bch_data_type type)
{
	switch (type) {
	case BCH_DATA_free:
	case BCH_DATA_need_gc_gens:
	case BCH_DATA_need_discard:
		return true;
	default:
		return false;
	}
}

static inline bool data_type_is_hidden(enum bch_data_type type)
{
	switch (type) {
	case BCH_DATA_sb:
	case BCH_DATA_journal:
		return true;
	default:
		return false;
	}
}

struct bch_replicas_entry_v0 {
	__u8			data_type;
	__u8			nr_devs;
	__u8			devs[];
} __attribute__((packed));
struct bch_sb_field_replicas_v0 {
	struct bch_sb_field	field;
	struct bch_replicas_entry_v0 entries[];
} __attribute__((packed, aligned(8)));

struct bch_replicas_entry {
	__u8			data_type;
	__u8			nr_devs;
	__u8			nr_required;
	__u8			devs[];
} __attribute__((packed));

#define replicas_entry_bytes(_i)					\
	(offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
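/*
 * Example: a bch_replicas_entry with nr_devs = 2 occupies
 * offsetof(struct bch_replicas_entry, devs) + 2 = 5 bytes; successive
 * entries are walked by advancing replicas_entry_bytes() each time.
 */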
struct bch_sb_field_replicas {
	struct bch_sb_field	field;
	struct bch_replicas_entry entries[0];
} __attribute__((packed, aligned(8)));

/* BCH_SB_FIELD_quota: */

struct bch_sb_quota_counter {
	__le32				timelimit;
	__le32				warnlimit;
};

struct bch_sb_quota_type {
	__le64				flags;
	struct bch_sb_quota_counter	c[Q_COUNTERS];
};

struct bch_sb_field_quota {
	struct bch_sb_field		field;
	struct bch_sb_quota_type	q[QTYP_NR];
} __attribute__((packed, aligned(8)));
/* BCH_SB_FIELD_disk_groups: */

#define BCH_SB_LABEL_SIZE	32

struct bch_disk_group {
	__u8			label[BCH_SB_LABEL_SIZE];
	__le64			flags[2];
} __attribute__((packed, aligned(8)));

LE64_BITMASK(BCH_GROUP_DELETED,		struct bch_disk_group, flags[0], 0,  1)
LE64_BITMASK(BCH_GROUP_DATA_ALLOWED,	struct bch_disk_group, flags[0], 1,  6)
LE64_BITMASK(BCH_GROUP_PARENT,		struct bch_disk_group, flags[0], 6, 24)

struct bch_sb_field_disk_groups {
	struct bch_sb_field	field;
	struct bch_disk_group	entries[0];
} __attribute__((packed, aligned(8)));
/* BCH_SB_FIELD_counters */

#define BCH_PERSISTENT_COUNTERS()			\
	x(io_read,				0)	\
	x(io_write,				1)	\
	x(io_move,				2)	\
	x(bucket_invalidate,			3)	\
	x(bucket_discard,			4)	\
	x(bucket_alloc,				5)	\
	x(bucket_alloc_fail,			6)	\
	x(btree_cache_scan,			7)	\
	x(btree_cache_reap,			8)	\
	x(btree_cache_cannibalize,		9)	\
	x(btree_cache_cannibalize_lock,		10)	\
	x(btree_cache_cannibalize_lock_fail,	11)	\
	x(btree_cache_cannibalize_unlock,	12)	\
	x(btree_node_write,			13)	\
	x(btree_node_read,			14)	\
	x(btree_node_compact,			15)	\
	x(btree_node_merge,			16)	\
	x(btree_node_split,			17)	\
	x(btree_node_rewrite,			18)	\
	x(btree_node_alloc,			19)	\
	x(btree_node_free,			20)	\
	x(btree_node_set_root,			21)	\
	x(btree_path_relock_fail,		22)	\
	x(btree_path_upgrade_fail,		23)	\
	x(btree_reserve_get_fail,		24)	\
	x(journal_entry_full,			25)	\
	x(journal_full,				26)	\
	x(journal_reclaim_finish,		27)	\
	x(journal_reclaim_start,		28)	\
	x(journal_write,			29)	\
	x(read_promote,				30)	\
	x(read_bounce,				31)	\
	x(read_split,				32)	\
	x(read_retry,				33)	\
	x(read_reuse_race,			34)	\
	x(move_extent_read,			35)	\
	x(move_extent_write,			36)	\
	x(move_extent_finish,			37)	\
	x(move_extent_race,			38)	\
	x(move_extent_alloc_mem_fail,		39)	\
	x(copygc,				40)	\
	x(copygc_wait,				41)	\
	x(gc_gens_end,				42)	\
	x(gc_gens_start,			43)	\
	x(trans_blocked_journal_reclaim,	44)	\
	x(trans_restart_btree_node_reused,	45)	\
	x(trans_restart_btree_node_split,	46)	\
	x(trans_restart_fault_inject,		47)	\
	x(trans_restart_iter_upgrade,		48)	\
	x(trans_restart_journal_preres_get,	49)	\
	x(trans_restart_journal_reclaim,	50)	\
	x(trans_restart_journal_res_get,	51)	\
	x(trans_restart_key_cache_key_realloced, 52)	\
	x(trans_restart_key_cache_raced,	53)	\
	x(trans_restart_mark_replicas,		54)	\
	x(trans_restart_mem_realloced,		55)	\
	x(trans_restart_memory_allocation_failure, 56)	\
	x(trans_restart_relock,			57)	\
	x(trans_restart_relock_after_fill,	58)	\
	x(trans_restart_relock_key_cache_fill,	59)	\
	x(trans_restart_relock_next_node,	60)	\
	x(trans_restart_relock_parent_for_fill,	61)	\
	x(trans_restart_relock_path,		62)	\
	x(trans_restart_relock_path_intent,	63)	\
	x(trans_restart_too_many_iters,		64)	\
	x(trans_restart_traverse,		65)	\
	x(trans_restart_upgrade,		66)	\
	x(trans_restart_would_deadlock,		67)	\
	x(trans_restart_would_deadlock_write,	68)	\
	x(trans_restart_injected,		69)	\
	x(trans_restart_key_cache_upgrade,	70)	\
	x(trans_traverse_all,			71)	\
	x(transaction_commit,			72)	\
	x(write_super,				73)	\
	x(trans_restart_would_deadlock_recursion_limit, 74)

enum bch_persistent_counters {
#define x(t, n, ...) BCH_COUNTER_##t,
	BCH_PERSISTENT_COUNTERS()
#undef x
	BCH_COUNTER_NR
};
struct bch_sb_field_counters {
	struct bch_sb_field	field;
	__le64			d[0];
};

/*
 * On clean shutdown, store btree roots and current journal sequence number in
 * the superblock:
 */
struct jset_entry {
	__le16			u64s;
	__u8			btree_id;
	__u8			level;
	__u8			type; /* designates what this jset holds */
	__u8			pad[3];

	union {
		struct bkey_i	start[0];
		__u64		_data[0];
	};
};

struct bch_sb_field_clean {
	struct bch_sb_field	field;

	__le32			flags;
	__le16			_read_clock; /* no longer used */
	__le16			_write_clock;
	__le64			journal_seq;

	struct jset_entry	start[0];
	__u64			_data[0];
};

struct journal_seq_blacklist_entry {
	__le64			start;
	__le64			end;
};

struct bch_sb_field_journal_seq_blacklist {
	struct bch_sb_field	field;

	struct journal_seq_blacklist_entry start[0];
	__u64			_data[0];
};
/* Superblock: */

/*
 * New versioning scheme:
 * One common version number for all on disk data structures - superblock, btree
 * nodes, journal entries
 */
#define BCH_JSET_VERSION_OLD		2
#define BCH_BSET_VERSION_OLD		3

#define BCH_METADATA_VERSIONS()			\
	x(bkey_renumber,		10)	\
	x(inode_btree_change,		11)	\
	x(snapshot,			12)	\
	x(inode_backpointers,		13)	\
	x(btree_ptr_sectors_written,	14)	\
	x(snapshot_2,			15)	\
	x(reflink_p_fix,		16)	\
	x(subvol_dirent,		17)	\
	x(inode_v2,			18)	\
	x(freespace,			19)	\
	x(alloc_v4,			20)	\
	x(new_data_types,		21)	\
	x(backpointers,			22)

enum bcachefs_metadata_version {
	bcachefs_metadata_version_min = 9,
#define x(t, n)	bcachefs_metadata_version_##t = n,
	BCH_METADATA_VERSIONS()
#undef x
	bcachefs_metadata_version_max
};

#define bcachefs_metadata_version_current	(bcachefs_metadata_version_max - 1)
#define BCH_SB_SECTOR		8
#define BCH_SB_MEMBERS_MAX	64 /* XXX kill */

struct bch_sb_layout {
	uuid_le			magic;	/* bcachefs superblock UUID */
	__u8			layout_type;
	__u8			sb_max_size_bits; /* base 2 of 512 byte sectors */
	__u8			nr_superblocks;
	__u8			pad[5];
	__le64			sb_offset[61];
} __attribute__((packed, aligned(8)));

#define BCH_SB_LAYOUT_SECTOR	7
/*
 * @offset	- sector where this sb was written
 * @version	- on disk format version
 * @version_min	- Oldest metadata version this filesystem contains; so we can
 *		  safely drop compatibility code and refuse to mount filesystems
 *		  we'd need it for
 * @magic	- identifies as a bcachefs superblock (BCACHE_MAGIC)
 * @uuid	- used for generating various magic numbers and identifying
 *		  member devices, never changes
 * @user_uuid	- user visible UUID, may be changed
 * @label	- filesystem label
 * @seq		- identifies most recent superblock, incremented each time
 *		  superblock is written
 * @features	- enabled incompatible features
 */
struct bch_sb {
	struct bch_csum		csum;
	__le16			version;
	__le16			version_min;
	__le16			pad[2];
	uuid_le			magic;
	uuid_le			uuid;
	uuid_le			user_uuid;
	__u8			label[BCH_SB_LABEL_SIZE];
	__le64			offset;
	__le64			seq;

	__le16			block_size;
	__u8			dev_idx;
	__u8			nr_devices;
	__le32			u64s;

	__le64			time_base_lo;
	__le32			time_base_hi;
	__le32			time_precision;

	__le64			flags[8];
	__le64			features[2];
	__le64			compat[2];

	struct bch_sb_layout	layout;

	union {
		struct bch_sb_field start[0];
		__le64		_data[0];
	};
} __attribute__((packed, aligned(8)));
/*
 * Flags:
 * BCH_SB_INITIALIZED	- set on first mount
 * BCH_SB_CLEAN		- did we shut down cleanly? Just a hint, doesn't affect
 *			  behaviour of mount/recovery path:
 * BCH_SB_INODE_32BIT	- limit inode numbers to 32 bits
 * BCH_SB_128_BIT_MACS	- 128 bit macs instead of 80
 * BCH_SB_ENCRYPTION_TYPE - if nonzero encryption is enabled; overrides
 *			  DATA/META_CSUM_TYPE. Also indicates encryption
 *			  algorithm in use, if/when we get more than one
 */
LE16_BITMASK(BCH_SB_BLOCK_SIZE,		struct bch_sb, block_size, 0, 16);

LE64_BITMASK(BCH_SB_INITIALIZED,	struct bch_sb, flags[0],  0,  1);
LE64_BITMASK(BCH_SB_CLEAN,		struct bch_sb, flags[0],  1,  2);
LE64_BITMASK(BCH_SB_CSUM_TYPE,		struct bch_sb, flags[0],  2,  8);
LE64_BITMASK(BCH_SB_ERROR_ACTION,	struct bch_sb, flags[0],  8, 12);

LE64_BITMASK(BCH_SB_BTREE_NODE_SIZE,	struct bch_sb, flags[0], 12, 28);

LE64_BITMASK(BCH_SB_GC_RESERVE,		struct bch_sb, flags[0], 28, 33);
LE64_BITMASK(BCH_SB_ROOT_RESERVE,	struct bch_sb, flags[0], 33, 40);

LE64_BITMASK(BCH_SB_META_CSUM_TYPE,	struct bch_sb, flags[0], 40, 44);
LE64_BITMASK(BCH_SB_DATA_CSUM_TYPE,	struct bch_sb, flags[0], 44, 48);

LE64_BITMASK(BCH_SB_META_REPLICAS_WANT,	struct bch_sb, flags[0], 48, 52);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_WANT,	struct bch_sb, flags[0], 52, 56);

LE64_BITMASK(BCH_SB_POSIX_ACL,		struct bch_sb, flags[0], 56, 57);
LE64_BITMASK(BCH_SB_USRQUOTA,		struct bch_sb, flags[0], 57, 58);
LE64_BITMASK(BCH_SB_GRPQUOTA,		struct bch_sb, flags[0], 58, 59);
LE64_BITMASK(BCH_SB_PRJQUOTA,		struct bch_sb, flags[0], 59, 60);

LE64_BITMASK(BCH_SB_HAS_ERRORS,		struct bch_sb, flags[0], 60, 61);
LE64_BITMASK(BCH_SB_HAS_TOPOLOGY_ERRORS,struct bch_sb, flags[0], 61, 62);

LE64_BITMASK(BCH_SB_BIG_ENDIAN,		struct bch_sb, flags[0], 62, 63);

LE64_BITMASK(BCH_SB_STR_HASH_TYPE,	struct bch_sb, flags[1],  0,  4);
LE64_BITMASK(BCH_SB_COMPRESSION_TYPE,	struct bch_sb, flags[1],  4,  8);
LE64_BITMASK(BCH_SB_INODE_32BIT,	struct bch_sb, flags[1],  8,  9);

LE64_BITMASK(BCH_SB_128_BIT_MACS,	struct bch_sb, flags[1],  9, 10);
LE64_BITMASK(BCH_SB_ENCRYPTION_TYPE,	struct bch_sb, flags[1], 10, 14);

/*
 * Max size of an extent that may require bouncing to read or write
 * (checksummed, compressed): 64k
 */
LE64_BITMASK(BCH_SB_ENCODED_EXTENT_MAX_BITS,
					struct bch_sb, flags[1], 14, 20);

LE64_BITMASK(BCH_SB_META_REPLICAS_REQ,	struct bch_sb, flags[1], 20, 24);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_REQ,	struct bch_sb, flags[1], 24, 28);

LE64_BITMASK(BCH_SB_PROMOTE_TARGET,	struct bch_sb, flags[1], 28, 40);
LE64_BITMASK(BCH_SB_FOREGROUND_TARGET,	struct bch_sb, flags[1], 40, 52);
LE64_BITMASK(BCH_SB_BACKGROUND_TARGET,	struct bch_sb, flags[1], 52, 64);

LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE,
					struct bch_sb, flags[2],  0,  4);
LE64_BITMASK(BCH_SB_GC_RESERVE_BYTES,	struct bch_sb, flags[2],  4, 64);

LE64_BITMASK(BCH_SB_ERASURE_CODE,	struct bch_sb, flags[3],  0, 16);
LE64_BITMASK(BCH_SB_METADATA_TARGET,	struct bch_sb, flags[3], 16, 28);
LE64_BITMASK(BCH_SB_SHARD_INUMS,	struct bch_sb, flags[3], 28, 29);
LE64_BITMASK(BCH_SB_INODES_USE_KEY_CACHE,struct bch_sb, flags[3], 29, 30);
LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DELAY,struct bch_sb, flags[3], 30, 62);
LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DISABLED,struct bch_sb, flags[3], 62, 63);
LE64_BITMASK(BCH_SB_JOURNAL_RECLAIM_DELAY,struct bch_sb, flags[4],  0, 32);
/* Obsolete, always enabled: */
LE64_BITMASK(BCH_SB_JOURNAL_TRANSACTION_NAMES,struct bch_sb, flags[4], 32, 33);
/*
 * Features:
 *
 * journal_seq_blacklist_v3:	gates BCH_SB_FIELD_journal_seq_blacklist
 * reflink:			gates KEY_TYPE_reflink
 * inline_data:			gates KEY_TYPE_inline_data
 * new_siphash:			gates BCH_STR_HASH_siphash
 * new_extent_overwrite:	gates BTREE_NODE_NEW_EXTENT_OVERWRITE
 */
#define BCH_SB_FEATURES()			\
	x(lz4,				0)	\
	x(gzip,				1)	\
	x(zstd,				2)	\
	x(atomic_nlink,			3)	\
	x(ec,				4)	\
	x(journal_seq_blacklist_v3,	5)	\
	x(reflink,			6)	\
	x(new_siphash,			7)	\
	x(inline_data,			8)	\
	x(new_extent_overwrite,		9)	\
	x(incompressible,		10)	\
	x(btree_ptr_v2,			11)	\
	x(extents_above_btree_updates,	12)	\
	x(btree_updates_journalled,	13)	\
	x(reflink_inline_data,		14)	\
	x(new_varint,			15)	\
	x(journal_no_flush,		16)	\
	x(alloc_v2,			17)	\
	x(extents_across_btree_nodes,	18)
#define BCH_SB_FEATURES_ALWAYS				\
	((1ULL << BCH_FEATURE_new_extent_overwrite)|	\
	 (1ULL << BCH_FEATURE_extents_above_btree_updates)|\
	 (1ULL << BCH_FEATURE_btree_updates_journalled)|\
	 (1ULL << BCH_FEATURE_alloc_v2)|\
	 (1ULL << BCH_FEATURE_extents_across_btree_nodes))

#define BCH_SB_FEATURES_ALL				\
	(BCH_SB_FEATURES_ALWAYS|			\
	 (1ULL << BCH_FEATURE_new_siphash)|		\
	 (1ULL << BCH_FEATURE_btree_ptr_v2)|		\
	 (1ULL << BCH_FEATURE_new_varint)|		\
	 (1ULL << BCH_FEATURE_journal_no_flush))
enum bch_sb_feature {
#define x(f, n) BCH_FEATURE_##f,
	BCH_SB_FEATURES()
#undef x
	BCH_FEATURE_NR,
};

#define BCH_SB_COMPAT()					\
	x(alloc_info,				0)	\
	x(alloc_metadata,			1)	\
	x(extents_above_btree_updates_done,	2)	\
	x(bformat_overflow_done,		3)

enum bch_sb_compat {
#define x(f, n) BCH_COMPAT_##f,
	BCH_SB_COMPAT()
#undef x
	BCH_COMPAT_NR,
};
/* options: */

#define BCH_REPLICAS_MAX	4U

#define BCH_BKEY_PTRS_MAX	16U

#define BCH_ERROR_ACTIONS()		\
	x(continue,		0)	\
	x(ro,			1)	\
	x(panic,		2)

enum bch_error_actions {
#define x(t, n) BCH_ON_ERROR_##t = n,
	BCH_ERROR_ACTIONS()
#undef x
	BCH_ON_ERROR_NR
};

#define BCH_STR_HASH_TYPES()		\
	x(crc32c,		0)	\
	x(crc64,		1)	\
	x(siphash_old,		2)	\
	x(siphash,		3)

enum bch_str_hash_type {
#define x(t, n) BCH_STR_HASH_##t = n,
	BCH_STR_HASH_TYPES()
#undef x
	BCH_STR_HASH_NR
};

#define BCH_STR_HASH_OPTS()		\
	x(crc32c,		0)	\
	x(crc64,		1)	\
	x(siphash,		2)

enum bch_str_hash_opts {
#define x(t, n) BCH_STR_HASH_OPT_##t = n,
	BCH_STR_HASH_OPTS()
#undef x
	BCH_STR_HASH_OPT_NR
};
#define BCH_CSUM_TYPES()			\
	x(none,				0)	\
	x(crc32c_nonzero,		1)	\
	x(crc64_nonzero,		2)	\
	x(chacha20_poly1305_80,		3)	\
	x(chacha20_poly1305_128,	4)	\
	x(crc32c,			5)	\
	x(crc64,			6)	\
	x(xxhash,			7)

enum bch_csum_type {
#define x(t, n) BCH_CSUM_##t = n,
	BCH_CSUM_TYPES()
#undef x
	BCH_CSUM_NR
};

static const unsigned bch_crc_bytes[] = {
	[BCH_CSUM_none]				= 0,
	[BCH_CSUM_crc32c_nonzero]		= 4,
	[BCH_CSUM_crc32c]			= 4,
	[BCH_CSUM_crc64_nonzero]		= 8,
	[BCH_CSUM_crc64]			= 8,
	[BCH_CSUM_xxhash]			= 8,
	[BCH_CSUM_chacha20_poly1305_80]		= 10,
	[BCH_CSUM_chacha20_poly1305_128]	= 16,
};

static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type)
{
	switch (type) {
	case BCH_CSUM_chacha20_poly1305_80:
	case BCH_CSUM_chacha20_poly1305_128:
		return true;
	default:
		return false;
	}
}
#define BCH_CSUM_OPTS()			\
	x(none,			0)	\
	x(crc32c,		1)	\
	x(crc64,		2)	\
	x(xxhash,		3)

enum bch_csum_opts {
#define x(t, n) BCH_CSUM_OPT_##t = n,
	BCH_CSUM_OPTS()
#undef x
	BCH_CSUM_OPT_NR
};

#define BCH_COMPRESSION_TYPES()		\
	x(none,			0)	\
	x(lz4_old,		1)	\
	x(gzip,			2)	\
	x(lz4,			3)	\
	x(zstd,			4)	\
	x(incompressible,	5)

enum bch_compression_type {
#define x(t, n) BCH_COMPRESSION_TYPE_##t = n,
	BCH_COMPRESSION_TYPES()
#undef x
	BCH_COMPRESSION_TYPE_NR
};

#define BCH_COMPRESSION_OPTS()		\
	x(none,		0)		\
	x(lz4,		1)		\
	x(gzip,		2)		\
	x(zstd,		3)

enum bch_compression_opts {
#define x(t, n) BCH_COMPRESSION_OPT_##t = n,
	BCH_COMPRESSION_OPTS()
#undef x
	BCH_COMPRESSION_OPT_NR
};
/*
 * Magic numbers
 *
 * The various other data structures have their own magic numbers, which are
 * xored with the first part of the cache set's UUID
 */

#define BCACHE_MAGIC							\
	UUID_LE(0xf67385c6, 0x1a4e, 0xca45,				\
		0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)

#define BCACHEFS_STATFS_MAGIC	0xca451a4e

#define JSET_MAGIC		__cpu_to_le64(0x245235c1a3625032ULL)
#define BSET_MAGIC		__cpu_to_le64(0x90135c78b99e07f5ULL)

static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
{
	__le64 ret;
	memcpy(&ret, &sb->uuid, sizeof(ret));
	return ret;
}

static inline __u64 __jset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch2_sb_magic(sb) ^ JSET_MAGIC);
}

static inline __u64 __bset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch2_sb_magic(sb) ^ BSET_MAGIC);
}
/* Journal */

#define JSET_KEYS_U64s	(sizeof(struct jset_entry) / sizeof(__u64))

#define BCH_JSET_ENTRY_TYPES()			\
	x(btree_keys,		0)		\
	x(btree_root,		1)		\
	x(prio_ptrs,		2)		\
	x(blacklist,		3)		\
	x(blacklist_v2,		4)		\
	x(usage,		5)		\
	x(data_usage,		6)		\
	x(clock,		7)		\
	x(dev_usage,		8)		\
	x(log,			9)		\
	x(overwrite,		10)

enum {
#define x(f, nr)	BCH_JSET_ENTRY_##f	= nr,
	BCH_JSET_ENTRY_TYPES()
#undef x
	BCH_JSET_ENTRY_NR
};

/*
 * Journal sequence numbers can be blacklisted: bsets record the max sequence
 * number of all the journal entries they contain updates for, so that on
 * recovery we can ignore those bsets that contain index updates newer than what
 * made it into the journal.
 *
 * This means that we can't reuse that journal_seq - we have to skip it, and
 * then record that we skipped it so that the next time we crash and recover we
 * don't think there was a missing journal entry.
 */
struct jset_entry_blacklist {
	struct jset_entry	entry;
	__le64			seq;
};

struct jset_entry_blacklist_v2 {
	struct jset_entry	entry;
	__le64			start;
	__le64			end;
};

#define BCH_FS_USAGE_TYPES()			\
	x(reserved,		0)		\
	x(inodes,		1)		\
	x(key_version,		2)

enum {
#define x(f, nr)	BCH_FS_USAGE_##f	= nr,
	BCH_FS_USAGE_TYPES()
#undef x
	BCH_FS_USAGE_NR
};

struct jset_entry_usage {
	struct jset_entry	entry;
	__le64			v;
} __attribute__((packed));

struct jset_entry_data_usage {
	struct jset_entry	entry;
	__le64			v;
	struct bch_replicas_entry r;
} __attribute__((packed));

struct jset_entry_clock {
	struct jset_entry	entry;
	__u8			rw;
	__u8			pad[7];
	__le64			time;
} __attribute__((packed));

struct jset_entry_dev_usage_type {
	__le64			buckets;
	__le64			sectors;
	__le64			fragmented;
} __attribute__((packed));
struct jset_entry_dev_usage {
	struct jset_entry	entry;
	__le32			dev;
	__u32			pad;

	__le64			buckets_ec;
	__le64			_buckets_unavailable; /* No longer used */

	struct jset_entry_dev_usage_type d[];
} __attribute__((packed));

static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u)
{
	return (vstruct_bytes(&u->entry) - sizeof(struct jset_entry_dev_usage)) /
		sizeof(struct jset_entry_dev_usage_type);
}

struct jset_entry_log {
	struct jset_entry	entry;
	u8			d[];
} __attribute__((packed));

/*
 * On disk format for a journal entry:
 * seq is monotonically increasing; every journal entry has its own unique
 * sequence number.
 *
 * last_seq is the oldest journal entry that still has keys the btree hasn't
 * flushed to disk yet.
 *
 * version is for on disk format changes.
 */
struct jset {
	struct bch_csum		csum;

	__le64			magic;
	__le64			seq;
	__le32			version;
	__le32			flags;

	__le32			u64s; /* size of d[] in u64s */

	__u8			encrypted_start[0];

	__le16			_read_clock; /* no longer used */
	__le16			_write_clock;

	/* Sequence number of oldest dirty journal entry */
	__le64			last_seq;

	union {
		struct jset_entry start[0];
		__u64		_data[0];
	};
} __attribute__((packed, aligned(8)));

LE32_BITMASK(JSET_CSUM_TYPE,	struct jset, flags, 0, 4);
LE32_BITMASK(JSET_BIG_ENDIAN,	struct jset, flags, 4, 5);
LE32_BITMASK(JSET_NO_FLUSH,	struct jset, flags, 5, 6);

#define BCH_JOURNAL_BUCKETS_MIN		8
/* Btree: */

#define BCH_BTREE_IDS()				\
	x(extents,		0)		\
	x(inodes,		1)		\
	x(dirents,		2)		\
	x(xattrs,		3)		\
	x(alloc,		4)		\
	x(quotas,		5)		\
	x(stripes,		6)		\
	x(reflink,		7)		\
	x(subvolumes,		8)		\
	x(snapshots,		9)		\
	x(lru,			10)		\
	x(freespace,		11)		\
	x(need_discard,		12)		\
	x(backpointers,		13)

enum btree_id {
#define x(kwd, val) BTREE_ID_##kwd = val,
	BCH_BTREE_IDS()
#undef x
	BTREE_ID_NR
};

#define BTREE_MAX_DEPTH		4U
/* Btree nodes */

/*
 * Btree nodes
 *
 * On disk a btree node is a list/log of these; within each set the keys are
 * sorted
 */
struct bset {
	__le64			seq;

	/*
	 * Highest journal entry this bset contains keys for.
	 * If on recovery we don't see that journal entry, this bset is ignored:
	 * this allows us to preserve the order of all index updates after a
	 * crash, since the journal records a total order of all index updates
	 * and anything that didn't make it to the journal doesn't get used.
	 */
	__le64			journal_seq;

	__le32			flags;
	__le16			version;
	__le16			u64s; /* count of d[] in u64s */

	__u8			encrypted_start[0];

	struct bkey_packed	start[0];
	__u64			_data[0];
} __attribute__((packed, aligned(8)));
LE32_BITMASK(BSET_CSUM_TYPE,	struct bset, flags, 0, 4);

LE32_BITMASK(BSET_BIG_ENDIAN,	struct bset, flags, 4, 5);
LE32_BITMASK(BSET_SEPARATE_WHITEOUTS,
				struct bset, flags, 5, 6);

/* Sector offset within the btree node: */
LE32_BITMASK(BSET_OFFSET,	struct bset, flags, 16, 32);
struct btree_node {
	struct bch_csum		csum;
	__le64			magic;

	/* this flags field is encrypted, unlike bset->flags: */
	__le64			flags;

	/* Closed interval: */
	struct bpos		min_key;
	struct bpos		max_key;
	struct bch_extent_ptr	_ptr; /* not used anymore */
	struct bkey_format	format;

	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];

	};
	};
} __attribute__((packed, aligned(8)));

LE64_BITMASK(BTREE_NODE_ID,	struct btree_node, flags,  0,  4);
LE64_BITMASK(BTREE_NODE_LEVEL,	struct btree_node, flags,  4,  8);
LE64_BITMASK(BTREE_NODE_NEW_EXTENT_OVERWRITE,
				struct btree_node, flags,  8,  9);
LE64_BITMASK(BTREE_NODE_SEQ,	struct btree_node, flags, 32, 64);

struct btree_node_entry {
	struct bch_csum		csum;

	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];

	};
	};
} __attribute__((packed, aligned(8)));
#endif /* _BCACHEFS_FORMAT_H */