/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_FORMAT_H
#define _BCACHEFS_FORMAT_H
/*
 * bcachefs on disk data structures
 *
 * There are three main types of on disk data structures in bcachefs (this is
 * reduced from 5 in bcache):
 *
 *  - superblock
 *  - journal
 *  - btree
 *
 * The btree is the primary structure; most metadata exists as keys in the
 * various btrees. There are only a small number of btrees; they're not
 * sharded - we have one btree for extents, another for inodes, et cetera.
 *
 * SUPERBLOCK:
 *
 * The superblock contains the location of the journal, the list of devices in
 * the filesystem, and in general any metadata we need in order to decide
 * whether we can start a filesystem, or that we need prior to reading the
 * journal/btree roots.
 *
 * The superblock is extensible, and most of the contents of the superblock are
 * in variable length, type tagged fields; see struct bch_sb_field.
 *
 * Backup superblocks do not reside in a fixed location; also, superblocks do
 * not have a fixed size. To locate backup superblocks we have struct
 * bch_sb_layout; we store a copy of this inside every superblock, and also
 * before the first superblock.
 *
 * JOURNAL:
 *
 * The journal primarily records btree updates in the order they occurred;
 * journal replay consists of just iterating over all the keys in the open
 * journal entries and re-inserting them into the btrees.
 *
 * The journal also contains entry types for the btree roots, and blacklisted
 * journal sequence numbers (see journal_seq_blacklist.c).
 *
 * BTREE NODES:
 *
 * bcachefs btrees are copy on write b+ trees, where nodes are big (typically
 * 128k-256k) and log structured. We use struct btree_node for writing the
 * first entry in a given node (offset 0), and struct btree_node_entry for all
 * subsequent writes.
 *
 * After the header, btree node entries contain a list of keys in sorted order.
 * Values are stored inline with the keys; since values are variable length
 * (and keys effectively are variable length too, due to packing) we can't do
 * random access without building up additional in memory tables in the btree
 * node read path.
 *
 * BTREE KEYS (struct bkey):
 *
 * The various btrees share a common format for the key - so as to avoid
 * switching in fastpath lookup/comparison code - but define their own
 * structures for the key values.
 *
 * The size of a key/value pair is stored as a u8 in units of u64s, so the max
 * size is just under 2k. The common part also contains a type tag for the
 * value, and a format field indicating whether the key is packed or not (and
 * also meant to allow adding new key fields in the future, if desired).
 *
 * bkeys, when stored within a btree node, may also be packed. In that case,
 * the bkey_format in that node is used to unpack it. Packed bkeys mean that
 * we can be generous with field sizes in the common part of the key format
 * (64 bit inode number, 64 bit offset, 96 bit version field, etc.) for
 * negligible cost.
 */
#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/uuid.h>
#define LE_BITMASK(_bits, name, type, field, offset, end)		\
static const unsigned name##_OFFSET = offset;				\
static const unsigned name##_BITS = (end - offset);			\
static const __u##_bits name##_MAX = (1ULL << (end - offset)) - 1;	\
									\
static inline __u64 name(const type *k)				\
{									\
	return (__le##_bits##_to_cpu(k->field) >> offset) &		\
		~(~0ULL << (end - offset));				\
}									\
									\
static inline void SET_##name(type *k, __u64 v)				\
{									\
	__u##_bits new = __le##_bits##_to_cpu(k->field);		\
									\
	new &= ~(~(~0ULL << (end - offset)) << offset);			\
	new |= (v & ~(~0ULL << (end - offset))) << offset;		\
	k->field = __cpu_to_le##_bits(new);				\
}
#define LE16_BITMASK(n, t, f, o, e)	LE_BITMASK(16, n, t, f, o, e)
#define LE32_BITMASK(n, t, f, o, e)	LE_BITMASK(32, n, t, f, o, e)
#define LE64_BITMASK(n, t, f, o, e)	LE_BITMASK(64, n, t, f, o, e)
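/*
 * Illustrative usage sketch (names here are hypothetical, not part of the
 * format): given
 *
 *	LE64_BITMASK(EXAMPLE_FIELD, struct example, flags, 0, 4)
 *
 * the macro generates EXAMPLE_FIELD(k), returning bits 0-3 of
 * __le64_to_cpu(k->flags), and SET_EXAMPLE_FIELD(k, v), storing the low 4
 * bits of v there - endian-safe accessors for bitfields packed into
 * little-endian on disk integers.
 */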
struct bkey_format {
	__u8		key_u64s;
	__u8		nr_fields;
	/* One unused slot for now: */
	__u8		bits_per_field[6];
	__le64		field_offset[6];
};
/* Btree keys - all units are in sectors */

struct bpos {
	/*
	 * Word order matches machine byte order - btree code treats a bpos as a
	 * single large integer, for search/comparison purposes
	 *
	 * Note that wherever a bpos is embedded in another on disk data
	 * structure, it has to be byte swabbed when reading in metadata that
	 * wasn't written in native endian order:
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u32		snapshot;
	__u64		offset;
	__u64		inode;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	__u64		inode;
	__u64		offset;		/* Points to end of extent - sectors */
	__u32		snapshot;
#else
#error edit for your odd byteorder.
#endif
} __attribute__((packed, aligned(4)));
#define KEY_INODE_MAX		((__u64)~0ULL)
#define KEY_OFFSET_MAX		((__u64)~0ULL)
#define KEY_SNAPSHOT_MAX	((__u32)~0U)
#define KEY_SIZE_MAX		((__u32)~0U)

static inline struct bpos POS(__u64 inode, __u64 offset)
{
	struct bpos ret;

	ret.inode	= inode;
	ret.offset	= offset;
	ret.snapshot	= 0;

	return ret;
}

#define POS_MIN		POS(0, 0)
#define POS_MAX		POS(KEY_INODE_MAX, KEY_OFFSET_MAX)
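/*
 * Since word order matches byte order, a bpos compares as one large integer
 * with inode most significant: e.g. POS(1, ~0ULL) sorts before POS(2, 0).
 */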
/* Empty placeholder struct, for container_of() */
struct bch_val {
	__u64		__nothing[0];
};

struct bversion {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u64		lo;
	__u32		hi;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	__u32		hi;
	__u64		lo;
#endif
} __attribute__((packed, aligned(4)));
struct bkey {
	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#else
#error edit for your odd byteorder.
#endif

	/* Type of the value */
	__u8		type;

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u8		pad[1];

	struct bversion	version;
	__u32		size;		/* extent size, in sectors */
	struct bpos	p;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	struct bpos	p;
	__u32		size;		/* extent size, in sectors */
	struct bversion	version;

	__u8		pad[1];
#endif
} __attribute__((packed, aligned(8)));
struct bkey_packed {
	__u64		_data[0];

	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */

	/*
	 * XXX: next incompat on disk format change, switch format and
	 * needs_whiteout - bkey_packed() will be cheaper if format is the high
	 * bits of the bitfield
	 */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#endif

	/* Type of the value */
	__u8		type;
	__u8		key_start[0];

	/*
	 * We copy bkeys with struct assignment in various places, and while
	 * that shouldn't be done with packed bkeys we can't disallow it in C,
	 * and it's legal to cast a bkey to a bkey_packed - so padding it out
	 * to the same size as struct bkey should hopefully be safest.
	 */
	__u8		pad[sizeof(struct bkey) - 3];
} __attribute__((packed, aligned(8)));
#define BKEY_U64s		(sizeof(struct bkey) / sizeof(__u64))
#define BKEY_U64s_MAX		U8_MAX
#define BKEY_VAL_U64s_MAX	(BKEY_U64s_MAX - BKEY_U64s)
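/* u64s is a __u8, hence "just under 2k": at most U8_MAX * 8 = 2040 bytes */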
#define KEY_PACKED_BITS_START	24

#define KEY_FORMAT_LOCAL_BTREE	0
#define KEY_FORMAT_CURRENT	1
enum bch_bkey_fields {
	BKEY_FIELD_INODE,
	BKEY_FIELD_OFFSET,
	BKEY_FIELD_SNAPSHOT,
	BKEY_FIELD_SIZE,
	BKEY_FIELD_VERSION_HI,
	BKEY_FIELD_VERSION_LO,
	BKEY_NR_FIELDS,
};

#define bkey_format_field(name, field)					\
	[BKEY_FIELD_##name] = (sizeof(((struct bkey *) NULL)->field) * 8)
#define BKEY_FORMAT_CURRENT						\
((struct bkey_format) {							\
	.key_u64s	= BKEY_U64s,					\
	.nr_fields	= BKEY_NR_FIELDS,				\
	.bits_per_field = {						\
		bkey_format_field(INODE,      p.inode),			\
		bkey_format_field(OFFSET,     p.offset),		\
		bkey_format_field(SNAPSHOT,   p.snapshot),		\
		bkey_format_field(SIZE,       size),			\
		bkey_format_field(VERSION_HI, version.hi),		\
		bkey_format_field(VERSION_LO, version.lo),		\
	},								\
})
/* bkey with inline value */
struct bkey_i {
	__u64		_data[0];

	union {
	struct {
		/* Size of combined key and value, in u64s */
		__u8	u64s;
	};
	struct {
		struct bkey	k;
		struct bch_val	v;
	};
	};
};
#define KEY(_inode, _offset, _size)					\
((struct bkey) {							\
	.u64s		= BKEY_U64s,					\
	.format		= KEY_FORMAT_CURRENT,				\
	.p		= POS(_inode, _offset),				\
	.size		= _size,					\
})

static inline void bkey_init(struct bkey *k)
{
	*k = KEY(0, 0, 0);
}

#define bkey_bytes(_k)		((_k)->u64s * sizeof(__u64))

#define __BKEY_PADDED(key, pad)					\
	struct { struct bkey_i key; __u64 key ## _pad[pad]; }
/*
 * - DELETED keys are used internally to mark keys that should be ignored but
 *   override keys in composition order. Their version number is ignored.
 *
 * - DISCARDED keys indicate that the data is all 0s because it has been
 *   discarded. DISCARDs may have a version; if the version is nonzero the key
 *   will be persistent, otherwise the key will be dropped whenever the btree
 *   node is rewritten (like DELETED keys).
 *
 * - ERROR: any read of the data returns a read error, as the data was lost due
 *   to a failing device. Like DISCARDED keys, they can be removed (overridden)
 *   by new writes or cluster-wide GC. Node repair can also overwrite them with
 *   the same or a more recent version number, but not with an older version
 *   number.
 *
 * - WHITEOUT: for hash table btrees
 */
#define BCH_BKEY_TYPES()			\
	x(deleted,		0)		\
	x(discard,		1)		\
	x(error,		2)		\
	x(cookie,		3)		\
	x(whiteout,		4)		\
	x(btree_ptr,		5)		\
	x(extent,		6)		\
	x(reservation,		7)		\
	x(inode,		8)		\
	x(inode_generation,	9)		\
	x(dirent,		10)		\
	x(xattr,		11)		\
	x(alloc,		12)		\
	x(quota,		13)		\
	x(stripe,		14)		\
	x(reflink_p,		15)		\
	x(reflink_v,		16)		\
	x(inline_data,		17)		\
	x(btree_ptr_v2,		18)

enum bch_bkey_type {
#define x(name, nr) KEY_TYPE_##name = nr,
	BCH_BKEY_TYPES()
#undef x
	KEY_TYPE_MAX,
};
/* Extents */

/*
 * In extent bkeys, the value is a list of pointers (bch_extent_ptr), optionally
 * preceded by checksum/compression information (bch_extent_crc32 or
 * bch_extent_crc64).
 *
 * One major determining factor in the format of extents is how we handle and
 * represent extents that have been partially overwritten and thus trimmed:
 *
 * If an extent is not checksummed or compressed, when the extent is trimmed we
 * don't have to remember the extent we originally allocated and wrote: we can
 * merely adjust ptr->offset to point to the start of the data that is currently
 * live. The size field in struct bkey records the current (live) size of the
 * extent, and is also used to mean "size of region on disk that we point to" in
 * this case.
 *
 * Thus an extent that is not checksummed or compressed will consist only of a
 * list of bch_extent_ptrs, with none of the fields in
 * bch_extent_crc32/bch_extent_crc64.
 *
 * When an extent is checksummed or compressed, it's not possible to read only
 * the data that is currently live: we have to read the entire extent that was
 * originally written, and then return only the part of the extent that is
 * currently live.
 *
 * Thus, in addition to the current size of the extent in struct bkey, we need
 * to store the size of the originally allocated space - this is the
 * compressed_size and uncompressed_size fields in bch_extent_crc32/64. Also,
 * when the extent is trimmed, instead of modifying the offset field of the
 * pointer, we keep a second smaller offset field - "offset into the original
 * extent of the currently live region".
 *
 * The other major determining factor is replication and data migration:
 *
 * Each pointer may have its own bch_extent_crc32/64. When doing a replicated
 * write, we will initially write all the replicas in the same format, with the
 * same checksum type and compression format - however, when copygc runs later
 * (or tiering/cache promotion, anything that moves data), it is not in general
 * going to rewrite all the pointers at once - one of the replicas may be in a
 * bucket on one device that has very little fragmentation while another lives
 * in a bucket that has become heavily fragmented, and thus is being rewritten
 * sooner than the rest.
 *
 * Thus it will only move a subset of the pointers (or in the case of
 * tiering/cache promotion perhaps add a single pointer without dropping any
 * current pointers), and if the extent has been partially overwritten it must
 * write only the currently live portion (or copygc would not be able to reduce
 * fragmentation!) - which necessitates a different bch_extent_crc format for
 * the moved pointers.
 *
 * But in the interests of space efficiency, we don't want to store one
 * bch_extent_crc for each pointer if we don't have to.
 *
 * Thus, a bch_extent consists of bch_extent_crc32s, bch_extent_crc64s, and
 * bch_extent_ptrs appended arbitrarily one after the other. We determine the
 * type of a given entry with a scheme similar to utf8 (except we're encoding a
 * type, not a size), encoding the type in the position of the first set bit:
 *
 * bch_extent_ptr	- 0b1
 * bch_extent_crc32	- 0b10
 * bch_extent_crc64	- 0b100
 *
 * We do it this way because bch_extent_crc32 is _very_ constrained on bits (and
 * bch_extent_crc64 is the least constrained).
 *
 * Then, each bch_extent_crc32/64 applies to the pointers that follow after it,
 * until the next bch_extent_crc32/64.
 *
 * If there are no bch_extent_crcs preceding a bch_extent_ptr, then that pointer
 * is neither checksummed nor compressed.
 */
/* 128 bits, sufficient for cryptographic MACs: */
struct bch_csum {
	__le64		lo;
	__le64		hi;
} __attribute__((packed, aligned(8)));

#define BCH_EXTENT_ENTRY_TYPES()		\
	x(ptr,			0)		\
	x(crc32,		1)		\
	x(crc64,		2)		\
	x(crc128,		3)		\
	x(stripe_ptr,		4)
#define BCH_EXTENT_ENTRY_MAX	5

enum bch_extent_entry_type {
#define x(f, n) BCH_EXTENT_ENTRY_##f = n,
	BCH_EXTENT_ENTRY_TYPES()
#undef x
};
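/*
 * A minimal decoding sketch (illustrative helper, not part of this header;
 * assumes a little-endian bitfield layout, where the type bits land in the
 * entry's first byte): the entry type is the index of the first set bit.
 */
static inline unsigned bch2_extent_entry_type_example(const __u8 *entry)
{
	unsigned bit = 0;

	/* find the lowest set bit; its position encodes the entry type */
	while (bit < 8 && !(*entry & (1U << bit)))
		bit++;

	return bit < BCH_EXTENT_ENTRY_MAX ? bit : BCH_EXTENT_ENTRY_MAX;
}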
/* Compressed/uncompressed size are stored biased by 1: */
struct bch_extent_crc32 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u32		type:2,
			_compressed_size:7,
			_uncompressed_size:7,
			offset:7,
			_unused:1,
			csum_type:4,
			compression_type:4;
	__u32		csum;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u32		csum;
	__u32		compression_type:4,
			csum_type:4,
			_unused:1,
			offset:7,
			_uncompressed_size:7,
			_compressed_size:7,
			type:2;
#endif
} __attribute__((packed, aligned(8)));

#define CRC32_SIZE_MAX		(1U << 7)
#define CRC32_NONCE_MAX		0
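/*
 * Illustrative helper (not part of this header): undoing the bias, the 7 bit
 * size fields of bch_extent_crc32 cover 1..128 sectors (CRC32_SIZE_MAX).
 */
static inline unsigned bch2_crc32_uncompressed_size_example(const struct bch_extent_crc32 *crc)
{
	return crc->_uncompressed_size + 1;	/* sizes are stored biased by 1 */
}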
struct bch_extent_crc64 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64		type:3,
			_compressed_size:9,
			_uncompressed_size:9,
			offset:9,
			nonce:10,
			csum_type:4,
			compression_type:4,
			csum_hi:16;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64		csum_hi:16,
			compression_type:4,
			csum_type:4,
			nonce:10,
			offset:9,
			_uncompressed_size:9,
			_compressed_size:9,
			type:3;
#endif
	__u64		csum_lo;
} __attribute__((packed, aligned(8)));

#define CRC64_SIZE_MAX		(1U << 9)
#define CRC64_NONCE_MAX		((1U << 10) - 1)

struct bch_extent_crc128 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64		type:4,
			_compressed_size:13,
			_uncompressed_size:13,
			offset:13,
			nonce:13,
			csum_type:4,
			compression_type:4;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64		compression_type:4,
			csum_type:4,
			nonce:13,
			offset:13,
			_uncompressed_size:13,
			_compressed_size:13,
			type:4;
#endif
	struct bch_csum	csum;
} __attribute__((packed, aligned(8)));

#define CRC128_SIZE_MAX		(1U << 13)
#define CRC128_NONCE_MAX	((1U << 13) - 1)
/*
 * @reservation - pointer hasn't been written to, just reserved
 */
struct bch_extent_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64		type:1,
			cached:1,
			unused:1,
			reservation:1,
			offset:44, /* 8 petabytes */
			dev:8,
			gen:8;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64		gen:8,
			dev:8,
			offset:44,
			reservation:1,
			unused:1,
			cached:1,
			type:1;
#endif
} __attribute__((packed, aligned(8)));

struct bch_extent_stripe_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64		type:5,
			block:8,
			idx:51;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64		idx:51,
			block:8,
			type:5;
#endif
};

struct bch_extent_reservation {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64		type:6,
			unused:22,
			replicas:4,
			generation:32;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64		generation:32,
			replicas:4,
			unused:22,
			type:6;
#endif
};

union bch_extent_entry {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ || __BITS_PER_LONG == 64
	unsigned long			type;
#elif __BITS_PER_LONG == 32
	struct {
		unsigned long		pad;
		unsigned long		type;
	};
#else
#error edit for your odd byteorder.
#endif

#define x(f, n) struct bch_extent_##f f;
	BCH_EXTENT_ENTRY_TYPES()
#undef x
};
struct bch_btree_ptr {
	struct bch_val		v;

	struct bch_extent_ptr	start[0];
	__u64			_data[0];
} __attribute__((packed, aligned(8)));

struct bch_btree_ptr_v2 {
	struct bch_val		v;

	__u64			mem_ptr;
	__le64			seq;
	__le16			sectors_written;
	/* In case we ever decide to do variable size btree nodes: */
	__le16			sectors;

	struct bpos		min_key;
	struct bch_extent_ptr	start[0];
	__u64			_data[0];
} __attribute__((packed, aligned(8)));

struct bch_extent {
	struct bch_val		v;

	union bch_extent_entry	start[0];
	__u64			_data[0];
} __attribute__((packed, aligned(8)));

struct bch_reservation {
	struct bch_val		v;

	__le32			generation;
	__u8			nr_replicas;
	__u8			pad[3];
} __attribute__((packed, aligned(8)));
/* Maximum size (in u64s) a single pointer could be: */
#define BKEY_EXTENT_PTR_U64s_MAX				\
	((sizeof(struct bch_extent_crc128) +			\
	  sizeof(struct bch_extent_ptr)) / sizeof(u64))

/* Maximum possible size of an entire extent value: */
#define BKEY_EXTENT_VAL_U64s_MAX				\
	(1 + BKEY_EXTENT_PTR_U64s_MAX * (BCH_REPLICAS_MAX + 1))
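/*
 * Worked example from the struct sizes above: bch_extent_crc128 is 24 bytes
 * and bch_extent_ptr 8, so BKEY_EXTENT_PTR_U64s_MAX is 4; with
 * BCH_REPLICAS_MAX == 4 the largest extent value is 1 + 4 * 5 = 21 u64s.
 */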
#define BKEY_PADDED(key)	__BKEY_PADDED(key, BKEY_EXTENT_VAL_U64s_MAX)

/* Maximum possible size of an entire extent, key + value: */
#define BKEY_EXTENT_U64s_MAX	(BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)

/* Btree pointers don't carry around checksums: */
#define BKEY_BTREE_PTR_VAL_U64s_MAX				\
	((sizeof(struct bch_btree_ptr_v2) +			\
	  sizeof(struct bch_extent_ptr) * BCH_REPLICAS_MAX) / sizeof(u64))
#define BKEY_BTREE_PTR_U64s_MAX					\
	(BKEY_U64s + BKEY_BTREE_PTR_VAL_U64s_MAX)
/* Inodes */

#define BLOCKDEV_INODE_MAX	4096

#define BCACHEFS_ROOT_INO	4096
struct bch_inode {
	struct bch_val		v;

	__le64			bi_hash_seed;
	__le32			bi_flags;
	__le16			bi_mode;
	__u8			fields[0];
} __attribute__((packed, aligned(8)));

struct bch_inode_generation {
	struct bch_val		v;

	__le32			bi_generation;
	__le32			pad;
} __attribute__((packed, aligned(8)));

#define BCH_INODE_FIELDS()			\
	x(bi_atime,			64)	\
	x(bi_ctime,			64)	\
	x(bi_mtime,			64)	\
	x(bi_otime,			64)	\
	x(bi_size,			64)	\
	x(bi_sectors,			64)	\
	x(bi_uid,			32)	\
	x(bi_gid,			32)	\
	x(bi_nlink,			32)	\
	x(bi_generation,		32)	\
	x(bi_dev,			32)	\
	x(bi_data_checksum,		8)	\
	x(bi_compression,		8)	\
	x(bi_project,			32)	\
	x(bi_background_compression,	8)	\
	x(bi_data_replicas,		8)	\
	x(bi_promote_target,		16)	\
	x(bi_foreground_target,		16)	\
	x(bi_background_target,		16)	\
	x(bi_erasure_code,		16)	\
	x(bi_fields_set,		16)

/* subset of BCH_INODE_FIELDS */
#define BCH_INODE_OPTS()			\
	x(data_checksum,		8)	\
	x(compression,			8)	\
	x(project,			32)	\
	x(background_compression,	8)	\
	x(data_replicas,		8)	\
	x(promote_target,		16)	\
	x(foreground_target,		16)	\
	x(background_target,		16)	\
	x(erasure_code,			16)
enum inode_opt_id {
#define x(name, ...)				\
	Inode_opt_##name,
	BCH_INODE_OPTS()
#undef x
	Inode_opt_nr,
};
/*
 * User flags (get/settable with FS_IOC_*FLAGS, correspond to FS_*_FL
 * flags in linux/fs.h):
 */
enum {
	__BCH_INODE_SYNC	= 0,
	__BCH_INODE_IMMUTABLE	= 1,
	__BCH_INODE_APPEND	= 2,
	__BCH_INODE_NODUMP	= 3,
	__BCH_INODE_NOATIME	= 4,

	__BCH_INODE_I_SIZE_DIRTY	= 5,
	__BCH_INODE_I_SECTORS_DIRTY	= 6,
	__BCH_INODE_UNLINKED		= 7,

	/* bits 20+ reserved for packed fields below: */
};
#define BCH_INODE_SYNC		(1 << __BCH_INODE_SYNC)
#define BCH_INODE_IMMUTABLE	(1 << __BCH_INODE_IMMUTABLE)
#define BCH_INODE_APPEND	(1 << __BCH_INODE_APPEND)
#define BCH_INODE_NODUMP	(1 << __BCH_INODE_NODUMP)
#define BCH_INODE_NOATIME	(1 << __BCH_INODE_NOATIME)
#define BCH_INODE_I_SIZE_DIRTY	(1 << __BCH_INODE_I_SIZE_DIRTY)
#define BCH_INODE_I_SECTORS_DIRTY (1 << __BCH_INODE_I_SECTORS_DIRTY)
#define BCH_INODE_UNLINKED	(1 << __BCH_INODE_UNLINKED)
LE32_BITMASK(INODE_STR_HASH,	struct bch_inode, bi_flags, 20, 24);
LE32_BITMASK(INODE_NR_FIELDS,	struct bch_inode, bi_flags, 24, 32);
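/*
 * Usage sketch (illustrative): the LE32_BITMASK invocations above generate
 * accessors for the fields packed into bi_flags, e.g.
 *
 *	unsigned nr = INODE_NR_FIELDS(&inode);
 *	SET_INODE_NR_FIELDS(&inode, nr + 1);
 */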
/* Dirents */

/*
 * Dirents (and xattrs) have to implement string lookups; since our b-tree
 * doesn't support arbitrary length strings for the key, we instead index by a
 * 64 bit hash (currently truncated sha1) of the string, stored in the offset
 * field of the key - using linear probing to resolve hash collisions. This also
 * provides us with the readdir cookie posix requires.
 *
 * Linear probing requires us to use whiteouts for deletions, in the event of a
 * collision:
 */
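/*
 * Lookup sketch under this scheme (purely illustrative): hash the name, then
 * scan forward from POS(dir_inum, hash) until a dirent with a matching name
 * or an empty slot is found; whiteouts keep such probe chains intact when
 * entries in the middle of a chain are deleted.
 */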
struct bch_dirent {
	struct bch_val		v;

	/* Target inode number: */
	__le64			d_inum;

	/*
	 * Copy of mode bits 12-15 from the target inode - so userspace can get
	 * the filetype without having to do a stat()
	 */
	__u8			d_type;

	__u8			d_name[];
} __attribute__((packed, aligned(8)));

#define BCH_NAME_MAX	(U8_MAX * sizeof(u64) -				\
			 sizeof(struct bkey) -				\
			 offsetof(struct bch_dirent, d_name))

/* Xattrs */

#define KEY_TYPE_XATTR_INDEX_USER		0
#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS	1
#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT	2
#define KEY_TYPE_XATTR_INDEX_TRUSTED		3
#define KEY_TYPE_XATTR_INDEX_SECURITY		4

struct bch_xattr {
	struct bch_val		v;
	__u8			x_type;
	__u8			x_name_len;
	__le16			x_val_len;
	__u8			x_name[];
} __attribute__((packed, aligned(8)));
/* Bucket/allocation information: */

struct bch_alloc {
	struct bch_val		v;
	__u8			fields;
	__u8			gen;
	__u8			data[];
} __attribute__((packed, aligned(8)));

#define BCH_ALLOC_FIELDS()			\
	x(read_time,		16)		\
	x(write_time,		16)		\
	x(data_type,		8)		\
	x(dirty_sectors,	16)		\
	x(cached_sectors,	16)		\
	x(oldest_gen,		8)

enum {
#define x(name, bytes) BCH_ALLOC_FIELD_##name,
	BCH_ALLOC_FIELDS()
#undef x
	BCH_ALLOC_FIELD_NR
};

static const unsigned BCH_ALLOC_FIELD_BYTES[] = {
#define x(name, bits) [BCH_ALLOC_FIELD_##name] = bits / 8,
	BCH_ALLOC_FIELDS()
#undef x
};
#define x(name, bits) + (bits / 8)
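/*
 * With this x() definition, BCH_ALLOC_FIELDS() expands to
 * "+ 2 + 2 + 1 + 2 + 2 + 1" - the byte width of each variable length field -
 * summing them onto the fixed header size below:
 */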
static const unsigned BKEY_ALLOC_VAL_U64s_MAX =
	DIV_ROUND_UP(offsetof(struct bch_alloc, data)
		     BCH_ALLOC_FIELDS(), sizeof(u64));
#undef x

#define BKEY_ALLOC_U64s_MAX	(BKEY_U64s + BKEY_ALLOC_VAL_U64s_MAX)
/* Quotas: */

enum quota_types {
	QTYP_USR		= 0,
	QTYP_GRP		= 1,
	QTYP_PRJ		= 2,
	QTYP_NR			= 3,
};

enum quota_counters {
	Q_SPC			= 0,
	Q_INO			= 1,
	Q_COUNTERS		= 2,
};

struct bch_quota_counter {
	__le64			hardlimit;
	__le64			softlimit;
};

struct bch_quota {
	struct bch_val		v;
	struct bch_quota_counter c[Q_COUNTERS];
} __attribute__((packed, aligned(8)));

/* Erasure coding */

struct bch_stripe {
	struct bch_val		v;
	__le16			sectors;
	__u8			algorithm;
	__u8			nr_blocks;
	__u8			nr_redundant;

	__u8			csum_granularity_bits;
	__u8			csum_type;
	__u8			pad;

	struct bch_extent_ptr	ptrs[0];
} __attribute__((packed, aligned(8)));

/* Reflink: */

struct bch_reflink_p {
	struct bch_val		v;
	__le64			idx;

	__le32			reservation_generation;
	__u8			nr_replicas;
	__u8			pad[5];
};

struct bch_reflink_v {
	struct bch_val		v;
	__le64			refcount;
	union bch_extent_entry	start[0];
	__u64			_data[0];
};

/* Inline data */

struct bch_inline_data {
	struct bch_val		v;
	u8			data[0];
};
/* Optional/variable size superblock sections: */

struct bch_sb_field {
	__u64			_data[0];
	__le32			u64s;
	__le32			type;
};

#define BCH_SB_FIELDS()			\
	x(journal,	0)		\
	x(members,	1)		\
	x(crypt,	2)		\
	x(replicas_v0,	3)		\
	x(quota,	4)		\
	x(disk_groups,	5)		\
	x(clean,	6)		\
	x(replicas,	7)		\
	x(journal_seq_blacklist, 8)

enum bch_sb_field_type {
#define x(f, nr) BCH_SB_FIELD_##f = nr,
	BCH_SB_FIELDS()
#undef x
	BCH_SB_FIELD_NR
};

/* BCH_SB_FIELD_journal: */

struct bch_sb_field_journal {
	struct bch_sb_field	field;
	__le64			buckets[0];
};

/* BCH_SB_FIELD_members: */

#define BCH_MIN_NR_NBUCKETS	(1 << 6)

struct bch_member {
	uuid_le			uuid;
	__le64			nbuckets;	/* device size */
	__le16			first_bucket;	/* index of first bucket used */
	__le16			bucket_size;	/* sectors */
	__le32			pad;
	__le64			last_mount;	/* time_t */

	__le64			flags[2];
};
LE64_BITMASK(BCH_MEMBER_STATE,		struct bch_member, flags[0],  0,  4)
/* 4-10 unused, was TIER, HAS_(META)DATA */
LE64_BITMASK(BCH_MEMBER_REPLACEMENT,	struct bch_member, flags[0], 10, 14)
LE64_BITMASK(BCH_MEMBER_DISCARD,	struct bch_member, flags[0], 14, 15)
LE64_BITMASK(BCH_MEMBER_DATA_ALLOWED,	struct bch_member, flags[0], 15, 20)
LE64_BITMASK(BCH_MEMBER_GROUP,		struct bch_member, flags[0], 20, 28)
LE64_BITMASK(BCH_MEMBER_DURABILITY,	struct bch_member, flags[0], 28, 30)

#define BCH_TIER_MAX			4U

LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS,	struct bch_member, flags[1],  0, 20);
LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);

enum bch_member_state {
	BCH_MEMBER_STATE_RW	= 0,
	BCH_MEMBER_STATE_RO	= 1,
	BCH_MEMBER_STATE_FAILED	= 2,
	BCH_MEMBER_STATE_SPARE	= 3,
	BCH_MEMBER_STATE_NR	= 4,
};

enum cache_replacement {
	CACHE_REPLACEMENT_LRU		= 0,
	CACHE_REPLACEMENT_FIFO		= 1,
	CACHE_REPLACEMENT_RANDOM	= 2,
	CACHE_REPLACEMENT_NR		= 3,
};

struct bch_sb_field_members {
	struct bch_sb_field	field;
	struct bch_member	members[0];
};
/* BCH_SB_FIELD_crypt: */

struct nonce {
	__le32			d[4];
};

struct bch_key {
	__le64			key[4];
};

#define BCH_KEY_MAGIC					\
	(((u64) 'b' <<  0)|((u64) 'c' <<  8)|		\
	 ((u64) 'h' << 16)|((u64) '*' << 24)|		\
	 ((u64) '*' << 32)|((u64) 'k' << 40)|		\
	 ((u64) 'e' << 48)|((u64) 'y' << 56))
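/* i.e. the string "bch**key", read as a little-endian __u64 */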
struct bch_encrypted_key {
	__le64			magic;
	struct bch_key		key;
};
/*
 * If this field is present in the superblock, it stores an encryption key
 * which is used to encrypt all other data/metadata. The key will normally be
 * encrypted with the key userspace provides, but if encryption has been turned
 * off we'll just store the master key unencrypted in the superblock so we can
 * access the previously encrypted data.
 */
struct bch_sb_field_crypt {
	struct bch_sb_field	field;

	__le64			flags;
	__le64			kdf_flags;
	struct bch_encrypted_key key;
};

LE64_BITMASK(BCH_CRYPT_KDF_TYPE,	struct bch_sb_field_crypt, flags, 0, 4);

enum bch_kdf_types {
	BCH_KDF_SCRYPT		= 0,
	BCH_KDF_NR		= 1,
};

/* stored as base 2 log of scrypt params: */
LE64_BITMASK(BCH_KDF_SCRYPT_N,	struct bch_sb_field_crypt, kdf_flags,  0, 16);
LE64_BITMASK(BCH_KDF_SCRYPT_R,	struct bch_sb_field_crypt, kdf_flags, 16, 32);
LE64_BITMASK(BCH_KDF_SCRYPT_P,	struct bch_sb_field_crypt, kdf_flags, 32, 48);
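/*
 * e.g. hypothetical scrypt parameters N = 16384, r = 8, p = 1 would be stored
 * as BCH_KDF_SCRYPT_N = 14, BCH_KDF_SCRYPT_R = 3, BCH_KDF_SCRYPT_P = 0.
 */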
/* BCH_SB_FIELD_replicas: */

#define BCH_DATA_TYPES()		\
	x(none,		0)		\
	x(sb,		1)		\
	x(journal,	2)		\
	x(btree,	3)		\
	x(user,		4)		\
	x(cached,	5)

enum bch_data_type {
#define x(t, n) BCH_DATA_##t,
	BCH_DATA_TYPES()
#undef x
	BCH_DATA_NR
};
struct bch_replicas_entry_v0 {
	__u8			data_type;
	__u8			nr_devs;
	__u8			devs[];
} __attribute__((packed));

struct bch_sb_field_replicas_v0 {
	struct bch_sb_field	field;
	struct bch_replicas_entry_v0 entries[0];
} __attribute__((packed, aligned(8)));

struct bch_replicas_entry {
	__u8			data_type;
	__u8			nr_devs;
	__u8			nr_required;
	__u8			devs[];
} __attribute__((packed));

#define replicas_entry_bytes(_i)					\
	(offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
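/*
 * Usage sketch: an entry listing nr_devs == 2 devices occupies
 * replicas_entry_bytes(e) = offsetof(struct bch_replicas_entry, devs) + 2
 * bytes; entries are laid out back to back in the superblock field below.
 */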
struct bch_sb_field_replicas {
	struct bch_sb_field	field;
	struct bch_replicas_entry entries[0];
} __attribute__((packed, aligned(8)));

/* BCH_SB_FIELD_quota: */

struct bch_sb_quota_counter {
	__le32			timelimit;
	__le32			warnlimit;
};

struct bch_sb_quota_type {
	__le64			flags;
	struct bch_sb_quota_counter c[Q_COUNTERS];
};

struct bch_sb_field_quota {
	struct bch_sb_field	field;
	struct bch_sb_quota_type q[QTYP_NR];
} __attribute__((packed, aligned(8)));

/* BCH_SB_FIELD_disk_groups: */

#define BCH_SB_LABEL_SIZE	32

struct bch_disk_group {
	__u8			label[BCH_SB_LABEL_SIZE];
	__le64			flags[2];
} __attribute__((packed, aligned(8)));

LE64_BITMASK(BCH_GROUP_DELETED,		struct bch_disk_group, flags[0], 0, 1)
LE64_BITMASK(BCH_GROUP_DATA_ALLOWED,	struct bch_disk_group, flags[0], 1, 6)
LE64_BITMASK(BCH_GROUP_PARENT,		struct bch_disk_group, flags[0], 6, 24)

struct bch_sb_field_disk_groups {
	struct bch_sb_field	field;
	struct bch_disk_group	entries[0];
} __attribute__((packed, aligned(8)));
/*
 * On clean shutdown, store btree roots and current journal sequence number in
 * BCH_SB_FIELD_clean:
 */

struct jset_entry {
	__le16			u64s;
	__u8			btree_id;
	__u8			level;
	__u8			type; /* designates what this jset holds */
	__u8			pad[3];

	union {
		struct bkey_i	start[0];
		__u64		_data[0];
	};
};

struct bch_sb_field_clean {
	struct bch_sb_field	field;

	__le32			flags;
	__le16			read_clock;
	__le16			write_clock;
	__le64			journal_seq;

	union {
		struct jset_entry start[0];
		__u64		_data[0];
	};
};

struct journal_seq_blacklist_entry {
	__le64			start;
	__le64			end;
};

struct bch_sb_field_journal_seq_blacklist {
	struct bch_sb_field	field;

	union {
		struct journal_seq_blacklist_entry start[0];
		__u64		_data[0];
	};
};

/* Superblock: */

/*
 * New versioning scheme:
 * One common version number for all on disk data structures - superblock,
 * btree nodes, journal entries
 */
#define BCH_JSET_VERSION_OLD			2
#define BCH_BSET_VERSION_OLD			3

enum bcachefs_metadata_version {
	bcachefs_metadata_version_min			= 9,
	bcachefs_metadata_version_new_versioning	= 10,
	bcachefs_metadata_version_bkey_renumber		= 10,
	bcachefs_metadata_version_inode_btree_change	= 11,
	bcachefs_metadata_version_max			= 12,
};

#define bcachefs_metadata_version_current	(bcachefs_metadata_version_max - 1)

#define BCH_SB_SECTOR			8
#define BCH_SB_MEMBERS_MAX		64 /* XXX kill */

struct bch_sb_layout {
	uuid_le			magic;	/* bcachefs superblock UUID */
	__u8			layout_type;
	__u8			sb_max_size_bits; /* log2 of sb size, in 512 byte sectors */
	__u8			nr_superblocks;
	__u8			pad[5];
	__le64			sb_offset[61];
} __attribute__((packed, aligned(8)));
#define BCH_SB_LAYOUT_SECTOR	7
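/*
 * e.g. sb_max_size_bits == 11 allows each superblock up to 1 << 11 = 2048
 * sectors (1MB); sb_offset[] records the sector of every superblock copy.
 */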
/*
 * @offset	- sector where this sb was written
 * @version	- on disk format version
 * @version_min	- oldest metadata version this filesystem contains; so we can
 *		  safely drop compatibility code and refuse to mount filesystems
 *		  we'd previously have been able to mount
 * @magic	- identifies as a bcachefs superblock (BCACHE_MAGIC)
 * @uuid	- used for generating various magic numbers and identifying
 *		  member devices, never changes
 * @user_uuid	- user visible UUID, may be changed
 * @label	- filesystem label
 * @seq		- identifies most recent superblock, incremented each time
 *		  superblock is written
 * @features	- enabled incompatible features
 */
struct bch_sb {
	struct bch_csum		csum;
	__le16			version;
	__le16			version_min;
	__le16			pad[2];
	uuid_le			magic;
	uuid_le			uuid;
	uuid_le			user_uuid;
	__u8			label[BCH_SB_LABEL_SIZE];
	__le64			offset;
	__le64			seq;

	__le16			block_size;
	__u8			dev_idx;
	__u8			nr_devices;
	__le32			u64s;

	__le64			time_base_lo;
	__le32			time_base_hi;
	__le32			time_precision;

	__le64			flags[8];
	__le64			features[2];
	__le64			compat[2];

	struct bch_sb_layout	layout;

	union {
		struct bch_sb_field start[0];
		__le64		_data[0];
	};
} __attribute__((packed, aligned(8)));
/*
 * Flags:
 * BCH_SB_INITIALIZED	- set on first mount
 * BCH_SB_CLEAN		- did we shut down cleanly? Just a hint, doesn't affect
 *			  behaviour of mount/recovery path:
 * BCH_SB_INODE_32BIT	- limit inode numbers to 32 bits
 * BCH_SB_128_BIT_MACS	- 128 bit macs instead of 80
 * BCH_SB_ENCRYPTION_TYPE - if nonzero encryption is enabled; overrides
 *			  DATA/META_CSUM_TYPE. Also indicates encryption
 *			  algorithm in use, if/when we get more than one
 */
LE16_BITMASK(BCH_SB_BLOCK_SIZE,		struct bch_sb, block_size, 0, 16);

LE64_BITMASK(BCH_SB_INITIALIZED,	struct bch_sb, flags[0],  0,  1);
LE64_BITMASK(BCH_SB_CLEAN,		struct bch_sb, flags[0],  1,  2);
LE64_BITMASK(BCH_SB_CSUM_TYPE,		struct bch_sb, flags[0],  2,  8);
LE64_BITMASK(BCH_SB_ERROR_ACTION,	struct bch_sb, flags[0],  8, 12);

LE64_BITMASK(BCH_SB_BTREE_NODE_SIZE,	struct bch_sb, flags[0], 12, 28);

LE64_BITMASK(BCH_SB_GC_RESERVE,		struct bch_sb, flags[0], 28, 33);
LE64_BITMASK(BCH_SB_ROOT_RESERVE,	struct bch_sb, flags[0], 33, 40);

LE64_BITMASK(BCH_SB_META_CSUM_TYPE,	struct bch_sb, flags[0], 40, 44);
LE64_BITMASK(BCH_SB_DATA_CSUM_TYPE,	struct bch_sb, flags[0], 44, 48);

LE64_BITMASK(BCH_SB_META_REPLICAS_WANT,	struct bch_sb, flags[0], 48, 52);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_WANT,	struct bch_sb, flags[0], 52, 56);

LE64_BITMASK(BCH_SB_POSIX_ACL,		struct bch_sb, flags[0], 56, 57);
LE64_BITMASK(BCH_SB_USRQUOTA,		struct bch_sb, flags[0], 57, 58);
LE64_BITMASK(BCH_SB_GRPQUOTA,		struct bch_sb, flags[0], 58, 59);
LE64_BITMASK(BCH_SB_PRJQUOTA,		struct bch_sb, flags[0], 59, 60);

LE64_BITMASK(BCH_SB_HAS_ERRORS,		struct bch_sb, flags[0], 60, 61);

LE64_BITMASK(BCH_SB_REFLINK,		struct bch_sb, flags[0], 61, 62);

LE64_BITMASK(BCH_SB_STR_HASH_TYPE,	struct bch_sb, flags[1],  0,  4);
LE64_BITMASK(BCH_SB_COMPRESSION_TYPE,	struct bch_sb, flags[1],  4,  8);
LE64_BITMASK(BCH_SB_INODE_32BIT,	struct bch_sb, flags[1],  8,  9);

LE64_BITMASK(BCH_SB_128_BIT_MACS,	struct bch_sb, flags[1],  9, 10);
LE64_BITMASK(BCH_SB_ENCRYPTION_TYPE,	struct bch_sb, flags[1], 10, 14);

/*
 * Max size of an extent that may require bouncing to read or write
 * (checksummed, compressed): 64k
 */
LE64_BITMASK(BCH_SB_ENCODED_EXTENT_MAX_BITS,
					struct bch_sb, flags[1], 14, 20);

LE64_BITMASK(BCH_SB_META_REPLICAS_REQ,	struct bch_sb, flags[1], 20, 24);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_REQ,	struct bch_sb, flags[1], 24, 28);

LE64_BITMASK(BCH_SB_PROMOTE_TARGET,	struct bch_sb, flags[1], 28, 40);
LE64_BITMASK(BCH_SB_FOREGROUND_TARGET,	struct bch_sb, flags[1], 40, 52);
LE64_BITMASK(BCH_SB_BACKGROUND_TARGET,	struct bch_sb, flags[1], 52, 64);

LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE,
					struct bch_sb, flags[2],  0,  4);
LE64_BITMASK(BCH_SB_GC_RESERVE_BYTES,	struct bch_sb, flags[2],  4, 64);

LE64_BITMASK(BCH_SB_ERASURE_CODE,	struct bch_sb, flags[3],  0, 16);
/*
 * Features:
 *
 * journal_seq_blacklist_v3:	gates BCH_SB_FIELD_journal_seq_blacklist
 * reflink:			gates KEY_TYPE_reflink
 * inline_data:			gates KEY_TYPE_inline_data
 * new_siphash:			gates BCH_STR_HASH_SIPHASH
 * new_extent_overwrite:	gates BTREE_NODE_NEW_EXTENT_OVERWRITE
 */
#define BCH_SB_FEATURES()			\
	x(lz4,				0)	\
	x(gzip,				1)	\
	x(zstd,				2)	\
	x(atomic_nlink,			3)	\
	x(ec,				4)	\
	x(journal_seq_blacklist_v3,	5)	\
	x(reflink,			6)	\
	x(new_siphash,			7)	\
	x(inline_data,			8)	\
	x(new_extent_overwrite,		9)	\
	x(incompressible,		10)	\
	x(btree_ptr_v2,			11)	\
	x(extents_above_btree_updates,	12)	\
	x(btree_updates_journalled,	13)

#define BCH_SB_FEATURES_ALL				\
	((1ULL << BCH_FEATURE_new_siphash)|		\
	 (1ULL << BCH_FEATURE_new_extent_overwrite)|	\
	 (1ULL << BCH_FEATURE_btree_ptr_v2)|		\
	 (1ULL << BCH_FEATURE_extents_above_btree_updates))

enum bch_sb_feature {
#define x(f, n) BCH_FEATURE_##f,
	BCH_SB_FEATURES()
#undef x
	BCH_FEATURE_NR,
};

enum bch_sb_compat {
	BCH_COMPAT_FEAT_ALLOC_INFO	= 0,
	BCH_COMPAT_FEAT_ALLOC_METADATA	= 1,
};

/* options: */

#define BCH_REPLICAS_MAX		4U
enum bch_error_actions {
	BCH_ON_ERROR_CONTINUE		= 0,
	BCH_ON_ERROR_RO			= 1,
	BCH_ON_ERROR_PANIC		= 2,
	BCH_NR_ERROR_ACTIONS		= 3,
};

enum bch_str_hash_type {
	BCH_STR_HASH_CRC32C		= 0,
	BCH_STR_HASH_CRC64		= 1,
	BCH_STR_HASH_SIPHASH_OLD	= 2,
	BCH_STR_HASH_SIPHASH		= 3,
	BCH_STR_HASH_NR			= 4,
};

enum bch_str_hash_opts {
	BCH_STR_HASH_OPT_CRC32C		= 0,
	BCH_STR_HASH_OPT_CRC64		= 1,
	BCH_STR_HASH_OPT_SIPHASH	= 2,
	BCH_STR_HASH_OPT_NR		= 3,
};

enum bch_csum_type {
	BCH_CSUM_NONE			= 0,
	BCH_CSUM_CRC32C_NONZERO		= 1,
	BCH_CSUM_CRC64_NONZERO		= 2,
	BCH_CSUM_CHACHA20_POLY1305_80	= 3,
	BCH_CSUM_CHACHA20_POLY1305_128	= 4,
	BCH_CSUM_CRC32C			= 5,
	BCH_CSUM_CRC64			= 6,
	BCH_CSUM_NR			= 7,
};

static const unsigned bch_crc_bytes[] = {
	[BCH_CSUM_NONE]				= 0,
	[BCH_CSUM_CRC32C_NONZERO]		= 4,
	[BCH_CSUM_CRC32C]			= 4,
	[BCH_CSUM_CRC64_NONZERO]		= 8,
	[BCH_CSUM_CRC64]			= 8,
	[BCH_CSUM_CHACHA20_POLY1305_80]		= 10,
	[BCH_CSUM_CHACHA20_POLY1305_128]	= 16,
};

static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type)
{
	switch (type) {
	case BCH_CSUM_CHACHA20_POLY1305_80:
	case BCH_CSUM_CHACHA20_POLY1305_128:
		return true;
	default:
		return false;
	}
}

enum bch_csum_opts {
	BCH_CSUM_OPT_NONE		= 0,
	BCH_CSUM_OPT_CRC32C		= 1,
	BCH_CSUM_OPT_CRC64		= 2,
	BCH_CSUM_OPT_NR			= 3,
};

#define BCH_COMPRESSION_TYPES()		\
	x(none,			0)	\
	x(lz4_old,		1)	\
	x(gzip,			2)	\
	x(lz4,			3)	\
	x(zstd,			4)	\
	x(incompressible,	5)

enum bch_compression_type {
#define x(t, n) BCH_COMPRESSION_TYPE_##t,
	BCH_COMPRESSION_TYPES()
#undef x
	BCH_COMPRESSION_TYPE_NR
};

#define BCH_COMPRESSION_OPTS()		\
	x(none,		0)		\
	x(lz4,		1)		\
	x(gzip,		2)		\
	x(zstd,		3)

enum bch_compression_opts {
#define x(t, n) BCH_COMPRESSION_OPT_##t,
	BCH_COMPRESSION_OPTS()
#undef x
	BCH_COMPRESSION_OPT_NR
};
/*
 * Magic numbers
 *
 * The various other data structures have their own magic numbers, which are
 * xored with the first part of the cache set's UUID
 */

#define BCACHE_MAGIC						\
	UUID_LE(0xf67385c6, 0x1a4e, 0xca45,			\
		0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)

#define BCACHEFS_STATFS_MAGIC		0xca451a4e

#define JSET_MAGIC		__cpu_to_le64(0x245235c1a3625032ULL)
#define BSET_MAGIC		__cpu_to_le64(0x90135c78b99e07f5ULL)

static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
{
	__le64 ret;

	memcpy(&ret, &sb->uuid, sizeof(ret));
	return ret;
}

static inline __u64 __jset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch2_sb_magic(sb) ^ JSET_MAGIC);
}

static inline __u64 __bset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch2_sb_magic(sb) ^ BSET_MAGIC);
}
/* Journal */

#define JSET_KEYS_U64s	(sizeof(struct jset_entry) / sizeof(__u64))

#define BCH_JSET_ENTRY_TYPES()			\
	x(btree_keys,		0)		\
	x(btree_root,		1)		\
	x(prio_ptrs,		2)		\
	x(blacklist,		3)		\
	x(blacklist_v2,		4)		\
	x(usage,		5)		\
	x(data_usage,		6)

enum {
#define x(f, nr) BCH_JSET_ENTRY_##f = nr,
	BCH_JSET_ENTRY_TYPES()
#undef x
	BCH_JSET_ENTRY_NR
};

/*
 * Journal sequence numbers can be blacklisted: bsets record the max sequence
 * number of all the journal entries they contain updates for, so that on
 * recovery we can ignore those bsets that contain index updates newer than what
 * made it into the journal.
 *
 * This means that we can't reuse that journal_seq - we have to skip it, and
 * then record that we skipped it so that the next time we crash and recover we
 * don't think there was a missing journal entry.
 */
struct jset_entry_blacklist {
	struct jset_entry	entry;
	__le64			seq;
};

struct jset_entry_blacklist_v2 {
	struct jset_entry	entry;
	__le64			start;
	__le64			end;
};

enum {
	FS_USAGE_RESERVED	= 0,
	FS_USAGE_INODES		= 1,
	FS_USAGE_KEY_VERSION	= 2,
	FS_USAGE_NR		= 3
};

struct jset_entry_usage {
	struct jset_entry	entry;
	__le64			v;
} __attribute__((packed));

struct jset_entry_data_usage {
	struct jset_entry	entry;
	__le64			v;
	struct bch_replicas_entry r;
} __attribute__((packed));
/*
 * On disk format for a journal entry:
 * seq is monotonically increasing; every journal entry has its own unique
 * sequence number.
 *
 * last_seq is the oldest journal entry that still has keys the btree hasn't
 * flushed to disk yet.
 *
 * version is for on disk format changes.
 */
struct jset {
	struct bch_csum		csum;

	__le64			magic;
	__le64			seq;
	__le32			version;
	__le32			flags;

	__le32			u64s; /* size of d[] in u64s */

	__u8			encrypted_start[0];

	__le16			read_clock;
	__le16			write_clock;

	/* Sequence number of oldest dirty journal entry */
	__le64			last_seq;

	union {
		struct jset_entry start[0];
		__u64		_data[0];
	};
} __attribute__((packed, aligned(8)));

LE32_BITMASK(JSET_CSUM_TYPE,	struct jset, flags, 0, 4);
LE32_BITMASK(JSET_BIG_ENDIAN,	struct jset, flags, 4, 5);

#define BCH_JOURNAL_BUCKETS_MIN		8
/* Btree: */

#define BCH_BTREE_IDS()					\
	x(EXTENTS,	0, "extents")			\
	x(INODES,	1, "inodes")			\
	x(DIRENTS,	2, "dirents")			\
	x(XATTRS,	3, "xattrs")			\
	x(ALLOC,	4, "alloc")			\
	x(QUOTAS,	5, "quotas")			\
	x(EC,		6, "stripes")			\
	x(REFLINK,	7, "reflink")

enum btree_id {
#define x(kwd, val, name) BTREE_ID_##kwd = val,
	BCH_BTREE_IDS()
#undef x
	BTREE_ID_NR
};

#define BTREE_MAX_DEPTH		4U
/* Btree nodes */

/*
 * On disk a btree node is a list/log of these; within each set the keys are
 * sorted
 */
struct bset {
	__le64			seq;

	/*
	 * Highest journal entry this bset contains keys for.
	 * If on recovery we don't see that journal entry, this bset is ignored:
	 * this allows us to preserve the order of all index updates after a
	 * crash, since the journal records a total order of all index updates
	 * and anything that didn't make it to the journal doesn't get used.
	 */
	__le64			journal_seq;

	__le32			flags;
	__le16			version;
	__le16			u64s; /* count of d[] in u64s */

	union {
		struct bkey_packed start[0];
		__u64		_data[0];
	};
} __attribute__((packed, aligned(8)));

LE32_BITMASK(BSET_CSUM_TYPE,	struct bset, flags, 0, 4);

LE32_BITMASK(BSET_BIG_ENDIAN,	struct bset, flags, 4, 5);
LE32_BITMASK(BSET_SEPARATE_WHITEOUTS,
				struct bset, flags, 5, 6);
struct btree_node {
	struct bch_csum		csum;
	__le64			magic;

	/* this flags field is encrypted, unlike bset->flags: */
	__le64			flags;

	/* Closed interval: */
	struct bpos		min_key;
	struct bpos		max_key;
	struct bch_extent_ptr	ptr;
	struct bkey_format	format;

	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];
	};
	};
} __attribute__((packed, aligned(8)));

LE64_BITMASK(BTREE_NODE_ID,	struct btree_node, flags,  0,  4);
LE64_BITMASK(BTREE_NODE_LEVEL,	struct btree_node, flags,  4,  8);
LE64_BITMASK(BTREE_NODE_NEW_EXTENT_OVERWRITE,
				struct btree_node, flags,  8,  9);
/* 9-32 unused */
LE64_BITMASK(BTREE_NODE_SEQ,	struct btree_node, flags, 32, 64);

struct btree_node_entry {
	struct bch_csum		csum;

	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];
	};
	};
} __attribute__((packed, aligned(8)));
#endif /* _BCACHEFS_FORMAT_H */