/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_FORMAT_H
#define _BCACHEFS_FORMAT_H
/*
 * bcachefs on disk data structures
 *
 * There are three main types of on disk data structures in bcachefs (this is
 * reduced from 5 in bcache)
 *
 *  - superblock
 *  - journal
 *  - btree
 *
 * The btree is the primary structure; most metadata exists as keys in the
 * various btrees. There are only a small number of btrees, they're not
 * sharded - we have one btree for extents, another for inodes, et cetera.
 *
 * SUPERBLOCK:
 *
 * The superblock contains the location of the journal, the list of devices in
 * the filesystem, and in general any metadata we need in order to decide
 * whether we can start a filesystem, or that we need prior to reading the
 * journal/btree roots.
 *
 * The superblock is extensible, and most of the contents of the superblock are
 * in variable length, type tagged fields; see struct bch_sb_field.
 *
 * Backup superblocks do not reside in a fixed location; also, superblocks do
 * not have a fixed size. To locate backup superblocks we have struct
 * bch_sb_layout; we store a copy of this inside every superblock, and also
 * before the first superblock.
 *
 * JOURNAL:
 *
 * The journal primarily records btree updates in the order they occurred;
 * journal replay consists of just iterating over all the keys in the open
 * journal entries and re-inserting them into the btrees.
 *
 * The journal also contains entry types for the btree roots, and blacklisted
 * journal sequence numbers (see journal_seq_blacklist.c).
 *
 * BTREE:
 *
 * bcachefs btrees are copy on write b+ trees, where nodes are big (typically
 * 128k-256k) and log structured. We use struct btree_node for writing the
 * first entry in a given node (offset 0), and struct btree_node_entry for all
 * subsequent writes.
 *
 * After the header, btree node entries contain a list of keys in sorted order.
 * Values are stored inline with the keys; since values are variable length
 * (and keys effectively are variable length too, due to packing) we can't do
 * random access without building up additional in memory tables in the btree
 * node read path.
 *
 * BTREE KEYS (struct bkey):
 *
 * The various btrees share a common format for the key - so as to avoid
 * switching in fastpath lookup/comparison code - but define their own
 * structures for the key values.
 *
 * The size of a key/value pair is stored as a u8 in units of u64s, so the max
 * size is just under 2k. The common part also contains a type tag for the
 * value, and a format field indicating whether the key is packed or not (and
 * also meant to allow adding new key fields in the future, if desired).
 *
 * bkeys, when stored within a btree node, may also be packed. In that case,
 * the bkey_format in that node is used to unpack it. Packed bkeys mean that
 * we can be generous with field sizes in the common part of the key format
 * (64 bit inode number, 64 bit offset, 96 bit version field, etc.) for
 * negligible cost.
 */
#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/uuid.h>
#define LE_BITMASK(_bits, name, type, field, offset, end)		\
static const unsigned	name##_OFFSET = offset;				\
static const unsigned	name##_BITS = (end - offset);			\
static const __u##_bits	name##_MAX = (1ULL << (end - offset)) - 1;	\
									\
static inline __u64 name(const type *k)					\
{									\
	return (__le##_bits##_to_cpu(k->field) >> offset) &		\
		~(~0ULL << (end - offset));				\
}									\
									\
static inline void SET_##name(type *k, __u64 v)				\
{									\
	__u##_bits new = __le##_bits##_to_cpu(k->field);		\
									\
	new &= ~(~(~0ULL << (end - offset)) << offset);			\
	new |= (v & ~(~0ULL << (end - offset))) << offset;		\
	k->field = __cpu_to_le##_bits(new);				\
}

#define LE16_BITMASK(n, t, f, o, e)	LE_BITMASK(16, n, t, f, o, e)
#define LE32_BITMASK(n, t, f, o, e)	LE_BITMASK(32, n, t, f, o, e)
#define LE64_BITMASK(n, t, f, o, e)	LE_BITMASK(64, n, t, f, o, e)
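
/*
 * Editorial illustration (toy type, not part of the on-disk format): one
 * LE64_BITMASK() invocation on a struct with a little-endian flags word -
 */
struct bch_bitmask_example {
	__le64			flags;
};

LE64_BITMASK(BCH_EXAMPLE_DIRTY, struct bch_bitmask_example, flags, 0, 1)

/*
 * - generates BCH_EXAMPLE_DIRTY(k), returning bit 0 of k->flags,
 * SET_BCH_EXAMPLE_DIRTY(k, v), which read-modify-writes it, and the
 * constants BCH_EXAMPLE_DIRTY_{OFFSET,BITS,MAX}; every LE*_BITMASK()
 * invocation below follows this pattern.
 */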
struct bkey_format {
	__u8		key_u64s;
	__u8		nr_fields;
	/* One unused slot for now: */
	__u8		bits_per_field[6];
	__le64		field_offset[6];
};
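
/*
 * Editorial sketch of how this format is applied (the real pack/unpack code
 * lives in bkey.c): field i of a packed key is stored in bits_per_field[i]
 * bits, and the logical value is recovered as
 *
 *	field[i] = le64_to_cpu(field_offset[i]) + packed_bits
 *
 * so a node whose keys all share, say, one inode number can use a large
 * field_offset and zero bits for that field.
 */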
/* Btree keys - all units are in sectors */

struct bpos {
	/*
	 * Word order matches machine byte order - btree code treats a bpos as a
	 * single large integer, for search/comparison purposes
	 *
	 * Note that wherever a bpos is embedded in another on disk data
	 * structure, it has to be byte swabbed when reading in metadata that
	 * wasn't written in native endian order:
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u32		snapshot;
	__u64		offset;
	__u64		inode;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	__u64		inode;
	__u64		offset;		/* Points to end of extent - sectors */
	__u32		snapshot;
#else
#error edit for your odd byteorder.
#endif
} __attribute__((packed, aligned(4)));
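
/*
 * Editorial sketch of the ordering this layout implies (the real comparison
 * helpers live in bkey.h): keys sort by inode, then offset, then snapshot -
 * exactly the "single large integer" comparison given the word order above.
 */
static inline int bpos_example_cmp(struct bpos l, struct bpos r)
{
	if (l.inode != r.inode)
		return l.inode < r.inode ? -1 : 1;
	if (l.offset != r.offset)
		return l.offset < r.offset ? -1 : 1;
	if (l.snapshot != r.snapshot)
		return l.snapshot < r.snapshot ? -1 : 1;
	return 0;
}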
#define KEY_INODE_MAX			((__u64)~0ULL)
#define KEY_OFFSET_MAX			((__u64)~0ULL)
#define KEY_SNAPSHOT_MAX		((__u32)~0U)
#define KEY_SIZE_MAX			((__u32)~0U)

static inline struct bpos SPOS(__u64 inode, __u64 offset, __u32 snapshot)
{
	return (struct bpos) {
		.inode		= inode,
		.offset		= offset,
		.snapshot	= snapshot,
	};
}

#define POS_MIN				SPOS(0, 0, 0)
#define POS_MAX				SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, 0)
#define SPOS_MAX			SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, KEY_SNAPSHOT_MAX)
#define POS(_inode, _offset)		SPOS(_inode, _offset, 0)
/* Empty placeholder struct, for container_of() */
struct bch_val {
	__u64		__nothing[0];
};

struct bversion {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u64		lo;
	__u32		hi;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	__u32		hi;
	__u64		lo;
#endif
} __attribute__((packed, aligned(4)));
struct bkey {
	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#else
#error edit for your odd byteorder.
#endif

	/* Type of the value */
	__u8		type;

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	__u8		pad[1];

	struct bversion	version;
	__u32		size;		/* extent size, in sectors */
	struct bpos	p;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	struct bpos	p;
	__u32		size;		/* extent size, in sectors */
	struct bversion	version;

	__u8		pad[1];
#endif
} __attribute__((packed, aligned(8)));
struct bkey_packed {
	__u64		_data[0];

	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */

	/*
	 * XXX: next incompat on disk format change, switch format and
	 * needs_whiteout - bkey_packed() will be cheaper if format is the high
	 * bits of the bitfield
	 */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#endif

	/* Type of the value */
	__u8		type;
	__u8		key_start[0];

	/*
	 * We copy bkeys with struct assignment in various places, and while
	 * that shouldn't be done with packed bkeys we can't disallow it in C,
	 * and it's legal to cast a bkey to a bkey_packed - so padding it out
	 * to the same size as struct bkey should hopefully be safest.
	 */
	__u8		pad[sizeof(struct bkey) - 3];
} __attribute__((packed, aligned(8)));
#define BKEY_U64s			(sizeof(struct bkey) / sizeof(__u64))
#define BKEY_U64s_MAX			U8_MAX
#define BKEY_VAL_U64s_MAX		(BKEY_U64s_MAX - BKEY_U64s)

#define KEY_PACKED_BITS_START		24

#define KEY_FORMAT_LOCAL_BTREE		0
#define KEY_FORMAT_CURRENT		1
enum bch_bkey_fields {
	BKEY_FIELD_INODE,
	BKEY_FIELD_OFFSET,
	BKEY_FIELD_SNAPSHOT,
	BKEY_FIELD_SIZE,
	BKEY_FIELD_VERSION_HI,
	BKEY_FIELD_VERSION_LO,
	BKEY_NR_FIELDS,
};
#define bkey_format_field(name, field)					\
	[BKEY_FIELD_##name] = (sizeof(((struct bkey *) NULL)->field) * 8)

#define BKEY_FORMAT_CURRENT						\
((struct bkey_format) {							\
	.key_u64s	= BKEY_U64s,					\
	.nr_fields	= BKEY_NR_FIELDS,				\
	.bits_per_field = {						\
		bkey_format_field(INODE,	p.inode),		\
		bkey_format_field(OFFSET,	p.offset),		\
		bkey_format_field(SNAPSHOT,	p.snapshot),		\
		bkey_format_field(SIZE,		size),			\
		bkey_format_field(VERSION_HI,	version.hi),		\
		bkey_format_field(VERSION_LO,	version.lo),		\
	},								\
})
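
/*
 * Editorial note: BKEY_FORMAT_CURRENT gives every field its full in-memory
 * width (and, implicitly, zero field_offsets), describing a plain struct
 * bkey: keys whose format field is KEY_FORMAT_CURRENT are stored unpacked,
 * while format == KEY_FORMAT_LOCAL_BTREE means "decode with this node's
 * bkey_format".
 */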
/* bkey with inline value */
struct bkey_i {
	__u64			_data[0];

	union {
	struct {
		/* Size of combined key and value, in u64s */
		__u8	u64s;
	};
	struct {
		struct bkey	k;
		struct bch_val	v;
	};
	};
};

#define KEY(_inode, _offset, _size)					\
((struct bkey) {							\
	.u64s		= BKEY_U64s,					\
	.format		= KEY_FORMAT_CURRENT,				\
	.p		= POS(_inode, _offset),				\
	.size		= _size,					\
})

static inline void bkey_init(struct bkey *k)
{
	*k = KEY(0, 0, 0);
}

#define bkey_bytes(_k)		((_k)->u64s * sizeof(__u64))

#define __BKEY_PADDED(key, pad)					\
	struct { struct bkey_i key; __u64 key ## _pad[pad]; }
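
/*
 * Editorial example: __BKEY_PADDED declares a bkey_i with trailing room for
 * a value, the usual on-stack idiom being
 *
 *	__BKEY_PADDED(k, 8) tmp;
 *	bkey_init(&tmp.k.k);
 */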
/*
 * - DELETED keys are used internally to mark keys that should be ignored but
 *   override keys in composition order. Their version number is ignored.
 *
 * - DISCARDED keys indicate that the data is all 0s because it has been
 *   discarded. DISCARDs may have a version; if the version is nonzero the key
 *   will be persistent, otherwise the key will be dropped whenever the btree
 *   node is rewritten (like DELETED keys).
 *
 * - ERROR: any read of the data returns a read error, as the data was lost due
 *   to a failing device. Like DISCARDED keys, they can be removed (overridden)
 *   by new writes or cluster-wide GC. Node repair can also overwrite them with
 *   the same or a more recent version number, but not with an older version
 *   number.
 *
 * - WHITEOUT: for hash table btrees
 */
#define BCH_BKEY_TYPES()				\
	x(deleted,		0)			\
	x(whiteout,		1)			\
	x(error,		2)			\
	x(cookie,		3)			\
	x(hash_whiteout,	4)			\
	x(btree_ptr,		5)			\
	x(extent,		6)			\
	x(reservation,		7)			\
	x(inode,		8)			\
	x(inode_generation,	9)			\
	x(dirent,		10)			\
	x(xattr,		11)			\
	x(alloc,		12)			\
	x(quota,		13)			\
	x(stripe,		14)			\
	x(reflink_p,		15)			\
	x(reflink_v,		16)			\
	x(inline_data,		17)			\
	x(btree_ptr_v2,		18)			\
	x(indirect_inline_data,	19)			\
	x(alloc_v2,		20)			\
	x(subvolume,		21)			\
	x(snapshot,		22)			\
	x(inode_v2,		23)			\
	x(alloc_v3,		24)			\
	x(set,			25)			\
	x(lru,			26)

enum bch_bkey_type {
#define x(name, nr) KEY_TYPE_##name	= nr,
	BCH_BKEY_TYPES()
#undef x
	KEY_TYPE_MAX,
};
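
/*
 * Editorial illustration of the x-macro pattern used throughout this file:
 * each BCH_*() list is expanded by temporarily defining x(). The enum above,
 * for instance, could equally be rendered as a name table:
 *
 *	static const char * const bch_bkey_type_names[] = {
 *	#define x(name, nr)	[nr] = #name,
 *		BCH_BKEY_TYPES()
 *	#undef x
 *	};
 */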
struct bch_deleted {
	struct bch_val		v;
};

struct bch_whiteout {
	struct bch_val		v;
};

struct bch_error {
	struct bch_val		v;
};

struct bch_cookie {
	struct bch_val		v;
	__le64			cookie;
};

struct bch_hash_whiteout {
	struct bch_val		v;
};
/* Extents */

/*
 * In extent bkeys, the value is a list of pointers (bch_extent_ptr), optionally
 * preceded by checksum/compression information (bch_extent_crc32 or
 * bch_extent_crc64).
 *
 * One major determining factor in the format of extents is how we handle and
 * represent extents that have been partially overwritten and thus trimmed:
 *
 * If an extent is not checksummed or compressed, when the extent is trimmed we
 * don't have to remember the extent we originally allocated and wrote: we can
 * merely adjust ptr->offset to point to the start of the data that is currently
 * live. The size field in struct bkey records the current (live) size of the
 * extent, and is also used to mean "size of region on disk that we point to" in
 * this case.
 *
 * Thus an extent that is not checksummed or compressed will consist only of a
 * list of bch_extent_ptrs, with none of the fields in
 * bch_extent_crc32/bch_extent_crc64.
 *
 * When an extent is checksummed or compressed, it's not possible to read only
 * the data that is currently live: we have to read the entire extent that was
 * originally written, and then return only the part of the extent that is
 * currently live.
 *
 * Thus, in addition to the current size of the extent in struct bkey, we need
 * to store the size of the originally allocated space - this is the
 * compressed_size and uncompressed_size fields in bch_extent_crc32/64. Also,
 * when the extent is trimmed, instead of modifying the offset field of the
 * pointer, we keep a second smaller offset field - "offset into the original
 * extent of the currently live region".
 *
 * The other major determining factor is replication and data migration:
 *
 * Each pointer may have its own bch_extent_crc32/64. When doing a replicated
 * write, we will initially write all the replicas in the same format, with the
 * same checksum type and compression format - however, when copygc runs later
 * (or tiering/cache promotion, anything that moves data), it is not in general
 * going to rewrite all the pointers at once - one of the replicas may be in a
 * bucket on one device that has very little fragmentation while another lives
 * in a bucket that has become heavily fragmented, and thus is being rewritten
 * sooner than the rest.
 *
 * Thus it will only move a subset of the pointers (or in the case of
 * tiering/cache promotion perhaps add a single pointer without dropping any
 * current pointers), and if the extent has been partially overwritten it must
 * write only the currently live portion (or copygc would not be able to reduce
 * fragmentation!) - which necessitates a different bch_extent_crc format for
 * the new pointer.
 *
 * But in the interests of space efficiency, we don't want to store one
 * bch_extent_crc for each pointer if we don't have to.
 *
 * Thus, a bch_extent consists of bch_extent_crc32s, bch_extent_crc64s, and
 * bch_extent_ptrs appended arbitrarily one after the other. We determine the
 * type of a given entry with a scheme similar to utf8 (except we're encoding a
 * type, not a size), encoding the type in the position of the first set bit:
 *
 * bch_extent_ptr	- 0b1
 * bch_extent_crc32	- 0b10
 * bch_extent_crc64	- 0b100
 *
 * We do it this way because bch_extent_crc32 is _very_ constrained on bits (and
 * bch_extent_crc64 is the least constrained).
 *
 * Then, each bch_extent_crc32/64 applies to the pointers that follow after it,
 * until the next bch_extent_crc32/64.
 *
 * If there are no bch_extent_crcs preceding a bch_extent_ptr, then that pointer
 * is neither checksummed nor compressed.
 */
/* 128 bits, sufficient for cryptographic MACs: */
struct bch_csum {
	__le64			lo;
	__le64			hi;
} __attribute__((packed, aligned(8)));
#define BCH_EXTENT_ENTRY_TYPES()		\
	x(ptr,			0)		\
	x(crc32,		1)		\
	x(crc64,		2)		\
	x(crc128,		3)		\
	x(stripe_ptr,		4)
#define BCH_EXTENT_ENTRY_MAX	5

enum bch_extent_entry_type {
#define x(f, n) BCH_EXTENT_ENTRY_##f = n,
	BCH_EXTENT_ENTRY_TYPES()
#undef x
};
/* Compressed/uncompressed size are stored biased by 1: */
struct bch_extent_crc32 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u32			type:2,
				_compressed_size:7,
				_uncompressed_size:7,
				offset:7,
				_unused:1,
				csum_type:4,
				compression_type:4;
	__u32			csum;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u32			csum;
	__u32			compression_type:4,
				csum_type:4,
				_unused:1,
				offset:7,
				_uncompressed_size:7,
				_compressed_size:7,
				type:2;
#endif
} __attribute__((packed, aligned(8)));

#define CRC32_SIZE_MAX		(1U << 7)
#define CRC32_NONCE_MAX		0
struct bch_extent_crc64 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:3,
				_compressed_size:9,
				_uncompressed_size:9,
				offset:9,
				nonce:10,
				csum_type:4,
				compression_type:4,
				csum_hi:16;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			csum_hi:16,
				compression_type:4,
				csum_type:4,
				nonce:10,
				offset:9,
				_uncompressed_size:9,
				_compressed_size:9,
				type:3;
#endif
	__u64			csum_lo;
} __attribute__((packed, aligned(8)));

#define CRC64_SIZE_MAX		(1U << 9)
#define CRC64_NONCE_MAX		((1U << 10) - 1)
struct bch_extent_crc128 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:4,
				_compressed_size:13,
				_uncompressed_size:13,
				offset:13,
				nonce:13,
				csum_type:4,
				compression_type:4;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			compression_type:4,
				csum_type:4,
				nonce:13,
				offset:13,
				_uncompressed_size:13,
				_compressed_size:13,
				type:4;
#endif
	struct bch_csum		csum;
} __attribute__((packed, aligned(8)));

#define CRC128_SIZE_MAX		(1U << 13)
#define CRC128_NONCE_MAX	((1U << 13) - 1)
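
/*
 * Editorial sketch: all three crc variants store sizes biased by one (a size
 * of zero cannot occur), so decoding looks like this; the real unpacking
 * helper is bch2_extent_crc_unpack() in extents.h.
 */
static inline unsigned bch_crc128_example_uncompressed_size(const struct bch_extent_crc128 *crc)
{
	return crc->_uncompressed_size + 1;	/* stored biased by 1 */
}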
/*
 * @reservation - pointer hasn't been written to, just reserved
 */
struct bch_extent_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:1,
				cached:1,
				unused:1,
				reservation:1,
				offset:44, /* 8 petabytes */
				dev:8,
				gen:8;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			gen:8,
				dev:8,
				offset:44,
				reservation:1,
				unused:1,
				cached:1,
				type:1;
#endif
} __attribute__((packed, aligned(8)));
struct bch_extent_stripe_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:5,
				block:8,
				redundancy:4,
				idx:47;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			idx:47,
				redundancy:4,
				block:8,
				type:5;
#endif
};

struct bch_extent_reservation {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:6,
				unused:22,
				replicas:4,
				generation:32;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			generation:32,
				replicas:4,
				unused:22,
				type:6;
#endif
};
union bch_extent_entry {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ || __BITS_PER_LONG == 64
	unsigned long			type;
#elif __BITS_PER_LONG == 32
	struct {
		unsigned long		pad;
		unsigned long		type;
	};
#else
#error edit for your odd byteorder.
#endif

#define x(f, n) struct bch_extent_##f	f;
	BCH_EXTENT_ENTRY_TYPES()
#undef x
};
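
/*
 * Editorial sketch of the first-set-bit decode described above: a ptr entry
 * begins 0b1, a crc32 0b10, a crc64 0b100, and so on, so the entry type is
 * the index of the lowest set bit (the in-tree helper is extent_entry_type()
 * in extents.h). Caller must ensure type != 0.
 */
static inline enum bch_extent_entry_type
bch_extent_entry_example_type(const union bch_extent_entry *e)
{
	return __builtin_ctzl(e->type);
}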
struct bch_btree_ptr {
	struct bch_val		v;

	struct bch_extent_ptr	start[0];
	__u64			_data[0];
} __attribute__((packed, aligned(8)));

struct bch_btree_ptr_v2 {
	struct bch_val		v;

	__u64			mem_ptr;
	__le64			seq;
	__le16			sectors_written;
	__le16			flags;
	struct bpos		min_key;
	struct bch_extent_ptr	start[0];
	__u64			_data[0];
} __attribute__((packed, aligned(8)));

LE16_BITMASK(BTREE_PTR_RANGE_UPDATED,	struct bch_btree_ptr_v2, flags, 0, 1);

struct bch_extent {
	struct bch_val		v;

	union bch_extent_entry	start[0];
	__u64			_data[0];
} __attribute__((packed, aligned(8)));
struct bch_reservation {
	struct bch_val		v;

	__le32			generation;
	__u8			replicas;
	__u8			pad[3];
} __attribute__((packed, aligned(8)));
/* Maximum size (in u64s) a single pointer could be: */
#define BKEY_EXTENT_PTR_U64s_MAX				\
	((sizeof(struct bch_extent_crc128) +			\
	  sizeof(struct bch_extent_ptr)) / sizeof(u64))

/* Maximum possible size of an entire extent value: */
#define BKEY_EXTENT_VAL_U64s_MAX				\
	(1 + BKEY_EXTENT_PTR_U64s_MAX * (BCH_REPLICAS_MAX + 1))

/* Maximum possible size of an entire extent, key + value: */
#define BKEY_EXTENT_U64s_MAX		(BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)

/* Btree pointers don't carry around checksums: */
#define BKEY_BTREE_PTR_VAL_U64s_MAX				\
	((sizeof(struct bch_btree_ptr_v2) +			\
	  sizeof(struct bch_extent_ptr) * BCH_REPLICAS_MAX) / sizeof(u64))
#define BKEY_BTREE_PTR_U64s_MAX					\
	(BKEY_U64s + BKEY_BTREE_PTR_VAL_U64s_MAX)
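
/*
 * Editorial arithmetic, from the struct definitions above: bch_extent_crc128
 * is 24 bytes and bch_extent_ptr 8, so a worst-case pointer is 4 u64s, and
 * with BCH_REPLICAS_MAX == 4 an extent value tops out at
 * 1 + 4 * (4 + 1) = 21 u64s.
 */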
/* Inodes */

#define BLOCKDEV_INODE_MAX	4096

#define BCACHEFS_ROOT_INO	4096

struct bch_inode {
	struct bch_val		v;

	__le64			bi_hash_seed;
	__le32			bi_flags;
	__le16			bi_mode;
	__u8			fields[0];
} __attribute__((packed, aligned(8)));

struct bch_inode_v2 {
	struct bch_val		v;

	__le64			bi_journal_seq;
	__le64			bi_hash_seed;
	__le64			bi_flags;
	__le16			bi_mode;
	__u8			fields[0];
} __attribute__((packed, aligned(8)));

struct bch_inode_generation {
	struct bch_val		v;

	__le32			bi_generation;
	__le32			pad;
} __attribute__((packed, aligned(8)));
/*
 * bi_subvol and bi_parent_subvol are only set for subvolume roots:
 */

#define BCH_INODE_FIELDS()			\
	x(bi_atime,			96)	\
	x(bi_ctime,			96)	\
	x(bi_mtime,			96)	\
	x(bi_otime,			96)	\
	x(bi_size,			64)	\
	x(bi_sectors,			64)	\
	x(bi_uid,			32)	\
	x(bi_gid,			32)	\
	x(bi_nlink,			32)	\
	x(bi_generation,		32)	\
	x(bi_dev,			32)	\
	x(bi_data_checksum,		8)	\
	x(bi_compression,		8)	\
	x(bi_project,			32)	\
	x(bi_background_compression,	8)	\
	x(bi_data_replicas,		8)	\
	x(bi_promote_target,		16)	\
	x(bi_foreground_target,		16)	\
	x(bi_background_target,		16)	\
	x(bi_erasure_code,		16)	\
	x(bi_fields_set,		16)	\
	x(bi_dir,			64)	\
	x(bi_dir_offset,		64)	\
	x(bi_subvol,			32)	\
	x(bi_parent_subvol,		32)
/* subset of BCH_INODE_FIELDS */
#define BCH_INODE_OPTS()			\
	x(data_checksum,	8)		\
	x(compression,		8)		\
	x(project,		32)		\
	x(background_compression, 8)		\
	x(data_replicas,	8)		\
	x(promote_target,	16)		\
	x(foreground_target,	16)		\
	x(background_target,	16)		\
	x(erasure_code,		16)

enum inode_opt_id {
#define x(name, ...)				\
	Inode_opt_##name,
	BCH_INODE_OPTS()
#undef x
	Inode_opt_nr,
};
enum {
	/*
	 * User flags (get/settable with FS_IOC_*FLAGS, correspond to FS_*_FL
	 * flags)
	 */
	__BCH_INODE_SYNC		= 0,
	__BCH_INODE_IMMUTABLE		= 1,
	__BCH_INODE_APPEND		= 2,
	__BCH_INODE_NODUMP		= 3,
	__BCH_INODE_NOATIME		= 4,

	__BCH_INODE_I_SIZE_DIRTY	= 5,
	__BCH_INODE_I_SECTORS_DIRTY	= 6,
	__BCH_INODE_UNLINKED		= 7,
	__BCH_INODE_BACKPTR_UNTRUSTED	= 8,

	/* bits 20+ reserved for packed fields below: */
};
#define BCH_INODE_SYNC		(1 << __BCH_INODE_SYNC)
#define BCH_INODE_IMMUTABLE	(1 << __BCH_INODE_IMMUTABLE)
#define BCH_INODE_APPEND	(1 << __BCH_INODE_APPEND)
#define BCH_INODE_NODUMP	(1 << __BCH_INODE_NODUMP)
#define BCH_INODE_NOATIME	(1 << __BCH_INODE_NOATIME)
#define BCH_INODE_I_SIZE_DIRTY	(1 << __BCH_INODE_I_SIZE_DIRTY)
#define BCH_INODE_I_SECTORS_DIRTY (1 << __BCH_INODE_I_SECTORS_DIRTY)
#define BCH_INODE_UNLINKED	(1 << __BCH_INODE_UNLINKED)
#define BCH_INODE_BACKPTR_UNTRUSTED (1 << __BCH_INODE_BACKPTR_UNTRUSTED)
LE32_BITMASK(INODE_STR_HASH,	struct bch_inode, bi_flags, 20, 24);
LE32_BITMASK(INODE_NR_FIELDS,	struct bch_inode, bi_flags, 24, 31);
LE32_BITMASK(INODE_NEW_VARINT,	struct bch_inode, bi_flags, 31, 32);

LE64_BITMASK(INODEv2_STR_HASH,	struct bch_inode_v2, bi_flags, 20, 24);
LE64_BITMASK(INODEv2_NR_FIELDS,	struct bch_inode_v2, bi_flags, 24, 31);
/* Dirents */

/*
 * Dirents (and xattrs) have to implement string lookups; since our b-tree
 * doesn't support arbitrary length strings for the key, we instead index by a
 * 64 bit hash (currently truncated sha1) of the string, stored in the offset
 * field of the key - using linear probing to resolve hash collisions. This
 * also provides us with the readdir cookie POSIX requires.
 *
 * Linear probing requires us to use whiteouts for deletions, in the event of a
 * collision:
 */
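
/*
 * Editorial sketch: a dirent for "foo" in directory 4096 lives at roughly
 *
 *	POS(4096, str_hash("foo"))	(hypothetical helper name)
 *
 * and lookup walks forward from that position past collisions/whiteouts
 * until it finds a dirent whose d_name compares equal.
 */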
struct bch_dirent {
	struct bch_val		v;

	/* Target inode number: */
	union {
	__le64			d_inum;
	struct {		/* DT_SUBVOL */
	__le32			d_child_subvol;
	__le32			d_parent_subvol;
	};
	};

	/*
	 * Copy of mode bits 12-15 from the target inode - so userspace can get
	 * the filetype without having to do a stat()
	 */
	__u8			d_type;

	__u8			d_name[];
} __attribute__((packed, aligned(8)));
#define DT_SUBVOL	16
#define BCH_DT_MAX	17

#define BCH_NAME_MAX	(U8_MAX * sizeof(u64) -			\
			 sizeof(struct bkey) -			\
			 offsetof(struct bch_dirent, d_name))
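
/*
 * Editorial arithmetic: with a 40 byte struct bkey and d_name at byte 9 of
 * the value, this works out to 255 * 8 - 40 - 9 = 1991 bytes - the limit
 * comes from the u8 u64s count in struct bkey, not from NAME_MAX.
 */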
/* Xattrs */

#define KEY_TYPE_XATTR_INDEX_USER		0
#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS	1
#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT	2
#define KEY_TYPE_XATTR_INDEX_TRUSTED		3
#define KEY_TYPE_XATTR_INDEX_SECURITY		4

struct bch_xattr {
	struct bch_val		v;
	__u8			x_type;
	__u8			x_name_len;
	__le16			x_val_len;
	__u8			x_name[];
} __attribute__((packed, aligned(8)));
/* Bucket/allocation information: */

struct bch_alloc {
	struct bch_val		v;
	__u8			fields;
	__u8			gen;
	__u8			data[];
} __attribute__((packed, aligned(8)));

#define BCH_ALLOC_FIELDS_V1()			\
	x(read_time,		16)		\
	x(write_time,		16)		\
	x(data_type,		8)		\
	x(dirty_sectors,	16)		\
	x(cached_sectors,	16)		\
	x(oldest_gen,		8)		\
	x(stripe,		32)		\
	x(stripe_redundancy,	8)

struct bch_alloc_v2 {
	struct bch_val		v;
	__u8			nr_fields;
	__u8			gen;
	__u8			oldest_gen;
	__u8			data_type;
	__u8			data[];
} __attribute__((packed, aligned(8)));

#define BCH_ALLOC_FIELDS_V2()			\
	x(read_time,		64)		\
	x(write_time,		64)		\
	x(dirty_sectors,	32)		\
	x(cached_sectors,	32)		\
	x(stripe,		32)		\
	x(stripe_redundancy,	8)

struct bch_alloc_v3 {
	struct bch_val		v;
	__le64			journal_seq;
	__le32			flags;
	__u8			nr_fields;
	__u8			gen;
	__u8			oldest_gen;
	__u8			data_type;
	__u8			data[];
} __attribute__((packed, aligned(8)));

LE32_BITMASK(BCH_ALLOC_NEED_DISCARD, struct bch_alloc_v3, flags, 0, 1)
LE32_BITMASK(BCH_ALLOC_NEED_INC_GEN, struct bch_alloc_v3, flags, 1, 2)

enum {
#define x(name, _bits) BCH_ALLOC_FIELD_V1_##name,
	BCH_ALLOC_FIELDS_V1()
#undef x
};
/* Quotas: */

enum quota_types {
	QTYP_USR		= 0,
	QTYP_GRP		= 1,
	QTYP_PRJ		= 2,
	QTYP_NR			= 3,
};

enum quota_counters {
	Q_SPC			= 0,
	Q_INO			= 1,
	Q_COUNTERS		= 2,
};

struct bch_quota_counter {
	__le64			hardlimit;
	__le64			softlimit;
};

struct bch_quota {
	struct bch_val		v;
	struct bch_quota_counter c[Q_COUNTERS];
} __attribute__((packed, aligned(8)));
/* Erasure coding */

struct bch_stripe {
	struct bch_val		v;
	__le16			sectors;
	__u8			algorithm;
	__u8			nr_blocks;
	__u8			nr_redundant;

	__u8			csum_granularity_bits;
	__u8			csum_type;
	__u8			pad;

	struct bch_extent_ptr	ptrs[0];
} __attribute__((packed, aligned(8)));
/* Reflink: */

struct bch_reflink_p {
	struct bch_val		v;
	__le64			idx;
	/*
	 * A reflink pointer might point to an indirect extent which is then
	 * later split (by copygc or rebalance). If we only pointed to part of
	 * the original indirect extent, and then one of the fragments is
	 * outside the range we point to, we'd leak a refcount: so when creating
	 * reflink pointers, we need to store pad values to remember the full
	 * range we were taking a reference on.
	 */
	__le32			front_pad;
	__le32			back_pad;
} __attribute__((packed, aligned(8)));

struct bch_reflink_v {
	struct bch_val		v;
	__le64			refcount;
	union bch_extent_entry	start[0];
	__u64			_data[0];
} __attribute__((packed, aligned(8)));

struct bch_indirect_inline_data {
	struct bch_val		v;
	__le64			refcount;
	u8			data[0];
};

/* Inline data */

struct bch_inline_data {
	struct bch_val		v;
	u8			data[0];
};
/* Subvolumes: */

#define SUBVOL_POS_MIN		POS(0, 1)
#define SUBVOL_POS_MAX		POS(0, S32_MAX)
#define BCACHEFS_ROOT_SUBVOL	1

struct bch_subvolume {
	struct bch_val		v;
	__le32			flags;
	__le32			snapshot;
	__le64			inode;
};

LE32_BITMASK(BCH_SUBVOLUME_RO,		struct bch_subvolume, flags, 0, 1)
/*
 * We need to know whether a subvolume is a snapshot so we can know whether we
 * can delete it (or whether it should just be rm -rf'd)
 */
LE32_BITMASK(BCH_SUBVOLUME_SNAP,	struct bch_subvolume, flags, 1, 2)
LE32_BITMASK(BCH_SUBVOLUME_UNLINKED,	struct bch_subvolume, flags, 2, 3)
/* Snapshots */

struct bch_snapshot {
	struct bch_val		v;
	__le32			flags;
	__le32			parent;
	__le32			children[2];
	__le32			subvol;
	__le32			pad;
};

LE32_BITMASK(BCH_SNAPSHOT_DELETED,	struct bch_snapshot, flags, 0, 1)

/* True if a subvolume points to this snapshot node: */
LE32_BITMASK(BCH_SNAPSHOT_SUBVOL,	struct bch_snapshot, flags, 1, 2)

/* LRU btree: */

struct bch_lru {
	struct bch_val		v;
	__le64			idx;
} __attribute__((packed, aligned(8)));

#define LRU_ID_STRIPES		(1U << 16)
/* Optional/variable size superblock sections: */

struct bch_sb_field {
	__u64			_data[0];
	__le32			u64s;
	__le32			type;
};

#define BCH_SB_FIELDS()				\
	x(journal,	0)			\
	x(members,	1)			\
	x(crypt,	2)			\
	x(replicas_v0,	3)			\
	x(quota,	4)			\
	x(disk_groups,	5)			\
	x(clean,	6)			\
	x(replicas,	7)			\
	x(journal_seq_blacklist, 8)		\
	x(journal_v2,	9)

enum bch_sb_field_type {
#define x(f, nr)	BCH_SB_FIELD_##f = nr,
	BCH_SB_FIELDS()
#undef x
	BCH_SB_FIELD_NR
};

/*
 * Most superblock fields are replicated in all devices' superblocks - a few
 * are not:
 */
#define BCH_SINGLE_DEVICE_SB_FIELDS		\
	((1U << BCH_SB_FIELD_journal)|		\
	 (1U << BCH_SB_FIELD_journal_v2))
/* BCH_SB_FIELD_journal: */

struct bch_sb_field_journal {
	struct bch_sb_field	field;
	__le64			buckets[0];
};

struct bch_sb_field_journal_v2 {
	struct bch_sb_field	field;

	struct bch_sb_field_journal_v2_entry {
		__le64		start;
		__le64		nr;
	}			d[0];
};
/* BCH_SB_FIELD_members: */

#define BCH_MIN_NR_NBUCKETS	(1 << 6)

struct bch_member {
	uuid_le			uuid;
	__le64			nbuckets;	/* device size */
	__le16			first_bucket;	/* index of first bucket used */
	__le16			bucket_size;	/* sectors */
	__le32			pad;
	__le64			last_mount;	/* time_t */

	__le64			flags[2];
};

LE64_BITMASK(BCH_MEMBER_STATE,		struct bch_member, flags[0],  0,  4)
/* 4-14 unused, was TIER, HAS_(META)DATA, REPLACEMENT */
LE64_BITMASK(BCH_MEMBER_DISCARD,	struct bch_member, flags[0], 14, 15)
LE64_BITMASK(BCH_MEMBER_DATA_ALLOWED,	struct bch_member, flags[0], 15, 20)
LE64_BITMASK(BCH_MEMBER_GROUP,		struct bch_member, flags[0], 20, 28)
LE64_BITMASK(BCH_MEMBER_DURABILITY,	struct bch_member, flags[0], 28, 30)
LE64_BITMASK(BCH_MEMBER_FREESPACE_INITIALIZED,
					struct bch_member, flags[0], 30, 31)

#if 0
LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS,	struct bch_member, flags[1],  0, 20);
LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
#endif
#define BCH_MEMBER_STATES()		\
	x(rw,		0)		\
	x(ro,		1)		\
	x(failed,	2)		\
	x(spare,	3)

enum bch_member_state {
#define x(t, n) BCH_MEMBER_STATE_##t = n,
	BCH_MEMBER_STATES()
#undef x
	BCH_MEMBER_STATE_NR
};

struct bch_sb_field_members {
	struct bch_sb_field	field;
	struct bch_member	members[0];
};
/* BCH_SB_FIELD_crypt: */

struct nonce {
	__le32			d[4];
};

struct bch_key {
	__le64			key[4];
};

#define BCH_KEY_MAGIC					\
	(((u64) 'b' <<  0)|((u64) 'c' <<  8)|		\
	 ((u64) 'h' << 16)|((u64) '*' << 24)|		\
	 ((u64) '*' << 32)|((u64) 'k' << 40)|		\
	 ((u64) 'e' << 48)|((u64) 'y' << 56))

struct bch_encrypted_key {
	__le64			magic;
	struct bch_key		key;
};

/*
 * If this field is present in the superblock, it stores an encryption key
 * which is used to encrypt all other data/metadata. The key will normally be
 * encrypted with the key userspace provides, but if encryption has been turned
 * off we'll just store the master key unencrypted in the superblock so we can
 * access the previously encrypted data.
 */
struct bch_sb_field_crypt {
	struct bch_sb_field	field;

	__le64			flags;
	__le64			kdf_flags;
	struct bch_encrypted_key key;
};

LE64_BITMASK(BCH_CRYPT_KDF_TYPE,	struct bch_sb_field_crypt, flags, 0, 4);

enum bch_kdf_types {
	BCH_KDF_SCRYPT		= 0,
	BCH_KDF_NR		= 1,
};

/* stored as base 2 log of scrypt params: */
LE64_BITMASK(BCH_KDF_SCRYPT_N,	struct bch_sb_field_crypt, kdf_flags,  0, 16);
LE64_BITMASK(BCH_KDF_SCRYPT_R,	struct bch_sb_field_crypt, kdf_flags, 16, 32);
LE64_BITMASK(BCH_KDF_SCRYPT_P,	struct bch_sb_field_crypt, kdf_flags, 32, 48);
/* BCH_SB_FIELD_replicas: */

#define BCH_DATA_TYPES()		\
	x(none,		0)		\
	x(sb,		1)		\
	x(journal,	2)		\
	x(btree,	3)		\
	x(user,		4)		\
	x(cached,	5)		\
	x(parity,	6)

enum bch_data_type {
#define x(t, n) BCH_DATA_##t,
	BCH_DATA_TYPES()
#undef x
	BCH_DATA_NR
};

struct bch_replicas_entry_v0 {
	__u8			data_type;
	__u8			nr_devs;
	__u8			devs[];
} __attribute__((packed));

struct bch_sb_field_replicas_v0 {
	struct bch_sb_field	field;
	struct bch_replicas_entry_v0 entries[0];
} __attribute__((packed, aligned(8)));

struct bch_replicas_entry {
	__u8			data_type;
	__u8			nr_devs;
	__u8			nr_required;
	__u8			devs[];
} __attribute__((packed));

#define replicas_entry_bytes(_i)					\
	(offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
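
/*
 * Editorial example: devs[] is the variable-length tail, so an entry
 * describing user data on two devices occupies
 * offsetof(struct bch_replicas_entry, devs) + 2 == 5 bytes; entries are
 * byte-packed one after another in the superblock field below.
 */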
struct bch_sb_field_replicas {
	struct bch_sb_field	field;
	struct bch_replicas_entry entries[0];
} __attribute__((packed, aligned(8)));
/* BCH_SB_FIELD_quota: */

struct bch_sb_quota_counter {
	__le32				timelimit;
	__le32				warnlimit;
};

struct bch_sb_quota_type {
	__le64				flags;
	struct bch_sb_quota_counter	c[Q_COUNTERS];
};

struct bch_sb_field_quota {
	struct bch_sb_field		field;
	struct bch_sb_quota_type	q[QTYP_NR];
} __attribute__((packed, aligned(8)));
/* BCH_SB_FIELD_disk_groups: */

#define BCH_SB_LABEL_SIZE		32

struct bch_disk_group {
	__u8			label[BCH_SB_LABEL_SIZE];
	__le64			flags[2];
} __attribute__((packed, aligned(8)));

LE64_BITMASK(BCH_GROUP_DELETED,		struct bch_disk_group, flags[0], 0,  1)
LE64_BITMASK(BCH_GROUP_DATA_ALLOWED,	struct bch_disk_group, flags[0], 1,  6)
LE64_BITMASK(BCH_GROUP_PARENT,		struct bch_disk_group, flags[0], 6, 24)

struct bch_sb_field_disk_groups {
	struct bch_sb_field	field;
	struct bch_disk_group	entries[0];
} __attribute__((packed, aligned(8)));
/*
 * On clean shutdown, store btree roots and current journal sequence number in
 * the superblock:
 */

struct jset_entry {
	__le16			u64s;
	__u8			btree_id;
	__u8			level;
	__u8			type; /* designates what this jset holds */
	__u8			pad[3];

	union {
		struct bkey_i	start[0];
		__u64		_data[0];
	};
};

struct bch_sb_field_clean {
	struct bch_sb_field	field;

	__le32			flags;
	__le16			_read_clock; /* no longer used */
	__le16			_write_clock;
	__le64			journal_seq;

	union {
		struct jset_entry start[0];
		__u64		_data[0];
	};
};
struct journal_seq_blacklist_entry {
	__le64			start;
	__le64			end;
};

struct bch_sb_field_journal_seq_blacklist {
	struct bch_sb_field	field;

	union {
		struct journal_seq_blacklist_entry start[0];
		__u64		_data[0];
	};
};
/* Superblock: */

/*
 * New versioning scheme:
 * One common version number for all on disk data structures - superblock,
 * btree nodes, journal entries
 */

#define BCH_JSET_VERSION_OLD			2
#define BCH_BSET_VERSION_OLD			3

#define BCH_METADATA_VERSIONS()				\
	x(bkey_renumber,		10)		\
	x(inode_btree_change,		11)		\
	x(snapshot,			12)		\
	x(inode_backpointers,		13)		\
	x(btree_ptr_sectors_written,	14)		\
	x(snapshot_2,			15)		\
	x(reflink_p_fix,		16)		\
	x(subvol_dirent,		17)		\
	x(inode_v2,			18)

enum bcachefs_metadata_version {
	bcachefs_metadata_version_min = 9,
#define x(t, n)	bcachefs_metadata_version_##t = n,
	BCH_METADATA_VERSIONS()
#undef x
	bcachefs_metadata_version_max
};

#define bcachefs_metadata_version_current	(bcachefs_metadata_version_max - 1)
#define BCH_SB_SECTOR			8
#define BCH_SB_MEMBERS_MAX		64 /* XXX kill */

struct bch_sb_layout {
	uuid_le			magic;	/* bcachefs superblock UUID */
	__u8			layout_type;
	__u8			sb_max_size_bits; /* base 2 of 512 byte sectors */
	__u8			nr_superblocks;
	__u8			pad[5];
	__le64			sb_offset[61];
} __attribute__((packed, aligned(8)));

#define BCH_SB_LAYOUT_SECTOR	7
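
/*
 * Editorial note: the layout is written both at fixed sector 7
 * (BCH_SB_LAYOUT_SECTOR, just before the first superblock at BCH_SB_SECTOR)
 * and inside every superblock, so reading one sector suffices to locate all
 * backup superblocks via sb_offset[].
 */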
/*
 * @offset	- sector where this sb was written
 * @version	- on disk format version
 * @version_min	- Oldest metadata version this filesystem contains; so we can
 *		  safely drop compatibility code and refuse to mount filesystems
 *		  we'd need it for
 * @magic	- identifies as a bcachefs superblock (BCACHE_MAGIC)
 * @seq		- identifies most recent superblock, incremented each time
 *		  superblock is written
 * @uuid	- used for generating various magic numbers and identifying
 *		  member devices, never changes
 * @user_uuid	- user visible UUID, may be changed
 * @label	- filesystem label
 * @features	- enabled incompatible features
 */
struct bch_sb {
	struct bch_csum		csum;
	__le16			version;
	__le16			version_min;
	__le16			pad[2];
	uuid_le			magic;
	uuid_le			uuid;
	uuid_le			user_uuid;
	__u8			label[BCH_SB_LABEL_SIZE];
	__le64			offset;
	__le64			seq;

	__le16			block_size;
	__u8			dev_idx;
	__u8			nr_devices;
	__le32			u64s;

	__le64			time_base_lo;
	__le32			time_base_hi;
	__le32			time_precision;

	__le64			flags[8];
	__le64			features[2];
	__le64			compat[2];

	struct bch_sb_layout	layout;

	union {
		struct bch_sb_field start[0];
		__le64		_data[0];
	};
} __attribute__((packed, aligned(8)));
/*
 * Flags:
 * BCH_SB_INITIALIZED	- set on first mount
 * BCH_SB_CLEAN		- did we shut down cleanly? Just a hint, doesn't affect
 *			  behaviour of mount/recovery path:
 * BCH_SB_INODE_32BIT	- limit inode numbers to 32 bits
 * BCH_SB_128_BIT_MACS	- 128 bit macs instead of 80
 * BCH_SB_ENCRYPTION_TYPE - if nonzero encryption is enabled; overrides
 *			  DATA/META_CSUM_TYPE. Also indicates encryption
 *			  algorithm in use, if/when we get more than one
 */
LE16_BITMASK(BCH_SB_BLOCK_SIZE,		struct bch_sb, block_size, 0, 16);

LE64_BITMASK(BCH_SB_INITIALIZED,	struct bch_sb, flags[0],  0,  1);
LE64_BITMASK(BCH_SB_CLEAN,		struct bch_sb, flags[0],  1,  2);
LE64_BITMASK(BCH_SB_CSUM_TYPE,		struct bch_sb, flags[0],  2,  8);
LE64_BITMASK(BCH_SB_ERROR_ACTION,	struct bch_sb, flags[0],  8, 12);

LE64_BITMASK(BCH_SB_BTREE_NODE_SIZE,	struct bch_sb, flags[0], 12, 28);

LE64_BITMASK(BCH_SB_GC_RESERVE,		struct bch_sb, flags[0], 28, 33);
LE64_BITMASK(BCH_SB_ROOT_RESERVE,	struct bch_sb, flags[0], 33, 40);

LE64_BITMASK(BCH_SB_META_CSUM_TYPE,	struct bch_sb, flags[0], 40, 44);
LE64_BITMASK(BCH_SB_DATA_CSUM_TYPE,	struct bch_sb, flags[0], 44, 48);

LE64_BITMASK(BCH_SB_META_REPLICAS_WANT,	struct bch_sb, flags[0], 48, 52);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_WANT,	struct bch_sb, flags[0], 52, 56);

LE64_BITMASK(BCH_SB_POSIX_ACL,		struct bch_sb, flags[0], 56, 57);
LE64_BITMASK(BCH_SB_USRQUOTA,		struct bch_sb, flags[0], 57, 58);
LE64_BITMASK(BCH_SB_GRPQUOTA,		struct bch_sb, flags[0], 58, 59);
LE64_BITMASK(BCH_SB_PRJQUOTA,		struct bch_sb, flags[0], 59, 60);

LE64_BITMASK(BCH_SB_HAS_ERRORS,		struct bch_sb, flags[0], 60, 61);
LE64_BITMASK(BCH_SB_HAS_TOPOLOGY_ERRORS,struct bch_sb, flags[0], 61, 62);

LE64_BITMASK(BCH_SB_BIG_ENDIAN,		struct bch_sb, flags[0], 62, 63);

LE64_BITMASK(BCH_SB_STR_HASH_TYPE,	struct bch_sb, flags[1],  0,  4);
LE64_BITMASK(BCH_SB_COMPRESSION_TYPE,	struct bch_sb, flags[1],  4,  8);
LE64_BITMASK(BCH_SB_INODE_32BIT,	struct bch_sb, flags[1],  8,  9);

LE64_BITMASK(BCH_SB_128_BIT_MACS,	struct bch_sb, flags[1],  9, 10);
LE64_BITMASK(BCH_SB_ENCRYPTION_TYPE,	struct bch_sb, flags[1], 10, 14);

/*
 * Max size of an extent that may require bouncing to read or write
 * (checksummed, compressed): 64k
 */
LE64_BITMASK(BCH_SB_ENCODED_EXTENT_MAX_BITS,
					struct bch_sb, flags[1], 14, 20);

LE64_BITMASK(BCH_SB_META_REPLICAS_REQ,	struct bch_sb, flags[1], 20, 24);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_REQ,	struct bch_sb, flags[1], 24, 28);

LE64_BITMASK(BCH_SB_PROMOTE_TARGET,	struct bch_sb, flags[1], 28, 40);
LE64_BITMASK(BCH_SB_FOREGROUND_TARGET,	struct bch_sb, flags[1], 40, 52);
LE64_BITMASK(BCH_SB_BACKGROUND_TARGET,	struct bch_sb, flags[1], 52, 64);

LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE,
					struct bch_sb, flags[2],  0,  4);
LE64_BITMASK(BCH_SB_GC_RESERVE_BYTES,	struct bch_sb, flags[2],  4, 64);

LE64_BITMASK(BCH_SB_ERASURE_CODE,	struct bch_sb, flags[3],  0, 16);
LE64_BITMASK(BCH_SB_METADATA_TARGET,	struct bch_sb, flags[3], 16, 28);
LE64_BITMASK(BCH_SB_SHARD_INUMS,	struct bch_sb, flags[3], 28, 29);
LE64_BITMASK(BCH_SB_INODES_USE_KEY_CACHE,struct bch_sb, flags[3], 29, 30);
LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DELAY,struct bch_sb, flags[3], 30, 62);
LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DISABLED,struct bch_sb, flags[3], 62, 63);
LE64_BITMASK(BCH_SB_JOURNAL_RECLAIM_DELAY,struct bch_sb, flags[4],  0, 32);
LE64_BITMASK(BCH_SB_JOURNAL_TRANSACTION_NAMES,struct bch_sb, flags[4], 32, 33);
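
/*
 * Editorial illustration: each LE64_BITMASK() above generates a typed
 * getter/setter over the flags words, so superblock option code reads like
 *
 *	if (BCH_SB_128_BIT_MACS(sb))
 *		...;
 *	SET_BCH_SB_POSIX_ACL(sb, 1);
 */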
/*
 * Features:
 *
 * journal_seq_blacklist_v3:	gates BCH_SB_FIELD_journal_seq_blacklist
 * reflink:			gates KEY_TYPE_reflink
 * inline_data:			gates KEY_TYPE_inline_data
 * new_siphash:			gates BCH_STR_HASH_siphash
 * new_extent_overwrite:	gates BTREE_NODE_NEW_EXTENT_OVERWRITE
 */
#define BCH_SB_FEATURES()			\
	x(lz4,				0)	\
	x(gzip,				1)	\
	x(zstd,				2)	\
	x(atomic_nlink,			3)	\
	x(ec,				4)	\
	x(journal_seq_blacklist_v3,	5)	\
	x(reflink,			6)	\
	x(new_siphash,			7)	\
	x(inline_data,			8)	\
	x(new_extent_overwrite,		9)	\
	x(incompressible,		10)	\
	x(btree_ptr_v2,			11)	\
	x(extents_above_btree_updates,	12)	\
	x(btree_updates_journalled,	13)	\
	x(reflink_inline_data,		14)	\
	x(new_varint,			15)	\
	x(journal_no_flush,		16)	\
	x(alloc_v2,			17)	\
	x(extents_across_btree_nodes,	18)
#define BCH_SB_FEATURES_ALWAYS				\
	((1ULL << BCH_FEATURE_new_extent_overwrite)|	\
	 (1ULL << BCH_FEATURE_extents_above_btree_updates)|\
	 (1ULL << BCH_FEATURE_btree_updates_journalled)|\
	 (1ULL << BCH_FEATURE_alloc_v2)|\
	 (1ULL << BCH_FEATURE_extents_across_btree_nodes))

#define BCH_SB_FEATURES_ALL				\
	(BCH_SB_FEATURES_ALWAYS|			\
	 (1ULL << BCH_FEATURE_new_siphash)|		\
	 (1ULL << BCH_FEATURE_btree_ptr_v2)|		\
	 (1ULL << BCH_FEATURE_new_varint)|		\
	 (1ULL << BCH_FEATURE_journal_no_flush))

enum bch_sb_feature {
#define x(f, n) BCH_FEATURE_##f,
	BCH_SB_FEATURES()
#undef x
	BCH_FEATURE_NR,
};
#define BCH_SB_COMPAT()					\
	x(alloc_info,				0)	\
	x(alloc_metadata,			1)	\
	x(extents_above_btree_updates_done,	2)	\
	x(bformat_overflow_done,		3)

enum bch_sb_compat {
#define x(f, n) BCH_COMPAT_##f,
	BCH_SB_COMPAT()
#undef x
	BCH_COMPAT_NR,
};
/* options: */

#define BCH_REPLICAS_MAX		4U

#define BCH_BKEY_PTRS_MAX		16U

#define BCH_ERROR_ACTIONS()		\
	x(continue,		0)	\
	x(ro,			1)	\
	x(panic,		2)

enum bch_error_actions {
#define x(t, n) BCH_ON_ERROR_##t = n,
	BCH_ERROR_ACTIONS()
#undef x
	BCH_ON_ERROR_NR
};

#define BCH_STR_HASH_TYPES()		\
	x(crc32c,		0)	\
	x(crc64,		1)	\
	x(siphash_old,		2)	\
	x(siphash,		3)

enum bch_str_hash_type {
#define x(t, n) BCH_STR_HASH_##t = n,
	BCH_STR_HASH_TYPES()
#undef x
	BCH_STR_HASH_NR
};

#define BCH_STR_HASH_OPTS()		\
	x(crc32c,		0)	\
	x(crc64,		1)	\
	x(siphash,		2)

enum bch_str_hash_opts {
#define x(t, n) BCH_STR_HASH_OPT_##t = n,
	BCH_STR_HASH_OPTS()
#undef x
	BCH_STR_HASH_OPT_NR
};
#define BCH_CSUM_TYPES()			\
	x(none,				0)	\
	x(crc32c_nonzero,		1)	\
	x(crc64_nonzero,		2)	\
	x(chacha20_poly1305_80,		3)	\
	x(chacha20_poly1305_128,	4)	\
	x(crc32c,			5)	\
	x(crc64,			6)	\
	x(xxhash,			7)

enum bch_csum_type {
#define x(t, n) BCH_CSUM_##t = n,
	BCH_CSUM_TYPES()
#undef x
	BCH_CSUM_NR
};

static const unsigned bch_crc_bytes[] = {
	[BCH_CSUM_none]				= 0,
	[BCH_CSUM_crc32c_nonzero]		= 4,
	[BCH_CSUM_crc32c]			= 4,
	[BCH_CSUM_crc64_nonzero]		= 8,
	[BCH_CSUM_crc64]			= 8,
	[BCH_CSUM_xxhash]			= 8,
	[BCH_CSUM_chacha20_poly1305_80]		= 10,
	[BCH_CSUM_chacha20_poly1305_128]	= 16,
};

static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type)
{
	switch (type) {
	case BCH_CSUM_chacha20_poly1305_80:
	case BCH_CSUM_chacha20_poly1305_128:
		return true;
	default:
		return false;
	}
}
#define BCH_CSUM_OPTS()			\
	x(none,			0)	\
	x(crc32c,		1)	\
	x(crc64,		2)	\
	x(xxhash,		3)

enum bch_csum_opts {
#define x(t, n) BCH_CSUM_OPT_##t = n,
	BCH_CSUM_OPTS()
#undef x
	BCH_CSUM_OPT_NR
};

#define BCH_COMPRESSION_TYPES()		\
	x(none,			0)	\
	x(lz4_old,		1)	\
	x(gzip,			2)	\
	x(lz4,			3)	\
	x(zstd,			4)	\
	x(incompressible,	5)

enum bch_compression_type {
#define x(t, n) BCH_COMPRESSION_TYPE_##t = n,
	BCH_COMPRESSION_TYPES()
#undef x
	BCH_COMPRESSION_TYPE_NR
};

#define BCH_COMPRESSION_OPTS()		\
	x(none,		0)		\
	x(lz4,		1)		\
	x(gzip,		2)		\
	x(zstd,		3)

enum bch_compression_opts {
#define x(t, n) BCH_COMPRESSION_OPT_##t = n,
	BCH_COMPRESSION_OPTS()
#undef x
	BCH_COMPRESSION_OPT_NR
};
/*
 * Magic numbers
 *
 * The various other data structures have their own magic numbers, which are
 * xored with the first part of the cache set's UUID
 */

#define BCACHE_MAGIC							\
	UUID_LE(0xf67385c6, 0x1a4e, 0xca45,				\
		0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)

#define BCACHEFS_STATFS_MAGIC		0xca451a4e

#define JSET_MAGIC		__cpu_to_le64(0x245235c1a3625032ULL)
#define BSET_MAGIC		__cpu_to_le64(0x90135c78b99e07f5ULL)

static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
{
	__le64 ret;

	memcpy(&ret, &sb->uuid, sizeof(ret));
	return ret;
}

static inline __u64 __jset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch2_sb_magic(sb) ^ JSET_MAGIC);
}

static inline __u64 __bset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch2_sb_magic(sb) ^ BSET_MAGIC);
}
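
/*
 * Editorial note: per-structure magics are derived rather than stored
 * constants - a jset is recognized by matching the first 64 bits of the
 * superblock UUID xored with JSET_MAGIC, which ties journal entries and
 * btree nodes to the filesystem that wrote them.
 */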
/* Journal */

#define JSET_KEYS_U64s	(sizeof(struct jset_entry) / sizeof(__u64))

#define BCH_JSET_ENTRY_TYPES()			\
	x(btree_keys,		0)		\
	x(btree_root,		1)		\
	x(prio_ptrs,		2)		\
	x(blacklist,		3)		\
	x(blacklist_v2,		4)		\
	x(usage,		5)		\
	x(data_usage,		6)		\
	x(clock,		7)		\
	x(dev_usage,		8)		\
	x(log,			9)

enum {
#define x(f, nr)	BCH_JSET_ENTRY_##f	= nr,
	BCH_JSET_ENTRY_TYPES()
#undef x
	BCH_JSET_ENTRY_NR
};
/*
 * Journal sequence numbers can be blacklisted: bsets record the max sequence
 * number of all the journal entries they contain updates for, so that on
 * recovery we can ignore those bsets that contain index updates newer than
 * what made it into the journal.
 *
 * This means that we can't reuse that journal_seq - we have to skip it, and
 * then record that we skipped it so that the next time we crash and recover we
 * don't think there was a missing journal entry.
 */
struct jset_entry_blacklist {
	struct jset_entry	entry;
	__le64			seq;
};

struct jset_entry_blacklist_v2 {
	struct jset_entry	entry;
	__le64			start;
	__le64			end;
};
#define BCH_FS_USAGE_TYPES()			\
	x(reserved,		0)		\
	x(inodes,		1)		\
	x(key_version,		2)

enum {
#define x(f, nr)	BCH_FS_USAGE_##f	= nr,
	BCH_FS_USAGE_TYPES()
#undef x
	BCH_FS_USAGE_NR
};

struct jset_entry_usage {
	struct jset_entry	entry;
	__le64			v;
} __attribute__((packed));

struct jset_entry_data_usage {
	struct jset_entry	entry;
	__le64			v;
	struct bch_replicas_entry r;
} __attribute__((packed));

struct jset_entry_clock {
	struct jset_entry	entry;
	__u8			rw;
	__u8			pad[7];
	__le64			time;
} __attribute__((packed));

struct jset_entry_dev_usage_type {
	__le64			buckets;
	__le64			sectors;
	__le64			fragmented;
} __attribute__((packed));

struct jset_entry_dev_usage {
	struct jset_entry	entry;
	__le32			dev;
	__u32			pad;

	__le64			buckets_ec;
	__le64			buckets_unavailable;

	struct jset_entry_dev_usage_type d[];
} __attribute__((packed));

static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u)
{
	return (vstruct_bytes(&u->entry) - sizeof(struct jset_entry_dev_usage)) /
		sizeof(struct jset_entry_dev_usage_type);
}

struct jset_entry_log {
	struct jset_entry	entry;
	u8			d[];
} __attribute__((packed));
/*
 * On disk format for a journal entry:
 * seq is monotonically increasing; every journal entry has its own unique
 * sequence number.
 *
 * last_seq is the oldest journal entry that still has keys the btree hasn't
 * flushed to disk yet.
 *
 * version is for on disk format changes.
 */
struct jset {
	struct bch_csum		csum;

	__le64			magic;
	__le64			seq;
	__le32			version;
	__le32			flags;

	__le32			u64s; /* size of d[] in u64s */

	__u8			encrypted_start[0];

	__le16			_read_clock; /* no longer used */
	__le16			_write_clock;

	/* Sequence number of oldest dirty journal entry */
	__le64			last_seq;

	union {
		struct jset_entry start[0];
		__u64		_data[0];
	};
} __attribute__((packed, aligned(8)));

LE32_BITMASK(JSET_CSUM_TYPE,	struct jset, flags, 0, 4);
LE32_BITMASK(JSET_BIG_ENDIAN,	struct jset, flags, 4, 5);
LE32_BITMASK(JSET_NO_FLUSH,	struct jset, flags, 5, 6);

#define BCH_JOURNAL_BUCKETS_MIN		8
/* Btree: */

#define BCH_BTREE_IDS()				\
	x(extents,	0)			\
	x(inodes,	1)			\
	x(dirents,	2)			\
	x(xattrs,	3)			\
	x(alloc,	4)			\
	x(quotas,	5)			\
	x(stripes,	6)			\
	x(reflink,	7)			\
	x(subvolumes,	8)			\
	x(snapshots,	9)			\
	x(lru,		10)

enum btree_id {
#define x(kwd, val) BTREE_ID_##kwd = val,
	BCH_BTREE_IDS()
#undef x
	BTREE_ID_NR
};

#define BTREE_MAX_DEPTH		4U
/* Btree nodes */

/*
 * Btree nodes
 *
 * On disk a btree node is a list/log of these; within each set the keys are
 * sorted
 */
struct bset {
	__le64			seq;

	/*
	 * Highest journal entry this bset contains keys for.
	 * If on recovery we don't see that journal entry, this bset is ignored:
	 * this allows us to preserve the order of all index updates after a
	 * crash, since the journal records a total order of all index updates
	 * and anything that didn't make it to the journal doesn't get used.
	 */
	__le64			journal_seq;

	__le32			flags;
	__le16			version;
	__le16			u64s; /* count of d[] in u64s */

	union {
		struct bkey_packed start[0];
		__u64		_data[0];
	};
} __attribute__((packed, aligned(8)));

LE32_BITMASK(BSET_CSUM_TYPE,	struct bset, flags, 0, 4);

LE32_BITMASK(BSET_BIG_ENDIAN,	struct bset, flags, 4, 5);
LE32_BITMASK(BSET_SEPARATE_WHITEOUTS,
				struct bset, flags, 5, 6);

/* Sector offset within the btree node: */
LE32_BITMASK(BSET_OFFSET,	struct bset, flags, 16, 32);
struct btree_node {
	struct bch_csum		csum;
	__le64			magic;

	/* this flags field is encrypted, unlike bset->flags: */
	__le64			flags;

	/* Closed interval: */
	struct bpos		min_key;
	struct bpos		max_key;
	struct bch_extent_ptr	_ptr; /* not used anymore */
	struct bkey_format	format;

	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];

	};
	};
} __attribute__((packed, aligned(8)));

LE64_BITMASK(BTREE_NODE_ID,	struct btree_node, flags,  0,  4);
LE64_BITMASK(BTREE_NODE_LEVEL,	struct btree_node, flags,  4,  8);
LE64_BITMASK(BTREE_NODE_NEW_EXTENT_OVERWRITE,
				struct btree_node, flags,  8,  9);

LE64_BITMASK(BTREE_NODE_SEQ,	struct btree_node, flags, 32, 64);

struct btree_node_entry {
	struct bch_csum		csum;

	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];

	};
	};
} __attribute__((packed, aligned(8)));

#endif /* _BCACHEFS_FORMAT_H */