move Rust sources to top level, C sources into c_src
diff --git a/c_src/libbcachefs/bcachefs_format.h b/c_src/libbcachefs/bcachefs_format.h
new file mode 100644 (file)
index 0000000..0d5ac41
--- /dev/null
@@ -0,0 +1,2451 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_FORMAT_H
+#define _BCACHEFS_FORMAT_H
+
+/*
+ * bcachefs on disk data structures
+ *
+ * OVERVIEW:
+ *
+ * There are three main types of on-disk data structures in bcachefs (down
+ * from five in bcache):
+ *
+ *  - superblock
+ *  - journal
+ *  - btree
+ *
+ * The btree is the primary structure; most metadata exists as keys in the
+ * various btrees. There are only a small number of btrees, they're not
+ * sharded - we have one btree for extents, another for inodes, et cetera.
+ *
+ * SUPERBLOCK:
+ *
+ * The superblock contains the location of the journal, the list of devices in
+ * the filesystem, and in general any metadata we need in order to decide
+ * whether we can start a filesystem, as well as anything we need prior to
+ * reading the journal/btree roots.
+ *
+ * The superblock is extensible, and most of the contents of the superblock are
+ * in variable length, type tagged fields; see struct bch_sb_field.
+ *
+ * Backup superblocks do not reside in a fixed location; also, superblocks do
+ * not have a fixed size. To locate backup superblocks we have struct
+ * bch_sb_layout; we store a copy of this inside every superblock, and also
+ * before the first superblock.
+ *
+ * JOURNAL:
+ *
+ * The journal primarily records btree updates in the order they occurred;
+ * journal replay consists of just iterating over all the keys in the open
+ * journal entries and re-inserting them into the btrees.
+ *
+ * The journal also contains entry types for the btree roots, and blacklisted
+ * journal sequence numbers (see journal_seq_blacklist.c).
+ *
+ * BTREE:
+ *
+ * bcachefs btrees are copy on write b+ trees, where nodes are big (typically
+ * 128k-256k) and log structured. We use struct btree_node for writing the first
+ * entry in a given node (offset 0), and struct btree_node_entry for all
+ * subsequent writes.
+ *
+ * After the header, btree node entries contain a list of keys in sorted order.
+ * Values are stored inline with the keys; since values are variable length (and
+ * keys effectively are variable length too, due to packing) we can't do random
+ * access without building up additional in memory tables in the btree node read
+ * path.
+ *
+ * BTREE KEYS (struct bkey):
+ *
+ * The various btrees share a common format for the key - so as to avoid
+ * switching in fastpath lookup/comparison code - but define their own
+ * structures for the key values.
+ *
+ * The size of a key/value pair is stored as a u8 in units of u64s, so the max
+ * size is just under 2k. The common part also contains a type tag for the
+ * value, and a format field indicating whether the key is packed or not (and
+ * also meant to allow adding new key fields in the future, if desired).
+ *
+ * bkeys, when stored within a btree node, may also be packed. In that case, the
+ * bkey_format in that node is used to unpack it. Packed bkeys mean that we can
+ * be generous with field sizes in the common part of the key format (64 bit
+ * inode number, 64 bit offset, 96 bit version field, etc.) for negligible cost.
+ */
+
+#include <asm/types.h>
+#include <asm/byteorder.h>
+#include <linux/kernel.h>
+#include <linux/uuid.h>
+#include "vstructs.h"
+
+#ifdef __KERNEL__
+typedef uuid_t __uuid_t;
+#endif
+
+#define BITMASK(name, type, field, offset, end)                                \
+static const __maybe_unused unsigned   name##_OFFSET = offset;         \
+static const __maybe_unused unsigned   name##_BITS = (end - offset);   \
+                                                                       \
+static inline __u64 name(const type *k)                                        \
+{                                                                      \
+       return (k->field >> offset) & ~(~0ULL << (end - offset));       \
+}                                                                      \
+                                                                       \
+static inline void SET_##name(type *k, __u64 v)                                \
+{                                                                      \
+       k->field &= ~(~(~0ULL << (end - offset)) << offset);            \
+       k->field |= (v & ~(~0ULL << (end - offset))) << offset;         \
+}
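+
+/*
+ * For example, BITMASK(BCH_ALLOC_V4_NEED_DISCARD, struct bch_alloc_v4,
+ * flags, 0, 1) (used further down) expands to a getter
+ * BCH_ALLOC_V4_NEED_DISCARD() returning (k->flags >> 0) & 1, and a setter
+ * SET_BCH_ALLOC_V4_NEED_DISCARD() that masks out bit 0 of k->flags and ors in
+ * the new value.
+ */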
+
+#define LE_BITMASK(_bits, name, type, field, offset, end)              \
+static const __maybe_unused unsigned   name##_OFFSET = offset;         \
+static const __maybe_unused unsigned   name##_BITS = (end - offset);   \
+static const __maybe_unused __u##_bits name##_MAX = (1ULL << (end - offset)) - 1;\
+                                                                       \
+static inline __u64 name(const type *k)                                        \
+{                                                                      \
+       return (__le##_bits##_to_cpu(k->field) >> offset) &             \
+               ~(~0ULL << (end - offset));                             \
+}                                                                      \
+                                                                       \
+static inline void SET_##name(type *k, __u64 v)                                \
+{                                                                      \
+       __u##_bits new = __le##_bits##_to_cpu(k->field);                \
+                                                                       \
+       new &= ~(~(~0ULL << (end - offset)) << offset);                 \
+       new |= (v & ~(~0ULL << (end - offset))) << offset;              \
+       k->field = __cpu_to_le##_bits(new);                             \
+}
+
+#define LE16_BITMASK(n, t, f, o, e)    LE_BITMASK(16, n, t, f, o, e)
+#define LE32_BITMASK(n, t, f, o, e)    LE_BITMASK(32, n, t, f, o, e)
+#define LE64_BITMASK(n, t, f, o, e)    LE_BITMASK(64, n, t, f, o, e)
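+
+/*
+ * The LE variants work the same way but operate on a fixed-endianness on-disk
+ * field: e.g. LE64_BITMASK(BCH_MEMBER_STATE, struct bch_member, flags, 0, 4)
+ * (used further down) generates accessors that wrap the bit manipulation in
+ * __le64_to_cpu()/__cpu_to_le64(), and also defines
+ * BCH_MEMBER_STATE_MAX = (1ULL << 4) - 1.
+ */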
+
+struct bkey_format {
+       __u8            key_u64s;
+       __u8            nr_fields;
+       /* One unused slot for now: */
+       __u8            bits_per_field[6];
+       __le64          field_offset[6];
+};
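+
+/*
+ * Each field of a packed bkey is stored in bits_per_field[i] bits, with
+ * field_offset[i] added back on unpack. A minimal sketch of the relationship
+ * (get_field_bits() is a hypothetical helper, not part of this header):
+ */
+#if 0
+__u64 unpacked = get_field_bits(packed_key, i) +       /* assumed helper */
+                __le64_to_cpu(f->field_offset[i]);
+#endif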
+
+/* Btree keys - all units are in sectors */
+
+struct bpos {
+       /*
+        * Word order matches machine byte order - btree code treats a bpos as a
+        * single large integer, for search/comparison purposes
+        *
+        * Note that wherever a bpos is embedded in another on disk data
+        * structure, it has to be byte swabbed when reading in metadata that
+        * wasn't written in native endian order:
+        */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+       __u32           snapshot;
+       __u64           offset;
+       __u64           inode;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+       __u64           inode;
+       __u64           offset;         /* Points to end of extent - sectors */
+       __u32           snapshot;
+#else
+#error edit for your odd byteorder.
+#endif
+} __packed
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+__aligned(4)
+#endif
+;
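+
+/*
+ * A sketch of the resulting comparison order (illustrative only: cmp_int() is
+ * an assumed helper, and the real btree code compares packed keys directly as
+ * large integers):
+ */
+#if 0
+static inline int bpos_cmp_sketch(struct bpos l, struct bpos r)
+{
+       return  cmp_int(l.inode,    r.inode) ?:
+               cmp_int(l.offset,   r.offset) ?:
+               cmp_int(l.snapshot, r.snapshot);
+}
+#endif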
+
+#define KEY_INODE_MAX                  ((__u64)~0ULL)
+#define KEY_OFFSET_MAX                 ((__u64)~0ULL)
+#define KEY_SNAPSHOT_MAX               ((__u32)~0U)
+#define KEY_SIZE_MAX                   ((__u32)~0U)
+
+static inline struct bpos SPOS(__u64 inode, __u64 offset, __u32 snapshot)
+{
+       return (struct bpos) {
+               .inode          = inode,
+               .offset         = offset,
+               .snapshot       = snapshot,
+       };
+}
+
+#define POS_MIN                                SPOS(0, 0, 0)
+#define POS_MAX                                SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, 0)
+#define SPOS_MAX                       SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, KEY_SNAPSHOT_MAX)
+#define POS(_inode, _offset)           SPOS(_inode, _offset, 0)
+
+/* Empty placeholder struct, for container_of() */
+struct bch_val {
+       __u64           __nothing[0];
+};
+
+struct bversion {
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+       __u64           lo;
+       __u32           hi;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+       __u32           hi;
+       __u64           lo;
+#endif
+} __packed __aligned(4);
+
+struct bkey {
+       /* Size of combined key and value, in u64s */
+       __u8            u64s;
+
+       /* Format of key (0 for format local to btree node) */
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       __u8            format:7,
+                       needs_whiteout:1;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+       __u8            needs_whiteout:1,
+                       format:7;
+#else
+#error edit for your odd byteorder.
+#endif
+
+       /* Type of the value */
+       __u8            type;
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+       __u8            pad[1];
+
+       struct bversion version;
+       __u32           size;           /* extent size, in sectors */
+       struct bpos     p;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+       struct bpos     p;
+       __u32           size;           /* extent size, in sectors */
+       struct bversion version;
+
+       __u8            pad[1];
+#endif
+} __packed __aligned(8);
+
+struct bkey_packed {
+       __u64           _data[0];
+
+       /* Size of combined key and value, in u64s */
+       __u8            u64s;
+
+       /* Format of key (0 for format local to btree node) */
+
+       /*
+        * XXX: next incompat on disk format change, switch format and
+        * needs_whiteout - bkey_packed() will be cheaper if format is the high
+        * bits of the bitfield
+        */
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       __u8            format:7,
+                       needs_whiteout:1;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+       __u8            needs_whiteout:1,
+                       format:7;
+#endif
+
+       /* Type of the value */
+       __u8            type;
+       __u8            key_start[0];
+
+       /*
+        * We copy bkeys with struct assignment in various places, and while
+        * that shouldn't be done with packed bkeys we can't disallow it in C,
+        * and it's legal to cast a bkey to a bkey_packed - so padding it out
+        * to the same size as struct bkey should hopefully be safest.
+        */
+       __u8            pad[sizeof(struct bkey) - 3];
+} __packed __aligned(8);
+
+typedef struct {
+       __le64                  lo;
+       __le64                  hi;
+} bch_le128;
+
+#define BKEY_U64s                      (sizeof(struct bkey) / sizeof(__u64))
+#define BKEY_U64s_MAX                  U8_MAX
+#define BKEY_VAL_U64s_MAX              (BKEY_U64s_MAX - BKEY_U64s)
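+
+/*
+ * With the layout above sizeof(struct bkey) is 40 bytes, so BKEY_U64s is 5
+ * and a value can be at most 250 u64s (2000 bytes) - the "just under 2k"
+ * mentioned in the overview comment.
+ */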
+
+#define KEY_PACKED_BITS_START          24
+
+#define KEY_FORMAT_LOCAL_BTREE         0
+#define KEY_FORMAT_CURRENT             1
+
+enum bch_bkey_fields {
+       BKEY_FIELD_INODE,
+       BKEY_FIELD_OFFSET,
+       BKEY_FIELD_SNAPSHOT,
+       BKEY_FIELD_SIZE,
+       BKEY_FIELD_VERSION_HI,
+       BKEY_FIELD_VERSION_LO,
+       BKEY_NR_FIELDS,
+};
+
+#define bkey_format_field(name, field)                                 \
+       [BKEY_FIELD_##name] = (sizeof(((struct bkey *) NULL)->field) * 8)
+
+#define BKEY_FORMAT_CURRENT                                            \
+((struct bkey_format) {                                                        \
+       .key_u64s       = BKEY_U64s,                                    \
+       .nr_fields      = BKEY_NR_FIELDS,                               \
+       .bits_per_field = {                                             \
+               bkey_format_field(INODE,        p.inode),               \
+               bkey_format_field(OFFSET,       p.offset),              \
+               bkey_format_field(SNAPSHOT,     p.snapshot),            \
+               bkey_format_field(SIZE,         size),                  \
+               bkey_format_field(VERSION_HI,   version.hi),            \
+               bkey_format_field(VERSION_LO,   version.lo),            \
+       },                                                              \
+})
+
+/* bkey with inline value */
+struct bkey_i {
+       __u64                   _data[0];
+
+       struct bkey     k;
+       struct bch_val  v;
+};
+
+#define POS_KEY(_pos)                                                  \
+((struct bkey) {                                                       \
+       .u64s           = BKEY_U64s,                                    \
+       .format         = KEY_FORMAT_CURRENT,                           \
+       .p              = _pos,                                         \
+})
+
+#define KEY(_inode, _offset, _size)                                    \
+((struct bkey) {                                                       \
+       .u64s           = BKEY_U64s,                                    \
+       .format         = KEY_FORMAT_CURRENT,                           \
+       .p              = POS(_inode, _offset),                         \
+       .size           = _size,                                        \
+})
+
+static inline void bkey_init(struct bkey *k)
+{
+       *k = KEY(0, 0, 0);
+}
+
+#define bkey_bytes(_k)         ((_k)->u64s * sizeof(__u64))
+
+#define __BKEY_PADDED(key, pad)                                        \
+       struct bkey_i key; __u64 key ## _pad[pad]
+
+/*
+ * - DELETED keys are used internally to mark keys that should be ignored but
+ *   override keys in composition order.  Their version number is ignored.
+ *
+ * - DISCARDED keys indicate that the data is all 0s because it has been
+ *   discarded. DISCARDs may have a version; if the version is nonzero the key
+ *   will be persistent, otherwise the key will be dropped whenever the btree
+ *   node is rewritten (like DELETED keys).
+ *
+ * - ERROR: any read of the data returns a read error, as the data was lost due
+ *   to a failing device. Like DISCARDED keys, they can be removed (overridden)
+ *   by new writes or cluster-wide GC. Node repair can also overwrite them with
+ *   the same or a more recent version number, but not with an older version
+ *   number.
+ *
+ * - WHITEOUT: for hash table btrees
+ */
+#define BCH_BKEY_TYPES()                               \
+       x(deleted,              0)                      \
+       x(whiteout,             1)                      \
+       x(error,                2)                      \
+       x(cookie,               3)                      \
+       x(hash_whiteout,        4)                      \
+       x(btree_ptr,            5)                      \
+       x(extent,               6)                      \
+       x(reservation,          7)                      \
+       x(inode,                8)                      \
+       x(inode_generation,     9)                      \
+       x(dirent,               10)                     \
+       x(xattr,                11)                     \
+       x(alloc,                12)                     \
+       x(quota,                13)                     \
+       x(stripe,               14)                     \
+       x(reflink_p,            15)                     \
+       x(reflink_v,            16)                     \
+       x(inline_data,          17)                     \
+       x(btree_ptr_v2,         18)                     \
+       x(indirect_inline_data, 19)                     \
+       x(alloc_v2,             20)                     \
+       x(subvolume,            21)                     \
+       x(snapshot,             22)                     \
+       x(inode_v2,             23)                     \
+       x(alloc_v3,             24)                     \
+       x(set,                  25)                     \
+       x(lru,                  26)                     \
+       x(alloc_v4,             27)                     \
+       x(backpointer,          28)                     \
+       x(inode_v3,             29)                     \
+       x(bucket_gens,          30)                     \
+       x(snapshot_tree,        31)                     \
+       x(logged_op_truncate,   32)                     \
+       x(logged_op_finsert,    33)
+
+enum bch_bkey_type {
+#define x(name, nr) KEY_TYPE_##name    = nr,
+       BCH_BKEY_TYPES()
+#undef x
+       KEY_TYPE_MAX,
+};
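+
+/*
+ * i.e. the x-macro expands to KEY_TYPE_deleted = 0, KEY_TYPE_whiteout = 1,
+ * ..., KEY_TYPE_logged_op_finsert = 33, with KEY_TYPE_MAX = 34.
+ */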
+
+struct bch_deleted {
+       struct bch_val          v;
+};
+
+struct bch_whiteout {
+       struct bch_val          v;
+};
+
+struct bch_error {
+       struct bch_val          v;
+};
+
+struct bch_cookie {
+       struct bch_val          v;
+       __le64                  cookie;
+};
+
+struct bch_hash_whiteout {
+       struct bch_val          v;
+};
+
+struct bch_set {
+       struct bch_val          v;
+};
+
+/* Extents */
+
+/*
+ * In extent bkeys, the value is a list of pointers (bch_extent_ptr), optionally
+ * preceded by checksum/compression information (bch_extent_crc32 or
+ * bch_extent_crc64).
+ *
+ * One major determining factor in the format of extents is how we handle and
+ * represent extents that have been partially overwritten and thus trimmed:
+ *
+ * If an extent is not checksummed or compressed, when the extent is trimmed we
+ * don't have to remember the extent we originally allocated and wrote: we can
+ * merely adjust ptr->offset to point to the start of the data that is currently
+ * live. The size field in struct bkey records the current (live) size of the
+ * extent, and is also used to mean "size of region on disk that we point to" in
+ * this case.
+ *
+ * Thus an extent that is not checksummed or compressed will consist only of a
+ * list of bch_extent_ptrs, with none of the fields in
+ * bch_extent_crc32/bch_extent_crc64.
+ *
+ * When an extent is checksummed or compressed, it's not possible to read only
+ * the data that is currently live: we have to read the entire extent that was
+ * originally written, and then return only the part of the extent that is
+ * currently live.
+ *
+ * Thus, in addition to the current size of the extent in struct bkey, we need
+ * to store the size of the originally allocated space - this is the
+ * compressed_size and uncompressed_size fields in bch_extent_crc32/64. Also,
+ * when the extent is trimmed, instead of modifying the offset field of the
+ * pointer, we keep a second smaller offset field - "offset into the original
+ * extent of the currently live region".
+ *
+ * The other major determining factor is replication and data migration:
+ *
+ * Each pointer may have its own bch_extent_crc32/64. When doing a replicated
+ * write, we will initially write all the replicas in the same format, with the
+ * same checksum type and compression format - however, when copygc runs later (or
+ * tiering/cache promotion, anything that moves data), it is not in general
+ * going to rewrite all the pointers at once - one of the replicas may be in a
+ * bucket on one device that has very little fragmentation while another lives
+ * in a bucket that has become heavily fragmented, and thus is being rewritten
+ * sooner than the rest.
+ *
+ * Thus it will only move a subset of the pointers (or in the case of
+ * tiering/cache promotion perhaps add a single pointer without dropping any
+ * current pointers), and if the extent has been partially overwritten it must
+ * write only the currently live portion (or copygc would not be able to reduce
+ * fragmentation!) - which necessitates a different bch_extent_crc format for
+ * the new pointer.
+ *
+ * But in the interests of space efficiency, we don't want to store one
+ * bch_extent_crc for each pointer if we don't have to.
+ *
+ * Thus, a bch_extent consists of bch_extent_crc32s, bch_extent_crc64s, and
+ * bch_extent_ptrs appended arbitrarily one after the other. We determine the
+ * type of a given entry with a scheme similar to utf8 (except we're encoding a
+ * type, not a size), encoding the type in the position of the first set bit:
+ *
+ * bch_extent_ptr      - 0b1
+ * bch_extent_crc32    - 0b10
+ * bch_extent_crc64    - 0b100
+ *
+ * We do it this way because bch_extent_crc32 is _very_ constrained on bits (and
+ * bch_extent_crc64 is the least constrained).
+ *
+ * Then, each bch_extent_crc32/64 applies to the pointers that follow after it,
+ * until the next bch_extent_crc32/64.
+ *
+ * If there are no bch_extent_crcs preceding a bch_extent_ptr, then that pointer
+ * is neither checksummed nor compressed.
+ */
+
+/* 128 bits, sufficient for cryptographic MACs: */
+struct bch_csum {
+       __le64                  lo;
+       __le64                  hi;
+} __packed __aligned(8);
+
+#define BCH_EXTENT_ENTRY_TYPES()               \
+       x(ptr,                  0)              \
+       x(crc32,                1)              \
+       x(crc64,                2)              \
+       x(crc128,               3)              \
+       x(stripe_ptr,           4)              \
+       x(rebalance,            5)
+#define BCH_EXTENT_ENTRY_MAX   6
+
+enum bch_extent_entry_type {
+#define x(f, n) BCH_EXTENT_ENTRY_##f = n,
+       BCH_EXTENT_ENTRY_TYPES()
+#undef x
+};
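+
+/*
+ * Hence an entry's type is recoverable from the position of the lowest set
+ * bit - a minimal sketch (the in-tree helper may differ):
+ */
+#if 0
+static inline enum bch_extent_entry_type
+extent_entry_type_sketch(const union bch_extent_entry *e)
+{
+       return __ffs(e->type);  /* e.g. type == 0b10 -> BCH_EXTENT_ENTRY_crc32 */
+}
+#endif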
+
+/* Compressed/uncompressed size are stored biased by 1: */
+struct bch_extent_crc32 {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       __u32                   type:2,
+                               _compressed_size:7,
+                               _uncompressed_size:7,
+                               offset:7,
+                               _unused:1,
+                               csum_type:4,
+                               compression_type:4;
+       __u32                   csum;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+       __u32                   csum;
+       __u32                   compression_type:4,
+                               csum_type:4,
+                               _unused:1,
+                               offset:7,
+                               _uncompressed_size:7,
+                               _compressed_size:7,
+                               type:2;
+#endif
+} __packed __aligned(8);
+
+#define CRC32_SIZE_MAX         (1U << 7)
+#define CRC32_NONCE_MAX                0
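+
+/*
+ * Sizes being biased by 1 means a stored _compressed_size of 0 denotes 1
+ * sector; an accessor would look like this sketch (name is illustrative):
+ */
+#if 0
+static inline unsigned crc32_compressed_size(const struct bch_extent_crc32 *c)
+{
+       return c->_compressed_size + 1; /* undo the bias of 1 */
+}
+#endif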
+
+struct bch_extent_crc64 {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       __u64                   type:3,
+                               _compressed_size:9,
+                               _uncompressed_size:9,
+                               offset:9,
+                               nonce:10,
+                               csum_type:4,
+                               compression_type:4,
+                               csum_hi:16;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+       __u64                   csum_hi:16,
+                               compression_type:4,
+                               csum_type:4,
+                               nonce:10,
+                               offset:9,
+                               _uncompressed_size:9,
+                               _compressed_size:9,
+                               type:3;
+#endif
+       __u64                   csum_lo;
+} __packed __aligned(8);
+
+#define CRC64_SIZE_MAX         (1U << 9)
+#define CRC64_NONCE_MAX                ((1U << 10) - 1)
+
+struct bch_extent_crc128 {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       __u64                   type:4,
+                               _compressed_size:13,
+                               _uncompressed_size:13,
+                               offset:13,
+                               nonce:13,
+                               csum_type:4,
+                               compression_type:4;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+       __u64                   compression_type:4,
+                               csum_type:4,
+                               nonce:13,
+                               offset:13,
+                               _uncompressed_size:13,
+                               _compressed_size:13,
+                               type:4;
+#endif
+       struct bch_csum         csum;
+} __packed __aligned(8);
+
+#define CRC128_SIZE_MAX                (1U << 13)
+#define CRC128_NONCE_MAX       ((1U << 13) - 1)
+
+/*
+ * @reservation - pointer hasn't been written to, just reserved
+ */
+struct bch_extent_ptr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       __u64                   type:1,
+                               cached:1,
+                               unused:1,
+                               unwritten:1,
+                               offset:44, /* 8 petabytes */
+                               dev:8,
+                               gen:8;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+       __u64                   gen:8,
+                               dev:8,
+                               offset:44,
+                               unwritten:1,
+                               unused:1,
+                               cached:1,
+                               type:1;
+#endif
+} __packed __aligned(8);
+
+struct bch_extent_stripe_ptr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       __u64                   type:5,
+                               block:8,
+                               redundancy:4,
+                               idx:47;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+       __u64                   idx:47,
+                               redundancy:4,
+                               block:8,
+                               type:5;
+#endif
+};
+
+struct bch_extent_rebalance {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       __u64                   type:6,
+                               unused:34,
+                               compression:8, /* enum bch_compression_opt */
+                               target:16;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+       __u64                   target:16,
+                               compression:8,
+                               unused:34,
+                               type:6;
+#endif
+};
+
+union bch_extent_entry {
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ ||  __BITS_PER_LONG == 64
+       unsigned long                   type;
+#elif __BITS_PER_LONG == 32
+       struct {
+               unsigned long           pad;
+               unsigned long           type;
+       };
+#else
+#error edit for your odd byteorder.
+#endif
+
+#define x(f, n) struct bch_extent_##f  f;
+       BCH_EXTENT_ENTRY_TYPES()
+#undef x
+};
+
+struct bch_btree_ptr {
+       struct bch_val          v;
+
+       __u64                   _data[0];
+       struct bch_extent_ptr   start[];
+} __packed __aligned(8);
+
+struct bch_btree_ptr_v2 {
+       struct bch_val          v;
+
+       __u64                   mem_ptr;
+       __le64                  seq;
+       __le16                  sectors_written;
+       __le16                  flags;
+       struct bpos             min_key;
+       __u64                   _data[0];
+       struct bch_extent_ptr   start[];
+} __packed __aligned(8);
+
+LE16_BITMASK(BTREE_PTR_RANGE_UPDATED,  struct bch_btree_ptr_v2, flags, 0, 1);
+
+struct bch_extent {
+       struct bch_val          v;
+
+       __u64                   _data[0];
+       union bch_extent_entry  start[];
+} __packed __aligned(8);
+
+struct bch_reservation {
+       struct bch_val          v;
+
+       __le32                  generation;
+       __u8                    nr_replicas;
+       __u8                    pad[3];
+} __packed __aligned(8);
+
+/* Maximum size (in u64s) a single pointer could be: */
+#define BKEY_EXTENT_PTR_U64s_MAX\
+       ((sizeof(struct bch_extent_crc128) +                    \
+         sizeof(struct bch_extent_ptr)) / sizeof(__u64))
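+
+/* i.e. a 24 byte bch_extent_crc128 plus an 8 byte bch_extent_ptr: 32 bytes, 4 u64s */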
+
+/* Maximum possible size of an entire extent value: */
+#define BKEY_EXTENT_VAL_U64s_MAX                               \
+       (1 + BKEY_EXTENT_PTR_U64s_MAX * (BCH_REPLICAS_MAX + 1))
+
+/* Maximum possible size of an entire extent, key + value: */
+#define BKEY_EXTENT_U64s_MAX           (BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)
+
+/* Btree pointers don't carry around checksums: */
+#define BKEY_BTREE_PTR_VAL_U64s_MAX                            \
+       ((sizeof(struct bch_btree_ptr_v2) +                     \
+         sizeof(struct bch_extent_ptr) * BCH_REPLICAS_MAX) / sizeof(__u64))
+#define BKEY_BTREE_PTR_U64s_MAX                                        \
+       (BKEY_U64s + BKEY_BTREE_PTR_VAL_U64s_MAX)
+
+/* Inodes */
+
+#define BLOCKDEV_INODE_MAX     4096
+
+#define BCACHEFS_ROOT_INO      4096
+
+struct bch_inode {
+       struct bch_val          v;
+
+       __le64                  bi_hash_seed;
+       __le32                  bi_flags;
+       __le16                  bi_mode;
+       __u8                    fields[];
+} __packed __aligned(8);
+
+struct bch_inode_v2 {
+       struct bch_val          v;
+
+       __le64                  bi_journal_seq;
+       __le64                  bi_hash_seed;
+       __le64                  bi_flags;
+       __le16                  bi_mode;
+       __u8                    fields[];
+} __packed __aligned(8);
+
+struct bch_inode_v3 {
+       struct bch_val          v;
+
+       __le64                  bi_journal_seq;
+       __le64                  bi_hash_seed;
+       __le64                  bi_flags;
+       __le64                  bi_sectors;
+       __le64                  bi_size;
+       __le64                  bi_version;
+       __u8                    fields[];
+} __packed __aligned(8);
+
+#define INODEv3_FIELDS_START_INITIAL   6
+#define INODEv3_FIELDS_START_CUR       (offsetof(struct bch_inode_v3, fields) / sizeof(__u64))
+
+struct bch_inode_generation {
+       struct bch_val          v;
+
+       __le32                  bi_generation;
+       __le32                  pad;
+} __packed __aligned(8);
+
+/*
+ * bi_subvol and bi_parent_subvol are only set for subvolume roots:
+ */
+
+#define BCH_INODE_FIELDS_v2()                  \
+       x(bi_atime,                     96)     \
+       x(bi_ctime,                     96)     \
+       x(bi_mtime,                     96)     \
+       x(bi_otime,                     96)     \
+       x(bi_size,                      64)     \
+       x(bi_sectors,                   64)     \
+       x(bi_uid,                       32)     \
+       x(bi_gid,                       32)     \
+       x(bi_nlink,                     32)     \
+       x(bi_generation,                32)     \
+       x(bi_dev,                       32)     \
+       x(bi_data_checksum,             8)      \
+       x(bi_compression,               8)      \
+       x(bi_project,                   32)     \
+       x(bi_background_compression,    8)      \
+       x(bi_data_replicas,             8)      \
+       x(bi_promote_target,            16)     \
+       x(bi_foreground_target,         16)     \
+       x(bi_background_target,         16)     \
+       x(bi_erasure_code,              16)     \
+       x(bi_fields_set,                16)     \
+       x(bi_dir,                       64)     \
+       x(bi_dir_offset,                64)     \
+       x(bi_subvol,                    32)     \
+       x(bi_parent_subvol,             32)
+
+#define BCH_INODE_FIELDS_v3()                  \
+       x(bi_atime,                     96)     \
+       x(bi_ctime,                     96)     \
+       x(bi_mtime,                     96)     \
+       x(bi_otime,                     96)     \
+       x(bi_uid,                       32)     \
+       x(bi_gid,                       32)     \
+       x(bi_nlink,                     32)     \
+       x(bi_generation,                32)     \
+       x(bi_dev,                       32)     \
+       x(bi_data_checksum,             8)      \
+       x(bi_compression,               8)      \
+       x(bi_project,                   32)     \
+       x(bi_background_compression,    8)      \
+       x(bi_data_replicas,             8)      \
+       x(bi_promote_target,            16)     \
+       x(bi_foreground_target,         16)     \
+       x(bi_background_target,         16)     \
+       x(bi_erasure_code,              16)     \
+       x(bi_fields_set,                16)     \
+       x(bi_dir,                       64)     \
+       x(bi_dir_offset,                64)     \
+       x(bi_subvol,                    32)     \
+       x(bi_parent_subvol,             32)     \
+       x(bi_nocow,                     8)
+
+/* subset of BCH_INODE_FIELDS */
+#define BCH_INODE_OPTS()                       \
+       x(data_checksum,                8)      \
+       x(compression,                  8)      \
+       x(project,                      32)     \
+       x(background_compression,       8)      \
+       x(data_replicas,                8)      \
+       x(promote_target,               16)     \
+       x(foreground_target,            16)     \
+       x(background_target,            16)     \
+       x(erasure_code,                 16)     \
+       x(nocow,                        8)
+
+enum inode_opt_id {
+#define x(name, ...)                           \
+       Inode_opt_##name,
+       BCH_INODE_OPTS()
+#undef  x
+       Inode_opt_nr,
+};
+
+#define BCH_INODE_FLAGS()                      \
+       x(sync,                         0)      \
+       x(immutable,                    1)      \
+       x(append,                       2)      \
+       x(nodump,                       3)      \
+       x(noatime,                      4)      \
+       x(i_size_dirty,                 5)      \
+       x(i_sectors_dirty,              6)      \
+       x(unlinked,                     7)      \
+       x(backptr_untrusted,            8)
+
+/* bits 20+ reserved for packed fields below: */
+
+enum bch_inode_flags {
+#define x(t, n)        BCH_INODE_##t = 1U << n,
+       BCH_INODE_FLAGS()
+#undef x
+};
+
+enum __bch_inode_flags {
+#define x(t, n)        __BCH_INODE_##t = n,
+       BCH_INODE_FLAGS()
+#undef x
+};
+
+LE32_BITMASK(INODE_STR_HASH,   struct bch_inode, bi_flags, 20, 24);
+LE32_BITMASK(INODE_NR_FIELDS,  struct bch_inode, bi_flags, 24, 31);
+LE32_BITMASK(INODE_NEW_VARINT, struct bch_inode, bi_flags, 31, 32);
+
+LE64_BITMASK(INODEv2_STR_HASH, struct bch_inode_v2, bi_flags, 20, 24);
+LE64_BITMASK(INODEv2_NR_FIELDS,        struct bch_inode_v2, bi_flags, 24, 31);
+
+LE64_BITMASK(INODEv3_STR_HASH, struct bch_inode_v3, bi_flags, 20, 24);
+LE64_BITMASK(INODEv3_NR_FIELDS,        struct bch_inode_v3, bi_flags, 24, 31);
+
+LE64_BITMASK(INODEv3_FIELDS_START,
+                               struct bch_inode_v3, bi_flags, 31, 36);
+LE64_BITMASK(INODEv3_MODE,     struct bch_inode_v3, bi_flags, 36, 52);
+
+/* Dirents */
+
+/*
+ * Dirents (and xattrs) have to implement string lookups; since our b-tree
+ * doesn't support arbitrary length strings for the key, we instead index by a
+ * 64 bit hash (currently truncated sha1) of the string, stored in the offset
+ * field of the key - using linear probing to resolve hash collisions. This also
+ * provides us with the readdir cookie posix requires.
+ *
+ * Linear probing requires us to use whiteouts for deletions, in the event of a
+ * collision:
+ */
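+
+/*
+ * Illustration (helper names assumed, not part of this header): a lookup of
+ * "foo" in directory inum searches from POS(inum, hash("foo")), walking
+ * forward past dirents whose names don't match; a whiteout left by a deletion
+ * keeps the probe from stopping at what would otherwise look like an empty
+ * slot.
+ */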
+
+struct bch_dirent {
+       struct bch_val          v;
+
+       /* Target inode number: */
+       union {
+       __le64                  d_inum;
+       struct {                /* DT_SUBVOL */
+       __le32                  d_child_subvol;
+       __le32                  d_parent_subvol;
+       };
+       };
+
+       /*
+        * Copy of mode bits 12-15 from the target inode - so userspace can get
+        * the filetype without having to do a stat()
+        */
+       __u8                    d_type;
+
+       __u8                    d_name[];
+} __packed __aligned(8);
+
+#define DT_SUBVOL      16
+#define BCH_DT_MAX     17
+
+#define BCH_NAME_MAX   512
+
+/* Xattrs */
+
+#define KEY_TYPE_XATTR_INDEX_USER                      0
+#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS  1
+#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT 2
+#define KEY_TYPE_XATTR_INDEX_TRUSTED                   3
+#define KEY_TYPE_XATTR_INDEX_SECURITY          4
+
+struct bch_xattr {
+       struct bch_val          v;
+       __u8                    x_type;
+       __u8                    x_name_len;
+       __le16                  x_val_len;
+       __u8                    x_name[];
+} __packed __aligned(8);
+
+/* Bucket/allocation information: */
+
+struct bch_alloc {
+       struct bch_val          v;
+       __u8                    fields;
+       __u8                    gen;
+       __u8                    data[];
+} __packed __aligned(8);
+
+#define BCH_ALLOC_FIELDS_V1()                  \
+       x(read_time,            16)             \
+       x(write_time,           16)             \
+       x(data_type,            8)              \
+       x(dirty_sectors,        16)             \
+       x(cached_sectors,       16)             \
+       x(oldest_gen,           8)              \
+       x(stripe,               32)             \
+       x(stripe_redundancy,    8)
+
+enum {
+#define x(name, _bits) BCH_ALLOC_FIELD_V1_##name,
+       BCH_ALLOC_FIELDS_V1()
+#undef x
+};
+
+struct bch_alloc_v2 {
+       struct bch_val          v;
+       __u8                    nr_fields;
+       __u8                    gen;
+       __u8                    oldest_gen;
+       __u8                    data_type;
+       __u8                    data[];
+} __packed __aligned(8);
+
+#define BCH_ALLOC_FIELDS_V2()                  \
+       x(read_time,            64)             \
+       x(write_time,           64)             \
+       x(dirty_sectors,        32)             \
+       x(cached_sectors,       32)             \
+       x(stripe,               32)             \
+       x(stripe_redundancy,    8)
+
+struct bch_alloc_v3 {
+       struct bch_val          v;
+       __le64                  journal_seq;
+       __le32                  flags;
+       __u8                    nr_fields;
+       __u8                    gen;
+       __u8                    oldest_gen;
+       __u8                    data_type;
+       __u8                    data[];
+} __packed __aligned(8);
+
+LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags,  0,  1)
+LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags,  1,  2)
+
+struct bch_alloc_v4 {
+       struct bch_val          v;
+       __u64                   journal_seq;
+       __u32                   flags;
+       __u8                    gen;
+       __u8                    oldest_gen;
+       __u8                    data_type;
+       __u8                    stripe_redundancy;
+       __u32                   dirty_sectors;
+       __u32                   cached_sectors;
+       __u64                   io_time[2];
+       __u32                   stripe;
+       __u32                   nr_external_backpointers;
+       __u64                   fragmentation_lru;
+} __packed __aligned(8);
+
+#define BCH_ALLOC_V4_U64s_V0   6
+#define BCH_ALLOC_V4_U64s      (sizeof(struct bch_alloc_v4) / sizeof(__u64))
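+
+/* i.e. with the packed layout above, currently 56 bytes, so BCH_ALLOC_V4_U64s is 7 */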
+
+BITMASK(BCH_ALLOC_V4_NEED_DISCARD,     struct bch_alloc_v4, flags,  0,  1)
+BITMASK(BCH_ALLOC_V4_NEED_INC_GEN,     struct bch_alloc_v4, flags,  1,  2)
+BITMASK(BCH_ALLOC_V4_BACKPOINTERS_START,struct bch_alloc_v4, flags,  2,  8)
+BITMASK(BCH_ALLOC_V4_NR_BACKPOINTERS,  struct bch_alloc_v4, flags,  8,  14)
+
+#define BCH_ALLOC_V4_NR_BACKPOINTERS_MAX       40
+
+struct bch_backpointer {
+       struct bch_val          v;
+       __u8                    btree_id;
+       __u8                    level;
+       __u8                    data_type;
+       __u64                   bucket_offset:40;
+       __u32                   bucket_len;
+       struct bpos             pos;
+} __packed __aligned(8);
+
+#define KEY_TYPE_BUCKET_GENS_BITS      8
+#define KEY_TYPE_BUCKET_GENS_NR                (1U << KEY_TYPE_BUCKET_GENS_BITS)
+#define KEY_TYPE_BUCKET_GENS_MASK      (KEY_TYPE_BUCKET_GENS_NR - 1)
+
+struct bch_bucket_gens {
+       struct bch_val          v;
+       u8                      gens[KEY_TYPE_BUCKET_GENS_NR];
+} __packed __aligned(8);
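+
+/*
+ * i.e. one bucket_gens key packs the generation numbers of 256 buckets:
+ * assuming the natural indexing implied by the constants above, bucket b's
+ * gen lives at key offset (b >> KEY_TYPE_BUCKET_GENS_BITS), array index
+ * (b & KEY_TYPE_BUCKET_GENS_MASK).
+ */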
+
+/* Quotas: */
+
+enum quota_types {
+       QTYP_USR                = 0,
+       QTYP_GRP                = 1,
+       QTYP_PRJ                = 2,
+       QTYP_NR                 = 3,
+};
+
+enum quota_counters {
+       Q_SPC                   = 0,
+       Q_INO                   = 1,
+       Q_COUNTERS              = 2,
+};
+
+struct bch_quota_counter {
+       __le64                  hardlimit;
+       __le64                  softlimit;
+};
+
+struct bch_quota {
+       struct bch_val          v;
+       struct bch_quota_counter c[Q_COUNTERS];
+} __packed __aligned(8);
+
+/* Erasure coding */
+
+struct bch_stripe {
+       struct bch_val          v;
+       __le16                  sectors;
+       __u8                    algorithm;
+       __u8                    nr_blocks;
+       __u8                    nr_redundant;
+
+       __u8                    csum_granularity_bits;
+       __u8                    csum_type;
+       __u8                    pad;
+
+       struct bch_extent_ptr   ptrs[];
+} __packed __aligned(8);
+
+/* Reflink: */
+
+struct bch_reflink_p {
+       struct bch_val          v;
+       __le64                  idx;
+       /*
+        * A reflink pointer might point to an indirect extent which is then
+        * later split (by copygc or rebalance). If we only pointed to part of
+        * the original indirect extent, and then one of the fragments is
+        * outside the range we point to, we'd leak a refcount: so when creating
+        * reflink pointers, we need to store pad values to remember the full
+        * range we were taking a reference on.
+        */
+       __le32                  front_pad;
+       __le32                  back_pad;
+} __packed __aligned(8);
+
+struct bch_reflink_v {
+       struct bch_val          v;
+       __le64                  refcount;
+       union bch_extent_entry  start[0];
+       __u64                   _data[];
+} __packed __aligned(8);
+
+struct bch_indirect_inline_data {
+       struct bch_val          v;
+       __le64                  refcount;
+       u8                      data[];
+};
+
+/* Inline data */
+
+struct bch_inline_data {
+       struct bch_val          v;
+       u8                      data[];
+};
+
+/* Subvolumes: */
+
+#define SUBVOL_POS_MIN         POS(0, 1)
+#define SUBVOL_POS_MAX         POS(0, S32_MAX)
+#define BCACHEFS_ROOT_SUBVOL   1
+
+struct bch_subvolume {
+       struct bch_val          v;
+       __le32                  flags;
+       __le32                  snapshot;
+       __le64                  inode;
+       /*
+        * Snapshot subvolumes form a tree, separate from the snapshot nodes
+        * tree - if this subvolume is a snapshot, this is the ID of the
+        * subvolume it was created from:
+        */
+       __le32                  parent;
+       __le32                  pad;
+       bch_le128               otime;
+};
+
+LE32_BITMASK(BCH_SUBVOLUME_RO,         struct bch_subvolume, flags,  0,  1)
+/*
+ * We need to know whether a subvolume is a snapshot so we can know whether we
+ * can delete it (or whether it should just be rm -rf'd)
+ */
+LE32_BITMASK(BCH_SUBVOLUME_SNAP,       struct bch_subvolume, flags,  1,  2)
+LE32_BITMASK(BCH_SUBVOLUME_UNLINKED,   struct bch_subvolume, flags,  2,  3)
+
+/* Snapshots */
+
+struct bch_snapshot {
+       struct bch_val          v;
+       __le32                  flags;
+       __le32                  parent;
+       __le32                  children[2];
+       __le32                  subvol;
+       /* corresponds to a bch_snapshot_tree in BTREE_ID_snapshot_trees */
+       __le32                  tree;
+       __le32                  depth;
+       __le32                  skip[3];
+};
+
+LE32_BITMASK(BCH_SNAPSHOT_DELETED,     struct bch_snapshot, flags,  0,  1)
+
+/* True if a subvolume points to this snapshot node: */
+LE32_BITMASK(BCH_SNAPSHOT_SUBVOL,      struct bch_snapshot, flags,  1,  2)
+
+/*
+ * Snapshot trees:
+ *
+ * The snapshot_trees btree gives us a persistent identifier for each tree of
+ * bch_snapshot nodes, and allows us to record and easily find the root/master
+ * subvolume that other snapshots were created from:
+ */
+struct bch_snapshot_tree {
+       struct bch_val          v;
+       __le32                  master_subvol;
+       __le32                  root_snapshot;
+};
+
+/* LRU btree: */
+
+struct bch_lru {
+       struct bch_val          v;
+       __le64                  idx;
+} __packed __aligned(8);
+
+#define LRU_ID_STRIPES         (1U << 16)
+
+/* Logged operations btree: */
+
+struct bch_logged_op_truncate {
+       struct bch_val          v;
+       __le32                  subvol;
+       __le32                  pad;
+       __le64                  inum;
+       __le64                  new_i_size;
+};
+
+enum logged_op_finsert_state {
+       LOGGED_OP_FINSERT_start,
+       LOGGED_OP_FINSERT_shift_extents,
+       LOGGED_OP_FINSERT_finish,
+};
+
+struct bch_logged_op_finsert {
+       struct bch_val          v;
+       __u8                    state;
+       __u8                    pad[3];
+       __le32                  subvol;
+       __le64                  inum;
+       __le64                  dst_offset;
+       __le64                  src_offset;
+       __le64                  pos;
+};
+
+/* Optional/variable size superblock sections: */
+
+struct bch_sb_field {
+       __u64                   _data[0];
+       __le32                  u64s;
+       __le32                  type;
+};
+
+#define BCH_SB_FIELDS()                                \
+       x(journal,                      0)      \
+       x(members_v1,                   1)      \
+       x(crypt,                        2)      \
+       x(replicas_v0,                  3)      \
+       x(quota,                        4)      \
+       x(disk_groups,                  5)      \
+       x(clean,                        6)      \
+       x(replicas,                     7)      \
+       x(journal_seq_blacklist,        8)      \
+       x(journal_v2,                   9)      \
+       x(counters,                     10)     \
+       x(members_v2,                   11)     \
+       x(errors,                       12)     \
+       x(ext,                          13)     \
+       x(downgrade,                    14)
+
+enum bch_sb_field_type {
+#define x(f, nr)       BCH_SB_FIELD_##f = nr,
+       BCH_SB_FIELDS()
+#undef x
+       BCH_SB_FIELD_NR
+};
+
+/*
+ * Most superblock fields are replicated in all devices' superblocks - a few are
+ * not:
+ */
+#define BCH_SINGLE_DEVICE_SB_FIELDS            \
+       ((1U << BCH_SB_FIELD_journal)|          \
+        (1U << BCH_SB_FIELD_journal_v2))
+
+/* BCH_SB_FIELD_journal: */
+
+struct bch_sb_field_journal {
+       struct bch_sb_field     field;
+       __le64                  buckets[];
+};
+
+struct bch_sb_field_journal_v2 {
+       struct bch_sb_field     field;
+
+       struct bch_sb_field_journal_v2_entry {
+               __le64          start;
+               __le64          nr;
+       }                       d[];
+};
+
+/* BCH_SB_FIELD_members_v1: */
+
+#define BCH_MIN_NR_NBUCKETS    (1 << 6)
+
+#define BCH_IOPS_MEASUREMENTS()                        \
+       x(seqread,      0)                      \
+       x(seqwrite,     1)                      \
+       x(randread,     2)                      \
+       x(randwrite,    3)
+
+enum bch_iops_measurement {
+#define x(t, n) BCH_IOPS_##t = n,
+       BCH_IOPS_MEASUREMENTS()
+#undef x
+       BCH_IOPS_NR
+};
+
+#define BCH_MEMBER_ERROR_TYPES()               \
+       x(read,         0)                      \
+       x(write,        1)                      \
+       x(checksum,     2)
+
+enum bch_member_error_type {
+#define x(t, n) BCH_MEMBER_ERROR_##t = n,
+       BCH_MEMBER_ERROR_TYPES()
+#undef x
+       BCH_MEMBER_ERROR_NR
+};
+
+struct bch_member {
+       __uuid_t                uuid;
+       __le64                  nbuckets;       /* device size */
+       __le16                  first_bucket;   /* index of first bucket used */
+       __le16                  bucket_size;    /* sectors */
+       __le32                  pad;
+       __le64                  last_mount;     /* time_t */
+
+       __le64                  flags;
+       __le32                  iops[4];
+       __le64                  errors[BCH_MEMBER_ERROR_NR];
+       __le64                  errors_at_reset[BCH_MEMBER_ERROR_NR];
+       __le64                  errors_reset_time;
+       __le64                  seq;
+};
+
+#define BCH_MEMBER_V1_BYTES    56
+
+LE64_BITMASK(BCH_MEMBER_STATE,         struct bch_member, flags,  0,  4)
+/* 4-14 unused, was TIER, HAS_(META)DATA, REPLACEMENT */
+LE64_BITMASK(BCH_MEMBER_DISCARD,       struct bch_member, flags, 14, 15)
+LE64_BITMASK(BCH_MEMBER_DATA_ALLOWED,  struct bch_member, flags, 15, 20)
+LE64_BITMASK(BCH_MEMBER_GROUP,         struct bch_member, flags, 20, 28)
+LE64_BITMASK(BCH_MEMBER_DURABILITY,    struct bch_member, flags, 28, 30)
+LE64_BITMASK(BCH_MEMBER_FREESPACE_INITIALIZED,
+                                       struct bch_member, flags, 30, 31)
+
+#if 0
+LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS,        struct bch_member, flags[1], 0,  20);
+LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
+#endif
+
+#define BCH_MEMBER_STATES()                    \
+       x(rw,           0)                      \
+       x(ro,           1)                      \
+       x(failed,       2)                      \
+       x(spare,        3)
+
+enum bch_member_state {
+#define x(t, n) BCH_MEMBER_STATE_##t = n,
+       BCH_MEMBER_STATES()
+#undef x
+       BCH_MEMBER_STATE_NR
+};
+
+struct bch_sb_field_members_v1 {
+       struct bch_sb_field     field;
+       struct bch_member       _members[]; // members are now variable size
+};
+
+struct bch_sb_field_members_v2 {
+       struct bch_sb_field     field;
+       __le16                  member_bytes; // size of a single member entry
+       u8                      pad[6];
+       struct bch_member       _members[];
+};
+
+/* BCH_SB_FIELD_crypt: */
+
+struct nonce {
+       __le32                  d[4];
+};
+
+struct bch_key {
+       __le64                  key[4];
+};
+
+#define BCH_KEY_MAGIC                                  \
+       (((__u64) 'b' <<  0)|((__u64) 'c' <<  8)|               \
+        ((__u64) 'h' << 16)|((__u64) '*' << 24)|               \
+        ((__u64) '*' << 32)|((__u64) 'k' << 40)|               \
+        ((__u64) 'e' << 48)|((__u64) 'y' << 56))
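+
+/* i.e. the ASCII bytes "bch**key" packed into a __u64, 'b' in the low byte */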
+
+struct bch_encrypted_key {
+       __le64                  magic;
+       struct bch_key          key;
+};
+
+/*
+ * If this field is present in the superblock, it stores an encryption key which
+ * is used to encrypt all other data/metadata. The key will normally be encrypted
+ * with the key userspace provides, but if encryption has been turned off we'll
+ * just store the master key unencrypted in the superblock so we can access the
+ * previously encrypted data.
+ */
+struct bch_sb_field_crypt {
+       struct bch_sb_field     field;
+
+       __le64                  flags;
+       __le64                  kdf_flags;
+       struct bch_encrypted_key key;
+};
+
+LE64_BITMASK(BCH_CRYPT_KDF_TYPE,       struct bch_sb_field_crypt, flags, 0, 4);
+
+enum bch_kdf_types {
+       BCH_KDF_SCRYPT          = 0,
+       BCH_KDF_NR              = 1,
+};
+
+/* stored as base 2 log of scrypt params: */
+LE64_BITMASK(BCH_KDF_SCRYPT_N, struct bch_sb_field_crypt, kdf_flags,  0, 16);
+LE64_BITMASK(BCH_KDF_SCRYPT_R, struct bch_sb_field_crypt, kdf_flags, 16, 32);
+LE64_BITMASK(BCH_KDF_SCRYPT_P, struct bch_sb_field_crypt, kdf_flags, 32, 48);
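+
+/* e.g. a stored BCH_KDF_SCRYPT_N of 15 means scrypt N = 1 << 15 = 32768 */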
+
+/* BCH_SB_FIELD_replicas: */
+
+#define BCH_DATA_TYPES()               \
+       x(free,         0)              \
+       x(sb,           1)              \
+       x(journal,      2)              \
+       x(btree,        3)              \
+       x(user,         4)              \
+       x(cached,       5)              \
+       x(parity,       6)              \
+       x(stripe,       7)              \
+       x(need_gc_gens, 8)              \
+       x(need_discard, 9)
+
+enum bch_data_type {
+#define x(t, n) BCH_DATA_##t,
+       BCH_DATA_TYPES()
+#undef x
+       BCH_DATA_NR
+};
+
+static inline bool data_type_is_empty(enum bch_data_type type)
+{
+       switch (type) {
+       case BCH_DATA_free:
+       case BCH_DATA_need_gc_gens:
+       case BCH_DATA_need_discard:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static inline bool data_type_is_hidden(enum bch_data_type type)
+{
+       switch (type) {
+       case BCH_DATA_sb:
+       case BCH_DATA_journal:
+               return true;
+       default:
+               return false;
+       }
+}
+
+struct bch_replicas_entry_v0 {
+       __u8                    data_type;
+       __u8                    nr_devs;
+       __u8                    devs[];
+} __packed;
+
+struct bch_sb_field_replicas_v0 {
+       struct bch_sb_field     field;
+       struct bch_replicas_entry_v0 entries[];
+} __packed __aligned(8);
+
+struct bch_replicas_entry_v1 {
+       __u8                    data_type;
+       __u8                    nr_devs;
+       __u8                    nr_required;
+       __u8                    devs[];
+} __packed;
+
+#define replicas_entry_bytes(_i)                                       \
+       (offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
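+
+/*
+ * e.g. a bch_replicas_entry_v1 with nr_devs = 2 occupies offsetof(devs) = 3
+ * header bytes plus 2 device indexes = 5 bytes.
+ */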
+
+struct bch_sb_field_replicas {
+       struct bch_sb_field     field;
+       struct bch_replicas_entry_v1 entries[];
+} __packed __aligned(8);
+
+/* BCH_SB_FIELD_quota: */
+
+struct bch_sb_quota_counter {
+       __le32                          timelimit;
+       __le32                          warnlimit;
+};
+
+struct bch_sb_quota_type {
+       __le64                          flags;
+       struct bch_sb_quota_counter     c[Q_COUNTERS];
+};
+
+struct bch_sb_field_quota {
+       struct bch_sb_field             field;
+       struct bch_sb_quota_type        q[QTYP_NR];
+} __packed __aligned(8);
+
+/* BCH_SB_FIELD_disk_groups: */
+
+#define BCH_SB_LABEL_SIZE              32
+
+struct bch_disk_group {
+       __u8                    label[BCH_SB_LABEL_SIZE];
+       __le64                  flags[2];
+} __packed __aligned(8);
+
+LE64_BITMASK(BCH_GROUP_DELETED,                struct bch_disk_group, flags[0], 0,  1)
+LE64_BITMASK(BCH_GROUP_DATA_ALLOWED,   struct bch_disk_group, flags[0], 1,  6)
+LE64_BITMASK(BCH_GROUP_PARENT,         struct bch_disk_group, flags[0], 6, 24)
+
+struct bch_sb_field_disk_groups {
+       struct bch_sb_field     field;
+       struct bch_disk_group   entries[];
+} __packed __aligned(8);
+
+/* BCH_SB_FIELD_counters */
+
+#define BCH_PERSISTENT_COUNTERS()                              \
+       x(io_read,                                      0)      \
+       x(io_write,                                     1)      \
+       x(io_move,                                      2)      \
+       x(bucket_invalidate,                            3)      \
+       x(bucket_discard,                               4)      \
+       x(bucket_alloc,                                 5)      \
+       x(bucket_alloc_fail,                            6)      \
+       x(btree_cache_scan,                             7)      \
+       x(btree_cache_reap,                             8)      \
+       x(btree_cache_cannibalize,                      9)      \
+       x(btree_cache_cannibalize_lock,                 10)     \
+       x(btree_cache_cannibalize_lock_fail,            11)     \
+       x(btree_cache_cannibalize_unlock,               12)     \
+       x(btree_node_write,                             13)     \
+       x(btree_node_read,                              14)     \
+       x(btree_node_compact,                           15)     \
+       x(btree_node_merge,                             16)     \
+       x(btree_node_split,                             17)     \
+       x(btree_node_rewrite,                           18)     \
+       x(btree_node_alloc,                             19)     \
+       x(btree_node_free,                              20)     \
+       x(btree_node_set_root,                          21)     \
+       x(btree_path_relock_fail,                       22)     \
+       x(btree_path_upgrade_fail,                      23)     \
+       x(btree_reserve_get_fail,                       24)     \
+       x(journal_entry_full,                           25)     \
+       x(journal_full,                                 26)     \
+       x(journal_reclaim_finish,                       27)     \
+       x(journal_reclaim_start,                        28)     \
+       x(journal_write,                                29)     \
+       x(read_promote,                                 30)     \
+       x(read_bounce,                                  31)     \
+       x(read_split,                                   33)     \
+       x(read_retry,                                   32)     \
+       x(read_reuse_race,                              34)     \
+       x(move_extent_read,                             35)     \
+       x(move_extent_write,                            36)     \
+       x(move_extent_finish,                           37)     \
+       x(move_extent_fail,                             38)     \
+       x(move_extent_start_fail,                       39)     \
+       x(copygc,                                       40)     \
+       x(copygc_wait,                                  41)     \
+       x(gc_gens_end,                                  42)     \
+       x(gc_gens_start,                                43)     \
+       x(trans_blocked_journal_reclaim,                44)     \
+       x(trans_restart_btree_node_reused,              45)     \
+       x(trans_restart_btree_node_split,               46)     \
+       x(trans_restart_fault_inject,                   47)     \
+       x(trans_restart_iter_upgrade,                   48)     \
+       x(trans_restart_journal_preres_get,             49)     \
+       x(trans_restart_journal_reclaim,                50)     \
+       x(trans_restart_journal_res_get,                51)     \
+       x(trans_restart_key_cache_key_realloced,        52)     \
+       x(trans_restart_key_cache_raced,                53)     \
+       x(trans_restart_mark_replicas,                  54)     \
+       x(trans_restart_mem_realloced,                  55)     \
+       x(trans_restart_memory_allocation_failure,      56)     \
+       x(trans_restart_relock,                         57)     \
+       x(trans_restart_relock_after_fill,              58)     \
+       x(trans_restart_relock_key_cache_fill,          59)     \
+       x(trans_restart_relock_next_node,               60)     \
+       x(trans_restart_relock_parent_for_fill,         61)     \
+       x(trans_restart_relock_path,                    62)     \
+       x(trans_restart_relock_path_intent,             63)     \
+       x(trans_restart_too_many_iters,                 64)     \
+       x(trans_restart_traverse,                       65)     \
+       x(trans_restart_upgrade,                        66)     \
+       x(trans_restart_would_deadlock,                 67)     \
+       x(trans_restart_would_deadlock_write,           68)     \
+       x(trans_restart_injected,                       69)     \
+       x(trans_restart_key_cache_upgrade,              70)     \
+       x(trans_traverse_all,                           71)     \
+       x(transaction_commit,                           72)     \
+       x(write_super,                                  73)     \
+       x(trans_restart_would_deadlock_recursion_limit, 74)     \
+       x(trans_restart_write_buffer_flush,             75)     \
+       x(trans_restart_split_race,                     76)     \
+       x(write_buffer_flush_slowpath,                  77)     \
+       x(write_buffer_flush_sync,                      78)
+
+enum bch_persistent_counters {
+#define x(t, n, ...) BCH_COUNTER_##t,
+       BCH_PERSISTENT_COUNTERS()
+#undef x
+       BCH_COUNTER_NR
+};
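+
+/*
+ * Note that this x() drops the numeric second argument, so enum values are
+ * assigned positionally; the numbers in BCH_PERSISTENT_COUNTERS() serve as
+ * documentation. The expansion begins:
+ *
+ *     BCH_COUNTER_io_read,       (== 0)
+ *     BCH_COUNTER_io_write,      (== 1)
+ *     ...
+ */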
+
+struct bch_sb_field_counters {
+       struct bch_sb_field     field;
+       __le64                  d[];
+};
+
+/*
+ * On clean shutdown, store btree roots and current journal sequence number in
+ * the superblock:
+ */
+struct jset_entry {
+       __le16                  u64s;
+       __u8                    btree_id;
+       __u8                    level;
+       __u8                    type; /* designates what this jset entry holds */
+       __u8                    pad[3];
+
+       struct bkey_i           start[0];
+       __u64                   _data[];
+};
+
+struct bch_sb_field_clean {
+       struct bch_sb_field     field;
+
+       __le32                  flags;
+       __le16                  _read_clock; /* no longer used */
+       __le16                  _write_clock;
+       __le64                  journal_seq;
+
+       struct jset_entry       start[0];
+       __u64                   _data[];
+};
+
+struct journal_seq_blacklist_entry {
+       __le64                  start;
+       __le64                  end;
+};
+
+struct bch_sb_field_journal_seq_blacklist {
+       struct bch_sb_field     field;
+       struct journal_seq_blacklist_entry start[];
+};
+
+struct bch_sb_field_errors {
+       struct bch_sb_field     field;
+       struct bch_sb_field_error_entry {
+               __le64          v;
+               __le64          last_error_time;
+       }                       entries[];
+};
+
+LE64_BITMASK(BCH_SB_ERROR_ENTRY_ID,    struct bch_sb_field_error_entry, v,  0, 16);
+LE64_BITMASK(BCH_SB_ERROR_ENTRY_NR,    struct bch_sb_field_error_entry, v, 16, 64);
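+
+/*
+ * LE64_BITMASK() generates inline accessors named after its first argument;
+ * usage sketch, for an entry e in entries[] above:
+ *
+ *     __u64 id = BCH_SB_ERROR_ENTRY_ID(&e);      (bits  0..15 of v)
+ *     SET_BCH_SB_ERROR_ENTRY_NR(&e, count);      (bits 16..63 of v)
+ */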
+
+struct bch_sb_field_ext {
+       struct bch_sb_field     field;
+       __le64                  recovery_passes_required[2];
+       __le64                  errors_silent[8];
+};
+
+struct bch_sb_field_downgrade_entry {
+       __le16                  version;
+       __le64                  recovery_passes[2];
+       __le16                  nr_errors;
+       __le16                  errors[] __counted_by(nr_errors);
+} __packed __aligned(2);
+
+struct bch_sb_field_downgrade {
+       struct bch_sb_field     field;
+       struct bch_sb_field_downgrade_entry entries[];
+};
+
+/* Superblock: */
+
+/*
+ * New versioning scheme:
+ * One common version number for all on disk data structures - superblock, btree
+ * nodes, journal entries
+ */
+#define BCH_VERSION_MAJOR(_v)          ((__u16) ((_v) >> 10))
+#define BCH_VERSION_MINOR(_v)          ((__u16) ((_v) & ~(~0U << 10)))
+#define BCH_VERSION(_major, _minor)    (((_major) << 10)|(_minor) << 0)
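+
+/*
+ * Worked example: BCH_VERSION(1, 4) == (1 << 10) | 4 == 1028;
+ * BCH_VERSION_MAJOR(1028) == 1, BCH_VERSION_MINOR(1028) == 4.
+ */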
+
+/*
+ * field 1:            version name
+ * field 2:            BCH_VERSION(major, minor)
+ * field 3:            recovery passes required on upgrade
+ */
+#define BCH_METADATA_VERSIONS()                                                \
+       x(bkey_renumber,                BCH_VERSION(0, 10))             \
+       x(inode_btree_change,           BCH_VERSION(0, 11))             \
+       x(snapshot,                     BCH_VERSION(0, 12))             \
+       x(inode_backpointers,           BCH_VERSION(0, 13))             \
+       x(btree_ptr_sectors_written,    BCH_VERSION(0, 14))             \
+       x(snapshot_2,                   BCH_VERSION(0, 15))             \
+       x(reflink_p_fix,                BCH_VERSION(0, 16))             \
+       x(subvol_dirent,                BCH_VERSION(0, 17))             \
+       x(inode_v2,                     BCH_VERSION(0, 18))             \
+       x(freespace,                    BCH_VERSION(0, 19))             \
+       x(alloc_v4,                     BCH_VERSION(0, 20))             \
+       x(new_data_types,               BCH_VERSION(0, 21))             \
+       x(backpointers,                 BCH_VERSION(0, 22))             \
+       x(inode_v3,                     BCH_VERSION(0, 23))             \
+       x(unwritten_extents,            BCH_VERSION(0, 24))             \
+       x(bucket_gens,                  BCH_VERSION(0, 25))             \
+       x(lru_v2,                       BCH_VERSION(0, 26))             \
+       x(fragmentation_lru,            BCH_VERSION(0, 27))             \
+       x(no_bps_in_alloc_keys,         BCH_VERSION(0, 28))             \
+       x(snapshot_trees,               BCH_VERSION(0, 29))             \
+       x(major_minor,                  BCH_VERSION(1,  0))             \
+       x(snapshot_skiplists,           BCH_VERSION(1,  1))             \
+       x(deleted_inodes,               BCH_VERSION(1,  2))             \
+       x(rebalance_work,               BCH_VERSION(1,  3))             \
+       x(member_seq,                   BCH_VERSION(1,  4))
+
+enum bcachefs_metadata_version {
+       bcachefs_metadata_version_min = 9,
+#define x(t, n)        bcachefs_metadata_version_##t = n,
+       BCH_METADATA_VERSIONS()
+#undef x
+       bcachefs_metadata_version_max
+};
+
+static const __maybe_unused
+unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metadata_version_rebalance_work;
+
+#define bcachefs_metadata_version_current      (bcachefs_metadata_version_max - 1)
+
+#define BCH_SB_SECTOR                  8
+#define BCH_SB_MEMBERS_MAX             64 /* XXX kill */
+
+struct bch_sb_layout {
+       __uuid_t                magic;  /* bcachefs superblock UUID */
+       __u8                    layout_type;
+       __u8                    sb_max_size_bits; /* log2 of max size, in 512 byte sectors */
+       __u8                    nr_superblocks;
+       __u8                    pad[5];
+       __le64                  sb_offset[61];
+} __packed __aligned(8);
+
+#define BCH_SB_LAYOUT_SECTOR   7
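+
+/*
+ * Sketch: every superblock copy can be located from the layout, which lives
+ * at BCH_SB_LAYOUT_SECTOR and is repeated inside each superblock
+ * (read_sb_at() is a hypothetical helper):
+ *
+ *     for (unsigned i = 0; i < layout->nr_superblocks; i++)
+ *             read_sb_at(__le64_to_cpu(layout->sb_offset[i]));
+ */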
+
+/*
+ * @offset     - sector where this sb was written
+ * @version    - on disk format version
+ * @version_min        - Oldest metadata version this filesystem contains; so we can
+ *               safely drop compatibility code and refuse to mount filesystems
+ *               we'd need it for
+ * @magic      - identifies as a bcachefs superblock (BCHFS_MAGIC)
+ * @seq                - identifies the most recent superblock; incremented each time
+ *               the superblock is written
+ * @uuid       - used for generating various magic numbers and identifying
+ *               member devices, never changes
+ * @user_uuid  - user visible UUID, may be changed
+ * @label      - filesystem label
+ * @features   - enabled incompatible features
+ */
+struct bch_sb {
+       struct bch_csum         csum;
+       __le16                  version;
+       __le16                  version_min;
+       __le16                  pad[2];
+       __uuid_t                magic;
+       __uuid_t                uuid;
+       __uuid_t                user_uuid;
+       __u8                    label[BCH_SB_LABEL_SIZE];
+       __le64                  offset;
+       __le64                  seq;
+
+       __le16                  block_size;
+       __u8                    dev_idx;
+       __u8                    nr_devices;
+       __le32                  u64s;
+
+       __le64                  time_base_lo;
+       __le32                  time_base_hi;
+       __le32                  time_precision;
+
+       __le64                  flags[7];
+       __le64                  write_time;
+       __le64                  features[2];
+       __le64                  compat[2];
+
+       struct bch_sb_layout    layout;
+
+       struct bch_sb_field     start[0];
+       __le64                  _data[];
+} __packed __aligned(8);
+
+/*
+ * Flags:
+ * BCH_SB_INITIALIZED  - set on first mount
+ * BCH_SB_CLEAN                - did we shut down cleanly? Just a hint, doesn't affect
+ *                       behaviour of mount/recovery path.
+ * BCH_SB_INODE_32BIT  - limit inode numbers to 32 bits
+ * BCH_SB_128_BIT_MACS - 128 bit MACs instead of 80
+ * BCH_SB_ENCRYPTION_TYPE - if nonzero, encryption is enabled; overrides
+ *                        DATA/META_CSUM_TYPE. Also indicates the encryption
+ *                        algorithm in use, if/when we get more than one
+ */
+
+LE16_BITMASK(BCH_SB_BLOCK_SIZE,                struct bch_sb, block_size, 0, 16);
+
+LE64_BITMASK(BCH_SB_INITIALIZED,       struct bch_sb, flags[0],  0,  1);
+LE64_BITMASK(BCH_SB_CLEAN,             struct bch_sb, flags[0],  1,  2);
+LE64_BITMASK(BCH_SB_CSUM_TYPE,         struct bch_sb, flags[0],  2,  8);
+LE64_BITMASK(BCH_SB_ERROR_ACTION,      struct bch_sb, flags[0],  8, 12);
+
+LE64_BITMASK(BCH_SB_BTREE_NODE_SIZE,   struct bch_sb, flags[0], 12, 28);
+
+LE64_BITMASK(BCH_SB_GC_RESERVE,                struct bch_sb, flags[0], 28, 33);
+LE64_BITMASK(BCH_SB_ROOT_RESERVE,      struct bch_sb, flags[0], 33, 40);
+
+LE64_BITMASK(BCH_SB_META_CSUM_TYPE,    struct bch_sb, flags[0], 40, 44);
+LE64_BITMASK(BCH_SB_DATA_CSUM_TYPE,    struct bch_sb, flags[0], 44, 48);
+
+LE64_BITMASK(BCH_SB_META_REPLICAS_WANT,        struct bch_sb, flags[0], 48, 52);
+LE64_BITMASK(BCH_SB_DATA_REPLICAS_WANT,        struct bch_sb, flags[0], 52, 56);
+
+LE64_BITMASK(BCH_SB_POSIX_ACL,         struct bch_sb, flags[0], 56, 57);
+LE64_BITMASK(BCH_SB_USRQUOTA,          struct bch_sb, flags[0], 57, 58);
+LE64_BITMASK(BCH_SB_GRPQUOTA,          struct bch_sb, flags[0], 58, 59);
+LE64_BITMASK(BCH_SB_PRJQUOTA,          struct bch_sb, flags[0], 59, 60);
+
+LE64_BITMASK(BCH_SB_HAS_ERRORS,                struct bch_sb, flags[0], 60, 61);
+LE64_BITMASK(BCH_SB_HAS_TOPOLOGY_ERRORS,struct bch_sb, flags[0], 61, 62);
+
+LE64_BITMASK(BCH_SB_BIG_ENDIAN,                struct bch_sb, flags[0], 62, 63);
+
+LE64_BITMASK(BCH_SB_STR_HASH_TYPE,     struct bch_sb, flags[1],  0,  4);
+LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_LO,struct bch_sb, flags[1],  4,  8);
+LE64_BITMASK(BCH_SB_INODE_32BIT,       struct bch_sb, flags[1],  8,  9);
+
+LE64_BITMASK(BCH_SB_128_BIT_MACS,      struct bch_sb, flags[1],  9, 10);
+LE64_BITMASK(BCH_SB_ENCRYPTION_TYPE,   struct bch_sb, flags[1], 10, 14);
+
+/*
+ * Max size of an extent that may require bouncing to read or write
+ * (checksummed, compressed): 64k
+ */
+LE64_BITMASK(BCH_SB_ENCODED_EXTENT_MAX_BITS,
+                                       struct bch_sb, flags[1], 14, 20);
+
+LE64_BITMASK(BCH_SB_META_REPLICAS_REQ, struct bch_sb, flags[1], 20, 24);
+LE64_BITMASK(BCH_SB_DATA_REPLICAS_REQ, struct bch_sb, flags[1], 24, 28);
+
+LE64_BITMASK(BCH_SB_PROMOTE_TARGET,    struct bch_sb, flags[1], 28, 40);
+LE64_BITMASK(BCH_SB_FOREGROUND_TARGET, struct bch_sb, flags[1], 40, 52);
+LE64_BITMASK(BCH_SB_BACKGROUND_TARGET, struct bch_sb, flags[1], 52, 64);
+
+LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO,
+                                       struct bch_sb, flags[2],  0,  4);
+LE64_BITMASK(BCH_SB_GC_RESERVE_BYTES,  struct bch_sb, flags[2],  4, 64);
+
+LE64_BITMASK(BCH_SB_ERASURE_CODE,      struct bch_sb, flags[3],  0, 16);
+LE64_BITMASK(BCH_SB_METADATA_TARGET,   struct bch_sb, flags[3], 16, 28);
+LE64_BITMASK(BCH_SB_SHARD_INUMS,       struct bch_sb, flags[3], 28, 29);
+LE64_BITMASK(BCH_SB_INODES_USE_KEY_CACHE,struct bch_sb, flags[3], 29, 30);
+LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DELAY,struct bch_sb, flags[3], 30, 62);
+LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DISABLED,struct bch_sb, flags[3], 62, 63);
+LE64_BITMASK(BCH_SB_JOURNAL_RECLAIM_DELAY,struct bch_sb, flags[4], 0, 32);
+LE64_BITMASK(BCH_SB_JOURNAL_TRANSACTION_NAMES,struct bch_sb, flags[4], 32, 33);
+LE64_BITMASK(BCH_SB_NOCOW,             struct bch_sb, flags[4], 33, 34);
+LE64_BITMASK(BCH_SB_WRITE_BUFFER_SIZE, struct bch_sb, flags[4], 34, 54);
+LE64_BITMASK(BCH_SB_VERSION_UPGRADE,   struct bch_sb, flags[4], 54, 56);
+
+LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_HI,struct bch_sb, flags[4], 56, 60);
+LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI,
+                                       struct bch_sb, flags[4], 60, 64);
+
+LE64_BITMASK(BCH_SB_VERSION_UPGRADE_COMPLETE,
+                                       struct bch_sb, flags[5],  0, 16);
+
+static inline __u64 BCH_SB_COMPRESSION_TYPE(const struct bch_sb *sb)
+{
+       return BCH_SB_COMPRESSION_TYPE_LO(sb) | (BCH_SB_COMPRESSION_TYPE_HI(sb) << 4);
+}
+
+static inline void SET_BCH_SB_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
+{
+       SET_BCH_SB_COMPRESSION_TYPE_LO(sb, v);
+       SET_BCH_SB_COMPRESSION_TYPE_HI(sb, v >> 4);
+}
+
+static inline __u64 BCH_SB_BACKGROUND_COMPRESSION_TYPE(const struct bch_sb *sb)
+{
+       return BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb) |
+               (BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb) << 4);
+}
+
+static inline void SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
+{
+       SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb, v);
+       SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb, v >> 4);
+}
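+
+/*
+ * The 4 bit _LO fields appear to predate the _HI extension; splitting the
+ * value across flag words keeps older superblocks readable. Usage sketch:
+ *
+ *     SET_BCH_SB_COMPRESSION_TYPE(sb, 0x12);     (stores LO = 0x2, HI = 0x1)
+ *     BCH_SB_COMPRESSION_TYPE(sb);               (reads back 0x12)
+ */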
+
+/*
+ * Features:
+ *
+ * journal_seq_blacklist_v3:   gates BCH_SB_FIELD_journal_seq_blacklist
+ * reflink:                    gates KEY_TYPE_reflink
+ * inline_data:                        gates KEY_TYPE_inline_data
+ * new_siphash:                        gates BCH_STR_HASH_siphash
+ * new_extent_overwrite:       gates BTREE_NODE_NEW_EXTENT_OVERWRITE
+ */
+#define BCH_SB_FEATURES()                      \
+       x(lz4,                          0)      \
+       x(gzip,                         1)      \
+       x(zstd,                         2)      \
+       x(atomic_nlink,                 3)      \
+       x(ec,                           4)      \
+       x(journal_seq_blacklist_v3,     5)      \
+       x(reflink,                      6)      \
+       x(new_siphash,                  7)      \
+       x(inline_data,                  8)      \
+       x(new_extent_overwrite,         9)      \
+       x(incompressible,               10)     \
+       x(btree_ptr_v2,                 11)     \
+       x(extents_above_btree_updates,  12)     \
+       x(btree_updates_journalled,     13)     \
+       x(reflink_inline_data,          14)     \
+       x(new_varint,                   15)     \
+       x(journal_no_flush,             16)     \
+       x(alloc_v2,                     17)     \
+       x(extents_across_btree_nodes,   18)
+
+#define BCH_SB_FEATURES_ALWAYS                         \
+       ((1ULL << BCH_FEATURE_new_extent_overwrite)|    \
+        (1ULL << BCH_FEATURE_extents_above_btree_updates)|\
+        (1ULL << BCH_FEATURE_btree_updates_journalled)|\
+        (1ULL << BCH_FEATURE_alloc_v2)|\
+        (1ULL << BCH_FEATURE_extents_across_btree_nodes))
+
+#define BCH_SB_FEATURES_ALL                            \
+       (BCH_SB_FEATURES_ALWAYS|                        \
+        (1ULL << BCH_FEATURE_new_siphash)|             \
+        (1ULL << BCH_FEATURE_btree_ptr_v2)|            \
+        (1ULL << BCH_FEATURE_new_varint)|              \
+        (1ULL << BCH_FEATURE_journal_no_flush))
+
+enum bch_sb_feature {
+#define x(f, n) BCH_FEATURE_##f,
+       BCH_SB_FEATURES()
+#undef x
+       BCH_FEATURE_NR,
+};
+
+#define BCH_SB_COMPAT()                                        \
+       x(alloc_info,                           0)      \
+       x(alloc_metadata,                       1)      \
+       x(extents_above_btree_updates_done,     2)      \
+       x(bformat_overflow_done,                3)
+
+enum bch_sb_compat {
+#define x(f, n) BCH_COMPAT_##f,
+       BCH_SB_COMPAT()
+#undef x
+       BCH_COMPAT_NR,
+};
+
+/* options: */
+
+#define BCH_VERSION_UPGRADE_OPTS()     \
+       x(compatible,           0)      \
+       x(incompatible,         1)      \
+       x(none,                 2)
+
+enum bch_version_upgrade_opts {
+#define x(t, n) BCH_VERSION_UPGRADE_##t = n,
+       BCH_VERSION_UPGRADE_OPTS()
+#undef x
+};
+
+#define BCH_REPLICAS_MAX               4U
+
+#define BCH_BKEY_PTRS_MAX              16U
+
+#define BCH_ERROR_ACTIONS()            \
+       x(continue,             0)      \
+       x(ro,                   1)      \
+       x(panic,                2)
+
+enum bch_error_actions {
+#define x(t, n) BCH_ON_ERROR_##t = n,
+       BCH_ERROR_ACTIONS()
+#undef x
+       BCH_ON_ERROR_NR
+};
+
+#define BCH_STR_HASH_TYPES()           \
+       x(crc32c,               0)      \
+       x(crc64,                1)      \
+       x(siphash_old,          2)      \
+       x(siphash,              3)
+
+enum bch_str_hash_type {
+#define x(t, n) BCH_STR_HASH_##t = n,
+       BCH_STR_HASH_TYPES()
+#undef x
+       BCH_STR_HASH_NR
+};
+
+#define BCH_STR_HASH_OPTS()            \
+       x(crc32c,               0)      \
+       x(crc64,                1)      \
+       x(siphash,              2)
+
+enum bch_str_hash_opts {
+#define x(t, n) BCH_STR_HASH_OPT_##t = n,
+       BCH_STR_HASH_OPTS()
+#undef x
+       BCH_STR_HASH_OPT_NR
+};
+
+#define BCH_CSUM_TYPES()                       \
+       x(none,                         0)      \
+       x(crc32c_nonzero,               1)      \
+       x(crc64_nonzero,                2)      \
+       x(chacha20_poly1305_80,         3)      \
+       x(chacha20_poly1305_128,        4)      \
+       x(crc32c,                       5)      \
+       x(crc64,                        6)      \
+       x(xxhash,                       7)
+
+enum bch_csum_type {
+#define x(t, n) BCH_CSUM_##t = n,
+       BCH_CSUM_TYPES()
+#undef x
+       BCH_CSUM_NR
+};
+
+static const __maybe_unused unsigned bch_crc_bytes[] = {
+       [BCH_CSUM_none]                         = 0,
+       [BCH_CSUM_crc32c_nonzero]               = 4,
+       [BCH_CSUM_crc32c]                       = 4,
+       [BCH_CSUM_crc64_nonzero]                = 8,
+       [BCH_CSUM_crc64]                        = 8,
+       [BCH_CSUM_xxhash]                       = 8,
+       [BCH_CSUM_chacha20_poly1305_80]         = 10,
+       [BCH_CSUM_chacha20_poly1305_128]        = 16,
+};
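+
+/*
+ * Usage sketch: how many bytes of a checksum a given type actually uses,
+ * e.g. for the superblock's own checksum:
+ *
+ *     unsigned csum_bytes = bch_crc_bytes[BCH_SB_CSUM_TYPE(sb)];
+ */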
+
+static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type)
+{
+       switch (type) {
+       case BCH_CSUM_chacha20_poly1305_80:
+       case BCH_CSUM_chacha20_poly1305_128:
+               return true;
+       default:
+               return false;
+       }
+}
+
+#define BCH_CSUM_OPTS()                        \
+       x(none,                 0)      \
+       x(crc32c,               1)      \
+       x(crc64,                2)      \
+       x(xxhash,               3)
+
+enum bch_csum_opts {
+#define x(t, n) BCH_CSUM_OPT_##t = n,
+       BCH_CSUM_OPTS()
+#undef x
+       BCH_CSUM_OPT_NR
+};
+
+#define BCH_COMPRESSION_TYPES()                \
+       x(none,                 0)      \
+       x(lz4_old,              1)      \
+       x(gzip,                 2)      \
+       x(lz4,                  3)      \
+       x(zstd,                 4)      \
+       x(incompressible,       5)
+
+enum bch_compression_type {
+#define x(t, n) BCH_COMPRESSION_TYPE_##t = n,
+       BCH_COMPRESSION_TYPES()
+#undef x
+       BCH_COMPRESSION_TYPE_NR
+};
+
+#define BCH_COMPRESSION_OPTS()         \
+       x(none,         0)              \
+       x(lz4,          1)              \
+       x(gzip,         2)              \
+       x(zstd,         3)
+
+enum bch_compression_opts {
+#define x(t, n) BCH_COMPRESSION_OPT_##t = n,
+       BCH_COMPRESSION_OPTS()
+#undef x
+       BCH_COMPRESSION_OPT_NR
+};
+
+/*
+ * Magic numbers
+ *
+ * The various other data structures have their own magic numbers, which are
+ * xored with the first part of the filesystem's UUID
+ */
+
+#define BCACHE_MAGIC                                                   \
+       UUID_INIT(0xc68573f6, 0x4e1a, 0x45ca,                           \
+                 0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)
+#define BCHFS_MAGIC                                                    \
+       UUID_INIT(0xc68573f6, 0x66ce, 0x90a9,                           \
+                 0xd9, 0x6a, 0x60, 0xcf, 0x80, 0x3d, 0xf7, 0xef)
+
+#define BCACHEFS_STATFS_MAGIC          0xca451a4e
+
+#define JSET_MAGIC             __cpu_to_le64(0x245235c1a3625032ULL)
+#define BSET_MAGIC             __cpu_to_le64(0x90135c78b99e07f5ULL)
+
+static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
+{
+       __le64 ret;
+
+       memcpy(&ret, &sb->uuid, sizeof(ret));
+       return ret;
+}
+
+static inline __u64 __jset_magic(struct bch_sb *sb)
+{
+       return __le64_to_cpu(__bch2_sb_magic(sb) ^ JSET_MAGIC);
+}
+
+static inline __u64 __bset_magic(struct bch_sb *sb)
+{
+       return __le64_to_cpu(__bch2_sb_magic(sb) ^ BSET_MAGIC);
+}
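+
+/*
+ * Sketch (not the upstream helper): since per-structure magics are the
+ * filesystem magic xored with a constant, verifying that a journal set
+ * belongs to this filesystem looks like:
+ *
+ *     if (__le64_to_cpu(j->magic) != __jset_magic(sb))
+ *             return -EINVAL;
+ */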
+
+/* Journal */
+
+#define JSET_KEYS_U64s (sizeof(struct jset_entry) / sizeof(__u64))
+
+#define BCH_JSET_ENTRY_TYPES()                 \
+       x(btree_keys,           0)              \
+       x(btree_root,           1)              \
+       x(prio_ptrs,            2)              \
+       x(blacklist,            3)              \
+       x(blacklist_v2,         4)              \
+       x(usage,                5)              \
+       x(data_usage,           6)              \
+       x(clock,                7)              \
+       x(dev_usage,            8)              \
+       x(log,                  9)              \
+       x(overwrite,            10)             \
+       x(write_buffer_keys,    11)
+
+enum {
+#define x(f, nr)       BCH_JSET_ENTRY_##f      = nr,
+       BCH_JSET_ENTRY_TYPES()
+#undef x
+       BCH_JSET_ENTRY_NR
+};
+
+static inline bool jset_entry_is_key(struct jset_entry *e)
+{
+       switch (e->type) {
+       case BCH_JSET_ENTRY_btree_keys:
+       case BCH_JSET_ENTRY_btree_root:
+       case BCH_JSET_ENTRY_overwrite:
+       case BCH_JSET_ENTRY_write_buffer_keys:
+               return true;
+       }
+
+       return false;
+}
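+
+/*
+ * Sketch (upstream iterates with the vstruct helpers): a jset_entry's u64s
+ * field counts only its payload, so its full footprint is the header plus
+ * u64s __u64s, and stepping to the next entry looks like:
+ */
+static inline struct jset_entry *jset_entry_next_example(struct jset_entry *e)
+{
+       return (struct jset_entry *) ((__u64 *) e + JSET_KEYS_U64s +
+                                     __le16_to_cpu(e->u64s));
+}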
+
+/*
+ * Journal sequence numbers can be blacklisted: bsets record the max sequence
+ * number of all the journal entries they contain updates for, so that on
+ * recovery we can ignore those bsets that contain index updates newer than what
+ * made it into the journal.
+ *
+ * This means that we can't reuse that journal_seq - we have to skip it, and
+ * then record that we skipped it so that the next time we crash and recover we
+ * don't think there was a missing journal entry.
+ */
+struct jset_entry_blacklist {
+       struct jset_entry       entry;
+       __le64                  seq;
+};
+
+struct jset_entry_blacklist_v2 {
+       struct jset_entry       entry;
+       __le64                  start;
+       __le64                  end;
+};
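+
+/*
+ * Sketch: on recovery, a bset whose journal_seq falls within any blacklisted
+ * [start, end] range is ignored:
+ *
+ *     bool blacklisted = seq >= __le64_to_cpu(bl->start) &&
+ *                        seq <= __le64_to_cpu(bl->end);
+ */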
+
+#define BCH_FS_USAGE_TYPES()                   \
+       x(reserved,             0)              \
+       x(inodes,               1)              \
+       x(key_version,          2)
+
+enum {
+#define x(f, nr)       BCH_FS_USAGE_##f        = nr,
+       BCH_FS_USAGE_TYPES()
+#undef x
+       BCH_FS_USAGE_NR
+};
+
+struct jset_entry_usage {
+       struct jset_entry       entry;
+       __le64                  v;
+} __packed;
+
+struct jset_entry_data_usage {
+       struct jset_entry       entry;
+       __le64                  v;
+       struct bch_replicas_entry_v1 r;
+} __packed;
+
+struct jset_entry_clock {
+       struct jset_entry       entry;
+       __u8                    rw;
+       __u8                    pad[7];
+       __le64                  time;
+} __packed;
+
+struct jset_entry_dev_usage_type {
+       __le64                  buckets;
+       __le64                  sectors;
+       __le64                  fragmented;
+} __packed;
+
+struct jset_entry_dev_usage {
+       struct jset_entry       entry;
+       __le32                  dev;
+       __u32                   pad;
+
+       __le64                  _buckets_ec;            /* No longer used */
+       __le64                  _buckets_unavailable;   /* No longer used */
+
+       struct jset_entry_dev_usage_type d[];
+};
+
+static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u)
+{
+       return (vstruct_bytes(&u->entry) - sizeof(struct jset_entry_dev_usage)) /
+               sizeof(struct jset_entry_dev_usage_type);
+}
+
+struct jset_entry_log {
+       struct jset_entry       entry;
+       __u8                    d[];
+} __packed __aligned(8);
+
+/*
+ * On disk format for a journal entry:
+ * seq is monotonically increasing; every journal entry has its own unique
+ * sequence number.
+ *
+ * last_seq is the oldest journal entry that still has keys the btree hasn't
+ * flushed to disk yet.
+ *
+ * version is for on disk format changes.
+ */
+struct jset {
+       struct bch_csum         csum;
+
+       __le64                  magic;
+       __le64                  seq;
+       __le32                  version;
+       __le32                  flags;
+
+       __le32                  u64s; /* size of _data[] in u64s */
+
+       __u8                    encrypted_start[0];
+
+       __le16                  _read_clock; /* no longer used */
+       __le16                  _write_clock;
+
+       /* Sequence number of oldest dirty journal entry */
+       __le64                  last_seq;
+
+       struct jset_entry       start[0];
+       __u64                   _data[];
+} __packed __aligned(8);
+
+LE32_BITMASK(JSET_CSUM_TYPE,   struct jset, flags, 0, 4);
+LE32_BITMASK(JSET_BIG_ENDIAN,  struct jset, flags, 4, 5);
+LE32_BITMASK(JSET_NO_FLUSH,    struct jset, flags, 5, 6);
+
+#define BCH_JOURNAL_BUCKETS_MIN                8
+
+/* Btree: */
+
+enum btree_id_flags {
+       BTREE_ID_EXTENTS        = BIT(0),
+       BTREE_ID_SNAPSHOTS      = BIT(1),
+       BTREE_ID_SNAPSHOT_FIELD = BIT(2),
+       BTREE_ID_DATA           = BIT(3),
+};
+
+#define BCH_BTREE_IDS()                                                                \
+       x(extents,              0,      BTREE_ID_EXTENTS|BTREE_ID_SNAPSHOTS|BTREE_ID_DATA,\
+         BIT_ULL(KEY_TYPE_whiteout)|                                           \
+         BIT_ULL(KEY_TYPE_error)|                                              \
+         BIT_ULL(KEY_TYPE_cookie)|                                             \
+         BIT_ULL(KEY_TYPE_extent)|                                             \
+         BIT_ULL(KEY_TYPE_reservation)|                                        \
+         BIT_ULL(KEY_TYPE_reflink_p)|                                          \
+         BIT_ULL(KEY_TYPE_inline_data))                                        \
+       x(inodes,               1,      BTREE_ID_SNAPSHOTS,                     \
+         BIT_ULL(KEY_TYPE_whiteout)|                                           \
+         BIT_ULL(KEY_TYPE_inode)|                                              \
+         BIT_ULL(KEY_TYPE_inode_v2)|                                           \
+         BIT_ULL(KEY_TYPE_inode_v3)|                                           \
+         BIT_ULL(KEY_TYPE_inode_generation))                                   \
+       x(dirents,              2,      BTREE_ID_SNAPSHOTS,                     \
+         BIT_ULL(KEY_TYPE_whiteout)|                                           \
+         BIT_ULL(KEY_TYPE_hash_whiteout)|                                      \
+         BIT_ULL(KEY_TYPE_dirent))                                             \
+       x(xattrs,               3,      BTREE_ID_SNAPSHOTS,                     \
+         BIT_ULL(KEY_TYPE_whiteout)|                                           \
+         BIT_ULL(KEY_TYPE_cookie)|                                             \
+         BIT_ULL(KEY_TYPE_hash_whiteout)|                                      \
+         BIT_ULL(KEY_TYPE_xattr))                                              \
+       x(alloc,                4,      0,                                      \
+         BIT_ULL(KEY_TYPE_alloc)|                                              \
+         BIT_ULL(KEY_TYPE_alloc_v2)|                                           \
+         BIT_ULL(KEY_TYPE_alloc_v3)|                                           \
+         BIT_ULL(KEY_TYPE_alloc_v4))                                           \
+       x(quotas,               5,      0,                                      \
+         BIT_ULL(KEY_TYPE_quota))                                              \
+       x(stripes,              6,      0,                                      \
+         BIT_ULL(KEY_TYPE_stripe))                                             \
+       x(reflink,              7,      BTREE_ID_EXTENTS|BTREE_ID_DATA,         \
+         BIT_ULL(KEY_TYPE_reflink_v)|                                          \
+         BIT_ULL(KEY_TYPE_indirect_inline_data))                               \
+       x(subvolumes,           8,      0,                                      \
+         BIT_ULL(KEY_TYPE_subvolume))                                          \
+       x(snapshots,            9,      0,                                      \
+         BIT_ULL(KEY_TYPE_snapshot))                                           \
+       x(lru,                  10,     0,                                      \
+         BIT_ULL(KEY_TYPE_set))                                                \
+       x(freespace,            11,     BTREE_ID_EXTENTS,                       \
+         BIT_ULL(KEY_TYPE_set))                                                \
+       x(need_discard,         12,     0,                                      \
+         BIT_ULL(KEY_TYPE_set))                                                \
+       x(backpointers,         13,     0,                                      \
+         BIT_ULL(KEY_TYPE_backpointer))                                        \
+       x(bucket_gens,          14,     0,                                      \
+         BIT_ULL(KEY_TYPE_bucket_gens))                                        \
+       x(snapshot_trees,       15,     0,                                      \
+         BIT_ULL(KEY_TYPE_snapshot_tree))                                      \
+       x(deleted_inodes,       16,     BTREE_ID_SNAPSHOT_FIELD,                \
+         BIT_ULL(KEY_TYPE_set))                                                \
+       x(logged_ops,           17,     0,                                      \
+         BIT_ULL(KEY_TYPE_logged_op_truncate)|                                 \
+         BIT_ULL(KEY_TYPE_logged_op_finsert))                                  \
+       x(rebalance_work,       18,     BTREE_ID_SNAPSHOT_FIELD,                \
+         BIT_ULL(KEY_TYPE_set)|BIT_ULL(KEY_TYPE_cookie))
+
+enum btree_id {
+#define x(name, nr, ...) BTREE_ID_##name = nr,
+       BCH_BTREE_IDS()
+#undef x
+       BTREE_ID_NR
+};
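+
+/*
+ * Illustration (hypothetical table, not upstream): the extra x() arguments -
+ * per-btree flags and a mask of legal key types - can be bound by a
+ * different x() to build lookup tables:
+ *
+ *     static const unsigned btree_id_flags_example[] = {
+ *     #define x(name, nr, flags, keys) [nr] = flags,
+ *             BCH_BTREE_IDS()
+ *     #undef x
+ *     };
+ */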
+
+#define BTREE_MAX_DEPTH                4U
+
+/* Btree nodes */
+
+/*
+ * On disk a btree node is a list/log of these bsets; within each bset the
+ * keys are sorted
+ */
+struct bset {
+       __le64                  seq;
+
+       /*
+        * Highest journal entry this bset contains keys for.
+        * If on recovery we don't see that journal entry, this bset is ignored:
+        * this allows us to preserve the order of all index updates after a
+        * crash, since the journal records a total order of all index updates
+        * and anything that didn't make it to the journal doesn't get used.
+        */
+       __le64                  journal_seq;
+
+       __le32                  flags;
+       __le16                  version;
+       __le16                  u64s; /* count of _data[] in u64s */
+
+       struct bkey_packed      start[0];
+       __u64                   _data[];
+} __packed __aligned(8);
+
+LE32_BITMASK(BSET_CSUM_TYPE,   struct bset, flags, 0, 4);
+
+LE32_BITMASK(BSET_BIG_ENDIAN,  struct bset, flags, 4, 5);
+LE32_BITMASK(BSET_SEPARATE_WHITEOUTS,
+                               struct bset, flags, 5, 6);
+
+/* Sector offset within the btree node: */
+LE32_BITMASK(BSET_OFFSET,      struct bset, flags, 16, 32);
+
+struct btree_node {
+       struct bch_csum         csum;
+       __le64                  magic;
+
+       /* this flags field is encrypted, unlike bset->flags: */
+       __le64                  flags;
+
+       /* Closed interval: */
+       struct bpos             min_key;
+       struct bpos             max_key;
+       struct bch_extent_ptr   _ptr; /* not used anymore */
+       struct bkey_format      format;
+
+       union {
+       struct bset             keys;
+       struct {
+               __u8            pad[22];
+               __le16          u64s;
+               __u64           _data[0];
+       };
+       };
+} __packed __aligned(8);
+
+LE64_BITMASK(BTREE_NODE_ID_LO, struct btree_node, flags,  0,  4);
+LE64_BITMASK(BTREE_NODE_LEVEL, struct btree_node, flags,  4,  8);
+LE64_BITMASK(BTREE_NODE_NEW_EXTENT_OVERWRITE,
+                               struct btree_node, flags,  8,  9);
+LE64_BITMASK(BTREE_NODE_ID_HI, struct btree_node, flags,  9, 25);
+/* 25-32 unused */
+LE64_BITMASK(BTREE_NODE_SEQ,   struct btree_node, flags, 32, 64);
+
+static inline __u64 BTREE_NODE_ID(struct btree_node *n)
+{
+       return BTREE_NODE_ID_LO(n) | (BTREE_NODE_ID_HI(n) << 4);
+}
+
+static inline void SET_BTREE_NODE_ID(struct btree_node *n, __u64 v)
+{
+       SET_BTREE_NODE_ID_LO(n, v);
+       SET_BTREE_NODE_ID_HI(n, v >> 4);
+}
+
+struct btree_node_entry {
+       struct bch_csum         csum;
+
+       union {
+       struct bset             keys;
+       struct {
+               __u8            pad[22];
+               __le16          u64s;
+               __u64           _data[0];
+       };
+       };
+} __packed __aligned(8);
+
+#endif /* _BCACHEFS_FORMAT_H */