]> git.sesse.net Git - bcachefs-tools-debian/blobdiff - libbcachefs/bcachefs_format.h
Update bcachefs sources to 18686af684 bcachefs: Inode backpointers
[bcachefs-tools-debian] / libbcachefs / bcachefs_format.h
index 48d14a30e03e7ed81b1c51a00e59f0c576bc3d3c..cb22595161a591a65b0418f0fa204ff9ce203bcb 100644 (file)
@@ -1,12 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _BCACHEFS_FORMAT_H
 #define _BCACHEFS_FORMAT_H
 
 /*
  * bcachefs on disk data structures
+ *
+ * OVERVIEW:
+ *
+ * There are three main types of on disk data structures in bcachefs (this is
+ * reduced from 5 in bcache)
+ *
+ *  - superblock
+ *  - journal
+ *  - btree
+ *
+ * The btree is the primary structure; most metadata exists as keys in the
+ * various btrees. There are only a small number of btrees, they're not
+ * sharded - we have one btree for extents, another for inodes, et cetera.
+ *
+ * SUPERBLOCK:
+ *
+ * The superblock contains the location of the journal, the list of devices in
+ * the filesystem, and in general any metadata we need prior to reading the
+ * journal/btree roots: everything required to decide whether we can start the
+ * filesystem at all.
+ *
+ * The superblock is extensible, and most of the contents of the superblock are
+ * in variable length, type tagged fields; see struct bch_sb_field.
+ *
+ * Backup superblocks do not reside in a fixed location; also, superblocks do
+ * not have a fixed size. To locate backup superblocks we have struct
+ * bch_sb_layout; we store a copy of this inside every superblock, and also
+ * before the first superblock.
+ *
+ * JOURNAL:
+ *
+ * The journal primarily records btree updates in the order they occurred;
+ * journal replay consists of just iterating over all the keys in the open
+ * journal entries and re-inserting them into the btrees.
+ *
+ * The journal also contains entry types for the btree roots, and blacklisted
+ * journal sequence numbers (see journal_seq_blacklist.c).
+ *
+ * BTREE:
+ *
+ * bcachefs btrees are copy on write b+ trees, where nodes are big (typically
+ * 128k-256k) and log structured. We use struct btree_node for writing the first
+ * entry in a given node (offset 0), and struct btree_node_entry for all
+ * subsequent writes.
+ *
+ * After the header, btree node entries contain a list of keys in sorted order.
+ * Values are stored inline with the keys; since values are variable length (and
+ * keys effectively are variable length too, due to packing) we can't do random
+ * access without building up additional in memory tables in the btree node read
+ * path.
+ *
+ * BTREE KEYS (struct bkey):
+ *
+ * The various btrees share a common format for the key - so as to avoid
+ * switching in fastpath lookup/comparison code - but define their own
+ * structures for the key values.
+ *
+ * The size of a key/value pair is stored as a u8 in units of u64s, so the max
+ * size is just under 2k. The common part also contains a type tag for the
+ * value, and a format field indicating whether the key is packed or not (and
+ * also meant to allow adding new key fields in the future, if desired).
+ *
+ * bkeys, when stored within a btree node, may also be packed. In that case, the
+ * bkey_format in that node is used to unpack it. Packed bkeys mean that we can
+ * be generous with field sizes in the common part of the key format (64 bit
+ * inode number, 64 bit offset, 96 bit version field, etc.) for negligible cost.
  */
 
 #include <asm/types.h>
 #include <asm/byteorder.h>
+#include <linux/kernel.h>
 #include <linux/uuid.h>
 
 #define LE_BITMASK(_bits, name, type, field, offset, end)              \
@@ -44,12 +112,19 @@ struct bkey_format {
 /* Btree keys - all units are in sectors */
 
 struct bpos {
-       /* Word order matches machine byte order */
-#if defined(__LITTLE_ENDIAN)
+       /*
+        * Word order matches machine byte order - btree code treats a bpos as a
+        * single large integer, for search/comparison purposes
+        *
+        * Note that wherever a bpos is embedded in another on disk data
+        * structure, it has to be byte swabbed when reading in metadata that
+        * wasn't written in native endian order:
+        */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        __u32           snapshot;
        __u64           offset;
        __u64           inode;
-#elif defined(__BIG_ENDIAN)
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        __u64           inode;
        __u64           offset;         /* Points to end of extent - sectors */
        __u32           snapshot;
@@ -63,19 +138,18 @@ struct bpos {
 #define KEY_SNAPSHOT_MAX               ((__u32)~0U)
 #define KEY_SIZE_MAX                   ((__u32)~0U)
 
-static inline struct bpos POS(__u64 inode, __u64 offset)
+static inline struct bpos SPOS(__u64 inode, __u64 offset, __u32 snapshot)
 {
-       struct bpos ret;
-
-       ret.inode       = inode;
-       ret.offset      = offset;
-       ret.snapshot    = 0;
-
-       return ret;
+       return (struct bpos) {
+               .inode          = inode,
+               .offset         = offset,
+               .snapshot       = snapshot,
+       };
 }
 
-#define POS_MIN                                POS(0, 0)
-#define POS_MAX                                POS(KEY_INODE_MAX, KEY_OFFSET_MAX)
+#define POS_MIN                                SPOS(0, 0, 0)
+#define POS_MAX                                SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, KEY_SNAPSHOT_MAX)
+#define POS(_inode, _offset)           SPOS(_inode, _offset, 0)
 
 /* Empty placeholder struct, for container_of() */
 struct bch_val {
@@ -83,10 +157,10 @@ struct bch_val {
 };
 
 struct bversion {
-#if defined(__LITTLE_ENDIAN)
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        __u64           lo;
        __u32           hi;
-#elif defined(__BIG_ENDIAN)
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        __u32           hi;
        __u64           lo;
 #endif
@@ -110,13 +184,13 @@ struct bkey {
        /* Type of the value */
        __u8            type;
 
-#if defined(__LITTLE_ENDIAN)
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        __u8            pad[1];
 
        struct bversion version;
        __u32           size;           /* extent size, in sectors */
        struct bpos     p;
-#elif defined(__BIG_ENDIAN)
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        struct bpos     p;
        __u32           size;           /* extent size, in sectors */
        struct bversion version;
@@ -160,6 +234,9 @@ struct bkey_packed {
 } __attribute__((packed, aligned(8)));
 
 #define BKEY_U64s                      (sizeof(struct bkey) / sizeof(__u64))
+#define BKEY_U64s_MAX                  U8_MAX
+#define BKEY_VAL_U64s_MAX              (BKEY_U64s_MAX - BKEY_U64s)
+
 #define KEY_PACKED_BITS_START          24
 
 #define KEY_FORMAT_LOCAL_BTREE         0
@@ -226,15 +303,6 @@ static inline void bkey_init(struct bkey *k)
 #define __BKEY_PADDED(key, pad)                                        \
        struct { struct bkey_i key; __u64 key ## _pad[pad]; }
 
-#define BKEY_VAL_TYPE(name, nr)                                                \
-struct bkey_i_##name {                                                 \
-       union {                                                         \
-               struct bkey             k;                              \
-               struct bkey_i           k_i;                            \
-       };                                                              \
-       struct bch_##name               v;                              \
-}
-
 /*
  * - DELETED keys are used internally to mark keys that should be ignored but
  *   override keys in composition order.  Their version number is ignored.
@@ -249,19 +317,59 @@ struct bkey_i_##name {                                                    \
  *   by new writes or cluster-wide GC. Node repair can also overwrite them with
  *   the same or a more recent version number, but not with an older version
  *   number.
+ *
+ * - WHITEOUT: for hash table btrees
 */
-#define KEY_TYPE_DELETED               0
-#define KEY_TYPE_DISCARD               1
-#define KEY_TYPE_ERROR                 2
-#define KEY_TYPE_COOKIE                        3
-#define KEY_TYPE_PERSISTENT_DISCARD    4
-#define KEY_TYPE_GENERIC_NR            128
+#define BCH_BKEY_TYPES()                               \
+       x(deleted,              0)                      \
+       x(discard,              1)                      \
+       x(error,                2)                      \
+       x(cookie,               3)                      \
+       x(hash_whiteout,        4)                      \
+       x(btree_ptr,            5)                      \
+       x(extent,               6)                      \
+       x(reservation,          7)                      \
+       x(inode,                8)                      \
+       x(inode_generation,     9)                      \
+       x(dirent,               10)                     \
+       x(xattr,                11)                     \
+       x(alloc,                12)                     \
+       x(quota,                13)                     \
+       x(stripe,               14)                     \
+       x(reflink_p,            15)                     \
+       x(reflink_v,            16)                     \
+       x(inline_data,          17)                     \
+       x(btree_ptr_v2,         18)                     \
+       x(indirect_inline_data, 19)                     \
+       x(alloc_v2,             20)
+
+enum bch_bkey_type {
+#define x(name, nr) KEY_TYPE_##name    = nr,
+       BCH_BKEY_TYPES()
+#undef x
+       KEY_TYPE_MAX,
+};
+
+struct bch_deleted {
+       struct bch_val          v;
+};
+
+struct bch_discard {
+       struct bch_val          v;
+};
+
+struct bch_error {
+       struct bch_val          v;
+};
 
 struct bch_cookie {
        struct bch_val          v;
        __le64                  cookie;
 };
-BKEY_VAL_TYPE(cookie,          KEY_TYPE_COOKIE);
+
+struct bch_hash_whiteout {
+       struct bch_val          v;
+};
 
 /* Extents */
 
@@ -275,10 +383,10 @@ BKEY_VAL_TYPE(cookie,             KEY_TYPE_COOKIE);
  *
  * If an extent is not checksummed or compressed, when the extent is trimmed we
  * don't have to remember the extent we originally allocated and wrote: we can
- * merely adjust ptr->offset to point to the start of the start of the data that
- * is currently live. The size field in struct bkey records the current (live)
- * size of the extent, and is also used to mean "size of region on disk that we
- * point to" in this case.
+ * merely adjust ptr->offset to point to the start of the data that is currently
+ * live. The size field in struct bkey records the current (live) size of the
+ * extent, and is also used to mean "size of region on disk that we point to" in
+ * this case.
  *
  * Thus an extent that is not checksummed or compressed will consist only of a
  * list of bch_extent_ptrs, with none of the fields in
@@ -342,46 +450,20 @@ struct bch_csum {
        __le64                  hi;
 } __attribute__((packed, aligned(8)));
 
-enum bch_csum_type {
-       BCH_CSUM_NONE                   = 0,
-       BCH_CSUM_CRC32C_NONZERO         = 1,
-       BCH_CSUM_CRC64_NONZERO          = 2,
-       BCH_CSUM_CHACHA20_POLY1305_80   = 3,
-       BCH_CSUM_CHACHA20_POLY1305_128  = 4,
-       BCH_CSUM_CRC32C                 = 5,
-       BCH_CSUM_CRC64                  = 6,
-       BCH_CSUM_NR                     = 7,
-};
-
-static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type)
-{
-       switch (type) {
-       case BCH_CSUM_CHACHA20_POLY1305_80:
-       case BCH_CSUM_CHACHA20_POLY1305_128:
-               return true;
-       default:
-               return false;
-       }
-}
-
-enum bch_compression_type {
-       BCH_COMPRESSION_NONE            = 0,
-       BCH_COMPRESSION_LZ4_OLD         = 1,
-       BCH_COMPRESSION_GZIP            = 2,
-       BCH_COMPRESSION_LZ4             = 3,
-       BCH_COMPRESSION_ZSTD            = 4,
-       BCH_COMPRESSION_NR              = 5,
-};
+#define BCH_EXTENT_ENTRY_TYPES()               \
+       x(ptr,                  0)              \
+       x(crc32,                1)              \
+       x(crc64,                2)              \
+       x(crc128,               3)              \
+       x(stripe_ptr,           4)
+#define BCH_EXTENT_ENTRY_MAX   5
 
 enum bch_extent_entry_type {
-       BCH_EXTENT_ENTRY_ptr            = 0,
-       BCH_EXTENT_ENTRY_crc32          = 1,
-       BCH_EXTENT_ENTRY_crc64          = 2,
-       BCH_EXTENT_ENTRY_crc128         = 3,
+#define x(f, n) BCH_EXTENT_ENTRY_##f = n,
+       BCH_EXTENT_ENTRY_TYPES()
+#undef x
 };
 
-#define BCH_EXTENT_ENTRY_MAX           4
-
 /* Compressed/uncompressed size are stored biased by 1: */
 struct bch_extent_crc32 {
 #if defined(__LITTLE_ENDIAN_BITFIELD)
@@ -446,11 +528,11 @@ struct bch_extent_crc128 {
 #elif defined (__BIG_ENDIAN_BITFIELD)
        __u64                   compression_type:4,
                                csum_type:4,
-                               nonce:14,
+                               nonce:13,
                                offset:13,
                                _uncompressed_size:13,
                                _compressed_size:13,
-                               type:3;
+                               type:4;
 #endif
        struct bch_csum         csum;
 } __attribute__((packed, aligned(8)));
@@ -465,7 +547,7 @@ struct bch_extent_ptr {
 #if defined(__LITTLE_ENDIAN_BITFIELD)
        __u64                   type:1,
                                cached:1,
-                               erasure_coded:1,
+                               unused:1,
                                reservation:1,
                                offset:44, /* 8 petabytes */
                                dev:8,
@@ -475,28 +557,42 @@ struct bch_extent_ptr {
                                dev:8,
                                offset:44,
                                reservation:1,
-                               erasure_coded:1,
+                               unused:1,
                                cached:1,
                                type:1;
 #endif
 } __attribute__((packed, aligned(8)));
 
-struct bch_extent_reservation {
+struct bch_extent_stripe_ptr {
 #if defined(__LITTLE_ENDIAN_BITFIELD)
        __u64                   type:5,
-                               unused:23,
+                               block:8,
+                               redundancy:4,
+                               idx:47;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+       __u64                   idx:47,
+                               redundancy:4,
+                               block:8,
+                               type:5;
+#endif
+};
+
+struct bch_extent_reservation {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       __u64                   type:6,
+                               unused:22,
                                replicas:4,
                                generation:32;
 #elif defined (__BIG_ENDIAN_BITFIELD)
        __u64                   generation:32,
                                replicas:4,
-                               unused:23,
-                               type:5;
+                               unused:22,
+                               type:6;
 #endif
 };
 
 union bch_extent_entry {
-#if defined(__LITTLE_ENDIAN) ||  __BITS_PER_LONG == 64
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ ||  __BITS_PER_LONG == 64
        unsigned long                   type;
 #elif __BITS_PER_LONG == 32
        struct {
@@ -506,27 +602,32 @@ union bch_extent_entry {
 #else
 #error edit for your odd byteorder.
 #endif
-       struct bch_extent_crc32         crc32;
-       struct bch_extent_crc64         crc64;
-       struct bch_extent_crc128        crc128;
-       struct bch_extent_ptr           ptr;
+
+#define x(f, n) struct bch_extent_##f  f;
+       BCH_EXTENT_ENTRY_TYPES()
+#undef x
 };
 
-enum {
-       BCH_EXTENT              = 128,
+struct bch_btree_ptr {
+       struct bch_val          v;
 
-       /*
-        * This is kind of a hack, we're overloading the type for a boolean that
-        * really should be part of the value - BCH_EXTENT and BCH_EXTENT_CACHED
-        * have the same value type:
-        */
-       BCH_EXTENT_CACHED       = 129,
+       struct bch_extent_ptr   start[0];
+       __u64                   _data[0];
+} __attribute__((packed, aligned(8)));
 
-       /*
-        * Persistent reservation:
-        */
-       BCH_RESERVATION         = 130,
-};
+struct bch_btree_ptr_v2 {
+       struct bch_val          v;
+
+       __u64                   mem_ptr;
+       __le64                  seq;
+       __le16                  sectors_written;
+       __le16                  flags;
+       struct bpos             min_key;
+       struct bch_extent_ptr   start[0];
+       __u64                   _data[0];
+} __attribute__((packed, aligned(8)));
+
+LE16_BITMASK(BTREE_PTR_RANGE_UPDATED,  struct bch_btree_ptr_v2, flags, 0, 1);
 
 struct bch_extent {
        struct bch_val          v;
@@ -534,7 +635,6 @@ struct bch_extent {
        union bch_extent_entry  start[0];
        __u64                   _data[0];
 } __attribute__((packed, aligned(8)));
-BKEY_VAL_TYPE(extent,          BCH_EXTENT);
 
 struct bch_reservation {
        struct bch_val          v;
@@ -543,7 +643,6 @@ struct bch_reservation {
        __u8                    nr_replicas;
        __u8                    pad[3];
 } __attribute__((packed, aligned(8)));
-BKEY_VAL_TYPE(reservation,     BCH_RESERVATION);
 
 /* Maximum size (in u64s) a single pointer could be: */
 #define BKEY_EXTENT_PTR_U64s_MAX\
@@ -551,16 +650,16 @@ BKEY_VAL_TYPE(reservation,        BCH_RESERVATION);
          sizeof(struct bch_extent_ptr)) / sizeof(u64))
 
 /* Maximum possible size of an entire extent value: */
-/* There's a hack in the keylist code that needs to be fixed.. */
 #define BKEY_EXTENT_VAL_U64s_MAX                               \
-       (BKEY_EXTENT_PTR_U64s_MAX * (BCH_REPLICAS_MAX + 1))
+       (1 + BKEY_EXTENT_PTR_U64s_MAX * (BCH_REPLICAS_MAX + 1))
 
 /* Maximum possible size of an entire extent, key + value: */
 #define BKEY_EXTENT_U64s_MAX           (BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)
 
 /* Btree pointers don't carry around checksums: */
 #define BKEY_BTREE_PTR_VAL_U64s_MAX                            \
-       ((sizeof(struct bch_extent_ptr)) / sizeof(u64) * BCH_REPLICAS_MAX)
+       ((sizeof(struct bch_btree_ptr_v2) +                     \
+         sizeof(struct bch_extent_ptr) * BCH_REPLICAS_MAX) / sizeof(u64))
 #define BKEY_BTREE_PTR_U64s_MAX                                        \
        (BKEY_U64s + BKEY_BTREE_PTR_VAL_U64s_MAX)
 
@@ -570,12 +669,6 @@ BKEY_VAL_TYPE(reservation, BCH_RESERVATION);
 
 #define BCACHEFS_ROOT_INO      4096
 
-enum bch_inode_types {
-       BCH_INODE_FS            = 128,
-       BCH_INODE_BLOCKDEV      = 129,
-       BCH_INODE_GENERATION    = 130,
-};
-
 struct bch_inode {
        struct bch_val          v;
 
@@ -584,7 +677,6 @@ struct bch_inode {
        __le16                  bi_mode;
        __u8                    fields[0];
 } __attribute__((packed, aligned(8)));
-BKEY_VAL_TYPE(inode,           BCH_INODE_FS);
 
 struct bch_inode_generation {
        struct bch_val          v;
@@ -592,38 +684,51 @@ struct bch_inode_generation {
        __le32                  bi_generation;
        __le32                  pad;
 } __attribute__((packed, aligned(8)));
-BKEY_VAL_TYPE(inode_generation,        BCH_INODE_GENERATION);
-
-#define BCH_INODE_FIELDS()                                     \
-       BCH_INODE_FIELD(bi_atime,                       64)     \
-       BCH_INODE_FIELD(bi_ctime,                       64)     \
-       BCH_INODE_FIELD(bi_mtime,                       64)     \
-       BCH_INODE_FIELD(bi_otime,                       64)     \
-       BCH_INODE_FIELD(bi_size,                        64)     \
-       BCH_INODE_FIELD(bi_sectors,                     64)     \
-       BCH_INODE_FIELD(bi_uid,                         32)     \
-       BCH_INODE_FIELD(bi_gid,                         32)     \
-       BCH_INODE_FIELD(bi_nlink,                       32)     \
-       BCH_INODE_FIELD(bi_generation,                  32)     \
-       BCH_INODE_FIELD(bi_dev,                         32)     \
-       BCH_INODE_FIELD(bi_data_checksum,               8)      \
-       BCH_INODE_FIELD(bi_compression,                 8)      \
-       BCH_INODE_FIELD(bi_project,                     32)     \
-       BCH_INODE_FIELD(bi_background_compression,      8)      \
-       BCH_INODE_FIELD(bi_data_replicas,               8)      \
-       BCH_INODE_FIELD(bi_promote_target,              16)     \
-       BCH_INODE_FIELD(bi_foreground_target,           16)     \
-       BCH_INODE_FIELD(bi_background_target,           16)
-
-#define BCH_INODE_FIELDS_INHERIT()                             \
-       BCH_INODE_FIELD(bi_data_checksum)                       \
-       BCH_INODE_FIELD(bi_compression)                         \
-       BCH_INODE_FIELD(bi_project)                             \
-       BCH_INODE_FIELD(bi_background_compression)              \
-       BCH_INODE_FIELD(bi_data_replicas)                       \
-       BCH_INODE_FIELD(bi_promote_target)                      \
-       BCH_INODE_FIELD(bi_foreground_target)                   \
-       BCH_INODE_FIELD(bi_background_target)
+
+#define BCH_INODE_FIELDS()                     \
+       x(bi_atime,                     96)     \
+       x(bi_ctime,                     96)     \
+       x(bi_mtime,                     96)     \
+       x(bi_otime,                     96)     \
+       x(bi_size,                      64)     \
+       x(bi_sectors,                   64)     \
+       x(bi_uid,                       32)     \
+       x(bi_gid,                       32)     \
+       x(bi_nlink,                     32)     \
+       x(bi_generation,                32)     \
+       x(bi_dev,                       32)     \
+       x(bi_data_checksum,             8)      \
+       x(bi_compression,               8)      \
+       x(bi_project,                   32)     \
+       x(bi_background_compression,    8)      \
+       x(bi_data_replicas,             8)      \
+       x(bi_promote_target,            16)     \
+       x(bi_foreground_target,         16)     \
+       x(bi_background_target,         16)     \
+       x(bi_erasure_code,              16)     \
+       x(bi_fields_set,                16)     \
+       x(bi_dir,                       64)     \
+       x(bi_dir_offset,                64)
+
+/* subset of BCH_INODE_FIELDS */
+#define BCH_INODE_OPTS()                       \
+       x(data_checksum,                8)      \
+       x(compression,                  8)      \
+       x(project,                      32)     \
+       x(background_compression,       8)      \
+       x(data_replicas,                8)      \
+       x(promote_target,               16)     \
+       x(foreground_target,            16)     \
+       x(background_target,            16)     \
+       x(erasure_code,                 16)
+
+enum inode_opt_id {
+#define x(name, ...)                           \
+       Inode_opt_##name,
+       BCH_INODE_OPTS()
+#undef  x
+       Inode_opt_nr,
+};
 
 enum {
        /*
@@ -638,9 +743,8 @@ enum {
 
        __BCH_INODE_I_SIZE_DIRTY= 5,
        __BCH_INODE_I_SECTORS_DIRTY= 6,
-
-       /* not implemented yet: */
-       __BCH_INODE_HAS_XATTRS  = 7, /* has xattrs in xattr btree */
+       __BCH_INODE_UNLINKED    = 7,
+       __BCH_INODE_BACKPTR_UNTRUSTED = 8,
 
        /* bits 20+ reserved for packed fields below: */
 };
@@ -652,28 +756,12 @@ enum {
 #define BCH_INODE_NOATIME      (1 << __BCH_INODE_NOATIME)
 #define BCH_INODE_I_SIZE_DIRTY (1 << __BCH_INODE_I_SIZE_DIRTY)
 #define BCH_INODE_I_SECTORS_DIRTY (1 << __BCH_INODE_I_SECTORS_DIRTY)
-#define BCH_INODE_HAS_XATTRS   (1 << __BCH_INODE_HAS_XATTRS)
+#define BCH_INODE_UNLINKED     (1 << __BCH_INODE_UNLINKED)
+#define BCH_INODE_BACKPTR_UNTRUSTED (1 << __BCH_INODE_BACKPTR_UNTRUSTED)
 
 LE32_BITMASK(INODE_STR_HASH,   struct bch_inode, bi_flags, 20, 24);
-LE32_BITMASK(INODE_NR_FIELDS,  struct bch_inode, bi_flags, 24, 32);
-
-struct bch_inode_blockdev {
-       struct bch_val          v;
-
-       __le64                  i_size;
-       __le64                  i_flags;
-
-       /* Seconds: */
-       __le64                  i_ctime;
-       __le64                  i_mtime;
-
-       uuid_le                 i_uuid;
-       __u8                    i_label[32];
-} __attribute__((packed, aligned(8)));
-BKEY_VAL_TYPE(inode_blockdev,  BCH_INODE_BLOCKDEV);
-
-/* Thin provisioned volume, or cache for another block device? */
-LE64_BITMASK(CACHED_DEV,       struct bch_inode_blockdev, i_flags, 0,  1)
+LE32_BITMASK(INODE_NR_FIELDS,  struct bch_inode, bi_flags, 24, 31);
+LE32_BITMASK(INODE_NEW_VARINT, struct bch_inode, bi_flags, 31, 32);
 
 /* Dirents */
 
@@ -688,11 +776,6 @@ LE64_BITMASK(CACHED_DEV,   struct bch_inode_blockdev, i_flags, 0,  1)
  * collision:
  */
 
-enum {
-       BCH_DIRENT              = 128,
-       BCH_DIRENT_WHITEOUT     = 129,
-};
-
 struct bch_dirent {
        struct bch_val          v;
 
@@ -707,20 +790,19 @@ struct bch_dirent {
 
        __u8                    d_name[];
 } __attribute__((packed, aligned(8)));
-BKEY_VAL_TYPE(dirent,          BCH_DIRENT);
 
-/* Xattrs */
+#define BCH_NAME_MAX   (U8_MAX * sizeof(u64) -                         \
+                        sizeof(struct bkey) -                          \
+                        offsetof(struct bch_dirent, d_name))
 
-enum {
-       BCH_XATTR               = 128,
-       BCH_XATTR_WHITEOUT      = 129,
-};
 
-#define BCH_XATTR_INDEX_USER                   0
-#define BCH_XATTR_INDEX_POSIX_ACL_ACCESS       1
-#define BCH_XATTR_INDEX_POSIX_ACL_DEFAULT      2
-#define BCH_XATTR_INDEX_TRUSTED                        3
-#define BCH_XATTR_INDEX_SECURITY               4
+/* Xattrs */
+
+#define KEY_TYPE_XATTR_INDEX_USER                      0
+#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS  1
+#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT 2
+#define KEY_TYPE_XATTR_INDEX_TRUSTED                   3
+#define KEY_TYPE_XATTR_INDEX_SECURITY          4
 
 struct bch_xattr {
        struct bch_val          v;
@@ -729,33 +811,52 @@ struct bch_xattr {
        __le16                  x_val_len;
        __u8                    x_name[];
 } __attribute__((packed, aligned(8)));
-BKEY_VAL_TYPE(xattr,           BCH_XATTR);
 
 /* Bucket/allocation information: */
 
-enum {
-       BCH_ALLOC               = 128,
-};
-
-enum {
-       BCH_ALLOC_FIELD_READ_TIME       = 0,
-       BCH_ALLOC_FIELD_WRITE_TIME      = 1,
-};
-
 struct bch_alloc {
        struct bch_val          v;
        __u8                    fields;
        __u8                    gen;
        __u8                    data[];
 } __attribute__((packed, aligned(8)));
-BKEY_VAL_TYPE(alloc,   BCH_ALLOC);
 
-/* Quotas: */
+#define BCH_ALLOC_FIELDS_V1()                  \
+       x(read_time,            16)             \
+       x(write_time,           16)             \
+       x(data_type,            8)              \
+       x(dirty_sectors,        16)             \
+       x(cached_sectors,       16)             \
+       x(oldest_gen,           8)              \
+       x(stripe,               32)             \
+       x(stripe_redundancy,    8)
+
+struct bch_alloc_v2 {
+       struct bch_val          v;
+       __u8                    nr_fields;
+       __u8                    gen;
+       __u8                    oldest_gen;
+       __u8                    data_type;
+       __u8                    data[];
+} __attribute__((packed, aligned(8)));
+
+#define BCH_ALLOC_FIELDS_V2()                  \
+       x(read_time,            64)             \
+       x(write_time,           64)             \
+       x(dirty_sectors,        16)             \
+       x(cached_sectors,       16)             \
+       x(stripe,               32)             \
+       x(stripe_redundancy,    8)
 
 enum {
-       BCH_QUOTA               = 128,
+#define x(name, _bits) BCH_ALLOC_FIELD_V1_##name,
+       BCH_ALLOC_FIELDS_V1()
+#undef x
+       BCH_ALLOC_FIELD_NR
 };
 
+/* Quotas: */
+
 enum quota_types {
        QTYP_USR                = 0,
        QTYP_GRP                = 1,
@@ -778,7 +879,53 @@ struct bch_quota {
        struct bch_val          v;
        struct bch_quota_counter c[Q_COUNTERS];
 } __attribute__((packed, aligned(8)));
-BKEY_VAL_TYPE(quota,   BCH_QUOTA);
+
+/* Erasure coding */
+
+struct bch_stripe {
+       struct bch_val          v;
+       __le16                  sectors;
+       __u8                    algorithm;
+       __u8                    nr_blocks;
+       __u8                    nr_redundant;
+
+       __u8                    csum_granularity_bits;
+       __u8                    csum_type;
+       __u8                    pad;
+
+       struct bch_extent_ptr   ptrs[0];
+} __attribute__((packed, aligned(8)));
+
+/* Reflink: */
+
+struct bch_reflink_p {
+       struct bch_val          v;
+       __le64                  idx;
+
+       __le32                  reservation_generation;
+       __u8                    nr_replicas;
+       __u8                    pad[3];
+};
+
+struct bch_reflink_v {
+       struct bch_val          v;
+       __le64                  refcount;
+       union bch_extent_entry  start[0];
+       __u64                   _data[0];
+};
+
+struct bch_indirect_inline_data {
+       struct bch_val          v;
+       __le64                  refcount;
+       u8                      data[0];
+};
+
+/* Inline data */
+
+struct bch_inline_data {
+       struct bch_val          v;
+       u8                      data[0];
+};
 
 /* Optional/variable size superblock sections: */
 
@@ -792,9 +939,12 @@ struct bch_sb_field {
        x(journal,      0)      \
        x(members,      1)      \
        x(crypt,        2)      \
-       x(replicas,     3)      \
+       x(replicas_v0,  3)      \
        x(quota,        4)      \
-       x(disk_groups,  5)
+       x(disk_groups,  5)      \
+       x(clean,        6)      \
+       x(replicas,     7)      \
+       x(journal_seq_blacklist, 8)
 
 enum bch_sb_field_type {
 #define x(f, nr)       BCH_SB_FIELD_##f = nr,
@@ -812,6 +962,8 @@ struct bch_sb_field_journal {
 
 /* BCH_SB_FIELD_members: */
 
+#define BCH_MIN_NR_NBUCKETS    (1 << 6)
+
 struct bch_member {
        uuid_le                 uuid;
        __le64                  nbuckets;       /* device size */
@@ -838,19 +990,29 @@ LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS,   struct bch_member, flags[1], 0,  20);
 LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
 #endif
 
+#define BCH_MEMBER_STATES()                    \
+       x(rw,           0)                      \
+       x(ro,           1)                      \
+       x(failed,       2)                      \
+       x(spare,        3)
+
 enum bch_member_state {
-       BCH_MEMBER_STATE_RW             = 0,
-       BCH_MEMBER_STATE_RO             = 1,
-       BCH_MEMBER_STATE_FAILED         = 2,
-       BCH_MEMBER_STATE_SPARE          = 3,
-       BCH_MEMBER_STATE_NR             = 4,
+#define x(t, n) BCH_MEMBER_STATE_##t = n,
+       BCH_MEMBER_STATES()
+#undef x
+       BCH_MEMBER_STATE_NR
 };
 
-enum cache_replacement {
-       CACHE_REPLACEMENT_LRU           = 0,
-       CACHE_REPLACEMENT_FIFO          = 1,
-       CACHE_REPLACEMENT_RANDOM        = 2,
-       CACHE_REPLACEMENT_NR            = 3,
+#define BCH_CACHE_REPLACEMENT_POLICIES()       \
+       x(lru,          0)                      \
+       x(fifo,         1)                      \
+       x(random,       2)
+
+enum bch_cache_replacement_policies {
+#define x(t, n) BCH_CACHE_REPLACEMENT_##t = n,
+       BCH_CACHE_REPLACEMENT_POLICIES()
+#undef x
+       BCH_CACHE_REPLACEMENT_NR
 };
 
 struct bch_sb_field_members {
@@ -908,26 +1070,47 @@ LE64_BITMASK(BCH_KDF_SCRYPT_P,   struct bch_sb_field_crypt, kdf_flags, 32, 48);
 
 /* BCH_SB_FIELD_replicas: */
 
+#define BCH_DATA_TYPES()               \
+       x(none,         0)              \
+       x(sb,           1)              \
+       x(journal,      2)              \
+       x(btree,        3)              \
+       x(user,         4)              \
+       x(cached,       5)              \
+       x(parity,       6)
+
 enum bch_data_type {
-       BCH_DATA_NONE           = 0,
-       BCH_DATA_SB             = 1,
-       BCH_DATA_JOURNAL        = 2,
-       BCH_DATA_BTREE          = 3,
-       BCH_DATA_USER           = 4,
-       BCH_DATA_CACHED         = 5,
-       BCH_DATA_NR             = 6,
+#define x(t, n) BCH_DATA_##t,
+       BCH_DATA_TYPES()
+#undef x
+       BCH_DATA_NR
 };
 
+struct bch_replicas_entry_v0 {
+       __u8                    data_type;
+       __u8                    nr_devs;
+       __u8                    devs[0];
+} __attribute__((packed));
+
+struct bch_sb_field_replicas_v0 {
+       struct bch_sb_field     field;
+       struct bch_replicas_entry_v0 entries[0];
+} __attribute__((packed, aligned(8)));
+
 struct bch_replicas_entry {
-       u8                      data_type;
-       u8                      nr;
-       u8                      devs[0];
-};
+       __u8                    data_type;
+       __u8                    nr_devs;
+       __u8                    nr_required;
+       __u8                    devs[0];
+} __attribute__((packed));
+
+#define replicas_entry_bytes(_i)                                       \
+       (offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
 
 struct bch_sb_field_replicas {
        struct bch_sb_field     field;
        struct bch_replicas_entry entries[0];
-};
+} __attribute__((packed, aligned(8)));
 
 /* BCH_SB_FIELD_quota: */
 
@@ -953,7 +1136,7 @@ struct bch_sb_field_quota {
 struct bch_disk_group {
        __u8                    label[BCH_SB_LABEL_SIZE];
        __le64                  flags[2];
-};
+} __attribute__((packed, aligned(8)));
 
 LE64_BITMASK(BCH_GROUP_DELETED,                struct bch_disk_group, flags[0], 0,  1)
 LE64_BITMASK(BCH_GROUP_DATA_ALLOWED,   struct bch_disk_group, flags[0], 1,  6)
@@ -962,20 +1145,74 @@ LE64_BITMASK(BCH_GROUP_PARENT,           struct bch_disk_group, flags[0], 6, 24)
 struct bch_sb_field_disk_groups {
        struct bch_sb_field     field;
        struct bch_disk_group   entries[0];
+} __attribute__((packed, aligned(8)));
+
+/*
+ * On clean shutdown, store btree roots and current journal sequence number in
+ * the superblock:
+ */
+struct jset_entry {
+       __le16                  u64s;
+       __u8                    btree_id;
+       __u8                    level;
+       __u8                    type; /* designates what this jset holds */
+       __u8                    pad[3];
+
+       union {
+               struct bkey_i   start[0];
+               __u64           _data[0];
+       };
+};
+
+struct bch_sb_field_clean {
+       struct bch_sb_field     field;
+
+       __le32                  flags;
+       __le16                  _read_clock; /* no longer used */
+       __le16                  _write_clock;
+       __le64                  journal_seq;
+
+       union {
+               struct jset_entry start[0];
+               __u64           _data[0];
+       };
+};
+
+struct journal_seq_blacklist_entry {
+       __le64                  start;
+       __le64                  end;
+};
+
+struct bch_sb_field_journal_seq_blacklist {
+       struct bch_sb_field     field;
+
+       union {
+               struct journal_seq_blacklist_entry start[0];
+               __u64           _data[0];
+       };
 };
 
 /* Superblock: */
 
 /*
- * Version 8:  BCH_SB_ENCODED_EXTENT_MAX_BITS
- *             BCH_MEMBER_DATA_ALLOWED
- * Version 9:  incompatible extent nonce change
+ * New versioning scheme:
+ * One common version number for all on disk data structures - superblock, btree
+ * nodes, journal entries
  */
+#define BCH_JSET_VERSION_OLD                   2
+#define BCH_BSET_VERSION_OLD                   3
+
+enum bcachefs_metadata_version {
+       bcachefs_metadata_version_min                   = 9,
+       bcachefs_metadata_version_new_versioning        = 10,
+       bcachefs_metadata_version_bkey_renumber         = 10,
+       bcachefs_metadata_version_inode_btree_change    = 11,
+       bcachefs_metadata_version_snapshot              = 12,
+       bcachefs_metadata_version_inode_backpointers    = 13,
+       bcachefs_metadata_version_max                   = 14,
+};
 
-#define BCH_SB_VERSION_MIN             7
-#define BCH_SB_VERSION_EXTENT_MAX      8
-#define BCH_SB_VERSION_EXTENT_NONCE_V1 9
-#define BCH_SB_VERSION_MAX             9
+#define bcachefs_metadata_version_current      (bcachefs_metadata_version_max - 1)
 
 #define BCH_SB_SECTOR                  8
 #define BCH_SB_MEMBERS_MAX             64 /* XXX kill */
@@ -994,6 +1231,9 @@ struct bch_sb_layout {
 /*
  * @offset     - sector where this sb was written
  * @version    - on disk format version
+ * @version_min        - Oldest metadata version this filesystem contains; so we can
+ *               safely drop compatibility code and refuse to mount filesystems
+ *               we'd need it for
  * @magic      - identifies as a bcachefs superblock (BCACHE_MAGIC)
  * @seq                - incremented each time superblock is written
  * @uuid       - used for generating various magic numbers and identifying
@@ -1006,7 +1246,9 @@ struct bch_sb_layout {
  */
 struct bch_sb {
        struct bch_csum         csum;
-       __le64                  version;
+       __le16                  version;
+       __le16                  version_min;
+       __le16                  pad[2];
        uuid_le                 magic;
        uuid_le                 uuid;
        uuid_le                 user_uuid;
@@ -1070,7 +1312,12 @@ LE64_BITMASK(BCH_SB_USRQUOTA,            struct bch_sb, flags[0], 57, 58);
 LE64_BITMASK(BCH_SB_GRPQUOTA,          struct bch_sb, flags[0], 58, 59);
 LE64_BITMASK(BCH_SB_PRJQUOTA,          struct bch_sb, flags[0], 59, 60);
 
-/* 60-64 unused */
+LE64_BITMASK(BCH_SB_HAS_ERRORS,                struct bch_sb, flags[0], 60, 61);
+
+/* bit 61 was reflink option */
+LE64_BITMASK(BCH_SB_BIG_ENDIAN,                struct bch_sb, flags[0], 62, 63);
+
+/* 61-64 unused */
 
 LE64_BITMASK(BCH_SB_STR_HASH_TYPE,     struct bch_sb, flags[1],  0,  4);
 LE64_BITMASK(BCH_SB_COMPRESSION_TYPE,  struct bch_sb, flags[1],  4,  8);
@@ -1095,48 +1342,175 @@ LE64_BITMASK(BCH_SB_BACKGROUND_TARGET, struct bch_sb, flags[1], 52, 64);
 
 LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE,
                                        struct bch_sb, flags[2],  0,  4);
+LE64_BITMASK(BCH_SB_GC_RESERVE_BYTES,  struct bch_sb, flags[2],  4, 64);
+
+LE64_BITMASK(BCH_SB_ERASURE_CODE,      struct bch_sb, flags[3],  0, 16);
+LE64_BITMASK(BCH_SB_METADATA_TARGET,   struct bch_sb, flags[3], 16, 28);
 
-/* Features: */
-enum bch_sb_features {
-       BCH_FEATURE_LZ4                 = 0,
-       BCH_FEATURE_GZIP                = 1,
-       BCH_FEATURE_ZSTD                = 2,
+/*
+ * Features:
+ *
+ * journal_seq_blacklist_v3:   gates BCH_SB_FIELD_journal_seq_blacklist
+ * reflink:                    gates KEY_TYPE_reflink
+ * inline_data:                        gates KEY_TYPE_inline_data
+ * new_siphash:                        gates BCH_STR_HASH_SIPHASH
+ * new_extent_overwrite:       gates BTREE_NODE_NEW_EXTENT_OVERWRITE
+ */
+#define BCH_SB_FEATURES()                      \
+       x(lz4,                          0)      \
+       x(gzip,                         1)      \
+       x(zstd,                         2)      \
+       x(atomic_nlink,                 3)      \
+       x(ec,                           4)      \
+       x(journal_seq_blacklist_v3,     5)      \
+       x(reflink,                      6)      \
+       x(new_siphash,                  7)      \
+       x(inline_data,                  8)      \
+       x(new_extent_overwrite,         9)      \
+       x(incompressible,               10)     \
+       x(btree_ptr_v2,                 11)     \
+       x(extents_above_btree_updates,  12)     \
+       x(btree_updates_journalled,     13)     \
+       x(reflink_inline_data,          14)     \
+       x(new_varint,                   15)     \
+       x(journal_no_flush,             16)     \
+       x(alloc_v2,                     17)     \
+       x(extents_across_btree_nodes,   18)
+
+#define BCH_SB_FEATURES_ALWAYS                         \
+       ((1ULL << BCH_FEATURE_new_extent_overwrite)|    \
+        (1ULL << BCH_FEATURE_extents_above_btree_updates)|\
+        (1ULL << BCH_FEATURE_btree_updates_journalled)|\
+        (1ULL << BCH_FEATURE_alloc_v2)|\
+        (1ULL << BCH_FEATURE_extents_across_btree_nodes))
+
+#define BCH_SB_FEATURES_ALL                            \
+       (BCH_SB_FEATURES_ALWAYS|                        \
+        (1ULL << BCH_FEATURE_new_siphash)|             \
+        (1ULL << BCH_FEATURE_btree_ptr_v2)|            \
+        (1ULL << BCH_FEATURE_new_varint)|              \
+        (1ULL << BCH_FEATURE_journal_no_flush))
+
+enum bch_sb_feature {
+#define x(f, n) BCH_FEATURE_##f,
+       BCH_SB_FEATURES()
+#undef x
+       BCH_FEATURE_NR,
+};
+
+enum bch_sb_compat {
+       BCH_COMPAT_FEAT_ALLOC_INFO              = 0,
+       BCH_COMPAT_FEAT_ALLOC_METADATA          = 1,
+       BCH_COMPAT_FEAT_EXTENTS_ABOVE_BTREE_UPDATES_DONE = 2,
+       BCH_COMPAT_FEAT_BFORMAT_OVERFLOW_DONE   = 3,
 };
 
 /* options: */
 
 #define BCH_REPLICAS_MAX               4U
 
+#define BCH_BKEY_PTRS_MAX              16U
+
+#define BCH_ERROR_ACTIONS()            \
+       x(continue,             0)      \
+       x(ro,                   1)      \
+       x(panic,                2)
+
 enum bch_error_actions {
-       BCH_ON_ERROR_CONTINUE           = 0,
-       BCH_ON_ERROR_RO                 = 1,
-       BCH_ON_ERROR_PANIC              = 2,
-       BCH_NR_ERROR_ACTIONS            = 3,
+#define x(t, n) BCH_ON_ERROR_##t = n,
+       BCH_ERROR_ACTIONS()
+#undef x
+       BCH_ON_ERROR_NR
 };
 
-enum bch_csum_opts {
-       BCH_CSUM_OPT_NONE               = 0,
-       BCH_CSUM_OPT_CRC32C             = 1,
-       BCH_CSUM_OPT_CRC64              = 2,
-       BCH_CSUM_OPT_NR                 = 3,
+enum bch_str_hash_type {
+       BCH_STR_HASH_CRC32C             = 0,
+       BCH_STR_HASH_CRC64              = 1,
+       BCH_STR_HASH_SIPHASH_OLD        = 2,
+       BCH_STR_HASH_SIPHASH            = 3,
+       BCH_STR_HASH_NR                 = 4,
 };
 
+#define BCH_STR_HASH_OPTS()            \
+       x(crc32c,               0)      \
+       x(crc64,                1)      \
+       x(siphash,              2)
+
 enum bch_str_hash_opts {
-       BCH_STR_HASH_CRC32C             = 0,
-       BCH_STR_HASH_CRC64              = 1,
-       BCH_STR_HASH_SIPHASH            = 2,
-       BCH_STR_HASH_NR                 = 3,
+#define x(t, n) BCH_STR_HASH_OPT_##t = n,
+       BCH_STR_HASH_OPTS()
+#undef x
+       BCH_STR_HASH_OPT_NR
+};
+
+enum bch_csum_type {
+       BCH_CSUM_NONE                   = 0,
+       BCH_CSUM_CRC32C_NONZERO         = 1,
+       BCH_CSUM_CRC64_NONZERO          = 2,
+       BCH_CSUM_CHACHA20_POLY1305_80   = 3,
+       BCH_CSUM_CHACHA20_POLY1305_128  = 4,
+       BCH_CSUM_CRC32C                 = 5,
+       BCH_CSUM_CRC64                  = 6,
+       BCH_CSUM_NR                     = 7,
+};
+
+static const unsigned bch_crc_bytes[] = {
+       [BCH_CSUM_NONE]                         = 0,
+       [BCH_CSUM_CRC32C_NONZERO]               = 4,
+       [BCH_CSUM_CRC32C]                       = 4,
+       [BCH_CSUM_CRC64_NONZERO]                = 8,
+       [BCH_CSUM_CRC64]                        = 8,
+       [BCH_CSUM_CHACHA20_POLY1305_80]         = 10,
+       [BCH_CSUM_CHACHA20_POLY1305_128]        = 16,
+};
+
+static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type)
+{
+       switch (type) {
+       case BCH_CSUM_CHACHA20_POLY1305_80:
+       case BCH_CSUM_CHACHA20_POLY1305_128:
+               return true;
+       default:
+               return false;
+       }
+}
+
+#define BCH_CSUM_OPTS()                        \
+       x(none,                 0)      \
+       x(crc32c,               1)      \
+       x(crc64,                2)
+
+enum bch_csum_opts {
+#define x(t, n) BCH_CSUM_OPT_##t = n,
+       BCH_CSUM_OPTS()
+#undef x
+       BCH_CSUM_OPT_NR
 };
 
 #define BCH_COMPRESSION_TYPES()                \
-       x(NONE)                         \
-       x(LZ4)                          \
-       x(GZIP)                         \
-       x(ZSTD)
+       x(none,                 0)      \
+       x(lz4_old,              1)      \
+       x(gzip,                 2)      \
+       x(lz4,                  3)      \
+       x(zstd,                 4)      \
+       x(incompressible,       5)
 
-enum bch_compression_opts {
-#define x(t) BCH_COMPRESSION_OPT_##t,
+enum bch_compression_type {
+#define x(t, n) BCH_COMPRESSION_TYPE_##t = n,
        BCH_COMPRESSION_TYPES()
+#undef x
+       BCH_COMPRESSION_TYPE_NR
+};
+
+#define BCH_COMPRESSION_OPTS()         \
+       x(none,         0)              \
+       x(lz4,          1)              \
+       x(gzip,         2)              \
+       x(zstd,         3)
+
+enum bch_compression_opts {
+#define x(t, n) BCH_COMPRESSION_OPT_##t = n,
+       BCH_COMPRESSION_OPTS()
 #undef x
        BCH_COMPRESSION_OPT_NR
 };
@@ -1176,24 +1550,6 @@ static inline __u64 __bset_magic(struct bch_sb *sb)
 
 /* Journal */
 
-#define BCACHE_JSET_VERSION_UUIDv1     1
-#define BCACHE_JSET_VERSION_UUID       1       /* Always latest UUID format */
-#define BCACHE_JSET_VERSION_JKEYS      2
-#define BCACHE_JSET_VERSION            2
-
-struct jset_entry {
-       __le16                  u64s;
-       __u8                    btree_id;
-       __u8                    level;
-       __u8                    type; /* designates what this jset holds */
-       __u8                    pad[3];
-
-       union {
-               struct bkey_i   start[0];
-               __u64           _data[0];
-       };
-};
-
 #define JSET_KEYS_U64s (sizeof(struct jset_entry) / sizeof(__u64))
 
 #define BCH_JSET_ENTRY_TYPES()                 \
@@ -1201,7 +1557,11 @@ struct jset_entry {
        x(btree_root,           1)              \
        x(prio_ptrs,            2)              \
        x(blacklist,            3)              \
-       x(blacklist_v2,         4)
+       x(blacklist_v2,         4)              \
+       x(usage,                5)              \
+       x(data_usage,           6)              \
+       x(clock,                7)              \
+       x(dev_usage,            8)
 
 enum {
 #define x(f, nr)       BCH_JSET_ENTRY_##f      = nr,
@@ -1231,6 +1591,48 @@ struct jset_entry_blacklist_v2 {
        __le64                  end;
 };
 
+enum {
+       FS_USAGE_RESERVED               = 0,
+       FS_USAGE_INODES                 = 1,
+       FS_USAGE_KEY_VERSION            = 2,
+       FS_USAGE_NR                     = 3
+};
+
+struct jset_entry_usage {
+       struct jset_entry       entry;
+       __le64                  v;
+} __attribute__((packed));
+
+struct jset_entry_data_usage {
+       struct jset_entry       entry;
+       __le64                  v;
+       struct bch_replicas_entry r;
+} __attribute__((packed));
+
+struct jset_entry_clock {
+       struct jset_entry       entry;
+       __u8                    rw;
+       __u8                    pad[7];
+       __le64                  time;
+} __attribute__((packed));
+
+struct jset_entry_dev_usage_type {
+       __le64                  buckets;
+       __le64                  sectors;
+       __le64                  fragmented;
+} __attribute__((packed));
+
+struct jset_entry_dev_usage {
+       struct jset_entry       entry;
+       __le32                  dev;
+       __u32                   pad;
+
+       __le64                  buckets_ec;
+       __le64                  buckets_unavailable;
+
+       struct jset_entry_dev_usage_type d[];
+} __attribute__((packed));
+
 /*
  * On disk format for a journal entry:
  * seq is monotonically increasing; every journal entry has its own unique
@@ -1253,8 +1655,8 @@ struct jset {
 
        __u8                    encrypted_start[0];
 
-       __le16                  read_clock;
-       __le16                  write_clock;
+       __le16                  _read_clock; /* no longer used */
+       __le16                  _write_clock;
 
        /* Sequence number of oldest dirty journal entry */
        __le64                  last_seq;
@@ -1268,39 +1670,33 @@ struct jset {
 
 LE32_BITMASK(JSET_CSUM_TYPE,   struct jset, flags, 0, 4);
 LE32_BITMASK(JSET_BIG_ENDIAN,  struct jset, flags, 4, 5);
+LE32_BITMASK(JSET_NO_FLUSH,    struct jset, flags, 5, 6);
 
-#define BCH_JOURNAL_BUCKETS_MIN                20
+#define BCH_JOURNAL_BUCKETS_MIN                8
 
 /* Btree: */
 
-#define DEFINE_BCH_BTREE_IDS()                                 \
-       DEF_BTREE_ID(EXTENTS,   0, "extents")                   \
-       DEF_BTREE_ID(INODES,    1, "inodes")                    \
-       DEF_BTREE_ID(DIRENTS,   2, "dirents")                   \
-       DEF_BTREE_ID(XATTRS,    3, "xattrs")                    \
-       DEF_BTREE_ID(ALLOC,     4, "alloc")                     \
-       DEF_BTREE_ID(QUOTAS,    5, "quotas")
-
-#define DEF_BTREE_ID(kwd, val, name) BTREE_ID_##kwd = val,
+#define BCH_BTREE_IDS()                                \
+       x(extents,      0)                      \
+       x(inodes,       1)                      \
+       x(dirents,      2)                      \
+       x(xattrs,       3)                      \
+       x(alloc,        4)                      \
+       x(quotas,       5)                      \
+       x(stripes,      6)                      \
+       x(reflink,      7)
 
 enum btree_id {
-       DEFINE_BCH_BTREE_IDS()
+#define x(kwd, val) BTREE_ID_##kwd = val,
+       BCH_BTREE_IDS()
+#undef x
        BTREE_ID_NR
 };
 
-#undef DEF_BTREE_ID
-
 #define BTREE_MAX_DEPTH                4U
 
 /* Btree nodes */
 
-/* Version 1: Seed pointer into btree node checksum
- */
-#define BCACHE_BSET_CSUM               1
-#define BCACHE_BSET_KEY_v1             2
-#define BCACHE_BSET_JOURNAL_SEQ                3
-#define BCACHE_BSET_VERSION            3
-
 /*
  * Btree nodes
  *
@@ -1345,7 +1741,7 @@ struct btree_node {
        /* Closed interval: */
        struct bpos             min_key;
        struct bpos             max_key;
-       struct bch_extent_ptr   ptr;
+       struct bch_extent_ptr   _ptr; /* not used anymore */
        struct bkey_format      format;
 
        union {
@@ -1361,7 +1757,9 @@ struct btree_node {
 
 LE64_BITMASK(BTREE_NODE_ID,    struct btree_node, flags,  0,  4);
 LE64_BITMASK(BTREE_NODE_LEVEL, struct btree_node, flags,  4,  8);
-/* 8-32 unused */
+LE64_BITMASK(BTREE_NODE_NEW_EXTENT_OVERWRITE,
+                               struct btree_node, flags,  8,  9);
+/* 9-32 unused */
 LE64_BITMASK(BTREE_NODE_SEQ,   struct btree_node, flags, 32, 64);
 
 struct btree_node_entry {
@@ -1378,33 +1776,4 @@ struct btree_node_entry {
        };
 } __attribute__((packed, aligned(8)));
 
-/* Obsolete: */
-
-struct prio_set {
-       struct bch_csum         csum;
-
-       __le64                  magic;
-       __le32                  nonce[3];
-       __le16                  version;
-       __le16                  flags;
-
-       __u8                    encrypted_start[0];
-
-       __le64                  next_bucket;
-
-       struct bucket_disk {
-               __le16          prio[2];
-               __u8            gen;
-       } __attribute__((packed)) data[];
-} __attribute__((packed, aligned(8)));
-
-LE32_BITMASK(PSET_CSUM_TYPE,   struct prio_set, flags, 0, 4);
-
-#define PSET_MAGIC             __cpu_to_le64(0x6750e15f87337f91ULL)
-
-static inline __u64 __pset_magic(struct bch_sb *sb)
-{
-       return __le64_to_cpu(__bch2_sb_magic(sb) ^ PSET_MAGIC);
-}
-
 #endif /* _BCACHEFS_FORMAT_H */