Update bcachefs sources to 95ff72a6c1 fixup! mm: Centralize & improve oom reporting...

diff --git a/libbcachefs/bcachefs_format.h b/libbcachefs/bcachefs_format.h
index 296166fa41ff9339c3faca352d52caa38ec1a61b..147fde1417b05fc031802973f54b6b3440eb9a27 100644
--- a/libbcachefs/bcachefs_format.h
+++ b/libbcachefs/bcachefs_format.h
 #include <asm/byteorder.h>
 #include <linux/kernel.h>
 #include <linux/uuid.h>
+#include "vstructs.h"
+
+#define BITMASK(name, type, field, offset, end)                                \
+static const unsigned  name##_OFFSET = offset;                         \
+static const unsigned  name##_BITS = (end - offset);                   \
+                                                                       \
+static inline __u64 name(const type *k)                                        \
+{                                                                      \
+       return (k->field >> offset) & ~(~0ULL << (end - offset));       \
+}                                                                      \
+                                                                       \
+static inline void SET_##name(type *k, __u64 v)                                \
+{                                                                      \
+       k->field &= ~(~(~0ULL << (end - offset)) << offset);            \
+       k->field |= (v & ~(~0ULL << (end - offset))) << offset;         \
+}
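
For reference, BITMASK() generates a native-endian getter plus a SET_-prefixed setter for a bit range of a field. A sketch of what the BCH_ALLOC_V4_NEED_DISCARD invocation further down roughly expands to (illustrative preprocessor output, not literal source):

static const unsigned BCH_ALLOC_V4_NEED_DISCARD_OFFSET = 0;
static const unsigned BCH_ALLOC_V4_NEED_DISCARD_BITS = 1;

/* getter: bits [0, 1) of ->flags */
static inline __u64 BCH_ALLOC_V4_NEED_DISCARD(const struct bch_alloc_v4 *k)
{
	return (k->flags >> 0) & ~(~0ULL << 1);
}

/* setter: clear the bit range, then OR in the new value */
static inline void SET_BCH_ALLOC_V4_NEED_DISCARD(struct bch_alloc_v4 *k, __u64 v)
{
	k->flags &= ~(~(~0ULL << 1) << 0);
	k->flags |= (v & ~(~0ULL << 1)) << 0;
}
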
 
 #define LE_BITMASK(_bits, name, type, field, offset, end)              \
 static const unsigned  name##_OFFSET = offset;                         \
@@ -344,7 +360,13 @@ static inline void bkey_init(struct bkey *k)
        x(indirect_inline_data, 19)                     \
        x(alloc_v2,             20)                     \
        x(subvolume,            21)                     \
-       x(snapshot,             22)
+       x(snapshot,             22)                     \
+       x(inode_v2,             23)                     \
+       x(alloc_v3,             24)                     \
+       x(set,                  25)                     \
+       x(lru,                  26)                     \
+       x(alloc_v4,             27)                     \
+       x(backpointer,          28)
 
 enum bch_bkey_type {
 #define x(name, nr) KEY_TYPE_##name    = nr,
@@ -374,6 +396,10 @@ struct bch_hash_whiteout {
        struct bch_val          v;
 };
 
+struct bch_set {
+       struct bch_val          v;
+};
+
 /* Extents */
 
 /*
@@ -614,8 +640,8 @@ union bch_extent_entry {
 struct bch_btree_ptr {
        struct bch_val          v;
 
-       struct bch_extent_ptr   start[0];
        __u64                   _data[0];
+       struct bch_extent_ptr   start[];
 } __attribute__((packed, aligned(8)));
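
The [0]-to-[] conversions in this and the following structs switch from the old GNU zero-length-array idiom to C99 flexible array members; the zero-length _data[0] member stays so sizeof() and the value layout are unchanged. A minimal sketch of how the pointer count is still derived from the key's value size (btree_ptr_nr_ptrs_example is a hypothetical name; bkey_val_u64s() is assumed from bkey.h, which is not shown in this hunk):

static inline unsigned btree_ptr_nr_ptrs_example(const struct bkey *k)
{
	/* start[] carries no size of its own: the count comes from the key */
	return bkey_val_u64s(k) * sizeof(__u64) / sizeof(struct bch_extent_ptr);
}
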
 
 struct bch_btree_ptr_v2 {
@@ -626,8 +652,8 @@ struct bch_btree_ptr_v2 {
        __le16                  sectors_written;
        __le16                  flags;
        struct bpos             min_key;
-       struct bch_extent_ptr   start[0];
        __u64                   _data[0];
+       struct bch_extent_ptr   start[];
 } __attribute__((packed, aligned(8)));
 
 LE16_BITMASK(BTREE_PTR_RANGE_UPDATED,  struct bch_btree_ptr_v2, flags, 0, 1);
@@ -635,8 +661,8 @@ LE16_BITMASK(BTREE_PTR_RANGE_UPDATED,       struct bch_btree_ptr_v2, flags, 0, 1);
 struct bch_extent {
        struct bch_val          v;
 
-       union bch_extent_entry  start[0];
        __u64                   _data[0];
+       union bch_extent_entry  start[];
 } __attribute__((packed, aligned(8)));
 
 struct bch_reservation {
@@ -681,6 +707,16 @@ struct bch_inode {
        __u8                    fields[0];
 } __attribute__((packed, aligned(8)));
 
+struct bch_inode_v2 {
+       struct bch_val          v;
+
+       __le64                  bi_journal_seq;
+       __le64                  bi_hash_seed;
+       __le64                  bi_flags;
+       __le16                  bi_mode;
+       __u8                    fields[0];
+} __attribute__((packed, aligned(8)));
+
 struct bch_inode_generation {
        struct bch_val          v;
 
@@ -772,6 +808,9 @@ LE32_BITMASK(INODE_STR_HASH,        struct bch_inode, bi_flags, 20, 24);
 LE32_BITMASK(INODE_NR_FIELDS,  struct bch_inode, bi_flags, 24, 31);
 LE32_BITMASK(INODE_NEW_VARINT, struct bch_inode, bi_flags, 31, 32);
 
+LE64_BITMASK(INODEv2_STR_HASH, struct bch_inode_v2, bi_flags, 20, 24);
+LE64_BITMASK(INODEv2_NR_FIELDS,        struct bch_inode_v2, bi_flags, 24, 31);
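
By analogy with BITMASK() above, these LE64_BITMASK() invocations generate endian-aware accessors; INODEv2_NR_FIELDS tells an unpacker how many variable-length fields follow bi_mode. A usage sketch (the helper name is illustrative):

static inline unsigned inode_v2_nr_fields_example(const struct bch_inode_v2 *inode)
{
	/* generated accessor reads bits 24..30 of bi_flags, converting from __le64 */
	return INODEv2_NR_FIELDS(inode);
}
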
+
 /* Dirents */
 
 /*
@@ -789,7 +828,13 @@ struct bch_dirent {
        struct bch_val          v;
 
        /* Target inode number: */
+       union {
        __le64                  d_inum;
+       struct {                /* DT_SUBVOL */
+       __le32                  d_child_subvol;
+       __le32                  d_parent_subvol;
+       };
+       };
 
        /*
         * Copy of mode bits 12-15 from the target inode - so userspace can get
@@ -803,10 +848,9 @@ struct bch_dirent {
 #define DT_SUBVOL      16
 #define BCH_DT_MAX     17
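
With the union above, a dirent of type DT_SUBVOL stores a (parent, child) subvolume pair in place of an inode number. A hedged sketch of how a reader picks the right member (d_type is the dirent's type byte, declared later in bch_dirent and not visible in this hunk; the helper name is illustrative, and the target is assumed to be the child subvolume):

static inline __u64 dirent_target_example(const struct bch_dirent *d)
{
	return d->d_type == DT_SUBVOL
		? le32_to_cpu(d->d_child_subvol)
		: le64_to_cpu(d->d_inum);
}
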
 
-#define BCH_NAME_MAX   (U8_MAX * sizeof(u64) -                         \
+#define BCH_NAME_MAX   ((unsigned) (U8_MAX * sizeof(u64) -             \
                         sizeof(struct bkey) -                          \
-                        offsetof(struct bch_dirent, d_name))
-
+                        offsetof(struct bch_dirent, d_name)))
 
 /* Xattrs */
 
@@ -843,6 +887,12 @@ struct bch_alloc {
        x(stripe,               32)             \
        x(stripe_redundancy,    8)
 
+enum {
+#define x(name, _bits) BCH_ALLOC_FIELD_V1_##name,
+       BCH_ALLOC_FIELDS_V1()
+#undef x
+};
+
 struct bch_alloc_v2 {
        struct bch_val          v;
        __u8                    nr_fields;
@@ -855,17 +905,59 @@ struct bch_alloc_v2 {
 #define BCH_ALLOC_FIELDS_V2()                  \
        x(read_time,            64)             \
        x(write_time,           64)             \
-       x(dirty_sectors,        16)             \
-       x(cached_sectors,       16)             \
+       x(dirty_sectors,        32)             \
+       x(cached_sectors,       32)             \
        x(stripe,               32)             \
        x(stripe_redundancy,    8)
 
-enum {
-#define x(name, _bits) BCH_ALLOC_FIELD_V1_##name,
-       BCH_ALLOC_FIELDS_V1()
-#undef x
-       BCH_ALLOC_FIELD_NR
-};
+struct bch_alloc_v3 {
+       struct bch_val          v;
+       __le64                  journal_seq;
+       __le32                  flags;
+       __u8                    nr_fields;
+       __u8                    gen;
+       __u8                    oldest_gen;
+       __u8                    data_type;
+       __u8                    data[];
+} __attribute__((packed, aligned(8)));
+
+LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags,  0,  1)
+LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags,  1,  2)
+
+struct bch_alloc_v4 {
+       struct bch_val          v;
+       __u64                   journal_seq;
+       __u32                   flags;
+       __u8                    gen;
+       __u8                    oldest_gen;
+       __u8                    data_type;
+       __u8                    stripe_redundancy;
+       __u32                   dirty_sectors;
+       __u32                   cached_sectors;
+       __u64                   io_time[2];
+       __u32                   stripe;
+       __u32                   nr_external_backpointers;
+} __attribute__((packed, aligned(8)));
+
+#define BCH_ALLOC_V4_U64s_V0   6
+#define BCH_ALLOC_V4_U64s      (sizeof(struct bch_alloc_v4) / sizeof(u64))
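
Worked size check, assuming struct bch_val contributes no bytes (as elsewhere in bcachefs): 8 (journal_seq) + 4 (flags) + 4 (gen, oldest_gen, data_type, stripe_redundancy) + 4 + 4 (dirty/cached_sectors) + 16 (io_time) + 4 + 4 (stripe, nr_external_backpointers) = 48 bytes, so BCH_ALLOC_V4_U64s evaluates to 6 here, the same as BCH_ALLOC_V4_U64s_V0.

/* Illustrative checks only, under the zero-sized bch_val assumption: */
_Static_assert(sizeof(struct bch_alloc_v4) == 48, "fixed part of bch_alloc_v4");
_Static_assert(BCH_ALLOC_V4_U64s == 6, "6 u64s at this format version");
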
+
+BITMASK(BCH_ALLOC_V4_NEED_DISCARD,     struct bch_alloc_v4, flags,  0,  1)
+BITMASK(BCH_ALLOC_V4_NEED_INC_GEN,     struct bch_alloc_v4, flags,  1,  2)
+BITMASK(BCH_ALLOC_V4_BACKPOINTERS_START,struct bch_alloc_v4, flags,  2,  8)
+BITMASK(BCH_ALLOC_V4_NR_BACKPOINTERS,  struct bch_alloc_v4, flags,  8,  14)
+
+#define BCH_ALLOC_V4_NR_BACKPOINTERS_MAX       40
+
+struct bch_backpointer {
+       struct bch_val          v;
+       __u8                    btree_id;
+       __u8                    level;
+       __u8                    data_type;
+       __u64                   bucket_offset:40;
+       __u32                   bucket_len;
+       struct bpos             pos;
+} __attribute__((packed, aligned(8)));
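
bch_alloc_v4 keys can carry backpointer entries inline after the fixed struct: BCH_ALLOC_V4_BACKPOINTERS_START() gives their starting offset in u64s from the start of the value, and BCH_ALLOC_V4_NR_BACKPOINTERS() the count (at most 40). A minimal sketch of locating them (the helper name is illustrative):

static inline struct bch_backpointer *
alloc_v4_backpointers_example(struct bch_alloc_v4 *a)
{
	/* inline backpointers start BCH_ALLOC_V4_BACKPOINTERS_START() u64s
	 * into the value; there are BCH_ALLOC_V4_NR_BACKPOINTERS() of them */
	return (void *) ((__u64 *) &a->v + BCH_ALLOC_V4_BACKPOINTERS_START(a));
}
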
 
 /* Quotas: */
 
@@ -905,7 +997,7 @@ struct bch_stripe {
        __u8                    csum_type;
        __u8                    pad;
 
-       struct bch_extent_ptr   ptrs[0];
+       struct bch_extent_ptr   ptrs[];
 } __attribute__((packed, aligned(8)));
 
 /* Reflink: */
@@ -964,6 +1056,7 @@ LE32_BITMASK(BCH_SUBVOLUME_RO,             struct bch_subvolume, flags,  0,  1)
  * can delete it (or whether it should just be rm -rf'd)
  */
 LE32_BITMASK(BCH_SUBVOLUME_SNAP,       struct bch_subvolume, flags,  1,  2)
+LE32_BITMASK(BCH_SUBVOLUME_UNLINKED,   struct bch_subvolume, flags,  2,  3)
 
 /* Snapshots */
 
@@ -981,6 +1074,15 @@ LE32_BITMASK(BCH_SNAPSHOT_DELETED,        struct bch_snapshot, flags,  0,  1)
 /* True if a subvolume points to this snapshot node: */
 LE32_BITMASK(BCH_SNAPSHOT_SUBVOL,      struct bch_snapshot, flags,  1,  2)
 
+/* LRU btree: */
+
+struct bch_lru {
+       struct bch_val          v;
+       __le64                  idx;
+} __attribute__((packed, aligned(8)));
+
+#define LRU_ID_STRIPES         (1U << 16)
+
 /* Optional/variable size superblock sections: */
 
 struct bch_sb_field {
@@ -989,16 +1091,18 @@ struct bch_sb_field {
        __le32                  type;
 };
 
-#define BCH_SB_FIELDS()                \
-       x(journal,      0)      \
-       x(members,      1)      \
-       x(crypt,        2)      \
-       x(replicas_v0,  3)      \
-       x(quota,        4)      \
-       x(disk_groups,  5)      \
-       x(clean,        6)      \
-       x(replicas,     7)      \
-       x(journal_seq_blacklist, 8)
+#define BCH_SB_FIELDS()                                \
+       x(journal,      0)                      \
+       x(members,      1)                      \
+       x(crypt,        2)                      \
+       x(replicas_v0,  3)                      \
+       x(quota,        4)                      \
+       x(disk_groups,  5)                      \
+       x(clean,        6)                      \
+       x(replicas,     7)                      \
+       x(journal_seq_blacklist, 8)             \
+       x(journal_v2,   9)                      \
+       x(counters,     10)
 
 enum bch_sb_field_type {
 #define x(f, nr)       BCH_SB_FIELD_##f = nr,
@@ -1007,6 +1111,14 @@ enum bch_sb_field_type {
        BCH_SB_FIELD_NR
 };
 
+/*
+ * Most superblock fields are replicated in all devices' superblocks - a few
+ * are not:
+ */
+#define BCH_SINGLE_DEVICE_SB_FIELDS            \
+       ((1U << BCH_SB_FIELD_journal)|          \
+        (1U << BCH_SB_FIELD_journal_v2))
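
A sketch of the intended use: when superblock fields are synced across a filesystem's member devices, any field whose bit is set in this mask is per-device and is left alone (the helper name is illustrative):

static inline bool sb_field_is_single_device_example(enum bch_sb_field_type type)
{
	return (BCH_SINGLE_DEVICE_SB_FIELDS & (1U << type)) != 0;
}
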
+
 /* BCH_SB_FIELD_journal: */
 
 struct bch_sb_field_journal {
@@ -1014,6 +1126,15 @@ struct bch_sb_field_journal {
        __le64                  buckets[0];
 };
 
+struct bch_sb_field_journal_v2 {
+       struct bch_sb_field     field;
+
+       struct bch_sb_field_journal_v2_entry {
+               __le64          start;
+               __le64          nr;
+       }                       d[0];
+};
+
 /* BCH_SB_FIELD_members: */
 
 #define BCH_MIN_NR_NBUCKETS    (1 << 6)
@@ -1030,12 +1151,13 @@ struct bch_member {
 };
 
 LE64_BITMASK(BCH_MEMBER_STATE,         struct bch_member, flags[0],  0,  4)
-/* 4-10 unused, was TIER, HAS_(META)DATA */
-LE64_BITMASK(BCH_MEMBER_REPLACEMENT,   struct bch_member, flags[0], 10, 14)
+/* 4-14 unused, was TIER, HAS_(META)DATA, REPLACEMENT */
 LE64_BITMASK(BCH_MEMBER_DISCARD,       struct bch_member, flags[0], 14, 15)
 LE64_BITMASK(BCH_MEMBER_DATA_ALLOWED,  struct bch_member, flags[0], 15, 20)
 LE64_BITMASK(BCH_MEMBER_GROUP,         struct bch_member, flags[0], 20, 28)
 LE64_BITMASK(BCH_MEMBER_DURABILITY,    struct bch_member, flags[0], 28, 30)
+LE64_BITMASK(BCH_MEMBER_FREESPACE_INITIALIZED,
+                                       struct bch_member, flags[0], 30, 31)
 
 #if 0
 LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS,        struct bch_member, flags[1], 0,  20);
@@ -1055,18 +1177,6 @@ enum bch_member_state {
        BCH_MEMBER_STATE_NR
 };
 
-#define BCH_CACHE_REPLACEMENT_POLICIES()       \
-       x(lru,          0)                      \
-       x(fifo,         1)                      \
-       x(random,       2)
-
-enum bch_cache_replacement_policies {
-#define x(t, n) BCH_CACHE_REPLACEMENT_##t = n,
-       BCH_CACHE_REPLACEMENT_POLICIES()
-#undef x
-       BCH_CACHE_REPLACEMENT_NR
-};
-
 struct bch_sb_field_members {
        struct bch_sb_field     field;
        struct bch_member       members[0];
@@ -1123,13 +1233,16 @@ LE64_BITMASK(BCH_KDF_SCRYPT_P,  struct bch_sb_field_crypt, kdf_flags, 32, 48);
 /* BCH_SB_FIELD_replicas: */
 
 #define BCH_DATA_TYPES()               \
-       x(none,         0)              \
+       x(free,         0)              \
        x(sb,           1)              \
        x(journal,      2)              \
        x(btree,        3)              \
        x(user,         4)              \
        x(cached,       5)              \
-       x(parity,       6)
+       x(parity,       6)              \
+       x(stripe,       7)              \
+       x(need_gc_gens, 8)              \
+       x(need_discard, 9)
 
 enum bch_data_type {
 #define x(t, n) BCH_DATA_##t,
@@ -1138,22 +1251,45 @@ enum bch_data_type {
        BCH_DATA_NR
 };
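
With the new entries, the enum above expands to the equivalent of the following (x() drops the numeric argument, but the list is ordered so the implicit values match):

enum bch_data_type {
	BCH_DATA_free		= 0,
	BCH_DATA_sb		= 1,
	BCH_DATA_journal	= 2,
	BCH_DATA_btree		= 3,
	BCH_DATA_user		= 4,
	BCH_DATA_cached		= 5,
	BCH_DATA_parity		= 6,
	BCH_DATA_stripe		= 7,
	BCH_DATA_need_gc_gens	= 8,
	BCH_DATA_need_discard	= 9,
	BCH_DATA_NR		= 10,
};
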
 
+static inline bool data_type_is_empty(enum bch_data_type type)
+{
+       switch (type) {
+       case BCH_DATA_free:
+       case BCH_DATA_need_gc_gens:
+       case BCH_DATA_need_discard:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static inline bool data_type_is_hidden(enum bch_data_type type)
+{
+       switch (type) {
+       case BCH_DATA_sb:
+       case BCH_DATA_journal:
+               return true;
+       default:
+               return false;
+       }
+}
+
 struct bch_replicas_entry_v0 {
        __u8                    data_type;
        __u8                    nr_devs;
-       __u8                    devs[0];
+       __u8                    devs[];
 } __attribute__((packed));
 
 struct bch_sb_field_replicas_v0 {
        struct bch_sb_field     field;
-       struct bch_replicas_entry_v0 entries[0];
+       struct bch_replicas_entry_v0 entries[];
 } __attribute__((packed, aligned(8)));
 
 struct bch_replicas_entry {
        __u8                    data_type;
        __u8                    nr_devs;
        __u8                    nr_required;
-       __u8                    devs[0];
+       __u8                    devs[];
 } __attribute__((packed));
 
 #define replicas_entry_bytes(_i)                                       \
@@ -1199,6 +1335,27 @@ struct bch_sb_field_disk_groups {
        struct bch_disk_group   entries[0];
 } __attribute__((packed, aligned(8)));
 
+/* BCH_SB_FIELD_counters */
+
+#define BCH_PERSISTENT_COUNTERS()                      \
+       x(io_read,              0)                      \
+       x(io_write,             1)                      \
+       x(io_move,              2)                      \
+       x(bucket_invalidate,    3)                      \
+       x(bucket_discard,       4)
+
+enum bch_persistent_counters {
+#define x(t, n, ...) BCH_COUNTER_##t,
+       BCH_PERSISTENT_COUNTERS()
+#undef x
+       BCH_COUNTER_NR
+};
+
+struct bch_sb_field_counters {
+       struct bch_sb_field     field;
+       __le64                  d[0];
+};
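
The persistent counters are stored as a flat __le64 array in the superblock, presumably indexed by enum bch_persistent_counters. A hedged read sketch that also guards against an older superblock carrying fewer counters than this build knows about (the helper name is illustrative):

static inline __u64 sb_counter_get_example(struct bch_sb_field_counters *ctrs,
					   enum bch_persistent_counters id)
{
	unsigned nr = (vstruct_bytes(&ctrs->field) - sizeof(*ctrs)) / sizeof(__le64);

	return id < nr ? le64_to_cpu(ctrs->d[id]) : 0;
}
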
+
 /*
  * On clean shutdown, store btree roots and current journal sequence number in
  * the superblock:
@@ -1254,17 +1411,27 @@ struct bch_sb_field_journal_seq_blacklist {
 #define BCH_JSET_VERSION_OLD                   2
 #define BCH_BSET_VERSION_OLD                   3
 
+#define BCH_METADATA_VERSIONS()                                \
+       x(bkey_renumber,                10)             \
+       x(inode_btree_change,           11)             \
+       x(snapshot,                     12)             \
+       x(inode_backpointers,           13)             \
+       x(btree_ptr_sectors_written,    14)             \
+       x(snapshot_2,                   15)             \
+       x(reflink_p_fix,                16)             \
+       x(subvol_dirent,                17)             \
+       x(inode_v2,                     18)             \
+       x(freespace,                    19)             \
+       x(alloc_v4,                     20)             \
+       x(new_data_types,               21)             \
+       x(backpointers,                 22)
+
 enum bcachefs_metadata_version {
-       bcachefs_metadata_version_min                   = 9,
-       bcachefs_metadata_version_new_versioning        = 10,
-       bcachefs_metadata_version_bkey_renumber         = 10,
-       bcachefs_metadata_version_inode_btree_change    = 11,
-       bcachefs_metadata_version_snapshot              = 12,
-       bcachefs_metadata_version_inode_backpointers    = 13,
-       bcachefs_metadata_version_btree_ptr_sectors_written = 14,
-       bcachefs_metadata_version_snapshot_2            = 15,
-       bcachefs_metadata_version_reflink_p_fix         = 16,
-       bcachefs_metadata_version_max                   = 17,
+       bcachefs_metadata_version_min = 9,
+#define x(t, n)        bcachefs_metadata_version_##t = n,
+       BCH_METADATA_VERSIONS()
+#undef x
+       bcachefs_metadata_version_max
 };
 
 #define bcachefs_metadata_version_current      (bcachefs_metadata_version_max - 1)
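
Worked expansion: _min stays 9, the x-macro pins each named version to its historical number (10 through 22), and _max, having no explicit value, becomes 23, so bcachefs_metadata_version_current resolves to 22 (backpointers). Roughly:

enum bcachefs_metadata_version {
	bcachefs_metadata_version_min			= 9,
	bcachefs_metadata_version_bkey_renumber		= 10,
	/* ... */
	bcachefs_metadata_version_backpointers		= 22,
	bcachefs_metadata_version_max			= 23,	/* implicit */
};
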
@@ -1401,6 +1568,11 @@ LE64_BITMASK(BCH_SB_ERASURE_CODE,        struct bch_sb, flags[3],  0, 16);
 LE64_BITMASK(BCH_SB_METADATA_TARGET,   struct bch_sb, flags[3], 16, 28);
 LE64_BITMASK(BCH_SB_SHARD_INUMS,       struct bch_sb, flags[3], 28, 29);
 LE64_BITMASK(BCH_SB_INODES_USE_KEY_CACHE,struct bch_sb, flags[3], 29, 30);
+LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DELAY,struct bch_sb, flags[3], 30, 62);
+LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DISABLED,struct bch_sb, flags[3], 62, 63);
+LE64_BITMASK(BCH_SB_JOURNAL_RECLAIM_DELAY,struct bch_sb, flags[4], 0, 32);
+/* Obsolete, always enabled: */
+LE64_BITMASK(BCH_SB_JOURNAL_TRANSACTION_NAMES,struct bch_sb, flags[4], 32, 33);
 
 /*
  * Features:
@@ -1408,7 +1580,7 @@ LE64_BITMASK(BCH_SB_INODES_USE_KEY_CACHE,struct bch_sb, flags[3], 29, 30);
  * journal_seq_blacklist_v3:   gates BCH_SB_FIELD_journal_seq_blacklist
  * reflink:                    gates KEY_TYPE_reflink
  * inline_data:                        gates KEY_TYPE_inline_data
- * new_siphash:                        gates BCH_STR_HASH_SIPHASH
+ * new_siphash:                        gates BCH_STR_HASH_siphash
  * new_extent_overwrite:       gates BTREE_NODE_NEW_EXTENT_OVERWRITE
  */
 #define BCH_SB_FEATURES()                      \
@@ -1484,12 +1656,17 @@ enum bch_error_actions {
        BCH_ON_ERROR_NR
 };
 
+#define BCH_STR_HASH_TYPES()           \
+       x(crc32c,               0)      \
+       x(crc64,                1)      \
+       x(siphash_old,          2)      \
+       x(siphash,              3)
+
 enum bch_str_hash_type {
-       BCH_STR_HASH_CRC32C             = 0,
-       BCH_STR_HASH_CRC64              = 1,
-       BCH_STR_HASH_SIPHASH_OLD        = 2,
-       BCH_STR_HASH_SIPHASH            = 3,
-       BCH_STR_HASH_NR                 = 4,
+#define x(t, n) BCH_STR_HASH_##t = n,
+       BCH_STR_HASH_TYPES()
+#undef x
+       BCH_STR_HASH_NR
 };
 
 #define BCH_STR_HASH_OPTS()            \
@@ -1504,34 +1681,39 @@ enum bch_str_hash_opts {
        BCH_STR_HASH_OPT_NR
 };
 
+#define BCH_CSUM_TYPES()                       \
+       x(none,                         0)      \
+       x(crc32c_nonzero,               1)      \
+       x(crc64_nonzero,                2)      \
+       x(chacha20_poly1305_80,         3)      \
+       x(chacha20_poly1305_128,        4)      \
+       x(crc32c,                       5)      \
+       x(crc64,                        6)      \
+       x(xxhash,                       7)
+
 enum bch_csum_type {
-       BCH_CSUM_NONE                   = 0,
-       BCH_CSUM_CRC32C_NONZERO         = 1,
-       BCH_CSUM_CRC64_NONZERO          = 2,
-       BCH_CSUM_CHACHA20_POLY1305_80   = 3,
-       BCH_CSUM_CHACHA20_POLY1305_128  = 4,
-       BCH_CSUM_CRC32C                 = 5,
-       BCH_CSUM_CRC64                  = 6,
-       BCH_CSUM_XXHASH                 = 7,
-       BCH_CSUM_NR                     = 8,
+#define x(t, n) BCH_CSUM_##t = n,
+       BCH_CSUM_TYPES()
+#undef x
+       BCH_CSUM_NR
 };
 
 static const unsigned bch_crc_bytes[] = {
-       [BCH_CSUM_NONE]                         = 0,
-       [BCH_CSUM_CRC32C_NONZERO]               = 4,
-       [BCH_CSUM_CRC32C]                       = 4,
-       [BCH_CSUM_CRC64_NONZERO]                = 8,
-       [BCH_CSUM_CRC64]                        = 8,
-       [BCH_CSUM_XXHASH]                       = 8,
-       [BCH_CSUM_CHACHA20_POLY1305_80]         = 10,
-       [BCH_CSUM_CHACHA20_POLY1305_128]        = 16,
+       [BCH_CSUM_none]                         = 0,
+       [BCH_CSUM_crc32c_nonzero]               = 4,
+       [BCH_CSUM_crc32c]                       = 4,
+       [BCH_CSUM_crc64_nonzero]                = 8,
+       [BCH_CSUM_crc64]                        = 8,
+       [BCH_CSUM_xxhash]                       = 8,
+       [BCH_CSUM_chacha20_poly1305_80]         = 10,
+       [BCH_CSUM_chacha20_poly1305_128]        = 16,
 };
 
 static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type)
 {
        switch (type) {
-       case BCH_CSUM_CHACHA20_POLY1305_80:
-       case BCH_CSUM_CHACHA20_POLY1305_128:
+       case BCH_CSUM_chacha20_poly1305_80:
+       case BCH_CSUM_chacha20_poly1305_128:
                return true;
        default:
                return false;
@@ -1625,7 +1807,9 @@ static inline __u64 __bset_magic(struct bch_sb *sb)
        x(usage,                5)              \
        x(data_usage,           6)              \
        x(clock,                7)              \
-       x(dev_usage,            8)
+       x(dev_usage,            8)              \
+       x(log,                  9)              \
+       x(overwrite,            10)
 
 enum {
 #define x(f, nr)       BCH_JSET_ENTRY_##f      = nr,
@@ -1655,11 +1839,16 @@ struct jset_entry_blacklist_v2 {
        __le64                  end;
 };
 
+#define BCH_FS_USAGE_TYPES()                   \
+       x(reserved,             0)              \
+       x(inodes,               1)              \
+       x(key_version,          2)
+
 enum {
-       FS_USAGE_RESERVED               = 0,
-       FS_USAGE_INODES                 = 1,
-       FS_USAGE_KEY_VERSION            = 2,
-       FS_USAGE_NR                     = 3
+#define x(f, nr)       BCH_FS_USAGE_##f        = nr,
+       BCH_FS_USAGE_TYPES()
+#undef x
+       BCH_FS_USAGE_NR
 };
 
 struct jset_entry_usage {
@@ -1692,11 +1881,22 @@ struct jset_entry_dev_usage {
        __u32                   pad;
 
        __le64                  buckets_ec;
-       __le64                  buckets_unavailable;
+       __le64                  _buckets_unavailable; /* No longer used */
 
        struct jset_entry_dev_usage_type d[];
 } __attribute__((packed));
 
+static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u)
+{
+       return (vstruct_bytes(&u->entry) - sizeof(struct jset_entry_dev_usage)) /
+               sizeof(struct jset_entry_dev_usage_type);
+}
+
+struct jset_entry_log {
+       struct jset_entry       entry;
+       u8                      d[];
+} __attribute__((packed));
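
Log entries carry a free-form message in d[]; its length is recovered from the entry's size in the same way jset_entry_dev_usage_nr_types() above derives its count. A minimal sketch (the helper name is an assumption, not from this diff):

static inline unsigned jset_entry_log_msg_bytes_example(struct jset_entry_log *l)
{
	return vstruct_bytes(&l->entry) - offsetof(struct jset_entry_log, d);
}
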
+
 /*
  * On disk format for a journal entry:
  * seq is monotonically increasing; every journal entry has its own unique
@@ -1750,7 +1950,11 @@ LE32_BITMASK(JSET_NO_FLUSH,      struct jset, flags, 5, 6);
        x(stripes,      6)                      \
        x(reflink,      7)                      \
        x(subvolumes,   8)                      \
-       x(snapshots,    9)
+       x(snapshots,    9)                      \
+       x(lru,          10)                     \
+       x(freespace,    11)                     \
+       x(need_discard, 12)                     \
+       x(backpointers, 13)
 
 enum btree_id {
 #define x(kwd, val) BTREE_ID_##kwd = val,