#ifndef _LINUX_BCACHE_H
#define _LINUX_BCACHE_H

/*
 * Bcache on disk data structures
 */

#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/uuid.h>
#define LE32_BITMASK(name, type, field, offset, end)			\
static const unsigned	name##_OFFSET = offset;				\
static const unsigned	name##_BITS = (end - offset);			\
static const __u64	name##_MAX = (1ULL << (end - offset)) - 1;	\
									\
static inline __u64 name(const type *k)				\
{									\
	return (__le32_to_cpu(k->field) >> offset) &			\
		~(~0ULL << (end - offset));				\
}									\
									\
static inline void SET_##name(type *k, __u64 v)				\
{									\
	__u64 new = __le32_to_cpu(k->field);				\
									\
	new &= ~(~(~0ULL << (end - offset)) << offset);			\
	new |= (v & ~(~0ULL << (end - offset))) << offset;		\
	k->field = __cpu_to_le32(new);					\
}
#define LE64_BITMASK(name, type, field, offset, end)			\
static const unsigned	name##_OFFSET = offset;				\
static const unsigned	name##_BITS = (end - offset);			\
static const __u64	name##_MAX = (1ULL << (end - offset)) - 1;	\
									\
static inline __u64 name(const type *k)				\
{									\
	return (__le64_to_cpu(k->field) >> offset) &			\
		~(~0ULL << (end - offset));				\
}									\
									\
static inline void SET_##name(type *k, __u64 v)				\
{									\
	__u64 new = __le64_to_cpu(k->field);				\
									\
	new &= ~(~(~0ULL << (end - offset)) << offset);			\
	new |= (v & ~(~0ULL << (end - offset))) << offset;		\
	k->field = __cpu_to_le64(new);					\
}
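/*
 * Illustrative usage of the bitmask macros above (the struct and field names
 * here are hypothetical, not part of the on-disk format): this invocation
 * generates EXAMPLE_FIELD() and SET_EXAMPLE_FIELD() accessors for bits 0-3 of
 * a little-endian flags word, plus EXAMPLE_FIELD_OFFSET/_BITS/_MAX constants.
 */
struct bitmask_example { __le64 flags; };
LE64_BITMASK(EXAMPLE_FIELD, struct bitmask_example, flags, 0, 4)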
struct bkey_format {
	__u8		key_u64s;
	__u8		nr_fields;
	/* One unused slot for now: */
	__u8		bits_per_field[6];
	__le64		field_offset[6];
};
/* Btree keys - all units are in sectors */

struct bpos {
	/* Word order matches machine byte order */
#if defined(__LITTLE_ENDIAN)
	__u32		snapshot;
	__u64		offset;
	__u64		inode;
#elif defined(__BIG_ENDIAN)
	__u64		inode;
	__u64		offset;		/* Points to end of extent - sectors */
	__u32		snapshot;
#else
#error edit for your odd byteorder.
#endif
} __attribute__((packed, aligned(4)));
#define KEY_INODE_MAX			((__u64)~0ULL)
#define KEY_OFFSET_MAX			((__u64)~0ULL)
#define KEY_SNAPSHOT_MAX		((__u32)~0U)
static inline struct bpos POS(__u64 inode, __u64 offset)
{
	struct bpos ret;

	ret.inode	= inode;
	ret.offset	= offset;
	ret.snapshot	= 0;

	return ret;
}

#define POS_MIN		POS(0, 0)
#define POS_MAX		POS(KEY_INODE_MAX, KEY_OFFSET_MAX)
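/*
 * Illustrative only: since bpos.offset points at the *end* of an extent (see
 * struct bpos above), an extent covering sectors [start, start + size) of an
 * inode is keyed at POS(inode, start + size). Hypothetical helper:
 */
static inline struct bpos extent_end_pos_example(__u64 inode, __u64 start, __u64 size)
{
	return POS(inode, start + size);
}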
/* Empty placeholder struct, for container_of() */
struct bch_val {
	__u64		__nothing[0];
};

struct bversion {
#if defined(__LITTLE_ENDIAN)
	__u64		lo;
	__u32		hi;
#elif defined(__BIG_ENDIAN)
	__u32		hi;
	__u64		lo;
#endif
} __attribute__((packed, aligned(4)));
struct bkey {
	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#else
#error edit for your odd byteorder.
#endif

	/* Type of the value */
	__u8		type;

#if defined(__LITTLE_ENDIAN)
	__u8		pad[1];

	struct bversion	version;
	__u32		size;		/* extent size, in sectors */
	struct bpos	p;
#elif defined(__BIG_ENDIAN)
	struct bpos	p;
	__u32		size;		/* extent size, in sectors */
	struct bversion	version;

	__u8		pad[1];
#endif
} __attribute__((packed, aligned(8)));
struct bkey_packed {
	__u64		_data[0];

	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */

	/*
	 * XXX: next incompat on disk format change, switch format and
	 * needs_whiteout - bkey_packed() will be cheaper if format is the high
	 * bits of the bitfield
	 */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#endif

	/* Type of the value */
	__u8		type;
	__u8		key_start[0];

	/*
	 * We copy bkeys with struct assignment in various places, and while
	 * that shouldn't be done with packed bkeys we can't disallow it in C,
	 * and it's legal to cast a bkey to a bkey_packed - so padding it out
	 * to the same size as struct bkey should hopefully be safest.
	 */
	__u8		pad[sizeof(struct bkey) - 3];
} __attribute__((packed, aligned(8)));
#define BKEY_U64s			(sizeof(struct bkey) / sizeof(__u64))
#define KEY_PACKED_BITS_START		24

#define KEY_SIZE_MAX			((__u32)~0U)

#define KEY_FORMAT_LOCAL_BTREE		0
#define KEY_FORMAT_CURRENT		1
enum bch_bkey_fields {
	BKEY_FIELD_INODE,
	BKEY_FIELD_OFFSET,
	BKEY_FIELD_SNAPSHOT,
	BKEY_FIELD_SIZE,
	BKEY_FIELD_VERSION_HI,
	BKEY_FIELD_VERSION_LO,
	BKEY_NR_FIELDS,
};
#define bkey_format_field(name, field)					\
	[BKEY_FIELD_##name] = (sizeof(((struct bkey *) NULL)->field) * 8)

#define BKEY_FORMAT_CURRENT						\
((struct bkey_format) {							\
	.key_u64s	= BKEY_U64s,					\
	.nr_fields	= BKEY_NR_FIELDS,				\
	.bits_per_field = {						\
		bkey_format_field(INODE,	p.inode),		\
		bkey_format_field(OFFSET,	p.offset),		\
		bkey_format_field(SNAPSHOT,	p.snapshot),		\
		bkey_format_field(SIZE,		size),			\
		bkey_format_field(VERSION_HI,	version.hi),		\
		bkey_format_field(VERSION_LO,	version.lo),		\
	},								\
})
/* bkey with inline value */
struct bkey_i {
	__u64			_data[0];

	union {
	struct {
		/* Size of combined key and value, in u64s */
		__u8	u64s;
	};
	struct {
		struct bkey	k;
		struct bch_val	v;
	};
	};
};
#ifndef __cplusplus

#define KEY(_inode, _offset, _size)					\
((struct bkey) {							\
	.u64s		= BKEY_U64s,					\
	.format		= KEY_FORMAT_CURRENT,				\
	.p		= POS(_inode, _offset),				\
	.size		= _size,					\
})

#else
static inline struct bkey KEY(__u64 inode, __u64 offset, __u64 size)
{
	struct bkey ret;

	memset(&ret, 0, sizeof(ret));
	ret.u64s	= BKEY_U64s;
	ret.format	= KEY_FORMAT_CURRENT;
	ret.p.inode	= inode;
	ret.p.offset	= offset;
	ret.size	= size;

	return ret;
}

#endif
static inline void bkey_init(struct bkey *k)
{
	*k = KEY(0, 0, 0);
}

#define bkey_bytes(_k)		((_k)->u64s * sizeof(__u64))
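/*
 * Illustrative only: u64s counts the combined key and value, so the size of
 * the value alone is the total minus the fixed bkey header. Hypothetical
 * helper, not part of this header.
 */
static inline unsigned bkey_val_u64s_example(const struct bkey *k)
{
	return k->u64s - BKEY_U64s;
}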
#define __BKEY_PADDED(key, pad)					\
	struct { struct bkey_i key; __u64 key ## _pad[pad]; }

#define BKEY_VAL_TYPE(name, nr)						\
struct bkey_i_##name {							\
	union {								\
		struct bkey		k;				\
		struct bkey_i		k_i;				\
	};								\
	struct bch_##name		v;				\
};
/*
 * - DELETED keys are used internally to mark keys that should be ignored but
 *   override keys in composition order. Their version number is ignored.
 *
 * - DISCARDED keys indicate that the data is all 0s because it has been
 *   discarded. DISCARDs may have a version; if the version is nonzero the key
 *   will be persistent, otherwise the key will be dropped whenever the btree
 *   node is rewritten (like DELETED keys).
 *
 * - ERROR: any read of the data returns a read error, as the data was lost due
 *   to a failing device. Like DISCARDED keys, they can be removed (overridden)
 *   by new writes or cluster-wide GC. Node repair can also overwrite them with
 *   the same or a more recent version number, but not with an older version
 *   number.
 */
#define KEY_TYPE_DELETED		0
#define KEY_TYPE_DISCARD		1
#define KEY_TYPE_ERROR			2
#define KEY_TYPE_COOKIE			3
#define KEY_TYPE_PERSISTENT_DISCARD	4
#define KEY_TYPE_GENERIC_NR		128
struct bch_cookie {
	struct bch_val		v;
	__le64			cookie;
};
BKEY_VAL_TYPE(cookie,		KEY_TYPE_COOKIE);
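/*
 * Illustrative usage of the typed-key wrappers generated by BKEY_VAL_TYPE()
 * (hypothetical helper, not part of this header): initializing a cookie key
 * means sizing u64s to cover both the bkey header and the inline value.
 */
static inline void bch_cookie_init_example(struct bkey_i_cookie *k, __u64 cookie)
{
	bkey_init(&k->k);
	k->k.type	= KEY_TYPE_COOKIE;
	k->k.u64s	= BKEY_U64s + sizeof(struct bch_cookie) / sizeof(__u64);
	k->v.cookie	= __cpu_to_le64(cookie);
}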
/* Extents */

/*
 * In extent bkeys, the value is a list of pointers (bch_extent_ptr), optionally
 * preceded by checksum/compression information (bch_extent_crc32 or
 * bch_extent_crc64).
 *
 * One major determining factor in the format of extents is how we handle and
 * represent extents that have been partially overwritten and thus trimmed:
 *
 * If an extent is not checksummed or compressed, when the extent is trimmed we
 * don't have to remember the extent we originally allocated and wrote: we can
 * merely adjust ptr->offset to point to the start of the data that is
 * currently live. The size field in struct bkey records the current (live)
 * size of the extent, and is also used to mean "size of region on disk that we
 * point to" in this case.
 *
 * Thus an extent that is not checksummed or compressed will consist only of a
 * list of bch_extent_ptrs, with none of the fields in
 * bch_extent_crc32/bch_extent_crc64.
 *
 * When an extent is checksummed or compressed, it's not possible to read only
 * the data that is currently live: we have to read the entire extent that was
 * originally written, and then return only the part of the extent that is
 * currently live.
 *
 * Thus, in addition to the current size of the extent in struct bkey, we need
 * to store the size of the originally allocated space - this is the
 * compressed_size and uncompressed_size fields in bch_extent_crc32/64. Also,
 * when the extent is trimmed, instead of modifying the offset field of the
 * pointer, we keep a second smaller offset field - "offset into the original
 * extent of the currently live region".
 *
 * The other major determining factor is replication and data migration:
 *
 * Each pointer may have its own bch_extent_crc32/64. When doing a replicated
 * write, we will initially write all the replicas in the same format, with the
 * same checksum type and compression format - however, when copygc runs later
 * (or tiering/cache promotion, anything that moves data), it is not in general
 * going to rewrite all the pointers at once - one of the replicas may be in a
 * bucket on one device that has very little fragmentation while another lives
 * in a bucket that has become heavily fragmented, and thus is being rewritten
 * sooner than the rest.
 *
 * Thus it will only move a subset of the pointers (or in the case of
 * tiering/cache promotion perhaps add a single pointer without dropping any
 * current pointers), and if the extent has been partially overwritten it must
 * write only the currently live portion (or copygc would not be able to reduce
 * fragmentation!) - which necessitates a different bch_extent_crc format for
 * the different pointers.
 *
 * But in the interests of space efficiency, we don't want to store one
 * bch_extent_crc for each pointer if we don't have to.
 *
 * Thus, a bch_extent consists of bch_extent_crc32s, bch_extent_crc64s, and
 * bch_extent_ptrs appended arbitrarily one after the other. We determine the
 * type of a given entry with a scheme similar to utf8 (except we're encoding a
 * type, not a size), encoding the type in the position of the first set bit:
 *
 * bch_extent_crc32	- 0b1
 * bch_extent_ptr	- 0b10
 * bch_extent_crc64	- 0b100
 *
 * We do it this way because bch_extent_crc32 is _very_ constrained on bits (and
 * bch_extent_crc64 is the least constrained).
 *
 * Then, each bch_extent_crc32/64 applies to the pointers that follow after it,
 * until the next bch_extent_crc32/64.
 *
 * If there are no bch_extent_crcs preceding a bch_extent_ptr, then that pointer
 * is neither checksummed nor compressed.
 */
/* 128 bits, sufficient for cryptographic MACs: */
struct bch_csum {
	__le64			lo;
	__le64			hi;
} __attribute__((packed, aligned(8)));
#define BCH_CSUM_NONE			0U
#define BCH_CSUM_CRC32C			1U
#define BCH_CSUM_CRC64			2U
#define BCH_CSUM_CHACHA20_POLY1305_80	3U
#define BCH_CSUM_CHACHA20_POLY1305_128	4U
#define BCH_CSUM_NR			5U
static inline _Bool bch_csum_type_is_encryption(unsigned type)
{
	switch (type) {
	case BCH_CSUM_CHACHA20_POLY1305_80:
	case BCH_CSUM_CHACHA20_POLY1305_128:
		return 1;
	default:
		return 0;
	}
}
enum bch_extent_entry_type {
	BCH_EXTENT_ENTRY_ptr		= 0,
	BCH_EXTENT_ENTRY_crc32		= 1,
	BCH_EXTENT_ENTRY_crc64		= 2,
	BCH_EXTENT_ENTRY_crc128		= 3,
};

#define BCH_EXTENT_ENTRY_MAX	4
/* Compressed/uncompressed size are stored biased by 1: */
struct bch_extent_crc32 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u32			type:2,
				_compressed_size:7,
				_uncompressed_size:7,
				offset:7,
				_unused:1,
				csum_type:4,
				compression_type:4;
	__u32			csum;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u32			csum;
	__u32			compression_type:4,
				csum_type:4,
				_unused:1,
				offset:7,
				_uncompressed_size:7,
				_compressed_size:7,
				type:2;
#endif
} __attribute__((packed, aligned(8)));

#define CRC32_SIZE_MAX		(1U << 7)
#define CRC32_NONCE_MAX		0
struct bch_extent_crc64 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:3,
				_compressed_size:9,
				_uncompressed_size:9,
				offset:9,
				nonce:10,
				csum_type:4,
				compression_type:4,
				csum_hi:16;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			csum_hi:16,
				compression_type:4,
				csum_type:4,
				nonce:10,
				offset:9,
				_uncompressed_size:9,
				_compressed_size:9,
				type:3;
#endif
	__u64			csum_lo;
} __attribute__((packed, aligned(8)));

#define CRC64_SIZE_MAX		(1U << 9)
#define CRC64_NONCE_MAX		((1U << 10) - 1)
struct bch_extent_crc128 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:4,
				_compressed_size:13,
				_uncompressed_size:13,
				offset:13,
				nonce:13,
				csum_type:4,
				compression_type:4;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			compression_type:4,
				csum_type:4,
				nonce:13,
				offset:13,
				_uncompressed_size:13,
				_compressed_size:13,
				type:4;
#endif
	struct bch_csum		csum;
} __attribute__((packed, aligned(8)));

#define CRC128_SIZE_MAX		(1U << 13)
#define CRC128_NONCE_MAX	((1U << 13) - 1)
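/*
 * Illustrative only: per the comment above bch_extent_crc32, the size fields
 * in all the crc entries are stored biased by 1, so decoding adds the bias
 * back. Hypothetical helper, not part of this header.
 */
static inline unsigned crc32_uncompressed_size_example(const struct bch_extent_crc32 *crc)
{
	return crc->_uncompressed_size + 1;
}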
/*
 * Max size of an extent that may require bouncing to read or write
 * (checksummed, compressed): 64k
 */
#define BCH_ENCODED_EXTENT_MAX	128U
/*
 * @reservation - pointer hasn't been written to, just reserved
 */
struct bch_extent_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:1,
				cached:1,
				erasure_coded:1,
				reservation:1,
				offset:44, /* 8 petabytes */
				dev:8,
				gen:8;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			gen:8,
				dev:8,
				offset:44,
				reservation:1,
				erasure_coded:1,
				cached:1,
				type:1;
#endif
} __attribute__((packed, aligned(8)));
struct bch_extent_reservation {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:5,
				unused:23,
				replicas:4,
				generation:32;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			generation:32,
				replicas:4,
				unused:23,
				type:5;
#endif
};
union bch_extent_entry {
#if defined(__LITTLE_ENDIAN) || __BITS_PER_LONG == 64
	unsigned long			type;
#elif __BITS_PER_LONG == 32
	struct {
		unsigned long		pad;
		unsigned long		type;
	};
#else
#error edit for your odd byteorder.
#endif
	struct bch_extent_crc32		crc32;
	struct bch_extent_crc64		crc64;
	struct bch_extent_crc128	crc128;
	struct bch_extent_ptr		ptr;
};
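/*
 * Illustrative only: per the extents comment above, an entry's kind is
 * encoded in the position of the lowest set bit of its first word.
 * __builtin_ctzl() stands in here for the kernel's __ffs(); this helper is
 * hypothetical, not part of this header.
 */
static inline unsigned extent_entry_type_bit_example(const union bch_extent_entry *e)
{
	return __builtin_ctzl(e->type);
}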
enum {
	BCH_EXTENT		= 128,

	/*
	 * This is kind of a hack, we're overloading the type for a boolean
	 * that really should be part of the value - BCH_EXTENT and
	 * BCH_EXTENT_CACHED have the same value type:
	 */
	BCH_EXTENT_CACHED	= 129,

	/*
	 * Persistent reservation:
	 */
	BCH_RESERVATION		= 130,
};

struct bch_extent {
	struct bch_val		v;

	union bch_extent_entry	start[0];
	__u64			_data[0];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(extent,		BCH_EXTENT);
struct bch_reservation {
	struct bch_val		v;

	__le32			generation;
	__u8			nr_replicas;
	__u8			pad[3];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(reservation,	BCH_RESERVATION);
/* Maximum size (in u64s) a single pointer could be: */
#define BKEY_EXTENT_PTR_U64s_MAX				\
	((sizeof(struct bch_extent_crc128) +			\
	  sizeof(struct bch_extent_ptr)) / sizeof(__u64))

/* Maximum possible size of an entire extent value: */
/* There's a hack in the keylist code that needs to be fixed.. */
#define BKEY_EXTENT_VAL_U64s_MAX				\
	(BKEY_EXTENT_PTR_U64s_MAX * BCH_REPLICAS_MAX)

/* Maximum possible size of an entire extent, key + value: */
#define BKEY_EXTENT_U64s_MAX		(BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)

/* Btree pointers don't carry around checksums: */
#define BKEY_BTREE_PTR_VAL_U64s_MAX				\
	((sizeof(struct bch_extent_ptr)) / sizeof(__u64) * BCH_REPLICAS_MAX)
#define BKEY_BTREE_PTR_U64s_MAX					\
	(BKEY_U64s + BKEY_BTREE_PTR_VAL_U64s_MAX)
/* Inodes */

#define BLOCKDEV_INODE_MAX	4096

#define BCACHE_ROOT_INO		4096

enum bch_inode_types {
	BCH_INODE_FS		= 128,
	BCH_INODE_BLOCKDEV	= 129,
};

struct bch_inode {
	struct bch_val		v;

	__le64			i_hash_seed;
	__le32			i_flags;
	__le16			i_mode;
	__u8			fields[0];
} __attribute__((packed));
BKEY_VAL_TYPE(inode,		BCH_INODE_FS);
#define BCH_INODE_FIELDS()			\
	BCH_INODE_FIELD(i_atime, 64)		\
	BCH_INODE_FIELD(i_ctime, 64)		\
	BCH_INODE_FIELD(i_mtime, 64)		\
	BCH_INODE_FIELD(i_otime, 64)		\
	BCH_INODE_FIELD(i_size, 64)		\
	BCH_INODE_FIELD(i_sectors, 64)		\
	BCH_INODE_FIELD(i_uid, 32)		\
	BCH_INODE_FIELD(i_gid, 32)		\
	BCH_INODE_FIELD(i_nlink, 32)		\
	BCH_INODE_FIELD(i_generation, 32)	\
	BCH_INODE_FIELD(i_dev, 32)
enum {
	/*
	 * User flags (get/settable with FS_IOC_*FLAGS, correspond to FS_*_FL
	 * flags)
	 */
	__BCH_INODE_SYNC	= 0,
	__BCH_INODE_IMMUTABLE	= 1,
	__BCH_INODE_APPEND	= 2,
	__BCH_INODE_NODUMP	= 3,
	__BCH_INODE_NOATIME	= 4,

	__BCH_INODE_I_SIZE_DIRTY= 5,
	__BCH_INODE_I_SECTORS_DIRTY= 6,

	/* not implemented yet: */
	__BCH_INODE_HAS_XATTRS	= 7, /* has xattrs in xattr btree */

	/* bits 20+ reserved for packed fields below: */
};
#define BCH_INODE_SYNC		(1 << __BCH_INODE_SYNC)
#define BCH_INODE_IMMUTABLE	(1 << __BCH_INODE_IMMUTABLE)
#define BCH_INODE_APPEND	(1 << __BCH_INODE_APPEND)
#define BCH_INODE_NODUMP	(1 << __BCH_INODE_NODUMP)
#define BCH_INODE_NOATIME	(1 << __BCH_INODE_NOATIME)
#define BCH_INODE_I_SIZE_DIRTY	(1 << __BCH_INODE_I_SIZE_DIRTY)
#define BCH_INODE_I_SECTORS_DIRTY (1 << __BCH_INODE_I_SECTORS_DIRTY)
#define BCH_INODE_HAS_XATTRS	(1 << __BCH_INODE_HAS_XATTRS)

LE32_BITMASK(INODE_STR_HASH,	struct bch_inode, i_flags, 20, 24);
LE32_BITMASK(INODE_NR_FIELDS,	struct bch_inode, i_flags, 24, 32);
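/*
 * Illustrative only: the accessors generated just above unpack the fields
 * stored in bits 20+ of i_flags - e.g. the number of variable length fields
 * appended after struct bch_inode. Hypothetical helper.
 */
static inline unsigned bch_inode_nr_fields_example(const struct bch_inode *inode)
{
	return INODE_NR_FIELDS(inode);
}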
struct bch_inode_blockdev {
	struct bch_val		v;

	__le64			i_size;
	__le64			i_flags;

	/* Seconds: */
	__le64			i_ctime;
	__le64			i_mtime;

	uuid_le			i_uuid;
	__u8			i_label[32];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(inode_blockdev,	BCH_INODE_BLOCKDEV);
/* Thin provisioned volume, or cache for another block device? */
LE64_BITMASK(CACHED_DEV,	struct bch_inode_blockdev, i_flags, 0, 1)
/* Dirents */

/*
 * Dirents (and xattrs) have to implement string lookups; since our b-tree
 * doesn't support arbitrary length strings for the key, we instead index by a
 * 64 bit hash (currently truncated sha1) of the string, stored in the offset
 * field of the key - using linear probing to resolve hash collisions. This also
 * provides us with the readdir cookie posix requires.
 *
 * Linear probing requires us to use whiteouts for deletions, in the event of a
 * collision:
 */

enum {
	BCH_DIRENT		= 128,
	BCH_DIRENT_WHITEOUT	= 129,
};

struct bch_dirent {
	struct bch_val		v;

	/* Target inode number: */
	__le64			d_inum;

	/*
	 * Copy of mode bits 12-15 from the target inode - so userspace can get
	 * the filetype without having to do a stat()
	 */
	__u8			d_type;

	__u8			d_name[];
} __attribute__((packed));
BKEY_VAL_TYPE(dirent,		BCH_DIRENT);
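/*
 * Illustrative sketch of the lookup scheme described above (hypothetical
 * helper, not part of this header): the directory's inode number goes in
 * bpos.inode and the 64 bit hash of the name in bpos.offset; collisions are
 * resolved by probing subsequent offsets.
 */
static inline struct bpos dirent_pos_example(__u64 dir_inum, __u64 name_hash)
{
	return POS(dir_inum, name_hash);
}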
/* Xattrs */

enum {
	BCH_XATTR		= 128,
	BCH_XATTR_WHITEOUT	= 129,
};

#define BCH_XATTR_INDEX_USER			0
#define BCH_XATTR_INDEX_POSIX_ACL_ACCESS	1
#define BCH_XATTR_INDEX_POSIX_ACL_DEFAULT	2
#define BCH_XATTR_INDEX_TRUSTED			3
#define BCH_XATTR_INDEX_SECURITY		4

struct bch_xattr {
	struct bch_val		v;
	__u8			x_type;
	__u8			x_name_len;
	__le16			x_val_len;
	__u8			x_name[];
} __attribute__((packed));
BKEY_VAL_TYPE(xattr,		BCH_XATTR);
/* Superblock */

/* Version 0: Cache device
 * Version 1: Backing device
 * Version 2: Seed pointer into btree node checksum
 * Version 3: Cache device with new UUID format
 * Version 4: Backing device with data offset
 * Version 5: All the incompat changes
 * Version 6: Cache device UUIDs all in superblock, another incompat bset change
 * Version 7: Encryption (expanded checksum fields), other random things
 */
#define BCACHE_SB_VERSION_CDEV_V0	0
#define BCACHE_SB_VERSION_BDEV		1
#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3
#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4
#define BCACHE_SB_VERSION_CDEV_V2	5
#define BCACHE_SB_VERSION_CDEV_V3	6
#define BCACHE_SB_VERSION_CDEV_V4	7
#define BCACHE_SB_VERSION_CDEV		7
#define BCACHE_SB_MAX_VERSION		7
#define BCH_SB_SECTOR		8
#define BCH_SB_LABEL_SIZE	32
#define BCH_SB_MEMBERS_MAX	64 /* XXX kill */

struct bch_member {
	uuid_le			uuid;
	__le64			nbuckets;	/* device size */
	__le16			first_bucket;	/* index of first bucket used */
	__le16			bucket_size;	/* sectors */
	__le32			pad;
	__le64			last_mount;	/* time_t */

	__le64			flags[2];
};
LE64_BITMASK(BCH_MEMBER_STATE,		struct bch_member, flags[0],  0,  4)
LE64_BITMASK(BCH_MEMBER_TIER,		struct bch_member, flags[0],  4,  8)
LE64_BITMASK(BCH_MEMBER_HAS_METADATA,	struct bch_member, flags[0],  8,  9)
LE64_BITMASK(BCH_MEMBER_HAS_DATA,	struct bch_member, flags[0],  9, 10)
LE64_BITMASK(BCH_MEMBER_REPLACEMENT,	struct bch_member, flags[0], 10, 14)
LE64_BITMASK(BCH_MEMBER_DISCARD,	struct bch_member, flags[0], 14, 15);

LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS,	struct bch_member, flags[1],  0, 20);
LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
enum bch_member_state {
	BCH_MEMBER_STATE_ACTIVE		= 0,
	BCH_MEMBER_STATE_RO		= 1,
	BCH_MEMBER_STATE_FAILED		= 2,
	BCH_MEMBER_STATE_SPARE		= 3,
	BCH_MEMBER_STATE_NR		= 4,
};

#define BCH_TIER_MAX			4U

enum cache_replacement {
	CACHE_REPLACEMENT_LRU		= 0,
	CACHE_REPLACEMENT_FIFO		= 1,
	CACHE_REPLACEMENT_RANDOM	= 2,
	CACHE_REPLACEMENT_NR		= 3,
};
struct bch_sb_layout {
	uuid_le			magic;	/* bcache superblock UUID */
	__u8			layout_type;
	__u8			sb_max_size_bits; /* base 2 of 512 byte sectors */
	__u8			nr_superblocks;
	__u8			pad[5];
	__le64			sb_offset[61];
} __attribute__((packed));

#define BCH_SB_LAYOUT_SECTOR	7
struct bch_sb_field {
	__u64			_data[0];
	__le32			u64s;
	__le32			type;
};

enum bch_sb_field_types {
	BCH_SB_FIELD_journal	= 0,
	BCH_SB_FIELD_members	= 1,
	BCH_SB_FIELD_crypt	= 2,
	BCH_SB_FIELD_NR		= 3,
};
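/*
 * Illustrative sketch, assuming u64s counts the entire field including its
 * header (note _data[] sits at offset 0 of struct bch_sb_field): superblock
 * fields are laid out one after another, so stepping to the next one advances
 * by u64s. Hypothetical helper, not part of this header.
 */
static inline struct bch_sb_field *bch_sb_field_next_example(struct bch_sb_field *f)
{
	return (struct bch_sb_field *) ((__u64 *) f + __le32_to_cpu(f->u64s));
}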
struct bch_sb_field_journal {
	struct bch_sb_field	field;
	__le64			buckets[0];
};

struct bch_sb_field_members {
	struct bch_sb_field	field;
	struct bch_member	members[0];
};
/* Crypto: */

struct nonce {
	__le32			d[4];
};

struct bch_key {
	__le64			key[4];
};

#define BCH_KEY_MAGIC					\
	(((__u64) 'b' <<  0)|((__u64) 'c' <<  8)|	\
	 ((__u64) 'h' << 16)|((__u64) '*' << 24)|	\
	 ((__u64) '*' << 32)|((__u64) 'k' << 40)|	\
	 ((__u64) 'e' << 48)|((__u64) 'y' << 56))

struct bch_encrypted_key {
	__le64			magic;
	struct bch_key		key;
};
/*
 * If this field is present in the superblock, it stores an encryption key
 * which is used to encrypt all other data/metadata. The key will normally be
 * encrypted with the key userspace provides, but if encryption has been
 * turned off we'll just store the master key unencrypted in the superblock so
 * we can access the previously encrypted data.
 */
struct bch_sb_field_crypt {
	struct bch_sb_field	field;

	__le64			flags;
	__le64			kdf_flags;
	struct bch_encrypted_key key;
};
LE64_BITMASK(BCH_CRYPT_KDF_TYPE,	struct bch_sb_field_crypt, flags, 0, 4);

enum bch_kdf_types {
	BCH_KDF_SCRYPT		= 0,
	BCH_KDF_NR		= 1,
};

/* stored as base 2 log of scrypt params: */
LE64_BITMASK(BCH_KDF_SCRYPT_N,	struct bch_sb_field_crypt, kdf_flags,  0, 16);
LE64_BITMASK(BCH_KDF_SCRYPT_R,	struct bch_sb_field_crypt, kdf_flags, 16, 32);
LE64_BITMASK(BCH_KDF_SCRYPT_P,	struct bch_sb_field_crypt, kdf_flags, 32, 48);
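/*
 * Illustrative only: the scrypt parameters are stored as base 2 logs, so the
 * actual N value is recovered by shifting. Hypothetical helper, not part of
 * this header.
 */
static inline __u64 bch_scrypt_n_example(const struct bch_sb_field_crypt *crypt)
{
	return 1ULL << BCH_KDF_SCRYPT_N(crypt);
}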
/*
 * @offset	- sector where this sb was written
 * @version	- on disk format version
 * @magic	- identifies as a bcache superblock (BCACHE_MAGIC)
 * @uuid	- used for generating various magic numbers and identifying
 *		  member devices, never changes
 * @user_uuid	- user visible UUID, may be changed
 * @label	- filesystem label
 * @seq		- identifies most recent superblock, incremented each time
 *		  superblock is written
 * @features	- enabled incompatible features
 */
struct bch_sb {
	struct bch_csum		csum;
	__le64			version;
	uuid_le			magic;
	uuid_le			uuid;
	uuid_le			user_uuid;
	__u8			label[BCH_SB_LABEL_SIZE];
	__le64			offset;
	__le64			seq;

	__le16			block_size;
	__u8			dev_idx;
	__u8			nr_devices;
	__le32			u64s;

	__le64			time_base_lo;
	__le32			time_base_hi;
	__le32			time_precision;

	__le64			flags[8];
	__le64			features[2];
	__le64			compat[2];

	struct bch_sb_layout	layout;

	union {
		struct bch_sb_field start[0];
		__le64		_data[0];
	};
} __attribute__((packed, aligned(8)));
/*
 * Flags:
 * BCH_SB_INITIALIZED	- set on first mount
 * BCH_SB_CLEAN		- did we shut down cleanly? Just a hint, doesn't affect
 *			  behaviour of mount/recovery path:
 * BCH_SB_INODE_32BIT	- limit inode numbers to 32 bits
 * BCH_SB_128_BIT_MACS	- 128 bit macs instead of 80
 * BCH_SB_ENCRYPTION_TYPE - if nonzero encryption is enabled; overrides
 *			    DATA/META_CSUM_TYPE. Also indicates encryption
 *			    algorithm in use, if/when we get more than one
 */
LE64_BITMASK(BCH_SB_INITIALIZED,	struct bch_sb, flags[0],  0,  1);
LE64_BITMASK(BCH_SB_CLEAN,		struct bch_sb, flags[0],  1,  2);
LE64_BITMASK(BCH_SB_CSUM_TYPE,		struct bch_sb, flags[0],  2,  8);
LE64_BITMASK(BCH_SB_ERROR_ACTION,	struct bch_sb, flags[0],  8, 12);

LE64_BITMASK(BCH_SB_BTREE_NODE_SIZE,	struct bch_sb, flags[0], 12, 28);

LE64_BITMASK(BCH_SB_GC_RESERVE,		struct bch_sb, flags[0], 28, 33);
LE64_BITMASK(BCH_SB_ROOT_RESERVE,	struct bch_sb, flags[0], 33, 40);

LE64_BITMASK(BCH_SB_META_CSUM_TYPE,	struct bch_sb, flags[0], 40, 44);
LE64_BITMASK(BCH_SB_DATA_CSUM_TYPE,	struct bch_sb, flags[0], 44, 48);

LE64_BITMASK(BCH_SB_META_REPLICAS_WANT,	struct bch_sb, flags[0], 48, 52);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_WANT,	struct bch_sb, flags[0], 52, 56);

LE64_BITMASK(BCH_SB_META_REPLICAS_HAVE,	struct bch_sb, flags[0], 56, 60);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_HAVE,	struct bch_sb, flags[0], 60, 64);

LE64_BITMASK(BCH_SB_STR_HASH_TYPE,	struct bch_sb, flags[1],  0,  4);
LE64_BITMASK(BCH_SB_COMPRESSION_TYPE,	struct bch_sb, flags[1],  4,  8);
LE64_BITMASK(BCH_SB_INODE_32BIT,	struct bch_sb, flags[1],  8,  9);

LE64_BITMASK(BCH_SB_128_BIT_MACS,	struct bch_sb, flags[1],  9, 10);
LE64_BITMASK(BCH_SB_ENCRYPTION_TYPE,	struct bch_sb, flags[1], 10, 14);
LE64_BITMASK(BCH_SB_JOURNAL_ENTRY_SIZE,	struct bch_sb, flags[1], 14, 20);
enum bch_sb_features {
	BCH_FEATURE_LZ4		= 0,
	BCH_FEATURE_GZIP	= 1,
};

/* options: */

#define BCH_REPLICAS_MAX	4U
#define BCH_ERROR_ACTIONS()					\
	x(BCH_ON_ERROR_CONTINUE,	0, "continue")		\
	x(BCH_ON_ERROR_RO,		1, "remount-ro")	\
	x(BCH_ON_ERROR_PANIC,		2, "panic")		\
	x(BCH_NR_ERROR_ACTIONS,		3, NULL)

enum bch_error_actions {
#define x(_opt, _nr, _str)	_opt = _nr,
	BCH_ERROR_ACTIONS()
#undef x
};
enum bch_csum_opts {
	BCH_CSUM_OPT_NONE	= 0,
	BCH_CSUM_OPT_CRC32C	= 1,
	BCH_CSUM_OPT_CRC64	= 2,
	BCH_CSUM_OPT_NR		= 3,
};

enum bch_str_hash_opts {
	BCH_STR_HASH_CRC32C	= 0,
	BCH_STR_HASH_CRC64	= 1,
	BCH_STR_HASH_SIPHASH	= 2,
	BCH_STR_HASH_NR		= 3,
};

enum bch_compression_opts {
	BCH_COMPRESSION_NONE	= 0,
	BCH_COMPRESSION_LZ4	= 1,
	BCH_COMPRESSION_GZIP	= 2,
	BCH_COMPRESSION_NR	= 3,
};
/* backing device specific stuff: */

struct backingdev_sb {
	__le64			csum;
	__le64			offset;	/* sector where this sb was written */
	__le64			version; /* of on disk format */

	uuid_le			magic;	/* bcache superblock UUID */

	uuid_le			disk_uuid;

	/*
	 * Internal cache set UUID - xored with various magic numbers and thus
	 * must never change:
	 */
	union {
		uuid_le		set_uuid;
		__le64		set_magic;
	};
	__u8			label[BCH_SB_LABEL_SIZE];

	__le64			flags;

	/* Incremented each time superblock is written: */
	__le64			seq;

	/*
	 * User visible UUID for identifying the cache set the user is allowed
	 * to change:
	 */
	uuid_le			user_uuid;
	__le64			pad1[6];

	__le64			data_offset;
	__le16			block_size;	/* sectors */
	__le16			pad2[3];

	__le32			last_mount;	/* time_t */
	__le16			pad3;
	/* size of variable length portion - always 0 for backingdev superblock */
	__le16			u64s;
	__u64			_data[0];
};
LE64_BITMASK(BDEV_CACHE_MODE,		struct backingdev_sb, flags, 0, 4);
#define CACHE_MODE_WRITETHROUGH	0U
#define CACHE_MODE_WRITEBACK	1U
#define CACHE_MODE_WRITEAROUND	2U
#define CACHE_MODE_NONE		3U

LE64_BITMASK(BDEV_STATE,		struct backingdev_sb, flags, 61, 63);
#define BDEV_STATE_NONE		0U
#define BDEV_STATE_CLEAN	1U
#define BDEV_STATE_DIRTY	2U
#define BDEV_STATE_STALE	3U

#define BDEV_DATA_START_DEFAULT	16	/* sectors */
static inline _Bool __SB_IS_BDEV(__u64 version)
{
	return version == BCACHE_SB_VERSION_BDEV
		|| version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET;
}

static inline _Bool SB_IS_BDEV(const struct bch_sb *sb)
{
	return __SB_IS_BDEV(__le64_to_cpu(sb->version));
}
/*
 * Magic numbers
 *
 * The various other data structures have their own magic numbers, which are
 * xored with the first part of the cache set's UUID
 */

#define BCACHE_MAGIC							\
	UUID_LE(0xf67385c6, 0x1a4e, 0xca45,				\
		0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)

#define BCACHE_STATFS_MAGIC	0xca451a4e

#define JSET_MAGIC		__cpu_to_le64(0x245235c1a3625032ULL)
#define PSET_MAGIC		__cpu_to_le64(0x6750e15f87337f91ULL)
#define BSET_MAGIC		__cpu_to_le64(0x90135c78b99e07f5ULL)
static inline __le64 __bch_sb_magic(struct bch_sb *sb)
{
	__le64 ret;

	memcpy(&ret, &sb->uuid, sizeof(ret));
	return ret;
}

static inline __u64 __jset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch_sb_magic(sb) ^ JSET_MAGIC);
}

static inline __u64 __pset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch_sb_magic(sb) ^ PSET_MAGIC);
}

static inline __u64 __bset_magic(struct bch_sb *sb)
{
	return __le64_to_cpu(__bch_sb_magic(sb) ^ BSET_MAGIC);
}
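/*
 * Illustrative only: a journal entry read from disk belongs to this
 * filesystem iff its magic field matches __jset_magic() computed from the
 * superblock's uuid. Hypothetical helper, not part of this header.
 */
static inline _Bool jset_magic_ok_example(struct bch_sb *sb, __le64 magic)
{
	return __le64_to_cpu(magic) == __jset_magic(sb);
}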
/* Journal */

#define BCACHE_JSET_VERSION_UUIDv1	1
#define BCACHE_JSET_VERSION_UUID	1	/* Always latest UUID format */
#define BCACHE_JSET_VERSION_JKEYS	2
#define BCACHE_JSET_VERSION		2

struct jset_entry {
	__le16			u64s;
	__u8			btree_id;
	__u8			level;
	__le32			flags;	/* designates what this jset holds */

	union {
		struct bkey_i	start[0];
		__u64		_data[0];
	};
};

#define JSET_KEYS_U64s	(sizeof(struct jset_entry) / sizeof(__u64))
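/*
 * Illustrative sketch, assuming u64s counts only the entry's payload (the
 * keys in start[]/_data[]): the next entry in a journal write starts
 * JSET_KEYS_U64s + u64s u64s after this one. Hypothetical helper, not part
 * of this header.
 */
static inline struct jset_entry *jset_entry_next_example(struct jset_entry *e)
{
	return (struct jset_entry *) ((__u64 *) e + JSET_KEYS_U64s +
				      __le16_to_cpu(e->u64s));
}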
LE32_BITMASK(JOURNAL_ENTRY_TYPE,	struct jset_entry, flags, 0, 8);
enum {
	JOURNAL_ENTRY_BTREE_KEYS	= 0,
	JOURNAL_ENTRY_BTREE_ROOT	= 1,
	JOURNAL_ENTRY_PRIO_PTRS		= 2,

	/*
	 * Journal sequence numbers can be blacklisted: bsets record the max
	 * sequence number of all the journal entries they contain updates for,
	 * so that on recovery we can ignore those bsets that contain index
	 * updates newer than what made it into the journal.
	 *
	 * This means that we can't reuse that journal_seq - we have to skip it,
	 * and then record that we skipped it so that the next time we crash and
	 * recover we don't think there was a missing journal entry.
	 */
	JOURNAL_ENTRY_JOURNAL_SEQ_BLACKLISTED = 3,
};
/*
 * On disk format for a journal entry:
 * seq is monotonically increasing; every journal entry has its own unique
 * sequence number.
 *
 * last_seq is the oldest journal entry that still has keys the btree hasn't
 * flushed to disk yet.
 *
 * version is for on disk format changes.
 */
struct jset {
	struct bch_csum		csum;

	__le64			magic;
	__le64			seq;
	__le32			version;
	__le32			flags;

	__le32			u64s; /* size of d[] in u64s */

	__u8			encrypted_start[0];

	__le16			read_clock;
	__le16			write_clock;

	/* Sequence number of oldest dirty journal entry */
	__le64			last_seq;

	union {
		struct jset_entry start[0];
		__u64		_data[0];
	};
} __attribute__((packed));
LE32_BITMASK(JSET_CSUM_TYPE,	struct jset, flags, 0, 4);
LE32_BITMASK(JSET_BIG_ENDIAN,	struct jset, flags, 4, 5);

#define BCH_JOURNAL_BUCKETS_MIN		20
/* Bucket prios/gens */

struct prio_set {
	struct bch_csum		csum;

	__le64			magic;
	__le32			nonce[3];
	__le16			version;
	__le16			flags;

	__u8			encrypted_start[0];

	__le64			next_bucket;

	struct bucket_disk {
		__le16		prio[2];
		__u8		gen;
	} __attribute__((packed)) data[];
} __attribute__((packed));

LE32_BITMASK(PSET_CSUM_TYPE,	struct prio_set, flags, 0, 4);
/* Btree: */

#define DEFINE_BCH_BTREE_IDS()			\
	DEF_BTREE_ID(EXTENTS, 0, "extents")	\
	DEF_BTREE_ID(INODES,  1, "inodes")	\
	DEF_BTREE_ID(DIRENTS, 2, "dirents")	\
	DEF_BTREE_ID(XATTRS,  3, "xattrs")

#define DEF_BTREE_ID(kwd, val, name) BTREE_ID_##kwd = val,

enum btree_id {
	DEFINE_BCH_BTREE_IDS()
	BTREE_ID_NR
};

#undef DEF_BTREE_ID

#define BTREE_MAX_DEPTH		4U
/* Btree nodes */

/*
 * Version 1: Seed pointer into btree node checksum
 */
#define BCACHE_BSET_CSUM		1
#define BCACHE_BSET_KEY_v1		2
#define BCACHE_BSET_JOURNAL_SEQ		3
#define BCACHE_BSET_VERSION		3
/*
 * On disk a btree node is a list/log of these; within each set the keys are
 * sorted
 */
struct bset {
	__le64			seq;

	/*
	 * Highest journal entry this bset contains keys for.
	 * If on recovery we don't see that journal entry, this bset is ignored:
	 * this allows us to preserve the order of all index updates after a
	 * crash, since the journal records a total order of all index updates
	 * and anything that didn't make it to the journal doesn't get used.
	 */
	__le64			journal_seq;

	__le32			flags;
	__le16			version;
	__le16			u64s; /* count of d[] in u64s */

	union {
		struct bkey_packed start[0];
		__u64		_data[0];
	};
} __attribute__((packed));
LE32_BITMASK(BSET_CSUM_TYPE,	struct bset, flags, 0, 4);

LE32_BITMASK(BSET_BIG_ENDIAN,	struct bset, flags, 4, 5);
LE32_BITMASK(BSET_SEPARATE_WHITEOUTS,
				struct bset, flags, 5, 6);
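/*
 * Illustrative only: keys within a bset are variable length, so iteration
 * advances by each key's u64s field. Hypothetical helper, mirroring
 * bkey_v0_next() at the bottom of this header.
 */
static inline struct bkey_packed *bkey_packed_next_example(struct bkey_packed *k)
{
	return (struct bkey_packed *) ((__u64 *) k + k->u64s);
}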
struct btree_node {
	struct bch_csum		csum;
	__le64			magic;

	/* this flags field is encrypted, unlike bset->flags: */
	__le64			flags;

	/* Closed interval: */
	struct bpos		min_key;
	struct bpos		max_key;
	struct bch_extent_ptr	ptr;
	struct bkey_format	format;

	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];
	};
	};
} __attribute__((packed));

LE64_BITMASK(BTREE_NODE_ID,	struct btree_node, flags, 0, 4);
LE64_BITMASK(BTREE_NODE_LEVEL,	struct btree_node, flags, 4, 8);
struct btree_node_entry {
	struct bch_csum		csum;

	union {
	struct bset		keys;
	struct {
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];
	};
	};
} __attribute__((packed));
/* OBSOLETE */

#define BITMASK(name, type, field, offset, end)				\
static const unsigned	name##_OFFSET = offset;				\
static const unsigned	name##_BITS = (end - offset);			\
static const __u64	name##_MAX = (1ULL << (end - offset)) - 1;	\
									\
static inline __u64 name(const type *k)				\
{ return (k->field >> offset) & ~(~0ULL << (end - offset)); }		\
									\
static inline void SET_##name(type *k, __u64 v)				\
{									\
	k->field &= ~(~(~0ULL << (end - offset)) << offset);		\
	k->field |= (v & ~(~0ULL << (end - offset))) << offset;		\
}
struct bkey_v0 {
	__u64	high;
	__u64	low;
	__u64	ptr[];
};

#define KEY0_FIELD(name, field, offset, size)				\
	BITMASK(name, struct bkey_v0, field, offset, size)

KEY0_FIELD(KEY0_PTRS,		high, 60, 63)
KEY0_FIELD(KEY0_CSUM,		high, 56, 58)
KEY0_FIELD(KEY0_DIRTY,		high, 36, 37)

KEY0_FIELD(KEY0_SIZE,		high, 20, 36)
KEY0_FIELD(KEY0_INODE,		high, 0,  20)
static inline unsigned long bkey_v0_u64s(const struct bkey_v0 *k)
{
	return (sizeof(struct bkey_v0) / sizeof(__u64)) + KEY0_PTRS(k);
}

static inline struct bkey_v0 *bkey_v0_next(const struct bkey_v0 *k)
{
	__u64 *d = (__u64 *) k;

	return (struct bkey_v0 *) (d + bkey_v0_u64s(k));
}
struct jset_v0 {
	__u64			csum;
	__u64			magic;
	__u64			seq;
	__u32			version;
	__u32			keys;

	__u64			last_seq;

	__BKEY_PADDED(uuid_bucket, 4);
	__BKEY_PADDED(btree_root, 4);
	__u16			btree_level;
	__u16			pad[3];

	__u64			prio_bucket[64];

	union {
		struct bkey	start[0];
		__u64		d[0];
	};
};
/* UUIDS - per backing device/flash only volume metadata */

struct uuid_entry_v0 {
	uuid_le		uuid;
	__u32		first_reg;
	__u32		last_reg;
	__u32		invalidated;
	__u32		flags;
	__u64		sectors;
};

struct uuid_entry {
	union {
		struct {
			uuid_le	uuid;
			__u8	label[32];
			__u32	first_reg;
			__u32	last_reg;
			__u32	invalidated;

			__u32	flags;
			/* Size of flash only volumes */
			__u64	sectors;
		};

		__u8		pad[128];
	};
};

BITMASK(UUID_FLASH_ONLY,	struct uuid_entry, flags, 0, 1);
#endif /* _LINUX_BCACHE_H */

/* vim: set foldnestmax=2: */