#ifndef _LINUX_BCACHE_H
#define _LINUX_BCACHE_H

/*
 * Bcache on disk data structures
 */

#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/uuid.h>
#define LE32_BITMASK(name, type, field, offset, end)			\
static const unsigned	name##_OFFSET = offset;				\
static const unsigned	name##_BITS = (end - offset);			\
static const __u64	name##_MAX = (1ULL << (end - offset)) - 1;	\
									\
static inline __u64 name(const type *k)				\
{									\
	return (__le32_to_cpu(k->field) >> offset) &			\
		~(~0ULL << (end - offset));				\
}									\
									\
static inline void SET_##name(type *k, __u64 v)				\
{									\
	__u64 new = __le32_to_cpu(k->field);				\
									\
	new &= ~(~(~0ULL << (end - offset)) << offset);			\
	new |= (v & ~(~0ULL << (end - offset))) << offset;		\
	k->field = __cpu_to_le32(new);					\
}
#define LE64_BITMASK(name, type, field, offset, end)			\
static const unsigned	name##_OFFSET = offset;				\
static const unsigned	name##_BITS = (end - offset);			\
static const __u64	name##_MAX = (1ULL << (end - offset)) - 1;	\
									\
static inline __u64 name(const type *k)				\
{									\
	return (__le64_to_cpu(k->field) >> offset) &			\
		~(~0ULL << (end - offset));				\
}									\
									\
static inline void SET_##name(type *k, __u64 v)				\
{									\
	__u64 new = __le64_to_cpu(k->field);				\
									\
	new &= ~(~(~0ULL << (end - offset)) << offset);			\
	new |= (v & ~(~0ULL << (end - offset))) << offset;		\
	k->field = __cpu_to_le64(new);					\
}
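
/*
 * Usage sketch (illustrative only, not part of the on-disk format):
 * given a hypothetical struct with a single little-endian flags word,
 *
 *	struct example { __le64 flags; };
 *	LE64_BITMASK(EXAMPLE_STATE, struct example, flags, 0, 4)
 *
 * the macro generates EXAMPLE_STATE() and SET_EXAMPLE_STATE() accessors
 * for bits 0-3 of ->flags, byte-swapping on every access so the struct
 * can be read directly off disk on hosts of either endianness.
 */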
struct bkey_format {
	__u8		key_u64s;
	__u8		nr_fields;
	/* One unused slot for now: */
	__u8		bits_per_field[6];
	__le64		field_offset[6];
};
/* Btree keys - all units are in sectors */

struct bpos {
	/* Word order matches machine byte order */
#if defined(__LITTLE_ENDIAN)
	__u32		snapshot;
	__u64		offset;
	__u64		inode;
#elif defined(__BIG_ENDIAN)
	__u64		inode;
	__u64		offset;		/* Points to end of extent - sectors */
	__u32		snapshot;
#else
#error edit for your odd byteorder.
#endif
} __attribute__((packed, aligned(4)));
#define KEY_INODE_MAX			((__u64)~0ULL)
#define KEY_OFFSET_MAX			((__u64)~0ULL)
#define KEY_SNAPSHOT_MAX		((__u32)~0U)

static inline struct bpos POS(__u64 inode, __u64 offset)
{
	struct bpos ret;

	ret.inode	= inode;
	ret.offset	= offset;
	ret.snapshot	= 0;

	return ret;
}

#define POS_MIN		POS(0, 0)
#define POS_MAX		POS(KEY_INODE_MAX, KEY_OFFSET_MAX)

/* Empty placeholder struct, for container_of() */
struct bch_val {
	__u64		__nothing[0];
};
struct bkey {
	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#else
#error edit for your odd byteorder.
#endif

	/* Type of the value */
	__u8		type;

#if defined(__LITTLE_ENDIAN)
	__u8		pad[1];

	__u32		version;
	__u32		size;		/* extent size, in sectors */
	struct bpos	p;
#elif defined(__BIG_ENDIAN)
	struct bpos	p;
	__u32		size;		/* extent size, in sectors */
	__u32		version;

	__u8		pad[1];
#endif
} __attribute__((packed, aligned(8)));
struct bkey_packed {
	__u64		_data[0];

	/* Size of combined key and value, in u64s */
	__u8		u64s;

	/* Format of key (0 for format local to btree node) */

	/*
	 * XXX: next incompat on disk format change, switch format and
	 * needs_whiteout - bkey_packed() will be cheaper if format is the high
	 * bits of the bitfield
	 */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8		format:7,
			needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u8		needs_whiteout:1,
			format:7;
#endif

	/* Type of the value */
	__u8		type;
	__u8		key_start[0];

	/*
	 * We copy bkeys with struct assignment in various places, and while
	 * that shouldn't be done with packed bkeys we can't disallow it in C,
	 * and it's legal to cast a bkey to a bkey_packed - so padding it out
	 * to the same size as struct bkey should hopefully be safest.
	 */
	__u8		pad[sizeof(struct bkey) - 3];
} __attribute__((packed, aligned(8)));
#define BKEY_U64s			(sizeof(struct bkey) / sizeof(__u64))
#define KEY_PACKED_BITS_START		24

#define KEY_SIZE_MAX			((__u32)~0U)

#define KEY_FORMAT_LOCAL_BTREE		0
#define KEY_FORMAT_CURRENT		1

enum bch_bkey_fields {
	BKEY_FIELD_INODE,
	BKEY_FIELD_OFFSET,
	BKEY_FIELD_SNAPSHOT,
	BKEY_FIELD_SIZE,
	BKEY_FIELD_VERSION,
	BKEY_NR_FIELDS,
};

#define bkey_format_field(name, field)					\
	[BKEY_FIELD_##name] = (sizeof(((struct bkey *) NULL)->field) * 8)
#define BKEY_FORMAT_CURRENT						\
((struct bkey_format) {						\
	.key_u64s	= BKEY_U64s,					\
	.nr_fields	= BKEY_NR_FIELDS,				\
	.bits_per_field = {						\
		bkey_format_field(INODE, p.inode),			\
		bkey_format_field(OFFSET, p.offset),			\
		bkey_format_field(SNAPSHOT, p.snapshot),		\
		bkey_format_field(SIZE, size),				\
		bkey_format_field(VERSION, version),			\
	},								\
})
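
/*
 * For illustration: bkey_format_field(SIZE, size) expands to
 * [BKEY_FIELD_SIZE] = sizeof(((struct bkey *) NULL)->size) * 8, so
 * BKEY_FORMAT_CURRENT simply describes the unpacked struct bkey layout -
 * every field at its full width, with all field_offsets left at 0.
 */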
/* bkey with inline value */
struct bkey_i {
	struct bkey	k;
	struct bch_val	v;
};
#ifndef __cplusplus

#define KEY(_inode, _offset, _size)					\
((struct bkey) {							\
	.u64s		= BKEY_U64s,					\
	.format		= KEY_FORMAT_CURRENT,				\
	.p		= POS(_inode, _offset),				\
	.size		= _size,					\
})

#else

static inline struct bkey KEY(__u64 inode, __u64 offset, __u64 size)
{
	struct bkey ret;

	memset(&ret, 0, sizeof(ret));
	ret.u64s	= BKEY_U64s;
	ret.format	= KEY_FORMAT_CURRENT;
	ret.p.inode	= inode;
	ret.p.offset	= offset;
	ret.size	= size;

	return ret;
}

#endif
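
/*
 * Example (illustrative): an extent occupying sectors [0, 8) of inode 1
 * is constructed as KEY(1, 8, 8) - since bpos.offset points at the end
 * of the extent, k.p.offset is one past the last sector and k.size
 * holds the length in sectors.
 */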
static inline void bkey_init(struct bkey *k)
{
	*k = KEY(0, 0, 0);
}

#define bkey_bytes(_k)		((_k)->u64s * sizeof(__u64))

#define __BKEY_PADDED(key, pad)					\
	struct { struct bkey_i key; __u64 key ## _pad[pad]; }
#define BKEY_VAL_TYPE(name, nr)					\
struct bkey_i_##name {							\
	union {								\
		struct bkey		k;				\
		struct bkey_i		k_i;				\
	};								\
	struct bch_##name		v;				\
}
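
/*
 * For illustration: BKEY_VAL_TYPE(cookie, KEY_TYPE_COOKIE) below
 * defines struct bkey_i_cookie, pairing a struct bkey header (->k) with
 * a struct bch_cookie value (->v), so code can pass around typed keys
 * without casting.
 */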
/*
 * - DELETED keys are used internally to mark keys that should be ignored but
 *   override keys in composition order. Their version number is ignored.
 *
 * - DISCARDED keys indicate that the data is all 0s because it has been
 *   discarded. DISCARDs may have a version; if the version is nonzero the key
 *   will be persistent, otherwise the key will be dropped whenever the btree
 *   node is rewritten (like DELETED keys).
 *
 * - ERROR: any read of the data returns a read error, as the data was lost due
 *   to a failing device. Like DISCARDED keys, they can be removed (overridden)
 *   by new writes or cluster-wide GC. Node repair can also overwrite them with
 *   the same or a more recent version number, but not with an older version
 *   number.
 */
#define KEY_TYPE_DELETED		0
#define KEY_TYPE_DISCARD		1
#define KEY_TYPE_ERROR			2
#define KEY_TYPE_COOKIE			3
#define KEY_TYPE_PERSISTENT_DISCARD	4
#define KEY_TYPE_GENERIC_NR		128

struct bch_cookie {
	struct bch_val		v;
	__le64			cookie;
};
BKEY_VAL_TYPE(cookie, KEY_TYPE_COOKIE);
/*
 * In extent bkeys, the value is a list of pointers (bch_extent_ptr), optionally
 * preceded by checksum/compression information (bch_extent_crc32 or
 * bch_extent_crc64).
 *
 * One major determining factor in the format of extents is how we handle and
 * represent extents that have been partially overwritten and thus trimmed:
 *
 * If an extent is not checksummed or compressed, when the extent is trimmed we
 * don't have to remember the extent we originally allocated and wrote: we can
 * merely adjust ptr->offset to point to the start of the data that is
 * currently live. The size field in struct bkey records the current (live)
 * size of the extent, and is also used to mean "size of region on disk that we
 * point to" in this case.
 *
 * Thus an extent that is not checksummed or compressed will consist only of a
 * list of bch_extent_ptrs, with none of the fields in
 * bch_extent_crc32/bch_extent_crc64.
 *
 * When an extent is checksummed or compressed, it's not possible to read only
 * the data that is currently live: we have to read the entire extent that was
 * originally written, and then return only the part of the extent that is
 * currently live.
 *
 * Thus, in addition to the current size of the extent in struct bkey, we need
 * to store the size of the originally allocated space - this is the
 * compressed_size and uncompressed_size fields in bch_extent_crc32/64. Also,
 * when the extent is trimmed, instead of modifying the offset field of the
 * pointer, we keep a second smaller offset field - "offset into the original
 * extent of the currently live region".
 *
 * The other major determining factor is replication and data migration:
 *
 * Each pointer may have its own bch_extent_crc32/64. When doing a replicated
 * write, we will initially write all the replicas in the same format, with the
 * same checksum type and compression format - however, when copygc runs later
 * (or tiering/cache promotion, anything that moves data), it is not in general
 * going to rewrite all the pointers at once - one of the replicas may be in a
 * bucket on one device that has very little fragmentation while another lives
 * in a bucket that has become heavily fragmented, and thus is being rewritten
 * sooner than the rest.
 *
 * Thus it will only move a subset of the pointers (or in the case of
 * tiering/cache promotion perhaps add a single pointer without dropping any
 * current pointers), and if the extent has been partially overwritten it must
 * write only the currently live portion (or copygc would not be able to reduce
 * fragmentation!) - which necessitates a different bch_extent_crc format for
 * the new pointer.
 *
 * But in the interests of space efficiency, we don't want to store one
 * bch_extent_crc for each pointer if we don't have to.
 *
 * Thus, a bch_extent consists of bch_extent_crc32s, bch_extent_crc64s, and
 * bch_extent_ptrs appended arbitrarily one after the other. We determine the
 * type of a given entry with a scheme similar to utf8 (except we're encoding a
 * type, not a size), encoding the type in the position of the first set bit:
 *
 * bch_extent_crc32	- 0b1
 * bch_extent_ptr	- 0b10
 * bch_extent_crc64	- 0b100
 *
 * We do it this way because bch_extent_crc32 is _very_ constrained on bits (and
 * bch_extent_crc64 is the least constrained).
 *
 * Then, each bch_extent_crc32/64 applies to the pointers that follow after it,
 * until the next bch_extent_crc32/64.
 *
 * If there are no bch_extent_crcs preceding a bch_extent_ptr, then that pointer
 * is neither checksummed nor compressed.
 */
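
/*
 * Illustrative sketch: with the first-set-bit encoding above, an
 * entry's type can be recovered (on a little-endian host, assuming a
 * pointer "entry" at the start of an entry) with a count of trailing
 * zeroes, e.g.
 *
 *	unsigned type = __builtin_ctzll(*(const __u64 *) entry);
 *
 * giving 0, 1 or 2, matching enum bch_extent_entry_type below.
 */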
enum bch_extent_entry_type {
	BCH_EXTENT_ENTRY_crc32		= 0,
	BCH_EXTENT_ENTRY_ptr		= 1,
	BCH_EXTENT_ENTRY_crc64		= 2,
};

#define BCH_EXTENT_ENTRY_MAX	3
struct bch_extent_crc32 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u32			type:1,
				offset:7,
				compressed_size:8,
				uncompressed_size:8,
				csum_type:4,
				compression_type:4;
	__u32			csum;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u32			csum;
	__u32			compression_type:4,
				csum_type:4,
				uncompressed_size:8,
				compressed_size:8,
				offset:7,
				type:1;
#endif
} __attribute__((packed, aligned(8)));
#define CRC32_EXTENT_SIZE_MAX	(1U << 7)

#define BCH_COMPRESSED_EXTENT_MAX 128U

struct bch_extent_crc64 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:3,
				offset:17,
				compressed_size:18,
				uncompressed_size:18,
				csum_type:4,
				compression_type:4;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			compression_type:4,
				csum_type:4,
				uncompressed_size:18,
				compressed_size:18,
				offset:17,
				type:3;
#endif
	__u64			csum;
} __attribute__((packed, aligned(8)));

#define CRC64_EXTENT_SIZE_MAX	(1U << 17)
/*
 * @reservation - pointer hasn't been written to, just reserved
 */
struct bch_extent_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u64			type:2,
				erasure_coded:1,
				reservation:1,
				offset:44, /* 8 petabytes */
				dev:8,
				gen:8;
#elif defined (__BIG_ENDIAN_BITFIELD)
	__u64			gen:8,
				dev:8,
				offset:44,
				reservation:1,
				erasure_coded:1,
				type:2;
#endif
} __attribute__((packed, aligned(8)));
union bch_extent_entry {
#if defined(__LITTLE_ENDIAN) || __BITS_PER_LONG == 64
	unsigned long			type;
#elif __BITS_PER_LONG == 32
	struct {
		unsigned long	pad;
		unsigned long	type;
	};
#else
#error edit for your odd byteorder.
#endif
	struct bch_extent_crc32		crc32;
	struct bch_extent_crc64		crc64;
	struct bch_extent_ptr		ptr;
};
enum {
	BCH_EXTENT		= 128,

	/*
	 * This is kind of a hack, we're overloading the type for a boolean that
	 * really should be part of the value - BCH_EXTENT and BCH_EXTENT_CACHED
	 * have the same value type:
	 */
	BCH_EXTENT_CACHED	= 129,

	/*
	 * Persistent reservation:
	 */
	BCH_RESERVATION		= 130,
};

struct bch_extent {
	struct bch_val		v;

	union bch_extent_entry	start[0];
	__u64			_data[0];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(extent, BCH_EXTENT);
/* Maximum size (in u64s) a single pointer could be: */
#define BKEY_EXTENT_PTR_U64s_MAX					\
	((sizeof(struct bch_extent_crc64) +				\
	  sizeof(struct bch_extent_ptr)) / sizeof(__u64))

/* Maximum possible size of an entire extent value: */
/* There's a hack in the keylist code that needs to be fixed.. */
#define BKEY_EXTENT_VAL_U64s_MAX					\
	(BKEY_EXTENT_PTR_U64s_MAX * BCH_REPLICAS_MAX)

/* Maximum possible size of an entire extent, key + value: */
#define BKEY_EXTENT_U64s_MAX		(BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)

/* Btree pointers don't carry around checksums: */
#define BKEY_BTREE_PTR_VAL_U64s_MAX					\
	(sizeof(struct bch_extent_ptr) / sizeof(__u64) * BCH_REPLICAS_MAX)
#define BKEY_BTREE_PTR_U64s_MAX						\
	(BKEY_U64s + BKEY_BTREE_PTR_VAL_U64s_MAX)
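
/*
 * Worked example (illustrative): struct bch_extent_crc64 is 16 bytes
 * and struct bch_extent_ptr is 8, so one pointer plus its checksum
 * information needs at most 3 u64s; with BCH_REPLICAS_MAX = 4 that
 * bounds an extent value at 12 u64s, on top of BKEY_U64s for the key
 * itself.
 */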
#define BLOCKDEV_INODE_MAX	4096

#define BCACHE_ROOT_INO		4096

enum bch_inode_types {
	BCH_INODE_FS		= 128,
	BCH_INODE_BLOCKDEV	= 129,
};

struct bch_inode {
	struct bch_val		v;

	__le16			i_mode;
	__le16			pad;
	__le32			i_flags;

	/* Nanoseconds */
	__le64			i_atime;
	__le64			i_ctime;
	__le64			i_mtime;

	__le64			i_size;
	__le64			i_sectors;

	__le32			i_uid;
	__le32			i_gid;
	__le32			i_nlink;

	__le32			i_dev;
} __attribute__((packed));
BKEY_VAL_TYPE(inode, BCH_INODE_FS);
enum {
	/*
	 * User flags (get/settable with FS_IOC_*FLAGS, correspond to FS_*_FL
	 * flags)
	 */
	__BCH_INODE_SYNC		= 0,
	__BCH_INODE_IMMUTABLE		= 1,
	__BCH_INODE_APPEND		= 2,
	__BCH_INODE_NODUMP		= 3,
	__BCH_INODE_NOATIME		= 4,

	__BCH_INODE_I_SIZE_DIRTY	= 5,
	__BCH_INODE_I_SECTORS_DIRTY	= 6,

	/* not implemented yet: */
	__BCH_INODE_HAS_XATTRS		= 7, /* has xattrs in xattr btree */
};
LE32_BITMASK(INODE_STR_HASH_TYPE, struct bch_inode, i_flags, 28, 32);

#define BCH_INODE_SYNC		(1 << __BCH_INODE_SYNC)
#define BCH_INODE_IMMUTABLE	(1 << __BCH_INODE_IMMUTABLE)
#define BCH_INODE_APPEND	(1 << __BCH_INODE_APPEND)
#define BCH_INODE_NODUMP	(1 << __BCH_INODE_NODUMP)
#define BCH_INODE_NOATIME	(1 << __BCH_INODE_NOATIME)
#define BCH_INODE_I_SIZE_DIRTY	(1 << __BCH_INODE_I_SIZE_DIRTY)
#define BCH_INODE_I_SECTORS_DIRTY (1 << __BCH_INODE_I_SECTORS_DIRTY)
#define BCH_INODE_HAS_XATTRS	(1 << __BCH_INODE_HAS_XATTRS)

struct bch_inode_blockdev {
	struct bch_val		v;

	__le64			i_size;
	__le64			i_flags;

	/* Seconds: */
	__le64			i_ctime;
	__le64			i_mtime;

	uuid_le			i_uuid;
	__u8			i_label[32];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(inode_blockdev, BCH_INODE_BLOCKDEV);

/* Thin provisioned volume, or cache for another block device? */
LE64_BITMASK(CACHED_DEV, struct bch_inode_blockdev, i_flags, 0, 1);
/*
 * Dirents (and xattrs) have to implement string lookups; since our b-tree
 * doesn't support arbitrary length strings for the key, we instead index by a
 * 64 bit hash (currently truncated sha1) of the string, stored in the offset
 * field of the key - using linear probing to resolve hash collisions. This also
 * provides us with the readdir cookie posix requires.
 *
 * Linear probing requires us to use whiteouts for deletions, in the event of a
 * collision:
 */

enum {
	BCH_DIRENT		= 128,
	BCH_DIRENT_WHITEOUT	= 129,
};
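
/*
 * Illustrative lookup flow (not actual code): a dirent for "name" in
 * directory inode D is searched for starting at POS(D, hash("name"));
 * on collision the search advances to the next offset (linear probing),
 * treating BCH_DIRENT_WHITEOUT entries left by deletions as occupied
 * slots that don't terminate the probe.
 */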
struct bch_dirent {
	struct bch_val		v;

	/* Target inode number: */
	__le64			d_inum;

	/*
	 * Copy of mode bits 12-15 from the target inode - so userspace can get
	 * the filetype without having to do a stat()
	 */
	__u8			d_type;

	__u8			d_name[];
} __attribute__((packed));
BKEY_VAL_TYPE(dirent, BCH_DIRENT);
enum {
	BCH_XATTR		= 128,
	BCH_XATTR_WHITEOUT	= 129,
};

#define BCH_XATTR_INDEX_USER			0
#define BCH_XATTR_INDEX_POSIX_ACL_ACCESS	1
#define BCH_XATTR_INDEX_POSIX_ACL_DEFAULT	2
#define BCH_XATTR_INDEX_TRUSTED			3
#define BCH_XATTR_INDEX_SECURITY		4

struct bch_xattr {
	struct bch_val		v;
	__u8			x_type;
	__u8			x_name_len;
	__le16			x_val_len;
	__u8			x_name[];
} __attribute__((packed));
BKEY_VAL_TYPE(xattr, BCH_XATTR);
/* Version 0: Cache device
 * Version 1: Backing device
 * Version 2: Seed pointer into btree node checksum
 * Version 3: Cache device with new UUID format
 * Version 4: Backing device with data offset
 * Version 5: All the incompat changes
 * Version 6: Cache device UUIDs all in superblock, another incompat bset change
 */
#define BCACHE_SB_VERSION_CDEV_V0	0
#define BCACHE_SB_VERSION_BDEV		1
#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3
#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4
#define BCACHE_SB_VERSION_CDEV_V2	5
#define BCACHE_SB_VERSION_CDEV_V3	6
#define BCACHE_SB_VERSION_CDEV		6
#define BCACHE_SB_MAX_VERSION		6

#define SB_LABEL_SIZE			32
#define MAX_CACHES_PER_SET		64

#define BDEV_DATA_START_DEFAULT		16	/* sectors */
struct cache_member {
	uuid_le			uuid;

	__le64			nbuckets;	/* device size */
	__le16			first_bucket;	/* index of first bucket used */
	__le16			bucket_size;	/* sectors */
	__le32			pad;
	__le64			last_mount;	/* time_t */

	__le64			f1;
	__le64			f2;
};

LE64_BITMASK(CACHE_STATE,		struct cache_member, f1, 0,  4)
#define CACHE_ACTIVE			0U
#define CACHE_RO			1U
#define CACHE_FAILED			2U
#define CACHE_SPARE			3U
#define CACHE_STATE_NR			4U
LE64_BITMASK(CACHE_TIER,		struct cache_member, f1, 4,  8)
#define CACHE_TIERS			4U

LE64_BITMASK(CACHE_REPLICATION_SET,	struct cache_member, f1, 8,  16)

LE64_BITMASK(CACHE_HAS_METADATA,	struct cache_member, f1, 24, 25)
LE64_BITMASK(CACHE_HAS_DATA,		struct cache_member, f1, 25, 26)

LE64_BITMASK(CACHE_REPLACEMENT,		struct cache_member, f1, 26, 30)
#define CACHE_REPLACEMENT_LRU		0U
#define CACHE_REPLACEMENT_FIFO		1U
#define CACHE_REPLACEMENT_RANDOM	2U
#define CACHE_REPLACEMENT_NR		3U

LE64_BITMASK(CACHE_DISCARD,		struct cache_member, f1, 30, 31);

LE64_BITMASK(CACHE_NR_READ_ERRORS,	struct cache_member, f2, 0,  20);
LE64_BITMASK(CACHE_NR_WRITE_ERRORS,	struct cache_member, f2, 20, 40);
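
/*
 * Example (illustrative): the LE64_BITMASK() invocations above generate
 * accessors, so member state is read and updated as e.g.
 *
 *	struct cache_member *m = ...;
 *
 *	if (CACHE_STATE(m) == CACHE_ACTIVE)
 *		SET_CACHE_TIER(m, 1);
 */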
struct cache_sb {
	__le64			csum;
	__le64			offset;	/* sector where this sb was written */
	__le64			version; /* of on disk format */

	uuid_le			magic;	/* bcache superblock UUID */

	/* Identifies this disk within the cache set: */
	uuid_le			disk_uuid;

	/*
	 * Internal cache set UUID - xored with various magic numbers and thus
	 * stored in the journal, btree nodes and superblock:
	 */
	union {
		uuid_le		set_uuid;
		__le64		set_magic;
	};

	__u8			label[SB_LABEL_SIZE];

	__le64			flags;

	/* Incremented each time superblock is written: */
	__le64			seq;

	/*
	 * User visible UUID for identifying the cache set the user is allowed
	 * to change:
	 */
	uuid_le			user_uuid;
	__le64			flags2;

	/* Number of cache_member entries: */
	__u8			nr_in_set;

	/*
	 * Index of this device - for PTR_DEV(), and also this device's
	 * slot in the cache_member array:
	 */
	__u8			nr_this_dev;

	__le16			block_size;	/* sectors */

	__le16			u64s;	/* size of variable length portion */

	union {
		struct cache_member	members[0];
		/*
		 * Journal buckets also in the variable length portion, after
		 * the member info:
		 */
		__le64			d[0];
	};
};
/* XXX: rename CACHE_SET -> BCH_FS or something? */

LE64_BITMASK(CACHE_SET_SYNC,		struct cache_sb, flags, 0, 1);

LE64_BITMASK(CACHE_SET_ERROR_ACTION,	struct cache_sb, flags, 1, 4);
#define BCH_ON_ERROR_CONTINUE		0U
#define BCH_ON_ERROR_RO			1U
#define BCH_ON_ERROR_PANIC		2U
#define BCH_NR_ERROR_ACTIONS		3U

LE64_BITMASK(CACHE_SET_META_REPLICAS_WANT, struct cache_sb, flags, 4,  8);
LE64_BITMASK(CACHE_SET_DATA_REPLICAS_WANT, struct cache_sb, flags, 8,  12);
#define BCH_REPLICAS_MAX		4U

LE64_BITMASK(CACHE_SB_CSUM_TYPE,	struct cache_sb, flags, 12, 16);

LE64_BITMASK(CACHE_SET_META_PREFERRED_CSUM_TYPE, struct cache_sb, flags, 16, 20);
#define BCH_CSUM_NONE			0U
#define BCH_CSUM_CRC32C			1U
#define BCH_CSUM_CRC64			2U
#define BCH_CSUM_NR			3U

LE64_BITMASK(CACHE_SET_BTREE_NODE_SIZE,	struct cache_sb, flags, 20, 36);

LE64_BITMASK(CACHE_SET_META_REPLICAS_HAVE, struct cache_sb, flags, 36, 40);
LE64_BITMASK(CACHE_SET_DATA_REPLICAS_HAVE, struct cache_sb, flags, 40, 44);
LE64_BITMASK(CACHE_SET_STR_HASH_TYPE,	struct cache_sb, flags, 44, 48);
enum bch_str_hash_type {
	BCH_STR_HASH_CRC32C		= 0,
	BCH_STR_HASH_CRC64		= 1,
	BCH_STR_HASH_SIPHASH		= 2,
	BCH_STR_HASH_SHA1		= 3,
};

#define BCH_STR_HASH_NR			4
LE64_BITMASK(CACHE_SET_DATA_PREFERRED_CSUM_TYPE, struct cache_sb, flags, 48, 52);

LE64_BITMASK(CACHE_SET_COMPRESSION_TYPE, struct cache_sb, flags, 52, 56);
enum {
	BCH_COMPRESSION_NONE		= 0,
	BCH_COMPRESSION_LZ4		= 1,
	BCH_COMPRESSION_GZIP		= 2,
};

#define BCH_COMPRESSION_NR		3U
/* Limit inode numbers to 32 bits: */
LE64_BITMASK(CACHE_INODE_32BIT,		struct cache_sb, flags, 56, 57);

LE64_BITMASK(CACHE_SET_GC_RESERVE,	struct cache_sb, flags, 57, 63);

LE64_BITMASK(CACHE_SET_ROOT_RESERVE,	struct cache_sb, flags2, 0, 6);

/*
 * Did we shut down cleanly? Just a hint, doesn't affect behaviour of
 * mount/recovery path:
 */
LE64_BITMASK(CACHE_SET_CLEAN,		struct cache_sb, flags2, 6, 7);

LE64_BITMASK(CACHE_SET_JOURNAL_ENTRY_SIZE, struct cache_sb, flags2, 7, 15);
/*
 * CACHE_SET_OPT(name, choices, min, max, sb_option, sysfs_writeable)
 *
 * @name - name of mount option, sysfs attribute, and struct cache_set_opts
 *	member
 *
 * @choices - array of strings that the user can select from - option is by
 *	array index
 *
 *	Booleans are special cased; if @choices is bch_bool_opt the mount
 *	options name and noname will work as expected.
 *
 * @min, @max
 *
 * @sb_option - name of corresponding superblock option
 *
 * @sysfs_writeable - if true, option will be modifiable at runtime via sysfs
 */
#define CACHE_SET_SB_OPTS()						\
	CACHE_SET_OPT(errors,						\
		      bch_error_actions,				\
		      0, BCH_NR_ERROR_ACTIONS,				\
		      CACHE_SET_ERROR_ACTION,				\
		      true)						\
	CACHE_SET_OPT(metadata_replicas,				\
		      bch_uint_opt,					\
		      0, BCH_REPLICAS_MAX,				\
		      CACHE_SET_META_REPLICAS_WANT,			\
		      false)						\
	CACHE_SET_OPT(data_replicas,					\
		      bch_uint_opt,					\
		      0, BCH_REPLICAS_MAX,				\
		      CACHE_SET_DATA_REPLICAS_WANT,			\
		      false)						\
	CACHE_SET_OPT(metadata_checksum,				\
		      bch_csum_types,					\
		      0, BCH_CSUM_NR,					\
		      CACHE_SET_META_PREFERRED_CSUM_TYPE,		\
		      true)						\
	CACHE_SET_OPT(data_checksum,					\
		      bch_csum_types,					\
		      0, BCH_CSUM_NR,					\
		      CACHE_SET_DATA_PREFERRED_CSUM_TYPE,		\
		      true)						\
	CACHE_SET_OPT(compression,					\
		      bch_compression_types,				\
		      0, BCH_COMPRESSION_NR,				\
		      CACHE_SET_COMPRESSION_TYPE,			\
		      true)						\
	CACHE_SET_OPT(str_hash,						\
		      bch_str_hash_types,				\
		      0, BCH_STR_HASH_NR,				\
		      CACHE_SET_STR_HASH_TYPE,				\
		      true)						\
	CACHE_SET_OPT(inodes_32bit,					\
		      bch_bool_opt, 0, 2,				\
		      CACHE_INODE_32BIT,				\
		      true)						\
	CACHE_SET_OPT(gc_reserve_percent,				\
		      bch_uint_opt,					\
		      5, 21,						\
		      CACHE_SET_GC_RESERVE,				\
		      true)						\
	CACHE_SET_OPT(root_reserve_percent,				\
		      bch_uint_opt,					\
		      0, 64,						\
		      CACHE_SET_ROOT_RESERVE,				\
		      false)
/* backing device specific stuff: */

struct backingdev_sb {
	__le64			csum;
	__le64			offset;	/* sector where this sb was written */
	__le64			version; /* of on disk format */

	uuid_le			magic;	/* bcache superblock UUID */

	uuid_le			disk_uuid;

	/*
	 * Internal cache set UUID - xored with various magic numbers and thus
	 * stored in the journal, btree nodes and superblock:
	 */
	union {
		uuid_le		set_uuid;
		__le64		set_magic;
	};
	__u8			label[SB_LABEL_SIZE];

	__le64			flags;

	/* Incremented each time superblock is written: */
	__le64			seq;

	/*
	 * User visible UUID for identifying the cache set the user is allowed
	 * to change:
	 */
	uuid_le			user_uuid;

	__le64			data_offset;
	__le16			block_size;	/* sectors */

	__le32			last_mount;	/* time_t */

	/* size of variable length portion - always 0 for backingdev superblock */
	__le16			u64s;
};
LE64_BITMASK(BDEV_CACHE_MODE,		struct backingdev_sb, flags, 0, 4);
#define CACHE_MODE_WRITETHROUGH		0U
#define CACHE_MODE_WRITEBACK		1U
#define CACHE_MODE_WRITEAROUND		2U
#define CACHE_MODE_NONE			3U

LE64_BITMASK(BDEV_STATE,		struct backingdev_sb, flags, 61, 63);
#define BDEV_STATE_NONE			0U
#define BDEV_STATE_CLEAN		1U
#define BDEV_STATE_DIRTY		2U
#define BDEV_STATE_STALE		3U
static inline unsigned bch_journal_buckets_offset(struct cache_sb *sb)
{
	return sb->nr_in_set * (sizeof(struct cache_member) / sizeof(__u64));
}

static inline unsigned bch_nr_journal_buckets(struct cache_sb *sb)
{
	return __le16_to_cpu(sb->u64s) - bch_journal_buckets_offset(sb);
}

static inline _Bool __SB_IS_BDEV(__u64 version)
{
	return version == BCACHE_SB_VERSION_BDEV
		|| version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET;
}

static inline _Bool SB_IS_BDEV(const struct cache_sb *sb)
{
	return __SB_IS_BDEV(__le64_to_cpu(sb->version));
}
/*
 * The various other data structures have their own magic numbers, which are
 * xored with the first part of the cache set's UUID
 */

#define BCACHE_MAGIC							\
	UUID_LE(0xf67385c6, 0x1a4e, 0xca45,				\
		0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)

#define BCACHE_STATFS_MAGIC		0xca451a4e

#define BCACHE_SB_MAGIC			0xca451a4ef67385c6ULL
#define BCACHE_SB_MAGIC2		0x816dba487ff56582ULL
#define JSET_MAGIC			0x245235c1a3625032ULL
#define PSET_MAGIC			0x6750e15f87337f91ULL
#define BSET_MAGIC			0x90135c78b99e07f5ULL
static inline __u64 jset_magic(struct cache_sb *sb)
{
	return __le64_to_cpu(sb->set_magic) ^ JSET_MAGIC;
}

static inline __u64 pset_magic(struct cache_sb *sb)
{
	return __le64_to_cpu(sb->set_magic) ^ PSET_MAGIC;
}

static inline __u64 bset_magic(struct cache_sb *sb)
{
	return __le64_to_cpu(sb->set_magic) ^ BSET_MAGIC;
}
#define BCACHE_JSET_VERSION_UUIDv1	1
#define BCACHE_JSET_VERSION_UUID	1	/* Always latest UUID format */
#define BCACHE_JSET_VERSION_JKEYS	2
#define BCACHE_JSET_VERSION		2
struct jset_entry {
	__le16			u64s;
	__u8			btree_id;
	__u8			level;
	__le32			flags;	/* designates what this jset holds */

	union {
		struct bkey_i	start[0];
		__u64		_data[0];
	};
};

#define JSET_KEYS_U64s	(sizeof(struct jset_entry) / sizeof(__u64))

LE32_BITMASK(JOURNAL_ENTRY_TYPE,	struct jset_entry, flags, 0, 8);
enum {
	JOURNAL_ENTRY_BTREE_KEYS	= 0,
	JOURNAL_ENTRY_BTREE_ROOT	= 1,
	JOURNAL_ENTRY_PRIO_PTRS		= 2,

	/*
	 * Journal sequence numbers can be blacklisted: bsets record the max
	 * sequence number of all the journal entries they contain updates for,
	 * so that on recovery we can ignore those bsets that contain index
	 * updates newer than what made it into the journal.
	 *
	 * This means that we can't reuse that journal_seq - we have to skip it,
	 * and then record that we skipped it so that the next time we crash and
	 * recover we don't think there was a missing journal entry.
	 */
	JOURNAL_ENTRY_JOURNAL_SEQ_BLACKLISTED = 3,
};
/*
 * On disk format for a journal entry:
 * seq is monotonically increasing; every journal entry has its own unique
 * sequence number.
 *
 * last_seq is the oldest journal entry that still has keys the btree hasn't
 * flushed to disk yet.
 *
 * version is for on disk format changes.
 */
struct jset {
	__le64			csum;
	__le64			magic;
	__le64			seq;
	__le32			version;
	__le32			flags;

	/* Sequence number of oldest dirty journal entry */
	__le64			last_seq;

	__le16			read_clock;
	__le16			write_clock;

	__le32			u64s; /* size of d[] in u64s */

	union {
		struct jset_entry start[0];
		__u64		_data[0];
	};
};
LE32_BITMASK(JSET_CSUM_TYPE,	struct jset, flags, 0, 4);
LE32_BITMASK(JSET_BIG_ENDIAN,	struct jset, flags, 4, 5);

#define BCH_JOURNAL_BUCKETS_MIN		20

/* Bucket prios/gens */

struct prio_set {
	__le64			csum;
	__le64			magic;
	__le64			seq;
	__le32			version;
	__le32			flags;

	__le64			next_bucket;

	struct bucket_disk {
		__le16		prio;
		__u8		gen;
	} __attribute__((packed)) data[];
};

LE32_BITMASK(PSET_CSUM_TYPE,	struct prio_set, flags, 0, 4);
#define DEFINE_BCH_BTREE_IDS()					\
	DEF_BTREE_ID(EXTENTS, 0, "extents")			\
	DEF_BTREE_ID(INODES,  1, "inodes")			\
	DEF_BTREE_ID(DIRENTS, 2, "dirents")			\
	DEF_BTREE_ID(XATTRS,  3, "xattrs")

#define DEF_BTREE_ID(kwd, val, name) BTREE_ID_##kwd = val,

enum btree_id {
	DEFINE_BCH_BTREE_IDS()
	BTREE_ID_NR
};

#undef DEF_BTREE_ID

#define BTREE_MAX_DEPTH		4U
/* Version 1: Seed pointer into btree node checksum */
#define BCACHE_BSET_CSUM		1
#define BCACHE_BSET_KEY_v1		2
#define BCACHE_BSET_JOURNAL_SEQ		3
#define BCACHE_BSET_VERSION		3
/*
 * On disk a btree node is a list/log of these; within each set the keys are
 * sorted
 */
struct bset {
	__le64			seq;

	/*
	 * Highest journal entry this bset contains keys for.
	 * If on recovery we don't see that journal entry, this bset is ignored:
	 * this allows us to preserve the order of all index updates after a
	 * crash, since the journal records a total order of all index updates
	 * and anything that didn't make it to the journal doesn't get used.
	 */
	__le64			journal_seq;

	__le32			flags;
	__le16			version;
	__le16			u64s; /* count of d[] in u64s */

	union {
		struct bkey_packed start[0];
		__u64		_data[0];
	};
} __attribute__((packed));
LE32_BITMASK(BSET_CSUM_TYPE,	struct bset, flags, 0, 4);

/* Only used in first bset */
LE32_BITMASK(BSET_BTREE_LEVEL,	struct bset, flags, 4, 8);

LE32_BITMASK(BSET_BIG_ENDIAN,	struct bset, flags, 8, 9);
LE32_BITMASK(BSET_SEPARATE_WHITEOUTS, struct bset, flags, 9, 10);
struct btree_node {
	__le64			csum;
	__le64			magic;

	/* Closed interval: */
	struct bpos		min_key;
	struct bpos		max_key;
	struct bkey_format	format;

	struct bset		keys;
} __attribute__((packed));

struct btree_node_entry {
	__le64			csum;

	struct bset		keys;
} __attribute__((packed));
#define BITMASK(name, type, field, offset, end)				\
static const unsigned	name##_OFFSET = offset;				\
static const unsigned	name##_BITS = (end - offset);			\
static const __u64	name##_MAX = (1ULL << (end - offset)) - 1;	\
									\
static inline __u64 name(const type *k)				\
{ return (k->field >> offset) & ~(~0ULL << (end - offset)); }		\
									\
static inline void SET_##name(type *k, __u64 v)				\
{									\
	k->field &= ~(~(~0ULL << (end - offset)) << offset);		\
	k->field |= (v & ~(~0ULL << (end - offset))) << offset;	\
}
struct bkey_v0 {
	__u64	high;
	__u64	low;
	__u64	ptr[];
};

#define KEY0_FIELD(name, field, offset, end)				\
	BITMASK(name, struct bkey_v0, field, offset, end)

KEY0_FIELD(KEY0_PTRS,		high, 60, 63)
KEY0_FIELD(KEY0_CSUM,		high, 56, 58)
KEY0_FIELD(KEY0_DIRTY,		high, 36, 37)

KEY0_FIELD(KEY0_SIZE,		high, 20, 36)
KEY0_FIELD(KEY0_INODE,		high, 0,  20)
static inline unsigned long bkey_v0_u64s(const struct bkey_v0 *k)
{
	return (sizeof(struct bkey_v0) / sizeof(__u64)) + KEY0_PTRS(k);
}

static inline struct bkey_v0 *bkey_v0_next(const struct bkey_v0 *k)
{
	__u64 *d = (__u64 *) k;

	return (struct bkey_v0 *) (d + bkey_v0_u64s(k));
}
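
/*
 * Usage sketch (illustrative only): since a v0 key's size depends on
 * its pointer count, a packed array of them is walked with
 * bkey_v0_next(), e.g.
 *
 *	for (k = keys; k < end; k = bkey_v0_next(k))
 *		...
 */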
struct jset_v0 {
	__u64			csum;
	__u64			magic;
	__u64			seq;
	__u32			version;
	__u32			keys;

	__u64			last_seq;

	__BKEY_PADDED(uuid_bucket, 4);
	__BKEY_PADDED(btree_root, 4);
	__u16			btree_level;
	__u16			pad[3];

	__u64			prio_bucket[MAX_CACHES_PER_SET];

	union {
		struct bkey	start[0];
		__u64		d[0];
	};
};
/* UUIDS - per backing device/flash only volume metadata */

struct uuid_entry_v0 {
	uuid_le		uuid;
	__u8		label[32];
	__u32		first_reg;
	__u32		last_reg;
	__u32		invalidated;
	__u32		pad;
};

struct uuid_entry {
	union {
		struct {
			uuid_le	uuid;
			__u8	label[32];
			__u32	first_reg;
			__u32	last_reg;
			__u32	invalidated;

			__u32	flags;
			/* Size of flash only volumes */
			__u64	sectors;
		};

		__u8		pad[128];
	};
};

BITMASK(UUID_FLASH_ONLY, struct uuid_entry, flags, 0, 1);
#endif /* _LINUX_BCACHE_H */

/* vim: set foldnestmax=2: */