1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_CHECKSUM_H
3 #define _BCACHEFS_CHECKSUM_H
6 #include "extents_types.h"
9 #include <linux/crc64.h>
10 #include <crypto/chacha.h>
/*
 * Reports whether checksums of @type can be combined arithmetically
 * (see bch2_checksum_merge() below) without re-reading the data.
 * NOTE(review): the switch body is elided in this listing — confirm
 * which checksum types qualify against the full source.
 */
12 static inline bool bch2_checksum_mergeable(unsigned type)
/*
 * Combine the checksums of two adjacent regions into one checksum over
 * both. NOTE(review): presumably the size_t is the length of the second
 * region — verify against the definition in checksum.c.
 */
25 struct bch_csum bch2_checksum_merge(unsigned, struct bch_csum,
26 struct bch_csum, size_t);
/*
 * Nonce namespace tags, ORed into the high bits of a nonce word so that
 * nonces used for different object types (extents, btree nodes, journal,
 * prio sets, poly1305 MAC keys) can never collide with each other.
 *
 * NOTE(review): BCH_NONCE_POLY shifts 1 (signed int) by 31, which is
 * undefined behavior in standard C — 1U << 31 would be safer; confirm
 * against the upstream source before changing.
 */
28 #define BCH_NONCE_EXTENT cpu_to_le32(1 << 28)
29 #define BCH_NONCE_BTREE cpu_to_le32(2 << 28)
30 #define BCH_NONCE_JOURNAL cpu_to_le32(3 << 28)
31 #define BCH_NONCE_PRIO cpu_to_le32(4 << 28)
32 #define BCH_NONCE_POLY cpu_to_le32(1 << 31)
/*
 * Checksum (or MAC, for encryption-type checksums) a contiguous buffer:
 * (fs, checksum type, nonce, data pointer, length).
 */
34 struct bch_csum bch2_checksum(struct bch_fs *, unsigned, struct nonce,
35 const void *, size_t);
/*
 * csum_vstruct(): checksums a variable-length on-disk structure, skipping
 * the leading csum field itself and covering everything up to
 * vstruct_end(_i). The comment opener and the ({ ... }) statement-
 * expression wrapper are elided in this listing.
 */
38 * This is used for various on disk data structures - bch_sb, prio_set, bset,
39 * jset: The checksum is _always_ the first field of these structs
41 #define csum_vstruct(_c, _type, _nonce, _i) \
43 const void *start = ((const void *) (_i)) + sizeof((_i)->csum); \
44 const void *end = vstruct_end(_i); \
46 bch2_checksum(_c, _type, _nonce, start, end - start); \
/* Encrypt/decrypt a buffer with ChaCha using @key (symmetric cipher, so
 * the same call both encrypts and decrypts). */
49 int bch2_chacha_encrypt_key(struct bch_key *, struct nonce, void *, size_t);
/* Look up the filesystem's encryption key (e.g. from the kernel keyring)
 * for the given superblock. */
50 int bch2_request_key(struct bch_sb *, struct bch_key *);
/* Encrypt/decrypt a buffer in place using the filesystem's key.
 * NOTE(review): continuation line with the buffer/length parameters is
 * elided in this listing. */
52 int bch2_encrypt(struct bch_fs *, unsigned, struct nonce,
/* Checksum the data contained in a bio. */
55 struct bch_csum bch2_checksum_bio(struct bch_fs *, unsigned,
56 struct nonce, struct bio *);
/* Recompute checksums for a bio when splitting/merging extents:
 * takes the old unpacked crc and produces new ones.
 * NOTE(review): parameter meanings (which crc is input vs. the two
 * outputs, and the three unsigneds) are not visible here — verify
 * against the definition before documenting further. */
58 int bch2_rechecksum_bio(struct bch_fs *, struct bio *, struct bversion,
59 struct bch_extent_crc_unpacked,
60 struct bch_extent_crc_unpacked *,
61 struct bch_extent_crc_unpacked *,
62 unsigned, unsigned, unsigned);
/* Unconditionally encrypt/decrypt a bio in place; callers normally go
 * through the bch2_encrypt_bio() wrapper below. */
64 int __bch2_encrypt_bio(struct bch_fs *, unsigned,
65 struct nonce, struct bio *);
/*
 * Encrypt/decrypt @bio in place, but only when @type is an
 * encryption-style checksum type; otherwise a no-op. The no-op branch
 * (": 0;") and closing brace are elided in this listing.
 */
67 static inline int bch2_encrypt_bio(struct bch_fs *c, unsigned type,
68 struct nonce nonce, struct bio *bio)
70 return bch2_csum_type_is_encryption(type)
71 ? __bch2_encrypt_bio(c, type, nonce, bio)
/* Decrypt the encrypted key stored in the superblock's crypt field.
 * NOTE(review): the output-key parameter line is elided in this listing. */
75 int bch2_decrypt_sb_key(struct bch_fs *, struct bch_sb_field_crypt *,
/* Turn filesystem encryption off / on (bool presumably selects keyed
 * vs. passphrase-less mode — verify against the definition). */
78 int bch2_disable_encryption(struct bch_fs *);
79 int bch2_enable_encryption(struct bch_fs *, bool);
/* Tear down / set up the per-filesystem encryption state (cipher
 * transforms, keys) at unmount/mount time. */
81 void bch2_fs_encryption_exit(struct bch_fs *);
82 int bch2_fs_encryption_init(struct bch_fs *);
/*
 * Map a user-visible checksum option to the on-disk checksum type.
 * For crc32c/crc64, @data selects the plain variant for user data and
 * the "_nonzero" variant for metadata (visible in the returns below).
 * The switch braces, the BCH_CSUM_OPT_none return, the second parameter
 * line (bool data), and the default case are elided in this listing.
 */
84 static inline enum bch_csum_type bch2_csum_opt_to_type(enum bch_csum_opts type,
88 case BCH_CSUM_OPT_none:
90 case BCH_CSUM_OPT_crc32c:
91 return data ? BCH_CSUM_crc32c : BCH_CSUM_crc32c_nonzero;
92 case BCH_CSUM_OPT_crc64:
93 return data ? BCH_CSUM_crc64 : BCH_CSUM_crc64_nonzero;
94 case BCH_CSUM_OPT_xxhash:
95 return BCH_CSUM_xxhash;
/*
 * Select the checksum type for user data writes.
 * When the filesystem is encrypted, data must carry an authenticating
 * MAC: wide_macs chooses the 128-bit chacha20/poly1305 tag over the
 * truncated 80-bit one. Otherwise the per-inode data_checksum option
 * decides (mapped via bch2_csum_opt_to_type with data == true).
 */
101 static inline enum bch_csum_type bch2_data_checksum_type(struct bch_fs *c,
102 struct bch_io_opts opts)
107 if (c->sb.encryption_type)
108 return c->opts.wide_macs
109 ? BCH_CSUM_chacha20_poly1305_128
110 : BCH_CSUM_chacha20_poly1305_80;
112 return bch2_csum_opt_to_type(opts.data_checksum, true);
/*
 * Select the checksum type for metadata writes. Encrypted filesystems
 * always use the full 128-bit MAC for metadata (no wide_macs opt-out,
 * unlike data above); otherwise the metadata_checksum option decides.
 */
115 static inline enum bch_csum_type bch2_meta_checksum_type(struct bch_fs *c)
117 if (c->sb.encryption_type)
118 return BCH_CSUM_chacha20_poly1305_128;
120 return bch2_csum_opt_to_type(c->opts.metadata_checksum, false);
/*
 * Validate an on-disk checksum type value: it must be a known type
 * (< BCH_CSUM_NR), and encryption-style types additionally require the
 * chacha20 transform to have been set up on this filesystem.
 * The return statements and braces are elided in this listing.
 */
123 static inline bool bch2_checksum_type_valid(const struct bch_fs *c,
126 if (type >= BCH_CSUM_NR)
129 if (bch2_csum_type_is_encryption(type) && !c->chacha20)
135 /* returns true if not equal */
/*
 * Compare two 128-bit checksums without early exit: XOR both halves and
 * OR the results, so the comparison takes the same time regardless of
 * where the values differ (important when the csum is a MAC).
 */
136 static inline bool bch2_crc_cmp(struct bch_csum l, struct bch_csum r)
139 * XXX: need some way of preventing the compiler from optimizing this
140 * into a form that isn't constant time..
142 return ((l.lo ^ r.lo) | (l.hi ^ r.hi)) != 0;
145 /* for skipping ahead and encrypting/decrypting at an offset: */
/*
 * Advance a nonce by @offset bytes: @offset must be ChaCha-block-aligned
 * (enforced by the EBUG_ON), and the block counter lives in word 0 of
 * the nonce. The return statement is elided in this listing.
 */
146 static inline struct nonce nonce_add(struct nonce nonce, unsigned offset)
148 EBUG_ON(offset & (CHACHA_BLOCK_SIZE - 1));
150 le32_add_cpu(&nonce.d[0], offset / CHACHA_BLOCK_SIZE);
/*
 * Return an all-zeroes nonce. The local declaration and return are
 * elided in this listing.
 */
154 static inline struct nonce null_nonce(void)
158 memset(&ret, 0, sizeof(ret));
/*
 * Derive the per-extent encryption nonce from the extent's version and
 * its crc entry: word 0 packs the uncompressed size (only nonzero for
 * compressed extents), words 1-2 hold the 64-bit version.lo, and word 3
 * mixes version.hi with the compression type, tagged BCH_NONCE_EXTENT.
 * Finally the nonce is advanced by crc.nonce sectors (<< 9 presumably
 * converts 512-byte sectors to bytes for nonce_add — confirm).
 * The ": 0" fallback for compression_type and the closing braces are
 * elided in this listing.
 */
162 static inline struct nonce extent_nonce(struct bversion version,
163 struct bch_extent_crc_unpacked crc)
165 unsigned compression_type = crc_is_compressed(crc)
166 ? crc.compression_type
168 unsigned size = compression_type ? crc.uncompressed_size : 0;
169 struct nonce nonce = (struct nonce) {{
170 [0] = cpu_to_le32(size << 22),
171 [1] = cpu_to_le32(version.lo),
172 [2] = cpu_to_le32(version.lo >> 32),
173 [3] = cpu_to_le32(version.hi|
174 (compression_type << 24))^BCH_NONCE_EXTENT,
177 return nonce_add(nonce, crc.nonce << 9);
/*
 * A decrypted key starts with BCH_KEY_MAGIC; if the magic does not
 * match, the key material is still in its encrypted form.
 */
180 static inline bool bch2_key_is_encrypted(struct bch_encrypted_key *key)
182 return le64_to_cpu(key->magic) != BCH_KEY_MAGIC;
/*
 * Nonce used to encrypt/decrypt the key stored in the superblock,
 * derived from the raw superblock magic (words 2-3 below; entries
 * [0]/[1] are elided in this listing — presumably zero/tagged, confirm
 * against the full source).
 */
185 static inline struct nonce __bch2_sb_key_nonce(struct bch_sb *sb)
187 __le64 magic = __bch2_sb_magic(sb);
189 return (struct nonce) {{
192 [2] = ((__le32 *) &magic)[0],
193 [3] = ((__le32 *) &magic)[1],
/*
 * Same as __bch2_sb_key_nonce() but takes the filesystem and uses
 * bch2_sb_magic(c) for the magic; entries [0]/[1] of the initializer
 * are likewise elided in this listing.
 */
197 static inline struct nonce bch2_sb_key_nonce(struct bch_fs *c)
199 __le64 magic = bch2_sb_magic(c);
201 return (struct nonce) {{
204 [2] = ((__le32 *) &magic)[0],
205 [3] = ((__le32 *) &magic)[1],
209 #endif /* _BCACHEFS_CHECKSUM_H */