// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "checksum.h"
#include "super.h"
#include "super-io.h"

#include <linux/crc32c.h>
#include <linux/crypto.h>
#include <linux/xxhash.h>
#include <linux/key.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <crypto/chacha.h>
#include <crypto/hash.h>
#include <crypto/poly1305.h>
#include <crypto/skcipher.h>
#include <keys/user-type.h>

/*
 * bch2_checksum_state is an abstraction of the checksum state calculated over
 * different pages.  It features page merging without having the checksum
 * algorithm lose its state.  For native checksum algorithms (like crc), a
 * default seed value will do.  For hash-like algorithms, a state needs to be
 * stored.
 */

struct bch2_checksum_state {
	union {
		u64			seed;
		struct xxh64_state	h64state;
	};
	unsigned int			type;
};

static void bch2_checksum_init(struct bch2_checksum_state *state)
{
	switch (state->type) {
	case BCH_CSUM_none:
	case BCH_CSUM_crc32c:
	case BCH_CSUM_crc64:
		state->seed = 0;
		break;
	case BCH_CSUM_crc32c_nonzero:
		state->seed = U32_MAX;
		break;
	case BCH_CSUM_crc64_nonzero:
		state->seed = U64_MAX;
		break;
	case BCH_CSUM_xxhash:
		xxh64_reset(&state->h64state, 0);
		break;
	default:
		BUG();
	}
}

static u64 bch2_checksum_final(const struct bch2_checksum_state *state)
{
	switch (state->type) {
	case BCH_CSUM_none:
	case BCH_CSUM_crc32c:
	case BCH_CSUM_crc64:
		return state->seed;
	case BCH_CSUM_crc32c_nonzero:
		return state->seed ^ U32_MAX;
	case BCH_CSUM_crc64_nonzero:
		return state->seed ^ U64_MAX;
	case BCH_CSUM_xxhash:
		return xxh64_digest(&state->h64state);
	default:
		BUG();
	}
}

static void bch2_checksum_update(struct bch2_checksum_state *state, const void *data, size_t len)
{
	switch (state->type) {
	case BCH_CSUM_none:
		return;
	case BCH_CSUM_crc32c_nonzero:
	case BCH_CSUM_crc32c:
		state->seed = crc32c(state->seed, data, len);
		break;
	case BCH_CSUM_crc64_nonzero:
	case BCH_CSUM_crc64:
		state->seed = crc64_be(state->seed, data, len);
		break;
	case BCH_CSUM_xxhash:
		xxh64_update(&state->h64state, data, len);
		break;
	default:
		BUG();
	}
}

static inline int do_encrypt_sg(struct crypto_sync_skcipher *tfm,
				struct nonce nonce,
				struct scatterlist *sg, size_t len)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
	int ret;

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_crypt(req, sg, sg, len, nonce.d);

	ret = crypto_skcipher_encrypt(req);
	if (ret)
		pr_err("got error %i from crypto_skcipher_encrypt()", ret);

	return ret;
}

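/*
 * Encrypt a buffer that may be either linearly mapped or vmalloc'd: a linear
 * buffer goes through a single on-stack scatterlist entry, while a vmalloc'd
 * buffer has to be walked page by page through an allocated scatterlist.
 */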
static inline int do_encrypt(struct crypto_sync_skcipher *tfm,
			     struct nonce nonce,
			     void *buf, size_t len)
{
	if (!is_vmalloc_addr(buf)) {
		struct scatterlist sg;

		sg_init_table(&sg, 1);
		sg_set_page(&sg,
			    is_vmalloc_addr(buf)
			    ? vmalloc_to_page(buf)
			    : virt_to_page(buf),
			    len, offset_in_page(buf));
		return do_encrypt_sg(tfm, nonce, &sg, len);
	} else {
		unsigned pages = buf_pages(buf, len);
		struct scatterlist *sg;
		size_t orig_len = len;
		int ret, i;

		sg = kmalloc_array(sizeof(*sg), pages, GFP_KERNEL);
		if (!sg)
			return -ENOMEM;

		sg_init_table(sg, pages);

		for (i = 0; i < pages; i++) {
			unsigned offset = offset_in_page(buf);
			unsigned pg_len = min(len, PAGE_SIZE - offset);

			sg_set_page(sg + i, vmalloc_to_page(buf), pg_len, offset);
			buf += pg_len;
			len -= pg_len;
		}

		ret = do_encrypt_sg(tfm, nonce, sg, orig_len);
		kfree(sg);
		return ret;
	}
}

int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
			    void *buf, size_t len)
{
	struct crypto_sync_skcipher *chacha20 =
		crypto_alloc_sync_skcipher("chacha20", 0, 0);
	int ret;

	if (IS_ERR(chacha20)) {
		pr_err("error requesting chacha20 module: %li", PTR_ERR(chacha20));
		return PTR_ERR(chacha20);
	}

	ret = crypto_skcipher_setkey(&chacha20->base,
				     (void *) key, sizeof(*key));
	if (ret) {
		pr_err("crypto_skcipher_setkey() error: %i", ret);
		goto err;
	}

	ret = do_encrypt(chacha20, nonce, buf, len);
err:
	crypto_free_sync_skcipher(chacha20);
	return ret;
}

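/*
 * Derive a one-time Poly1305 key by encrypting a zeroed key buffer with
 * ChaCha20, using the nonce with BCH_NONCE_POLY mixed into its last word,
 * then initialize the shash descriptor and feed the derived key through it.
 */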
static int gen_poly_key(struct bch_fs *c, struct shash_desc *desc,
			struct nonce nonce)
{
	u8 key[POLY1305_KEY_SIZE];
	int ret;

	nonce.d[3] ^= BCH_NONCE_POLY;

	memset(key, 0, sizeof(key));
	ret = do_encrypt(c->chacha20, nonce, key, sizeof(key));
	if (ret)
		return ret;

	desc->tfm = c->poly1305;
	crypto_shash_init(desc);
	crypto_shash_update(desc, key, sizeof(key));
	return 0;
}

struct bch_csum bch2_checksum(struct bch_fs *c, unsigned type,
			      struct nonce nonce, const void *data, size_t len)
{
	switch (type) {
	case BCH_CSUM_none:
	case BCH_CSUM_crc32c_nonzero:
	case BCH_CSUM_crc64_nonzero:
	case BCH_CSUM_crc32c:
	case BCH_CSUM_xxhash:
	case BCH_CSUM_crc64: {
		struct bch2_checksum_state state;

		state.type = type;

		bch2_checksum_init(&state);
		bch2_checksum_update(&state, data, len);

		return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
	}

	case BCH_CSUM_chacha20_poly1305_80:
	case BCH_CSUM_chacha20_poly1305_128: {
		SHASH_DESC_ON_STACK(desc, c->poly1305);
		u8 digest[POLY1305_DIGEST_SIZE];
		struct bch_csum ret = { 0 };

		gen_poly_key(c, desc, nonce);

		crypto_shash_update(desc, data, len);
		crypto_shash_final(desc, digest);

		memcpy(&ret, digest, bch_crc_bytes[type]);
		return ret;
	}
	default:
		BUG();
	}
}

int bch2_encrypt(struct bch_fs *c, unsigned type,
		 struct nonce nonce, void *data, size_t len)
{
	if (!bch2_csum_type_is_encryption(type))
		return 0;

	return do_encrypt(c->chacha20, nonce, data, len);
}

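/*
 * Checksum (or MAC) the data described by @iter, walking the bio one segment
 * at a time; with CONFIG_HIGHMEM each page is temporarily mapped with
 * kmap_atomic().
 */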
static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
					   struct nonce nonce, struct bio *bio,
					   struct bvec_iter *iter)
{
	struct bio_vec bv;

	switch (type) {
	case BCH_CSUM_none:
		return (struct bch_csum) { 0 };
	case BCH_CSUM_crc32c_nonzero:
	case BCH_CSUM_crc64_nonzero:
	case BCH_CSUM_crc32c:
	case BCH_CSUM_xxhash:
	case BCH_CSUM_crc64: {
		struct bch2_checksum_state state;

		state.type = type;
		bch2_checksum_init(&state);

#ifdef CONFIG_HIGHMEM
		__bio_for_each_segment(bv, bio, *iter, *iter) {
			void *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
			bch2_checksum_update(&state, p, bv.bv_len);
			kunmap_atomic(p);
		}
#else
		__bio_for_each_bvec(bv, bio, *iter, *iter)
			bch2_checksum_update(&state, page_address(bv.bv_page) + bv.bv_offset,
				bv.bv_len);
#endif
		return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
	}

	case BCH_CSUM_chacha20_poly1305_80:
	case BCH_CSUM_chacha20_poly1305_128: {
		SHASH_DESC_ON_STACK(desc, c->poly1305);
		u8 digest[POLY1305_DIGEST_SIZE];
		struct bch_csum ret = { 0 };

		gen_poly_key(c, desc, nonce);

#ifdef CONFIG_HIGHMEM
		__bio_for_each_segment(bv, bio, *iter, *iter) {
			void *p = kmap_atomic(bv.bv_page) + bv.bv_offset;

			crypto_shash_update(desc, p, bv.bv_len);
			kunmap_atomic(p);
		}
#else
		__bio_for_each_bvec(bv, bio, *iter, *iter)
			crypto_shash_update(desc,
				page_address(bv.bv_page) + bv.bv_offset,
				bv.bv_len);
#endif
		crypto_shash_final(desc, digest);

		memcpy(&ret, digest, bch_crc_bytes[type]);
		return ret;
	}
	default:
		BUG();
	}
}

struct bch_csum bch2_checksum_bio(struct bch_fs *c, unsigned type,
				  struct nonce nonce, struct bio *bio)
{
	struct bvec_iter iter = bio->bi_iter;

	return __bch2_checksum_bio(c, type, nonce, bio, &iter);
}

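/*
 * Encrypt a bio in place, batching up to 16 segments at a time into an
 * on-stack scatterlist and advancing the nonce by the number of bytes
 * already encrypted after each batch.
 */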
int bch2_encrypt_bio(struct bch_fs *c, unsigned type,
		     struct nonce nonce, struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	struct scatterlist sgl[16], *sg = sgl;
	size_t bytes = 0;
	int ret = 0;

	if (!bch2_csum_type_is_encryption(type))
		return 0;

	sg_init_table(sgl, ARRAY_SIZE(sgl));

	bio_for_each_segment(bv, bio, iter) {
		if (sg == sgl + ARRAY_SIZE(sgl)) {
			sg_mark_end(sg - 1);

			ret = do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
			if (ret)
				return ret;

			nonce = nonce_add(nonce, bytes);
			bytes = 0;

			sg_init_table(sgl, ARRAY_SIZE(sgl));
			sg = sgl;
		}

		sg_set_page(sg++, bv.bv_page, bv.bv_len, bv.bv_offset);
		bytes += bv.bv_len;
	}

	sg_mark_end(sg - 1);
	return do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
}

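/*
 * Merge two checksums: extend @a across @b_len zero bytes (feeding ZERO_PAGE
 * through the checksum state one page at a time), then xor in @b.  Only
 * valid for checksum types where bch2_checksum_mergeable() is true, which is
 * enforced with the BUG_ON below.
 */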
struct bch_csum bch2_checksum_merge(unsigned type, struct bch_csum a,
				    struct bch_csum b, size_t b_len)
{
	struct bch2_checksum_state state;

	state.type = type;
	bch2_checksum_init(&state);
	state.seed = le64_to_cpu(a.lo);

	BUG_ON(!bch2_checksum_mergeable(type));

	while (b_len) {
		unsigned b = min_t(unsigned, b_len, PAGE_SIZE);

		bch2_checksum_update(&state,
				page_address(ZERO_PAGE(0)), b);
		b_len -= b;
	}
	a.lo = cpu_to_le64(bch2_checksum_final(&state));
	a.lo ^= b.lo;
	a.hi ^= b.hi;
	return a;
}

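/*
 * Recompute checksums for a bio that is being split into up to three ranges
 * (len_a, len_b, and the remainder): the old checksum is recomputed (or
 * merged from the per-range checksums when the type allows it) and compared
 * against crc_old.csum before the new unpacked crcs are filled in.
 */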
int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
			struct bversion version,
			struct bch_extent_crc_unpacked crc_old,
			struct bch_extent_crc_unpacked *crc_a,
			struct bch_extent_crc_unpacked *crc_b,
			unsigned len_a, unsigned len_b,
			unsigned new_csum_type)
{
	struct bvec_iter iter = bio->bi_iter;
	struct nonce nonce = extent_nonce(version, crc_old);
	struct bch_csum merged = { 0 };
	struct {
		struct bch_extent_crc_unpacked	*crc;
		unsigned			len;
		unsigned			csum_type;
		struct bch_csum			csum;
	} splits[3] = {
		{ crc_a, len_a, new_csum_type },
		{ crc_b, len_b, new_csum_type },
		{ NULL,	 bio_sectors(bio) - len_a - len_b, new_csum_type },
	}, *i;
	bool mergeable = crc_old.csum_type == new_csum_type &&
		bch2_checksum_mergeable(new_csum_type);
	unsigned crc_nonce = crc_old.nonce;

	BUG_ON(len_a + len_b > bio_sectors(bio));
	BUG_ON(crc_old.uncompressed_size != bio_sectors(bio));
	BUG_ON(crc_is_compressed(crc_old));
	BUG_ON(bch2_csum_type_is_encryption(crc_old.csum_type) !=
	       bch2_csum_type_is_encryption(new_csum_type));

	for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
		iter.bi_size = i->len << 9;
		if (mergeable || i->crc)
			i->csum = __bch2_checksum_bio(c, i->csum_type,
						      nonce, bio, &iter);
		else
			bio_advance_iter(bio, &iter, i->len << 9);
		nonce = nonce_add(nonce, i->len << 9);
	}

	if (mergeable)
		for (i = splits; i < splits + ARRAY_SIZE(splits); i++)
			merged = bch2_checksum_merge(new_csum_type, merged,
						     i->csum, i->len << 9);
	else
		merged = bch2_checksum_bio(c, crc_old.csum_type,
				extent_nonce(version, crc_old), bio);

	if (bch2_crc_cmp(merged, crc_old.csum)) {
		bch_err(c, "checksum error in bch2_rechecksum_bio() (memory corruption or bug?)\n"
			"expected %0llx:%0llx got %0llx:%0llx (old type %s new type %s)",
			crc_old.csum.hi,
			crc_old.csum.lo,
			merged.hi,
			merged.lo,
			bch2_csum_types[crc_old.csum_type],
			bch2_csum_types[new_csum_type]);
		return -EIO;
	}

	for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
		if (i->crc)
			*i->crc = (struct bch_extent_crc_unpacked) {
				.csum_type		= i->csum_type,
				.compression_type	= crc_old.compression_type,
				.compressed_size	= i->len,
				.uncompressed_size	= i->len,
				.offset			= 0,
				.live_size		= i->len,
				.nonce			= crc_nonce,
				.csum			= i->csum,
			};

		if (bch2_csum_type_is_encryption(new_csum_type))
			crc_nonce += i->len;
	}

	return 0;
}

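/*
 * Look up the filesystem encryption key: in the kernel this goes through the
 * user keyring via request_key(), while the userspace build below uses
 * keyutils.
 */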
#ifdef __KERNEL__
static int __bch2_request_key(char *key_description, struct bch_key *key)
{
	struct key *keyring_key;
	const struct user_key_payload *ukp;
	int ret;

	keyring_key = request_key(&key_type_user, key_description, NULL);
	if (IS_ERR(keyring_key))
		return PTR_ERR(keyring_key);

	down_read(&keyring_key->sem);
	ukp = dereference_key_locked(keyring_key);
	if (ukp->datalen == sizeof(*key)) {
		memcpy(key, ukp->data, ukp->datalen);
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	up_read(&keyring_key->sem);
	key_put(keyring_key);

	return ret;
}
#else
#include <keyutils.h>

static int __bch2_request_key(char *key_description, struct bch_key *key)
{
	key_serial_t key_id;

	key_id = request_key("user", key_description, NULL,
			     KEY_SPEC_USER_KEYRING);
	if (key_id < 0)
		return -errno;

	if (keyctl_read(key_id, (void *) key, sizeof(*key)) != sizeof(*key))
		return -1;

	return 0;
}
#endif

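/*
 * Build the key description "bcachefs:<user uuid>" in a printbuf and look the
 * key up via __bch2_request_key().
 */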
int bch2_request_key(struct bch_sb *sb, struct bch_key *key)
{
	struct printbuf key_description = PRINTBUF;
	int ret;

	prt_printf(&key_description, "bcachefs:");
	pr_uuid(&key_description, sb->user_uuid.b);

	ret = __bch2_request_key(key_description.buf, key);
	printbuf_exit(&key_description);
	return ret;
}

int bch2_decrypt_sb_key(struct bch_fs *c,
			struct bch_sb_field_crypt *crypt,
			struct bch_key *key)
{
	struct bch_encrypted_key sb_key = crypt->key;
	struct bch_key user_key;
	int ret = 0;

	/* is key encrypted? */
	if (!bch2_key_is_encrypted(&sb_key))
		goto out;

	ret = bch2_request_key(c->disk_sb.sb, &user_key);
	if (ret) {
		bch_err(c, "error requesting encryption key: %i", ret);
		goto err;
	}

	/* decrypt real key: */
	ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
				      &sb_key, sizeof(sb_key));
	if (ret)
		goto err;

	if (bch2_key_is_encrypted(&sb_key)) {
		bch_err(c, "incorrect encryption key");
		ret = -EINVAL;
		goto err;
	}
out:
	*key = sb_key.key;
err:
	memzero_explicit(&sb_key, sizeof(sb_key));
	memzero_explicit(&user_key, sizeof(user_key));
	return ret;
}

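/*
 * Allocate the chacha20 skcipher and poly1305 shash transforms used for
 * encryption and MACs.
 */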
static int bch2_alloc_ciphers(struct bch_fs *c)
{
	if (!c->chacha20)
		c->chacha20 = crypto_alloc_sync_skcipher("chacha20", 0, 0);
	if (IS_ERR(c->chacha20)) {
		bch_err(c, "error requesting chacha20 module: %li",
			PTR_ERR(c->chacha20));
		return PTR_ERR(c->chacha20);
	}

	if (!c->poly1305)
		c->poly1305 = crypto_alloc_shash("poly1305", 0, 0);
	if (IS_ERR(c->poly1305)) {
		bch_err(c, "error requesting poly1305 module: %li",
			PTR_ERR(c->poly1305));
		return PTR_ERR(c->poly1305);
	}

	return 0;
}

int bch2_disable_encryption(struct bch_fs *c)
{
	struct bch_sb_field_crypt *crypt;
	struct bch_key key;
	int ret = -EINVAL;

	mutex_lock(&c->sb_lock);

	crypt = bch2_sb_get_crypt(c->disk_sb.sb);
	if (!crypt)
		goto out;

	/* is key encrypted? */
	ret = 0;
	if (bch2_key_is_encrypted(&crypt->key))
		goto out;

	ret = bch2_decrypt_sb_key(c, crypt, &key);
	if (ret)
		goto out;

	crypt->key.magic	= BCH_KEY_MAGIC;
	crypt->key.key		= key;

	SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 0);
	bch2_write_super(c);
out:
	mutex_unlock(&c->sb_lock);

	return ret;
}

int bch2_enable_encryption(struct bch_fs *c, bool keyed)
{
	struct bch_encrypted_key key;
	struct bch_key user_key;
	struct bch_sb_field_crypt *crypt;
	int ret = -EINVAL;

	mutex_lock(&c->sb_lock);

	/* Do we already have an encryption key? */
	if (bch2_sb_get_crypt(c->disk_sb.sb))
		goto err;

	ret = bch2_alloc_ciphers(c);
	if (ret)
		goto err;

	key.magic = BCH_KEY_MAGIC;
	get_random_bytes(&key.key, sizeof(key.key));

	if (keyed) {
		ret = bch2_request_key(c->disk_sb.sb, &user_key);
		if (ret) {
			bch_err(c, "error requesting encryption key: %i", ret);
			goto err;
		}

		ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
					      &key, sizeof(key));
		if (ret)
			goto err;
	}

	ret = crypto_skcipher_setkey(&c->chacha20->base,
			(void *) &key.key, sizeof(key.key));
	if (ret)
		goto err;

	crypt = bch2_sb_resize_crypt(&c->disk_sb, sizeof(*crypt) / sizeof(u64));
	if (!crypt) {
		ret = -ENOMEM; /* XXX this technically could be -ENOSPC */
		goto err;
	}

	crypt->key = key;

	/* write superblock */
	SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 1);
	bch2_write_super(c);
err:
	mutex_unlock(&c->sb_lock);
	memzero_explicit(&user_key, sizeof(user_key));
	memzero_explicit(&key, sizeof(key));
	return ret;
}

void bch2_fs_encryption_exit(struct bch_fs *c)
{
	if (!IS_ERR_OR_NULL(c->poly1305))
		crypto_free_shash(c->poly1305);
	if (!IS_ERR_OR_NULL(c->chacha20))
		crypto_free_sync_skcipher(c->chacha20);
	if (!IS_ERR_OR_NULL(c->sha256))
		crypto_free_shash(c->sha256);
}

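/*
 * Mount-time setup: allocate the sha256 transform, and if the superblock has
 * a crypt field, allocate the ciphers, decrypt the superblock key and program
 * it into the chacha20 transform.
 */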
int bch2_fs_encryption_init(struct bch_fs *c)
{
	struct bch_sb_field_crypt *crypt;
	struct bch_key key;
	int ret = 0;

	pr_verbose_init(c->opts, "");

	c->sha256 = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(c->sha256)) {
		bch_err(c, "error requesting sha256 module");
		ret = PTR_ERR(c->sha256);
		goto out;
	}

	crypt = bch2_sb_get_crypt(c->disk_sb.sb);
	if (!crypt)
		goto out;

	ret = bch2_alloc_ciphers(c);
	if (ret)
		goto out;

	ret = bch2_decrypt_sb_key(c, crypt, &key);
	if (ret)
		goto out;

	ret = crypto_skcipher_setkey(&c->chacha20->base,
			(void *) &key.key, sizeof(key.key));
	if (ret)
		goto out;
out:
	memzero_explicit(&key, sizeof(key));
	pr_verbose_init(c->opts, "ret %i", ret);
	return ret;
}