1 // SPDX-License-Identifier: GPL-2.0
7 #include <linux/crc32c.h>
8 #include <linux/crypto.h>
10 #include <linux/random.h>
11 #include <linux/scatterlist.h>
12 #include <crypto/algapi.h>
13 #include <crypto/chacha.h>
14 #include <crypto/hash.h>
15 #include <crypto/poly1305.h>
16 #include <crypto/skcipher.h>
17 #include <keys/user-type.h>
/*
 * Return the initial accumulator value for checksum @type.
 * NOTE(review): the *_NONZERO variants presumably seed with a nonzero
 * value so that all-zero data does not checksum to 0 — the case bodies
 * are not visible here; confirm against the full file.
 */
19 static u64 bch2_checksum_init(unsigned type)
24 case BCH_CSUM_CRC32C_NONZERO:
26 case BCH_CSUM_CRC64_NONZERO:
/*
 * Finalize a raw CRC accumulator for checksum @type before it is
 * stored.  NOTE(review): likely the inverse transform of
 * bch2_checksum_init() for the *_NONZERO variants — case bodies are
 * not visible; confirm.
 */
37 static u64 bch2_checksum_final(unsigned type, u64 crc)
42 case BCH_CSUM_CRC32C_NONZERO:
44 case BCH_CSUM_CRC64_NONZERO:
/*
 * Fold @len bytes at @data into the running checksum @crc:
 * CRC32C types go through crc32c(), CRC64 types through crc64_be().
 */
55 static u64 bch2_checksum_update(unsigned type, u64 crc, const void *data, size_t len)
60 case BCH_CSUM_CRC32C_NONZERO:
62 return crc32c(crc, data, len);
63 case BCH_CSUM_CRC64_NONZERO:
65 return crc64_be(crc, data, len);
/*
 * Encrypt @len bytes described by scatterlist @sg in place with the
 * synchronous skcipher @tfm, using nonce.d as the IV (the nonce
 * parameter itself is declared on an elided line).  The request lives
 * on the stack via SYNC_SKCIPHER_REQUEST_ON_STACK, so no allocation
 * happens here.  NOTE(review): the handling of @ret after
 * crypto_skcipher_encrypt() is not visible — confirm errors are
 * reported/BUGged in the full file.
 */
71 static inline void do_encrypt_sg(struct crypto_sync_skcipher *tfm,
73 struct scatterlist *sg, size_t len)
75 SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
78 skcipher_request_set_sync_tfm(req, tfm);
79 skcipher_request_set_crypt(req, sg, sg, len, nonce.d);
81 ret = crypto_skcipher_encrypt(req);
/*
 * Encrypt a single linear buffer in place: wrap @buf/@len in a
 * one-entry scatterlist and hand it to do_encrypt_sg().
 */
85 static inline void do_encrypt(struct crypto_sync_skcipher *tfm,
87 void *buf, size_t len)
89 struct scatterlist sg;
91 sg_init_one(&sg, buf, len);
92 do_encrypt_sg(tfm, nonce, &sg, len);
/*
 * Encrypt @buf in place with a one-shot ChaCha20 transform keyed by
 * @key and using @nonce.  Allocates a temporary sync skcipher, sets
 * the key, encrypts, and frees the tfm.  Returns 0 or a negative
 * errno (allocation/setkey failures are logged via pr_err).
 */
95 int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
96 void *buf, size_t len)
98 struct crypto_sync_skcipher *chacha20 =
99 crypto_alloc_sync_skcipher("chacha20", 0, 0);
103 pr_err("error requesting chacha20 module: %li", PTR_ERR(chacha20));
104 return PTR_ERR(chacha20);
107 ret = crypto_skcipher_setkey(&chacha20->base,
108 (void *) key, sizeof(*key));
110 pr_err("crypto_skcipher_setkey() error: %i", ret);
114 do_encrypt(chacha20, nonce, buf, len);
116 crypto_free_sync_skcipher(chacha20);
/*
 * Derive a per-nonce Poly1305 key and prime @desc with it: encrypt a
 * zeroed key buffer with the filesystem's ChaCha20 tfm (nonce tweaked
 * with BCH_NONCE_POLY so it cannot collide with data nonces), then
 * init the poly1305 shash and feed it the derived key.
 * NOTE(review): the derived key[] is not visibly zeroized before the
 * function returns — confirm whether memzero_explicit() is wanted here.
 */
120 static void gen_poly_key(struct bch_fs *c, struct shash_desc *desc,
123 u8 key[POLY1305_KEY_SIZE];
125 nonce.d[3] ^= BCH_NONCE_POLY;
127 memset(key, 0, sizeof(key));
128 do_encrypt(c->chacha20, nonce, key, sizeof(key));
130 desc->tfm = c->poly1305;
131 crypto_shash_init(desc);
132 crypto_shash_update(desc, key, sizeof(key));
/*
 * Checksum (or MAC) @len bytes at @data with checksum @type.
 * CRC types: init/update/final and return the result little-endian in
 * .lo.  ChaCha20/Poly1305 types: derive the MAC key from @nonce via
 * gen_poly_key(), hash the data, and copy bch_crc_bytes[type] bytes of
 * the digest into the returned csum (80- vs 128-bit variants).
 */
135 struct bch_csum bch2_checksum(struct bch_fs *c, unsigned type,
136 struct nonce nonce, const void *data, size_t len)
140 case BCH_CSUM_CRC32C_NONZERO:
141 case BCH_CSUM_CRC64_NONZERO:
142 case BCH_CSUM_CRC32C:
143 case BCH_CSUM_CRC64: {
144 u64 crc = bch2_checksum_init(type);
146 crc = bch2_checksum_update(type, crc, data, len);
147 crc = bch2_checksum_final(type, crc);
149 return (struct bch_csum) { .lo = cpu_to_le64(crc) };
152 case BCH_CSUM_CHACHA20_POLY1305_80:
153 case BCH_CSUM_CHACHA20_POLY1305_128: {
154 SHASH_DESC_ON_STACK(desc, c->poly1305);
155 u8 digest[POLY1305_DIGEST_SIZE];
156 struct bch_csum ret = { 0 };
158 gen_poly_key(c, desc, nonce);
160 crypto_shash_update(desc, data, len);
161 crypto_shash_final(desc, digest);
163 memcpy(&ret, digest, bch_crc_bytes[type]);
/*
 * Encrypt @data in place with the filesystem's ChaCha20 tfm.
 * No-op when @type is not an encrypting checksum type.
 */
171 void bch2_encrypt(struct bch_fs *c, unsigned type,
172 struct nonce nonce, void *data, size_t len)
174 if (!bch2_csum_type_is_encryption(type))
177 do_encrypt(c->chacha20, nonce, data, len);
/*
 * Checksum the data covered by @iter in @bio — the bio-iterating
 * counterpart of bch2_checksum().  Returns a zero csum for the "none"
 * type.  With CONFIG_HIGHMEM each segment is kmapped with
 * kmap_atomic() before hashing; otherwise page_address() is used
 * directly on each bvec.  @iter is advanced by the walk (callers pass
 * a copy — see bch2_checksum_bio()).
 */
180 static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
181 struct nonce nonce, struct bio *bio,
182 struct bvec_iter *iter)
188 return (struct bch_csum) { 0 };
189 case BCH_CSUM_CRC32C_NONZERO:
190 case BCH_CSUM_CRC64_NONZERO:
191 case BCH_CSUM_CRC32C:
192 case BCH_CSUM_CRC64: {
193 u64 crc = bch2_checksum_init(type);
195 #ifdef CONFIG_HIGHMEM
196 __bio_for_each_segment(bv, bio, *iter, *iter) {
197 void *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
198 crc = bch2_checksum_update(type,
203 __bio_for_each_bvec(bv, bio, *iter, *iter)
204 crc = bch2_checksum_update(type, crc,
205 page_address(bv.bv_page) + bv.bv_offset,
208 crc = bch2_checksum_final(type, crc);
209 return (struct bch_csum) { .lo = cpu_to_le64(crc) };
212 case BCH_CSUM_CHACHA20_POLY1305_80:
213 case BCH_CSUM_CHACHA20_POLY1305_128: {
214 SHASH_DESC_ON_STACK(desc, c->poly1305);
215 u8 digest[POLY1305_DIGEST_SIZE];
216 struct bch_csum ret = { 0 };
218 gen_poly_key(c, desc, nonce);
220 #ifdef CONFIG_HIGHMEM
221 __bio_for_each_segment(bv, bio, *iter, *iter) {
222 void *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
224 crypto_shash_update(desc, p, bv.bv_len);
228 __bio_for_each_bvec(bv, bio, *iter, *iter)
229 crypto_shash_update(desc,
230 page_address(bv.bv_page) + bv.bv_offset,
233 crypto_shash_final(desc, digest);
235 memcpy(&ret, digest, bch_crc_bytes[type]);
/*
 * Checksum an entire bio: copy bi_iter so the caller's iterator is
 * untouched, then delegate to __bch2_checksum_bio().
 */
243 struct bch_csum bch2_checksum_bio(struct bch_fs *c, unsigned type,
244 struct nonce nonce, struct bio *bio)
246 struct bvec_iter iter = bio->bi_iter;
248 return __bch2_checksum_bio(c, type, nonce, bio, &iter);
/*
 * Encrypt a bio's data in place.  Segments are batched into a fixed
 * 16-entry scatterlist; when it fills, the batch is encrypted and the
 * nonce is advanced by the number of bytes consumed (nonce_add) so the
 * ChaCha20 keystream stays continuous across batches, then the table
 * is reinitialized.  A final do_encrypt_sg() flushes the remainder.
 * No-op for non-encrypting checksum types.
 */
251 void bch2_encrypt_bio(struct bch_fs *c, unsigned type,
252 struct nonce nonce, struct bio *bio)
255 struct bvec_iter iter;
256 struct scatterlist sgl[16], *sg = sgl;
259 if (!bch2_csum_type_is_encryption(type))
262 sg_init_table(sgl, ARRAY_SIZE(sgl));
264 bio_for_each_segment(bv, bio, iter) {
265 if (sg == sgl + ARRAY_SIZE(sgl)) {
267 do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
269 nonce = nonce_add(nonce, bytes);
272 sg_init_table(sgl, ARRAY_SIZE(sgl));
276 sg_set_page(sg++, bv.bv_page, bv.bv_len, bv.bv_offset);
281 do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
/*
 * Combine checksum @a with checksum @b, where @b covers @b_len bytes
 * immediately following the region @a covers.  Only valid for
 * mergeable (plain CRC) types — BUG otherwise.  @a is extended over
 * @b_len zero bytes by feeding ZERO_PAGE in chunks of up to PAGE_SIZE.
 * NOTE(review): the final combination of the extended @a with @b is on
 * elided lines — confirm (likely an XOR of the accumulators).
 */
284 struct bch_csum bch2_checksum_merge(unsigned type, struct bch_csum a,
285 struct bch_csum b, size_t b_len)
287 BUG_ON(!bch2_checksum_mergeable(type));
290 unsigned b = min_t(unsigned, b_len, PAGE_SIZE);
292 a.lo = bch2_checksum_update(type, a.lo,
293 page_address(ZERO_PAGE(0)), b);
/*
 * Recompute checksums when an extent is being split/trimmed: produce
 * new crcs for the first @len_a sectors (-> @crc_a), the next @len_b
 * sectors (-> @crc_b), and checksum (but discard the crc of) the tail,
 * all in @new_csum_type.
 *
 * If the old and new types match and the type is mergeable, the split
 * checksums are merged back together and compared against
 * crc_old.csum; otherwise the whole bio is re-checksummed in the old
 * type for the comparison.  A mismatch indicates the data does not
 * match the old checksum (error path elided).  The nonce is advanced
 * past each split so encrypted MACs line up with on-disk offsets.
 */
302 int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
303 struct bversion version,
304 struct bch_extent_crc_unpacked crc_old,
305 struct bch_extent_crc_unpacked *crc_a,
306 struct bch_extent_crc_unpacked *crc_b,
307 unsigned len_a, unsigned len_b,
308 unsigned new_csum_type)
310 struct bvec_iter iter = bio->bi_iter;
311 struct nonce nonce = extent_nonce(version, crc_old);
312 struct bch_csum merged = { 0 };
314 struct bch_extent_crc_unpacked *crc;
317 struct bch_csum csum;
/* Three-way split: part a, part b, and the remaining tail (no crc out). */
319 { crc_a, len_a, new_csum_type },
320 { crc_b, len_b, new_csum_type },
321 { NULL, bio_sectors(bio) - len_a - len_b, new_csum_type },
323 bool mergeable = crc_old.csum_type == new_csum_type &&
324 bch2_checksum_mergeable(new_csum_type);
325 unsigned crc_nonce = crc_old.nonce;
/* Sanity: splits must cover the bio; only uncompressed extents; the
 * encryption-ness of old and new types must agree. */
327 BUG_ON(len_a + len_b > bio_sectors(bio));
328 BUG_ON(crc_old.uncompressed_size != bio_sectors(bio));
329 BUG_ON(crc_is_compressed(crc_old));
330 BUG_ON(bch2_csum_type_is_encryption(crc_old.csum_type) !=
331 bch2_csum_type_is_encryption(new_csum_type));
/* Checksum each split (skipped for the tail unless mergeable),
 * advancing the iterator and nonce by the split's byte length. */
333 for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
334 iter.bi_size = i->len << 9;
335 if (mergeable || i->crc)
336 i->csum = __bch2_checksum_bio(c, i->csum_type,
339 bio_advance_iter(bio, &iter, i->len << 9);
340 nonce = nonce_add(nonce, i->len << 9);
/* Verify against the old checksum: merge the split csums when
 * possible, else re-checksum the whole bio in the old type. */
344 for (i = splits; i < splits + ARRAY_SIZE(splits); i++)
345 merged = bch2_checksum_merge(new_csum_type, merged,
346 i->csum, i->len << 9);
348 merged = bch2_checksum_bio(c, crc_old.csum_type,
349 extent_nonce(version, crc_old), bio);
351 if (bch2_crc_cmp(merged, crc_old.csum))
/* Fill in the output crc entries for the a/b splits. */
354 for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
356 *i->crc = (struct bch_extent_crc_unpacked) {
357 .csum_type = i->csum_type,
358 .compression_type = crc_old.compression_type,
359 .compressed_size = i->len,
360 .uncompressed_size = i->len,
/* NOTE(review): nonce bookkeeping for encrypted types on elided
 * lines — presumably propagates crc_nonce; confirm. */
367 if (bch2_csum_type_is_encryption(new_csum_type))
/*
 * Kernel build: fetch the user's encryption key from the kernel
 * keyring.  Looks up a "logon"-type key named "bcachefs:<uuid>",
 * and copies its payload into @key under the key's semaphore if the
 * payload size matches exactly.  Returns 0 or a negative errno
 * (mismatched size path elided).
 */
375 int bch2_request_key(struct bch_sb *sb, struct bch_key *key)
377 char key_description[60];
378 struct key *keyring_key;
379 const struct user_key_payload *ukp;
382 snprintf(key_description, sizeof(key_description),
383 "bcachefs:%pUb", &sb->user_uuid);
385 keyring_key = request_key(&key_type_logon, key_description, NULL);
386 if (IS_ERR(keyring_key))
387 return PTR_ERR(keyring_key);
389 down_read(&keyring_key->sem);
390 ukp = dereference_key_locked(keyring_key);
391 if (ukp->datalen == sizeof(*key)) {
392 memcpy(key, ukp->data, ukp->datalen);
397 up_read(&keyring_key->sem);
398 key_put(keyring_key);
403 #include <keyutils.h>
404 #include <uuid/uuid.h>
/*
 * Userspace build (keyutils): look up a "user"-type key named
 * "bcachefs:<uuid>" in the user keyring and read its payload into
 * @key, requiring the read size to match exactly.
 */
406 int bch2_request_key(struct bch_sb *sb, struct bch_key *key)
409 char key_description[60];
412 uuid_unparse_lower(sb->user_uuid.b, uuid);
413 sprintf(key_description, "bcachefs:%s", uuid);
415 key_id = request_key("user", key_description, NULL,
416 KEY_SPEC_USER_KEYRING);
420 if (keyctl_read(key_id, (void *) key, sizeof(*key)) != sizeof(*key))
/*
 * Obtain the filesystem's real encryption key from the superblock
 * crypt field.  If the stored key is encrypted, request the user's
 * passphrase-derived key and decrypt with it (ChaCha20 encryption is
 * its own inverse, so bch2_chacha_encrypt_key() decrypts here); a key
 * that still looks encrypted afterwards means the user key was wrong.
 * Both key buffers are zeroized before return.  Success path that
 * copies the result out is on elided lines.
 */
427 int bch2_decrypt_sb_key(struct bch_fs *c,
428 struct bch_sb_field_crypt *crypt,
431 struct bch_encrypted_key sb_key = crypt->key;
432 struct bch_key user_key;
435 /* is key encrypted? */
436 if (!bch2_key_is_encrypted(&sb_key))
439 ret = bch2_request_key(c->disk_sb.sb, &user_key);
441 bch_err(c, "error requesting encryption key: %i", ret);
445 /* decrypt real key: */
446 ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
447 &sb_key, sizeof(sb_key));
451 if (bch2_key_is_encrypted(&sb_key)) {
452 bch_err(c, "incorrect encryption key");
459 memzero_explicit(&sb_key, sizeof(sb_key));
460 memzero_explicit(&user_key, sizeof(user_key));
/*
 * Allocate the filesystem's crypto transforms: the ChaCha20 sync
 * skcipher and the Poly1305 shash.  Logs and returns a negative errno
 * on failure.  NOTE(review): guards against already-allocated tfms are
 * presumably on the elided lines preceding each allocation — confirm.
 */
464 static int bch2_alloc_ciphers(struct bch_fs *c)
467 c->chacha20 = crypto_alloc_sync_skcipher("chacha20", 0, 0);
468 if (IS_ERR(c->chacha20)) {
469 bch_err(c, "error requesting chacha20 module: %li",
470 PTR_ERR(c->chacha20));
471 return PTR_ERR(c->chacha20);
475 c->poly1305 = crypto_alloc_shash("poly1305", 0, 0);
476 if (IS_ERR(c->poly1305)) {
477 bch_err(c, "error requesting poly1305 module: %li",
478 PTR_ERR(c->poly1305));
479 return PTR_ERR(c->poly1305);
/*
 * Turn off encryption: decrypt the superblock key and write it back
 * in plaintext, then clear the superblock encryption-type flag.
 * All superblock mutation happens under sb_lock.  Error paths and the
 * superblock write are on elided lines.
 */
485 int bch2_disable_encryption(struct bch_fs *c)
487 struct bch_sb_field_crypt *crypt;
491 mutex_lock(&c->sb_lock);
493 crypt = bch2_sb_get_crypt(c->disk_sb.sb);
497 /* is key encrypted? */
499 if (bch2_key_is_encrypted(&crypt->key))
502 ret = bch2_decrypt_sb_key(c, crypt, &key);
506 crypt->key.magic = BCH_KEY_MAGIC;
507 crypt->key.key = key;
509 SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 0);
512 mutex_unlock(&c->sb_lock);
/*
 * Turn on encryption: generate a random filesystem key, optionally
 * (@keyed) wrap it with the user's keyring key, program the ChaCha20
 * tfm with it, store it in a newly-resized superblock crypt field, and
 * set the superblock encryption-type flag.  Runs under sb_lock; key
 * material is zeroized on exit.  Early-exit when a crypt field already
 * exists; error paths are on elided lines.
 */
517 int bch2_enable_encryption(struct bch_fs *c, bool keyed)
519 struct bch_encrypted_key key;
520 struct bch_key user_key;
521 struct bch_sb_field_crypt *crypt;
524 mutex_lock(&c->sb_lock);
526 /* Do we already have an encryption key? */
527 if (bch2_sb_get_crypt(c->disk_sb.sb))
530 ret = bch2_alloc_ciphers(c);
534 key.magic = BCH_KEY_MAGIC;
535 get_random_bytes(&key.key, sizeof(key.key));
538 ret = bch2_request_key(c->disk_sb.sb, &user_key);
540 bch_err(c, "error requesting encryption key: %i", ret);
544 ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
550 ret = crypto_skcipher_setkey(&c->chacha20->base,
551 (void *) &key.key, sizeof(key.key));
555 crypt = bch2_sb_resize_crypt(&c->disk_sb, sizeof(*crypt) / sizeof(u64));
557 ret = -ENOMEM; /* XXX this technically could be -ENOSPC */
563 /* write superblock */
564 SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 1);
567 mutex_unlock(&c->sb_lock);
568 memzero_explicit(&user_key, sizeof(user_key));
569 memzero_explicit(&key, sizeof(key));
/*
 * Teardown: free the poly1305/chacha20/sha256 transforms if they were
 * successfully allocated (IS_ERR_OR_NULL guards cover both the
 * never-allocated and the failed-allocation cases).
 */
573 void bch2_fs_encryption_exit(struct bch_fs *c)
575 if (!IS_ERR_OR_NULL(c->poly1305))
576 crypto_free_shash(c->poly1305);
577 if (!IS_ERR_OR_NULL(c->chacha20))
578 crypto_free_sync_skcipher(c->chacha20);
579 if (!IS_ERR_OR_NULL(c->sha256))
580 crypto_free_shash(c->sha256);
/*
 * Startup: allocate the sha256 shash (always needed), and if the
 * superblock has a crypt field, allocate the encryption ciphers,
 * decrypt the filesystem key, and program the ChaCha20 tfm with it.
 * The decrypted key is zeroized before return.  Error/early-exit
 * jumps are on elided lines.
 */
583 int bch2_fs_encryption_init(struct bch_fs *c)
585 struct bch_sb_field_crypt *crypt;
589 pr_verbose_init(c->opts, "");
591 c->sha256 = crypto_alloc_shash("sha256", 0, 0);
592 if (IS_ERR(c->sha256)) {
593 bch_err(c, "error requesting sha256 module");
594 ret = PTR_ERR(c->sha256);
598 crypt = bch2_sb_get_crypt(c->disk_sb.sb);
602 ret = bch2_alloc_ciphers(c);
606 ret = bch2_decrypt_sb_key(c, crypt, &key);
610 ret = crypto_skcipher_setkey(&c->chacha20->base,
611 (void *) &key.key, sizeof(key.key));
615 memzero_explicit(&key, sizeof(key));
616 pr_verbose_init(c->opts, "ret %i", ret);