// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "checksum.h"
#include "super.h"
#include "super-io.h"

#include <linux/crc32c.h>
#include <linux/crypto.h>
#include <linux/xxhash.h>
#include <linux/key.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <crypto/chacha.h>
#include <crypto/hash.h>
#include <crypto/poly1305.h>
#include <crypto/skcipher.h>
#include <keys/user-type.h>

/*
 * bch2_checksum_state is an abstraction of the checksum state calculated over different pages.
 * It features page merging without having the checksum algorithm lose its state.
 * For native checksum algorithms (like crc), a default seed value will do.
 * For hash-like algorithms, a state needs to be stored.
 */

struct bch2_checksum_state {
        union {
                u64 seed;
                struct xxh64_state h64state;
        };
        unsigned int type;
};

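/*
 * Initialize the state for the given checksum type: the crc variants start
 * from a seed (all ones for the *_nonzero variants), while xxhash keeps its
 * own internal hash state.
 */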
static void bch2_checksum_init(struct bch2_checksum_state *state)
{
        switch (state->type) {
        case BCH_CSUM_none:
        case BCH_CSUM_crc32c:
        case BCH_CSUM_crc64:
                state->seed = 0;
                break;
        case BCH_CSUM_crc32c_nonzero:
                state->seed = U32_MAX;
                break;
        case BCH_CSUM_crc64_nonzero:
                state->seed = U64_MAX;
                break;
        case BCH_CSUM_xxhash:
                xxh64_reset(&state->h64state, 0);
                break;
        default:
                BUG();
        }
}

static u64 bch2_checksum_final(const struct bch2_checksum_state *state)
{
        switch (state->type) {
        case BCH_CSUM_none:
        case BCH_CSUM_crc32c:
        case BCH_CSUM_crc64:
                return state->seed;
        case BCH_CSUM_crc32c_nonzero:
                return state->seed ^ U32_MAX;
        case BCH_CSUM_crc64_nonzero:
                return state->seed ^ U64_MAX;
        case BCH_CSUM_xxhash:
                return xxh64_digest(&state->h64state);
        default:
                BUG();
        }
}

static void bch2_checksum_update(struct bch2_checksum_state *state, const void *data, size_t len)
{
        switch (state->type) {
        case BCH_CSUM_none:
                return;
        case BCH_CSUM_crc32c_nonzero:
        case BCH_CSUM_crc32c:
                state->seed = crc32c(state->seed, data, len);
                break;
        case BCH_CSUM_crc64_nonzero:
        case BCH_CSUM_crc64:
                state->seed = crc64_be(state->seed, data, len);
                break;
        case BCH_CSUM_xxhash:
                xxh64_update(&state->h64state, data, len);
                break;
        default:
                BUG();
        }
}

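/*
 * Encrypt a scatterlist in place with ChaCha20, using @nonce as the IV.
 * ChaCha20 is a stream cipher, so the same operation is used for both
 * encryption and decryption.
 */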
static inline void do_encrypt_sg(struct crypto_sync_skcipher *tfm,
                                 struct nonce nonce,
                                 struct scatterlist *sg, size_t len)
{
        SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
        int ret;

        skcipher_request_set_sync_tfm(req, tfm);
        skcipher_request_set_crypt(req, sg, sg, len, nonce.d);

        ret = crypto_skcipher_encrypt(req);
        BUG_ON(ret);
}

static inline void do_encrypt(struct crypto_sync_skcipher *tfm,
                              struct nonce nonce,
                              void *buf, size_t len)
{
        struct scatterlist sg;

        sg_init_one(&sg, buf, len);
        do_encrypt_sg(tfm, nonce, &sg, len);
}

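/*
 * One-shot ChaCha20 encryption of a linear buffer with the given @key; used
 * below for wrapping/unwrapping the superblock key with the user's key.
 */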
int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
                            void *buf, size_t len)
{
        struct crypto_sync_skcipher *chacha20 =
                crypto_alloc_sync_skcipher("chacha20", 0, 0);
        int ret;

        /* crypto_alloc_sync_skcipher() returns an ERR_PTR, never NULL: */
        if (IS_ERR(chacha20)) {
                pr_err("error requesting chacha20 module: %li", PTR_ERR(chacha20));
                return PTR_ERR(chacha20);
        }

        ret = crypto_skcipher_setkey(&chacha20->base,
                                     (void *) key, sizeof(*key));
        if (ret) {
                pr_err("crypto_skcipher_setkey() error: %i", ret);
                goto err;
        }

        do_encrypt(chacha20, nonce, buf, len);
err:
        crypto_free_sync_skcipher(chacha20);
        return ret;
}

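/*
 * Derive the one-time Poly1305 key for this nonce by encrypting a block of
 * zeroes with ChaCha20 (with BCH_NONCE_POLY mixed into the nonce), and feed
 * it into the shash as the first part of the MAC computation.
 */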
static void gen_poly_key(struct bch_fs *c, struct shash_desc *desc,
                         struct nonce nonce)
{
        u8 key[POLY1305_KEY_SIZE];

        nonce.d[3] ^= BCH_NONCE_POLY;

        memset(key, 0, sizeof(key));
        do_encrypt(c->chacha20, nonce, key, sizeof(key));

        desc->tfm = c->poly1305;
        crypto_shash_init(desc);
        crypto_shash_update(desc, key, sizeof(key));
}

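/*
 * Checksum or MAC a linear buffer with checksum type @type. For the
 * chacha20_poly1305 types the result is a Poly1305 MAC, truncated to
 * bch_crc_bytes[type].
 */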
struct bch_csum bch2_checksum(struct bch_fs *c, unsigned type,
                              struct nonce nonce, const void *data, size_t len)
{
        switch (type) {
        case BCH_CSUM_none:
        case BCH_CSUM_crc32c_nonzero:
        case BCH_CSUM_crc64_nonzero:
        case BCH_CSUM_crc32c:
        case BCH_CSUM_xxhash:
        case BCH_CSUM_crc64: {
                struct bch2_checksum_state state;

                state.type = type;

                bch2_checksum_init(&state);
                bch2_checksum_update(&state, data, len);

                return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
        }

        case BCH_CSUM_chacha20_poly1305_80:
        case BCH_CSUM_chacha20_poly1305_128: {
                SHASH_DESC_ON_STACK(desc, c->poly1305);
                u8 digest[POLY1305_DIGEST_SIZE];
                struct bch_csum ret = { 0 };

                gen_poly_key(c, desc, nonce);

                crypto_shash_update(desc, data, len);
                crypto_shash_final(desc, digest);

                memcpy(&ret, digest, bch_crc_bytes[type]);
                return ret;
        }
        default:
                BUG();
        }
}

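/* Encrypt @data in place if @type is an encrypting checksum type; no-op otherwise. */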
void bch2_encrypt(struct bch_fs *c, unsigned type,
                  struct nonce nonce, void *data, size_t len)
{
        if (!bch2_csum_type_is_encryption(type))
                return;

        do_encrypt(c->chacha20, nonce, data, len);
}

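/*
 * Like bch2_checksum(), but over the data described by a bio iterator;
 * advances @iter past the checksummed range.
 */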
static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
                                           struct nonce nonce, struct bio *bio,
                                           struct bvec_iter *iter)
{
        struct bio_vec bv;

        switch (type) {
        case BCH_CSUM_none:
                return (struct bch_csum) { 0 };
        case BCH_CSUM_crc32c_nonzero:
        case BCH_CSUM_crc64_nonzero:
        case BCH_CSUM_crc32c:
        case BCH_CSUM_xxhash:
        case BCH_CSUM_crc64: {
                struct bch2_checksum_state state;

                state.type = type;
                bch2_checksum_init(&state);

#ifdef CONFIG_HIGHMEM
                __bio_for_each_segment(bv, bio, *iter, *iter) {
                        void *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
                        bch2_checksum_update(&state, p, bv.bv_len);
                        kunmap_atomic(p);
                }
#else
                __bio_for_each_bvec(bv, bio, *iter, *iter)
                        bch2_checksum_update(&state, page_address(bv.bv_page) + bv.bv_offset,
                                bv.bv_len);
#endif
                return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
        }

        case BCH_CSUM_chacha20_poly1305_80:
        case BCH_CSUM_chacha20_poly1305_128: {
                SHASH_DESC_ON_STACK(desc, c->poly1305);
                u8 digest[POLY1305_DIGEST_SIZE];
                struct bch_csum ret = { 0 };

                gen_poly_key(c, desc, nonce);

#ifdef CONFIG_HIGHMEM
                __bio_for_each_segment(bv, bio, *iter, *iter) {
                        void *p = kmap_atomic(bv.bv_page) + bv.bv_offset;

                        crypto_shash_update(desc, p, bv.bv_len);
                        kunmap_atomic(p);
                }
#else
                __bio_for_each_bvec(bv, bio, *iter, *iter)
                        crypto_shash_update(desc,
                                page_address(bv.bv_page) + bv.bv_offset,
                                bv.bv_len);
#endif
                crypto_shash_final(desc, digest);

                memcpy(&ret, digest, bch_crc_bytes[type]);
                return ret;
        }
        default:
                BUG();
        }
}

struct bch_csum bch2_checksum_bio(struct bch_fs *c, unsigned type,
                                  struct nonce nonce, struct bio *bio)
{
        struct bvec_iter iter = bio->bi_iter;

        return __bch2_checksum_bio(c, type, nonce, bio, &iter);
}

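/*
 * Encrypt a bio in place: batch bvecs into an on-stack 16 entry scatterlist,
 * encrypting and advancing the nonce after each batch so every byte sees the
 * correct keystream offset.
 */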
void bch2_encrypt_bio(struct bch_fs *c, unsigned type,
                      struct nonce nonce, struct bio *bio)
{
        struct bio_vec bv;
        struct bvec_iter iter;
        struct scatterlist sgl[16], *sg = sgl;
        size_t bytes = 0;

        if (!bch2_csum_type_is_encryption(type))
                return;

        sg_init_table(sgl, ARRAY_SIZE(sgl));

        bio_for_each_segment(bv, bio, iter) {
                if (sg == sgl + ARRAY_SIZE(sgl)) {
                        sg_mark_end(sg - 1);
                        do_encrypt_sg(c->chacha20, nonce, sgl, bytes);

                        nonce = nonce_add(nonce, bytes);
                        bytes = 0;

                        sg_init_table(sgl, ARRAY_SIZE(sgl));
                        sg = sgl;
                }

                sg_set_page(sg++, bv.bv_page, bv.bv_len, bv.bv_offset);
                bytes += bv.bv_len;
        }

        sg_mark_end(sg - 1);
        do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
}

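/*
 * Combine checksum @a over some prefix with checksum @b over the following
 * @b_len bytes: extend @a with @b_len zero bytes, then xor in @b. Only valid
 * for mergeable (crc-style) checksum types.
 */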
struct bch_csum bch2_checksum_merge(unsigned type, struct bch_csum a,
                                    struct bch_csum b, size_t b_len)
{
        struct bch2_checksum_state state;

        state.type = type;
        bch2_checksum_init(&state);
        state.seed = a.lo;

        BUG_ON(!bch2_checksum_mergeable(type));

        while (b_len) {
                unsigned b = min_t(unsigned, b_len, PAGE_SIZE);

                bch2_checksum_update(&state,
                                page_address(ZERO_PAGE(0)), b);
                b_len -= b;
        }
        a.lo = bch2_checksum_final(&state);
        a.lo ^= b.lo;
        a.hi ^= b.hi;
        return a;
}

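/*
 * Recompute the checksums of an extent that is being split: checksum up to
 * three subranges of @bio (crc_a, crc_b and the remainder), verify that the
 * combination still matches crc_old, and fill in the new unpacked crc
 * entries. Returns -EIO if the existing checksum no longer verifies.
 */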
int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
                        struct bversion version,
                        struct bch_extent_crc_unpacked crc_old,
                        struct bch_extent_crc_unpacked *crc_a,
                        struct bch_extent_crc_unpacked *crc_b,
                        unsigned len_a, unsigned len_b,
                        unsigned new_csum_type)
{
        struct bvec_iter iter = bio->bi_iter;
        struct nonce nonce = extent_nonce(version, crc_old);
        struct bch_csum merged = { 0 };
        struct crc_split {
                struct bch_extent_crc_unpacked  *crc;
                unsigned                        len;
                unsigned                        csum_type;
                struct bch_csum                 csum;
        } splits[3] = {
                { crc_a, len_a, new_csum_type },
                { crc_b, len_b, new_csum_type },
                { NULL,  bio_sectors(bio) - len_a - len_b, new_csum_type },
        }, *i;
        bool mergeable = crc_old.csum_type == new_csum_type &&
                bch2_checksum_mergeable(new_csum_type);
        unsigned crc_nonce = crc_old.nonce;

        BUG_ON(len_a + len_b > bio_sectors(bio));
        BUG_ON(crc_old.uncompressed_size != bio_sectors(bio));
        BUG_ON(crc_is_compressed(crc_old));
        BUG_ON(bch2_csum_type_is_encryption(crc_old.csum_type) !=
               bch2_csum_type_is_encryption(new_csum_type));

        for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
                iter.bi_size = i->len << 9;
                if (mergeable || i->crc)
                        i->csum = __bch2_checksum_bio(c, i->csum_type,
                                                      nonce, bio, &iter);
                else
                        bio_advance_iter(bio, &iter, i->len << 9);
                nonce = nonce_add(nonce, i->len << 9);
        }

        if (mergeable)
                for (i = splits; i < splits + ARRAY_SIZE(splits); i++)
                        merged = bch2_checksum_merge(new_csum_type, merged,
                                                     i->csum, i->len << 9);
        else
                merged = bch2_checksum_bio(c, crc_old.csum_type,
                                extent_nonce(version, crc_old), bio);

        if (bch2_crc_cmp(merged, crc_old.csum))
                return -EIO;

        for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
                if (i->crc)
                        *i->crc = (struct bch_extent_crc_unpacked) {
                                .csum_type              = i->csum_type,
                                .compression_type       = crc_old.compression_type,
                                .compressed_size        = i->len,
                                .uncompressed_size      = i->len,
                                .offset                 = 0,
                                .live_size              = i->len,
                                .nonce                  = crc_nonce,
                                .csum                   = i->csum,
                        };

                if (bch2_csum_type_is_encryption(new_csum_type))
                        crc_nonce += i->len;
        }

        return 0;
}

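/*
 * Fetch the user's encryption key: from the kernel's logon keyring when built
 * in-kernel, or via keyutils from the user keyring when built as part of the
 * userspace tools.
 */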
#ifdef __KERNEL__
static int __bch2_request_key(char *key_description, struct bch_key *key)
{
        struct key *keyring_key;
        const struct user_key_payload *ukp;
        int ret;

        keyring_key = request_key(&key_type_logon, key_description, NULL);
        if (IS_ERR(keyring_key))
                return PTR_ERR(keyring_key);

        down_read(&keyring_key->sem);
        ukp = dereference_key_locked(keyring_key);
        if (ukp->datalen == sizeof(*key)) {
                memcpy(key, ukp->data, ukp->datalen);
                ret = 0;
        } else {
                ret = -EINVAL;
        }
        up_read(&keyring_key->sem);
        key_put(keyring_key);

        return ret;
}
#else
#include <keyutils.h>

static int __bch2_request_key(char *key_description, struct bch_key *key)
{
        key_serial_t key_id;

        key_id = request_key("user", key_description, NULL,
                             KEY_SPEC_USER_KEYRING);
        if (key_id < 0)
                return -errno;

        if (keyctl_read(key_id, (void *) key, sizeof(*key)) != sizeof(*key))
                return -1;

        return 0;
}
#endif

int bch2_request_key(struct bch_sb *sb, struct bch_key *key)
{
        char key_description[60];
        char uuid[40];

        uuid_unparse_lower(sb->user_uuid.b, uuid);
        sprintf(key_description, "bcachefs:%s", uuid);

        return __bch2_request_key(key_description, key);
}

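/*
 * Unwrap the filesystem key stored in the crypt superblock field: if it is
 * encrypted, request the user's key and decrypt it with ChaCha20.
 */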
int bch2_decrypt_sb_key(struct bch_fs *c,
                        struct bch_sb_field_crypt *crypt,
                        struct bch_key *key)
{
        struct bch_encrypted_key sb_key = crypt->key;
        struct bch_key user_key;
        int ret = 0;

        /* is key encrypted? */
        if (!bch2_key_is_encrypted(&sb_key))
                goto out;

        ret = bch2_request_key(c->disk_sb.sb, &user_key);
        if (ret) {
                bch_err(c, "error requesting encryption key: %i", ret);
                goto err;
        }

        /* decrypt real key: */
        ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
                             &sb_key, sizeof(sb_key));
        if (ret)
                goto err;

        if (bch2_key_is_encrypted(&sb_key)) {
                bch_err(c, "incorrect encryption key");
                ret = -EINVAL;
                goto err;
        }
out:
        *key = sb_key.key;
err:
        memzero_explicit(&sb_key, sizeof(sb_key));
        memzero_explicit(&user_key, sizeof(user_key));
        return ret;
}

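/* Allocate the ChaCha20 and Poly1305 transforms if they haven't been already. */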
static int bch2_alloc_ciphers(struct bch_fs *c)
{
        if (!c->chacha20)
                c->chacha20 = crypto_alloc_sync_skcipher("chacha20", 0, 0);
        if (IS_ERR(c->chacha20)) {
                bch_err(c, "error requesting chacha20 module: %li",
                        PTR_ERR(c->chacha20));
                return PTR_ERR(c->chacha20);
        }

        if (!c->poly1305)
                c->poly1305 = crypto_alloc_shash("poly1305", 0, 0);
        if (IS_ERR(c->poly1305)) {
                bch_err(c, "error requesting poly1305 module: %li",
                        PTR_ERR(c->poly1305));
                return PTR_ERR(c->poly1305);
        }

        return 0;
}

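/*
 * Disable encryption: write the filesystem key back to the superblock
 * unencrypted and clear BCH_SB_ENCRYPTION_TYPE.
 */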
int bch2_disable_encryption(struct bch_fs *c)
{
        struct bch_sb_field_crypt *crypt;
        struct bch_key key;
        int ret = -EINVAL;

        mutex_lock(&c->sb_lock);

        crypt = bch2_sb_get_crypt(c->disk_sb.sb);
        if (!crypt)
                goto out;

        /* is key encrypted? */
        ret = 0;
        if (bch2_key_is_encrypted(&crypt->key))
                goto out;

        ret = bch2_decrypt_sb_key(c, crypt, &key);
        if (ret)
                goto out;

        crypt->key.magic        = BCH_KEY_MAGIC;
        crypt->key.key          = key;

        SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 0);
        bch2_write_super(c);
out:
        mutex_unlock(&c->sb_lock);

        return ret;
}

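/*
 * Enable encryption on a filesystem that doesn't have it yet: generate a
 * random key, optionally wrap it with the user's key (@keyed), program the
 * ChaCha20 transform and write the new crypt field to the superblock.
 */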
int bch2_enable_encryption(struct bch_fs *c, bool keyed)
{
        struct bch_encrypted_key key;
        struct bch_key user_key;
        struct bch_sb_field_crypt *crypt;
        int ret = -EINVAL;

        mutex_lock(&c->sb_lock);

        /* Do we already have an encryption key? */
        if (bch2_sb_get_crypt(c->disk_sb.sb))
                goto err;

        ret = bch2_alloc_ciphers(c);
        if (ret)
                goto err;

        key.magic = BCH_KEY_MAGIC;
        get_random_bytes(&key.key, sizeof(key.key));

        if (keyed) {
                ret = bch2_request_key(c->disk_sb.sb, &user_key);
                if (ret) {
                        bch_err(c, "error requesting encryption key: %i", ret);
                        goto err;
                }

                ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
                                              &key, sizeof(key));
                if (ret)
                        goto err;
        }

        ret = crypto_skcipher_setkey(&c->chacha20->base,
                        (void *) &key.key, sizeof(key.key));
        if (ret)
                goto err;

        crypt = bch2_sb_resize_crypt(&c->disk_sb, sizeof(*crypt) / sizeof(u64));
        if (!crypt) {
                ret = -ENOMEM; /* XXX this technically could be -ENOSPC */
                goto err;
        }

        crypt->key = key;

        /* write superblock */
        SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 1);
        bch2_write_super(c);
err:
        mutex_unlock(&c->sb_lock);
        memzero_explicit(&user_key, sizeof(user_key));
        memzero_explicit(&key, sizeof(key));
        return ret;
}

void bch2_fs_encryption_exit(struct bch_fs *c)
{
        if (!IS_ERR_OR_NULL(c->poly1305))
                crypto_free_shash(c->poly1305);
        if (!IS_ERR_OR_NULL(c->chacha20))
                crypto_free_sync_skcipher(c->chacha20);
        if (!IS_ERR_OR_NULL(c->sha256))
                crypto_free_shash(c->sha256);
}

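/*
 * Mount-time setup: allocate the sha256 transform, and if the superblock has
 * a crypt field, allocate the ciphers, decrypt the filesystem key and program
 * it into the ChaCha20 transform.
 */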
int bch2_fs_encryption_init(struct bch_fs *c)
{
        struct bch_sb_field_crypt *crypt;
        struct bch_key key;
        int ret = 0;

        pr_verbose_init(c->opts, "");

        c->sha256 = crypto_alloc_shash("sha256", 0, 0);
        if (IS_ERR(c->sha256)) {
                bch_err(c, "error requesting sha256 module");
                ret = PTR_ERR(c->sha256);
                goto out;
        }

        crypt = bch2_sb_get_crypt(c->disk_sb.sb);
        if (!crypt)
                goto out;

        ret = bch2_alloc_ciphers(c);
        if (ret)
                goto out;

        ret = bch2_decrypt_sb_key(c, crypt, &key);
        if (ret)
                goto out;

        ret = crypto_skcipher_setkey(&c->chacha20->base,
                        (void *) &key.key, sizeof(key.key));
        if (ret)
                goto out;
out:
        memzero_explicit(&key, sizeof(key));
        pr_verbose_init(c->opts, "ret %i", ret);
        return ret;
}