/*
 * libbcachefs/checksum.c — from bcachefs-tools
 * (sources updated to 24f7e08cd8 "bcachefs: shrinker.to_text() methods")
 */
1 // SPDX-License-Identifier: GPL-2.0
2 #include "bcachefs.h"
3 #include "checksum.h"
4 #include "super.h"
5 #include "super-io.h"
6
7 #include <linux/crc32c.h>
8 #include <linux/crypto.h>
9 #include <linux/xxhash.h>
10 #include <linux/key.h>
11 #include <linux/random.h>
12 #include <linux/scatterlist.h>
13 #include <crypto/algapi.h>
14 #include <crypto/chacha.h>
15 #include <crypto/hash.h>
16 #include <crypto/poly1305.h>
17 #include <crypto/skcipher.h>
18 #include <keys/user-type.h>
19
20 /*
21  * bch2_checksum state is an abstraction of the checksum state calculated over different pages.
22  * it features page merging without having the checksum algorithm lose its state.
23  * for native checksum aglorithms (like crc), a default seed value will do.
24  * for hash-like algorithms, a state needs to be stored
25  */
26
struct bch2_checksum_state {
	union {
		u64 seed;			/* running value for crc32c/crc64 variants */
		struct xxh64_state h64state;	/* incremental state for xxhash */
	};
	unsigned int type;	/* BCH_CSUM_* — selects which union member is live */
};
34
35 static void bch2_checksum_init(struct bch2_checksum_state *state)
36 {
37         switch (state->type) {
38         case BCH_CSUM_none:
39         case BCH_CSUM_crc32c:
40         case BCH_CSUM_crc64:
41                 state->seed = 0;
42                 break;
43         case BCH_CSUM_crc32c_nonzero:
44                 state->seed = U32_MAX;
45                 break;
46         case BCH_CSUM_crc64_nonzero:
47                 state->seed = U64_MAX;
48                 break;
49         case BCH_CSUM_xxhash:
50                 xxh64_reset(&state->h64state, 0);
51                 break;
52         default:
53                 BUG();
54         }
55 }
56
57 static u64 bch2_checksum_final(const struct bch2_checksum_state *state)
58 {
59         switch (state->type) {
60         case BCH_CSUM_none:
61         case BCH_CSUM_crc32c:
62         case BCH_CSUM_crc64:
63                 return state->seed;
64         case BCH_CSUM_crc32c_nonzero:
65                 return state->seed ^ U32_MAX;
66         case BCH_CSUM_crc64_nonzero:
67                 return state->seed ^ U64_MAX;
68         case BCH_CSUM_xxhash:
69                 return xxh64_digest(&state->h64state);
70         default:
71                 BUG();
72         }
73 }
74
75 static void bch2_checksum_update(struct bch2_checksum_state *state, const void *data, size_t len)
76 {
77         switch (state->type) {
78         case BCH_CSUM_none:
79                 return;
80         case BCH_CSUM_crc32c_nonzero:
81         case BCH_CSUM_crc32c:
82                 state->seed = crc32c(state->seed, data, len);
83                 break;
84         case BCH_CSUM_crc64_nonzero:
85         case BCH_CSUM_crc64:
86                 state->seed = crc64_be(state->seed, data, len);
87                 break;
88         case BCH_CSUM_xxhash:
89                 xxh64_update(&state->h64state, data, len);
90                 break;
91         default:
92                 BUG();
93         }
94 }
95
/*
 * Run the skcipher over @sg in place (src == dst), using @nonce as the IV.
 * Returns 0 or a negative crypto-layer error (which is also logged).
 */
static inline int do_encrypt_sg(struct crypto_sync_skcipher *tfm,
                                struct nonce nonce,
                                struct scatterlist *sg, size_t len)
{
        SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
        int ret;

        skcipher_request_set_sync_tfm(req, tfm);
        /* in-place: same scatterlist for source and destination */
        skcipher_request_set_crypt(req, sg, sg, len, nonce.d);

        ret = crypto_skcipher_encrypt(req);
        if (ret)
                pr_err("got error %i from crypto_skcipher_encrypt()", ret);

        return ret;
}
112
113 static inline int do_encrypt(struct crypto_sync_skcipher *tfm,
114                               struct nonce nonce,
115                               void *buf, size_t len)
116 {
117         if (!is_vmalloc_addr(buf)) {
118                 struct scatterlist sg;
119
120                 sg_init_table(&sg, 1);
121                 sg_set_page(&sg,
122                             is_vmalloc_addr(buf)
123                             ? vmalloc_to_page(buf)
124                             : virt_to_page(buf),
125                             len, offset_in_page(buf));
126                 return do_encrypt_sg(tfm, nonce, &sg, len);
127         } else {
128                 unsigned pages = buf_pages(buf, len);
129                 struct scatterlist *sg;
130                 size_t orig_len = len;
131                 int ret, i;
132
133                 sg = kmalloc_array(sizeof(*sg), pages, GFP_KERNEL);
134                 if (!sg)
135                         return -ENOMEM;
136
137                 sg_init_table(sg, pages);
138
139                 for (i = 0; i < pages; i++) {
140                         unsigned offset = offset_in_page(buf);
141                         unsigned pg_len = min(len, PAGE_SIZE - offset);
142
143                         sg_set_page(sg + i, vmalloc_to_page(buf), pg_len, offset);
144                         buf += pg_len;
145                         len -= pg_len;
146                 }
147
148                 ret = do_encrypt_sg(tfm, nonce, sg, orig_len);
149                 kfree(sg);
150                 return ret;
151         }
152 }
153
154 int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
155                             void *buf, size_t len)
156 {
157         struct crypto_sync_skcipher *chacha20 =
158                 crypto_alloc_sync_skcipher("chacha20", 0, 0);
159         int ret;
160
161         if (!chacha20) {
162                 pr_err("error requesting chacha20 module: %li", PTR_ERR(chacha20));
163                 return PTR_ERR(chacha20);
164         }
165
166         ret = crypto_skcipher_setkey(&chacha20->base,
167                                      (void *) key, sizeof(*key));
168         if (ret) {
169                 pr_err("crypto_skcipher_setkey() error: %i", ret);
170                 goto err;
171         }
172
173         ret = do_encrypt(chacha20, nonce, buf, len);
174 err:
175         crypto_free_sync_skcipher(chacha20);
176         return ret;
177 }
178
/*
 * Derive the poly1305 key: encrypt a block of zeroes with chacha20 under
 * @nonce tagged with BCH_NONCE_POLY, then feed the resulting key material
 * into the shash as its first update.
 */
static int gen_poly_key(struct bch_fs *c, struct shash_desc *desc,
                        struct nonce nonce)
{
        u8 key[POLY1305_KEY_SIZE];
        int ret;

        /* domain-separate the poly key from data-encryption nonces */
        nonce.d[3] ^= BCH_NONCE_POLY;

        memset(key, 0, sizeof(key));
        ret = do_encrypt(c->chacha20, nonce, key, sizeof(key));
        if (ret)
                return ret;

        desc->tfm = c->poly1305;
        /* NOTE(review): shash init/update return values are ignored here */
        crypto_shash_init(desc);
        crypto_shash_update(desc, key, sizeof(key));
        return 0;
}
197
/*
 * Checksum (or MAC) a flat buffer.  Plain checksum types use the
 * bch2_checksum_state helpers; the chacha20_poly1305 types compute a
 * poly1305 MAC keyed via gen_poly_key().
 */
struct bch_csum bch2_checksum(struct bch_fs *c, unsigned type,
                              struct nonce nonce, const void *data, size_t len)
{
        switch (type) {
        case BCH_CSUM_none:
        case BCH_CSUM_crc32c_nonzero:
        case BCH_CSUM_crc64_nonzero:
        case BCH_CSUM_crc32c:
        case BCH_CSUM_xxhash:
        case BCH_CSUM_crc64: {
                struct bch2_checksum_state state;

                state.type = type;

                bch2_checksum_init(&state);
                bch2_checksum_update(&state, data, len);

                /* result is stored little-endian; only .lo is used */
                return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
        }

        case BCH_CSUM_chacha20_poly1305_80:
        case BCH_CSUM_chacha20_poly1305_128: {
                SHASH_DESC_ON_STACK(desc, c->poly1305);
                u8 digest[POLY1305_DIGEST_SIZE];
                struct bch_csum ret = { 0 };

                /* NOTE(review): gen_poly_key() error return is ignored here */
                gen_poly_key(c, desc, nonce);

                crypto_shash_update(desc, data, len);
                crypto_shash_final(desc, digest);

                /* truncate the 16-byte MAC to 80 or 128 bits per type */
                memcpy(&ret, digest, bch_crc_bytes[type]);
                return ret;
        }
        default:
                BUG();
        }
}
236
237 int bch2_encrypt(struct bch_fs *c, unsigned type,
238                   struct nonce nonce, void *data, size_t len)
239 {
240         if (!bch2_csum_type_is_encryption(type))
241                 return 0;
242
243         return do_encrypt(c->chacha20, nonce, data, len);
244 }
245
/*
 * Checksum the portion of @bio described by *iter, advancing *iter as a
 * side effect.  Mirrors bch2_checksum() but walks bio segments; under
 * CONFIG_HIGHMEM each page must be temporarily kmapped.
 */
static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
                                           struct nonce nonce, struct bio *bio,
                                           struct bvec_iter *iter)
{
        struct bio_vec bv;

        switch (type) {
        case BCH_CSUM_none:
                /* note: *iter is not advanced on this path */
                return (struct bch_csum) { 0 };
        case BCH_CSUM_crc32c_nonzero:
        case BCH_CSUM_crc64_nonzero:
        case BCH_CSUM_crc32c:
        case BCH_CSUM_xxhash:
        case BCH_CSUM_crc64: {
                struct bch2_checksum_state state;

                state.type = type;
                bch2_checksum_init(&state);

#ifdef CONFIG_HIGHMEM
                /* highmem pages may not be mapped; kmap each segment */
                __bio_for_each_segment(bv, bio, *iter, *iter) {
                        void *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
                        bch2_checksum_update(&state, p, bv.bv_len);
                        kunmap_atomic(p);
                }
#else
                /* lowmem-only: page_address() is valid, iterate by bvec */
                __bio_for_each_bvec(bv, bio, *iter, *iter)
                        bch2_checksum_update(&state, page_address(bv.bv_page) + bv.bv_offset,
                                bv.bv_len);
#endif
                /* stored little-endian, matching bch2_checksum() */
                return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
        }

        case BCH_CSUM_chacha20_poly1305_80:
        case BCH_CSUM_chacha20_poly1305_128: {
                SHASH_DESC_ON_STACK(desc, c->poly1305);
                u8 digest[POLY1305_DIGEST_SIZE];
                struct bch_csum ret = { 0 };

                /* NOTE(review): gen_poly_key() error return is ignored here */
                gen_poly_key(c, desc, nonce);

#ifdef CONFIG_HIGHMEM
                __bio_for_each_segment(bv, bio, *iter, *iter) {
                        void *p = kmap_atomic(bv.bv_page) + bv.bv_offset;

                        crypto_shash_update(desc, p, bv.bv_len);
                        kunmap_atomic(p);
                }
#else
                __bio_for_each_bvec(bv, bio, *iter, *iter)
                        crypto_shash_update(desc,
                                page_address(bv.bv_page) + bv.bv_offset,
                                bv.bv_len);
#endif
                crypto_shash_final(desc, digest);

                /* truncate MAC to 80 or 128 bits per type */
                memcpy(&ret, digest, bch_crc_bytes[type]);
                return ret;
        }
        default:
                BUG();
        }
}
309
310 struct bch_csum bch2_checksum_bio(struct bch_fs *c, unsigned type,
311                                   struct nonce nonce, struct bio *bio)
312 {
313         struct bvec_iter iter = bio->bi_iter;
314
315         return __bch2_checksum_bio(c, type, nonce, bio, &iter);
316 }
317
/*
 * Encrypt a bio in place, batching segments into a fixed 16-entry
 * on-stack scatterlist.  When the table fills, the batch is encrypted
 * and the nonce advanced by the bytes consumed so the keystream stays
 * contiguous across batches.
 */
int bch2_encrypt_bio(struct bch_fs *c, unsigned type,
                     struct nonce nonce, struct bio *bio)
{
        struct bio_vec bv;
        struct bvec_iter iter;
        struct scatterlist sgl[16], *sg = sgl;
        size_t bytes = 0;
        int ret = 0;

        if (!bch2_csum_type_is_encryption(type))
                return 0;

        sg_init_table(sgl, ARRAY_SIZE(sgl));

        bio_for_each_segment(bv, bio, iter) {
                if (sg == sgl + ARRAY_SIZE(sgl)) {
                        /* table full: flush the batch before adding more */
                        sg_mark_end(sg - 1);

                        ret = do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
                        if (ret)
                                return ret;

                        /* keep the keystream contiguous for the next batch */
                        nonce = nonce_add(nonce, bytes);
                        bytes = 0;

                        sg_init_table(sgl, ARRAY_SIZE(sgl));
                        sg = sgl;
                }

                sg_set_page(sg++, bv.bv_page, bv.bv_len, bv.bv_offset);
                bytes += bv.bv_len;
        }

        /* flush whatever remains (bio always has at least one segment) */
        sg_mark_end(sg - 1);
        return do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
}
354
355 struct bch_csum bch2_checksum_merge(unsigned type, struct bch_csum a,
356                                     struct bch_csum b, size_t b_len)
357 {
358         struct bch2_checksum_state state;
359
360         state.type = type;
361         bch2_checksum_init(&state);
362         state.seed = a.lo;
363
364         BUG_ON(!bch2_checksum_mergeable(type));
365
366         while (b_len) {
367                 unsigned b = min_t(unsigned, b_len, PAGE_SIZE);
368
369                 bch2_checksum_update(&state,
370                                 page_address(ZERO_PAGE(0)), b);
371                 b_len -= b;
372         }
373         a.lo = bch2_checksum_final(&state);
374         a.lo ^= b.lo;
375         a.hi ^= b.hi;
376         return a;
377 }
378
/*
 * Split an extent's checksum: verify @crc_old over the whole bio, then
 * produce new unpacked crcs for up to two sub-ranges (@crc_a over the
 * first @len_a sectors, @crc_b over the next @len_b), possibly with a
 * new checksum type.  Returns -EIO if the existing checksum fails.
 */
int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
                        struct bversion version,
                        struct bch_extent_crc_unpacked crc_old,
                        struct bch_extent_crc_unpacked *crc_a,
                        struct bch_extent_crc_unpacked *crc_b,
                        unsigned len_a, unsigned len_b,
                        unsigned new_csum_type)
{
        struct bvec_iter iter = bio->bi_iter;
        struct nonce nonce = extent_nonce(version, crc_old);
        struct bch_csum merged = { 0 };
        /* one split per output crc plus a tail covering the remainder */
        struct crc_split {
                struct bch_extent_crc_unpacked  *crc;
                unsigned                        len;
                unsigned                        csum_type;
                struct bch_csum                 csum;
        } splits[3] = {
                { crc_a, len_a, new_csum_type },
                { crc_b, len_b, new_csum_type },
                { NULL,  bio_sectors(bio) - len_a - len_b, new_csum_type },
        }, *i;
        /* mergeable means split csums can be recombined to check crc_old */
        bool mergeable = crc_old.csum_type == new_csum_type &&
                bch2_checksum_mergeable(new_csum_type);
        unsigned crc_nonce = crc_old.nonce;

        BUG_ON(len_a + len_b > bio_sectors(bio));
        BUG_ON(crc_old.uncompressed_size != bio_sectors(bio));
        BUG_ON(crc_is_compressed(crc_old));
        BUG_ON(bch2_csum_type_is_encryption(crc_old.csum_type) !=
               bch2_csum_type_is_encryption(new_csum_type));

        /* pass 1: checksum each split (or just skip over unused tails) */
        for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
                iter.bi_size = i->len << 9;
                if (mergeable || i->crc)
                        i->csum = __bch2_checksum_bio(c, i->csum_type,
                                                      nonce, bio, &iter);
                else
                        bio_advance_iter(bio, &iter, i->len << 9);
                nonce = nonce_add(nonce, i->len << 9);
        }

        /* verify the old checksum: merge the splits, or re-checksum whole */
        if (mergeable)
                for (i = splits; i < splits + ARRAY_SIZE(splits); i++)
                        merged = bch2_checksum_merge(new_csum_type, merged,
                                                     i->csum, i->len << 9);
        else
                merged = bch2_checksum_bio(c, crc_old.csum_type,
                                extent_nonce(version, crc_old), bio);

        if (bch2_crc_cmp(merged, crc_old.csum))
                return -EIO;

        /* pass 2: fill in the caller's unpacked crcs */
        for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
                if (i->crc)
                        *i->crc = (struct bch_extent_crc_unpacked) {
                                .csum_type              = i->csum_type,
                                .compression_type       = crc_old.compression_type,
                                .compressed_size        = i->len,
                                .uncompressed_size      = i->len,
                                .offset                 = 0,
                                .live_size              = i->len,
                                .nonce                  = crc_nonce,
                                .csum                   = i->csum,
                        };

                /* encrypted extents advance their nonce per sector */
                if (bch2_csum_type_is_encryption(new_csum_type))
                        crc_nonce += i->len;
        }

        return 0;
}
450
451 #ifdef __KERNEL__
/*
 * Kernel build: look up @key_description in the user keyring and copy
 * its payload into @key.  The payload must be exactly sizeof(*key).
 */
static int __bch2_request_key(char *key_description, struct bch_key *key)
{
        struct key *keyring_key;
        const struct user_key_payload *ukp;
        int ret;

        keyring_key = request_key(&key_type_user, key_description, NULL);
        if (IS_ERR(keyring_key))
                return PTR_ERR(keyring_key);

        /* payload must be read under the key semaphore */
        down_read(&keyring_key->sem);
        ukp = dereference_key_locked(keyring_key);
        if (ukp->datalen == sizeof(*key)) {
                memcpy(key, ukp->data, ukp->datalen);
                ret = 0;
        } else {
                ret = -EINVAL;
        }
        up_read(&keyring_key->sem);
        key_put(keyring_key);

        return ret;
}
475 #else
476 #include <keyutils.h>
477
/*
 * Userspace build: fetch the key from the session's user keyring via
 * keyutils.  NOTE(review): the short-read path returns -1, which as an
 * errno value is -EPERM — probably should be a real error code; confirm.
 */
static int __bch2_request_key(char *key_description, struct bch_key *key)
{
        key_serial_t key_id;

        key_id = request_key("user", key_description, NULL,
                             KEY_SPEC_USER_KEYRING);
        if (key_id < 0)
                return -errno;

        if (keyctl_read(key_id, (void *) key, sizeof(*key)) != sizeof(*key))
                return -1;

        return 0;
}
492 #endif
493
494 int bch2_request_key(struct bch_sb *sb, struct bch_key *key)
495 {
496         struct printbuf key_description = PRINTBUF;
497         int ret;
498
499         prt_printf(&key_description, "bcachefs:");
500         pr_uuid(&key_description, sb->user_uuid.b);
501
502         ret = __bch2_request_key(key_description.buf, key);
503         printbuf_exit(&key_description);
504         return ret;
505 }
506
/*
 * Recover the filesystem key from the superblock's crypt field.  If the
 * stored key is wrapped (passphrase-encrypted), request the user key
 * from the keyring and unwrap it with chacha20.  All key material is
 * scrubbed from the stack before returning.
 */
int bch2_decrypt_sb_key(struct bch_fs *c,
                        struct bch_sb_field_crypt *crypt,
                        struct bch_key *key)
{
        struct bch_encrypted_key sb_key = crypt->key;
        struct bch_key user_key;
        int ret = 0;

        /* is key encrypted? */
        if (!bch2_key_is_encrypted(&sb_key))
                goto out;

        ret = bch2_request_key(c->disk_sb.sb, &user_key);
        if (ret) {
                bch_err(c, "error requesting encryption key: %i", ret);
                goto err;
        }

        /* decrypt real key: */
        ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
                             &sb_key, sizeof(sb_key));
        if (ret)
                goto err;

        /* wrong passphrase leaves the magic scrambled */
        if (bch2_key_is_encrypted(&sb_key)) {
                bch_err(c, "incorrect encryption key");
                ret = -EINVAL;
                goto err;
        }
out:
        *key = sb_key.key;
err:
        /*
         * NOTE(review): on the early "not encrypted" path user_key is
         * never written; zeroing it below is a harmless write to
         * uninitialized stack.
         */
        memzero_explicit(&sb_key, sizeof(sb_key));
        memzero_explicit(&user_key, sizeof(user_key));
        return ret;
}
543
/*
 * Lazily allocate the chacha20 and poly1305 transforms on @c.  On
 * allocation failure the ERR_PTR is left cached in c->chacha20 /
 * c->poly1305; bch2_fs_encryption_exit() skips them via IS_ERR_OR_NULL.
 */
static int bch2_alloc_ciphers(struct bch_fs *c)
{
        if (!c->chacha20)
                c->chacha20 = crypto_alloc_sync_skcipher("chacha20", 0, 0);
        if (IS_ERR(c->chacha20)) {
                bch_err(c, "error requesting chacha20 module: %li",
                        PTR_ERR(c->chacha20));
                return PTR_ERR(c->chacha20);
        }

        if (!c->poly1305)
                c->poly1305 = crypto_alloc_shash("poly1305", 0, 0);
        if (IS_ERR(c->poly1305)) {
                bch_err(c, "error requesting poly1305 module: %li",
                        PTR_ERR(c->poly1305));
                return PTR_ERR(c->poly1305);
        }

        return 0;
}
564
/*
 * Turn off encryption: store the filesystem key unwrapped in the
 * superblock and clear the encryption-type field.  Bails out (ret 0)
 * if the stored key is passphrase-wrapped.
 */
int bch2_disable_encryption(struct bch_fs *c)
{
        struct bch_sb_field_crypt *crypt;
        struct bch_key key;
        int ret = -EINVAL;

        mutex_lock(&c->sb_lock);

        crypt = bch2_sb_get_crypt(c->disk_sb.sb);
        if (!crypt)
                goto out;

        /* is key encrypted? */
        ret = 0;
        if (bch2_key_is_encrypted(&crypt->key))
                goto out;

        ret = bch2_decrypt_sb_key(c, crypt, &key);
        if (ret)
                goto out;

        /* rewrite the key in the clear */
        crypt->key.magic        = BCH_KEY_MAGIC;
        crypt->key.key          = key;

        SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 0);
        bch2_write_super(c);
out:
        mutex_unlock(&c->sb_lock);

        return ret;
}
596
/*
 * Enable encryption on @c: generate a fresh random filesystem key,
 * optionally wrap it with the user's keyring key (@keyed), program the
 * chacha20 transform, and persist the key in the superblock.  Fails if
 * a crypt field already exists.
 */
int bch2_enable_encryption(struct bch_fs *c, bool keyed)
{
        struct bch_encrypted_key key;
        struct bch_key user_key;
        struct bch_sb_field_crypt *crypt;
        int ret = -EINVAL;

        mutex_lock(&c->sb_lock);

        /* Do we already have an encryption key? */
        if (bch2_sb_get_crypt(c->disk_sb.sb))
                goto err;

        ret = bch2_alloc_ciphers(c);
        if (ret)
                goto err;

        key.magic = BCH_KEY_MAGIC;
        get_random_bytes(&key.key, sizeof(key.key));

        if (keyed) {
                ret = bch2_request_key(c->disk_sb.sb, &user_key);
                if (ret) {
                        bch_err(c, "error requesting encryption key: %i", ret);
                        goto err;
                }

                /* wrap the new key with the user key before storing it */
                ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
                                              &key, sizeof(key));
                if (ret)
                        goto err;
        }

        ret = crypto_skcipher_setkey(&c->chacha20->base,
                        (void *) &key.key, sizeof(key.key));
        if (ret)
                goto err;

        crypt = bch2_sb_resize_crypt(&c->disk_sb, sizeof(*crypt) / sizeof(u64));
        if (!crypt) {
                ret = -ENOMEM; /* XXX this technically could be -ENOSPC */
                goto err;
        }

        crypt->key = key;

        /* write superblock */
        SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 1);
        bch2_write_super(c);
err:
        mutex_unlock(&c->sb_lock);
        /*
         * NOTE(review): when !keyed, user_key is never written; zeroing
         * it is a harmless write to uninitialized stack.
         */
        memzero_explicit(&user_key, sizeof(user_key));
        memzero_explicit(&key, sizeof(key));
        return ret;
}
652
/*
 * Free the crypto transforms allocated by bch2_fs_encryption_init() /
 * bch2_alloc_ciphers().  IS_ERR_OR_NULL guards handle both never-
 * allocated (NULL) and failed-allocation (ERR_PTR) states.
 */
void bch2_fs_encryption_exit(struct bch_fs *c)
{
        if (!IS_ERR_OR_NULL(c->poly1305))
                crypto_free_shash(c->poly1305);
        if (!IS_ERR_OR_NULL(c->chacha20))
                crypto_free_sync_skcipher(c->chacha20);
        if (!IS_ERR_OR_NULL(c->sha256))
                crypto_free_shash(c->sha256);
}
662
/*
 * Set up checksumming/encryption for a filesystem: always allocate the
 * sha256 transform; if the superblock has a crypt field, also allocate
 * the ciphers, recover the filesystem key, and key the chacha20
 * transform.  Key material is scrubbed from the stack on exit.
 */
int bch2_fs_encryption_init(struct bch_fs *c)
{
        struct bch_sb_field_crypt *crypt;
        struct bch_key key;
        int ret = 0;

        pr_verbose_init(c->opts, "");

        c->sha256 = crypto_alloc_shash("sha256", 0, 0);
        if (IS_ERR(c->sha256)) {
                bch_err(c, "error requesting sha256 module");
                ret = PTR_ERR(c->sha256);
                goto out;
        }

        /* no crypt field: filesystem is unencrypted, nothing more to do */
        crypt = bch2_sb_get_crypt(c->disk_sb.sb);
        if (!crypt)
                goto out;

        ret = bch2_alloc_ciphers(c);
        if (ret)
                goto out;

        ret = bch2_decrypt_sb_key(c, crypt, &key);
        if (ret)
                goto out;

        ret = crypto_skcipher_setkey(&c->chacha20->base,
                        (void *) &key.key, sizeof(key.key));
        if (ret)
                goto out;
out:
        /*
         * NOTE(review): key may be uninitialized on early-exit paths;
         * zeroing it is a harmless write.
         */
        memzero_explicit(&key, sizeof(key));
        pr_verbose_init(c->opts, "ret %i", ret);
        return ret;
}