// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "checksum.h"
#include "errcode.h"
#include "super.h"
#include "super-io.h"

#include <linux/crc32c.h>
#include <linux/crypto.h>
#include <linux/xxhash.h>
#include <linux/key.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <crypto/chacha.h>
#include <crypto/hash.h>
#include <crypto/poly1305.h>
#include <crypto/skcipher.h>
#include <keys/user-type.h>

/*
 * struct bch2_checksum_state abstracts the running checksum state so a
 * checksum can be computed incrementally over multiple pages without the
 * algorithm losing its state between updates.
 * For simple checksum algorithms (e.g. CRC), a seed value is all that needs
 * to be carried; for hash-like algorithms (e.g. xxhash), the full hash state
 * must be stored.
 */

struct bch2_checksum_state {
	union {
		u64 seed;
		struct xxh64_state h64state;
	};
	unsigned int type;
};
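
/*
 * Illustrative use of the helpers below (a sketch for documentation only, not
 * called anywhere; "data"/"len" stand in for an arbitrary buffer).  This is
 * the same sequence bch2_checksum() uses:
 *
 *	struct bch2_checksum_state state;
 *
 *	state.type = BCH_CSUM_crc32c;
 *	bch2_checksum_init(&state);
 *	bch2_checksum_update(&state, data, len);
 *	u64 csum = bch2_checksum_final(&state);
 */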

static void bch2_checksum_init(struct bch2_checksum_state *state)
{
	switch (state->type) {
	case BCH_CSUM_none:
	case BCH_CSUM_crc32c:
	case BCH_CSUM_crc64:
		state->seed = 0;
		break;
	case BCH_CSUM_crc32c_nonzero:
		state->seed = U32_MAX;
		break;
	case BCH_CSUM_crc64_nonzero:
		state->seed = U64_MAX;
		break;
	case BCH_CSUM_xxhash:
		xxh64_reset(&state->h64state, 0);
		break;
	default:
		BUG();
	}
}

static u64 bch2_checksum_final(const struct bch2_checksum_state *state)
{
	switch (state->type) {
	case BCH_CSUM_none:
	case BCH_CSUM_crc32c:
	case BCH_CSUM_crc64:
		return state->seed;
	case BCH_CSUM_crc32c_nonzero:
		return state->seed ^ U32_MAX;
	case BCH_CSUM_crc64_nonzero:
		return state->seed ^ U64_MAX;
	case BCH_CSUM_xxhash:
		return xxh64_digest(&state->h64state);
	default:
		BUG();
	}
}

static void bch2_checksum_update(struct bch2_checksum_state *state, const void *data, size_t len)
{
	switch (state->type) {
	case BCH_CSUM_none:
		return;
	case BCH_CSUM_crc32c_nonzero:
	case BCH_CSUM_crc32c:
		state->seed = crc32c(state->seed, data, len);
		break;
	case BCH_CSUM_crc64_nonzero:
	case BCH_CSUM_crc64:
		state->seed = crc64_be(state->seed, data, len);
		break;
	case BCH_CSUM_xxhash:
		xxh64_update(&state->h64state, data, len);
		break;
	default:
		BUG();
	}
}
static inline int do_encrypt_sg(struct crypto_sync_skcipher *tfm,
				struct nonce nonce,
				struct scatterlist *sg, size_t len)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
	int ret;

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_crypt(req, sg, sg, len, nonce.d);

	ret = crypto_skcipher_encrypt(req);
	if (ret)
		pr_err("got error %i from crypto_skcipher_encrypt()", ret);

	return ret;
}

static inline int do_encrypt(struct crypto_sync_skcipher *tfm,
			     struct nonce nonce,
			     void *buf, size_t len)
{
	if (!is_vmalloc_addr(buf)) {
		struct scatterlist sg;

		sg_init_table(&sg, 1);
		sg_set_page(&sg, virt_to_page(buf), len, offset_in_page(buf));
		return do_encrypt_sg(tfm, nonce, &sg, len);
	} else {
		unsigned pages = buf_pages(buf, len);
		struct scatterlist *sg;
		size_t orig_len = len;
		int ret, i;

		sg = kmalloc_array(pages, sizeof(*sg), GFP_KERNEL);
		if (!sg)
			return -BCH_ERR_ENOMEM_do_encrypt;

		sg_init_table(sg, pages);

		for (i = 0; i < pages; i++) {
			unsigned offset = offset_in_page(buf);
			unsigned pg_len = min(len, PAGE_SIZE - offset);

			sg_set_page(sg + i, vmalloc_to_page(buf), pg_len, offset);
			buf += pg_len;
			len -= pg_len;
		}

		ret = do_encrypt_sg(tfm, nonce, sg, orig_len);
		kfree(sg);
		return ret;
	}
}

int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
			    void *buf, size_t len)
{
	struct crypto_sync_skcipher *chacha20 =
		crypto_alloc_sync_skcipher("chacha20", 0, 0);
	int ret;

	if (IS_ERR(chacha20)) {
		pr_err("error requesting chacha20 module: %li", PTR_ERR(chacha20));
		return PTR_ERR(chacha20);
	}

	ret = crypto_skcipher_setkey(&chacha20->base,
				     (void *) key, sizeof(*key));
	if (ret) {
		pr_err("crypto_skcipher_setkey() error: %i", ret);
		goto err;
	}

	ret = do_encrypt(chacha20, nonce, buf, len);
err:
	crypto_free_sync_skcipher(chacha20);
	return ret;
}

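/*
 * Derive the one-time Poly1305 key for this nonce: encrypt an all-zero block
 * with ChaCha20 under a nonce tagged with BCH_NONCE_POLY, then feed it to the
 * Poly1305 shash as the first update.  (Analogous to the standard
 * ChaCha20-Poly1305 construction.)
 */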
static int gen_poly_key(struct bch_fs *c, struct shash_desc *desc,
			struct nonce nonce)
{
	u8 key[POLY1305_KEY_SIZE];
	int ret;

	nonce.d[3] ^= BCH_NONCE_POLY;

	memset(key, 0, sizeof(key));
	ret = do_encrypt(c->chacha20, nonce, key, sizeof(key));
	if (ret)
		return ret;

	desc->tfm = c->poly1305;
	crypto_shash_init(desc);
	crypto_shash_update(desc, key, sizeof(key));
	return 0;
}

struct bch_csum bch2_checksum(struct bch_fs *c, unsigned type,
			      struct nonce nonce, const void *data, size_t len)
{
	switch (type) {
	case BCH_CSUM_none:
	case BCH_CSUM_crc32c_nonzero:
	case BCH_CSUM_crc64_nonzero:
	case BCH_CSUM_crc32c:
	case BCH_CSUM_xxhash:
	case BCH_CSUM_crc64: {
		struct bch2_checksum_state state;

		state.type = type;

		bch2_checksum_init(&state);
		bch2_checksum_update(&state, data, len);

		return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
	}

	case BCH_CSUM_chacha20_poly1305_80:
	case BCH_CSUM_chacha20_poly1305_128: {
		SHASH_DESC_ON_STACK(desc, c->poly1305);
		u8 digest[POLY1305_DIGEST_SIZE];
		struct bch_csum ret = { 0 };

		gen_poly_key(c, desc, nonce);

		crypto_shash_update(desc, data, len);
		crypto_shash_final(desc, digest);

		memcpy(&ret, digest, bch_crc_bytes[type]);
		return ret;
	}
	default:
		BUG();
	}
}

int bch2_encrypt(struct bch_fs *c, unsigned type,
		 struct nonce nonce, void *data, size_t len)
{
	if (!bch2_csum_type_is_encryption(type))
		return 0;

	return do_encrypt(c->chacha20, nonce, data, len);
}

static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
					   struct nonce nonce, struct bio *bio,
					   struct bvec_iter *iter)
{
	struct bio_vec bv;

	switch (type) {
	case BCH_CSUM_none:
		return (struct bch_csum) { 0 };
	case BCH_CSUM_crc32c_nonzero:
	case BCH_CSUM_crc64_nonzero:
	case BCH_CSUM_crc32c:
	case BCH_CSUM_xxhash:
	case BCH_CSUM_crc64: {
		struct bch2_checksum_state state;

		state.type = type;
		bch2_checksum_init(&state);

#ifdef CONFIG_HIGHMEM
		__bio_for_each_segment(bv, bio, *iter, *iter) {
			void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;

			bch2_checksum_update(&state, p, bv.bv_len);
			kunmap_local(p);
		}
#else
		__bio_for_each_bvec(bv, bio, *iter, *iter)
			bch2_checksum_update(&state, page_address(bv.bv_page) + bv.bv_offset,
				bv.bv_len);
#endif
		return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
	}

	case BCH_CSUM_chacha20_poly1305_80:
	case BCH_CSUM_chacha20_poly1305_128: {
		SHASH_DESC_ON_STACK(desc, c->poly1305);
		u8 digest[POLY1305_DIGEST_SIZE];
		struct bch_csum ret = { 0 };

		gen_poly_key(c, desc, nonce);

#ifdef CONFIG_HIGHMEM
		__bio_for_each_segment(bv, bio, *iter, *iter) {
			void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;

			crypto_shash_update(desc, p, bv.bv_len);
			kunmap_local(p);
		}
#else
		__bio_for_each_bvec(bv, bio, *iter, *iter)
			crypto_shash_update(desc,
				page_address(bv.bv_page) + bv.bv_offset,
				bv.bv_len);
#endif
		crypto_shash_final(desc, digest);

		memcpy(&ret, digest, bch_crc_bytes[type]);
		return ret;
	}
	default:
		BUG();
	}
}

struct bch_csum bch2_checksum_bio(struct bch_fs *c, unsigned type,
				  struct nonce nonce, struct bio *bio)
{
	struct bvec_iter iter = bio->bi_iter;

	return __bch2_checksum_bio(c, type, nonce, bio, &iter);
}

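/*
 * Encrypt the contents of a bio in place: bvecs are batched into a small
 * on-stack scatterlist, and the nonce is advanced by the bytes already
 * processed between batches so the keystream continues where the previous
 * batch left off.
 */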
int __bch2_encrypt_bio(struct bch_fs *c, unsigned type,
		       struct nonce nonce, struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	struct scatterlist sgl[16], *sg = sgl;
	size_t bytes = 0;
	int ret = 0;

	if (!bch2_csum_type_is_encryption(type))
		return 0;

	sg_init_table(sgl, ARRAY_SIZE(sgl));

	bio_for_each_segment(bv, bio, iter) {
		if (sg == sgl + ARRAY_SIZE(sgl)) {
			sg_mark_end(sg - 1);

			ret = do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
			if (ret)
				return ret;

			nonce = nonce_add(nonce, bytes);
			bytes = 0;

			sg_init_table(sgl, ARRAY_SIZE(sgl));
			sg = sgl;
		}

		sg_set_page(sg++, bv.bv_page, bv.bv_len, bv.bv_offset);
		bytes += bv.bv_len;
	}

	sg_mark_end(sg - 1);
	return do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
}

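/*
 * Merge checksum b, covering b_len bytes, into checksum a: a is extended as if
 * b_len zero bytes had been appended to its data, then b is XORed in.  Only
 * valid for the mergeable (CRC-style) checksum types.
 */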
struct bch_csum bch2_checksum_merge(unsigned type, struct bch_csum a,
				    struct bch_csum b, size_t b_len)
{
	struct bch2_checksum_state state;

	state.type = type;
	bch2_checksum_init(&state);
	state.seed = (u64 __force) a.lo;

	BUG_ON(!bch2_checksum_mergeable(type));

	while (b_len) {
		unsigned page_len = min_t(unsigned, b_len, PAGE_SIZE);

		bch2_checksum_update(&state,
				page_address(ZERO_PAGE(0)), page_len);
		b_len -= page_len;
	}
	a.lo = (__le64 __force) bch2_checksum_final(&state);
	a.lo ^= b.lo;
	a.hi ^= b.hi;
	return a;
}

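/*
 * Recompute checksums over a bio, optionally splitting the old checksum into
 * new ranges (crc_a, crc_b, and the remainder).  The whole-bio checksum is
 * recomputed and compared against crc_old.csum to catch in-memory corruption
 * before the new checksums are returned.
 */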
int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
			struct bversion version,
			struct bch_extent_crc_unpacked crc_old,
			struct bch_extent_crc_unpacked *crc_a,
			struct bch_extent_crc_unpacked *crc_b,
			unsigned len_a, unsigned len_b,
			unsigned new_csum_type)
{
	struct bvec_iter iter = bio->bi_iter;
	struct nonce nonce = extent_nonce(version, crc_old);
	struct bch_csum merged = { 0 };
	struct crc_split {
		struct bch_extent_crc_unpacked	*crc;
		unsigned			len;
		unsigned			csum_type;
		struct bch_csum			csum;
	} splits[3] = {
		{ crc_a, len_a, new_csum_type },
		{ crc_b, len_b, new_csum_type },
		{ NULL,	 bio_sectors(bio) - len_a - len_b, new_csum_type },
	}, *i;
	bool mergeable = crc_old.csum_type == new_csum_type &&
		bch2_checksum_mergeable(new_csum_type);
	unsigned crc_nonce = crc_old.nonce;

	BUG_ON(len_a + len_b > bio_sectors(bio));
	BUG_ON(crc_old.uncompressed_size != bio_sectors(bio));
	BUG_ON(crc_is_compressed(crc_old));
	BUG_ON(bch2_csum_type_is_encryption(crc_old.csum_type) !=
	       bch2_csum_type_is_encryption(new_csum_type));

	for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
		iter.bi_size = i->len << 9;
		if (mergeable || i->crc)
			i->csum = __bch2_checksum_bio(c, i->csum_type,
						      nonce, bio, &iter);
		else
			bio_advance_iter(bio, &iter, i->len << 9);
		nonce = nonce_add(nonce, i->len << 9);
	}

	if (mergeable)
		for (i = splits; i < splits + ARRAY_SIZE(splits); i++)
			merged = bch2_checksum_merge(new_csum_type, merged,
						     i->csum, i->len << 9);
	else
		merged = bch2_checksum_bio(c, crc_old.csum_type,
				extent_nonce(version, crc_old), bio);

	if (bch2_crc_cmp(merged, crc_old.csum) && !c->opts.no_data_io) {
		bch_err(c, "checksum error in %s() (memory corruption or bug?)\n"
			"expected %0llx:%0llx got %0llx:%0llx (old type %s new type %s)",
			__func__,
			crc_old.csum.hi,
			crc_old.csum.lo,
			merged.hi,
			merged.lo,
			bch2_csum_types[crc_old.csum_type],
			bch2_csum_types[new_csum_type]);
		return -EIO;
	}

	for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
		if (i->crc)
			*i->crc = (struct bch_extent_crc_unpacked) {
				.csum_type		= i->csum_type,
				.compression_type	= crc_old.compression_type,
				.compressed_size	= i->len,
				.uncompressed_size	= i->len,
				.offset			= 0,
				.live_size		= i->len,
				.nonce			= crc_nonce,
				.csum			= i->csum,
			};

		if (bch2_csum_type_is_encryption(new_csum_type))
			crc_nonce += i->len;
	}

	return 0;
}

/* BCH_SB_FIELD_crypt: */

static int bch2_sb_crypt_validate(struct bch_sb *sb,
				  struct bch_sb_field *f,
				  struct printbuf *err)
{
	struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);

	if (vstruct_bytes(&crypt->field) < sizeof(*crypt)) {
		prt_printf(err, "wrong size (got %zu should be %zu)",
			   vstruct_bytes(&crypt->field), sizeof(*crypt));
		return -BCH_ERR_invalid_sb_crypt;
	}

	if (BCH_CRYPT_KDF_TYPE(crypt)) {
		prt_printf(err, "bad kdf type %llu", BCH_CRYPT_KDF_TYPE(crypt));
		return -BCH_ERR_invalid_sb_crypt;
	}

	return 0;
}

static void bch2_sb_crypt_to_text(struct printbuf *out, struct bch_sb *sb,
				  struct bch_sb_field *f)
{
	struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);

	prt_printf(out, "KDF:               %llu", BCH_CRYPT_KDF_TYPE(crypt));
	prt_newline(out);
	prt_printf(out, "scrypt n:          %llu", BCH_KDF_SCRYPT_N(crypt));
	prt_newline(out);
	prt_printf(out, "scrypt r:          %llu", BCH_KDF_SCRYPT_R(crypt));
	prt_newline(out);
	prt_printf(out, "scrypt p:          %llu", BCH_KDF_SCRYPT_P(crypt));
	prt_newline(out);
}

const struct bch_sb_field_ops bch_sb_field_ops_crypt = {
	.validate	= bch2_sb_crypt_validate,
	.to_text	= bch2_sb_crypt_to_text,
};

#ifdef __KERNEL__
static int __bch2_request_key(char *key_description, struct bch_key *key)
{
	struct key *keyring_key;
	const struct user_key_payload *ukp;
	int ret;

	keyring_key = request_key(&key_type_user, key_description, NULL);
	if (IS_ERR(keyring_key))
		return PTR_ERR(keyring_key);

	down_read(&keyring_key->sem);
	ukp = dereference_key_locked(keyring_key);
	if (ukp->datalen == sizeof(*key)) {
		memcpy(key, ukp->data, ukp->datalen);
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	up_read(&keyring_key->sem);
	key_put(keyring_key);

	return ret;
}
#else
#include <keyutils.h>

static int __bch2_request_key(char *key_description, struct bch_key *key)
{
	key_serial_t key_id;

	key_id = request_key("user", key_description, NULL,
			     KEY_SPEC_USER_KEYRING);
	if (key_id < 0)
		return -errno;

	if (keyctl_read(key_id, (void *) key, sizeof(*key)) != sizeof(*key))
		return -1;

	return 0;
}
#endif

int bch2_request_key(struct bch_sb *sb, struct bch_key *key)
{
	struct printbuf key_description = PRINTBUF;
	int ret;

	prt_printf(&key_description, "bcachefs:");
	pr_uuid(&key_description, sb->user_uuid.b);

	ret = __bch2_request_key(key_description.buf, key);
	printbuf_exit(&key_description);
	return ret;
}

int bch2_decrypt_sb_key(struct bch_fs *c,
			struct bch_sb_field_crypt *crypt,
			struct bch_key *key)
{
	struct bch_encrypted_key sb_key = crypt->key;
	struct bch_key user_key;
	int ret = 0;

	/* is key encrypted? */
	if (!bch2_key_is_encrypted(&sb_key))
		goto out;

	ret = bch2_request_key(c->disk_sb.sb, &user_key);
	if (ret) {
		bch_err(c, "error requesting encryption key: %s", bch2_err_str(ret));
		goto err;
	}

	/* decrypt real key: */
	ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
				      &sb_key, sizeof(sb_key));
	if (ret)
		goto err;

	if (bch2_key_is_encrypted(&sb_key)) {
		bch_err(c, "incorrect encryption key");
		ret = -EINVAL;
		goto err;
	}
out:
	*key = sb_key.key;
err:
	memzero_explicit(&sb_key, sizeof(sb_key));
	memzero_explicit(&user_key, sizeof(user_key));
	return ret;
}

static int bch2_alloc_ciphers(struct bch_fs *c)
{
	int ret;

	if (!c->chacha20)
		c->chacha20 = crypto_alloc_sync_skcipher("chacha20", 0, 0);
	ret = PTR_ERR_OR_ZERO(c->chacha20);

	if (ret) {
		bch_err(c, "error requesting chacha20 module: %s", bch2_err_str(ret));
		return ret;
	}

	if (!c->poly1305)
		c->poly1305 = crypto_alloc_shash("poly1305", 0, 0);
	ret = PTR_ERR_OR_ZERO(c->poly1305);

	if (ret) {
		bch_err(c, "error requesting poly1305 module: %s", bch2_err_str(ret));
		return ret;
	}

	return 0;
}

int bch2_disable_encryption(struct bch_fs *c)
{
	struct bch_sb_field_crypt *crypt;
	struct bch_key key;
	int ret = -EINVAL;

	mutex_lock(&c->sb_lock);

	crypt = bch2_sb_get_crypt(c->disk_sb.sb);
	if (!crypt)
		goto out;

	/* is key encrypted? */
	ret = 0;
	if (bch2_key_is_encrypted(&crypt->key))
		goto out;

	ret = bch2_decrypt_sb_key(c, crypt, &key);
	if (ret)
		goto out;

	crypt->key.magic	= cpu_to_le64(BCH_KEY_MAGIC);
	crypt->key.key		= key;

	SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 0);
	bch2_write_super(c);
out:
	mutex_unlock(&c->sb_lock);

	return ret;
}

int bch2_enable_encryption(struct bch_fs *c, bool keyed)
{
	struct bch_encrypted_key key;
	struct bch_key user_key;
	struct bch_sb_field_crypt *crypt;
	int ret = -EINVAL;

	mutex_lock(&c->sb_lock);

	/* Do we already have an encryption key? */
	if (bch2_sb_get_crypt(c->disk_sb.sb))
		goto err;

	ret = bch2_alloc_ciphers(c);
	if (ret)
		goto err;

	key.magic = cpu_to_le64(BCH_KEY_MAGIC);
	get_random_bytes(&key.key, sizeof(key.key));

	if (keyed) {
		ret = bch2_request_key(c->disk_sb.sb, &user_key);
		if (ret) {
			bch_err(c, "error requesting encryption key: %s", bch2_err_str(ret));
			goto err;
		}

		ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
					      &key, sizeof(key));
		if (ret)
			goto err;
	}

	ret = crypto_skcipher_setkey(&c->chacha20->base,
			(void *) &key.key, sizeof(key.key));
	if (ret)
		goto err;

	crypt = bch2_sb_resize_crypt(&c->disk_sb, sizeof(*crypt) / sizeof(u64));
	if (!crypt) {
		ret = -BCH_ERR_ENOSPC_sb_crypt;
		goto err;
	}

	crypt->key = key;

	/* write superblock */
	SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 1);
	bch2_write_super(c);
err:
	mutex_unlock(&c->sb_lock);
	memzero_explicit(&user_key, sizeof(user_key));
	memzero_explicit(&key, sizeof(key));
	return ret;
}

void bch2_fs_encryption_exit(struct bch_fs *c)
{
	if (!IS_ERR_OR_NULL(c->poly1305))
		crypto_free_shash(c->poly1305);
	if (!IS_ERR_OR_NULL(c->chacha20))
		crypto_free_sync_skcipher(c->chacha20);
	if (!IS_ERR_OR_NULL(c->sha256))
		crypto_free_shash(c->sha256);
}

int bch2_fs_encryption_init(struct bch_fs *c)
{
	struct bch_sb_field_crypt *crypt;
	struct bch_key key;
	int ret = 0;

	c->sha256 = crypto_alloc_shash("sha256", 0, 0);
	ret = PTR_ERR_OR_ZERO(c->sha256);
	if (ret) {
		bch_err(c, "error requesting sha256 module: %s", bch2_err_str(ret));
		goto out;
	}

	crypt = bch2_sb_get_crypt(c->disk_sb.sb);
	if (!crypt)
		goto out;

	ret = bch2_alloc_ciphers(c);
	if (ret)
		goto out;

	ret = bch2_decrypt_sb_key(c, crypt, &key);
	if (ret)
		goto out;

	ret = crypto_skcipher_setkey(&c->chacha20->base,
			(void *) &key.key, sizeof(key.key));
	if (ret)
		goto out;
out:
	memzero_explicit(&key, sizeof(key));
	return ret;
}