diff --git a/libbcachefs/checksum.c b/libbcachefs/checksum.c
index 6f1afa4a31199fb11ed7135c10d6c6803500a1d5..4701457f6381ca820e17a12707009c272ed5b4ac 100644
--- a/libbcachefs/checksum.c
+++ b/libbcachefs/checksum.c
@@ -1,74 +1,101 @@
 // SPDX-License-Identifier: GPL-2.0
 #include "bcachefs.h"
 #include "checksum.h"
+#include "errcode.h"
 #include "super.h"
 #include "super-io.h"
 
 #include <linux/crc32c.h>
 #include <linux/crypto.h>
+#include <linux/xxhash.h>
 #include <linux/key.h>
 #include <linux/random.h>
 #include <linux/scatterlist.h>
 #include <crypto/chacha.h>
 #include <crypto/hash.h>
 #include <crypto/poly1305.h>
+#include <crypto/skcipher.h>
 #include <keys/user-type.h>
 
-static u64 bch2_checksum_init(unsigned type)
+/*
+ * bch2_checksum_state is an abstraction of the checksum state calculated over
+ * different pages. It features page merging without having the checksum
+ * algorithm lose its state. For native checksum algorithms (like crc), a
+ * default seed value will do; for hash-like algorithms, a state needs to be
+ * stored.
+ */
+
+struct bch2_checksum_state {
+       union {
+               u64 seed;
+               struct xxh64_state h64state;
+       };
+       unsigned int type;
+};
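+
+/*
+ * Illustrative sketch of the init/update/final sequence described above
+ * (buf and len are placeholder names; bch2_checksum() below follows the
+ * same pattern):
+ *
+ *      struct bch2_checksum_state state;
+ *
+ *      state.type = BCH_CSUM_crc32c;
+ *      bch2_checksum_init(&state);
+ *      bch2_checksum_update(&state, buf, len);
+ *      u64 csum = bch2_checksum_final(&state);
+ */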
+
+static void bch2_checksum_init(struct bch2_checksum_state *state)
 {
-       switch (type) {
-       case BCH_CSUM_NONE:
-               return 0;
-       case BCH_CSUM_CRC32C_NONZERO:
-               return U32_MAX;
-       case BCH_CSUM_CRC64_NONZERO:
-               return U64_MAX;
-       case BCH_CSUM_CRC32C:
-               return 0;
-       case BCH_CSUM_CRC64:
-               return 0;
+       switch (state->type) {
+       case BCH_CSUM_none:
+       case BCH_CSUM_crc32c:
+       case BCH_CSUM_crc64:
+               state->seed = 0;
+               break;
+       case BCH_CSUM_crc32c_nonzero:
+               state->seed = U32_MAX;
+               break;
+       case BCH_CSUM_crc64_nonzero:
+               state->seed = U64_MAX;
+               break;
+       case BCH_CSUM_xxhash:
+               xxh64_reset(&state->h64state, 0);
+               break;
        default:
                BUG();
        }
 }
 
-static u64 bch2_checksum_final(unsigned type, u64 crc)
+static u64 bch2_checksum_final(const struct bch2_checksum_state *state)
 {
-       switch (type) {
-       case BCH_CSUM_NONE:
-               return 0;
-       case BCH_CSUM_CRC32C_NONZERO:
-               return crc ^ U32_MAX;
-       case BCH_CSUM_CRC64_NONZERO:
-               return crc ^ U64_MAX;
-       case BCH_CSUM_CRC32C:
-               return crc;
-       case BCH_CSUM_CRC64:
-               return crc;
+       switch (state->type) {
+       case BCH_CSUM_none:
+       case BCH_CSUM_crc32c:
+       case BCH_CSUM_crc64:
+               return state->seed;
+       case BCH_CSUM_crc32c_nonzero:
+               return state->seed ^ U32_MAX;
+       case BCH_CSUM_crc64_nonzero:
+               return state->seed ^ U64_MAX;
+       case BCH_CSUM_xxhash:
+               return xxh64_digest(&state->h64state);
        default:
                BUG();
        }
 }
 
-static u64 bch2_checksum_update(unsigned type, u64 crc, const void *data, size_t len)
+static void bch2_checksum_update(struct bch2_checksum_state *state, const void *data, size_t len)
 {
-       switch (type) {
-       case BCH_CSUM_NONE:
-               return 0;
-       case BCH_CSUM_CRC32C_NONZERO:
-       case BCH_CSUM_CRC32C:
-               return crc32c(crc, data, len);
-       case BCH_CSUM_CRC64_NONZERO:
-       case BCH_CSUM_CRC64:
-               return crc64_be(crc, data, len);
+       switch (state->type) {
+       case BCH_CSUM_none:
+               return;
+       case BCH_CSUM_crc32c_nonzero:
+       case BCH_CSUM_crc32c:
+               state->seed = crc32c(state->seed, data, len);
+               break;
+       case BCH_CSUM_crc64_nonzero:
+       case BCH_CSUM_crc64:
+               state->seed = crc64_be(state->seed, data, len);
+               break;
+       case BCH_CSUM_xxhash:
+               xxh64_update(&state->h64state, data, len);
+               break;
        default:
                BUG();
        }
 }
 
-static inline void do_encrypt_sg(struct crypto_sync_skcipher *tfm,
-                                struct nonce nonce,
-                                struct scatterlist *sg, size_t len)
+static inline int do_encrypt_sg(struct crypto_sync_skcipher *tfm,
+                               struct nonce nonce,
+                               struct scatterlist *sg, size_t len)
 {
        SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
        int ret;
@@ -78,17 +105,51 @@ static inline void do_encrypt_sg(struct crypto_sync_skcipher *tfm,
        skcipher_request_set_crypt(req, sg, sg, len, nonce.d);
 
        ret = crypto_skcipher_encrypt(req);
-       BUG_ON(ret);
+       if (ret)
+               pr_err("got error %i from crypto_skcipher_encrypt()", ret);
+
+       return ret;
 }
 
-static inline void do_encrypt(struct crypto_sync_skcipher *tfm,
+static inline int do_encrypt(struct crypto_sync_skcipher *tfm,
                              struct nonce nonce,
                              void *buf, size_t len)
 {
-       struct scatterlist sg;
+       if (!is_vmalloc_addr(buf)) {
+               struct scatterlist sg;
+
+               sg_init_table(&sg, 1);
+               sg_set_page(&sg, virt_to_page(buf), len, offset_in_page(buf));
+               return do_encrypt_sg(tfm, nonce, &sg, len);
+       } else {
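+               /*
+                * vmalloc memory isn't guaranteed to be physically contiguous,
+                * so give each page its own scatterlist entry:
+                */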
+               unsigned pages = buf_pages(buf, len);
+               struct scatterlist *sg;
+               size_t orig_len = len;
+               int ret, i;
 
-       sg_init_one(&sg, buf, len);
-       do_encrypt_sg(tfm, nonce, &sg, len);
+               sg = kmalloc_array(pages, sizeof(*sg), GFP_KERNEL);
+               if (!sg)
+                       return -BCH_ERR_ENOMEM_do_encrypt;
+
+               sg_init_table(sg, pages);
+
+               for (i = 0; i < pages; i++) {
+                       unsigned offset = offset_in_page(buf);
+                       unsigned pg_len = min_t(size_t, len, PAGE_SIZE - offset);
+
+                       sg_set_page(sg + i, vmalloc_to_page(buf), pg_len, offset);
+                       buf += pg_len;
+                       len -= pg_len;
+               }
+
+               ret = do_encrypt_sg(tfm, nonce, sg, orig_len);
+               kfree(sg);
+               return ret;
+       }
 }
 
 int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
@@ -98,58 +159,66 @@ int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
                crypto_alloc_sync_skcipher("chacha20", 0, 0);
        int ret;
 
-       if (!chacha20) {
-               pr_err("error requesting chacha20 module: %li", PTR_ERR(chacha20));
-               return PTR_ERR(chacha20);
+       ret = PTR_ERR_OR_ZERO(chacha20);
+       if (ret) {
+               pr_err("error requesting chacha20 cipher: %s", bch2_err_str(ret));
+               return ret;
        }
 
        ret = crypto_skcipher_setkey(&chacha20->base,
                                     (void *) key, sizeof(*key));
        if (ret) {
-               pr_err("crypto_skcipher_setkey() error: %i", ret);
+               pr_err("error from crypto_skcipher_setkey(): %s", bch2_err_str(ret));
                goto err;
        }
 
-       do_encrypt(chacha20, nonce, buf, len);
+       ret = do_encrypt(chacha20, nonce, buf, len);
 err:
        crypto_free_sync_skcipher(chacha20);
        return ret;
 }
 
-static void gen_poly_key(struct bch_fs *c, struct shash_desc *desc,
-                        struct nonce nonce)
+static int gen_poly_key(struct bch_fs *c, struct shash_desc *desc,
+                       struct nonce nonce)
 {
        u8 key[POLY1305_KEY_SIZE];
+       int ret;
 
        nonce.d[3] ^= BCH_NONCE_POLY;
 
        memset(key, 0, sizeof(key));
-       do_encrypt(c->chacha20, nonce, key, sizeof(key));
+       ret = do_encrypt(c->chacha20, nonce, key, sizeof(key));
+       if (ret)
+               return ret;
 
        desc->tfm = c->poly1305;
        crypto_shash_init(desc);
        crypto_shash_update(desc, key, sizeof(key));
+       return 0;
 }
 
 struct bch_csum bch2_checksum(struct bch_fs *c, unsigned type,
                              struct nonce nonce, const void *data, size_t len)
 {
        switch (type) {
-       case BCH_CSUM_NONE:
-       case BCH_CSUM_CRC32C_NONZERO:
-       case BCH_CSUM_CRC64_NONZERO:
-       case BCH_CSUM_CRC32C:
-       case BCH_CSUM_CRC64: {
-               u64 crc = bch2_checksum_init(type);
+       case BCH_CSUM_none:
+       case BCH_CSUM_crc32c_nonzero:
+       case BCH_CSUM_crc64_nonzero:
+       case BCH_CSUM_crc32c:
+       case BCH_CSUM_xxhash:
+       case BCH_CSUM_crc64: {
+               struct bch2_checksum_state state;
+
+               state.type = type;
 
-               crc = bch2_checksum_update(type, crc, data, len);
-               crc = bch2_checksum_final(type, crc);
+               bch2_checksum_init(&state);
+               bch2_checksum_update(&state, data, len);
 
-               return (struct bch_csum) { .lo = cpu_to_le64(crc) };
+               return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
        }
 
-       case BCH_CSUM_CHACHA20_POLY1305_80:
-       case BCH_CSUM_CHACHA20_POLY1305_128: {
+       case BCH_CSUM_chacha20_poly1305_80:
+       case BCH_CSUM_chacha20_poly1305_128: {
                SHASH_DESC_ON_STACK(desc, c->poly1305);
                u8 digest[POLY1305_DIGEST_SIZE];
                struct bch_csum ret = { 0 };
@@ -167,13 +236,13 @@ struct bch_csum bch2_checksum(struct bch_fs *c, unsigned type,
        }
 }
 
-void bch2_encrypt(struct bch_fs *c, unsigned type,
+int bch2_encrypt(struct bch_fs *c, unsigned type,
                  struct nonce nonce, void *data, size_t len)
 {
        if (!bch2_csum_type_is_encryption(type))
-               return;
+               return 0;
 
-       do_encrypt(c->chacha20, nonce, data, len);
+       return do_encrypt(c->chacha20, nonce, data, len);
 }
 
 static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
@@ -183,33 +252,35 @@ static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
        struct bio_vec bv;
 
        switch (type) {
-       case BCH_CSUM_NONE:
+       case BCH_CSUM_none:
                return (struct bch_csum) { 0 };
-       case BCH_CSUM_CRC32C_NONZERO:
-       case BCH_CSUM_CRC64_NONZERO:
-       case BCH_CSUM_CRC32C:
-       case BCH_CSUM_CRC64: {
-               u64 crc = bch2_checksum_init(type);
+       case BCH_CSUM_crc32c_nonzero:
+       case BCH_CSUM_crc64_nonzero:
+       case BCH_CSUM_crc32c:
+       case BCH_CSUM_xxhash:
+       case BCH_CSUM_crc64: {
+               struct bch2_checksum_state state;
+
+               state.type = type;
+               bch2_checksum_init(&state);
 
 #ifdef CONFIG_HIGHMEM
                __bio_for_each_segment(bv, bio, *iter, *iter) {
-                       void *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
-                       crc = bch2_checksum_update(type,
-                               crc, p, bv.bv_len);
-                       kunmap_atomic(p);
+                       void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;
+
+                       bch2_checksum_update(&state, p, bv.bv_len);
+                       kunmap_local(p);
                }
 #else
                __bio_for_each_bvec(bv, bio, *iter, *iter)
-                       crc = bch2_checksum_update(type, crc,
-                               page_address(bv.bv_page) + bv.bv_offset,
+                       bch2_checksum_update(&state, page_address(bv.bv_page) + bv.bv_offset,
                                bv.bv_len);
 #endif
-               crc = bch2_checksum_final(type, crc);
-               return (struct bch_csum) { .lo = cpu_to_le64(crc) };
+               return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
        }
 
-       case BCH_CSUM_CHACHA20_POLY1305_80:
-       case BCH_CSUM_CHACHA20_POLY1305_128: {
+       case BCH_CSUM_chacha20_poly1305_80:
+       case BCH_CSUM_chacha20_poly1305_128: {
                SHASH_DESC_ON_STACK(desc, c->poly1305);
                u8 digest[POLY1305_DIGEST_SIZE];
                struct bch_csum ret = { 0 };
@@ -218,10 +289,10 @@ static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
 
 #ifdef CONFIG_HIGHMEM
                __bio_for_each_segment(bv, bio, *iter, *iter) {
-                       void *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
+                       void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;
 
                        crypto_shash_update(desc, p, bv.bv_len);
-                       kunmap_atomic(p);
+                       kunmap_local(p);
                }
 #else
                __bio_for_each_bvec(bv, bio, *iter, *iter)
@@ -247,23 +318,27 @@ struct bch_csum bch2_checksum_bio(struct bch_fs *c, unsigned type,
        return __bch2_checksum_bio(c, type, nonce, bio, &iter);
 }
 
-void bch2_encrypt_bio(struct bch_fs *c, unsigned type,
-                     struct nonce nonce, struct bio *bio)
+int __bch2_encrypt_bio(struct bch_fs *c, unsigned type,
+                    struct nonce nonce, struct bio *bio)
 {
        struct bio_vec bv;
        struct bvec_iter iter;
        struct scatterlist sgl[16], *sg = sgl;
        size_t bytes = 0;
+       int ret = 0;
 
        if (!bch2_csum_type_is_encryption(type))
-               return;
+               return 0;
 
        sg_init_table(sgl, ARRAY_SIZE(sgl));
 
        bio_for_each_segment(bv, bio, iter) {
                if (sg == sgl + ARRAY_SIZE(sgl)) {
                        sg_mark_end(sg - 1);
-                       do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
+
+                       ret = do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
+                       if (ret)
+                               return ret;
 
                        nonce = nonce_add(nonce, bytes);
                        bytes = 0;
@@ -277,22 +352,28 @@ void bch2_encrypt_bio(struct bch_fs *c, unsigned type,
        }
 
        sg_mark_end(sg - 1);
-       do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
+       return do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
 }
 
 struct bch_csum bch2_checksum_merge(unsigned type, struct bch_csum a,
                                    struct bch_csum b, size_t b_len)
 {
+       struct bch2_checksum_state state;
+
+       state.type = type;
+       bch2_checksum_init(&state);
+       state.seed = le64_to_cpu(a.lo);
+
        BUG_ON(!bch2_checksum_mergeable(type));
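+
+       /*
+        * Extend a's checksum over b_len zero bytes, then xor in b: only valid
+        * for the crc types bch2_checksum_mergeable() accepts.
+        */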
 
        while (b_len) {
-               unsigned b = min_t(unsigned, b_len, PAGE_SIZE);
+               unsigned page_len = min_t(unsigned, b_len, PAGE_SIZE);
 
-               a.lo = bch2_checksum_update(type, a.lo,
-                               page_address(ZERO_PAGE(0)), b);
-               b_len -= b;
+               bch2_checksum_update(&state,
+                               page_address(ZERO_PAGE(0)), page_len);
+               b_len -= page_len;
        }
-
+       a.lo = cpu_to_le64(bch2_checksum_final(&state));
        a.lo ^= b.lo;
        a.hi ^= b.hi;
        return a;
@@ -315,9 +396,9 @@ int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
                unsigned                        csum_type;
                struct bch_csum                 csum;
        } splits[3] = {
-               { crc_a, len_a, new_csum_type },
-               { crc_b, len_b, new_csum_type },
-               { NULL,  bio_sectors(bio) - len_a - len_b, new_csum_type },
+               { crc_a, len_a, new_csum_type, { 0 } },
+               { crc_b, len_b, new_csum_type, { 0 } },
+               { NULL,  bio_sectors(bio) - len_a - len_b, new_csum_type, { 0 } },
        }, *i;
        bool mergeable = crc_old.csum_type == new_csum_type &&
                bch2_checksum_mergeable(new_csum_type);
@@ -347,8 +428,18 @@ int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
                merged = bch2_checksum_bio(c, crc_old.csum_type,
                                extent_nonce(version, crc_old), bio);
 
-       if (bch2_crc_cmp(merged, crc_old.csum))
+       if (bch2_crc_cmp(merged, crc_old.csum) && !c->opts.no_data_io) {
+               bch_err(c, "checksum error in %s() (memory corruption or bug?)\n"
+                       "expected %0llx:%0llx got %0llx:%0llx (old type %s new type %s)",
+                       __func__,
+                       crc_old.csum.hi,
+                       crc_old.csum.lo,
+                       merged.hi,
+                       merged.lo,
+                       bch2_csum_types[crc_old.csum_type],
+                       bch2_csum_types[new_csum_type]);
                return -EIO;
+       }
 
        for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
                if (i->crc)
@@ -370,18 +461,56 @@ int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
        return 0;
 }
 
+/* BCH_SB_FIELD_crypt: */
+
+static int bch2_sb_crypt_validate(struct bch_sb *sb,
+                                 struct bch_sb_field *f,
+                                 struct printbuf *err)
+{
+       struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);
+
+       if (vstruct_bytes(&crypt->field) < sizeof(*crypt)) {
+               prt_printf(err, "wrong size (got %zu should be %zu)",
+                      vstruct_bytes(&crypt->field), sizeof(*crypt));
+               return -BCH_ERR_invalid_sb_crypt;
+       }
+
+       if (BCH_CRYPT_KDF_TYPE(crypt)) {
+               prt_printf(err, "bad kdf type %llu", BCH_CRYPT_KDF_TYPE(crypt));
+               return -BCH_ERR_invalid_sb_crypt;
+       }
+
+       return 0;
+}
+
+static void bch2_sb_crypt_to_text(struct printbuf *out, struct bch_sb *sb,
+                                 struct bch_sb_field *f)
+{
+       struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);
+
+       prt_printf(out, "KDF:               %llu", BCH_CRYPT_KDF_TYPE(crypt));
+       prt_newline(out);
+       prt_printf(out, "scrypt n:          %llu", BCH_KDF_SCRYPT_N(crypt));
+       prt_newline(out);
+       prt_printf(out, "scrypt r:          %llu", BCH_KDF_SCRYPT_R(crypt));
+       prt_newline(out);
+       prt_printf(out, "scrypt p:          %llu", BCH_KDF_SCRYPT_P(crypt));
+       prt_newline(out);
+}
+
+const struct bch_sb_field_ops bch_sb_field_ops_crypt = {
+       .validate       = bch2_sb_crypt_validate,
+       .to_text        = bch2_sb_crypt_to_text,
+};
+
 #ifdef __KERNEL__
-int bch2_request_key(struct bch_sb *sb, struct bch_key *key)
+static int __bch2_request_key(char *key_description, struct bch_key *key)
 {
-       char key_description[60];
        struct key *keyring_key;
        const struct user_key_payload *ukp;
        int ret;
 
-       snprintf(key_description, sizeof(key_description),
-                "bcachefs:%pUb", &sb->user_uuid);
-
-       keyring_key = request_key(&key_type_logon, key_description, NULL);
+       keyring_key = request_key(&key_type_user, key_description, NULL);
        if (IS_ERR(keyring_key))
                return PTR_ERR(keyring_key);
 
@@ -400,27 +529,83 @@ int bch2_request_key(struct bch_sb *sb, struct bch_key *key)
 }
 #else
 #include <keyutils.h>
-#include <uuid/uuid.h>
 
-int bch2_request_key(struct bch_sb *sb, struct bch_key *key)
+static int __bch2_request_key(char *key_description, struct bch_key *key)
 {
        key_serial_t key_id;
-       char key_description[60];
-       char uuid[40];
 
-       uuid_unparse_lower(sb->user_uuid.b, uuid);
-       sprintf(key_description, "bcachefs:%s", uuid);
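+       /* try the session, user, then user-session keyrings, in that order: */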
+       key_id = request_key("user", key_description, NULL,
+                            KEY_SPEC_SESSION_KEYRING);
+       if (key_id >= 0)
+               goto got_key;
 
        key_id = request_key("user", key_description, NULL,
                             KEY_SPEC_USER_KEYRING);
-       if (key_id < 0)
-               return -errno;
+       if (key_id >= 0)
+               goto got_key;
+
+       key_id = request_key("user", key_description, NULL,
+                            KEY_SPEC_USER_SESSION_KEYRING);
+       if (key_id >= 0)
+               goto got_key;
+
+       return -errno;
+got_key:
 
        if (keyctl_read(key_id, (void *) key, sizeof(*key)) != sizeof(*key))
                return -1;
 
        return 0;
 }
+
+#include "crypto.h"
+#endif
+
+int bch2_request_key(struct bch_sb *sb, struct bch_key *key)
+{
+       struct printbuf key_description = PRINTBUF;
+       int ret;
+
+       prt_printf(&key_description, "bcachefs:");
+       pr_uuid(&key_description, sb->user_uuid.b);
+
+       ret = __bch2_request_key(key_description.buf, key);
+       printbuf_exit(&key_description);
+
+#ifndef __KERNEL__
+       if (ret) {
+               char *passphrase = read_passphrase("Enter passphrase: ");
+               struct bch_encrypted_key sb_key;
+
+               bch2_passphrase_check(sb, passphrase,
+                                     key, &sb_key);
+               ret = 0;
+       }
+#endif
+
+       /* stash with memfd, pass memfd fd to mount */
+
+       return ret;
+}
+
+#ifndef __KERNEL__
+int bch2_revoke_key(struct bch_sb *sb)
+{
+       key_serial_t key_id;
+       struct printbuf key_description = PRINTBUF;
+
+       prt_printf(&key_description, "bcachefs:");
+       pr_uuid(&key_description, sb->user_uuid.b);
+
+       key_id = request_key("user", key_description.buf, NULL, KEY_SPEC_USER_KEYRING);
+       printbuf_exit(&key_description);
+       if (key_id < 0)
+               return errno;
+
+       keyctl_revoke(key_id);
+
+       return 0;
+}
 #endif
 
 int bch2_decrypt_sb_key(struct bch_fs *c,
@@ -437,13 +622,13 @@ int bch2_decrypt_sb_key(struct bch_fs *c,
 
        ret = bch2_request_key(c->disk_sb.sb, &user_key);
        if (ret) {
-               bch_err(c, "error requesting encryption key: %i", ret);
+               bch_err(c, "error requesting encryption key: %s", bch2_err_str(ret));
                goto err;
        }
 
        /* decrypt real key: */
        ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
-                            &sb_key, sizeof(sb_key));
+                                     &sb_key, sizeof(sb_key));
        if (ret)
                goto err;
 
@@ -462,20 +647,24 @@ err:
 
 static int bch2_alloc_ciphers(struct bch_fs *c)
 {
+       int ret;
+
        if (!c->chacha20)
                c->chacha20 = crypto_alloc_sync_skcipher("chacha20", 0, 0);
-       if (IS_ERR(c->chacha20)) {
-               bch_err(c, "error requesting chacha20 module: %li",
-                       PTR_ERR(c->chacha20));
-               return PTR_ERR(c->chacha20);
+       ret = PTR_ERR_OR_ZERO(c->chacha20);
+
+       if (ret) {
+               bch_err(c, "error requesting chacha20 module: %s", bch2_err_str(ret));
+               return ret;
        }
 
        if (!c->poly1305)
                c->poly1305 = crypto_alloc_shash("poly1305", 0, 0);
-       if (IS_ERR(c->poly1305)) {
-               bch_err(c, "error requesting poly1305 module: %li",
-                       PTR_ERR(c->poly1305));
-               return PTR_ERR(c->poly1305);
+       ret = PTR_ERR_OR_ZERO(c->poly1305);
+
+       if (ret) {
+               bch_err(c, "error requesting poly1305 module: %s", bch2_err_str(ret));
+               return ret;
        }
 
        return 0;
@@ -489,7 +678,7 @@ int bch2_disable_encryption(struct bch_fs *c)
 
        mutex_lock(&c->sb_lock);
 
-       crypt = bch2_sb_get_crypt(c->disk_sb.sb);
+       crypt = bch2_sb_field_get(c->disk_sb.sb, crypt);
        if (!crypt)
                goto out;
 
@@ -502,7 +691,7 @@ int bch2_disable_encryption(struct bch_fs *c)
        if (ret)
                goto out;
 
-       crypt->key.magic        = BCH_KEY_MAGIC;
+       crypt->key.magic        = cpu_to_le64(BCH_KEY_MAGIC);
        crypt->key.key          = key;
 
        SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 0);
@@ -523,20 +712,20 @@ int bch2_enable_encryption(struct bch_fs *c, bool keyed)
        mutex_lock(&c->sb_lock);
 
        /* Do we already have an encryption key? */
-       if (bch2_sb_get_crypt(c->disk_sb.sb))
+       if (bch2_sb_field_get(c->disk_sb.sb, crypt))
                goto err;
 
        ret = bch2_alloc_ciphers(c);
        if (ret)
                goto err;
 
-       key.magic = BCH_KEY_MAGIC;
+       key.magic = cpu_to_le64(BCH_KEY_MAGIC);
        get_random_bytes(&key.key, sizeof(key.key));
 
        if (keyed) {
                ret = bch2_request_key(c->disk_sb.sb, &user_key);
                if (ret) {
-                       bch_err(c, "error requesting encryption key: %i", ret);
+                       bch_err(c, "error requesting encryption key: %s", bch2_err_str(ret));
                        goto err;
                }
 
@@ -551,9 +740,10 @@ int bch2_enable_encryption(struct bch_fs *c, bool keyed)
        if (ret)
                goto err;
 
-       crypt = bch2_sb_resize_crypt(&c->disk_sb, sizeof(*crypt) / sizeof(u64));
+       crypt = bch2_sb_field_resize(&c->disk_sb, crypt,
+                                    sizeof(*crypt) / sizeof(u64));
        if (!crypt) {
-               ret = -ENOMEM; /* XXX this technically could be -ENOSPC */
+               ret = -BCH_ERR_ENOSPC_sb_crypt;
                goto err;
        }
 
@@ -585,16 +775,14 @@ int bch2_fs_encryption_init(struct bch_fs *c)
        struct bch_key key;
        int ret = 0;
 
-       pr_verbose_init(c->opts, "");
-
        c->sha256 = crypto_alloc_shash("sha256", 0, 0);
-       if (IS_ERR(c->sha256)) {
-               bch_err(c, "error requesting sha256 module");
-               ret = PTR_ERR(c->sha256);
+       ret = PTR_ERR_OR_ZERO(c->sha256);
+       if (ret) {
+               bch_err(c, "error requesting sha256 module: %s", bch2_err_str(ret));
                goto out;
        }
 
-       crypt = bch2_sb_get_crypt(c->disk_sb.sb);
+       crypt = bch2_sb_field_get(c->disk_sb.sb, crypt);
        if (!crypt)
                goto out;
 
@@ -612,6 +800,5 @@ int bch2_fs_encryption_init(struct bch_fs *c)
                goto out;
 out:
        memzero_explicit(&key, sizeof(key));
-       pr_verbose_init(c->opts, "ret %i", ret);
        return ret;
 }