switch (type) {
case BCH_CSUM_NONE:
return 0;
- case BCH_CSUM_CRC32C:
+ case BCH_CSUM_CRC32C_NONZERO:
return U32_MAX;
- case BCH_CSUM_CRC64:
+ case BCH_CSUM_CRC64_NONZERO:
return U64_MAX;
+ case BCH_CSUM_CRC32C:
+ return 0;
+ case BCH_CSUM_CRC64:
+ return 0;
default:
BUG();
}
switch (type) {
case BCH_CSUM_NONE:
return 0;
- case BCH_CSUM_CRC32C:
+ case BCH_CSUM_CRC32C_NONZERO:
return crc ^ U32_MAX;
- case BCH_CSUM_CRC64:
+ case BCH_CSUM_CRC64_NONZERO:
return crc ^ U64_MAX;
+ case BCH_CSUM_CRC32C:
+ return crc;
+ case BCH_CSUM_CRC64:
+ return crc;
default:
BUG();
}
switch (type) {
case BCH_CSUM_NONE:
return 0;
+ case BCH_CSUM_CRC32C_NONZERO:
case BCH_CSUM_CRC32C:
return crc32c(crc, data, len);
+ case BCH_CSUM_CRC64_NONZERO:
case BCH_CSUM_CRC64:
return bch2_crc64_update(crc, data, len);
default:
crypto_alloc_skcipher("chacha20", 0, 0);
int ret;
- if (!chacha20)
+ if (!chacha20) {
+ pr_err("error requesting chacha20 module: %li", PTR_ERR(chacha20));
return PTR_ERR(chacha20);
+ }
ret = crypto_skcipher_setkey(chacha20, (void *) key, sizeof(*key));
- if (ret)
+ if (ret) {
+ pr_err("crypto_skcipher_setkey() error: %i", ret);
goto err;
+ }
do_encrypt(chacha20, nonce, buf, len);
err:
{
switch (type) {
case BCH_CSUM_NONE:
+ case BCH_CSUM_CRC32C_NONZERO:
+ case BCH_CSUM_CRC64_NONZERO:
case BCH_CSUM_CRC32C:
case BCH_CSUM_CRC64: {
u64 crc = bch2_checksum_init(type);
crc = bch2_checksum_update(type, crc, data, len);
crc = bch2_checksum_final(type, crc);
- return (struct bch_csum) { .lo = crc };
+ return (struct bch_csum) { .lo = cpu_to_le64(crc) };
}
case BCH_CSUM_CHACHA20_POLY1305_80:
do_encrypt(c->chacha20, nonce, data, len);
}
/*
 * Checksum (or MAC) the data covered by @iter within @bio.
 *
 * @iter is both consumed and used as the iteration state, so the caller can
 * checksum a sub-range of the bio and continue from where this left off
 * (bch2_rechecksum_bio() relies on this).
 *
 * CRC results are returned little-endian in .lo (see cpu_to_le64 below);
 * .hi is only used by the poly1305 MAC path.
 */
static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
					   struct nonce nonce, struct bio *bio,
					   struct bvec_iter *iter)
{
	struct bio_vec bv;
	switch (type) {
	case BCH_CSUM_NONE:
		return (struct bch_csum) { 0 };
	case BCH_CSUM_CRC32C_NONZERO:
	case BCH_CSUM_CRC64_NONZERO:
	case BCH_CSUM_CRC32C:
	case BCH_CSUM_CRC64: {
		u64 crc = bch2_checksum_init(type);
#ifdef CONFIG_HIGHMEM
		/* Highmem pages may not be permanently mapped: kmap each segment. */
		__bio_for_each_segment(bv, bio, *iter, *iter) {
			void *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
			crc = bch2_checksum_update(type,
				crc, p, bv.bv_len);
			kunmap_atomic(p);
		}
#else
		/* No highmem: page_address() is valid, and contiguous segments
		 * can be checksummed in one call. */
		__bio_for_each_contig_segment(bv, bio, *iter, *iter)
			crc = bch2_checksum_update(type, crc,
				page_address(bv.bv_page) + bv.bv_offset,
				bv.bv_len);
#endif
		crc = bch2_checksum_final(type, crc);
		/* On-disk checksum representation is little-endian. */
		return (struct bch_csum) { .lo = cpu_to_le64(crc) };
	}
	case BCH_CSUM_CHACHA20_POLY1305_80:
		/* NOTE(review): desc/digest/ret declarations and the trailing
		 * return are elided diff context in this view — confirm against
		 * the full function. */
		gen_poly_key(c, desc, nonce);
#ifdef CONFIG_HIGHMEM
		__bio_for_each_segment(bv, bio, *iter, *iter) {
			void *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
			crypto_shash_update(desc, p, bv.bv_len);
			kunmap_atomic(p);
		}
#else
		__bio_for_each_contig_segment(bv, bio, *iter, *iter)
			crypto_shash_update(desc,
				page_address(bv.bv_page) + bv.bv_offset,
				bv.bv_len);
#endif
		crypto_shash_final(desc, digest);
		/* Truncate the poly1305 digest to the size this csum type stores. */
		memcpy(&ret, digest, bch_crc_bytes[type]);
	}
}
+struct bch_csum bch2_checksum_bio(struct bch_fs *c, unsigned type,
+ struct nonce nonce, struct bio *bio)
+{
+ struct bvec_iter iter = bio->bi_iter;
+
+ return __bch2_checksum_bio(c, type, nonce, bio, &iter);
+}
+
/*
 * Encrypt a bio in place with ChaCha20, batching bio segments into a
 * scatterlist and flushing it whenever it fills up.
 *
 * NOTE(review): the declarations of sgl/sg/bytes/iter (and one closing brace
 * of the "scatterlist full" branch) are elided diff context in this view —
 * confirm against the full function.
 */
void bch2_encrypt_bio(struct bch_fs *c, unsigned type,
		      struct nonce nonce, struct bio *bio)
{
	sg_init_table(sgl, ARRAY_SIZE(sgl));
	bio_for_each_segment(bv, bio, iter) {
		if (sg == sgl + ARRAY_SIZE(sgl)) {
			/* Scatterlist full: encrypt what we have, then advance
			 * the nonce past the bytes already processed so the
			 * keystream stays continuous across batches. */
			sg_mark_end(sg - 1);
			do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
			nonce = nonce_add(nonce, bytes);
			bytes = 0;
			sg_init_table(sgl, ARRAY_SIZE(sgl));
		sg_set_page(sg++, bv.bv_page, bv.bv_len, bv.bv_offset);
		bytes += bv.bv_len;
	}
	/* Encrypt the final (possibly partial) batch. */
	sg_mark_end(sg - 1);
	do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
}
+static inline bool bch2_checksum_mergeable(unsigned type)
+{
+
+ switch (type) {
+ case BCH_CSUM_NONE:
+ case BCH_CSUM_CRC32C:
+ case BCH_CSUM_CRC64:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct bch_csum bch2_checksum_merge(unsigned type,
+ struct bch_csum a,
+ struct bch_csum b, size_t b_len)
+{
+ BUG_ON(!bch2_checksum_mergeable(type));
+
+ while (b_len) {
+ unsigned b = min_t(unsigned, b_len, PAGE_SIZE);
+
+ a.lo = bch2_checksum_update(type, a.lo,
+ page_address(ZERO_PAGE(0)), b);
+ b_len -= b;
+ }
+
+ a.lo ^= b.lo;
+ a.hi ^= b.hi;
+ return a;
+}
+
/*
 * Verify a bio against an existing checksum and compute new checksums for up
 * to two sub-ranges of it, so an extent can be split or trimmed without
 * reading or rewriting the data.
 *
 * The bio is conceptually split into [len_a][len_b][remainder] (sectors).
 * New checksums are computed for the first two pieces when @crc_a/@crc_b are
 * non-NULL.  When the old and new checksum types match and are mergeable,
 * the piecewise checksums are merged and compared against crc_old.csum —
 * avoiding a second full pass over the data; otherwise the whole bio is
 * re-checksummed with the old type for verification.
 *
 * Returns 0 on success, -EIO if the existing checksum does not match.
 */
int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
			struct bversion version,
			struct bch_extent_crc_unpacked crc_old,
			struct bch_extent_crc_unpacked *crc_a,
			struct bch_extent_crc_unpacked *crc_b,
			unsigned len_a, unsigned len_b,
			unsigned new_csum_type)
{
	struct bvec_iter iter = bio->bi_iter;
	struct nonce nonce = extent_nonce(version, crc_old);
	struct bch_csum merged = { 0 };
	/* One entry per piece; the trailing NULL entry covers the remainder. */
	struct crc_split {
		struct bch_extent_crc_unpacked	*crc;
		unsigned			len;
		unsigned			csum_type;
		struct bch_csum			csum;
	} splits[3] = {
		{ crc_a, len_a, new_csum_type },
		{ crc_b, len_b, new_csum_type },
		{ NULL,	 bio_sectors(bio) - len_a - len_b, new_csum_type },
	}, *i;
	bool mergeable = crc_old.csum_type == new_csum_type &&
		bch2_checksum_mergeable(new_csum_type);
	unsigned crc_nonce = crc_old.nonce;

	/* Only uncompressed extents can be rechecksummed piecewise. */
	BUG_ON(len_a + len_b > bio_sectors(bio));
	BUG_ON(crc_old.uncompressed_size != bio_sectors(bio));
	BUG_ON(crc_old.compression_type);
	BUG_ON(bch2_csum_type_is_encryption(crc_old.csum_type) !=
	       bch2_csum_type_is_encryption(new_csum_type));

	for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
		/* Restrict the iterator to this piece; __bch2_checksum_bio()
		 * advances it, so pieces are consumed back to back. */
		iter.bi_size = i->len << 9;
		if (mergeable || i->crc)
			i->csum = __bch2_checksum_bio(c, i->csum_type,
						      nonce, bio, &iter);
		else
			bio_advance_iter(bio, &iter, i->len << 9);
		nonce = nonce_add(nonce, i->len << 9);
	}

	if (mergeable)
		/* Reassemble the whole-extent checksum from the pieces. */
		for (i = splits; i < splits + ARRAY_SIZE(splits); i++)
			merged = bch2_checksum_merge(new_csum_type, merged,
						     i->csum, i->len << 9);
	else
		/* Types differ or aren't mergeable: verify with a second pass. */
		merged = bch2_checksum_bio(c, crc_old.csum_type,
				extent_nonce(version, crc_old), bio);

	if (bch2_crc_cmp(merged, crc_old.csum))
		return -EIO;

	for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
		if (i->crc)
			*i->crc = (struct bch_extent_crc_unpacked) {
				.csum_type		= i->csum_type,
				.compressed_size	= i->len,
				.uncompressed_size	= i->len,
				.offset			= 0,
				.live_size		= i->len,
				.nonce			= crc_nonce,
				.csum			= i->csum,
			};

		/* Encrypted checksums consume nonce space per sector. */
		if (bch2_csum_type_is_encryption(new_csum_type))
			crc_nonce += i->len;
	}

	return 0;
}
+
#ifdef __KERNEL__
/*
 * Look up the user-supplied encryption key for this superblock from the
 * kernel keyring.  (Body elided in this view — only the in-kernel variant's
 * signature is visible here.)
 */
int bch2_request_key(struct bch_sb *sb, struct bch_key *key)
{
}
#endif
-static int bch2_decrypt_sb_key(struct bch_fs *c,
- struct bch_sb_field_crypt *crypt,
- struct bch_key *key)
+int bch2_decrypt_sb_key(struct bch_fs *c,
+ struct bch_sb_field_crypt *crypt,
+ struct bch_key *key)
{
struct bch_encrypted_key sb_key = crypt->key;
struct bch_key user_key;
if (!bch2_key_is_encrypted(&sb_key))
goto out;
- ret = bch2_request_key(c->disk_sb, &user_key);
+ ret = bch2_request_key(c->disk_sb.sb, &user_key);
if (ret) {
- bch_err(c, "error requesting encryption key");
+ bch_err(c, "error requesting encryption key: %i", ret);
goto err;
}
{
if (!c->chacha20)
c->chacha20 = crypto_alloc_skcipher("chacha20", 0, 0);
- if (IS_ERR(c->chacha20))
+ if (IS_ERR(c->chacha20)) {
+ bch_err(c, "error requesting chacha20 module: %li",
+ PTR_ERR(c->chacha20));
return PTR_ERR(c->chacha20);
+ }
if (!c->poly1305)
c->poly1305 = crypto_alloc_shash("poly1305", 0, 0);
- if (IS_ERR(c->poly1305))
+ if (IS_ERR(c->poly1305)) {
+ bch_err(c, "error requesting poly1305 module: %li",
+ PTR_ERR(c->poly1305));
return PTR_ERR(c->poly1305);
+ }
return 0;
}
mutex_lock(&c->sb_lock);
- crypt = bch2_sb_get_crypt(c->disk_sb);
+ crypt = bch2_sb_get_crypt(c->disk_sb.sb);
if (!crypt)
goto out;
crypt->key.magic = BCH_KEY_MAGIC;
crypt->key.key = key;
- SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb, 0);
+ SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 0);
bch2_write_super(c);
out:
mutex_unlock(&c->sb_lock);
mutex_lock(&c->sb_lock);
/* Do we already have an encryption key? */
- if (bch2_sb_get_crypt(c->disk_sb))
+ if (bch2_sb_get_crypt(c->disk_sb.sb))
goto err;
ret = bch2_alloc_ciphers(c);
get_random_bytes(&key.key, sizeof(key.key));
if (keyed) {
- ret = bch2_request_key(c->disk_sb, &user_key);
+ ret = bch2_request_key(c->disk_sb.sb, &user_key);
if (ret) {
- bch_err(c, "error requesting encryption key");
+ bch_err(c, "error requesting encryption key: %i", ret);
goto err;
}
if (ret)
goto err;
- crypt = bch2_fs_sb_resize_crypt(c, sizeof(*crypt) / sizeof(u64));
+ crypt = bch2_sb_resize_crypt(&c->disk_sb, sizeof(*crypt) / sizeof(u64));
if (!crypt) {
ret = -ENOMEM; /* XXX this technically could be -ENOSPC */
goto err;
crypt->key = key;
/* write superblock */
- SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb, 1);
+ SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 1);
bch2_write_super(c);
err:
mutex_unlock(&c->sb_lock);
{
struct bch_sb_field_crypt *crypt;
struct bch_key key;
- int ret;
+ int ret = 0;
+
+ pr_verbose_init(c->opts, "");
c->sha256 = crypto_alloc_shash("sha256", 0, 0);
- if (IS_ERR(c->sha256))
- return PTR_ERR(c->sha256);
+ if (IS_ERR(c->sha256)) {
+ bch_err(c, "error requesting sha256 module");
+ ret = PTR_ERR(c->sha256);
+ goto out;
+ }
- crypt = bch2_sb_get_crypt(c->disk_sb);
+ crypt = bch2_sb_get_crypt(c->disk_sb.sb);
if (!crypt)
- return 0;
+ goto out;
ret = bch2_alloc_ciphers(c);
if (ret)
- return ret;
+ goto out;
ret = bch2_decrypt_sb_key(c, crypt, &key);
if (ret)
- goto err;
+ goto out;
ret = crypto_skcipher_setkey(c->chacha20,
(void *) &key.key, sizeof(key.key));
-err:
+ if (ret)
+ goto out;
+out:
memzero_explicit(&key, sizeof(key));
+ pr_verbose_init(c->opts, "ret %i", ret);
return ret;
}