diff --git a/libbcachefs/compress.c b/libbcachefs/compress.c
index bb557ed..aebf46b 100644
--- a/libbcachefs/compress.c
+++ b/libbcachefs/compress.c
@@ -17,7 +17,6 @@ struct bbuf {
 		BB_NONE,
 		BB_VMAP,
 		BB_KMALLOC,
-		BB_VMALLOC,
 		BB_MEMPOOL,
 	} type;
 	int		rw;
@@ -33,23 +32,31 @@ static struct bbuf __bounce_alloc(struct bch_fs *c, unsigned size, int rw)
 	if (b)
 		return (struct bbuf) { .b = b, .type = BB_KMALLOC, .rw = rw };
 
-	b = mempool_alloc(&c->compression_bounce[rw], GFP_NOWAIT);
-	b = b ? page_address(b) : NULL;
-	if (b)
-		return (struct bbuf) { .b = b, .type = BB_MEMPOOL, .rw = rw };
-
-	b = vmalloc(size);
-	if (b)
-		return (struct bbuf) { .b = b, .type = BB_VMALLOC, .rw = rw };
-
 	b = mempool_alloc(&c->compression_bounce[rw], GFP_NOIO);
-	b = b ? page_address(b) : NULL;
 	if (b)
 		return (struct bbuf) { .b = b, .type = BB_MEMPOOL, .rw = rw };
 
 	BUG();
 }
 
+static bool bio_phys_contig(struct bio *bio, struct bvec_iter start)
+{
+	struct bio_vec bv;
+	struct bvec_iter iter;
+	void *expected_start = NULL;
+
+	__bio_for_each_bvec(bv, bio, iter, start) {
+		if (expected_start &&
+		    expected_start != page_address(bv.bv_page) + bv.bv_offset)
+			return false;
+
+		expected_start = page_address(bv.bv_page) +
+			bv.bv_offset + bv.bv_len;
+	}
+
+	return true;
+}
+
 static struct bbuf __bio_map_or_bounce(struct bch_fs *c, struct bio *bio,
 				       struct bvec_iter start, int rw)
 {
@@ -59,27 +66,28 @@ static struct bbuf __bio_map_or_bounce(struct bch_fs *c, struct bio *bio,
 	unsigned nr_pages = 0;
 	struct page *stack_pages[16];
 	struct page **pages = NULL;
-	bool first = true;
-	unsigned prev_end = PAGE_SIZE;
 	void *data;
 
 	BUG_ON(bvec_iter_sectors(start) > c->sb.encoded_extent_max);
 
-#ifndef CONFIG_HIGHMEM
-	__bio_for_each_bvec(bv, bio, iter, start) {
-		if (bv.bv_len == start.bi_size)
-			return (struct bbuf) {
-				.b = page_address(bv.bv_page) + bv.bv_offset,
-				.type = BB_NONE, .rw = rw
-			};
-	}
-#endif
+	if (!PageHighMem(bio_iter_page(bio, start)) &&
+	    bio_phys_contig(bio, start))
+		return (struct bbuf) {
+			.b = page_address(bio_iter_page(bio, start)) +
+				bio_iter_offset(bio, start),
+			.type = BB_NONE, .rw = rw
+		};
+
+	/* check if we can map the pages contiguously: */
 	__bio_for_each_segment(bv, bio, iter, start) {
-		if ((!first && bv.bv_offset) ||
-		    prev_end != PAGE_SIZE)
+		if (iter.bi_size != start.bi_size &&
+		    bv.bv_offset)
+			goto bounce;
+
+		if (bv.bv_len < iter.bi_size &&
+		    bv.bv_offset + bv.bv_len < PAGE_SIZE)
 			goto bounce;
 
-		prev_end = bv.bv_offset + bv.bv_len;
 		nr_pages++;
 	}
 
@@ -129,12 +137,8 @@ static void bio_unmap_or_unbounce(struct bch_fs *c, struct bbuf buf)
 	case BB_KMALLOC:
 		kfree(buf.b);
 		break;
-	case BB_VMALLOC:
-		vfree(buf.b);
-		break;
 	case BB_MEMPOOL:
-		mempool_free(virt_to_page(buf.b),
-			     &c->compression_bounce[buf.rw]);
+		mempool_free(buf.b, &c->compression_bounce[buf.rw]);
 		break;
 	}
 }
@@ -187,20 +191,21 @@ static int __bio_uncompress(struct bch_fs *c, struct bio *src,
 	}
 	case BCH_COMPRESSION_TYPE_zstd: {
 		ZSTD_DCtx *ctx;
-		size_t len;
+		size_t real_src_len = le32_to_cpup(src_data.b);
+
+		if (real_src_len > src_len - 4)
+			goto err;
 
 		workspace = mempool_alloc(&c->decompress_workspace, GFP_NOIO);
 		ctx = ZSTD_initDCtx(workspace, ZSTD_DCtxWorkspaceBound());
 
-		src_len = le32_to_cpup(src_data.b);
-
-		len = ZSTD_decompressDCtx(ctx,
+		ret = ZSTD_decompressDCtx(ctx,
 				dst_data,	dst_len,
-				src_data.b + 4, src_len);
+				src_data.b + 4, real_src_len);
 
 		mempool_free(workspace, &c->decompress_workspace);
 
-		if (len != dst_len)
+		if (ret != dst_len)
 			goto err;
 		break;
 	}
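The zstd hunk above is worth pausing on: bcachefs stores zstd extents as a 4-byte little-endian length prefix followed by the compressed frame, because the extent itself is padded out to sector granularity and the frame's true length would otherwise be lost. The old code trusted that prefix; the new code bounds-checks it against the extent size before handing it to ZSTD_decompressDCtx(), so a corrupt or malicious prefix can no longer read past the end of the source buffer. A minimal userspace sketch of that framing check (extent_zstd_payload() and read_le32() are illustrative names, not bcachefs API):

#include <stdint.h>
#include <stddef.h>

/* Read a little-endian u32, as le32_to_cpup() does in the kernel. */
static uint32_t read_le32(const void *p)
{
	const uint8_t *b = p;

	return (uint32_t) b[0]       |
	       (uint32_t) b[1] << 8  |
	       (uint32_t) b[2] << 16 |
	       (uint32_t) b[3] << 24;
}

/*
 * An extent's zstd payload: 4-byte little-endian length prefix, then the
 * compressed frame, padded to sector granularity. Returns the frame, or
 * NULL if the prefix is inconsistent with the extent size -- the same
 * check as "if (real_src_len > src_len - 4) goto err;" above.
 */
static const void *extent_zstd_payload(const void *src, size_t src_len,
				       size_t *real_src_len)
{
	if (src_len < 4)	/* extents are at least a sector; belt and braces */
		return NULL;

	*real_src_len = read_le32(src);
	if (*real_src_len > src_len - 4)
		return NULL;

	return (const char *) src + 4;
}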
@@ -279,7 +284,8 @@ int bch2_bio_uncompress(struct bch_fs *c, struct bio *src,
 	if (ret)
 		goto err;
 
-	if (dst_data.type != BB_NONE)
+	if (dst_data.type != BB_NONE &&
+	    dst_data.type != BB_VMAP)
 		memcpy_to_bio(dst, dst_iter, dst_data.b + (crc.offset << 9));
 err:
 	bio_unmap_or_unbounce(c, dst_data);
@@ -422,7 +428,8 @@ static unsigned __bio_compress(struct bch_fs *c,
 	memset(dst_data.b + *dst_len, 0, pad);
 	*dst_len += pad;
 
-	if (dst_data.type != BB_NONE)
+	if (dst_data.type != BB_NONE &&
+	    dst_data.type != BB_VMAP)
 		memcpy_to_bio(dst, dst->bi_iter, dst_data.b);
 
 	BUG_ON(!*dst_len || *dst_len > dst->bi_iter.bi_size);
@@ -434,7 +441,7 @@ out:
 	bio_unmap_or_unbounce(c, dst_data);
 	return compression_type;
 err:
-	compression_type = 0;
+	compression_type = BCH_COMPRESSION_TYPE_incompressible;
 	goto out;
 }
 
@@ -527,7 +534,6 @@ void bch2_fs_compress_exit(struct bch_fs *c)
 static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
 {
 	size_t max_extent = c->sb.encoded_extent_max << 9;
-	size_t order = get_order(max_extent);
 	size_t decompress_workspace_size = 0;
 	bool decompress_workspace_needed;
 	ZSTD_parameters params = ZSTD_getParams(0, max_extent, 0);
@@ -561,15 +567,15 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
 
 have_compressed:
 	if (!mempool_initialized(&c->compression_bounce[READ])) {
-		ret = mempool_init_page_pool(&c->compression_bounce[READ],
-					     1, order);
+		ret = mempool_init_kvpmalloc_pool(&c->compression_bounce[READ],
+						  1, max_extent);
 		if (ret)
 			goto out;
 	}
 
 	if (!mempool_initialized(&c->compression_bounce[WRITE])) {
-		ret = mempool_init_page_pool(&c->compression_bounce[WRITE],
-					     1, order);
+		ret = mempool_init_kvpmalloc_pool(&c->compression_bounce[WRITE],
+						  1, max_extent);
 		if (ret)
 			goto out;
 	}
@@ -597,7 +603,7 @@ have_compressed:
 	}
 
 	if (!mempool_initialized(&c->decompress_workspace)) {
-		ret = mempool_init_kmalloc_pool(
+		ret = mempool_init_kvpmalloc_pool(
 			&c->decompress_workspace, 1, decompress_workspace_size);
 		if (ret)
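The mempool conversion at the end is the other half of the cleanup: a page pool sized by get_order() rounds the bounce buffer up to a power-of-two number of pages and hands out struct page pointers, which is what forced the page_address()/virt_to_page() conversions (and the BB_VMALLOC fallback) removed earlier in the diff. A kvpmalloc-backed pool instead stores plain pointers of exactly max_extent bytes, so __bounce_alloc() can return elements directly and bio_unmap_or_unbounce() can free them with a plain mempool_free(buf.b, ...). Roughly what such a pool looks like, assuming mempool_init_kvpmalloc_pool() wraps a kmalloc-with-vmalloc-fallback allocator -- a sketch of the idea, not the bcachefs implementation:

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Allocate exactly `size` bytes: kmalloc first, vmalloc as fallback. */
static void *kvp_mempool_alloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (unsigned long) pool_data;
	void *p = kmalloc(size, gfp_mask | __GFP_NOWARN);

	/* __vmalloc() honors gfp_mask, unlike plain vmalloc() */
	return p ?: __vmalloc(size, gfp_mask);
}

static void kvp_mempool_free(void *element, void *pool_data)
{
	kvfree(element);	/* frees both kmalloc and vmalloc memory */
}

/* One guaranteed element of max_extent bytes, as the init code above sets up. */
static int init_bounce_pool(mempool_t *pool, size_t max_extent)
{
	return mempool_init(pool, 1, kvp_mempool_alloc, kvp_mempool_free,
			    (void *)(unsigned long) max_extent);
}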