Update bcachefs sources to bdf6d7c135 fixup! bcachefs: Kill journal buf bloom filter

diff --git a/libbcachefs/io.c b/libbcachefs/io.c
index 5a3c9eff1b50dbd8a6106098e0e7b15a272dbbb0..1ad4c7d77812998a17d4fec011ce645176d49d81 100644
--- a/libbcachefs/io.c
+++ b/libbcachefs/io.c
@@ -665,11 +665,7 @@ static void init_append_extent(struct bch_write_op *op,
 {
        struct bch_fs *c = op->c;
        struct bkey_i_extent *e;
-       struct open_bucket *ob;
-       unsigned i;
 
-       BUG_ON(crc.compressed_size > wp->sectors_free);
-       wp->sectors_free -= crc.compressed_size;
        op->pos.offset += crc.uncompressed_size;
 
        e = bkey_extent_init(op->insert_keys.top);
@@ -682,22 +678,8 @@ static void init_append_extent(struct bch_write_op *op,
            crc.nonce)
                bch2_extent_crc_append(&e->k_i, crc);
 
-       open_bucket_for_each(c, &wp->ptrs, ob, i) {
-               struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
-               union bch_extent_entry *end =
-                       bkey_val_end(bkey_i_to_s(&e->k_i));
-
-               end->ptr = ob->ptr;
-               end->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
-               end->ptr.cached = !ca->mi.durability ||
-                       (op->flags & BCH_WRITE_CACHED) != 0;
-               end->ptr.offset += ca->mi.bucket_size - ob->sectors_free;
-
-               e->k.u64s++;
-
-               BUG_ON(crc.compressed_size > ob->sectors_free);
-               ob->sectors_free -= crc.compressed_size;
-       }
+       bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, crc.compressed_size,
+                                      op->flags & BCH_WRITE_CACHED);
 
        bch2_keylist_push(&op->insert_keys);
 }
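
The open-coded loop deleted above is folded into a single bch2_alloc_sectors_append_ptrs() call. Judging from the removed lines and the arguments passed, the helper appends one extent pointer per open bucket in the write point and charges the compressed size against wp->sectors_free and each bucket's sectors_free. What follows is a hedged sketch reconstructed from the deleted code; the real helper lives in the allocator code and may differ in detail (bch2_bkey_append_ptr() here is an assumption):

/*
 * Sketch only: reconstructed from the loop removed above, not the actual
 * bch2_alloc_sectors_append_ptrs() implementation.
 */
static void alloc_sectors_append_ptrs_sketch(struct bch_fs *c,
					     struct write_point *wp,
					     struct bkey_i *k,
					     unsigned sectors,
					     bool cached)
{
	struct open_bucket *ob;
	unsigned i;

	BUG_ON(sectors > wp->sectors_free);
	wp->sectors_free -= sectors;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
		struct bch_extent_ptr ptr = ob->ptr;

		/* Mirror the deleted logic: cache the pointer if the device
		 * has no durability or the write was flagged as cached. */
		ptr.cached = cached || !ca->mi.durability;
		ptr.offset += ca->mi.bucket_size - ob->sectors_free;

		bch2_bkey_append_ptr(k, ptr);	/* assumed extent helper */

		BUG_ON(sectors > ob->sectors_free);
		ob->sectors_free -= sectors;
	}
}
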
@@ -738,7 +720,7 @@ static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
         */
        bch2_bio_alloc_pages_pool(c, bio,
                                  min_t(unsigned, output_available,
-                                       c->sb.encoded_extent_max << 9));
+                                       c->opts.encoded_extent_max));
 
        if (bio->bi_iter.bi_size < output_available)
                *page_alloc_failed =
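
c->sb.encoded_extent_max was kept in 512-byte sectors, hence the << 9; the replacement c->opts.encoded_extent_max is evidently already a byte count, so the shift disappears (the same substitution shows up in bch2_write_extent() and bch2_fs_io_init() below). A minimal illustration of the unit change, with example values that are assumptions:

/* Sectors are 512 bytes, so a sector count becomes bytes via << 9.
 * Example values only; the real limit comes from the options. */
unsigned encoded_extent_max_sectors = 128;        /* old superblock units   */
unsigned encoded_extent_max_bytes   = 128 << 9;   /* = 65536, new opts units */
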
@@ -782,6 +764,7 @@ static int bch2_write_decrypt(struct bch_write_op *op)
        struct bch_fs *c = op->c;
        struct nonce nonce = extent_nonce(op->version, op->crc);
        struct bch_csum csum;
+       int ret;
 
        if (!bch2_csum_type_is_encryption(op->crc.csum_type))
                return 0;
@@ -796,10 +779,10 @@ static int bch2_write_decrypt(struct bch_write_op *op)
        if (bch2_crc_cmp(op->crc.csum, csum))
                return -EIO;
 
-       bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
+       ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
        op->crc.csum_type = 0;
        op->crc.csum = (struct bch_csum) { 0, 0 };
-       return 0;
+       return ret;
 }
 
 static enum prep_encoded_ret {
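
bch2_encrypt_bio() evidently can fail now, so bch2_write_decrypt() forwards its return value instead of assuming success, and the read-side hunks below do the same. For context, a generic illustration of why in-kernel encryption is fallible, using the standard crypto API; this is not the bcachefs implementation:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

/*
 * Generic illustration, not bcachefs code: a symmetric-cipher request
 * returns an int, which is presumably why bch2_encrypt_bio() now has a
 * return value that callers must check.
 */
static int encrypt_buf_example(struct crypto_sync_skcipher *tfm,
			       void *buf, unsigned len, u8 *iv)
{
	struct scatterlist sg;
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	sg_init_one(&sg, buf, len);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* Can fail (e.g. allocation failure in the driver): propagate it. */
	return crypto_skcipher_encrypt(req);
}
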
@@ -935,8 +918,8 @@ static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
                size_t dst_len, src_len;
 
                if (page_alloc_failed &&
-                   bio_sectors(dst) < wp->sectors_free &&
-                   bio_sectors(dst) < c->sb.encoded_extent_max)
+                   dst->bi_iter.bi_size  < (wp->sectors_free << 9) &&
+                   dst->bi_iter.bi_size < c->opts.encoded_extent_max)
                        break;
 
                BUG_ON(op->compression_type &&
@@ -956,7 +939,7 @@ static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
 
                        if (op->csum_type)
                                dst_len = min_t(unsigned, dst_len,
-                                               c->sb.encoded_extent_max << 9);
+                                               c->opts.encoded_extent_max);
 
                        if (bounce) {
                                swap(dst->bi_iter.bi_size, dst_len);
@@ -1014,8 +997,11 @@ static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
                        crc.live_size           = src_len >> 9;
 
                        swap(dst->bi_iter.bi_size, dst_len);
-                       bch2_encrypt_bio(c, op->csum_type,
-                                        extent_nonce(version, crc), dst);
+                       ret = bch2_encrypt_bio(c, op->csum_type,
+                                              extent_nonce(version, crc), dst);
+                       if (ret)
+                               goto err;
+
                        crc.csum = bch2_checksum_bio(c, op->csum_type,
                                         extent_nonce(version, crc), dst);
                        crc.csum_type = op->csum_type;
@@ -1073,7 +1059,7 @@ static void __bch2_write(struct closure *cl)
        struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
        struct bch_fs *c = op->c;
        struct write_point *wp;
-       struct bio *bio;
+       struct bio *bio = NULL;
        bool skip_put = true;
        unsigned nofs_flags;
        int ret;
@@ -1111,7 +1097,7 @@ again:
                 */
                wp = bch2_alloc_sectors_start(c,
                        op->target,
-                       op->opts.erasure_code,
+                       op->opts.erasure_code && !(op->flags & BCH_WRITE_CACHED),
                        op->write_point,
                        &op->devs_have,
                        op->nr_replicas,
@@ -1289,7 +1275,7 @@ void bch2_write(struct closure *cl)
        bch2_keylist_init(&op->insert_keys, op->inline_keys);
        wbio_init(bio)->put_bio = false;
 
-       if (bio_sectors(bio) & (c->opts.block_size - 1)) {
+       if (bio->bi_iter.bi_size & (c->opts.block_size - 1)) {
                bch_err_inum_ratelimited(c, op->pos.inode,
                                         "misaligned write");
                op->error = -EIO;
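
The misaligned-write check now tests the bio's byte size against c->opts.block_size, which (like encoded_extent_max) appears to be a byte count after this update. The mask trick requires block_size to be a power of two; a minimal illustration with assumed example values:

/* size & (block_size - 1) is non-zero exactly when size is not a
 * multiple of block_size (block_size must be a power of two). */
static bool misaligned(unsigned bytes, unsigned block_size)
{
	return bytes & (block_size - 1);  /* e.g. 6144 & 4095 == 2048 -> misaligned */
}
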
@@ -1302,6 +1288,7 @@ void bch2_write(struct closure *cl)
                goto err;
        }
 
+       this_cpu_add(c->counters[BCH_COUNTER_io_write], bio_sectors(bio));
        bch2_increment_clock(c, bio_sectors(bio), WRITE);
 
        data_len = min_t(u64, bio->bi_iter.bi_size,
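
The new this_cpu_add() line accounts written sectors (a later hunk does the same for reads) in per-CPU counters, avoiding a shared atomic on the IO path. A generic illustration of the per-CPU counter pattern; the actual bcachefs counter layout is assumed, not copied:

#include <linux/percpu.h>

/* Generic illustration, not bcachefs code. */
static DEFINE_PER_CPU(u64, io_write_sectors);

static inline void count_write(unsigned sectors)
{
	this_cpu_add(io_write_sectors, sectors);	/* lock-free, per-CPU */
}

static u64 total_write_sectors(void)
{
	u64 sum = 0;
	int cpu;

	/* Reading the counter back means folding the per-CPU copies. */
	for_each_possible_cpu(cpu)
		sum += per_cpu(io_write_sectors, cpu);
	return sum;
}
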
@@ -1790,6 +1777,7 @@ static void __bch2_read_endio(struct work_struct *work)
        struct nonce nonce = extent_nonce(rbio->version, crc);
        unsigned nofs_flags;
        struct bch_csum csum;
+       int ret;
 
        nofs_flags = memalloc_nofs_save();
 
@@ -1824,7 +1812,10 @@ static void __bch2_read_endio(struct work_struct *work)
        crc.live_size   = bvec_iter_sectors(rbio->bvec_iter);
 
        if (crc_is_compressed(crc)) {
-               bch2_encrypt_bio(c, crc.csum_type, nonce, src);
+               ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
+               if (ret)
+                       goto decrypt_err;
+
                if (bch2_bio_uncompress(c, src, dst, dst_iter, crc))
                        goto decompression_err;
        } else {
@@ -1835,7 +1826,9 @@ static void __bch2_read_endio(struct work_struct *work)
                BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size);
                src->bi_iter.bi_size = dst_iter.bi_size;
 
-               bch2_encrypt_bio(c, crc.csum_type, nonce, src);
+               ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
+               if (ret)
+                       goto decrypt_err;
 
                if (rbio->bounce) {
                        struct bvec_iter src_iter = src->bi_iter;
@@ -1848,7 +1841,10 @@ static void __bch2_read_endio(struct work_struct *work)
                 * Re encrypt data we decrypted, so it's consistent with
                 * rbio->crc:
                 */
-               bch2_encrypt_bio(c, crc.csum_type, nonce, src);
+               ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
+               if (ret)
+                       goto decrypt_err;
+
                promote_start(rbio->promote, rbio);
                rbio->promote = NULL;
        }
@@ -1883,6 +1879,11 @@ decompression_err:
                                 "decompression error");
        bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
        goto out;
+decrypt_err:
+       bch_err_inum_ratelimited(c, rbio->read_pos.inode,
+                                "decrypt error");
+       bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
+       goto out;
 }
 
 static void bch2_read_endio(struct bio *bio)
@@ -1911,9 +1912,8 @@ static void bch2_read_endio(struct bio *bio)
                return;
        }
 
-       if (rbio->pick.ptr.cached &&
-           (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
-            ptr_stale(ca, &rbio->pick.ptr))) {
+       if (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
+           ptr_stale(ca, &rbio->pick.ptr)) {
                atomic_long_inc(&c->read_realloc_races);
 
                if (rbio->flags & BCH_READ_RETRY_IF_STALE)
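
With the pick.ptr.cached condition dropped, the staleness check in bch2_read_endio() now applies to dirty pointers as well, matching the new dirty-pointer handling added to __bch2_read_extent() below. "Stale" here means the pointer's recorded bucket generation no longer matches the in-memory one; a hedged sketch of that test (the helper names are assumptions based on bucket_gen() being used in the next hunk, not a copy of the real ptr_stale()):

/*
 * Sketch only: an extent pointer records the generation of the bucket it
 * points into, and becomes stale once that bucket's generation has been
 * bumped, i.e. the bucket was reallocated.
 */
static inline bool ptr_stale_sketch(struct bch_dev *ca,
				    const struct bch_extent_ptr *ptr)
{
	return gen_after(*bucket_gen(ca, PTR_BUCKET_NR(ca, ptr)), ptr->gen);
}
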
@@ -1972,6 +1972,41 @@ err:
        return ret;
 }
 
+static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
+                                                  struct bkey_s_c k,
+                                                  struct bch_extent_ptr ptr)
+{
+       struct bch_fs *c = trans->c;
+       struct bch_dev *ca = bch_dev_bkey_exists(c, ptr.dev);
+       struct btree_iter iter;
+       struct printbuf buf = PRINTBUF;
+       int ret;
+
+       bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
+                            PTR_BUCKET_POS(c, &ptr),
+                            BTREE_ITER_CACHED);
+
+       pr_buf(&buf, "Attempting to read from stale dirty pointer:");
+       pr_indent_push(&buf, 2);
+       pr_newline(&buf);
+
+       bch2_bkey_val_to_text(&buf, c, k);
+       pr_newline(&buf);
+
+       pr_buf(&buf, "memory gen: %u", *bucket_gen(ca, iter.pos.offset));
+
+       ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
+       if (!ret) {
+               pr_newline(&buf);
+               bch2_bkey_val_to_text(&buf, c, k);
+       }
+
+       bch2_fs_inconsistent(c, "%s", buf.buf);
+
+       bch2_trans_iter_exit(trans, &iter);
+       printbuf_exit(&buf);
+}
+
 int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
                       struct bvec_iter iter, struct bpos read_pos,
                       enum btree_id data_btree, struct bkey_s_c k,
@@ -1981,7 +2016,7 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
        struct bch_fs *c = trans->c;
        struct extent_ptr_decoded pick;
        struct bch_read_bio *rbio = NULL;
-       struct bch_dev *ca;
+       struct bch_dev *ca = NULL;
        struct promote_op *promote = NULL;
        bool bounce = false, read_full = false, narrow_crcs = false;
        struct bpos data_pos = bkey_start_pos(k.k);
@@ -1998,7 +2033,7 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
                zero_fill_bio_iter(&orig->bio, iter);
                goto out_read_done;
        }
-
+retry_pick:
        pick_ret = bch2_bkey_pick_read_device(c, k, failed, &pick);
 
        /* hole or reservation - just zero fill: */
@@ -2011,8 +2046,27 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
                goto err;
        }
 
-       if (pick_ret > 0)
-               ca = bch_dev_bkey_exists(c, pick.ptr.dev);
+       ca = bch_dev_bkey_exists(c, pick.ptr.dev);
+
+       /*
+        * Stale dirty pointers are treated as IO errors, but @failed isn't
+        * allocated unless we're in the retry path - so if we're not in the
+        * retry path, don't check here, it'll be caught in bch2_read_endio()
+        * and we'll end up in the retry path:
+        */
+       if ((flags & BCH_READ_IN_RETRY) &&
+           !pick.ptr.cached &&
+           unlikely(ptr_stale(ca, &pick.ptr))) {
+               read_from_stale_dirty_pointer(trans, k, pick.ptr);
+               bch2_mark_io_failure(failed, &pick);
+               goto retry_pick;
+       }
+
+       /*
+        * Unlock the iterator while the btree node's lock is still in
+        * cache, before doing the IO:
+        */
+       bch2_trans_unlock(trans);
 
        if (flags & BCH_READ_NODECODE) {
                /*
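
The transaction is now unlocked here in __bch2_read_extent(), right after the pointer checks and before the IO is issued; the equivalent unlock (and its comment) is removed from the caller in a later hunk. A hedged sketch of the general pattern, using only calls that appear in this diff plus placeholder steps:

/*
 * Sketch of the lookup-then-unlock pattern: grab what you need from the
 * btree while the node lock is held, drop the locks, then do the slow IO.
 * The surrounding steps are placeholders, not bcachefs code.
 */
static int read_one_extent_sketch(struct btree_trans *trans,
				  struct btree_iter *iter)
{
	struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
	int ret = bkey_err(k);

	if (ret)
		return ret;

	/* ... decode the extent / pick a device while locks are held ... */

	bch2_trans_unlock(trans);	/* don't hold btree locks across IO */

	/* ... submit the read bio here ... */
	return 0;
}
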
@@ -2147,6 +2201,7 @@ get_bio:
        if (rbio->bounce)
                trace_read_bounce(&rbio->bio);
 
+       this_cpu_add(c->counters[BCH_COUNTER_io_read], bio_sectors(&rbio->bio));
        bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
 
        /*
@@ -2259,7 +2314,7 @@ retry:
 
        bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
                             SPOS(inum.inum, bvec_iter.bi_sector, snapshot),
-                            BTREE_ITER_SLOTS|BTREE_ITER_FILTER_SNAPSHOTS);
+                            BTREE_ITER_SLOTS);
        while (1) {
                unsigned bytes, sectors, offset_into_extent;
                enum btree_id data_btree = BTREE_ID_extents;
@@ -2300,12 +2355,6 @@ retry:
                 */
                sectors = min(sectors, k.k->size - offset_into_extent);
 
-               /*
-                * Unlock the iterator while the btree node's lock is still in
-                * cache, before doing the IO:
-                */
-               bch2_trans_unlock(&trans);
-
                bytes = min(sectors, bvec_iter_sectors(bvec_iter)) << 9;
                swap(bvec_iter.bi_size, bytes);
 
@@ -2366,8 +2415,8 @@ int bch2_fs_io_init(struct bch_fs *c)
            mempool_init_page_pool(&c->bio_bounce_pages,
                                   max_t(unsigned,
                                         c->opts.btree_node_size,
-                                        c->sb.encoded_extent_max) /
-                                  PAGE_SECTORS, 0) ||
+                                        c->opts.encoded_extent_max) /
+                                  PAGE_SIZE, 0) ||
            rhashtable_init(&c->promote_table, &bch_promote_params))
                return -ENOMEM;
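
With encoded_extent_max now in bytes, the bounce-page mempool is sized as a plain bytes / PAGE_SIZE instead of sectors / PAGE_SECTORS. A worked example with assumed values:

/* Assumed example values; the real ones come from the filesystem options. */
unsigned btree_node_size    = 256 << 10;   /* 256 KiB */
unsigned encoded_extent_max = 64 << 10;    /*  64 KiB */
unsigned pool_pages = max(btree_node_size, encoded_extent_max) / PAGE_SIZE;
/* with 4 KiB pages: 262144 / 4096 = 64 bounce pages kept in the pool */
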