Update bcachefs sources to 6d44812757dd bcachefs: BCH_IOCTL_FSCK_ONLINE
diff --git a/libbcachefs/io_write.c b/libbcachefs/io_write.c
index 613f384366403816dd4a0ca5cf066e8b55b3d8f5..7c904f7d53f975a346ece464506194f02efcff9e 100644
--- a/libbcachefs/io_write.c
+++ b/libbcachefs/io_write.c
@@ -202,6 +202,17 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
        struct btree_iter iter;
        struct bkey_i *k;
        struct bkey_i_inode_v3 *inode;
+       /*
+        * Crazy performance optimization:
+        * Every extent update needs to also update the inode: the inode trigger
+        * will set bi->journal_seq to the journal sequence number of this
+        * transaction - for fsync.
+        *
+        * But if that's the only reason we're updating the inode (we're not
+        * updating bi_size or bi_sectors), then we don't need the inode update
+        * to be journalled - if we crash, the bi_journal_seq update will be
+        * lost, but that's fine.
+        */
        unsigned inode_update_flags = BTREE_UPDATE_NOJOURNAL;
        int ret;
 
@@ -223,7 +234,7 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
 
        inode = bkey_i_to_inode_v3(k);
 
-       if (!(le64_to_cpu(inode->v.bi_flags) & BCH_INODE_I_SIZE_DIRTY) &&
+       if (!(le64_to_cpu(inode->v.bi_flags) & BCH_INODE_i_size_dirty) &&
            new_i_size > le64_to_cpu(inode->v.bi_size)) {
                inode->v.bi_size = cpu_to_le64(new_i_size);
                inode_update_flags = 0;
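The pattern the two hunks above implement — default to an unjournalled inode update, and journal it only when a crash-durable field actually changes — reduces to the following sketch (the condition names are hypothetical stand-ins for the bi_size/bi_sectors checks in the real function):

	/*
	 * Minimal sketch of the optimization: bi_journal_seq alone may be
	 * lost on crash, so only journal the inode update when bi_size or
	 * bi_sectors actually change.
	 */
	unsigned inode_update_flags = BTREE_UPDATE_NOJOURNAL;

	if (i_size_changed || i_sectors_delta)	/* hypothetical conditions */
		inode_update_flags = 0;		/* must be journalled */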
@@ -305,8 +316,8 @@ int bch2_extent_update(struct btree_trans *trans,
                                                  i_sectors_delta) ?:
                bch2_trans_update(trans, iter, k, 0) ?:
                bch2_trans_commit(trans, disk_res, NULL,
-                               BTREE_INSERT_NOCHECK_RW|
-                               BTREE_INSERT_NOFAIL);
+                               BCH_TRANS_COMMIT_no_check_rw|
+                               BCH_TRANS_COMMIT_no_enospc);
        if (unlikely(ret))
                return ret;
 
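The commit chain above relies on the GNU C "a ?: b" extension: each helper returns 0 on success, so the first nonzero error code short-circuits everything after it. A standalone sketch of the idiom, with hypothetical step names:

	/* GNU C: "a ?: b" yields a if a is nonzero, otherwise evaluates b. */
	int ret = step_update_inode()  ?:	/* hypothetical step */
		  step_update_extent() ?:	/* hypothetical step */
		  step_commit();		/* runs only if both succeeded */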
@@ -392,8 +403,7 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
        BUG_ON(c->opts.nochanges);
 
        bkey_for_each_ptr(ptrs, ptr) {
-               BUG_ON(ptr->dev >= BCH_SB_MEMBERS_MAX ||
-                      !c->devs[ptr->dev]);
+               BUG_ON(!bch2_dev_exists2(c, ptr->dev));
 
                ca = bch_dev_bkey_exists(c, ptr->dev);
 
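The replacement helper folds the bounds check and the NULL check together; in this version of the tree, bch2_dev_exists2() is roughly the following inline (note it checks the superblock's device count rather than the compile-time BCH_SB_MEMBERS_MAX):

	/* Approximate definition, for reference: */
	static inline bool bch2_dev_exists2(const struct bch_fs *c, unsigned dev)
	{
		return dev < c->sb.nr_devices && c->devs[dev];
	}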
@@ -569,9 +579,9 @@ static inline void wp_update_state(struct write_point *wp, bool running)
        __wp_update_state(wp, state);
 }
 
-static void bch2_write_index(struct closure *cl)
+static CLOSURE_CALLBACK(bch2_write_index)
 {
-       struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
+       closure_type(op, struct bch_write_op, cl);
        struct write_point *wp = op->wp;
        struct workqueue_struct *wq = index_update_wq(op);
        unsigned long flags;
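Several callbacks in this diff move from taking a struct closure * to the CLOSURE_CALLBACK()/closure_type() pair. As of this source update, those macros in include/linux/closure.h expand roughly to:

	#define CLOSURE_CALLBACK(name)	void name(struct work_struct *ws)

	#define closure_type(name, type, member)				\
		struct closure *cl = container_of(ws, struct closure, work);	\
		type *name = container_of(cl, type, member)

Every closure callback thus becomes a workqueue-compatible function, which is also why the direct invocation later in this diff passes &op->cl.work rather than &op->cl.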
@@ -784,7 +794,7 @@ static int bch2_write_decrypt(struct bch_write_op *op)
         * checksum:
         */
        csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
-       if (bch2_crc_cmp(op->crc.csum, csum))
+       if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
                return -EIO;
 
        ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
@@ -1165,7 +1175,7 @@ static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
                ret = for_each_btree_key_upto_commit(trans, iter, BTREE_ID_extents,
                                     bkey_start_pos(&orig->k), orig->k.p,
                                     BTREE_ITER_INTENT, k,
-                                    NULL, NULL, BTREE_INSERT_NOFAIL, ({
+                                    NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
                        bch2_nocow_write_convert_one_unwritten(trans, &iter, orig, k, op->new_i_size);
                }));
 
@@ -1197,9 +1207,9 @@ static void __bch2_nocow_write_done(struct bch_write_op *op)
                bch2_nocow_write_convert_unwritten(op);
 }
 
-static void bch2_nocow_write_done(struct closure *cl)
+static CLOSURE_CALLBACK(bch2_nocow_write_done)
 {
-       struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
+       closure_type(op, struct bch_write_op, cl);
 
        __bch2_nocow_write_done(op);
        bch2_write_done(cl);
@@ -1352,7 +1362,7 @@ err:
                op->insert_keys.top = op->insert_keys.keys;
        } else if (op->flags & BCH_WRITE_SYNC) {
                closure_sync(&op->cl);
-               bch2_nocow_write_done(&op->cl);
+               bch2_nocow_write_done(&op->cl.work);
        } else {
                /*
                 * XXX
@@ -1555,9 +1565,9 @@ err:
  * If op->discard is true, instead of inserting the data it invalidates the
  * region of the cache represented by op->bio and op->inode.
  */
-void bch2_write(struct closure *cl)
+CLOSURE_CALLBACK(bch2_write)
 {
-       struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
+       closure_type(op, struct bch_write_op, cl);
        struct bio *bio = &op->wbio.bio;
        struct bch_fs *c = op->c;
        unsigned data_len;
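With bch2_write now declared via CLOSURE_CALLBACK(), callers still launch it through the closure machinery; a hedged usage sketch (the workqueue and parent arguments depend on the call site):

	/* Hypothetical caller, op being an initialized bch_write_op: */
	closure_call(&op->cl, bch2_write, NULL, &parent_closure);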