*/
#include "bcachefs.h"
+#include "alloc_background.h"
#include "alloc_foreground.h"
+#include "bkey_on_stack.h"
#include "bset.h"
#include "btree_update.h"
#include "buckets.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
-#include "extents.h"
+#include "extent_update.h"
#include "inode.h"
#include "io.h"
#include "journal.h"
#include <linux/blkdev.h>
#include <linux/random.h>
+#include <linux/sched/mm.h>
#include <trace/events/bcachefs.h>
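+/*
+ * For error messages: print "device removed" for our BLK_STS_REMOVED status,
+ * defer to blk_status_to_str() for everything else:
+ */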
+const char *bch2_blk_status_to_str(blk_status_t status)
+{
+ if (status == BLK_STS_REMOVED)
+ return "device removed";
+ return blk_status_to_str(status);
+}
+
static bool bch2_target_congested(struct bch_fs *c, u16 target)
{
const struct bch_devs_mask *devs;
return false;
rcu_read_lock();
- devs = bch2_target_to_mask(c, target);
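+ /* target unset or invalid: fall back to every device we write user data to */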
+ devs = bch2_target_to_mask(c, target) ?:
+ &c->rw_devs[BCH_DATA_user];
+
for_each_set_bit(d, devs->d, BCH_SB_MEMBERS_MAX) {
ca = rcu_dereference(c->devs[d]);
if (!ca)
for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, old, ret) {
if (!may_allocate &&
- bch2_bkey_nr_ptrs_allocated(old) <
- bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(new))) {
+ bch2_bkey_nr_ptrs_fully_allocated(old) <
+ bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(new))) {
ret = -ENOSPC;
break;
}
if (delta || new_i_size) {
bch2_inode_pack(&inode_p, &inode_u);
bch2_trans_update(trans, inode_iter,
- &inode_p.inode.k_i);
+ &inode_p.inode.k_i, 0);
}
bch2_trans_iter_put(trans, inode_iter);
}
- bch2_trans_update(trans, iter, k);
+ bch2_trans_update(trans, iter, k, 0);
ret = bch2_trans_commit(trans, disk_res, journal_seq,
BTREE_INSERT_NOCHECK_RW|
BTREE_INSERT_NOFAIL|
- BTREE_INSERT_ATOMIC|
BTREE_INSERT_USE_RESERVE);
if (!ret && i_sectors_delta)
*i_sectors_delta += delta;
bch2_disk_reservation_init(c, 0);
struct bkey_i delete;
+ bch2_trans_begin(trans);
+
ret = bkey_err(k);
if (ret)
goto btree_err;
/* create the biggest key we can */
bch2_key_resize(&delete.k, max_sectors);
- bch2_cut_back(end, &delete.k);
-
- bch2_trans_begin_updates(trans);
+ bch2_cut_back(end, &delete);
ret = bch2_extent_update(trans, iter, &delete,
&disk_res, journal_seq,
int bch2_write_index_default(struct bch_write_op *op)
{
struct bch_fs *c = op->c;
+ struct bkey_on_stack sk;
struct keylist *keys = &op->insert_keys;
struct bkey_i *k = bch2_keylist_front(keys);
struct btree_trans trans;
struct btree_iter *iter;
int ret;
+ bkey_on_stack_init(&sk);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
do {
- BKEY_PADDED(k) tmp;
+ bch2_trans_begin(&trans);
- bkey_copy(&tmp.k, bch2_keylist_front(keys));
+ k = bch2_keylist_front(keys);
- bch2_trans_begin_updates(&trans);
+ bkey_on_stack_realloc(&sk, c, k->k.u64s);
+ bkey_copy(sk.k, k);
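+ /* the iterator may have advanced partway into this key on a previous pass: */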
+ bch2_cut_front(iter->pos, sk.k);
- ret = bch2_extent_update(&trans, iter, &tmp.k,
+ ret = bch2_extent_update(&trans, iter, sk.k,
&op->res, op_journal_seq(op),
op->new_i_size, &op->i_sectors_delta);
if (ret == -EINTR)
if (ret)
break;
- if (bkey_cmp(iter->pos, bch2_keylist_front(keys)->k.p) < 0)
- bch2_cut_front(iter->pos, bch2_keylist_front(keys));
- else
+ if (bkey_cmp(iter->pos, k->k.p) >= 0)
bch2_keylist_pop_front(keys);
} while (!bch2_keylist_empty(keys));
bch2_trans_exit(&trans);
+ bkey_on_stack_exit(&sk, c);
return ret;
}
n->c = c;
n->dev = ptr->dev;
- n->have_ioref = bch2_dev_get_ioref(ca, WRITE);
+ n->have_ioref = bch2_dev_get_ioref(ca,
+ type == BCH_DATA_btree ? READ : WRITE);
n->submit_time = local_clock();
n->bio.bi_iter.bi_sector = ptr->offset;
if (!op->error && (op->flags & BCH_WRITE_FLUSH))
op->error = bch2_journal_error(&c->journal);
- if (!(op->flags & BCH_WRITE_NOPUT_RESERVATION))
- bch2_disk_reservation_put(c, &op->res);
+ bch2_disk_reservation_put(c, &op->res);
percpu_ref_put(&c->writes);
bch2_keylist_free(&op->insert_keys, op->inline_keys);
bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
- if (op->end_io)
+ if (!(op->flags & BCH_WRITE_FROM_INTERNAL))
+ up(&c->io_in_flight);
+
+ if (op->end_io) {
+ EBUG_ON(cl->parent);
+ closure_debug_destroy(cl);
op->end_io(op);
- if (cl->parent)
+ } else {
closure_return(cl);
- else
- closure_debug_destroy(cl);
+ }
}
/**
for (src = keys->keys; src != keys->top; src = n) {
n = bkey_next(src);
- bkey_copy(dst, src);
- bch2_bkey_drop_ptrs(bkey_i_to_s(dst), ptr,
- test_bit(ptr->dev, op->failed.d));
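+ /*
+ * For keys that point to actual data, drop pointers to the
+ * devices the write failed on; if no pointers are left, the
+ * whole write failed:
+ */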
+ if (bkey_extent_is_direct_data(&src->k)) {
+ bch2_bkey_drop_ptrs(bkey_i_to_s(src), ptr,
+ test_bit(ptr->dev, op->failed.d));
- if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(dst))) {
- ret = -EIO;
- goto err;
+ if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(src))) {
+ ret = -EIO;
+ goto err;
+ }
}
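+ /* dropped pointers may have shrunk src - compact the keylist as we go: */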
+ if (dst != src)
+ memmove_u64s_down(dst, src, src->u64s);
dst = bkey_next(dst);
}
* particularly want to plumb io_opts all the way through the btree
* update stack right now
*/
- for_each_keylist_key(keys, k)
+ for_each_keylist_key(keys, k) {
bch2_rebalance_add_key(c, bkey_i_to_s_c(k), &op->opts);
+ if (bch2_bkey_is_incompressible(bkey_i_to_s_c(k)))
+ bch2_check_set_feature(op->c, BCH_FEATURE_incompressible);
+ }
+
if (!bch2_keylist_empty(keys)) {
u64 sectors_start = keylist_sectors(keys);
int ret = op->index_update_fn(op);
__bch2_write_index(op);
- if (!op->error && (op->flags & BCH_WRITE_FLUSH)) {
+ if (!(op->flags & BCH_WRITE_DONE)) {
+ continue_at(cl, __bch2_write, index_update_wq(op));
+ } else if (!op->error && (op->flags & BCH_WRITE_FLUSH)) {
bch2_journal_flush_seq_async(&c->journal,
*op_journal_seq(op),
cl);
struct bch_fs *c = wbio->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, wbio->dev);
- if (bch2_dev_io_err_on(bio->bi_status, ca, "data write"))
+ if (bch2_dev_io_err_on(bio->bi_status, ca, "data write: %s",
+ bch2_blk_status_to_str(bio->bi_status)))
set_bit(wbio->dev, op->failed.d);
if (wbio->have_ioref) {
/* Can we just write the entire extent as is? */
if (op->crc.uncompressed_size == op->crc.live_size &&
op->crc.compressed_size <= wp->sectors_free &&
- op->crc.compression_type == op->compression_type) {
- if (!op->crc.compression_type &&
+ (op->crc.compression_type == op->compression_type ||
+ op->incompressible)) {
+ if (!crc_is_compressed(op->crc) &&
op->csum_type != op->crc.csum_type &&
bch2_write_rechecksum(c, op, op->csum_type))
return PREP_ENCODED_CHECKSUM_ERR;
* If the data is compressed and we couldn't write the entire extent as
* is, we have to decompress it:
*/
- if (op->crc.compression_type) {
+ if (crc_is_compressed(op->crc)) {
struct bch_csum csum;
if (bch2_write_decrypt(op))
ret = -EIO;
goto err;
case PREP_ENCODED_CHECKSUM_ERR:
+ BUG();
goto csum_err;
case PREP_ENCODED_DO_WRITE:
/* XXX look for a better place for this */
bch2_csum_type_is_encryption(op->crc.csum_type));
BUG_ON(op->compression_type && !bounce);
- crc.compression_type = op->compression_type
- ? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
- op->compression_type)
+ crc.compression_type = op->incompressible
+ ? BCH_COMPRESSION_TYPE_incompressible
+ : op->compression_type
+ ? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
+ op->compression_type)
: 0;
- if (!crc.compression_type) {
+ if (!crc_is_compressed(crc)) {
dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);
if (bch2_csum_type_is_encryption(op->csum_type)) {
if (bversion_zero(version)) {
- version.lo = atomic64_inc_return(&c->key_version) + 1;
+ version.lo = atomic64_inc_return(&c->key_version);
} else {
crc.nonce = op->nonce;
op->nonce += src_len >> 9;
}
if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
- !crc.compression_type &&
+ !crc_is_compressed(crc) &&
bch2_csum_type_is_encryption(op->crc.csum_type) ==
bch2_csum_type_is_encryption(op->csum_type)) {
/*
struct write_point *wp;
struct bio *bio;
bool skip_put = true;
+ unsigned nofs_flags;
int ret;
+
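+ /* memory allocations here must not recurse back into the filesystem: */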
+ nofs_flags = memalloc_nofs_save();
again:
memset(&op->failed, 0, sizeof(op->failed));
BKEY_EXTENT_U64s_MAX))
goto flush_io;
+ if ((op->flags & BCH_WRITE_FROM_INTERNAL) &&
+ percpu_ref_is_dying(&c->writes)) {
+ ret = -EROFS;
+ goto err;
+ }
+
+ /*
+ * The copygc thread is now global, which means it's no longer
+ * freeing up space on specific disks, which means that
+ * allocations for specific disks may hang arbitrarily long:
+ */
wp = bch2_alloc_sectors_start(c,
op->target,
op->opts.erasure_code,
op->nr_replicas_required,
op->alloc_reserve,
op->flags,
- (op->flags & BCH_WRITE_ALLOC_NOWAIT) ? NULL : cl);
+ (op->flags & (BCH_WRITE_ALLOC_NOWAIT|
+ BCH_WRITE_ONLY_SPECIFIED_DEVS)) ? NULL : cl);
EBUG_ON(!wp);
if (unlikely(IS_ERR(wp))) {
goto flush_io;
}
+ /*
+ * It's possible for the allocator to fail, put us on the
+ * freelist waitlist, and then succeed in one of various retry
+ * paths: if that happens, we need to disable the skip_put
+ * optimization because otherwise there won't necessarily be a
+ * barrier before we free the bch_write_op:
+ */
+ if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
+ skip_put = false;
+
bch2_open_bucket_get(c, wp, &op->open_buckets);
ret = bch2_write_extent(op, wp, &bio);
bch2_alloc_sectors_done(c, wp);
if (ret < 0)
goto err;
- if (ret)
+ if (ret) {
skip_put = false;
+ } else {
+ /*
+ * for the skip_put optimization this has to be set
+ * before we submit the bio:
+ */
+ op->flags |= BCH_WRITE_DONE;
+ }
bio->bi_end_io = bch2_write_endio;
bio->bi_private = &op->cl;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ bio->bi_opf |= REQ_OP_WRITE;
if (!skip_put)
closure_get(bio->bi_private);
key_to_write = (void *) (op->insert_keys.keys_p +
key_to_write_offset);
- bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_USER,
+ bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
key_to_write);
} while (ret);
if (!skip_put)
continue_at(cl, bch2_write_index, index_update_wq(op));
+out:
+ memalloc_nofs_restore(nofs_flags);
return;
err:
op->error = ret;
+ op->flags |= BCH_WRITE_DONE;
continue_at(cl, bch2_write_index, index_update_wq(op));
- return;
+ goto out;
flush_io:
+ /*
+ * If the write can't all be submitted at once, we generally want to
+ * block synchronously as that signals backpressure to the caller.
+ *
+ * However, if we're running out of a workqueue, we can't block here
+ * because we'll be blocking other work items from completing:
+ */
+ if (current->flags & PF_WQ_WORKER) {
+ continue_at(cl, bch2_write_index, index_update_wq(op));
+ goto out;
+ }
+
closure_sync(cl);
if (!bch2_keylist_empty(&op->insert_keys)) {
__bch2_write_index(op);
if (op->error) {
+ op->flags |= BCH_WRITE_DONE;
continue_at_nobarrier(cl, bch2_write_done, NULL);
- return;
+ goto out;
}
}
goto again;
}
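+/*
+ * Small writes that fit in a btree key skip the data path entirely: the bio's
+ * contents are copied into an inline_data key, which is then inserted:
+ */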
+static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
+{
+ struct closure *cl = &op->cl;
+ struct bio *bio = &op->wbio.bio;
+ struct bvec_iter iter;
+ struct bkey_i_inline_data *id;
+ unsigned sectors;
+ int ret;
+
+ bch2_check_set_feature(op->c, BCH_FEATURE_inline_data);
+
+ ret = bch2_keylist_realloc(&op->insert_keys, op->inline_keys,
+ ARRAY_SIZE(op->inline_keys),
+ BKEY_U64s + DIV_ROUND_UP(data_len, 8));
+ if (ret) {
+ op->error = ret;
+ goto err;
+ }
+
+ sectors = bio_sectors(bio);
+ op->pos.offset += sectors;
+
+ id = bkey_inline_data_init(op->insert_keys.top);
+ id->k.p = op->pos;
+ id->k.version = op->version;
+ id->k.size = sectors;
+
+ iter = bio->bi_iter;
+ iter.bi_size = data_len;
+ memcpy_from_bio(id->v.data, bio, iter);
+
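+ /* bkey values are sized in u64s - zero pad out to a multiple of 8 bytes: */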
+ while (data_len & 7)
+ id->v.data[data_len++] = '\0';
+ set_bkey_val_bytes(&id->k, data_len);
+ bch2_keylist_push(&op->insert_keys);
+
+ op->flags |= BCH_WRITE_WROTE_DATA_INLINE;
+ op->flags |= BCH_WRITE_DONE;
+
+ continue_at_nobarrier(cl, bch2_write_index, NULL);
+ return;
+err:
+ bch2_write_done(&op->cl);
+}
+
/**
* bch_write - handle a write to a cache device or flash only volume
*
struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
struct bio *bio = &op->wbio.bio;
struct bch_fs *c = op->c;
+ unsigned data_len;
BUG_ON(!op->nr_replicas);
BUG_ON(!op->write_point.v);
BUG_ON(!bkey_cmp(op->pos, POS_MAX));
+ op->start_time = local_clock();
+ bch2_keylist_init(&op->insert_keys, op->inline_keys);
+ wbio_init(bio)->put_bio = false;
+
if (bio_sectors(bio) & (c->opts.block_size - 1)) {
__bcache_io_error(c, "misaligned write");
op->error = -EIO;
goto err;
}
- op->start_time = local_clock();
-
- bch2_keylist_init(&op->insert_keys, op->inline_keys);
- wbio_init(bio)->put_bio = false;
-
if (c->opts.nochanges ||
!percpu_ref_tryget(&c->writes)) {
- __bcache_io_error(c, "read only");
+ if (!(op->flags & BCH_WRITE_FROM_INTERNAL))
+ __bcache_io_error(c, "read only");
op->error = -EROFS;
goto err;
}
+ /*
+ * Can't ratelimit copygc - we'd deadlock:
+ */
+ if (!(op->flags & BCH_WRITE_FROM_INTERNAL))
+ down(&c->io_in_flight);
+
bch2_increment_clock(c, bio_sectors(bio), WRITE);
+ data_len = min_t(u64, bio->bi_iter.bi_size,
+ op->new_i_size - (op->pos.offset << 9));
+
+ if (c->opts.inline_data &&
+ data_len <= min(block_bytes(c) / 2, 1024U)) {
+ bch2_write_data_inline(op, data_len);
+ return;
+ }
+
continue_at_nobarrier(cl, __bch2_write, NULL);
return;
err:
- if (!(op->flags & BCH_WRITE_NOPUT_RESERVATION))
- bch2_disk_reservation_put(c, &op->res);
- closure_return(cl);
+ bch2_disk_reservation_put(c, &op->res);
+
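+ /* finish the op here the same way bch2_write_done() would: */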
+ if (op->end_io) {
+ EBUG_ON(cl->parent);
+ closure_debug_destroy(cl);
+ op->end_io(op);
+ } else {
+ closure_return(cl);
+ }
}
/* Cache promotion on read */
static struct promote_op *__promote_alloc(struct bch_fs *c,
enum btree_id btree_id,
+ struct bkey_s_c k,
struct bpos pos,
struct extent_ptr_decoded *pick,
struct bch_io_opts opts,
opts,
DATA_PROMOTE,
(struct data_opts) {
- .target = opts.promote_target
+ .target = opts.promote_target,
+ .nr_replicas = 1,
},
- btree_id,
- bkey_s_c_null);
+ btree_id, k);
BUG_ON(ret);
return op;
k.k->type == KEY_TYPE_reflink_v
? BTREE_ID_REFLINK
: BTREE_ID_EXTENTS,
- pos, pick, opts, sectors, rbio);
+ k, pos, pick, opts, sectors, rbio);
if (!promote)
return NULL;
{
struct btree_trans trans;
struct btree_iter *iter;
- BKEY_PADDED(k) tmp;
+ struct bkey_on_stack sk;
struct bkey_s_c k;
int ret;
flags &= ~BCH_READ_LAST_FRAGMENT;
flags |= BCH_READ_MUST_CLONE;
+ bkey_on_stack_init(&sk);
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
if (bkey_err(k))
goto err;
- bkey_reassemble(&tmp.k, k);
- k = bkey_i_to_s_c(&tmp.k);
+ bkey_on_stack_reassemble(&sk, c, k);
+ k = bkey_i_to_s_c(sk.k);
bch2_trans_unlock(&trans);
- if (!bch2_bkey_matches_ptr(c, bkey_i_to_s_c(&tmp.k),
+ if (!bch2_bkey_matches_ptr(c, k,
rbio->pick.ptr,
rbio->pos.offset -
rbio->pick.crc.offset)) {
goto out;
}
- ret = __bch2_read_extent(c, rbio, bvec_iter, k, 0, failed, flags);
+ ret = __bch2_read_extent(&trans, rbio, bvec_iter, k, 0, failed, flags);
if (ret == READ_RETRY)
goto retry;
if (ret)
out:
bch2_rbio_done(rbio);
bch2_trans_exit(&trans);
+ bkey_on_stack_exit(&sk, c);
return;
err:
rbio->bio.bi_status = BLK_STS_IOERR;
{
struct btree_trans trans;
struct btree_iter *iter;
+ struct bkey_on_stack sk;
struct bkey_s_c k;
int ret;
flags &= ~BCH_READ_LAST_FRAGMENT;
flags |= BCH_READ_MUST_CLONE;
+ bkey_on_stack_init(&sk);
bch2_trans_init(&trans, c, 0, 0);
retry:
bch2_trans_begin(&trans);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
POS(inode, bvec_iter.bi_sector),
BTREE_ITER_SLOTS, k, ret) {
- BKEY_PADDED(k) tmp;
unsigned bytes, sectors, offset_into_extent;
- bkey_reassemble(&tmp.k, k);
- k = bkey_i_to_s_c(&tmp.k);
+ bkey_on_stack_reassemble(&sk, c, k);
+ k = bkey_i_to_s_c(sk.k);
offset_into_extent = iter->pos.offset -
bkey_start_offset(k.k);
sectors = k.k->size - offset_into_extent;
ret = bch2_read_indirect_extent(&trans,
- &offset_into_extent, &tmp.k);
+ &offset_into_extent, &sk);
if (ret)
break;
bytes = min(sectors, bvec_iter_sectors(bvec_iter)) << 9;
swap(bvec_iter.bi_size, bytes);
- ret = __bch2_read_extent(c, rbio, bvec_iter, k,
+ ret = __bch2_read_extent(&trans, rbio, bvec_iter, k,
offset_into_extent, failed, flags);
switch (ret) {
case READ_RETRY:
rbio->bio.bi_status = BLK_STS_IOERR;
out:
bch2_trans_exit(&trans);
+ bkey_on_stack_exit(&sk, c);
bch2_rbio_done(rbio);
}
}
}
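+/*
+ * Narrowing a checksum: if we verified a checksum that covers more data than
+ * the read actually needed, we can rewrite the key with a checksum covering
+ * just the live portion, so future reads don't have to read and checksum the
+ * entire extent:
+ */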
-static void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
+static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
+ struct bch_read_bio *rbio)
{
struct bch_fs *c = rbio->c;
- struct btree_trans trans;
- struct btree_iter *iter;
- struct bkey_s_c k;
- BKEY_PADDED(k) new;
- struct bch_extent_crc_unpacked new_crc;
u64 data_offset = rbio->pos.offset - rbio->pick.crc.offset;
- int ret;
-
- if (rbio->pick.crc.compression_type)
- return;
+ struct bch_extent_crc_unpacked new_crc;
+ struct btree_iter *iter = NULL;
+ struct bkey_i *new;
+ struct bkey_s_c k;
+ int ret = 0;
- bch2_trans_init(&trans, c, 0, 0);
-retry:
- bch2_trans_begin(&trans);
+ if (crc_is_compressed(rbio->pick.crc))
+ return 0;
- iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, rbio->pos,
+ iter = bch2_trans_get_iter(trans, BTREE_ID_EXTENTS, rbio->pos,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+ if ((ret = PTR_ERR_OR_ZERO(iter)))
+ goto out;
+
k = bch2_btree_iter_peek_slot(iter);
- if (IS_ERR_OR_NULL(k.k))
+ if ((ret = bkey_err(k)))
+ goto out;
+
+ /*
+ * going to be temporarily appending another checksum entry:
+ */
+ new = bch2_trans_kmalloc(trans, bkey_bytes(k.k) +
+ BKEY_EXTENT_U64s_MAX * 8);
+ if ((ret = PTR_ERR_OR_ZERO(new)))
goto out;
- bkey_reassemble(&new.k, k);
- k = bkey_i_to_s_c(&new.k);
+ bkey_reassemble(new, k);
+ k = bkey_i_to_s_c(new);
if (bversion_cmp(k.k->version, rbio->version) ||
!bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset))
bkey_start_offset(k.k) - data_offset, k.k->size,
rbio->pick.crc.csum_type)) {
bch_err(c, "error verifying existing checksum while narrowing checksum (memory corruption?)");
+ ret = 0;
goto out;
}
- if (!bch2_bkey_narrow_crcs(&new.k, new_crc))
+ if (!bch2_bkey_narrow_crcs(new, new_crc))
goto out;
- bch2_trans_update(&trans, iter, &new.k);
- ret = bch2_trans_commit(&trans, NULL, NULL,
- BTREE_INSERT_ATOMIC|
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_NOWAIT);
- if (ret == -EINTR)
- goto retry;
+ bch2_trans_update(trans, iter, new, 0);
out:
- bch2_trans_exit(&trans);
+ bch2_trans_iter_put(trans, iter);
+ return ret;
+}
+
+static noinline void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
+{
+ bch2_trans_do(rbio->c, NULL, NULL, BTREE_INSERT_NOFAIL,
+ __bch2_rbio_narrow_crcs(&trans, rbio));
}
/* Inner part that may run in process context */
crc.offset += rbio->offset_into_extent;
crc.live_size = bvec_iter_sectors(rbio->bvec_iter);
- if (crc.compression_type != BCH_COMPRESSION_NONE) {
+ if (crc_is_compressed(crc)) {
bch2_encrypt_bio(c, crc.csum_type, nonce, src);
if (bch2_bio_uncompress(c, src, dst, dst_iter, crc))
goto decompression_err;
if (!rbio->split)
rbio->bio.bi_end_io = rbio->end_io;
- if (bch2_dev_io_err_on(bio->bi_status, ca, "data read")) {
+ if (bch2_dev_io_err_on(bio->bi_status, ca, "data read: %s",
+ bch2_blk_status_to_str(bio->bi_status))) {
bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status);
return;
}
}
if (rbio->narrow_crcs ||
- rbio->pick.crc.compression_type ||
+ crc_is_compressed(rbio->pick.crc) ||
bch2_csum_type_is_encryption(rbio->pick.crc.csum_type))
context = RBIO_CONTEXT_UNBOUND, wq = system_unbound_wq;
else if (rbio->pick.crc.csum_type)
int __bch2_read_indirect_extent(struct btree_trans *trans,
unsigned *offset_into_extent,
- struct bkey_i *orig_k)
+ struct bkey_on_stack *orig_k)
{
struct btree_iter *iter;
struct bkey_s_c k;
u64 reflink_offset;
int ret;
- reflink_offset = le64_to_cpu(bkey_i_to_reflink_p(orig_k)->v.idx) +
+ reflink_offset = le64_to_cpu(bkey_i_to_reflink_p(orig_k->k)->v.idx) +
*offset_into_extent;
iter = bch2_trans_get_iter(trans, BTREE_ID_REFLINK,
}
*offset_into_extent = iter->pos.offset - bkey_start_offset(k.k);
- bkey_reassemble(orig_k, k);
+ bkey_on_stack_reassemble(orig_k, trans->c, k);
err:
bch2_trans_iter_put(trans, iter);
return ret;
}
-int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
+int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
struct bvec_iter iter, struct bkey_s_c k,
unsigned offset_into_extent,
struct bch_io_failures *failed, unsigned flags)
{
+ struct bch_fs *c = trans->c;
struct extent_ptr_decoded pick;
struct bch_read_bio *rbio = NULL;
struct bch_dev *ca;
struct bpos pos = bkey_start_pos(k.k);
int pick_ret;
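+ /*
+ * Inline extents keep their data in the key itself: copy it straight
+ * into the bio and zero fill the remainder - no device IO needed:
+ */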
+ if (k.k->type == KEY_TYPE_inline_data) {
+ struct bkey_s_c_inline_data d = bkey_s_c_to_inline_data(k);
+ unsigned bytes = min_t(unsigned, iter.bi_size,
+ bkey_val_bytes(d.k));
+
+ swap(iter.bi_size, bytes);
+ memcpy_to_bio(&orig->bio, iter, d.v->data);
+ swap(iter.bi_size, bytes);
+ bio_advance_iter(&orig->bio, &iter, bytes);
+ zero_fill_bio_iter(&orig->bio, iter);
+ goto out_read_done;
+ }
+
pick_ret = bch2_bkey_pick_read_device(c, k, failed, &pick);
/* hole or reservation - just zero fill: */
goto hole;
iter.bi_size = pick.crc.compressed_size << 9;
- goto noclone;
+ goto get_bio;
}
if (!(flags & BCH_READ_LAST_FRAGMENT) ||
EBUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);
- if (pick.crc.compression_type != BCH_COMPRESSION_NONE ||
+ if (crc_is_compressed(pick.crc) ||
(pick.crc.csum_type != BCH_CSUM_NONE &&
(bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
(bch2_csum_type_is_encryption(pick.crc.csum_type) &&
&rbio, &bounce, &read_full);
if (!read_full) {
- EBUG_ON(pick.crc.compression_type);
+ EBUG_ON(crc_is_compressed(pick.crc));
EBUG_ON(pick.crc.csum_type &&
(bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
bvec_iter_sectors(iter) != pick.crc.live_size ||
pick.crc.live_size = bvec_iter_sectors(iter);
offset_into_extent = 0;
}
-
+get_bio:
if (rbio) {
/*
* promote already allocated bounce rbio:
rbio->bio.bi_iter = iter;
rbio->split = true;
} else {
-noclone:
rbio = orig;
rbio->bio.bi_iter = iter;
EBUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN));
bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
- rcu_read_lock();
- bucket_io_clock_reset(c, ca, PTR_BUCKET_NR(ca, &pick.ptr), READ);
- rcu_read_unlock();
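+ /* for cached data, note the read in the bucket's IO clock (LRU eviction): */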
+ if (pick.ptr.cached)
+ bch2_bucket_io_time_reset(trans, pick.ptr.dev,
+ PTR_BUCKET_NR(ca, &pick.ptr), READ);
if (!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT))) {
bio_inc_remaining(&orig->bio);
goto out;
}
- this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_USER],
+ this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_user],
bio_sectors(&rbio->bio));
bio_set_dev(&rbio->bio, ca->disk_sb.bdev);
{
struct btree_trans trans;
struct btree_iter *iter;
+ struct bkey_on_stack sk;
struct bkey_s_c k;
unsigned flags = BCH_READ_RETRY_IF_STALE|
BCH_READ_MAY_PROMOTE|
rbio->c = c;
rbio->start_time = local_clock();
+ bkey_on_stack_init(&sk);
bch2_trans_init(&trans, c, 0, 0);
retry:
bch2_trans_begin(&trans);
POS(inode, rbio->bio.bi_iter.bi_sector),
BTREE_ITER_SLOTS);
while (1) {
- BKEY_PADDED(k) tmp;
unsigned bytes, sectors, offset_into_extent;
bch2_btree_iter_set_pos(iter,
if (ret)
goto err;
- bkey_reassemble(&tmp.k, k);
- k = bkey_i_to_s_c(&tmp.k);
-
offset_into_extent = iter->pos.offset -
bkey_start_offset(k.k);
sectors = k.k->size - offset_into_extent;
+ bkey_on_stack_reassemble(&sk, c, k);
+ k = bkey_i_to_s_c(sk.k);
+
ret = bch2_read_indirect_extent(&trans,
- &offset_into_extent, &tmp.k);
+ &offset_into_extent, &sk);
if (ret)
goto err;
if (rbio->bio.bi_iter.bi_size == bytes)
flags |= BCH_READ_LAST_FRAGMENT;
- bch2_read_extent(c, rbio, k, offset_into_extent, flags);
+ bch2_read_extent(&trans, rbio, k, offset_into_extent, flags);
if (flags & BCH_READ_LAST_FRAGMENT)
break;
}
out:
bch2_trans_exit(&trans);
+ bkey_on_stack_exit(&sk, c);
return;
err:
if (ret == -EINTR)