ca = bch_dev_bkey_exists(c, ptr->dev);
if (to_entry(ptr + 1) < ptrs.end) {
- n = to_wbio(bio_clone_fast(&wbio->bio, GFP_NOIO,
- &ca->replica_set));
+ n = to_wbio(bio_alloc_clone(NULL, &wbio->bio,
+ GFP_NOIO, &ca->replica_set));
n->bio.bi_end_io = wbio->bio.bi_end_io;
n->bio.bi_private = wbio->bio.bi_private;
{
struct bch_fs *c = op->c;
struct bkey_i_extent *e;
- struct open_bucket *ob;
- unsigned i;
- BUG_ON(crc.compressed_size > wp->sectors_free);
- wp->sectors_free -= crc.compressed_size;
op->pos.offset += crc.uncompressed_size;
e = bkey_extent_init(op->insert_keys.top);
crc.nonce)
bch2_extent_crc_append(&e->k_i, crc);
- open_bucket_for_each(c, &wp->ptrs, ob, i) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
- union bch_extent_entry *end =
- bkey_val_end(bkey_i_to_s(&e->k_i));
-
- end->ptr = ob->ptr;
- end->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
- end->ptr.cached = !ca->mi.durability ||
- (op->flags & BCH_WRITE_CACHED) != 0;
- end->ptr.offset += ca->mi.bucket_size - ob->sectors_free;
-
- e->k.u64s++;
-
- BUG_ON(crc.compressed_size > ob->sectors_free);
- ob->sectors_free -= crc.compressed_size;
- }
+ bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, crc.compressed_size,
+ op->flags & BCH_WRITE_CACHED);
bch2_keylist_push(&op->insert_keys);
}
pages = min(pages, BIO_MAX_VECS);
- bio = bio_alloc_bioset(GFP_NOIO, pages, &c->bio_write);
+ bio = bio_alloc_bioset(NULL, pages, 0,
+ GFP_NOIO, &c->bio_write);
wbio = wbio_init(bio);
wbio->put_bio = true;
/* copy WRITE_SYNC flag */
*/
bch2_bio_alloc_pages_pool(c, bio,
min_t(unsigned, output_available,
- c->sb.encoded_extent_max << 9));
+ c->opts.encoded_extent_max));
if (bio->bi_iter.bi_size < output_available)
*page_alloc_failed =
struct bch_fs *c = op->c;
struct nonce nonce = extent_nonce(op->version, op->crc);
struct bch_csum csum;
+ int ret;
if (!bch2_csum_type_is_encryption(op->crc.csum_type))
return 0;
if (bch2_crc_cmp(op->crc.csum, csum))
return -EIO;
- bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
+ ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
op->crc.csum_type = 0;
op->crc.csum = (struct bch_csum) { 0, 0 };
- return 0;
+ return ret;
}
static enum prep_encoded_ret {
size_t dst_len, src_len;
if (page_alloc_failed &&
- bio_sectors(dst) < wp->sectors_free &&
- bio_sectors(dst) < c->sb.encoded_extent_max)
+ dst->bi_iter.bi_size < (wp->sectors_free << 9) &&
+ dst->bi_iter.bi_size < c->opts.encoded_extent_max)
break;
BUG_ON(op->compression_type &&
if (op->csum_type)
dst_len = min_t(unsigned, dst_len,
- c->sb.encoded_extent_max << 9);
+ c->opts.encoded_extent_max);
if (bounce) {
swap(dst->bi_iter.bi_size, dst_len);
crc.live_size = src_len >> 9;
swap(dst->bi_iter.bi_size, dst_len);
- bch2_encrypt_bio(c, op->csum_type,
- extent_nonce(version, crc), dst);
+ ret = bch2_encrypt_bio(c, op->csum_type,
+ extent_nonce(version, crc), dst);
+ if (ret)
+ goto err;
+
crc.csum = bch2_checksum_bio(c, op->csum_type,
extent_nonce(version, crc), dst);
crc.csum_type = op->csum_type;
*_dst = dst;
return more;
csum_err:
- bch_err(c, "error verifying existing checksum while "
- "rewriting existing data (memory corruption?)");
+ bch_err(c, "error verifying existing checksum while rewriting existing data (memory corruption?)");
ret = -EIO;
err:
if (to_wbio(dst)->bounce)
struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
struct bch_fs *c = op->c;
struct write_point *wp;
- struct bio *bio;
+ struct bio *bio = NULL;
bool skip_put = true;
unsigned nofs_flags;
int ret;
BKEY_EXTENT_U64s_MAX))
goto flush_io;
- if ((op->flags & BCH_WRITE_FROM_INTERNAL) &&
- percpu_ref_is_dying(&c->writes)) {
- ret = -EROFS;
- goto err;
- }
-
/*
* The copygc thread is now global, which means it's no longer
* freeing up space on specific disks, which means that
*/
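+ /* cached writes are never erasure coded */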
wp = bch2_alloc_sectors_start(c,
op->target,
- op->opts.erasure_code,
+ op->opts.erasure_code && !(op->flags & BCH_WRITE_CACHED),
op->write_point,
&op->devs_have,
op->nr_replicas,
bch2_keylist_init(&op->insert_keys, op->inline_keys);
wbio_init(bio)->put_bio = false;
- if (bio_sectors(bio) & (c->opts.block_size - 1)) {
+ if (bio->bi_iter.bi_size & (c->opts.block_size - 1)) {
bch_err_inum_ratelimited(c, op->pos.inode,
"misaligned write");
op->error = -EIO;
}
if (c->opts.nochanges ||
- !percpu_ref_tryget(&c->writes)) {
+ !percpu_ref_tryget_live(&c->writes)) {
op->error = -EROFS;
goto err;
}
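+ /* account sectors written towards the io_write counter */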
+ this_cpu_add(c->counters[BCH_COUNTER_io_write], bio_sectors(bio));
bch2_increment_clock(c, bio_sectors(bio), WRITE);
data_len = min_t(u64, bio->bi_iter.bi_size,
struct rhash_head hash;
struct bpos pos;
- struct migrate_write write;
+ struct data_update write;
struct bio_vec bi_inline_vecs[0]; /* must be last */
};
bch2_time_stats_update(&c->times[BCH_TIME_data_promote],
op->start_time);
- bch2_bio_free_pages_pool(c, &op->write.op.wbio.bio);
+ bch2_data_update_exit(&op->write);
promote_free(c, op);
}
static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
{
- struct bch_fs *c = rbio->c;
struct closure *cl = &op->cl;
struct bio *bio = &op->write.op.wbio.bio;
sizeof(struct bio_vec) * rbio->bio.bi_vcnt);
swap(bio->bi_vcnt, rbio->bio.bi_vcnt);
- bch2_migrate_read_done(&op->write, rbio);
-
closure_init(cl, NULL);
- closure_call(&op->write.op.cl, bch2_write, c->btree_update_wq, cl);
+ bch2_data_update_read_done(&op->write, rbio->pick.crc, cl);
closure_return_with_destructor(cl, promote_done);
}
unsigned pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
int ret;
- if (!percpu_ref_tryget(&c->writes))
+ if (!percpu_ref_tryget_live(&c->writes))
return NULL;
op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages, GFP_NOIO);
goto err;
rbio_init(&(*rbio)->bio, opts);
- bio_init(&(*rbio)->bio, (*rbio)->bio.bi_inline_vecs, pages);
+ bio_init(&(*rbio)->bio, NULL, (*rbio)->bio.bi_inline_vecs, pages, 0);
if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9,
GFP_NOIO))
goto err;
bio = &op->write.op.wbio.bio;
- bio_init(bio, bio->bi_inline_vecs, pages);
+ bio_init(bio, NULL, bio->bi_inline_vecs, pages, 0);
- ret = bch2_migrate_write_init(c, &op->write,
+ ret = bch2_data_update_init(c, &op->write,
writepoint_hashed((unsigned long) current),
opts,
- DATA_PROMOTE,
- (struct data_opts) {
+ (struct data_update_opts) {
.target = opts.promote_target,
- .nr_replicas = 1,
+ .extra_replicas = 1,
+ .write_flags = BCH_WRITE_ALLOC_NOWAIT|BCH_WRITE_CACHED,
},
btree_id, k);
BUG_ON(ret);
struct nonce nonce = extent_nonce(rbio->version, crc);
unsigned nofs_flags;
struct bch_csum csum;
+ int ret;
nofs_flags = memalloc_nofs_save();
crc.live_size = bvec_iter_sectors(rbio->bvec_iter);
if (crc_is_compressed(crc)) {
- bch2_encrypt_bio(c, crc.csum_type, nonce, src);
+ ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
+ if (ret)
+ goto decrypt_err;
+
if (bch2_bio_uncompress(c, src, dst, dst_iter, crc))
goto decompression_err;
} else {
BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size);
src->bi_iter.bi_size = dst_iter.bi_size;
- bch2_encrypt_bio(c, crc.csum_type, nonce, src);
+ ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
+ if (ret)
+ goto decrypt_err;
if (rbio->bounce) {
struct bvec_iter src_iter = src->bi_iter;
* Re-encrypt data we decrypted, so it's consistent with
* rbio->crc:
*/
- bch2_encrypt_bio(c, crc.csum_type, nonce, src);
+ ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
+ if (ret)
+ goto decrypt_err;
+
promote_start(rbio->promote, rbio);
rbio->promote = NULL;
}
}
bch2_dev_inum_io_error(ca, rbio->read_pos.inode, (u64) rbio->bvec_iter.bi_sector,
- "data checksum error: expected %0llx:%0llx got %0llx:%0llx (type %u)",
+ "data checksum error: expected %0llx:%0llx got %0llx:%0llx (type %s)",
rbio->pick.crc.csum.hi, rbio->pick.crc.csum.lo,
- csum.hi, csum.lo, crc.csum_type);
+ csum.hi, csum.lo, bch2_csum_types[crc.csum_type]);
bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
goto out;
decompression_err:
"decompression error");
bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
goto out;
+decrypt_err:
+ bch_err_inum_ratelimited(c, rbio->read_pos.inode,
+ "decrypt error");
+ bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
+ goto out;
}
static void bch2_read_endio(struct bio *bio)
return;
}
- if (rbio->pick.ptr.cached &&
- (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
- ptr_stale(ca, &rbio->pick.ptr))) {
+ if (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
+ ptr_stale(ca, &rbio->pick.ptr)) {
atomic_long_inc(&c->read_realloc_races);
if (rbio->flags & BCH_READ_RETRY_IF_STALE)
return ret;
}
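+/*
+ * Log the extent we were trying to read, the in-memory bucket gen and the
+ * current alloc key for the stale dirty pointer, then flag the filesystem
+ * inconsistent:
+ */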
+static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
+ struct bkey_s_c k,
+ struct bch_extent_ptr ptr)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr.dev);
+ struct btree_iter iter;
+ struct printbuf buf = PRINTBUF;
+ int ret;
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
+ PTR_BUCKET_POS(c, &ptr),
+ BTREE_ITER_CACHED);
+
+ prt_printf(&buf, "Attempting to read from stale dirty pointer:");
+ printbuf_indent_add(&buf, 2);
+ prt_newline(&buf);
+
+ bch2_bkey_val_to_text(&buf, c, k);
+ prt_newline(&buf);
+
+ prt_printf(&buf, "memory gen: %u", *bucket_gen(ca, iter.pos.offset));
+
+ ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
+ if (!ret) {
+ prt_newline(&buf);
+ bch2_bkey_val_to_text(&buf, c, k);
+ }
+
+ bch2_fs_inconsistent(c, "%s", buf.buf);
+
+ bch2_trans_iter_exit(trans, &iter);
+ printbuf_exit(&buf);
+}
+
int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
struct bvec_iter iter, struct bpos read_pos,
enum btree_id data_btree, struct bkey_s_c k,
struct bch_fs *c = trans->c;
struct extent_ptr_decoded pick;
struct bch_read_bio *rbio = NULL;
- struct bch_dev *ca;
+ struct bch_dev *ca = NULL;
struct promote_op *promote = NULL;
bool bounce = false, read_full = false, narrow_crcs = false;
struct bpos data_pos = bkey_start_pos(k.k);
zero_fill_bio_iter(&orig->bio, iter);
goto out_read_done;
}
-
+retry_pick:
pick_ret = bch2_bkey_pick_read_device(c, k, failed, &pick);
/* hole or reservation - just zero fill: */
goto err;
}
- if (pick_ret > 0)
- ca = bch_dev_bkey_exists(c, pick.ptr.dev);
+ ca = bch_dev_bkey_exists(c, pick.ptr.dev);
+
+ /*
+ * Stale dirty pointers are treated as IO errors, but @failed isn't
+ * allocated unless we're in the retry path - so if we're not in the
+ * retry path, don't check here, it'll be caught in bch2_read_endio()
+ * and we'll end up in the retry path:
+ */
+ if ((flags & BCH_READ_IN_RETRY) &&
+ !pick.ptr.cached &&
+ unlikely(ptr_stale(ca, &pick.ptr))) {
+ read_from_stale_dirty_pointer(trans, k, pick.ptr);
+ bch2_mark_io_failure(failed, &pick);
+ goto retry_pick;
+ }
+
+ /*
+ * Unlock the iterator while the btree node's lock is still in
+ * cache, before doing the IO:
+ */
+ bch2_trans_unlock(trans);
if (flags & BCH_READ_NODECODE) {
/*
} else if (bounce) {
unsigned sectors = pick.crc.compressed_size;
- rbio = rbio_init(bio_alloc_bioset(GFP_NOIO,
+ rbio = rbio_init(bio_alloc_bioset(NULL,
DIV_ROUND_UP(sectors, PAGE_SECTORS),
+ 0,
+ GFP_NOIO,
&c->bio_read_split),
orig->opts);
* from the whole bio, in which case we don't want to retry and
* lose the error)
*/
- rbio = rbio_init(bio_clone_fast(&orig->bio, GFP_NOIO,
- &c->bio_read_split),
+ rbio = rbio_init(bio_alloc_clone(NULL, &orig->bio, GFP_NOIO,
+ &c->bio_read_split),
orig->opts);
rbio->bio.bi_iter = iter;
rbio->split = true;
if (rbio->bounce)
trace_read_bounce(&rbio->bio);
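+ /* account sectors read towards the io_read counter */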
+ this_cpu_add(c->counters[BCH_COUNTER_io_read], bio_sectors(&rbio->bio));
bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
/*
bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
SPOS(inum.inum, bvec_iter.bi_sector, snapshot),
- BTREE_ITER_SLOTS|BTREE_ITER_FILTER_SNAPSHOTS);
+ BTREE_ITER_SLOTS);
while (1) {
unsigned bytes, sectors, offset_into_extent;
enum btree_id data_btree = BTREE_ID_extents;
*/
sectors = min(sectors, k.k->size - offset_into_extent);
- /*
- * Unlock the iterator while the btree node's lock is still in
- * cache, before doing the IO:
- */
- bch2_trans_unlock(&trans);
-
bytes = min(sectors, bvec_iter_sectors(bvec_iter)) << 9;
swap(bvec_iter.bi_size, bytes);
swap(bvec_iter.bi_size, bytes);
bio_advance_iter(&rbio->bio, &bvec_iter, bytes);
+
+ ret = btree_trans_too_many_iters(&trans);
+ if (ret)
+ break;
}
err:
bch2_trans_iter_exit(&trans, &iter);
mempool_init_page_pool(&c->bio_bounce_pages,
max_t(unsigned,
c->opts.btree_node_size,
- c->sb.encoded_extent_max) /
- PAGE_SECTORS, 0) ||
+ c->opts.encoded_extent_max) /
+ PAGE_SIZE, 0) ||
rhashtable_init(&c->promote_table, &bch_promote_params))
return -ENOMEM;