#include "btree_update_interior.h"
#include "buckets.h"
#include "disk_groups.h"
+#include "ec.h"
#include "inode.h"
#include "io.h"
#include "journal_reclaim.h"
if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, old_pos.snapshot)) {
struct bkey_i *update;
- size_t i;
+ u32 *i;
- for (i = 0; i < s.nr; i++)
- if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, s.d[i]))
+ darray_for_each(s.ids, i)
+ if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, *i))
goto next;
update = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
}
}
bch2_trans_iter_exit(trans, &iter);
- kfree(s.d);
+ darray_exit(s.ids);
return ret;
}
struct btree_iter iter;
struct migrate_write *m =
container_of(op, struct migrate_write, op);
+ struct open_bucket *ec_ob = ec_open_bucket(c, &op->open_buckets);
struct keylist *keys = &op->insert_keys;
struct bkey_buf _new, _insert;
int ret = 0;
struct extent_ptr_decoded p;
struct bpos next_pos;
bool did_work = false;
- bool extending = false, should_check_enospc;
+ bool should_check_enospc;
s64 i_sectors_delta = 0, disk_sectors_delta = 0;
bch2_trans_begin(&trans);
op->opts.data_replicas);
ret = bch2_sum_sector_overwrites(&trans, &iter, insert,
- &extending,
&should_check_enospc,
&i_sectors_delta,
&disk_sectors_delta);
if (!ret) {
bch2_btree_iter_set_pos(&iter, next_pos);
atomic_long_inc(&c->extent_migrate_done);
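+			/*
+			 * new extent lives in a bucket that's part of a
+			 * stripe being created: add a backpointer so the
+			 * stripe can be pointed at the extent later
+			 */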
+ if (ec_ob)
+ bch2_ob_add_backpointer(c, ec_ob, &insert->k);
}
err:
		if (ret == -EINTR)
			ret = 0;
	}
if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE) {
- m->op.alloc_reserve = RESERVE_MOVINGGC;
- m->op.flags |= BCH_WRITE_ALLOC_NOWAIT;
+ m->op.alloc_reserve = RESERVE_movinggc;
} else {
/* XXX: this should probably be passed in */
m->op.flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS;
unsigned compressed_sectors = 0;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
- if (p.ptr.dev == data_opts.rewrite_dev &&
- !p.ptr.cached &&
- crc_is_compressed(p.crc))
- compressed_sectors += p.crc.compressed_size;
+ if (p.ptr.dev == data_opts.rewrite_dev) {
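+			/* rewriting a cached replica: keep the new copy cached */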
+ if (p.ptr.cached)
+ m->op.flags |= BCH_WRITE_CACHED;
+
+ if (!p.ptr.cached &&
+ crc_is_compressed(p.crc))
+ compressed_sectors += p.crc.compressed_size;
+ }
if (compressed_sectors) {
ret = bch2_disk_reservation_add(c, &m->op.res,
atomic_sub(io->read_sectors, &ctxt->read_sectors);
io->read_completed = true;
- if (next_pending_write(ctxt))
- wake_up(&ctxt->wait);
-
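+	/*
+	 * waiters may be waiting on an arbitrary condition, not just a
+	 * pending write: wake up unconditionally
+	 */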
+ wake_up(&ctxt->wait);
closure_put(&ctxt->cl);
}
-static void do_pending_writes(struct moving_context *ctxt)
+static void do_pending_writes(struct moving_context *ctxt, struct btree_trans *trans)
{
struct moving_io *io;
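+
+	/*
+	 * the writes we're flushing do their own btree updates, which may
+	 * need locks held by @trans: unlock before issuing them
+	 */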
+ if (trans)
+ bch2_trans_unlock(trans);
+
while ((io = next_pending_write(ctxt))) {
list_del(&io->list);
closure_call(&io->cl, move_write, NULL, &ctxt->cl);
}
}
-#define move_ctxt_wait_event(_ctxt, _cond) \
+#define move_ctxt_wait_event(_ctxt, _trans, _cond) \
do { \
- do_pending_writes(_ctxt); \
+ do_pending_writes(_ctxt, _trans); \
\
if (_cond) \
break; \
	__wait_event((_ctxt)->wait,					\
		     next_pending_write(_ctxt) || (_cond));		\
} while (1)
-static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
+static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt,
+ struct btree_trans *trans)
{
unsigned sectors_pending = atomic_read(&ctxt->write_sectors);
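+	/* wait for an in flight write to complete, i.e. write_sectors to change: */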
- move_ctxt_wait_event(ctxt,
+ move_ctxt_wait_event(ctxt, trans,
!atomic_read(&ctxt->write_sectors) ||
atomic_read(&ctxt->write_sectors) != sectors_pending);
}
unsigned sectors = k.k->size, pages;
int ret = -ENOMEM;
- move_ctxt_wait_event(ctxt,
- atomic_read(&ctxt->write_sectors) <
- SECTORS_IN_FLIGHT_PER_DEVICE);
-
- move_ctxt_wait_event(ctxt,
- atomic_read(&ctxt->read_sectors) <
- SECTORS_IN_FLIGHT_PER_DEVICE);
-
/* write path might have to decompress data: */
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
sectors = max_t(unsigned, sectors, p.crc.uncompressed_size);
goto err;
}
- ret = k.k->type == KEY_TYPE_inode ? 0 : -EIO;
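+	/* bkey_is_inode() matches both KEY_TYPE_inode and KEY_TYPE_inode_v2 */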
+ ret = bkey_is_inode(k.k) ? 0 : -EIO;
if (ret)
goto err;
- ret = bch2_inode_unpack(bkey_s_c_to_inode(k), inode);
+ ret = bch2_inode_unpack(k, inode);
if (ret)
goto err;
err:
schedule_timeout(delay);
if (unlikely(freezing(current))) {
- bch2_trans_unlock(&trans);
- move_ctxt_wait_event(ctxt, list_empty(&ctxt->reads));
+ move_ctxt_wait_event(ctxt, &trans, list_empty(&ctxt->reads));
try_to_freeze();
}
} while (delay);
- bch2_trans_begin(&trans);
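+		/* throttle IO: don't issue more reads/writes until in flight IO drops below the limit */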
+ move_ctxt_wait_event(ctxt, &trans,
+ atomic_read(&ctxt->write_sectors) <
+ SECTORS_IN_FLIGHT_PER_DEVICE);
- k = bch2_btree_iter_peek(&iter);
+ move_ctxt_wait_event(ctxt, &trans,
+ atomic_read(&ctxt->read_sectors) <
+ SECTORS_IN_FLIGHT_PER_DEVICE);
- stats->pos = iter.pos;
+ bch2_trans_begin(&trans);
+ k = bch2_btree_iter_peek(&iter);
if (!k.k)
break;
+
ret = bkey_err(k);
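+		/* on transaction restart, bch2_trans_begin() at the top of the loop retries: */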
+ if (ret == -EINTR)
+ continue;
if (ret)
break;
+
if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
break;
+ stats->pos = iter.pos;
+
if (!bkey_extent_is_direct_data(k.k))
goto next_nondata;
BUG();
}
- /* unlock before doing IO: */
+ /*
+ * The iterator gets unlocked by __bch2_read_extent - need to
+ * save a copy of @k elsewhere:
+ */
bch2_bkey_buf_reassemble(&sk, c, k);
k = bkey_i_to_s_c(sk.k);
- bch2_trans_unlock(&trans);
ret2 = bch2_move_extent(&trans, ctxt, wp, io_opts, btree_id, k,
data_cmd, data_opts);
if (ret2) {
- if (ret2 == -EINTR) {
- bch2_trans_begin(&trans);
+ if (ret2 == -EINTR)
continue;
- }
if (ret2 == -ENOMEM) {
/* memory allocation failure, wait for some IO to finish */
- bch2_move_ctxt_wait_for_io(ctxt);
+ bch2_move_ctxt_wait_for_io(ctxt, &trans);
continue;
}
if (rate)
bch2_ratelimit_increment(rate, k.k->size);
next:
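+		/* count each extent once, regardless of number of replicas: */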
- atomic64_add(k.k->size * bch2_bkey_nr_ptrs_allocated(k),
- &stats->sectors_seen);
+ atomic64_add(k.k->size, &stats->sectors_seen);
next_nondata:
bch2_btree_iter_advance(&iter);
}
}
- move_ctxt_wait_event(&ctxt, list_empty(&ctxt.reads));
+ move_ctxt_wait_event(&ctxt, NULL, list_empty(&ctxt.reads));
closure_sync(&ctxt.cl);
EBUG_ON(atomic_read(&ctxt.write_sectors));