-82c5cc8f00d08f4a315f99595e328d7b74cbd2b7
+043cfba30c743a6faa4e53c5a88a259f8726ac01
#undef x
}
-static int bch2_alloc_read_fn(struct bch_fs *c, struct bkey_s_c k)
+static int bch2_alloc_read_fn(struct btree_trans *trans, struct bkey_s_c k)
{
+ struct bch_fs *c = trans->c;
struct bch_dev *ca;
struct bucket *g;
struct bkey_alloc_unpacked u;
int bch2_alloc_read(struct bch_fs *c)
{
+ struct btree_trans trans;
int ret;
+ bch2_trans_init(&trans, c, 0, 0);
down_read(&c->gc_lock);
- ret = bch2_btree_and_journal_walk(c, BTREE_ID_alloc, bch2_alloc_read_fn);
+ ret = bch2_btree_and_journal_walk(&trans, BTREE_ID_alloc, bch2_alloc_read_fn);
up_read(&c->gc_lock);
+ bch2_trans_exit(&trans);
if (ret) {
bch_err(c, "error reading alloc info: %i", ret);
return ret;
/* marking of btree keys/nodes: */
-static int bch2_gc_mark_key(struct bch_fs *c, enum btree_id btree_id,
+static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
unsigned level, bool is_root,
struct bkey_s_c *k,
u8 *max_stale, bool initial)
{
+ struct bch_fs *c = trans->c;
struct bkey_ptrs_c ptrs;
const struct bch_extent_ptr *ptr;
unsigned flags =
- BTREE_TRIGGER_INSERT|
BTREE_TRIGGER_GC|
(initial ? BTREE_TRIGGER_NOATOMIC : 0);
+ char buf[200];
int ret = 0;
if (initial) {
if (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
fsck_err_on(!bch2_bkey_replicas_marked(c, *k), c,
- "superblock not marked as containing replicas (type %u)",
- k->k->type)) {
+ "superblock not marked as containing replicas\n"
+ " while marking %s",
+ (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) {
ret = bch2_mark_bkey_replicas(c, *k);
if (ret) {
bch_err(c, "error marking bkey replicas: %i", ret);
*max_stale = max(*max_stale, ptr_stale(ca, ptr));
}
- ret = bch2_mark_key(c, *k, flags);
+ ret = bch2_mark_key(trans, *k, flags);
fsck_err:
err:
if (ret)
return ret;
}
-static int btree_gc_mark_node(struct bch_fs *c, struct btree *b, u8 *max_stale,
+static int btree_gc_mark_node(struct btree_trans *trans, struct btree *b, u8 *max_stale,
bool initial)
{
+ struct bch_fs *c = trans->c;
struct btree_node_iter iter;
struct bkey unpacked;
struct bkey_s_c k;
bkey_init(&prev.k->k);
while ((k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked)).k) {
- ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, false,
+ ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, false,
&k, max_stale, initial);
if (ret)
break;
return ret;
}
-static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
+static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree_id,
bool initial, bool metadata_only)
{
- struct btree_trans trans;
+ struct bch_fs *c = trans->c;
struct btree_iter iter;
struct btree *b;
unsigned depth = metadata_only ? 1
u8 max_stale = 0;
int ret = 0;
- bch2_trans_init(&trans, c, 0, 0);
-
gc_pos_set(c, gc_pos_btree(btree_id, POS_MIN, 0));
- __for_each_btree_node(&trans, iter, btree_id, POS_MIN,
+ __for_each_btree_node(trans, iter, btree_id, POS_MIN,
0, depth, BTREE_ITER_PREFETCH, b, ret) {
bch2_verify_btree_nr_keys(b);
gc_pos_set(c, gc_pos_btree_node(b));
- ret = btree_gc_mark_node(c, b, &max_stale, initial);
+ ret = btree_gc_mark_node(trans, b, &max_stale, initial);
if (ret)
break;
if (!initial) {
if (max_stale > 64)
- bch2_btree_node_rewrite(&trans, &iter, b,
+ bch2_btree_node_rewrite(trans, &iter, b,
BTREE_INSERT_NOWAIT|
BTREE_INSERT_GC_LOCK_HELD);
else if (!bch2_btree_gc_rewrite_disabled &&
(bch2_btree_gc_always_rewrite || max_stale > 16))
- bch2_btree_node_rewrite(&trans, &iter,
+ bch2_btree_node_rewrite(trans, &iter,
b, BTREE_INSERT_NOWAIT|
BTREE_INSERT_GC_LOCK_HELD);
}
}
- bch2_trans_iter_exit(&trans, &iter);
+ bch2_trans_iter_exit(trans, &iter);
- bch2_trans_exit(&trans);
if (ret)
return ret;
if (!btree_node_fake(b)) {
struct bkey_s_c k = bkey_i_to_s_c(&b->key);
- ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, true,
+ ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, true,
&k, &max_stale, initial);
}
gc_pos_set(c, gc_pos_btree_root(b->c.btree_id));
return ret;
}
-static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b,
+static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b,
unsigned target_depth)
{
+ struct bch_fs *c = trans->c;
struct btree_and_journal_iter iter;
struct bkey_s_c k;
struct bkey_buf cur, prev;
BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
- ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, false,
+ ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, false,
&k, &max_stale, true);
if (ret) {
bch_err(c, "%s: error %i from bch2_gc_mark_key", __func__, ret);
break;
}
- ret = bch2_gc_btree_init_recurse(c, child,
+ ret = bch2_gc_btree_init_recurse(trans, child,
target_depth);
six_unlock_read(&child->c.lock);
return ret;
}
-static int bch2_gc_btree_init(struct bch_fs *c,
+static int bch2_gc_btree_init(struct btree_trans *trans,
enum btree_id btree_id,
bool metadata_only)
{
+ struct bch_fs *c = trans->c;
struct btree *b;
unsigned target_depth = metadata_only ? 1
: bch2_expensive_debug_checks ? 0
}
if (b->c.level >= target_depth)
- ret = bch2_gc_btree_init_recurse(c, b, target_depth);
+ ret = bch2_gc_btree_init_recurse(trans, b, target_depth);
if (!ret) {
struct bkey_s_c k = bkey_i_to_s_c(&b->key);
- ret = bch2_gc_mark_key(c, b->c.btree_id, b->c.level, true,
+ ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, true,
&k, &max_stale, true);
}
fsck_err:
static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only)
{
+ struct btree_trans trans;
enum btree_id ids[BTREE_ID_NR];
unsigned i;
int ret = 0;
+ bch2_trans_init(&trans, c, 0, 0);
+
for (i = 0; i < BTREE_ID_NR; i++)
ids[i] = i;
bubble_sort(ids, BTREE_ID_NR, btree_id_gc_phase_cmp);
for (i = 0; i < BTREE_ID_NR && !ret; i++)
ret = initial
- ? bch2_gc_btree_init(c, ids[i], metadata_only)
- : bch2_gc_btree(c, ids[i], initial, metadata_only);
+ ? bch2_gc_btree_init(&trans, ids[i], metadata_only)
+ : bch2_gc_btree(&trans, ids[i], initial, metadata_only);
if (ret < 0)
bch_err(c, "%s: ret %i", __func__, ret);
+
+ bch2_trans_exit(&trans);
return ret;
}
for_each_pending_btree_node_free(c, as, d)
if (d->index_update_done)
- bch2_mark_key(c, bkey_i_to_s_c(&d->key),
- BTREE_TRIGGER_INSERT|BTREE_TRIGGER_GC);
+ bch2_mark_key(&trans, bkey_i_to_s_c(&d->key), BTREE_TRIGGER_GC);
mutex_unlock(&c->btree_interior_update_lock);
}
return 0;
}
-static int bch2_gc_reflink_done_initial_fn(struct bch_fs *c, struct bkey_s_c k)
+static int bch2_gc_reflink_done_initial_fn(struct btree_trans *trans,
+ struct bkey_s_c k)
{
+ struct bch_fs *c = trans->c;
struct reflink_gc *r;
const __le64 *refcount = bkey_refcount_c(k);
char buf[200];
if (metadata_only)
return 0;
+ bch2_trans_init(&trans, c, 0, 0);
+
if (initial) {
c->reflink_gc_idx = 0;
- ret = bch2_btree_and_journal_walk(c, BTREE_ID_reflink,
+ ret = bch2_btree_and_journal_walk(&trans, BTREE_ID_reflink,
bch2_gc_reflink_done_initial_fn);
goto out;
}
- bch2_trans_init(&trans, c, 0, 0);
-
for_each_btree_key(&trans, iter, BTREE_ID_reflink, POS_MIN,
BTREE_ITER_PREFETCH, k, ret) {
const __le64 *refcount = bkey_refcount_c(k);
}
fsck_err:
bch2_trans_iter_exit(&trans, &iter);
- bch2_trans_exit(&trans);
out:
genradix_free(&c->reflink_gc_table);
c->reflink_gc_nr = 0;
+ bch2_trans_exit(&trans);
return ret;
}
-static int bch2_gc_reflink_start_initial_fn(struct bch_fs *c, struct bkey_s_c k)
+static int bch2_gc_reflink_start_initial_fn(struct btree_trans *trans,
+ struct bkey_s_c k)
{
+ struct bch_fs *c = trans->c;
struct reflink_gc *r;
const __le64 *refcount = bkey_refcount_c(k);
struct btree_iter iter;
struct bkey_s_c k;
struct reflink_gc *r;
- int ret;
+ int ret = 0;
if (metadata_only)
return 0;
+ bch2_trans_init(&trans, c, 0, 0);
genradix_free(&c->reflink_gc_table);
c->reflink_gc_nr = 0;
- if (initial)
- return bch2_btree_and_journal_walk(c, BTREE_ID_reflink,
- bch2_gc_reflink_start_initial_fn);
-
- bch2_trans_init(&trans, c, 0, 0);
+ if (initial) {
+ ret = bch2_btree_and_journal_walk(&trans, BTREE_ID_reflink,
+ bch2_gc_reflink_start_initial_fn);
+ goto out;
+ }
for_each_btree_key(&trans, iter, BTREE_ID_reflink, POS_MIN,
BTREE_ITER_PREFETCH, k, ret) {
r->refcount = 0;
}
bch2_trans_iter_exit(&trans, &iter);
-
+out:
bch2_trans_exit(&trans);
- return 0;
+ return ret;
}
/**
for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
if (!path->l[i].b) {
- BUG_ON(c->btree_roots[path->btree_id].b->c.level > i);
+ BUG_ON(!path->cached &&
+ c->btree_roots[path->btree_id].b->c.level > i);
break;
}
trans_for_each_update(trans, i)
__btree_path_put(i->path, true);
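+ /* mark/trigger fns now read trans->journal_res.seq: don't leak a stale seq */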
+ memset(&trans->journal_res, 0, sizeof(trans->journal_res));
trans->extra_journal_res = 0;
trans->nr_updates = 0;
trans->mem_top = 0;
BUG_ON(owned_by_allocator == old.owned_by_allocator);
}
-static int bch2_mark_alloc(struct bch_fs *c,
+static int bch2_mark_alloc(struct btree_trans *trans,
struct bkey_s_c old, struct bkey_s_c new,
- u64 journal_seq, unsigned flags)
+ unsigned flags)
{
bool gc = flags & BTREE_TRIGGER_GC;
+ u64 journal_seq = trans->journal_res.seq;
+ struct bch_fs *c = trans->c;
struct bkey_alloc_unpacked u;
struct bch_dev *ca;
struct bucket *g;
: sectors;
}
-static int check_bucket_ref(struct bch_fs *c, struct bkey_s_c k,
+static int check_bucket_ref(struct bch_fs *c,
+ struct bkey_s_c k,
const struct bch_extent_ptr *ptr,
s64 sectors, enum bch_data_type ptr_data_type,
u8 bucket_gen, u8 bucket_data_type,
return 0;
}
-static int mark_stripe_bucket(struct bch_fs *c, struct bkey_s_c k,
- unsigned ptr_idx,
- u64 journal_seq, unsigned flags)
+static int mark_stripe_bucket(struct btree_trans *trans,
+ struct bkey_s_c k,
+ unsigned ptr_idx,
+ u64 journal_seq, unsigned flags)
{
+ struct bch_fs *c = trans->c;
const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
unsigned nr_data = s->nr_blocks - s->nr_redundant;
bool parity = ptr_idx >= nr_data;
return 0;
}
-static int __mark_pointer(struct bch_fs *c, struct bkey_s_c k,
+static int __mark_pointer(struct btree_trans *trans,
+ struct bkey_s_c k,
const struct bch_extent_ptr *ptr,
s64 sectors, enum bch_data_type ptr_data_type,
u8 bucket_gen, u8 *bucket_data_type,
u16 *dst_sectors = !ptr->cached
? dirty_sectors
: cached_sectors;
- int ret = check_bucket_ref(c, k, ptr, sectors, ptr_data_type,
+ int ret = check_bucket_ref(trans->c, k, ptr, sectors, ptr_data_type,
bucket_gen, *bucket_data_type,
*dirty_sectors, *cached_sectors);
return 0;
}
-static int bch2_mark_pointer(struct bch_fs *c, struct bkey_s_c k,
+static int bch2_mark_pointer(struct btree_trans *trans,
+ struct bkey_s_c k,
struct extent_ptr_decoded p,
s64 sectors, enum bch_data_type data_type,
- u64 journal_seq, unsigned flags)
+ unsigned flags)
{
bool gc = flags & BTREE_TRIGGER_GC;
+ u64 journal_seq = trans->journal_res.seq;
+ struct bch_fs *c = trans->c;
struct bucket_mark old, new;
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
struct bucket *g = PTR_BUCKET(ca, &p.ptr, gc);
new.v.counter = old.v.counter = v;
bucket_data_type = new.data_type;
- ret = __mark_pointer(c, k, &p.ptr, sectors, data_type, new.gen,
+ ret = __mark_pointer(trans, k, &p.ptr, sectors,
+ data_type, new.gen,
&bucket_data_type,
&new.dirty_sectors,
&new.cached_sectors);
return 0;
}
-static int bch2_mark_stripe_ptr(struct bch_fs *c,
+static int bch2_mark_stripe_ptr(struct btree_trans *trans,
struct bch_extent_stripe_ptr p,
enum bch_data_type data_type,
s64 sectors,
- unsigned journal_seq, unsigned flags)
+ unsigned flags)
{
bool gc = flags & BTREE_TRIGGER_GC;
+ struct bch_fs *c = trans->c;
struct bch_replicas_padded r;
struct stripe *m;
unsigned i, blocks_nonempty = 0;
spin_unlock(&c->ec_stripes_heap_lock);
r.e.data_type = data_type;
- update_replicas(c, &r.e, sectors, journal_seq, gc);
+ update_replicas(c, &r.e, sectors, trans->journal_res.seq, gc);
return 0;
}
-static int bch2_mark_extent(struct bch_fs *c,
+static int bch2_mark_extent(struct btree_trans *trans,
struct bkey_s_c old, struct bkey_s_c new,
- unsigned journal_seq, unsigned flags)
+ unsigned flags)
{
bool gc = flags & BTREE_TRIGGER_GC;
- struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
+ u64 journal_seq = trans->journal_res.seq;
+ struct bch_fs *c = trans->c;
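+ /* if we're overwriting, mark the key being overwritten, else the new key */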
+ struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
bool stale;
int ret;
- BUG_ON((flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)) ==
- (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE));
-
r.e.data_type = data_type;
r.e.nr_devs = 0;
r.e.nr_required = 1;
if (flags & BTREE_TRIGGER_OVERWRITE)
disk_sectors = -disk_sectors;
- ret = bch2_mark_pointer(c, k, p, disk_sectors, data_type,
- journal_seq, flags);
+ ret = bch2_mark_pointer(trans, k, p, disk_sectors,
+ data_type, flags);
if (ret < 0)
return ret;
dirty_sectors += disk_sectors;
r.e.devs[r.e.nr_devs++] = p.ptr.dev;
} else {
- ret = bch2_mark_stripe_ptr(c, p.ec, data_type,
- disk_sectors, journal_seq, flags);
+ ret = bch2_mark_stripe_ptr(trans, p.ec, data_type,
+ disk_sectors, flags);
if (ret)
return ret;
return 0;
}
-static int bch2_mark_stripe(struct bch_fs *c,
- struct bkey_s_c old, struct bkey_s_c new,
- u64 journal_seq, unsigned flags)
+static int bch2_mark_stripe(struct btree_trans *trans,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
{
bool gc = flags & BTREE_TRIGGER_GC;
+ u64 journal_seq = trans->journal_res.seq;
+ struct bch_fs *c = trans->c;
size_t idx = new.k->p.offset;
const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
? bkey_s_c_to_stripe(old).v : NULL;
BUG_ON(gc && old_s);
if (!m || (old_s && !m->alive)) {
- bch_err_ratelimited(c, "error marking nonexistent stripe %zu",
- idx);
+ char buf1[200], buf2[200];
+
+ bch2_bkey_val_to_text(&PBUF(buf1), c, old);
+ bch2_bkey_val_to_text(&PBUF(buf2), c, new);
+ bch_err_ratelimited(c, "error marking nonexistent stripe %zu while marking\n"
+ "old %s\n"
+ "new %s", idx, buf1, buf2);
bch2_inconsistent_error(c);
return -1;
}
m->blocks_nonempty = 0;
for (i = 0; i < new_s->nr_blocks; i++) {
- ret = mark_stripe_bucket(c, new, i, journal_seq, flags);
+ ret = mark_stripe_bucket(trans, new, i, journal_seq, flags);
if (ret)
return ret;
}
return 0;
}
-static int bch2_mark_inode(struct bch_fs *c,
- struct bkey_s_c old, struct bkey_s_c new,
- u64 journal_seq, unsigned flags)
+static int bch2_mark_inode(struct btree_trans *trans,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
{
+ struct bch_fs *c = trans->c;
struct bch_fs_usage __percpu *fs_usage;
preempt_disable();
- fs_usage = fs_usage_ptr(c, journal_seq, flags & BTREE_TRIGGER_GC);
+ fs_usage = fs_usage_ptr(c, trans->journal_res.seq, flags & BTREE_TRIGGER_GC);
fs_usage->nr_inodes += new.k->type == KEY_TYPE_inode;
fs_usage->nr_inodes -= old.k->type == KEY_TYPE_inode;
preempt_enable();
return 0;
}
-static int bch2_mark_reservation(struct bch_fs *c,
- struct bkey_s_c old, struct bkey_s_c new,
- u64 journal_seq, unsigned flags)
+static int bch2_mark_reservation(struct btree_trans *trans,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
{
- struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
+ struct bch_fs *c = trans->c;
+ struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
struct bch_fs_usage __percpu *fs_usage;
unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
s64 sectors = (s64) k.k->size;
sectors *= replicas;
preempt_disable();
- fs_usage = fs_usage_ptr(c, journal_seq, flags & BTREE_TRIGGER_GC);
+ fs_usage = fs_usage_ptr(c, trans->journal_res.seq, flags & BTREE_TRIGGER_GC);
replicas = clamp_t(unsigned, replicas, 1,
ARRAY_SIZE(fs_usage->persistent_reserved));
return ret;
}
-static int bch2_mark_reflink_p(struct bch_fs *c,
- struct bkey_s_c old, struct bkey_s_c new,
- u64 journal_seq, unsigned flags)
+static int bch2_mark_reflink_p(struct btree_trans *trans,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
{
- struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
+ struct bch_fs *c = trans->c;
+ struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
struct reflink_gc *ref;
size_t l, r, m;
le32_to_cpu(p.v->back_pad);
int ret = 0;
- BUG_ON((flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)) ==
- (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE));
-
l = 0;
r = c->reflink_gc_nr;
while (l < r) {
return ret;
}
-static int bch2_mark_key_locked(struct bch_fs *c,
+static int bch2_mark_key_locked(struct btree_trans *trans,
struct bkey_s_c old,
struct bkey_s_c new,
- u64 journal_seq, unsigned flags)
+ unsigned flags)
{
- struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
-
- BUG_ON(!(flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)));
+ struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
switch (k.k->type) {
case KEY_TYPE_alloc:
case KEY_TYPE_alloc_v2:
- return bch2_mark_alloc(c, old, new, journal_seq, flags);
+ return bch2_mark_alloc(trans, old, new, flags);
case KEY_TYPE_btree_ptr:
case KEY_TYPE_btree_ptr_v2:
case KEY_TYPE_extent:
case KEY_TYPE_reflink_v:
- return bch2_mark_extent(c, old, new, journal_seq, flags);
+ return bch2_mark_extent(trans, old, new, flags);
case KEY_TYPE_stripe:
- return bch2_mark_stripe(c, old, new, journal_seq, flags);
+ return bch2_mark_stripe(trans, old, new, flags);
case KEY_TYPE_inode:
- return bch2_mark_inode(c, old, new, journal_seq, flags);
+ return bch2_mark_inode(trans, old, new, flags);
case KEY_TYPE_reservation:
- return bch2_mark_reservation(c, old, new, journal_seq, flags);
+ return bch2_mark_reservation(trans, old, new, flags);
case KEY_TYPE_reflink_p:
- return bch2_mark_reflink_p(c, old, new, journal_seq, flags);
+ return bch2_mark_reflink_p(trans, old, new, flags);
case KEY_TYPE_snapshot:
- return bch2_mark_snapshot(c, old, new, journal_seq, flags);
+ return bch2_mark_snapshot(trans, old, new, flags);
default:
return 0;
}
}
-int bch2_mark_key(struct bch_fs *c, struct bkey_s_c new, unsigned flags)
+int bch2_mark_key(struct btree_trans *trans, struct bkey_s_c new, unsigned flags)
{
+ struct bch_fs *c = trans->c;
struct bkey deleted = KEY(0, 0, 0);
struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
int ret;
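+ /* the fake "old" key must have the same position as the key being marked */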
+ deleted.p = new.k->p;
+
percpu_down_read(&c->mark_lock);
- ret = bch2_mark_key_locked(c, old, new, 0, flags);
+ ret = bch2_mark_key_locked(trans, old, new, flags);
percpu_up_read(&c->mark_lock);
return ret;
int bch2_mark_update(struct btree_trans *trans, struct btree_path *path,
struct bkey_i *new, unsigned flags)
{
- struct bch_fs *c = trans->c;
struct bkey _deleted = KEY(0, 0, 0);
struct bkey_s_c deleted = (struct bkey_s_c) { &_deleted, NULL };
struct bkey_s_c old;
struct bkey unpacked;
int ret;
+ _deleted.p = path->pos;
+
if (unlikely(flags & BTREE_TRIGGER_NORUN))
return 0;
if (old.k->type == new->k.type &&
((1U << old.k->type) & BTREE_TRIGGER_WANTS_OLD_AND_NEW)) {
- ret = bch2_mark_key_locked(c, old, bkey_i_to_s_c(new),
- trans->journal_res.seq,
+ ret = bch2_mark_key_locked(trans, old, bkey_i_to_s_c(new),
BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
} else {
- ret = bch2_mark_key_locked(c, deleted, bkey_i_to_s_c(new),
- trans->journal_res.seq,
+ ret = bch2_mark_key_locked(trans, deleted, bkey_i_to_s_c(new),
BTREE_TRIGGER_INSERT|flags) ?:
- bch2_mark_key_locked(c, old, deleted,
- trans->journal_res.seq,
+ bch2_mark_key_locked(trans, old, deleted,
BTREE_TRIGGER_OVERWRITE|flags);
}
if (IS_ERR(a))
return PTR_ERR(a);
- ret = __mark_pointer(c, k, &p.ptr, sectors, data_type, u.gen, &u.data_type,
+ ret = __mark_pointer(trans, k, &p.ptr, sectors, data_type,
+ u.gen, &u.data_type,
&u.dirty_sectors, &u.cached_sectors);
if (ret)
goto out;
bool stale;
int ret;
- BUG_ON((flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)) ==
- (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE));
-
r.e.data_type = data_type;
r.e.nr_devs = 0;
r.e.nr_required = 1;
s64 sectors = (s64) k.k->size;
struct replicas_delta_list *d;
- BUG_ON((flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)) ==
- (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE));
-
if (flags & BTREE_TRIGGER_OVERWRITE)
sectors = -sectors;
sectors *= replicas;
int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c old,
struct bkey_s_c new, unsigned flags)
{
- struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
-
- BUG_ON(!(flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)));
+ struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
switch (k.k->type) {
case KEY_TYPE_btree_ptr:
struct bkey unpacked;
int ret;
+ _deleted.p = path->pos;
+
if (unlikely(flags & BTREE_TRIGGER_NORUN))
return 0;
size_t, enum bch_data_type, unsigned,
struct gc_pos, unsigned);
-int bch2_mark_key(struct bch_fs *, struct bkey_s_c, unsigned);
+int bch2_mark_key(struct btree_trans *, struct bkey_s_c, unsigned);
int bch2_mark_update(struct btree_trans *, struct btree_path *,
struct bkey_i *, unsigned);
bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
bkey_start_pos(pos),
BTREE_ITER_INTENT);
-
- while ((k = bch2_btree_iter_peek(&iter)).k &&
+retry:
+ while (bch2_trans_begin(&trans),
+ (k = bch2_btree_iter_peek(&iter)).k &&
!(ret = bkey_err(k)) &&
bkey_cmp(bkey_start_pos(k.k), pos->p) < 0) {
struct bch_extent_ptr *ptr, *ec_ptr = NULL;
BTREE_INSERT_NOFAIL);
if (!ret)
bch2_btree_iter_set_pos(&iter, next_pos);
- if (ret == -EINTR)
- ret = 0;
if (ret)
break;
}
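+ /* -EINTR means the transaction was restarted: bch2_trans_begin() at the top of the loop resets it */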
+ if (ret == -EINTR)
+ goto retry;
bch2_trans_iter_exit(&trans, &iter);
bch2_trans_exit(&trans);
return ob->ec->new_stripe.data[ob->ec_idx] + (offset << 9);
}
-void bch2_ec_add_backpointer(struct bch_fs *c, struct write_point *wp,
- struct bpos pos, unsigned sectors)
+void bch2_ob_add_backpointer(struct bch_fs *c, struct open_bucket *ob,
+ struct bkey *k)
{
- struct open_bucket *ob = ec_open_bucket(c, &wp->ptrs);
- struct ec_stripe_new *ec;
+ struct ec_stripe_new *ec = ob->ec;
- if (!ob)
+ if (!ec)
return;
- ec = ob->ec;
mutex_lock(&ec->lock);
if (bch2_keylist_realloc(&ec->keys, ec->inline_keys,
}
bkey_init(&ec->keys.top->k);
- ec->keys.top->k.p = pos;
- bch2_key_resize(&ec->keys.top->k, sectors);
+ ec->keys.top->k.p = k->p;
+ ec->keys.top->k.size = k->size;
bch2_keylist_push(&ec->keys);
mutex_unlock(&ec->lock);
return ret;
}
-static int bch2_stripes_read_fn(struct bch_fs *c, struct bkey_s_c k)
+static int bch2_stripes_read_fn(struct btree_trans *trans, struct bkey_s_c k)
{
+ struct bch_fs *c = trans->c;
int ret = 0;
if (k.k->type == KEY_TYPE_stripe)
ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL) ?:
- bch2_mark_key(c, k,
- BTREE_TRIGGER_INSERT|
+ bch2_mark_key(trans, k,
BTREE_TRIGGER_NOATOMIC);
return ret;
int bch2_stripes_read(struct bch_fs *c)
{
- int ret = bch2_btree_and_journal_walk(c, BTREE_ID_stripes,
- bch2_stripes_read_fn);
+ struct btree_trans trans;
+ int ret;
+
+ bch2_trans_init(&trans, c, 0, 0);
+ ret = bch2_btree_and_journal_walk(&trans, BTREE_ID_stripes,
+ bch2_stripes_read_fn);
+ bch2_trans_exit(&trans);
if (ret)
bch_err(c, "error reading stripes: %i", ret);
int bch2_ec_read_extent(struct bch_fs *, struct bch_read_bio *);
void *bch2_writepoint_ec_buf(struct bch_fs *, struct write_point *);
-void bch2_ec_add_backpointer(struct bch_fs *, struct write_point *,
- struct bpos, unsigned);
+void bch2_ob_add_backpointer(struct bch_fs *, struct open_bucket *,
+ struct bkey *);
void bch2_ec_bucket_written(struct bch_fs *, struct open_bucket *);
void bch2_ec_bucket_cancel(struct bch_fs *, struct open_bucket *);
if (fsck_err_on(ret == INT_MAX, c,
"extent in missing inode:\n %s",
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)))
- return __bch2_trans_do(trans, NULL, NULL, BTREE_INSERT_LAZY_RW,
- bch2_btree_delete_at(trans, iter,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE));
+ return bch2_btree_delete_at(trans, iter,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
if (ret == INT_MAX)
return 0;
"extent in non regular inode mode %o:\n %s",
i->inode.bi_mode,
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)))
- return __bch2_trans_do(trans, NULL, NULL, BTREE_INSERT_LAZY_RW,
- bch2_btree_delete_at(trans, iter,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE));
+ return bch2_btree_delete_at(trans, iter,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
if (!bch2_snapshot_internal_node(c, k.k->p.snapshot)) {
for_each_visible_inode(c, s, inode, k.k->p.snapshot, i) {
BTREE_ITER_ALL_SNAPSHOTS);
do {
- ret = lockrestart_do(&trans,
+ ret = __bch2_trans_do(&trans, NULL, NULL,
+ BTREE_INSERT_LAZY_RW|
+ BTREE_INSERT_NOFAIL,
check_extent(&trans, &iter, &w, &s));
if (ret)
break;
{
struct bch_fs *c = op->c;
struct bkey_buf sk;
+ struct open_bucket *ec_ob = ec_open_bucket(c, &op->open_buckets);
struct keylist *keys = &op->insert_keys;
struct bkey_i *k = bch2_keylist_front(keys);
struct btree_trans trans;
if (ret)
break;
+ if (ec_ob)
+ bch2_ob_add_backpointer(c, ec_ob, &sk.k->k);
+
if (bkey_cmp(iter.pos, k->k.p) >= 0)
bch2_keylist_pop_front(&op->insert_keys);
else
struct bio *src = &op->wbio.bio, *dst = src;
struct bvec_iter saved_iter;
void *ec_buf;
- struct bpos ec_pos = op->pos;
unsigned total_output = 0, total_input = 0;
bool bounce = false;
bool page_alloc_failed = false;
dst->bi_iter.bi_size = total_output;
do_write:
- /* might have done a realloc... */
- bch2_ec_add_backpointer(c, wp, ec_pos, total_input >> 9);
-
*_dst = dst;
return more;
csum_err:
ret = 0;
if ((flags & JOURNAL_RES_GET_RESERVED) ||
+ test_bit(JOURNAL_NOCHANGES, &j->flags) ||
new.reserved + d < new.remaining) {
new.reserved += d;
ret = 1;
w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));
- if (c->opts.nochanges)
+ if (test_bit(JOURNAL_NOCHANGES, &j->flags))
goto no_io;
for_each_rw_member(ca, c, i)
struct journal_device *ja,
enum journal_space_from from)
{
- unsigned available = (journal_space_from(ja, from) -
- ja->cur_idx - 1 + ja->nr) % ja->nr;
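+ /* in nochanges mode the journal is never written, so every bucket stays available */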
+ unsigned available = !test_bit(JOURNAL_NOCHANGES, &j->flags)
+ ? ((journal_space_from(ja, from) -
+ ja->cur_idx - 1 + ja->nr) % ja->nr)
+ : ja->nr;
/*
* Don't use the last bucket unless writing the new last_seq
JOURNAL_NEED_WRITE,
JOURNAL_MAY_GET_UNRESERVED,
JOURNAL_MAY_SKIP_FLUSH,
+ JOURNAL_NOCHANGES,
};
/* Embedded in struct bch_fs */
#include "btree_update_interior.h"
#include "buckets.h"
#include "disk_groups.h"
+#include "ec.h"
#include "inode.h"
#include "io.h"
#include "journal_reclaim.h"
struct btree_iter iter;
struct migrate_write *m =
container_of(op, struct migrate_write, op);
+ struct open_bucket *ec_ob = ec_open_bucket(c, &op->open_buckets);
struct keylist *keys = &op->insert_keys;
struct bkey_buf _new, _insert;
int ret = 0;
if (!ret) {
bch2_btree_iter_set_pos(&iter, next_pos);
atomic_long_inc(&c->extent_migrate_done);
+ if (ec_ob)
+ bch2_ob_add_backpointer(c, ec_ob, &insert->k);
}
err:
if (ret == -EINTR)
bch2_bkey_buf_exit(&tmp, c);
}
-static int bch2_btree_and_journal_walk_recurse(struct bch_fs *c, struct btree *b,
+static int bch2_btree_and_journal_walk_recurse(struct btree_trans *trans, struct btree *b,
enum btree_id btree_id,
btree_walk_key_fn key_fn)
{
+ struct bch_fs *c = trans->c;
struct btree_and_journal_iter iter;
struct bkey_s_c k;
struct bkey_buf tmp;
btree_and_journal_iter_prefetch(c, b, iter);
- ret = bch2_btree_and_journal_walk_recurse(c, child,
+ ret = bch2_btree_and_journal_walk_recurse(trans, child,
btree_id, key_fn);
six_unlock_read(&child->c.lock);
} else {
- ret = key_fn(c, k);
+ ret = key_fn(trans, k);
}
if (ret)
return ret;
}
-int bch2_btree_and_journal_walk(struct bch_fs *c, enum btree_id btree_id,
+int bch2_btree_and_journal_walk(struct btree_trans *trans, enum btree_id btree_id,
btree_walk_key_fn key_fn)
{
+ struct bch_fs *c = trans->c;
struct btree *b = c->btree_roots[btree_id].b;
int ret = 0;
return 0;
six_lock_read(&b->c.lock, NULL, NULL);
- ret = bch2_btree_and_journal_walk_recurse(c, b, btree_id, key_fn);
+ ret = bch2_btree_and_journal_walk_recurse(trans, b, btree_id, key_fn);
six_unlock_read(&b->c.lock);
return ret;
struct bch_fs *,
struct btree *);
-typedef int (*btree_walk_key_fn)(struct bch_fs *c, struct bkey_s_c k);
+typedef int (*btree_walk_key_fn)(struct btree_trans *, struct bkey_s_c);
-int bch2_btree_and_journal_walk(struct bch_fs *, enum btree_id, btree_walk_key_fn);
+int bch2_btree_and_journal_walk(struct btree_trans *, enum btree_id, btree_walk_key_fn);
void bch2_journal_keys_free(struct journal_keys *);
void bch2_journal_entries_free(struct list_head *);
return NULL;
}
-int bch2_mark_snapshot(struct bch_fs *c,
+int bch2_mark_snapshot(struct btree_trans *trans,
struct bkey_s_c old, struct bkey_s_c new,
- u64 journal_seq, unsigned flags)
+ unsigned flags)
{
+ struct bch_fs *c = trans->c;
struct snapshot_t *t;
t = genradix_ptr_alloc(&c->snapshots,
if (BCH_SNAPSHOT_DELETED(bkey_s_c_to_snapshot(k).v))
have_deleted = true;
- ret = bch2_mark_snapshot(c, bkey_s_c_null, k, 0, 0);
+ ret = bch2_mark_snapshot(&trans, bkey_s_c_null, k, 0);
if (ret)
break;
}
bch2_trans_update(trans, &iter, &n->k_i, 0);
- ret = bch2_mark_snapshot(trans->c, bkey_s_c_null, bkey_i_to_s_c(&n->k_i), 0, 0);
+ ret = bch2_mark_snapshot(trans, bkey_s_c_null, bkey_i_to_s_c(&n->k_i), 0);
if (ret)
break;
.val_to_text = bch2_snapshot_to_text, \
}
-int bch2_mark_snapshot(struct bch_fs *, struct bkey_s_c,
- struct bkey_s_c, u64, unsigned);
+int bch2_mark_snapshot(struct btree_trans *, struct bkey_s_c,
+ struct bkey_s_c, unsigned);
static inline struct snapshot_t *snapshot_t(struct bch_fs *c, u32 id)
{
bch2_fs_fsio_init(c))
goto err;
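+ /* nochanges: tell the journal layer to skip all journal writes */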
+ if (c->opts.nochanges)
+ set_bit(JOURNAL_NOCHANGES, &c->journal.flags);
+
mi = bch2_sb_get_members(c->disk_sb.sb);
for (i = 0; i < c->sb.nr_devices; i++)
if (bch2_dev_exists(c->disk_sb.sb, mi, i) &&