#include "buckets.h"
#include "ec.h"
#include "error.h"
+#include "inode.h"
#include "movinggc.h"
#include "recovery.h"
#include "reflink.h"
unsigned journal_seq,
bool gc)
{
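+ /* usage is staged per journal buffer, indexed by journal_seq - which must therefore be nonzero outside of gc: */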
+ BUG_ON(!gc && !journal_seq);
+
return this_cpu_ptr(gc
? ca->usage_gc
: ca->usage[journal_seq & JOURNAL_BUF_MASK]);
unsigned journal_seq,
bool gc)
{
+ percpu_rwsem_assert_held(&c->mark_lock);
+ BUG_ON(!gc && !journal_seq);
+
return this_cpu_ptr(gc
? c->usage_gc
: c->usage[journal_seq & JOURNAL_BUF_MASK]);
struct bch_fs_usage *fs_usage;
struct bch_dev_usage *u;
- percpu_rwsem_assert_held(&c->mark_lock);
+ /*
+ * Hack for bch2_fs_initialize path, where we're first marking sb and
+ * journal non-transactionally:
+ */
+ if (!journal_seq && !test_bit(BCH_FS_INITIALIZED, &c->flags))
+ journal_seq = 1;
preempt_disable();
fs_usage = fs_usage_ptr(c, journal_seq, gc);
return 0;
}
-static inline int update_replicas(struct bch_fs *c,
+static inline int update_replicas(struct bch_fs *c, struct bkey_s_c k,
struct bch_replicas_entry *r, s64 sectors,
unsigned journal_seq, bool gc)
{
struct bch_fs_usage __percpu *fs_usage;
- int idx = bch2_replicas_entry_idx(c, r);
+ int idx, ret = 0;
+ char buf[200];
- if (idx < 0)
- return -1;
+ percpu_down_read(&c->mark_lock);
+
+ idx = bch2_replicas_entry_idx(c, r);
+ if (idx < 0 &&
+ (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
+ fsck_err(c, "no replicas entry\n"
+ " while marking %s",
+ (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)))) {
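+ /* bch2_mark_replicas() may allocate, so drop mark_lock across the call and re-look up the entry afterwards: */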
+ percpu_up_read(&c->mark_lock);
+ ret = bch2_mark_replicas(c, r);
+ if (ret)
+ return ret;
+
+ percpu_down_read(&c->mark_lock);
+ idx = bch2_replicas_entry_idx(c, r);
+ }
+ if (idx < 0) {
+ ret = -1;
+ goto err;
+ }
preempt_disable();
fs_usage = fs_usage_ptr(c, journal_seq, gc);
fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
fs_usage->replicas[idx] += sectors;
preempt_enable();
- return 0;
+err:
+fsck_err:
+ percpu_up_read(&c->mark_lock);
+ return ret;
}
static inline int update_cached_sectors(struct bch_fs *c,
+ struct bkey_s_c k,
unsigned dev, s64 sectors,
unsigned journal_seq, bool gc)
{
bch2_replicas_entry_cached(&r.e, dev);
- return update_replicas(c, &r.e, sectors, journal_seq, gc);
+ return update_replicas(c, k, &r.e, sectors, journal_seq, gc);
}
static struct replicas_delta_list *
BUG_ON(owned_by_allocator == old.owned_by_allocator);
}
-static int bch2_mark_alloc(struct bch_fs *c,
+static int bch2_mark_alloc(struct btree_trans *trans,
struct bkey_s_c old, struct bkey_s_c new,
- u64 journal_seq, unsigned flags)
+ unsigned flags)
{
bool gc = flags & BTREE_TRIGGER_GC;
+ u64 journal_seq = trans->journal_res.seq;
+ struct bch_fs *c = trans->c;
struct bkey_alloc_unpacked u;
struct bch_dev *ca;
struct bucket *g;
struct bucket_mark old_m, m;
+ int ret = 0;
/* We don't do anything for deletions - do we?: */
- if (new.k->type != KEY_TYPE_alloc &&
- new.k->type != KEY_TYPE_alloc_v2)
+ if (!bkey_is_alloc(new.k))
return 0;
/*
!(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
return 0;
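+ /* stamp the alloc key with the seq of the journal entry it's being committed in: */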
+ if (flags & BTREE_TRIGGER_INSERT) {
+ struct bch_alloc_v3 *v = (struct bch_alloc_v3 *) new.v;
+
+ BUG_ON(!journal_seq);
+ BUG_ON(new.k->type != KEY_TYPE_alloc_v3);
+
+ v->journal_seq = cpu_to_le64(journal_seq);
+ }
+
ca = bch_dev_bkey_exists(c, new.k->p.inode);
if (new.k->p.offset >= ca->mi.nbuckets)
return 0;
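+ /* the bucket array can be resized; mark_lock guards it while we use g: */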
+ percpu_down_read(&c->mark_lock);
g = __bucket(ca, new.k->p.offset, gc);
u = bch2_alloc_unpack(new);
g->gen_valid = 1;
g->stripe = u.stripe;
g->stripe_redundancy = u.stripe_redundancy;
+ percpu_up_read(&c->mark_lock);
/*
* need to know if we're getting called from the invalidate path or
if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
old_m.cached_sectors) {
- if (update_cached_sectors(c, ca->dev_idx, -old_m.cached_sectors,
- journal_seq, gc)) {
+ ret = update_cached_sectors(c, new, ca->dev_idx,
+ -old_m.cached_sectors,
+ journal_seq, gc);
+ if (ret) {
bch2_fs_fatal_error(c, "bch2_mark_alloc(): no replicas entry while updating cached sectors");
- return -1;
+ return ret;
}
trace_invalidate(ca, bucket_to_sector(ca, new.k->p.offset),
: sectors;
}
-static int check_bucket_ref(struct bch_fs *c, struct bkey_s_c k,
+static int check_bucket_ref(struct bch_fs *c,
+ struct bkey_s_c k,
const struct bch_extent_ptr *ptr,
s64 sectors, enum bch_data_type ptr_data_type,
u8 bucket_gen, u8 bucket_data_type,
return 0;
}
-static int mark_stripe_bucket(struct bch_fs *c, struct bkey_s_c k,
- unsigned ptr_idx,
- u64 journal_seq, unsigned flags)
+static int mark_stripe_bucket(struct btree_trans *trans,
+ struct bkey_s_c k,
+ unsigned ptr_idx,
+ u64 journal_seq, unsigned flags)
{
+ struct bch_fs *c = trans->c;
const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
unsigned nr_data = s->nr_blocks - s->nr_redundant;
bool parity = ptr_idx >= nr_data;
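+ /* only parity blocks are accounted here; data block sectors are accounted by the extents pointing into the stripe: */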
+ enum bch_data_type data_type = parity ? BCH_DATA_parity : 0;
+ s64 sectors = parity ? le16_to_cpu(s->sectors) : 0;
const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
bool gc = flags & BTREE_TRIGGER_GC;
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
- struct bucket *g = PTR_BUCKET(ca, ptr, gc);
+ struct bucket *g;
struct bucket_mark new, old;
char buf[200];
- int ret;
+ int ret = 0;
+
+ /*
+ * XXX doesn't handle deletion
+ */
- if (g->stripe && g->stripe != k.k->p.offset) {
+ percpu_down_read(&c->mark_lock);
+ g = PTR_BUCKET(ca, ptr, gc);
+
+ if (g->mark.dirty_sectors ||
+ (g->stripe && g->stripe != k.k->p.offset)) {
bch2_fs_inconsistent(c,
"bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
ptr->dev, PTR_BUCKET_NR(ca, ptr), g->mark.gen,
(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
old = bucket_cmpxchg(g, new, ({
- ret = check_bucket_ref(c, k, ptr, 0, 0, new.gen, new.data_type,
+ ret = check_bucket_ref(c, k, ptr, sectors, data_type,
+ new.gen, new.data_type,
new.dirty_sectors, new.cached_sectors);
if (ret)
- return ret;
+ goto err;
- if (parity) {
- new.data_type = BCH_DATA_parity;
- new.dirty_sectors = le16_to_cpu(s->sectors);
- }
+ new.dirty_sectors += sectors;
+ if (data_type)
+ new.data_type = data_type;
if (journal_seq) {
new.journal_seq_valid = 1;
new.journal_seq = journal_seq;
}
+
+ new.stripe = true;
}));
g->stripe = k.k->p.offset;
g->stripe_redundancy = s->nr_redundant;
bch2_dev_usage_update(c, ca, old, new, journal_seq, gc);
+err:
+ percpu_up_read(&c->mark_lock);
+
- return 0;
+ return ret;
}
-static int __mark_pointer(struct bch_fs *c, struct bkey_s_c k,
+static int __mark_pointer(struct btree_trans *trans,
+ struct bkey_s_c k,
const struct bch_extent_ptr *ptr,
s64 sectors, enum bch_data_type ptr_data_type,
u8 bucket_gen, u8 *bucket_data_type,
u16 *dst_sectors = !ptr->cached
? dirty_sectors
: cached_sectors;
- int ret = check_bucket_ref(c, k, ptr, sectors, ptr_data_type,
+ int ret = check_bucket_ref(trans->c, k, ptr, sectors, ptr_data_type,
bucket_gen, *bucket_data_type,
*dirty_sectors, *cached_sectors);
return 0;
}
-static int bch2_mark_pointer(struct bch_fs *c, struct bkey_s_c k,
+static int bch2_mark_pointer(struct btree_trans *trans,
+ struct bkey_s_c k,
struct extent_ptr_decoded p,
s64 sectors, enum bch_data_type data_type,
- u64 journal_seq, unsigned flags)
+ unsigned flags)
{
bool gc = flags & BTREE_TRIGGER_GC;
+ u64 journal_seq = trans->journal_res.seq;
+ struct bch_fs *c = trans->c;
struct bucket_mark old, new;
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
struct bucket *g = PTR_BUCKET(ca, &p.ptr, gc);
u8 bucket_data_type;
u64 v;
- int ret;
+ int ret = 0;
+
+ percpu_down_read(&c->mark_lock);
+ g = PTR_BUCKET(ca, &p.ptr, gc);
v = atomic64_read(&g->_mark.v);
do {
new.v.counter = old.v.counter = v;
bucket_data_type = new.data_type;
- ret = __mark_pointer(c, k, &p.ptr, sectors, data_type, new.gen,
+ ret = __mark_pointer(trans, k, &p.ptr, sectors,
+ data_type, new.gen,
&bucket_data_type,
&new.dirty_sectors,
&new.cached_sectors);
if (ret)
- return ret;
+ goto err;
new.data_type = bucket_data_type;
bch2_dev_usage_update(c, ca, old, new, journal_seq, gc);
BUG_ON(!gc && bucket_became_unavailable(old, new));
+err:
+ percpu_up_read(&c->mark_lock);
- return 0;
+ return ret;
}
-static int bch2_mark_stripe_ptr(struct bch_fs *c,
+static int bch2_mark_stripe_ptr(struct btree_trans *trans,
+ struct bkey_s_c k,
struct bch_extent_stripe_ptr p,
enum bch_data_type data_type,
s64 sectors,
- unsigned journal_seq, unsigned flags)
+ unsigned flags)
{
bool gc = flags & BTREE_TRIGGER_GC;
+ struct bch_fs *c = trans->c;
struct bch_replicas_padded r;
- struct stripe *m;
- unsigned i, blocks_nonempty = 0;
- m = genradix_ptr(&c->stripes[gc], p.idx);
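+ /* runtime stripe ptr accounting now happens in the transactional trigger; this path is gc only: */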
+ if (!gc) {
+ BUG();
+ } else {
+ struct gc_stripe *m = genradix_ptr_alloc(&c->gc_stripes, p.idx, GFP_KERNEL);
- spin_lock(&c->ec_stripes_heap_lock);
+ if (!m)
+ return -ENOMEM;
- if (!m || !m->alive) {
- spin_unlock(&c->ec_stripes_heap_lock);
- bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
- (u64) p.idx);
- bch2_inconsistent_error(c);
- return -EIO;
- }
+ spin_lock(&c->ec_stripes_heap_lock);
- m->block_sectors[p.block] += sectors;
+ if (!m || !m->alive) {
+ spin_unlock(&c->ec_stripes_heap_lock);
+ bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
+ (u64) p.idx);
+ bch2_inconsistent_error(c);
+ return -EIO;
+ }
- r = m->r;
+ m->block_sectors[p.block] += sectors;
- for (i = 0; i < m->nr_blocks; i++)
- blocks_nonempty += m->block_sectors[i] != 0;
+ r = m->r;
+ spin_unlock(&c->ec_stripes_heap_lock);
- if (m->blocks_nonempty != blocks_nonempty) {
- m->blocks_nonempty = blocks_nonempty;
- if (!gc)
- bch2_stripes_heap_update(c, m, p.idx);
+ r.e.data_type = data_type;
+ return update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, gc);
}
- spin_unlock(&c->ec_stripes_heap_lock);
-
- r.e.data_type = data_type;
- update_replicas(c, &r.e, sectors, journal_seq, gc);
-
return 0;
}
-static int bch2_mark_extent(struct bch_fs *c,
+static int bch2_mark_extent(struct btree_trans *trans,
struct bkey_s_c old, struct bkey_s_c new,
- unsigned journal_seq, unsigned flags)
+ unsigned flags)
{
bool gc = flags & BTREE_TRIGGER_GC;
- struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
+ u64 journal_seq = trans->journal_res.seq;
+ struct bch_fs *c = trans->c;
+ struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
bool stale;
int ret;
- BUG_ON((flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)) ==
- (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE));
-
r.e.data_type = data_type;
r.e.nr_devs = 0;
r.e.nr_required = 1;
if (flags & BTREE_TRIGGER_OVERWRITE)
disk_sectors = -disk_sectors;
- ret = bch2_mark_pointer(c, k, p, disk_sectors, data_type,
- journal_seq, flags);
+ ret = bch2_mark_pointer(trans, k, p, disk_sectors,
+ data_type, flags);
if (ret < 0)
return ret;
stale = ret > 0;
if (p.ptr.cached) {
- if (!stale)
- if (update_cached_sectors(c, p.ptr.dev, disk_sectors,
- journal_seq, gc)) {
+ if (!stale) {
+ ret = update_cached_sectors(c, k, p.ptr.dev,
+ disk_sectors, journal_seq, gc);
+ if (ret) {
bch2_fs_fatal_error(c, "bch2_mark_extent(): no replicas entry while updating cached sectors");
- return -1;
-
+ return ret;
}
+ }
} else if (!p.has_ec) {
dirty_sectors += disk_sectors;
r.e.devs[r.e.nr_devs++] = p.ptr.dev;
} else {
- ret = bch2_mark_stripe_ptr(c, p.ec, data_type,
- disk_sectors, journal_seq, flags);
+ ret = bch2_mark_stripe_ptr(trans, k, p.ec, data_type,
+ disk_sectors, flags);
if (ret)
return ret;
}
if (r.e.nr_devs) {
- if (update_replicas(c, &r.e, dirty_sectors, journal_seq, gc)) {
+ ret = update_replicas(c, k, &r.e, dirty_sectors, journal_seq, gc);
+ if (ret) {
char buf[200];
bch2_bkey_val_to_text(&PBUF(buf), c, k);
bch2_fs_fatal_error(c, "no replicas entry for %s", buf);
- return -1;
+ return ret;
}
}
return 0;
}
-static int bch2_mark_stripe(struct bch_fs *c,
- struct bkey_s_c old, struct bkey_s_c new,
- u64 journal_seq, unsigned flags)
+static int bch2_mark_stripe(struct btree_trans *trans,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
{
bool gc = flags & BTREE_TRIGGER_GC;
+ u64 journal_seq = trans->journal_res.seq;
+ struct bch_fs *c = trans->c;
size_t idx = new.k->p.offset;
const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
? bkey_s_c_to_stripe(old).v : NULL;
const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
? bkey_s_c_to_stripe(new).v : NULL;
- struct stripe *m = genradix_ptr(&c->stripes[gc], idx);
unsigned i;
int ret;
BUG_ON(gc && old_s);
- if (!m || (old_s && !m->alive)) {
- bch_err_ratelimited(c, "error marking nonexistent stripe %zu",
- idx);
- bch2_inconsistent_error(c);
- return -1;
- }
+ if (!gc) {
+ struct stripe *m = genradix_ptr(&c->stripes, idx);
- if (!new_s) {
- spin_lock(&c->ec_stripes_heap_lock);
- bch2_stripes_heap_del(c, m, idx);
- spin_unlock(&c->ec_stripes_heap_lock);
+ if (!m || (old_s && !m->alive)) {
+ char buf1[200], buf2[200];
- memset(m, 0, sizeof(*m));
+ bch2_bkey_val_to_text(&PBUF(buf1), c, old);
+ bch2_bkey_val_to_text(&PBUF(buf2), c, new);
+ bch_err_ratelimited(c, "error marking nonexistent stripe %zu while marking\n"
+ "old %s\n"
+ "new %s", idx, buf1, buf2);
+ bch2_inconsistent_error(c);
+ return -1;
+ }
+
+ if (!new_s) {
+ spin_lock(&c->ec_stripes_heap_lock);
+ bch2_stripes_heap_del(c, m, idx);
+ spin_unlock(&c->ec_stripes_heap_lock);
+
+ memset(m, 0, sizeof(*m));
+ } else {
+ m->alive = true;
+ m->sectors = le16_to_cpu(new_s->sectors);
+ m->algorithm = new_s->algorithm;
+ m->nr_blocks = new_s->nr_blocks;
+ m->nr_redundant = new_s->nr_redundant;
+ m->blocks_nonempty = 0;
+
+ for (i = 0; i < new_s->nr_blocks; i++)
+ m->blocks_nonempty += !!stripe_blockcount_get(new_s, i);
+
+ spin_lock(&c->ec_stripes_heap_lock);
+ bch2_stripes_heap_update(c, m, idx);
+ spin_unlock(&c->ec_stripes_heap_lock);
+ }
} else {
+ struct gc_stripe *m = genradix_ptr(&c->gc_stripes, idx);
+
+ /*
+ * This will be wrong when we bring back runtime gc: we should
+ * be unmarking the old key and then marking the new key
+ */
m->alive = true;
m->sectors = le16_to_cpu(new_s->sectors);
- m->algorithm = new_s->algorithm;
m->nr_blocks = new_s->nr_blocks;
m->nr_redundant = new_s->nr_redundant;
- m->blocks_nonempty = 0;
-
- for (i = 0; i < new_s->nr_blocks; i++) {
- m->block_sectors[i] =
- stripe_blockcount_get(new_s, i);
- m->blocks_nonempty += !!m->block_sectors[i];
+ for (i = 0; i < new_s->nr_blocks; i++)
m->ptrs[i] = new_s->ptrs[i];
- }
bch2_bkey_to_replicas(&m->r.e, new);
- if (!gc) {
- spin_lock(&c->ec_stripes_heap_lock);
- bch2_stripes_heap_update(c, m, idx);
- spin_unlock(&c->ec_stripes_heap_lock);
- }
- }
-
- if (gc) {
/*
* gc recalculates this field from stripe ptr
* references:
*/
memset(m->block_sectors, 0, sizeof(m->block_sectors));
- m->blocks_nonempty = 0;
for (i = 0; i < new_s->nr_blocks; i++) {
- ret = mark_stripe_bucket(c, new, i, journal_seq, flags);
+ ret = mark_stripe_bucket(trans, new, i, journal_seq, flags);
if (ret)
return ret;
}
- if (update_replicas(c, &m->r.e,
- ((s64) m->sectors * m->nr_redundant),
- journal_seq, gc)) {
+ ret = update_replicas(c, new, &m->r.e,
+ ((s64) m->sectors * m->nr_redundant),
+ journal_seq, gc);
+ if (ret) {
char buf[200];
bch2_bkey_val_to_text(&PBUF(buf), c, new);
bch2_fs_fatal_error(c, "no replicas entry for %s", buf);
- return -1;
+ return ret;
}
}
return 0;
}
-static int bch2_mark_inode(struct bch_fs *c,
- struct bkey_s_c old, struct bkey_s_c new,
- u64 journal_seq, unsigned flags)
+static int bch2_mark_inode(struct btree_trans *trans,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
{
+ struct bch_fs *c = trans->c;
struct bch_fs_usage __percpu *fs_usage;
+ u64 journal_seq = trans->journal_res.seq;
- preempt_disable();
- fs_usage = fs_usage_ptr(c, journal_seq, flags & BTREE_TRIGGER_GC);
- fs_usage->nr_inodes += new.k->type == KEY_TYPE_inode;
- fs_usage->nr_inodes -= old.k->type == KEY_TYPE_inode;
- preempt_enable();
+ if (flags & BTREE_TRIGGER_INSERT) {
+ struct bch_inode_v2 *v = (struct bch_inode_v2 *) new.v;
+
+ BUG_ON(!journal_seq);
+ BUG_ON(new.k->type != KEY_TYPE_inode_v2);
+
+ v->bi_journal_seq = cpu_to_le64(journal_seq);
+ }
+
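+ /* non-gc inode counts are accounted via the transactional trigger, bch2_trans_mark_inode(): */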
+ if (flags & BTREE_TRIGGER_GC) {
+ percpu_down_read(&c->mark_lock);
+ preempt_disable();
+
+ fs_usage = fs_usage_ptr(c, journal_seq, flags & BTREE_TRIGGER_GC);
+ fs_usage->nr_inodes += bkey_is_inode(new.k);
+ fs_usage->nr_inodes -= bkey_is_inode(old.k);
+
+ preempt_enable();
+ percpu_up_read(&c->mark_lock);
+ }
return 0;
}
-static int bch2_mark_reservation(struct bch_fs *c,
- struct bkey_s_c old, struct bkey_s_c new,
- u64 journal_seq, unsigned flags)
+static int bch2_mark_reservation(struct btree_trans *trans,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
{
- struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
+ struct bch_fs *c = trans->c;
+ struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
struct bch_fs_usage __percpu *fs_usage;
unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
s64 sectors = (s64) k.k->size;
sectors = -sectors;
sectors *= replicas;
+ percpu_down_read(&c->mark_lock);
preempt_disable();
- fs_usage = fs_usage_ptr(c, journal_seq, flags & BTREE_TRIGGER_GC);
+
+ fs_usage = fs_usage_ptr(c, trans->journal_res.seq, flags & BTREE_TRIGGER_GC);
replicas = clamp_t(unsigned, replicas, 1,
ARRAY_SIZE(fs_usage->persistent_reserved));
fs_usage->reserved += sectors;
fs_usage->persistent_reserved[replicas - 1] += sectors;
+
preempt_enable();
+ percpu_up_read(&c->mark_lock);
return 0;
}
static s64 __bch2_mark_reflink_p(struct bch_fs *c, struct bkey_s_c_reflink_p p,
- u64 idx, unsigned flags, size_t *r_idx)
+ u64 *idx, unsigned flags, size_t r_idx)
{
struct reflink_gc *r;
int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
s64 ret = 0;
- while (*r_idx < c->reflink_gc_nr) {
- r = genradix_ptr(&c->reflink_gc_table, *r_idx);
- BUG_ON(!r);
-
- if (idx < r->offset)
- break;
- (*r_idx)++;
- }
+ if (r_idx >= c->reflink_gc_nr)
+ goto not_found;
- if (*r_idx >= c->reflink_gc_nr ||
- idx < r->offset - r->size) {
- ret = p.k->size;
+ r = genradix_ptr(&c->reflink_gc_table, r_idx);
+ if (*idx < r->offset - r->size)
goto not_found;
- }
BUG_ON((s64) r->refcount + add < 0);
r->refcount += add;
- return r->offset - idx;
+ *idx = r->offset;
+ return 0;
not_found:
- if ((flags & BTREE_TRIGGER_GC) &&
- (flags & BTREE_TRIGGER_NOATOMIC)) {
- /*
- * XXX: we're replacing the entire reflink pointer with an error
- * key, we should just be replacing the part that was missing:
- */
- if (fsck_err(c, "%llu:%llu len %u points to nonexistent indirect extent %llu",
- p.k->p.inode, p.k->p.offset, p.k->size, idx)) {
- struct bkey_i_error *new;
-
- new = kmalloc(sizeof(*new), GFP_KERNEL);
- if (!new) {
- bch_err(c, "%s: error allocating new key", __func__);
- return -ENOMEM;
- }
+ *idx = U64_MAX;
+ ret = -EIO;
- bkey_init(&new->k);
- new->k.type = KEY_TYPE_error;
- new->k.p = p.k->p;
- new->k.size = p.k->size;
- ret = bch2_journal_key_insert(c, BTREE_ID_extents, 0, &new->k_i);
+ /*
+ * XXX: we're replacing the entire reflink pointer with an error
+ * key, we should just be replacing the part that was missing:
+ */
+ if (fsck_err(c, "%llu:%llu len %u points to nonexistent indirect extent %llu",
+ p.k->p.inode, p.k->p.offset, p.k->size, *idx)) {
+ struct bkey_i_error *new;
+ new = kmalloc(sizeof(*new), GFP_KERNEL);
+ if (!new) {
+ bch_err(c, "%s: error allocating new key", __func__);
+ return -ENOMEM;
}
- } else {
- bch2_fs_inconsistent(c,
- "%llu:%llu len %u points to nonexistent indirect extent %llu",
- p.k->p.inode, p.k->p.offset, p.k->size, idx);
- bch2_inconsistent_error(c);
- ret = -EIO;
+
+ bkey_init(&new->k);
+ new->k.type = KEY_TYPE_error;
+ new->k.p = p.k->p;
+ new->k.size = p.k->size;
+ ret = bch2_journal_key_insert(c, BTREE_ID_extents, 0, &new->k_i);
}
fsck_err:
return ret;
}
-static int bch2_mark_reflink_p(struct bch_fs *c,
- struct bkey_s_c old, struct bkey_s_c new,
- u64 journal_seq, unsigned flags)
+static int bch2_mark_reflink_p(struct btree_trans *trans,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
{
- struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
+ struct bch_fs *c = trans->c;
+ struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
struct reflink_gc *ref;
size_t l, r, m;
u64 idx = le64_to_cpu(p.v->idx);
- unsigned sectors = p.k->size;
- s64 ret = 0;
+ u64 end = le64_to_cpu(p.v->idx) + p.k->size;
+ int ret = 0;
- BUG_ON((flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)) ==
- (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE));
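+ /* pad fields only exist (and are only trusted) on filesystems with the reflink_p fix: */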
+ if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix) {
+ idx -= le32_to_cpu(p.v->front_pad);
+ end += le32_to_cpu(p.v->back_pad);
+ }
l = 0;
r = c->reflink_gc_nr;
r = m;
}
- while (sectors) {
- ret = __bch2_mark_reflink_p(c, p, idx, flags, &l);
- if (ret <= 0)
- return ret;
-
- ret = min_t(s64, ret, sectors);
- idx += ret;
- sectors -= ret;
- }
+ while (idx < end && !ret)
+ ret = __bch2_mark_reflink_p(c, p, &idx, flags, l++);
- return 0;
+ return ret;
}
-static int bch2_mark_key_locked(struct bch_fs *c,
- struct bkey_s_c old,
- struct bkey_s_c new,
- u64 journal_seq, unsigned flags)
+int bch2_mark_key(struct btree_trans *trans,
+ struct bkey_s_c old,
+ struct bkey_s_c new,
+ unsigned flags)
{
- struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
-
- BUG_ON(!(flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)));
+ struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
switch (k.k->type) {
case KEY_TYPE_alloc:
case KEY_TYPE_alloc_v2:
- return bch2_mark_alloc(c, old, new, journal_seq, flags);
+ case KEY_TYPE_alloc_v3:
+ return bch2_mark_alloc(trans, old, new, flags);
case KEY_TYPE_btree_ptr:
case KEY_TYPE_btree_ptr_v2:
case KEY_TYPE_extent:
case KEY_TYPE_reflink_v:
- return bch2_mark_extent(c, old, new, journal_seq, flags);
+ return bch2_mark_extent(trans, old, new, flags);
case KEY_TYPE_stripe:
- return bch2_mark_stripe(c, old, new, journal_seq, flags);
+ return bch2_mark_stripe(trans, old, new, flags);
case KEY_TYPE_inode:
- return bch2_mark_inode(c, old, new, journal_seq, flags);
+ case KEY_TYPE_inode_v2:
+ return bch2_mark_inode(trans, old, new, flags);
case KEY_TYPE_reservation:
- return bch2_mark_reservation(c, old, new, journal_seq, flags);
+ return bch2_mark_reservation(trans, old, new, flags);
case KEY_TYPE_reflink_p:
- return bch2_mark_reflink_p(c, old, new, journal_seq, flags);
+ return bch2_mark_reflink_p(trans, old, new, flags);
case KEY_TYPE_snapshot:
- return bch2_mark_snapshot(c, old, new, journal_seq, flags);
+ return bch2_mark_snapshot(trans, old, new, flags);
default:
return 0;
}
}
-int bch2_mark_key(struct bch_fs *c, struct bkey_s_c new, unsigned flags)
-{
- struct bkey deleted = KEY(0, 0, 0);
- struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
- int ret;
-
- percpu_down_read(&c->mark_lock);
- ret = bch2_mark_key_locked(c, old, new, 0, flags);
- percpu_up_read(&c->mark_lock);
-
- return ret;
-}
-
int bch2_mark_update(struct btree_trans *trans, struct btree_path *path,
struct bkey_i *new, unsigned flags)
{
- struct bch_fs *c = trans->c;
struct bkey _deleted = KEY(0, 0, 0);
struct bkey_s_c deleted = (struct bkey_s_c) { &_deleted, NULL };
struct bkey_s_c old;
struct bkey unpacked;
int ret;
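+ /* triggers look at the key's position, so the synthetic whiteout needs the position being updated: */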
+ _deleted.p = path->pos;
+
if (unlikely(flags & BTREE_TRIGGER_NORUN))
return 0;
if (old.k->type == new->k.type &&
((1U << old.k->type) & BTREE_TRIGGER_WANTS_OLD_AND_NEW)) {
- ret = bch2_mark_key_locked(c, old, bkey_i_to_s_c(new),
- trans->journal_res.seq,
+ ret = bch2_mark_key(trans, old, bkey_i_to_s_c(new),
BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
} else {
- ret = bch2_mark_key_locked(c, deleted, bkey_i_to_s_c(new),
- trans->journal_res.seq,
+ ret = bch2_mark_key(trans, deleted, bkey_i_to_s_c(new),
BTREE_TRIGGER_INSERT|flags) ?:
- bch2_mark_key_locked(c, old, deleted,
- trans->journal_res.seq,
+ bch2_mark_key(trans, old, deleted,
BTREE_TRIGGER_OVERWRITE|flags);
}
__WARN();
}
-void bch2_trans_fs_usage_apply(struct btree_trans *trans,
- struct replicas_delta_list *deltas)
+int bch2_trans_fs_usage_apply(struct btree_trans *trans,
+ struct replicas_delta_list *deltas)
{
struct bch_fs *c = trans->c;
static int warned_disk_usage = 0;
bool warn = false;
unsigned disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
- struct replicas_delta *d = deltas->d;
+ struct replicas_delta *d = deltas->d, *d2;
struct replicas_delta *top = (void *) deltas->d + deltas->used;
struct bch_fs_usage *dst;
s64 added = 0, should_not_have_added;
unsigned i;
- percpu_rwsem_assert_held(&c->mark_lock);
-
+ percpu_down_read(&c->mark_lock);
preempt_disable();
dst = fs_usage_ptr(c, trans->journal_res.seq, false);
added += d->delta;
}
- BUG_ON(__update_replicas(c, dst, &d->r, d->delta));
+ if (__update_replicas(c, dst, &d->r, d->delta))
+ goto need_mark;
}
dst->nr_inodes += deltas->nr_inodes;
}
preempt_enable();
+ percpu_up_read(&c->mark_lock);
if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
fs_usage_apply_warn(trans, disk_res_sectors, should_not_have_added);
+ return 0;
+need_mark:
+ /* revert changes: */
+ for (d2 = deltas->d; d2 != d; d2 = replicas_delta_next(d2))
+ BUG_ON(__update_replicas(c, dst, &d2->r, -d2->delta));
+
+ preempt_enable();
+ percpu_up_read(&c->mark_lock);
+ return -1;
}
/* trans_mark: */
-static struct bkey_alloc_buf *
-bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
+static int bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
const struct bch_extent_ptr *ptr,
struct bkey_alloc_unpacked *u)
{
struct bch_fs *c = trans->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bpos pos = POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
- struct bucket *g;
- struct bkey_alloc_buf *a;
struct bkey_i *update = btree_trans_peek_updates(trans, BTREE_ID_alloc, pos);
int ret;
- a = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));
- if (IS_ERR(a))
- return a;
-
bch2_trans_iter_init(trans, iter, BTREE_ID_alloc, pos,
BTREE_ITER_CACHED|
BTREE_ITER_CACHED_NOFILL|
ret = bch2_btree_iter_traverse(iter);
if (ret) {
bch2_trans_iter_exit(trans, iter);
- return ERR_PTR(ret);
+ return ret;
}
- if (update && !bpos_cmp(update->k.p, pos)) {
- *u = bch2_alloc_unpack(bkey_i_to_s_c(update));
- } else {
- percpu_down_read(&c->mark_lock);
- g = bucket(ca, pos.offset);
- *u = alloc_mem_to_key(iter, g, READ_ONCE(g->mark));
- percpu_up_read(&c->mark_lock);
- }
+ *u = update && !bpos_cmp(update->k.p, pos)
+ ? bch2_alloc_unpack(bkey_i_to_s_c(update))
+ : alloc_mem_to_key(c, iter);
- return a;
+ return 0;
}
static int bch2_trans_mark_pointer(struct btree_trans *trans,
struct bkey_s_c k, struct extent_ptr_decoded p,
s64 sectors, enum bch_data_type data_type)
{
- struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_alloc_unpacked u;
- struct bkey_alloc_buf *a;
int ret;
- a = bch2_trans_start_alloc_update(trans, &iter, &p.ptr, &u);
- if (IS_ERR(a))
- return PTR_ERR(a);
+ ret = bch2_trans_start_alloc_update(trans, &iter, &p.ptr, &u);
+ if (ret)
+ return ret;
- ret = __mark_pointer(c, k, &p.ptr, sectors, data_type, u.gen, &u.data_type,
+ ret = __mark_pointer(trans, k, &p.ptr, sectors, data_type,
+ u.gen, &u.data_type,
&u.dirty_sectors, &u.cached_sectors);
if (ret)
goto out;
- bch2_alloc_pack(c, a, u);
- bch2_trans_update(trans, &iter, &a->k, 0);
+ ret = bch2_alloc_write(trans, &iter, &u, 0);
out:
bch2_trans_iter_exit(trans, &iter);
return ret;
stripe_blockcount_set(&s->v, p.ec.block,
stripe_blockcount_get(&s->v, p.ec.block) +
sectors);
- bch2_trans_update(trans, &iter, &s->k_i, 0);
+
+ ret = bch2_trans_update(trans, &iter, &s->k_i, 0);
+ if (ret)
+ goto err;
bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
r.e.data_type = data_type;
bool stale;
int ret;
- BUG_ON((flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)) ==
- (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE));
-
r.e.data_type = data_type;
r.e.nr_devs = 0;
r.e.nr_required = 1;
return 0;
}
-static int bch2_trans_mark_stripe_alloc_ref(struct btree_trans *trans,
- struct bkey_s_c_stripe s,
- unsigned idx, bool deleting)
+static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
+ struct bkey_s_c_stripe s,
+ unsigned idx, bool deleting)
{
struct bch_fs *c = trans->c;
const struct bch_extent_ptr *ptr = &s.v->ptrs[idx];
- struct bkey_alloc_buf *a;
struct btree_iter iter;
struct bkey_alloc_unpacked u;
- bool parity = idx >= s.v->nr_blocks - s.v->nr_redundant;
+ enum bch_data_type data_type = idx >= s.v->nr_blocks - s.v->nr_redundant
+ ? BCH_DATA_parity : 0;
+ s64 sectors = data_type ? le16_to_cpu(s.v->sectors) : 0;
int ret = 0;
- a = bch2_trans_start_alloc_update(trans, &iter, ptr, &u);
- if (IS_ERR(a))
- return PTR_ERR(a);
-
- if (parity) {
- s64 sectors = le16_to_cpu(s.v->sectors);
+ if (deleting)
+ sectors = -sectors;
- if (deleting)
- sectors = -sectors;
+ ret = bch2_trans_start_alloc_update(trans, &iter, ptr, &u);
+ if (ret)
+ return ret;
- u.dirty_sectors += sectors;
- u.data_type = u.dirty_sectors
- ? BCH_DATA_parity
- : 0;
- }
+ ret = check_bucket_ref(c, s.s_c, ptr, sectors, data_type,
+ u.gen, u.data_type,
+ u.dirty_sectors, u.cached_sectors);
+ if (ret)
+ goto err;
if (!deleting) {
- if (bch2_fs_inconsistent_on(u.stripe && u.stripe != s.k->p.offset, c,
- "bucket %llu:%llu gen %u: multiple stripes using same bucket (%u, %llu)",
+ if (bch2_fs_inconsistent_on(u.stripe ||
+ u.stripe_redundancy, c,
+ "bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)",
iter.pos.inode, iter.pos.offset, u.gen,
+ bch2_data_types[u.data_type],
+ u.dirty_sectors,
u.stripe, s.k->p.offset)) {
ret = -EIO;
goto err;
}
+ if (bch2_fs_inconsistent_on(data_type && u.dirty_sectors, c,
+ "bucket %llu:%llu gen %u data type %s dirty_sectors %u: data already in stripe bucket %llu",
+ iter.pos.inode, iter.pos.offset, u.gen,
+ bch2_data_types[u.data_type],
+ u.dirty_sectors,
+ s.k->p.offset)) {
+ ret = -EIO;
+ goto err;
+ }
+
u.stripe = s.k->p.offset;
u.stripe_redundancy = s.v->nr_redundant;
} else {
+ if (bch2_fs_inconsistent_on(u.stripe != s.k->p.offset ||
+ u.stripe_redundancy != s.v->nr_redundant, c,
+ "bucket %llu:%llu gen %u: not marked as stripe when deleting stripe %llu (got %u)",
+ iter.pos.inode, iter.pos.offset, u.gen,
+ s.k->p.offset, u.stripe)) {
+ ret = -EIO;
+ goto err;
+ }
+
u.stripe = 0;
u.stripe_redundancy = 0;
}
- bch2_alloc_pack(c, a, u);
- bch2_trans_update(trans, &iter, &a->k, 0);
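+ /* sectors is zero for data blocks, so this only touches parity buckets: */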
+ u.dirty_sectors += sectors;
+ if (data_type)
+ u.data_type = !deleting ? data_type : 0;
+
+ ret = bch2_alloc_write(trans, &iter, &u, 0);
+ if (ret)
+ goto err;
err:
bch2_trans_iter_exit(trans, &iter);
return ret;
struct bkey_s_c_stripe old_s = { .k = NULL };
struct bkey_s_c_stripe new_s = { .k = NULL };
struct bch_replicas_padded r;
- unsigned i;
+ unsigned i, nr_blocks;
int ret = 0;
if (old.k->type == KEY_TYPE_stripe)
new_s.v->nr_blocks * sizeof(struct bch_extent_ptr)))
return 0;
+ BUG_ON(new_s.k && old_s.k &&
+ (new_s.v->nr_blocks != old_s.v->nr_blocks ||
+ new_s.v->nr_redundant != old_s.v->nr_redundant));
+
+ nr_blocks = new_s.k ? new_s.v->nr_blocks : old_s.v->nr_blocks;
+
if (new_s.k) {
s64 sectors = le16_to_cpu(new_s.v->sectors);
bch2_bkey_to_replicas(&r.e, new);
update_replicas_list(trans, &r.e, sectors * new_s.v->nr_redundant);
-
- for (i = 0; i < new_s.v->nr_blocks; i++) {
- ret = bch2_trans_mark_stripe_alloc_ref(trans, new_s,
- i, false);
- if (ret)
- return ret;
- }
}
if (old_s.k) {
bch2_bkey_to_replicas(&r.e, old);
update_replicas_list(trans, &r.e, sectors * old_s.v->nr_redundant);
+ }
+
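+ /* only buckets whose stripe pointer changed need their alloc keys updated: */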
+ for (i = 0; i < nr_blocks; i++) {
+ if (new_s.k && old_s.k &&
+ !memcmp(&new_s.v->ptrs[i],
+ &old_s.v->ptrs[i],
+ sizeof(new_s.v->ptrs[i])))
+ continue;
- for (i = 0; i < old_s.v->nr_blocks; i++) {
- ret = bch2_trans_mark_stripe_alloc_ref(trans, old_s,
- i, true);
+ if (new_s.k) {
+ ret = bch2_trans_mark_stripe_bucket(trans, new_s, i, false);
if (ret)
- return ret;
+ break;
+ }
+
+ if (old_s.k) {
+ ret = bch2_trans_mark_stripe_bucket(trans, old_s, i, true);
+ if (ret)
+ break;
}
}
struct bkey_s_c new,
unsigned flags)
{
- int nr = (new.k->type == KEY_TYPE_inode) -
- (old.k->type == KEY_TYPE_inode);
+ int nr = bkey_is_inode(new.k) - bkey_is_inode(old.k);
if (nr) {
struct replicas_delta_list *d =
s64 sectors = (s64) k.k->size;
struct replicas_delta_list *d;
- BUG_ON((flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)) ==
- (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE));
-
if (flags & BTREE_TRIGGER_OVERWRITE)
sectors = -sectors;
sectors *= replicas;
static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
struct bkey_s_c_reflink_p p,
- u64 idx, unsigned flags)
+ u64 *idx, unsigned flags)
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_i *n;
__le64 *refcount;
int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
- s64 ret;
+ char buf[200];
+ int ret;
- bch2_trans_iter_init(trans, &iter, BTREE_ID_reflink, POS(0, idx),
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_reflink, POS(0, *idx),
BTREE_ITER_INTENT|
BTREE_ITER_WITH_UPDATES);
k = bch2_btree_iter_peek_slot(&iter);
refcount = bkey_refcount(n);
if (!refcount) {
+ bch2_bkey_val_to_text(&PBUF(buf), c, p.s_c);
bch2_fs_inconsistent(c,
- "%llu:%llu len %u points to nonexistent indirect extent %llu",
- p.k->p.inode, p.k->p.offset, p.k->size, idx);
- bch2_inconsistent_error(c);
+ "nonexistent indirect extent at %llu while marking\n %s",
+ *idx, buf);
+ ret = -EIO;
+ goto err;
+ }
+
+ if (!*refcount && (flags & BTREE_TRIGGER_OVERWRITE)) {
+ bch2_bkey_val_to_text(&PBUF(buf), c, p.s_c);
+ bch2_fs_inconsistent(c,
+ "indirect extent refcount underflow at %llu while marking\n %s",
+ *idx, buf);
ret = -EIO;
goto err;
}
- BUG_ON(!*refcount && (flags & BTREE_TRIGGER_OVERWRITE));
+ if (flags & BTREE_TRIGGER_INSERT) {
+ struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
+ u64 pad;
+
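+ /*
+ * The indirect extent we took a ref on may extend past this
+ * pointer's range; record the overshoot so those refs can be
+ * dropped when the pointer is deleted:
+ */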
+ pad = max_t(s64, le32_to_cpu(v->front_pad),
+ le64_to_cpu(v->idx) - bkey_start_offset(k.k));
+ BUG_ON(pad > U32_MAX);
+ v->front_pad = cpu_to_le32(pad);
+
+ pad = max_t(s64, le32_to_cpu(v->back_pad),
+ k.k->p.offset - p.k->size - le64_to_cpu(v->idx));
+ BUG_ON(pad > U32_MAX);
+ v->back_pad = cpu_to_le32(pad);
+ }
+
le64_add_cpu(refcount, add);
if (!*refcount) {
if (ret)
goto err;
- ret = k.k->p.offset - idx;
+ *idx = k.k->p.offset;
err:
bch2_trans_iter_exit(trans, &iter);
return ret;
struct bkey_s_c k, unsigned flags)
{
struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
- u64 idx = le64_to_cpu(p.v->idx);
- unsigned sectors = p.k->size;
- s64 ret = 0;
+ u64 idx, end_idx;
+ int ret = 0;
- while (sectors) {
- ret = __bch2_trans_mark_reflink_p(trans, p, idx, flags);
- if (ret < 0)
- return ret;
+ if (flags & BTREE_TRIGGER_INSERT) {
+ struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
- ret = min_t(s64, ret, sectors);
- idx += ret;
- sectors -= ret;
+ v->front_pad = v->back_pad = 0;
}
- return 0;
+ idx = le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad);
+ end_idx = le64_to_cpu(p.v->idx) + p.k->size +
+ le32_to_cpu(p.v->back_pad);
+
+ while (idx < end_idx && !ret)
+ ret = __bch2_trans_mark_reflink_p(trans, p, &idx, flags);
+
+ return ret;
}
int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c old,
struct bkey_s_c new, unsigned flags)
{
- struct bkey_s_c k = flags & BTREE_TRIGGER_INSERT ? new : old;
-
- BUG_ON(!(flags & (BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE)));
+ struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
switch (k.k->type) {
case KEY_TYPE_btree_ptr:
case KEY_TYPE_stripe:
return bch2_trans_mark_stripe(trans, old, new, flags);
case KEY_TYPE_inode:
+ case KEY_TYPE_inode_v2:
return bch2_trans_mark_inode(trans, old, new, flags);
case KEY_TYPE_reservation:
return bch2_trans_mark_reservation(trans, k, flags);
}
}
-int bch2_trans_mark_update(struct btree_trans *trans,
- struct btree_path *path,
- struct bkey_i *new,
- unsigned flags)
-{
- struct bkey _deleted = KEY(0, 0, 0);
- struct bkey_s_c deleted = (struct bkey_s_c) { &_deleted, NULL };
- struct bkey_s_c old;
- struct bkey unpacked;
- int ret;
-
- if (unlikely(flags & BTREE_TRIGGER_NORUN))
- return 0;
-
- if (!btree_node_type_needs_gc(path->btree_id))
- return 0;
-
- old = bch2_btree_path_peek_slot(path, &unpacked);
-
- if (old.k->type == new->k.type &&
- ((1U << old.k->type) & BTREE_TRIGGER_WANTS_OLD_AND_NEW)) {
- ret = bch2_trans_mark_key(trans, old, bkey_i_to_s_c(new),
- BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
- } else {
- ret = bch2_trans_mark_key(trans, deleted, bkey_i_to_s_c(new),
- BTREE_TRIGGER_INSERT|flags) ?:
- bch2_trans_mark_key(trans, old, deleted,
- BTREE_TRIGGER_OVERWRITE|flags);
- }
-
- return ret;
-}
-
static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
struct bch_dev *ca, size_t b,
enum bch_data_type type,
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_alloc_unpacked u;
- struct bkey_alloc_buf *a;
struct bch_extent_ptr ptr = {
.dev = ca->dev_idx,
.offset = bucket_to_sector(ca, b),
if (b >= ca->mi.nbuckets)
return 0;
- a = bch2_trans_start_alloc_update(trans, &iter, &ptr, &u);
- if (IS_ERR(a))
- return PTR_ERR(a);
+ ret = bch2_trans_start_alloc_update(trans, &iter, &ptr, &u);
+ if (ret)
+ return ret;
if (u.data_type && u.data_type != type) {
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
u.data_type = type;
u.dirty_sectors = sectors;
- bch2_alloc_pack(c, a, u);
- bch2_trans_update(trans, &iter, &a->k, 0);
+ ret = bch2_alloc_write(trans, &iter, &u, 0);
out:
bch2_trans_iter_exit(trans, &iter);
return ret;