#include "reflink.h"
#include "replicas.h"
#include "subvolume.h"
+#include "trace.h"
#include <linux/preempt.h>
-#include <trace/events/bcachefs.h>
static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
enum bch_data_type data_type,
} while (read_seqcount_retry(&c->usage_lock, seq));
}
-static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
- unsigned journal_seq,
- bool gc)
-{
- percpu_rwsem_assert_held(&c->mark_lock);
- BUG_ON(!gc && !journal_seq);
-
- return this_cpu_ptr(gc
- ? c->usage_gc
- : c->usage[journal_seq & JOURNAL_BUF_MASK]);
-}
-
u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
{
ssize_t offset = v - (u64 *) c->usage_base;
struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
{
struct bch_fs_usage_online *ret;
- unsigned seq, i, u64s;
+ unsigned nr_replicas = READ_ONCE(c->replicas.nr);
+ unsigned seq, i;
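+
+ /*
+ * Allocate the buffer before taking mark_lock: if the replicas table
+ * grows in the meantime, the check below catches it and we retry with
+ * the new size.
+ */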
+retry:
+ ret = kmalloc(__fs_usage_online_u64s(nr_replicas) * sizeof(u64), GFP_KERNEL);
+ if (unlikely(!ret))
+ return NULL;
percpu_down_read(&c->mark_lock);
- ret = kmalloc(sizeof(struct bch_fs_usage_online) +
- sizeof(u64) * c->replicas.nr, GFP_NOFS);
- if (unlikely(!ret)) {
+ if (nr_replicas != c->replicas.nr) {
+ nr_replicas = c->replicas.nr;
percpu_up_read(&c->mark_lock);
- return NULL;
+ kfree(ret);
+ goto retry;
}
ret->online_reserved = percpu_u64_get(c->online_reserved);
- u64s = fs_usage_u64s(c);
do {
seq = read_seqcount_begin(&c->usage_lock);
- memcpy(&ret->u, c->usage_base, u64s * sizeof(u64));
+ unsafe_memcpy(&ret->u, c->usage_base,
+ __fs_usage_u64s(nr_replicas) * sizeof(u64),
+ "embedded variable length struct");
for (i = 0; i < ARRAY_SIZE(c->usage); i++)
- acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i], u64s);
+ acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i],
+ __fs_usage_u64s(nr_replicas));
} while (read_seqcount_retry(&c->usage_lock, seq));
return ret;
struct bch_replicas_entry *r, s64 sectors,
unsigned journal_seq, bool gc)
{
- struct bch_fs_usage __percpu *fs_usage;
+ struct bch_fs_usage *fs_usage;
int idx, ret = 0;
struct printbuf buf = PRINTBUF;
percpu_down_read(&c->mark_lock);
- buf.atomic++;
idx = bch2_replicas_entry_idx(c, r);
if (idx < 0 &&
return update_replicas(c, k, &r.e, sectors, journal_seq, gc);
}
-static struct replicas_delta_list *
-replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
+static int __replicas_deltas_realloc(struct btree_trans *trans, unsigned more,
+ gfp_t gfp)
{
struct replicas_delta_list *d = trans->fs_usage_deltas;
unsigned new_size = d ? (d->size + more) * 2 : 128;
WARN_ON_ONCE(alloc_size > REPLICAS_DELTA_LIST_MAX);
if (!d || d->used + more > d->size) {
- d = krealloc(d, alloc_size, GFP_NOIO|__GFP_ZERO);
- BUG_ON(!d && alloc_size > REPLICAS_DELTA_LIST_MAX);
- if (!d) {
- d = mempool_alloc(&trans->c->replicas_delta_pool, GFP_NOIO);
+ d = krealloc(d, alloc_size, gfp|__GFP_ZERO);
+ if (unlikely(!d)) {
+ if (alloc_size > REPLICAS_DELTA_LIST_MAX)
+ return -ENOMEM;
+
+ d = mempool_alloc(&trans->c->replicas_delta_pool, gfp);
+ if (!d)
+ return -ENOMEM;
memset(d, 0, REPLICAS_DELTA_LIST_MAX);
if (trans->fs_usage_deltas)
d->size = new_size;
trans->fs_usage_deltas = d;
}
- return d;
+
+ return 0;
+}
+
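+/*
+ * allocate_dropping_locks_errcode() first attempts the allocation with
+ * GFP_NOWAIT, and only drops btree locks to retry with GFP_KERNEL if
+ * that fails; hence the gfp parameter threaded through above.
+ */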
+int bch2_replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
+{
+ return allocate_dropping_locks_errcode(trans,
+ __replicas_deltas_realloc(trans, more, _gfp));
}
-static inline void update_replicas_list(struct btree_trans *trans,
+static inline int update_replicas_list(struct btree_trans *trans,
struct bch_replicas_entry *r,
s64 sectors)
{
struct replicas_delta_list *d;
struct replicas_delta *n;
unsigned b;
+ int ret;
if (!sectors)
- return;
+ return 0;
b = replicas_entry_bytes(r) + 8;
- d = replicas_deltas_realloc(trans, b);
+ ret = bch2_replicas_deltas_realloc(trans, b);
+ if (ret)
+ return ret;
+ d = trans->fs_usage_deltas;
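+
+ /* entries are variable length: an 8 byte delta followed by the replicas entry */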
n = (void *) d->d + d->used;
n->delta = sectors;
- memcpy((void *) n + offsetof(struct replicas_delta, r),
- r, replicas_entry_bytes(r));
+ unsafe_memcpy((void *) n + offsetof(struct replicas_delta, r),
+ r, replicas_entry_bytes(r),
+ "flexible array member embedded in strcuct with padding");
bch2_replicas_entry_sort(&n->r);
d->used += b;
+ return 0;
}
-static inline void update_cached_sectors_list(struct btree_trans *trans,
+static inline int update_cached_sectors_list(struct btree_trans *trans,
unsigned dev, s64 sectors)
{
struct bch_replicas_padded r;
bch2_replicas_entry_cached(&r.e, dev);
- update_replicas_list(trans, &r.e, sectors);
+ return update_replicas_list(trans, &r.e, sectors);
}
int bch2_mark_alloc(struct btree_trans *trans,
+ enum btree_id btree, unsigned level,
struct bkey_s_c old, struct bkey_s_c new,
unsigned flags)
{
bool gc = flags & BTREE_TRIGGER_GC;
u64 journal_seq = trans->journal_res.seq;
+ u64 bucket_journal_seq;
struct bch_fs *c = trans->c;
- struct bch_alloc_v4 old_a, new_a;
+ struct bch_alloc_v4 old_a_convert, new_a_convert;
+ const struct bch_alloc_v4 *old_a, *new_a;
struct bch_dev *ca;
int ret = 0;
ca = bch_dev_bkey_exists(c, new.k->p.inode);
- bch2_alloc_to_v4(old, &old_a);
- bch2_alloc_to_v4(new, &new_a);
+ old_a = bch2_alloc_to_v4(old, &old_a_convert);
+ new_a = bch2_alloc_to_v4(new, &new_a_convert);
+
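+ /* may be updated below, if the bucket emptied and needs no journal flush: */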
+ bucket_journal_seq = new_a->journal_seq;
if ((flags & BTREE_TRIGGER_INSERT) &&
- data_type_is_empty(old_a.data_type) !=
- data_type_is_empty(new_a.data_type) &&
+ data_type_is_empty(old_a->data_type) !=
+ data_type_is_empty(new_a->data_type) &&
new.k->type == KEY_TYPE_alloc_v4) {
struct bch_alloc_v4 *v = (struct bch_alloc_v4 *) new.v;
- BUG_ON(!journal_seq);
+ EBUG_ON(!journal_seq);
/*
* If the btree updates referring to a bucket weren't flushed
* before the bucket became empty again, then we don't have
* to wait on a journal flush before we can reuse the bucket:
*/
- new_a.journal_seq = data_type_is_empty(new_a.data_type) &&
+ v->journal_seq = bucket_journal_seq =
+ data_type_is_empty(new_a->data_type) &&
(journal_seq == v->journal_seq ||
bch2_journal_noflush_seq(&c->journal, v->journal_seq))
? 0 : journal_seq;
- v->journal_seq = new_a.journal_seq;
}
- if (!data_type_is_empty(old_a.data_type) &&
- data_type_is_empty(new_a.data_type) &&
- new_a.journal_seq) {
+ if (!data_type_is_empty(old_a->data_type) &&
+ data_type_is_empty(new_a->data_type) &&
+ bucket_journal_seq) {
ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
c->journal.flushed_seq_ondisk,
new.k->p.inode, new.k->p.offset,
- new_a.journal_seq);
+ bucket_journal_seq);
if (ret) {
bch2_fs_fatal_error(c,
"error setting bucket_needs_journal_commit: %i", ret);
}
percpu_down_read(&c->mark_lock);
- if (!gc && new_a.gen != old_a.gen)
- *bucket_gen(ca, new.k->p.offset) = new_a.gen;
+ if (!gc && new_a->gen != old_a->gen)
+ *bucket_gen(ca, new.k->p.offset) = new_a->gen;
- bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);
+ bch2_dev_usage_update(c, ca, *old_a, *new_a, journal_seq, gc);
if (gc) {
struct bucket *g = gc_bucket(ca, new.k->p.offset);
bucket_lock(g);
g->gen_valid = 1;
- g->gen = new_a.gen;
- g->data_type = new_a.data_type;
- g->stripe = new_a.stripe;
- g->stripe_redundancy = new_a.stripe_redundancy;
- g->dirty_sectors = new_a.dirty_sectors;
- g->cached_sectors = new_a.cached_sectors;
+ g->gen = new_a->gen;
+ g->data_type = new_a->data_type;
+ g->stripe = new_a->stripe;
+ g->stripe_redundancy = new_a->stripe_redundancy;
+ g->dirty_sectors = new_a->dirty_sectors;
+ g->cached_sectors = new_a->cached_sectors;
bucket_unlock(g);
}
*/
if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
- old_a.cached_sectors) {
+ old_a->cached_sectors) {
ret = update_cached_sectors(c, new, ca->dev_idx,
- -((s64) old_a.cached_sectors),
+ -((s64) old_a->cached_sectors),
journal_seq, gc);
if (ret) {
bch2_fs_fatal_error(c, "%s(): no replicas entry while updating cached sectors",
}
}
- if (new_a.data_type == BCH_DATA_free &&
- (!new_a.journal_seq || new_a.journal_seq < c->journal.flushed_seq_ondisk))
+ if (new_a->data_type == BCH_DATA_free &&
+ (!new_a->journal_seq || new_a->journal_seq < c->journal.flushed_seq_ondisk))
closure_wake_up(&c->freelist_wait);
- if (new_a.data_type == BCH_DATA_need_discard &&
- (!new_a.journal_seq || new_a.journal_seq < c->journal.flushed_seq_ondisk))
+ if (new_a->data_type == BCH_DATA_need_discard &&
+ (!bucket_journal_seq || bucket_journal_seq < c->journal.flushed_seq_ondisk))
bch2_do_discards(c);
- if (old_a.data_type != BCH_DATA_cached &&
- new_a.data_type == BCH_DATA_cached &&
+ if (old_a->data_type != BCH_DATA_cached &&
+ new_a->data_type == BCH_DATA_cached &&
should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
bch2_do_invalidates(c);
- if (new_a.data_type == BCH_DATA_need_gc_gens)
+ if (new_a->data_type == BCH_DATA_need_gc_gens)
bch2_do_gc_gens(c);
return 0;
return ret;
}
-static int check_bucket_ref(struct bch_fs *c,
+static int check_bucket_ref(struct btree_trans *trans,
struct bkey_s_c k,
const struct bch_extent_ptr *ptr,
s64 sectors, enum bch_data_type ptr_data_type,
u8 b_gen, u8 bucket_data_type,
u32 dirty_sectors, u32 cached_sectors)
{
+ struct bch_fs *c = trans->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
- u16 bucket_sectors = !ptr->cached
+ u32 bucket_sectors = !ptr->cached
? dirty_sectors
: cached_sectors;
struct printbuf buf = PRINTBUF;
if (b_gen != ptr->gen) {
ret = 1;
- goto err;
+ goto out;
}
if (!data_type_is_empty(bucket_data_type) &&
goto err;
}
- if ((unsigned) (bucket_sectors + sectors) > U32_MAX) {
+ if ((u64) bucket_sectors + sectors > U32_MAX) {
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
- "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U16_MAX\n"
+ "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX\n"
"while marking %s",
ptr->dev, bucket_nr, b_gen,
bch2_data_types[bucket_data_type ?: ptr_data_type],
ret = -EIO;
goto err;
}
-err:
+out:
printbuf_exit(&buf);
return ret;
+err:
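+ /* dump the transaction's pending updates to aid debugging */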
+ bch2_dump_trans_updates(trans);
+ goto out;
}
static int mark_stripe_bucket(struct btree_trans *trans,
const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
unsigned nr_data = s->nr_blocks - s->nr_redundant;
bool parity = ptr_idx >= nr_data;
- enum bch_data_type data_type = parity ? BCH_DATA_parity : 0;
+ enum bch_data_type data_type = parity ? BCH_DATA_parity : BCH_DATA_stripe;
s64 sectors = parity ? le16_to_cpu(s->sectors) : 0;
const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
/* XXX doesn't handle deletion */
percpu_down_read(&c->mark_lock);
- buf.atomic++;
g = PTR_GC_BUCKET(ca, ptr);
if (g->dirty_sectors ||
bucket_lock(g);
old = *g;
- ret = check_bucket_ref(c, k, ptr, sectors, data_type,
+ ret = check_bucket_ref(trans, k, ptr, sectors, data_type,
g->gen, g->data_type,
g->dirty_sectors, g->cached_sectors);
if (ret)
goto err;
- if (data_type)
- g->data_type = data_type;
+ g->data_type = data_type;
g->dirty_sectors += sectors;
g->stripe = k.k->p.offset;
u32 *dst_sectors = !ptr->cached
? dirty_sectors
: cached_sectors;
- int ret = check_bucket_ref(trans->c, k, ptr, sectors, ptr_data_type,
+ int ret = check_bucket_ref(trans, k, ptr, sectors, ptr_data_type,
bucket_gen, *bucket_data_type,
*dirty_sectors, *cached_sectors);
}
static int bch2_mark_pointer(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
struct bkey_s_c k,
struct extent_ptr_decoded p,
- s64 sectors, enum bch_data_type data_type,
+ s64 sectors,
unsigned flags)
{
u64 journal_seq = trans->journal_res.seq;
struct bch_fs *c = trans->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
struct bucket old, new, *g;
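+ /* data type (btree node vs user data vs stripe) follows from the key's btree position */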
+ enum bch_data_type data_type = bkey_ptr_data_type(btree_id, level, k, p);
u8 bucket_data_type;
int ret = 0;
if (!m) {
bch_err(c, "error allocating memory for gc_stripes, idx %llu",
(u64) p.idx);
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_mark_stripe_ptr;
}
- spin_lock(&c->ec_stripes_heap_lock);
+ mutex_lock(&c->ec_stripes_heap_lock);
if (!m || !m->alive) {
- spin_unlock(&c->ec_stripes_heap_lock);
+ mutex_unlock(&c->ec_stripes_heap_lock);
bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
(u64) p.idx);
bch2_inconsistent_error(c);
m->block_sectors[p.block] += sectors;
r = m->r;
- spin_unlock(&c->ec_stripes_heap_lock);
+ mutex_unlock(&c->ec_stripes_heap_lock);
r.e.data_type = data_type;
update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, true);
return 0;
}
-int bch2_mark_extent(struct btree_trans *trans,
- struct bkey_s_c old, struct bkey_s_c new,
- unsigned flags)
+static int __mark_extent(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k, unsigned flags)
{
u64 journal_seq = trans->journal_res.seq;
struct bch_fs *c = trans->c;
- struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
if (flags & BTREE_TRIGGER_OVERWRITE)
disk_sectors = -disk_sectors;
- ret = bch2_mark_pointer(trans, k, p, disk_sectors,
- data_type, flags);
+ ret = bch2_mark_pointer(trans, btree_id, level, k, p, disk_sectors, flags);
if (ret < 0)
return ret;
return 0;
}
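+
+/*
+ * Runs the single-key helper twice: once for the old key as an
+ * overwrite, then once for the new key as an insert.
+ */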
+int bch2_mark_extent(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
+{
+ return mem_trigger_run_overwrite_then_insert(__mark_extent, trans, btree_id, level, old, new, flags);
+}
+
int bch2_mark_stripe(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
struct bkey_s_c old, struct bkey_s_c new,
unsigned flags)
{
if (!gc) {
struct stripe *m = genradix_ptr(&c->stripes, idx);
- if (!m || (old_s && !m->alive)) {
+ if (!m) {
struct printbuf buf1 = PRINTBUF;
struct printbuf buf2 = PRINTBUF;
}
if (!new_s) {
- spin_lock(&c->ec_stripes_heap_lock);
bch2_stripes_heap_del(c, m, idx);
- spin_unlock(&c->ec_stripes_heap_lock);
memset(m, 0, sizeof(*m));
} else {
- m->alive = true;
m->sectors = le16_to_cpu(new_s->sectors);
m->algorithm = new_s->algorithm;
m->nr_blocks = new_s->nr_blocks;
for (i = 0; i < new_s->nr_blocks; i++)
m->blocks_nonempty += !!stripe_blockcount_get(new_s, i);
- spin_lock(&c->ec_stripes_heap_lock);
- bch2_stripes_heap_update(c, m, idx);
- spin_unlock(&c->ec_stripes_heap_lock);
+ if (!old_s)
+ bch2_stripes_heap_insert(c, m, idx);
+ else
+ bch2_stripes_heap_update(c, m, idx);
}
} else {
struct gc_stripe *m =
if (!m) {
bch_err(c, "error allocating memory for gc_stripes, idx %llu",
idx);
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_mark_stripe;
}
/*
* This will be wrong when we bring back runtime gc: we should
return 0;
}
-int bch2_mark_inode(struct btree_trans *trans,
- struct bkey_s_c old, struct bkey_s_c new,
- unsigned flags)
-{
- struct bch_fs *c = trans->c;
- struct bch_fs_usage __percpu *fs_usage;
- u64 journal_seq = trans->journal_res.seq;
-
- if (flags & BTREE_TRIGGER_INSERT) {
- struct bch_inode_v3 *v = (struct bch_inode_v3 *) new.v;
-
- BUG_ON(!journal_seq);
- BUG_ON(new.k->type != KEY_TYPE_inode_v3);
-
- v->bi_journal_seq = cpu_to_le64(journal_seq);
- }
-
- if (flags & BTREE_TRIGGER_GC) {
- percpu_down_read(&c->mark_lock);
- preempt_disable();
-
- fs_usage = fs_usage_ptr(c, journal_seq, flags & BTREE_TRIGGER_GC);
- fs_usage->nr_inodes += bkey_is_inode(new.k);
- fs_usage->nr_inodes -= bkey_is_inode(old.k);
-
- preempt_enable();
- percpu_up_read(&c->mark_lock);
- }
- return 0;
-}
-
-int bch2_mark_reservation(struct btree_trans *trans,
- struct bkey_s_c old, struct bkey_s_c new,
- unsigned flags)
+static int __mark_reservation(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k, unsigned flags)
{
struct bch_fs *c = trans->c;
- struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
- struct bch_fs_usage __percpu *fs_usage;
+ struct bch_fs_usage *fs_usage;
unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
s64 sectors = (s64) k.k->size;
return 0;
}
+int bch2_mark_reservation(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
+{
+ return mem_trigger_run_overwrite_then_insert(__mark_reservation, trans, btree_id, level, old, new, flags);
+}
+
static s64 __bch2_mark_reflink_p(struct btree_trans *trans,
struct bkey_s_c_reflink_p p,
u64 start, u64 end,
" missing range %llu-%llu",
(bch2_bkey_val_to_text(&buf, c, p.s_c), buf.buf),
*idx, next_idx)) {
- struct bkey_i_error new;
+ struct bkey_i_error *new;
+
+ new = bch2_trans_kmalloc(trans, sizeof(*new));
+ ret = PTR_ERR_OR_ZERO(new);
+ if (ret)
+ goto err;
- bkey_init(&new.k);
- new.k.type = KEY_TYPE_error;
- new.k.p = bkey_start_pos(p.k);
- new.k.p.offset += *idx - start;
- bch2_key_resize(&new.k, next_idx - *idx);
- ret = __bch2_btree_insert(trans, BTREE_ID_extents, &new.k_i);
+ bkey_init(&new->k);
+ new->k.type = KEY_TYPE_error;
+ new->k.p = bkey_start_pos(p.k);
+ new->k.p.offset += *idx - start;
+ bch2_key_resize(&new->k, next_idx - *idx);
+ ret = bch2_btree_insert_trans(trans, BTREE_ID_extents, &new->k_i,
+ BTREE_TRIGGER_NORUN);
}
*idx = next_idx;
+err:
fsck_err:
printbuf_exit(&buf);
return ret;
}
-int bch2_mark_reflink_p(struct btree_trans *trans,
- struct bkey_s_c old, struct bkey_s_c new,
- unsigned flags)
+static int __mark_reflink_p(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k, unsigned flags)
{
struct bch_fs *c = trans->c;
- struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
struct reflink_gc *ref;
size_t l, r, m;
BUG_ON(!(flags & BTREE_TRIGGER_GC));
- if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix) {
+ if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_reflink_p_fix) {
idx -= le32_to_cpu(p.v->front_pad);
end += le32_to_cpu(p.v->back_pad);
}
return ret;
}
-static noinline __cold
-void fs_usage_apply_warn(struct btree_trans *trans,
- unsigned disk_res_sectors,
- s64 should_not_have_added)
+int bch2_mark_reflink_p(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
+{
+ return mem_trigger_run_overwrite_then_insert(__mark_reflink_p, trans, btree_id, level, old, new, flags);
+}
+
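+/*
+ * Back out a delta list that was already applied to the in-memory usage
+ * counters: subtract each replicas delta and return the sectors that had
+ * been taken out of the disk reservation.
+ */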
+void bch2_trans_fs_usage_revert(struct btree_trans *trans,
+ struct replicas_delta_list *deltas)
{
struct bch_fs *c = trans->c;
- struct btree_insert_entry *i;
- struct printbuf buf = PRINTBUF;
+ struct bch_fs_usage *dst;
+ struct replicas_delta *d, *top = (void *) deltas->d + deltas->used;
+ s64 added = 0;
+ unsigned i;
- prt_printf(&buf,
- bch2_fmt(c, "disk usage increased %lli more than %u sectors reserved)"),
- should_not_have_added, disk_res_sectors);
+ percpu_down_read(&c->mark_lock);
+ preempt_disable();
+ dst = fs_usage_ptr(c, trans->journal_res.seq, false);
- trans_for_each_update(trans, i) {
- struct bkey_s_c old = { &i->old_k, i->old_v };
+ /* revert changes: */
+ for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
+ switch (d->r.data_type) {
+ case BCH_DATA_btree:
+ case BCH_DATA_user:
+ case BCH_DATA_parity:
+ added += d->delta;
+ }
+ BUG_ON(__update_replicas(c, dst, &d->r, -d->delta));
+ }
- prt_str(&buf, "new ");
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(i->k));
- prt_newline(&buf);
+ dst->nr_inodes -= deltas->nr_inodes;
- prt_str(&buf, "old ");
- bch2_bkey_val_to_text(&buf, c, old);
- prt_newline(&buf);
+ for (i = 0; i < BCH_REPLICAS_MAX; i++) {
+ added -= deltas->persistent_reserved[i];
+ dst->reserved -= deltas->persistent_reserved[i];
+ dst->persistent_reserved[i] -= deltas->persistent_reserved[i];
}
- __WARN();
- bch2_print_string_as_lines(KERN_ERR, buf.buf);
- printbuf_exit(&buf);
+ if (added > 0) {
+ trans->disk_res->sectors += added;
+ this_cpu_add(*c->online_reserved, added);
+ }
+
+ preempt_enable();
+ percpu_up_read(&c->mark_lock);
}
int bch2_trans_fs_usage_apply(struct btree_trans *trans,
static int warned_disk_usage = 0;
bool warn = false;
unsigned disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
- struct replicas_delta *d = deltas->d, *d2;
+ struct replicas_delta *d, *d2;
struct replicas_delta *top = (void *) deltas->d + deltas->used;
struct bch_fs_usage *dst;
s64 added = 0, should_not_have_added;
percpu_up_read(&c->mark_lock);
if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
- fs_usage_apply_warn(trans, disk_res_sectors, should_not_have_added);
+ bch2_trans_inconsistent(trans,
+ "disk usage increased %lli more than %u sectors reserved)",
+ should_not_have_added, disk_res_sectors);
return 0;
need_mark:
/* revert changes: */
/* trans_mark: */
-static int bch2_trans_mark_pointer(struct btree_trans *trans,
+static inline int bch2_trans_mark_pointer(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c k, struct extent_ptr_decoded p,
unsigned flags)
bool insert = !(flags & BTREE_TRIGGER_OVERWRITE);
struct btree_iter iter;
struct bkey_i_alloc_v4 *a;
- struct bpos bucket_pos;
+ struct bpos bucket;
struct bch_backpointer bp;
s64 sectors;
int ret;
- bch2_extent_ptr_to_bp(trans->c, btree_id, level, k, p, &bucket_pos, &bp);
+ bch2_extent_ptr_to_bp(trans->c, btree_id, level, k, p, &bucket, &bp);
sectors = bp.bucket_len;
if (!insert)
sectors = -sectors;
- a = bch2_trans_start_alloc_update(trans, &iter, bucket_pos);
+ a = bch2_trans_start_alloc_update(trans, &iter, bucket);
if (IS_ERR(a))
return PTR_ERR(a);
ret = __mark_pointer(trans, k, &p.ptr, sectors, bp.data_type,
a->v.gen, &a->v.data_type,
- &a->v.dirty_sectors, &a->v.cached_sectors);
+ &a->v.dirty_sectors, &a->v.cached_sectors) ?:
+ bch2_trans_update(trans, &iter, &a->k_i, 0);
+ bch2_trans_iter_exit(trans, &iter);
+
if (ret)
- goto err;
+ return ret;
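+
+ /* cached pointers aren't tracked by backpointers */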
if (!p.ptr.cached) {
- ret = insert
- ? bch2_bucket_backpointer_add(trans, a, bp, k)
- : bch2_bucket_backpointer_del(trans, a, bp, k);
+ ret = bch2_bucket_backpointer_mod(trans, bucket, bp, k, insert);
if (ret)
- goto err;
+ return ret;
}
- ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
+ return 0;
}
static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
s64 sectors, enum bch_data_type data_type)
{
struct btree_iter iter;
- struct bkey_s_c k;
struct bkey_i_stripe *s;
struct bch_replicas_padded r;
int ret = 0;
- bch2_trans_iter_init(trans, &iter, BTREE_ID_stripes, POS(0, p.ec.idx),
- BTREE_ITER_INTENT|
- BTREE_ITER_WITH_UPDATES);
- k = bch2_btree_iter_peek_slot(&iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- if (k.k->type != KEY_TYPE_stripe) {
- bch2_trans_inconsistent(trans,
+ s = bch2_bkey_get_mut_typed(trans, &iter,
+ BTREE_ID_stripes, POS(0, p.ec.idx),
+ BTREE_ITER_WITH_UPDATES, stripe);
+ ret = PTR_ERR_OR_ZERO(s);
+ if (unlikely(ret)) {
+ bch2_trans_inconsistent_on(bch2_err_matches(ret, ENOENT), trans,
"pointer to nonexistent stripe %llu",
(u64) p.ec.idx);
- ret = -EIO;
goto err;
}
- if (!bch2_ptr_matches_stripe(bkey_s_c_to_stripe(k).v, p)) {
+ if (!bch2_ptr_matches_stripe(&s->v, p)) {
bch2_trans_inconsistent(trans,
"stripe pointer doesn't match stripe %llu",
(u64) p.ec.idx);
goto err;
}
- s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
- ret = PTR_ERR_OR_ZERO(s);
- if (ret)
- goto err;
-
- bkey_reassemble(&s->k_i, k);
stripe_blockcount_set(&s->v, p.ec.block,
stripe_blockcount_get(&s->v, p.ec.block) +
sectors);
- ret = bch2_trans_update(trans, &iter, &s->k_i, 0);
- if (ret)
- goto err;
-
bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
r.e.data_type = data_type;
- update_replicas_list(trans, &r.e, sectors);
+ ret = update_replicas_list(trans, &r.e, sectors);
err:
bch2_trans_iter_exit(trans, &iter);
return ret;
}
-int bch2_trans_mark_extent(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c old, struct bkey_i *new,
- unsigned flags)
+static int __trans_mark_extent(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k, unsigned flags)
{
struct bch_fs *c = trans->c;
- struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
- ? old
- : bkey_i_to_s_c(new);
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
: k.k->size;
s64 dirty_sectors = 0;
bool stale;
- int ret;
+ int ret = 0;
r.e.data_type = data_type;
r.e.nr_devs = 0;
stale = ret > 0;
if (p.ptr.cached) {
- if (!stale)
- update_cached_sectors_list(trans, p.ptr.dev,
- disk_sectors);
+ if (!stale) {
+ ret = update_cached_sectors_list(trans, p.ptr.dev,
+ disk_sectors);
+ if (ret)
+ return ret;
+ }
} else if (!p.has_ec) {
dirty_sectors += disk_sectors;
r.e.devs[r.e.nr_devs++] = p.ptr.dev;
}
if (r.e.nr_devs)
- update_replicas_list(trans, &r.e, dirty_sectors);
+ ret = update_replicas_list(trans, &r.e, dirty_sectors);
- return 0;
+ return ret;
+}
+
+int bch2_trans_mark_extent(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_i *new,
+ unsigned flags)
+{
+ struct bch_fs *c = trans->c;
+ int mod = (int) bch2_bkey_needs_rebalance(c, bkey_i_to_s_c(new)) -
+ (int) bch2_bkey_needs_rebalance(c, old);
+
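+ /*
+ * mod > 0: the new key needs rebalancing and the old one didn't, so
+ * set its bit in the rebalance_work btree; mod < 0 clears it.
+ */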
+ if (mod) {
+ int ret = bch2_btree_bit_mod(trans, BTREE_ID_rebalance_work, new->k.p, mod > 0);
+ if (ret)
+ return ret;
+ }
+
+ return trigger_run_overwrite_then_insert(__trans_mark_extent, trans, btree_id, level, old, new, flags);
}
static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
if (IS_ERR(a))
return PTR_ERR(a);
- ret = check_bucket_ref(c, s.s_c, ptr, sectors, data_type,
+ ret = check_bucket_ref(trans, s.s_c, ptr, sectors, data_type,
a->v.gen, a->v.data_type,
a->v.dirty_sectors, a->v.cached_sectors);
if (ret)
a->v.stripe = s.k->p.offset;
a->v.stripe_redundancy = s.v->nr_redundant;
+ a->v.data_type = BCH_DATA_stripe;
} else {
if (bch2_trans_inconsistent_on(a->v.stripe != s.k->p.offset ||
a->v.stripe_redundancy != s.v->nr_redundant, trans,
a->v.stripe = 0;
a->v.stripe_redundancy = 0;
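+ /* bucket is no longer a stripe bucket: recompute its data type */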
+ a->v.data_type = alloc_data_type(a->v, BCH_DATA_user);
}
a->v.dirty_sectors += sectors;
s64 sectors = le16_to_cpu(new_s->sectors);
bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(new));
- update_replicas_list(trans, &r.e, sectors * new_s->nr_redundant);
+ ret = update_replicas_list(trans, &r.e, sectors * new_s->nr_redundant);
+ if (ret)
+ return ret;
}
if (old_s) {
s64 sectors = -((s64) le16_to_cpu(old_s->sectors));
bch2_bkey_to_replicas(&r.e, old);
- update_replicas_list(trans, &r.e, sectors * old_s->nr_redundant);
+ ret = update_replicas_list(trans, &r.e, sectors * old_s->nr_redundant);
+ if (ret)
+ return ret;
}
for (i = 0; i < nr_blocks; i++) {
return ret;
}
-int bch2_trans_mark_inode(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c old,
- struct bkey_i *new,
- unsigned flags)
-{
- int nr = bkey_is_inode(&new->k) - bkey_is_inode(old.k);
-
- if (nr) {
- struct replicas_delta_list *d =
- replicas_deltas_realloc(trans, 0);
- d->nr_inodes += nr;
- }
-
- return 0;
-}
-
-int bch2_trans_mark_reservation(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c old,
- struct bkey_i *new,
- unsigned flags)
+static int __trans_mark_reservation(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k, unsigned flags)
{
- struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
- ? old
- : bkey_i_to_s_c(new);
unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
s64 sectors = (s64) k.k->size;
struct replicas_delta_list *d;
+ int ret;
if (flags & BTREE_TRIGGER_OVERWRITE)
sectors = -sectors;
sectors *= replicas;
- d = replicas_deltas_realloc(trans, 0);
+ ret = bch2_replicas_deltas_realloc(trans, 0);
+ if (ret)
+ return ret;
+ d = trans->fs_usage_deltas;
replicas = clamp_t(unsigned, replicas, 1,
ARRAY_SIZE(d->persistent_reserved));
return 0;
}
-static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
+int bch2_trans_mark_reservation(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old,
+ struct bkey_i *new,
+ unsigned flags)
+{
+ return trigger_run_overwrite_then_insert(__trans_mark_reservation, trans, btree_id, level, old, new, flags);
+}
+
+static int trans_mark_reflink_p_segment(struct btree_trans *trans,
struct bkey_s_c_reflink_p p,
u64 *idx, unsigned flags)
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
- struct bkey_s_c k;
- struct bkey_i *n;
+ struct bkey_i *k;
__le64 *refcount;
int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
struct printbuf buf = PRINTBUF;
int ret;
- bch2_trans_iter_init(trans, &iter, BTREE_ID_reflink, POS(0, *idx),
- BTREE_ITER_INTENT|
- BTREE_ITER_WITH_UPDATES);
- k = bch2_btree_iter_peek_slot(&iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
- ret = PTR_ERR_OR_ZERO(n);
+ k = bch2_bkey_get_mut_noupdate(trans, &iter,
+ BTREE_ID_reflink, POS(0, *idx),
+ BTREE_ITER_WITH_UPDATES);
+ ret = PTR_ERR_OR_ZERO(k);
if (ret)
goto err;
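+
+ /* only reflink_v and indirect_inline_data keys carry a refcount */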
- bkey_reassemble(n, k);
-
- refcount = bkey_refcount(n);
+ refcount = bkey_refcount(k);
if (!refcount) {
bch2_bkey_val_to_text(&buf, c, p.s_c);
bch2_trans_inconsistent(trans,
u64 pad;
pad = max_t(s64, le32_to_cpu(v->front_pad),
- le64_to_cpu(v->idx) - bkey_start_offset(k.k));
+ le64_to_cpu(v->idx) - bkey_start_offset(&k->k));
BUG_ON(pad > U32_MAX);
v->front_pad = cpu_to_le32(pad);
pad = max_t(s64, le32_to_cpu(v->back_pad),
- k.k->p.offset - p.k->size - le64_to_cpu(v->idx));
+ k->k.p.offset - p.k->size - le64_to_cpu(v->idx));
BUG_ON(pad > U32_MAX);
v->back_pad = cpu_to_le32(pad);
}
le64_add_cpu(refcount, add);
bch2_btree_iter_set_pos_to_extent_start(&iter);
- ret = bch2_trans_update(trans, &iter, n, 0);
+ ret = bch2_trans_update(trans, &iter, k, 0);
if (ret)
goto err;
- *idx = k.k->p.offset;
+ *idx = k->k.p.offset;
err:
bch2_trans_iter_exit(trans, &iter);
printbuf_exit(&buf);
return ret;
}
-int bch2_trans_mark_reflink_p(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c old,
- struct bkey_i *new,
- unsigned flags)
+static int __trans_mark_reflink_p(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k, unsigned flags)
{
- struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
- ? old
- : bkey_i_to_s_c(new);
struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
u64 idx, end_idx;
int ret = 0;
- if (flags & BTREE_TRIGGER_INSERT) {
- struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
-
- v->front_pad = v->back_pad = 0;
- }
-
idx = le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad);
end_idx = le64_to_cpu(p.v->idx) + p.k->size +
le32_to_cpu(p.v->back_pad);
while (idx < end_idx && !ret)
- ret = __bch2_trans_mark_reflink_p(trans, p, &idx, flags);
-
+ ret = trans_mark_reflink_p_segment(trans, p, &idx, flags);
return ret;
}
+int bch2_trans_mark_reflink_p(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old,
+ struct bkey_i *new,
+ unsigned flags)
+{
+ if (flags & BTREE_TRIGGER_INSERT) {
+ struct bch_reflink_p *v = &bkey_i_to_reflink_p(new)->v;
+
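+ /* trans_mark_reflink_p_segment() will recompute the pads: */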
+ v->front_pad = v->back_pad = 0;
+ }
+
+ return trigger_run_overwrite_then_insert(__trans_mark_reflink_p, trans, btree_id, level, old, new, flags);
+}
+
static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
struct bch_dev *ca, size_t b,
enum bch_data_type type,
if (IS_ERR(a))
return PTR_ERR(a);
- if (a->v.data_type && a->v.data_type != type) {
+ if (a->v.data_type && type && a->v.data_type != type) {
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
"bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
"while marking %s",
bch2_data_types[type],
bch2_data_types[type]);
ret = -EIO;
- goto out;
+ goto err;
}
- a->v.data_type = type;
- a->v.dirty_sectors = sectors;
-
- ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
- if (ret)
- goto out;
-out:
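+ /* skip the update entirely if nothing changed */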
+ if (a->v.data_type != type ||
+ a->v.dirty_sectors != sectors) {
+ a->v.data_type = type;
+ a->v.dirty_sectors = sectors;
+ ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
+ }
+err:
bch2_trans_iter_exit(trans, &iter);
return ret;
}
int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
{
- return bch2_trans_run(c, __bch2_trans_mark_dev_sb(&trans, ca));
+ int ret = bch2_trans_run(c, __bch2_trans_mark_dev_sb(trans, ca));
+
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
+}
+
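+/*
+ * Mark superblock and journal buckets on every online member device,
+ * bailing on the first error; for_each_online_member() holds a ref on
+ * the device, which must be dropped on the early return.
+ */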
+int bch2_trans_mark_dev_sbs(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ unsigned i;
+
+ for_each_online_member(ca, c, i) {
+ int ret = bch2_trans_mark_dev_sb(c, ca);
+ if (ret) {
+ percpu_ref_put(&ca->ref);
+ return ret;
+ }
+ }
+
+ return 0;
}
/* Disk reservations: */
struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
unsigned long *buckets_nouse = NULL;
bool resize = ca->bucket_gens != NULL;
- int ret = -ENOMEM;
+ int ret;
if (!(bucket_gens = kvpmalloc(sizeof(struct bucket_gens) + nbuckets,
- GFP_KERNEL|__GFP_ZERO)) ||
- (c->opts.buckets_nouse &&
+ GFP_KERNEL|__GFP_ZERO))) {
+ ret = -BCH_ERR_ENOMEM_bucket_gens;
+ goto err;
+ }
+
+ if ((c->opts.buckets_nouse &&
!(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
sizeof(unsigned long),
- GFP_KERNEL|__GFP_ZERO))))
+ GFP_KERNEL|__GFP_ZERO)))) {
+ ret = -BCH_ERR_ENOMEM_buckets_nouse;
goto err;
+ }
bucket_gens->first_bucket = ca->mi.first_bucket;
bucket_gens->nbuckets = nbuckets;
ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
if (!ca->usage_base)
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_usage_init;
for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
ca->usage[i] = alloc_percpu(struct bch_dev_usage);
if (!ca->usage[i])
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_usage_init;
}
return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);