#include "reflink.h"
#include "replicas.h"
#include "subvolume.h"
+#include "trace.h"
#include <linux/preempt.h>
-#include <trace/events/bcachefs.h>
static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
enum bch_data_type data_type,
}
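+/*
+ * Mark triggers now take the btree ID and level of the key being marked, so
+ * the pointer data type can be derived from the key itself instead of being
+ * passed in by the caller:
+ */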
int bch2_mark_alloc(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
struct bkey_s_c old, struct bkey_s_c new,
unsigned flags)
{
const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
unsigned nr_data = s->nr_blocks - s->nr_redundant;
bool parity = ptr_idx >= nr_data;
- enum bch_data_type data_type = parity ? BCH_DATA_parity : 0;
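+ /* data blocks in a stripe now get BCH_DATA_stripe instead of leaving the bucket type unset: */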
+ enum bch_data_type data_type = parity ? BCH_DATA_parity : BCH_DATA_stripe;
s64 sectors = parity ? le16_to_cpu(s->sectors) : 0;
const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
if (ret)
goto err;
- if (data_type)
- g->data_type = data_type;
+ g->data_type = data_type;
g->dirty_sectors += sectors;
g->stripe = k.k->p.offset;
}
static int bch2_mark_pointer(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
struct bkey_s_c k,
struct extent_ptr_decoded p,
- s64 sectors, enum bch_data_type data_type,
+ s64 sectors,
unsigned flags)
{
u64 journal_seq = trans->journal_res.seq;
struct bch_fs *c = trans->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
struct bucket old, new, *g;
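+ /* data type is now derived from the key's btree/level, not passed in by the caller: */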
+ enum bch_data_type data_type = bkey_ptr_data_type(btree_id, level, k, p);
u8 bucket_data_type;
int ret = 0;
if (!m) {
bch_err(c, "error allocating memory for gc_stripes, idx %llu",
(u64) p.idx);
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_mark_stripe_ptr;
}
- spin_lock(&c->ec_stripes_heap_lock);
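+ /* ec_stripes_heap_lock is now a mutex rather than a spinlock: */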
+ mutex_lock(&c->ec_stripes_heap_lock);
if (!m || !m->alive) {
- spin_unlock(&c->ec_stripes_heap_lock);
+ mutex_unlock(&c->ec_stripes_heap_lock);
bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
(u64) p.idx);
bch2_inconsistent_error(c);
m->block_sectors[p.block] += sectors;
r = m->r;
- spin_unlock(&c->ec_stripes_heap_lock);
+ mutex_unlock(&c->ec_stripes_heap_lock);
r.e.data_type = data_type;
update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, true);
}
int bch2_mark_extent(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
struct bkey_s_c old, struct bkey_s_c new,
unsigned flags)
{
if (flags & BTREE_TRIGGER_OVERWRITE)
disk_sectors = -disk_sectors;
- ret = bch2_mark_pointer(trans, k, p, disk_sectors,
- data_type, flags);
+ ret = bch2_mark_pointer(trans, btree_id, level, k, p, disk_sectors, flags);
if (ret < 0)
return ret;
}
int bch2_mark_stripe(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
struct bkey_s_c old, struct bkey_s_c new,
unsigned flags)
{
if (!gc) {
struct stripe *m = genradix_ptr(&c->stripes, idx);
- if (!m || (old_s && !m->alive)) {
+ if (!m) {
struct printbuf buf1 = PRINTBUF;
struct printbuf buf2 = PRINTBUF;
}
if (!new_s) {
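+ /* the stripes heap helpers now take ec_stripes_heap_lock internally: */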
- spin_lock(&c->ec_stripes_heap_lock);
bch2_stripes_heap_del(c, m, idx);
- spin_unlock(&c->ec_stripes_heap_lock);
memset(m, 0, sizeof(*m));
} else {
- m->alive = true;
m->sectors = le16_to_cpu(new_s->sectors);
m->algorithm = new_s->algorithm;
m->nr_blocks = new_s->nr_blocks;
for (i = 0; i < new_s->nr_blocks; i++)
m->blocks_nonempty += !!stripe_blockcount_get(new_s, i);
- spin_lock(&c->ec_stripes_heap_lock);
- bch2_stripes_heap_update(c, m, idx);
- spin_unlock(&c->ec_stripes_heap_lock);
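+ /* new stripes are inserted into the heap; existing ones are updated in place: */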
+ if (!old_s)
+ bch2_stripes_heap_insert(c, m, idx);
+ else
+ bch2_stripes_heap_update(c, m, idx);
}
} else {
struct gc_stripe *m =
if (!m) {
bch_err(c, "error allocating memory for gc_stripes, idx %llu",
idx);
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_mark_stripe;
}
/*
* This will be wrong when we bring back runtime gc: we should
}
int bch2_mark_inode(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
struct bkey_s_c old, struct bkey_s_c new,
unsigned flags)
{
}
int bch2_mark_reservation(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
struct bkey_s_c old, struct bkey_s_c new,
unsigned flags)
{
}
int bch2_mark_reflink_p(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
struct bkey_s_c old, struct bkey_s_c new,
unsigned flags)
{
bool insert = !(flags & BTREE_TRIGGER_OVERWRITE);
struct btree_iter iter;
struct bkey_i_alloc_v4 *a;
- struct bpos bucket_pos;
+ struct bpos bucket;
struct bch_backpointer bp;
s64 sectors;
int ret;
- bch2_extent_ptr_to_bp(trans->c, btree_id, level, k, p, &bucket_pos, &bp);
+ bch2_extent_ptr_to_bp(trans->c, btree_id, level, k, p, &bucket, &bp);
sectors = bp.bucket_len;
if (!insert)
sectors = -sectors;
- a = bch2_trans_start_alloc_update(trans, &iter, bucket_pos);
+ a = bch2_trans_start_alloc_update(trans, &iter, bucket);
if (IS_ERR(a))
return PTR_ERR(a);
goto err;
if (!p.ptr.cached) {
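+ /* backpointer updates are now keyed by bucket position rather than the alloc key: */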
- ret = bch2_bucket_backpointer_mod(trans, a, bp, k, insert);
+ ret = bch2_bucket_backpointer_mod(trans, bucket, bp, k, insert);
if (ret)
goto err;
}
struct bch_replicas_padded r;
int ret = 0;
- bch2_trans_iter_init(trans, &iter, BTREE_ID_stripes, POS(0, p.ec.idx),
- BTREE_ITER_INTENT|
- BTREE_ITER_WITH_UPDATES);
- s = bch2_bkey_get_mut_typed(trans, &iter, stripe);
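+ /*
+ * bch2_bkey_get_mut_typed() now takes the btree ID and position directly
+ * and queues the btree update itself, so the separate bch2_trans_update()
+ * call is no longer needed:
+ */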
+ s = bch2_bkey_get_mut_typed(trans, &iter,
+ BTREE_ID_stripes, POS(0, p.ec.idx),
+ BTREE_ITER_WITH_UPDATES, stripe);
ret = PTR_ERR_OR_ZERO(s);
if (unlikely(ret)) {
bch2_trans_inconsistent_on(ret == -ENOENT, trans,
stripe_blockcount_get(&s->v, p.ec.block) +
sectors);
- ret = bch2_trans_update(trans, &iter, &s->k_i, 0);
- if (ret)
- goto err;
-
bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
r.e.data_type = data_type;
update_replicas_list(trans, &r.e, sectors);
a->v.stripe = s.k->p.offset;
a->v.stripe_redundancy = s.v->nr_redundant;
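+ /* record that this bucket now holds stripe data: */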
+ a->v.data_type = BCH_DATA_stripe;
} else {
if (bch2_trans_inconsistent_on(a->v.stripe != s.k->p.offset ||
a->v.stripe_redundancy != s.v->nr_redundant, trans,
a->v.stripe = 0;
a->v.stripe_redundancy = 0;
+ a->v.data_type = alloc_data_type(a->v, BCH_DATA_user);
}
a->v.dirty_sectors += sectors;
struct printbuf buf = PRINTBUF;
int ret;
- bch2_trans_iter_init(trans, &iter, BTREE_ID_reflink, POS(0, *idx),
- BTREE_ITER_INTENT|
- BTREE_ITER_WITH_UPDATES);
- k = bch2_bkey_get_mut(trans, &iter);
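+ /* the _noupdate variant returns a mutable copy without also queueing the btree update: */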
+ k = bch2_bkey_get_mut_noupdate(trans, &iter,
+ BTREE_ID_reflink, POS(0, *idx),
+ BTREE_ITER_WITH_UPDATES);
ret = PTR_ERR_OR_ZERO(k);
if (ret)
goto err;
if (IS_ERR(a))
return PTR_ERR(a);
- if (a->v.data_type && a->v.data_type != type) {
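+ /* only flag a mismatch when both the existing and new data types are nonzero: */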
+ if (a->v.data_type && type && a->v.data_type != type) {
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
"bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
"while marking %s",
struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
unsigned long *buckets_nouse = NULL;
bool resize = ca->bucket_gens != NULL;
- int ret = -ENOMEM;
+ int ret;
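+ /* each allocation failure now returns its own distinct BCH_ERR_ENOMEM_* code: */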
if (!(bucket_gens = kvpmalloc(sizeof(struct bucket_gens) + nbuckets,
- GFP_KERNEL|__GFP_ZERO)) ||
- (c->opts.buckets_nouse &&
+ GFP_KERNEL|__GFP_ZERO))) {
+ ret = -BCH_ERR_ENOMEM_bucket_gens;
+ goto err;
+ }
+
+ if (c->opts.buckets_nouse &&
!(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
sizeof(unsigned long),
- GFP_KERNEL|__GFP_ZERO))))
+ GFP_KERNEL|__GFP_ZERO))) {
+ ret = -BCH_ERR_ENOMEM_buckets_nouse;
goto err;
+ }
bucket_gens->first_bucket = ca->mi.first_bucket;
bucket_gens->nbuckets = nbuckets;
ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
if (!ca->usage_base)
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_usage_init;
for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
ca->usage[i] = alloc_percpu(struct bch_dev_usage);
if (!ca->usage[i])
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_usage_init;
}
return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);