#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
+#include "buckets_waiting_for_journal.h"
#include "ec.h"
#include "error.h"
#include "inode.h"
}
}
-/*
- * Clear journal_seq_valid for buckets for which it's not needed, to prevent
- * wraparound:
- */
-void bch2_bucket_seq_cleanup(struct bch_fs *c)
-{
- u64 journal_seq = atomic64_read(&c->journal.seq);
- u16 last_seq_ondisk = c->journal.last_seq_ondisk;
- struct bch_dev *ca;
- struct bucket_array *buckets;
- struct bucket *g;
- struct bucket_mark m;
- unsigned i;
-
- if (journal_seq - c->last_bucket_seq_cleanup <
- (1U << (BUCKET_JOURNAL_SEQ_BITS - 2)))
- return;
-
- c->last_bucket_seq_cleanup = journal_seq;
-
- for_each_member_device(ca, c, i) {
- down_read(&ca->bucket_lock);
- buckets = bucket_array(ca);
-
- for_each_bucket(g, buckets) {
- bucket_cmpxchg(g, m, ({
- if (!m.journal_seq_valid ||
- bucket_needs_journal_commit(m, last_seq_ondisk))
- break;
-
- m.journal_seq_valid = 0;
- }));
- }
- up_read(&ca->bucket_lock);
- }
-}
-
void bch2_fs_usage_initialize(struct bch_fs *c)
{
struct bch_fs_usage *usage;
static inline int bucket_sectors_fragmented(struct bch_dev *ca,
struct bucket_mark m)
{
- return bucket_sectors_used(m)
- ? max(0, (int) ca->mi.bucket_size - (int) bucket_sectors_used(m))
+ return m.dirty_sectors
+ ? max(0, (int) ca->mi.bucket_size - (int) m.dirty_sectors)
: 0;
}
: m.data_type;
}
-static bool bucket_became_unavailable(struct bucket_mark old,
- struct bucket_mark new)
-{
- return is_available_bucket(old) &&
- !is_available_bucket(new);
-}
-
static inline void account_bucket(struct bch_fs_usage *fs_usage,
struct bch_dev_usage *dev_usage,
enum bch_data_type type,
struct bch_fs_usage *fs_usage;
struct bch_dev_usage *u;
- /*
- * Hack for bch2_fs_initialize path, where we're first marking sb and
- * journal non-transactionally:
- */
- if (!journal_seq && !test_bit(BCH_FS_INITIALIZED, &c->flags))
- journal_seq = 1;
-
preempt_disable();
fs_usage = fs_usage_ptr(c, journal_seq, gc);
u = dev_usage_ptr(ca, journal_seq, gc);
{
struct bch_fs_usage __percpu *fs_usage;
int idx, ret = 0;
- char buf[200];
+ struct printbuf buf = PRINTBUF;
percpu_down_read(&c->mark_lock);
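+ /* we can't sleep while holding mark_lock - printbuf allocations must be atomic: */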
+ buf.atomic++;
idx = bch2_replicas_entry_idx(c, r);
if (idx < 0 &&
(test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
fsck_err(c, "no replicas entry\n"
" while marking %s",
- (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)))) {
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))) {
percpu_up_read(&c->mark_lock);
ret = bch2_mark_replicas(c, r);
- if (ret)
- return ret;
-
percpu_down_read(&c->mark_lock);
+
+ if (ret)
+ goto err;
idx = bch2_replicas_entry_idx(c, r);
}
if (idx < 0) {
err:
fsck_err:
percpu_up_read(&c->mark_lock);
+ printbuf_exit(&buf);
return ret;
}
update_replicas_list(trans, &r.e, sectors);
}
-#define do_mark_fn(fn, c, pos, flags, ...) \
-({ \
- int gc, ret = 0; \
- \
- percpu_rwsem_assert_held(&c->mark_lock); \
- \
- for (gc = 0; gc < 2 && !ret; gc++) \
- if (!gc == !(flags & BTREE_TRIGGER_GC) || \
- (gc && gc_visited(c, pos))) \
- ret = fn(c, __VA_ARGS__, gc); \
- ret; \
-})
-
void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
size_t b, bool owned_by_allocator)
{
bool gc = flags & BTREE_TRIGGER_GC;
u64 journal_seq = trans->journal_res.seq;
struct bch_fs *c = trans->c;
- struct bkey_alloc_unpacked u;
+ struct bkey_alloc_unpacked old_u = bch2_alloc_unpack(old);
+ struct bkey_alloc_unpacked new_u = bch2_alloc_unpack(new);
struct bch_dev *ca;
struct bucket *g;
struct bucket_mark old_m, m;
int ret = 0;
- /* We don't do anything for deletions - do we?: */
- if (!bkey_is_alloc(new.k))
- return 0;
-
/*
* alloc btree is read in by bch2_alloc_read, not gc:
*/
!(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
return 0;
- if (flags & BTREE_TRIGGER_INSERT) {
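+ /* the bucket is transitioning between empty and nonempty: */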
+ if ((flags & BTREE_TRIGGER_INSERT) &&
+ !old_u.data_type != !new_u.data_type &&
+ new.k->type == KEY_TYPE_alloc_v3) {
struct bch_alloc_v3 *v = (struct bch_alloc_v3 *) new.v;
+ u64 old_journal_seq = le64_to_cpu(v->journal_seq);
BUG_ON(!journal_seq);
- BUG_ON(new.k->type != KEY_TYPE_alloc_v3);
- v->journal_seq = cpu_to_le64(journal_seq);
+ /*
+ * If the btree updates referring to a bucket weren't flushed
+ * before the bucket became empty again, then we don't have
+ * to wait on a journal flush before we can reuse the bucket:
+ */
+ new_u.journal_seq = !new_u.data_type &&
+ (journal_seq == old_journal_seq ||
+ bch2_journal_noflush_seq(&c->journal, old_journal_seq))
+ ? 0 : journal_seq;
+ v->journal_seq = cpu_to_le64(new_u.journal_seq);
+ }
+
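+ /*
+ * The bucket just went from nonempty to empty: if updates referring to
+ * it haven't been flushed yet, record it so the bucket isn't reused
+ * before those updates reach disk:
+ */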
+ if (old_u.data_type && !new_u.data_type && new_u.journal_seq) {
+ ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
+ c->journal.flushed_seq_ondisk,
+ new_u.dev, new_u.bucket,
+ new_u.journal_seq);
+ if (ret) {
+ bch2_fs_fatal_error(c,
+ "error setting bucket_needs_journal_commit: %i", ret);
+ return ret;
+ }
}
- ca = bch_dev_bkey_exists(c, new.k->p.inode);
+ ca = bch_dev_bkey_exists(c, new_u.dev);
- if (new.k->p.offset >= ca->mi.nbuckets)
+ if (new_u.bucket >= ca->mi.nbuckets)
return 0;
percpu_down_read(&c->mark_lock);
- g = __bucket(ca, new.k->p.offset, gc);
- u = bch2_alloc_unpack(new);
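+ /* keep the new in-memory bucket_gens array (one gen byte per bucket) in sync: */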
+ if (!gc && new_u.gen != old_u.gen)
+ *bucket_gen(ca, new_u.bucket) = new_u.gen;
+
+ g = __bucket(ca, new_u.bucket, gc);
old_m = bucket_cmpxchg(g, m, ({
- m.gen = u.gen;
- m.data_type = u.data_type;
- m.dirty_sectors = u.dirty_sectors;
- m.cached_sectors = u.cached_sectors;
- m.stripe = u.stripe != 0;
-
- if (journal_seq) {
- m.journal_seq_valid = 1;
- m.journal_seq = journal_seq;
- }
+ m.gen = new_u.gen;
+ m.data_type = new_u.data_type;
+ m.dirty_sectors = new_u.dirty_sectors;
+ m.cached_sectors = new_u.cached_sectors;
+ m.stripe = new_u.stripe != 0;
}));
bch2_dev_usage_update(c, ca, old_m, m, journal_seq, gc);
- g->io_time[READ] = u.read_time;
- g->io_time[WRITE] = u.write_time;
- g->oldest_gen = u.oldest_gen;
+ g->io_time[READ] = new_u.read_time;
+ g->io_time[WRITE] = new_u.write_time;
+ g->oldest_gen = new_u.oldest_gen;
g->gen_valid = 1;
- g->stripe = u.stripe;
- g->stripe_redundancy = u.stripe_redundancy;
+ g->stripe = new_u.stripe;
+ g->stripe_redundancy = new_u.stripe_redundancy;
percpu_up_read(&c->mark_lock);
/*
return ret;
}
- trace_invalidate(ca, bucket_to_sector(ca, new.k->p.offset),
+ trace_invalidate(ca, bucket_to_sector(ca, new_u.bucket),
old_m.cached_sectors);
}
overflow; \
})
-static int __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
- size_t b, enum bch_data_type data_type,
- unsigned sectors, bool gc)
+void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
+ size_t b, enum bch_data_type data_type,
+ unsigned sectors, struct gc_pos pos,
+ unsigned flags)
{
- struct bucket *g = __bucket(ca, b, gc);
+ struct bucket *g;
struct bucket_mark old, new;
bool overflow;
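+ /* the runtime (non-gc) marking path is gone - this is only called from gc: */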
+ BUG_ON(!(flags & BTREE_TRIGGER_GC));
BUG_ON(data_type != BCH_DATA_sb &&
data_type != BCH_DATA_journal);
+ /*
+ * Backup superblock might be past the end of our normal usable space:
+ */
+ if (b >= ca->mi.nbuckets)
+ return;
+
+ percpu_down_read(&c->mark_lock);
+ g = gc_bucket(ca, b);
old = bucket_cmpxchg(g, new, ({
new.data_type = data_type;
overflow = checked_add(new.dirty_sectors, sectors);
bch2_data_types[old.data_type ?: data_type],
old.dirty_sectors, sectors);
- if (c)
- bch2_dev_usage_update(c, ca, old, new, 0, gc);
-
- return 0;
-}
-
-void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
- size_t b, enum bch_data_type type,
- unsigned sectors, struct gc_pos pos,
- unsigned flags)
-{
- BUG_ON(type != BCH_DATA_sb &&
- type != BCH_DATA_journal);
-
- /*
- * Backup superblock might be past the end of our normal usable space:
- */
- if (b >= ca->mi.nbuckets)
- return;
-
- if (likely(c)) {
- do_mark_fn(__bch2_mark_metadata_bucket, c, pos, flags,
- ca, b, type, sectors);
- } else {
- __bch2_mark_metadata_bucket(c, ca, b, type, sectors, 0);
- }
+ bch2_dev_usage_update(c, ca, old, new, 0, true);
+ percpu_up_read(&c->mark_lock);
}
static s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
struct bkey_s_c k,
const struct bch_extent_ptr *ptr,
s64 sectors, enum bch_data_type ptr_data_type,
- u8 bucket_gen, u8 bucket_data_type,
+ u8 b_gen, u8 bucket_data_type,
u16 dirty_sectors, u16 cached_sectors)
{
- size_t bucket_nr = PTR_BUCKET_NR(bch_dev_bkey_exists(c, ptr->dev), ptr);
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
u16 bucket_sectors = !ptr->cached
? dirty_sectors
: cached_sectors;
- char buf[200];
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
- if (gen_after(ptr->gen, bucket_gen)) {
+ if (gen_after(ptr->gen, b_gen)) {
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
"bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
"while marking %s",
- ptr->dev, bucket_nr, bucket_gen,
+ ptr->dev, bucket_nr, b_gen,
bch2_data_types[bucket_data_type ?: ptr_data_type],
ptr->gen,
- (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
- return -EIO;
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+ ret = -EIO;
+ goto err;
}
- if (gen_cmp(bucket_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
+ if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
"while marking %s",
- ptr->dev, bucket_nr, bucket_gen,
+ ptr->dev, bucket_nr, b_gen,
bch2_data_types[bucket_data_type ?: ptr_data_type],
ptr->gen,
- (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
- return -EIO;
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+ ret = -EIO;
+ goto err;
}
- if (bucket_gen != ptr->gen && !ptr->cached) {
+ if (b_gen != ptr->gen && !ptr->cached) {
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
- "bucket %u:%zu gen %u data type %s: stale dirty ptr (gen %u)\n"
+ "bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n"
"while marking %s",
- ptr->dev, bucket_nr, bucket_gen,
+ ptr->dev, bucket_nr, b_gen,
+ *bucket_gen(ca, bucket_nr),
bch2_data_types[bucket_data_type ?: ptr_data_type],
ptr->gen,
- (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
- return -EIO;
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+ ret = -EIO;
+ goto err;
}
- if (bucket_gen != ptr->gen)
- return 1;
+ if (b_gen != ptr->gen) {
+ ret = 1;
+ goto err;
+ }
if (bucket_data_type && ptr_data_type &&
bucket_data_type != ptr_data_type) {
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
"bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
"while marking %s",
- ptr->dev, bucket_nr, bucket_gen,
+ ptr->dev, bucket_nr, b_gen,
bch2_data_types[bucket_data_type],
bch2_data_types[ptr_data_type],
- (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
- return -EIO;
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+ ret = -EIO;
+ goto err;
}
if ((unsigned) (bucket_sectors + sectors) > U16_MAX) {
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
"bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U16_MAX\n"
"while marking %s",
- ptr->dev, bucket_nr, bucket_gen,
+ ptr->dev, bucket_nr, b_gen,
bch2_data_types[bucket_data_type ?: ptr_data_type],
bucket_sectors, sectors,
- (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
- return -EIO;
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+ ret = -EIO;
+ goto err;
}
-
- return 0;
+err:
+ printbuf_exit(&buf);
+ return ret;
}
static int mark_stripe_bucket(struct btree_trans *trans,
struct bkey_s_c k,
unsigned ptr_idx,
- u64 journal_seq, unsigned flags)
+ unsigned flags)
{
struct bch_fs *c = trans->c;
+ u64 journal_seq = trans->journal_res.seq;
const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
unsigned nr_data = s->nr_blocks - s->nr_redundant;
bool parity = ptr_idx >= nr_data;
enum bch_data_type data_type = parity ? BCH_DATA_parity : 0;
s64 sectors = parity ? le16_to_cpu(s->sectors) : 0;
const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
- bool gc = flags & BTREE_TRIGGER_GC;
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bucket *g;
struct bucket_mark new, old;
- char buf[200];
+ struct printbuf buf = PRINTBUF;
int ret = 0;
+ BUG_ON(!(flags & BTREE_TRIGGER_GC));
+
/* XXX doesn't handle deletion */
percpu_down_read(&c->mark_lock);
- g = PTR_BUCKET(ca, ptr, gc);
+ buf.atomic++;
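+ /* gc keeps its own copy of the bucket array: */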
+ g = PTR_GC_BUCKET(ca, ptr);
if (g->mark.dirty_sectors ||
(g->stripe && g->stripe != k.k->p.offset)) {
bch2_fs_inconsistent(c,
"bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
ptr->dev, PTR_BUCKET_NR(ca, ptr), g->mark.gen,
- (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
ret = -EINVAL;
goto err;
}
if (data_type)
new.data_type = data_type;
- if (journal_seq) {
- new.journal_seq_valid = 1;
- new.journal_seq = journal_seq;
- }
-
new.stripe = true;
}));
g->stripe = k.k->p.offset;
g->stripe_redundancy = s->nr_redundant;
- bch2_dev_usage_update(c, ca, old, new, journal_seq, gc);
+ bch2_dev_usage_update(c, ca, old, new, journal_seq, true);
err:
percpu_up_read(&c->mark_lock);
-
- return 0;
+ printbuf_exit(&buf);
+ return ret;
}
static int __mark_pointer(struct btree_trans *trans,
s64 sectors, enum bch_data_type data_type,
unsigned flags)
{
- bool gc = flags & BTREE_TRIGGER_GC;
u64 journal_seq = trans->journal_res.seq;
struct bch_fs *c = trans->c;
struct bucket_mark old, new;
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
- struct bucket *g = PTR_BUCKET(ca, &p.ptr, gc);
+ struct bucket *g;
u8 bucket_data_type;
u64 v;
int ret = 0;
+ BUG_ON(!(flags & BTREE_TRIGGER_GC));
+
percpu_down_read(&c->mark_lock);
- g = PTR_BUCKET(ca, &p.ptr, gc);
+ g = PTR_GC_BUCKET(ca, &p.ptr);
v = atomic64_read(&g->_mark.v);
do {
new.data_type = bucket_data_type;
- if (journal_seq) {
- new.journal_seq_valid = 1;
- new.journal_seq = journal_seq;
- }
-
if (flags & BTREE_TRIGGER_NOATOMIC) {
g->_mark = new;
break;
old.v.counter,
new.v.counter)) != old.v.counter);
- bch2_dev_usage_update(c, ca, old, new, journal_seq, gc);
-
- BUG_ON(!gc && bucket_became_unavailable(old, new));
+ bch2_dev_usage_update(c, ca, old, new, journal_seq, true);
err:
percpu_up_read(&c->mark_lock);
s64 sectors,
unsigned flags)
{
- bool gc = flags & BTREE_TRIGGER_GC;
struct bch_fs *c = trans->c;
struct bch_replicas_padded r;
+ struct gc_stripe *m;
- if (!gc) {
- BUG();
- } else {
- struct gc_stripe *m = genradix_ptr_alloc(&c->gc_stripes, p.idx, GFP_KERNEL);
-
- if (!m)
- return -ENOMEM;
-
- spin_lock(&c->ec_stripes_heap_lock);
+ BUG_ON(!(flags & BTREE_TRIGGER_GC));
- if (!m || !m->alive) {
- spin_unlock(&c->ec_stripes_heap_lock);
- bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
- (u64) p.idx);
- bch2_inconsistent_error(c);
- return -EIO;
- }
+ m = genradix_ptr_alloc(&c->gc_stripes, p.idx, GFP_KERNEL);
+ if (!m) {
+ bch_err(c, "error allocating memory for gc_stripes, idx %llu",
+ (u64) p.idx);
+ return -ENOMEM;
+ }
- m->block_sectors[p.block] += sectors;
+ spin_lock(&c->ec_stripes_heap_lock);
- r = m->r;
+ if (!m || !m->alive) {
spin_unlock(&c->ec_stripes_heap_lock);
-
- r.e.data_type = data_type;
- update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, gc);
+ bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
+ (u64) p.idx);
+ bch2_inconsistent_error(c);
+ return -EIO;
}
+ m->block_sectors[p.block] += sectors;
+
+ r = m->r;
+ spin_unlock(&c->ec_stripes_heap_lock);
+
+ r.e.data_type = data_type;
+ update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, true);
return 0;
}
struct bkey_s_c old, struct bkey_s_c new,
unsigned flags)
{
- bool gc = flags & BTREE_TRIGGER_GC;
u64 journal_seq = trans->journal_res.seq;
struct bch_fs *c = trans->c;
struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old: new;
? BCH_DATA_btree
: BCH_DATA_user;
s64 sectors = bkey_is_btree_ptr(k.k)
- ? c->opts.btree_node_size
+ ? btree_sectors(c)
: k.k->size;
s64 dirty_sectors = 0;
bool stale;
int ret;
+ BUG_ON(!(flags & BTREE_TRIGGER_GC));
+
r.e.data_type = data_type;
r.e.nr_devs = 0;
r.e.nr_required = 1;
if (p.ptr.cached) {
if (!stale) {
ret = update_cached_sectors(c, k, p.ptr.dev,
- disk_sectors, journal_seq, gc);
+ disk_sectors, journal_seq, true);
if (ret) {
bch2_fs_fatal_error(c, "bch2_mark_extent(): no replicas entry while updating cached sectors");
return ret;
}
if (r.e.nr_devs) {
- ret = update_replicas(c, k, &r.e, dirty_sectors, journal_seq, gc);
+ ret = update_replicas(c, k, &r.e, dirty_sectors, journal_seq, true);
if (ret) {
- char buf[200];
+ struct printbuf buf = PRINTBUF;
- bch2_bkey_val_to_text(&PBUF(buf), c, k);
- bch2_fs_fatal_error(c, "no replicas entry for %s", buf);
+ bch2_bkey_val_to_text(&buf, c, k);
+ bch2_fs_fatal_error(c, "no replicas entry for %s", buf.buf);
+ printbuf_exit(&buf);
return ret;
}
}
bool gc = flags & BTREE_TRIGGER_GC;
u64 journal_seq = trans->journal_res.seq;
struct bch_fs *c = trans->c;
- size_t idx = new.k->p.offset;
+ u64 idx = new.k->p.offset;
const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
? bkey_s_c_to_stripe(old).v : NULL;
const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
struct stripe *m = genradix_ptr(&c->stripes, idx);
if (!m || (old_s && !m->alive)) {
- char buf1[200], buf2[200];
+ struct printbuf buf1 = PRINTBUF;
+ struct printbuf buf2 = PRINTBUF;
- bch2_bkey_val_to_text(&PBUF(buf1), c, old);
- bch2_bkey_val_to_text(&PBUF(buf2), c, new);
- bch_err_ratelimited(c, "error marking nonexistent stripe %zu while marking\n"
+ bch2_bkey_val_to_text(&buf1, c, old);
+ bch2_bkey_val_to_text(&buf2, c, new);
+ bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
"old %s\n"
- "new %s", idx, buf1, buf2);
+ "new %s", idx, buf1.buf, buf2.buf);
+ printbuf_exit(&buf2);
+ printbuf_exit(&buf1);
bch2_inconsistent_error(c);
return -1;
}
spin_unlock(&c->ec_stripes_heap_lock);
}
} else {
- struct gc_stripe *m = genradix_ptr(&c->gc_stripes, idx);
+ struct gc_stripe *m =
+ genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);
+ if (!m) {
+ bch_err(c, "error allocating memory for gc_stripes, idx %llu",
+ idx);
+ return -ENOMEM;
+ }
/*
* This will be wrong when we bring back runtime gc: we should
* be unmarking the old key and then marking the new key
*/
m->alive = true;
m->sectors = le16_to_cpu(new_s->sectors);
- m->algorithm = new_s->algorithm;
m->nr_blocks = new_s->nr_blocks;
m->nr_redundant = new_s->nr_redundant;
memset(m->block_sectors, 0, sizeof(m->block_sectors));
for (i = 0; i < new_s->nr_blocks; i++) {
- ret = mark_stripe_bucket(trans, new, i, journal_seq, flags);
+ ret = mark_stripe_bucket(trans, new, i, flags);
if (ret)
return ret;
}
((s64) m->sectors * m->nr_redundant),
journal_seq, gc);
if (ret) {
- char buf[200];
+ struct printbuf buf = PRINTBUF;
- bch2_bkey_val_to_text(&PBUF(buf), c, new);
- bch2_fs_fatal_error(c, "no replicas entry for %s", buf);
+ bch2_bkey_val_to_text(&buf, c, new);
+ bch2_fs_fatal_error(c, "no replicas entry for %s", buf.buf);
+ printbuf_exit(&buf);
return ret;
}
}
unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
s64 sectors = (s64) k.k->size;
+ BUG_ON(!(flags & BTREE_TRIGGER_GC));
+
if (flags & BTREE_TRIGGER_OVERWRITE)
sectors = -sectors;
sectors *= replicas;
return 0;
}
-static s64 __bch2_mark_reflink_p(struct bch_fs *c, struct bkey_s_c_reflink_p p,
+static s64 __bch2_mark_reflink_p(struct btree_trans *trans,
+ struct bkey_s_c_reflink_p p,
+ u64 start, u64 end,
u64 *idx, unsigned flags, size_t r_idx)
{
+ struct bch_fs *c = trans->c;
struct reflink_gc *r;
int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
+ u64 next_idx = end;
s64 ret = 0;
+ struct printbuf buf = PRINTBUF;
if (r_idx >= c->reflink_gc_nr)
goto not_found;
r = genradix_ptr(&c->reflink_gc_table, r_idx);
- if (*idx < r->offset - r->size)
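+ /*
+ * next_idx bounds the missing range, so that we only replace the part
+ * of the reflink pointer that's actually missing, not the whole key:
+ */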
+ next_idx = min(next_idx, r->offset - r->size);
+ if (*idx < next_idx)
goto not_found;
BUG_ON((s64) r->refcount + add < 0);
*idx = r->offset;
return 0;
not_found:
- *idx = U64_MAX;
- ret = -EIO;
-
- /*
- * XXX: we're replacing the entire reflink pointer with an error
- * key, we should just be replacing the part that was missing:
- */
- if (fsck_err(c, "%llu:%llu len %u points to nonexistent indirect extent %llu",
- p.k->p.inode, p.k->p.offset, p.k->size, *idx)) {
- struct bkey_i_error *new;
-
- new = kmalloc(sizeof(*new), GFP_KERNEL);
- if (!new) {
- bch_err(c, "%s: error allocating new key", __func__);
- return -ENOMEM;
- }
-
- bkey_init(&new->k);
- new->k.type = KEY_TYPE_error;
- new->k.p = p.k->p;
- new->k.size = p.k->size;
- ret = bch2_journal_key_insert(c, BTREE_ID_extents, 0, &new->k_i);
+ if (fsck_err(c, "pointer to missing indirect extent\n"
+ " %s\n"
+ " missing range %llu-%llu",
+ (bch2_bkey_val_to_text(&buf, c, p.s_c), buf.buf),
+ *idx, next_idx)) {
+ struct bkey_i_error new;
+
+ bkey_init(&new.k);
+ new.k.type = KEY_TYPE_error;
+ new.k.p = bkey_start_pos(p.k);
+ new.k.p.offset += *idx - start;
+ bch2_key_resize(&new.k, next_idx - *idx);
+ ret = __bch2_btree_insert(trans, BTREE_ID_extents, &new.k_i);
}
+
+ *idx = next_idx;
fsck_err:
+ printbuf_exit(&buf);
return ret;
}
struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
struct reflink_gc *ref;
size_t l, r, m;
- u64 idx = le64_to_cpu(p.v->idx);
+ u64 idx = le64_to_cpu(p.v->idx), start = idx;
u64 end = le64_to_cpu(p.v->idx) + p.k->size;
int ret = 0;
+ BUG_ON(!(flags & BTREE_TRIGGER_GC));
+
if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix) {
idx -= le32_to_cpu(p.v->front_pad);
end += le32_to_cpu(p.v->back_pad);
}
while (idx < end && !ret)
- ret = __bch2_mark_reflink_p(c, p, &idx, flags, l++);
+ ret = __bch2_mark_reflink_p(trans, p, start, end,
+ &idx, flags, l++);
return ret;
}
}
}
-int bch2_mark_update(struct btree_trans *trans, struct btree_path *path,
- struct bkey_i *new, unsigned flags)
-{
- struct bkey _deleted = KEY(0, 0, 0);
- struct bkey_s_c deleted = (struct bkey_s_c) { &_deleted, NULL };
- struct bkey_s_c old;
- struct bkey unpacked;
- int ret;
-
- _deleted.p = path->pos;
-
- if (unlikely(flags & BTREE_TRIGGER_NORUN))
- return 0;
-
- if (!btree_node_type_needs_gc(path->btree_id))
- return 0;
-
- old = bch2_btree_path_peek_slot(path, &unpacked);
-
- if (old.k->type == new->k.type &&
- ((1U << old.k->type) & BTREE_TRIGGER_WANTS_OLD_AND_NEW)) {
- ret = bch2_mark_key(trans, old, bkey_i_to_s_c(new),
- BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
- } else {
- ret = bch2_mark_key(trans, deleted, bkey_i_to_s_c(new),
- BTREE_TRIGGER_INSERT|flags) ?:
- bch2_mark_key(trans, old, deleted,
- BTREE_TRIGGER_OVERWRITE|flags);
- }
-
- return ret;
-}
-
static noinline __cold
void fs_usage_apply_warn(struct btree_trans *trans,
unsigned disk_res_sectors,
{
struct bch_fs *c = trans->c;
struct btree_insert_entry *i;
- char buf[200];
+ struct printbuf buf = PRINTBUF;
bch_err(c, "disk usage increased %lli more than %u sectors reserved",
should_not_have_added, disk_res_sectors);
trans_for_each_update(trans, i) {
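+ /* the key being overwritten is saved in the insert entry now: */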
+ struct bkey_s_c old = { &i->old_k, i->old_v };
+
pr_err("while inserting");
- bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
- pr_err("%s", buf);
+ printbuf_reset(&buf);
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(i->k));
+ pr_err(" %s", buf.buf);
pr_err("overlapping with");
-
- if (!i->cached) {
- struct bkey u;
- struct bkey_s_c k = bch2_btree_path_peek_slot(i->path, &u);
-
- bch2_bkey_val_to_text(&PBUF(buf), c, k);
- pr_err("%s", buf);
- } else {
- struct bkey_cached *ck = (void *) i->path->l[0].b;
-
- if (ck->valid) {
- bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(ck->k));
- pr_err("%s", buf);
- }
- }
+ printbuf_reset(&buf);
+ bch2_bkey_val_to_text(&buf, c, old);
+ pr_err(" %s", buf.buf);
}
+
__WARN();
+ printbuf_exit(&buf);
}
int bch2_trans_fs_usage_apply(struct btree_trans *trans,
/* trans_mark: */
-static struct bkey_alloc_buf *
-bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
+static int bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
const struct bch_extent_ptr *ptr,
struct bkey_alloc_unpacked *u)
{
struct bch_fs *c = trans->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
- struct bpos pos = POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
- struct bucket *g;
- struct bkey_alloc_buf *a;
- struct bkey_i *update = btree_trans_peek_updates(trans, BTREE_ID_alloc, pos);
+ struct bkey_s_c k;
int ret;
- a = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));
- if (IS_ERR(a))
- return a;
-
- bch2_trans_iter_init(trans, iter, BTREE_ID_alloc, pos,
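+ /*
+ * BTREE_ITER_WITH_UPDATES makes peek_slot() see updates pending in
+ * this transaction, replacing the old btree_trans_peek_updates() call:
+ */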
+ bch2_trans_iter_init(trans, iter, BTREE_ID_alloc,
+ POS(ptr->dev, PTR_BUCKET_NR(ca, ptr)),
+ BTREE_ITER_WITH_UPDATES|
BTREE_ITER_CACHED|
- BTREE_ITER_CACHED_NOFILL|
BTREE_ITER_INTENT);
- ret = bch2_btree_iter_traverse(iter);
+ k = bch2_btree_iter_peek_slot(iter);
+ ret = bkey_err(k);
if (ret) {
bch2_trans_iter_exit(trans, iter);
- return ERR_PTR(ret);
- }
-
- if (update && !bpos_cmp(update->k.p, pos)) {
- *u = bch2_alloc_unpack(bkey_i_to_s_c(update));
- } else {
- percpu_down_read(&c->mark_lock);
- g = bucket(ca, pos.offset);
- *u = alloc_mem_to_key(iter, g, READ_ONCE(g->mark));
- percpu_up_read(&c->mark_lock);
+ return ret;
}
- return a;
+ *u = bch2_alloc_unpack(k);
+ return 0;
}
static int bch2_trans_mark_pointer(struct btree_trans *trans,
struct bkey_s_c k, struct extent_ptr_decoded p,
s64 sectors, enum bch_data_type data_type)
{
- struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_alloc_unpacked u;
- struct bkey_alloc_buf *a;
int ret;
- a = bch2_trans_start_alloc_update(trans, &iter, &p.ptr, &u);
- if (IS_ERR(a))
- return PTR_ERR(a);
+ ret = bch2_trans_start_alloc_update(trans, &iter, &p.ptr, &u);
+ if (ret)
+ return ret;
ret = __mark_pointer(trans, k, &p.ptr, sectors, data_type,
u.gen, &u.data_type,
if (ret)
goto out;
- bch2_alloc_pack(c, a, u);
- ret = bch2_trans_update(trans, &iter, &a->k, 0);
+ ret = bch2_alloc_write(trans, &iter, &u, 0);
if (ret)
goto out;
out:
? BCH_DATA_btree
: BCH_DATA_user;
s64 sectors = bkey_is_btree_ptr(k.k)
- ? c->opts.btree_node_size
+ ? btree_sectors(c)
: k.k->size;
s64 dirty_sectors = 0;
bool stale;
{
struct bch_fs *c = trans->c;
const struct bch_extent_ptr *ptr = &s.v->ptrs[idx];
- struct bkey_alloc_buf *a;
struct btree_iter iter;
struct bkey_alloc_unpacked u;
enum bch_data_type data_type = idx >= s.v->nr_blocks - s.v->nr_redundant
if (deleting)
sectors = -sectors;
- a = bch2_trans_start_alloc_update(trans, &iter, ptr, &u);
- if (IS_ERR(a))
- return PTR_ERR(a);
+ ret = bch2_trans_start_alloc_update(trans, &iter, ptr, &u);
+ if (ret)
+ return ret;
ret = check_bucket_ref(c, s.s_c, ptr, sectors, data_type,
u.gen, u.data_type,
if (data_type)
u.data_type = !deleting ? data_type : 0;
- bch2_alloc_pack(c, a, u);
- ret = bch2_trans_update(trans, &iter, &a->k, 0);
+ ret = bch2_alloc_write(trans, &iter, &u, 0);
if (ret)
goto err;
err:
}
static int bch2_trans_mark_stripe(struct btree_trans *trans,
- struct bkey_s_c old, struct bkey_s_c new,
+ struct bkey_s_c old, struct bkey_i *new,
unsigned flags)
{
- struct bkey_s_c_stripe old_s = { .k = NULL };
- struct bkey_s_c_stripe new_s = { .k = NULL };
+ const struct bch_stripe *old_s = NULL;
+ struct bch_stripe *new_s = NULL;
struct bch_replicas_padded r;
unsigned i, nr_blocks;
int ret = 0;
if (old.k->type == KEY_TYPE_stripe)
- old_s = bkey_s_c_to_stripe(old);
- if (new.k->type == KEY_TYPE_stripe)
- new_s = bkey_s_c_to_stripe(new);
+ old_s = bkey_s_c_to_stripe(old).v;
+ if (new->k.type == KEY_TYPE_stripe)
+ new_s = &bkey_i_to_stripe(new)->v;
/*
* If the pointers aren't changing, we don't need to do anything:
*/
- if (new_s.k && old_s.k &&
- new_s.v->nr_blocks == old_s.v->nr_blocks &&
- new_s.v->nr_redundant == old_s.v->nr_redundant &&
- !memcmp(old_s.v->ptrs, new_s.v->ptrs,
- new_s.v->nr_blocks * sizeof(struct bch_extent_ptr)))
+ if (new_s && old_s &&
+ new_s->nr_blocks == old_s->nr_blocks &&
+ new_s->nr_redundant == old_s->nr_redundant &&
+ !memcmp(old_s->ptrs, new_s->ptrs,
+ new_s->nr_blocks * sizeof(struct bch_extent_ptr)))
return 0;
- BUG_ON(new_s.k && old_s.k &&
- (new_s.v->nr_blocks != old_s.v->nr_blocks ||
- new_s.v->nr_redundant != old_s.v->nr_redundant));
+ BUG_ON(new_s && old_s &&
+ (new_s->nr_blocks != old_s->nr_blocks ||
+ new_s->nr_redundant != old_s->nr_redundant));
- nr_blocks = new_s.k ? new_s.v->nr_blocks : old_s.v->nr_blocks;
+ nr_blocks = new_s ? new_s->nr_blocks : old_s->nr_blocks;
- if (new_s.k) {
- s64 sectors = le16_to_cpu(new_s.v->sectors);
+ if (new_s) {
+ s64 sectors = le16_to_cpu(new_s->sectors);
- bch2_bkey_to_replicas(&r.e, new);
- update_replicas_list(trans, &r.e, sectors * new_s.v->nr_redundant);
+ bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(new));
+ update_replicas_list(trans, &r.e, sectors * new_s->nr_redundant);
}
- if (old_s.k) {
- s64 sectors = -((s64) le16_to_cpu(old_s.v->sectors));
+ if (old_s) {
+ s64 sectors = -((s64) le16_to_cpu(old_s->sectors));
bch2_bkey_to_replicas(&r.e, old);
- update_replicas_list(trans, &r.e, sectors * old_s.v->nr_redundant);
+ update_replicas_list(trans, &r.e, sectors * old_s->nr_redundant);
}
for (i = 0; i < nr_blocks; i++) {
- if (new_s.k && old_s.k &&
- !memcmp(&new_s.v->ptrs[i],
- &old_s.v->ptrs[i],
- sizeof(new_s.v->ptrs[i])))
+ if (new_s && old_s &&
+ !memcmp(&new_s->ptrs[i],
+ &old_s->ptrs[i],
+ sizeof(new_s->ptrs[i])))
continue;
- if (new_s.k) {
- ret = bch2_trans_mark_stripe_bucket(trans, new_s, i, false);
+ if (new_s) {
+ ret = bch2_trans_mark_stripe_bucket(trans,
+ bkey_i_to_s_c_stripe(new), i, false);
if (ret)
break;
}
- if (old_s.k) {
- ret = bch2_trans_mark_stripe_bucket(trans, old_s, i, true);
+ if (old_s) {
+ ret = bch2_trans_mark_stripe_bucket(trans,
+ bkey_s_c_to_stripe(old), i, true);
if (ret)
break;
}
static int bch2_trans_mark_inode(struct btree_trans *trans,
struct bkey_s_c old,
- struct bkey_s_c new,
+ struct bkey_i *new,
unsigned flags)
{
- int nr = bkey_is_inode(new.k) - bkey_is_inode(old.k);
+ int nr = bkey_is_inode(&new->k) - bkey_is_inode(old.k);
if (nr) {
struct replicas_delta_list *d =
struct bkey_i *n;
__le64 *refcount;
int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
- char buf[200];
+ struct printbuf buf = PRINTBUF;
int ret;
bch2_trans_iter_init(trans, &iter, BTREE_ID_reflink, POS(0, *idx),
refcount = bkey_refcount(n);
if (!refcount) {
- bch2_bkey_val_to_text(&PBUF(buf), c, p.s_c);
+ bch2_bkey_val_to_text(&buf, c, p.s_c);
bch2_fs_inconsistent(c,
"nonexistent indirect extent at %llu while marking\n %s",
- *idx, buf);
+ *idx, buf.buf);
ret = -EIO;
goto err;
}
if (!*refcount && (flags & BTREE_TRIGGER_OVERWRITE)) {
- bch2_bkey_val_to_text(&PBUF(buf), c, p.s_c);
+ bch2_bkey_val_to_text(&buf, c, p.s_c);
bch2_fs_inconsistent(c,
"indirect extent refcount underflow at %llu while marking\n %s",
- *idx, buf);
+ *idx, buf.buf);
ret = -EIO;
goto err;
}
*idx = k.k->p.offset;
err:
bch2_trans_iter_exit(trans, &iter);
+ printbuf_exit(&buf);
return ret;
}
}
int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c old,
- struct bkey_s_c new, unsigned flags)
+ struct bkey_i *new, unsigned flags)
{
- struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old: new;
+ struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
+ ? old
+ : bkey_i_to_s_c(new);
switch (k.k->type) {
case KEY_TYPE_btree_ptr:
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_alloc_unpacked u;
- struct bkey_alloc_buf *a;
struct bch_extent_ptr ptr = {
.dev = ca->dev_idx,
.offset = bucket_to_sector(ca, b),
if (b >= ca->mi.nbuckets)
return 0;
- a = bch2_trans_start_alloc_update(trans, &iter, &ptr, &u);
- if (IS_ERR(a))
- return PTR_ERR(a);
+ ret = bch2_trans_start_alloc_update(trans, &iter, &ptr, &u);
+ if (ret)
+ return ret;
if (u.data_type && u.data_type != type) {
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
u.data_type = type;
u.dirty_sectors = sectors;
- bch2_alloc_pack(c, a, u);
- ret = bch2_trans_update(trans, &iter, &a->k, 0);
+ ret = bch2_alloc_write(trans, &iter, &u, 0);
if (ret)
goto out;
out:
container_of(rcu, struct bucket_array, rcu);
kvpfree(buckets,
- sizeof(struct bucket_array) +
+ sizeof(*buckets) +
buckets->nbuckets * sizeof(struct bucket));
}
+static void bucket_gens_free_rcu(struct rcu_head *rcu)
+{
+ struct bucket_gens *buckets =
+ container_of(rcu, struct bucket_gens, rcu);
+
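+ /* bucket_gens stores a single gen byte per bucket: */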
+ kvpfree(buckets, sizeof(*buckets) + buckets->nbuckets);
+}
+
int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
struct bucket_array *buckets = NULL, *old_buckets = NULL;
+ struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
unsigned long *buckets_nouse = NULL;
alloc_fifo free[RESERVE_NR];
alloc_fifo free_inc;
alloc_heap alloc_heap;
size_t btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
- ca->mi.bucket_size / c->opts.btree_node_size);
+ ca->mi.bucket_size / btree_sectors(c));
/* XXX: these should be tunable */
size_t reserve_none = max_t(size_t, 1, nbuckets >> 9);
size_t copygc_reserve = max_t(size_t, 2, nbuckets >> 6);
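+ /* the buckets_nouse bitmap is now allocated only when the option asks for it: */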
if (!(buckets = kvpmalloc(sizeof(struct bucket_array) +
nbuckets * sizeof(struct bucket),
GFP_KERNEL|__GFP_ZERO)) ||
- !(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
- sizeof(unsigned long),
+ !(bucket_gens = kvpmalloc(sizeof(struct bucket_gens) + nbuckets,
GFP_KERNEL|__GFP_ZERO)) ||
+ (c->opts.buckets_nouse &&
+ !(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
+ sizeof(unsigned long),
+ GFP_KERNEL|__GFP_ZERO))) ||
!init_fifo(&free[RESERVE_MOVINGGC],
copygc_reserve, GFP_KERNEL) ||
!init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
buckets->first_bucket = ca->mi.first_bucket;
buckets->nbuckets = nbuckets;
+ bucket_gens->first_bucket = ca->mi.first_bucket;
+ bucket_gens->nbuckets = nbuckets;
bch2_copygc_stop(c);
}
old_buckets = bucket_array(ca);
+ old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);
if (resize) {
size_t n = min(buckets->nbuckets, old_buckets->nbuckets);
memcpy(buckets->b,
old_buckets->b,
n * sizeof(struct bucket));
- memcpy(buckets_nouse,
- ca->buckets_nouse,
- BITS_TO_LONGS(n) * sizeof(unsigned long));
+ memcpy(bucket_gens->b,
+ old_bucket_gens->b,
+ n);
+ if (buckets_nouse)
+ memcpy(buckets_nouse,
+ ca->buckets_nouse,
+ BITS_TO_LONGS(n) * sizeof(unsigned long));
}
rcu_assign_pointer(ca->buckets[0], buckets);
- buckets = old_buckets;
+ rcu_assign_pointer(ca->bucket_gens, bucket_gens);
+ buckets = old_buckets;
+ bucket_gens = old_bucket_gens;
swap(ca->buckets_nouse, buckets_nouse);
free_fifo(&free[i]);
kvpfree(buckets_nouse,
BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
+ if (bucket_gens)
+ call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);
if (buckets)
- call_rcu(&old_buckets->rcu, buckets_free_rcu);
+ call_rcu(&buckets->rcu, buckets_free_rcu);
return ret;
}
free_fifo(&ca->free[i]);
kvpfree(ca->buckets_nouse,
BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
+ kvpfree(rcu_dereference_protected(ca->bucket_gens, 1),
+ sizeof(struct bucket_gens) + ca->mi.nbuckets);
kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
sizeof(struct bucket_array) +
ca->mi.nbuckets * sizeof(struct bucket));