#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
+#include "buckets_waiting_for_journal.h"
#include "clock.h"
#include "debug.h"
#include "ec.h"
return 0;
}
-static void bch2_alloc_pack_v2(struct bkey_alloc_buf *dst,
+static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
+ struct bkey_s_c k)
+{
+ struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
+ const u8 *in = a.v->data;
+ const u8 *end = bkey_val_end(a);
+ unsigned fieldnr = 0;
+ int ret;
+ u64 v;
+
+ out->gen = a.v->gen;
+ out->oldest_gen = a.v->oldest_gen;
+ out->data_type = a.v->data_type;
+ out->journal_seq = le64_to_cpu(a.v->journal_seq);
+
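+	/*
+	 * The remaining fields are packed as varints, in the order given by
+	 * BCH_ALLOC_FIELDS_V2(); fields not present in the key decode as 0:
+	 */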
+#define x(_name, _bits) \
+ if (fieldnr < a.v->nr_fields) { \
+ ret = bch2_varint_decode_fast(in, end, &v); \
+ if (ret < 0) \
+ return ret; \
+ in += ret; \
+ } else { \
+ v = 0; \
+ } \
+ out->_name = v; \
+ if (v != out->_name) \
+ return -1; \
+ fieldnr++;
+
+ BCH_ALLOC_FIELDS_V2()
+#undef x
+ return 0;
+}
+
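+/*
+ * Pack an unpacked alloc key as alloc_v3: each field is varint encoded, and
+ * trailing zero-valued fields are dropped from the encoding:
+ */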
+static void bch2_alloc_pack_v3(struct bkey_alloc_buf *dst,
const struct bkey_alloc_unpacked src)
{
- struct bkey_i_alloc_v2 *a = bkey_alloc_v2_init(&dst->k);
+ struct bkey_i_alloc_v3 *a = bkey_alloc_v3_init(&dst->k);
unsigned nr_fields = 0, last_nonzero_fieldnr = 0;
u8 *out = a->v.data;
u8 *end = (void *) &dst[1];
a->v.gen = src.gen;
a->v.oldest_gen = src.oldest_gen;
a->v.data_type = src.data_type;
+ a->v.journal_seq = cpu_to_le64(src.journal_seq);
#define x(_name, _bits) \
nr_fields++; \
.gen = 0,
};
- if (k.k->type == KEY_TYPE_alloc_v2)
- bch2_alloc_unpack_v2(&ret, k);
- else if (k.k->type == KEY_TYPE_alloc)
+ switch (k.k->type) {
+ case KEY_TYPE_alloc:
bch2_alloc_unpack_v1(&ret, k);
+ break;
+ case KEY_TYPE_alloc_v2:
+ bch2_alloc_unpack_v2(&ret, k);
+ break;
+ case KEY_TYPE_alloc_v3:
+ bch2_alloc_unpack_v3(&ret, k);
+ break;
+ }
return ret;
}
-void bch2_alloc_pack(struct bch_fs *c,
- struct bkey_alloc_buf *dst,
- const struct bkey_alloc_unpacked src)
+struct bkey_alloc_buf *bch2_alloc_pack(struct btree_trans *trans,
+ const struct bkey_alloc_unpacked src)
{
- bch2_alloc_pack_v2(dst, src);
+ struct bkey_alloc_buf *dst;
+
+ dst = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));
+ if (!IS_ERR(dst))
+ bch2_alloc_pack_v3(dst, src);
+
+ return dst;
+}
+
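+/*
+ * Convenience helper: pack @u and queue it as an update at @iter's position
+ * in the current transaction:
+ */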
+int bch2_alloc_write(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_alloc_unpacked *u, unsigned trigger_flags)
+{
+ struct bkey_alloc_buf *a = bch2_alloc_pack(trans, *u);
+
+ return PTR_ERR_OR_ZERO(a) ?:
+ bch2_trans_update(trans, iter, &a->k, trigger_flags);
}
static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
return NULL;
}
-void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
-
- pr_buf(out, "gen %u oldest_gen %u data_type %s",
- u.gen, u.oldest_gen, bch2_data_types[u.data_type]);
-#define x(_name, ...) pr_buf(out, " " #_name " %llu", (u64) u._name);
- BCH_ALLOC_FIELDS_V2()
-#undef x
-}
-
-static int bch2_alloc_read_fn(struct bch_fs *c, struct bkey_s_c k)
+const char *bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
- struct bch_dev *ca;
- struct bucket *g;
struct bkey_alloc_unpacked u;
- if (k.k->type != KEY_TYPE_alloc &&
- k.k->type != KEY_TYPE_alloc_v2)
- return 0;
-
- ca = bch_dev_bkey_exists(c, k.k->p.inode);
- g = bucket(ca, k.k->p.offset);
- u = bch2_alloc_unpack(k);
+ if (k.k->p.inode >= c->sb.nr_devices ||
+ !c->devs[k.k->p.inode])
+ return "invalid device";
- g->_mark.gen = u.gen;
- g->_mark.data_type = u.data_type;
- g->_mark.dirty_sectors = u.dirty_sectors;
- g->_mark.cached_sectors = u.cached_sectors;
- g->io_time[READ] = u.read_time;
- g->io_time[WRITE] = u.write_time;
- g->oldest_gen = u.oldest_gen;
- g->gen_valid = 1;
+ if (bch2_alloc_unpack_v3(&u, k))
+ return "unpack error";
- return 0;
+ return NULL;
}
-int bch2_alloc_read(struct bch_fs *c)
+void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
{
- int ret;
-
- down_read(&c->gc_lock);
- ret = bch2_btree_and_journal_walk(c, BTREE_ID_alloc, bch2_alloc_read_fn);
- up_read(&c->gc_lock);
- if (ret) {
- bch_err(c, "error reading alloc info: %i", ret);
- return ret;
- }
+ struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
- return 0;
+ pr_buf(out, "gen %u oldest_gen %u data_type %s journal_seq %llu",
+ u.gen, u.oldest_gen, bch2_data_types[u.data_type],
+ u.journal_seq);
+#define x(_name, ...) pr_buf(out, " " #_name " %llu", (u64) u._name);
+ BCH_ALLOC_FIELDS_V2()
+#undef x
}
-static int bch2_alloc_write_key(struct btree_trans *trans,
- struct btree_iter *iter,
- unsigned flags)
+int bch2_alloc_read(struct bch_fs *c, bool gc, bool metadata_only)
{
- struct bch_fs *c = trans->c;
+ struct btree_trans trans;
+ struct btree_iter iter;
struct bkey_s_c k;
struct bch_dev *ca;
struct bucket *g;
- struct bucket_mark m;
- struct bkey_alloc_unpacked old_u, new_u;
- struct bkey_alloc_buf a;
+ struct bkey_alloc_unpacked u;
int ret;
-retry:
- bch2_trans_begin(trans);
-
- ret = bch2_btree_key_cache_flush(trans,
- BTREE_ID_alloc, iter->pos);
- if (ret)
- goto err;
-
- k = bch2_btree_iter_peek_slot(iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- old_u = bch2_alloc_unpack(k);
-
- percpu_down_read(&c->mark_lock);
- ca = bch_dev_bkey_exists(c, iter->pos.inode);
- g = bucket(ca, iter->pos.offset);
- m = READ_ONCE(g->mark);
- new_u = alloc_mem_to_key(iter, g, m);
- percpu_up_read(&c->mark_lock);
-
- if (!bkey_alloc_unpacked_cmp(old_u, new_u))
- return 0;
- bch2_alloc_pack(c, &a, new_u);
- ret = bch2_trans_update(trans, iter, &a.k,
- BTREE_TRIGGER_NORUN) ?:
- bch2_trans_commit(trans, NULL, NULL,
- BTREE_INSERT_NOFAIL|flags);
-err:
- if (ret == -EINTR)
- goto retry;
- return ret;
-}
-
-int bch2_alloc_write(struct bch_fs *c, unsigned flags)
-{
- struct btree_trans trans;
- struct btree_iter *iter;
- struct bch_dev *ca;
- unsigned i;
- int ret = 0;
+ bch2_trans_init(&trans, c, 0, 0);
+
+ for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
+ BTREE_ITER_PREFETCH, k, ret) {
+ ca = bch_dev_bkey_exists(c, k.k->p.inode);
+ g = __bucket(ca, k.k->p.offset, gc);
+ u = bch2_alloc_unpack(k);
+
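+		/* Outside of gc, also keep the in-memory bucket gens array in sync: */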
+ if (!gc)
+ *bucket_gen(ca, k.k->p.offset) = u.gen;
+
+ g->_mark.gen = u.gen;
+ g->io_time[READ] = u.read_time;
+ g->io_time[WRITE] = u.write_time;
+ g->oldest_gen = !gc ? u.oldest_gen : u.gen;
+ g->gen_valid = 1;
+
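+		/*
+		 * In a metadata-only gc pass user data won't be re-marked, so
+		 * its sector counts have to come from the on-disk key; outside
+		 * of gc we always take them from the key:
+		 */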
+ if (!gc ||
+ (metadata_only &&
+ (u.data_type == BCH_DATA_user ||
+ u.data_type == BCH_DATA_cached ||
+ u.data_type == BCH_DATA_parity))) {
+ g->_mark.data_type = u.data_type;
+ g->_mark.dirty_sectors = u.dirty_sectors;
+ g->_mark.cached_sectors = u.cached_sectors;
+ g->_mark.stripe = u.stripe != 0;
+ g->stripe = u.stripe;
+ g->stripe_redundancy = u.stripe_redundancy;
+ }
- bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_alloc, POS_MIN,
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+ }
+ bch2_trans_iter_exit(&trans, &iter);
- for_each_member_device(ca, c, i) {
- bch2_btree_iter_set_pos(iter,
- POS(ca->dev_idx, ca->mi.first_bucket));
+ bch2_trans_exit(&trans);
- while (iter->pos.offset < ca->mi.nbuckets) {
- bch2_trans_cond_resched(&trans);
+ if (ret)
+ bch_err(c, "error reading alloc info: %i", ret);
- ret = bch2_alloc_write_key(&trans, iter, flags);
- if (ret) {
- percpu_ref_put(&ca->ref);
- goto err;
- }
- bch2_btree_iter_advance(iter);
- }
- }
-err:
- bch2_trans_iter_put(&trans, iter);
- bch2_trans_exit(&trans);
return ret;
}
size_t bucket_nr, int rw)
{
struct bch_fs *c = trans->c;
- struct bch_dev *ca = bch_dev_bkey_exists(c, dev);
- struct btree_iter *iter;
- struct bucket *g;
- struct bkey_alloc_buf *a;
+ struct btree_iter iter;
+ struct bkey_s_c k;
struct bkey_alloc_unpacked u;
u64 *time, now;
int ret = 0;
- iter = bch2_trans_get_iter(trans, BTREE_ID_alloc, POS(dev, bucket_nr),
- BTREE_ITER_CACHED|
- BTREE_ITER_CACHED_NOFILL|
- BTREE_ITER_INTENT);
- ret = bch2_btree_iter_traverse(iter);
- if (ret)
- goto out;
-
- a = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));
- ret = PTR_ERR_OR_ZERO(a);
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(dev, bucket_nr),
+ BTREE_ITER_CACHED|
+ BTREE_ITER_INTENT);
+ k = bch2_btree_iter_peek_slot(&iter);
+ ret = bkey_err(k);
if (ret)
goto out;
- percpu_down_read(&c->mark_lock);
- g = bucket(ca, bucket_nr);
- u = alloc_mem_to_key(iter, g, READ_ONCE(g->mark));
- percpu_up_read(&c->mark_lock);
+ u = bch2_alloc_unpack(k);
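+	/* Set the bucket's last read/write time to the current io clock time: */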
time = rw == READ ? &u.read_time : &u.write_time;
now = atomic64_read(&c->io_clock[rw].now);
*time = now;
- bch2_alloc_pack(c, a, u);
- ret = bch2_trans_update(trans, iter, &a->k, 0) ?:
+ ret = bch2_alloc_write(trans, &iter, &u, 0) ?:
bch2_trans_commit(trans, NULL, NULL, 0);
out:
- bch2_trans_iter_put(trans, iter);
+ bch2_trans_iter_exit(trans, &iter);
return ret;
}
test_bit(b, ca->buckets_nouse))
return false;
+ if (ca->new_fs_bucket_idx) {
+ /*
+ * Device or filesystem is still being initialized, and we
+ * haven't fully marked superblocks & journal:
+ */
+ if (is_superblock_bucket(ca, b))
+ return false;
+
+ if (b < ca->new_fs_bucket_idx)
+ return false;
+ }
+
gc_gen = bucket_gc_gen(bucket(ca, b));
ca->inc_gen_needs_gc += gc_gen >= BUCKET_GC_GEN_MAX / 2;
static unsigned bucket_sort_key(struct bucket *g, struct bucket_mark m,
u64 now, u64 last_seq_ondisk)
{
- unsigned used = bucket_sectors_used(m);
+ unsigned used = m.cached_sectors;
if (used) {
/*
* keys when there's only a small difference, so that we can
* keep sequential buckets together:
*/
- return (bucket_needs_journal_commit(m, last_seq_ondisk) << 4)|
- (bucket_gc_gen(g) >> 4);
+ return bucket_gc_gen(g) >> 4;
}
}
buckets = bucket_array(ca);
ca->alloc_heap.used = 0;
now = atomic64_read(&c->io_clock[READ].now);
- last_seq_ondisk = c->journal.last_seq_ondisk;
+ last_seq_ondisk = c->journal.flushed_seq_ondisk;
/*
* Find buckets with lowest read priority, by building a maxheap sorted
if (!bch2_can_invalidate_bucket(ca, b, m))
continue;
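+		/*
+		 * Don't reuse a bucket whose last update hasn't been flushed
+		 * to the journal yet; count these so the allocator thread can
+		 * decide whether a journal flush would free up buckets:
+		 */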
+ if (!m.data_type &&
+ bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
+ last_seq_ondisk,
+ ca->dev_idx, b)) {
+ ca->buckets_waiting_on_journal++;
+ continue;
+ }
+
if (e.nr && e.bucket + e.nr == b && e.key == key) {
e.nr++;
} else {
up_read(&ca->bucket_lock);
}
-static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
-{
- struct bucket_array *buckets = bucket_array(ca);
- struct bucket_mark m;
- size_t b, start;
-
- if (ca->fifo_last_bucket < ca->mi.first_bucket ||
- ca->fifo_last_bucket >= ca->mi.nbuckets)
- ca->fifo_last_bucket = ca->mi.first_bucket;
-
- start = ca->fifo_last_bucket;
-
- do {
- ca->fifo_last_bucket++;
- if (ca->fifo_last_bucket == ca->mi.nbuckets)
- ca->fifo_last_bucket = ca->mi.first_bucket;
-
- b = ca->fifo_last_bucket;
- m = READ_ONCE(buckets->b[b].mark);
-
- if (bch2_can_invalidate_bucket(ca, b, m)) {
- struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
-
- heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
- if (heap_full(&ca->alloc_heap))
- break;
- }
-
- cond_resched();
- } while (ca->fifo_last_bucket != start);
-}
-
-static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
-{
- struct bucket_array *buckets = bucket_array(ca);
- struct bucket_mark m;
- size_t checked, i;
-
- for (checked = 0;
- checked < ca->mi.nbuckets / 2;
- checked++) {
- size_t b = bch2_rand_range(ca->mi.nbuckets -
- ca->mi.first_bucket) +
- ca->mi.first_bucket;
-
- m = READ_ONCE(buckets->b[b].mark);
-
- if (bch2_can_invalidate_bucket(ca, b, m)) {
- struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
-
- heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
- if (heap_full(&ca->alloc_heap))
- break;
- }
-
- cond_resched();
- }
-
- sort(ca->alloc_heap.data,
- ca->alloc_heap.used,
- sizeof(ca->alloc_heap.data[0]),
- bucket_idx_cmp, NULL);
-
- /* remove duplicates: */
- for (i = 0; i + 1 < ca->alloc_heap.used; i++)
- if (ca->alloc_heap.data[i].bucket ==
- ca->alloc_heap.data[i + 1].bucket)
- ca->alloc_heap.data[i].nr = 0;
-}
-
static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
{
size_t i, nr = 0;
ca->inc_gen_needs_gc = 0;
ca->inc_gen_really_needs_gc = 0;
+ ca->buckets_waiting_on_journal = 0;
- switch (ca->mi.replacement) {
- case BCH_CACHE_REPLACEMENT_lru:
- find_reclaimable_buckets_lru(c, ca);
- break;
- case BCH_CACHE_REPLACEMENT_fifo:
- find_reclaimable_buckets_fifo(c, ca);
- break;
- case BCH_CACHE_REPLACEMENT_random:
- find_reclaimable_buckets_random(c, ca);
- break;
- }
+ find_reclaimable_buckets_lru(c, ca);
heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL);
return nr;
}
-/*
- * returns sequence number of most recent journal entry that updated this
- * bucket:
- */
-static u64 bucket_journal_seq(struct bch_fs *c, struct bucket_mark m)
-{
- if (m.journal_seq_valid) {
- u64 journal_seq = atomic64_read(&c->journal.seq);
- u64 bucket_seq = journal_seq;
-
- bucket_seq &= ~((u64) U16_MAX);
- bucket_seq |= m.journal_seq;
-
- if (bucket_seq > journal_seq)
- bucket_seq -= 1 << 16;
-
- return bucket_seq;
- } else {
- return 0;
- }
-}
-
static int bucket_invalidate_btree(struct btree_trans *trans,
- struct bch_dev *ca, u64 b)
+ struct bch_dev *ca, u64 b,
+ struct bkey_alloc_unpacked *u)
{
struct bch_fs *c = trans->c;
- struct bkey_alloc_buf *a;
- struct bkey_alloc_unpacked u;
- struct bucket *g;
- struct bucket_mark m;
- struct btree_iter *iter =
- bch2_trans_get_iter(trans, BTREE_ID_alloc,
- POS(ca->dev_idx, b),
- BTREE_ITER_CACHED|
- BTREE_ITER_CACHED_NOFILL|
- BTREE_ITER_INTENT);
+ struct btree_iter iter;
+ struct bkey_s_c k;
int ret;
- a = bch2_trans_kmalloc(trans, sizeof(*a));
- ret = PTR_ERR_OR_ZERO(a);
- if (ret)
- goto err;
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
+ POS(ca->dev_idx, b),
+ BTREE_ITER_CACHED|
+ BTREE_ITER_INTENT);
- ret = bch2_btree_iter_traverse(iter);
+ k = bch2_btree_iter_peek_slot(&iter);
+ ret = bkey_err(k);
if (ret)
goto err;
- percpu_down_read(&c->mark_lock);
- g = bucket(ca, b);
- m = READ_ONCE(g->mark);
- u = alloc_mem_to_key(iter, g, m);
- percpu_up_read(&c->mark_lock);
-
- u.gen++;
- u.data_type = 0;
- u.dirty_sectors = 0;
- u.cached_sectors = 0;
- u.read_time = atomic64_read(&c->io_clock[READ].now);
- u.write_time = atomic64_read(&c->io_clock[WRITE].now);
+ *u = bch2_alloc_unpack(k);
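+
+	/* Invalidating the bucket: bump the gen and zero out its counters: */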
+ u->gen++;
+ u->data_type = 0;
+ u->dirty_sectors = 0;
+ u->cached_sectors = 0;
+ u->read_time = atomic64_read(&c->io_clock[READ].now);
+ u->write_time = atomic64_read(&c->io_clock[WRITE].now);
- bch2_alloc_pack(c, a, u);
- ret = bch2_trans_update(trans, iter, &a->k,
- BTREE_TRIGGER_BUCKET_INVALIDATE);
+ ret = bch2_alloc_write(trans, &iter, u,
+ BTREE_TRIGGER_BUCKET_INVALIDATE);
err:
- bch2_trans_iter_put(trans, iter);
+ bch2_trans_iter_exit(trans, &iter);
return ret;
}
static int bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
u64 *journal_seq, unsigned flags)
{
- struct bucket *g;
- struct bucket_mark m;
+ struct bkey_alloc_unpacked u;
size_t b;
+ u64 commit_seq = 0;
int ret = 0;
+ /*
+ * If the read-only path is trying to shut down, we can't be generating
+ * new btree updates:
+ */
+ if (test_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags))
+ return 1;
+
BUG_ON(!ca->alloc_heap.used ||
!ca->alloc_heap.data[0].nr);
b = ca->alloc_heap.data[0].bucket;
/* first, put on free_inc and mark as owned by allocator: */
percpu_down_read(&c->mark_lock);
- g = bucket(ca, b);
- m = READ_ONCE(g->mark);
-
- BUG_ON(m.dirty_sectors);
bch2_mark_alloc_bucket(c, ca, b, true);
BUG_ON(!fifo_push(&ca->free_inc, b));
spin_unlock(&c->freelist_lock);
- /*
- * If we're not invalidating cached data, we only increment the bucket
- * gen in memory here, the incremented gen will be updated in the btree
- * by bch2_trans_mark_pointer():
- */
- if (!m.cached_sectors &&
- !bucket_needs_journal_commit(m, c->journal.last_seq_ondisk)) {
- BUG_ON(m.data_type);
- bucket_cmpxchg(g, m, m.gen++);
- percpu_up_read(&c->mark_lock);
- goto out;
- }
-
percpu_up_read(&c->mark_lock);
- /*
- * If the read-only path is trying to shut down, we can't be generating
- * new btree updates:
- */
- if (test_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags)) {
- ret = 1;
- goto out;
- }
-
- ret = bch2_trans_do(c, NULL, journal_seq,
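+	/* On success, commit_seq is the journal seq this update committed at: */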
+ ret = bch2_trans_do(c, NULL, &commit_seq,
BTREE_INSERT_NOCHECK_RW|
BTREE_INSERT_NOFAIL|
BTREE_INSERT_JOURNAL_RESERVED|
flags,
- bucket_invalidate_btree(&trans, ca, b));
-out:
+ bucket_invalidate_btree(&trans, ca, b, &u));
+
if (!ret) {
/* remove from alloc_heap: */
struct alloc_heap_entry e, *top = ca->alloc_heap.data;
heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
/*
- * Make sure we flush the last journal entry that updated this
- * bucket (i.e. deleting the last reference) before writing to
- * this bucket again:
+	 * If we're invalidating cached data then we need to wait on the
+	 * journal commit:
*/
- *journal_seq = max(*journal_seq, bucket_journal_seq(c, m));
+ if (u.data_type)
+ *journal_seq = max(*journal_seq, commit_seq);
+
+ /*
+	 * We already waited on u.journal_seq when we filtered out
+	 * buckets that need journal commit:
+ */
+ BUG_ON(*journal_seq > u.journal_seq);
} else {
size_t b2;
/* If we used NOWAIT, don't return the error: */
if (!fifo_empty(&ca->free_inc))
ret = 0;
- if (ret) {
+ if (ret < 0)
bch_err(ca, "error invalidating buckets: %i", ret);
+ if (ret)
return ret;
- }
if (journal_seq)
ret = bch2_journal_flush_seq(&c->journal, journal_seq);
gc_count = c->gc_count;
nr = find_reclaimable_buckets(c, ca);
- trace_alloc_scan(ca, nr, ca->inc_gen_needs_gc,
- ca->inc_gen_really_needs_gc);
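+		/*
+		 * If the buckets we could reclaim are mostly just waiting on
+		 * an in-flight journal write, flush the journal instead of
+		 * spinning:
+		 */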
+ if (!nr && ca->buckets_waiting_on_journal) {
+ ret = bch2_journal_flush(&c->journal);
+ if (ret)
+ goto stop;
+ } else if (nr < (ca->mi.nbuckets >> 6) &&
+ ca->buckets_waiting_on_journal >= nr / 2) {
+ bch2_journal_flush_async(&c->journal, NULL);
+ }
if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) ||
ca->inc_gen_really_needs_gc) &&
atomic_inc(&c->kick_gc);
wake_up_process(c->gc_thread);
}
+
+ trace_alloc_scan(ca, nr, ca->inc_gen_needs_gc,
+ ca->inc_gen_really_needs_gc);
}
ret = bch2_invalidate_buckets(c, ca);
lockdep_assert_held(&c->state_lock);
for_each_online_member(ca, c, i) {
- struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_bdi;
+ struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;
ra_pages += bdi->ra_pages;
}
ob++) {
spin_lock(&ob->lock);
if (ob->valid && !ob->on_partial_list &&
- ob->ptr.dev == ca->dev_idx)
+ ob->dev == ca->dev_idx)
ret = true;
spin_unlock(&ob->lock);
}
{
spin_lock_init(&c->freelist_lock);
}
-
-void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
-{
- struct open_bucket *ob;
-
- for (ob = c->open_buckets;
- ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
- ob++) {
- spin_lock(&ob->lock);
- if (ob->valid && !ob->on_partial_list) {
- pr_buf(out, "%zu ref %u type %s\n",
- ob - c->open_buckets,
- atomic_read(&ob->pin),
- bch2_data_types[ob->type]);
- }
- spin_unlock(&ob->lock);
- }
-
-}