#include "btree_gc.h"
#include "buckets.h"
#include "error.h"
+#include "movinggc.h"
#include <linux/preempt.h>
#include <trace/events/bcachefs.h>
static void bch2_fs_stats_verify(struct bch_fs *c)
{
struct bch_fs_usage stats =
__bch2_fs_usage_read(c);
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(stats.s); i++) {
+ if ((s64) stats.s[i].data[S_META] < 0)
+ panic("replicas %u meta underflow: %lli\n",
+ i + 1, stats.s[i].data[S_META]);
+
+ if ((s64) stats.s[i].data[S_DIRTY] < 0)
+ panic("replicas %u dirty underflow: %lli\n",
+ i + 1, stats.s[i].data[S_DIRTY]);
+
+ if ((s64) stats.s[i].persistent_reserved < 0)
+ panic("replicas %u reserved underflow: %lli\n",
+ i + 1, stats.s[i].persistent_reserved);
+ }
+
+ if ((s64) stats.online_reserved < 0)
+ panic("sectors_online_reserved underflow: %lli\n",
+ stats.online_reserved);
+}
- if ((s64) stats.sectors_dirty < 0)
- panic("sectors_dirty underflow: %lli\n", stats.sectors_dirty);
+static void bch2_dev_stats_verify(struct bch_dev *ca)
+{
+ struct bch_dev_usage stats =
+ __bch2_dev_usage_read(ca);
+ u64 n = ca->mi.nbuckets - ca->mi.first_bucket;
+ unsigned i;
- if ((s64) stats.sectors_cached < 0)
- panic("sectors_cached underflow: %lli\n", stats.sectors_cached);
+ for (i = 0; i < ARRAY_SIZE(stats.buckets); i++)
+ BUG_ON(stats.buckets[i] > n);
+ BUG_ON(stats.buckets_alloc > n);
+ BUG_ON(stats.buckets_unavailable > n);
+}
- if ((s64) stats.sectors_meta < 0)
- panic("sectors_meta underflow: %lli\n", stats.sectors_meta);
+static void bch2_disk_reservations_verify(struct bch_fs *c, int flags)
+{
+ if (!(flags & BCH_DISK_RESERVATION_NOFAIL)) {
+		u64 used = __bch2_fs_sectors_used(c, __bch2_fs_usage_read(c));
+ u64 cached = 0;
+ u64 avail = atomic64_read(&c->sectors_available);
+ int cpu;
- if ((s64) stats.sectors_persistent_reserved < 0)
- panic("sectors_persistent_reserved underflow: %lli\n", stats.sectors_persistent_reserved);
+ for_each_possible_cpu(cpu)
+ cached += per_cpu_ptr(c->usage_percpu, cpu)->available_cache;
- if ((s64) stats.sectors_online_reserved < 0)
- panic("sectors_online_reserved underflow: %lli\n", stats.sectors_online_reserved);
+ if (used + avail + cached > c->capacity)
+ panic("used %llu avail %llu cached %llu capacity %llu\n",
+ used, avail, cached, c->capacity);
+ }
}
#else
static void bch2_fs_stats_verify(struct bch_fs *c) {}
+static void bch2_dev_stats_verify(struct bch_dev *ca) {}
+static void bch2_disk_reservations_verify(struct bch_fs *c, int flags) {}
#endif
void bch2_bucket_seq_cleanup(struct bch_fs *c)
{
u16 last_seq_ondisk = c->journal.last_seq_ondisk;
struct bch_dev *ca;
+ struct bucket_array *buckets;
struct bucket *g;
struct bucket_mark m;
unsigned i;
- for_each_member_device(ca, c, i)
- for_each_bucket(g, ca) {
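+	/*
+	 * Descriptive note: bucket_mark holds only a truncated journal
+	 * sequence number, so once the journal has flushed past a bucket's
+	 * seq we clear journal_seq_valid to keep the stale value from
+	 * aliasing a future sequence number after wraparound.
+	 */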
+ for_each_member_device(ca, c, i) {
+ down_read(&ca->bucket_lock);
+ buckets = bucket_array(ca);
+
+ for_each_bucket(g, buckets) {
bucket_cmpxchg(g, m, ({
if (!m.journal_seq_valid ||
bucket_needs_journal_commit(m, last_seq_ondisk))
				break;

			m.journal_seq_valid = 0;
}));
}
+ up_read(&ca->bucket_lock);
+ }
}
#define bch2_usage_add(_acc, _stats) \
#define bch2_usage_read_raw(_stats) \
({ \
- typeof(*this_cpu_ptr(_stats)) _acc = { 0 }; \
+ typeof(*this_cpu_ptr(_stats)) _acc; \
int cpu; \
\
+ memset(&_acc, 0, sizeof(_acc)); \
+ \
for_each_possible_cpu(cpu) \
bch2_usage_add(&_acc, per_cpu_ptr((_stats), cpu)); \
\
return bch2_usage_read_raw(ca->usage_percpu);
}
-struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
+struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *c, struct bch_dev *ca)
{
- return bch2_usage_read_cached(ca->fs,
- ca->usage_cached,
- ca->usage_percpu);
+ return bch2_usage_read_cached(c, ca->usage_cached, ca->usage_percpu);
}
struct bch_fs_usage
c->usage_percpu);
}
-static inline int is_meta_bucket(struct bucket_mark m)
+struct fs_usage_sum {
+ u64 data;
+ u64 reserved;
+};
+
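+/*
+ * stats.s[i] accounts sectors stored with i + 1 replicas, so multiplying by
+ * (i + 1) converts back to raw disk sectors. Illustrative numbers: 100
+ * sectors of 2x-replicated dirty data live in s[1].data[S_DIRTY] and consume
+ * 200 sectors on disk.
+ */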
+static inline struct fs_usage_sum __fs_usage_sum(struct bch_fs_usage stats)
{
- return m.data_type != BUCKET_DATA;
+ struct fs_usage_sum sum = { 0 };
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(stats.s); i++) {
+ sum.data += (stats.s[i].data[S_META] +
+ stats.s[i].data[S_DIRTY]) * (i + 1);
+ sum.reserved += stats.s[i].persistent_reserved * (i + 1);
+ }
+
+ sum.reserved += stats.online_reserved;
+ return sum;
}
-static inline int is_dirty_bucket(struct bucket_mark m)
+#define RESERVE_FACTOR 6
+
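+/*
+ * Worked example (illustrative): with RESERVE_FACTOR == 6, reserve_factor()
+ * inflates a reservation by ~1/64th - reserve_factor(1000) = 1000 +
+ * (round_up(1000, 64) >> 6) = 1016 - and avail_factor() applies the inverse
+ * 64/65 scaling to free space, so the sum of all inflated reservations can
+ * never exceed capacity.
+ */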
+static u64 reserve_factor(u64 r)
{
- return m.data_type == BUCKET_DATA && !!m.dirty_sectors;
+ return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
}
-static inline int is_cached_bucket(struct bucket_mark m)
+static u64 avail_factor(u64 r)
{
- return m.data_type == BUCKET_DATA &&
- !m.dirty_sectors && !!m.cached_sectors;
+	return (r << RESERVE_FACTOR) / ((1 << RESERVE_FACTOR) + 1);
}
-static inline enum s_alloc bucket_type(struct bucket_mark m)
+u64 __bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage stats)
{
- return is_meta_bucket(m) ? S_META : S_DIRTY;
+ struct fs_usage_sum sum = __fs_usage_sum(stats);
+
+ return sum.data + reserve_factor(sum.reserved);
+}
+
+u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage stats)
+{
+ return min(c->capacity, __bch2_fs_sectors_used(c, stats));
+}
+
+u64 bch2_fs_sectors_free(struct bch_fs *c, struct bch_fs_usage stats)
+{
+ return avail_factor(c->capacity - bch2_fs_sectors_used(c, stats));
+}
+
+static inline int is_unavailable_bucket(struct bucket_mark m)
+{
+ return !is_available_bucket(m);
+}
+
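+/*
+ * Fragmented sectors: the unused space in a partially filled user bucket.
+ * The per-device sectors_fragmented total this feeds is (presumably) what
+ * copygc uses to judge how much space compaction could reclaim.
+ */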
+static inline int is_fragmented_bucket(struct bucket_mark m,
+ struct bch_dev *ca)
+{
+ if (!m.owned_by_allocator &&
+ m.data_type == BCH_DATA_USER &&
+ bucket_sectors_used(m))
+ return max_t(int, 0, (int) ca->mi.bucket_size -
+ bucket_sectors_used(m));
+ return 0;
+}
+
+static inline enum bch_data_type bucket_type(struct bucket_mark m)
+{
+ return m.cached_sectors && !m.dirty_sectors
+ ? BCH_DATA_CACHED
+ : m.data_type;
}
static bool bucket_became_unavailable(struct bch_fs *c,
{
return is_available_bucket(old) &&
!is_available_bucket(new) &&
- c && c->gc_pos.phase == GC_PHASE_DONE;
+ (!c || c->gc_pos.phase == GC_PHASE_DONE);
}
void bch2_fs_usage_apply(struct bch_fs *c,
struct disk_reservation *disk_res,
struct gc_pos gc_pos)
{
- s64 added =
- stats->s[S_COMPRESSED][S_META] +
- stats->s[S_COMPRESSED][S_DIRTY] +
- stats->persistent_reserved +
- stats->online_reserved;
+ struct fs_usage_sum sum = __fs_usage_sum(*stats);
+ s64 added = sum.data + sum.reserved;
/*
* Not allowed to reduce sectors_available except by getting a
stats->online_reserved -= added;
}
- lg_local_lock(&c->usage_lock);
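+	/*
+	 * usage_lock is now a percpu_rw_semaphore: hot paths take it for read
+	 * with preemption disabled (we're updating percpu counters), while
+	 * bch2_recalc_sectors_available() takes it for write to get a stable
+	 * view of those counters.
+	 */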
+ percpu_down_read_preempt_disable(&c->usage_lock);
/* online_reserved not subject to gc: */
this_cpu_ptr(c->usage_percpu)->online_reserved +=
stats->online_reserved;
bch2_usage_add(this_cpu_ptr(c->usage_percpu), stats);
bch2_fs_stats_verify(c);
- lg_local_unlock(&c->usage_lock);
+ percpu_up_read_preempt_enable(&c->usage_lock);
memset(stats, 0, sizeof(*stats));
}
-static void bch2_fs_usage_update(struct bch_fs_usage *fs_usage,
- struct bucket_mark old, struct bucket_mark new)
-{
- fs_usage->s[S_COMPRESSED][S_CACHED] +=
- (int) new.cached_sectors - (int) old.cached_sectors;
- fs_usage->s[S_COMPRESSED][bucket_type(old)] -=
- old.dirty_sectors;
- fs_usage->s[S_COMPRESSED][bucket_type(new)] +=
- new.dirty_sectors;
-}
-
-static void bch2_dev_usage_update(struct bch_dev *ca,
- struct bucket_mark old, struct bucket_mark new)
+static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
+ struct bucket_mark old, struct bucket_mark new)
{
- struct bch_fs *c = ca->fs;
struct bch_dev_usage *dev_usage;
- bch2_fs_inconsistent_on(old.data_type && new.data_type &&
- old.data_type != new.data_type, c,
- "different types of metadata in same bucket: %u, %u",
- old.data_type, new.data_type);
+ if (c)
+ percpu_rwsem_assert_held(&c->usage_lock);
- preempt_disable();
- dev_usage = this_cpu_ptr(ca->usage_percpu);
+ if (old.data_type && new.data_type &&
+ old.data_type != new.data_type) {
+ BUG_ON(!c);
+ bch2_fs_inconsistent(c,
+ "different types of data in same bucket: %s, %s",
+ bch2_data_types[old.data_type],
+ bch2_data_types[new.data_type]);
+ }
- dev_usage->sectors[S_CACHED] +=
- (int) new.cached_sectors - (int) old.cached_sectors;
+ dev_usage = this_cpu_ptr(ca->usage_percpu);
- dev_usage->sectors[bucket_type(old)] -= old.dirty_sectors;
- dev_usage->sectors[bucket_type(new)] += new.dirty_sectors;
+ dev_usage->buckets[bucket_type(old)]--;
+ dev_usage->buckets[bucket_type(new)]++;
dev_usage->buckets_alloc +=
(int) new.owned_by_allocator - (int) old.owned_by_allocator;
+ dev_usage->buckets_unavailable +=
+ is_unavailable_bucket(new) - is_unavailable_bucket(old);
- dev_usage->buckets_meta += is_meta_bucket(new) - is_meta_bucket(old);
- dev_usage->buckets_cached += is_cached_bucket(new) - is_cached_bucket(old);
- dev_usage->buckets_dirty += is_dirty_bucket(new) - is_dirty_bucket(old);
- preempt_enable();
+ dev_usage->sectors[old.data_type] -= old.dirty_sectors;
+ dev_usage->sectors[new.data_type] += new.dirty_sectors;
+ dev_usage->sectors[BCH_DATA_CACHED] +=
+ (int) new.cached_sectors - (int) old.cached_sectors;
+ dev_usage->sectors_fragmented +=
+ is_fragmented_bucket(new, ca) - is_fragmented_bucket(old, ca);
if (!is_available_bucket(old) && is_available_bucket(new))
bch2_wake_allocator(ca);
+
+ bch2_dev_stats_verify(ca);
}
-#define bucket_data_cmpxchg(ca, g, new, expr) \
+#define bucket_data_cmpxchg(c, ca, g, new, expr) \
({ \
struct bucket_mark _old = bucket_cmpxchg(g, new, expr); \
\
- bch2_dev_usage_update(ca, _old, new); \
+ bch2_dev_usage_update(c, ca, _old, new); \
_old; \
})
-void bch2_invalidate_bucket(struct bch_dev *ca, struct bucket *g)
+bool bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
+ size_t b, struct bucket_mark *old)
{
- struct bch_fs_usage stats = { 0 };
- struct bucket_mark old, new;
-
- old = bucket_data_cmpxchg(ca, g, new, ({
- new.owned_by_allocator = 1;
- new.had_metadata = 0;
- new.data_type = 0;
- new.cached_sectors = 0;
- new.dirty_sectors = 0;
- new.copygc = 0;
- new.gen++;
- }));
+ struct bucket *g;
+ struct bucket_mark new;
- /* XXX: we're not actually updating fs usage's cached sectors... */
- bch2_fs_usage_update(&stats, old, new);
+ percpu_rwsem_assert_held(&c->usage_lock);
- if (!old.owned_by_allocator && old.cached_sectors)
- trace_invalidate(ca, g - ca->buckets,
- old.cached_sectors);
-}
+ g = bucket(ca, b);
-void bch2_mark_free_bucket(struct bch_dev *ca, struct bucket *g)
-{
- struct bucket_mark old, new;
+ *old = bucket_data_cmpxchg(c, ca, g, new, ({
+ if (!is_available_bucket(new)) {
+ percpu_up_read_preempt_enable(&c->usage_lock);
+ return false;
+ }
- old = bucket_data_cmpxchg(ca, g, new, ({
- new.owned_by_allocator = 0;
+ new.owned_by_allocator = 1;
new.data_type = 0;
new.cached_sectors = 0;
new.dirty_sectors = 0;
+ new.gen++;
}));
- BUG_ON(bucket_became_unavailable(ca->fs, old, new));
-}
-
-void bch2_mark_alloc_bucket(struct bch_dev *ca, struct bucket *g,
- bool owned_by_allocator)
-{
- struct bucket_mark new;
-
- bucket_data_cmpxchg(ca, g, new, ({
- new.owned_by_allocator = owned_by_allocator;
- }));
+ if (!old->owned_by_allocator && old->cached_sectors)
+ trace_invalidate(ca, bucket_to_sector(ca, b),
+ old->cached_sectors);
+ return true;
}
-void bch2_mark_metadata_bucket(struct bch_dev *ca, struct bucket *g,
- enum bucket_data_type type,
- bool may_make_unavailable)
+void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
+ size_t b, bool owned_by_allocator,
+ struct gc_pos pos, unsigned flags)
{
+ struct bucket *g;
struct bucket_mark old, new;
- BUG_ON(!type);
+ percpu_rwsem_assert_held(&c->usage_lock);
+ g = bucket(ca, b);
- old = bucket_data_cmpxchg(ca, g, new, ({
- new.data_type = type;
- new.had_metadata = 1;
+ if (!(flags & BCH_BUCKET_MARK_GC_LOCK_HELD) &&
+ gc_will_visit(c, pos))
+ return;
+
+ old = bucket_data_cmpxchg(c, ca, g, new, ({
+ new.owned_by_allocator = owned_by_allocator;
}));
- BUG_ON(old.cached_sectors);
- BUG_ON(old.dirty_sectors);
- BUG_ON(!may_make_unavailable &&
- bucket_became_unavailable(ca->fs, old, new));
+ BUG_ON(!owned_by_allocator && !old.owned_by_allocator &&
+ c->gc_pos.phase == GC_PHASE_DONE);
}
#define saturated_add(ca, dst, src, max) \
} \
} while (0)
-#if 0
-/* Reverting this until the copygc + compression issue is fixed: */
-
-static unsigned __disk_sectors(const union bch_extent_crc *crc, unsigned sectors)
+void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
+ size_t b, enum bch_data_type type,
+ unsigned sectors, struct gc_pos pos,
+ unsigned flags)
{
- return crc_compression_type(crc)
- ? sectors * crc_compressed_size(crc) / crc_uncompressed_size(crc)
- : sectors;
-}
+ struct bucket *g;
+ struct bucket_mark old, new;
-static unsigned __compressed_sectors(const union bch_extent_crc *crc, unsigned sectors)
-{
- return crc_compression_type(crc)
- ? min_t(unsigned, crc_compressed_size(crc), sectors)
- : sectors;
-}
-#else
-static unsigned __disk_sectors(const union bch_extent_crc *crc, unsigned sectors)
-{
- return sectors;
+ BUG_ON(!type);
+
+ if (likely(c)) {
+ percpu_rwsem_assert_held(&c->usage_lock);
+
+ if (!(flags & BCH_BUCKET_MARK_GC_LOCK_HELD) &&
+ gc_will_visit(c, pos))
+ return;
+ }
+
+ rcu_read_lock();
+
+ g = bucket(ca, b);
+ old = bucket_data_cmpxchg(c, ca, g, new, ({
+ saturated_add(ca, new.dirty_sectors, sectors,
+ GC_MAX_SECTORS_USED);
+ new.data_type = type;
+ }));
+
+ rcu_read_unlock();
+
+ BUG_ON(!(flags & BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE) &&
+ bucket_became_unavailable(c, old, new));
}
-static unsigned __compressed_sectors(const union bch_extent_crc *crc, unsigned sectors)
+static int __disk_sectors(struct bch_extent_crc_unpacked crc, unsigned sectors)
{
- return sectors;
+ if (!sectors)
+ return 0;
+
+ return max(1U, DIV_ROUND_UP(sectors * crc.compressed_size,
+ crc.uncompressed_size));
}
-#endif
/*
* Checking against gc's position has to be done here, inside the cmpxchg()
 * loop, to avoid racing with the start of gc clearing all the marks - GC does
 * that with the gc pos seqlock held.
*/
static void bch2_mark_pointer(struct bch_fs *c,
- struct bkey_s_c_extent e,
- const union bch_extent_crc *crc,
- const struct bch_extent_ptr *ptr,
- s64 sectors, enum s_alloc type,
- bool may_make_unavailable,
- struct bch_fs_usage *stats,
- bool gc_will_visit, u64 journal_seq)
+ struct bkey_s_c_extent e,
+ const struct bch_extent_ptr *ptr,
+ struct bch_extent_crc_unpacked crc,
+ s64 sectors, enum s_alloc type,
+ struct bch_fs_usage *stats,
+ u64 journal_seq, unsigned flags)
{
struct bucket_mark old, new;
unsigned saturated;
- struct bch_dev *ca = c->devs[ptr->dev];
- struct bucket *g = ca->buckets + PTR_BUCKET_NR(ca, ptr);
- unsigned old_sectors, new_sectors;
- int disk_sectors, compressed_sectors;
-
- if (sectors > 0) {
- old_sectors = 0;
- new_sectors = sectors;
- } else {
- old_sectors = e.k->size;
- new_sectors = e.k->size + sectors;
- }
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bucket *g = PTR_BUCKET(ca, ptr);
+ enum bch_data_type data_type = type == S_META
+ ? BCH_DATA_BTREE : BCH_DATA_USER;
+ u64 v;
+
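+	/*
+	 * For compressed extents, scale the sector delta by the compression
+	 * ratio. Illustrative example: dropping 8 of 16 uncompressed sectors
+	 * from a 2:1 compressed extent frees 4 of its 8 disk sectors.
+	 */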
+ if (crc.compression_type) {
+ unsigned old_sectors, new_sectors;
+
+ if (sectors > 0) {
+ old_sectors = 0;
+ new_sectors = sectors;
+ } else {
+ old_sectors = e.k->size;
+ new_sectors = e.k->size + sectors;
+ }
- disk_sectors = -__disk_sectors(crc, old_sectors)
- + __disk_sectors(crc, new_sectors);
- compressed_sectors = -__compressed_sectors(crc, old_sectors)
- + __compressed_sectors(crc, new_sectors);
+		sectors = -__disk_sectors(crc, old_sectors)
+			  + __disk_sectors(crc, new_sectors);
+ }
- if (gc_will_visit) {
+ if (flags & BCH_BUCKET_MARK_GC_WILL_VISIT) {
if (journal_seq)
- bucket_cmpxchg(g, new, new.journal_seq = journal_seq);
+ bucket_cmpxchg(g, new, ({
+ new.journal_seq_valid = 1;
+ new.journal_seq = journal_seq;
+ }));
- goto out;
+ return;
}
- old = bucket_data_cmpxchg(ca, g, new, ({
+ v = atomic64_read(&g->_mark.v);
+ do {
+ new.v.counter = old.v.counter = v;
saturated = 0;
/*
* checked the gen
*/
if (gen_after(new.gen, ptr->gen)) {
- EBUG_ON(type != S_CACHED &&
+ BUG_ON(!test_bit(BCH_FS_ALLOC_READ_DONE, &c->flags));
+ EBUG_ON(!ptr->cached &&
test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags));
return;
}
- EBUG_ON(type != S_CACHED &&
- !may_make_unavailable &&
- is_available_bucket(new) &&
- test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags));
-
- if (type != S_CACHED &&
+ if (!ptr->cached &&
new.dirty_sectors == GC_MAX_SECTORS_USED &&
- disk_sectors < 0)
- saturated = -disk_sectors;
+ sectors < 0)
+ saturated = -sectors;
- if (type == S_CACHED)
- saturated_add(ca, new.cached_sectors, disk_sectors,
+ if (ptr->cached)
+ saturated_add(ca, new.cached_sectors, sectors,
GC_MAX_SECTORS_USED);
else
- saturated_add(ca, new.dirty_sectors, disk_sectors,
+ saturated_add(ca, new.dirty_sectors, sectors,
GC_MAX_SECTORS_USED);
if (!new.dirty_sectors &&
new.journal_seq = journal_seq;
}
} else {
- new.data_type = type == S_META
- ? BUCKET_BTREE : BUCKET_DATA;
+ new.data_type = data_type;
}
- new.had_metadata |= is_meta_bucket(new);
- }));
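+		/*
+		 * BCH_BUCKET_MARK_NOATOMIC skips the cmpxchg: only safe for
+		 * single-threaded callers (e.g. while reading alloc info at
+		 * startup, before the allocator and gc are running).
+		 */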
+ if (flags & BCH_BUCKET_MARK_NOATOMIC) {
+ g->_mark = new;
+ break;
+ }
+ } while ((v = atomic64_cmpxchg(&g->_mark.v,
+ old.v.counter,
+ new.v.counter)) != old.v.counter);
- BUG_ON(!may_make_unavailable &&
+ bch2_dev_usage_update(c, ca, old, new);
+
+ BUG_ON(!(flags & BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE) &&
bucket_became_unavailable(c, old, new));
if (saturated &&
atomic_long_add_return(saturated,
&ca->saturated_count) >=
- ca->free_inc.size << ca->bucket_bits) {
+ bucket_to_sector(ca, ca->free_inc.size)) {
if (c->gc_thread) {
trace_gc_sectors_saturated(c);
wake_up_process(c->gc_thread);
}
}
-out:
- stats->s[S_COMPRESSED][type] += compressed_sectors;
- stats->s[S_UNCOMPRESSED][type] += sectors;
-}
-
-static void bch2_mark_extent(struct bch_fs *c, struct bkey_s_c_extent e,
- s64 sectors, bool metadata,
- bool may_make_unavailable,
- struct bch_fs_usage *stats,
- bool gc_will_visit, u64 journal_seq)
-{
- const struct bch_extent_ptr *ptr;
- const union bch_extent_crc *crc;
- enum s_alloc type = metadata ? S_META : S_DIRTY;
-
- BUG_ON(metadata && bkey_extent_is_cached(e.k));
- BUG_ON(!sectors);
-
- extent_for_each_ptr_crc(e, ptr, crc)
- bch2_mark_pointer(c, e, crc, ptr, sectors,
- ptr->cached ? S_CACHED : type,
- may_make_unavailable,
- stats, gc_will_visit, journal_seq);
-}
-
-static void __bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
- s64 sectors, bool metadata,
- bool may_make_unavailable,
- struct bch_fs_usage *stats,
- bool gc_will_visit, u64 journal_seq)
-{
- switch (k.k->type) {
- case BCH_EXTENT:
- case BCH_EXTENT_CACHED:
- bch2_mark_extent(c, bkey_s_c_to_extent(k), sectors, metadata,
- may_make_unavailable, stats,
- gc_will_visit, journal_seq);
- break;
- case BCH_RESERVATION: {
- struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
-
- stats->persistent_reserved += r.v->nr_replicas * sectors;
- break;
- }
- }
-}
-
-void __bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
- s64 sectors, bool metadata,
- struct bch_fs_usage *stats)
-{
- __bch2_mark_key(c, k, sectors, metadata, true, stats, false, 0);
-}
-
-void bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
- s64 sectors, bool metadata)
-{
- struct bch_fs_usage stats = { 0 };
-
- __bch2_gc_mark_key(c, k, sectors, metadata, &stats);
-
- preempt_disable();
- bch2_usage_add(this_cpu_ptr(c->usage_percpu), &stats);
- preempt_enable();
}
void bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
- s64 sectors, bool metadata, struct gc_pos gc_pos,
- struct bch_fs_usage *stats, u64 journal_seq)
+ s64 sectors, bool metadata,
+ struct gc_pos pos,
+ struct bch_fs_usage *stats,
+ u64 journal_seq, unsigned flags)
{
/*
* synchronization w.r.t. GC:
* To know whether we should mark a given reference (GC either isn't
* running, or has already marked references at this position) we
* construct a total order for everything GC walks. Then, we can simply
- * compare the position of the reference we're marking - @gc_pos - with
+ * compare the position of the reference we're marking - @pos - with
* GC's current position. If GC is going to mark this reference, GC's
- * current position will be less than @gc_pos; if GC's current position
- * is greater than @gc_pos GC has either already walked this position,
- * or isn't running.
+ * current position will be less than @pos; if GC's current position is
+ * greater than @pos GC has either already walked this position, or
+ * isn't running.
*
* To avoid racing with GC's position changing, we have to deal with
* - GC's position being set to GC_POS_MIN when GC starts:
* usage_lock guards against this
- * - GC's position overtaking @gc_pos: we guard against this with
+ * - GC's position overtaking @pos: we guard against this with
* whatever lock protects the data structure the reference lives in
* (e.g. the btree node lock, or the relevant allocator lock).
*/
- lg_local_lock(&c->usage_lock);
- __bch2_mark_key(c, k, sectors, metadata, false, stats,
- gc_will_visit(c, gc_pos), journal_seq);
- bch2_fs_stats_verify(c);
- lg_local_unlock(&c->usage_lock);
-}
+ percpu_down_read_preempt_disable(&c->usage_lock);
+ if (!(flags & BCH_BUCKET_MARK_GC_LOCK_HELD) &&
+ gc_will_visit(c, pos))
+ flags |= BCH_BUCKET_MARK_GC_WILL_VISIT;
-static u64 __recalc_sectors_available(struct bch_fs *c)
-{
- return c->capacity - bch2_fs_sectors_used(c);
+ if (!stats)
+ stats = this_cpu_ptr(c->usage_percpu);
+
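+	/*
+	 * Extents are accounted by durability: replicas counts the non-cached
+	 * pointers, and the sectors are charged to stats->s[replicas - 1],
+	 * the slot for data stored with that many copies.
+	 */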
+ switch (k.k->type) {
+ case BCH_EXTENT:
+ case BCH_EXTENT_CACHED: {
+ struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+ const struct bch_extent_ptr *ptr;
+ struct bch_extent_crc_unpacked crc;
+ enum s_alloc type = metadata ? S_META : S_DIRTY;
+ unsigned replicas = 0;
+
+ BUG_ON(metadata && bkey_extent_is_cached(e.k));
+ BUG_ON(!sectors);
+
+ extent_for_each_ptr_crc(e, ptr, crc) {
+ bch2_mark_pointer(c, e, ptr, crc, sectors, type,
+ stats, journal_seq, flags);
+ replicas += !ptr->cached;
+ }
+
+ if (replicas) {
+			BUG_ON(replicas - 1 >= ARRAY_SIZE(stats->s));
+ stats->s[replicas - 1].data[type] += sectors;
+ }
+ break;
+ }
+ case BCH_RESERVATION: {
+ struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
+
+ if (r.v->nr_replicas) {
+			BUG_ON(r.v->nr_replicas - 1 >= ARRAY_SIZE(stats->s));
+ stats->s[r.v->nr_replicas - 1].persistent_reserved += sectors;
+ }
+ break;
+ }
+ }
+ percpu_up_read_preempt_enable(&c->usage_lock);
}
-/* Used by gc when it's starting: */
-void bch2_recalc_sectors_available(struct bch_fs *c)
+/* Disk reservations: */
+
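+/*
+ * Zero each CPU's available_cache (the percpu carve-outs from
+ * c->sectors_available) before recomputing, so locally cached sectors aren't
+ * double counted; callers hold usage_lock for write and then reset
+ * c->sectors_available from the result.
+ */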
+static u64 __recalc_sectors_available(struct bch_fs *c)
{
int cpu;
- lg_global_lock(&c->usage_lock);
-
for_each_possible_cpu(cpu)
per_cpu_ptr(c->usage_percpu, cpu)->available_cache = 0;
- atomic64_set(&c->sectors_available,
- __recalc_sectors_available(c));
+ return bch2_fs_sectors_free(c, bch2_fs_usage_read(c));
+}
- lg_global_unlock(&c->usage_lock);
+/* Used by gc when it's starting: */
+void bch2_recalc_sectors_available(struct bch_fs *c)
+{
+ percpu_down_write(&c->usage_lock);
+ atomic64_set(&c->sectors_available, __recalc_sectors_available(c));
+ percpu_up_write(&c->usage_lock);
}
-void bch2_disk_reservation_put(struct bch_fs *c,
- struct disk_reservation *res)
+void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
{
- if (res->sectors) {
- lg_local_lock(&c->usage_lock);
- this_cpu_sub(c->usage_percpu->online_reserved,
- res->sectors);
+ percpu_down_read_preempt_disable(&c->usage_lock);
+ this_cpu_sub(c->usage_percpu->online_reserved,
+ res->sectors);
- bch2_fs_stats_verify(c);
- lg_local_unlock(&c->usage_lock);
+ bch2_fs_stats_verify(c);
+ percpu_up_read_preempt_enable(&c->usage_lock);
- res->sectors = 0;
- }
+ res->sectors = 0;
}
#define SECTORS_CACHE 1024
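+/*
+ * Reservation fast path: each CPU keeps up to SECTORS_CACHE sectors carved
+ * off c->sectors_available in its available_cache, so most reservations only
+ * touch percpu counters; the global atomic is hit just to refill the cache,
+ * and the slow path recalculates free space from scratch.
+ */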
-int bch2_disk_reservation_add(struct bch_fs *c,
- struct disk_reservation *res,
- unsigned sectors, int flags)
+int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
+ unsigned sectors, int flags)
{
struct bch_fs_usage *stats;
- u64 old, new, v;
+ u64 old, v, get;
s64 sectors_available;
int ret;
- sectors *= res->nr_replicas;
-
- lg_local_lock(&c->usage_lock);
+ percpu_down_read_preempt_disable(&c->usage_lock);
stats = this_cpu_ptr(c->usage_percpu);
- if (sectors >= stats->available_cache)
+ if (sectors <= stats->available_cache)
goto out;
v = atomic64_read(&c->sectors_available);
do {
old = v;
- if (old < sectors) {
- lg_local_unlock(&c->usage_lock);
+ get = min((u64) sectors + SECTORS_CACHE, old);
+
+ if (get < sectors) {
+ percpu_up_read_preempt_enable(&c->usage_lock);
goto recalculate;
}
-
- new = max_t(s64, 0, old - sectors - SECTORS_CACHE);
} while ((v = atomic64_cmpxchg(&c->sectors_available,
- old, new)) != old);
+ old, old - get)) != old);
+
+ stats->available_cache += get;
- stats->available_cache += old - new;
out:
stats->available_cache -= sectors;
stats->online_reserved += sectors;
res->sectors += sectors;
+ bch2_disk_reservations_verify(c, flags);
bch2_fs_stats_verify(c);
- lg_local_unlock(&c->usage_lock);
+ percpu_up_read_preempt_enable(&c->usage_lock);
return 0;
recalculate:
else if (!down_read_trylock(&c->gc_lock))
return -EINTR;
}
- lg_global_lock(&c->usage_lock);
+ percpu_down_write(&c->usage_lock);
sectors_available = __recalc_sectors_available(c);
if (sectors <= sectors_available ||
stats->online_reserved += sectors;
res->sectors += sectors;
ret = 0;
+
+ bch2_disk_reservations_verify(c, flags);
} else {
atomic64_set(&c->sectors_available, sectors_available);
ret = -ENOSPC;
}
bch2_fs_stats_verify(c);
- lg_global_unlock(&c->usage_lock);
+ percpu_up_write(&c->usage_lock);
+
if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD))
up_read(&c->gc_lock);
return ret;
}
-int bch2_disk_reservation_get(struct bch_fs *c,
- struct disk_reservation *res,
- unsigned sectors, int flags)
+/* Startup/shutdown: */
+
+static void buckets_free_rcu(struct rcu_head *rcu)
{
- res->sectors = 0;
- res->gen = c->capacity_gen;
- res->nr_replicas = (flags & BCH_DISK_RESERVATION_METADATA)
- ? c->opts.metadata_replicas
- : c->opts.data_replicas;
+ struct bucket_array *buckets =
+ container_of(rcu, struct bucket_array, rcu);
+
+ kvpfree(buckets,
+ sizeof(struct bucket_array) +
+ buckets->nbuckets * sizeof(struct bucket));
+}
+
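+/*
+ * Resize protocol, as implemented below: allocate the new arrays up front,
+ * quiesce writers (gc_lock, bucket_lock, usage_lock for write), copy the old
+ * contents, publish via rcu_assign_pointer(), and free the old array only
+ * after an RCU grace period so lockless bucket_array() readers stay safe.
+ */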
+int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
+{
+ struct bucket_array *buckets = NULL, *old_buckets = NULL;
+ unsigned long *buckets_dirty = NULL;
+ u8 *oldest_gens = NULL;
+ alloc_fifo free[RESERVE_NR];
+ alloc_fifo free_inc;
+ alloc_heap alloc_heap;
+ copygc_heap copygc_heap;
+
+ size_t btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
+ ca->mi.bucket_size / c->opts.btree_node_size);
+ /* XXX: these should be tunable */
+ size_t reserve_none = max_t(size_t, 4, ca->mi.nbuckets >> 9);
+ size_t copygc_reserve = max_t(size_t, 16, ca->mi.nbuckets >> 7);
+ size_t free_inc_reserve = copygc_reserve / 2;
+ bool resize = ca->buckets != NULL,
+ start_copygc = ca->copygc_thread != NULL;
+ int ret = -ENOMEM;
+ unsigned i;
+
+ memset(&free, 0, sizeof(free));
+ memset(&free_inc, 0, sizeof(free_inc));
+ memset(&alloc_heap, 0, sizeof(alloc_heap));
+	memset(&copygc_heap, 0, sizeof(copygc_heap));
+
+ if (!(buckets = kvpmalloc(sizeof(struct bucket_array) +
+ nbuckets * sizeof(struct bucket),
+ GFP_KERNEL|__GFP_ZERO)) ||
+ !(oldest_gens = kvpmalloc(nbuckets * sizeof(u8),
+ GFP_KERNEL|__GFP_ZERO)) ||
+ !(buckets_dirty = kvpmalloc(BITS_TO_LONGS(nbuckets) *
+ sizeof(unsigned long),
+ GFP_KERNEL|__GFP_ZERO)) ||
+ !init_fifo(&free[RESERVE_BTREE], btree_reserve, GFP_KERNEL) ||
+ !init_fifo(&free[RESERVE_MOVINGGC],
+ copygc_reserve, GFP_KERNEL) ||
+ !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
+ !init_fifo(&free_inc, free_inc_reserve, GFP_KERNEL) ||
+ !init_heap(&alloc_heap, free_inc_reserve, GFP_KERNEL) ||
+	    !init_heap(&copygc_heap, copygc_reserve, GFP_KERNEL))
+ goto err;
+
+ buckets->first_bucket = ca->mi.first_bucket;
+ buckets->nbuckets = nbuckets;
+
+ bch2_copygc_stop(ca);
+
+ if (resize) {
+ down_write(&c->gc_lock);
+ down_write(&ca->bucket_lock);
+ percpu_down_write(&c->usage_lock);
+ }
+
+ old_buckets = bucket_array(ca);
+
+ if (resize) {
+ size_t n = min(buckets->nbuckets, old_buckets->nbuckets);
+
+ memcpy(buckets->b,
+ old_buckets->b,
+ n * sizeof(struct bucket));
+ memcpy(oldest_gens,
+ ca->oldest_gens,
+ n * sizeof(u8));
+ memcpy(buckets_dirty,
+ ca->buckets_dirty,
+ BITS_TO_LONGS(n) * sizeof(unsigned long));
+ }
+
+ rcu_assign_pointer(ca->buckets, buckets);
+ buckets = old_buckets;
+
+ swap(ca->oldest_gens, oldest_gens);
+ swap(ca->buckets_dirty, buckets_dirty);
+
+ if (resize)
+ percpu_up_write(&c->usage_lock);
+
+ spin_lock(&c->freelist_lock);
+ for (i = 0; i < RESERVE_NR; i++) {
+ fifo_move(&free[i], &ca->free[i]);
+ swap(ca->free[i], free[i]);
+ }
+ fifo_move(&free_inc, &ca->free_inc);
+ swap(ca->free_inc, free_inc);
+ spin_unlock(&c->freelist_lock);
+
+ /* with gc lock held, alloc_heap can't be in use: */
+ swap(ca->alloc_heap, alloc_heap);
+
+ /* and we shut down copygc: */
+ swap(ca->copygc_heap, copygc_heap);
+
+ nbuckets = ca->mi.nbuckets;
+
+ if (resize) {
+ up_write(&ca->bucket_lock);
+ up_write(&c->gc_lock);
+ }
+
+ if (start_copygc &&
+ bch2_copygc_start(c, ca))
+ bch_err(ca, "error restarting copygc thread");
+
+ ret = 0;
+err:
+	free_heap(&copygc_heap);
+ free_heap(&alloc_heap);
+ free_fifo(&free_inc);
+ for (i = 0; i < RESERVE_NR; i++)
+ free_fifo(&free[i]);
+ kvpfree(buckets_dirty,
+ BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
+ kvpfree(oldest_gens,
+ nbuckets * sizeof(u8));
+ if (buckets)
+		call_rcu(&buckets->rcu, buckets_free_rcu);
+
+ return ret;
+}
+
+void bch2_dev_buckets_free(struct bch_dev *ca)
+{
+ unsigned i;
+
+ free_heap(&ca->copygc_heap);
+ free_heap(&ca->alloc_heap);
+ free_fifo(&ca->free_inc);
+ for (i = 0; i < RESERVE_NR; i++)
+ free_fifo(&ca->free[i]);
+ kvpfree(ca->buckets_dirty,
+ BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
+ kvpfree(ca->oldest_gens, ca->mi.nbuckets * sizeof(u8));
+ kvpfree(rcu_dereference_protected(ca->buckets, 1),
+ sizeof(struct bucket_array) +
+ ca->mi.nbuckets * sizeof(struct bucket));
+
+ free_percpu(ca->usage_percpu);
+}
+
+int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
+{
+ if (!(ca->usage_percpu = alloc_percpu(struct bch_dev_usage)))
+ return -ENOMEM;
- return bch2_disk_reservation_add(c, res, sectors, flags);
+	return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
}