*/
#include "bcachefs.h"
-#include "alloc.h"
+#include "alloc_background.h"
+#include "bset.h"
#include "btree_gc.h"
+#include "btree_update.h"
#include "buckets.h"
+#include "ec.h"
#include "error.h"
+#include "movinggc.h"
+#include "replicas.h"
#include <linux/preempt.h>
#include <trace/events/bcachefs.h>
-#ifdef DEBUG_BUCKETS
-
-#define lg_local_lock lg_global_lock
-#define lg_local_unlock lg_global_unlock
-
-static void bch2_fs_stats_verify(struct bch_fs *c)
-{
- struct bch_fs_usage stats =
- __bch2_fs_usage_read(c);
- unsigned i;
-
- for (i = 0; i < BCH_REPLICAS_MAX; i++) {
- if ((s64) stats.s[i].data[S_META] < 0)
- panic("replicas %u meta underflow: %lli\n",
- i + 1, stats.s[i].data[S_META]);
-
- if ((s64) stats.s[i].data[S_DIRTY] < 0)
- panic("replicas %u dirty underflow: %lli\n",
- i + 1, stats.s[i].data[S_DIRTY]);
-
- if ((s64) stats.s[i].persistent_reserved < 0)
- panic("replicas %u reserved underflow: %lli\n",
- i + 1, stats.s[i].persistent_reserved);
- }
-
- if ((s64) stats.online_reserved < 0)
- panic("sectors_online_reserved underflow: %lli\n",
- stats.online_reserved);
-}
-
-#else
-
-static void bch2_fs_stats_verify(struct bch_fs *c) {}
-
-#endif
-
/*
* Clear journal_seq_valid for buckets for which it's not needed, to prevent
* wraparound:
*/
void bch2_bucket_seq_cleanup(struct bch_fs *c)
{
+ u64 journal_seq = atomic64_read(&c->journal.seq);
u16 last_seq_ondisk = c->journal.last_seq_ondisk;
struct bch_dev *ca;
+ struct bucket_array *buckets;
struct bucket *g;
struct bucket_mark m;
unsigned i;
- for_each_member_device(ca, c, i)
- for_each_bucket(g, ca) {
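+ /*
+ * (note) bucket journal_seq is a truncated sequence number
+ * (BUCKET_JOURNAL_SEQ_BITS wide); only rescan once a quarter of its
+ * range has elapsed since the last cleanup:
+ */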
+ if (journal_seq - c->last_bucket_seq_cleanup <
+ (1U << (BUCKET_JOURNAL_SEQ_BITS - 2)))
+ return;
+
+ c->last_bucket_seq_cleanup = journal_seq;
+
+ for_each_member_device(ca, c, i) {
+ down_read(&ca->bucket_lock);
+ buckets = bucket_array(ca);
+
+ for_each_bucket(g, buckets) {
bucket_cmpxchg(g, m, ({
if (!m.journal_seq_valid ||
bucket_needs_journal_commit(m, last_seq_ondisk))
m.journal_seq_valid = 0;
}));
}
+ up_read(&ca->bucket_lock);
+ }
}
-#define bch2_usage_add(_acc, _stats) \
-do { \
- typeof(_acc) _a = (_acc), _s = (_stats); \
- unsigned i; \
- \
- for (i = 0; i < sizeof(*_a) / sizeof(u64); i++) \
- ((u64 *) (_a))[i] += ((u64 *) (_s))[i]; \
-} while (0)
-
#define bch2_usage_read_raw(_stats) \
({ \
- typeof(*this_cpu_ptr(_stats)) _acc = { 0 }; \
- int cpu; \
+ typeof(*this_cpu_ptr(_stats)) _acc; \
\
- for_each_possible_cpu(cpu) \
- bch2_usage_add(&_acc, per_cpu_ptr((_stats), cpu)); \
+ memset(&_acc, 0, sizeof(_acc)); \
+ acc_u64s_percpu((u64 *) &_acc, \
+ (u64 __percpu *) _stats, \
+ sizeof(_acc) / sizeof(u64)); \
\
_acc; \
})
-#define bch2_usage_read_cached(_c, _cached, _uncached) \
-({ \
- typeof(_cached) _ret; \
- unsigned _seq; \
- \
- do { \
- _seq = read_seqcount_begin(&(_c)->gc_pos_lock); \
- _ret = (_c)->gc_pos.phase == GC_PHASE_DONE \
- ? bch2_usage_read_raw(_uncached) \
- : (_cached); \
- } while (read_seqcount_retry(&(_c)->gc_pos_lock, _seq)); \
- \
- _ret; \
-})
+struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *c, struct bch_dev *ca)
+{
+ return bch2_usage_read_raw(ca->usage[0]);
+}
-struct bch_dev_usage __bch2_dev_usage_read(struct bch_dev *ca)
+struct bch_fs_usage *bch2_fs_usage_read(struct bch_fs *c)
{
- return bch2_usage_read_raw(ca->usage_percpu);
+ struct bch_fs_usage *ret;
+ unsigned nr = READ_ONCE(c->replicas.nr);
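+
+ /*
+ * (note) c->replicas.nr can grow between this allocation and taking
+ * mark_lock below; if it did, the buffer is too small and we retry
+ * with the larger count:
+ */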
+retry:
+ ret = kzalloc(sizeof(*ret) + nr * sizeof(u64), GFP_NOFS);
+ if (unlikely(!ret))
+ return NULL;
+
+ percpu_down_read_preempt_disable(&c->mark_lock);
+
+ if (unlikely(nr < c->replicas.nr)) {
+ nr = c->replicas.nr;
+ percpu_up_read_preempt_enable(&c->mark_lock);
+ kfree(ret);
+ goto retry;
+ }
+
+ acc_u64s_percpu((u64 *) ret,
+ (u64 __percpu *) c->usage[0],
+ sizeof(*ret) / sizeof(u64) + nr);
+
+ percpu_up_read_preempt_enable(&c->mark_lock);
+
+ return ret;
}
-struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
+#define RESERVE_FACTOR 6
+
+static u64 reserve_factor(u64 r)
{
- return bch2_usage_read_cached(ca->fs,
- ca->usage_cached,
- ca->usage_percpu);
+ return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
}
-struct bch_fs_usage
-__bch2_fs_usage_read(struct bch_fs *c)
+static u64 avail_factor(u64 r)
{
- return bch2_usage_read_raw(c->usage_percpu);
+ return (r << RESERVE_FACTOR) / ((1 << RESERVE_FACTOR) + 1);
}
-struct bch_fs_usage
-bch2_fs_usage_read(struct bch_fs *c)
+u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage fs_usage)
{
- return bch2_usage_read_cached(c,
- c->usage_cached,
- c->usage_percpu);
+ return min(fs_usage.s.hidden +
+ fs_usage.s.data +
+ reserve_factor(fs_usage.s.reserved +
+ fs_usage.s.online_reserved),
+ c->capacity);
}
-static inline int is_meta_bucket(struct bucket_mark m)
+struct bch_fs_usage_short
+bch2_fs_usage_read_short(struct bch_fs *c)
{
- return m.data_type != BUCKET_DATA;
+ struct bch_fs_usage_summarized usage =
+ bch2_usage_read_raw(&c->usage[0]->s);
+ struct bch_fs_usage_short ret;
+
+ ret.capacity = READ_ONCE(c->capacity) - usage.hidden;
+ ret.used = min(ret.capacity, usage.data +
+ reserve_factor(usage.reserved +
+ usage.online_reserved));
+ ret.nr_inodes = usage.nr_inodes;
+
+ return ret;
}
-static inline int is_dirty_bucket(struct bucket_mark m)
+static inline int is_unavailable_bucket(struct bucket_mark m)
{
- return m.data_type == BUCKET_DATA && !!m.dirty_sectors;
+ return !is_available_bucket(m);
}
-static inline int is_cached_bucket(struct bucket_mark m)
+static inline int is_fragmented_bucket(struct bucket_mark m,
+ struct bch_dev *ca)
{
- return m.data_type == BUCKET_DATA &&
- !m.dirty_sectors && !!m.cached_sectors;
+ if (!m.owned_by_allocator &&
+ m.data_type == BCH_DATA_USER &&
+ bucket_sectors_used(m))
+ return max_t(int, 0, (int) ca->mi.bucket_size -
+ bucket_sectors_used(m));
+ return 0;
}
-static inline enum s_alloc bucket_type(struct bucket_mark m)
+static inline enum bch_data_type bucket_type(struct bucket_mark m)
{
- return is_meta_bucket(m) ? S_META : S_DIRTY;
+ return m.cached_sectors && !m.dirty_sectors
+ ? BCH_DATA_CACHED
+ : m.data_type;
}
-static bool bucket_became_unavailable(struct bch_fs *c,
- struct bucket_mark old,
+static bool bucket_became_unavailable(struct bucket_mark old,
struct bucket_mark new)
{
return is_available_bucket(old) &&
- !is_available_bucket(new) &&
- c && c->gc_pos.phase == GC_PHASE_DONE;
+ !is_available_bucket(new);
}
-void bch2_fs_usage_apply(struct bch_fs *c,
- struct bch_fs_usage *stats,
+int bch2_fs_usage_apply(struct bch_fs *c,
+ struct bch_fs_usage *fs_usage,
struct disk_reservation *disk_res,
struct gc_pos gc_pos)
{
- struct fs_usage_sum sum = __fs_usage_sum(*stats);
- s64 added = sum.data + sum.reserved;
+ s64 added = fs_usage->s.data + fs_usage->s.reserved;
+ s64 should_not_have_added;
+ int ret = 0;
+
+ percpu_rwsem_assert_held(&c->mark_lock);
/*
* Not allowed to reduce sectors_available except by getting a
* reservation:
*/
- BUG_ON(added > (s64) (disk_res ? disk_res->sectors : 0));
+ should_not_have_added = added - (s64) (disk_res ? disk_res->sectors : 0);
+ if (WARN_ONCE(should_not_have_added > 0,
+ "disk usage increased without a reservation")) {
+ atomic64_sub(should_not_have_added, &c->sectors_available);
+ added -= should_not_have_added;
+ ret = -1;
+ }
if (added > 0) {
- disk_res->sectors -= added;
- stats->online_reserved -= added;
+ disk_res->sectors -= added;
+ fs_usage->s.online_reserved -= added;
}
- lg_local_lock(&c->usage_lock);
- /* online_reserved not subject to gc: */
- this_cpu_ptr(c->usage_percpu)->online_reserved +=
- stats->online_reserved;
- stats->online_reserved = 0;
+ acc_u64s((u64 *) this_cpu_ptr(c->usage[0]),
+ (u64 *) fs_usage,
+ sizeof(*fs_usage) / sizeof(u64) + c->replicas.nr);
- if (!gc_will_visit(c, gc_pos))
- bch2_usage_add(this_cpu_ptr(c->usage_percpu), stats);
+ if (gc_visited(c, gc_pos)) {
+ BUG_ON(!c->usage[1]);
+ acc_u64s((u64 *) this_cpu_ptr(c->usage[1]),
+ (u64 *) fs_usage,
+ sizeof(*fs_usage) / sizeof(u64) + c->replicas.nr);
+ }
- bch2_fs_stats_verify(c);
- lg_local_unlock(&c->usage_lock);
+ return ret;
+}
- memset(stats, 0, sizeof(*stats));
+static inline void account_bucket(struct bch_fs_usage *fs_usage,
+ struct bch_dev_usage *dev_usage,
+ enum bch_data_type type,
+ int nr, s64 size)
+{
+ if (type == BCH_DATA_SB || type == BCH_DATA_JOURNAL)
+ fs_usage->s.hidden += size;
+
+ dev_usage->buckets[type] += nr;
}
-static void bch2_dev_usage_update(struct bch_dev *ca,
- struct bucket_mark old, struct bucket_mark new)
+static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
+ struct bch_fs_usage *fs_usage,
+ struct bucket_mark old, struct bucket_mark new,
+ bool gc)
{
- struct bch_fs *c = ca->fs;
struct bch_dev_usage *dev_usage;
+ percpu_rwsem_assert_held(&c->mark_lock);
+
bch2_fs_inconsistent_on(old.data_type && new.data_type &&
- old.data_type != new.data_type, c,
- "different types of metadata in same bucket: %u, %u",
- old.data_type, new.data_type);
+ old.data_type != new.data_type, c,
+ "different types of data in same bucket: %s, %s",
+ bch2_data_types[old.data_type],
+ bch2_data_types[new.data_type]);
- preempt_disable();
- dev_usage = this_cpu_ptr(ca->usage_percpu);
+ dev_usage = this_cpu_ptr(ca->usage[gc]);
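+ /* (note) gc == true selects the shadow stats GC rebuilds (usage[1]) */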
- dev_usage->sectors_cached +=
- (int) new.cached_sectors - (int) old.cached_sectors;
+ if (bucket_type(old))
+ account_bucket(fs_usage, dev_usage, bucket_type(old),
+ -1, -ca->mi.bucket_size);
- dev_usage->sectors[bucket_type(old)] -= old.dirty_sectors;
- dev_usage->sectors[bucket_type(new)] += new.dirty_sectors;
+ if (bucket_type(new))
+ account_bucket(fs_usage, dev_usage, bucket_type(new),
+ 1, ca->mi.bucket_size);
dev_usage->buckets_alloc +=
(int) new.owned_by_allocator - (int) old.owned_by_allocator;
-
- dev_usage->buckets[S_META] += is_meta_bucket(new) - is_meta_bucket(old);
- dev_usage->buckets[S_DIRTY] += is_dirty_bucket(new) - is_dirty_bucket(old);
- dev_usage->buckets_cached += is_cached_bucket(new) - is_cached_bucket(old);
- preempt_enable();
+ dev_usage->buckets_ec +=
+ (int) new.stripe - (int) old.stripe;
+ dev_usage->buckets_unavailable +=
+ is_unavailable_bucket(new) - is_unavailable_bucket(old);
+
+ dev_usage->sectors[old.data_type] -= old.dirty_sectors;
+ dev_usage->sectors[new.data_type] += new.dirty_sectors;
+ dev_usage->sectors[BCH_DATA_CACHED] +=
+ (int) new.cached_sectors - (int) old.cached_sectors;
+ dev_usage->sectors_fragmented +=
+ is_fragmented_bucket(new, ca) - is_fragmented_bucket(old, ca);
if (!is_available_bucket(old) && is_available_bucket(new))
bch2_wake_allocator(ca);
}
-#define bucket_data_cmpxchg(ca, g, new, expr) \
+void bch2_dev_usage_from_buckets(struct bch_fs *c, struct bch_dev *ca)
+{
+ struct bucket_mark old = { .v.counter = 0 };
+ struct bch_fs_usage *fs_usage;
+ struct bucket_array *buckets;
+ struct bucket *g;
+
+ percpu_down_read_preempt_disable(&c->mark_lock);
+ fs_usage = this_cpu_ptr(c->usage[0]);
+ buckets = bucket_array(ca);
+
+ for_each_bucket(g, buckets)
+ if (g->mark.data_type)
+ bch2_dev_usage_update(c, ca, fs_usage, old, g->mark, false);
+ percpu_up_read_preempt_enable(&c->mark_lock);
+}
+
+#define bucket_data_cmpxchg(c, ca, fs_usage, g, new, expr) \
({ \
struct bucket_mark _old = bucket_cmpxchg(g, new, expr); \
\
- bch2_dev_usage_update(ca, _old, new); \
+ bch2_dev_usage_update(c, ca, fs_usage, _old, new, gc); \
_old; \
})
-bool bch2_invalidate_bucket(struct bch_dev *ca, struct bucket *g,
- struct bucket_mark *old)
+static inline void update_replicas(struct bch_fs *c,
+ struct bch_fs_usage *fs_usage,
+ struct bch_replicas_entry *r,
+ s64 sectors)
+{
+ int idx = bch2_replicas_entry_idx(c, r);
+
+ BUG_ON(idx < 0);
+ BUG_ON(!sectors);
+
+ if (r->data_type == BCH_DATA_CACHED)
+ fs_usage->s.cached += sectors;
+ else
+ fs_usage->s.data += sectors;
+ fs_usage->data[idx] += sectors;
+}
+
+static inline void update_cached_sectors(struct bch_fs *c,
+ struct bch_fs_usage *fs_usage,
+ unsigned dev, s64 sectors)
+{
+ struct bch_replicas_padded r;
+
+ bch2_replicas_entry_cached(&r.e, dev);
+
+ update_replicas(c, fs_usage, &r.e, sectors);
+}
+
+static void __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
+ size_t b, struct bucket_mark *old,
+ bool gc)
{
+ struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]);
+ struct bucket *g = __bucket(ca, b, gc);
struct bucket_mark new;
- *old = bucket_data_cmpxchg(ca, g, new, ({
- if (!is_available_bucket(new))
- return false;
+ *old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
+ BUG_ON(!is_available_bucket(new));
- new.owned_by_allocator = 1;
- new.touched_this_mount = 1;
+ new.owned_by_allocator = true;
+ new.dirty = true;
new.data_type = 0;
new.cached_sectors = 0;
new.dirty_sectors = 0;
new.gen++;
}));
- if (!old->owned_by_allocator && old->cached_sectors)
- trace_invalidate(ca, g - ca->buckets, old->cached_sectors);
- return true;
+ if (old->cached_sectors)
+ update_cached_sectors(c, fs_usage, ca->dev_idx,
+ -old->cached_sectors);
}
-bool bch2_mark_alloc_bucket_startup(struct bch_dev *ca, struct bucket *g)
+void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
+ size_t b, struct bucket_mark *old)
{
- struct bucket_mark new, old;
+ percpu_rwsem_assert_held(&c->mark_lock);
- old = bucket_data_cmpxchg(ca, g, new, ({
- if (new.touched_this_mount ||
- !is_available_bucket(new))
- return false;
+ __bch2_invalidate_bucket(c, ca, b, old, false);
- new.owned_by_allocator = 1;
- new.touched_this_mount = 1;
- }));
-
- return true;
+ if (!old->owned_by_allocator && old->cached_sectors)
+ trace_invalidate(ca, bucket_to_sector(ca, b),
+ old->cached_sectors);
}
-void bch2_mark_free_bucket(struct bch_dev *ca, struct bucket *g)
+static void __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
+ size_t b, bool owned_by_allocator,
+ bool gc)
{
+ struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]);
+ struct bucket *g = __bucket(ca, b, gc);
struct bucket_mark old, new;
- old = bucket_data_cmpxchg(ca, g, new, ({
- new.touched_this_mount = 1;
- new.owned_by_allocator = 0;
- new.data_type = 0;
- new.cached_sectors = 0;
- new.dirty_sectors = 0;
+ old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
+ new.owned_by_allocator = owned_by_allocator;
}));
- BUG_ON(bucket_became_unavailable(ca->fs, old, new));
+ BUG_ON(!gc &&
+ !owned_by_allocator && !old.owned_by_allocator);
}
-void bch2_mark_alloc_bucket(struct bch_dev *ca, struct bucket *g,
- bool owned_by_allocator)
+void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
+ size_t b, bool owned_by_allocator,
+ struct gc_pos pos, unsigned flags)
{
- struct bucket_mark old, new;
+ percpu_rwsem_assert_held(&c->mark_lock);
- old = bucket_data_cmpxchg(ca, g, new, ({
- new.touched_this_mount = 1;
- new.owned_by_allocator = owned_by_allocator;
- }));
+ if (!(flags & BCH_BUCKET_MARK_GC))
+ __bch2_mark_alloc_bucket(c, ca, b, owned_by_allocator, false);
- BUG_ON(!owned_by_allocator && !old.owned_by_allocator &&
- ca->fs->gc_pos.phase == GC_PHASE_DONE);
+ if ((flags & BCH_BUCKET_MARK_GC) ||
+ gc_visited(c, pos))
+ __bch2_mark_alloc_bucket(c, ca, b, owned_by_allocator, true);
}
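+
+ /*
+ * (note) checked_add() guards the narrow bucket sector counters: _res
+ * holds the full unsigned sum; assigning it back through the (possibly
+ * narrower) lvalue and comparing catches truncation/overflow.
+ */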
-#define saturated_add(ca, dst, src, max) \
+#define checked_add(a, b) \
do { \
- BUG_ON((int) (dst) + (src) < 0); \
- if ((dst) == (max)) \
- ; \
- else if ((dst) + (src) <= (max)) \
- dst += (src); \
- else { \
- dst = (max); \
- trace_sectors_saturated(ca); \
- } \
+ unsigned _res = (unsigned) (a) + (b); \
+ (a) = _res; \
+ BUG_ON((a) != _res); \
} while (0)
-void bch2_mark_metadata_bucket(struct bch_dev *ca, struct bucket *g,
- enum bucket_data_type type,
- bool may_make_unavailable)
+static void __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
+ size_t b, enum bch_data_type type,
+ unsigned sectors, bool gc)
{
- struct bucket_mark old, new;
+ struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]);
+ struct bucket *g = __bucket(ca, b, gc);
+ struct bucket_mark new;
- BUG_ON(!type);
+ BUG_ON(type != BCH_DATA_SB &&
+ type != BCH_DATA_JOURNAL);
- old = bucket_data_cmpxchg(ca, g, new, ({
- saturated_add(ca, new.dirty_sectors, ca->mi.bucket_size,
- GC_MAX_SECTORS_USED);
- new.data_type = type;
- new.touched_this_mount = 1;
+ bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
+ new.dirty = true;
+ new.data_type = type;
+ checked_add(new.dirty_sectors, sectors);
}));
+}
- if (old.data_type != type &&
- (old.data_type ||
- old.cached_sectors ||
- old.dirty_sectors))
- bch_err(ca->fs, "bucket %zu has multiple types of data (%u, %u)",
- g - ca->buckets, old.data_type, new.data_type);
+void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
+ size_t b, enum bch_data_type type,
+ unsigned sectors, struct gc_pos pos,
+ unsigned flags)
+{
+ BUG_ON(type != BCH_DATA_SB &&
+ type != BCH_DATA_JOURNAL);
+
+ if (likely(c)) {
+ percpu_rwsem_assert_held(&c->mark_lock);
+
+ if (!(flags & BCH_BUCKET_MARK_GC))
+ __bch2_mark_metadata_bucket(c, ca, b, type, sectors,
+ false);
+ if ((flags & BCH_BUCKET_MARK_GC) ||
+ gc_visited(c, pos))
+ __bch2_mark_metadata_bucket(c, ca, b, type, sectors,
+ true);
+ } else {
+ struct bucket *g;
+ struct bucket_mark new;
- BUG_ON(!may_make_unavailable &&
- bucket_became_unavailable(ca->fs, old, new));
-}
+ rcu_read_lock();
+
+ g = bucket(ca, b);
+ bucket_cmpxchg(g, new, ({
+ new.dirty = true;
+ new.data_type = type;
+ checked_add(new.dirty_sectors, sectors);
+ }));
-/* Reverting this until the copygc + compression issue is fixed: */
+ rcu_read_unlock();
+ }
+}
-static int __disk_sectors(const union bch_extent_crc *crc, unsigned sectors)
+static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
+ s64 delta)
{
- return sectors * crc_compressed_size(NULL, crc) /
- crc_uncompressed_size(NULL, crc);
+ if (delta > 0) {
+ /*
+ * marking a new extent, which _will have size_ @delta
+ *
+ * in the bch2_mark_update -> BCH_EXTENT_OVERLAP_MIDDLE
+ * case, we haven't actually created the key we'll be inserting
+ * yet (for the split) - so we don't want to be using
+ * k->size/crc.live_size here:
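+ *
+ * e.g. (illustrative): for an extent compressed 4:1 (128 live sectors
+ * stored in 32 on disk), marking a new 64 sector split charges
+ * __ptr_disk_sectors(p, 64) == 16 sectors on disk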
+ */
+ return __ptr_disk_sectors(p, delta);
+ } else {
+ BUG_ON(-delta > p.crc.live_size);
+
+ return (s64) __ptr_disk_sectors(p, p.crc.live_size + delta) -
+ (s64) ptr_disk_sectors(p);
+ }
}
/*
* Checking against gc's position has to be done here, inside the cmpxchg()
* loop, to avoid racing with the start of gc clearing all the marks - GC does
* that with the gc pos seqlock held.
*/
static void bch2_mark_pointer(struct bch_fs *c,
- struct bkey_s_c_extent e,
- const union bch_extent_crc *crc,
- const struct bch_extent_ptr *ptr,
- s64 sectors, enum s_alloc type,
- struct bch_fs_usage *stats,
- u64 journal_seq, unsigned flags)
+ struct extent_ptr_decoded p,
+ s64 sectors, enum bch_data_type data_type,
+ struct bch_fs_usage *fs_usage,
+ unsigned journal_seq, unsigned flags,
+ bool gc)
{
struct bucket_mark old, new;
- unsigned saturated;
- struct bch_dev *ca = c->devs[ptr->dev];
- struct bucket *g = ca->buckets + PTR_BUCKET_NR(ca, ptr);
- unsigned data_type = type == S_META
- ? BUCKET_BTREE : BUCKET_DATA;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
+ size_t b = PTR_BUCKET_NR(ca, &p.ptr);
+ struct bucket *g = __bucket(ca, b, gc);
u64 v;
- if (crc_compression_type(crc)) {
- unsigned old_sectors, new_sectors;
-
- if (sectors > 0) {
- old_sectors = 0;
- new_sectors = sectors;
- } else {
- old_sectors = e.k->size;
- new_sectors = e.k->size + sectors;
- }
-
- sectors = -__disk_sectors(crc, old_sectors)
- +__disk_sectors(crc, new_sectors);
- }
-
- if (flags & BCH_BUCKET_MARK_GC_WILL_VISIT) {
- if (journal_seq)
- bucket_cmpxchg(g, new, ({
- new.touched_this_mount = 1;
- new.journal_seq_valid = 1;
- new.journal_seq = journal_seq;
- }));
-
- return;
- }
-
- v = READ_ONCE(g->_mark.counter);
+ v = atomic64_read(&g->_mark.v);
do {
- new.counter = old.counter = v;
- saturated = 0;
+ new.v.counter = old.v.counter = v;
+
+ new.dirty = true;
/*
* Check this after reading bucket mark to guard against
* the allocator invalidating a bucket after we've already
* checked the gen
*/
- if (gen_after(new.gen, ptr->gen)) {
- EBUG_ON(!ptr->cached &&
+ if (gen_after(new.gen, p.ptr.gen)) {
+ BUG_ON(!test_bit(BCH_FS_ALLOC_READ_DONE, &c->flags));
+ EBUG_ON(!p.ptr.cached &&
test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags));
return;
}
- if (!ptr->cached &&
- new.dirty_sectors == GC_MAX_SECTORS_USED &&
- sectors < 0)
- saturated = -sectors;
-
- if (ptr->cached)
- saturated_add(ca, new.cached_sectors, sectors,
- GC_MAX_SECTORS_USED);
+ if (!p.ptr.cached)
+ checked_add(new.dirty_sectors, sectors);
else
- saturated_add(ca, new.dirty_sectors, sectors,
- GC_MAX_SECTORS_USED);
+ checked_add(new.cached_sectors, sectors);
if (!new.dirty_sectors &&
!new.cached_sectors) {
new.data_type = 0;

if (journal_seq) {
new.journal_seq_valid = 1;
new.journal_seq = journal_seq;
}
} else {
new.data_type = data_type;
}
- new.touched_this_mount = 1;
-
if (flags & BCH_BUCKET_MARK_NOATOMIC) {
g->_mark = new;
break;
}
- } while ((v = cmpxchg(&g->_mark.counter,
- old.counter,
- new.counter)) != old.counter);
-
- bch2_dev_usage_update(ca, old, new);
-
- if (old.data_type != data_type &&
- (old.data_type ||
- old.cached_sectors ||
- old.dirty_sectors))
- bch_err(ca->fs, "bucket %zu has multiple types of data (%u, %u)",
- g - ca->buckets, old.data_type, new.data_type);
-
- BUG_ON(!(flags & BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE) &&
- bucket_became_unavailable(c, old, new));
-
- if (saturated &&
- atomic_long_add_return(saturated,
- &ca->saturated_count) >=
- ca->free_inc.size << ca->bucket_bits) {
- if (c->gc_thread) {
- trace_gc_sectors_saturated(c);
- wake_up_process(c->gc_thread);
- }
+ } while ((v = atomic64_cmpxchg(&g->_mark.v,
+ old.v.counter,
+ new.v.counter)) != old.v.counter);
+
+ bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
+
+ BUG_ON(!gc && bucket_became_unavailable(old, new));
+}
+
+static int bch2_mark_stripe_ptr(struct bch_fs *c,
+ struct bch_extent_stripe_ptr p,
+ enum bch_data_type data_type,
+ struct bch_fs_usage *fs_usage,
+ s64 sectors, unsigned flags,
+ bool gc)
+{
+ struct stripe *m;
+ unsigned old, new, nr_data;
+ int blocks_nonempty_delta;
+ s64 parity_sectors;
+
+ BUG_ON(!sectors);
+
+ m = genradix_ptr(&c->stripes[gc], p.idx);
+
+ spin_lock(&c->ec_stripes_heap_lock);
+
+ if (!m || !m->alive) {
+ spin_unlock(&c->ec_stripes_heap_lock);
+ bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
+ (u64) p.idx);
+ return -1;
}
+
+ BUG_ON(m->r.e.data_type != data_type);
+
+ nr_data = m->nr_blocks - m->nr_redundant;
+
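+ /*
+ * e.g. (illustrative): on a 4+2 stripe, charging 8 data sectors to one
+ * block also charges DIV_ROUND_UP(8 * 2, 4) = 4 parity sectors:
+ */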
+ parity_sectors = DIV_ROUND_UP(abs(sectors) * m->nr_redundant, nr_data);
+
+ if (sectors < 0)
+ parity_sectors = -parity_sectors;
+ sectors += parity_sectors;
+
+ old = m->block_sectors[p.block];
+ m->block_sectors[p.block] += sectors;
+ new = m->block_sectors[p.block];
+
+ blocks_nonempty_delta = (int) !!new - (int) !!old;
+ if (blocks_nonempty_delta) {
+ m->blocks_nonempty += blocks_nonempty_delta;
+
+ if (!gc)
+ bch2_stripes_heap_update(c, m, p.idx);
+ }
+
+ m->dirty = true;
+
+ spin_unlock(&c->ec_stripes_heap_lock);
+
+ update_replicas(c, fs_usage, &m->r.e, sectors);
+
+ return 0;
}
-static void bch2_mark_extent(struct bch_fs *c, struct bkey_s_c_extent e,
- s64 sectors, bool metadata,
- struct bch_fs_usage *stats,
- u64 journal_seq, unsigned flags)
+static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
+ s64 sectors, enum bch_data_type data_type,
+ struct bch_fs_usage *fs_usage,
+ unsigned journal_seq, unsigned flags,
+ bool gc)
{
- const struct bch_extent_ptr *ptr;
- const union bch_extent_crc *crc;
- enum s_alloc type = metadata ? S_META : S_DIRTY;
- unsigned replicas = 0;
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ struct bch_replicas_padded r;
+ s64 dirty_sectors = 0;
+ unsigned i;
+ int ret;
+
+ r.e.data_type = data_type;
+ r.e.nr_devs = 0;
+ r.e.nr_required = 1;
- BUG_ON(metadata && bkey_extent_is_cached(e.k));
BUG_ON(!sectors);
- extent_for_each_ptr_crc(e, ptr, crc) {
- bch2_mark_pointer(c, e, crc, ptr, sectors, type,
- stats, journal_seq, flags);
- replicas += !ptr->cached;
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ s64 disk_sectors = data_type == BCH_DATA_BTREE
+ ? sectors
+ : ptr_disk_sectors_delta(p, sectors);
+
+ bch2_mark_pointer(c, p, disk_sectors, data_type,
+ fs_usage, journal_seq, flags, gc);
+
+ if (p.ptr.cached) {
+ update_cached_sectors(c, fs_usage, p.ptr.dev,
+ disk_sectors);
+ } else if (!p.ec_nr) {
+ dirty_sectors += disk_sectors;
+ r.e.devs[r.e.nr_devs++] = p.ptr.dev;
+ } else {
+ for (i = 0; i < p.ec_nr; i++) {
+ ret = bch2_mark_stripe_ptr(c, p.ec[i],
+ data_type, fs_usage,
+ disk_sectors, flags, gc);
+ if (ret)
+ return ret;
+ }
+
+ r.e.nr_required = 0;
+ }
}
- BUG_ON(replicas >= BCH_REPLICAS_MAX);
+ if (dirty_sectors)
+ update_replicas(c, fs_usage, &r.e, dirty_sectors);
+
+ return 0;
+}
- if (replicas)
- stats->s[replicas - 1].data[type] += sectors;
+static void bucket_set_stripe(struct bch_fs *c,
+ const struct bch_stripe *v,
+ bool enabled,
+ struct bch_fs_usage *fs_usage,
+ u64 journal_seq,
+ bool gc)
+{
+ unsigned i;
+
+ for (i = 0; i < v->nr_blocks; i++) {
+ const struct bch_extent_ptr *ptr = v->ptrs + i;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ size_t b = PTR_BUCKET_NR(ca, ptr);
+ struct bucket *g = __bucket(ca, b, gc);
+ struct bucket_mark new, old;
+
+ BUG_ON(ptr_stale(ca, ptr));
+
+ old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
+ new.dirty = true;
+ new.stripe = enabled;
+ if (journal_seq) {
+ new.journal_seq_valid = 1;
+ new.journal_seq = journal_seq;
+ }
+ }));
+ }
}
-void __bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
- s64 sectors, bool metadata,
- struct bch_fs_usage *stats,
- u64 journal_seq, unsigned flags)
+static int bch2_mark_stripe(struct bch_fs *c, struct bkey_s_c k,
+ bool inserting,
+ struct bch_fs_usage *fs_usage,
+ u64 journal_seq, unsigned flags,
+ bool gc)
{
+ struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
+ size_t idx = s.k->p.offset;
+ struct stripe *m = genradix_ptr(&c->stripes[gc], idx);
+ unsigned i;
+
+ spin_lock(&c->ec_stripes_heap_lock);
+
+ if (!m || (!inserting && !m->alive)) {
+ spin_unlock(&c->ec_stripes_heap_lock);
+ bch_err_ratelimited(c, "error marking nonexistent stripe %zu",
+ idx);
+ return -1;
+ }
+
+ if (m->alive)
+ bch2_stripes_heap_del(c, m, idx);
+
+ memset(m, 0, sizeof(*m));
+
+ if (inserting) {
+ m->sectors = le16_to_cpu(s.v->sectors);
+ m->algorithm = s.v->algorithm;
+ m->nr_blocks = s.v->nr_blocks;
+ m->nr_redundant = s.v->nr_redundant;
+
+ memset(&m->r, 0, sizeof(m->r));
+
+ m->r.e.data_type = BCH_DATA_USER;
+ m->r.e.nr_devs = s.v->nr_blocks;
+ m->r.e.nr_required = s.v->nr_blocks - s.v->nr_redundant;
+
+ for (i = 0; i < s.v->nr_blocks; i++)
+ m->r.e.devs[i] = s.v->ptrs[i].dev;
+
+ /*
+ * XXX: account for stripes somehow here
+ */
+#if 0
+ update_replicas(c, fs_usage, &m->r.e, stripe_sectors);
+#endif
+
+ /* gc recalculates these fields: */
+ if (!(flags & BCH_BUCKET_MARK_GC)) {
+ for (i = 0; i < s.v->nr_blocks; i++) {
+ m->block_sectors[i] =
+ stripe_blockcount_get(s.v, i);
+ m->blocks_nonempty += !!m->block_sectors[i];
+ }
+ }
+
+ if (!gc)
+ bch2_stripes_heap_insert(c, m, idx);
+ else
+ m->alive = true;
+ }
+
+ spin_unlock(&c->ec_stripes_heap_lock);
+
+ bucket_set_stripe(c, s.v, inserting, fs_usage, 0, gc);
+ return 0;
+}
+
+static int __bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
+ bool inserting, s64 sectors,
+ struct bch_fs_usage *fs_usage,
+ unsigned journal_seq, unsigned flags,
+ bool gc)
+{
+ int ret = 0;
+
switch (k.k->type) {
- case BCH_EXTENT:
- case BCH_EXTENT_CACHED:
- bch2_mark_extent(c, bkey_s_c_to_extent(k), sectors, metadata,
- stats, journal_seq, flags);
+ case KEY_TYPE_btree_ptr:
+ ret = bch2_mark_extent(c, k, inserting
+ ? c->opts.btree_node_size
+ : -c->opts.btree_node_size,
+ BCH_DATA_BTREE,
+ fs_usage, journal_seq, flags, gc);
+ break;
+ case KEY_TYPE_extent:
+ ret = bch2_mark_extent(c, k, sectors, BCH_DATA_USER,
+ fs_usage, journal_seq, flags, gc);
break;
- case BCH_RESERVATION: {
- struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
+ case KEY_TYPE_stripe:
+ ret = bch2_mark_stripe(c, k, inserting,
+ fs_usage, journal_seq, flags, gc);
+ break;
+ case KEY_TYPE_inode:
+ if (inserting)
+ fs_usage->s.nr_inodes++;
+ else
+ fs_usage->s.nr_inodes--;
+ break;
+ case KEY_TYPE_reservation: {
+ unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
+
+ sectors *= replicas;
+ replicas = clamp_t(unsigned, replicas, 1,
+ ARRAY_SIZE(fs_usage->persistent_reserved));
- if (r.v->nr_replicas)
- stats->s[r.v->nr_replicas - 1].persistent_reserved += sectors;
+ fs_usage->s.reserved += sectors;
+ fs_usage->persistent_reserved[replicas - 1] += sectors;
break;
}
+ default:
+ break;
}
+
+ return ret;
}
-void bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
- s64 sectors, bool metadata, unsigned flags)
+int bch2_mark_key_locked(struct bch_fs *c,
+ struct bkey_s_c k,
+ bool inserting, s64 sectors,
+ struct gc_pos pos,
+ struct bch_fs_usage *fs_usage,
+ u64 journal_seq, unsigned flags)
{
- struct bch_fs_usage stats = { 0 };
+ int ret;
- __bch2_mark_key(c, k, sectors, metadata, &stats, 0,
- flags|BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE);
+ if (!(flags & BCH_BUCKET_MARK_GC)) {
+ ret = __bch2_mark_key(c, k, inserting, sectors,
+ fs_usage ?: this_cpu_ptr(c->usage[0]),
+ journal_seq, flags, false);
+ if (ret)
+ return ret;
+ }
+
+ if ((flags & BCH_BUCKET_MARK_GC) ||
+ gc_visited(c, pos)) {
+ ret = __bch2_mark_key(c, k, inserting, sectors,
+ this_cpu_ptr(c->usage[1]),
+ journal_seq, flags, true);
+ if (ret)
+ return ret;
+ }
- preempt_disable();
- bch2_usage_add(this_cpu_ptr(c->usage_percpu), &stats);
- preempt_enable();
+ return 0;
}
-void bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
- s64 sectors, bool metadata, struct gc_pos gc_pos,
- struct bch_fs_usage *stats, u64 journal_seq)
+int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
+ bool inserting, s64 sectors,
+ struct gc_pos pos,
+ struct bch_fs_usage *fs_usage,
+ u64 journal_seq, unsigned flags)
{
- unsigned flags = gc_will_visit(c, gc_pos)
- ? BCH_BUCKET_MARK_GC_WILL_VISIT : 0;
- /*
- * synchronization w.r.t. GC:
- *
- * Normally, bucket sector counts/marks are updated on the fly, as
- * references are added/removed from the btree, the lists of buckets the
- * allocator owns, other metadata buckets, etc.
- *
- * When GC is in progress and going to mark this reference, we do _not_
- * mark this reference here, to avoid double counting - GC will count it
- * when it gets to it.
- *
- * To know whether we should mark a given reference (GC either isn't
- * running, or has already marked references at this position) we
- * construct a total order for everything GC walks. Then, we can simply
- * compare the position of the reference we're marking - @gc_pos - with
- * GC's current position. If GC is going to mark this reference, GC's
- * current position will be less than @gc_pos; if GC's current position
- * is greater than @gc_pos GC has either already walked this position,
- * or isn't running.
- *
- * To avoid racing with GC's position changing, we have to deal with
- * - GC's position being set to GC_POS_MIN when GC starts:
- * usage_lock guards against this
- * - GC's position overtaking @gc_pos: we guard against this with
- * whatever lock protects the data structure the reference lives in
- * (e.g. the btree node lock, or the relevant allocator lock).
- */
- lg_local_lock(&c->usage_lock);
- __bch2_mark_key(c, k, sectors, metadata, stats, journal_seq, flags);
- bch2_fs_stats_verify(c);
- lg_local_unlock(&c->usage_lock);
+ int ret;
+
+ percpu_down_read_preempt_disable(&c->mark_lock);
+ ret = bch2_mark_key_locked(c, k, inserting, sectors,
+ pos, fs_usage, journal_seq, flags);
+ percpu_up_read_preempt_enable(&c->mark_lock);
+
+ return ret;
}
-static u64 __recalc_sectors_available(struct bch_fs *c)
+void bch2_mark_update(struct btree_insert *trans,
+ struct btree_insert_entry *insert)
{
- return c->capacity - bch2_fs_sectors_used(c);
+ struct bch_fs *c = trans->c;
+ struct btree_iter *iter = insert->iter;
+ struct btree *b = iter->l[0].b;
+ struct btree_node_iter node_iter = iter->l[0].iter;
+ struct bch_fs_usage *fs_usage;
+ struct gc_pos pos = gc_pos_btree_node(b);
+ struct bkey_packed *_k;
+ u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
+ static int warned_disk_usage = 0;
+
+ if (!btree_node_type_needs_gc(iter->btree_id))
+ return;
+
+ percpu_down_read_preempt_disable(&c->mark_lock);
+ fs_usage = bch2_fs_usage_get_scratch(c);
+
+ if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
+ bch2_mark_key_locked(c, bkey_i_to_s_c(insert->k), true,
+ bpos_min(insert->k->k.p, b->key.k.p).offset -
+ bkey_start_offset(&insert->k->k),
+ pos, fs_usage, trans->journal_res.seq, 0);
+
+ while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
+ KEY_TYPE_discard))) {
+ struct bkey unpacked;
+ struct bkey_s_c k;
+ s64 sectors = 0;
+
+ k = bkey_disassemble(b, _k, &unpacked);
+
+ if (btree_node_is_extents(b)
+ ? bkey_cmp(insert->k->k.p, bkey_start_pos(k.k)) <= 0
+ : bkey_cmp(insert->k->k.p, k.k->p))
+ break;
+
+ if (btree_node_is_extents(b)) {
+ switch (bch2_extent_overlap(&insert->k->k, k.k)) {
+ case BCH_EXTENT_OVERLAP_ALL:
+ sectors = -((s64) k.k->size);
+ break;
+ case BCH_EXTENT_OVERLAP_BACK:
+ sectors = bkey_start_offset(&insert->k->k) -
+ k.k->p.offset;
+ break;
+ case BCH_EXTENT_OVERLAP_FRONT:
+ sectors = bkey_start_offset(k.k) -
+ insert->k->k.p.offset;
+ break;
+ case BCH_EXTENT_OVERLAP_MIDDLE:
+ sectors = k.k->p.offset - insert->k->k.p.offset;
+ BUG_ON(sectors <= 0);
+
+ bch2_mark_key_locked(c, k, true, sectors,
+ pos, fs_usage, trans->journal_res.seq, 0);
+
+ sectors = bkey_start_offset(&insert->k->k) -
+ k.k->p.offset;
+ break;
+ }
+
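+ /*
+ * e.g. (illustrative): inserting [64,128) over an existing
+ * extent [32,96) is OVERLAP_BACK: sectors = 64 - 96 = -32,
+ * the span of the old extent being dropped.
+ */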
+ BUG_ON(sectors >= 0);
+ }
+
+ bch2_mark_key_locked(c, k, false, sectors,
+ pos, fs_usage, trans->journal_res.seq, 0);
+
+ bch2_btree_node_iter_advance(&node_iter, b);
+ }
+
+ if (bch2_fs_usage_apply(c, fs_usage, trans->disk_res, pos) &&
+ !warned_disk_usage &&
+ !xchg(&warned_disk_usage, 1)) {
+ char buf[200];
+
+ pr_err("disk usage increased more than %llu sectors reserved", disk_res_sectors);
+
+ pr_err("while inserting");
+ bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(insert->k));
+ pr_err("%s", buf);
+ pr_err("overlapping with");
+
+ node_iter = iter->l[0].iter;
+ while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
+ KEY_TYPE_discard))) {
+ struct bkey unpacked;
+ struct bkey_s_c k;
+
+ k = bkey_disassemble(b, _k, &unpacked);
+
+ if (btree_node_is_extents(b)
+ ? bkey_cmp(insert->k->k.p, bkey_start_pos(k.k)) <= 0
+ : bkey_cmp(insert->k->k.p, k.k->p))
+ break;
+
+ bch2_bkey_val_to_text(&PBUF(buf), c, k);
+ pr_err("%s", buf);
+
+ bch2_btree_node_iter_advance(&node_iter, b);
+ }
+ }
+
+ percpu_up_read_preempt_enable(&c->mark_lock);
}
-/* Used by gc when it's starting: */
-void bch2_recalc_sectors_available(struct bch_fs *c)
+/* Disk reservations: */
+
+static u64 bch2_recalc_sectors_available(struct bch_fs *c)
{
int cpu;
- lg_global_lock(&c->usage_lock);
-
for_each_possible_cpu(cpu)
- per_cpu_ptr(c->usage_percpu, cpu)->available_cache = 0;
+ per_cpu_ptr(c->pcpu, cpu)->sectors_available = 0;
- atomic64_set(&c->sectors_available,
- __recalc_sectors_available(c));
-
- lg_global_unlock(&c->usage_lock);
+ return avail_factor(bch2_fs_sectors_free(c));
}
-void bch2_disk_reservation_put(struct bch_fs *c,
- struct disk_reservation *res)
+void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
{
- if (res->sectors) {
- lg_local_lock(&c->usage_lock);
- this_cpu_sub(c->usage_percpu->online_reserved,
- res->sectors);
-
- bch2_fs_stats_verify(c);
- lg_local_unlock(&c->usage_lock);
+ percpu_down_read_preempt_disable(&c->mark_lock);
+ this_cpu_sub(c->usage[0]->s.online_reserved,
+ res->sectors);
+ percpu_up_read_preempt_enable(&c->mark_lock);
- res->sectors = 0;
- }
+ res->sectors = 0;
}
#define SECTORS_CACHE 1024
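+
+ /*
+ * (note) Reservations are filled from a per-cpu cache: on a miss we pull
+ * up to sectors + SECTORS_CACHE from the shared atomic counter, so
+ * subsequent small reservations on this cpu avoid it entirely.
+ */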
-int bch2_disk_reservation_add(struct bch_fs *c,
- struct disk_reservation *res,
- unsigned sectors, int flags)
+int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
+ unsigned sectors, int flags)
{
- struct bch_fs_usage *stats;
- u64 old, new, v;
+ struct bch_fs_pcpu *pcpu;
+ u64 old, v, get;
s64 sectors_available;
int ret;
- sectors *= res->nr_replicas;
+ percpu_down_read_preempt_disable(&c->mark_lock);
+ pcpu = this_cpu_ptr(c->pcpu);
- lg_local_lock(&c->usage_lock);
- stats = this_cpu_ptr(c->usage_percpu);
-
- if (sectors <= stats->available_cache)
+ if (sectors <= pcpu->sectors_available)
goto out;
v = atomic64_read(&c->sectors_available);
do {
old = v;
- if (old < sectors) {
- lg_local_unlock(&c->usage_lock);
+ get = min((u64) sectors + SECTORS_CACHE, old);
+
+ if (get < sectors) {
+ percpu_up_read_preempt_enable(&c->mark_lock);
goto recalculate;
}
-
- new = max_t(s64, 0, old - sectors - SECTORS_CACHE);
} while ((v = atomic64_cmpxchg(&c->sectors_available,
- old, new)) != old);
+ old, old - get)) != old);
+
+ pcpu->sectors_available += get;
- stats->available_cache += old - new;
out:
- stats->available_cache -= sectors;
- stats->online_reserved += sectors;
- res->sectors += sectors;
+ pcpu->sectors_available -= sectors;
+ this_cpu_add(c->usage[0]->s.online_reserved, sectors);
+ res->sectors += sectors;
- bch2_fs_stats_verify(c);
- lg_local_unlock(&c->usage_lock);
+ percpu_up_read_preempt_enable(&c->mark_lock);
return 0;
recalculate:
if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD)) {
if (!(flags & BCH_DISK_RESERVATION_BTREE_LOCKS_HELD))
down_read(&c->gc_lock);
else if (!down_read_trylock(&c->gc_lock))
return -EINTR;
}
- lg_global_lock(&c->usage_lock);
- sectors_available = __recalc_sectors_available(c);
+ percpu_down_write(&c->mark_lock);
+ sectors_available = bch2_recalc_sectors_available(c);
if (sectors <= sectors_available ||
(flags & BCH_DISK_RESERVATION_NOFAIL)) {
atomic64_set(&c->sectors_available,
max_t(s64, 0, sectors_available - sectors));
- stats->online_reserved += sectors;
- res->sectors += sectors;
+ this_cpu_add(c->usage[0]->s.online_reserved, sectors);
+ res->sectors += sectors;
ret = 0;
} else {
atomic64_set(&c->sectors_available, sectors_available);
ret = -ENOSPC;
}
- bch2_fs_stats_verify(c);
- lg_global_unlock(&c->usage_lock);
+ percpu_up_write(&c->mark_lock);
+
if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD))
up_read(&c->gc_lock);
return ret;
}
-int bch2_disk_reservation_get(struct bch_fs *c,
- struct disk_reservation *res,
- unsigned sectors, int flags)
+/* Startup/shutdown: */
+
+static void buckets_free_rcu(struct rcu_head *rcu)
{
- res->sectors = 0;
- res->gen = c->capacity_gen;
- res->nr_replicas = (flags & BCH_DISK_RESERVATION_METADATA)
- ? c->opts.metadata_replicas
- : c->opts.data_replicas;
+ struct bucket_array *buckets =
+ container_of(rcu, struct bucket_array, rcu);
+
+ kvpfree(buckets,
+ sizeof(struct bucket_array) +
+ buckets->nbuckets * sizeof(struct bucket));
+}
+
+int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
+{
+ struct bucket_array *buckets = NULL, *old_buckets = NULL;
+ unsigned long *buckets_nouse = NULL;
+ unsigned long *buckets_written = NULL;
+ u8 *oldest_gens = NULL;
+ alloc_fifo free[RESERVE_NR];
+ alloc_fifo free_inc;
+ alloc_heap alloc_heap;
+ copygc_heap copygc_heap;
+
+ size_t btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
+ ca->mi.bucket_size / c->opts.btree_node_size);
+ /* XXX: these should be tunable */
+ size_t reserve_none = max_t(size_t, 1, nbuckets >> 9);
+ size_t copygc_reserve = max_t(size_t, 2, nbuckets >> 7);
+ size_t free_inc_nr = max(max_t(size_t, 1, nbuckets >> 12),
+ btree_reserve * 2);
+ bool resize = ca->buckets[0] != NULL,
+ start_copygc = ca->copygc_thread != NULL;
+ int ret = -ENOMEM;
+ unsigned i;
+
+ memset(&free, 0, sizeof(free));
+ memset(&free_inc, 0, sizeof(free_inc));
+ memset(&alloc_heap, 0, sizeof(alloc_heap));
+ memset(&copygc_heap, 0, sizeof(copygc_heap));
+
+ if (!(buckets = kvpmalloc(sizeof(struct bucket_array) +
+ nbuckets * sizeof(struct bucket),
+ GFP_KERNEL|__GFP_ZERO)) ||
+ !(oldest_gens = kvpmalloc(nbuckets * sizeof(u8),
+ GFP_KERNEL|__GFP_ZERO)) ||
+ !(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
+ sizeof(unsigned long),
+ GFP_KERNEL|__GFP_ZERO)) ||
+ !(buckets_written = kvpmalloc(BITS_TO_LONGS(nbuckets) *
+ sizeof(unsigned long),
+ GFP_KERNEL|__GFP_ZERO)) ||
+ !init_fifo(&free[RESERVE_BTREE], btree_reserve, GFP_KERNEL) ||
+ !init_fifo(&free[RESERVE_MOVINGGC],
+ copygc_reserve, GFP_KERNEL) ||
+ !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
+ !init_fifo(&free_inc, free_inc_nr, GFP_KERNEL) ||
+ !init_heap(&alloc_heap, ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL) ||
+ !init_heap(&copygc_heap, copygc_reserve, GFP_KERNEL))
+ goto err;
+
+ buckets->first_bucket = ca->mi.first_bucket;
+ buckets->nbuckets = nbuckets;
+
+ bch2_copygc_stop(ca);
+
+ if (resize) {
+ down_write(&c->gc_lock);
+ down_write(&ca->bucket_lock);
+ percpu_down_write(&c->mark_lock);
+ }
+
+ old_buckets = bucket_array(ca);
+
+ if (resize) {
+ size_t n = min(buckets->nbuckets, old_buckets->nbuckets);
+
+ memcpy(buckets->b,
+ old_buckets->b,
+ n * sizeof(struct bucket));
+ memcpy(oldest_gens,
+ ca->oldest_gens,
+ n * sizeof(u8));
+ memcpy(buckets_nouse,
+ ca->buckets_nouse,
+ BITS_TO_LONGS(n) * sizeof(unsigned long));
+ memcpy(buckets_written,
+ ca->buckets_written,
+ BITS_TO_LONGS(n) * sizeof(unsigned long));
+ }
+
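+ /*
+ * (note) Publish the new array with rcu_assign_pointer(); 'buckets' is
+ * reused to name the old array, which is freed via call_rcu() at err:
+ * once readers have drained.
+ */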
+ rcu_assign_pointer(ca->buckets[0], buckets);
+ buckets = old_buckets;
+
+ swap(ca->oldest_gens, oldest_gens);
+ swap(ca->buckets_nouse, buckets_nouse);
+ swap(ca->buckets_written, buckets_written);
+
+ if (resize)
+ percpu_up_write(&c->mark_lock);
+
+ spin_lock(&c->freelist_lock);
+ for (i = 0; i < RESERVE_NR; i++) {
+ fifo_move(&free[i], &ca->free[i]);
+ swap(ca->free[i], free[i]);
+ }
+ fifo_move(&free_inc, &ca->free_inc);
+ swap(ca->free_inc, free_inc);
+ spin_unlock(&c->freelist_lock);
+
+ /* with gc lock held, alloc_heap can't be in use: */
+ swap(ca->alloc_heap, alloc_heap);
+
+ /* and we shut down copygc: */
+ swap(ca->copygc_heap, copygc_heap);
+
+ nbuckets = ca->mi.nbuckets;
+
+ if (resize) {
+ up_write(&ca->bucket_lock);
+ up_write(&c->gc_lock);
+ }
+
+ if (start_copygc &&
+ bch2_copygc_start(c, ca))
+ bch_err(ca, "error restarting copygc thread");
+
+ ret = 0;
+err:
+ free_heap(&copygc_heap);
+ free_heap(&alloc_heap);
+ free_fifo(&free_inc);
+ for (i = 0; i < RESERVE_NR; i++)
+ free_fifo(&free[i]);
+ kvpfree(buckets_nouse,
+ BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
+ kvpfree(buckets_written,
+ BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
+ kvpfree(oldest_gens,
+ nbuckets * sizeof(u8));
+ if (buckets)
+ call_rcu(&buckets->rcu, buckets_free_rcu);
+
+ return ret;
+}
+
+void bch2_dev_buckets_free(struct bch_dev *ca)
+{
+ unsigned i;
+
+ free_heap(&ca->copygc_heap);
+ free_heap(&ca->alloc_heap);
+ free_fifo(&ca->free_inc);
+ for (i = 0; i < RESERVE_NR; i++)
+ free_fifo(&ca->free[i]);
+ kvpfree(ca->buckets_written,
+ BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
+ kvpfree(ca->buckets_nouse,
+ BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
+ kvpfree(ca->oldest_gens, ca->mi.nbuckets * sizeof(u8));
+ kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
+ sizeof(struct bucket_array) +
+ ca->mi.nbuckets * sizeof(struct bucket));
+
+ free_percpu(ca->usage[0]);
+}
+
+int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
+{
+ if (!(ca->usage[0] = alloc_percpu(struct bch_dev_usage)))
+ return -ENOMEM;
- return bch2_disk_reservation_add(c, res, sectors, flags);
+ return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
}