#include "disk_groups.h"
#include "ec.h"
#include "error.h"
-#include "io.h"
+#include "io_write.h"
#include "journal.h"
#include "movinggc.h"
#include "nocow_locking.h"
+#include "trace.h"
#include <linux/math64.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
-#include <trace/events/bcachefs.h>
-const char * const bch2_alloc_reserves[] = {
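+/*
+ * Take @lock, dropping the transaction's btree locks first if the mutex
+ * isn't immediately available - avoids deadlocking against a thread that
+ * holds @lock while waiting on one of our btree locks:
+ */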
+static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
+ struct mutex *lock)
+{
+ if (!mutex_trylock(lock)) {
+ bch2_trans_unlock(trans);
+ mutex_lock(lock);
+ }
+}
+
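+/* Human-readable names for enum bch_watermark, from the BCH_WATERMARKS() x-macro: */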
+const char * const bch2_watermarks[] = {
#define x(t) #t,
- BCH_ALLOC_RESERVES()
+ BCH_WATERMARKS()
#undef x
NULL
};
void bch2_reset_alloc_cursors(struct bch_fs *c)
{
- struct bch_dev *ca;
- unsigned i;
-
rcu_read_lock();
- for_each_member_device_rcu(ca, c, i, NULL)
+ for_each_member_device_rcu(c, ca, NULL)
ca->alloc_cursor = 0;
rcu_read_unlock();
}
struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
if (ob->ec) {
- bch2_ec_bucket_written(c, ob);
+ ec_stripe_new_put(c, ob->ec, STRIPE_REF_io);
return;
}
return ob;
}
-static void open_bucket_free_unused(struct bch_fs *c,
- struct write_point *wp,
- struct open_bucket *ob)
+static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob)
{
BUG_ON(c->open_buckets_partial_nr >=
ARRAY_SIZE(c->open_buckets_partial));
return -1;
}
-static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
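+/*
+ * Number of open_buckets held back from each watermark: more critical
+ * watermarks get a smaller reserve held against them, i.e. they may dip
+ * further into the open_buckets pool:
+ */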
+static inline unsigned open_buckets_reserved(enum bch_watermark watermark)
{
- switch (reserve) {
- case RESERVE_btree:
- case RESERVE_btree_movinggc:
+ switch (watermark) {
+ case BCH_WATERMARK_reclaim:
return 0;
- case RESERVE_movinggc:
+ case BCH_WATERMARK_btree:
+ case BCH_WATERMARK_btree_copygc:
return OPEN_BUCKETS_COUNT / 4;
+ case BCH_WATERMARK_copygc:
+ return OPEN_BUCKETS_COUNT / 3;
default:
return OPEN_BUCKETS_COUNT / 2;
}
static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
u64 bucket,
- enum alloc_reserve reserve,
+ enum bch_watermark watermark,
const struct bch_alloc_v4 *a,
struct bucket_alloc_state *s,
struct closure *cl)
spin_lock(&c->freelist_lock);
- if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
+ if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(watermark))) {
if (cl)
closure_wait(&c->open_buckets_wait, cl);
- if (!c->blocked_allocate_open_bucket)
- c->blocked_allocate_open_bucket = local_clock();
-
+ track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket], true);
spin_unlock(&c->freelist_lock);
return ERR_PTR(-BCH_ERR_open_buckets_empty);
}
ca->nr_open_buckets++;
bch2_open_bucket_hash_add(c, ob);
- if (c->blocked_allocate_open_bucket) {
- bch2_time_stats_update(
- &c->times[BCH_TIME_blocked_allocate_open_bucket],
- c->blocked_allocate_open_bucket);
- c->blocked_allocate_open_bucket = 0;
- }
-
- if (c->blocked_allocate) {
- bch2_time_stats_update(
- &c->times[BCH_TIME_blocked_allocate],
- c->blocked_allocate);
- c->blocked_allocate = 0;
- }
+ track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket], false);
+ track_event_change(&c->times[BCH_TIME_blocked_allocate], false);
spin_unlock(&c->freelist_lock);
return ob;
}
static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
- enum alloc_reserve reserve, u64 free_entry,
+ enum bch_watermark watermark, u64 free_entry,
struct bucket_alloc_state *s,
struct bkey_s_c freespace_k,
struct closure *cl)
goto err;
}
- bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(ca->dev_idx, b), BTREE_ITER_CACHED);
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_bkey_get_iter(trans, &iter,
+ BTREE_ID_alloc, POS(ca->dev_idx, b),
+ BTREE_ITER_CACHED);
ret = bkey_err(k);
if (ret) {
ob = ERR_PTR(ret);
a = bch2_alloc_to_v4(k, &a_convert);
if (a->data_type != BCH_DATA_free) {
- if (!test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) {
+ if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
ob = NULL;
goto err;
}
}
if (genbits != (alloc_freespace_genbits(*a) >> 56) &&
- test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) {
+ c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
" freespace key ",
genbits, alloc_freespace_genbits(*a) >> 56);
bch2_trans_inconsistent(trans, "%s", buf.buf);
ob = ERR_PTR(-EIO);
goto err;
-
}
- if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
+ if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_extents_to_backpointers) {
struct bch_backpointer bp;
- u64 bp_offset = 0;
+ struct bpos bp_pos = POS_MIN;
ret = bch2_get_next_backpointer(trans, POS(ca->dev_idx, b), -1,
- &bp_offset, &bp,
+ &bp_pos, &bp,
BTREE_ITER_NOPRESERVE);
if (ret) {
ob = ERR_PTR(ret);
goto err;
}
- if (bp_offset != U64_MAX) {
+ if (!bkey_eq(bp_pos, POS_MAX)) {
/*
* Bucket may have data in it - we don't call
 * bch2_trans_inconsistent() because fsck hasn't
}
}
- ob = __try_alloc_bucket(c, ca, b, reserve, a, s, cl);
+ ob = __try_alloc_bucket(c, ca, b, watermark, a, s, cl);
if (!ob)
- iter.path->preserve = false;
+ set_btree_iter_dontneed(&iter);
err:
- set_btree_iter_dontneed(&iter);
+ if (iter.path)
+ set_btree_iter_dontneed(&iter);
bch2_trans_iter_exit(trans, &iter);
printbuf_exit(&buf);
return ob;
static noinline struct open_bucket *
bch2_bucket_alloc_early(struct btree_trans *trans,
struct bch_dev *ca,
- enum alloc_reserve reserve,
+ enum bch_watermark watermark,
struct bucket_alloc_state *s,
struct closure *cl)
{
- struct btree_iter iter;
- struct bkey_s_c k;
+ struct btree_iter iter, citer;
+ struct bkey_s_c k, ck;
struct open_bucket *ob = NULL;
- u64 alloc_start = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx);
- u64 alloc_cursor = max(alloc_start, READ_ONCE(ca->alloc_cursor));
+ u64 first_bucket = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx);
+ u64 alloc_start = max(first_bucket, READ_ONCE(ca->alloc_cursor));
+ u64 alloc_cursor = alloc_start;
int ret;
+
+ /*
+ * Scan with an uncached iterator to avoid polluting the key cache. An
+ * uncached iter will return a cached key if one exists, but if not
+ * there is no other underlying protection for the associated key cache
+ * slot. To avoid racing bucket allocations, look up the cached key slot
+ * of any likely allocation candidate before attempting to proceed with
+ * the allocation. This provides proper exclusion on the associated
+ * bucket.
+ */
again:
for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor),
BTREE_ITER_SLOTS, k, ret) {
continue;
a = bch2_alloc_to_v4(k, &a_convert);
-
if (a->data_type != BCH_DATA_free)
continue;
+ /* now check the cached key to serialize concurrent allocs of the bucket */
+ ck = bch2_bkey_get_iter(trans, &citer, BTREE_ID_alloc, k.k->p, BTREE_ITER_CACHED);
+ ret = bkey_err(ck);
+ if (ret)
+ break;
+
+ a = bch2_alloc_to_v4(ck, &a_convert);
+ if (a->data_type != BCH_DATA_free)
+ goto next;
+
s->buckets_seen++;
- ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, a, s, cl);
+ ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, watermark, a, s, cl);
+next:
+ set_btree_iter_dontneed(&citer);
+ bch2_trans_iter_exit(trans, &citer);
if (ob)
break;
}
bch2_trans_iter_exit(trans, &iter);
+ alloc_cursor = iter.pos.offset;
ca->alloc_cursor = alloc_cursor;
if (!ob && ret)
ob = ERR_PTR(ret);
- if (!ob && alloc_cursor > alloc_start) {
- alloc_cursor = alloc_start;
+ if (!ob && alloc_start > first_bucket) {
+ alloc_cursor = alloc_start = first_bucket;
goto again;
}
static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
struct bch_dev *ca,
- enum alloc_reserve reserve,
+ enum bch_watermark watermark,
struct bucket_alloc_state *s,
struct closure *cl)
{
s->buckets_seen++;
- ob = try_alloc_bucket(trans, ca, reserve,
+ ob = try_alloc_bucket(trans, ca, watermark,
alloc_cursor, s, k, cl);
if (ob) {
- iter.path->preserve = false;
+ set_btree_iter_dontneed(&iter);
break;
}
}
}
/**
- * bch_bucket_alloc - allocate a single bucket from a specific device
+ * bch2_bucket_alloc_trans - allocate a single bucket from a specific device
+ * @trans: transaction object
+ * @ca: device to allocate from
+ * @watermark: how important is this allocation?
+ * @cl: if not NULL, closure to be used to wait if buckets not available
+ * @usage: filled in with the current device usage
*
- * Returns index of bucket on success, 0 on failure
+ * Returns: an open_bucket on success, or an ERR_PTR() on failure.
*/
static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
struct bch_dev *ca,
- enum alloc_reserve reserve,
+ enum bch_watermark watermark,
struct closure *cl,
struct bch_dev_usage *usage)
{
bool waiting = false;
again:
bch2_dev_usage_read_fast(ca, usage);
- avail = dev_buckets_free(ca, *usage, reserve);
+ avail = dev_buckets_free(ca, *usage, watermark);
if (usage->d[BCH_DATA_need_discard].buckets > avail)
bch2_do_discards(c);
goto again;
}
- if (!c->blocked_allocate)
- c->blocked_allocate = local_clock();
+ track_event_change(&c->times[BCH_TIME_blocked_allocate], true);
ob = ERR_PTR(-BCH_ERR_freelist_empty);
goto err;
closure_wake_up(&c->freelist_wait);
alloc:
ob = likely(freespace)
- ? bch2_bucket_alloc_freelist(trans, ca, reserve, &s, cl)
- : bch2_bucket_alloc_early(trans, ca, reserve, &s, cl);
+ ? bch2_bucket_alloc_freelist(trans, ca, watermark, &s, cl)
+ : bch2_bucket_alloc_early(trans, ca, watermark, &s, cl);
if (s.skipped_need_journal_commit * 2 > avail)
bch2_journal_flush_async(&c->journal, NULL);
- if (!ob && freespace && !test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) {
+ if (!ob && freespace && c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
freespace = false;
goto alloc;
}
if (!IS_ERR(ob))
trace_and_count(c, bucket_alloc, ca,
- bch2_alloc_reserves[reserve],
+ bch2_watermarks[watermark],
ob->bucket,
usage->d[BCH_DATA_free].buckets,
avail,
"");
else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
trace_and_count(c, bucket_alloc_fail, ca,
- bch2_alloc_reserves[reserve],
+ bch2_watermarks[watermark],
0,
usage->d[BCH_DATA_free].buckets,
avail,
}
struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
- enum alloc_reserve reserve,
+ enum bch_watermark watermark,
struct closure *cl)
{
struct bch_dev_usage usage;
struct open_bucket *ob;
bch2_trans_do(c, NULL, NULL, 0,
- PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, reserve,
+ PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, ca, watermark,
cl, &usage)));
return ob;
}
struct bch_dev_usage *usage)
{
u64 *v = stripe->next_alloc + ca->dev_idx;
- u64 free_space = dev_buckets_available(ca, RESERVE_none);
+ u64 free_space = dev_buckets_available(ca, BCH_WATERMARK_normal);
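+ /*
+ * Each allocation advances this device's stripe position by
+ * ~1/free_space (48-bit fixed point), so devices with more free
+ * space are preferred:
+ */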
u64 free_space_inv = free_space
? div64_u64(1ULL << 48, free_space)
: 1ULL << 48;
unsigned nr_replicas,
unsigned *nr_effective,
bool *have_cache,
+ unsigned flags,
enum bch_data_type data_type,
- enum alloc_reserve reserve,
+ enum bch_watermark watermark,
struct closure *cl)
{
struct bch_fs *c = trans->c;
continue;
}
- ob = bch2_bucket_alloc_trans(trans, ca, reserve, cl, &usage);
+ ob = bch2_bucket_alloc_trans(trans, ca, watermark, cl, &usage);
if (!IS_ERR(ob))
bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
percpu_ref_put(&ca->ref);
if (add_new_bucket(c, ptrs, devs_may_alloc,
nr_replicas, nr_effective,
- have_cache, 0, ob)) {
+ have_cache, flags, ob)) {
ret = 0;
break;
}
unsigned nr_replicas,
unsigned *nr_effective,
bool *have_cache,
- enum alloc_reserve reserve,
+ enum bch_watermark watermark,
unsigned flags,
struct closure *cl)
{
struct dev_alloc_list devs_sorted;
struct ec_stripe_head *h;
struct open_bucket *ob;
- struct bch_dev *ca;
unsigned i, ec_idx;
int ret = 0;
if (ec_open_bucket(c, ptrs))
return 0;
- h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, reserve, cl);
+ h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, watermark, cl);
if (IS_ERR(h))
return PTR_ERR(h);
if (!h)
}
goto out_put_head;
got_bucket:
- ca = bch_dev_bkey_exists(c, ob->dev);
-
ob->ec_idx = ec_idx;
ob->ec = h->s;
+ ec_stripe_new_get(h->s, STRIPE_REF_io);
ret = add_new_bucket(c, ptrs, devs_may_alloc,
nr_replicas, nr_effective,
have_cache, flags, ob);
- atomic_inc(&h->s->pin);
out_put_head:
bch2_ec_stripe_head_put(c, h);
return ret;
return false;
if (!ca->mi.durability &&
- (wp->data_type != BCH_DATA_user || !*have_cache))
+ (wp->data_type == BCH_DATA_btree || ec || *have_cache))
return false;
if (ec != (ob->ec != NULL))
unsigned nr_replicas,
unsigned *nr_effective,
bool *have_cache, bool ec,
- enum alloc_reserve reserve,
+ enum bch_watermark watermark,
unsigned flags)
{
int i, ret = 0;
spin_lock(&c->freelist_lock);
+ if (!c->open_buckets_partial_nr)
+ goto unlock;
+
for (i = c->open_buckets_partial_nr - 1; i >= 0; --i) {
struct open_bucket *ob = c->open_buckets + c->open_buckets_partial[i];
u64 avail;
bch2_dev_usage_read_fast(ca, &usage);
- avail = dev_buckets_free(ca, usage, reserve);
+ avail = dev_buckets_free(ca, usage, watermark);
if (!avail)
continue;
break;
}
}
-
+unlock:
spin_unlock(&c->freelist_lock);
return ret;
}
unsigned nr_replicas,
unsigned *nr_effective,
bool *have_cache,
- enum alloc_reserve reserve,
+ enum bch_watermark watermark,
unsigned flags,
struct closure *_cl)
{
unsigned i;
int ret;
- rcu_read_lock();
devs = target_rw_devs(c, wp->data_type, target);
- rcu_read_unlock();
/* Don't allocate from devices we already have pointers to: */
- for (i = 0; i < devs_have->nr; i++)
- __clear_bit(devs_have->devs[i], devs.d);
+ darray_for_each(*devs_have, i)
+ __clear_bit(*i, devs.d);
open_bucket_for_each(c, ptrs, ob, i)
__clear_bit(ob->dev, devs.d);
ret = bucket_alloc_set_partial(c, ptrs, wp, &devs,
nr_replicas, nr_effective,
- have_cache, erasure_code, reserve, flags);
+ have_cache, erasure_code, watermark, flags);
if (ret)
return ret;
target,
nr_replicas, nr_effective,
have_cache,
- reserve, flags, _cl);
+ watermark, flags, _cl);
} else {
retry_blocking:
/*
*/
ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs,
nr_replicas, nr_effective, have_cache,
- wp->data_type, reserve, cl);
+ flags, wp->data_type, watermark, cl);
if (ret &&
!bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
!bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
cl = _cl;
goto retry_blocking;
}
-
}
return ret;
unsigned nr_replicas,
unsigned *nr_effective,
bool *have_cache,
- enum alloc_reserve reserve,
+ enum bch_watermark watermark,
unsigned flags,
struct closure *cl)
{
ret = __open_bucket_add_buckets(trans, ptrs, wp,
devs_have, target, erasure_code,
nr_replicas, nr_effective, have_cache,
- reserve, flags, cl);
+ watermark, flags, cl);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
bch2_err_matches(ret, BCH_ERR_operation_blocked) ||
bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
ret = __open_bucket_add_buckets(trans, ptrs, wp,
devs_have, target, false,
nr_replicas, nr_effective, have_cache,
- reserve, flags, cl);
+ watermark, flags, cl);
return ret < 0 ? ret : 0;
}
-void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
- struct open_buckets *obs)
+/**
+ * should_drop_bucket - check if this open_bucket should go away
+ * @ob: open_bucket to check
+ * @c: filesystem handle
+ * @ca: if set, we're killing buckets for a particular device
+ * @ec: if true, we're shutting down erasure coding and killing all ec
+ * open_buckets; if neither @ca nor @ec is set, every open_bucket is dropped
+ * Returns: true if we should kill this open_bucket
+ *
+ * We're killing open_buckets because we're shutting down a device, erasure
+ * coding, or the entire filesystem - check if this open_bucket matches:
+ */
+static bool should_drop_bucket(struct open_bucket *ob, struct bch_fs *c,
+ struct bch_dev *ca, bool ec)
{
- struct open_buckets ptrs = { .nr = 0 };
- struct open_bucket *ob, *ob2;
- unsigned i, j;
-
- open_bucket_for_each(c, obs, ob, i) {
- bool drop = !ca || ob->dev == ca->dev_idx;
+ if (ec) {
+ return ob->ec != NULL;
+ } else if (ca) {
+ bool drop = ob->dev == ca->dev_idx;
+ struct open_bucket *ob2;
+ unsigned i;
if (!drop && ob->ec) {
+ unsigned nr_blocks;
+
mutex_lock(&ob->ec->lock);
- for (j = 0; j < ob->ec->new_stripe.key.v.nr_blocks; j++) {
- if (!ob->ec->blocks[j])
+ nr_blocks = bkey_i_to_stripe(&ob->ec->new_stripe.key)->v.nr_blocks;
+
+ for (i = 0; i < nr_blocks; i++) {
+ if (!ob->ec->blocks[i])
continue;
- ob2 = c->open_buckets + ob->ec->blocks[j];
+ ob2 = c->open_buckets + ob->ec->blocks[i];
drop |= ob2->dev == ca->dev_idx;
}
mutex_unlock(&ob->ec->lock);
}
- if (drop)
- bch2_open_bucket_put(c, ob);
- else
- ob_push(c, &ptrs, ob);
+ return drop;
+ } else {
+ return true;
}
-
- *obs = ptrs;
}
-void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
- struct write_point *wp)
+static void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
+ bool ec, struct write_point *wp)
{
+ struct open_buckets ptrs = { .nr = 0 };
+ struct open_bucket *ob;
+ unsigned i;
+
mutex_lock(&wp->lock);
- bch2_open_buckets_stop_dev(c, ca, &wp->ptrs);
+ open_bucket_for_each(c, &wp->ptrs, ob, i)
+ if (should_drop_bucket(ob, c, ca, ec))
+ bch2_open_bucket_put(c, ob);
+ else
+ ob_push(c, &ptrs, ob);
+ wp->ptrs = ptrs;
mutex_unlock(&wp->lock);
}
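+/*
+ * Stop writes to @ca (or to all erasure coded open_buckets, if @ec):
+ * close matching write points, drain the btree reserve cache, and evict
+ * matching buckets from the partial list:
+ */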
+void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *ca,
+ bool ec)
+{
+ unsigned i;
+
+ /* Close write points that point to this device: */
+ for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
+ bch2_writepoint_stop(c, ca, ec, &c->write_points[i]);
+
+ bch2_writepoint_stop(c, ca, ec, &c->copygc_write_point);
+ bch2_writepoint_stop(c, ca, ec, &c->rebalance_write_point);
+ bch2_writepoint_stop(c, ca, ec, &c->btree_write_point);
+
+ mutex_lock(&c->btree_reserve_cache_lock);
+ while (c->btree_reserve_cache_nr) {
+ struct btree_alloc *a =
+ &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
+
+ bch2_open_buckets_put(c, &a->ob);
+ }
+ mutex_unlock(&c->btree_reserve_cache_lock);
+
+ spin_lock(&c->freelist_lock);
+ i = 0;
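+ /*
+ * Evict by swapping with the last entry; drop the freelist lock
+ * around bch2_open_bucket_put(), which retakes it:
+ */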
+ while (i < c->open_buckets_partial_nr) {
+ struct open_bucket *ob =
+ c->open_buckets + c->open_buckets_partial[i];
+
+ if (should_drop_bucket(ob, c, ca, ec)) {
+ --c->open_buckets_partial_nr;
+ swap(c->open_buckets_partial[i],
+ c->open_buckets_partial[c->open_buckets_partial_nr]);
+ ob->on_partial_list = false;
+ spin_unlock(&c->freelist_lock);
+ bch2_open_bucket_put(c, ob);
+ spin_lock(&c->freelist_lock);
+ } else {
+ i++;
+ }
+ }
+ spin_unlock(&c->freelist_lock);
+
+ bch2_ec_stop_dev(c, ca);
+}
+
static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
unsigned long write_point)
{
return true;
}
-static bool try_decrease_writepoints(struct bch_fs *c,
- unsigned old_nr)
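+/*
+ * Called when allocation fails with -BCH_ERR_freelist_empty: retire a
+ * write point and release the unused open_buckets it was holding:
+ */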
+static bool try_decrease_writepoints(struct btree_trans *trans, unsigned old_nr)
{
+ struct bch_fs *c = trans->c;
struct write_point *wp;
+ struct open_bucket *ob;
+ unsigned i;
mutex_lock(&c->write_points_hash_lock);
if (c->write_points_nr < old_nr) {
hlist_del_rcu(&wp->node);
mutex_unlock(&c->write_points_hash_lock);
- bch2_writepoint_stop(c, NULL, wp);
+ bch2_trans_mutex_lock_norelock(trans, &wp->lock);
+ open_bucket_for_each(c, &wp->ptrs, ob, i)
+ open_bucket_free_unused(c, ob);
+ wp->ptrs.nr = 0;
+ mutex_unlock(&wp->lock);
return true;
}
-static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
- struct mutex *lock)
-{
- if (!mutex_trylock(lock)) {
- bch2_trans_unlock(trans);
- mutex_lock(lock);
- }
-}
-
static struct write_point *writepoint_find(struct btree_trans *trans,
unsigned long write_point)
{
return wp;
}
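+/*
+ * We allocated more replicas/durability than asked for: move buckets
+ * whose durability we can give back onto @ptrs_no_use, so they're freed
+ * instead of written to:
+ */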
+static noinline void
+deallocate_extra_replicas(struct bch_fs *c,
+ struct open_buckets *ptrs,
+ struct open_buckets *ptrs_no_use,
+ unsigned extra_replicas)
+{
+ struct open_buckets ptrs2 = { 0 };
+ struct open_bucket *ob;
+ unsigned i;
+
+ open_bucket_for_each(c, ptrs, ob, i) {
+ unsigned d = bch_dev_bkey_exists(c, ob->dev)->mi.durability;
+
+ if (d && d <= extra_replicas) {
+ extra_replicas -= d;
+ ob_push(c, ptrs_no_use, ob);
+ } else {
+ ob_push(c, &ptrs2, ob);
+ }
+ }
+
+ *ptrs = ptrs2;
+}
+
/*
* Get us an open_bucket we can allocate from, return with it locked:
*/
struct bch_devs_list *devs_have,
unsigned nr_replicas,
unsigned nr_replicas_required,
- enum alloc_reserve reserve,
+ enum bch_watermark watermark,
unsigned flags,
struct closure *cl,
struct write_point **wp_ret)
int ret;
int i;
+ if (!IS_ENABLED(CONFIG_BCACHEFS_ERASURE_CODING))
+ erasure_code = false;
+
+ BUG_ON(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS);
+
BUG_ON(!nr_replicas || !nr_replicas_required);
retry:
ptrs.nr = 0;
if (wp->data_type != BCH_DATA_user)
have_cache = true;
- if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
- ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
- target, erasure_code,
- nr_replicas, &nr_effective,
- &have_cache, reserve,
- flags, cl);
- } else {
+ if (target && !(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
target, erasure_code,
nr_replicas, &nr_effective,
- &have_cache, reserve,
+ &have_cache, watermark,
flags, NULL);
if (!ret ||
bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto alloc_done;
+ /* Don't retry from all devices if we're out of open buckets: */
+ if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty)) {
+ int ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
+ target, erasure_code,
+ nr_replicas, &nr_effective,
+ &have_cache, watermark,
+ flags, cl);
+ if (!ret ||
+ bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
+ bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
+ goto alloc_done;
+ }
+
+ /*
+ * Only try to allocate cache (durability = 0 devices) from the
+ * specified target:
+ */
+ have_cache = true;
+
ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
0, erasure_code,
nr_replicas, &nr_effective,
- &have_cache, reserve,
+ &have_cache, watermark,
+ flags, cl);
+ } else {
+ ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
+ target, erasure_code,
+ nr_replicas, &nr_effective,
+ &have_cache, watermark,
flags, cl);
}
alloc_done:
if (ret)
goto err;
+ if (nr_effective > nr_replicas)
+ deallocate_extra_replicas(c, &ptrs, &wp->ptrs, nr_effective - nr_replicas);
+
/* Free buckets we didn't use: */
open_bucket_for_each(c, &wp->ptrs, ob, i)
- open_bucket_free_unused(c, wp, ob);
+ open_bucket_free_unused(c, ob);
wp->ptrs = ptrs;
if (ptrs.nr < ARRAY_SIZE(ptrs.v))
ob_push(c, &ptrs, ob);
else
- open_bucket_free_unused(c, wp, ob);
+ open_bucket_free_unused(c, ob);
wp->ptrs = ptrs;
mutex_unlock(&wp->lock);
if (bch2_err_matches(ret, BCH_ERR_freelist_empty) &&
- try_decrease_writepoints(c, write_points_nr))
+ try_decrease_writepoints(trans, write_points_nr))
goto retry;
if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) ||
static void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, struct open_bucket *ob)
{
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
unsigned data_type = ob->data_type;
barrier(); /* READ_ONCE() doesn't work on bitfields */
- prt_printf(out, "%zu ref %u %s%s%s %u:%llu gen %u\n",
+ prt_printf(out, "%zu ref %u ",
ob - c->open_buckets,
- atomic_read(&ob->pin),
- data_type < BCH_DATA_NR ? bch2_data_types[data_type] : "invalid data type",
- ob->ec ? " ec" : "",
- ob->on_partial_list ? " partial" : "",
- ob->dev, ob->bucket, ob->gen);
+ atomic_read(&ob->pin));
+ bch2_prt_data_type(out, data_type);
+ prt_printf(out, " %u:%llu gen %u allocated %u/%u",
+ ob->dev, ob->bucket, ob->gen,
+ ca->mi.bucket_size - ob->sectors_free, ca->mi.bucket_size);
+ if (ob->ec)
+ prt_printf(out, " ec idx %llu", ob->ec->idx);
+ if (ob->on_partial_list)
+ prt_str(out, " partial");
+ prt_newline(out);
}
void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
{
struct open_bucket *ob;
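+ /* ob->lock is taken below: printbuf reallocation must not block */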
+ out->atomic++;
+
for (ob = c->open_buckets;
ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
ob++) {
bch2_open_bucket_to_text(out, c, ob);
spin_unlock(&ob->lock);
}
+
+ --out->atomic;
}
void bch2_open_buckets_partial_to_text(struct printbuf *out, struct bch_fs *c)
{
unsigned i;
+ out->atomic++;
spin_lock(&c->freelist_lock);
+
for (i = 0; i < c->open_buckets_partial_nr; i++)
bch2_open_bucket_to_text(out, c,
c->open_buckets + c->open_buckets_partial[i]);
+
spin_unlock(&c->freelist_lock);
+ --out->atomic;
}
static const char * const bch2_write_point_states[] = {
NULL
};
+static void bch2_write_point_to_text(struct printbuf *out, struct bch_fs *c,
+ struct write_point *wp)
+{
+ struct open_bucket *ob;
+ unsigned i;
+
+ prt_printf(out, "%lu: ", wp->write_point);
+ prt_human_readable_u64(out, wp->sectors_allocated);
+
+ prt_printf(out, " last wrote: ");
+ bch2_pr_time_units(out, sched_clock() - wp->last_used);
+
+ for (i = 0; i < WRITE_POINT_STATE_NR; i++) {
+ prt_printf(out, " %s: ", bch2_write_point_states[i]);
+ bch2_pr_time_units(out, wp->time[i]);
+ }
+
+ prt_newline(out);
+
+ printbuf_indent_add(out, 2);
+ open_bucket_for_each(c, &wp->ptrs, ob, i)
+ bch2_open_bucket_to_text(out, c, ob);
+ printbuf_indent_sub(out, 2);
+}
+
void bch2_write_points_to_text(struct printbuf *out, struct bch_fs *c)
{
struct write_point *wp;
- unsigned i;
+ prt_str(out, "Foreground write points\n");
for (wp = c->write_points;
wp < c->write_points + ARRAY_SIZE(c->write_points);
- wp++) {
- prt_printf(out, "%lu: ", wp->write_point);
- prt_human_readable_u64(out, wp->sectors_allocated);
+ wp++)
+ bch2_write_point_to_text(out, c, wp);
- prt_printf(out, " last wrote: ");
- bch2_pr_time_units(out, sched_clock() - wp->last_used);
+ prt_str(out, "Copygc write point\n");
+ bch2_write_point_to_text(out, c, &c->copygc_write_point);
- for (i = 0; i < WRITE_POINT_STATE_NR; i++) {
- prt_printf(out, " %s: ", bch2_write_point_states[i]);
- bch2_pr_time_units(out, wp->time[i]);
- }
+ prt_str(out, "Rebalance write point\n");
+ bch2_write_point_to_text(out, c, &c->rebalance_write_point);
- prt_newline(out);
- }
+ prt_str(out, "Btree write point\n");
+ bch2_write_point_to_text(out, c, &c->btree_write_point);
}