#include "journal.h"
#include "movinggc.h"
#include "nocow_locking.h"
+#include "trace.h"
#include <linux/math64.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
-#include <trace/events/bcachefs.h>
const char * const bch2_alloc_reserves[] = {
#define x(t) #t,
struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
if (ob->ec) {
- bch2_ec_bucket_written(c, ob);
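+ /* this bucket was for a stripe: drop our io ref on it */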
+ ec_stripe_new_put(c, ob->ec, STRIPE_REF_io);
return;
}
struct write_point *wp,
struct open_bucket *ob)
{
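+ /* unused buckets now go on the filesystem-wide partial list, for any write point to reuse: */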
- struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
- bool may_realloc = wp->data_type == BCH_DATA_user;
-
- BUG_ON(ca->open_buckets_partial_nr >
- ARRAY_SIZE(ca->open_buckets_partial));
-
- if (ca->open_buckets_partial_nr <
- ARRAY_SIZE(ca->open_buckets_partial) &&
- may_realloc) {
- spin_lock(&c->freelist_lock);
- ob->on_partial_list = true;
- ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
- ob - c->open_buckets;
- spin_unlock(&c->freelist_lock);
- closure_wake_up(&c->open_buckets_wait);
- closure_wake_up(&c->freelist_wait);
- } else {
- bch2_open_bucket_put(c, ob);
- }
+ BUG_ON(c->open_buckets_partial_nr >=
+ ARRAY_SIZE(c->open_buckets_partial));
+
+ spin_lock(&c->freelist_lock);
+ ob->on_partial_list = true;
+ c->open_buckets_partial[c->open_buckets_partial_nr++] =
+ ob - c->open_buckets;
+ spin_unlock(&c->freelist_lock);
+
+ closure_wake_up(&c->open_buckets_wait);
+ closure_wake_up(&c->freelist_wait);
}
/* _only_ for allocating the journal on a new device: */
ob->valid = true;
ob->sectors_free = ca->mi.bucket_size;
- ob->alloc_reserve = reserve;
ob->dev = ca->dev_idx;
ob->gen = a->gen;
ob->bucket = bucket;
goto err;
}
- bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(ca->dev_idx, b), BTREE_ITER_CACHED);
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_bkey_get_iter(trans, &iter,
+ BTREE_ID_alloc, POS(ca->dev_idx, b),
+ BTREE_ITER_CACHED);
ret = bkey_err(k);
if (ret) {
ob = ERR_PTR(ret);
if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
struct bch_backpointer bp;
- u64 bp_offset = 0;
+ struct bpos bp_pos = POS_MIN;
ret = bch2_get_next_backpointer(trans, POS(ca->dev_idx, b), -1,
- &bp_offset, &bp,
+ &bp_pos, &bp,
BTREE_ITER_NOPRESERVE);
if (ret) {
ob = ERR_PTR(ret);
goto err;
}
- if (bp_offset != U64_MAX) {
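+ /* bp_pos is left at POS_MAX when no backpointer was found: */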
+ if (!bkey_eq(bp_pos, POS_MAX)) {
/*
* Bucket may have data in it - we don't call
* bch2_trans_inconsistent() because fsck hasn't
return ob;
}
-static struct open_bucket *try_alloc_partial_bucket(struct bch_fs *c, struct bch_dev *ca,
- enum alloc_reserve reserve)
-{
- struct open_bucket *ob;
- int i;
-
- spin_lock(&c->freelist_lock);
-
- for (i = ca->open_buckets_partial_nr - 1; i >= 0; --i) {
- ob = c->open_buckets + ca->open_buckets_partial[i];
-
- if (reserve <= ob->alloc_reserve) {
- array_remove_item(ca->open_buckets_partial,
- ca->open_buckets_partial_nr,
- i);
- ob->on_partial_list = false;
- ob->alloc_reserve = reserve;
- spin_unlock(&c->freelist_lock);
- return ob;
- }
- }
-
- spin_unlock(&c->freelist_lock);
- return NULL;
-}
-
/*
* This path is for before the freespace btree is initialized:
*
static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
struct bch_dev *ca,
enum alloc_reserve reserve,
- bool may_alloc_partial,
struct closure *cl,
struct bch_dev_usage *usage)
{
if (waiting)
closure_wake_up(&c->freelist_wait);
-
- if (may_alloc_partial) {
- ob = try_alloc_partial_bucket(c, ca, reserve);
- if (ob)
- return ob;
- }
alloc:
ob = likely(freespace)
? bch2_bucket_alloc_freelist(trans, ca, reserve, &s, cl)
if (!IS_ERR(ob))
trace_and_count(c, bucket_alloc, ca,
bch2_alloc_reserves[reserve],
- may_alloc_partial,
ob->bucket,
usage->d[BCH_DATA_free].buckets,
avail,
else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
trace_and_count(c, bucket_alloc_fail, ca,
bch2_alloc_reserves[reserve],
- may_alloc_partial,
0,
usage->d[BCH_DATA_free].buckets,
avail,
struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
enum alloc_reserve reserve,
- bool may_alloc_partial,
struct closure *cl)
{
struct bch_dev_usage usage;
bch2_trans_do(c, NULL, NULL, 0,
PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, reserve,
- may_alloc_partial, cl, &usage)));
+ cl, &usage)));
return ob;
}
bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
}
-#define BUCKET_MAY_ALLOC_PARTIAL (1 << 0)
-#define BUCKET_ALLOC_USE_DURABILITY (1 << 1)
-
-static void add_new_bucket(struct bch_fs *c,
+static int add_new_bucket(struct bch_fs *c,
struct open_buckets *ptrs,
struct bch_devs_mask *devs_may_alloc,
+ unsigned nr_replicas,
unsigned *nr_effective,
bool *have_cache,
unsigned flags,
unsigned durability =
bch_dev_bkey_exists(c, ob->dev)->mi.durability;
+ BUG_ON(*nr_effective >= nr_replicas);
+ BUG_ON(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS);
+
__clear_bit(ob->dev, devs_may_alloc->d);
- *nr_effective += (flags & BUCKET_ALLOC_USE_DURABILITY)
+ *nr_effective += (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)
? durability : 1;
*have_cache |= !durability;
ob_push(c, ptrs, ob);
+
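+ /* nonzero return tells the caller to stop: enough effective replicas, or we added an ec bucket: */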
+ if (*nr_effective >= nr_replicas)
+ return 1;
+ if (ob->ec)
+ return 1;
+ return 0;
}
-static int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
+int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
struct open_buckets *ptrs,
struct dev_stripe_state *stripe,
struct bch_devs_mask *devs_may_alloc,
unsigned nr_replicas,
unsigned *nr_effective,
bool *have_cache,
- enum alloc_reserve reserve,
unsigned flags,
+ enum bch_data_type data_type,
+ enum alloc_reserve reserve,
struct closure *cl)
{
struct bch_fs *c = trans->c;
continue;
}
- ob = bch2_bucket_alloc_trans(trans, ca, reserve,
- flags & BUCKET_MAY_ALLOC_PARTIAL, cl, &usage);
+ ob = bch2_bucket_alloc_trans(trans, ca, reserve, cl, &usage);
if (!IS_ERR(ob))
bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
percpu_ref_put(&ca->ref);
continue;
}
- add_new_bucket(c, ptrs, devs_may_alloc,
- nr_effective, have_cache, flags, ob);
- if (*nr_effective >= nr_replicas) {
+ ob->data_type = data_type;
+
+ if (add_new_bucket(c, ptrs, devs_may_alloc,
+ nr_replicas, nr_effective,
+ have_cache, flags, ob)) {
ret = 0;
break;
}
return ret;
}
-int bch2_bucket_alloc_set(struct bch_fs *c,
- struct open_buckets *ptrs,
- struct dev_stripe_state *stripe,
- struct bch_devs_mask *devs_may_alloc,
- unsigned nr_replicas,
- unsigned *nr_effective,
- bool *have_cache,
- enum alloc_reserve reserve,
- unsigned flags,
- struct closure *cl)
-{
- return bch2_trans_do(c, NULL, NULL, 0,
- bch2_bucket_alloc_set_trans(&trans, ptrs, stripe,
- devs_may_alloc, nr_replicas,
- nr_effective, have_cache, reserve,
- flags, cl));
-}
-
/* Allocate from stripes: */
/*
* it's to a device we don't want:
*/
-static int bucket_alloc_from_stripe(struct bch_fs *c,
+static int bucket_alloc_from_stripe(struct btree_trans *trans,
struct open_buckets *ptrs,
struct write_point *wp,
struct bch_devs_mask *devs_may_alloc,
u16 target,
- unsigned erasure_code,
unsigned nr_replicas,
unsigned *nr_effective,
bool *have_cache,
+ enum alloc_reserve reserve,
unsigned flags,
struct closure *cl)
{
+ struct bch_fs *c = trans->c;
struct dev_alloc_list devs_sorted;
struct ec_stripe_head *h;
struct open_bucket *ob;
struct bch_dev *ca;
unsigned i, ec_idx;
-
- if (!erasure_code)
- return 0;
+ int ret = 0;
if (nr_replicas < 2)
return 0;
if (ec_open_bucket(c, ptrs))
return 0;
- h = bch2_ec_stripe_head_get(c, target, 0, nr_replicas - 1,
- wp == &c->copygc_write_point,
- cl);
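+ /* get or create an in-progress stripe for this target, then take one of its buckets: */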
+ h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, reserve, cl);
if (IS_ERR(h))
- return -PTR_ERR(h);
+ return PTR_ERR(h);
if (!h)
return 0;
ob->ec_idx = ec_idx;
ob->ec = h->s;
- add_new_bucket(c, ptrs, devs_may_alloc,
- nr_effective, have_cache, flags, ob);
- atomic_inc(&h->s->pin);
+ ec_stripe_new_get(h->s, STRIPE_REF_io);
+
+ ret = add_new_bucket(c, ptrs, devs_may_alloc,
+ nr_replicas, nr_effective,
+ have_cache, flags, ob);
out_put_head:
bch2_ec_stripe_head_put(c, h);
- return 0;
+ return ret;
}
/* Sector allocator */
-static void get_buckets_from_writepoint(struct bch_fs *c,
- struct open_buckets *ptrs,
- struct write_point *wp,
- struct bch_devs_mask *devs_may_alloc,
- unsigned nr_replicas,
- unsigned *nr_effective,
- bool *have_cache,
- unsigned flags,
- bool need_ec)
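+/* does this open bucket match the write point: device mask, data type, ec-ness? */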
+static bool want_bucket(struct bch_fs *c,
+ struct write_point *wp,
+ struct bch_devs_mask *devs_may_alloc,
+ bool *have_cache, bool ec,
+ struct open_bucket *ob)
+{
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+
+ if (!test_bit(ob->dev, devs_may_alloc->d))
+ return false;
+
+ if (ob->data_type != wp->data_type)
+ return false;
+
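+ /* durability 0 (cache) devices: no btree data, no ec data, and at most one cached copy per write: */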
+ if (!ca->mi.durability &&
+ (wp->data_type == BCH_DATA_btree || ec || *have_cache))
+ return false;
+
+ if (ec != (ob->ec != NULL))
+ return false;
+
+ return true;
+}
+
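+/* First, reuse any buckets this write point already has open: */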
+static int bucket_alloc_set_writepoint(struct bch_fs *c,
+ struct open_buckets *ptrs,
+ struct write_point *wp,
+ struct bch_devs_mask *devs_may_alloc,
+ unsigned nr_replicas,
+ unsigned *nr_effective,
+ bool *have_cache,
+ bool ec, unsigned flags)
{
struct open_buckets ptrs_skip = { .nr = 0 };
struct open_bucket *ob;
unsigned i;
+ int ret = 0;
open_bucket_for_each(c, &wp->ptrs, ob, i) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
-
- if (*nr_effective < nr_replicas &&
- test_bit(ob->dev, devs_may_alloc->d) &&
- (ca->mi.durability ||
- (wp->data_type == BCH_DATA_user && !*have_cache)) &&
- (ob->ec || !need_ec)) {
- add_new_bucket(c, ptrs, devs_may_alloc,
- nr_effective, have_cache,
- flags, ob);
- } else {
+ if (!ret && want_bucket(c, wp, devs_may_alloc,
+ have_cache, ec, ob))
+ ret = add_new_bucket(c, ptrs, devs_may_alloc,
+ nr_replicas, nr_effective,
+ have_cache, flags, ob);
+ else
ob_push(c, &ptrs_skip, ob);
- }
}
wp->ptrs = ptrs_skip;
+
+ return ret;
}
-static int open_bucket_add_buckets(struct btree_trans *trans,
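+/* Next, try to reuse buckets from the filesystem-wide partial list: */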
+static int bucket_alloc_set_partial(struct bch_fs *c,
+ struct open_buckets *ptrs,
+ struct write_point *wp,
+ struct bch_devs_mask *devs_may_alloc,
+ unsigned nr_replicas,
+ unsigned *nr_effective,
+ bool *have_cache, bool ec,
+ enum alloc_reserve reserve,
+ unsigned flags)
+{
+ int i, ret = 0;
+
+ if (!c->open_buckets_partial_nr)
+ return 0;
+
+ spin_lock(&c->freelist_lock);
+
+ if (!c->open_buckets_partial_nr)
+ goto unlock;
+
+ for (i = c->open_buckets_partial_nr - 1; i >= 0; --i) {
+ struct open_bucket *ob = c->open_buckets + c->open_buckets_partial[i];
+
+ if (want_bucket(c, wp, devs_may_alloc, have_cache, ec, ob)) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+ struct bch_dev_usage usage;
+ u64 avail;
+
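+ /* don't reuse a bucket on a device with no buckets free at this reserve: */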
+ bch2_dev_usage_read_fast(ca, &usage);
+ avail = dev_buckets_free(ca, usage, reserve);
+ if (!avail)
+ continue;
+
+ array_remove_item(c->open_buckets_partial,
+ c->open_buckets_partial_nr,
+ i);
+ ob->on_partial_list = false;
+
+ ret = add_new_bucket(c, ptrs, devs_may_alloc,
+ nr_replicas, nr_effective,
+ have_cache, flags, ob);
+ if (ret)
+ break;
+ }
+ }
+unlock:
+ spin_unlock(&c->freelist_lock);
+ return ret;
+}
+
+static int __open_bucket_add_buckets(struct btree_trans *trans,
struct open_buckets *ptrs,
struct write_point *wp,
struct bch_devs_list *devs_have,
u16 target,
- unsigned erasure_code,
+ bool erasure_code,
unsigned nr_replicas,
unsigned *nr_effective,
bool *have_cache,
struct bch_devs_mask devs;
struct open_bucket *ob;
struct closure *cl = NULL;
- int ret;
unsigned i;
+ int ret;
rcu_read_lock();
devs = target_rw_devs(c, wp->data_type, target);
open_bucket_for_each(c, ptrs, ob, i)
__clear_bit(ob->dev, devs.d);
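+
+ /* nothing to do if we already have an ec bucket open: */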
+ if (erasure_code && ec_open_bucket(c, ptrs))
+ return 0;
+
+ ret = bucket_alloc_set_writepoint(c, ptrs, wp, &devs,
+ nr_replicas, nr_effective,
+ have_cache, erasure_code, flags);
+ if (ret)
+ return ret;
+
+ ret = bucket_alloc_set_partial(c, ptrs, wp, &devs,
+ nr_replicas, nr_effective,
+ have_cache, erasure_code, reserve, flags);
+ if (ret)
+ return ret;
+
if (erasure_code) {
- if (!ec_open_bucket(c, ptrs)) {
- get_buckets_from_writepoint(c, ptrs, wp, &devs,
- nr_replicas, nr_effective,
- have_cache, flags, true);
- if (*nr_effective >= nr_replicas)
- return 0;
+ ret = bucket_alloc_from_stripe(trans, ptrs, wp, &devs,
+ target,
+ nr_replicas, nr_effective,
+ have_cache,
+ reserve, flags, _cl);
+ } else {
+retry_blocking:
+ /*
+ * Try nonblocking first, so that if one device is full we'll try from
+ * other devices:
+ */
+ ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs,
+ nr_replicas, nr_effective, have_cache,
+ flags, wp->data_type, reserve, cl);
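+ /* that didn't work: retry with the caller's closure, so we can block: */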
+ if (ret &&
+ !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
+ !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
+ !cl && _cl) {
+ cl = _cl;
+ goto retry_blocking;
}
- if (!ec_open_bucket(c, ptrs)) {
- ret = bucket_alloc_from_stripe(c, ptrs, wp, &devs,
- target, erasure_code,
- nr_replicas, nr_effective,
- have_cache, flags, _cl);
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
- bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
- bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
- return ret;
- if (*nr_effective >= nr_replicas)
- return 0;
- }
}
- get_buckets_from_writepoint(c, ptrs, wp, &devs,
- nr_replicas, nr_effective,
- have_cache, flags, false);
- if (*nr_effective >= nr_replicas)
- return 0;
+ return ret;
+}
-retry_blocking:
- /*
- * Try nonblocking first, so that if one device is full we'll try from
- * other devices:
- */
- ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs,
+static int open_bucket_add_buckets(struct btree_trans *trans,
+ struct open_buckets *ptrs,
+ struct write_point *wp,
+ struct bch_devs_list *devs_have,
+ u16 target,
+ unsigned erasure_code,
+ unsigned nr_replicas,
+ unsigned *nr_effective,
+ bool *have_cache,
+ enum alloc_reserve reserve,
+ unsigned flags,
+ struct closure *cl)
+{
+ int ret;
+
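+ /* try an erasure coded allocation first, then fall back to normal replication: */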
+ if (erasure_code) {
+ ret = __open_bucket_add_buckets(trans, ptrs, wp,
+ devs_have, target, erasure_code,
nr_replicas, nr_effective, have_cache,
reserve, flags, cl);
- if (ret &&
- !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
- !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
- !cl && _cl) {
- cl = _cl;
- goto retry_blocking;
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
+ bch2_err_matches(ret, BCH_ERR_operation_blocked) ||
+ bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
+ bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
+ return ret;
+ if (*nr_effective >= nr_replicas)
+ return 0;
}
- return ret;
+ ret = __open_bucket_add_buckets(trans, ptrs, wp,
+ devs_have, target, false,
+ nr_replicas, nr_effective, have_cache,
+ reserve, flags, cl);
+ return ret < 0 ? ret : 0;
}
-void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
- struct open_buckets *obs)
+static bool should_drop_bucket(struct open_bucket *ob, struct bch_fs *c,
+ struct bch_dev *ca, bool ec)
{
- struct open_buckets ptrs = { .nr = 0 };
- struct open_bucket *ob, *ob2;
- unsigned i, j;
-
- open_bucket_for_each(c, obs, ob, i) {
- bool drop = !ca || ob->dev == ca->dev_idx;
+ if (ec) {
+ return ob->ec != NULL;
+ } else if (ca) {
+ bool drop = ob->dev == ca->dev_idx;
+ struct open_bucket *ob2;
+ unsigned i;
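+
+ /* an ec bucket is also dropped if any other bucket in its stripe is on this device: */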
if (!drop && ob->ec) {
mutex_lock(&ob->ec->lock);
- for (j = 0; j < ob->ec->new_stripe.key.v.nr_blocks; j++) {
- if (!ob->ec->blocks[j])
+ for (i = 0; i < ob->ec->new_stripe.key.v.nr_blocks; i++) {
+ if (!ob->ec->blocks[i])
continue;
- ob2 = c->open_buckets + ob->ec->blocks[j];
+ ob2 = c->open_buckets + ob->ec->blocks[i];
drop |= ob2->dev == ca->dev_idx;
}
mutex_unlock(&ob->ec->lock);
}
- if (drop)
- bch2_open_bucket_put(c, ob);
- else
- ob_push(c, &ptrs, ob);
+ return drop;
+ } else {
+ return true;
}
-
- *obs = ptrs;
}
-void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
- struct write_point *wp)
+static void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
+ bool ec, struct write_point *wp)
{
+ struct open_buckets ptrs = { .nr = 0 };
+ struct open_bucket *ob;
+ unsigned i;
+
mutex_lock(&wp->lock);
- bch2_open_buckets_stop_dev(c, ca, &wp->ptrs);
+ open_bucket_for_each(c, &wp->ptrs, ob, i)
+ if (should_drop_bucket(ob, c, ca, ec))
+ bch2_open_bucket_put(c, ob);
+ else
+ ob_push(c, &ptrs, ob);
+ wp->ptrs = ptrs;
mutex_unlock(&wp->lock);
}
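+/* Release open buckets that point at this device (or at any stripe, if ec): */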
+void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *ca,
+ bool ec)
+{
+ unsigned i;
+
+ /* Close the write points that point to this device... */
+ for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
+ bch2_writepoint_stop(c, ca, ec, &c->write_points[i]);
+
+ bch2_writepoint_stop(c, ca, ec, &c->copygc_write_point);
+ bch2_writepoint_stop(c, ca, ec, &c->rebalance_write_point);
+ bch2_writepoint_stop(c, ca, ec, &c->btree_write_point);
+
+ mutex_lock(&c->btree_reserve_cache_lock);
+ while (c->btree_reserve_cache_nr) {
+ struct btree_alloc *a =
+ &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
+
+ bch2_open_buckets_put(c, &a->ob);
+ }
+ mutex_unlock(&c->btree_reserve_cache_lock);
+
+ spin_lock(&c->freelist_lock);
+ i = 0;
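+ /* evict matching buckets from the partial list, dropping the lock to put each one: */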
+ while (i < c->open_buckets_partial_nr) {
+ struct open_bucket *ob =
+ c->open_buckets + c->open_buckets_partial[i];
+
+ if (should_drop_bucket(ob, c, ca, ec)) {
+ --c->open_buckets_partial_nr;
+ swap(c->open_buckets_partial[i],
+ c->open_buckets_partial[c->open_buckets_partial_nr]);
+ ob->on_partial_list = false;
+ spin_unlock(&c->freelist_lock);
+ bch2_open_bucket_put(c, ob);
+ spin_lock(&c->freelist_lock);
+ } else {
+ i++;
+ }
+ }
+ spin_unlock(&c->freelist_lock);
+
+ bch2_ec_stop_dev(c, ca);
+}
+
static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
unsigned long write_point)
{
return true;
}
-static bool try_decrease_writepoints(struct bch_fs *c,
- unsigned old_nr)
+static bool try_decrease_writepoints(struct bch_fs *c, unsigned old_nr)
{
struct write_point *wp;
hlist_del_rcu(&wp->node);
mutex_unlock(&c->write_points_hash_lock);
- bch2_writepoint_stop(c, NULL, wp);
+ bch2_writepoint_stop(c, NULL, false, wp);
return true;
}
-static void bch2_trans_mutex_lock(struct btree_trans *trans,
+static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
struct mutex *lock)
{
if (!mutex_trylock(lock)) {
if (!(write_point & 1UL)) {
wp = (struct write_point *) write_point;
- bch2_trans_mutex_lock(trans, &wp->lock);
+ bch2_trans_mutex_lock_norelock(trans, &wp->lock);
return wp;
}
wp = __writepoint_find(head, write_point);
if (wp) {
lock_wp:
- bch2_trans_mutex_lock(trans, &wp->lock);
+ bch2_trans_mutex_lock_norelock(trans, &wp->lock);
if (wp->write_point == write_point)
goto out;
mutex_unlock(&wp->lock);
if (!oldest || time_before64(wp->last_used, oldest->last_used))
oldest = wp;
- bch2_trans_mutex_lock(trans, &oldest->lock);
- bch2_trans_mutex_lock(trans, &c->write_points_hash_lock);
+ bch2_trans_mutex_lock_norelock(trans, &oldest->lock);
+ bch2_trans_mutex_lock_norelock(trans, &c->write_points_hash_lock);
if (oldest >= c->write_points + c->write_points_nr ||
try_increase_writepoints(c)) {
mutex_unlock(&c->write_points_hash_lock);
struct open_bucket *ob;
struct open_buckets ptrs;
unsigned nr_effective, write_points_nr;
- unsigned ob_flags = 0;
bool have_cache;
int ret;
int i;
- if (!(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS))
- ob_flags |= BUCKET_ALLOC_USE_DURABILITY;
+ BUG_ON(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS);
BUG_ON(!nr_replicas || !nr_replicas_required);
retry:
*wp_ret = wp = writepoint_find(trans, write_point.v);
- if (wp->data_type == BCH_DATA_user)
- ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;
-
/* metadata may not allocate on cache devices: */
if (wp->data_type != BCH_DATA_user)
have_cache = true;
- if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
- ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
- target, erasure_code,
- nr_replicas, &nr_effective,
- &have_cache, reserve,
- ob_flags, cl);
- } else {
+ if (target && !(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
target, erasure_code,
nr_replicas, &nr_effective,
&have_cache, reserve,
- ob_flags, NULL);
+ flags, NULL);
if (!ret ||
bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto alloc_done;
+ /* Don't retry from all devices if we're out of open buckets: */
+ if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
+ goto allocate_blocking;
+
+ /*
+ * Only try to allocate cache (durability = 0 devices) from the
+ * specified target:
+ */
+ have_cache = true;
+
ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
0, erasure_code,
nr_replicas, &nr_effective,
&have_cache, reserve,
- ob_flags, cl);
+ flags, cl);
+ } else {
+allocate_blocking:
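+ /* no target, or only the specified devices allowed: single pass, blocking on cl: */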
+ ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
+ target, erasure_code,
+ nr_replicas, &nr_effective,
+ &have_cache, reserve,
+ flags, cl);
}
alloc_done:
BUG_ON(!ret && nr_effective < nr_replicas);
}
}
+static void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, struct open_bucket *ob)
+{
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+ unsigned data_type = ob->data_type;
+ barrier(); /* READ_ONCE() doesn't work on bitfields */
+
+ prt_printf(out, "%zu ref %u %s %u:%llu gen %u allocated %u/%u",
+ ob - c->open_buckets,
+ atomic_read(&ob->pin),
+ data_type < BCH_DATA_NR ? bch2_data_types[data_type] : "invalid data type",
+ ob->dev, ob->bucket, ob->gen,
+ ca->mi.bucket_size - ob->sectors_free, ca->mi.bucket_size);
+ if (ob->ec)
+ prt_printf(out, " ec idx %llu", ob->ec->idx);
+ if (ob->on_partial_list)
+ prt_str(out, " partial");
+ prt_newline(out);
+}
+
void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
{
struct open_bucket *ob;
+ out->atomic++;
+
for (ob = c->open_buckets;
ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
ob++) {
spin_lock(&ob->lock);
- if (ob->valid && !ob->on_partial_list) {
- prt_printf(out, "%zu ref %u type %s %u:%llu:%u\n",
- ob - c->open_buckets,
- atomic_read(&ob->pin),
- bch2_data_types[ob->data_type],
- ob->dev, ob->bucket, ob->gen);
- }
+ if (ob->valid && !ob->on_partial_list)
+ bch2_open_bucket_to_text(out, c, ob);
spin_unlock(&ob->lock);
}
+
+ --out->atomic;
+}
+
+void bch2_open_buckets_partial_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ unsigned i;
+
+ out->atomic++;
+ spin_lock(&c->freelist_lock);
+
+ for (i = 0; i < c->open_buckets_partial_nr; i++)
+ bch2_open_bucket_to_text(out, c,
+ c->open_buckets + c->open_buckets_partial[i]);
+
+ spin_unlock(&c->freelist_lock);
+ --out->atomic;
}
static const char * const bch2_write_point_states[] = {