static enum merge_result bch2_extent_merge(struct bch_fs *, struct btree *,
struct bkey_i *, struct bkey_i *);
-static void sort_key_next(struct btree_node_iter *iter,
+static void sort_key_next(struct btree_node_iter_large *iter,
struct btree *b,
struct btree_node_iter_set *i)
{
?: (l).k - (r).k; \
})
-static inline bool should_drop_next_key(struct btree_node_iter *iter,
+static inline bool should_drop_next_key(struct btree_node_iter_large *iter,
struct btree *b)
{
struct btree_node_iter_set *l = iter->data, *r = iter->data + 1;
}
struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *dst,
- struct btree *b,
- struct btree_node_iter *iter)
+ struct btree *b,
+ struct btree_node_iter_large *iter)
{
struct bkey_packed *out = dst->start;
struct btree_nr_keys nr;
heap_resort(iter, key_sort_cmp);
- while (!bch2_btree_node_iter_end(iter)) {
+ while (!bch2_btree_node_iter_large_end(iter)) {
if (!should_drop_next_key(iter, b)) {
struct bkey_packed *k =
__btree_node_offset_to_key(b, iter->data->k);
return dropped;
}
+const struct bch_extent_ptr *
+bch2_extent_has_group(struct bch_fs *c, struct bkey_s_c_extent e, unsigned group)
+{
+ const struct bch_extent_ptr *ptr;
+
+ extent_for_each_ptr(e, ptr) {
+ struct bch_dev *ca = c->devs[ptr->dev];
+
+ if (ca->mi.group &&
+ ca->mi.group - 1 == group)
+ return ptr;
+ }
+
+ return NULL;
+}
+
+const struct bch_extent_ptr *
+bch2_extent_has_target(struct bch_fs *c, struct bkey_s_c_extent e, unsigned target)
+{
+ const struct bch_extent_ptr *ptr;
+
+ extent_for_each_ptr(e, ptr)
+ if (dev_in_target(c->devs[ptr->dev], target))
+ return ptr;
+
+ return NULL;
+}
+
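/*
 * Editor's sketch, not part of the patch: a hypothetical helper built on the
 * same extent_for_each_ptr()/dev_in_target() interfaces used above, counting
 * how many of an extent's pointers fall inside @target.
 */
static unsigned extent_ptrs_in_target_sketch(struct bch_fs *c,
					     struct bkey_s_c_extent e,
					     unsigned target)
{
	const struct bch_extent_ptr *ptr;
	unsigned nr = 0;

	extent_for_each_ptr(e, ptr)
		if (dev_in_target(c->devs[ptr->dev], target))
			nr++;

	return nr;
}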
unsigned bch2_extent_nr_ptrs(struct bkey_s_c_extent e)
{
const struct bch_extent_ptr *ptr;
return nr_ptrs;
}
-unsigned bch2_extent_nr_good_ptrs(struct bch_fs *c, struct bkey_s_c_extent e)
+unsigned bch2_extent_ptr_durability(struct bch_fs *c,
+ const struct bch_extent_ptr *ptr)
+{
+ struct bch_dev *ca;
+
+ if (ptr->cached)
+ return 0;
+
+ ca = bch_dev_bkey_exists(c, ptr->dev);
+
+ if (ca->mi.state == BCH_MEMBER_STATE_FAILED)
+ return 0;
+
+ return ca->mi.durability;
+}
+
+unsigned bch2_extent_durability(struct bch_fs *c, struct bkey_s_c_extent e)
{
const struct bch_extent_ptr *ptr;
- unsigned nr_ptrs = 0;
+ unsigned durability = 0;
extent_for_each_ptr(e, ptr)
- nr_ptrs += (!ptr->cached &&
- bch_dev_bkey_exists(c, ptr->dev)->mi.state !=
- BCH_MEMBER_STATE_FAILED);
+ durability += bch2_extent_ptr_durability(c, ptr);
- return nr_ptrs;
+ return durability;
}
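/*
 * Editor's sketch, not part of the patch: with the durability helpers above,
 * "does this extent still have enough intact replicas?" reduces to a single
 * comparison.  The helper name is hypothetical; the real rebalance/copygc
 * paths make this decision elsewhere.
 */
static bool extent_needs_rereplicate_sketch(struct bch_fs *c,
					    struct bkey_s_c_extent e,
					    unsigned nr_desired_replicas)
{
	/*
	 * Cached pointers and pointers on failed devices contribute zero
	 * durability, so they don't count towards the desired replica count.
	 */
	return bch2_extent_durability(c, e) < nr_desired_replicas;
}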
unsigned bch2_extent_is_compressed(struct bkey_s_c k)
goto err;
}
- if (!bch2_sb_has_replicas(c, BCH_DATA_BTREE, bch2_extent_devs(e))) {
+ if (!bch2_bkey_replicas_marked(c, BCH_DATA_BTREE, e.s_c)) {
bch2_bkey_val_to_text(c, btree_node_type(b),
buf, sizeof(buf), k);
bch2_fs_bug(c,
bkey_start_pos(&_ur)) ?: (r).k - (l).k; \
})
-static inline void extent_sort_sift(struct btree_node_iter *iter,
+static inline void extent_sort_sift(struct btree_node_iter_large *iter,
struct btree *b, size_t i)
{
heap_sift_down(iter, i, extent_sort_cmp);
}
-static inline void extent_sort_next(struct btree_node_iter *iter,
+static inline void extent_sort_next(struct btree_node_iter_large *iter,
struct btree *b,
struct btree_node_iter_set *i)
{
struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c,
struct bset *dst,
struct btree *b,
- struct btree_node_iter *iter)
+ struct btree_node_iter_large *iter)
{
struct bkey_format *f = &b->format;
struct btree_node_iter_set *_l = iter->data, *_r;
heap_resort(iter, extent_sort_cmp);
- while (!bch2_btree_node_iter_end(iter)) {
+ while (!bch2_btree_node_iter_large_end(iter)) {
lk = __btree_node_offset_to_key(b, _l->k);
if (iter->used == 1) {
unsigned seq, stale;
char buf[160];
bool bad;
- unsigned ptrs_per_tier[BCH_TIER_MAX];
unsigned replicas = 0;
/*
* going to get overwritten during replay)
*/
- memset(ptrs_per_tier, 0, sizeof(ptrs_per_tier));
-
extent_for_each_ptr(e, ptr) {
ca = bch_dev_bkey_exists(c, ptr->dev);
replicas++;
- ptrs_per_tier[ca->mi.tier]++;
/*
* If journal replay hasn't finished, we might be seeing keys
}
if (!bkey_extent_is_cached(e.k) &&
- !bch2_sb_has_replicas(c, BCH_DATA_USER, bch2_extent_devs(e))) {
+ !bch2_bkey_replicas_marked(c, BCH_DATA_USER, e.s_c)) {
bch2_bkey_val_to_text(c, btree_node_type(b),
buf, sizeof(buf), e.s_c);
bch2_fs_bug(c,
#undef p
}
-static unsigned PTR_TIER(struct bch_fs *c,
- const struct bch_extent_ptr *ptr)
-{
- return bch_dev_bkey_exists(c, ptr->dev)->mi.tier;
-}
-
static void bch2_extent_crc_init(union bch_extent_crc *crc,
struct bch_extent_crc_unpacked new)
{
}
void bch2_extent_mark_replicas_cached(struct bch_fs *c,
- struct bkey_s_extent e)
+ struct bkey_s_extent e,
+ unsigned nr_desired_replicas,
+ unsigned target)
{
struct bch_extent_ptr *ptr;
- unsigned tier = 0, nr_cached = 0;
- unsigned nr_good = bch2_extent_nr_good_ptrs(c, e.c);
- bool have_higher_tier;
+ int extra = bch2_extent_durability(c, e.c) - nr_desired_replicas;
- if (nr_good <= c->opts.data_replicas)
+ if (extra <= 0)
return;
- nr_cached = nr_good - c->opts.data_replicas;
+ extent_for_each_ptr(e, ptr) {
+ int n = bch2_extent_ptr_durability(c, ptr);
- do {
- have_higher_tier = false;
+ if (n && n <= extra &&
+ !dev_in_target(c->devs[ptr->dev], target)) {
+ ptr->cached = true;
+ extra -= n;
+ }
+ }
- extent_for_each_ptr(e, ptr) {
- if (!ptr->cached &&
- PTR_TIER(c, ptr) == tier) {
- ptr->cached = true;
- nr_cached--;
- if (!nr_cached)
- return;
- }
+ extent_for_each_ptr(e, ptr) {
+ int n = bch2_extent_ptr_durability(c, ptr);
- if (PTR_TIER(c, ptr) > tier)
- have_higher_tier = true;
+ if (n && n <= extra) {
+ ptr->cached = true;
+ extra -= n;
}
-
- tier++;
- } while (have_higher_tier);
+ }
}
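/*
 * Worked example (editor's note, not part of the patch): an extent with three
 * non-cached pointers of durability 1 each, nr_desired_replicas = 2, and one
 * pointer outside @target.  extra starts at 3 - 2 = 1; the first pass marks
 * the out-of-target pointer cached (extra drops to 0), and the second pass
 * then leaves the two in-target pointers untouched, so the extent's
 * durability never falls below the desired replica count.
 */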
/*
- * This picks a non-stale pointer, preferabbly from a device other than
- * avoid. Avoid can be NULL, meaning pick any. If there are no non-stale
- * pointers to other devices, it will still pick a pointer from avoid.
- * Note that it prefers lowered-numbered pointers to higher-numbered pointers
- * as the pointers are sorted by tier, hence preferring pointers to tier 0
- * rather than pointers to tier 1.
+ * This picks a non-stale pointer, preferably from a device other than @avoid.
+ * @avoid can be NULL, meaning pick any. If there are no non-stale pointers to
+ * other devices, it will still pick a pointer from @avoid.
*/
void bch2_extent_pick_ptr(struct bch_fs *c, struct bkey_s_c k,
struct bch_devs_mask *avoid,