static enum merge_result bch2_extent_merge(struct bch_fs *, struct btree *,
struct bkey_i *, struct bkey_i *);
-static void sort_key_next(struct btree_node_iter *iter,
+static void sort_key_next(struct btree_node_iter_large *iter,
struct btree *b,
struct btree_node_iter_set *i)
{
?: (l).k - (r).k; \
})
-static inline bool should_drop_next_key(struct btree_node_iter *iter,
+static inline bool should_drop_next_key(struct btree_node_iter_large *iter,
struct btree *b)
{
struct btree_node_iter_set *l = iter->data, *r = iter->data + 1;
}
struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *dst,
- struct btree *b,
- struct btree_node_iter *iter)
+ struct btree *b,
+ struct btree_node_iter_large *iter)
{
struct bkey_packed *out = dst->start;
struct btree_nr_keys nr;
heap_resort(iter, key_sort_cmp);
- while (!bch2_btree_node_iter_end(iter)) {
+ while (!bch2_btree_node_iter_large_end(iter)) {
if (!should_drop_next_key(iter, b)) {
struct bkey_packed *k =
__btree_node_offset_to_key(b, iter->data->k);
struct bch_dev *ca = c->devs[ptr->dev];
if (ca->mi.group &&
- ca->mi.group == group)
+ ca->mi.group - 1 == group)
return ptr;
}
return nr_ptrs;
}
-unsigned bch2_extent_nr_good_ptrs(struct bch_fs *c, struct bkey_s_c_extent e)
+/*
+ * bch2_extent_ptr_durability - durability contributed by a single extent
+ * pointer.
+ *
+ * Returns 0 for cached pointers (cached copies are not counted as replicas)
+ * and for pointers to devices in the FAILED state; otherwise returns the
+ * device's configured durability, i.e. how many replicas this one pointer
+ * is worth.  NOTE(review): presumably mi.durability defaults to 1 for
+ * ordinary devices so this generalizes the old one-pointer-one-replica
+ * count — confirm against the member-info definition.
+ */
+unsigned bch2_extent_ptr_durability(struct bch_fs *c,
+ const struct bch_extent_ptr *ptr)
+{
+ struct bch_dev *ca;
+
+ /* Cached copies don't count towards durability. */
+ if (ptr->cached)
+ return 0;
+
+ ca = bch_dev_bkey_exists(c, ptr->dev);
+
+ /* A failed device can't be relied on to hold data. */
+ if (ca->mi.state == BCH_MEMBER_STATE_FAILED)
+ return 0;
+
+ return ca->mi.durability;
+}
+
+/*
+ * bch2_extent_durability - total durability of an extent: the sum of
+ * bch2_extent_ptr_durability() over all of its pointers.
+ *
+ * Replaces bch2_extent_nr_good_ptrs(), which counted every non-cached,
+ * non-failed pointer as exactly one replica; with per-device durability a
+ * single pointer may now count for more (or zero).
+ */
+unsigned bch2_extent_durability(struct bch_fs *c, struct bkey_s_c_extent e)
{
const struct bch_extent_ptr *ptr;
-	unsigned nr_ptrs = 0;
+	unsigned durability = 0;
extent_for_each_ptr(e, ptr)
-		nr_ptrs += (!ptr->cached &&
-			    bch_dev_bkey_exists(c, ptr->dev)->mi.state !=
-			    BCH_MEMBER_STATE_FAILED);
+		durability += bch2_extent_ptr_durability(c, ptr);
-	return nr_ptrs;
+	return durability;
}
unsigned bch2_extent_is_compressed(struct bkey_s_c k)
bkey_start_pos(&_ur)) ?: (r).k - (l).k; \
})
-static inline void extent_sort_sift(struct btree_node_iter *iter,
+static inline void extent_sort_sift(struct btree_node_iter_large *iter,
struct btree *b, size_t i)
{
heap_sift_down(iter, i, extent_sort_cmp);
}
-static inline void extent_sort_next(struct btree_node_iter *iter,
+static inline void extent_sort_next(struct btree_node_iter_large *iter,
struct btree *b,
struct btree_node_iter_set *i)
{
struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c,
struct bset *dst,
struct btree *b,
- struct btree_node_iter *iter)
+ struct btree_node_iter_large *iter)
{
struct bkey_format *f = &b->format;
struct btree_node_iter_set *_l = iter->data, *_r;
heap_resort(iter, extent_sort_cmp);
- while (!bch2_btree_node_iter_end(iter)) {
+ while (!bch2_btree_node_iter_large_end(iter)) {
lk = __btree_node_offset_to_key(b, _l->k);
if (iter->used == 1) {
unsigned target)
{
struct bch_extent_ptr *ptr;
- unsigned nr_cached = 0, nr_good = bch2_extent_nr_good_ptrs(c, e.c);
+ int extra = bch2_extent_durability(c, e.c) - nr_desired_replicas;
- if (nr_good <= nr_desired_replicas)
+ if (extra <= 0)
return;
- nr_cached = nr_good - nr_desired_replicas;
+ extent_for_each_ptr(e, ptr) {
+ int n = bch2_extent_ptr_durability(c, ptr);
- extent_for_each_ptr(e, ptr)
- if (!ptr->cached &&
+ if (n && n <= extra &&
!dev_in_target(c->devs[ptr->dev], target)) {
ptr->cached = true;
- nr_cached--;
- if (!nr_cached)
- return;
+ extra -= n;
+ }
+ }
+
+ extent_for_each_ptr(e, ptr) {
+ int n = bch2_extent_ptr_durability(c, ptr);
+
+ if (n && n <= extra) {
+ ptr->cached = true;
+ extra -= n;
}
+ }
}
/*