struct bch_replicas_entry_padded {
	struct bch_replicas_entry	e;
	u8				pad[BCH_SB_MEMBERS_MAX];
};

static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
					    struct bch_replicas_cpu *);

/* Replicas tracking - in memory: */

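/*
 * The in-memory replicas table (bch_replicas_cpu) is a flat array of
 * variable-length entries, all padded out to a common entry_size so the
 * table can be sorted with eytzinger0_sort() and searched with
 * eytzinger0_find() using plain memcmp() as the comparator. That only works
 * because unused bytes are zeroed and each entry's device list is kept
 * sorted by replicas_entry_sort().
 */
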
static inline int u8_cmp(u8 l, u8 r)
{
	return (l > r) - (l < r);
}

static void replicas_entry_sort(struct bch_replicas_entry *e)
{
	bubble_sort(e->devs, e->nr_devs, u8_cmp);
}

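/*
 * Note: nr_devs fits in a u8, so device lists are tiny and bubble_sort()
 * is plenty here.
 */
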
#define for_each_cpu_replicas_entry(_r, _i)				\
	for (_i = (_r)->entries;					\
	     (void *) (_i) < (void *) (_r)->entries + (_r)->nr * (_r)->entry_size;\
	     _i = (void *) (_i) + (_r)->entry_size)

static inline struct bch_replicas_entry *
cpu_replicas_entry(struct bch_replicas_cpu *r, unsigned i)
{
	return (void *) r->entries + r->entry_size * i;
}

static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
{
	eytzinger0_sort(r->entries, r->nr, r->entry_size, memcmp, NULL);
}

static void replicas_entry_to_text(struct printbuf *out,
				   struct bch_replicas_entry *e)
{
	unsigned i;

	pr_buf(out, "%s: %u/%u [",
	       bch2_data_types[e->data_type],
	       e->nr_required,
	       e->nr_devs);

	for (i = 0; i < e->nr_devs; i++)
		pr_buf(out, i ? " %u" : "%u", e->devs[i]);

	pr_buf(out, "]");
}

void bch2_cpu_replicas_to_text(struct printbuf *out,
			       struct bch_replicas_cpu *r)
{
	struct bch_replicas_entry *e;
	bool first = true;

	for_each_cpu_replicas_entry(r, e) {
		if (!first)
			pr_buf(out, " ");
		first = false;

		replicas_entry_to_text(out, e);
	}
}

static void extent_to_replicas(struct bkey_s_c k,
			       struct bch_replicas_entry *r)
{
	if (bkey_extent_is_data(k.k)) {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;

		r->nr_required = 1;

		extent_for_each_ptr_decode(e, p, entry)
			if (!p.ptr.cached)
				r->devs[r->nr_devs++] = p.ptr.dev;
	}
}

static void bkey_to_replicas(enum bkey_type type,
			     struct bkey_s_c k,
			     struct bch_replicas_entry *e)
{
	e->nr_devs = 0;

	switch (type) {
	case BKEY_TYPE_BTREE:
		e->data_type = BCH_DATA_BTREE;
		extent_to_replicas(k, e);
		break;
	case BKEY_TYPE_EXTENTS:
		e->data_type = BCH_DATA_USER;
		extent_to_replicas(k, e);
		break;
	default:
		break;
	}

	replicas_entry_sort(e);
}

static inline void devlist_to_replicas(struct bch_devs_list devs,
				       enum bch_data_type data_type,
				       struct bch_replicas_entry *e)
{
	unsigned i;

	BUG_ON(!data_type ||
	       data_type == BCH_DATA_SB ||
	       data_type >= BCH_DATA_NR);

	e->data_type	= data_type;
	e->nr_devs	= 0;
	e->nr_required	= 1;

	for (i = 0; i < devs.nr; i++)
		e->devs[e->nr_devs++] = devs.devs[i];

	replicas_entry_sort(e);
}

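/*
 * The table is never resized in place: adding an entry allocates a new
 * table sized for max(old entry_size, new entry's size), copies the old
 * entries across, appends the new one and re-sorts. RCU readers therefore
 * see either the old or the new table, never an intermediate state.
 */
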
static struct bch_replicas_cpu *
cpu_replicas_add_entry(struct bch_replicas_cpu *old,
		       struct bch_replicas_entry *new_entry)
{
	struct bch_replicas_cpu *new;
	unsigned i, nr, entry_size;

	entry_size = max_t(unsigned, old->entry_size,
			   replicas_entry_bytes(new_entry));
	nr = old->nr + 1;

	new = kzalloc(sizeof(struct bch_replicas_cpu) +
		      nr * entry_size, GFP_NOIO);
	if (!new)
		return NULL;

	new->nr		= nr;
	new->entry_size	= entry_size;

	for (i = 0; i < old->nr; i++)
		memcpy(cpu_replicas_entry(new, i),
		       cpu_replicas_entry(old, i),
		       old->entry_size);

	memcpy(cpu_replicas_entry(new, old->nr),
	       new_entry,
	       replicas_entry_bytes(new_entry));

	bch2_cpu_replicas_sort(new);
	return new;
}

static bool __replicas_has_entry(struct bch_replicas_cpu *r,
				 struct bch_replicas_entry *search)
{
	return replicas_entry_bytes(search) <= r->entry_size &&
		eytzinger0_find(r->entries, r->nr,
				r->entry_size,
				memcmp, search) < r->nr;
}

static bool replicas_has_entry(struct bch_fs *c,
			       struct bch_replicas_entry *search,
			       bool check_gc_replicas)
{
	struct bch_replicas_cpu *r, *gc_r;
	bool marked;

	rcu_read_lock();
	r = rcu_dereference(c->replicas);
	marked = __replicas_has_entry(r, search) &&
		(!check_gc_replicas ||
		 likely(!(gc_r = rcu_dereference(c->replicas_gc))) ||
		 __replicas_has_entry(gc_r, search));
	rcu_read_unlock();

	return marked;
}

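/*
 * Slowpath for marking replicas not yet in the table: under sb_lock, grow
 * the main table and, if a GC pass is in flight, the GC table too; write
 * the updated superblock; and only then publish the new tables with
 * rcu_assign_pointer(), so the in-memory state never claims a replicas
 * entry that isn't yet persistent on disk.
 */
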
static int bch2_mark_replicas_slowpath(struct bch_fs *c,
				struct bch_replicas_entry *new_entry)
{
	struct bch_replicas_cpu *old_gc, *new_gc = NULL, *old_r, *new_r = NULL;
	int ret = -ENOMEM;

	mutex_lock(&c->sb_lock);

	old_gc = rcu_dereference_protected(c->replicas_gc,
					   lockdep_is_held(&c->sb_lock));
	if (old_gc && !__replicas_has_entry(old_gc, new_entry)) {
		new_gc = cpu_replicas_add_entry(old_gc, new_entry);
		if (!new_gc)
			goto err;
	}

	old_r = rcu_dereference_protected(c->replicas,
					  lockdep_is_held(&c->sb_lock));
	if (!__replicas_has_entry(old_r, new_entry)) {
		new_r = cpu_replicas_add_entry(old_r, new_entry);
		if (!new_r)
			goto err;

		ret = bch2_cpu_replicas_to_sb_replicas(c, new_r);
		if (ret)
			goto err;
	}

	/* allocations done, now commit: */
	if (new_r)
		bch2_write_super(c);

	/* don't update in memory replicas until changes are persistent */
	if (new_gc) {
		rcu_assign_pointer(c->replicas_gc, new_gc);
		kfree_rcu(old_gc, rcu);
	}
	if (new_r) {
		rcu_assign_pointer(c->replicas, new_r);
		kfree_rcu(old_r, rcu);
	}

	mutex_unlock(&c->sb_lock);
	return 0;
err:
	mutex_unlock(&c->sb_lock);
	kfree(new_gc);
	kfree(new_r);
	return ret;
}

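/*
 * Fast path: if the entry is already present, marking is a lock-free RCU
 * lookup; only genuinely new entries take the slowpath above.
 */
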
static int __bch2_mark_replicas(struct bch_fs *c,
				struct bch_replicas_entry *devs)
{
	return likely(replicas_has_entry(c, devs, true))
		? 0
		: bch2_mark_replicas_slowpath(c, devs);
}

int bch2_mark_replicas(struct bch_fs *c,
		       enum bch_data_type data_type,
		       struct bch_devs_list devs)
{
	struct bch_replicas_entry_padded search;

	if (!devs.nr)
		return 0;

	memset(&search, 0, sizeof(search));

	BUG_ON(devs.nr >= BCH_REPLICAS_MAX);

	devlist_to_replicas(devs, data_type, &search.e);

	return __bch2_mark_replicas(c, &search.e);
}

int bch2_mark_bkey_replicas(struct bch_fs *c,
			    enum bkey_type type,
			    struct bkey_s_c k)
{
	struct bch_replicas_entry_padded search;
	int ret;

	memset(&search, 0, sizeof(search));

	if (type == BKEY_TYPE_EXTENTS) {
		struct bch_devs_list cached = bch2_bkey_cached_devs(k);
		unsigned i;

		for (i = 0; i < cached.nr; i++)
			if ((ret = bch2_mark_replicas(c, BCH_DATA_CACHED,
					bch2_dev_list_single(cached.devs[i]))))
				return ret;
	}

	bkey_to_replicas(type, k, &search.e);

	return search.e.nr_devs
		? __bch2_mark_replicas(c, &search.e)
		: 0;
}

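/*
 * Replicas GC: bch2_replicas_gc_start() builds a table containing only the
 * entries whose data types are *not* in typemask; marks that happen while GC
 * runs are added to it by bch2_mark_replicas_slowpath(), and
 * bch2_replicas_gc_end() installs it as the live table. Entries of the GC'd
 * types thus survive only if they were re-marked during the pass.
 */
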
int bch2_replicas_gc_end(struct bch_fs *c, int ret)
{
	struct bch_replicas_cpu *new_r, *old_r;

	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);

	new_r = rcu_dereference_protected(c->replicas_gc,
					  lockdep_is_held(&c->sb_lock));
	rcu_assign_pointer(c->replicas_gc, NULL);

	if (ret)
		goto err;

	if (bch2_cpu_replicas_to_sb_replicas(c, new_r)) {
		ret = -ENOSPC;
		goto err;
	}

	bch2_write_super(c);

	/* don't update in memory replicas until changes are persistent */

	old_r = rcu_dereference_protected(c->replicas,
					  lockdep_is_held(&c->sb_lock));

	rcu_assign_pointer(c->replicas, new_r);
	kfree_rcu(old_r, rcu);
out:
	mutex_unlock(&c->sb_lock);
	return ret;
err:
	kfree_rcu(new_r, rcu);
	goto out;
}

int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
{
	struct bch_replicas_cpu *dst, *src;
	struct bch_replicas_entry *e;

	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);
	BUG_ON(c->replicas_gc);

	src = rcu_dereference_protected(c->replicas,
					lockdep_is_held(&c->sb_lock));

	dst = kzalloc(sizeof(struct bch_replicas_cpu) +
		      src->nr * src->entry_size, GFP_NOIO);
	if (!dst) {
		mutex_unlock(&c->sb_lock);
		return -ENOMEM;
	}

	dst->nr		= 0;
	dst->entry_size	= src->entry_size;

	for_each_cpu_replicas_entry(src, e)
		if (!((1 << e->data_type) & typemask))
			memcpy(cpu_replicas_entry(dst, dst->nr++),
			       e, src->entry_size);

	bch2_cpu_replicas_sort(dst);

	rcu_assign_pointer(c->replicas_gc, dst);
	mutex_unlock(&c->sb_lock);

	return 0;
}

/* Replicas tracking - superblock: */

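/*
 * Two on-disk encodings exist: bch_sb_field_replicas_v0, whose entries lack
 * nr_required, and the current bch_sb_field_replicas. Reading a v0 field
 * pads entries out and defaults nr_required to 1; writing picks the v0
 * encoding unless some entry actually needs nr_required != 1 (see
 * bch2_cpu_replicas_to_sb_replicas()).
 */
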
static struct bch_replicas_cpu *
__bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r)
{
	struct bch_replicas_entry *e, *dst;
	struct bch_replicas_cpu *cpu_r;
	unsigned nr = 0, entry_size = 0, idx = 0;

	for_each_replicas_entry(sb_r, e) {
		entry_size = max_t(unsigned, entry_size,
				   replicas_entry_bytes(e));
		nr++;
	}

	cpu_r = kzalloc(sizeof(struct bch_replicas_cpu) +
			nr * entry_size, GFP_NOIO);
	if (!cpu_r)
		return NULL;

	cpu_r->nr		= nr;
	cpu_r->entry_size	= entry_size;

	for_each_replicas_entry(sb_r, e) {
		dst = cpu_replicas_entry(cpu_r, idx++);
		memcpy(dst, e, replicas_entry_bytes(e));
		replicas_entry_sort(dst);
	}

	return cpu_r;
}

static struct bch_replicas_cpu *
__bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r)
{
	struct bch_replicas_entry_v0 *e;
	struct bch_replicas_cpu *cpu_r;
	unsigned nr = 0, entry_size = 0, idx = 0;

	for_each_replicas_entry(sb_r, e) {
		entry_size = max_t(unsigned, entry_size,
				   replicas_entry_bytes(e));
		nr++;
	}

	/* v0 entries have no nr_required field: */
	entry_size += sizeof(struct bch_replicas_entry) -
		sizeof(struct bch_replicas_entry_v0);

	cpu_r = kzalloc(sizeof(struct bch_replicas_cpu) +
			nr * entry_size, GFP_NOIO);
	if (!cpu_r)
		return NULL;

	cpu_r->nr		= nr;
	cpu_r->entry_size	= entry_size;

	for_each_replicas_entry(sb_r, e) {
		struct bch_replicas_entry *dst =
			cpu_replicas_entry(cpu_r, idx++);

		dst->data_type	= e->data_type;
		dst->nr_devs	= e->nr_devs;
		dst->nr_required = 1;
		memcpy(dst->devs, e->devs, e->nr_devs);
		replicas_entry_sort(dst);
	}

	return cpu_r;
}

int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
{
	struct bch_sb_field_replicas *sb_v1;
	struct bch_sb_field_replicas_v0 *sb_v0;
	struct bch_replicas_cpu *cpu_r, *old_r;

	if ((sb_v1 = bch2_sb_get_replicas(c->disk_sb.sb)))
		cpu_r = __bch2_sb_replicas_to_cpu_replicas(sb_v1);
	else if ((sb_v0 = bch2_sb_get_replicas_v0(c->disk_sb.sb)))
		cpu_r = __bch2_sb_replicas_v0_to_cpu_replicas(sb_v0);
	else
		cpu_r = kzalloc(sizeof(struct bch_replicas_cpu), GFP_NOIO);

	if (!cpu_r)
		return -ENOMEM;

	bch2_cpu_replicas_sort(cpu_r);

	old_r = rcu_dereference_check(c->replicas, lockdep_is_held(&c->sb_lock));
	rcu_assign_pointer(c->replicas, cpu_r);
	if (old_r)
		kfree_rcu(old_r, rcu);

	return 0;
}

static int bch2_cpu_replicas_to_sb_replicas_v0(struct bch_fs *c,
					       struct bch_replicas_cpu *r)
{
	struct bch_sb_field_replicas_v0 *sb_r;
	struct bch_replicas_entry_v0 *dst;
	struct bch_replicas_entry *src;
	size_t bytes;

	bytes = sizeof(struct bch_sb_field_replicas);

	/* v0 entries are one byte smaller, with no nr_required: */
	for_each_cpu_replicas_entry(r, src)
		bytes += replicas_entry_bytes(src) - 1;

	sb_r = bch2_sb_resize_replicas_v0(&c->disk_sb,
			DIV_ROUND_UP(bytes, sizeof(u64)));
	if (!sb_r)
		return -ENOSPC;

	bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas);
	sb_r = bch2_sb_get_replicas_v0(c->disk_sb.sb);

	memset(&sb_r->entries, 0,
	       vstruct_end(&sb_r->field) -
	       (void *) &sb_r->entries);

	dst = sb_r->entries;
	for_each_cpu_replicas_entry(r, src) {
		dst->data_type	= src->data_type;
		dst->nr_devs	= src->nr_devs;
		memcpy(dst->devs, src->devs, src->nr_devs);

		dst = replicas_entry_next(dst);

		BUG_ON((void *) dst > vstruct_end(&sb_r->field));
	}

	return 0;
}

static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
					    struct bch_replicas_cpu *r)
{
	struct bch_sb_field_replicas *sb_r;
	struct bch_replicas_entry *dst, *src;
	bool need_v1 = false;
	size_t bytes;

	bytes = sizeof(struct bch_sb_field_replicas);

	for_each_cpu_replicas_entry(r, src) {
		bytes += replicas_entry_bytes(src);
		if (src->nr_required != 1)
			need_v1 = true;
	}

	if (!need_v1)
		return bch2_cpu_replicas_to_sb_replicas_v0(c, r);

	sb_r = bch2_sb_resize_replicas(&c->disk_sb,
			DIV_ROUND_UP(bytes, sizeof(u64)));
	if (!sb_r)
		return -ENOSPC;

	bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas_v0);
	sb_r = bch2_sb_get_replicas(c->disk_sb.sb);

	memset(&sb_r->entries, 0,
	       vstruct_end(&sb_r->field) -
	       (void *) &sb_r->entries);

	dst = sb_r->entries;
	for_each_cpu_replicas_entry(r, src) {
		memcpy(dst, src, replicas_entry_bytes(src));

		dst = replicas_entry_next(dst);

		BUG_ON((void *) dst > vstruct_end(&sb_r->field));
	}

	return 0;
}

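/*
 * Sort a freshly converted table with plain memcmp and scan adjacent pairs;
 * two entries that compare equal over the full padded entry_size are
 * duplicates, which the superblock must not contain.
 */
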
static const char *check_dup_replicas_entries(struct bch_replicas_cpu *cpu_r)
{
	unsigned i;

	sort_cmp_size(cpu_r->entries,
		      cpu_r->nr,
		      cpu_r->entry_size,
		      memcmp, NULL);

	for (i = 0; i + 1 < cpu_r->nr; i++) {
		struct bch_replicas_entry *l =
			cpu_replicas_entry(cpu_r, i);
		struct bch_replicas_entry *r =
			cpu_replicas_entry(cpu_r, i + 1);

		BUG_ON(memcmp(l, r, cpu_r->entry_size) > 0);

		if (!memcmp(l, r, cpu_r->entry_size))
			return "duplicate replicas entry";
	}

	return NULL;
}

static const char *bch2_sb_validate_replicas(struct bch_sb *sb, struct bch_sb_field *f)
{
	struct bch_sb_field_replicas *sb_r = field_to_type(f, replicas);
	struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
	struct bch_replicas_cpu *cpu_r = NULL;
	struct bch_replicas_entry *e;
	const char *err;
	unsigned i;

	for_each_replicas_entry(sb_r, e) {
		err = "invalid replicas entry: invalid data type";
		if (e->data_type >= BCH_DATA_NR)
			goto err;

		err = "invalid replicas entry: no devices";
		if (!e->nr_devs)
			goto err;

		err = "invalid replicas entry: bad nr_required";
		if (!e->nr_required ||
		    (e->nr_required > 1 &&
		     e->nr_required >= e->nr_devs))
			goto err;

		err = "invalid replicas entry: invalid device";
		for (i = 0; i < e->nr_devs; i++)
			if (!bch2_dev_exists(sb, mi, e->devs[i]))
				goto err;
	}

	err = "cannot allocate memory";
	cpu_r = __bch2_sb_replicas_to_cpu_replicas(sb_r);
	if (!cpu_r)
		goto err;

	err = check_dup_replicas_entries(cpu_r);
err:
	kfree(cpu_r);
	return err;
}

static void bch2_sb_replicas_to_text(struct printbuf *out,
				     struct bch_sb *sb,
				     struct bch_sb_field *f)
{
	struct bch_sb_field_replicas *r = field_to_type(f, replicas);
	struct bch_replicas_entry *e;
	bool first = true;

	for_each_replicas_entry(r, e) {
		if (!first)
			pr_buf(out, " ");
		first = false;

		replicas_entry_to_text(out, e);
	}
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas = {
	.validate	= bch2_sb_validate_replicas,
	.to_text	= bch2_sb_replicas_to_text,
};

static const char *bch2_sb_validate_replicas_v0(struct bch_sb *sb, struct bch_sb_field *f)
{
	struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
	struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
	struct bch_replicas_cpu *cpu_r = NULL;
	struct bch_replicas_entry_v0 *e;
	const char *err;
	unsigned i;

	for_each_replicas_entry_v0(sb_r, e) {
		err = "invalid replicas entry: invalid data type";
		if (e->data_type >= BCH_DATA_NR)
			goto err;

		err = "invalid replicas entry: no devices";
		if (!e->nr_devs)
			goto err;

		err = "invalid replicas entry: invalid device";
		for (i = 0; i < e->nr_devs; i++)
			if (!bch2_dev_exists(sb, mi, e->devs[i]))
				goto err;
	}

	err = "cannot allocate memory";
	cpu_r = __bch2_sb_replicas_v0_to_cpu_replicas(sb_r);
	if (!cpu_r)
		goto err;

	err = check_dup_replicas_entries(cpu_r);
err:
	kfree(cpu_r);
	return err;
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0 = {
	.validate	= bch2_sb_validate_replicas_v0,
};

/* Query replicas: */

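/*
 * Queries build a search key on the stack; bch_replicas_entry_padded is
 * zeroed first so the memcmp()-based lookup sees the same zero padding as
 * the stored entries.
 */
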
bool bch2_replicas_marked(struct bch_fs *c,
			  enum bch_data_type data_type,
			  struct bch_devs_list devs,
			  bool check_gc_replicas)
{
	struct bch_replicas_entry_padded search;

	if (!devs.nr)
		return true;

	memset(&search, 0, sizeof(search));

	devlist_to_replicas(devs, data_type, &search.e);

	return replicas_has_entry(c, &search.e, check_gc_replicas);
}

bool bch2_bkey_replicas_marked(struct bch_fs *c,
			       enum bkey_type type,
			       struct bkey_s_c k,
			       bool check_gc_replicas)
{
	struct bch_replicas_entry_padded search;

	memset(&search, 0, sizeof(search));

	if (type == BKEY_TYPE_EXTENTS) {
		struct bch_devs_list cached = bch2_bkey_cached_devs(k);
		unsigned i;

		for (i = 0; i < cached.nr; i++)
			if (!bch2_replicas_marked(c, BCH_DATA_CACHED,
					bch2_dev_list_single(cached.devs[i]),
					check_gc_replicas))
				return false;
	}

	bkey_to_replicas(type, k, &search.e);

	return search.e.nr_devs
		? replicas_has_entry(c, &search.e, check_gc_replicas)
		: true;
}

struct replicas_status __bch2_replicas_status(struct bch_fs *c,
					      struct bch_devs_mask online_devs)
{
	struct bch_sb_field_members *mi;
	struct bch_replicas_entry *e;
	struct bch_replicas_cpu *r;
	unsigned i, nr_online, nr_offline;
	struct replicas_status ret;

	memset(&ret, 0, sizeof(ret));

	for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
		ret.replicas[i].redundancy = INT_MAX;

	mi = bch2_sb_get_members(c->disk_sb.sb);

	rcu_read_lock();
	r = rcu_dereference(c->replicas);

	for_each_cpu_replicas_entry(r, e) {
		if (e->data_type >= ARRAY_SIZE(ret.replicas))
			panic("e %p data_type %u\n", e, e->data_type);

		nr_online = nr_offline = 0;

		for (i = 0; i < e->nr_devs; i++) {
			BUG_ON(!bch2_dev_exists(c->disk_sb.sb, mi,
						e->devs[i]));

			if (test_bit(e->devs[i], online_devs.d))
				nr_online++;
			else
				nr_offline++;
		}

		ret.replicas[e->data_type].redundancy =
			min(ret.replicas[e->data_type].redundancy,
			    (int) nr_online - (int) e->nr_required);

		ret.replicas[e->data_type].nr_offline =
			max(ret.replicas[e->data_type].nr_offline,
			    nr_offline);
	}

	rcu_read_unlock();

	for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
		if (ret.replicas[i].redundancy == INT_MAX)
			ret.replicas[i].redundancy = 0;

	return ret;
}

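/*
 * For each data type, redundancy ends up as the minimum over all entries of
 * (devices online - nr_required): zero means no replicas to spare, negative
 * means some data of that type may currently be unavailable. Types with no
 * entries report zero (the INT_MAX sentinel is cleared above).
 */
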
struct replicas_status bch2_replicas_status(struct bch_fs *c)
{
	return __bch2_replicas_status(c, bch2_online_devs(c));
}

static bool have_enough_devs(struct replicas_status s,
			     enum bch_data_type type,
			     bool force_if_degraded,
			     bool force_if_lost)
{
	return (!s.replicas[type].nr_offline || force_if_degraded) &&
		(s.replicas[type].redundancy >= 0 || force_if_lost);
}

bool bch2_have_enough_devs(struct replicas_status s, unsigned flags)
{
	return (have_enough_devs(s, BCH_DATA_JOURNAL,
				 flags & BCH_FORCE_IF_METADATA_DEGRADED,
				 flags & BCH_FORCE_IF_METADATA_LOST) &&
		have_enough_devs(s, BCH_DATA_BTREE,
				 flags & BCH_FORCE_IF_METADATA_DEGRADED,
				 flags & BCH_FORCE_IF_METADATA_LOST) &&
		have_enough_devs(s, BCH_DATA_USER,
				 flags & BCH_FORCE_IF_DATA_DEGRADED,
				 flags & BCH_FORCE_IF_DATA_LOST));
}

int bch2_replicas_online(struct bch_fs *c, bool meta)
{
	struct replicas_status s = bch2_replicas_status(c);

	return (meta
		? min(s.replicas[BCH_DATA_JOURNAL].redundancy,
		      s.replicas[BCH_DATA_BTREE].redundancy)
		: s.replicas[BCH_DATA_USER].redundancy) + 1;
}

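/*
 * Returns a mask of BCH_DATA_* types for which this device appears in some
 * replicas entry, i.e. the kinds of data the device may still hold.
 */
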
unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
{
	struct bch_replicas_entry *e;
	struct bch_replicas_cpu *r;
	unsigned i, ret = 0;

	rcu_read_lock();
	r = rcu_dereference(c->replicas);

	for_each_cpu_replicas_entry(r, e)
		for (i = 0; i < e->nr_devs; i++)
			if (e->devs[i] == ca->dev_idx)
				ret |= 1 << e->data_type;

	rcu_read_unlock();

	return ret;
}