struct bch_replicas_entry_padded {
	struct bch_replicas_entry	e;
	u8				pad[BCH_SB_MEMBERS_MAX];
};

static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
					    struct bch_replicas_cpu *);

/* Replicas tracking - in memory: */

static inline int u8_cmp(u8 l, u8 r)
{
	return (l > r) - (l < r);
}

static void replicas_entry_sort(struct bch_replicas_entry *e)
{
	bubble_sort(e->devs, e->nr_devs, u8_cmp);
}

#define for_each_cpu_replicas_entry(_r, _i)				\
	for (_i = (_r)->entries;					\
	     (void *) (_i) < (void *) (_r)->entries + (_r)->nr * (_r)->entry_size;\
	     _i = (void *) (_i) + (_r)->entry_size)

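/*
 * Note: the in-memory replicas table is a single flat allocation: @nr
 * fixed-size slots of @entry_size bytes each, where @entry_size is the size
 * of the largest entry - which is why iteration and indexing here are done
 * with byte arithmetic rather than normal array indexing.
 */
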
static inline struct bch_replicas_entry *
cpu_replicas_entry(struct bch_replicas_cpu *r, unsigned i)
{
	return (void *) r->entries + r->entry_size * i;
}

static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
{
	eytzinger0_sort(r->entries, r->nr, r->entry_size, memcmp, NULL);
}

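/*
 * Entries are kept in eytzinger (cache-friendly search tree) order, compared
 * with memcmp over the full fixed-size slot: this is what lets
 * __replicas_has_entry() below use eytzinger0_find() instead of a linear
 * scan, and it's why unused slot bytes must always be zeroed.
 */
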
static void replicas_entry_to_text(struct printbuf *out,
				   struct bch_replicas_entry *e)
{
	unsigned i;

	pr_buf(out, "%s: %u/%u [",
	       bch2_data_types[e->data_type],
	       e->nr_required,
	       e->nr_devs);

	for (i = 0; i < e->nr_devs; i++)
		pr_buf(out, i ? " %u" : "%u", e->devs[i]);
	pr_buf(out, "]");
}

void bch2_cpu_replicas_to_text(struct printbuf *out,
			       struct bch_replicas_cpu *r)
{
	struct bch_replicas_entry *e;
	bool first = true;

	for_each_cpu_replicas_entry(r, e) {
		if (!first)
			pr_buf(out, " ");
		first = false;

		replicas_entry_to_text(out, e);
	}
}

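/*
 * Example output, assuming a table with two entries:
 *
 *	user: 1/2 [0 1] btree: 1/1 [2]
 *
 * i.e. data_type: nr_required/nr_devs, followed by the member device indexes.
 */
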
static void extent_to_replicas(struct bkey_s_c k,
			       struct bch_replicas_entry *r)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	r->nr_required	= 1;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		/* cached pointers are tracked separately, as BCH_DATA_CACHED: */
		if (p.ptr.cached)
			continue;

		/* erasure coded extents are accounted via their stripe key: */
		if (p.ec_nr) {
			r->nr_devs = 0;
			break;
		}

		r->devs[r->nr_devs++] = p.ptr.dev;
	}
}

static void stripe_to_replicas(struct bkey_s_c k,
			       struct bch_replicas_entry *r)
{
	struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
	const struct bch_extent_ptr *ptr;

	r->nr_required	= s.v->nr_blocks - s.v->nr_redundant;

	for (ptr = s.v->ptrs;
	     ptr < s.v->ptrs + s.v->nr_blocks;
	     ptr++)
		r->devs[r->nr_devs++] = ptr->dev;
}

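/*
 * For a stripe, nr_required is the number of data blocks: the stripe's
 * contents remain recoverable as long as any nr_blocks - nr_redundant of its
 * blocks are still readable.
 */
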
static void bkey_to_replicas(struct bkey_s_c k,
			     struct bch_replicas_entry *e)
{
	e->nr_devs = 0;

	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
		e->data_type = BCH_DATA_BTREE;
		extent_to_replicas(k, e);
		break;
	case KEY_TYPE_extent:
		e->data_type = BCH_DATA_USER;
		extent_to_replicas(k, e);
		break;
	case KEY_TYPE_stripe:
		e->data_type = BCH_DATA_USER;
		stripe_to_replicas(k, e);
		break;
	}

	replicas_entry_sort(e);
}

static inline void devlist_to_replicas(struct bch_devs_list devs,
				       enum bch_data_type data_type,
				       struct bch_replicas_entry *e)
{
	unsigned i;

	BUG_ON(!data_type ||
	       data_type == BCH_DATA_SB ||
	       data_type >= BCH_DATA_NR);

	e->data_type	= data_type;
	e->nr_devs	= 0;
	e->nr_required	= 1;

	for (i = 0; i < devs.nr; i++)
		e->devs[e->nr_devs++] = devs.devs[i];

	replicas_entry_sort(e);
}

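/*
 * Both constructors above finish with replicas_entry_sort(): device lists
 * are kept sorted so that two entries describing the same set of devices are
 * byte-for-byte identical, which the memcmp-based lookups and duplicate
 * checks in this file rely on.
 */
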
static struct bch_replicas_cpu *
cpu_replicas_add_entry(struct bch_replicas_cpu *old,
		       struct bch_replicas_entry *new_entry)
{
	struct bch_replicas_cpu *new;
	unsigned i, nr, entry_size;

	entry_size = max_t(unsigned, old->entry_size,
			   replicas_entry_bytes(new_entry));
	nr = old->nr + 1;

	new = kzalloc(sizeof(struct bch_replicas_cpu) +
		      nr * entry_size, GFP_NOIO);
	if (!new)
		return NULL;

	new->nr		= nr;
	new->entry_size	= entry_size;

	for (i = 0; i < old->nr; i++)
		memcpy(cpu_replicas_entry(new, i),
		       cpu_replicas_entry(old, i),
		       old->entry_size);

	memcpy(cpu_replicas_entry(new, old->nr),
	       new_entry,
	       replicas_entry_bytes(new_entry));

	bch2_cpu_replicas_sort(new);
	return new;
}

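/*
 * Tables are never modified in place: adding an entry builds a complete new
 * copy, which the caller publishes with rcu_assign_pointer() once it has
 * been committed. RCU readers on the fast path therefore always see a
 * consistent, fully sorted table.
 */
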
static bool __replicas_has_entry(struct bch_replicas_cpu *r,
				 struct bch_replicas_entry *search)
{
	return replicas_entry_bytes(search) <= r->entry_size &&
		eytzinger0_find(r->entries, r->nr,
				r->entry_size,
				memcmp, search) < r->nr;
}

static bool replicas_has_entry(struct bch_fs *c,
			       struct bch_replicas_entry *search,
			       bool check_gc_replicas)
{
	struct bch_replicas_cpu *r, *gc_r;
	bool marked;

	rcu_read_lock();
	r = rcu_dereference(c->replicas);
	marked = __replicas_has_entry(r, search) &&
		(!check_gc_replicas ||
		 likely(!(gc_r = rcu_dereference(c->replicas_gc))) ||
		 __replicas_has_entry(gc_r, search));
	rcu_read_unlock();

	return marked;
}

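/*
 * While a replicas GC is in progress c->replicas_gc is non-NULL, and an
 * entry only counts as marked if it is present in both tables - otherwise GC
 * could drop an entry that is still in use.
 */
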
noinline
static int bch2_mark_replicas_slowpath(struct bch_fs *c,
				struct bch_replicas_entry *new_entry)
{
	struct bch_replicas_cpu *old_gc, *new_gc = NULL, *old_r, *new_r = NULL;
	int ret = -ENOMEM;

	mutex_lock(&c->sb_lock);

	old_gc = rcu_dereference_protected(c->replicas_gc,
					   lockdep_is_held(&c->sb_lock));
	if (old_gc && !__replicas_has_entry(old_gc, new_entry)) {
		new_gc = cpu_replicas_add_entry(old_gc, new_entry);
		if (!new_gc)
			goto err;
	}

	old_r = rcu_dereference_protected(c->replicas,
					  lockdep_is_held(&c->sb_lock));
	if (!__replicas_has_entry(old_r, new_entry)) {
		new_r = cpu_replicas_add_entry(old_r, new_entry);
		if (!new_r)
			goto err;

		ret = bch2_cpu_replicas_to_sb_replicas(c, new_r);
		if (ret)
			goto err;
	}

	/* allocations done, now commit: */

	if (new_r)
		bch2_write_super(c);

	/* don't update in memory replicas until changes are persistent */

	if (new_gc) {
		rcu_assign_pointer(c->replicas_gc, new_gc);
		kfree_rcu(old_gc, rcu);
	}

	if (new_r) {
		rcu_assign_pointer(c->replicas, new_r);
		kfree_rcu(old_r, rcu);
	}

	mutex_unlock(&c->sb_lock);
	return 0;
err:
	mutex_unlock(&c->sb_lock);
	kfree(new_gc);
	kfree(new_r);
	return ret;
}

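/*
 * Ordering matters above: the superblock is rewritten and written out before
 * the new tables are published, so the in-memory table can never claim a
 * replicas entry that a crash would lose.
 */
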
static int __bch2_mark_replicas(struct bch_fs *c,
				struct bch_replicas_entry *devs)
{
	return likely(replicas_has_entry(c, devs, true))
		? 0
		: bch2_mark_replicas_slowpath(c, devs);
}

int bch2_mark_replicas(struct bch_fs *c,
		       enum bch_data_type data_type,
		       struct bch_devs_list devs)
{
	struct bch_replicas_entry_padded search;

	if (!devs.nr)
		return 0;

	memset(&search, 0, sizeof(search));

	BUG_ON(devs.nr >= BCH_REPLICAS_MAX);

	devlist_to_replicas(devs, data_type, &search.e);

	return __bch2_mark_replicas(c, &search.e);
}

int bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
{
	struct bch_replicas_entry_padded search;
	struct bch_devs_list cached = bch2_bkey_cached_devs(k);
	unsigned i;
	int ret;

	memset(&search, 0, sizeof(search));

	for (i = 0; i < cached.nr; i++)
		if ((ret = bch2_mark_replicas(c, BCH_DATA_CACHED,
				bch2_dev_list_single(cached.devs[i]))))
			return ret;

	bkey_to_replicas(k, &search.e);

	return search.e.nr_devs
		? __bch2_mark_replicas(c, &search.e)
		: 0;
}

int bch2_replicas_gc_end(struct bch_fs *c, int ret)
{
	struct bch_replicas_cpu *new_r, *old_r;

	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);

	new_r = rcu_dereference_protected(c->replicas_gc,
					  lockdep_is_held(&c->sb_lock));
	rcu_assign_pointer(c->replicas_gc, NULL);

	if (ret)
		goto err;

	if (bch2_cpu_replicas_to_sb_replicas(c, new_r)) {
		ret = -ENOSPC;
		goto err;
	}

	bch2_write_super(c);

	/* don't update in memory replicas until changes are persistent */

	old_r = rcu_dereference_protected(c->replicas,
					  lockdep_is_held(&c->sb_lock));

	rcu_assign_pointer(c->replicas, new_r);
	kfree_rcu(old_r, rcu);
out:
	mutex_unlock(&c->sb_lock);
	return ret;
err:
	kfree_rcu(new_r, rcu);
	goto out;
}

int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
{
	struct bch_replicas_cpu *dst, *src;
	struct bch_replicas_entry *e;

	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);
	BUG_ON(c->replicas_gc);

	src = rcu_dereference_protected(c->replicas,
					lockdep_is_held(&c->sb_lock));

	dst = kzalloc(sizeof(struct bch_replicas_cpu) +
		      src->nr * src->entry_size, GFP_NOIO);
	if (!dst) {
		mutex_unlock(&c->sb_lock);
		return -ENOMEM;
	}

	dst->nr		= 0;
	dst->entry_size	= src->entry_size;

	for_each_cpu_replicas_entry(src, e)
		if (!((1 << e->data_type) & typemask))
			memcpy(cpu_replicas_entry(dst, dst->nr++),
			       e, src->entry_size);

	bch2_cpu_replicas_sort(dst);

	rcu_assign_pointer(c->replicas_gc, dst);
	mutex_unlock(&c->sb_lock);

	return 0;
}

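/*
 * GC protocol: gc_start() seeds c->replicas_gc with every entry whose data
 * type is _not_ being gc'd; while GC walks existing data, marking re-adds
 * the entries still in use (see replicas_has_entry()); gc_end() then commits
 * the rebuilt table to the superblock and swaps it in, dropping any entries
 * that were never re-marked.
 */
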
/* Replicas tracking - superblock: */

static struct bch_replicas_cpu *
__bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r)
{
	struct bch_replicas_entry *e, *dst;
	struct bch_replicas_cpu *cpu_r;
	unsigned nr = 0, entry_size = 0, idx = 0;

	for_each_replicas_entry(sb_r, e) {
		entry_size = max_t(unsigned, entry_size,
				   replicas_entry_bytes(e));
		nr++;
	}

	cpu_r = kzalloc(sizeof(struct bch_replicas_cpu) +
			nr * entry_size, GFP_NOIO);
	if (!cpu_r)
		return NULL;

	cpu_r->nr		= nr;
	cpu_r->entry_size	= entry_size;

	for_each_replicas_entry(sb_r, e) {
		dst = cpu_replicas_entry(cpu_r, idx++);
		memcpy(dst, e, replicas_entry_bytes(e));
		replicas_entry_sort(dst);
	}

	return cpu_r;
}

static struct bch_replicas_cpu *
__bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r)
{
	struct bch_replicas_entry_v0 *e;
	struct bch_replicas_cpu *cpu_r;
	unsigned nr = 0, entry_size = 0, idx = 0;

	for_each_replicas_entry(sb_r, e) {
		entry_size = max_t(unsigned, entry_size,
				   replicas_entry_bytes(e));
		nr++;
	}

	entry_size += sizeof(struct bch_replicas_entry) -
		sizeof(struct bch_replicas_entry_v0);

	cpu_r = kzalloc(sizeof(struct bch_replicas_cpu) +
			nr * entry_size, GFP_NOIO);
	if (!cpu_r)
		return NULL;

	cpu_r->nr		= nr;
	cpu_r->entry_size	= entry_size;

	for_each_replicas_entry(sb_r, e) {
		struct bch_replicas_entry *dst =
			cpu_replicas_entry(cpu_r, idx++);

		dst->data_type	= e->data_type;
		dst->nr_devs	= e->nr_devs;
		dst->nr_required = 1;
		memcpy(dst->devs, e->devs, e->nr_devs);
		replicas_entry_sort(dst);
	}

	return cpu_r;
}

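/*
 * v0 entries have no nr_required field; anything read from a v0 superblock
 * field is widened to the current in-memory layout with nr_required = 1
 * (plain replication), which is all the v0 format could express.
 */
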
int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
{
	struct bch_sb_field_replicas *sb_v1;
	struct bch_sb_field_replicas_v0 *sb_v0;
	struct bch_replicas_cpu *cpu_r, *old_r;

	if ((sb_v1 = bch2_sb_get_replicas(c->disk_sb.sb)))
		cpu_r = __bch2_sb_replicas_to_cpu_replicas(sb_v1);
	else if ((sb_v0 = bch2_sb_get_replicas_v0(c->disk_sb.sb)))
		cpu_r = __bch2_sb_replicas_v0_to_cpu_replicas(sb_v0);
	else
		cpu_r = kzalloc(sizeof(struct bch_replicas_cpu), GFP_NOIO);

	if (!cpu_r)
		return -ENOMEM;

	bch2_cpu_replicas_sort(cpu_r);

	old_r = rcu_dereference_check(c->replicas, lockdep_is_held(&c->sb_lock));
	rcu_assign_pointer(c->replicas, cpu_r);
	if (old_r)
		kfree_rcu(old_r, rcu);

	return 0;
}

static int bch2_cpu_replicas_to_sb_replicas_v0(struct bch_fs *c,
					       struct bch_replicas_cpu *r)
{
	struct bch_sb_field_replicas_v0 *sb_r;
	struct bch_replicas_entry_v0 *dst;
	struct bch_replicas_entry *src;
	size_t bytes;

	bytes = sizeof(struct bch_sb_field_replicas);

	/* v0 entries are one byte smaller - no nr_required field: */
	for_each_cpu_replicas_entry(r, src)
		bytes += replicas_entry_bytes(src) - 1;

	sb_r = bch2_sb_resize_replicas_v0(&c->disk_sb,
			DIV_ROUND_UP(bytes, sizeof(u64)));
	if (!sb_r)
		return -ENOSPC;

	bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas);
	sb_r = bch2_sb_get_replicas_v0(c->disk_sb.sb);

	memset(&sb_r->entries, 0,
	       vstruct_end(&sb_r->field) -
	       (void *) &sb_r->entries);

	dst = sb_r->entries;
	for_each_cpu_replicas_entry(r, src) {
		dst->data_type	= src->data_type;
		dst->nr_devs	= src->nr_devs;
		memcpy(dst->devs, src->devs, src->nr_devs);

		dst = replicas_entry_next(dst);

		BUG_ON((void *) dst > vstruct_end(&sb_r->field));
	}

	return 0;
}

static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
					    struct bch_replicas_cpu *r)
{
	struct bch_sb_field_replicas *sb_r;
	struct bch_replicas_entry *dst, *src;
	bool need_v1 = false;
	size_t bytes;

	bytes = sizeof(struct bch_sb_field_replicas);

	for_each_cpu_replicas_entry(r, src) {
		bytes += replicas_entry_bytes(src);
		if (src->nr_required != 1)
			need_v1 = true;
	}

	if (!need_v1)
		return bch2_cpu_replicas_to_sb_replicas_v0(c, r);

	sb_r = bch2_sb_resize_replicas(&c->disk_sb,
			DIV_ROUND_UP(bytes, sizeof(u64)));
	if (!sb_r)
		return -ENOSPC;

	bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas_v0);
	sb_r = bch2_sb_get_replicas(c->disk_sb.sb);

	memset(&sb_r->entries, 0,
	       vstruct_end(&sb_r->field) -
	       (void *) &sb_r->entries);

	dst = sb_r->entries;
	for_each_cpu_replicas_entry(r, src) {
		memcpy(dst, src, replicas_entry_bytes(src));

		dst = replicas_entry_next(dst);

		BUG_ON((void *) dst > vstruct_end(&sb_r->field));
	}

	return 0;
}

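/*
 * The v0 field is written whenever every entry has nr_required == 1, and the
 * unused variant is deleted, so older versions that only understand v0 can
 * still read the superblock; only filesystems actually using
 * nr_required != 1 (e.g. erasure coded stripes) need the v1 field.
 */
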
static const char *check_dup_replicas_entries(struct bch_replicas_cpu *cpu_r)
{
	unsigned i;

	sort_cmp_size(cpu_r->entries,
		      cpu_r->nr,
		      cpu_r->entry_size,
		      memcmp, NULL);

	for (i = 0; i + 1 < cpu_r->nr; i++) {
		struct bch_replicas_entry *l =
			cpu_replicas_entry(cpu_r, i);
		struct bch_replicas_entry *r =
			cpu_replicas_entry(cpu_r, i + 1);

		BUG_ON(memcmp(l, r, cpu_r->entry_size) > 0);

		if (!memcmp(l, r, cpu_r->entry_size))
			return "duplicate replicas entry";
	}

	return NULL;
}

static const char *bch2_sb_validate_replicas(struct bch_sb *sb, struct bch_sb_field *f)
{
	struct bch_sb_field_replicas *sb_r = field_to_type(f, replicas);
	struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
	struct bch_replicas_cpu *cpu_r = NULL;
	struct bch_replicas_entry *e;
	const char *err;
	unsigned i;

	for_each_replicas_entry(sb_r, e) {
		err = "invalid replicas entry: invalid data type";
		if (e->data_type >= BCH_DATA_NR)
			goto err;

		err = "invalid replicas entry: no devices";
		if (!e->nr_devs)
			goto err;

		err = "invalid replicas entry: bad nr_required";
		if (!e->nr_required ||
		    (e->nr_required > 1 &&
		     e->nr_required >= e->nr_devs))
			goto err;

		err = "invalid replicas entry: invalid device";
		for (i = 0; i < e->nr_devs; i++)
			if (!bch2_dev_exists(sb, mi, e->devs[i]))
				goto err;
	}

	err = "cannot allocate memory";
	cpu_r = __bch2_sb_replicas_to_cpu_replicas(sb_r);
	if (!cpu_r)
		goto err;

	err = check_dup_replicas_entries(cpu_r);
err:
	kfree(cpu_r);
	return err;
}

static void bch2_sb_replicas_to_text(struct printbuf *out,
				     struct bch_sb *sb,
				     struct bch_sb_field *f)
{
	struct bch_sb_field_replicas *r = field_to_type(f, replicas);
	struct bch_replicas_entry *e;
	bool first = true;

	for_each_replicas_entry(r, e) {
		if (!first)
			pr_buf(out, " ");
		first = false;

		replicas_entry_to_text(out, e);
	}
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas = {
	.validate	= bch2_sb_validate_replicas,
	.to_text	= bch2_sb_replicas_to_text,
};

static const char *bch2_sb_validate_replicas_v0(struct bch_sb *sb, struct bch_sb_field *f)
{
	struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
	struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
	struct bch_replicas_cpu *cpu_r = NULL;
	struct bch_replicas_entry_v0 *e;
	const char *err;
	unsigned i;

	for_each_replicas_entry_v0(sb_r, e) {
		err = "invalid replicas entry: invalid data type";
		if (e->data_type >= BCH_DATA_NR)
			goto err;

		err = "invalid replicas entry: no devices";
		if (!e->nr_devs)
			goto err;

		err = "invalid replicas entry: invalid device";
		for (i = 0; i < e->nr_devs; i++)
			if (!bch2_dev_exists(sb, mi, e->devs[i]))
				goto err;
	}

	err = "cannot allocate memory";
	cpu_r = __bch2_sb_replicas_v0_to_cpu_replicas(sb_r);
	if (!cpu_r)
		goto err;

	err = check_dup_replicas_entries(cpu_r);
err:
	kfree(cpu_r);
	return err;
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0 = {
	.validate	= bch2_sb_validate_replicas_v0,
};

/* Query replicas: */

bool bch2_replicas_marked(struct bch_fs *c,
			  enum bch_data_type data_type,
			  struct bch_devs_list devs,
			  bool check_gc_replicas)
{
	struct bch_replicas_entry_padded search;

	if (!devs.nr)
		return true;

	memset(&search, 0, sizeof(search));

	devlist_to_replicas(devs, data_type, &search.e);

	return replicas_has_entry(c, &search.e, check_gc_replicas);
}

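/*
 * Typical write-path usage (a sketch; hypothetical caller, not code from
 * this file) - check first, take the update path only on a miss:
 *
 *	if (!bch2_replicas_marked(c, BCH_DATA_USER, devs, true)) {
 *		ret = bch2_mark_replicas(c, BCH_DATA_USER, devs);
 *		if (ret)
 *			return ret;
 *	}
 */
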
bool bch2_bkey_replicas_marked(struct bch_fs *c,
			       struct bkey_s_c k,
			       bool check_gc_replicas)
{
	struct bch_replicas_entry_padded search;
	struct bch_devs_list cached = bch2_bkey_cached_devs(k);
	unsigned i;

	memset(&search, 0, sizeof(search));

	for (i = 0; i < cached.nr; i++)
		if (!bch2_replicas_marked(c, BCH_DATA_CACHED,
					  bch2_dev_list_single(cached.devs[i]),
					  check_gc_replicas))
			return false;

	bkey_to_replicas(k, &search.e);

	return search.e.nr_devs
		? replicas_has_entry(c, &search.e, check_gc_replicas)
		: true;
}

struct replicas_status __bch2_replicas_status(struct bch_fs *c,
					      struct bch_devs_mask online_devs)
{
	struct bch_sb_field_members *mi;
	struct bch_replicas_entry *e;
	struct bch_replicas_cpu *r;
	unsigned i, nr_online, nr_offline;
	struct replicas_status ret;

	memset(&ret, 0, sizeof(ret));

	for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
		ret.replicas[i].redundancy = INT_MAX;

	mi = bch2_sb_get_members(c->disk_sb.sb);
	rcu_read_lock();

	r = rcu_dereference(c->replicas);

	for_each_cpu_replicas_entry(r, e) {
		if (e->data_type >= ARRAY_SIZE(ret.replicas))
			panic("e %p data_type %u\n", e, e->data_type);

		nr_online = nr_offline = 0;

		for (i = 0; i < e->nr_devs; i++) {
			BUG_ON(!bch2_dev_exists(c->disk_sb.sb, mi,
						e->devs[i]));

			if (test_bit(e->devs[i], online_devs.d))
				nr_online++;
			else
				nr_offline++;
		}

		ret.replicas[e->data_type].redundancy =
			min(ret.replicas[e->data_type].redundancy,
			    (int) nr_online - (int) e->nr_required);

		ret.replicas[e->data_type].nr_offline =
			max(ret.replicas[e->data_type].nr_offline,
			    nr_offline);
	}

	rcu_read_unlock();

	for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
		if (ret.replicas[i].redundancy == INT_MAX)
			ret.replicas[i].redundancy = 0;

	return ret;
}

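/*
 * Worked example: an entry with nr_devs = 3, nr_required = 1 and one of its
 * members offline gives redundancy = 2 - 1 = 1 - one more device can fail
 * before the data described by that entry becomes unavailable. The result is
 * the minimum over all entries of a data type, i.e. the worst case.
 */
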
struct replicas_status bch2_replicas_status(struct bch_fs *c)
{
	return __bch2_replicas_status(c, bch2_online_devs(c));
}

static bool have_enough_devs(struct replicas_status s,
			     enum bch_data_type type,
			     bool force_if_degraded,
			     bool force_if_lost)
{
	return (!s.replicas[type].nr_offline || force_if_degraded) &&
		(s.replicas[type].redundancy >= 0 || force_if_lost);
}

bool bch2_have_enough_devs(struct replicas_status s, unsigned flags)
{
	return (have_enough_devs(s, BCH_DATA_JOURNAL,
				 flags & BCH_FORCE_IF_METADATA_DEGRADED,
				 flags & BCH_FORCE_IF_METADATA_LOST) &&
		have_enough_devs(s, BCH_DATA_BTREE,
				 flags & BCH_FORCE_IF_METADATA_DEGRADED,
				 flags & BCH_FORCE_IF_METADATA_LOST) &&
		have_enough_devs(s, BCH_DATA_USER,
				 flags & BCH_FORCE_IF_DATA_DEGRADED,
				 flags & BCH_FORCE_IF_DATA_LOST));
}

int bch2_replicas_online(struct bch_fs *c, bool meta)
{
	struct replicas_status s = bch2_replicas_status(c);

	return (meta
		? min(s.replicas[BCH_DATA_JOURNAL].redundancy,
		      s.replicas[BCH_DATA_BTREE].redundancy)
		: s.replicas[BCH_DATA_USER].redundancy) + 1;
}

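/*
 * bch2_dev_has_data() returns a bitmask of BCH_DATA_* types: bit
 * (1 << type) is set iff some replicas entry says device @ca holds data of
 * that type.
 */
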
unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
{
	struct bch_replicas_entry *e;
	struct bch_replicas_cpu *r;
	unsigned i, ret = 0;

	rcu_read_lock();
	r = rcu_dereference(c->replicas);

	for_each_cpu_replicas_entry(r, e)
		for (i = 0; i < e->nr_devs; i++)
			if (e->devs[i] == ca->dev_idx)
				ret |= 1 << e->data_type;

	rcu_read_unlock();

	return ret;
}