struct bch_replicas_entry_padded {
	struct bch_replicas_entry	e;
	u8				pad[BCH_SB_MEMBERS_MAX];
};
static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
					    struct bch_replicas_cpu *);
/* Replicas tracking - in memory: */
static inline int u8_cmp(u8 l, u8 r)
{
	return (l > r) - (l < r);
}
static void replicas_entry_sort(struct bch_replicas_entry *e)
{
	bubble_sort(e->devs, e->nr_devs, u8_cmp);
}
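/*
 * A bch_replicas_cpu table is one contiguous allocation of r->nr entries;
 * entries are variable length but padded out to r->entry_size bytes, so the
 * iterator below advances by byte offset. Usage sketch (hypothetical caller):
 *
 *	struct bch_replicas_entry *e;
 *
 *	for_each_cpu_replicas_entry(&c->replicas, e)
 *		pr_info("data_type %u\n", e->data_type);
 */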
#define for_each_cpu_replicas_entry(_r, _i)				\
	for (_i = (_r)->entries;					\
	     (void *) (_i) < (void *) (_r)->entries + (_r)->nr * (_r)->entry_size;\
	     _i = (void *) (_i) + (_r)->entry_size)
static inline struct bch_replicas_entry *
cpu_replicas_entry(struct bch_replicas_cpu *r, unsigned i)
{
	return (void *) r->entries + r->entry_size * i;
}
static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
{
	eytzinger0_sort(r->entries, r->nr, r->entry_size, memcmp, NULL);
}
static void replicas_entry_to_text(struct printbuf *out,
				   struct bch_replicas_entry *e)
{
	unsigned i;

	pr_buf(out, "%s: %u/%u [",
	       bch2_data_types[e->data_type],
	       e->nr_required, e->nr_devs);

	for (i = 0; i < e->nr_devs; i++)
		pr_buf(out, i ? " %u" : "%u", e->devs[i]);
	pr_buf(out, "]");
}
void bch2_cpu_replicas_to_text(struct printbuf *out,
			       struct bch_replicas_cpu *r)
{
	struct bch_replicas_entry *e;
	bool first = true;

	for_each_cpu_replicas_entry(r, e) {
		if (!first)
			pr_buf(out, " ");
		first = false;
		replicas_entry_to_text(out, e);
	}
}
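/*
 * Helpers for building a replicas entry from a key: dirty extent pointers
 * contribute their device, cached pointers are tracked separately, and a
 * stripe needs nr_blocks - nr_redundant of its devices to reconstruct data.
 */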
static void extent_to_replicas(struct bkey_s_c k,
			       struct bch_replicas_entry *r)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	r->nr_required = 1;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		/* cached pointers aren't tracked in the replicas tables: */
		if (p.ptr.cached)
			continue;
		/* erasure coded extents are tracked via their stripe key: */
		if (p.ec_nr) {
			r->nr_devs = 0;
			break;
		}
		r->devs[r->nr_devs++] = p.ptr.dev;
	}
}
static void stripe_to_replicas(struct bkey_s_c k,
			       struct bch_replicas_entry *r)
{
	struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
	const struct bch_extent_ptr *ptr;

	r->nr_required = s.v->nr_blocks - s.v->nr_redundant;

	for (ptr = s.v->ptrs;
	     ptr < s.v->ptrs + s.v->nr_blocks;
	     ptr++)
		r->devs[r->nr_devs++] = ptr->dev;
}
static void bkey_to_replicas(struct bkey_s_c k,
			     struct bch_replicas_entry *e)
{
	e->nr_devs = 0;

	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
		e->data_type = BCH_DATA_BTREE;
		extent_to_replicas(k, e);
		break;
	case KEY_TYPE_extent:
		e->data_type = BCH_DATA_USER;
		extent_to_replicas(k, e);
		break;
	case KEY_TYPE_stripe:
		e->data_type = BCH_DATA_USER;
		stripe_to_replicas(k, e);
		break;
	}

	replicas_entry_sort(e);
}
static inline void devlist_to_replicas(struct bch_devs_list devs,
				       enum bch_data_type data_type,
				       struct bch_replicas_entry *e)
{
	unsigned i;

	BUG_ON(!data_type ||
	       data_type == BCH_DATA_SB ||
	       data_type >= BCH_DATA_NR);

	e->data_type	= data_type;
	e->nr_devs	= 0;
	e->nr_required	= 1;

	for (i = 0; i < devs.nr; i++)
		e->devs[e->nr_devs++] = devs.devs[i];

	replicas_entry_sort(e);
}
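/*
 * Returns a copy of @old with @new_entry added and the table resorted; on
 * allocation failure the returned table has a NULL .entries, which callers
 * must check. @old is left untouched and still owned by the caller.
 */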
static struct bch_replicas_cpu
cpu_replicas_add_entry(struct bch_replicas_cpu *old,
		       struct bch_replicas_entry *new_entry)
{
	unsigned i;
	struct bch_replicas_cpu new = {
		.nr		= old->nr + 1,
		.entry_size	= max_t(unsigned, old->entry_size,
					replicas_entry_bytes(new_entry)),
	};

	new.entries = kcalloc(new.nr, new.entry_size, GFP_NOIO);
	if (!new.entries)
		return new;

	for (i = 0; i < old->nr; i++)
		memcpy(cpu_replicas_entry(&new, i),
		       cpu_replicas_entry(old, i),
		       old->entry_size);

	memcpy(cpu_replicas_entry(&new, old->nr),
	       new_entry,
	       replicas_entry_bytes(new_entry));

	bch2_cpu_replicas_sort(&new);
	return new;
}
static bool __replicas_has_entry(struct bch_replicas_cpu *r,
				 struct bch_replicas_entry *search)
{
	return replicas_entry_bytes(search) <= r->entry_size &&
		eytzinger0_find(r->entries, r->nr,
				r->entry_size,
				memcmp, search) < r->nr;
}
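/*
 * c->replicas and c->replicas_gc are read with mark_lock held for reading;
 * updates take it for writing before swapping in a new table.
 */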
static bool replicas_has_entry(struct bch_fs *c,
			       struct bch_replicas_entry *search,
			       bool check_gc_replicas)
{
	bool marked;

	percpu_down_read_preempt_disable(&c->mark_lock);
	marked = __replicas_has_entry(&c->replicas, search) &&
		(!check_gc_replicas ||
		 likely(!c->replicas_gc.entries) ||
		 __replicas_has_entry(&c->replicas_gc, search));
	percpu_up_read_preempt_enable(&c->mark_lock);

	return marked;
}
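/*
 * Slowpath for marking an entry not yet in the table: allocate enlarged
 * copies of the affected tables, persist the new entry to the superblock,
 * and only then swap the new tables in under mark_lock, so the in-memory
 * view never gets ahead of what is on disk.
 */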
static int bch2_mark_replicas_slowpath(struct bch_fs *c,
				       struct bch_replicas_entry *new_entry)
{
	struct bch_replicas_cpu new_r, new_gc;
	int ret = -ENOMEM;

	memset(&new_r, 0, sizeof(new_r));
	memset(&new_gc, 0, sizeof(new_gc));

	mutex_lock(&c->sb_lock);

	if (c->replicas_gc.entries &&
	    !__replicas_has_entry(&c->replicas_gc, new_entry)) {
		new_gc = cpu_replicas_add_entry(&c->replicas_gc, new_entry);
		if (!new_gc.entries)
			goto err;
	}

	if (!__replicas_has_entry(&c->replicas, new_entry)) {
		new_r = cpu_replicas_add_entry(&c->replicas, new_entry);
		if (!new_r.entries)
			goto err;

		ret = bch2_cpu_replicas_to_sb_replicas(c, &new_r);
		if (ret)
			goto err;
	}

	if (!new_r.entries &&
	    !new_gc.entries)
		goto out;

	/* allocations done, now commit: */
	if (new_r.entries)
		bch2_write_super(c);

	/* don't update in memory replicas until changes are persistent */
	percpu_down_write(&c->mark_lock);
	if (new_r.entries)
		swap(new_r, c->replicas);
	if (new_gc.entries)
		swap(new_gc, c->replicas_gc);
	percpu_up_write(&c->mark_lock);
out:
	ret = 0;
err:
	mutex_unlock(&c->sb_lock);

	kfree(new_r.entries);
	kfree(new_gc.entries);

	return ret;
}
static int __bch2_mark_replicas(struct bch_fs *c,
				struct bch_replicas_entry *devs)
{
	return likely(replicas_has_entry(c, devs, true))
		? 0
		: bch2_mark_replicas_slowpath(c, devs);
}
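/*
 * Mark @devs as holding replicated data of @data_type, updating the
 * superblock if this combination of devices hasn't been seen before:
 */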
int bch2_mark_replicas(struct bch_fs *c,
		       enum bch_data_type data_type,
		       struct bch_devs_list devs)
{
	struct bch_replicas_entry_padded search;

	if (!devs.nr)
		return 0;

	memset(&search, 0, sizeof(search));

	BUG_ON(devs.nr >= BCH_REPLICAS_MAX);

	devlist_to_replicas(devs, data_type, &search.e);

	return __bch2_mark_replicas(c, &search.e);
}
int bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
{
	struct bch_replicas_entry_padded search;
	struct bch_devs_list cached = bch2_bkey_cached_devs(k);
	unsigned i;
	int ret;

	memset(&search, 0, sizeof(search));

	for (i = 0; i < cached.nr; i++)
		if ((ret = bch2_mark_replicas(c, BCH_DATA_CACHED,
				bch2_dev_list_single(cached.devs[i]))))
			return ret;

	bkey_to_replicas(k, &search.e);

	return search.e.nr_devs
		? __bch2_mark_replicas(c, &search.e)
		: 0;
}
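/*
 * Replicas GC: bch2_replicas_gc_start() builds a table containing only the
 * entries whose data types are not in @typemask; while GC runs, newly marked
 * entries are added to both tables. bch2_replicas_gc_end() then persists the
 * GC table and swaps it in, dropping stale entries that were never re-marked.
 */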
int bch2_replicas_gc_end(struct bch_fs *c, int ret)
{
	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);

	if (ret)
		goto err;

	if (bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc)) {
		ret = -ENOSPC;
		goto err;
	}

	bch2_write_super(c);

	/* don't update in memory replicas until changes are persistent */
err:
	percpu_down_write(&c->mark_lock);
	if (!ret)
		swap(c->replicas, c->replicas_gc);

	kfree(c->replicas_gc.entries);
	c->replicas_gc.entries = NULL;
	percpu_up_write(&c->mark_lock);

	mutex_unlock(&c->sb_lock);
	return ret;
}
int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
{
	struct bch_replicas_entry *e;
	unsigned i = 0;

	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);
	BUG_ON(c->replicas_gc.entries);

	c->replicas_gc.nr		= 0;
	c->replicas_gc.entry_size	= 0;

	for_each_cpu_replicas_entry(&c->replicas, e)
		if (!((1 << e->data_type) & typemask)) {
			c->replicas_gc.nr++;
			c->replicas_gc.entry_size =
				max_t(unsigned, c->replicas_gc.entry_size,
				      replicas_entry_bytes(e));
		}

	c->replicas_gc.entries = kcalloc(c->replicas_gc.nr,
					 c->replicas_gc.entry_size,
					 GFP_NOIO);
	if (!c->replicas_gc.entries) {
		mutex_unlock(&c->sb_lock);
		return -ENOMEM;
	}

	for_each_cpu_replicas_entry(&c->replicas, e)
		if (!((1 << e->data_type) & typemask))
			memcpy(cpu_replicas_entry(&c->replicas_gc, i++),
			       e, c->replicas_gc.entry_size);

	bch2_cpu_replicas_sort(&c->replicas_gc);
	mutex_unlock(&c->sb_lock);

	return 0;
}
/* Replicas tracking - superblock: */
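/*
 * Two on-disk encodings: bch_replicas_entry_v0 lacks the nr_required field
 * (implicitly 1), so the v0 field is written whenever every entry has
 * nr_required == 1 and the newer format only when actually needed.
 */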
static int
__bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r,
				   struct bch_replicas_cpu *cpu_r)
{
	struct bch_replicas_entry *e, *dst;
	unsigned nr = 0, entry_size = 0, idx = 0;

	for_each_replicas_entry(sb_r, e) {
		entry_size = max_t(unsigned, entry_size,
				   replicas_entry_bytes(e));
		nr++;
	}

	cpu_r->entries = kcalloc(nr, entry_size, GFP_NOIO);
	if (!cpu_r->entries)
		return -ENOMEM;

	cpu_r->nr		= nr;
	cpu_r->entry_size	= entry_size;

	for_each_replicas_entry(sb_r, e) {
		dst = cpu_replicas_entry(cpu_r, idx++);
		memcpy(dst, e, replicas_entry_bytes(e));
		replicas_entry_sort(dst);
	}

	return 0;
}
static int
__bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
				      struct bch_replicas_cpu *cpu_r)
{
	struct bch_replicas_entry_v0 *e;
	unsigned nr = 0, entry_size = 0, idx = 0;

	for_each_replicas_entry(sb_r, e) {
		entry_size = max_t(unsigned, entry_size,
				   replicas_entry_bytes(e));
		nr++;
	}

	/* account for the nr_required field the v0 entries lack: */
	entry_size += sizeof(struct bch_replicas_entry) -
		sizeof(struct bch_replicas_entry_v0);

	cpu_r->entries = kcalloc(nr, entry_size, GFP_NOIO);
	if (!cpu_r->entries)
		return -ENOMEM;

	cpu_r->nr		= nr;
	cpu_r->entry_size	= entry_size;

	for_each_replicas_entry(sb_r, e) {
		struct bch_replicas_entry *dst =
			cpu_replicas_entry(cpu_r, idx++);

		dst->data_type	= e->data_type;
		dst->nr_devs	= e->nr_devs;
		dst->nr_required = 1;
		memcpy(dst->devs, e->devs, e->nr_devs);
		replicas_entry_sort(dst);
	}

	return 0;
}
int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
{
	struct bch_sb_field_replicas *sb_v1;
	struct bch_sb_field_replicas_v0 *sb_v0;
	struct bch_replicas_cpu new_r = { 0, 0, NULL };
	int ret = 0;

	if ((sb_v1 = bch2_sb_get_replicas(c->disk_sb.sb)))
		ret = __bch2_sb_replicas_to_cpu_replicas(sb_v1, &new_r);
	else if ((sb_v0 = bch2_sb_get_replicas_v0(c->disk_sb.sb)))
		ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_v0, &new_r);

	if (ret)
		return -ENOMEM;

	bch2_cpu_replicas_sort(&new_r);

	percpu_down_write(&c->mark_lock);
	swap(c->replicas, new_r);
	percpu_up_write(&c->mark_lock);

	kfree(new_r.entries);

	return 0;
}
static int bch2_cpu_replicas_to_sb_replicas_v0(struct bch_fs *c,
					       struct bch_replicas_cpu *r)
{
	struct bch_sb_field_replicas_v0 *sb_r;
	struct bch_replicas_entry_v0 *dst;
	struct bch_replicas_entry *src;
	size_t bytes;

	bytes = sizeof(struct bch_sb_field_replicas);

	for_each_cpu_replicas_entry(r, src)
		bytes += replicas_entry_bytes(src) - 1;

	sb_r = bch2_sb_resize_replicas_v0(&c->disk_sb,
			DIV_ROUND_UP(bytes, sizeof(u64)));
	if (!sb_r)
		return -ENOSPC;

	bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas);
	sb_r = bch2_sb_get_replicas_v0(c->disk_sb.sb);

	memset(&sb_r->entries, 0,
	       vstruct_end(&sb_r->field) -
	       (void *) &sb_r->entries);

	dst = sb_r->entries;
	for_each_cpu_replicas_entry(r, src) {
		dst->data_type	= src->data_type;
		dst->nr_devs	= src->nr_devs;
		memcpy(dst->devs, src->devs, src->nr_devs);

		dst = replicas_entry_next(dst);

		BUG_ON((void *) dst > vstruct_end(&sb_r->field));
	}

	return 0;
}
static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
					    struct bch_replicas_cpu *r)
{
	struct bch_sb_field_replicas *sb_r;
	struct bch_replicas_entry *dst, *src;
	bool need_v1 = false;
	size_t bytes;

	bytes = sizeof(struct bch_sb_field_replicas);

	for_each_cpu_replicas_entry(r, src) {
		bytes += replicas_entry_bytes(src);
		if (src->nr_required != 1)
			need_v1 = true;
	}

	if (!need_v1)
		return bch2_cpu_replicas_to_sb_replicas_v0(c, r);

	sb_r = bch2_sb_resize_replicas(&c->disk_sb,
			DIV_ROUND_UP(bytes, sizeof(u64)));
	if (!sb_r)
		return -ENOSPC;

	bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas_v0);
	sb_r = bch2_sb_get_replicas(c->disk_sb.sb);

	memset(&sb_r->entries, 0,
	       vstruct_end(&sb_r->field) -
	       (void *) &sb_r->entries);

	dst = sb_r->entries;
	for_each_cpu_replicas_entry(r, src) {
		memcpy(dst, src, replicas_entry_bytes(src));

		dst = replicas_entry_next(dst);

		BUG_ON((void *) dst > vstruct_end(&sb_r->field));
	}

	return 0;
}
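/*
 * Validation helper, presumably cheap enough for mount time: sort with plain
 * memcmp and scan adjacent entries for duplicates, which would indicate a
 * corrupt superblock field.
 */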
static const char *check_dup_replicas_entries(struct bch_replicas_cpu *cpu_r)
{
	unsigned i;

	sort_cmp_size(cpu_r->entries, cpu_r->nr, cpu_r->entry_size,
		      memcmp, NULL);

	for (i = 0; i + 1 < cpu_r->nr; i++) {
		struct bch_replicas_entry *l =
			cpu_replicas_entry(cpu_r, i);
		struct bch_replicas_entry *r =
			cpu_replicas_entry(cpu_r, i + 1);

		BUG_ON(memcmp(l, r, cpu_r->entry_size) > 0);

		if (!memcmp(l, r, cpu_r->entry_size))
			return "duplicate replicas entry";
	}

	return NULL;
}
static const char *bch2_sb_validate_replicas(struct bch_sb *sb, struct bch_sb_field *f)
{
	struct bch_sb_field_replicas *sb_r = field_to_type(f, replicas);
	struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
	struct bch_replicas_cpu cpu_r = { .entries = NULL };
	struct bch_replicas_entry *e;
	const char *err;
	unsigned i;

	for_each_replicas_entry(sb_r, e) {
		err = "invalid replicas entry: invalid data type";
		if (e->data_type >= BCH_DATA_NR)
			goto err;

		err = "invalid replicas entry: no devices";
		if (!e->nr_devs)
			goto err;

		err = "invalid replicas entry: bad nr_required";
		if (!e->nr_required ||
		    (e->nr_required > 1 &&
		     e->nr_required >= e->nr_devs))
			goto err;

		err = "invalid replicas entry: invalid device";
		for (i = 0; i < e->nr_devs; i++)
			if (!bch2_dev_exists(sb, mi, e->devs[i]))
				goto err;
	}

	err = "cannot allocate memory";
	if (__bch2_sb_replicas_to_cpu_replicas(sb_r, &cpu_r))
		goto err;

	err = check_dup_replicas_entries(&cpu_r);
err:
	kfree(cpu_r.entries);
	return err;
}
static void bch2_sb_replicas_to_text(struct printbuf *out,
				     struct bch_sb *sb,
				     struct bch_sb_field *f)
{
	struct bch_sb_field_replicas *r = field_to_type(f, replicas);
	struct bch_replicas_entry *e;
	bool first = true;

	for_each_replicas_entry(r, e) {
		if (!first)
			pr_buf(out, " ");
		first = false;
		replicas_entry_to_text(out, e);
	}
}
const struct bch_sb_field_ops bch_sb_field_ops_replicas = {
	.validate	= bch2_sb_validate_replicas,
	.to_text	= bch2_sb_replicas_to_text,
};
static const char *bch2_sb_validate_replicas_v0(struct bch_sb *sb, struct bch_sb_field *f)
{
	struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
	struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
	struct bch_replicas_cpu cpu_r = { .entries = NULL };
	struct bch_replicas_entry_v0 *e;
	const char *err;
	unsigned i;

	for_each_replicas_entry_v0(sb_r, e) {
		err = "invalid replicas entry: invalid data type";
		if (e->data_type >= BCH_DATA_NR)
			goto err;

		err = "invalid replicas entry: no devices";
		if (!e->nr_devs)
			goto err;

		err = "invalid replicas entry: invalid device";
		for (i = 0; i < e->nr_devs; i++)
			if (!bch2_dev_exists(sb, mi, e->devs[i]))
				goto err;
	}

	err = "cannot allocate memory";
	if (__bch2_sb_replicas_v0_to_cpu_replicas(sb_r, &cpu_r))
		goto err;

	err = check_dup_replicas_entries(&cpu_r);
err:
	kfree(cpu_r.entries);
	return err;
}
const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0 = {
	.validate	= bch2_sb_validate_replicas_v0,
};
/* Query replicas: */
bool bch2_replicas_marked(struct bch_fs *c,
			  enum bch_data_type data_type,
			  struct bch_devs_list devs,
			  bool check_gc_replicas)
{
	struct bch_replicas_entry_padded search;

	if (!devs.nr)
		return true;

	memset(&search, 0, sizeof(search));

	devlist_to_replicas(devs, data_type, &search.e);

	return replicas_has_entry(c, &search.e, check_gc_replicas);
}
bool bch2_bkey_replicas_marked(struct bch_fs *c,
			       struct bkey_s_c k,
			       bool check_gc_replicas)
{
	struct bch_replicas_entry_padded search;
	struct bch_devs_list cached = bch2_bkey_cached_devs(k);
	unsigned i;

	memset(&search, 0, sizeof(search));

	for (i = 0; i < cached.nr; i++)
		if (!bch2_replicas_marked(c, BCH_DATA_CACHED,
					  bch2_dev_list_single(cached.devs[i]),
					  check_gc_replicas))
			return false;

	bkey_to_replicas(k, &search.e);

	return search.e.nr_devs
		? replicas_has_entry(c, &search.e, check_gc_replicas)
		: true;
}
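/*
 * For each data type, redundancy is the minimum over all replicas entries of
 * (online devices in the entry - nr_required): 0 means readable but no
 * further failures can be tolerated, negative means some data is currently
 * unavailable.
 */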
struct replicas_status __bch2_replicas_status(struct bch_fs *c,
					      struct bch_devs_mask online_devs)
{
	struct bch_sb_field_members *mi;
	struct bch_replicas_entry *e;
	unsigned i, nr_online, nr_offline;
	struct replicas_status ret;

	memset(&ret, 0, sizeof(ret));

	for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
		ret.replicas[i].redundancy = INT_MAX;

	mi = bch2_sb_get_members(c->disk_sb.sb);

	percpu_down_read_preempt_disable(&c->mark_lock);

	for_each_cpu_replicas_entry(&c->replicas, e) {
		if (e->data_type >= ARRAY_SIZE(ret.replicas))
			panic("e %p data_type %u\n", e, e->data_type);

		nr_online = nr_offline = 0;

		for (i = 0; i < e->nr_devs; i++) {
			BUG_ON(!bch2_dev_exists(c->disk_sb.sb, mi,
						e->devs[i]));

			if (test_bit(e->devs[i], online_devs.d))
				nr_online++;
			else
				nr_offline++;
		}

		ret.replicas[e->data_type].redundancy =
			min(ret.replicas[e->data_type].redundancy,
			    (int) nr_online - (int) e->nr_required);

		ret.replicas[e->data_type].nr_offline =
			max(ret.replicas[e->data_type].nr_offline,
			    nr_offline);
	}

	percpu_up_read_preempt_enable(&c->mark_lock);

	for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
		if (ret.replicas[i].redundancy == INT_MAX)
			ret.replicas[i].redundancy = 0;

	return ret;
}
struct replicas_status bch2_replicas_status(struct bch_fs *c)
{
	return __bch2_replicas_status(c, bch2_online_devs(c));
}
static bool have_enough_devs(struct replicas_status s,
			     enum bch_data_type type,
			     bool force_if_degraded,
			     bool force_if_lost)
{
	return (!s.replicas[type].nr_offline || force_if_degraded) &&
		(s.replicas[type].redundancy >= 0 || force_if_lost);
}
bool bch2_have_enough_devs(struct replicas_status s, unsigned flags)
{
	return (have_enough_devs(s, BCH_DATA_JOURNAL,
				 flags & BCH_FORCE_IF_METADATA_DEGRADED,
				 flags & BCH_FORCE_IF_METADATA_LOST) &&
		have_enough_devs(s, BCH_DATA_BTREE,
				 flags & BCH_FORCE_IF_METADATA_DEGRADED,
				 flags & BCH_FORCE_IF_METADATA_LOST) &&
		have_enough_devs(s, BCH_DATA_USER,
				 flags & BCH_FORCE_IF_DATA_DEGRADED,
				 flags & BCH_FORCE_IF_DATA_LOST));
}
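/*
 * Number of replicas we can currently read, i.e. redundancy + 1; for
 * metadata, the smaller of the journal and btree values:
 */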
int bch2_replicas_online(struct bch_fs *c, bool meta)
{
	struct replicas_status s = bch2_replicas_status(c);

	return (meta
		? min(s.replicas[BCH_DATA_JOURNAL].redundancy,
		      s.replicas[BCH_DATA_BTREE].redundancy)
		: s.replicas[BCH_DATA_USER].redundancy) + 1;
}
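/*
 * Returns a bitmask of BCH_DATA_* types for which @ca appears in some
 * replicas entry, i.e. which kinds of data this device may hold:
 */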
unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
{
	struct bch_replicas_entry *e;
	unsigned i, ret = 0;

	percpu_down_read_preempt_disable(&c->mark_lock);

	for_each_cpu_replicas_entry(&c->replicas, e)
		for (i = 0; i < e->nr_devs; i++)
			if (e->devs[i] == ca->dev_idx)
				ret |= 1 << e->data_type;

	percpu_up_read_preempt_enable(&c->mark_lock);

	return ret;
}