// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "buckets.h"
#include "journal.h"
#include "replicas.h"
#include "super-io.h"

static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
					    struct bch_replicas_cpu *);

/* Replicas tracking - in memory: */

static inline int u8_cmp(u8 l, u8 r)
{
	return cmp_int(l, r);
}

static void verify_replicas_entry(struct bch_replicas_entry *e)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	unsigned i;

	BUG_ON(e->data_type >= BCH_DATA_NR);
	BUG_ON(!e->nr_devs);
	BUG_ON(e->nr_required > 1 &&
	       e->nr_required >= e->nr_devs);

	for (i = 0; i + 1 < e->nr_devs; i++)
		BUG_ON(e->devs[i] >= e->devs[i + 1]);
#endif
}

static void replicas_entry_sort(struct bch_replicas_entry *e)
{
	bubble_sort(e->devs, e->nr_devs, u8_cmp);
}

static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
{
	eytzinger0_sort(r->entries, r->nr, r->entry_size, memcmp, NULL);
}

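/*
 * Prints a replicas entry as "<type>: <nr_required>/<nr_devs> [<devs>]" -
 * e.g. (illustrative) "user: 1/2 [0 3]" for 2x replicated user data on
 * devices 0 and 3, of which 1 copy is required.
 */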
void bch2_replicas_entry_to_text(struct printbuf *out,
				 struct bch_replicas_entry *e)
{
	unsigned i;

	pr_buf(out, "%s: %u/%u [",
	       bch2_data_types[e->data_type],
	       e->nr_required,
	       e->nr_devs);

	for (i = 0; i < e->nr_devs; i++)
		pr_buf(out, i ? " %u" : "%u", e->devs[i]);
	pr_buf(out, "]");
}

void bch2_cpu_replicas_to_text(struct printbuf *out,
			       struct bch_replicas_cpu *r)
{
	struct bch_replicas_entry *e;
	bool first = true;

	for_each_cpu_replicas_entry(r, e) {
		if (!first)
			pr_buf(out, " ");
		first = false;

		bch2_replicas_entry_to_text(out, e);
	}
}

static void extent_to_replicas(struct bkey_s_c k,
			       struct bch_replicas_entry *r)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	r->nr_required	= 1;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if (p.ptr.cached)
			continue;

		if (!p.has_ec)
			r->devs[r->nr_devs++] = p.ptr.dev;
		else
			r->nr_required = 0;
	}
}

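/*
 * For an erasure coded stripe, only the data blocks are required: a stripe
 * with nr_blocks total blocks and nr_redundant parity blocks can tolerate
 * the loss of any nr_redundant of its devices.
 */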
static void stripe_to_replicas(struct bkey_s_c k,
			       struct bch_replicas_entry *r)
{
	struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
	const struct bch_extent_ptr *ptr;

	r->nr_required	= s.v->nr_blocks - s.v->nr_redundant;

	for (ptr = s.v->ptrs;
	     ptr < s.v->ptrs + s.v->nr_blocks;
	     ptr++)
		r->devs[r->nr_devs++] = ptr->dev;
}

void bch2_bkey_to_replicas(struct bch_replicas_entry *e,
			   struct bkey_s_c k)
{
	e->nr_devs = 0;

	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		e->data_type = BCH_DATA_BTREE;
		extent_to_replicas(k, e);
		break;
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		e->data_type = BCH_DATA_USER;
		extent_to_replicas(k, e);
		break;
	case KEY_TYPE_stripe:
		e->data_type = BCH_DATA_USER;
		stripe_to_replicas(k, e);
		break;
	}

	replicas_entry_sort(e);
}

void bch2_devlist_to_replicas(struct bch_replicas_entry *e,
			      enum bch_data_type data_type,
			      struct bch_devs_list devs)
{
	unsigned i;

	BUG_ON(!data_type ||
	       data_type == BCH_DATA_SB ||
	       data_type >= BCH_DATA_NR);

	e->data_type	= data_type;
	e->nr_devs	= 0;
	e->nr_required	= 1;

	for (i = 0; i < devs.nr; i++)
		e->devs[e->nr_devs++] = devs.devs[i];

	replicas_entry_sort(e);
}

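/*
 * Returns a new table with new_entry added, leaving the old table intact;
 * on allocation failure the returned table's ->entries is NULL. The caller
 * owns (and must kfree()) the returned entries.
 */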
static struct bch_replicas_cpu
cpu_replicas_add_entry(struct bch_replicas_cpu *old,
		       struct bch_replicas_entry *new_entry)
{
	unsigned i;
	struct bch_replicas_cpu new = {
		.nr		= old->nr + 1,
		.entry_size	= max_t(unsigned, old->entry_size,
					replicas_entry_bytes(new_entry)),
	};

	BUG_ON(!new_entry->data_type);
	verify_replicas_entry(new_entry);

	new.entries = kcalloc(new.nr, new.entry_size, GFP_NOIO);
	if (!new.entries)
		return new;

	for (i = 0; i < old->nr; i++)
		memcpy(cpu_replicas_entry(&new, i),
		       cpu_replicas_entry(old, i),
		       old->entry_size);

	memcpy(cpu_replicas_entry(&new, old->nr),
	       new_entry,
	       replicas_entry_bytes(new_entry));

	bch2_cpu_replicas_sort(&new);
	return new;
}

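/*
 * Binary search (in eytzinger layout) for an entry: search must already be
 * sorted, since entries are compared with a raw memcmp(). Returns -1 if not
 * found.
 */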
static inline int __replicas_entry_idx(struct bch_replicas_cpu *r,
				       struct bch_replicas_entry *search)
{
	int idx, entry_size = replicas_entry_bytes(search);

	if (unlikely(entry_size > r->entry_size))
		return -1;

	verify_replicas_entry(search);

#define entry_cmp(_l, _r, size)	memcmp(_l, _r, entry_size)
	idx = eytzinger0_find(r->entries, r->nr, r->entry_size,
			      entry_cmp, search);
#undef entry_cmp

	return idx < r->nr ? idx : -1;
}

int bch2_replicas_entry_idx(struct bch_fs *c,
			    struct bch_replicas_entry *search)
{
	replicas_entry_sort(search);

	return __replicas_entry_idx(&c->replicas, search);
}

static bool __replicas_has_entry(struct bch_replicas_cpu *r,
				 struct bch_replicas_entry *search)
{
	return __replicas_entry_idx(r, search) >= 0;
}

static bool bch2_replicas_marked_locked(struct bch_fs *c,
					struct bch_replicas_entry *search,
					bool check_gc_replicas)
{
	if (!search->nr_devs)
		return true;

	verify_replicas_entry(search);

	return __replicas_has_entry(&c->replicas, search) &&
		(!check_gc_replicas ||
		 likely((!c->replicas_gc.entries)) ||
		 __replicas_has_entry(&c->replicas_gc, search));
}

bool bch2_replicas_marked(struct bch_fs *c,
			  struct bch_replicas_entry *search,
			  bool check_gc_replicas)
{
	bool marked;

	percpu_down_read(&c->mark_lock);
	marked = bch2_replicas_marked_locked(c, search, check_gc_replicas);
	percpu_up_read(&c->mark_lock);

	return marked;
}

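/*
 * When the replicas table changes, every index into the usage counters
 * changes with it: copy each nonzero counter from its position in the old
 * table to the corresponding entry's position in the new one.
 */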
static void __replicas_table_update(struct bch_fs_usage *dst,
				    struct bch_replicas_cpu *dst_r,
				    struct bch_fs_usage *src,
				    struct bch_replicas_cpu *src_r)
{
	int src_idx, dst_idx;

	*dst = *src;

	for (src_idx = 0; src_idx < src_r->nr; src_idx++) {
		if (!src->replicas[src_idx])
			continue;

		dst_idx = __replicas_entry_idx(dst_r,
				cpu_replicas_entry(src_r, src_idx));
		BUG_ON(dst_idx < 0);

		dst->replicas[dst_idx] = src->replicas[src_idx];
	}
}

static void __replicas_table_update_pcpu(struct bch_fs_usage __percpu *dst_p,
					 struct bch_replicas_cpu *dst_r,
					 struct bch_fs_usage __percpu *src_p,
					 struct bch_replicas_cpu *src_r)
{
	unsigned src_nr = sizeof(struct bch_fs_usage) / sizeof(u64) + src_r->nr;
	struct bch_fs_usage *dst, *src = (void *)
		bch2_acc_percpu_u64s((void *) src_p, src_nr);

	preempt_disable();
	dst = this_cpu_ptr(dst_p);
	preempt_enable();

	__replicas_table_update(dst, dst_r, src, src_r);
}

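/*
 * All replacement tables are allocated up front so the final swap under
 * mark_lock cannot fail; whatever is left in the local variables afterwards
 * (the old tables on success, the unused allocations on failure) is freed
 * on the way out.
 */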
/*
 * Resize filesystem accounting:
 */
static int replicas_table_update(struct bch_fs *c,
				 struct bch_replicas_cpu *new_r)
{
	struct bch_fs_usage __percpu *new_usage[2] = { NULL, NULL };
	struct bch_fs_usage *new_scratch = NULL;
	struct bch_fs_usage __percpu *new_gc = NULL;
	struct bch_fs_usage *new_base = NULL;
	unsigned bytes = sizeof(struct bch_fs_usage) +
		sizeof(u64) * new_r->nr;
	int ret = -ENOMEM;

	if (!(new_base = kzalloc(bytes, GFP_NOIO)) ||
	    !(new_usage[0] = __alloc_percpu_gfp(bytes, sizeof(u64),
						GFP_NOIO)) ||
	    !(new_usage[1] = __alloc_percpu_gfp(bytes, sizeof(u64),
						GFP_NOIO)) ||
	    !(new_scratch  = kmalloc(bytes, GFP_NOIO)) ||
	    (c->usage_gc &&
	     !(new_gc = __alloc_percpu_gfp(bytes, sizeof(u64), GFP_NOIO)))) {
		bch_err(c, "error updating replicas table: memory allocation failure");
		goto err;
	}

	if (c->usage_base)
		__replicas_table_update(new_base,		new_r,
					c->usage_base,		&c->replicas);
	if (c->usage[0])
		__replicas_table_update_pcpu(new_usage[0],	new_r,
					     c->usage[0],	&c->replicas);
	if (c->usage[1])
		__replicas_table_update_pcpu(new_usage[1],	new_r,
					     c->usage[1],	&c->replicas);
	if (c->usage_gc)
		__replicas_table_update_pcpu(new_gc,		new_r,
					     c->usage_gc,	&c->replicas);

	swap(c->usage_base,	new_base);
	swap(c->usage[0],	new_usage[0]);
	swap(c->usage[1],	new_usage[1]);
	swap(c->usage_scratch,	new_scratch);
	swap(c->usage_gc,	new_gc);
	swap(c->replicas,	*new_r);
	ret = 0;
err:
	free_percpu(new_gc);
	kfree(new_scratch);
	free_percpu(new_usage[1]);
	free_percpu(new_usage[0]);
	kfree(new_base);
	return ret;
}

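/*
 * Filesystem usage is written out with journal entries, so space for it
 * must be reserved ahead of time: this computes the reservation, in u64s,
 * for the fixed usage entries plus one data-usage entry per replicas
 * entry. Used to size c->replicas_journal_res.
 */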
static unsigned reserve_journal_replicas(struct bch_fs *c,
					 struct bch_replicas_cpu *r)
{
	struct bch_replicas_entry *e;
	unsigned journal_res_u64s = 0;

	/* nr_inodes: */
	journal_res_u64s +=
		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));

	/* key_version: */
	journal_res_u64s +=
		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));

	/* persistent_reserved: */
	journal_res_u64s +=
		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64)) *
		BCH_REPLICAS_MAX;

	for_each_cpu_replicas_entry(r, e)
		journal_res_u64s +=
			DIV_ROUND_UP(sizeof(struct jset_entry_data_usage) +
				     e->nr_devs, sizeof(u64));
	return journal_res_u64s;
}

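/*
 * Slowpath for marking a new replicas entry: the superblock and journal
 * reservation are updated first, and the in-memory tables only once the
 * change is persistent, so anything marked in memory is always described
 * by the on-disk superblock.
 */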
static int bch2_mark_replicas_slowpath(struct bch_fs *c,
				struct bch_replicas_entry *new_entry)
{
	struct bch_replicas_cpu new_r, new_gc;
	int ret = 0;

	verify_replicas_entry(new_entry);

	memset(&new_r, 0, sizeof(new_r));
	memset(&new_gc, 0, sizeof(new_gc));

	mutex_lock(&c->sb_lock);

	if (c->replicas_gc.entries &&
	    !__replicas_has_entry(&c->replicas_gc, new_entry)) {
		new_gc = cpu_replicas_add_entry(&c->replicas_gc, new_entry);
		if (!new_gc.entries)
			goto err;
	}

	if (!__replicas_has_entry(&c->replicas, new_entry)) {
		new_r = cpu_replicas_add_entry(&c->replicas, new_entry);
		if (!new_r.entries)
			goto err;

		ret = bch2_cpu_replicas_to_sb_replicas(c, &new_r);
		if (ret)
			goto err;

		bch2_journal_entry_res_resize(&c->journal,
				&c->replicas_journal_res,
				reserve_journal_replicas(c, &new_r));
	}

	if (!new_r.entries &&
	    !new_gc.entries)
		goto out;

	/* allocations done, now commit: */

	if (new_r.entries)
		bch2_write_super(c);

	/* don't update in memory replicas until changes are persistent */
	percpu_down_write(&c->mark_lock);
	if (new_r.entries)
		ret = replicas_table_update(c, &new_r);
	if (new_gc.entries)
		swap(new_gc, c->replicas_gc);
	percpu_up_write(&c->mark_lock);
out:
	mutex_unlock(&c->sb_lock);

	kfree(new_r.entries);
	kfree(new_gc.entries);

	return ret;
err:
	bch_err(c, "error adding replicas entry: memory allocation failure");
	ret = -ENOMEM;
	goto out;
}

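/*
 * Common case: the entry already exists and only a read-locked lookup is
 * needed; otherwise fall back to the slowpath above to add it.
 */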
int bch2_mark_replicas(struct bch_fs *c,
		       struct bch_replicas_entry *r)
{
	return likely(bch2_replicas_marked(c, r, true))
		? 0
		: bch2_mark_replicas_slowpath(c, r);
}

bool bch2_bkey_replicas_marked_locked(struct bch_fs *c,
				      struct bkey_s_c k,
				      bool check_gc_replicas)
{
	struct bch_replicas_padded search;
	struct bch_devs_list cached = bch2_bkey_cached_devs(k);
	unsigned i;

	for (i = 0; i < cached.nr; i++) {
		bch2_replicas_entry_cached(&search.e, cached.devs[i]);

		if (!bch2_replicas_marked_locked(c, &search.e,
						 check_gc_replicas))
			return false;
	}

	bch2_bkey_to_replicas(&search.e, k);

	return bch2_replicas_marked_locked(c, &search.e, check_gc_replicas);
}

bool bch2_bkey_replicas_marked(struct bch_fs *c,
			       struct bkey_s_c k,
			       bool check_gc_replicas)
{
	bool marked;

	percpu_down_read(&c->mark_lock);
	marked = bch2_bkey_replicas_marked_locked(c, k, check_gc_replicas);
	percpu_up_read(&c->mark_lock);

	return marked;
}

int bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
{
	struct bch_replicas_padded search;
	struct bch_devs_list cached = bch2_bkey_cached_devs(k);
	unsigned i;
	int ret;

	for (i = 0; i < cached.nr; i++) {
		bch2_replicas_entry_cached(&search.e, cached.devs[i]);

		ret = bch2_mark_replicas(c, &search.e);
		if (ret)
			return ret;
	}

	bch2_bkey_to_replicas(&search.e, k);

	return bch2_mark_replicas(c, &search.e);
}

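/*
 * Replicas GC: bch2_replicas_gc_start() snapshots the entries not subject
 * to collection (per typemask); entries marked while GC runs are added to
 * replicas_gc by the mark slowpath, and gc_end() re-adds any entry that
 * still has nonzero usage before swapping in the pruned table.
 */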
int bch2_replicas_gc_end(struct bch_fs *c, int ret)
{
	unsigned i;

	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);
	percpu_down_write(&c->mark_lock);

	/*
	 * this is kind of crappy; the replicas gc mechanism needs to be ripped
	 * out
	 */

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);
		struct bch_replicas_cpu n;

		if (!__replicas_has_entry(&c->replicas_gc, e) &&
		    (c->usage_base->replicas[i] ||
		     percpu_u64_get(&c->usage[0]->replicas[i]) ||
		     percpu_u64_get(&c->usage[1]->replicas[i]))) {
			n = cpu_replicas_add_entry(&c->replicas_gc, e);
			if (!n.entries) {
				ret = -ENOSPC;
				goto err;
			}

			swap(n, c->replicas_gc);
			kfree(n.entries);
		}
	}

	if (bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc)) {
		ret = -ENOSPC;
		goto err;
	}

	ret = replicas_table_update(c, &c->replicas_gc);
err:
	kfree(c->replicas_gc.entries);
	c->replicas_gc.entries = NULL;

	percpu_up_write(&c->mark_lock);

	if (!ret)
		bch2_write_super(c);

	mutex_unlock(&c->sb_lock);

	return ret;
}

int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
{
	struct bch_replicas_entry *e;
	unsigned i = 0;

	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);
	BUG_ON(c->replicas_gc.entries);

	c->replicas_gc.nr		= 0;
	c->replicas_gc.entry_size	= 0;

	for_each_cpu_replicas_entry(&c->replicas, e)
		if (!((1 << e->data_type) & typemask)) {
			c->replicas_gc.nr++;
			c->replicas_gc.entry_size =
				max_t(unsigned, c->replicas_gc.entry_size,
				      replicas_entry_bytes(e));
		}

	c->replicas_gc.entries = kcalloc(c->replicas_gc.nr,
					 c->replicas_gc.entry_size,
					 GFP_NOIO);
	if (!c->replicas_gc.entries) {
		mutex_unlock(&c->sb_lock);
		bch_err(c, "error allocating c->replicas_gc");
		return -ENOMEM;
	}

	for_each_cpu_replicas_entry(&c->replicas, e)
		if (!((1 << e->data_type) & typemask))
			memcpy(cpu_replicas_entry(&c->replicas_gc, i++),
			       e, c->replicas_gc.entry_size);

	bch2_cpu_replicas_sort(&c->replicas_gc);
	mutex_unlock(&c->sb_lock);

	return 0;
}

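/*
 * Alternate GC that prunes entries with no usage in place: the new table
 * is allocated without locks held, so if c->replicas changed in the
 * meantime we drop everything and retry.
 */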
int bch2_replicas_gc2(struct bch_fs *c)
{
	struct bch_replicas_cpu new = { 0 };
	unsigned i, nr;
	int ret = 0;

	bch2_journal_meta(&c->journal);
retry:
	nr		= READ_ONCE(c->replicas.nr);
	new.entry_size	= READ_ONCE(c->replicas.entry_size);
	new.entries	= kcalloc(nr, new.entry_size, GFP_KERNEL);
	if (!new.entries) {
		bch_err(c, "error allocating c->replicas_gc");
		return -ENOMEM;
	}

	mutex_lock(&c->sb_lock);
	percpu_down_write(&c->mark_lock);

	if (nr			!= c->replicas.nr ||
	    new.entry_size	!= c->replicas.entry_size) {
		percpu_up_write(&c->mark_lock);
		mutex_unlock(&c->sb_lock);
		kfree(new.entries);
		goto retry;
	}

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);

		if (e->data_type == BCH_DATA_JOURNAL ||
		    c->usage_base->replicas[i] ||
		    percpu_u64_get(&c->usage[0]->replicas[i]) ||
		    percpu_u64_get(&c->usage[1]->replicas[i]))
			memcpy(cpu_replicas_entry(&new, new.nr++),
			       e, new.entry_size);
	}

	bch2_cpu_replicas_sort(&new);

	if (bch2_cpu_replicas_to_sb_replicas(c, &new)) {
		ret = -ENOSPC;
		goto err;
	}

	ret = replicas_table_update(c, &new);
err:
	kfree(new.entries);

	percpu_up_write(&c->mark_lock);

	if (!ret)
		bch2_write_super(c);

	mutex_unlock(&c->sb_lock);

	return ret;
}

int bch2_replicas_set_usage(struct bch_fs *c,
			    struct bch_replicas_entry *r,
			    u64 sectors)
{
	int ret, idx = bch2_replicas_entry_idx(c, r);

	if (idx < 0) {
		struct bch_replicas_cpu n;

		n = cpu_replicas_add_entry(&c->replicas, r);
		if (!n.entries)
			return -ENOMEM;

		ret = replicas_table_update(c, &n);
		if (ret)
			return ret;

		kfree(n.entries);

		idx = bch2_replicas_entry_idx(c, r);
		BUG_ON(idx < 0);
	}

	c->usage_base->replicas[idx] = sectors;

	return 0;
}

/* Replicas tracking - superblock: */

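/*
 * Two on-disk encodings exist: v0 entries lack the nr_required field, so
 * they are widened (with nr_required = 1) when read into the CPU table,
 * and the v0 format is only written back when every entry has
 * nr_required == 1.
 */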
static int
__bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r,
				   struct bch_replicas_cpu *cpu_r)
{
	struct bch_replicas_entry *e, *dst;
	unsigned nr = 0, entry_size = 0, idx = 0;

	for_each_replicas_entry(sb_r, e) {
		entry_size = max_t(unsigned, entry_size,
				   replicas_entry_bytes(e));
		nr++;
	}

	cpu_r->entries = kcalloc(nr, entry_size, GFP_NOIO);
	if (!cpu_r->entries)
		return -ENOMEM;

	cpu_r->nr		= nr;
	cpu_r->entry_size	= entry_size;

	for_each_replicas_entry(sb_r, e) {
		dst = cpu_replicas_entry(cpu_r, idx++);
		memcpy(dst, e, replicas_entry_bytes(e));
		replicas_entry_sort(dst);
	}

	return 0;
}

static int
__bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
				      struct bch_replicas_cpu *cpu_r)
{
	struct bch_replicas_entry_v0 *e;
	unsigned nr = 0, entry_size = 0, idx = 0;

	for_each_replicas_entry(sb_r, e) {
		entry_size = max_t(unsigned, entry_size,
				   replicas_entry_bytes(e));
		nr++;
	}

	entry_size += sizeof(struct bch_replicas_entry) -
		sizeof(struct bch_replicas_entry_v0);

	cpu_r->entries = kcalloc(nr, entry_size, GFP_NOIO);
	if (!cpu_r->entries)
		return -ENOMEM;

	cpu_r->nr		= nr;
	cpu_r->entry_size	= entry_size;

	for_each_replicas_entry(sb_r, e) {
		struct bch_replicas_entry *dst =
			cpu_replicas_entry(cpu_r, idx++);

		dst->data_type	= e->data_type;
		dst->nr_devs	= e->nr_devs;
		dst->nr_required = 1;
		memcpy(dst->devs, e->devs, e->nr_devs);
		replicas_entry_sort(dst);
	}

	return 0;
}

int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
{
	struct bch_sb_field_replicas *sb_v1;
	struct bch_sb_field_replicas_v0 *sb_v0;
	struct bch_replicas_cpu new_r = { 0, 0, NULL };
	int ret = 0;

	if ((sb_v1 = bch2_sb_get_replicas(c->disk_sb.sb)))
		ret = __bch2_sb_replicas_to_cpu_replicas(sb_v1, &new_r);
	else if ((sb_v0 = bch2_sb_get_replicas_v0(c->disk_sb.sb)))
		ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_v0, &new_r);

	if (ret)
		return -ENOMEM;

	bch2_cpu_replicas_sort(&new_r);

	percpu_down_write(&c->mark_lock);

	ret = replicas_table_update(c, &new_r);
	percpu_up_write(&c->mark_lock);

	kfree(new_r.entries);

	return 0;
}

static int bch2_cpu_replicas_to_sb_replicas_v0(struct bch_fs *c,
					       struct bch_replicas_cpu *r)
{
	struct bch_sb_field_replicas_v0 *sb_r;
	struct bch_replicas_entry_v0 *dst;
	struct bch_replicas_entry *src;
	size_t bytes;

	bytes = sizeof(struct bch_sb_field_replicas);

	for_each_cpu_replicas_entry(r, src)
		bytes += replicas_entry_bytes(src) - 1;

	sb_r = bch2_sb_resize_replicas_v0(&c->disk_sb,
			DIV_ROUND_UP(bytes, sizeof(u64)));
	if (!sb_r)
		return -ENOSPC;

	bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas);
	sb_r = bch2_sb_get_replicas_v0(c->disk_sb.sb);

	memset(&sb_r->entries, 0,
	       vstruct_end(&sb_r->field) -
	       (void *) &sb_r->entries);

	dst = sb_r->entries;
	for_each_cpu_replicas_entry(r, src) {
		dst->data_type	= src->data_type;
		dst->nr_devs	= src->nr_devs;
		memcpy(dst->devs, src->devs, src->nr_devs);

		dst = replicas_entry_next(dst);

		BUG_ON((void *) dst > vstruct_end(&sb_r->field));
	}

	return 0;
}

static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
					    struct bch_replicas_cpu *r)
{
	struct bch_sb_field_replicas *sb_r;
	struct bch_replicas_entry *dst, *src;
	bool need_v1 = false;
	size_t bytes;

	bytes = sizeof(struct bch_sb_field_replicas);

	for_each_cpu_replicas_entry(r, src) {
		bytes += replicas_entry_bytes(src);
		if (src->nr_required != 1)
			need_v1 = true;
	}

	if (!need_v1)
		return bch2_cpu_replicas_to_sb_replicas_v0(c, r);

	sb_r = bch2_sb_resize_replicas(&c->disk_sb,
			DIV_ROUND_UP(bytes, sizeof(u64)));
	if (!sb_r)
		return -ENOSPC;

	bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas_v0);
	sb_r = bch2_sb_get_replicas(c->disk_sb.sb);

	memset(&sb_r->entries, 0,
	       vstruct_end(&sb_r->field) -
	       (void *) &sb_r->entries);

	dst = sb_r->entries;
	for_each_cpu_replicas_entry(r, src) {
		memcpy(dst, src, replicas_entry_bytes(src));

		dst = replicas_entry_next(dst);

		BUG_ON((void *) dst > vstruct_end(&sb_r->field));
	}

	return 0;
}

static const char *check_dup_replicas_entries(struct bch_replicas_cpu *cpu_r)
{
	unsigned i;

	sort_cmp_size(cpu_r->entries,
		      cpu_r->nr,
		      cpu_r->entry_size,
		      memcmp, NULL);

	for (i = 0; i + 1 < cpu_r->nr; i++) {
		struct bch_replicas_entry *l =
			cpu_replicas_entry(cpu_r, i);
		struct bch_replicas_entry *r =
			cpu_replicas_entry(cpu_r, i + 1);

		BUG_ON(memcmp(l, r, cpu_r->entry_size) > 0);

		if (!memcmp(l, r, cpu_r->entry_size))
			return "duplicate replicas entry";
	}

	return NULL;
}

static const char *bch2_sb_validate_replicas(struct bch_sb *sb, struct bch_sb_field *f)
{
	struct bch_sb_field_replicas *sb_r = field_to_type(f, replicas);
	struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
	struct bch_replicas_cpu cpu_r = { .entries = NULL };
	struct bch_replicas_entry *e;
	const char *err;
	unsigned i;

	for_each_replicas_entry(sb_r, e) {
		err = "invalid replicas entry: invalid data type";
		if (e->data_type >= BCH_DATA_NR)
			goto err;

		err = "invalid replicas entry: no devices";
		if (!e->nr_devs)
			goto err;

		err = "invalid replicas entry: bad nr_required";
		if (e->nr_required > 1 &&
		    e->nr_required >= e->nr_devs)
			goto err;

		err = "invalid replicas entry: invalid device";
		for (i = 0; i < e->nr_devs; i++)
			if (!bch2_dev_exists(sb, mi, e->devs[i]))
				goto err;
	}

	err = "cannot allocate memory";
	if (__bch2_sb_replicas_to_cpu_replicas(sb_r, &cpu_r))
		goto err;

	err = check_dup_replicas_entries(&cpu_r);
err:
	kfree(cpu_r.entries);
	return err;
}

static void bch2_sb_replicas_to_text(struct printbuf *out,
				     struct bch_sb *sb,
				     struct bch_sb_field *f)
{
	struct bch_sb_field_replicas *r = field_to_type(f, replicas);
	struct bch_replicas_entry *e;
	bool first = true;

	for_each_replicas_entry(r, e) {
		if (!first)
			pr_buf(out, " ");
		first = false;

		bch2_replicas_entry_to_text(out, e);
	}
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas = {
	.validate	= bch2_sb_validate_replicas,
	.to_text	= bch2_sb_replicas_to_text,
};

static const char *bch2_sb_validate_replicas_v0(struct bch_sb *sb, struct bch_sb_field *f)
{
	struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
	struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
	struct bch_replicas_cpu cpu_r = { .entries = NULL };
	struct bch_replicas_entry_v0 *e;
	const char *err;
	unsigned i;

	for_each_replicas_entry_v0(sb_r, e) {
		err = "invalid replicas entry: invalid data type";
		if (e->data_type >= BCH_DATA_NR)
			goto err;

		err = "invalid replicas entry: no devices";
		if (!e->nr_devs)
			goto err;

		err = "invalid replicas entry: invalid device";
		for (i = 0; i < e->nr_devs; i++)
			if (!bch2_dev_exists(sb, mi, e->devs[i]))
				goto err;
	}

	err = "cannot allocate memory";
	if (__bch2_sb_replicas_v0_to_cpu_replicas(sb_r, &cpu_r))
		goto err;

	err = check_dup_replicas_entries(&cpu_r);
err:
	kfree(cpu_r.entries);
	return err;
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0 = {
	.validate	= bch2_sb_validate_replicas_v0,
};

/* Query replicas: */

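/*
 * For each data type, redundancy is the minimum over all entries of
 * (online devices - nr_required): 0 means no further device failures can
 * be tolerated, negative means data of that type is already unavailable.
 */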
struct replicas_status __bch2_replicas_status(struct bch_fs *c,
					      struct bch_devs_mask online_devs)
{
	struct bch_sb_field_members *mi;
	struct bch_replicas_entry *e;
	unsigned i, nr_online, nr_offline;
	struct replicas_status ret;

	memset(&ret, 0, sizeof(ret));

	for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
		ret.replicas[i].redundancy = INT_MAX;

	mi = bch2_sb_get_members(c->disk_sb.sb);

	percpu_down_read(&c->mark_lock);

	for_each_cpu_replicas_entry(&c->replicas, e) {
		if (e->data_type >= ARRAY_SIZE(ret.replicas))
			panic("e %p data_type %u\n", e, e->data_type);

		nr_online = nr_offline = 0;

		for (i = 0; i < e->nr_devs; i++) {
			BUG_ON(!bch2_dev_exists(c->disk_sb.sb, mi,
						e->devs[i]));

			if (test_bit(e->devs[i], online_devs.d))
				nr_online++;
			else
				nr_offline++;
		}

		ret.replicas[e->data_type].redundancy =
			min(ret.replicas[e->data_type].redundancy,
			    (int) nr_online - (int) e->nr_required);

		ret.replicas[e->data_type].nr_offline =
			max(ret.replicas[e->data_type].nr_offline,
			    nr_offline);
	}

	percpu_up_read(&c->mark_lock);

	for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
		if (ret.replicas[i].redundancy == INT_MAX)
			ret.replicas[i].redundancy = 0;

	return ret;
}

struct replicas_status bch2_replicas_status(struct bch_fs *c)
{
	return __bch2_replicas_status(c, bch2_online_devs(c));
}

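/*
 * Degraded (some copies offline) and lost (negative redundancy) conditions
 * are distinct, and each can be separately overridden by the corresponding
 * BCH_FORCE_IF_* flags.
 */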
static bool have_enough_devs(struct replicas_status s,
			     enum bch_data_type type,
			     bool force_if_degraded,
			     bool force_if_lost)
{
	return (!s.replicas[type].nr_offline || force_if_degraded) &&
		(s.replicas[type].redundancy >= 0 || force_if_lost);
}

bool bch2_have_enough_devs(struct replicas_status s, unsigned flags)
{
	return (have_enough_devs(s, BCH_DATA_JOURNAL,
				 flags & BCH_FORCE_IF_METADATA_DEGRADED,
				 flags & BCH_FORCE_IF_METADATA_LOST) &&
		have_enough_devs(s, BCH_DATA_BTREE,
				 flags & BCH_FORCE_IF_METADATA_DEGRADED,
				 flags & BCH_FORCE_IF_METADATA_LOST) &&
		have_enough_devs(s, BCH_DATA_USER,
				 flags & BCH_FORCE_IF_DATA_DEGRADED,
				 flags & BCH_FORCE_IF_DATA_LOST));
}

int bch2_replicas_online(struct bch_fs *c, bool meta)
{
	struct replicas_status s = bch2_replicas_status(c);

	return (meta
		? min(s.replicas[BCH_DATA_JOURNAL].redundancy,
		      s.replicas[BCH_DATA_BTREE].redundancy)
		: s.replicas[BCH_DATA_USER].redundancy) + 1;
}

unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
{
	struct bch_replicas_entry *e;
	unsigned i, ret = 0;

	percpu_down_read(&c->mark_lock);

	for_each_cpu_replicas_entry(&c->replicas, e)
		for (i = 0; i < e->nr_devs; i++)
			if (e->devs[i] == ca->dev_idx)
				ret |= 1 << e->data_type;

	percpu_up_read(&c->mark_lock);

	return ret;
}

int bch2_fs_replicas_init(struct bch_fs *c)
{
	c->journal.entry_u64s_reserved +=
		reserve_journal_replicas(c, &c->replicas);

	return replicas_table_update(c, &c->replicas);
}