// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "buckets.h"
#include "journal.h"
#include "replicas.h"
#include "super-io.h"
static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
					    struct bch_replicas_cpu *);
/* Replicas tracking - in memory: */
static inline int u8_cmp(u8 l, u8 r)
{
	return (l > r) - (l < r);
}
static void verify_replicas_entry_sorted(struct bch_replicas_entry *e)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	unsigned i;

	for (i = 0; i + 1 < e->nr_devs; i++)
		BUG_ON(e->devs[i] >= e->devs[i + 1]);
#endif
}
static void replicas_entry_sort(struct bch_replicas_entry *e)
{
	bubble_sort(e->devs, e->nr_devs, u8_cmp);
}
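/*
 * The in-memory table is kept in eytzinger (cache-friendly binary search
 * tree) order, with fixed size entries compared via memcmp() - which is why
 * each entry's device list must itself be sorted first.
 */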
static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
{
	eytzinger0_sort(r->entries, r->nr, r->entry_size, memcmp, NULL);
}
void bch2_replicas_entry_to_text(struct printbuf *out,
				 struct bch_replicas_entry *e)
{
	unsigned i;

	pr_buf(out, "%s: %u/%u [",
	       bch2_data_types[e->data_type],
	       e->nr_required,
	       e->nr_devs);

	for (i = 0; i < e->nr_devs; i++)
		pr_buf(out, i ? " %u" : "%u", e->devs[i]);
	pr_buf(out, "]");
}
void bch2_cpu_replicas_to_text(struct printbuf *out,
			       struct bch_replicas_cpu *r)
{
	struct bch_replicas_entry *e;
	bool first = true;

	for_each_cpu_replicas_entry(r, e) {
		if (!first)
			pr_buf(out, " ");
		first = false;

		bch2_replicas_entry_to_text(out, e);
	}
}
static void extent_to_replicas(struct bkey_s_c k,
			       struct bch_replicas_entry *r)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	r->nr_required	= 1;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if (p.ptr.cached)
			continue;

		if (p.ec_nr) {
			r->nr_devs = 0;
			break;
		}

		r->devs[r->nr_devs++] = p.ptr.dev;
	}
}
static void stripe_to_replicas(struct bkey_s_c k,
			       struct bch_replicas_entry *r)
{
	struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
	const struct bch_extent_ptr *ptr;

	r->nr_required	= s.v->nr_blocks - s.v->nr_redundant;

	for (ptr = s.v->ptrs;
	     ptr < s.v->ptrs + s.v->nr_blocks;
	     ptr++)
		r->devs[r->nr_devs++] = ptr->dev;
}
void bch2_bkey_to_replicas(struct bch_replicas_entry *e,
			   struct bkey_s_c k)
{
	e->nr_devs = 0;

	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
		e->data_type = BCH_DATA_BTREE;
		extent_to_replicas(k, e);
		break;
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		e->data_type = BCH_DATA_USER;
		extent_to_replicas(k, e);
		break;
	case KEY_TYPE_stripe:
		e->data_type = BCH_DATA_USER;
		stripe_to_replicas(k, e);
		break;
	}

	replicas_entry_sort(e);
}
void bch2_devlist_to_replicas(struct bch_replicas_entry *e,
			      enum bch_data_type data_type,
			      struct bch_devs_list devs)
{
	unsigned i;

	BUG_ON(!data_type ||
	       data_type == BCH_DATA_SB ||
	       data_type >= BCH_DATA_NR);

	e->data_type	= data_type;
	e->nr_devs	= 0;
	e->nr_required	= 1;

	for (i = 0; i < devs.nr; i++)
		e->devs[e->nr_devs++] = devs.devs[i];

	replicas_entry_sort(e);
}
static struct bch_replicas_cpu
cpu_replicas_add_entry(struct bch_replicas_cpu *old,
		       struct bch_replicas_entry *new_entry)
{
	unsigned i;
	struct bch_replicas_cpu new = {
		.nr		= old->nr + 1,
		.entry_size	= max_t(unsigned, old->entry_size,
					replicas_entry_bytes(new_entry)),
	};

	BUG_ON(!new_entry->data_type);
	verify_replicas_entry_sorted(new_entry);

	new.entries = kcalloc(new.nr, new.entry_size, GFP_NOIO);
	if (!new.entries)
		return new;

	for (i = 0; i < old->nr; i++)
		memcpy(cpu_replicas_entry(&new, i),
		       cpu_replicas_entry(old, i),
		       old->entry_size);

	memcpy(cpu_replicas_entry(&new, old->nr),
	       new_entry,
	       replicas_entry_bytes(new_entry));

	bch2_cpu_replicas_sort(&new);
	return new;
}
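/*
 * Entries are zero padded out to r->entry_size (the table is allocated with
 * kcalloc()), so lookups can memcmp() whole entries; a search entry wider
 * than r->entry_size cannot possibly be present.
 */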
static inline int __replicas_entry_idx(struct bch_replicas_cpu *r,
				       struct bch_replicas_entry *search)
{
	int idx, entry_size = replicas_entry_bytes(search);

	if (unlikely(entry_size > r->entry_size))
		return -1;

	verify_replicas_entry_sorted(search);

#define entry_cmp(_l, _r, size)	memcmp(_l, _r, entry_size)
	idx = eytzinger0_find(r->entries, r->nr, r->entry_size,
			      entry_cmp, search);
#undef entry_cmp

	return idx < r->nr ? idx : -1;
}
int bch2_replicas_entry_idx(struct bch_fs *c,
			    struct bch_replicas_entry *search)
{
	replicas_entry_sort(search);

	return __replicas_entry_idx(&c->replicas, search);
}
static bool __replicas_has_entry(struct bch_replicas_cpu *r,
				 struct bch_replicas_entry *search)
{
	return __replicas_entry_idx(r, search) >= 0;
}
static bool bch2_replicas_marked_locked(struct bch_fs *c,
					struct bch_replicas_entry *search,
					bool check_gc_replicas)
{
	if (!search->nr_devs)
		return true;

	verify_replicas_entry_sorted(search);

	return __replicas_has_entry(&c->replicas, search) &&
		(!check_gc_replicas ||
		 likely(!c->replicas_gc.entries) ||
		 __replicas_has_entry(&c->replicas_gc, search));
}
bool bch2_replicas_marked(struct bch_fs *c,
			  struct bch_replicas_entry *search,
			  bool check_gc_replicas)
{
	bool marked;

	percpu_down_read(&c->mark_lock);
	marked = bch2_replicas_marked_locked(c, search, check_gc_replicas);
	percpu_up_read(&c->mark_lock);

	return marked;
}
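/*
 * When the replicas table is resized, every existing usage counter has to be
 * carried over: each nonzero counter in the old table is looked up in the new
 * table and copied to the (possibly different) index it sorts to there.
 */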
static void __replicas_table_update(struct bch_fs_usage *dst,
				    struct bch_replicas_cpu *dst_r,
				    struct bch_fs_usage *src,
				    struct bch_replicas_cpu *src_r)
{
	int src_idx, dst_idx;

	*dst = *src;

	for (src_idx = 0; src_idx < src_r->nr; src_idx++) {
		if (!src->replicas[src_idx])
			continue;

		dst_idx = __replicas_entry_idx(dst_r,
				cpu_replicas_entry(src_r, src_idx));
		BUG_ON(dst_idx < 0);

		dst->replicas[dst_idx] = src->replicas[src_idx];
	}
}
static void __replicas_table_update_pcpu(struct bch_fs_usage __percpu *dst_p,
					 struct bch_replicas_cpu *dst_r,
					 struct bch_fs_usage __percpu *src_p,
					 struct bch_replicas_cpu *src_r)
{
	unsigned src_nr = sizeof(struct bch_fs_usage) / sizeof(u64) + src_r->nr;
	struct bch_fs_usage *dst, *src = (void *)
		bch2_acc_percpu_u64s((void *) src_p, src_nr);

	preempt_disable();
	dst = this_cpu_ptr(dst_p);
	preempt_enable();

	__replicas_table_update(dst, dst_r, src, src_r);
}
/*
 * Resize filesystem accounting:
 */
static int replicas_table_update(struct bch_fs *c,
				 struct bch_replicas_cpu *new_r)
{
	struct bch_fs_usage __percpu *new_usage[2] = { NULL, NULL };
	struct bch_fs_usage *new_scratch = NULL;
	struct bch_fs_usage __percpu *new_gc = NULL;
	struct bch_fs_usage *new_base = NULL;
	unsigned bytes = sizeof(struct bch_fs_usage) +
		sizeof(u64) * new_r->nr;
	int ret = -ENOMEM;

	if (!(new_base = kzalloc(bytes, GFP_NOIO)) ||
	    !(new_usage[0] = __alloc_percpu_gfp(bytes, sizeof(u64),
						GFP_NOIO)) ||
	    !(new_usage[1] = __alloc_percpu_gfp(bytes, sizeof(u64),
						GFP_NOIO)) ||
	    !(new_scratch  = kmalloc(bytes, GFP_NOIO)) ||
	    (c->usage_gc &&
	     !(new_gc = __alloc_percpu_gfp(bytes, sizeof(u64), GFP_NOIO))))
		goto err;

	if (c->usage_base)
		__replicas_table_update(new_base, new_r,
					c->usage_base, &c->replicas);
	if (c->usage[0])
		__replicas_table_update_pcpu(new_usage[0], new_r,
					     c->usage[0], &c->replicas);
	if (c->usage[1])
		__replicas_table_update_pcpu(new_usage[1], new_r,
					     c->usage[1], &c->replicas);
	if (c->usage_gc)
		__replicas_table_update_pcpu(new_gc, new_r,
					     c->usage_gc, &c->replicas);

	swap(c->usage_base,	new_base);
	swap(c->usage[0],	new_usage[0]);
	swap(c->usage[1],	new_usage[1]);
	swap(c->usage_scratch,	new_scratch);
	swap(c->usage_gc,	new_gc);
	swap(c->replicas,	*new_r);
	ret = 0;
err:
	free_percpu(new_gc);
	kfree(new_scratch);
	free_percpu(new_usage[1]);
	free_percpu(new_usage[0]);
	kfree(new_base);
	return ret;
}
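/*
 * Journal writes carry filesystem usage entries (jset_entry_usage,
 * jset_entry_data_usage); compute the reservation, in u64s, they need for a
 * given replicas table.
 */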
static unsigned reserve_journal_replicas(struct bch_fs *c,
					 struct bch_replicas_cpu *r)
{
	struct bch_replicas_entry *e;
	unsigned journal_res_u64s = 0;

	/* nr_inodes: */
	journal_res_u64s +=
		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));

	/* key_version: */
	journal_res_u64s +=
		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));

	/* persistent_reserved: */
	journal_res_u64s +=
		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64)) *
		BCH_REPLICAS_MAX;

	for_each_cpu_replicas_entry(r, e)
		journal_res_u64s +=
			DIV_ROUND_UP(sizeof(struct jset_entry_data_usage) +
				     e->nr_devs, sizeof(u64));
	return journal_res_u64s;
}
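/*
 * Slow path for marking a new replicas entry: under sb_lock, add it to the
 * superblock's replicas section and grow the journal reservation; the
 * in-memory tables are only swapped in after the superblock write, so memory
 * never claims an entry the on-disk superblock doesn't have.
 */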
static int bch2_mark_replicas_slowpath(struct bch_fs *c,
				struct bch_replicas_entry *new_entry)
{
	struct bch_replicas_cpu new_r, new_gc;
	int ret = -ENOMEM;

	memset(&new_r, 0, sizeof(new_r));
	memset(&new_gc, 0, sizeof(new_gc));

	mutex_lock(&c->sb_lock);

	if (c->replicas_gc.entries &&
	    !__replicas_has_entry(&c->replicas_gc, new_entry)) {
		new_gc = cpu_replicas_add_entry(&c->replicas_gc, new_entry);
		if (!new_gc.entries)
			goto err;
	}

	if (!__replicas_has_entry(&c->replicas, new_entry)) {
		new_r = cpu_replicas_add_entry(&c->replicas, new_entry);
		if (!new_r.entries)
			goto err;

		ret = bch2_cpu_replicas_to_sb_replicas(c, &new_r);
		if (ret)
			goto err;

		bch2_journal_entry_res_resize(&c->journal,
				&c->replicas_journal_res,
				reserve_journal_replicas(c, &new_r));
	}

	if (!new_r.entries &&
	    !new_gc.entries)
		goto out;

	/* allocations done, now commit: */

	if (new_r.entries)
		bch2_write_super(c);

	/* don't update in memory replicas until changes are persistent */
	percpu_down_write(&c->mark_lock);
	if (new_r.entries)
		ret = replicas_table_update(c, &new_r);
	if (new_gc.entries)
		swap(new_gc, c->replicas_gc);
	percpu_up_write(&c->mark_lock);
out:
	ret = 0;
err:
	mutex_unlock(&c->sb_lock);

	kfree(new_r.entries);
	kfree(new_gc.entries);

	return ret;
}
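/*
 * Fast path: if the entry is already marked this is just an eytzinger lookup
 * under mark_lock; only genuinely new entries take the slow path above.
 */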
int bch2_mark_replicas(struct bch_fs *c,
		       struct bch_replicas_entry *r)
{
	return likely(bch2_replicas_marked(c, r, true))
		? 0
		: bch2_mark_replicas_slowpath(c, r);
}
bool bch2_bkey_replicas_marked_locked(struct bch_fs *c,
				      struct bkey_s_c k,
				      bool check_gc_replicas)
{
	struct bch_replicas_padded search;
	struct bch_devs_list cached = bch2_bkey_cached_devs(k);
	unsigned i;

	for (i = 0; i < cached.nr; i++) {
		bch2_replicas_entry_cached(&search.e, cached.devs[i]);

		if (!bch2_replicas_marked_locked(c, &search.e,
						 check_gc_replicas))
			return false;
	}

	bch2_bkey_to_replicas(&search.e, k);

	return bch2_replicas_marked_locked(c, &search.e, check_gc_replicas);
}
bool bch2_bkey_replicas_marked(struct bch_fs *c,
			       struct bkey_s_c k,
			       bool check_gc_replicas)
{
	bool marked;

	percpu_down_read(&c->mark_lock);
	marked = bch2_bkey_replicas_marked_locked(c, k, check_gc_replicas);
	percpu_up_read(&c->mark_lock);

	return marked;
}
int bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
{
	struct bch_replicas_padded search;
	struct bch_devs_list cached = bch2_bkey_cached_devs(k);
	unsigned i;
	int ret;

	for (i = 0; i < cached.nr; i++) {
		bch2_replicas_entry_cached(&search.e, cached.devs[i]);

		ret = bch2_mark_replicas(c, &search.e);
		if (ret)
			return ret;
	}

	bch2_bkey_to_replicas(&search.e, k);

	return bch2_mark_replicas(c, &search.e);
}
int bch2_replicas_gc_end(struct bch_fs *c, int ret)
{
	unsigned i;

	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);
	percpu_down_write(&c->mark_lock);

	/*
	 * this is kind of crappy; the replicas gc mechanism needs to be ripped
	 * out
	 */

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);
		struct bch_replicas_cpu n;

		if (!__replicas_has_entry(&c->replicas_gc, e) &&
		    (c->usage_base->replicas[i] ||
		     percpu_u64_get(&c->usage[0]->replicas[i]) ||
		     percpu_u64_get(&c->usage[1]->replicas[i]))) {
			n = cpu_replicas_add_entry(&c->replicas_gc, e);
			if (!n.entries) {
				ret = -ENOSPC;
				goto err;
			}

			swap(n, c->replicas_gc);
			kfree(n.entries);
		}
	}

	if (bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc)) {
		ret = -ENOSPC;
		goto err;
	}

	ret = replicas_table_update(c, &c->replicas_gc);
err:
	kfree(c->replicas_gc.entries);
	c->replicas_gc.entries = NULL;

	percpu_up_write(&c->mark_lock);

	if (!ret)
		bch2_write_super(c);

	mutex_unlock(&c->sb_lock);

	return ret;
}
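/*
 * Replicas gc protocol: gc_start() seeds c->replicas_gc with every entry
 * whose data type is *not* in typemask; while gc runs, marking adds entries
 * back to the gc set, and gc_end() above installs the pruned table, keeping
 * any entry still referenced by usage counters.
 */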
int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
{
	struct bch_replicas_entry *e;
	unsigned i = 0;

	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);
	BUG_ON(c->replicas_gc.entries);

	c->replicas_gc.nr		= 0;
	c->replicas_gc.entry_size	= 0;

	for_each_cpu_replicas_entry(&c->replicas, e)
		if (!((1 << e->data_type) & typemask)) {
			c->replicas_gc.nr++;
			c->replicas_gc.entry_size =
				max_t(unsigned, c->replicas_gc.entry_size,
				      replicas_entry_bytes(e));
		}

	c->replicas_gc.entries = kcalloc(c->replicas_gc.nr,
					 c->replicas_gc.entry_size,
					 GFP_NOIO);
	if (!c->replicas_gc.entries) {
		mutex_unlock(&c->sb_lock);
		return -ENOMEM;
	}

	for_each_cpu_replicas_entry(&c->replicas, e)
		if (!((1 << e->data_type) & typemask))
			memcpy(cpu_replicas_entry(&c->replicas_gc, i++),
			       e, c->replicas_gc.entry_size);

	bch2_cpu_replicas_sort(&c->replicas_gc);
	mutex_unlock(&c->sb_lock);

	return 0;
}
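/*
 * Alternate gc that needs no start/end bracketing: after a journal write to
 * flush pending usage updates, drop every entry whose usage counters are all
 * zero (journal entries are always kept). The new table is sized before
 * taking locks, retrying if the table changed in the meantime.
 */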
int bch2_replicas_gc2(struct bch_fs *c)
{
	struct bch_replicas_cpu new = { 0 };
	unsigned i, nr;
	int ret = 0;

	bch2_journal_meta(&c->journal);
retry:
	nr		= READ_ONCE(c->replicas.nr);
	new.entry_size	= READ_ONCE(c->replicas.entry_size);
	new.entries	= kcalloc(nr, new.entry_size, GFP_KERNEL);
	if (!new.entries)
		return -ENOMEM;

	mutex_lock(&c->sb_lock);
	percpu_down_write(&c->mark_lock);

	if (nr			!= c->replicas.nr ||
	    new.entry_size	!= c->replicas.entry_size) {
		percpu_up_write(&c->mark_lock);
		mutex_unlock(&c->sb_lock);
		kfree(new.entries);
		goto retry;
	}

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);

		if (e->data_type == BCH_DATA_JOURNAL ||
		    c->usage_base->replicas[i] ||
		    percpu_u64_get(&c->usage[0]->replicas[i]) ||
		    percpu_u64_get(&c->usage[1]->replicas[i]))
			memcpy(cpu_replicas_entry(&new, new.nr++),
			       e, new.entry_size);
	}

	bch2_cpu_replicas_sort(&new);

	if (bch2_cpu_replicas_to_sb_replicas(c, &new)) {
		ret = -ENOSPC;
		goto err;
	}

	ret = replicas_table_update(c, &new);
err:
	kfree(new.entries);

	percpu_up_write(&c->mark_lock);

	if (!ret)
		bch2_write_super(c);

	mutex_unlock(&c->sb_lock);

	return ret;
}
int bch2_replicas_set_usage(struct bch_fs *c,
			    struct bch_replicas_entry *r,
			    u64 sectors)
{
	int ret, idx = bch2_replicas_entry_idx(c, r);

	if (idx < 0) {
		struct bch_replicas_cpu n;

		n = cpu_replicas_add_entry(&c->replicas, r);
		if (!n.entries)
			return -ENOMEM;

		ret = replicas_table_update(c, &n);
		if (ret)
			return ret;

		kfree(n.entries);

		idx = bch2_replicas_entry_idx(c, r);
		BUG_ON(idx < 0);
	}

	c->usage_base->replicas[idx] = sectors;

	return 0;
}
/* Replicas tracking - superblock: */
static int
__bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r,
				   struct bch_replicas_cpu *cpu_r)
{
	struct bch_replicas_entry *e, *dst;
	unsigned nr = 0, entry_size = 0, idx = 0;

	for_each_replicas_entry(sb_r, e) {
		entry_size = max_t(unsigned, entry_size,
				   replicas_entry_bytes(e));
		nr++;
	}

	cpu_r->entries = kcalloc(nr, entry_size, GFP_NOIO);
	if (!cpu_r->entries)
		return -ENOMEM;

	cpu_r->nr		= nr;
	cpu_r->entry_size	= entry_size;

	for_each_replicas_entry(sb_r, e) {
		dst = cpu_replicas_entry(cpu_r, idx++);
		memcpy(dst, e, replicas_entry_bytes(e));
		replicas_entry_sort(dst);
	}

	return 0;
}
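/*
 * v0 replicas entries lack the nr_required field: when converting to the
 * in-memory format each entry grows by the size difference and nr_required
 * defaults to 1.
 */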
static int
__bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
				      struct bch_replicas_cpu *cpu_r)
{
	struct bch_replicas_entry_v0 *e;
	unsigned nr = 0, entry_size = 0, idx = 0;

	for_each_replicas_entry(sb_r, e) {
		entry_size = max_t(unsigned, entry_size,
				   replicas_entry_bytes(e));
		nr++;
	}

	entry_size += sizeof(struct bch_replicas_entry) -
		sizeof(struct bch_replicas_entry_v0);

	cpu_r->entries = kcalloc(nr, entry_size, GFP_NOIO);
	if (!cpu_r->entries)
		return -ENOMEM;

	cpu_r->nr		= nr;
	cpu_r->entry_size	= entry_size;

	for_each_replicas_entry(sb_r, e) {
		struct bch_replicas_entry *dst =
			cpu_replicas_entry(cpu_r, idx++);

		dst->data_type	= e->data_type;
		dst->nr_devs	= e->nr_devs;
		dst->nr_required = 1;
		memcpy(dst->devs, e->devs, e->nr_devs);
		replicas_entry_sort(dst);
	}

	return 0;
}
int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
{
	struct bch_sb_field_replicas *sb_v1;
	struct bch_sb_field_replicas_v0 *sb_v0;
	struct bch_replicas_cpu new_r = { 0, 0, NULL };
	int ret = 0;

	if ((sb_v1 = bch2_sb_get_replicas(c->disk_sb.sb)))
		ret = __bch2_sb_replicas_to_cpu_replicas(sb_v1, &new_r);
	else if ((sb_v0 = bch2_sb_get_replicas_v0(c->disk_sb.sb)))
		ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_v0, &new_r);

	if (ret)
		return -ENOMEM;

	bch2_cpu_replicas_sort(&new_r);

	percpu_down_write(&c->mark_lock);

	ret = replicas_table_update(c, &new_r);
	percpu_up_write(&c->mark_lock);

	kfree(new_r.entries);

	return ret;
}
static int bch2_cpu_replicas_to_sb_replicas_v0(struct bch_fs *c,
					       struct bch_replicas_cpu *r)
{
	struct bch_sb_field_replicas_v0 *sb_r;
	struct bch_replicas_entry_v0 *dst;
	struct bch_replicas_entry *src;
	size_t bytes;

	bytes = sizeof(struct bch_sb_field_replicas);

	for_each_cpu_replicas_entry(r, src)
		bytes += replicas_entry_bytes(src) - 1;

	sb_r = bch2_sb_resize_replicas_v0(&c->disk_sb,
			DIV_ROUND_UP(bytes, sizeof(u64)));
	if (!sb_r)
		return -ENOSPC;

	bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas);
	sb_r = bch2_sb_get_replicas_v0(c->disk_sb.sb);

	memset(&sb_r->entries, 0,
	       vstruct_end(&sb_r->field) -
	       (void *) &sb_r->entries);

	dst = sb_r->entries;
	for_each_cpu_replicas_entry(r, src) {
		dst->data_type	= src->data_type;
		dst->nr_devs	= src->nr_devs;
		memcpy(dst->devs, src->devs, src->nr_devs);

		dst = replicas_entry_next(dst);

		BUG_ON((void *) dst > vstruct_end(&sb_r->field));
	}

	return 0;
}
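/*
 * Writing out the replicas section: the compact v0 format is used whenever
 * every entry has nr_required == 1 (the common case); otherwise the v1
 * format, which encodes nr_required, is needed. Whichever format is written,
 * the other section is deleted so the superblock never carries both.
 */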
static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
					    struct bch_replicas_cpu *r)
{
	struct bch_sb_field_replicas *sb_r;
	struct bch_replicas_entry *dst, *src;
	bool need_v1 = false;
	size_t bytes;

	bytes = sizeof(struct bch_sb_field_replicas);

	for_each_cpu_replicas_entry(r, src) {
		bytes += replicas_entry_bytes(src);
		if (src->nr_required != 1)
			need_v1 = true;
	}

	if (!need_v1)
		return bch2_cpu_replicas_to_sb_replicas_v0(c, r);

	sb_r = bch2_sb_resize_replicas(&c->disk_sb,
			DIV_ROUND_UP(bytes, sizeof(u64)));
	if (!sb_r)
		return -ENOSPC;

	bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas_v0);
	sb_r = bch2_sb_get_replicas(c->disk_sb.sb);

	memset(&sb_r->entries, 0,
	       vstruct_end(&sb_r->field) -
	       (void *) &sb_r->entries);

	dst = sb_r->entries;
	for_each_cpu_replicas_entry(r, src) {
		memcpy(dst, src, replicas_entry_bytes(src));

		dst = replicas_entry_next(dst);

		BUG_ON((void *) dst > vstruct_end(&sb_r->field));
	}

	return 0;
}
static const char *check_dup_replicas_entries(struct bch_replicas_cpu *cpu_r)
{
	unsigned i;

	sort_cmp_size(cpu_r->entries,
		      cpu_r->nr,
		      cpu_r->entry_size,
		      memcmp, NULL);

	for (i = 0; i + 1 < cpu_r->nr; i++) {
		struct bch_replicas_entry *l =
			cpu_replicas_entry(cpu_r, i);
		struct bch_replicas_entry *r =
			cpu_replicas_entry(cpu_r, i + 1);

		BUG_ON(memcmp(l, r, cpu_r->entry_size) > 0);

		if (!memcmp(l, r, cpu_r->entry_size))
			return "duplicate replicas entry";
	}

	return NULL;
}
static const char *bch2_sb_validate_replicas(struct bch_sb *sb, struct bch_sb_field *f)
{
	struct bch_sb_field_replicas *sb_r = field_to_type(f, replicas);
	struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
	struct bch_replicas_cpu cpu_r = { .entries = NULL };
	struct bch_replicas_entry *e;
	const char *err;
	unsigned i;

	for_each_replicas_entry(sb_r, e) {
		err = "invalid replicas entry: invalid data type";
		if (e->data_type >= BCH_DATA_NR)
			goto err;

		err = "invalid replicas entry: no devices";
		if (!e->nr_devs)
			goto err;

		err = "invalid replicas entry: bad nr_required";
		if (!e->nr_required ||
		    (e->nr_required > 1 &&
		     e->nr_required >= e->nr_devs))
			goto err;

		err = "invalid replicas entry: invalid device";
		for (i = 0; i < e->nr_devs; i++)
			if (!bch2_dev_exists(sb, mi, e->devs[i]))
				goto err;
	}

	err = "cannot allocate memory";
	if (__bch2_sb_replicas_to_cpu_replicas(sb_r, &cpu_r))
		goto err;

	err = check_dup_replicas_entries(&cpu_r);
err:
	kfree(cpu_r.entries);
	return err;
}
static void bch2_sb_replicas_to_text(struct printbuf *out,
				     struct bch_sb *sb,
				     struct bch_sb_field *f)
{
	struct bch_sb_field_replicas *r = field_to_type(f, replicas);
	struct bch_replicas_entry *e;
	bool first = true;

	for_each_replicas_entry(r, e) {
		if (!first)
			pr_buf(out, " ");
		first = false;

		bch2_replicas_entry_to_text(out, e);
	}
}
const struct bch_sb_field_ops bch_sb_field_ops_replicas = {
	.validate	= bch2_sb_validate_replicas,
	.to_text	= bch2_sb_replicas_to_text,
};
static const char *bch2_sb_validate_replicas_v0(struct bch_sb *sb, struct bch_sb_field *f)
{
	struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
	struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
	struct bch_replicas_cpu cpu_r = { .entries = NULL };
	struct bch_replicas_entry_v0 *e;
	const char *err;
	unsigned i;

	for_each_replicas_entry_v0(sb_r, e) {
		err = "invalid replicas entry: invalid data type";
		if (e->data_type >= BCH_DATA_NR)
			goto err;

		err = "invalid replicas entry: no devices";
		if (!e->nr_devs)
			goto err;

		err = "invalid replicas entry: invalid device";
		for (i = 0; i < e->nr_devs; i++)
			if (!bch2_dev_exists(sb, mi, e->devs[i]))
				goto err;
	}

	err = "cannot allocate memory";
	if (__bch2_sb_replicas_v0_to_cpu_replicas(sb_r, &cpu_r))
		goto err;

	err = check_dup_replicas_entries(&cpu_r);
err:
	kfree(cpu_r.entries);
	return err;
}
const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0 = {
	.validate	= bch2_sb_validate_replicas_v0,
};
/* Query replicas: */
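/*
 * For each data type, redundancy is the minimum over that type's replicas
 * entries of nr_online - nr_required: e.g. an entry on 3 devices with
 * nr_required 1 and one device offline gives redundancy 1; negative
 * redundancy means some data is currently unavailable.
 */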
struct replicas_status __bch2_replicas_status(struct bch_fs *c,
					      struct bch_devs_mask online_devs)
{
	struct bch_sb_field_members *mi;
	struct bch_replicas_entry *e;
	unsigned i, nr_online, nr_offline;
	struct replicas_status ret;

	memset(&ret, 0, sizeof(ret));

	for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
		ret.replicas[i].redundancy = INT_MAX;

	mi = bch2_sb_get_members(c->disk_sb.sb);

	percpu_down_read(&c->mark_lock);

	for_each_cpu_replicas_entry(&c->replicas, e) {
		if (e->data_type >= ARRAY_SIZE(ret.replicas))
			panic("e %p data_type %u\n", e, e->data_type);

		nr_online = nr_offline = 0;

		for (i = 0; i < e->nr_devs; i++) {
			BUG_ON(!bch2_dev_exists(c->disk_sb.sb, mi,
						e->devs[i]));

			if (test_bit(e->devs[i], online_devs.d))
				nr_online++;
			else
				nr_offline++;
		}

		ret.replicas[e->data_type].redundancy =
			min(ret.replicas[e->data_type].redundancy,
			    (int) nr_online - (int) e->nr_required);

		ret.replicas[e->data_type].nr_offline =
			max(ret.replicas[e->data_type].nr_offline,
			    nr_offline);
	}

	percpu_up_read(&c->mark_lock);

	for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
		if (ret.replicas[i].redundancy == INT_MAX)
			ret.replicas[i].redundancy = 0;

	return ret;
}
struct replicas_status bch2_replicas_status(struct bch_fs *c)
{
	return __bch2_replicas_status(c, bch2_online_devs(c));
}
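/*
 * The BCH_FORCE_IF_*_DEGRADED flags allow proceeding with devices offline;
 * the BCH_FORCE_IF_*_LOST flags additionally allow proceeding when some data
 * is (currently) unavailable, i.e. redundancy has gone negative.
 */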
static bool have_enough_devs(struct replicas_status s,
			     enum bch_data_type type,
			     bool force_if_degraded,
			     bool force_if_lost)
{
	return (!s.replicas[type].nr_offline || force_if_degraded) &&
		(s.replicas[type].redundancy >= 0 || force_if_lost);
}
bool bch2_have_enough_devs(struct replicas_status s, unsigned flags)
{
	return (have_enough_devs(s, BCH_DATA_JOURNAL,
				 flags & BCH_FORCE_IF_METADATA_DEGRADED,
				 flags & BCH_FORCE_IF_METADATA_LOST) &&
		have_enough_devs(s, BCH_DATA_BTREE,
				 flags & BCH_FORCE_IF_METADATA_DEGRADED,
				 flags & BCH_FORCE_IF_METADATA_LOST) &&
		have_enough_devs(s, BCH_DATA_USER,
				 flags & BCH_FORCE_IF_DATA_DEGRADED,
				 flags & BCH_FORCE_IF_DATA_LOST));
}
int bch2_replicas_online(struct bch_fs *c, bool meta)
{
	struct replicas_status s = bch2_replicas_status(c);

	return (meta
		? min(s.replicas[BCH_DATA_JOURNAL].redundancy,
		      s.replicas[BCH_DATA_BTREE].redundancy)
		: s.replicas[BCH_DATA_USER].redundancy) + 1;
}
unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
{
	struct bch_replicas_entry *e;
	unsigned i, ret = 0;

	percpu_down_read(&c->mark_lock);

	for_each_cpu_replicas_entry(&c->replicas, e)
		for (i = 0; i < e->nr_devs; i++)
			if (e->devs[i] == ca->dev_idx)
				ret |= 1 << e->data_type;

	percpu_up_read(&c->mark_lock);

	return ret;
}
int bch2_fs_replicas_init(struct bch_fs *c)
{
	c->journal.entry_u64s_reserved +=
		reserve_journal_replicas(c, &c->replicas);

	return replicas_table_update(c, &c->replicas);
}