static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
					    struct bch_replicas_cpu *);

/* Replicas tracking - in memory: */
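/*
 * In memory, the replicas table (struct bch_replicas_cpu) is one flat
 * allocation of ->nr entries, each padded out to ->entry_size bytes (the size
 * of the largest entry), so entry i lives at entries + i * entry_size.  The
 * table is kept in eytzinger (breadth-first) order so it can be binary
 * searched with eytzinger0_find() below.
 */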
static inline int u8_cmp(u8 l, u8 r)
{
	return (l > r) - (l < r);
}

static void verify_replicas_entry_sorted(struct bch_replicas_entry *e)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	unsigned i;

	for (i = 0; i + 1 < e->nr_devs; i++)
		BUG_ON(e->devs[i] >= e->devs[i + 1]);
#endif
}
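/*
 * Replicas entries keep their device list sorted (bubble_sort is fine here,
 * nr_devs is tiny), and the table as a whole is kept in eytzinger order; the
 * lookup helpers further down rely on both invariants.
 */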
static void replicas_entry_sort(struct bch_replicas_entry *e)
{
	bubble_sort(e->devs, e->nr_devs, u8_cmp);
}

static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
{
	eytzinger0_sort(r->entries, r->nr, r->entry_size, memcmp, NULL);
}
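/*
 * Human-readable form of a replicas entry: roughly
 * "<data type>: <required>/<nr devs> [<dev> <dev> ...]",
 * e.g. something like "user: 1/2 [0 3]" for user data replicated on
 * devices 0 and 3.
 */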
void bch2_replicas_entry_to_text(struct printbuf *out,
				 struct bch_replicas_entry *e)
	pr_buf(out, "%s: %u/%u [",
	       bch2_data_types[e->data_type],

	for (i = 0; i < e->nr_devs; i++)
		pr_buf(out, i ? " %u" : "%u", e->devs[i]);

void bch2_cpu_replicas_to_text(struct printbuf *out,
			       struct bch_replicas_cpu *r)
	struct bch_replicas_entry *e;

	for_each_cpu_replicas_entry(r, e) {
		bch2_replicas_entry_to_text(out, e);
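/*
 * Helpers for building a replicas entry from a key: extents contribute the
 * device of each non-cached pointer (cached replicas are tracked separately
 * by the callers, via bch2_bkey_cached_devs()); erasure coded stripes
 * contribute every block's device, with nr_required reflecting how many
 * blocks are needed to reconstruct the data (nr_blocks - nr_redundant).
 */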
static void extent_to_replicas(struct bkey_s_c k,
			       struct bch_replicas_entry *r)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		r->devs[r->nr_devs++] = p.ptr.dev;

static void stripe_to_replicas(struct bkey_s_c k,
			       struct bch_replicas_entry *r)
	struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
	const struct bch_extent_ptr *ptr;

	r->nr_required = s.v->nr_blocks - s.v->nr_redundant;

	     ptr < s.v->ptrs + s.v->nr_blocks;
		r->devs[r->nr_devs++] = ptr->dev;
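/*
 * bkey_to_replicas() maps a key to the replicas entry it requires: btree
 * pointers count as BCH_DATA_BTREE, extents and stripes as BCH_DATA_USER.
 */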
static void bkey_to_replicas(struct bch_replicas_entry *e,
			     struct bkey_s_c k)
{
	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
		e->data_type = BCH_DATA_BTREE;
		extent_to_replicas(k, e);
		break;
	case KEY_TYPE_extent:
		e->data_type = BCH_DATA_USER;
		extent_to_replicas(k, e);
		break;
	case KEY_TYPE_stripe:
		e->data_type = BCH_DATA_USER;
		stripe_to_replicas(k, e);
		break;
	}

	replicas_entry_sort(e);
}
void bch2_devlist_to_replicas(struct bch_replicas_entry *e,
			      enum bch_data_type data_type,
			      struct bch_devs_list devs)
	BUG_ON(!data_type ||
	       data_type == BCH_DATA_SB ||
	       data_type >= BCH_DATA_NR);

	e->data_type = data_type;

	for (i = 0; i < devs.nr; i++)
		e->devs[e->nr_devs++] = devs.devs[i];

	replicas_entry_sort(e);
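/*
 * Returns a copy of the table with new_entry appended: the copy is allocated
 * one entry larger (and with entry_size grown if the new entry is bigger than
 * any existing one), old entries are copied over, and the result is re-sorted
 * into eytzinger order.  On allocation failure the returned table has NULL
 * ->entries, which callers check for.
 */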
static struct bch_replicas_cpu
cpu_replicas_add_entry(struct bch_replicas_cpu *old,
		       struct bch_replicas_entry *new_entry)
	struct bch_replicas_cpu new = {
		.entry_size = max_t(unsigned, old->entry_size,
				    replicas_entry_bytes(new_entry)),

	BUG_ON(!new_entry->data_type);
	verify_replicas_entry_sorted(new_entry);

	new.entries = kcalloc(new.nr, new.entry_size, GFP_NOIO);

	for (i = 0; i < old->nr; i++)
		memcpy(cpu_replicas_entry(&new, i),
		       cpu_replicas_entry(old, i),

	memcpy(cpu_replicas_entry(&new, old->nr),
	       new_entry,
	       replicas_entry_bytes(new_entry));

	bch2_cpu_replicas_sort(&new);
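/*
 * Index of an entry in the table, or -1 if it's not present: an eytzinger
 * binary search using a bytewise comparison of the (sorted) entry.  A search
 * entry larger than the table's entry_size can't possibly be present, so that
 * case bails out early.
 */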
static inline int __replicas_entry_idx(struct bch_replicas_cpu *r,
				       struct bch_replicas_entry *search)
	int idx, entry_size = replicas_entry_bytes(search);

	if (unlikely(entry_size > r->entry_size))
		return -1;

	verify_replicas_entry_sorted(search);

#define entry_cmp(_l, _r, size)	memcmp(_l, _r, entry_size)
	idx = eytzinger0_find(r->entries, r->nr, r->entry_size,

	return idx < r->nr ? idx : -1;

int bch2_replicas_entry_idx(struct bch_fs *c,
			    struct bch_replicas_entry *search)
	replicas_entry_sort(search);

	return __replicas_entry_idx(&c->replicas, search);

static bool __replicas_has_entry(struct bch_replicas_cpu *r,
				 struct bch_replicas_entry *search)
	return __replicas_entry_idx(r, search) >= 0;
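/*
 * "Marked" means the entry is recorded in the in-memory table (and hence the
 * superblock), i.e. it's safe to account and write data with that replication;
 * with check_gc_replicas, the entry must also be present in the GC table if a
 * replicas GC is currently in progress.
 */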
static bool bch2_replicas_marked_locked(struct bch_fs *c,
					struct bch_replicas_entry *search,
					bool check_gc_replicas)
	if (!search->nr_devs)
		return true;

	verify_replicas_entry_sorted(search);

	return __replicas_has_entry(&c->replicas, search) &&
		(!check_gc_replicas ||
		 likely(!c->replicas_gc.entries) ||
		 __replicas_has_entry(&c->replicas_gc, search));

bool bch2_replicas_marked(struct bch_fs *c,
			  struct bch_replicas_entry *search,
			  bool check_gc_replicas)
	percpu_down_read_preempt_disable(&c->mark_lock);
	marked = bch2_replicas_marked_locked(c, search, check_gc_replicas);
	percpu_up_read_preempt_enable(&c->mark_lock);
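/*
 * Filesystem usage counters (c->usage[]) are percpu arrays with one u64 of
 * sector counts per replicas entry, indexed by the entry's position in the
 * table.  When the table is resized, the counters are summed across CPUs and
 * rewritten into the slot each entry occupies in the new table.
 */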
static void __replicas_table_update(struct bch_fs_usage __percpu *dst_p,
				    struct bch_replicas_cpu *dst_r,
				    struct bch_fs_usage __percpu *src_p,
				    struct bch_replicas_cpu *src_r)
	unsigned src_nr = sizeof(struct bch_fs_usage) / sizeof(u64) + src_r->nr;
	struct bch_fs_usage *dst, *src = (void *)
		bch2_acc_percpu_u64s((void *) src_p, src_nr);
	int src_idx, dst_idx;

	dst = this_cpu_ptr(dst_p);

	for (src_idx = 0; src_idx < src_r->nr; src_idx++) {
		if (!src->replicas[src_idx])
			continue;

		dst_idx = __replicas_entry_idx(dst_r,
				cpu_replicas_entry(src_r, src_idx));

		dst->replicas[dst_idx] = src->replicas[src_idx];

/*
 * Resize filesystem accounting:
 */
static int replicas_table_update(struct bch_fs *c,
				 struct bch_replicas_cpu *new_r)
	struct bch_fs_usage __percpu *new_usage[2] = { NULL, NULL };
	struct bch_fs_usage *new_scratch = NULL;
	unsigned bytes = sizeof(struct bch_fs_usage) +
		sizeof(u64) * new_r->nr;

	if (!(new_usage[0] = __alloc_percpu_gfp(bytes, sizeof(u64),
	    !(new_usage[1] = __alloc_percpu_gfp(bytes, sizeof(u64),
	    !(new_scratch = kmalloc(bytes, GFP_NOIO)))

	__replicas_table_update(new_usage[0], new_r,
				c->usage[0], &c->replicas);
	__replicas_table_update(new_usage[1], new_r,
				c->usage[1], &c->replicas);

	swap(c->usage[0], new_usage[0]);
	swap(c->usage[1], new_usage[1]);
	swap(c->usage_scratch, new_scratch);
	swap(c->replicas, *new_r);

	free_percpu(new_usage[1]);
	free_percpu(new_usage[0]);
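/*
 * Size, in u64s, of the journal entries needed to persist the current
 * filesystem usage (including one jset_entry_data_usage per replicas entry);
 * this much space is reserved in the journal so that usage can always be
 * written out.
 */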
static unsigned reserve_journal_replicas(struct bch_fs *c,
					 struct bch_replicas_cpu *r)
	struct bch_replicas_entry *e;
	unsigned journal_res_u64s = 0;

	journal_res_u64s +=
		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));

	journal_res_u64s +=
		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));

	/* persistent_reserved: */
	journal_res_u64s +=
		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64)) *
		BCH_REPLICAS_MAX;

	for_each_cpu_replicas_entry(r, e)
		journal_res_u64s +=
			DIV_ROUND_UP(sizeof(struct jset_entry_data_usage) +
				     e->nr_devs, sizeof(u64));
	return journal_res_u64s;
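/*
 * Slow path for marking a new replicas entry: under sb_lock, the entry is
 * added to the superblock replicas section (and to the GC table if a replicas
 * GC is in flight) and the journal reservation is resized; the in-memory
 * tables are only swapped in afterwards, once the superblock change is
 * persistent.
 *
 * A write path would typically reach this via the wrappers below, roughly
 * (illustrative sketch only, not a verbatim caller):
 *
 *	struct bch_replicas_padded r;
 *
 *	bch2_devlist_to_replicas(&r.e, BCH_DATA_USER, devs);
 *	ret = bch2_mark_replicas(c, &r.e);
 */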
static int bch2_mark_replicas_slowpath(struct bch_fs *c,
				       struct bch_replicas_entry *new_entry)
	struct bch_replicas_cpu new_r, new_gc;

	memset(&new_r, 0, sizeof(new_r));
	memset(&new_gc, 0, sizeof(new_gc));

	mutex_lock(&c->sb_lock);

	if (c->replicas_gc.entries &&
	    !__replicas_has_entry(&c->replicas_gc, new_entry)) {
		new_gc = cpu_replicas_add_entry(&c->replicas_gc, new_entry);

	if (!__replicas_has_entry(&c->replicas, new_entry)) {
		new_r = cpu_replicas_add_entry(&c->replicas, new_entry);

		ret = bch2_cpu_replicas_to_sb_replicas(c, &new_r);

		bch2_journal_entry_res_resize(&c->journal,
				&c->replicas_journal_res,
				reserve_journal_replicas(c, &new_r));

	if (!new_r.entries &&

	/* allocations done, now commit: */

	/* don't update in memory replicas until changes are persistent */
	percpu_down_write(&c->mark_lock);

	ret = replicas_table_update(c, &new_r);
		swap(new_gc, c->replicas_gc);
	percpu_up_write(&c->mark_lock);

	mutex_unlock(&c->sb_lock);

	kfree(new_r.entries);
	kfree(new_gc.entries);
int bch2_mark_replicas(struct bch_fs *c,
		       struct bch_replicas_entry *r)
	return likely(bch2_replicas_marked(c, r, true))
		? 0
		: bch2_mark_replicas_slowpath(c, r);

bool bch2_bkey_replicas_marked_locked(struct bch_fs *c,
				      struct bkey_s_c k,
				      bool check_gc_replicas)
	struct bch_replicas_padded search;
	struct bch_devs_list cached = bch2_bkey_cached_devs(k);

	for (i = 0; i < cached.nr; i++) {
		bch2_replicas_entry_cached(&search.e, cached.devs[i]);

		if (!bch2_replicas_marked_locked(c, &search.e,

	bkey_to_replicas(&search.e, k);

	return bch2_replicas_marked_locked(c, &search.e, check_gc_replicas);

bool bch2_bkey_replicas_marked(struct bch_fs *c,
			       struct bkey_s_c k,
			       bool check_gc_replicas)
	percpu_down_read_preempt_disable(&c->mark_lock);
	marked = bch2_bkey_replicas_marked_locked(c, k, check_gc_replicas);
	percpu_up_read_preempt_enable(&c->mark_lock);

int bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
	struct bch_replicas_padded search;
	struct bch_devs_list cached = bch2_bkey_cached_devs(k);

	for (i = 0; i < cached.nr; i++) {
		bch2_replicas_entry_cached(&search.e, cached.devs[i]);

		ret = bch2_mark_replicas(c, &search.e);

	bkey_to_replicas(&search.e, k);

	return bch2_mark_replicas(c, &search.e);
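/*
 * Replicas GC: bch2_replicas_gc_start() seeds c->replicas_gc with every entry
 * whose data type is *not* in typemask (those are kept unconditionally);
 * while GC is running, marking adds entries to the GC table as well, and
 * bch2_replicas_gc_end() keeps entries of the GC'd types only if they were
 * re-marked or still have sectors accounted to them, then writes the result
 * to the superblock and swaps it in as the new table.
 *
 * A caller would bracket its mark pass roughly like this (illustrative
 * sketch, assuming the caller holds c->replicas_gc_lock as lockdep expects):
 *
 *	ret = bch2_replicas_gc_start(c, 1 << BCH_DATA_USER);
 *	... walk and re-mark live keys with bch2_mark_bkey_replicas() ...
 *	ret = bch2_replicas_gc_end(c, ret);
 */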
int bch2_replicas_gc_end(struct bch_fs *c, int ret)
	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);

	/*
	 * this is kind of crappy; the replicas gc mechanism needs to be ripped
	 */
	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);
		struct bch_replicas_cpu n;

		if (__replicas_has_entry(&c->replicas_gc, e))
			continue;

		v = percpu_u64_get(&c->usage[0]->replicas[i]);

		n = cpu_replicas_add_entry(&c->replicas_gc, e);

		percpu_down_write(&c->mark_lock);
		swap(n, c->replicas_gc);
		percpu_up_write(&c->mark_lock);

	if (bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc)) {

	/* don't update in memory replicas until changes are persistent */
	percpu_down_write(&c->mark_lock);

	ret = replicas_table_update(c, &c->replicas_gc);

	kfree(c->replicas_gc.entries);
	c->replicas_gc.entries = NULL;
	percpu_up_write(&c->mark_lock);

	mutex_unlock(&c->sb_lock);

int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
	struct bch_replicas_entry *e;

	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);
	BUG_ON(c->replicas_gc.entries);

	c->replicas_gc.nr = 0;
	c->replicas_gc.entry_size = 0;

	for_each_cpu_replicas_entry(&c->replicas, e)
		if (!((1 << e->data_type) & typemask)) {
			c->replicas_gc.entry_size =
				max_t(unsigned, c->replicas_gc.entry_size,
				      replicas_entry_bytes(e));

	c->replicas_gc.entries = kcalloc(c->replicas_gc.nr,
					 c->replicas_gc.entry_size,
	if (!c->replicas_gc.entries) {
		mutex_unlock(&c->sb_lock);

	for_each_cpu_replicas_entry(&c->replicas, e)
		if (!((1 << e->data_type) & typemask))
			memcpy(cpu_replicas_entry(&c->replicas_gc, i++),
			       e, c->replicas_gc.entry_size);

	bch2_cpu_replicas_sort(&c->replicas_gc);
	mutex_unlock(&c->sb_lock);
int bch2_replicas_set_usage(struct bch_fs *c,
			    struct bch_replicas_entry *r,
			    u64 sectors)
	int ret, idx = bch2_replicas_entry_idx(c, r);

		struct bch_replicas_cpu n;

		n = cpu_replicas_add_entry(&c->replicas, r);

		ret = replicas_table_update(c, &n);

		idx = bch2_replicas_entry_idx(c, r);

	percpu_u64_set(&c->usage[0]->replicas[idx], sectors);

/* Replicas tracking - superblock: */
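/*
 * On disk there are two versions of the replicas section: the current format
 * (bch_sb_field_replicas), whose entries carry nr_required, and the older v0
 * format (bch_sb_field_replicas_v0), whose entries don't.  When a v0 section
 * is read in, nr_required defaults to 1; when writing out, the v0 format is
 * used unless some entry actually needs nr_required != 1.
 */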
__bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r,
				   struct bch_replicas_cpu *cpu_r)
	struct bch_replicas_entry *e, *dst;
	unsigned nr = 0, entry_size = 0, idx = 0;

	for_each_replicas_entry(sb_r, e) {
		entry_size = max_t(unsigned, entry_size,
				   replicas_entry_bytes(e));

	cpu_r->entries = kcalloc(nr, entry_size, GFP_NOIO);

	cpu_r->entry_size = entry_size;

	for_each_replicas_entry(sb_r, e) {
		dst = cpu_replicas_entry(cpu_r, idx++);
		memcpy(dst, e, replicas_entry_bytes(e));
		replicas_entry_sort(dst);

__bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
				      struct bch_replicas_cpu *cpu_r)
	struct bch_replicas_entry_v0 *e;
	unsigned nr = 0, entry_size = 0, idx = 0;

	for_each_replicas_entry(sb_r, e) {
		entry_size = max_t(unsigned, entry_size,
				   replicas_entry_bytes(e));

	entry_size += sizeof(struct bch_replicas_entry) -
		sizeof(struct bch_replicas_entry_v0);

	cpu_r->entries = kcalloc(nr, entry_size, GFP_NOIO);

	cpu_r->entry_size = entry_size;

	for_each_replicas_entry(sb_r, e) {
		struct bch_replicas_entry *dst =
			cpu_replicas_entry(cpu_r, idx++);

		dst->data_type = e->data_type;
		dst->nr_devs = e->nr_devs;
		dst->nr_required = 1;
		memcpy(dst->devs, e->devs, e->nr_devs);
		replicas_entry_sort(dst);

int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
	struct bch_sb_field_replicas *sb_v1;
	struct bch_sb_field_replicas_v0 *sb_v0;
	struct bch_replicas_cpu new_r = { 0, 0, NULL };

	if ((sb_v1 = bch2_sb_get_replicas(c->disk_sb.sb)))
		ret = __bch2_sb_replicas_to_cpu_replicas(sb_v1, &new_r);
	else if ((sb_v0 = bch2_sb_get_replicas_v0(c->disk_sb.sb)))
		ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_v0, &new_r);

	bch2_cpu_replicas_sort(&new_r);

	percpu_down_write(&c->mark_lock);

	ret = replicas_table_update(c, &new_r);
	percpu_up_write(&c->mark_lock);

	kfree(new_r.entries);

static int bch2_cpu_replicas_to_sb_replicas_v0(struct bch_fs *c,
					       struct bch_replicas_cpu *r)
	struct bch_sb_field_replicas_v0 *sb_r;
	struct bch_replicas_entry_v0 *dst;
	struct bch_replicas_entry *src;

	bytes = sizeof(struct bch_sb_field_replicas);

	for_each_cpu_replicas_entry(r, src)
		bytes += replicas_entry_bytes(src) - 1;

	sb_r = bch2_sb_resize_replicas_v0(&c->disk_sb,
			DIV_ROUND_UP(bytes, sizeof(u64)));

	bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas);
	sb_r = bch2_sb_get_replicas_v0(c->disk_sb.sb);

	memset(&sb_r->entries, 0,
	       vstruct_end(&sb_r->field) -
	       (void *) &sb_r->entries);

	for_each_cpu_replicas_entry(r, src) {
		dst->data_type = src->data_type;
		dst->nr_devs = src->nr_devs;
		memcpy(dst->devs, src->devs, src->nr_devs);

		dst = replicas_entry_next(dst);

	BUG_ON((void *) dst > vstruct_end(&sb_r->field));
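/*
 * Write the in-memory table back to the superblock buffer: the current format
 * is only needed if some entry has nr_required != 1, otherwise the more
 * compact v0 format is written; in either case the other field variant is
 * deleted so only one of the two is ever present.
 */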
static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
					    struct bch_replicas_cpu *r)
	struct bch_sb_field_replicas *sb_r;
	struct bch_replicas_entry *dst, *src;
	bool need_v1 = false;

	bytes = sizeof(struct bch_sb_field_replicas);

	for_each_cpu_replicas_entry(r, src) {
		bytes += replicas_entry_bytes(src);
		if (src->nr_required != 1)
			need_v1 = true;
	}

	if (!need_v1)
		return bch2_cpu_replicas_to_sb_replicas_v0(c, r);

	sb_r = bch2_sb_resize_replicas(&c->disk_sb,
			DIV_ROUND_UP(bytes, sizeof(u64)));

	bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas_v0);
	sb_r = bch2_sb_get_replicas(c->disk_sb.sb);

	memset(&sb_r->entries, 0,
	       vstruct_end(&sb_r->field) -
	       (void *) &sb_r->entries);

	for_each_cpu_replicas_entry(r, src) {
		memcpy(dst, src, replicas_entry_bytes(src));

		dst = replicas_entry_next(dst);

	BUG_ON((void *) dst > vstruct_end(&sb_r->field));
static const char *check_dup_replicas_entries(struct bch_replicas_cpu *cpu_r)
	sort_cmp_size(cpu_r->entries,

	for (i = 0; i + 1 < cpu_r->nr; i++) {
		struct bch_replicas_entry *l =
			cpu_replicas_entry(cpu_r, i);
		struct bch_replicas_entry *r =
			cpu_replicas_entry(cpu_r, i + 1);

		BUG_ON(memcmp(l, r, cpu_r->entry_size) > 0);

		if (!memcmp(l, r, cpu_r->entry_size))
			return "duplicate replicas entry";

static const char *bch2_sb_validate_replicas(struct bch_sb *sb, struct bch_sb_field *f)
	struct bch_sb_field_replicas *sb_r = field_to_type(f, replicas);
	struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
	struct bch_replicas_cpu cpu_r = { .entries = NULL };
	struct bch_replicas_entry *e;

	for_each_replicas_entry(sb_r, e) {
		err = "invalid replicas entry: invalid data type";
		if (e->data_type >= BCH_DATA_NR)

		err = "invalid replicas entry: no devices";

		err = "invalid replicas entry: bad nr_required";
		if (!e->nr_required ||
		    (e->nr_required > 1 &&
		     e->nr_required >= e->nr_devs))

		err = "invalid replicas entry: invalid device";
		for (i = 0; i < e->nr_devs; i++)
			if (!bch2_dev_exists(sb, mi, e->devs[i]))

	err = "cannot allocate memory";
	if (__bch2_sb_replicas_to_cpu_replicas(sb_r, &cpu_r))

	err = check_dup_replicas_entries(&cpu_r);

	kfree(cpu_r.entries);

static void bch2_sb_replicas_to_text(struct printbuf *out,
				     struct bch_sb_field *f)
	struct bch_sb_field_replicas *r = field_to_type(f, replicas);
	struct bch_replicas_entry *e;

	for_each_replicas_entry(r, e) {
		bch2_replicas_entry_to_text(out, e);

const struct bch_sb_field_ops bch_sb_field_ops_replicas = {
	.validate	= bch2_sb_validate_replicas,
	.to_text	= bch2_sb_replicas_to_text,

static const char *bch2_sb_validate_replicas_v0(struct bch_sb *sb, struct bch_sb_field *f)
	struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
	struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
	struct bch_replicas_cpu cpu_r = { .entries = NULL };
	struct bch_replicas_entry_v0 *e;

	for_each_replicas_entry_v0(sb_r, e) {
		err = "invalid replicas entry: invalid data type";
		if (e->data_type >= BCH_DATA_NR)

		err = "invalid replicas entry: no devices";

		err = "invalid replicas entry: invalid device";
		for (i = 0; i < e->nr_devs; i++)
			if (!bch2_dev_exists(sb, mi, e->devs[i]))

	err = "cannot allocate memory";
	if (__bch2_sb_replicas_v0_to_cpu_replicas(sb_r, &cpu_r))

	err = check_dup_replicas_entries(&cpu_r);

	kfree(cpu_r.entries);

const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0 = {
	.validate	= bch2_sb_validate_replicas_v0,

/* Query replicas: */
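/*
 * Compute, per data type, how well replicated the filesystem currently is
 * against a given set of online devices: redundancy is the minimum over all
 * entries of (online devices in the entry - nr_required), i.e. how many more
 * devices of that type could go away, and nr_offline is the worst-case number
 * of offline devices in any entry.  bch2_have_enough_devs() combines this
 * with the BCH_FORCE_IF_* flags to decide whether the filesystem is usable.
 */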
struct replicas_status __bch2_replicas_status(struct bch_fs *c,
					      struct bch_devs_mask online_devs)
	struct bch_sb_field_members *mi;
	struct bch_replicas_entry *e;
	unsigned i, nr_online, nr_offline;
	struct replicas_status ret;

	memset(&ret, 0, sizeof(ret));

	for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
		ret.replicas[i].redundancy = INT_MAX;

	mi = bch2_sb_get_members(c->disk_sb.sb);

	percpu_down_read_preempt_disable(&c->mark_lock);

	for_each_cpu_replicas_entry(&c->replicas, e) {
		if (e->data_type >= ARRAY_SIZE(ret.replicas))
			panic("e %p data_type %u\n", e, e->data_type);

		nr_online = nr_offline = 0;

		for (i = 0; i < e->nr_devs; i++) {
			BUG_ON(!bch2_dev_exists(c->disk_sb.sb, mi,

			if (test_bit(e->devs[i], online_devs.d))
				nr_online++;
			else
				nr_offline++;
		}

		ret.replicas[e->data_type].redundancy =
			min(ret.replicas[e->data_type].redundancy,
			    (int) nr_online - (int) e->nr_required);

		ret.replicas[e->data_type].nr_offline =
			max(ret.replicas[e->data_type].nr_offline,

	percpu_up_read_preempt_enable(&c->mark_lock);

	for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
		if (ret.replicas[i].redundancy == INT_MAX)
			ret.replicas[i].redundancy = 0;

struct replicas_status bch2_replicas_status(struct bch_fs *c)
	return __bch2_replicas_status(c, bch2_online_devs(c));

static bool have_enough_devs(struct replicas_status s,
			     enum bch_data_type type,
			     bool force_if_degraded,
			     bool force_if_lost)
	return (!s.replicas[type].nr_offline || force_if_degraded) &&
		(s.replicas[type].redundancy >= 0 || force_if_lost);

bool bch2_have_enough_devs(struct replicas_status s, unsigned flags)
	return (have_enough_devs(s, BCH_DATA_JOURNAL,
				 flags & BCH_FORCE_IF_METADATA_DEGRADED,
				 flags & BCH_FORCE_IF_METADATA_LOST) &&
		have_enough_devs(s, BCH_DATA_BTREE,
				 flags & BCH_FORCE_IF_METADATA_DEGRADED,
				 flags & BCH_FORCE_IF_METADATA_LOST) &&
		have_enough_devs(s, BCH_DATA_USER,
				 flags & BCH_FORCE_IF_DATA_DEGRADED,
				 flags & BCH_FORCE_IF_DATA_LOST));

int bch2_replicas_online(struct bch_fs *c, bool meta)
	struct replicas_status s = bch2_replicas_status(c);

	return (meta
		? min(s.replicas[BCH_DATA_JOURNAL].redundancy,
		      s.replicas[BCH_DATA_BTREE].redundancy)
		: s.replicas[BCH_DATA_USER].redundancy) + 1;

unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
	struct bch_replicas_entry *e;

	percpu_down_read_preempt_disable(&c->mark_lock);

	for_each_cpu_replicas_entry(&c->replicas, e)
		for (i = 0; i < e->nr_devs; i++)
			if (e->devs[i] == ca->dev_idx)
				ret |= 1 << e->data_type;

	percpu_up_read_preempt_enable(&c->mark_lock);

int bch2_fs_replicas_init(struct bch_fs *c)
	c->journal.entry_u64s_reserved +=
		reserve_journal_replicas(c, &c->replicas);

	return replicas_table_update(c, &c->replicas);