// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "buckets.h"
#include "journal.h"
#include "replicas.h"
#include "super-io.h"

static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
					    struct bch_replicas_cpu *);

/* Replicas tracking - in memory: */

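/*
 * The in-memory table is a flat array of entries, each padded to the same
 * entry_size and kept in eytzinger order so that lookups can binary search
 * it (see __replicas_entry_idx()).
 */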
static void verify_replicas_entry(struct bch_replicas_entry *e)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	unsigned i;

	BUG_ON(e->data_type >= BCH_DATA_NR);
	BUG_ON(!e->nr_devs);
	BUG_ON(e->nr_required > 1 &&
	       e->nr_required >= e->nr_devs);

	for (i = 0; i + 1 < e->nr_devs; i++)
		BUG_ON(e->devs[i] >= e->devs[i + 1]);
#endif
}

void bch2_replicas_entry_sort(struct bch_replicas_entry *e)
{
	bubble_sort(e->devs, e->nr_devs, u8_cmp);
}

static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
{
	eytzinger0_sort(r->entries, r->nr, r->entry_size, memcmp, NULL);
}

void bch2_replicas_entry_to_text(struct printbuf *out,
				 struct bch_replicas_entry *e)
{
	unsigned i;

	if (e->data_type < BCH_DATA_NR)
		pr_buf(out, "%s", bch2_data_types[e->data_type]);
	else
		pr_buf(out, "(invalid data type %u)", e->data_type);

	pr_buf(out, ": %u/%u [", e->nr_required, e->nr_devs);
	for (i = 0; i < e->nr_devs; i++)
		pr_buf(out, i ? " %u" : "%u", e->devs[i]);
	pr_buf(out, "]");
}

void bch2_cpu_replicas_to_text(struct printbuf *out,
			       struct bch_replicas_cpu *r)
{
	struct bch_replicas_entry *e;
	bool first = true;

	for_each_cpu_replicas_entry(r, e) {
		if (!first)
			pr_buf(out, " ");
		first = false;

		bch2_replicas_entry_to_text(out, e);
	}
}

static void extent_to_replicas(struct bkey_s_c k,
			       struct bch_replicas_entry *r)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	r->nr_required	= 1;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if (p.ptr.cached)
			continue;

		if (!p.has_ec)
			r->devs[r->nr_devs++] = p.ptr.dev;
		else
			r->nr_required = 0;
	}
}

static void stripe_to_replicas(struct bkey_s_c k,
			       struct bch_replicas_entry *r)
{
	struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
	const struct bch_extent_ptr *ptr;

	r->nr_required	= s.v->nr_blocks - s.v->nr_redundant;

	for (ptr = s.v->ptrs;
	     ptr < s.v->ptrs + s.v->nr_blocks;
	     ptr++)
		r->devs[r->nr_devs++] = ptr->dev;
}

void bch2_bkey_to_replicas(struct bch_replicas_entry *e,
			   struct bkey_s_c k)
{
	e->nr_devs = 0;

	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		e->data_type = BCH_DATA_btree;
		extent_to_replicas(k, e);
		break;
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		e->data_type = BCH_DATA_user;
		extent_to_replicas(k, e);
		break;
	case KEY_TYPE_stripe:
		e->data_type = BCH_DATA_parity;
		stripe_to_replicas(k, e);
		break;
	}

	bch2_replicas_entry_sort(e);
}

void bch2_devlist_to_replicas(struct bch_replicas_entry *e,
			      enum bch_data_type data_type,
			      struct bch_devs_list devs)
{
	unsigned i;

	BUG_ON(!data_type ||
	       data_type == BCH_DATA_sb ||
	       data_type >= BCH_DATA_NR);

	e->data_type	= data_type;
	e->nr_devs	= 0;
	e->nr_required	= 1;

	for (i = 0; i < devs.nr; i++)
		e->devs[e->nr_devs++] = devs.devs[i];

	bch2_replicas_entry_sort(e);
}

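/*
 * Returns a copy of @old with @new_entry appended, padded to the larger
 * entry size and re-sorted; .entries is NULL on allocation failure, and the
 * caller owns the returned table.
 */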
static struct bch_replicas_cpu
cpu_replicas_add_entry(struct bch_replicas_cpu *old,
		       struct bch_replicas_entry *new_entry)
{
	unsigned i;
	struct bch_replicas_cpu new = {
		.nr		= old->nr + 1,
		.entry_size	= max_t(unsigned, old->entry_size,
					replicas_entry_bytes(new_entry)),
	};

	BUG_ON(!new_entry->data_type);
	verify_replicas_entry(new_entry);

	new.entries = kcalloc(new.nr, new.entry_size, GFP_KERNEL);
	if (!new.entries)
		return new;

	for (i = 0; i < old->nr; i++)
		memcpy(cpu_replicas_entry(&new, i),
		       cpu_replicas_entry(old, i),
		       old->entry_size);

	memcpy(cpu_replicas_entry(&new, old->nr),
	       new_entry,
	       replicas_entry_bytes(new_entry));

	bch2_cpu_replicas_sort(&new);
	return new;
}

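/*
 * Binary search the eytzinger-ordered table @r for @search, comparing only
 * replicas_entry_bytes(search) bytes (trailing padding in the table is
 * zeroed at allocation). Returns the entry's index, or -1 if not found.
 */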
static inline int __replicas_entry_idx(struct bch_replicas_cpu *r,
				       struct bch_replicas_entry *search)
{
	int idx, entry_size = replicas_entry_bytes(search);

	if (unlikely(entry_size > r->entry_size))
		return -1;

	verify_replicas_entry(search);

#define entry_cmp(_l, _r, size)	memcmp(_l, _r, entry_size)
	idx = eytzinger0_find(r->entries, r->nr, r->entry_size,
			      entry_cmp, search);
#undef entry_cmp

	return idx < r->nr ? idx : -1;
}

int bch2_replicas_entry_idx(struct bch_fs *c,
			    struct bch_replicas_entry *search)
{
	bch2_replicas_entry_sort(search);

	return __replicas_entry_idx(&c->replicas, search);
}

static bool __replicas_has_entry(struct bch_replicas_cpu *r,
				 struct bch_replicas_entry *search)
{
	return __replicas_entry_idx(r, search) >= 0;
}

bool bch2_replicas_marked(struct bch_fs *c,
			  struct bch_replicas_entry *search)
{
	bool marked;

	if (!search->nr_devs)
		return true;

	verify_replicas_entry(search);

	percpu_down_read(&c->mark_lock);
	marked = __replicas_has_entry(&c->replicas, search) &&
		(likely((!c->replicas_gc.entries)) ||
		 __replicas_has_entry(&c->replicas_gc, search));
	percpu_up_read(&c->mark_lock);

	return marked;
}

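/*
 * Copy the usage counters in @src, indexed according to @src_r, into @dst,
 * re-indexing each nonzero counter to its position in @dst_r; every entry
 * in @src_r with a nonzero counter must also exist in @dst_r.
 */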
static void __replicas_table_update(struct bch_fs_usage *dst,
				    struct bch_replicas_cpu *dst_r,
				    struct bch_fs_usage *src,
				    struct bch_replicas_cpu *src_r)
{
	int src_idx, dst_idx;

	*dst = *src;

	for (src_idx = 0; src_idx < src_r->nr; src_idx++) {
		if (!src->replicas[src_idx])
			continue;

		dst_idx = __replicas_entry_idx(dst_r,
				cpu_replicas_entry(src_r, src_idx));
		BUG_ON(dst_idx < 0);

		dst->replicas[dst_idx] = src->replicas[src_idx];
	}
}

static void __replicas_table_update_pcpu(struct bch_fs_usage __percpu *dst_p,
					 struct bch_replicas_cpu *dst_r,
					 struct bch_fs_usage __percpu *src_p,
					 struct bch_replicas_cpu *src_r)
{
	unsigned src_nr = sizeof(struct bch_fs_usage) / sizeof(u64) + src_r->nr;
	struct bch_fs_usage *dst, *src = (void *)
		bch2_acc_percpu_u64s((void *) src_p, src_nr);

	preempt_disable();
	dst = this_cpu_ptr(dst_p);
	preempt_enable();

	__replicas_table_update(dst, dst_r, src, src_r);
}

/*
 * Resize filesystem accounting:
 */
static int replicas_table_update(struct bch_fs *c,
				 struct bch_replicas_cpu *new_r)
{
	struct bch_fs_usage __percpu *new_usage[JOURNAL_BUF_NR];
	struct bch_fs_usage_online *new_scratch = NULL;
	struct bch_fs_usage __percpu *new_gc = NULL;
	struct bch_fs_usage *new_base = NULL;
	unsigned i, bytes = sizeof(struct bch_fs_usage) +
		sizeof(u64) * new_r->nr;
	unsigned scratch_bytes = sizeof(struct bch_fs_usage_online) +
		sizeof(u64) * new_r->nr;
	int ret = 0;

	memset(new_usage, 0, sizeof(new_usage));

	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
		if (!(new_usage[i] = __alloc_percpu_gfp(bytes,
					sizeof(u64), GFP_KERNEL)))
			goto err;

	if (!(new_base = kzalloc(bytes, GFP_KERNEL)) ||
	    !(new_scratch = kmalloc(scratch_bytes, GFP_KERNEL)) ||
	    (c->usage_gc &&
	     !(new_gc = __alloc_percpu_gfp(bytes, sizeof(u64), GFP_KERNEL))))
		goto err;

	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
		if (c->usage[i])
			__replicas_table_update_pcpu(new_usage[i], new_r,
						     c->usage[i], &c->replicas);
	if (c->usage_base)
		__replicas_table_update(new_base, new_r,
					c->usage_base, &c->replicas);
	if (c->usage_gc)
		__replicas_table_update_pcpu(new_gc, new_r,
					     c->usage_gc, &c->replicas);

	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
		swap(c->usage[i], new_usage[i]);
	swap(c->usage_base,	new_base);
	swap(c->usage_scratch,	new_scratch);
	swap(c->usage_gc,	new_gc);
	swap(c->replicas,	*new_r);
out:
	free_percpu(new_gc);
	kfree(new_scratch);
	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
		free_percpu(new_usage[i]);
	kfree(new_base);
	return ret;
err:
	bch_err(c, "error updating replicas table: memory allocation failure");
	ret = -ENOMEM;
	goto out;
}

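/*
 * Size (in u64s) of the journal reservation needed to write out a usage
 * entry for each replicas entry in @r, plus the fixed-size usage entries
 * (nr_inodes, key_version, persistent_reserved).
 */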
static unsigned reserve_journal_replicas(struct bch_fs *c,
				     struct bch_replicas_cpu *r)
{
	struct bch_replicas_entry *e;
	unsigned journal_res_u64s = 0;

	/* nr_inodes: */
	journal_res_u64s +=
		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));

	/* key_version: */
	journal_res_u64s +=
		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));

	/* persistent_reserved: */
	journal_res_u64s +=
		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64)) *
		BCH_REPLICAS_MAX;

	for_each_cpu_replicas_entry(r, e)
		journal_res_u64s +=
			DIV_ROUND_UP(sizeof(struct jset_entry_data_usage) +
				     e->nr_devs, sizeof(u64));
	return journal_res_u64s;
}

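/*
 * Slowpath for marking a new replicas entry: build enlarged copies of the
 * in-memory table(s), write the new entry to the superblock, then swap the
 * new tables in only after the superblock write has made the change
 * persistent.
 */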
static int bch2_mark_replicas_slowpath(struct bch_fs *c,
				struct bch_replicas_entry *new_entry)
{
	struct bch_replicas_cpu new_r, new_gc;
	int ret = 0;

	verify_replicas_entry(new_entry);

	memset(&new_r, 0, sizeof(new_r));
	memset(&new_gc, 0, sizeof(new_gc));

	mutex_lock(&c->sb_lock);

	if (c->replicas_gc.entries &&
	    !__replicas_has_entry(&c->replicas_gc, new_entry)) {
		new_gc = cpu_replicas_add_entry(&c->replicas_gc, new_entry);
		if (!new_gc.entries)
			goto err;
	}

	if (!__replicas_has_entry(&c->replicas, new_entry)) {
		new_r = cpu_replicas_add_entry(&c->replicas, new_entry);
		if (!new_r.entries)
			goto err;

		ret = bch2_cpu_replicas_to_sb_replicas(c, &new_r);
		if (ret)
			goto err;

		bch2_journal_entry_res_resize(&c->journal,
				&c->replicas_journal_res,
				reserve_journal_replicas(c, &new_r));
	}

	if (!new_r.entries &&
	    !new_gc.entries)
		goto out;

	/* allocations done, now commit: */
	if (new_r.entries)
		bch2_write_super(c);

	/* don't update in memory replicas until changes are persistent */
	percpu_down_write(&c->mark_lock);
	if (new_r.entries)
		ret = replicas_table_update(c, &new_r);
	if (new_gc.entries)
		swap(new_gc, c->replicas_gc);
	percpu_up_write(&c->mark_lock);
out:
	mutex_unlock(&c->sb_lock);

	kfree(new_r.entries);
	kfree(new_gc.entries);

	return ret;
err:
	bch_err(c, "error adding replicas entry: memory allocation failure");
	ret = -ENOMEM;
	goto out;
}

static int __bch2_mark_replicas(struct bch_fs *c,
				struct bch_replicas_entry *r,
				bool check)
{
	return likely(bch2_replicas_marked(c, r))	? 0
		: check					? -1
		: bch2_mark_replicas_slowpath(c, r);
}

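/*
 * Typical usage (illustrative sketch; the real call sites live in the
 * write/btree paths): build an entry on the stack and mark it before the
 * key that references it is committed:
 *
 *	struct bch_replicas_padded r;
 *
 *	bch2_bkey_to_replicas(&r.e, k);
 *	ret = bch2_mark_replicas(c, &r.e);
 */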
int bch2_mark_replicas(struct bch_fs *c, struct bch_replicas_entry *r)
{
	return __bch2_mark_replicas(c, r, false);
}

/* replicas delta list: */

int bch2_replicas_delta_list_mark(struct bch_fs *c,
				  struct replicas_delta_list *r)
{
	struct replicas_delta *d = r->d;
	struct replicas_delta *top = (void *) r->d + r->used;
	int ret = 0;

	for (d = r->d; !ret && d != top; d = replicas_delta_next(d))
		ret = bch2_mark_replicas(c, &d->r);
	return ret;
}

/*
 * Old replicas_gc mechanism: only used for journal replicas entries now,
 * should die at some point:
 */

int bch2_replicas_gc_end(struct bch_fs *c, int ret)
{
	unsigned i;

	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);
	percpu_down_write(&c->mark_lock);

	/*
	 * this is kind of crappy; the replicas gc mechanism needs to be ripped
	 * out
	 */
	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);
		struct bch_replicas_cpu n;

		if (!__replicas_has_entry(&c->replicas_gc, e) &&
		    bch2_fs_usage_read_one(c, &c->usage_base->replicas[i])) {
			n = cpu_replicas_add_entry(&c->replicas_gc, e);
			if (!n.entries) {
				ret = -ENOSPC;
				goto err;
			}

			swap(n, c->replicas_gc);
			kfree(n.entries);
		}
	}

	if (bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc)) {
		ret = -ENOSPC;
		goto err;
	}

	ret = replicas_table_update(c, &c->replicas_gc);
err:
	kfree(c->replicas_gc.entries);
	c->replicas_gc.entries = NULL;

	percpu_up_write(&c->mark_lock);

	if (!ret)
		bch2_write_super(c);

	mutex_unlock(&c->sb_lock);

	return ret;
}

int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
{
	struct bch_replicas_entry *e;
	unsigned i = 0;

	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);
	BUG_ON(c->replicas_gc.entries);

	c->replicas_gc.nr		= 0;
	c->replicas_gc.entry_size	= 0;

	for_each_cpu_replicas_entry(&c->replicas, e)
		if (!((1 << e->data_type) & typemask)) {
			c->replicas_gc.nr++;
			c->replicas_gc.entry_size =
				max_t(unsigned, c->replicas_gc.entry_size,
				      replicas_entry_bytes(e));
		}

	c->replicas_gc.entries = kcalloc(c->replicas_gc.nr,
					 c->replicas_gc.entry_size,
					 GFP_KERNEL);
	if (!c->replicas_gc.entries) {
		mutex_unlock(&c->sb_lock);
		bch_err(c, "error allocating c->replicas_gc");
		return -ENOMEM;
	}

	for_each_cpu_replicas_entry(&c->replicas, e)
		if (!((1 << e->data_type) & typemask))
			memcpy(cpu_replicas_entry(&c->replicas_gc, i++),
			       e, c->replicas_gc.entry_size);

	bch2_cpu_replicas_sort(&c->replicas_gc);
	mutex_unlock(&c->sb_lock);

	return 0;
}

/* New much simpler mechanism for clearing out unneeded replicas entries: */

int bch2_replicas_gc2(struct bch_fs *c)
{
	struct bch_replicas_cpu new = { 0 };
	unsigned i, nr;
	int ret = 0;

	bch2_journal_meta(&c->journal);
retry:
	nr		= READ_ONCE(c->replicas.nr);
	new.entry_size	= READ_ONCE(c->replicas.entry_size);
	new.entries	= kcalloc(nr, new.entry_size, GFP_KERNEL);
	if (!new.entries) {
		bch_err(c, "error allocating c->replicas_gc");
		return -ENOMEM;
	}

	mutex_lock(&c->sb_lock);
	percpu_down_write(&c->mark_lock);

	if (nr			!= c->replicas.nr ||
	    new.entry_size	!= c->replicas.entry_size) {
		percpu_up_write(&c->mark_lock);
		mutex_unlock(&c->sb_lock);
		kfree(new.entries);
		goto retry;
	}

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);

		if (e->data_type == BCH_DATA_journal ||
		    c->usage_base->replicas[i] ||
		    percpu_u64_get(&c->usage[0]->replicas[i]) ||
		    percpu_u64_get(&c->usage[1]->replicas[i]) ||
		    percpu_u64_get(&c->usage[2]->replicas[i]) ||
		    percpu_u64_get(&c->usage[3]->replicas[i]))
			memcpy(cpu_replicas_entry(&new, new.nr++),
			       e, new.entry_size);
	}

	bch2_cpu_replicas_sort(&new);

	if (bch2_cpu_replicas_to_sb_replicas(c, &new)) {
		ret = -ENOSPC;
		goto err;
	}

	ret = replicas_table_update(c, &new);
err:
	kfree(new.entries);
	percpu_up_write(&c->mark_lock);

	if (!ret)
		bch2_write_super(c);

	mutex_unlock(&c->sb_lock);

	return ret;
}

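/*
 * Set the base usage counter for @r to @sectors, adding the entry to the
 * table (and resizing the accounting) if it isn't already present; used
 * when loading on-disk usage, e.g. from journal usage entries.
 */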
int bch2_replicas_set_usage(struct bch_fs *c,
			    struct bch_replicas_entry *r,
			    u64 sectors)
{
	int ret, idx = bch2_replicas_entry_idx(c, r);

	if (idx < 0) {
		struct bch_replicas_cpu n;

		n = cpu_replicas_add_entry(&c->replicas, r);
		if (!n.entries)
			return -ENOMEM;

		ret = replicas_table_update(c, &n);
		if (ret)
			return ret;

		kfree(n.entries);

		idx = bch2_replicas_entry_idx(c, r);
		BUG_ON(idx < 0);
	}

	c->usage_base->replicas[idx] = sectors;

	return 0;
}

/* Replicas tracking - superblock: */

static int
__bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r,
				   struct bch_replicas_cpu *cpu_r)
{
	struct bch_replicas_entry *e, *dst;
	unsigned nr = 0, entry_size = 0, idx = 0;

	for_each_replicas_entry(sb_r, e) {
		entry_size = max_t(unsigned, entry_size,
				   replicas_entry_bytes(e));
		nr++;
	}

	cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
	if (!cpu_r->entries)
		return -ENOMEM;

	cpu_r->nr		= nr;
	cpu_r->entry_size	= entry_size;

	for_each_replicas_entry(sb_r, e) {
		dst = cpu_replicas_entry(cpu_r, idx++);
		memcpy(dst, e, replicas_entry_bytes(e));
		bch2_replicas_entry_sort(dst);
	}

	return 0;
}

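/*
 * Same as above, but for the v0 on-disk format, which lacks nr_required:
 * entry_size is padded so that each v0 entry can be expanded into a current
 * struct bch_replicas_entry with nr_required = 1.
 */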
static int
__bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
				      struct bch_replicas_cpu *cpu_r)
{
	struct bch_replicas_entry_v0 *e;
	unsigned nr = 0, entry_size = 0, idx = 0;

	for_each_replicas_entry(sb_r, e) {
		entry_size = max_t(unsigned, entry_size,
				   replicas_entry_bytes(e));
		nr++;
	}

	entry_size += sizeof(struct bch_replicas_entry) -
		sizeof(struct bch_replicas_entry_v0);

	cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
	if (!cpu_r->entries)
		return -ENOMEM;

	cpu_r->nr		= nr;
	cpu_r->entry_size	= entry_size;

	for_each_replicas_entry(sb_r, e) {
		struct bch_replicas_entry *dst =
			cpu_replicas_entry(cpu_r, idx++);

		dst->data_type	= e->data_type;
		dst->nr_devs	= e->nr_devs;
		dst->nr_required = 1;
		memcpy(dst->devs, e->devs, e->nr_devs);
		bch2_replicas_entry_sort(dst);
	}

	return 0;
}

int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
{
	struct bch_sb_field_replicas *sb_v1;
	struct bch_sb_field_replicas_v0 *sb_v0;
	struct bch_replicas_cpu new_r = { 0, 0, NULL };
	int ret = 0;

	if ((sb_v1 = bch2_sb_get_replicas(c->disk_sb.sb)))
		ret = __bch2_sb_replicas_to_cpu_replicas(sb_v1, &new_r);
	else if ((sb_v0 = bch2_sb_get_replicas_v0(c->disk_sb.sb)))
		ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_v0, &new_r);

	if (ret)
		return -ENOMEM;

	bch2_cpu_replicas_sort(&new_r);

	percpu_down_write(&c->mark_lock);
	ret = replicas_table_update(c, &new_r);
	percpu_up_write(&c->mark_lock);

	kfree(new_r.entries);

	return ret;
}

static int bch2_cpu_replicas_to_sb_replicas_v0(struct bch_fs *c,
					       struct bch_replicas_cpu *r)
{
	struct bch_sb_field_replicas_v0 *sb_r;
	struct bch_replicas_entry_v0 *dst;
	struct bch_replicas_entry *src;
	size_t bytes;

	bytes = sizeof(struct bch_sb_field_replicas);

	for_each_cpu_replicas_entry(r, src)
		bytes += replicas_entry_bytes(src) - 1;

	sb_r = bch2_sb_resize_replicas_v0(&c->disk_sb,
			DIV_ROUND_UP(bytes, sizeof(u64)));
	if (!sb_r)
		return -ENOSPC;

	bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas);
	sb_r = bch2_sb_get_replicas_v0(c->disk_sb.sb);

	memset(&sb_r->entries, 0,
	       vstruct_end(&sb_r->field) -
	       (void *) &sb_r->entries);

	dst = sb_r->entries;
	for_each_cpu_replicas_entry(r, src) {
		dst->data_type	= src->data_type;
		dst->nr_devs	= src->nr_devs;
		memcpy(dst->devs, src->devs, src->nr_devs);

		dst = replicas_entry_next(dst);

		BUG_ON((void *) dst > vstruct_end(&sb_r->field));
	}

	return 0;
}

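/*
 * Write the in-memory table to the superblock, using the more compact v0
 * field when no entry needs nr_required != 1; only one of the two replicas
 * fields is kept in the superblock at a time.
 */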
static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
					    struct bch_replicas_cpu *r)
{
	struct bch_sb_field_replicas *sb_r;
	struct bch_replicas_entry *dst, *src;
	bool need_v1 = false;
	size_t bytes;

	bytes = sizeof(struct bch_sb_field_replicas);

	for_each_cpu_replicas_entry(r, src) {
		bytes += replicas_entry_bytes(src);
		if (src->nr_required != 1)
			need_v1 = true;
	}

	if (!need_v1)
		return bch2_cpu_replicas_to_sb_replicas_v0(c, r);

	sb_r = bch2_sb_resize_replicas(&c->disk_sb,
			DIV_ROUND_UP(bytes, sizeof(u64)));
	if (!sb_r)
		return -ENOSPC;

	bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas_v0);
	sb_r = bch2_sb_get_replicas(c->disk_sb.sb);

	memset(&sb_r->entries, 0,
	       vstruct_end(&sb_r->field) -
	       (void *) &sb_r->entries);

	dst = sb_r->entries;
	for_each_cpu_replicas_entry(r, src) {
		memcpy(dst, src, replicas_entry_bytes(src));

		dst = replicas_entry_next(dst);

		BUG_ON((void *) dst > vstruct_end(&sb_r->field));
	}

	return 0;
}

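/*
 * Validate a table read from the superblock: data type and nr_required in
 * range, at least one device per entry, all devices exist, and no duplicate
 * entries once sorted.
 */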
static int bch2_cpu_replicas_validate(struct bch_replicas_cpu *cpu_r,
				      struct bch_sb *sb,
				      struct printbuf *err)
{
	struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
	unsigned i, j;

	sort_cmp_size(cpu_r->entries,
		      cpu_r->nr,
		      cpu_r->entry_size,
		      memcmp, NULL);

	for (i = 0; i < cpu_r->nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(cpu_r, i);

		if (e->data_type >= BCH_DATA_NR) {
			pr_buf(err, "invalid data type in entry ");
			bch2_replicas_entry_to_text(err, e);
			return -EINVAL;
		}

		if (!e->nr_devs) {
			pr_buf(err, "no devices in entry ");
			bch2_replicas_entry_to_text(err, e);
			return -EINVAL;
		}

		if (e->nr_required > 1 &&
		    e->nr_required >= e->nr_devs) {
			pr_buf(err, "bad nr_required in entry ");
			bch2_replicas_entry_to_text(err, e);
			return -EINVAL;
		}

		for (j = 0; j < e->nr_devs; j++)
			if (!bch2_dev_exists(sb, mi, e->devs[j])) {
				pr_buf(err, "invalid device %u in entry ", e->devs[j]);
				bch2_replicas_entry_to_text(err, e);
				return -EINVAL;
			}

		if (i + 1 < cpu_r->nr) {
			struct bch_replicas_entry *n =
				cpu_replicas_entry(cpu_r, i + 1);

			BUG_ON(memcmp(e, n, cpu_r->entry_size) > 0);

			if (!memcmp(e, n, cpu_r->entry_size)) {
				pr_buf(err, "duplicate replicas entry ");
				bch2_replicas_entry_to_text(err, e);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static int bch2_sb_validate_replicas(struct bch_sb *sb, struct bch_sb_field *f,
				     struct printbuf *err)
{
	struct bch_sb_field_replicas *sb_r = field_to_type(f, replicas);
	struct bch_replicas_cpu cpu_r;
	int ret;

	if (__bch2_sb_replicas_to_cpu_replicas(sb_r, &cpu_r))
		return -ENOMEM;

	ret = bch2_cpu_replicas_validate(&cpu_r, sb, err);
	kfree(cpu_r.entries);
	return ret;
}

static void bch2_sb_replicas_to_text(struct printbuf *out,
				     struct bch_sb *sb,
				     struct bch_sb_field *f)
{
	struct bch_sb_field_replicas *r = field_to_type(f, replicas);
	struct bch_replicas_entry *e;
	bool first = true;

	for_each_replicas_entry(r, e) {
		if (!first)
			pr_buf(out, " ");
		first = false;

		bch2_replicas_entry_to_text(out, e);
	}
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas = {
	.validate	= bch2_sb_validate_replicas,
	.to_text	= bch2_sb_replicas_to_text,
};

static int bch2_sb_validate_replicas_v0(struct bch_sb *sb, struct bch_sb_field *f,
					struct printbuf *err)
{
	struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
	struct bch_replicas_cpu cpu_r;
	int ret;

	if (__bch2_sb_replicas_v0_to_cpu_replicas(sb_r, &cpu_r))
		return -ENOMEM;

	ret = bch2_cpu_replicas_validate(&cpu_r, sb, err);
	kfree(cpu_r.entries);
	return ret;
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0 = {
	.validate	= bch2_sb_validate_replicas_v0,
};

/* Query replicas: */

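/*
 * Check whether enough devices are online to read every replicas entry: for
 * each entry, count online and failed devices and, unless every device in
 * the entry has failed, require that any loss or degradation is allowed by
 * the corresponding BCH_FORCE_IF_* bits in @flags.
 */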
bool bch2_have_enough_devs(struct bch_fs *c, struct bch_devs_mask devs,
			   unsigned flags, bool print)
{
	struct bch_replicas_entry *e;
	bool ret = true;

	percpu_down_read(&c->mark_lock);
	for_each_cpu_replicas_entry(&c->replicas, e) {
		unsigned i, nr_online = 0, nr_failed = 0, dflags = 0;
		bool metadata = e->data_type < BCH_DATA_user;

		if (e->data_type == BCH_DATA_cached)
			continue;

		for (i = 0; i < e->nr_devs; i++) {
			struct bch_dev *ca = bch_dev_bkey_exists(c, e->devs[i]);

			nr_online += test_bit(e->devs[i], devs.d);
			nr_failed += ca->mi.state == BCH_MEMBER_STATE_failed;
		}

		if (nr_failed == e->nr_devs)
			continue;

		if (nr_online < e->nr_required)
			dflags |= metadata
				? BCH_FORCE_IF_METADATA_LOST
				: BCH_FORCE_IF_DATA_LOST;

		if (nr_online < e->nr_devs)
			dflags |= metadata
				? BCH_FORCE_IF_METADATA_DEGRADED
				: BCH_FORCE_IF_DATA_DEGRADED;

		if (dflags & ~flags) {
			if (print) {
				char buf[100];

				bch2_replicas_entry_to_text(&PBUF(buf), e);
				bch_err(c, "insufficient devices online (%u) for replicas entry %s",
					nr_online, buf);
			}
			ret = false;
			break;
		}
	}
	percpu_up_read(&c->mark_lock);

	return ret;
}

unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
{
	struct bch_replicas_entry *e;
	unsigned i, ret = 0;

	percpu_down_read(&c->mark_lock);

	for_each_cpu_replicas_entry(&c->replicas, e)
		for (i = 0; i < e->nr_devs; i++)
			if (e->devs[i] == ca->dev_idx)
				ret |= 1 << e->data_type;

	percpu_up_read(&c->mark_lock);

	return ret;
}

void bch2_fs_replicas_exit(struct bch_fs *c)
{
	unsigned i;

	kfree(c->usage_scratch);
	for (i = 0; i < ARRAY_SIZE(c->usage); i++)
		free_percpu(c->usage[i]);
	kfree(c->usage_base);
	kfree(c->replicas.entries);
	kfree(c->replicas_gc.entries);

	mempool_exit(&c->replicas_delta_pool);
}

int bch2_fs_replicas_init(struct bch_fs *c)
{
	bch2_journal_entry_res_resize(&c->journal,
			&c->replicas_journal_res,
			reserve_journal_replicas(c, &c->replicas));

	return mempool_init_kmalloc_pool(&c->replicas_delta_pool, 1,
					 REPLICAS_DELTA_LIST_MAX) ?:
		replicas_table_update(c, &c->replicas);
}