// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "buckets.h"
#include "journal.h"
#include "replicas.h"
#include "super-io.h"

static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
                                            struct bch_replicas_cpu *);

/* Replicas tracking - in memory: */

static void verify_replicas_entry(struct bch_replicas_entry *e)
{
#ifdef CONFIG_BCACHEFS_DEBUG
        unsigned i;

        BUG_ON(e->data_type >= BCH_DATA_NR);
        BUG_ON(!e->nr_devs);
        BUG_ON(e->nr_required > 1 &&
               e->nr_required >= e->nr_devs);

        for (i = 0; i + 1 < e->nr_devs; i++)
                BUG_ON(e->devs[i] >= e->devs[i + 1]);
#endif
}

void bch2_replicas_entry_sort(struct bch_replicas_entry *e)
{
        bubble_sort(e->devs, e->nr_devs, u8_cmp);
}

static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
{
        eytzinger0_sort(r->entries, r->nr, r->entry_size, memcmp, NULL);
}

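/*
 * The in-memory replicas table is kept in eytzinger (BFS) order, so
 * lookups in __replicas_entry_idx() below are cache-friendly binary
 * searches rather than linear scans of the table.
 */
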
void bch2_replicas_entry_to_text(struct printbuf *out,
                                 struct bch_replicas_entry *e)
{
        unsigned i;

        pr_buf(out, "%s: %u/%u [",
               bch2_data_types[e->data_type],
               e->nr_required,
               e->nr_devs);

        for (i = 0; i < e->nr_devs; i++)
                pr_buf(out, i ? " %u" : "%u", e->devs[i]);
        pr_buf(out, "]");
}

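/*
 * Example output from bch2_replicas_entry_to_text(), for a hypothetical
 * two-replica user data entry on devices 0 and 1:
 *
 *      user: 1/2 [0 1]
 *
 * i.e. data type, nr_required/nr_devs, then the sorted device indexes.
 */
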
void bch2_cpu_replicas_to_text(struct printbuf *out,
                               struct bch_replicas_cpu *r)
{
        struct bch_replicas_entry *e;
        bool first = true;

        for_each_cpu_replicas_entry(r, e) {
                if (!first)
                        pr_buf(out, " ");
                first = false;

                bch2_replicas_entry_to_text(out, e);
        }
}

static void extent_to_replicas(struct bkey_s_c k,
                               struct bch_replicas_entry *r)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;

        r->nr_required = 1;

        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                if (p.ptr.cached)
                        continue;

                if (!p.has_ec)
                        r->devs[r->nr_devs++] = p.ptr.dev;
                else
                        r->nr_required = 0;
        }
}

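/*
 * Erasure coded pointers are special cased above: they aren't added to
 * devs[], and nr_required drops to 0 - durability for such extents
 * comes from the stripe, which has its own replicas entry (see
 * stripe_to_replicas() below).
 */
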
static void stripe_to_replicas(struct bkey_s_c k,
                               struct bch_replicas_entry *r)
{
        struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
        const struct bch_extent_ptr *ptr;

        r->nr_required = s.v->nr_blocks - s.v->nr_redundant;

        for (ptr = s.v->ptrs;
             ptr < s.v->ptrs + s.v->nr_blocks;
             ptr++)
                r->devs[r->nr_devs++] = ptr->dev;
}

void bch2_bkey_to_replicas(struct bch_replicas_entry *e,
                           struct bkey_s_c k)
{
        e->nr_devs = 0;

        switch (k.k->type) {
        case KEY_TYPE_btree_ptr:
        case KEY_TYPE_btree_ptr_v2:
                e->data_type = BCH_DATA_btree;
                extent_to_replicas(k, e);
                break;
        case KEY_TYPE_extent:
        case KEY_TYPE_reflink_v:
                e->data_type = BCH_DATA_user;
                extent_to_replicas(k, e);
                break;
        case KEY_TYPE_stripe:
                e->data_type = BCH_DATA_parity;
                stripe_to_replicas(k, e);
                break;
        }

        bch2_replicas_entry_sort(e);
}

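/*
 * Typical usage, as in __bch2_mark_bkey_replicas() below: callers build
 * the entry in a stack allocated bch_replicas_padded, which guarantees
 * devs[] has room for the maximum number of pointers:
 *
 *      struct bch_replicas_padded r;
 *
 *      bch2_bkey_to_replicas(&r.e, k);
 *      ret = bch2_mark_replicas(c, &r.e);
 */
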
void bch2_devlist_to_replicas(struct bch_replicas_entry *e,
                              enum bch_data_type data_type,
                              struct bch_devs_list devs)
{
        unsigned i;

        BUG_ON(!data_type ||
               data_type == BCH_DATA_sb ||
               data_type >= BCH_DATA_NR);

        e->data_type    = data_type;
        e->nr_devs      = 0;
        e->nr_required  = 1;

        for (i = 0; i < devs.nr; i++)
                e->devs[e->nr_devs++] = devs.devs[i];

        bch2_replicas_entry_sort(e);
}

static struct bch_replicas_cpu
cpu_replicas_add_entry(struct bch_replicas_cpu *old,
                       struct bch_replicas_entry *new_entry)
{
        unsigned i;
        struct bch_replicas_cpu new = {
                .nr             = old->nr + 1,
                .entry_size     = max_t(unsigned, old->entry_size,
                                        replicas_entry_bytes(new_entry)),
        };

        BUG_ON(!new_entry->data_type);
        verify_replicas_entry(new_entry);

        new.entries = kcalloc(new.nr, new.entry_size, GFP_KERNEL);
        if (!new.entries)
                return new;

        for (i = 0; i < old->nr; i++)
                memcpy(cpu_replicas_entry(&new, i),
                       cpu_replicas_entry(old, i),
                       old->entry_size);

        memcpy(cpu_replicas_entry(&new, old->nr),
               new_entry,
               replicas_entry_bytes(new_entry));

        bch2_cpu_replicas_sort(&new);
        return new;
}

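/*
 * Note the failure convention: on allocation failure
 * cpu_replicas_add_entry() returns a bch_replicas_cpu with
 * .entries == NULL, which callers must check in place of an errno.
 */
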
static inline int __replicas_entry_idx(struct bch_replicas_cpu *r,
                                       struct bch_replicas_entry *search)
{
        int idx, entry_size = replicas_entry_bytes(search);

        if (unlikely(entry_size > r->entry_size))
                return -1;

        verify_replicas_entry(search);

#define entry_cmp(_l, _r, size) memcmp(_l, _r, entry_size)
        idx = eytzinger0_find(r->entries, r->nr, r->entry_size,
                              entry_cmp, search);
#undef entry_cmp

        return idx < r->nr ? idx : -1;
}

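/*
 * entry_cmp above deliberately ignores the size eytzinger0_find() would
 * pass to memcmp and compares replicas_entry_bytes(search) bytes
 * instead: the search key may be smaller than the table's zero-padded
 * entry_size.
 */
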
int bch2_replicas_entry_idx(struct bch_fs *c,
                            struct bch_replicas_entry *search)
{
        bch2_replicas_entry_sort(search);

        return __replicas_entry_idx(&c->replicas, search);
}

static bool __replicas_has_entry(struct bch_replicas_cpu *r,
                                 struct bch_replicas_entry *search)
{
        return __replicas_entry_idx(r, search) >= 0;
}

bool bch2_replicas_marked(struct bch_fs *c,
                          struct bch_replicas_entry *search)
{
        bool marked;

        if (!search->nr_devs)
                return true;

        verify_replicas_entry(search);

        percpu_down_read(&c->mark_lock);
        marked = __replicas_has_entry(&c->replicas, search) &&
                (likely((!c->replicas_gc.entries)) ||
                 __replicas_has_entry(&c->replicas_gc, search));
        percpu_up_read(&c->mark_lock);

        return marked;
}

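/*
 * While a gc pass is in progress (c->replicas_gc.entries != NULL), an
 * entry only counts as marked if it's present in both the main table
 * and the gc table; otherwise gc could drop an entry that's still in
 * use.
 */
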
static void __replicas_table_update(struct bch_fs_usage *dst,
                                    struct bch_replicas_cpu *dst_r,
                                    struct bch_fs_usage *src,
                                    struct bch_replicas_cpu *src_r)
{
        int src_idx, dst_idx;

        *dst = *src;

        for (src_idx = 0; src_idx < src_r->nr; src_idx++) {
                if (!src->replicas[src_idx])
                        continue;

                dst_idx = __replicas_entry_idx(dst_r,
                                cpu_replicas_entry(src_r, src_idx));
                BUG_ON(dst_idx < 0);

                dst->replicas[dst_idx] = src->replicas[src_idx];
        }
}

static void __replicas_table_update_pcpu(struct bch_fs_usage __percpu *dst_p,
                                    struct bch_replicas_cpu *dst_r,
                                    struct bch_fs_usage __percpu *src_p,
                                    struct bch_replicas_cpu *src_r)
{
        unsigned src_nr = sizeof(struct bch_fs_usage) / sizeof(u64) + src_r->nr;
        struct bch_fs_usage *dst, *src = (void *)
                bch2_acc_percpu_u64s((void *) src_p, src_nr);

        preempt_disable();
        dst = this_cpu_ptr(dst_p);
        preempt_enable();

        __replicas_table_update(dst, dst_r, src, src_r);
}

/*
 * Resize filesystem accounting:
 */
static int replicas_table_update(struct bch_fs *c,
                                 struct bch_replicas_cpu *new_r)
{
        struct bch_fs_usage __percpu *new_usage[JOURNAL_BUF_NR];
        struct bch_fs_usage_online *new_scratch = NULL;
        struct bch_fs_usage __percpu *new_gc = NULL;
        struct bch_fs_usage *new_base = NULL;
        unsigned i, bytes = sizeof(struct bch_fs_usage) +
                sizeof(u64) * new_r->nr;
        unsigned scratch_bytes = sizeof(struct bch_fs_usage_online) +
                sizeof(u64) * new_r->nr;
        int ret = 0;

        memset(new_usage, 0, sizeof(new_usage));

        for (i = 0; i < ARRAY_SIZE(new_usage); i++)
                if (!(new_usage[i] = __alloc_percpu_gfp(bytes,
                                        sizeof(u64), GFP_KERNEL)))
                        goto err;

        if (!(new_base = kzalloc(bytes, GFP_KERNEL)) ||
            !(new_scratch = kmalloc(scratch_bytes, GFP_KERNEL)) ||
            (c->usage_gc &&
             !(new_gc = __alloc_percpu_gfp(bytes, sizeof(u64), GFP_KERNEL))))
                goto err;

        for (i = 0; i < ARRAY_SIZE(new_usage); i++)
                if (c->usage[i])
                        __replicas_table_update_pcpu(new_usage[i], new_r,
                                                     c->usage[i], &c->replicas);
        if (c->usage_base)
                __replicas_table_update(new_base, new_r,
                                        c->usage_base, &c->replicas);
        if (c->usage_gc)
                __replicas_table_update_pcpu(new_gc, new_r,
                                             c->usage_gc, &c->replicas);

        for (i = 0; i < ARRAY_SIZE(new_usage); i++)
                swap(c->usage[i], new_usage[i]);
        swap(c->usage_base,     new_base);
        swap(c->usage_scratch,  new_scratch);
        swap(c->usage_gc,       new_gc);
        swap(c->replicas,       *new_r);
out:
        free_percpu(new_gc);
        kfree(new_scratch);
        for (i = 0; i < ARRAY_SIZE(new_usage); i++)
                free_percpu(new_usage[i]);
        kfree(new_base);
        return ret;
err:
        bch_err(c, "error updating replicas table: memory allocation failure");
        ret = -ENOMEM;
        goto out;
}

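/*
 * Note that on success, the swaps above leave the *old* tables in the
 * new_* variables, so the cleanup at out: frees the old allocations on
 * success and the partial ones on error - one exit path handles both.
 */
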
static unsigned reserve_journal_replicas(struct bch_fs *c,
                                     struct bch_replicas_cpu *r)
{
        struct bch_replicas_entry *e;
        unsigned journal_res_u64s = 0;

        /* nr_inodes: */
        journal_res_u64s +=
                DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));

        /* key_version: */
        journal_res_u64s +=
                DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));

        /* persistent_reserved: */
        journal_res_u64s +=
                DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64)) *
                BCH_REPLICAS_MAX;

        for_each_cpu_replicas_entry(r, e)
                journal_res_u64s +=
                        DIV_ROUND_UP(sizeof(struct jset_entry_data_usage) +
                                     e->nr_devs, sizeof(u64));
        return journal_res_u64s;
}

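/*
 * reserve_journal_replicas() sizes the journal reservation for the
 * usage entries written with every journal commit; it has to be
 * recomputed whenever the replicas table grows (see
 * bch2_mark_replicas_slowpath() below).
 */
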
static int bch2_mark_replicas_slowpath(struct bch_fs *c,
                                struct bch_replicas_entry *new_entry)
{
        struct bch_replicas_cpu new_r, new_gc;
        int ret = 0;

        verify_replicas_entry(new_entry);

        memset(&new_r, 0, sizeof(new_r));
        memset(&new_gc, 0, sizeof(new_gc));

        mutex_lock(&c->sb_lock);

        if (c->replicas_gc.entries &&
            !__replicas_has_entry(&c->replicas_gc, new_entry)) {
                new_gc = cpu_replicas_add_entry(&c->replicas_gc, new_entry);
                if (!new_gc.entries)
                        goto err;
        }

        if (!__replicas_has_entry(&c->replicas, new_entry)) {
                new_r = cpu_replicas_add_entry(&c->replicas, new_entry);
                if (!new_r.entries)
                        goto err;

                ret = bch2_cpu_replicas_to_sb_replicas(c, &new_r);
                if (ret)
                        goto err;

                bch2_journal_entry_res_resize(&c->journal,
                                &c->replicas_journal_res,
                                reserve_journal_replicas(c, &new_r));
        }

        if (!new_r.entries &&
            !new_gc.entries)
                goto out;

        /* allocations done, now commit: */

        if (new_r.entries)
                bch2_write_super(c);

        /* don't update in memory replicas until changes are persistent */
        percpu_down_write(&c->mark_lock);
        if (new_r.entries)
                ret = replicas_table_update(c, &new_r);
        if (new_gc.entries)
                swap(new_gc, c->replicas_gc);
        percpu_up_write(&c->mark_lock);
out:
        mutex_unlock(&c->sb_lock);

        kfree(new_r.entries);
        kfree(new_gc.entries);

        return ret;
err:
        bch_err(c, "error adding replicas entry: memory allocation failure");
        ret = -ENOMEM;
        goto out;
}

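/*
 * The ordering above matters: the superblock is written before the in
 * memory tables are updated, so an entry is never considered marked
 * until it's persistent - otherwise a crash could leave data on devices
 * the superblock doesn't know about.
 */
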
static int __bch2_mark_replicas(struct bch_fs *c,
                                struct bch_replicas_entry *r,
                                bool check)
{
        return likely(bch2_replicas_marked(c, r))       ? 0
                : check                                 ? -1
                : bch2_mark_replicas_slowpath(c, r);
}

int bch2_mark_replicas(struct bch_fs *c, struct bch_replicas_entry *r)
{
        return __bch2_mark_replicas(c, r, false);
}

static int __bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k,
                                     bool check)
{
        struct bch_replicas_padded search;
        struct bch_devs_list cached = bch2_bkey_cached_devs(k);
        unsigned i;
        int ret;

        memset(&search, 0, sizeof(search));

        for (i = 0; i < cached.nr; i++) {
                bch2_replicas_entry_cached(&search.e, cached.devs[i]);

                ret = __bch2_mark_replicas(c, &search.e, check);
                if (ret)
                        return ret;
        }

        bch2_bkey_to_replicas(&search.e, k);

        ret = __bch2_mark_replicas(c, &search.e, check);
        if (ret)
                return ret;

        if (search.e.data_type == BCH_DATA_parity) {
                search.e.data_type = BCH_DATA_cached;
                ret = __bch2_mark_replicas(c, &search.e, check);
                if (ret)
                        return ret;

                search.e.data_type = BCH_DATA_user;
                ret = __bch2_mark_replicas(c, &search.e, check);
                if (ret)
                        return ret;
        }

        return 0;
}

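/*
 * For stripe keys the same device list is also marked as cached and
 * user data above - presumably because the stripe's blocks may hold
 * user and cached data accounted against the same set of devices.
 */
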
/* replicas delta list: */

bool bch2_replicas_delta_list_marked(struct bch_fs *c,
                                     struct replicas_delta_list *r)
{
        struct replicas_delta *d = r->d;
        struct replicas_delta *top = (void *) r->d + r->used;

        percpu_rwsem_assert_held(&c->mark_lock);

        for (d = r->d; d != top; d = replicas_delta_next(d))
                if (bch2_replicas_entry_idx(c, &d->r) < 0)
                        return false;
        return true;
}

int bch2_replicas_delta_list_mark(struct bch_fs *c,
                                  struct replicas_delta_list *r)
{
        struct replicas_delta *d = r->d;
        struct replicas_delta *top = (void *) r->d + r->used;
        int ret = 0;

        for (d = r->d; !ret && d != top; d = replicas_delta_next(d))
                ret = bch2_mark_replicas(c, &d->r);
        return ret;
}

/* bkey replicas: */

bool bch2_bkey_replicas_marked(struct bch_fs *c,
                               struct bkey_s_c k)
{
        return __bch2_mark_bkey_replicas(c, k, true) == 0;
}

int bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
{
        return __bch2_mark_bkey_replicas(c, k, false);
}

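/*
 * bch2_bkey_replicas_marked() is the read-only query (check == true
 * never mutates the tables); bch2_mark_bkey_replicas() takes the
 * slowpath as needed and adds any missing entries.
 */
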
/*
 * Old replicas_gc mechanism: only used for journal replicas entries now, should
 * die at some point:
 */

int bch2_replicas_gc_end(struct bch_fs *c, int ret)
{
        unsigned i;

        lockdep_assert_held(&c->replicas_gc_lock);

        mutex_lock(&c->sb_lock);
        percpu_down_write(&c->mark_lock);

        /*
         * this is kind of crappy; the replicas gc mechanism needs to be ripped
         * out
         */

        for (i = 0; i < c->replicas.nr; i++) {
                struct bch_replicas_entry *e =
                        cpu_replicas_entry(&c->replicas, i);
                struct bch_replicas_cpu n;

                if (!__replicas_has_entry(&c->replicas_gc, e) &&
                    bch2_fs_usage_read_one(c, &c->usage_base->replicas[i])) {
                        n = cpu_replicas_add_entry(&c->replicas_gc, e);
                        if (!n.entries) {
                                ret = -ENOSPC;
                                goto err;
                        }

                        swap(n, c->replicas_gc);
                        kfree(n.entries);
                }
        }

        if (bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc)) {
                ret = -ENOSPC;
                goto err;
        }

        ret = replicas_table_update(c, &c->replicas_gc);
err:
        kfree(c->replicas_gc.entries);
        c->replicas_gc.entries = NULL;

        percpu_up_write(&c->mark_lock);

        if (!ret)
                bch2_write_super(c);

        mutex_unlock(&c->sb_lock);

        return ret;
}

int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
{
        struct bch_replicas_entry *e;
        unsigned i = 0;

        lockdep_assert_held(&c->replicas_gc_lock);

        mutex_lock(&c->sb_lock);
        BUG_ON(c->replicas_gc.entries);

        c->replicas_gc.nr               = 0;
        c->replicas_gc.entry_size       = 0;

        for_each_cpu_replicas_entry(&c->replicas, e)
                if (!((1 << e->data_type) & typemask)) {
                        c->replicas_gc.nr++;
                        c->replicas_gc.entry_size =
                                max_t(unsigned, c->replicas_gc.entry_size,
                                      replicas_entry_bytes(e));
                }

        c->replicas_gc.entries = kcalloc(c->replicas_gc.nr,
                                         c->replicas_gc.entry_size,
                                         GFP_KERNEL);
        if (!c->replicas_gc.entries) {
                mutex_unlock(&c->sb_lock);
                bch_err(c, "error allocating c->replicas_gc");
                return -ENOMEM;
        }

        for_each_cpu_replicas_entry(&c->replicas, e)
                if (!((1 << e->data_type) & typemask))
                        memcpy(cpu_replicas_entry(&c->replicas_gc, i++),
                               e, c->replicas_gc.entry_size);

        bch2_cpu_replicas_sort(&c->replicas_gc);
        mutex_unlock(&c->sb_lock);

        return 0;
}

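/*
 * typemask selects the data types being garbage collected: entries of
 * those types start out absent from replicas_gc and must be re-marked
 * while gc runs; entries of all other types are carried over untouched.
 */
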
/* New much simpler mechanism for clearing out unneeded replicas entries: */

int bch2_replicas_gc2(struct bch_fs *c)
{
        struct bch_replicas_cpu new = { 0 };
        unsigned i, nr;
        int ret = 0;

        bch2_journal_meta(&c->journal);
retry:
        nr              = READ_ONCE(c->replicas.nr);
        new.entry_size  = READ_ONCE(c->replicas.entry_size);
        new.entries     = kcalloc(nr, new.entry_size, GFP_KERNEL);
        if (!new.entries) {
                bch_err(c, "error allocating c->replicas_gc");
                return -ENOMEM;
        }

        mutex_lock(&c->sb_lock);
        percpu_down_write(&c->mark_lock);

        if (nr                  != c->replicas.nr ||
            new.entry_size      != c->replicas.entry_size) {
                percpu_up_write(&c->mark_lock);
                mutex_unlock(&c->sb_lock);
                kfree(new.entries);
                goto retry;
        }

        for (i = 0; i < c->replicas.nr; i++) {
                struct bch_replicas_entry *e =
                        cpu_replicas_entry(&c->replicas, i);

                if (e->data_type == BCH_DATA_journal ||
                    c->usage_base->replicas[i] ||
                    percpu_u64_get(&c->usage[0]->replicas[i]) ||
                    percpu_u64_get(&c->usage[1]->replicas[i]) ||
                    percpu_u64_get(&c->usage[2]->replicas[i]) ||
                    percpu_u64_get(&c->usage[3]->replicas[i]))
                        memcpy(cpu_replicas_entry(&new, new.nr++),
                               e, new.entry_size);
        }

        bch2_cpu_replicas_sort(&new);

        if (bch2_cpu_replicas_to_sb_replicas(c, &new)) {
                ret = -ENOSPC;
                goto err;
        }

        ret = replicas_table_update(c, &new);
err:
        kfree(new.entries);

        percpu_up_write(&c->mark_lock);

        if (!ret)
                bch2_write_super(c);

        mutex_unlock(&c->sb_lock);

        return ret;
}

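/*
 * Unlike gc_start()/gc_end(), bch2_replicas_gc2() works from current
 * usage: after flushing the journal, any entry that isn't a journal
 * entry and has no sectors accounted against it can simply be dropped.
 */
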
int bch2_replicas_set_usage(struct bch_fs *c,
                            struct bch_replicas_entry *r,
                            u64 sectors)
{
        int ret, idx = bch2_replicas_entry_idx(c, r);

        if (idx < 0) {
                struct bch_replicas_cpu n;

                n = cpu_replicas_add_entry(&c->replicas, r);
                if (!n.entries)
                        return -ENOMEM;

                ret = replicas_table_update(c, &n);
                if (ret)
                        return ret;

                kfree(n.entries);

                idx = bch2_replicas_entry_idx(c, r);
                BUG_ON(idx < 0);
        }

        c->usage_base->replicas[idx] = sectors;

        return 0;
}

/* Replicas tracking - superblock: */

static int
__bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r,
                                   struct bch_replicas_cpu *cpu_r)
{
        struct bch_replicas_entry *e, *dst;
        unsigned nr = 0, entry_size = 0, idx = 0;

        for_each_replicas_entry(sb_r, e) {
                entry_size = max_t(unsigned, entry_size,
                                   replicas_entry_bytes(e));
                nr++;
        }

        cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
        if (!cpu_r->entries)
                return -ENOMEM;

        cpu_r->nr               = nr;
        cpu_r->entry_size       = entry_size;

        for_each_replicas_entry(sb_r, e) {
                dst = cpu_replicas_entry(cpu_r, idx++);
                memcpy(dst, e, replicas_entry_bytes(e));
                bch2_replicas_entry_sort(dst);
        }

        return 0;
}

static int
__bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
                                      struct bch_replicas_cpu *cpu_r)
{
        struct bch_replicas_entry_v0 *e;
        unsigned nr = 0, entry_size = 0, idx = 0;

        for_each_replicas_entry(sb_r, e) {
                entry_size = max_t(unsigned, entry_size,
                                   replicas_entry_bytes(e));
                nr++;
        }

        entry_size += sizeof(struct bch_replicas_entry) -
                sizeof(struct bch_replicas_entry_v0);

        cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
        if (!cpu_r->entries)
                return -ENOMEM;

        cpu_r->nr               = nr;
        cpu_r->entry_size       = entry_size;

        for_each_replicas_entry(sb_r, e) {
                struct bch_replicas_entry *dst =
                        cpu_replicas_entry(cpu_r, idx++);

                dst->data_type  = e->data_type;
                dst->nr_devs    = e->nr_devs;
                dst->nr_required = 1;
                memcpy(dst->devs, e->devs, e->nr_devs);
                bch2_replicas_entry_sort(dst);
        }

        return 0;
}

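/*
 * v0 entries lack the nr_required field; the conversion above widens
 * each entry and defaults nr_required to 1, which is why entry_size is
 * grown by the difference between the two struct sizes.
 */
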
int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
{
        struct bch_sb_field_replicas *sb_v1;
        struct bch_sb_field_replicas_v0 *sb_v0;
        struct bch_replicas_cpu new_r = { 0, 0, NULL };
        int ret = 0;

        if ((sb_v1 = bch2_sb_get_replicas(c->disk_sb.sb)))
                ret = __bch2_sb_replicas_to_cpu_replicas(sb_v1, &new_r);
        else if ((sb_v0 = bch2_sb_get_replicas_v0(c->disk_sb.sb)))
                ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_v0, &new_r);

        if (ret)
                return -ENOMEM;

        bch2_cpu_replicas_sort(&new_r);

        percpu_down_write(&c->mark_lock);

        ret = replicas_table_update(c, &new_r);
        percpu_up_write(&c->mark_lock);

        kfree(new_r.entries);

        return 0;
}

static int bch2_cpu_replicas_to_sb_replicas_v0(struct bch_fs *c,
                                               struct bch_replicas_cpu *r)
{
        struct bch_sb_field_replicas_v0 *sb_r;
        struct bch_replicas_entry_v0 *dst;
        struct bch_replicas_entry *src;
        size_t bytes;

        bytes = sizeof(struct bch_sb_field_replicas);

        for_each_cpu_replicas_entry(r, src)
                bytes += replicas_entry_bytes(src) - 1;

        sb_r = bch2_sb_resize_replicas_v0(&c->disk_sb,
                        DIV_ROUND_UP(bytes, sizeof(u64)));
        if (!sb_r)
                return -ENOSPC;

        bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas);
        sb_r = bch2_sb_get_replicas_v0(c->disk_sb.sb);

        memset(&sb_r->entries, 0,
               vstruct_end(&sb_r->field) -
               (void *) &sb_r->entries);

        dst = sb_r->entries;
        for_each_cpu_replicas_entry(r, src) {
                dst->data_type  = src->data_type;
                dst->nr_devs    = src->nr_devs;
                memcpy(dst->devs, src->devs, src->nr_devs);

                dst = replicas_entry_next(dst);

                BUG_ON((void *) dst > vstruct_end(&sb_r->field));
        }

        return 0;
}

static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
                                            struct bch_replicas_cpu *r)
{
        struct bch_sb_field_replicas *sb_r;
        struct bch_replicas_entry *dst, *src;
        bool need_v1 = false;
        size_t bytes;

        bytes = sizeof(struct bch_sb_field_replicas);

        for_each_cpu_replicas_entry(r, src) {
                bytes += replicas_entry_bytes(src);
                if (src->nr_required != 1)
                        need_v1 = true;
        }

        if (!need_v1)
                return bch2_cpu_replicas_to_sb_replicas_v0(c, r);

        sb_r = bch2_sb_resize_replicas(&c->disk_sb,
                        DIV_ROUND_UP(bytes, sizeof(u64)));
        if (!sb_r)
                return -ENOSPC;

        bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas_v0);
        sb_r = bch2_sb_get_replicas(c->disk_sb.sb);

        memset(&sb_r->entries, 0,
               vstruct_end(&sb_r->field) -
               (void *) &sb_r->entries);

        dst = sb_r->entries;
        for_each_cpu_replicas_entry(r, src) {
                memcpy(dst, src, replicas_entry_bytes(src));

                dst = replicas_entry_next(dst);

                BUG_ON((void *) dst > vstruct_end(&sb_r->field));
        }

        return 0;
}

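/*
 * The superblock field is written in the older v0 format whenever
 * every entry has nr_required == 1, presumably so that filesystems not
 * using erasure coding remain readable by older versions.
 */
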
static const char *check_dup_replicas_entries(struct bch_replicas_cpu *cpu_r)
{
        unsigned i;

        sort_cmp_size(cpu_r->entries,
                      cpu_r->nr,
                      cpu_r->entry_size,
                      memcmp, NULL);

        for (i = 0; i + 1 < cpu_r->nr; i++) {
                struct bch_replicas_entry *l =
                        cpu_replicas_entry(cpu_r, i);
                struct bch_replicas_entry *r =
                        cpu_replicas_entry(cpu_r, i + 1);

                BUG_ON(memcmp(l, r, cpu_r->entry_size) > 0);

                if (!memcmp(l, r, cpu_r->entry_size))
                        return "duplicate replicas entry";
        }

        return NULL;
}

static const char *bch2_sb_validate_replicas(struct bch_sb *sb, struct bch_sb_field *f)
{
        struct bch_sb_field_replicas *sb_r = field_to_type(f, replicas);
        struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
        struct bch_replicas_cpu cpu_r = { .entries = NULL };
        struct bch_replicas_entry *e;
        const char *err;
        unsigned i;

        for_each_replicas_entry(sb_r, e) {
                err = "invalid replicas entry: invalid data type";
                if (e->data_type >= BCH_DATA_NR)
                        goto err;

                err = "invalid replicas entry: no devices";
                if (!e->nr_devs)
                        goto err;

                err = "invalid replicas entry: bad nr_required";
                if (e->nr_required > 1 &&
                    e->nr_required >= e->nr_devs)
                        goto err;

                err = "invalid replicas entry: invalid device";
                for (i = 0; i < e->nr_devs; i++)
                        if (!bch2_dev_exists(sb, mi, e->devs[i]))
                                goto err;
        }

        err = "cannot allocate memory";
        if (__bch2_sb_replicas_to_cpu_replicas(sb_r, &cpu_r))
                goto err;

        err = check_dup_replicas_entries(&cpu_r);
err:
        kfree(cpu_r.entries);
        return err;
}

static void bch2_sb_replicas_to_text(struct printbuf *out,
                                     struct bch_sb *sb,
                                     struct bch_sb_field *f)
{
        struct bch_sb_field_replicas *r = field_to_type(f, replicas);
        struct bch_replicas_entry *e;
        bool first = true;

        for_each_replicas_entry(r, e) {
                if (!first)
                        pr_buf(out, " ");
                first = false;

                bch2_replicas_entry_to_text(out, e);
        }
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas = {
        .validate       = bch2_sb_validate_replicas,
        .to_text        = bch2_sb_replicas_to_text,
};

static const char *bch2_sb_validate_replicas_v0(struct bch_sb *sb, struct bch_sb_field *f)
{
        struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
        struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
        struct bch_replicas_cpu cpu_r = { .entries = NULL };
        struct bch_replicas_entry_v0 *e;
        const char *err;
        unsigned i;

        for_each_replicas_entry_v0(sb_r, e) {
                err = "invalid replicas entry: invalid data type";
                if (e->data_type >= BCH_DATA_NR)
                        goto err;

                err = "invalid replicas entry: no devices";
                if (!e->nr_devs)
                        goto err;

                err = "invalid replicas entry: invalid device";
                for (i = 0; i < e->nr_devs; i++)
                        if (!bch2_dev_exists(sb, mi, e->devs[i]))
                                goto err;
        }

        err = "cannot allocate memory";
        if (__bch2_sb_replicas_v0_to_cpu_replicas(sb_r, &cpu_r))
                goto err;

        err = check_dup_replicas_entries(&cpu_r);
err:
        kfree(cpu_r.entries);
        return err;
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0 = {
        .validate       = bch2_sb_validate_replicas_v0,
};

/* Query replicas: */

bool bch2_have_enough_devs(struct bch_fs *c, struct bch_devs_mask devs,
                           unsigned flags, bool print)
{
        struct bch_replicas_entry *e;
        bool ret = true;

        percpu_down_read(&c->mark_lock);
        for_each_cpu_replicas_entry(&c->replicas, e) {
                unsigned i, nr_online = 0, nr_failed = 0, dflags = 0;
                bool metadata = e->data_type < BCH_DATA_user;

                if (e->data_type == BCH_DATA_cached)
                        continue;

                for (i = 0; i < e->nr_devs; i++) {
                        struct bch_dev *ca = bch_dev_bkey_exists(c, e->devs[i]);

                        nr_online += test_bit(e->devs[i], devs.d);
                        nr_failed += ca->mi.state == BCH_MEMBER_STATE_failed;
                }

                if (nr_failed == e->nr_devs)
                        continue;

                if (nr_online < e->nr_required)
                        dflags |= metadata
                                ? BCH_FORCE_IF_METADATA_LOST
                                : BCH_FORCE_IF_DATA_LOST;

                if (nr_online < e->nr_devs)
                        dflags |= metadata
                                ? BCH_FORCE_IF_METADATA_DEGRADED
                                : BCH_FORCE_IF_DATA_DEGRADED;

                if (dflags & ~flags) {
                        if (print) {
                                char buf[100];

                                bch2_replicas_entry_to_text(&PBUF(buf), e);
                                bch_err(c, "insufficient devices online (%u) for replicas entry %s",
                                        nr_online, buf);
                        }
                        ret = false;
                        break;
                }
        }
        percpu_up_read(&c->mark_lock);

        return ret;
}

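/*
 * In bch2_have_enough_devs(), fewer online devices than nr_required
 * means the entry's data is lost; fewer than nr_devs (but at least
 * nr_required) means degraded. Continuing in either state requires the
 * corresponding BCH_FORCE_IF_* flag, hence the dflags & ~flags check.
 */
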
unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
{
        struct bch_replicas_entry *e;
        unsigned i, ret = 0;

        percpu_down_read(&c->mark_lock);

        for_each_cpu_replicas_entry(&c->replicas, e)
                for (i = 0; i < e->nr_devs; i++)
                        if (e->devs[i] == ca->dev_idx)
                                ret |= 1 << e->data_type;

        percpu_up_read(&c->mark_lock);

        return ret;
}

void bch2_fs_replicas_exit(struct bch_fs *c)
{
        unsigned i;

        kfree(c->usage_scratch);
        for (i = 0; i < ARRAY_SIZE(c->usage); i++)
                free_percpu(c->usage[i]);
        kfree(c->usage_base);
        kfree(c->replicas.entries);
        kfree(c->replicas_gc.entries);

        mempool_exit(&c->replicas_delta_pool);
}

int bch2_fs_replicas_init(struct bch_fs *c)
{
        bch2_journal_entry_res_resize(&c->journal,
                        &c->replicas_journal_res,
                        reserve_journal_replicas(c, &c->replicas));

        return mempool_init_kmalloc_pool(&c->replicas_delta_pool, 1,
                                         REPLICAS_DELTA_LIST_MAX) ?:
                replicas_table_update(c, &c->replicas);
}