// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "buckets.h"
#include "journal.h"
#include "replicas.h"
#include "super-io.h"

static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
					    struct bch_replicas_cpu *);

/* Replicas tracking - in memory: */

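/*
 * Note on the in-memory representation: struct bch_replicas_cpu is a flat
 * array of fixed-size entries kept in sorted eytzinger order, so lookups can
 * use eytzinger0_find(). Entries are zero-padded out to entry_size, which is
 * what lets plain memcmp() serve as the comparison function throughout.
 */
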
static void verify_replicas_entry(struct bch_replicas_entry *e)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	unsigned i;

	BUG_ON(e->data_type >= BCH_DATA_NR);
	BUG_ON(!e->nr_devs);
	BUG_ON(e->nr_required > 1 &&
	       e->nr_required >= e->nr_devs);

	for (i = 0; i + 1 < e->nr_devs; i++)
		BUG_ON(e->devs[i] >= e->devs[i + 1]);
#endif
}

void bch2_replicas_entry_sort(struct bch_replicas_entry *e)
{
	bubble_sort(e->devs, e->nr_devs, u8_cmp);
}

static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
{
	eytzinger0_sort(r->entries, r->nr, r->entry_size, memcmp, NULL);
}

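/*
 * Per-entry device lists are small, so bubble_sort() is adequate; whole
 * tables are sorted with memcmp(), which gives a consistent total order
 * because entries are zero-padded and their device lists already sorted.
 */
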
void bch2_replicas_entry_v0_to_text(struct printbuf *out,
				    struct bch_replicas_entry_v0 *e)
{
	unsigned i;

	if (e->data_type < BCH_DATA_NR)
		prt_printf(out, "%s", bch2_data_types[e->data_type]);
	else
		prt_printf(out, "(invalid data type %u)", e->data_type);

	prt_printf(out, ": %u [", e->nr_devs);
	for (i = 0; i < e->nr_devs; i++)
		prt_printf(out, i ? " %u" : "%u", e->devs[i]);
	prt_printf(out, "]");
}

void bch2_replicas_entry_to_text(struct printbuf *out,
				 struct bch_replicas_entry *e)
{
	unsigned i;

	if (e->data_type < BCH_DATA_NR)
		prt_printf(out, "%s", bch2_data_types[e->data_type]);
	else
		prt_printf(out, "(invalid data type %u)", e->data_type);

	prt_printf(out, ": %u/%u [", e->nr_required, e->nr_devs);
	for (i = 0; i < e->nr_devs; i++)
		prt_printf(out, i ? " %u" : "%u", e->devs[i]);
	prt_printf(out, "]");
}

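/*
 * Example (assuming the usual bch2_data_types[] strings): an entry with
 * data_type BCH_DATA_user, nr_required 1 and devices 0 and 2 prints as
 * "user: 1/2 [0 2]".
 */
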
void bch2_cpu_replicas_to_text(struct printbuf *out,
			       struct bch_replicas_cpu *r)
{
	struct bch_replicas_entry *e;
	bool first = true;

	for_each_cpu_replicas_entry(r, e) {
		if (!first)
			prt_printf(out, " ");
		first = false;

		bch2_replicas_entry_to_text(out, e);
	}
}

static void extent_to_replicas(struct bkey_s_c k,
			       struct bch_replicas_entry *r)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	r->nr_required	= 1;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if (p.ptr.cached)
			continue;

		if (!p.has_ec)
			r->devs[r->nr_devs++] = p.ptr.dev;
		else
			r->nr_required = 0;
	}
}

static void stripe_to_replicas(struct bkey_s_c k,
			       struct bch_replicas_entry *r)
{
	struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
	const struct bch_extent_ptr *ptr;

	r->nr_required	= s.v->nr_blocks - s.v->nr_redundant;

	for (ptr = s.v->ptrs;
	     ptr < s.v->ptrs + s.v->nr_blocks;
	     ptr++)
		r->devs[r->nr_devs++] = ptr->dev;
}

void bch2_bkey_to_replicas(struct bch_replicas_entry *e,
			   struct bkey_s_c k)
{
	e->nr_devs = 0;

	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		e->data_type = BCH_DATA_btree;
		extent_to_replicas(k, e);
		break;
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		e->data_type = BCH_DATA_user;
		extent_to_replicas(k, e);
		break;
	case KEY_TYPE_stripe:
		e->data_type = BCH_DATA_parity;
		stripe_to_replicas(k, e);
		break;
	}

	bch2_replicas_entry_sort(e);
}

void bch2_devlist_to_replicas(struct bch_replicas_entry *e,
			      enum bch_data_type data_type,
			      struct bch_devs_list devs)
{
	unsigned i;

	BUG_ON(!data_type ||
	       data_type == BCH_DATA_sb ||
	       data_type >= BCH_DATA_NR);

	e->data_type	= data_type;
	e->nr_devs	= 0;
	e->nr_required	= 1;

	for (i = 0; i < devs.nr; i++)
		e->devs[e->nr_devs++] = devs.devs[i];

	bch2_replicas_entry_sort(e);
}

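/*
 * Both constructors above finish with bch2_replicas_entry_sort(), so every
 * entry built from a key or a device list is in canonical (sorted) form
 * before it's compared against or inserted into a table.
 */
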
static struct bch_replicas_cpu
cpu_replicas_add_entry(struct bch_replicas_cpu *old,
		       struct bch_replicas_entry *new_entry)
{
	unsigned i;
	struct bch_replicas_cpu new = {
		.nr		= old->nr + 1,
		.entry_size	= max_t(unsigned, old->entry_size,
					replicas_entry_bytes(new_entry)),
	};

	BUG_ON(!new_entry->data_type);
	verify_replicas_entry(new_entry);

	new.entries = kcalloc(new.nr, new.entry_size, GFP_KERNEL);
	if (!new.entries)
		return new;

	for (i = 0; i < old->nr; i++)
		memcpy(cpu_replicas_entry(&new, i),
		       cpu_replicas_entry(old, i),
		       old->entry_size);

	memcpy(cpu_replicas_entry(&new, old->nr),
	       new_entry,
	       replicas_entry_bytes(new_entry));

	bch2_cpu_replicas_sort(&new);
	return new;
}

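/*
 * Note the ownership convention: the table is returned by value and the
 * caller owns new.entries, checking it for NULL (allocation failure) and
 * freeing whichever copy doesn't get swapped into place.
 */
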
static inline int __replicas_entry_idx(struct bch_replicas_cpu *r,
				       struct bch_replicas_entry *search)
{
	int idx, entry_size = replicas_entry_bytes(search);

	if (unlikely(entry_size > r->entry_size))
		return -1;

	verify_replicas_entry(search);

#define entry_cmp(_l, _r, size)	memcmp(_l, _r, entry_size)
	idx = eytzinger0_find(r->entries, r->nr, r->entry_size,
			      entry_cmp, search);
#undef entry_cmp

	return idx < r->nr ? idx : -1;
}

int bch2_replicas_entry_idx(struct bch_fs *c,
			    struct bch_replicas_entry *search)
{
	bch2_replicas_entry_sort(search);

	return __replicas_entry_idx(&c->replicas, search);
}

static bool __replicas_has_entry(struct bch_replicas_cpu *r,
				 struct bch_replicas_entry *search)
{
	return __replicas_entry_idx(r, search) >= 0;
}

bool bch2_replicas_marked(struct bch_fs *c,
			  struct bch_replicas_entry *search)
{
	bool marked;

	if (!search->nr_devs)
		return true;

	verify_replicas_entry(search);

	percpu_down_read(&c->mark_lock);
	marked = __replicas_has_entry(&c->replicas, search) &&
		(likely((!c->replicas_gc.entries)) ||
		 __replicas_has_entry(&c->replicas_gc, search));
	percpu_up_read(&c->mark_lock);

	return marked;
}

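/*
 * This is the fast path for bch2_mark_replicas(): just a table lookup under
 * the read side of mark_lock. Only when the entry is missing do we take the
 * slowpath that reallocates the tables and updates the superblock.
 */
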
static void __replicas_table_update(struct bch_fs_usage *dst,
				    struct bch_replicas_cpu *dst_r,
				    struct bch_fs_usage *src,
				    struct bch_replicas_cpu *src_r)
{
	int src_idx, dst_idx;

	*dst = *src;

	for (src_idx = 0; src_idx < src_r->nr; src_idx++) {
		if (!src->replicas[src_idx])
			continue;

		dst_idx = __replicas_entry_idx(dst_r,
				cpu_replicas_entry(src_r, src_idx));
		BUG_ON(dst_idx < 0);

		dst->replicas[dst_idx] = src->replicas[src_idx];
	}
}

static void __replicas_table_update_pcpu(struct bch_fs_usage __percpu *dst_p,
				    struct bch_replicas_cpu *dst_r,
				    struct bch_fs_usage __percpu *src_p,
				    struct bch_replicas_cpu *src_r)
{
	unsigned src_nr = sizeof(struct bch_fs_usage) / sizeof(u64) + src_r->nr;
	struct bch_fs_usage *dst, *src = (void *)
		bch2_acc_percpu_u64s((void *) src_p, src_nr);

	preempt_disable();
	dst = this_cpu_ptr(dst_p);
	preempt_enable();

	__replicas_table_update(dst, dst_r, src, src_r);
}

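/*
 * bch2_acc_percpu_u64s() sums all the percpu copies of the source counters
 * into a single array, so the index remap above only has to run once rather
 * than once per CPU.
 */
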
/*
 * Resize filesystem accounting:
 */
static int replicas_table_update(struct bch_fs *c,
				 struct bch_replicas_cpu *new_r)
{
	struct bch_fs_usage __percpu *new_usage[JOURNAL_BUF_NR];
	struct bch_fs_usage_online *new_scratch = NULL;
	struct bch_fs_usage __percpu *new_gc = NULL;
	struct bch_fs_usage *new_base = NULL;
	unsigned i, bytes = sizeof(struct bch_fs_usage) +
		sizeof(u64) * new_r->nr;
	unsigned scratch_bytes = sizeof(struct bch_fs_usage_online) +
		sizeof(u64) * new_r->nr;
	int ret = 0;

	memset(new_usage, 0, sizeof(new_usage));

	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
		if (!(new_usage[i] = __alloc_percpu_gfp(bytes,
					sizeof(u64), GFP_KERNEL)))
			goto err;

	if (!(new_base = kzalloc(bytes, GFP_KERNEL)) ||
	    !(new_scratch = kmalloc(scratch_bytes, GFP_KERNEL)) ||
	    (c->usage_gc &&
	     !(new_gc = __alloc_percpu_gfp(bytes, sizeof(u64), GFP_KERNEL))))
		goto err;

	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
		if (c->usage[i])
			__replicas_table_update_pcpu(new_usage[i], new_r,
						     c->usage[i], &c->replicas);
	if (c->usage_base)
		__replicas_table_update(new_base, new_r,
					c->usage_base, &c->replicas);
	if (c->usage_gc)
		__replicas_table_update_pcpu(new_gc, new_r,
					     c->usage_gc, &c->replicas);

	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
		swap(c->usage[i], new_usage[i]);
	swap(c->usage_base,	new_base);
	swap(c->usage_scratch,	new_scratch);
	swap(c->usage_gc,	new_gc);
	swap(c->replicas,	*new_r);
out:
	free_percpu(new_gc);
	kfree(new_scratch);
	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
		free_percpu(new_usage[i]);
	kfree(new_base);
	return ret;
err:
	bch_err(c, "error updating replicas table: memory allocation failure");
	ret = -BCH_ERR_ENOMEM_replicas_table;
	goto out;
}

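/*
 * Pattern worth noting: every replacement table is allocated up front, then
 * everything is swapped in at once; after the swaps the new_* locals hold
 * the old tables, so the common exit path frees whichever set lost.
 */
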
static unsigned reserve_journal_replicas(struct bch_fs *c,
				     struct bch_replicas_cpu *r)
{
	struct bch_replicas_entry *e;
	unsigned journal_res_u64s = 0;

	/* nr_inodes: */
	journal_res_u64s +=
		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));

	/* key_version: */
	journal_res_u64s +=
		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));

	/* persistent_reserved: */
	journal_res_u64s +=
		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64)) *
		BCH_REPLICAS_MAX;

	for_each_cpu_replicas_entry(r, e)
		journal_res_u64s +=
			DIV_ROUND_UP(sizeof(struct jset_entry_data_usage) +
				     e->nr_devs, sizeof(u64));
	return journal_res_u64s;
}

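/*
 * The journal entry reservation has to be big enough for one usage entry per
 * replicas entry, so that a journal write can carry a complete snapshot of
 * the disk usage counters; it gets resized whenever the replicas table grows.
 */
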
noinline
static int bch2_mark_replicas_slowpath(struct bch_fs *c,
				struct bch_replicas_entry *new_entry)
{
	struct bch_replicas_cpu new_r, new_gc;
	int ret = 0;

	verify_replicas_entry(new_entry);

	memset(&new_r, 0, sizeof(new_r));
	memset(&new_gc, 0, sizeof(new_gc));

	mutex_lock(&c->sb_lock);

	if (c->replicas_gc.entries &&
	    !__replicas_has_entry(&c->replicas_gc, new_entry)) {
		new_gc = cpu_replicas_add_entry(&c->replicas_gc, new_entry);
		if (!new_gc.entries) {
			ret = -BCH_ERR_ENOMEM_cpu_replicas;
			goto err;
		}
	}

	if (!__replicas_has_entry(&c->replicas, new_entry)) {
		new_r = cpu_replicas_add_entry(&c->replicas, new_entry);
		if (!new_r.entries) {
			ret = -BCH_ERR_ENOMEM_cpu_replicas;
			goto err;
		}

		ret = bch2_cpu_replicas_to_sb_replicas(c, &new_r);
		if (ret)
			goto err;

		bch2_journal_entry_res_resize(&c->journal,
				&c->replicas_journal_res,
				reserve_journal_replicas(c, &new_r));
	}

	if (!new_r.entries &&
	    !new_gc.entries)
		goto out;

	/* allocations done, now commit: */

	if (new_r.entries)
		bch2_write_super(c);

	/* don't update in memory replicas until changes are persistent */
	percpu_down_write(&c->mark_lock);
	if (new_r.entries)
		ret = replicas_table_update(c, &new_r);
	if (new_gc.entries)
		swap(new_gc, c->replicas_gc);
	percpu_up_write(&c->mark_lock);
out:
	mutex_unlock(&c->sb_lock);

	kfree(new_r.entries);
	kfree(new_gc.entries);

	return ret;
err:
	bch_err(c, "error adding replicas entry: %s", bch2_err_str(ret));
	goto out;
}

int bch2_mark_replicas(struct bch_fs *c, struct bch_replicas_entry *r)
{
	return likely(bch2_replicas_marked(c, r))
		? 0 : bch2_mark_replicas_slowpath(c, r);
}

/* replicas delta list: */

int bch2_replicas_delta_list_mark(struct bch_fs *c,
				  struct replicas_delta_list *r)
{
	struct replicas_delta *d = r->d;
	struct replicas_delta *top = (void *) r->d + r->used;
	int ret = 0;

	for (d = r->d; !ret && d != top; d = replicas_delta_next(d))
		ret = bch2_mark_replicas(c, &d->r);
	return ret;
}

/*
 * Old replicas_gc mechanism: only used for journal replicas entries now, should
 * die at some point:
 */

int bch2_replicas_gc_end(struct bch_fs *c, int ret)
{
	unsigned i;

	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);
	percpu_down_write(&c->mark_lock);

	/*
	 * this is kind of crappy; the replicas gc mechanism needs to be ripped
	 * out
	 */

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);
		struct bch_replicas_cpu n;

		/*
		 * An entry that wasn't marked during gc but still has sectors
		 * in use gets added back:
		 */
		if (!__replicas_has_entry(&c->replicas_gc, e) &&
		    bch2_fs_usage_read_one(c, &c->usage_base->replicas[i])) {
			n = cpu_replicas_add_entry(&c->replicas_gc, e);
			if (!n.entries) {
				ret = -BCH_ERR_ENOMEM_cpu_replicas;
				goto err;
			}

			swap(n, c->replicas_gc);
			kfree(n.entries);
		}
	}

	ret = bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc);
	if (ret)
		goto err;

	ret = replicas_table_update(c, &c->replicas_gc);
err:
	kfree(c->replicas_gc.entries);
	c->replicas_gc.entries = NULL;

	percpu_up_write(&c->mark_lock);

	if (!ret)
		bch2_write_super(c);

	mutex_unlock(&c->sb_lock);

	return ret;
}

int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
{
	struct bch_replicas_entry *e;
	unsigned i = 0;

	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);
	BUG_ON(c->replicas_gc.entries);

	c->replicas_gc.nr		= 0;
	c->replicas_gc.entry_size	= 0;

	for_each_cpu_replicas_entry(&c->replicas, e)
		if (!((1 << e->data_type) & typemask)) {
			c->replicas_gc.nr++;
			c->replicas_gc.entry_size =
				max_t(unsigned, c->replicas_gc.entry_size,
				      replicas_entry_bytes(e));
		}

	c->replicas_gc.entries = kcalloc(c->replicas_gc.nr,
					 c->replicas_gc.entry_size,
					 GFP_KERNEL);
	if (!c->replicas_gc.entries) {
		mutex_unlock(&c->sb_lock);
		bch_err(c, "error allocating c->replicas_gc");
		return -BCH_ERR_ENOMEM_replicas_gc;
	}

	for_each_cpu_replicas_entry(&c->replicas, e)
		if (!((1 << e->data_type) & typemask))
			memcpy(cpu_replicas_entry(&c->replicas_gc, i++),
			       e, c->replicas_gc.entry_size);

	bch2_cpu_replicas_sort(&c->replicas_gc);
	mutex_unlock(&c->sb_lock);

	return 0;
}

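/*
 * gc protocol, for reference: bch2_replicas_gc_start() seeds replicas_gc
 * with the entries *not* being collected (typemask selects what's up for
 * collection); entries marked while gc runs are added to both tables in the
 * mark slowpath; bch2_replicas_gc_end() re-adds entries that still have
 * sectors in use and swaps the pruned table in.
 */
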
/*
 * New much simpler mechanism for clearing out unneeded replicas entries - drop
 * replicas entries that have 0 sectors used.
 *
 * However, we don't track sector counts for journal usage, so this doesn't drop
 * any BCH_DATA_journal entries; the old bch2_replicas_gc_(start|end) mechanism
 * is retained for that.
 */
int bch2_replicas_gc2(struct bch_fs *c)
{
	struct bch_replicas_cpu new = { 0 };
	unsigned i, nr;
	int ret = 0;

	bch2_journal_meta(&c->journal);
retry:
	nr		= READ_ONCE(c->replicas.nr);
	new.entry_size	= READ_ONCE(c->replicas.entry_size);
	new.entries	= kcalloc(nr, new.entry_size, GFP_KERNEL);
	if (!new.entries) {
		bch_err(c, "error allocating c->replicas_gc");
		return -BCH_ERR_ENOMEM_replicas_gc;
	}

	mutex_lock(&c->sb_lock);
	percpu_down_write(&c->mark_lock);

	/* allocated without the locks held; retry if the table was resized: */
	if (nr			!= c->replicas.nr ||
	    new.entry_size	!= c->replicas.entry_size) {
		percpu_up_write(&c->mark_lock);
		mutex_unlock(&c->sb_lock);
		kfree(new.entries);
		goto retry;
	}

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);

		if (e->data_type == BCH_DATA_journal ||
		    c->usage_base->replicas[i] ||
		    percpu_u64_get(&c->usage[0]->replicas[i]) ||
		    percpu_u64_get(&c->usage[1]->replicas[i]) ||
		    percpu_u64_get(&c->usage[2]->replicas[i]) ||
		    percpu_u64_get(&c->usage[3]->replicas[i]))
			memcpy(cpu_replicas_entry(&new, new.nr++),
			       e, new.entry_size);
	}

	bch2_cpu_replicas_sort(&new);

	ret = bch2_cpu_replicas_to_sb_replicas(c, &new);
	if (ret)
		goto err;

	ret = replicas_table_update(c, &new);
err:
	kfree(new.entries);

	percpu_up_write(&c->mark_lock);

	if (!ret)
		bch2_write_super(c);

	mutex_unlock(&c->sb_lock);

	return ret;
}

int bch2_replicas_set_usage(struct bch_fs *c,
			    struct bch_replicas_entry *r,
			    u64 sectors)
{
	int ret, idx = bch2_replicas_entry_idx(c, r);

	if (idx < 0) {
		struct bch_replicas_cpu n;

		n = cpu_replicas_add_entry(&c->replicas, r);
		if (!n.entries)
			return -BCH_ERR_ENOMEM_cpu_replicas;

		ret = replicas_table_update(c, &n);
		if (ret)
			return ret;

		kfree(n.entries);

		idx = bch2_replicas_entry_idx(c, r);
		BUG_ON(idx < 0);
	}

	c->usage_base->replicas[idx] = sectors;

	return 0;
}

/* Replicas tracking - superblock: */

static int
__bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r,
				   struct bch_replicas_cpu *cpu_r)
{
	struct bch_replicas_entry *e, *dst;
	unsigned nr = 0, entry_size = 0, idx = 0;

	for_each_replicas_entry(sb_r, e) {
		entry_size = max_t(unsigned, entry_size,
				   replicas_entry_bytes(e));
		nr++;
	}

	cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
	if (!cpu_r->entries)
		return -BCH_ERR_ENOMEM_cpu_replicas;

	cpu_r->nr		= nr;
	cpu_r->entry_size	= entry_size;

	for_each_replicas_entry(sb_r, e) {
		dst = cpu_replicas_entry(cpu_r, idx++);
		memcpy(dst, e, replicas_entry_bytes(e));
		bch2_replicas_entry_sort(dst);
	}

	return 0;
}

static int
__bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
				      struct bch_replicas_cpu *cpu_r)
{
	struct bch_replicas_entry_v0 *e;
	unsigned nr = 0, entry_size = 0, idx = 0;

	for_each_replicas_entry(sb_r, e) {
		entry_size = max_t(unsigned, entry_size,
				   replicas_entry_bytes(e));
		nr++;
	}

	/* v0 entries lack nr_required; leave room for it in the cpu table: */
	entry_size += sizeof(struct bch_replicas_entry) -
		sizeof(struct bch_replicas_entry_v0);

	cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
	if (!cpu_r->entries)
		return -BCH_ERR_ENOMEM_cpu_replicas;

	cpu_r->nr		= nr;
	cpu_r->entry_size	= entry_size;

	for_each_replicas_entry(sb_r, e) {
		struct bch_replicas_entry *dst =
			cpu_replicas_entry(cpu_r, idx++);

		dst->data_type	= e->data_type;
		dst->nr_devs	= e->nr_devs;
		dst->nr_required = 1;
		memcpy(dst->devs, e->devs, e->nr_devs);
		bch2_replicas_entry_sort(dst);
	}

	return 0;
}

int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
{
	struct bch_sb_field_replicas *sb_v1;
	struct bch_sb_field_replicas_v0 *sb_v0;
	struct bch_replicas_cpu new_r = { 0, 0, NULL };
	int ret = 0;

	if ((sb_v1 = bch2_sb_get_replicas(c->disk_sb.sb)))
		ret = __bch2_sb_replicas_to_cpu_replicas(sb_v1, &new_r);
	else if ((sb_v0 = bch2_sb_get_replicas_v0(c->disk_sb.sb)))
		ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_v0, &new_r);
	if (ret)
		return ret;

	bch2_cpu_replicas_sort(&new_r);

	percpu_down_write(&c->mark_lock);

	ret = replicas_table_update(c, &new_r);
	percpu_up_write(&c->mark_lock);

	kfree(new_r.entries);

	return ret;
}

static int bch2_cpu_replicas_to_sb_replicas_v0(struct bch_fs *c,
					       struct bch_replicas_cpu *r)
{
	struct bch_sb_field_replicas_v0 *sb_r;
	struct bch_replicas_entry_v0 *dst;
	struct bch_replicas_entry *src;
	size_t bytes;

	bytes = sizeof(struct bch_sb_field_replicas);

	/* v0 entries are one byte smaller (no nr_required): */
	for_each_cpu_replicas_entry(r, src)
		bytes += replicas_entry_bytes(src) - 1;

	sb_r = bch2_sb_resize_replicas_v0(&c->disk_sb,
			DIV_ROUND_UP(bytes, sizeof(u64)));
	if (!sb_r)
		return -BCH_ERR_ENOSPC_sb_replicas;

	bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas);
	sb_r = bch2_sb_get_replicas_v0(c->disk_sb.sb);

	memset(&sb_r->entries, 0,
	       vstruct_end(&sb_r->field) -
	       (void *) &sb_r->entries);

	dst = sb_r->entries;
	for_each_cpu_replicas_entry(r, src) {
		dst->data_type	= src->data_type;
		dst->nr_devs	= src->nr_devs;
		memcpy(dst->devs, src->devs, src->nr_devs);

		dst = replicas_entry_next(dst);

		BUG_ON((void *) dst > vstruct_end(&sb_r->field));
	}

	return 0;
}

static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
					    struct bch_replicas_cpu *r)
{
	struct bch_sb_field_replicas *sb_r;
	struct bch_replicas_entry *dst, *src;
	bool need_v1 = false;
	size_t bytes;

	bytes = sizeof(struct bch_sb_field_replicas);

	for_each_cpu_replicas_entry(r, src) {
		bytes += replicas_entry_bytes(src);
		if (src->nr_required != 1)
			need_v1 = true;
	}

	if (!need_v1)
		return bch2_cpu_replicas_to_sb_replicas_v0(c, r);

	sb_r = bch2_sb_resize_replicas(&c->disk_sb,
			DIV_ROUND_UP(bytes, sizeof(u64)));
	if (!sb_r)
		return -BCH_ERR_ENOSPC_sb_replicas;

	bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas_v0);
	sb_r = bch2_sb_get_replicas(c->disk_sb.sb);

	memset(&sb_r->entries, 0,
	       vstruct_end(&sb_r->field) -
	       (void *) &sb_r->entries);

	dst = sb_r->entries;
	for_each_cpu_replicas_entry(r, src) {
		memcpy(dst, src, replicas_entry_bytes(src));

		dst = replicas_entry_next(dst);

		BUG_ON((void *) dst > vstruct_end(&sb_r->field));
	}

	return 0;
}

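/*
 * When every entry has nr_required == 1 the compact v0 format suffices;
 * exactly one of the replicas/replicas_v0 superblock fields is kept, so each
 * writer deletes the other field before filling in its own.
 */
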
static int bch2_cpu_replicas_validate(struct bch_replicas_cpu *cpu_r,
				      struct bch_sb *sb,
				      struct printbuf *err)
{
	struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
	unsigned i, j;

	sort_cmp_size(cpu_r->entries,
		      cpu_r->nr,
		      cpu_r->entry_size,
		      memcmp, NULL);

	for (i = 0; i < cpu_r->nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(cpu_r, i);

		if (e->data_type >= BCH_DATA_NR) {
			prt_printf(err, "invalid data type in entry ");
			bch2_replicas_entry_to_text(err, e);
			return -BCH_ERR_invalid_sb_replicas;
		}

		if (!e->nr_devs) {
			prt_printf(err, "no devices in entry ");
			bch2_replicas_entry_to_text(err, e);
			return -BCH_ERR_invalid_sb_replicas;
		}

		if (e->nr_required > 1 &&
		    e->nr_required >= e->nr_devs) {
			prt_printf(err, "bad nr_required in entry ");
			bch2_replicas_entry_to_text(err, e);
			return -BCH_ERR_invalid_sb_replicas;
		}

		for (j = 0; j < e->nr_devs; j++)
			if (!bch2_dev_exists(sb, mi, e->devs[j])) {
				prt_printf(err, "invalid device %u in entry ", e->devs[j]);
				bch2_replicas_entry_to_text(err, e);
				return -BCH_ERR_invalid_sb_replicas;
			}

		if (i + 1 < cpu_r->nr) {
			struct bch_replicas_entry *n =
				cpu_replicas_entry(cpu_r, i + 1);

			BUG_ON(memcmp(e, n, cpu_r->entry_size) > 0);

			if (!memcmp(e, n, cpu_r->entry_size)) {
				prt_printf(err, "duplicate replicas entry ");
				bch2_replicas_entry_to_text(err, e);
				return -BCH_ERR_invalid_sb_replicas;
			}
		}
	}

	return 0;
}

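/*
 * The sort_cmp_size() call above is what makes duplicate detection valid:
 * once sorted, duplicates must be adjacent, so comparing each entry against
 * its successor suffices.
 */
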
static int bch2_sb_replicas_validate(struct bch_sb *sb, struct bch_sb_field *f,
				     struct printbuf *err)
{
	struct bch_sb_field_replicas *sb_r = field_to_type(f, replicas);
	struct bch_replicas_cpu cpu_r;
	int ret;

	ret = __bch2_sb_replicas_to_cpu_replicas(sb_r, &cpu_r);
	if (ret)
		return ret;

	ret = bch2_cpu_replicas_validate(&cpu_r, sb, err);
	kfree(cpu_r.entries);
	return ret;
}

static void bch2_sb_replicas_to_text(struct printbuf *out,
				     struct bch_sb *sb,
				     struct bch_sb_field *f)
{
	struct bch_sb_field_replicas *r = field_to_type(f, replicas);
	struct bch_replicas_entry *e;
	bool first = true;

	for_each_replicas_entry(r, e) {
		if (!first)
			prt_printf(out, " ");
		first = false;

		bch2_replicas_entry_to_text(out, e);
	}
	prt_newline(out);
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas = {
	.validate	= bch2_sb_replicas_validate,
	.to_text	= bch2_sb_replicas_to_text,
};

static int bch2_sb_replicas_v0_validate(struct bch_sb *sb, struct bch_sb_field *f,
					struct printbuf *err)
{
	struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
	struct bch_replicas_cpu cpu_r;
	int ret;

	ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_r, &cpu_r);
	if (ret)
		return ret;

	ret = bch2_cpu_replicas_validate(&cpu_r, sb, err);
	kfree(cpu_r.entries);
	return ret;
}

static void bch2_sb_replicas_v0_to_text(struct printbuf *out,
					struct bch_sb *sb,
					struct bch_sb_field *f)
{
	struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
	struct bch_replicas_entry_v0 *e;
	bool first = true;

	for_each_replicas_entry(sb_r, e) {
		if (!first)
			prt_printf(out, " ");
		first = false;

		bch2_replicas_entry_v0_to_text(out, e);
	}
	prt_newline(out);
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0 = {
	.validate	= bch2_sb_replicas_v0_validate,
	.to_text	= bch2_sb_replicas_v0_to_text,
};

/* Query replicas: */

bool bch2_have_enough_devs(struct bch_fs *c, struct bch_devs_mask devs,
			   unsigned flags, bool print)
{
	struct bch_replicas_entry *e;
	bool ret = true;

	percpu_down_read(&c->mark_lock);
	for_each_cpu_replicas_entry(&c->replicas, e) {
		unsigned i, nr_online = 0, nr_failed = 0, dflags = 0;
		bool metadata = e->data_type < BCH_DATA_user;

		if (e->data_type == BCH_DATA_cached)
			continue;

		for (i = 0; i < e->nr_devs; i++) {
			struct bch_dev *ca = bch_dev_bkey_exists(c, e->devs[i]);

			nr_online += test_bit(e->devs[i], devs.d);
			nr_failed += ca->mi.state == BCH_MEMBER_STATE_failed;
		}

		if (nr_failed == e->nr_devs)
			continue;

		if (nr_online < e->nr_required)
			dflags |= metadata
				? BCH_FORCE_IF_METADATA_LOST
				: BCH_FORCE_IF_DATA_LOST;

		if (nr_online < e->nr_devs)
			dflags |= metadata
				? BCH_FORCE_IF_METADATA_DEGRADED
				: BCH_FORCE_IF_DATA_DEGRADED;

		if (dflags & ~flags) {
			if (print) {
				struct printbuf buf = PRINTBUF;

				bch2_replicas_entry_to_text(&buf, e);
				bch_err(c, "insufficient devices online (%u) for replicas entry %s",
					nr_online, buf.buf);
				printbuf_exit(&buf);
			}
			ret = false;
			break;
		}
	}
	percpu_up_read(&c->mark_lock);

	return ret;
}

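/*
 * flags here are the BCH_FORCE_IF_* bits: an entry with fewer than
 * nr_required devices online counts as lost, fewer than nr_devs as merely
 * degraded, and we only refuse if the corresponding force flag wasn't set.
 */
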
unsigned bch2_sb_dev_has_data(struct bch_sb *sb, unsigned dev)
{
	struct bch_sb_field_replicas *replicas;
	struct bch_sb_field_replicas_v0 *replicas_v0;
	unsigned i, data_has = 0;

	replicas = bch2_sb_get_replicas(sb);
	replicas_v0 = bch2_sb_get_replicas_v0(sb);

	if (replicas) {
		struct bch_replicas_entry *r;

		for_each_replicas_entry(replicas, r)
			for (i = 0; i < r->nr_devs; i++)
				if (r->devs[i] == dev)
					data_has |= 1 << r->data_type;
	} else if (replicas_v0) {
		struct bch_replicas_entry_v0 *r;

		for_each_replicas_entry_v0(replicas_v0, r)
			for (i = 0; i < r->nr_devs; i++)
				if (r->devs[i] == dev)
					data_has |= 1 << r->data_type;
	}

	return data_has;
}

unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned ret;

	mutex_lock(&c->sb_lock);
	ret = bch2_sb_dev_has_data(c->disk_sb.sb, ca->dev_idx);
	mutex_unlock(&c->sb_lock);

	return ret;
}

void bch2_fs_replicas_exit(struct bch_fs *c)
{
	unsigned i;

	kfree(c->usage_scratch);
	for (i = 0; i < ARRAY_SIZE(c->usage); i++)
		free_percpu(c->usage[i]);
	kfree(c->usage_base);
	kfree(c->replicas.entries);
	kfree(c->replicas_gc.entries);

	mempool_exit(&c->replicas_delta_pool);
}

int bch2_fs_replicas_init(struct bch_fs *c)
{
	bch2_journal_entry_res_resize(&c->journal,
			&c->replicas_journal_res,
			reserve_journal_replicas(c, &c->replicas));

	return mempool_init_kmalloc_pool(&c->replicas_delta_pool, 1,
					 REPLICAS_DELTA_LIST_MAX) ?:
		replicas_table_update(c, &c->replicas);
}