// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "buckets.h"
#include "journal.h"
#include "replicas.h"
#include "super-io.h"

static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
					    struct bch_replicas_cpu *);

/* Replicas tracking - in memory: */
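
/*
 * The in-memory table (struct bch_replicas_cpu) is a flat array of fixed-size
 * entries; each entry records a data type, how many devices hold a replica
 * (nr_devs), how many of those must be online for the data to be readable
 * (nr_required), and the sorted list of device indexes. The array itself is
 * kept in eytzinger order so lookups can binary search.
 */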

static void verify_replicas_entry(struct bch_replicas_entry_v1 *e)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	unsigned i;

	BUG_ON(e->data_type >= BCH_DATA_NR);
	BUG_ON(!e->nr_devs);
	BUG_ON(e->nr_required > 1 &&
	       e->nr_required >= e->nr_devs);

	for (i = 0; i + 1 < e->nr_devs; i++)
		BUG_ON(e->devs[i] >= e->devs[i + 1]);
#endif
}

void bch2_replicas_entry_sort(struct bch_replicas_entry_v1 *e)
{
	bubble_sort(e->devs, e->nr_devs, u8_cmp);
}

static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
{
	eytzinger0_sort(r->entries, r->nr, r->entry_size, memcmp, NULL);
}
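
/*
 * eytzinger0_sort() leaves the entries in the implicit search tree layout
 * that eytzinger0_find() expects, so the table must be re-sorted whenever an
 * entry is added or the entry size changes.
 */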

static void bch2_replicas_entry_v0_to_text(struct printbuf *out,
					   struct bch_replicas_entry_v0 *e)
{
	unsigned i;

	if (e->data_type < BCH_DATA_NR)
		prt_printf(out, "%s", bch2_data_types[e->data_type]);
	else
		prt_printf(out, "(invalid data type %u)", e->data_type);

	prt_printf(out, ": %u [", e->nr_devs);
	for (i = 0; i < e->nr_devs; i++)
		prt_printf(out, i ? " %u" : "%u", e->devs[i]);
	prt_printf(out, "]");
}

void bch2_replicas_entry_to_text(struct printbuf *out,
				 struct bch_replicas_entry_v1 *e)
{
	unsigned i;

	if (e->data_type < BCH_DATA_NR)
		prt_printf(out, "%s", bch2_data_types[e->data_type]);
	else
		prt_printf(out, "(invalid data type %u)", e->data_type);

	prt_printf(out, ": %u/%u [", e->nr_required, e->nr_devs);
	for (i = 0; i < e->nr_devs; i++)
		prt_printf(out, i ? " %u" : "%u", e->devs[i]);
	prt_printf(out, "]");
}
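
/*
 * Example rendering: a 2x replicated user data entry on devices 0 and 1 with
 * nr_required == 1 prints as "user: 1/2 [0 1]"; the v0 format above has no
 * nr_required field and would print "user: 2 [0 1]".
 */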

int bch2_replicas_entry_validate(struct bch_replicas_entry_v1 *r,
				 struct bch_sb *sb,
				 struct printbuf *err)
{
	if (!r->nr_devs) {
		prt_printf(err, "no devices in entry ");
		goto bad;
	}

	if (r->nr_required > 1 &&
	    r->nr_required >= r->nr_devs) {
		prt_printf(err, "bad nr_required in entry ");
		goto bad;
	}

	for (unsigned i = 0; i < r->nr_devs; i++)
		if (!bch2_dev_exists(sb, r->devs[i])) {
			prt_printf(err, "invalid device %u in entry ", r->devs[i]);
			goto bad;
		}

	return 0;
bad:
	bch2_replicas_entry_to_text(err, r);
	return -BCH_ERR_invalid_replicas_entry;
}

void bch2_cpu_replicas_to_text(struct printbuf *out,
			       struct bch_replicas_cpu *r)
{
	struct bch_replicas_entry_v1 *e;
	bool first = true;

	for_each_cpu_replicas_entry(r, e) {
		if (!first)
			prt_printf(out, " ");
		first = false;

		bch2_replicas_entry_to_text(out, e);
	}
}

static void extent_to_replicas(struct bkey_s_c k,
			       struct bch_replicas_entry_v1 *r)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	r->nr_required	= 1;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if (p.ptr.cached)
			continue;

		if (!p.has_ec)
			r->devs[r->nr_devs++] = p.ptr.dev;
		else
			r->nr_required = 0;
	}
}
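
/*
 * Cached pointers aren't counted: losing a cached copy never loses data.
 * If any pointer is erasure coded, nr_required drops to 0 because redundancy
 * for that extent is provided by the stripe, not by full replicas.
 */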

static void stripe_to_replicas(struct bkey_s_c k,
			       struct bch_replicas_entry_v1 *r)
{
	struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
	const struct bch_extent_ptr *ptr;

	r->nr_required	= s.v->nr_blocks - s.v->nr_redundant;

	for (ptr = s.v->ptrs;
	     ptr < s.v->ptrs + s.v->nr_blocks;
	     ptr++)
		r->devs[r->nr_devs++] = ptr->dev;
}

void bch2_bkey_to_replicas(struct bch_replicas_entry_v1 *e,
			   struct bkey_s_c k)
{
	e->nr_devs = 0;

	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		e->data_type = BCH_DATA_btree;
		extent_to_replicas(k, e);
		break;
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		e->data_type = BCH_DATA_user;
		extent_to_replicas(k, e);
		break;
	case KEY_TYPE_stripe:
		e->data_type = BCH_DATA_parity;
		stripe_to_replicas(k, e);
		break;
	}

	bch2_replicas_entry_sort(e);
}
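
/*
 * Typical use, as a minimal sketch (caller-side code is illustrative, not
 * taken from this file): build the entry for a key on the stack, then make
 * sure it's marked before relying on the key:
 *
 *	struct bch_replicas_padded r;
 *
 *	memset(&r, 0, sizeof(r));
 *	bch2_bkey_to_replicas(&r.e, k);
 *	ret = bch2_mark_replicas(c, &r.e);
 */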

void bch2_devlist_to_replicas(struct bch_replicas_entry_v1 *e,
			      enum bch_data_type data_type,
			      struct bch_devs_list devs)
{
	unsigned i;

	BUG_ON(!data_type ||
	       data_type == BCH_DATA_sb ||
	       data_type >= BCH_DATA_NR);

	e->data_type	= data_type;
	e->nr_devs	= 0;
	e->nr_required	= 1;

	for (i = 0; i < devs.nr; i++)
		e->devs[e->nr_devs++] = devs.devs[i];

	bch2_replicas_entry_sort(e);
}
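
/*
 * Same pattern for non-extent data, e.g. (again illustrative) a journal
 * write to a list of devices:
 *
 *	struct bch_replicas_padded r;
 *
 *	bch2_devlist_to_replicas(&r.e, BCH_DATA_journal, devs);
 *	ret = bch2_mark_replicas(c, &r.e);
 */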

static struct bch_replicas_cpu
cpu_replicas_add_entry(struct bch_fs *c,
		       struct bch_replicas_cpu *old,
		       struct bch_replicas_entry_v1 *new_entry)
{
	unsigned i;
	struct bch_replicas_cpu new = {
		.nr		= old->nr + 1,
		.entry_size	= max_t(unsigned, old->entry_size,
					replicas_entry_bytes(new_entry)),
	};

	for (i = 0; i < new_entry->nr_devs; i++)
		BUG_ON(!bch2_dev_exists2(c, new_entry->devs[i]));

	BUG_ON(!new_entry->data_type);
	verify_replicas_entry(new_entry);

	new.entries = kcalloc(new.nr, new.entry_size, GFP_KERNEL);
	if (!new.entries)
		return new;

	for (i = 0; i < old->nr; i++)
		memcpy(cpu_replicas_entry(&new, i),
		       cpu_replicas_entry(old, i),
		       old->entry_size);

	memcpy(cpu_replicas_entry(&new, old->nr),
	       new_entry,
	       replicas_entry_bytes(new_entry));

	bch2_cpu_replicas_sort(&new);
	return new;
}

static inline int __replicas_entry_idx(struct bch_replicas_cpu *r,
				       struct bch_replicas_entry_v1 *search)
{
	int idx, entry_size = replicas_entry_bytes(search);

	if (unlikely(entry_size > r->entry_size))
		return -1;

	verify_replicas_entry(search);

#define entry_cmp(_l, _r, size)	memcmp(_l, _r, entry_size)
	idx = eytzinger0_find(r->entries, r->nr, r->entry_size,
			      entry_cmp, search);
#undef entry_cmp

	return idx < r->nr ? idx : -1;
}
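
/*
 * Note that entry_cmp only compares replicas_entry_bytes(search) bytes: this
 * is safe because nr_devs lies within the compared prefix, and because table
 * entries are zero-padded (kcalloc()), so the full-width memcmp used for
 * sorting orders entries consistently with this truncated comparison.
 */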

int bch2_replicas_entry_idx(struct bch_fs *c,
			    struct bch_replicas_entry_v1 *search)
{
	bch2_replicas_entry_sort(search);

	return __replicas_entry_idx(&c->replicas, search);
}

static bool __replicas_has_entry(struct bch_replicas_cpu *r,
				 struct bch_replicas_entry_v1 *search)
{
	return __replicas_entry_idx(r, search) >= 0;
}

bool bch2_replicas_marked(struct bch_fs *c,
			  struct bch_replicas_entry_v1 *search)
{
	bool marked;

	if (!search->nr_devs)
		return true;

	verify_replicas_entry(search);

	percpu_down_read(&c->mark_lock);
	marked = __replicas_has_entry(&c->replicas, search) &&
		(likely((!c->replicas_gc.entries)) ||
		 __replicas_has_entry(&c->replicas_gc, search));
	percpu_up_read(&c->mark_lock);

	return marked;
}

static void __replicas_table_update(struct bch_fs_usage *dst,
				    struct bch_replicas_cpu *dst_r,
				    struct bch_fs_usage *src,
				    struct bch_replicas_cpu *src_r)
{
	int src_idx, dst_idx;

	*dst = *src;

	for (src_idx = 0; src_idx < src_r->nr; src_idx++) {
		if (!src->replicas[src_idx])
			continue;

		dst_idx = __replicas_entry_idx(dst_r,
				cpu_replicas_entry(src_r, src_idx));
		BUG_ON(dst_idx < 0);

		dst->replicas[dst_idx] = src->replicas[src_idx];
	}
}

static void __replicas_table_update_pcpu(struct bch_fs_usage __percpu *dst_p,
				    struct bch_replicas_cpu *dst_r,
				    struct bch_fs_usage __percpu *src_p,
				    struct bch_replicas_cpu *src_r)
{
	unsigned src_nr = sizeof(struct bch_fs_usage) / sizeof(u64) + src_r->nr;
	struct bch_fs_usage *dst, *src = (void *)
		bch2_acc_percpu_u64s((u64 __percpu *) src_p, src_nr);

	preempt_disable();
	dst = this_cpu_ptr(dst_p);
	preempt_enable();

	__replicas_table_update(dst, dst_r, src, src_r);
}

/*
 * Resize filesystem accounting:
 */
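
/*
 * Usage counters carry one u64 of sector counts per replicas entry, so when
 * the table grows, every per-cpu and base usage array must be reallocated at
 * the new size and old counters copied across by entry lookup. Everything is
 * allocated up front so the final pointer swaps cannot fail partway through.
 */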

static int replicas_table_update(struct bch_fs *c,
				 struct bch_replicas_cpu *new_r)
{
	struct bch_fs_usage __percpu *new_usage[JOURNAL_BUF_NR];
	struct bch_fs_usage_online *new_scratch = NULL;
	struct bch_fs_usage __percpu *new_gc = NULL;
	struct bch_fs_usage *new_base = NULL;
	unsigned i, bytes = sizeof(struct bch_fs_usage) +
		sizeof(u64) * new_r->nr;
	unsigned scratch_bytes = sizeof(struct bch_fs_usage_online) +
		sizeof(u64) * new_r->nr;
	int ret = 0;

	memset(new_usage, 0, sizeof(new_usage));

	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
		if (!(new_usage[i] = __alloc_percpu_gfp(bytes,
					sizeof(u64), GFP_KERNEL)))
			goto err;

	if (!(new_base = kzalloc(bytes, GFP_KERNEL)) ||
	    !(new_scratch = kmalloc(scratch_bytes, GFP_KERNEL)) ||
	    (c->usage_gc &&
	     !(new_gc = __alloc_percpu_gfp(bytes, sizeof(u64), GFP_KERNEL))))
		goto err;

	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
		if (c->usage[i])
			__replicas_table_update_pcpu(new_usage[i], new_r,
						     c->usage[i], &c->replicas);
	if (c->usage_base)
		__replicas_table_update(new_base, new_r,
					c->usage_base, &c->replicas);
	if (c->usage_gc)
		__replicas_table_update_pcpu(new_gc, new_r,
					     c->usage_gc, &c->replicas);

	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
		swap(c->usage[i], new_usage[i]);
	swap(c->usage_base,    new_base);
	swap(c->usage_scratch, new_scratch);
	swap(c->usage_gc,      new_gc);
	swap(c->replicas,      *new_r);
out:
	free_percpu(new_gc);
	kfree(new_scratch);
	for (i = 0; i < ARRAY_SIZE(new_usage); i++)
		free_percpu(new_usage[i]);
	kfree(new_base);
	return ret;
err:
	bch_err(c, "error updating replicas table: memory allocation failure");
	ret = -BCH_ERR_ENOMEM_replicas_table;
	goto out;
}

static unsigned reserve_journal_replicas(struct bch_fs *c,
					 struct bch_replicas_cpu *r)
{
	struct bch_replicas_entry_v1 *e;
	unsigned journal_res_u64s = 0;

	/* nr_inodes: */
	journal_res_u64s +=
		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));

	/* key_version: */
	journal_res_u64s +=
		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));

	/* persistent_reserved: */
	journal_res_u64s +=
		DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64)) *
		BCH_REPLICAS_MAX;

	for_each_cpu_replicas_entry(r, e)
		journal_res_u64s +=
			DIV_ROUND_UP(sizeof(struct jset_entry_data_usage) +
				     e->nr_devs, sizeof(u64));
	return journal_res_u64s;
}
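
/*
 * Journal writes embed filesystem usage: fixed jset_entry_usage records for
 * nr_inodes, key_version and the persistent_reserved array, plus one
 * jset_entry_data_usage per replicas entry. The reservation therefore has to
 * be recomputed whenever the replicas table gains an entry.
 */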

noinline
static int bch2_mark_replicas_slowpath(struct bch_fs *c,
				struct bch_replicas_entry_v1 *new_entry)
{
	struct bch_replicas_cpu new_r, new_gc;
	int ret = 0;

	verify_replicas_entry(new_entry);

	memset(&new_r, 0, sizeof(new_r));
	memset(&new_gc, 0, sizeof(new_gc));

	mutex_lock(&c->sb_lock);

	if (c->replicas_gc.entries &&
	    !__replicas_has_entry(&c->replicas_gc, new_entry)) {
		new_gc = cpu_replicas_add_entry(c, &c->replicas_gc, new_entry);
		if (!new_gc.entries) {
			ret = -BCH_ERR_ENOMEM_cpu_replicas;
			goto err;
		}
	}

	if (!__replicas_has_entry(&c->replicas, new_entry)) {
		new_r = cpu_replicas_add_entry(c, &c->replicas, new_entry);
		if (!new_r.entries) {
			ret = -BCH_ERR_ENOMEM_cpu_replicas;
			goto err;
		}

		ret = bch2_cpu_replicas_to_sb_replicas(c, &new_r);
		if (ret)
			goto err;

		bch2_journal_entry_res_resize(&c->journal,
				&c->replicas_journal_res,
				reserve_journal_replicas(c, &new_r));
	}

	if (!new_r.entries &&
	    !new_gc.entries)
		goto out;

	/* allocations done, now commit: */

	if (new_r.entries)
		bch2_write_super(c);

	/* don't update in memory replicas until changes are persistent */
	percpu_down_write(&c->mark_lock);
	if (new_r.entries)
		ret = replicas_table_update(c, &new_r);
	if (new_gc.entries)
		swap(new_gc, c->replicas_gc);
	percpu_up_write(&c->mark_lock);
out:
	mutex_unlock(&c->sb_lock);

	kfree(new_r.entries);
	kfree(new_gc.entries);

	return ret;
err:
	bch_err_msg(c, ret, "adding replicas entry");
	goto out;
}

int bch2_mark_replicas(struct bch_fs *c, struct bch_replicas_entry_v1 *r)
{
	return likely(bch2_replicas_marked(c, r))
		? 0 : bch2_mark_replicas_slowpath(c, r);
}
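
/*
 * This is the common entry point: the read-locked lookup in
 * bch2_replicas_marked() succeeds for all but the first marking of a given
 * entry, so the superblock update in the slowpath is rare.
 */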

/* replicas delta list: */

int bch2_replicas_delta_list_mark(struct bch_fs *c,
				  struct replicas_delta_list *r)
{
	struct replicas_delta *d = r->d;
	struct replicas_delta *top = (void *) r->d + r->used;
	int ret = 0;

	for (d = r->d; !ret && d != top; d = replicas_delta_next(d))
		ret = bch2_mark_replicas(c, &d->r);
	return ret;
}

/*
 * Old replicas_gc mechanism: only used for journal replicas entries now, should
 * die at some point:
 */

int bch2_replicas_gc_end(struct bch_fs *c, int ret)
{
	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);
	percpu_down_write(&c->mark_lock);

	ret =   ret ?:
		bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc) ?:
		replicas_table_update(c, &c->replicas_gc);

	kfree(c->replicas_gc.entries);
	c->replicas_gc.entries = NULL;

	percpu_up_write(&c->mark_lock);

	if (!ret)
		bch2_write_super(c);

	mutex_unlock(&c->sb_lock);

	return ret;
}

int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
{
	struct bch_replicas_entry_v1 *e;
	unsigned i = 0;

	lockdep_assert_held(&c->replicas_gc_lock);

	mutex_lock(&c->sb_lock);
	BUG_ON(c->replicas_gc.entries);

	c->replicas_gc.nr		= 0;
	c->replicas_gc.entry_size	= 0;

	for_each_cpu_replicas_entry(&c->replicas, e)
		if (!((1 << e->data_type) & typemask)) {
			c->replicas_gc.nr++;
			c->replicas_gc.entry_size =
				max_t(unsigned, c->replicas_gc.entry_size,
				      replicas_entry_bytes(e));
		}

	c->replicas_gc.entries = kcalloc(c->replicas_gc.nr,
					 c->replicas_gc.entry_size,
					 GFP_KERNEL);
	if (!c->replicas_gc.entries) {
		mutex_unlock(&c->sb_lock);
		bch_err(c, "error allocating c->replicas_gc");
		return -BCH_ERR_ENOMEM_replicas_gc;
	}

	for_each_cpu_replicas_entry(&c->replicas, e)
		if (!((1 << e->data_type) & typemask))
			memcpy(cpu_replicas_entry(&c->replicas_gc, i++),
			       e, c->replicas_gc.entry_size);

	bch2_cpu_replicas_sort(&c->replicas_gc);
	mutex_unlock(&c->sb_lock);

	return 0;
}

/*
 * New much simpler mechanism for clearing out unneeded replicas entries - drop
 * replicas entries that have 0 sectors used.
 *
 * However, we don't track sector counts for journal usage, so this doesn't drop
 * any BCH_DATA_journal entries; the old bch2_replicas_gc_(start|end) mechanism
 * is retained for that.
 */

int bch2_replicas_gc2(struct bch_fs *c)
{
	struct bch_replicas_cpu new = { 0 };
	unsigned i, nr;
	int ret = 0;

	bch2_journal_meta(&c->journal);
retry:
	nr		= READ_ONCE(c->replicas.nr);
	new.entry_size	= READ_ONCE(c->replicas.entry_size);
	new.entries	= kcalloc(nr, new.entry_size, GFP_KERNEL);
	if (!new.entries) {
		bch_err(c, "error allocating c->replicas_gc");
		return -BCH_ERR_ENOMEM_replicas_gc;
	}

	mutex_lock(&c->sb_lock);
	percpu_down_write(&c->mark_lock);

	if (nr			!= c->replicas.nr ||
	    new.entry_size	!= c->replicas.entry_size) {
		percpu_up_write(&c->mark_lock);
		mutex_unlock(&c->sb_lock);
		kfree(new.entries);
		goto retry;
	}

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry_v1 *e =
			cpu_replicas_entry(&c->replicas, i);

		if (e->data_type == BCH_DATA_journal ||
		    c->usage_base->replicas[i] ||
		    percpu_u64_get(&c->usage[0]->replicas[i]) ||
		    percpu_u64_get(&c->usage[1]->replicas[i]) ||
		    percpu_u64_get(&c->usage[2]->replicas[i]) ||
		    percpu_u64_get(&c->usage[3]->replicas[i]))
			memcpy(cpu_replicas_entry(&new, new.nr++),
			       e, new.entry_size);
	}

	bch2_cpu_replicas_sort(&new);

	ret =   bch2_cpu_replicas_to_sb_replicas(c, &new) ?:
		replicas_table_update(c, &new);

	kfree(new.entries);

	percpu_up_write(&c->mark_lock);

	if (!ret)
		bch2_write_super(c);

	mutex_unlock(&c->sb_lock);

	return ret;
}
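
/*
 * The new table is allocated before taking sb_lock/mark_lock, so if the
 * replicas table changed size in the meantime the buffer may be too small
 * and we retry. Entries still referenced by any usage counter, plus all
 * BCH_DATA_journal entries, are copied over; everything else is dropped.
 */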

int bch2_replicas_set_usage(struct bch_fs *c,
			    struct bch_replicas_entry_v1 *r,
			    u64 sectors)
{
	int ret, idx = bch2_replicas_entry_idx(c, r);

	if (idx < 0) {
		struct bch_replicas_cpu n;

		n = cpu_replicas_add_entry(c, &c->replicas, r);
		if (!n.entries)
			return -BCH_ERR_ENOMEM_cpu_replicas;

		ret = replicas_table_update(c, &n);
		if (ret)
			return ret;

		kfree(n.entries);

		idx = bch2_replicas_entry_idx(c, r);
		BUG_ON(idx < 0);
	}

	c->usage_base->replicas[idx] = sectors;

	return 0;
}

/* Replicas tracking - superblock: */

static int
__bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r,
				   struct bch_replicas_cpu *cpu_r)
{
	struct bch_replicas_entry_v1 *e, *dst;
	unsigned nr = 0, entry_size = 0, idx = 0;

	for_each_replicas_entry(sb_r, e) {
		entry_size = max_t(unsigned, entry_size,
				   replicas_entry_bytes(e));
		nr++;
	}

	cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
	if (!cpu_r->entries)
		return -BCH_ERR_ENOMEM_cpu_replicas;

	cpu_r->nr		= nr;
	cpu_r->entry_size	= entry_size;

	for_each_replicas_entry(sb_r, e) {
		dst = cpu_replicas_entry(cpu_r, idx++);
		memcpy(dst, e, replicas_entry_bytes(e));
		bch2_replicas_entry_sort(dst);
	}

	return 0;
}

static int
__bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
				      struct bch_replicas_cpu *cpu_r)
{
	struct bch_replicas_entry_v0 *e;
	unsigned nr = 0, entry_size = 0, idx = 0;

	for_each_replicas_entry(sb_r, e) {
		entry_size = max_t(unsigned, entry_size,
				   replicas_entry_bytes(e));
		nr++;
	}

	entry_size += sizeof(struct bch_replicas_entry_v1) -
		sizeof(struct bch_replicas_entry_v0);

	cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
	if (!cpu_r->entries)
		return -BCH_ERR_ENOMEM_cpu_replicas;

	cpu_r->nr		= nr;
	cpu_r->entry_size	= entry_size;

	for_each_replicas_entry(sb_r, e) {
		struct bch_replicas_entry_v1 *dst =
			cpu_replicas_entry(cpu_r, idx++);

		dst->data_type	= e->data_type;
		dst->nr_devs	= e->nr_devs;
		dst->nr_required = 1;
		memcpy(dst->devs, e->devs, e->nr_devs);
		bch2_replicas_entry_sort(dst);
	}

	return 0;
}

int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
{
	struct bch_sb_field_replicas *sb_v1;
	struct bch_sb_field_replicas_v0 *sb_v0;
	struct bch_replicas_cpu new_r = { 0, 0, NULL };
	int ret = 0;

	if ((sb_v1 = bch2_sb_field_get(c->disk_sb.sb, replicas)))
		ret = __bch2_sb_replicas_to_cpu_replicas(sb_v1, &new_r);
	else if ((sb_v0 = bch2_sb_field_get(c->disk_sb.sb, replicas_v0)))
		ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_v0, &new_r);
	if (ret)
		return ret;

	bch2_cpu_replicas_sort(&new_r);

	percpu_down_write(&c->mark_lock);

	ret = replicas_table_update(c, &new_r);
	percpu_up_write(&c->mark_lock);

	kfree(new_r.entries);

	return 0;
}

static int bch2_cpu_replicas_to_sb_replicas_v0(struct bch_fs *c,
					       struct bch_replicas_cpu *r)
{
	struct bch_sb_field_replicas_v0 *sb_r;
	struct bch_replicas_entry_v0 *dst;
	struct bch_replicas_entry_v1 *src;
	size_t bytes;

	bytes = sizeof(struct bch_sb_field_replicas);

	for_each_cpu_replicas_entry(r, src)
		bytes += replicas_entry_bytes(src) - 1;

	sb_r = bch2_sb_field_resize(&c->disk_sb, replicas_v0,
			DIV_ROUND_UP(bytes, sizeof(u64)));
	if (!sb_r)
		return -BCH_ERR_ENOSPC_sb_replicas;

	bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas);
	sb_r = bch2_sb_field_get(c->disk_sb.sb, replicas_v0);

	memset(&sb_r->entries, 0,
	       vstruct_end(&sb_r->field) -
	       (void *) &sb_r->entries);

	dst = sb_r->entries;
	for_each_cpu_replicas_entry(r, src) {
		dst->data_type	= src->data_type;
		dst->nr_devs	= src->nr_devs;
		memcpy(dst->devs, src->devs, src->nr_devs);

		dst = replicas_entry_next(dst);

		BUG_ON((void *) dst > vstruct_end(&sb_r->field));
	}

	return 0;
}

static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
					    struct bch_replicas_cpu *r)
{
	struct bch_sb_field_replicas *sb_r;
	struct bch_replicas_entry_v1 *dst, *src;
	bool need_v1 = false;
	size_t bytes;

	bytes = sizeof(struct bch_sb_field_replicas);

	for_each_cpu_replicas_entry(r, src) {
		bytes += replicas_entry_bytes(src);
		if (src->nr_required != 1)
			need_v1 = true;
	}

	if (!need_v1)
		return bch2_cpu_replicas_to_sb_replicas_v0(c, r);

	sb_r = bch2_sb_field_resize(&c->disk_sb, replicas,
			DIV_ROUND_UP(bytes, sizeof(u64)));
	if (!sb_r)
		return -BCH_ERR_ENOSPC_sb_replicas;

	bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas_v0);
	sb_r = bch2_sb_field_get(c->disk_sb.sb, replicas);

	memset(&sb_r->entries, 0,
	       vstruct_end(&sb_r->field) -
	       (void *) &sb_r->entries);

	dst = sb_r->entries;
	for_each_cpu_replicas_entry(r, src) {
		memcpy(dst, src, replicas_entry_bytes(src));

		dst = replicas_entry_next(dst);

		BUG_ON((void *) dst > vstruct_end(&sb_r->field));
	}

	return 0;
}
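
/*
 * The v0 on-disk format has no nr_required field and so can only represent
 * entries with nr_required == 1. When every entry satisfies that, the v0
 * field is written and the v1 field deleted, keeping the superblock readable
 * by older versions; otherwise v1 is written and v0 deleted.
 */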

static int bch2_cpu_replicas_validate(struct bch_replicas_cpu *cpu_r,
				      struct bch_sb *sb,
				      struct printbuf *err)
{
	unsigned i;

	sort_cmp_size(cpu_r->entries,
		      cpu_r->nr,
		      cpu_r->entry_size,
		      memcmp, NULL);

	for (i = 0; i < cpu_r->nr; i++) {
		struct bch_replicas_entry_v1 *e =
			cpu_replicas_entry(cpu_r, i);

		int ret = bch2_replicas_entry_validate(e, sb, err);
		if (ret)
			return ret;

		if (i + 1 < cpu_r->nr) {
			struct bch_replicas_entry_v1 *n =
				cpu_replicas_entry(cpu_r, i + 1);

			BUG_ON(memcmp(e, n, cpu_r->entry_size) > 0);

			if (!memcmp(e, n, cpu_r->entry_size)) {
				prt_printf(err, "duplicate replicas entry ");
				bch2_replicas_entry_to_text(err, e);
				return -BCH_ERR_invalid_sb_replicas;
			}
		}
	}

	return 0;
}

static int bch2_sb_replicas_validate(struct bch_sb *sb, struct bch_sb_field *f,
				     struct printbuf *err)
{
	struct bch_sb_field_replicas *sb_r = field_to_type(f, replicas);
	struct bch_replicas_cpu cpu_r;
	int ret;

	ret = __bch2_sb_replicas_to_cpu_replicas(sb_r, &cpu_r);
	if (ret)
		return ret;

	ret = bch2_cpu_replicas_validate(&cpu_r, sb, err);
	kfree(cpu_r.entries);
	return ret;
}

static void bch2_sb_replicas_to_text(struct printbuf *out,
				     struct bch_sb *sb,
				     struct bch_sb_field *f)
{
	struct bch_sb_field_replicas *r = field_to_type(f, replicas);
	struct bch_replicas_entry_v1 *e;
	bool first = true;

	for_each_replicas_entry(r, e) {
		if (!first)
			prt_printf(out, " ");
		first = false;

		bch2_replicas_entry_to_text(out, e);
	}
	prt_newline(out);
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas = {
	.validate	= bch2_sb_replicas_validate,
	.to_text	= bch2_sb_replicas_to_text,
};

static int bch2_sb_replicas_v0_validate(struct bch_sb *sb, struct bch_sb_field *f,
					struct printbuf *err)
{
	struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
	struct bch_replicas_cpu cpu_r;
	int ret;

	ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_r, &cpu_r);
	if (ret)
		return ret;

	ret = bch2_cpu_replicas_validate(&cpu_r, sb, err);
	kfree(cpu_r.entries);
	return ret;
}

static void bch2_sb_replicas_v0_to_text(struct printbuf *out,
					struct bch_sb *sb,
					struct bch_sb_field *f)
{
	struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
	struct bch_replicas_entry_v0 *e;
	bool first = true;

	for_each_replicas_entry(sb_r, e) {
		if (!first)
			prt_printf(out, " ");
		first = false;

		bch2_replicas_entry_v0_to_text(out, e);
	}
	prt_newline(out);
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0 = {
	.validate	= bch2_sb_replicas_v0_validate,
	.to_text	= bch2_sb_replicas_v0_to_text,
};

/* Query replicas: */

bool bch2_have_enough_devs(struct bch_fs *c, struct bch_devs_mask devs,
			   unsigned flags, bool print)
{
	struct bch_replicas_entry_v1 *e;
	bool ret = true;

	percpu_down_read(&c->mark_lock);
	for_each_cpu_replicas_entry(&c->replicas, e) {
		unsigned i, nr_online = 0, nr_failed = 0, dflags = 0;
		bool metadata = e->data_type < BCH_DATA_user;

		if (e->data_type == BCH_DATA_cached)
			continue;

		for (i = 0; i < e->nr_devs; i++) {
			struct bch_dev *ca = bch_dev_bkey_exists(c, e->devs[i]);

			nr_online += test_bit(e->devs[i], devs.d);
			nr_failed += ca->mi.state == BCH_MEMBER_STATE_failed;
		}

		if (nr_failed == e->nr_devs)
			continue;

		if (nr_online < e->nr_required)
			dflags |= metadata
				? BCH_FORCE_IF_METADATA_LOST
				: BCH_FORCE_IF_DATA_LOST;

		if (nr_online < e->nr_devs)
			dflags |= metadata
				? BCH_FORCE_IF_METADATA_DEGRADED
				: BCH_FORCE_IF_DATA_DEGRADED;

		if (dflags & ~flags) {
			if (print) {
				struct printbuf buf = PRINTBUF;

				bch2_replicas_entry_to_text(&buf, e);
				bch_err(c, "insufficient devices online (%u) for replicas entry %s",
					nr_online, buf.buf);
				printbuf_exit(&buf);
			}
			ret = false;
			break;
		}
	}
	percpu_up_read(&c->mark_lock);

	return ret;
}
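
/*
 * For each entry, fewer online devices than nr_required means the data would
 * be lost, fewer than nr_devs means it would be degraded; the corresponding
 * BCH_FORCE_IF_* flag must have been passed for the operation to proceed.
 * Entries whose devices have all failed are skipped: that data is already
 * unavailable regardless.
 */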

unsigned bch2_sb_dev_has_data(struct bch_sb *sb, unsigned dev)
{
	struct bch_sb_field_replicas *replicas;
	struct bch_sb_field_replicas_v0 *replicas_v0;
	unsigned i, data_has = 0;

	replicas = bch2_sb_field_get(sb, replicas);
	replicas_v0 = bch2_sb_field_get(sb, replicas_v0);

	if (replicas) {
		struct bch_replicas_entry_v1 *r;

		for_each_replicas_entry(replicas, r)
			for (i = 0; i < r->nr_devs; i++)
				if (r->devs[i] == dev)
					data_has |= 1 << r->data_type;
	} else if (replicas_v0) {
		struct bch_replicas_entry_v0 *r;

		for_each_replicas_entry_v0(replicas_v0, r)
			for (i = 0; i < r->nr_devs; i++)
				if (r->devs[i] == dev)
					data_has |= 1 << r->data_type;
	}

	return data_has;
}
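
/*
 * Returns a bitmask of BCH_DATA_* types with at least one replica on @dev:
 * e.g. a device holding only journal and btree replicas yields
 * (1 << BCH_DATA_journal) | (1 << BCH_DATA_btree).
 */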

unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned ret;

	mutex_lock(&c->sb_lock);
	ret = bch2_sb_dev_has_data(c->disk_sb.sb, ca->dev_idx);
	mutex_unlock(&c->sb_lock);

	return ret;
}

void bch2_fs_replicas_exit(struct bch_fs *c)
{
	unsigned i;

	kfree(c->usage_scratch);
	for (i = 0; i < ARRAY_SIZE(c->usage); i++)
		free_percpu(c->usage[i]);
	kfree(c->usage_base);
	kfree(c->replicas.entries);
	kfree(c->replicas_gc.entries);

	mempool_exit(&c->replicas_delta_pool);
}

int bch2_fs_replicas_init(struct bch_fs *c)
{
	bch2_journal_entry_res_resize(&c->journal,
			&c->replicas_journal_res,
			reserve_journal_replicas(c, &c->replicas));

	return mempool_init_kmalloc_pool(&c->replicas_delta_pool, 1,
					 REPLICAS_DELTA_LIST_MAX) ?:
		replicas_table_update(c, &c->replicas);
}