X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Freplicas.c;h=820f99898a16e8f89f99e2923201dcbf6c87c958;hb=06611a71a35a1b14efe192454aabf3a01b4804d4;hp=76efbfce7683bca0c30dc53accd0049ba170810b;hpb=5ef62f56ab50c5799f713e3a42f5c7ad7e8283d3;p=bcachefs-tools-debian

diff --git a/libbcachefs/replicas.c b/libbcachefs/replicas.c
index 76efbfc..820f998 100644
--- a/libbcachefs/replicas.c
+++ b/libbcachefs/replicas.c
@@ -11,7 +11,7 @@ static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
 
 /* Replicas tracking - in memory: */
 
-static void verify_replicas_entry(struct bch_replicas_entry *e)
+static void verify_replicas_entry(struct bch_replicas_entry_v1 *e)
 {
 #ifdef CONFIG_BCACHEFS_DEBUG
 	unsigned i;
@@ -26,7 +26,7 @@ static void verify_replicas_entry(struct bch_replicas_entry *e)
 #endif
 }
 
-void bch2_replicas_entry_sort(struct bch_replicas_entry *e)
+void bch2_replicas_entry_sort(struct bch_replicas_entry_v1 *e)
 {
 	bubble_sort(e->devs, e->nr_devs, u8_cmp);
 }
@@ -36,8 +36,8 @@ static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
 	eytzinger0_sort(r->entries, r->nr, r->entry_size, memcmp, NULL);
 }
 
-void bch2_replicas_entry_v0_to_text(struct printbuf *out,
-				    struct bch_replicas_entry_v0 *e)
+static void bch2_replicas_entry_v0_to_text(struct printbuf *out,
+					   struct bch_replicas_entry_v0 *e)
 {
 	unsigned i;
 
@@ -53,7 +53,7 @@ void bch2_replicas_entry_v0_to_text(struct printbuf *out,
 }
 
 void bch2_replicas_entry_to_text(struct printbuf *out,
-				 struct bch_replicas_entry *e)
+				 struct bch_replicas_entry_v1 *e)
 {
 	unsigned i;
 
@@ -71,7 +71,7 @@ void bch2_replicas_entry_to_text(struct printbuf *out,
 void bch2_cpu_replicas_to_text(struct printbuf *out,
 			       struct bch_replicas_cpu *r)
 {
-	struct bch_replicas_entry *e;
+	struct bch_replicas_entry_v1 *e;
 	bool first = true;
 
 	for_each_cpu_replicas_entry(r, e) {
@@ -84,7 +84,7 @@ void bch2_cpu_replicas_to_text(struct printbuf *out,
 }
 
 static void extent_to_replicas(struct bkey_s_c k,
-			       struct bch_replicas_entry *r)
+			       struct bch_replicas_entry_v1 *r)
 {
 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 	const union bch_extent_entry *entry;
@@ -104,7 +104,7 @@ static void extent_to_replicas(struct bkey_s_c k,
 }
 
 static void stripe_to_replicas(struct bkey_s_c k,
-			       struct bch_replicas_entry *r)
+			       struct bch_replicas_entry_v1 *r)
 {
 	struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
 	const struct bch_extent_ptr *ptr;
@@ -117,7 +117,7 @@ static void stripe_to_replicas(struct bkey_s_c k,
 		r->devs[r->nr_devs++] = ptr->dev;
 }
 
-void bch2_bkey_to_replicas(struct bch_replicas_entry *e,
+void bch2_bkey_to_replicas(struct bch_replicas_entry_v1 *e,
 			   struct bkey_s_c k)
 {
 	e->nr_devs = 0;
@@ -142,7 +142,7 @@ void bch2_bkey_to_replicas(struct bch_replicas_entry *e,
 	bch2_replicas_entry_sort(e);
 }
 
-void bch2_devlist_to_replicas(struct bch_replicas_entry *e,
+void bch2_devlist_to_replicas(struct bch_replicas_entry_v1 *e,
 			      enum bch_data_type data_type,
 			      struct bch_devs_list devs)
 {
@@ -164,7 +164,7 @@ void bch2_devlist_to_replicas(struct bch_replicas_entry *e,
 
 static struct bch_replicas_cpu
 cpu_replicas_add_entry(struct bch_replicas_cpu *old,
-		       struct bch_replicas_entry *new_entry)
+		       struct bch_replicas_entry_v1 *new_entry)
 {
 	unsigned i;
 	struct bch_replicas_cpu new = {
@@ -194,7 +194,7 @@ cpu_replicas_add_entry(struct bch_replicas_cpu *old,
 }
 
 static inline int __replicas_entry_idx(struct bch_replicas_cpu *r,
-				       struct bch_replicas_entry *search)
+				       struct bch_replicas_entry_v1 *search)
 {
 	int idx, entry_size = replicas_entry_bytes(search);
 
@@ -212,7 +212,7 @@ static inline int __replicas_entry_idx(struct bch_replicas_cpu *r,
 }
 
 int bch2_replicas_entry_idx(struct bch_fs *c,
-			    struct bch_replicas_entry *search)
+			    struct bch_replicas_entry_v1 *search)
 {
 	bch2_replicas_entry_sort(search);
 
@@ -220,13 +220,13 @@ int bch2_replicas_entry_idx(struct bch_fs *c,
 }
 
 static bool __replicas_has_entry(struct bch_replicas_cpu *r,
-				 struct bch_replicas_entry *search)
+				 struct bch_replicas_entry_v1 *search)
 {
 	return __replicas_entry_idx(r, search) >= 0;
 }
 
 bool bch2_replicas_marked(struct bch_fs *c,
-			  struct bch_replicas_entry *search)
+			  struct bch_replicas_entry_v1 *search)
 {
 	bool marked;
 
@@ -272,7 +272,7 @@ static void __replicas_table_update_pcpu(struct bch_fs_usage __percpu *dst_p,
 {
 	unsigned src_nr = sizeof(struct bch_fs_usage) / sizeof(u64) + src_r->nr;
 	struct bch_fs_usage *dst, *src = (void *)
-		bch2_acc_percpu_u64s((void *) src_p, src_nr);
+		bch2_acc_percpu_u64s((u64 __percpu *) src_p, src_nr);
 
 	preempt_disable();
 	dst = this_cpu_ptr(dst_p);
@@ -343,7 +343,7 @@ err:
 static unsigned reserve_journal_replicas(struct bch_fs *c,
 					 struct bch_replicas_cpu *r)
 {
-	struct bch_replicas_entry *e;
+	struct bch_replicas_entry_v1 *e;
 	unsigned journal_res_u64s = 0;
 
 	/* nr_inodes: */
@@ -368,7 +368,7 @@ static unsigned reserve_journal_replicas(struct bch_fs *c,
 
 noinline
 static int bch2_mark_replicas_slowpath(struct bch_fs *c,
-				struct bch_replicas_entry *new_entry)
+				struct bch_replicas_entry_v1 *new_entry)
 {
 	struct bch_replicas_cpu new_r, new_gc;
 	int ret = 0;
@@ -429,11 +429,11 @@ out:
 
 	return ret;
 err:
-	bch_err(c, "error adding replicas entry: %s", bch2_err_str(ret));
+	bch_err_msg(c, ret, "adding replicas entry");
 	goto out;
 }
 
-int bch2_mark_replicas(struct bch_fs *c, struct bch_replicas_entry *r)
+int bch2_mark_replicas(struct bch_fs *c, struct bch_replicas_entry_v1 *r)
 {
 	return likely(bch2_replicas_marked(c, r)) ?
 		0 : bch2_mark_replicas_slowpath(c, r);
@@ -465,12 +465,10 @@ int bch2_replicas_gc_end(struct bch_fs *c, int ret)
 	mutex_lock(&c->sb_lock);
 	percpu_down_write(&c->mark_lock);
 
-	ret = bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc);
-	if (ret)
-		goto err;
+	ret = ret ?:
+		bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc) ?:
+		replicas_table_update(c, &c->replicas_gc);
 
-	ret = replicas_table_update(c, &c->replicas_gc);
-err:
 	kfree(c->replicas_gc.entries);
 	c->replicas_gc.entries = NULL;
 
@@ -486,7 +484,7 @@ err:
 
 int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
 {
-	struct bch_replicas_entry *e;
+	struct bch_replicas_entry_v1 *e;
 	unsigned i = 0;
 
 	lockdep_assert_held(&c->replicas_gc_lock);
@@ -561,7 +559,7 @@ retry:
 	}
 
 	for (i = 0; i < c->replicas.nr; i++) {
-		struct bch_replicas_entry *e =
+		struct bch_replicas_entry_v1 *e =
 			cpu_replicas_entry(&c->replicas, i);
 
 		if (e->data_type == BCH_DATA_journal ||
@@ -576,12 +574,9 @@ retry:
 
 	bch2_cpu_replicas_sort(&new);
 
-	ret = bch2_cpu_replicas_to_sb_replicas(c, &new);
-	if (ret)
-		goto err;
+	ret = bch2_cpu_replicas_to_sb_replicas(c, &new) ?:
+		replicas_table_update(c, &new);
 
-	ret = replicas_table_update(c, &new);
-err:
 	kfree(new.entries);
 
 	percpu_up_write(&c->mark_lock);
@@ -595,7 +590,7 @@ err:
 }
 
 int bch2_replicas_set_usage(struct bch_fs *c,
-			    struct bch_replicas_entry *r,
+			    struct bch_replicas_entry_v1 *r,
 			    u64 sectors)
 {
 	int ret, idx = bch2_replicas_entry_idx(c, r);
@@ -628,7 +623,7 @@ static int
 __bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r,
 				   struct bch_replicas_cpu *cpu_r)
 {
-	struct bch_replicas_entry *e, *dst;
+	struct bch_replicas_entry_v1 *e, *dst;
 	unsigned nr = 0, entry_size = 0, idx = 0;
 
 	for_each_replicas_entry(sb_r, e) {
@@ -666,7 +661,7 @@ __bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
 		nr++;
 	}
 
-	entry_size += sizeof(struct bch_replicas_entry) -
+	entry_size += sizeof(struct bch_replicas_entry_v1) -
 		sizeof(struct bch_replicas_entry_v0);
 
 	cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
@@ -677,7 +672,7 @@ __bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
 	cpu_r->entry_size = entry_size;
 
 	for_each_replicas_entry(sb_r, e) {
-		struct bch_replicas_entry *dst =
+		struct bch_replicas_entry_v1 *dst =
 			cpu_replicas_entry(cpu_r, idx++);
 
 		dst->data_type = e->data_type;
@@ -697,9 +692,9 @@ int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
 	struct bch_replicas_cpu new_r = { 0, 0, NULL };
 	int ret = 0;
 
-	if ((sb_v1 = bch2_sb_get_replicas(c->disk_sb.sb)))
+	if ((sb_v1 = bch2_sb_field_get(c->disk_sb.sb, replicas)))
 		ret = __bch2_sb_replicas_to_cpu_replicas(sb_v1, &new_r);
-	else if ((sb_v0 = bch2_sb_get_replicas_v0(c->disk_sb.sb)))
+	else if ((sb_v0 = bch2_sb_field_get(c->disk_sb.sb, replicas_v0)))
 		ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_v0, &new_r);
 	if (ret)
 		return ret;
@@ -721,7 +716,7 @@ static int bch2_cpu_replicas_to_sb_replicas_v0(struct bch_fs *c,
 {
 	struct bch_sb_field_replicas_v0 *sb_r;
 	struct bch_replicas_entry_v0 *dst;
-	struct bch_replicas_entry *src;
+	struct bch_replicas_entry_v1 *src;
 	size_t bytes;
 
 	bytes = sizeof(struct bch_sb_field_replicas);
@@ -729,13 +724,13 @@ static int bch2_cpu_replicas_to_sb_replicas_v0(struct bch_fs *c,
 	for_each_cpu_replicas_entry(r, src)
 		bytes += replicas_entry_bytes(src) - 1;
 
-	sb_r = bch2_sb_resize_replicas_v0(&c->disk_sb,
+	sb_r = bch2_sb_field_resize(&c->disk_sb, replicas_v0,
 			DIV_ROUND_UP(bytes, sizeof(u64)));
 	if (!sb_r)
 		return -BCH_ERR_ENOSPC_sb_replicas;
 
 	bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas);
-	sb_r = bch2_sb_get_replicas_v0(c->disk_sb.sb);
+	sb_r = bch2_sb_field_get(c->disk_sb.sb, replicas_v0);
 
 	memset(&sb_r->entries, 0,
 	       vstruct_end(&sb_r->field) -
@@ -759,7 +754,7 @@ static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
 				      struct bch_replicas_cpu *r)
 {
 	struct bch_sb_field_replicas *sb_r;
-	struct bch_replicas_entry *dst, *src;
+	struct bch_replicas_entry_v1 *dst, *src;
 	bool need_v1 = false;
 	size_t bytes;
 
@@ -774,13 +769,13 @@ static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
 	if (!need_v1)
 		return bch2_cpu_replicas_to_sb_replicas_v0(c, r);
 
-	sb_r = bch2_sb_resize_replicas(&c->disk_sb,
+	sb_r = bch2_sb_field_resize(&c->disk_sb, replicas,
 			DIV_ROUND_UP(bytes, sizeof(u64)));
 	if (!sb_r)
 		return -BCH_ERR_ENOSPC_sb_replicas;
 
 	bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas_v0);
-	sb_r = bch2_sb_get_replicas(c->disk_sb.sb);
+	sb_r = bch2_sb_field_get(c->disk_sb.sb, replicas);
 
 	memset(&sb_r->entries, 0,
 	       vstruct_end(&sb_r->field) -
@@ -802,7 +797,6 @@ static int bch2_cpu_replicas_validate(struct bch_replicas_cpu *cpu_r,
 				      struct bch_sb *sb,
 				      struct printbuf *err)
 {
-	struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
 	unsigned i, j;
 
 	sort_cmp_size(cpu_r->entries,
@@ -811,7 +805,7 @@ static int bch2_cpu_replicas_validate(struct bch_replicas_cpu *cpu_r,
 		      memcmp, NULL);
 
 	for (i = 0; i < cpu_r->nr; i++) {
-		struct bch_replicas_entry *e =
+		struct bch_replicas_entry_v1 *e =
 			cpu_replicas_entry(cpu_r, i);
 
 		if (e->data_type >= BCH_DATA_NR) {
@@ -834,14 +828,14 @@ static int bch2_cpu_replicas_validate(struct bch_replicas_cpu *cpu_r,
 		}
 
 		for (j = 0; j < e->nr_devs; j++)
-			if (!bch2_dev_exists(sb, mi, e->devs[j])) {
+			if (!bch2_dev_exists(sb, e->devs[j])) {
 				prt_printf(err, "invalid device %u in entry ", e->devs[j]);
 				bch2_replicas_entry_to_text(err, e);
 				return -BCH_ERR_invalid_sb_replicas;
 			}
 
 		if (i + 1 < cpu_r->nr) {
-			struct bch_replicas_entry *n =
+			struct bch_replicas_entry_v1 *n =
 				cpu_replicas_entry(cpu_r, i + 1);
 
 			BUG_ON(memcmp(e, n, cpu_r->entry_size) > 0);
@@ -878,7 +872,7 @@ static void bch2_sb_replicas_to_text(struct printbuf *out,
 					 struct bch_sb_field *f)
 {
 	struct bch_sb_field_replicas *r = field_to_type(f, replicas);
-	struct bch_replicas_entry *e;
+	struct bch_replicas_entry_v1 *e;
 	bool first = true;
 
 	for_each_replicas_entry(r, e) {
@@ -940,7 +934,7 @@ const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0 = {
 bool bch2_have_enough_devs(struct bch_fs *c, struct bch_devs_mask devs,
 			   unsigned flags, bool print)
 {
-	struct bch_replicas_entry *e;
+	struct bch_replicas_entry_v1 *e;
 	bool ret = true;
 
 	percpu_down_read(&c->mark_lock);
@@ -996,11 +990,11 @@ unsigned bch2_sb_dev_has_data(struct bch_sb *sb, unsigned dev)
 	struct bch_sb_field_replicas_v0 *replicas_v0;
 	unsigned i, data_has = 0;
 
-	replicas = bch2_sb_get_replicas(sb);
-	replicas_v0 = bch2_sb_get_replicas_v0(sb);
+	replicas = bch2_sb_field_get(sb, replicas);
+	replicas_v0 = bch2_sb_field_get(sb, replicas_v0);
 
 	if (replicas) {
-		struct bch_replicas_entry *r;
+		struct bch_replicas_entry_v1 *r;
 
 		for_each_replicas_entry(replicas, r)
 			for (i = 0; i < r->nr_devs; i++)