sorted = kmalloc_array(nr_groups, sizeof(*sorted), GFP_KERNEL);
if (!sorted)
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_disk_groups_validate;
memcpy(sorted, groups->entries, nr_groups * sizeof(*sorted));
sort(sorted, nr_groups, sizeof(*sorted), group_cmp, NULL);
return ret;
}
+/*
+ * bch2_disk_groups_to_text - dump the cached (CPU-side) disk group table.
+ *
+ * Emits one space-separated entry per group into @out: "[deleted]" for
+ * tombstoned slots, otherwise "[parent <idx> devs <name>...]" listing the
+ * member devices of that group.
+ */
+void bch2_disk_groups_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ struct bch_disk_groups_cpu *g;
+ struct bch_dev *ca;
+ int i;
+ unsigned iter;
+
+ /*
+  * NOTE(review): presumably out->atomic++ keeps the printbuf from
+  * reallocating while we are inside the RCU read-side section —
+  * confirm against the printbuf implementation.
+  */
+ out->atomic++;
+ rcu_read_lock();
+
+ g = rcu_dereference(c->disk_groups);
+ if (!g)
+  goto out;
+
+ for (i = 0; i < g->nr; i++) {
+ /* space-separate entries after the first */
+ if (i)
+ prt_printf(out, " ");
+
+ if (g->entries[i].deleted) {
+ prt_printf(out, "[deleted]");
+ continue;
+ }
+
+ prt_printf(out, "[parent %d devs", g->entries[i].parent);
+ for_each_member_device_rcu(ca, c, iter, &g->entries[i].devs)
+ prt_printf(out, " %s", ca->name);
+ prt_printf(out, "]");
+ }
+
+out:
+ rcu_read_unlock();
+ out->atomic--;
+}
+
static void bch2_sb_disk_groups_to_text(struct printbuf *out,
struct bch_sb *sb,
struct bch_sb_field *f)
cpu_g = kzalloc(sizeof(*cpu_g) +
sizeof(cpu_g->entries[0]) * nr_groups, GFP_KERNEL);
if (!cpu_g)
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_disk_groups_to_cpu;
cpu_g->nr = nr_groups;
+/*
+ * Resolve an encoded target (null / single device / disk group) to the
+ * device mask it refers to, or NULL if the target does not resolve.
+ *
+ * NOTE(review): this patch moves the RCU read lock inside the function,
+ * but the returned pointer still refers to RCU-protected data and is
+ * returned after rcu_read_unlock() — presumably callers must hold their
+ * own RCU read lock for the mask to remain valid; confirm at call sites.
+ */
const struct bch_devs_mask *bch2_target_to_mask(struct bch_fs *c, unsigned target)
{
struct target t = target_decode(target);
+ struct bch_devs_mask *devs;
+
+ rcu_read_lock();
switch (t.type) {
case TARGET_NULL:
- return NULL;
+ devs = NULL;
+ break;
case TARGET_DEV: {
+ /* bounds-check the device index before dereferencing c->devs[] */
struct bch_dev *ca = t.dev < c->sb.nr_devices
? rcu_dereference(c->devs[t.dev])
: NULL;
- return ca ? &ca->self : NULL;
+ devs = ca ? &ca->self : NULL;
+ break;
}
case TARGET_GROUP: {
struct bch_disk_groups_cpu *g = rcu_dereference(c->disk_groups);
+ /* group must exist, be in range, and not be a deleted slot */
- return g && t.group < g->nr && !g->entries[t.group].deleted
+ devs = g && t.group < g->nr && !g->entries[t.group].deleted
? &g->entries[t.group].devs
: NULL;
+ break;
}
default:
BUG();
}
+
+ rcu_read_unlock();
+
+ return devs;
}
bool bch2_dev_in_target(struct bch_fs *c, unsigned dev, unsigned target)