devs[i] = strndup_user((const char __user *)(unsigned long)
user_devs[i],
PATH_MAX);
- if (!devs[i]) {
- ret = -ENOMEM;
+ ret = PTR_ERR_OR_ZERO(devs[i]);
+ if (ret)
goto err;
- }
}
c = bch2_fs_open(devs, arg.nr_devs, bch2_opts_empty());
return -EINVAL;
path = strndup_user((const char __user *)(unsigned long) arg.dev, PATH_MAX);
- if (!path)
- return -ENOMEM;
+ ret = PTR_ERR_OR_ZERO(path);
+ if (ret)
+ return ret;
err = bch2_fs_open_incremental(path);
kfree(path);
static long bch2_ioctl_query_uuid(struct bch_fs *c,
struct bch_ioctl_query_uuid __user *user_arg)
{
- return copy_to_user(&user_arg->uuid,
- &c->sb.user_uuid,
- sizeof(c->sb.user_uuid));
+ if (copy_to_user(&user_arg->uuid, &c->sb.user_uuid,
+ sizeof(c->sb.user_uuid)))
+ return -EFAULT;
+ return 0;
}
#if 0
static long bch2_ioctl_start(struct bch_fs *c, struct bch_ioctl_start arg)
{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
if (arg.flags || arg.pad)
return -EINVAL;
static long bch2_ioctl_stop(struct bch_fs *c)
{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
bch2_fs_stop(c);
return 0;
}
char *path;
int ret;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
if (arg.flags || arg.pad)
return -EINVAL;
path = strndup_user((const char __user *)(unsigned long) arg.dev, PATH_MAX);
- if (!path)
- return -ENOMEM;
+ ret = PTR_ERR_OR_ZERO(path);
+ if (ret)
+ return ret;
ret = bch2_dev_add(c, path);
kfree(path);
{
struct bch_dev *ca;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
if ((arg.flags & ~(BCH_FORCE_IF_DATA_LOST|
BCH_FORCE_IF_METADATA_LOST|
BCH_FORCE_IF_DEGRADED|
char *path;
int ret;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
if (arg.flags || arg.pad)
return -EINVAL;
path = strndup_user((const char __user *)(unsigned long) arg.dev, PATH_MAX);
- if (!path)
- return -ENOMEM;
+ ret = PTR_ERR_OR_ZERO(path);
+ if (ret)
+ return ret;
ret = bch2_dev_online(c, path);
kfree(path);
struct bch_dev *ca;
int ret;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
if ((arg.flags & ~(BCH_FORCE_IF_DATA_LOST|
BCH_FORCE_IF_METADATA_LOST|
BCH_FORCE_IF_DEGRADED|
struct bch_dev *ca;
int ret;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
if ((arg.flags & ~(BCH_FORCE_IF_DATA_LOST|
BCH_FORCE_IF_METADATA_LOST|
BCH_FORCE_IF_DEGRADED|
BCH_BY_INDEX)) ||
- arg.pad[0] || arg.pad[1] || arg.pad[2])
+ arg.pad[0] || arg.pad[1] || arg.pad[2] ||
+ arg.new_state >= BCH_MEMBER_STATE_NR)
return -EINVAL;
ca = bch2_device_lookup(c, arg.dev, arg.flags);
return PTR_ERR(ca);
ret = bch2_dev_set_state(c, ca, arg.new_state, arg.flags);
+ if (ret)
+ bch_err(c, "Error setting device state: %s", bch2_err_str(ret));
percpu_ref_put(&ca->ref);
return ret;
struct bch_ioctl_data_event e = {
.type = BCH_DATA_EVENT_PROGRESS,
.p.data_type = ctx->stats.data_type,
- .p.btree_id = ctx->stats.btree_id,
- .p.pos = ctx->stats.pos,
+ .p.btree_id = ctx->stats.pos.btree,
+ .p.pos = ctx->stats.pos.pos,
.p.sectors_done = atomic64_read(&ctx->stats.sectors_seen),
.p.sectors_total = bch2_fs_usage_read_short(c).used,
};
if (len < sizeof(e))
return -EINVAL;
- return copy_to_user(buf, &e, sizeof(e)) ?: sizeof(e);
+ if (copy_to_user(buf, &e, sizeof(e)))
+ return -EFAULT;
+
+ return sizeof(e);
}
static const struct file_operations bcachefs_data_ops = {
unsigned flags = O_RDONLY|O_CLOEXEC|O_NONBLOCK;
int ret, fd = -1;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
if (arg.op >= BCH_DATA_OP_NR || arg.flags)
return -EINVAL;
ctx->c = c;
ctx->arg = arg;
- ctx->thread = kthread_create(bch2_data_thread, ctx, "[bcachefs]");
+ ctx->thread = kthread_create(bch2_data_thread, ctx,
+ "bch-data/%s", c->name);
if (IS_ERR(ctx->thread)) {
ret = PTR_ERR(ctx->thread);
goto err;
{
struct bch_ioctl_fs_usage *arg = NULL;
struct bch_replicas_usage *dst_e, *dst_end;
- struct bch_fs_usage *src;
+ struct bch_fs_usage_online *src;
u32 replica_entries_bytes;
unsigned i;
int ret = 0;
if (get_user(replica_entries_bytes, &user_arg->replica_entries_bytes))
return -EFAULT;
- arg = kzalloc(sizeof(*arg) + replica_entries_bytes, GFP_KERNEL);
+ arg = kzalloc(size_add(sizeof(*arg), replica_entries_bytes), GFP_KERNEL);
if (!arg)
return -ENOMEM;
arg->online_reserved = src->online_reserved;
for (i = 0; i < BCH_REPLICAS_MAX; i++)
- arg->persistent_reserved[i] = src->persistent_reserved[i];
+ arg->persistent_reserved[i] = src->u.persistent_reserved[i];
dst_e = arg->replicas;
dst_end = (void *) arg->replicas + replica_entries_bytes;
struct bch_replicas_entry *src_e =
cpu_replicas_entry(&c->replicas, i);
- if (replicas_usage_next(dst_e) > dst_end) {
+ /* check that we have enough space for one replicas entry */
+ if (dst_e + 1 > dst_end) {
ret = -ERANGE;
break;
}
- dst_e->sectors = src->replicas[i];
+ dst_e->sectors = src->u.replicas[i];
dst_e->r = *src_e;
/* recheck after setting nr_devs: */
percpu_up_read(&c->mark_lock);
kfree(src);
- if (!ret)
- ret = copy_to_user(user_arg, arg,
- sizeof(*arg) + arg->replica_entries_bytes);
+ if (ret)
+ goto err;
+ if (copy_to_user(user_arg, arg,
+ sizeof(*arg) + arg->replica_entries_bytes))
+ ret = -EFAULT;
err:
kfree(arg);
return ret;
arg.state = ca->mi.state;
arg.bucket_size = ca->mi.bucket_size;
arg.nr_buckets = ca->mi.nbuckets - ca->mi.first_bucket;
- arg.available_buckets = arg.nr_buckets - src.buckets_unavailable;
- arg.ec_buckets = src.buckets_ec;
- arg.ec_sectors = src.sectors_ec;
+ arg.buckets_ec = src.buckets_ec;
for (i = 0; i < BCH_DATA_NR; i++) {
- arg.buckets[i] = src.buckets[i];
- arg.sectors[i] = src.sectors[i];
+ arg.d[i].buckets = src.d[i].buckets;
+ arg.d[i].sectors = src.d[i].sectors;
+ arg.d[i].fragmented = src.d[i].fragmented;
}
percpu_ref_put(&ca->ref);
- return copy_to_user(user_arg, &arg, sizeof(arg));
+ if (copy_to_user(user_arg, &arg, sizeof(arg)))
+ return -EFAULT;
+
+ return 0;
}
static long bch2_ioctl_read_super(struct bch_fs *c,
struct bch_sb *sb;
int ret = 0;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
if ((arg.flags & ~(BCH_BY_INDEX|BCH_READ_DEV)) ||
arg.pad)
return -EINVAL;
goto err;
}
- ret = copy_to_user((void __user *)(unsigned long)arg.sb,
- sb, vstruct_bytes(sb));
+ if (copy_to_user((void __user *)(unsigned long)arg.sb, sb,
+ vstruct_bytes(sb)))
+ ret = -EFAULT;
err:
- if (ca)
+ if (!IS_ERR_OR_NULL(ca))
percpu_ref_put(&ca->ref);
mutex_unlock(&c->sb_lock);
return ret;
struct bch_dev *ca;
unsigned i;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!dev)
+ return -EINVAL;
+
for_each_online_member(ca, c, i)
- if (ca->disk_sb.bdev->bd_dev == dev) {
+ if (ca->dev == dev) {
percpu_ref_put(&ca->io_ref);
return i;
}
- return -ENOENT;
+ return -BCH_ERR_ENOENT_dev_idx_not_found;
}
static long bch2_ioctl_disk_resize(struct bch_fs *c,
struct bch_dev *ca;
int ret;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
if ((arg.flags & ~BCH_BY_INDEX) ||
arg.pad)
return -EINVAL;
struct bch_dev *ca;
int ret;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
if ((arg.flags & ~BCH_BY_INDEX) ||
arg.pad)
return -EINVAL;
+ if (arg.nbuckets > U32_MAX)
+ return -EINVAL;
+
ca = bch2_device_lookup(c, arg.dev, arg.flags);
if (IS_ERR(ca))
return PTR_ERR(ca);
\
if (copy_from_user(&i, arg, sizeof(i))) \
return -EFAULT; \
- return bch2_ioctl_##_name(c, i); \
+ ret = bch2_ioctl_##_name(c, i); \
+ goto out; \
} while (0)
long bch2_fs_ioctl(struct bch_fs *c, unsigned cmd, void __user *arg)
{
- /* ioctls that don't require admin cap: */
+ long ret;
+
switch (cmd) {
case BCH_IOCTL_QUERY_UUID:
return bch2_ioctl_query_uuid(c, arg);
return bch2_ioctl_fs_usage(c, arg);
case BCH_IOCTL_DEV_USAGE:
return bch2_ioctl_dev_usage(c, arg);
- }
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- switch (cmd) {
#if 0
case BCH_IOCTL_START:
BCH_IOCTL(start, struct bch_ioctl_start);
if (!test_bit(BCH_FS_STARTED, &c->flags))
return -EINVAL;
- /* ioctls that do require admin cap: */
switch (cmd) {
case BCH_IOCTL_DISK_ADD:
BCH_IOCTL(disk_add, struct bch_ioctl_disk);
default:
return -ENOTTY;
}
+out:
+ if (ret < 0)
+ ret = bch2_err_class(ret);
+ return ret;
}
static DEFINE_IDR(bch_chardev_minor);
if (bch_chardev_major < 0)
return bch_chardev_major;
- bch_chardev_class = class_create(THIS_MODULE, "bcachefs");
+ bch_chardev_class = class_create("bcachefs");
if (IS_ERR(bch_chardev_class))
return PTR_ERR(bch_chardev_class);