1 // SPDX-License-Identifier: GPL-2.0
2 #ifndef NO_BCACHEFS_CHARDEV
5 #include "bcachefs_ioctl.h"
14 #include <linux/anon_inodes.h>
15 #include <linux/cdev.h>
16 #include <linux/device.h>
17 #include <linux/file.h>
19 #include <linux/ioctl.h>
20 #include <linux/kthread.h>
21 #include <linux/major.h>
22 #include <linux/sched/task.h>
23 #include <linux/slab.h>
24 #include <linux/uaccess.h>
27 static int copy_to_user_errcode(void __user *to, const void *from, unsigned long n)
29 return copy_to_user(to, from, n) ? -EFAULT : 0;
/*
 * A kthread exposed to userspace through an anonymous file descriptor:
 * the thread's lifetime is tied to the fd, and its exit status is reported
 * through @ret (read via thr->thr.ret by the fsck/data-job code below).
 */
struct thread_with_file {
	struct task_struct	*task;	/* the kthread; ref held while fd is open */
	int			ret;	/* thread's exit status */
};
37 static void thread_with_file_exit(struct thread_with_file *thr)
40 kthread_stop(thr->task);
41 put_task_struct(thr->task);
45 static int run_thread_with_file(struct thread_with_file *thr,
46 const struct file_operations *fops,
47 int (*fn)(void *), const char *fmt, ...)
50 struct file *file = NULL;
52 struct printbuf name = PRINTBUF;
53 unsigned fd_flags = O_RDONLY|O_CLOEXEC|O_NONBLOCK;
56 prt_vprintf(&name, fmt, args);
60 thr->task = kthread_create(fn, thr, name.buf);
61 ret = PTR_ERR_OR_ZERO(thr->task);
65 ret = get_unused_fd_flags(fd_flags);
70 file = anon_inode_getfile(name.buf, fops, thr, fd_flags);
71 ret = PTR_ERR_OR_ZERO(file);
76 get_task_struct(thr->task);
77 wake_up_process(thr->task);
83 kthread_stop(thr->task);
89 /* returns with ref on ca->ref */
90 static struct bch_dev *bch2_device_lookup(struct bch_fs *c, u64 dev,
95 if (flags & BCH_BY_INDEX) {
96 if (dev >= c->sb.nr_devices)
97 return ERR_PTR(-EINVAL);
100 ca = rcu_dereference(c->devs[dev]);
102 percpu_ref_get(&ca->ref);
106 return ERR_PTR(-EINVAL);
110 path = strndup_user((const char __user *)
111 (unsigned long) dev, PATH_MAX);
113 return ERR_CAST(path);
115 ca = bch2_dev_lookup(c, path);
123 static long bch2_ioctl_assemble(struct bch_ioctl_assemble __user *user_arg)
125 struct bch_ioctl_assemble arg;
127 u64 *user_devs = NULL;
132 if (copy_from_user(&arg, user_arg, sizeof(arg)))
135 if (arg.flags || arg.pad)
138 user_devs = kmalloc_array(arg.nr_devs, sizeof(u64), GFP_KERNEL);
142 devs = kcalloc(arg.nr_devs, sizeof(char *), GFP_KERNEL);
144 if (copy_from_user(user_devs, user_arg->devs,
145 sizeof(u64) * arg.nr_devs))
148 for (i = 0; i < arg.nr_devs; i++) {
149 devs[i] = strndup_user((const char __user *)(unsigned long)
152 ret= PTR_ERR_OR_ZERO(devs[i]);
157 c = bch2_fs_open(devs, arg.nr_devs, bch2_opts_empty());
158 ret = PTR_ERR_OR_ZERO(c);
163 for (i = 0; i < arg.nr_devs; i++)
169 static long bch2_ioctl_incremental(struct bch_ioctl_incremental __user *user_arg)
171 struct bch_ioctl_incremental arg;
175 if (copy_from_user(&arg, user_arg, sizeof(arg)))
178 if (arg.flags || arg.pad)
181 path = strndup_user((const char __user *)(unsigned long) arg.dev, PATH_MAX);
182 ret = PTR_ERR_OR_ZERO(path);
186 err = bch2_fs_open_incremental(path);
190 pr_err("Could not register bcachefs devices: %s", err);
199 struct thread_with_file thr;
204 struct bch_opts opts;
206 struct log_output output;
207 DARRAY(char) output2;
210 static void bch2_fsck_thread_free(struct fsck_thread *thr)
212 thread_with_file_exit(&thr->thr);
214 for (size_t i = 0; i < thr->nr_devs; i++)
216 darray_exit(&thr->output2);
217 printbuf_exit(&thr->output.buf);
222 static int bch2_fsck_thread_release(struct inode *inode, struct file *file)
224 struct fsck_thread *thr = container_of(file->private_data, struct fsck_thread, thr);
226 bch2_fsck_thread_free(thr);
230 static ssize_t bch2_fsck_thread_read(struct file *file, char __user *buf,
231 size_t len, loff_t *ppos)
233 struct fsck_thread *thr = container_of(file->private_data, struct fsck_thread, thr);
234 size_t copied = 0, b;
237 ret = wait_event_interruptible(thr->output.wait,
238 thr->output.buf.pos || thr->output2.nr);
243 ret = darray_make_room(&thr->output2, thr->output.buf.pos);
247 spin_lock_irq(&thr->output.lock);
248 b = min_t(size_t, darray_room(thr->output2), thr->output.buf.pos);
250 memcpy(&darray_top(thr->output2), thr->output.buf.buf, b);
251 memmove(thr->output.buf.buf,
252 thr->output.buf.buf + b,
253 thr->output.buf.pos - b);
255 thr->output2.nr += b;
256 thr->output.buf.pos -= b;
257 spin_unlock_irq(&thr->output.lock);
259 b = min(len, thr->output2.nr);
263 b -= copy_to_user(buf, thr->output2.data, b);
273 memmove(thr->output2.data,
274 thr->output2.data + b,
275 thr->output2.nr - b);
276 thr->output2.nr -= b;
279 return copied ?: ret;
282 static const struct file_operations fsck_thread_ops = {
283 .release = bch2_fsck_thread_release,
284 .read = bch2_fsck_thread_read,
288 static int bch2_fsck_offline_thread_fn(void *arg)
290 struct fsck_thread *thr = container_of(arg, struct fsck_thread, thr);
291 struct bch_fs *c = bch2_fs_open(thr->devs, thr->nr_devs, thr->opts);
293 thr->thr.ret = PTR_ERR_OR_ZERO(c);
299 static long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_arg)
301 struct bch_ioctl_fsck_offline arg;
302 struct fsck_thread *thr = NULL;
306 if (copy_from_user(&arg, user_arg, sizeof(arg)))
312 if (!(devs = kcalloc(arg.nr_devs, sizeof(*devs), GFP_KERNEL)) ||
313 !(thr = kzalloc(sizeof(*thr), GFP_KERNEL)) ||
314 !(thr->devs = kcalloc(arg.nr_devs, sizeof(*thr->devs), GFP_KERNEL))) {
319 thr->nr_devs = arg.nr_devs;
320 thr->output.buf = PRINTBUF;
321 thr->output.buf.atomic++;
322 spin_lock_init(&thr->output.lock);
323 init_waitqueue_head(&thr->output.wait);
324 darray_init(&thr->output2);
326 if (copy_from_user(devs, &user_arg->devs[0], sizeof(user_arg->devs[0]) * arg.nr_devs)) {
331 for (size_t i = 0; i < arg.nr_devs; i++) {
332 thr->devs[i] = strndup_user((char __user *)(unsigned long) devs[i], PATH_MAX);
333 ret = PTR_ERR_OR_ZERO(thr->devs[i]);
339 char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16);
341 ret = PTR_ERR_OR_ZERO(optstr) ?:
342 bch2_parse_mount_opts(NULL, &thr->opts, optstr);
349 opt_set(thr->opts, log_output, (u64)(unsigned long)&thr->output);
351 ret = run_thread_with_file(&thr->thr,
353 bch2_fsck_offline_thread_fn,
358 bch2_fsck_thread_free(thr);
359 pr_err("ret %s", bch2_err_str(ret));
365 static long bch2_global_ioctl(unsigned cmd, void __user *arg)
371 case BCH_IOCTL_ASSEMBLE:
372 return bch2_ioctl_assemble(arg);
373 case BCH_IOCTL_INCREMENTAL:
374 return bch2_ioctl_incremental(arg);
376 case BCH_IOCTL_FSCK_OFFLINE: {
377 ret = bch2_ioctl_fsck_offline(arg);
386 ret = bch2_err_class(ret);
390 static long bch2_ioctl_query_uuid(struct bch_fs *c,
391 struct bch_ioctl_query_uuid __user *user_arg)
393 return copy_to_user_errcode(&user_arg->uuid, &c->sb.user_uuid,
394 sizeof(c->sb.user_uuid));
398 static long bch2_ioctl_start(struct bch_fs *c, struct bch_ioctl_start arg)
400 if (!capable(CAP_SYS_ADMIN))
403 if (arg.flags || arg.pad)
406 return bch2_fs_start(c);
409 static long bch2_ioctl_stop(struct bch_fs *c)
411 if (!capable(CAP_SYS_ADMIN))
419 static long bch2_ioctl_disk_add(struct bch_fs *c, struct bch_ioctl_disk arg)
424 if (!capable(CAP_SYS_ADMIN))
427 if (arg.flags || arg.pad)
430 path = strndup_user((const char __user *)(unsigned long) arg.dev, PATH_MAX);
431 ret = PTR_ERR_OR_ZERO(path);
435 ret = bch2_dev_add(c, path);
441 static long bch2_ioctl_disk_remove(struct bch_fs *c, struct bch_ioctl_disk arg)
445 if (!capable(CAP_SYS_ADMIN))
448 if ((arg.flags & ~(BCH_FORCE_IF_DATA_LOST|
449 BCH_FORCE_IF_METADATA_LOST|
450 BCH_FORCE_IF_DEGRADED|
455 ca = bch2_device_lookup(c, arg.dev, arg.flags);
459 return bch2_dev_remove(c, ca, arg.flags);
462 static long bch2_ioctl_disk_online(struct bch_fs *c, struct bch_ioctl_disk arg)
467 if (!capable(CAP_SYS_ADMIN))
470 if (arg.flags || arg.pad)
473 path = strndup_user((const char __user *)(unsigned long) arg.dev, PATH_MAX);
474 ret = PTR_ERR_OR_ZERO(path);
478 ret = bch2_dev_online(c, path);
483 static long bch2_ioctl_disk_offline(struct bch_fs *c, struct bch_ioctl_disk arg)
488 if (!capable(CAP_SYS_ADMIN))
491 if ((arg.flags & ~(BCH_FORCE_IF_DATA_LOST|
492 BCH_FORCE_IF_METADATA_LOST|
493 BCH_FORCE_IF_DEGRADED|
498 ca = bch2_device_lookup(c, arg.dev, arg.flags);
502 ret = bch2_dev_offline(c, ca, arg.flags);
503 percpu_ref_put(&ca->ref);
507 static long bch2_ioctl_disk_set_state(struct bch_fs *c,
508 struct bch_ioctl_disk_set_state arg)
513 if (!capable(CAP_SYS_ADMIN))
516 if ((arg.flags & ~(BCH_FORCE_IF_DATA_LOST|
517 BCH_FORCE_IF_METADATA_LOST|
518 BCH_FORCE_IF_DEGRADED|
520 arg.pad[0] || arg.pad[1] || arg.pad[2] ||
521 arg.new_state >= BCH_MEMBER_STATE_NR)
524 ca = bch2_device_lookup(c, arg.dev, arg.flags);
528 ret = bch2_dev_set_state(c, ca, arg.new_state, arg.flags);
530 bch_err(c, "Error setting device state: %s", bch2_err_str(ret));
532 percpu_ref_put(&ca->ref);
536 struct bch_data_ctx {
537 struct thread_with_file thr;
540 struct bch_ioctl_data arg;
541 struct bch_move_stats stats;
544 static int bch2_data_thread(void *arg)
546 struct bch_data_ctx *ctx = container_of(arg, struct bch_data_ctx, thr);
548 ctx->thr.ret = bch2_data_job(ctx->c, &ctx->stats, ctx->arg);
549 ctx->stats.data_type = U8_MAX;
553 static int bch2_data_job_release(struct inode *inode, struct file *file)
555 struct bch_data_ctx *ctx = container_of(file->private_data, struct bch_data_ctx, thr);
557 thread_with_file_exit(&ctx->thr);
562 static ssize_t bch2_data_job_read(struct file *file, char __user *buf,
563 size_t len, loff_t *ppos)
565 struct bch_data_ctx *ctx = container_of(file->private_data, struct bch_data_ctx, thr);
566 struct bch_fs *c = ctx->c;
567 struct bch_ioctl_data_event e = {
568 .type = BCH_DATA_EVENT_PROGRESS,
569 .p.data_type = ctx->stats.data_type,
570 .p.btree_id = ctx->stats.pos.btree,
571 .p.pos = ctx->stats.pos.pos,
572 .p.sectors_done = atomic64_read(&ctx->stats.sectors_seen),
573 .p.sectors_total = bch2_fs_usage_read_short(c).used,
579 return copy_to_user_errcode(buf, &e, sizeof(e)) ?: sizeof(e);
582 static const struct file_operations bcachefs_data_ops = {
583 .release = bch2_data_job_release,
584 .read = bch2_data_job_read,
588 static long bch2_ioctl_data(struct bch_fs *c,
589 struct bch_ioctl_data arg)
591 struct bch_data_ctx *ctx;
594 if (!capable(CAP_SYS_ADMIN))
597 if (arg.op >= BCH_DATA_OP_NR || arg.flags)
600 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
607 ret = run_thread_with_file(&ctx->thr,
610 "bch-data/%s", c->name);
616 static long bch2_ioctl_fs_usage(struct bch_fs *c,
617 struct bch_ioctl_fs_usage __user *user_arg)
619 struct bch_ioctl_fs_usage *arg = NULL;
620 struct bch_replicas_usage *dst_e, *dst_end;
621 struct bch_fs_usage_online *src;
622 u32 replica_entries_bytes;
626 if (!test_bit(BCH_FS_started, &c->flags))
629 if (get_user(replica_entries_bytes, &user_arg->replica_entries_bytes))
632 arg = kzalloc(size_add(sizeof(*arg), replica_entries_bytes), GFP_KERNEL);
636 src = bch2_fs_usage_read(c);
642 arg->capacity = c->capacity;
643 arg->used = bch2_fs_sectors_used(c, src);
644 arg->online_reserved = src->online_reserved;
646 for (i = 0; i < BCH_REPLICAS_MAX; i++)
647 arg->persistent_reserved[i] = src->u.persistent_reserved[i];
649 dst_e = arg->replicas;
650 dst_end = (void *) arg->replicas + replica_entries_bytes;
652 for (i = 0; i < c->replicas.nr; i++) {
653 struct bch_replicas_entry_v1 *src_e =
654 cpu_replicas_entry(&c->replicas, i);
656 /* check that we have enough space for one replicas entry */
657 if (dst_e + 1 > dst_end) {
662 dst_e->sectors = src->u.replicas[i];
665 /* recheck after setting nr_devs: */
666 if (replicas_usage_next(dst_e) > dst_end) {
671 memcpy(dst_e->r.devs, src_e->devs, src_e->nr_devs);
673 dst_e = replicas_usage_next(dst_e);
676 arg->replica_entries_bytes = (void *) dst_e - (void *) arg->replicas;
678 percpu_up_read(&c->mark_lock);
684 ret = copy_to_user_errcode(user_arg, arg,
685 sizeof(*arg) + arg->replica_entries_bytes);
691 /* obsolete, didn't allow for new data types: */
692 static long bch2_ioctl_dev_usage(struct bch_fs *c,
693 struct bch_ioctl_dev_usage __user *user_arg)
695 struct bch_ioctl_dev_usage arg;
696 struct bch_dev_usage src;
700 if (!test_bit(BCH_FS_started, &c->flags))
703 if (copy_from_user(&arg, user_arg, sizeof(arg)))
706 if ((arg.flags & ~BCH_BY_INDEX) ||
712 ca = bch2_device_lookup(c, arg.dev, arg.flags);
716 src = bch2_dev_usage_read(ca);
718 arg.state = ca->mi.state;
719 arg.bucket_size = ca->mi.bucket_size;
720 arg.nr_buckets = ca->mi.nbuckets - ca->mi.first_bucket;
722 for (i = 0; i < BCH_DATA_NR; i++) {
723 arg.d[i].buckets = src.d[i].buckets;
724 arg.d[i].sectors = src.d[i].sectors;
725 arg.d[i].fragmented = src.d[i].fragmented;
728 percpu_ref_put(&ca->ref);
730 return copy_to_user_errcode(user_arg, &arg, sizeof(arg));
733 static long bch2_ioctl_dev_usage_v2(struct bch_fs *c,
734 struct bch_ioctl_dev_usage_v2 __user *user_arg)
736 struct bch_ioctl_dev_usage_v2 arg;
737 struct bch_dev_usage src;
741 if (!test_bit(BCH_FS_started, &c->flags))
744 if (copy_from_user(&arg, user_arg, sizeof(arg)))
747 if ((arg.flags & ~BCH_BY_INDEX) ||
753 ca = bch2_device_lookup(c, arg.dev, arg.flags);
757 src = bch2_dev_usage_read(ca);
759 arg.state = ca->mi.state;
760 arg.bucket_size = ca->mi.bucket_size;
761 arg.nr_data_types = min(arg.nr_data_types, BCH_DATA_NR);
762 arg.nr_buckets = ca->mi.nbuckets - ca->mi.first_bucket;
764 ret = copy_to_user_errcode(user_arg, &arg, sizeof(arg));
768 for (unsigned i = 0; i < arg.nr_data_types; i++) {
769 struct bch_ioctl_dev_usage_type t = {
770 .buckets = src.d[i].buckets,
771 .sectors = src.d[i].sectors,
772 .fragmented = src.d[i].fragmented,
775 ret = copy_to_user_errcode(&user_arg->d[i], &t, sizeof(t));
780 percpu_ref_put(&ca->ref);
784 static long bch2_ioctl_read_super(struct bch_fs *c,
785 struct bch_ioctl_read_super arg)
787 struct bch_dev *ca = NULL;
791 if (!capable(CAP_SYS_ADMIN))
794 if ((arg.flags & ~(BCH_BY_INDEX|BCH_READ_DEV)) ||
798 mutex_lock(&c->sb_lock);
800 if (arg.flags & BCH_READ_DEV) {
801 ca = bch2_device_lookup(c, arg.dev, arg.flags);
813 if (vstruct_bytes(sb) > arg.size) {
818 ret = copy_to_user_errcode((void __user *)(unsigned long)arg.sb, sb,
821 if (!IS_ERR_OR_NULL(ca))
822 percpu_ref_put(&ca->ref);
823 mutex_unlock(&c->sb_lock);
827 static long bch2_ioctl_disk_get_idx(struct bch_fs *c,
828 struct bch_ioctl_disk_get_idx arg)
830 dev_t dev = huge_decode_dev(arg.dev);
834 if (!capable(CAP_SYS_ADMIN))
840 for_each_online_member(ca, c, i)
841 if (ca->dev == dev) {
842 percpu_ref_put(&ca->io_ref);
846 return -BCH_ERR_ENOENT_dev_idx_not_found;
849 static long bch2_ioctl_disk_resize(struct bch_fs *c,
850 struct bch_ioctl_disk_resize arg)
855 if (!capable(CAP_SYS_ADMIN))
858 if ((arg.flags & ~BCH_BY_INDEX) ||
862 ca = bch2_device_lookup(c, arg.dev, arg.flags);
866 ret = bch2_dev_resize(c, ca, arg.nbuckets);
868 percpu_ref_put(&ca->ref);
872 static long bch2_ioctl_disk_resize_journal(struct bch_fs *c,
873 struct bch_ioctl_disk_resize_journal arg)
878 if (!capable(CAP_SYS_ADMIN))
881 if ((arg.flags & ~BCH_BY_INDEX) ||
885 if (arg.nbuckets > U32_MAX)
888 ca = bch2_device_lookup(c, arg.dev, arg.flags);
892 ret = bch2_set_nr_journal_buckets(c, ca, arg.nbuckets);
894 percpu_ref_put(&ca->ref);
898 static int bch2_fsck_online_thread_fn(void *arg)
900 struct fsck_thread *thr = container_of(arg, struct fsck_thread, thr);
901 struct bch_fs *c = thr->c;
903 struct bch_fs *c = bch2_fs_open(thr->devs, thr->nr_devs, thr->opts);
905 thr->thr.ret = PTR_ERR_OR_ZERO(c);
912 static long bch2_ioctl_fsck_online(struct bch_fs *c,
913 struct bch_ioctl_fsck_online arg)
915 struct fsck_thread *thr = NULL;
921 thr = kzalloc(sizeof(*thr), GFP_KERNEL);
926 thr->output.buf = PRINTBUF;
927 thr->output.buf.atomic++;
928 spin_lock_init(&thr->output.lock);
929 init_waitqueue_head(&thr->output.wait);
930 darray_init(&thr->output2);
932 ret = run_thread_with_file(&thr->thr,
934 bch2_fsck_online_thread_fn,
938 bch2_fsck_thread_free(thr);
/*
 * Helper for bch2_fs_ioctl(): copy the by-value arg struct in and dispatch.
 * Wrapped in do/while(0) so it's safe as a single statement in a case label;
 * expects 'arg', 'ret', 'c' and an 'out' label in the enclosing scope.
 */
#define BCH_IOCTL(_name, _argtype)					\
do {									\
	_argtype i;							\
									\
	if (copy_from_user(&i, arg, sizeof(i)))				\
		return -EFAULT;						\
	ret = bch2_ioctl_##_name(c, i);					\
	goto out;							\
} while (0)
952 long bch2_fs_ioctl(struct bch_fs *c, unsigned cmd, void __user *arg)
957 case BCH_IOCTL_QUERY_UUID:
958 return bch2_ioctl_query_uuid(c, arg);
959 case BCH_IOCTL_FS_USAGE:
960 return bch2_ioctl_fs_usage(c, arg);
961 case BCH_IOCTL_DEV_USAGE:
962 return bch2_ioctl_dev_usage(c, arg);
963 case BCH_IOCTL_DEV_USAGE_V2:
964 return bch2_ioctl_dev_usage_v2(c, arg);
966 case BCH_IOCTL_START:
967 BCH_IOCTL(start, struct bch_ioctl_start);
969 return bch2_ioctl_stop(c);
971 case BCH_IOCTL_READ_SUPER:
972 BCH_IOCTL(read_super, struct bch_ioctl_read_super);
973 case BCH_IOCTL_DISK_GET_IDX:
974 BCH_IOCTL(disk_get_idx, struct bch_ioctl_disk_get_idx);
977 if (!test_bit(BCH_FS_started, &c->flags))
981 case BCH_IOCTL_DISK_ADD:
982 BCH_IOCTL(disk_add, struct bch_ioctl_disk);
983 case BCH_IOCTL_DISK_REMOVE:
984 BCH_IOCTL(disk_remove, struct bch_ioctl_disk);
985 case BCH_IOCTL_DISK_ONLINE:
986 BCH_IOCTL(disk_online, struct bch_ioctl_disk);
987 case BCH_IOCTL_DISK_OFFLINE:
988 BCH_IOCTL(disk_offline, struct bch_ioctl_disk);
989 case BCH_IOCTL_DISK_SET_STATE:
990 BCH_IOCTL(disk_set_state, struct bch_ioctl_disk_set_state);
992 BCH_IOCTL(data, struct bch_ioctl_data);
993 case BCH_IOCTL_DISK_RESIZE:
994 BCH_IOCTL(disk_resize, struct bch_ioctl_disk_resize);
995 case BCH_IOCTL_DISK_RESIZE_JOURNAL:
996 BCH_IOCTL(disk_resize_journal, struct bch_ioctl_disk_resize_journal);
997 case BCH_IOCTL_FSCK_ONLINE:
998 BCH_IOCTL(fsck_online, struct bch_ioctl_fsck_online);
1004 ret = bch2_err_class(ret);
/* minor number -> bch_fs mapping for per-fs control devices */
static DEFINE_IDR(bch_chardev_minor);
1010 static long bch2_chardev_ioctl(struct file *filp, unsigned cmd, unsigned long v)
1012 unsigned minor = iminor(file_inode(filp));
1013 struct bch_fs *c = minor < U8_MAX ? idr_find(&bch_chardev_minor, minor) : NULL;
1014 void __user *arg = (void __user *) v;
1017 ? bch2_fs_ioctl(c, cmd, arg)
1018 : bch2_global_ioctl(cmd, arg);
1021 static const struct file_operations bch_chardev_fops = {
1022 .owner = THIS_MODULE,
1023 .unlocked_ioctl = bch2_chardev_ioctl,
1024 .open = nonseekable_open,
/* module-wide chardev state, set up in bch2_chardev_init() */
static int bch_chardev_major;
static struct class *bch_chardev_class;
static struct device *bch_chardev;
1031 void bch2_fs_chardev_exit(struct bch_fs *c)
1033 if (!IS_ERR_OR_NULL(c->chardev))
1034 device_unregister(c->chardev);
1036 idr_remove(&bch_chardev_minor, c->minor);
1039 int bch2_fs_chardev_init(struct bch_fs *c)
1041 c->minor = idr_alloc(&bch_chardev_minor, c, 0, 0, GFP_KERNEL);
1045 c->chardev = device_create(bch_chardev_class, NULL,
1046 MKDEV(bch_chardev_major, c->minor), c,
1047 "bcachefs%u-ctl", c->minor);
1048 if (IS_ERR(c->chardev))
1049 return PTR_ERR(c->chardev);
1054 void bch2_chardev_exit(void)
1056 if (!IS_ERR_OR_NULL(bch_chardev_class))
1057 device_destroy(bch_chardev_class,
1058 MKDEV(bch_chardev_major, U8_MAX));
1059 if (!IS_ERR_OR_NULL(bch_chardev_class))
1060 class_destroy(bch_chardev_class);
1061 if (bch_chardev_major > 0)
1062 unregister_chrdev(bch_chardev_major, "bcachefs");
1065 int __init bch2_chardev_init(void)
1067 bch_chardev_major = register_chrdev(0, "bcachefs-ctl", &bch_chardev_fops);
1068 if (bch_chardev_major < 0)
1069 return bch_chardev_major;
1071 bch_chardev_class = class_create("bcachefs");
1072 if (IS_ERR(bch_chardev_class))
1073 return PTR_ERR(bch_chardev_class);
1075 bch_chardev = device_create(bch_chardev_class, NULL,
1076 MKDEV(bch_chardev_major, U8_MAX),
1077 NULL, "bcachefs-ctl");
1078 if (IS_ERR(bch_chardev))
1079 return PTR_ERR(bch_chardev);
1084 #endif /* NO_BCACHEFS_CHARDEV */