5 #include <sys/statvfs.h>
7 #include <fuse_lowlevel.h>
10 #include "libbcachefs.h"
11 #include "tools-util.h"
13 #include "libbcachefs/bcachefs.h"
14 #include "libbcachefs/alloc_foreground.h"
15 #include "libbcachefs/btree_iter.h"
16 #include "libbcachefs/buckets.h"
17 #include "libbcachefs/dirent.h"
18 #include "libbcachefs/error.h"
19 #include "libbcachefs/fs-common.h"
20 #include "libbcachefs/inode.h"
21 #include "libbcachefs/io.h"
22 #include "libbcachefs/opts.h"
23 #include "libbcachefs/super.h"
26 #include "libbcachefs/fs.h"
28 #include <linux/dcache.h>
30 /* XXX cut and pasted from fsck.c */
31 #define QSTR(n) { { { .len = strlen(n) } }, .name = n }
/*
 * FUSE presents the filesystem root as inode 1, while the bcachefs root
 * inode is 4096; these two helpers translate between the numbering schemes.
 * Both are idempotent for already-mapped values.
 */
33 static inline u64 map_root_ino(u64 ino)
35 return ino == 1 ? 4096 : ino;
/* Inverse of map_root_ino(): convert a bcachefs inum back to a FUSE ino. */
38 static inline u64 unmap_root_ino(u64 ino)
40 return ino == 4096 ? 1 : ino;
/*
 * Convert an unpacked bcachefs inode to a struct stat for FUSE replies.
 * st_ino is the raw bi_inum here; callers remap the root via
 * unmap_root_ino() before conversion where needed.
 */
43 static struct stat inode_to_stat(struct bch_fs *c,
44 struct bch_inode_unpacked *bi)
46 return (struct stat) {
47 .st_ino = bi->bi_inum,
48 .st_size = bi->bi_size,
49 .st_mode = bi->bi_mode,
52 .st_nlink = bch2_inode_nlink_get(bi),
53 .st_rdev = bi->bi_dev,
54 .st_blksize = block_bytes(c),
55 .st_blocks = bi->bi_sectors,
56 .st_atim = bch2_time_to_timespec(c, bi->bi_atime),
57 .st_mtim = bch2_time_to_timespec(c, bi->bi_mtime),
58 .st_ctim = bch2_time_to_timespec(c, bi->bi_ctime),
/*
 * Build a fuse_entry_param (attrs + cache timeouts) from an unpacked inode.
 * DBL_MAX timeouts tell the kernel to cache the entry/attrs indefinitely —
 * valid only while this process is the sole writer of the filesystem.
 */
62 static struct fuse_entry_param inode_to_entry(struct bch_fs *c,
63 struct bch_inode_unpacked *bi)
65 return (struct fuse_entry_param) {
67 .generation = bi->bi_generation,
68 .attr = inode_to_stat(c, bi),
69 .attr_timeout = DBL_MAX,
70 .entry_timeout = DBL_MAX,
/*
 * FUSE init callback: opt into writeback caching when the kernel supports
 * it, otherwise log and fall back to write-through.
 */
74 static void bcachefs_fuse_init(void *arg, struct fuse_conn_info *conn)
76 if (conn->capable & FUSE_CAP_WRITEBACK_CACHE) {
77 fuse_log(FUSE_LOG_DEBUG, "fuse_init: activating writeback\n");
78 conn->want |= FUSE_CAP_WRITEBACK_CACHE;
80 fuse_log(FUSE_LOG_DEBUG, "fuse_init: writeback not capable\n");
/* POSIX ACL support deliberately left disabled for now. */
82 //conn->want |= FUSE_CAP_POSIX_ACL;
/*
 * FUSE destroy callback; arg is the struct bch_fs passed to
 * fuse_session_new().  Presumably shuts the filesystem down — the body is
 * not fully visible here; TODO confirm it calls bch2_fs_stop(c).
 */
85 static void bcachefs_fuse_destroy(void *arg)
87 struct bch_fs *c = arg;
/*
 * Lookup: resolve `name` within directory `dir`.
 * Reads the dir inode for its hash seed, resolves the dirent to an inum,
 * loads that inode, and replies with a fuse_entry_param.  Errors are
 * reported via fuse_reply_err (bcachefs returns negative errnos, FUSE
 * wants positive ones, hence -ret).
 */
92 static void bcachefs_fuse_lookup(fuse_req_t req, fuse_ino_t dir,
95 struct bch_fs *c = fuse_req_userdata(req);
96 struct bch_inode_unpacked bi;
97 struct qstr qstr = QSTR(name);
101 dir = map_root_ino(dir);
103 ret = bch2_inode_find_by_inum(c, dir, &bi);
105 fuse_reply_err(req, -ret);
/* The directory's hash seed is needed to look up its dirents. */
109 struct bch_hash_info hash_info = bch2_hash_info_init(c, &bi);
111 inum = bch2_dirent_lookup(c, dir, &hash_info, &qstr);
117 ret = bch2_inode_find_by_inum(c, inum, &bi);
/* Present the bcachefs root inode to FUSE as ino 1. */
121 bi.bi_inum = unmap_root_ino(bi.bi_inum);
123 struct fuse_entry_param e = inode_to_entry(c, &bi);
124 fuse_reply_entry(req, &e);
127 fuse_reply_err(req, -ret);
/*
 * Getattr: load the inode and reply with its stat data.
 * Attributes are cached indefinitely (DBL_MAX) — safe only for a
 * single-writer mount.
 */
130 static void bcachefs_fuse_getattr(fuse_req_t req, fuse_ino_t inum,
131 struct fuse_file_info *fi)
133 struct bch_fs *c = fuse_req_userdata(req);
134 struct bch_inode_unpacked bi;
138 inum = map_root_ino(inum);
140 ret = bch2_inode_find_by_inum(c, inum, &bi);
142 fuse_reply_err(req, -ret);
146 bi.bi_inum = unmap_root_ino(bi.bi_inum);
148 attr = inode_to_stat(c, &bi);
149 fuse_reply_attr(req, &attr, DBL_MAX);
/*
 * Setattr: apply the FUSE_SET_ATTR_* fields selected by `to_set` to the
 * inode inside a btree transaction, then reply with the updated attrs.
 * NOTE(review): FUSE_SET_ATTR_SIZE only updates bi_size here — it does not
 * truncate/free extents beyond the new size; confirm whether truncation is
 * handled elsewhere.
 */
152 static void bcachefs_fuse_setattr(fuse_req_t req, fuse_ino_t inum,
153 struct stat *attr, int to_set,
154 struct fuse_file_info *fi)
156 struct bch_fs *c = fuse_req_userdata(req);
157 struct bch_inode_unpacked inode_u;
158 struct btree_trans trans;
159 struct btree_iter *iter;
163 inum = map_root_ino(inum);
165 bch2_trans_init(&trans, c, 0, 0);
167 bch2_trans_begin(&trans);
168 now = bch2_current_time(c);
/* Lock the inode for update and unpack it into inode_u. */
170 iter = bch2_inode_peek(&trans, &inode_u, inum, BTREE_ITER_INTENT);
171 ret = PTR_ERR_OR_ZERO(iter);
175 if (to_set & FUSE_SET_ATTR_MODE)
176 inode_u.bi_mode = attr->st_mode;
177 if (to_set & FUSE_SET_ATTR_UID)
178 inode_u.bi_uid = attr->st_uid;
179 if (to_set & FUSE_SET_ATTR_GID)
180 inode_u.bi_gid = attr->st_gid;
181 if (to_set & FUSE_SET_ATTR_SIZE)
182 inode_u.bi_size = attr->st_size;
183 if (to_set & FUSE_SET_ATTR_ATIME)
184 inode_u.bi_atime = timespec_to_bch2_time(c, attr->st_atim);
185 if (to_set & FUSE_SET_ATTR_MTIME)
186 inode_u.bi_mtime = timespec_to_bch2_time(c, attr->st_mtim);
/* *_NOW checks come after the explicit-time checks so they win if both set. */
187 if (to_set & FUSE_SET_ATTR_ATIME_NOW)
188 inode_u.bi_atime = now;
189 if (to_set & FUSE_SET_ATTR_MTIME_NOW)
190 inode_u.bi_mtime = now;
193 ret = bch2_inode_write(&trans, iter, &inode_u) ?:
194 bch2_trans_commit(&trans, NULL, NULL,
196 BTREE_INSERT_NOFAIL);
201 bch2_trans_exit(&trans);
204 *attr = inode_to_stat(c, &inode_u);
205 fuse_reply_attr(req, attr, DBL_MAX);
207 fuse_reply_err(req, -ret);
/*
 * Shared helper for mknod/create/symlink: allocate and link a new inode
 * named `name` in `dir` with the given mode/rdev, inside a single atomic
 * transaction.  Returns 0 or a negative bcachefs errno; on success
 * *new_inode holds the created inode.
 */
211 static int do_create(struct bch_fs *c, u64 dir,
212 const char *name, mode_t mode, dev_t rdev,
213 struct bch_inode_unpacked *new_inode)
215 struct qstr qstr = QSTR(name);
216 struct bch_inode_unpacked dir_u;
218 dir = map_root_ino(dir);
220 bch2_inode_init_early(c, new_inode);
222 return bch2_trans_do(c, NULL, BTREE_INSERT_ATOMIC,
223 bch2_create_trans(&trans,
226 0, 0, mode, rdev, NULL, NULL));
/*
 * Mknod: create an inode of arbitrary type via do_create() and reply with
 * the new entry, or an errno on failure.
 */
229 static void bcachefs_fuse_mknod(fuse_req_t req, fuse_ino_t dir,
230 const char *name, mode_t mode,
233 struct bch_fs *c = fuse_req_userdata(req);
234 struct bch_inode_unpacked new_inode;
237 ret = do_create(c, dir, name, mode, rdev, &new_inode);
241 struct fuse_entry_param e = inode_to_entry(c, &new_inode);
242 fuse_reply_entry(req, &e);
245 fuse_reply_err(req, -ret);
/*
 * Mkdir: delegates to mknod.  The kernel passes only permission bits in
 * `mode` for mkdir (asserted here); the S_IFDIR type bit is added before
 * the mknod call — presumably on a line not visible in this extract.
 */
248 static void bcachefs_fuse_mkdir(fuse_req_t req, fuse_ino_t dir,
249 const char *name, mode_t mode)
251 BUG_ON(mode & S_IFMT);
254 bcachefs_fuse_mknod(req, dir, name, mode, 0);
/*
 * Unlink: remove the dirent `name` from `dir` (and drop the inode's nlink)
 * in one atomic, no-fail transaction.  Replies 0 on success.
 */
257 static void bcachefs_fuse_unlink(fuse_req_t req, fuse_ino_t dir,
260 struct bch_fs *c = fuse_req_userdata(req);
261 struct bch_inode_unpacked dir_u, inode_u;
262 struct qstr qstr = QSTR(name);
265 dir = map_root_ino(dir);
267 ret = bch2_trans_do(c, NULL, BTREE_INSERT_ATOMIC|BTREE_INSERT_NOFAIL,
268 bch2_unlink_trans(&trans, dir, &dir_u,
271 fuse_reply_err(req, -ret);
/*
 * Rmdir: same path as unlink.  Note `dir` gets mapped here AND again inside
 * bcachefs_fuse_unlink(); that's harmless because map_root_ino() is
 * idempotent (4096 maps to itself).
 */
274 static void bcachefs_fuse_rmdir(fuse_req_t req, fuse_ino_t dir,
277 dir = map_root_ino(dir);
279 bcachefs_fuse_unlink(req, dir, name);
282 static void bcachefs_fuse_rename(fuse_req_t req,
283 fuse_ino_t src_dir, const char *srcname,
284 fuse_ino_t dst_dir, const char *dstname,
287 struct bch_fs *c = fuse_req_userdata(req);
288 struct bch_inode_unpacked dst_dir_u, src_dir_u;
289 struct bch_inode_unpacked src_inode_u, dst_inode_u;
290 struct qstr dst_name = QSTR(srcname);
291 struct qstr src_name = QSTR(dstname);
294 src_dir = map_root_ino(src_dir);
295 dst_dir = map_root_ino(dst_dir);
297 /* XXX handle overwrites */
298 ret = bch2_trans_do(c, NULL, BTREE_INSERT_ATOMIC,
299 bch2_rename_trans(&trans,
302 &src_inode_u, &dst_inode_u,
303 &src_name, &dst_name,
306 fuse_reply_err(req, -ret);
/*
 * Link: create a hard link `newname` in `newparent` pointing at `inum`,
 * then reply with the (nlink-bumped) inode as a new entry.
 * NOTE(review): `inum` itself is not passed through map_root_ino() here,
 * only `newparent` — confirm the root inode can never be hard-linked.
 */
309 static void bcachefs_fuse_link(fuse_req_t req, fuse_ino_t inum,
310 fuse_ino_t newparent, const char *newname)
312 struct bch_fs *c = fuse_req_userdata(req);
313 struct bch_inode_unpacked inode_u;
314 struct qstr qstr = QSTR(newname);
317 newparent = map_root_ino(newparent);
319 ret = bch2_trans_do(c, NULL, BTREE_INSERT_ATOMIC,
320 bch2_link_trans(&trans, newparent,
321 inum, &inode_u, &qstr));
324 struct fuse_entry_param e = inode_to_entry(c, &inode_u);
325 fuse_reply_entry(req, &e);
327 fuse_reply_err(req, -ret);
/*
 * Open: no per-file state is kept; just configure kernel-side caching
 * (page cache retained across opens, readdir results cached) and ack.
 */
331 static void bcachefs_fuse_open(fuse_req_t req, fuse_ino_t inum,
332 struct fuse_file_info *fi)
334 fi->direct_io = false;
335 fi->keep_cache = true;
336 fi->cache_readdir = true;
338 fuse_reply_open(req, fi);
/*
 * Wrap a plain userspace buffer in a single-segment bio so it can be
 * handed to the bcachefs read/write paths.
 */
341 static void userbio_init(struct bio *bio, struct bio_vec *bv,
342 void *buf, size_t size)
344 bio_init(bio, bv, 1);
345 bio->bi_iter.bi_size = size;
/*
 * Compute the effective IO options for an inode: start from the
 * filesystem-wide options and overlay any per-inode settings.
 * Returns nonzero if the inode can't be loaded.
 */
351 static int get_inode_io_opts(struct bch_fs *c, u64 inum,
352 struct bch_io_opts *opts)
354 struct bch_inode_unpacked inode;
355 if (bch2_inode_find_by_inum(c, inum, &inode))
358 *opts = bch2_opts_to_inode_opts(c->opts);
359 bch2_io_opts_apply(opts, bch2_inode_opts_get(&inode));
/* Read completion: drop the closure ref so the waiting reader wakes up. */
363 static void bcachefs_fuse_read_endio(struct bio *bio)
365 closure_put(bio->bi_private);
/*
 * Describes a user IO range widened to block alignment: aligned
 * [start, end) plus the pad bytes on either side of the caller's range.
 */
368 struct fuse_align_io {
376 /* Handle unaligned start and end */
377 /* TODO: align to block_bytes, sector size, or page size? */
378 static struct fuse_align_io align_io(const struct bch_fs *c, size_t size,
381 struct fuse_align_io align;
/* Round the requested [offset, offset+size) out to block boundaries. */
385 align.start = round_down(offset, block_bytes(c));
386 align.pad_start = offset - align.start;
388 off_t end = offset + size;
389 align.end = round_up(end, block_bytes(c));
390 align.pad_end = align.end - end;
392 align.size = align.end - align.start;
398  * Given an aligned number of bytes transferred, figure out how many unaligned
399  * bytes were transferred.
401 static size_t align_fix_up_bytes(const struct fuse_align_io *align,
/* Subtract the leading pad, then the trailing pad (clamping at zero). */
406 if (align_bytes > align->pad_start) {
407 bytes = align_bytes - align->pad_start;
408 bytes = bytes > align->pad_end ? bytes - align->pad_end : 0;
/*
 * Synchronously read a block-aligned range of `inum` into `buf`.
 * Builds an on-stack rbio over the user buffer, issues bch2_read(), and
 * waits on a stack closure; returns 0 or a negative errno derived from
 * the bio status.
 */
417 static int read_aligned(struct bch_fs *c, fuse_ino_t inum, size_t aligned_size,
418 off_t aligned_offset, void *buf)
420 BUG_ON(aligned_size & (block_bytes(c) - 1));
421 BUG_ON(aligned_offset & (block_bytes(c) - 1));
423 struct bch_io_opts io_opts;
424 if (get_inode_io_opts(c, inum, &io_opts))
427 struct bch_read_bio rbio;
429 userbio_init(&rbio.bio, &bv, buf, aligned_size);
430 bio_set_op_attrs(&rbio.bio, REQ_OP_READ, REQ_SYNC);
/* Sector-granular position: byte offset >> 9. */
431 rbio.bio.bi_iter.bi_sector = aligned_offset >> 9;
434 closure_init_stack(&cl);
437 rbio.bio.bi_end_io = bcachefs_fuse_read_endio;
438 rbio.bio.bi_private = &cl;
440 bch2_read(c, rbio_init(&rbio.bio, io_opts), inum);
444 return -blk_status_to_errno(rbio.bio.bi_status);
/*
 * Read: clamp the request against the inode size, perform a block-aligned
 * read into a page-aligned bounce buffer, and reply with the unaligned
 * sub-range the caller asked for.
 */
447 static void bcachefs_fuse_read(fuse_req_t req, fuse_ino_t inum,
448 size_t size, off_t offset,
449 struct fuse_file_info *fi)
451 struct bch_fs *c = fuse_req_userdata(req);
453 fuse_log(FUSE_LOG_DEBUG, "bcachefs_fuse_read(%llu, %zd, %lld)\n",
456 /* Check inode size. */
457 struct bch_inode_unpacked bi;
458 int ret = bch2_inode_find_by_inum(c, inum, &bi);
460 fuse_reply_err(req, -ret);
/* Reads entirely past EOF return an empty buffer. */
464 off_t end = min_t(u64, bi.bi_size, offset + size);
466 fuse_reply_buf(req, NULL, 0);
471 struct fuse_align_io align = align_io(c, size, offset);
473 void *buf = aligned_alloc(PAGE_SIZE, align.size);
475 fuse_reply_err(req, ENOMEM);
479 ret = read_aligned(c, inum, align.size, align.start, buf);
/* Skip the leading pad so the caller sees exactly their range. */
482 fuse_reply_buf(req, buf + align.pad_start, size);
484 fuse_reply_err(req, -ret);
/*
 * After a data write: extend bi_size if the write grew the file (never
 * shrinks it), and bump mtime/ctime — all in one committed transaction.
 * Returns 0 or a negative errno.
 */
489 static int write_set_inode(struct bch_fs *c, fuse_ino_t inum, off_t new_size)
491 struct btree_trans trans;
492 struct btree_iter *iter;
493 struct bch_inode_unpacked inode_u;
497 bch2_trans_init(&trans, c, 0, 0);
499 bch2_trans_begin(&trans);
500 now = bch2_current_time(c);
502 iter = bch2_inode_peek(&trans, &inode_u, inum, BTREE_ITER_INTENT);
503 ret = PTR_ERR_OR_ZERO(iter);
/* Size only ever grows here; truncation is setattr's job. */
507 inode_u.bi_size = max_t(u64, inode_u.bi_size, new_size);
508 inode_u.bi_mtime = now;
509 inode_u.bi_ctime = now;
511 ret = bch2_inode_write(&trans, iter, &inode_u);
515 ret = bch2_trans_commit(&trans, NULL, NULL,
516 BTREE_INSERT_ATOMIC|BTREE_INSERT_NOFAIL);
522 bch2_trans_exit(&trans);
/*
 * Synchronously write a block-aligned buffer to `inum`.  Sets up a
 * bch_write_op over the user buffer, reserves disk space, and waits for
 * completion; *written_out receives the byte count actually written
 * (op.written is in sectors, hence << 9).
 */
526 static int write_aligned(struct bch_fs *c, fuse_ino_t inum,
527 struct bch_io_opts io_opts, void *buf,
528 size_t aligned_size, off_t aligned_offset,
531 struct bch_write_op op = { 0 };
535 BUG_ON(aligned_size & (block_bytes(c) - 1));
536 BUG_ON(aligned_offset & (block_bytes(c) - 1));
540 closure_init_stack(&cl);
542 bch2_write_op_init(&op, c, io_opts); /* XXX reads from op?! */
543 op.write_point = writepoint_hashed(0);
544 op.nr_replicas = io_opts.data_replicas;
545 op.target = io_opts.foreground_target;
546 op.pos = POS(inum, aligned_offset >> 9);
548 userbio_init(&op.wbio.bio, &bv, buf, aligned_size);
549 bio_set_op_attrs(&op.wbio.bio, REQ_OP_WRITE, REQ_SYNC);
/* Reserve space up front; failure path does not retry. */
551 if (bch2_disk_reservation_get(c, &op.res, aligned_size >> 9,
552 op.nr_replicas, 0)) {
553 /* XXX: use check_range_allocated like dio write path */
557 closure_call(&op.cl, bch2_write, NULL, &cl);
561 *written_out = op.written << 9;
/*
 * Write: read-modify-write of the block-aligned range covering
 * [offset, offset+size).  Partial first/last blocks are read into a
 * page-aligned bounce buffer, the user data is overlaid, the aligned
 * buffer is written out, and finally the inode size/times are updated.
 */
566 static void bcachefs_fuse_write(fuse_req_t req, fuse_ino_t inum,
567 const char *buf, size_t size,
569 struct fuse_file_info *fi)
571 struct bch_fs *c = fuse_req_userdata(req);
572 struct bch_io_opts io_opts;
573 size_t aligned_written;
576 fuse_log(FUSE_LOG_DEBUG, "bcachefs_fuse_write(%llu, %zd, %lld)\n",
579 struct fuse_align_io align = align_io(c, size, offset);
581 if (get_inode_io_opts(c, inum, &io_opts)) {
586 /* Realign the data and read in start and end, if needed */
587 void *aligned_buf = aligned_alloc(PAGE_SIZE, align.size);
589 /* Read partial start data. */
590 if (align.pad_start) {
/* Zero first: the read may be past EOF and return nothing. */
591 memset(aligned_buf, 0, block_bytes(c));
593 ret = read_aligned(c, inum, block_bytes(c), align.start,
600  * Read partial end data. If the whole write fits in one block, the
601  * start data and the end data are the same so this isn't needed.
604 !(align.pad_start && align.size == block_bytes(c))) {
605 off_t partial_end_start = align.end - block_bytes(c);
606 size_t buf_offset = align.size - block_bytes(c);
608 memset(aligned_buf + buf_offset, 0, block_bytes(c));
610 ret = read_aligned(c, inum, block_bytes(c), partial_end_start,
611 aligned_buf + buf_offset);
616 /* Overlay what we want to write. */
617 memcpy(aligned_buf + align.pad_start, buf, size);
619 /* Actually write. */
620 ret = write_aligned(c, inum, io_opts, aligned_buf,
621 align.size, align.start, &aligned_written);
623 /* Figure out how many unaligned bytes were written. */
624 size_t written = align_fix_up_bytes(&align, aligned_written);
625 BUG_ON(written > size);
627 fuse_log(FUSE_LOG_DEBUG, "bcachefs_fuse_write: wrote %zd bytes\n",
635  * TODO: Integrate with bch2_extent_update()
/* Grow bi_size / bump times to cover what we just wrote. */
638 ret = write_set_inode(c, inum, offset + written);
641 BUG_ON(written == 0);
642 fuse_reply_write(req, written);
647 fuse_reply_err(req, -ret);
/*
 * Symlink: create the S_IFLNK inode via do_create(), then write the link
 * target (including its NUL terminator, hence link_len + 1) as ordinary
 * block-aligned file data, and set the inode size to the written length.
 */
650 static void bcachefs_fuse_symlink(fuse_req_t req, const char *link,
651 fuse_ino_t dir, const char *name)
653 struct bch_fs *c = fuse_req_userdata(req);
654 struct bch_inode_unpacked new_inode;
655 size_t link_len = strlen(link);
658 dir = map_root_ino(dir);
660 ret = do_create(c, dir, name, S_IFLNK|S_IRWXUGO, 0, &new_inode);
664 struct bch_io_opts io_opts;
665 ret = get_inode_io_opts(c, new_inode.bi_inum, &io_opts);
669 struct fuse_align_io align = align_io(c, link_len + 1, 0);
671 void *aligned_buf = aligned_alloc(PAGE_SIZE, align.size);
/* Zero-fill so the block's tail past the string is deterministic. */
672 memset(aligned_buf, 0, align.size);
673 memcpy(aligned_buf, link, link_len); /* already terminated */
675 size_t aligned_written;
676 ret = write_aligned(c, new_inode.bi_inum, io_opts, aligned_buf,
677 align.size, align.start, &aligned_written);
683 size_t written = align_fix_up_bytes(&align, aligned_written);
684 BUG_ON(written != link_len + 1); // TODO: handle short
686 ret = write_set_inode(c, new_inode.bi_inum, written);
690 new_inode.bi_size = written;
692 struct fuse_entry_param e = inode_to_entry(c, &new_inode);
693 fuse_reply_entry(req, &e);
697 fuse_reply_err(req, -ret);
/*
 * Readlink: the symlink target is stored as file data (see
 * bcachefs_fuse_symlink), so read bi_size bytes from offset 0 and reply
 * with the NUL-terminated string.
 */
700 static void bcachefs_fuse_readlink(fuse_req_t req, fuse_ino_t inum)
702 struct bch_fs *c = fuse_req_userdata(req);
705 struct bch_inode_unpacked bi;
706 int ret = bch2_inode_find_by_inum(c, inum, &bi);
710 struct fuse_align_io align = align_io(c, bi.bi_size, 0);
713 buf = aligned_alloc(PAGE_SIZE, align.size);
717 ret = read_aligned(c, inum, align.size, align.start, buf);
/* Symlink data was written NUL-terminated and block-padded with zeros. */
721 BUG_ON(buf[align.size - 1] != 0);
723 fuse_reply_readlink(req, buf);
727 fuse_reply_err(req, -ret);
734  * FUSE flush is essentially the close() call, however it is not guaranteed
735  * that one flush happens per open/create.
737  * It doesn't have to do anything, and is mostly relevant for NFS-style
738  * filesystems where close has some relationship to caching.
/* Stub: intentionally a no-op (see comment above). */
740 static void bcachefs_fuse_flush(fuse_req_t req, fuse_ino_t inum,
741 struct fuse_file_info *fi)
743 struct bch_fs *c = fuse_req_userdata(req);
/* Stub: no per-open state to release yet. */
746 static void bcachefs_fuse_release(fuse_req_t req, fuse_ino_t inum,
747 struct fuse_file_info *fi)
749 struct bch_fs *c = fuse_req_userdata(req);
/* Stub: fsync not implemented (handler is commented out of the ops table). */
752 static void bcachefs_fuse_fsync(fuse_req_t req, fuse_ino_t inum, int datasync,
753 struct fuse_file_info *fi)
755 struct bch_fs *c = fuse_req_userdata(req);
/* Stub: opendir not implemented. */
758 static void bcachefs_fuse_opendir(fuse_req_t req, fuse_ino_t inum,
759 struct fuse_file_info *fi)
761 struct bch_fs *c = fuse_req_userdata(req);
/* One buffered directory entry, held until the next entry arrives. */
765 struct fuse_dir_entry {
/*
 * Readdir state threaded through bch2_readdir() via the embedded
 * dir_context; `prev` buffers the last entry seen (see fuse_filldir).
 */
771 struct fuse_dir_context {
772 struct dir_context ctx;
777 struct fuse_dir_entry *prev;
/*
 * Emit the buffered previous entry (ctx->prev) into the FUSE reply buffer,
 * with `pos` as the offset of the *next* entry.  Only st_ino and the type
 * bits of st_mode are consulted by fuse_add_direntry().
 */
780 static int fuse_send_dir_entry(struct fuse_dir_context *ctx, loff_t pos)
782 struct fuse_dir_entry *de = ctx->prev;
785 struct stat statbuf = {
786 .st_ino = unmap_root_ino(de->ino),
/* DT_* dirent type shifted into the S_IFMT position of st_mode. */
787 .st_mode = de->type << 12,
790 size_t len = fuse_add_direntry(ctx->req, ctx->buf, ctx->bufsize,
791 de->name, &statbuf, pos);
/* fuse_add_direntry() returns the needed length even when it didn't fit. */
795 if (len > ctx->bufsize)
/*
 * dir_context actor called by bch2_readdir() once per dirent.
 * Entries are emitted one step behind: the previous entry is flushed when
 * the next one arrives, because FUSE needs each entry's offset to point at
 * its successor.  The current entry is copied (name inline, NUL-added)
 * into a freshly malloc'd fuse_dir_entry.
 */
804 static int fuse_filldir(struct dir_context *_ctx,
805 const char *name, int namelen,
806 loff_t pos, u64 ino, unsigned type)
808 struct fuse_dir_context *ctx =
809 container_of(_ctx, struct fuse_dir_context, ctx);
811 fuse_log(FUSE_LOG_DEBUG, "fuse_filldir(ctx={.ctx={.pos=%llu}}, "
812 "name=%s, namelen=%d, pos=%lld, dir=%llu, type=%u)\n",
813 ctx->ctx.pos, name, namelen, pos, ino, type);
816  * We have to emit directory entries after reading the next entry,
817  * because the previous entry contains a pointer to next.
820 int ret = fuse_send_dir_entry(ctx, pos);
/* Flexible-array-style allocation: struct + name + NUL. */
825 struct fuse_dir_entry *cur = malloc(sizeof *cur + namelen + 1);
828 memcpy(cur->name, name, namelen);
829 cur->name[namelen] = 0;
/*
 * Synthesize the "." and ".." entries at positions 0 and 1, since
 * bch2_readdir() doesn't produce them.  Returns false if the reply buffer
 * filled up.  ".." currently always points at the root (TODO below).
 */
836 static bool handle_dots(struct fuse_dir_context *ctx, fuse_ino_t dir)
840 if (ctx->ctx.pos == 0) {
841 ret = fuse_filldir(&ctx->ctx, ".", 1, ctx->ctx.pos,
842 unmap_root_ino(dir), DT_DIR);
848 if (ctx->ctx.pos == 1) {
849 ret = fuse_filldir(&ctx->ctx, "..", 2, ctx->ctx.pos,
850 /*TODO: parent*/ 1, DT_DIR);
/*
 * Readdir: verify `dir` is a directory, emit "."/".." as needed, walk the
 * dirents via bch2_readdir() with fuse_filldir as the actor, flush the
 * last buffered entry, and reply with however much fit in `buf`.
 */
859 static void bcachefs_fuse_readdir(fuse_req_t req, fuse_ino_t dir,
860 size_t size, off_t off,
861 struct fuse_file_info *fi)
863 struct bch_fs *c = fuse_req_userdata(req);
864 struct bch_inode_unpacked bi;
865 char *buf = calloc(size, 1);
866 struct fuse_dir_context ctx = {
867 .ctx.actor = fuse_filldir,
875 fuse_log(FUSE_LOG_DEBUG, "bcachefs_fuse_readdir(dir=%llu, size=%zu, "
876 "off=%lld)\n", dir, size, off);
878 dir = map_root_ino(dir);
880 ret = bch2_inode_find_by_inum(c, dir, &bi);
884 if (!S_ISDIR(bi.bi_mode)) {
889 if (!handle_dots(&ctx, dir))
892 ret = bch2_readdir(c, dir, &ctx.ctx);
896  * If we have something to send, the error above doesn't matter.
898  * Alternatively, if this send fails, but we previously sent something,
899  * then this is a success.
/* Flush the final buffered entry (fuse_filldir emits one behind). */
902 ret = fuse_send_dir_entry(&ctx, ctx.ctx.pos);
903 if (ret && ctx.buf != buf)
908 fuse_log(FUSE_LOG_DEBUG, "bcachefs_fuse_readdir reply %zd\n",
/* ctx.buf has advanced past each emitted entry; the delta is the length. */
910 fuse_reply_buf(req, buf, ctx.buf - buf);
912 fuse_reply_err(req, -ret);
/* Stub: readdirplus not implemented (commented out of the ops table). */
919 static void bcachefs_fuse_readdirplus(fuse_req_t req, fuse_ino_t dir,
920 size_t size, off_t off,
921 struct fuse_file_info *fi)
/* Stub: releasedir not implemented. */
926 static void bcachefs_fuse_releasedir(fuse_req_t req, fuse_ino_t inum,
927 struct fuse_file_info *fi)
929 struct bch_fs *c = fuse_req_userdata(req);
/* Stub: fsyncdir not implemented. */
932 static void bcachefs_fuse_fsyncdir(fuse_req_t req, fuse_ino_t inum, int datasync,
933 struct fuse_file_info *fi)
935 struct bch_fs *c = fuse_req_userdata(req);
/*
 * Statfs: report capacity/free space in filesystem-block units
 * (sector counts shifted down by c->block_bits) plus name-length limits.
 */
939 static void bcachefs_fuse_statfs(fuse_req_t req, fuse_ino_t inum)
941 struct bch_fs *c = fuse_req_userdata(req);
942 struct bch_fs_usage_short usage = bch2_fs_usage_read_short(c);
943 unsigned shift = c->block_bits;
944 struct statvfs statbuf = {
945 .f_bsize = block_bytes(c),
946 .f_frsize = block_bytes(c),
947 .f_blocks = usage.capacity >> shift,
948 .f_bfree = (usage.capacity - usage.used) >> shift,
949 //.f_bavail = statbuf.f_bfree,
950 .f_files = usage.nr_inodes,
952 .f_namemax = BCH_NAME_MAX,
955 fuse_reply_statfs(req, &statbuf);
/* Stub: setxattr not implemented (commented out of the ops table). */
959 static void bcachefs_fuse_setxattr(fuse_req_t req, fuse_ino_t inum,
960 const char *name, const char *value,
961 size_t size, int flags)
963 struct bch_fs *c = fuse_req_userdata(req);
/*
 * Stub: getxattr not implemented.  NOTE(review): the fuse_reply_xattr()
 * call below is visibly missing its size argument — unfinished code.
 */
966 static void bcachefs_fuse_getxattr(fuse_req_t req, fuse_ino_t inum,
967 const char *name, size_t size)
969 struct bch_fs *c = fuse_req_userdata(req);
971 fuse_reply_xattr(req, );
/* Stub: listxattr not implemented. */
974 static void bcachefs_fuse_listxattr(fuse_req_t req, fuse_ino_t inum, size_t size)
976 struct bch_fs *c = fuse_req_userdata(req);
/* Stub: removexattr not implemented. */
979 static void bcachefs_fuse_removexattr(fuse_req_t req, fuse_ino_t inum,
982 struct bch_fs *c = fuse_req_userdata(req);
/*
 * Create: atomically make a regular file via do_create() and reply with
 * both the new entry and the (unmodified) open flags in `fi`.
 */
986 static void bcachefs_fuse_create(fuse_req_t req, fuse_ino_t dir,
987 const char *name, mode_t mode,
988 struct fuse_file_info *fi)
990 struct bch_fs *c = fuse_req_userdata(req);
991 struct bch_inode_unpacked new_inode;
994 ret = do_create(c, dir, name, mode, 0, &new_inode);
998 struct fuse_entry_param e = inode_to_entry(c, &new_inode);
999 fuse_reply_create(req, &e, fi);
1002 fuse_reply_err(req, -ret);
/* Stub: zero-copy write_buf not implemented (commented out of the ops table). */
1007 static void bcachefs_fuse_write_buf(fuse_req_t req, fuse_ino_t inum,
1008 struct fuse_bufvec *bufv, off_t off,
1009 struct fuse_file_info *fi)
1011 struct bch_fs *c = fuse_req_userdata(req);
/* Stub: fallocate not implemented. */
1014 static void bcachefs_fuse_fallocate(fuse_req_t req, fuse_ino_t inum, int mode,
1015 off_t offset, off_t length,
1016 struct fuse_file_info *fi)
1018 struct bch_fs *c = fuse_req_userdata(req);
/*
 * Low-level FUSE operation table.  Commented-out entries correspond to the
 * stub handlers above that are not yet functional.
 */
1022 static const struct fuse_lowlevel_ops bcachefs_fuse_ops = {
1023 .init = bcachefs_fuse_init,
1024 .destroy = bcachefs_fuse_destroy,
1025 .lookup = bcachefs_fuse_lookup,
1026 .getattr = bcachefs_fuse_getattr,
1027 .setattr = bcachefs_fuse_setattr,
1028 .readlink = bcachefs_fuse_readlink,
1029 .mknod = bcachefs_fuse_mknod,
1030 .mkdir = bcachefs_fuse_mkdir,
1031 .unlink = bcachefs_fuse_unlink,
1032 .rmdir = bcachefs_fuse_rmdir,
1033 .symlink = bcachefs_fuse_symlink,
1034 .rename = bcachefs_fuse_rename,
1035 .link = bcachefs_fuse_link,
1036 .open = bcachefs_fuse_open,
1037 .read = bcachefs_fuse_read,
1038 .write = bcachefs_fuse_write,
1039 //.flush = bcachefs_fuse_flush,
1040 //.release = bcachefs_fuse_release,
1041 //.fsync = bcachefs_fuse_fsync,
1042 //.opendir = bcachefs_fuse_opendir,
1043 .readdir = bcachefs_fuse_readdir,
1044 //.readdirplus = bcachefs_fuse_readdirplus,
1045 //.releasedir = bcachefs_fuse_releasedir,
1046 //.fsyncdir = bcachefs_fuse_fsyncdir,
1047 .statfs = bcachefs_fuse_statfs,
1048 //.setxattr = bcachefs_fuse_setxattr,
1049 //.getxattr = bcachefs_fuse_getxattr,
1050 //.listxattr = bcachefs_fuse_listxattr,
1051 //.removexattr = bcachefs_fuse_removexattr,
1052 .create = bcachefs_fuse_create,
/* File locking is not implemented. */
1056 .getlk = bcachefs_fuse_getlk,
1057 .setlk = bcachefs_fuse_setlk,
1059 //.write_buf = bcachefs_fuse_write_buf,
1060 //.fallocate = bcachefs_fuse_fallocate,
1065 * Setup and command parsing.
1074 static void bf_context_free(struct bf_context *ctx)
1078 free(ctx->devices_str);
1079 for (i = 0; i < ctx->nr_devices; ++i)
1080 free(ctx->devices[i]);
1084 static struct fuse_opt bf_opts[] = {
1089  * Fuse option parsing helper -- returning 0 means we consumed the argument, 1
/*
 * The first non-option argument is the device list; everything else is
 * left for libfuse to interpret.
 */
1092 static int bf_opt_proc(void *data, const char *arg, int key,
1093 struct fuse_args *outargs)
1095 struct bf_context *ctx = data;
1098 case FUSE_OPT_KEY_NONOPT:
1099 /* Just extract the first non-option string. */
1100 if (!ctx->devices_str) {
1101 ctx->devices_str = strdup(arg);
1111 * dev1:dev2 -> [ dev1, dev2 ]
1114 static void tokenize_devices(struct bf_context *ctx)
1116 char *devices_str = strdup(ctx->devices_str);
1117 char *devices_tmp = devices_str;
1118 char **devices = NULL;
1122 while ((dev = strsep(&devices_tmp, ":"))) {
1123 if (strlen(dev) > 0) {
1124 devices = realloc(devices, (nr + 1) * sizeof *devices);
1125 devices[nr] = strdup(dev);
1131 devices = malloc(sizeof *devices);
1132 devices[0] = strdup(ctx->devices_str);
1136 ctx->devices = devices;
1137 ctx->nr_devices = nr;
/* Print the fusemount usage line. */
1142 static void usage(char *argv[])
1144 printf("Usage: %s fusemount [options] <dev>[:dev2:...] <mountpoint>\n",
/*
 * Entry point for `bcachefs fusemount`: parse our options and libfuse's,
 * handle --help/--version, open the filesystem on the given device(s),
 * then create/mount a FUSE session and run its event loop until unmount.
 */
1149 int cmd_fusemount(int argc, char *argv[])
1151 struct fuse_args args = FUSE_ARGS_INIT(argc, argv);
1152 struct bch_opts bch_opts = bch2_opts_empty();
1153 struct bf_context ctx = { 0 };
1154 struct bch_fs *c = NULL;
1157 /* Parse arguments. */
1158 if (fuse_opt_parse(&args, &ctx, bf_opts, bf_opt_proc) < 0)
1159 die("fuse_opt_parse err: %m");
1161 struct fuse_cmdline_opts fuse_opts;
1162 if (fuse_parse_cmdline(&args, &fuse_opts) < 0)
1163 die("fuse_parse_cmdline err: %m");
1165 if (fuse_opts.show_help) {
1167 fuse_cmdline_help();
1168 fuse_lowlevel_help();
1172 if (fuse_opts.show_version) {
1173 /* TODO: Show bcachefs version. */
1174 printf("FUSE library version %s\n", fuse_pkgversion());
1175 fuse_lowlevel_version();
1179 if (!fuse_opts.mountpoint) {
1181 printf("Please supply a mountpoint.\n");
1185 if (!ctx.devices_str) {
1187 printf("Please specify a device or device1:device2:...\n");
1191 tokenize_devices(&ctx);
1194 printf("Opening bcachefs filesystem on:\n");
1195 for (i = 0; i < ctx.nr_devices; ++i)
1196 printf("\t%s\n", ctx.devices[i]);
/* bch2_fs_open returns an ERR_PTR on failure, not NULL. */
1198 c = bch2_fs_open(ctx.devices, ctx.nr_devices, bch_opts);
1200 die("error opening %s: %s", ctx.devices_str,
1201 strerror(-PTR_ERR(c)));
/* The bch_fs pointer becomes the userdata seen by every handler. */
1204 struct fuse_session *se =
1205 fuse_session_new(&args, &bcachefs_fuse_ops,
1206 sizeof(bcachefs_fuse_ops), c);
1208 die("fuse_lowlevel_new err: %m");
1210 if (fuse_set_signal_handlers(se) < 0)
1211 die("fuse_set_signal_handlers err: %m");
1213 if (fuse_session_mount(se, fuse_opts.mountpoint))
1214 die("fuse_mount err: %m");
/* Daemonize unless -f/foreground was requested. */
1216 fuse_daemonize(fuse_opts.foreground);
1218 ret = fuse_session_loop(se);
/* Teardown in reverse order of setup. */
1221 fuse_session_unmount(se);
1222 fuse_remove_signal_handlers(se);
1223 fuse_session_destroy(se);
1226 free(fuse_opts.mountpoint);
1227 fuse_opt_free_args(&args);
1228 bf_context_free(&ctx);