+#ifdef BCACHEFS_FUSE
+
#include <errno.h>
#include <float.h>
#include <getopt.h>
#include "libbcachefs/btree_iter.h"
#include "libbcachefs/buckets.h"
#include "libbcachefs/dirent.h"
+#include "libbcachefs/errcode.h"
#include "libbcachefs/error.h"
#include "libbcachefs/fs-common.h"
#include "libbcachefs/inode.h"
-#include "libbcachefs/io.h"
+#include "libbcachefs/io_read.h"
+#include "libbcachefs/io_write.h"
#include "libbcachefs/opts.h"
#include "libbcachefs/super.h"
/* XXX cut and pasted from fsck.c */
#define QSTR(n) { { { .len = strlen(n) } }, .name = n }
-static inline u64 map_root_ino(u64 ino)
+/* used by write_aligned function for waiting on bch2_write closure */
+struct write_aligned_op_t {
+ struct closure cl;
+
+ /* must be last: */
+ struct bch_write_op op;
+};
+
+
+static inline subvol_inum map_root_ino(u64 ino)
{
- return ino == 1 ? 4096 : ino;
+ return (subvol_inum) { 1, ino == 1 ? 4096 : ino };
}
static inline u64 unmap_root_ino(u64 ino)
struct bch_inode_unpacked *bi)
{
return (struct stat) {
- .st_ino = bi->bi_inum,
+ .st_ino = unmap_root_ino(bi->bi_inum),
.st_size = bi->bi_size,
.st_mode = bi->bi_mode,
.st_uid = bi->bi_uid,
struct bch_inode_unpacked *bi)
{
return (struct fuse_entry_param) {
- .ino = bi->bi_inum,
+ .ino = unmap_root_ino(bi->bi_inum),
.generation = bi->bi_generation,
.attr = inode_to_stat(c, bi),
.attr_timeout = DBL_MAX,
bch2_fs_stop(c);
}
-static void bcachefs_fuse_lookup(fuse_req_t req, fuse_ino_t dir,
+static void bcachefs_fuse_lookup(fuse_req_t req, fuse_ino_t dir_ino,
const char *name)
{
+ subvol_inum dir = map_root_ino(dir_ino);
struct bch_fs *c = fuse_req_userdata(req);
struct bch_inode_unpacked bi;
struct qstr qstr = QSTR(name);
- u64 inum;
+ subvol_inum inum;
int ret;
- dir = map_root_ino(dir);
+ fuse_log(FUSE_LOG_DEBUG, "fuse_lookup(dir=%llu name=%s)\n",
+ dir.inum, name);
ret = bch2_inode_find_by_inum(c, dir, &bi);
if (ret) {
struct bch_hash_info hash_info = bch2_hash_info_init(c, &bi);
- inum = bch2_dirent_lookup(c, dir, &hash_info, &qstr);
- if (!inum) {
- ret = -ENOENT;
- goto err;
+ ret = bch2_dirent_lookup(c, dir, &hash_info, &qstr, &inum);
+ if (ret) {
+ struct fuse_entry_param e = {
+ .attr_timeout = DBL_MAX,
+ .entry_timeout = DBL_MAX,
+ };
+ fuse_reply_entry(req, &e);
+ return;
}
ret = bch2_inode_find_by_inum(c, inum, &bi);
if (ret)
goto err;
- bi.bi_inum = unmap_root_ino(bi.bi_inum);
+ fuse_log(FUSE_LOG_DEBUG, "fuse_lookup ret(inum=%llu)\n",
+ bi.bi_inum);
struct fuse_entry_param e = inode_to_entry(c, &bi);
fuse_reply_entry(req, &e);
return;
err:
+ fuse_log(FUSE_LOG_DEBUG, "fuse_lookup error %i\n", ret);
fuse_reply_err(req, -ret);
}
-static void bcachefs_fuse_getattr(fuse_req_t req, fuse_ino_t inum,
+static void bcachefs_fuse_getattr(fuse_req_t req, fuse_ino_t ino,
struct fuse_file_info *fi)
{
+ subvol_inum inum = map_root_ino(ino);
struct bch_fs *c = fuse_req_userdata(req);
struct bch_inode_unpacked bi;
struct stat attr;
- int ret;
- inum = map_root_ino(inum);
+ fuse_log(FUSE_LOG_DEBUG, "fuse_getattr(inum=%llu)\n", inum.inum);
- ret = bch2_inode_find_by_inum(c, inum, &bi);
+ int ret = bch2_inode_find_by_inum(c, inum, &bi);
if (ret) {
+ fuse_log(FUSE_LOG_DEBUG, "fuse_getattr error %i\n", ret);
fuse_reply_err(req, -ret);
return;
}
- bi.bi_inum = unmap_root_ino(bi.bi_inum);
+ fuse_log(FUSE_LOG_DEBUG, "fuse_getattr success\n");
attr = inode_to_stat(c, &bi);
fuse_reply_attr(req, &attr, DBL_MAX);
}
-static void bcachefs_fuse_setattr(fuse_req_t req, fuse_ino_t inum,
+static void bcachefs_fuse_setattr(fuse_req_t req, fuse_ino_t ino,
struct stat *attr, int to_set,
struct fuse_file_info *fi)
{
struct bch_fs *c = fuse_req_userdata(req);
struct bch_inode_unpacked inode_u;
- struct btree_trans trans;
- struct btree_iter *iter;
+ struct btree_trans *trans;
+ struct btree_iter iter;
u64 now;
int ret;
- inum = map_root_ino(inum);
+ subvol_inum inum = map_root_ino(ino);
- bch2_trans_init(&trans, c, 0, 0);
+ fuse_log(FUSE_LOG_DEBUG, "bcachefs_fuse_setattr(%llu, %x)\n", inum.inum, to_set);
+
+ trans = bch2_trans_get(c);
retry:
- bch2_trans_begin(&trans);
+ bch2_trans_begin(trans);
now = bch2_current_time(c);
- iter = bch2_inode_peek(&trans, &inode_u, inum, BTREE_ITER_INTENT);
- ret = PTR_ERR_OR_ZERO(iter);
+ ret = bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_INTENT);
if (ret)
goto err;
inode_u.bi_mtime = now;
/* TODO: CTIME? */
- ret = bch2_inode_write(&trans, iter, &inode_u) ?:
- bch2_trans_commit(&trans, NULL, NULL,
- BTREE_INSERT_ATOMIC|
- BTREE_INSERT_NOFAIL);
+ ret = bch2_inode_write(trans, &iter, &inode_u) ?:
+ bch2_trans_commit(trans, NULL, NULL,
+ BCH_TRANS_COMMIT_no_enospc);
err:
+ bch2_trans_iter_exit(trans, &iter);
if (ret == -EINTR)
goto retry;
- bch2_trans_exit(&trans);
+ bch2_trans_put(trans);
if (!ret) {
*attr = inode_to_stat(c, &inode_u);
}
}
-static void bcachefs_fuse_readlink(fuse_req_t req, fuse_ino_t inum)
-{
- //struct bch_fs *c = fuse_req_userdata(req);
-
- //char *link = malloc();
-
- //fuse_reply_readlink(req, link);
-}
-
-static int do_create(struct bch_fs *c, u64 dir,
+static int do_create(struct bch_fs *c, subvol_inum dir,
const char *name, mode_t mode, dev_t rdev,
struct bch_inode_unpacked *new_inode)
{
struct qstr qstr = QSTR(name);
struct bch_inode_unpacked dir_u;
-
- dir = map_root_ino(dir);
+ uid_t uid = 0;
+ gid_t gid = 0;
bch2_inode_init_early(c, new_inode);
- return bch2_trans_do(c, NULL, BTREE_INSERT_ATOMIC,
- bch2_create_trans(&trans,
+ return bch2_trans_do(c, NULL, NULL, 0,
+ bch2_create_trans(trans,
dir, &dir_u,
new_inode, &qstr,
- 0, 0, mode, rdev, NULL, NULL));
+ uid, gid, mode, rdev, NULL, NULL,
+ (subvol_inum) { 0 }, 0));
}
-static void bcachefs_fuse_mknod(fuse_req_t req, fuse_ino_t dir,
+static void bcachefs_fuse_mknod(fuse_req_t req, fuse_ino_t dir_ino,
const char *name, mode_t mode,
dev_t rdev)
{
+ subvol_inum dir = map_root_ino(dir_ino);
struct bch_fs *c = fuse_req_userdata(req);
struct bch_inode_unpacked new_inode;
int ret;
+ fuse_log(FUSE_LOG_DEBUG, "bcachefs_fuse_mknod(%llu, %s, %x, %x)\n",
+ dir.inum, name, mode, rdev);
+
ret = do_create(c, dir, name, mode, rdev, &new_inode);
if (ret)
goto err;
/*
 * FUSE mkdir: FUSE hands us permission bits only, so tag on S_IFDIR
 * and reuse the mknod path.
 */
static void bcachefs_fuse_mkdir(fuse_req_t req, fuse_ino_t dir,
				const char *name, mode_t mode)
{
	fuse_log(FUSE_LOG_DEBUG, "bcachefs_fuse_mkdir(%llu, %s, %x)\n",
		 dir, name, mode);

	/* The file-type bits must not already be set. */
	BUG_ON(mode & S_IFMT);

	bcachefs_fuse_mknod(req, dir, name, mode | S_IFDIR, 0);
}
-static void bcachefs_fuse_unlink(fuse_req_t req, fuse_ino_t dir,
+static void bcachefs_fuse_unlink(fuse_req_t req, fuse_ino_t dir_ino,
const char *name)
{
struct bch_fs *c = fuse_req_userdata(req);
struct bch_inode_unpacked dir_u, inode_u;
struct qstr qstr = QSTR(name);
- int ret;
+ subvol_inum dir = map_root_ino(dir_ino);
- dir = map_root_ino(dir);
+ fuse_log(FUSE_LOG_DEBUG, "bcachefs_fuse_unlink(%llu, %s)\n", dir.inum, name);
- ret = bch2_trans_do(c, NULL, BTREE_INSERT_ATOMIC|BTREE_INSERT_NOFAIL,
- bch2_unlink_trans(&trans, dir, &dir_u,
- &inode_u, &qstr));
+ int ret = bch2_trans_do(c, NULL, NULL,
+ BCH_TRANS_COMMIT_no_enospc,
+ bch2_unlink_trans(trans, dir, &dir_u,
+ &inode_u, &qstr, false));
fuse_reply_err(req, -ret);
}
/*
 * FUSE rmdir: directories are removed the same way as files here;
 * delegate to the unlink handler (which maps the root ino itself).
 */
static void bcachefs_fuse_rmdir(fuse_req_t req, fuse_ino_t dir,
				const char *name)
{
	fuse_log(FUSE_LOG_DEBUG, "bcachefs_fuse_rmdir(%llu, %s)\n", dir, name);

	bcachefs_fuse_unlink(req, dir, name);
}
-#if 0
-static void bcachefs_fuse_symlink(fuse_req_t req, const char *link,
- fuse_ino_t parent, const char *name)
-{
- struct bch_fs *c = fuse_req_userdata(req);
-}
-#endif
-
static void bcachefs_fuse_rename(fuse_req_t req,
- fuse_ino_t src_dir, const char *srcname,
- fuse_ino_t dst_dir, const char *dstname,
+ fuse_ino_t src_dir_ino, const char *srcname,
+ fuse_ino_t dst_dir_ino, const char *dstname,
unsigned flags)
{
struct bch_fs *c = fuse_req_userdata(req);
struct bch_inode_unpacked src_inode_u, dst_inode_u;
struct qstr dst_name = QSTR(srcname);
struct qstr src_name = QSTR(dstname);
+ subvol_inum src_dir = map_root_ino(src_dir_ino);
+ subvol_inum dst_dir = map_root_ino(dst_dir_ino);
int ret;
- src_dir = map_root_ino(src_dir);
- dst_dir = map_root_ino(dst_dir);
+ fuse_log(FUSE_LOG_DEBUG,
+ "bcachefs_fuse_rename(%llu, %s, %llu, %s, %x)\n",
+ src_dir.inum, srcname, dst_dir.inum, dstname, flags);
/* XXX handle overwrites */
- ret = bch2_trans_do(c, NULL, BTREE_INSERT_ATOMIC,
- bch2_rename_trans(&trans,
+ ret = bch2_trans_do(c, NULL, NULL, 0,
+ bch2_rename_trans(trans,
src_dir, &src_dir_u,
dst_dir, &dst_dir_u,
&src_inode_u, &dst_inode_u,
fuse_reply_err(req, -ret);
}
-static void bcachefs_fuse_link(fuse_req_t req, fuse_ino_t inum,
- fuse_ino_t newparent, const char *newname)
+static void bcachefs_fuse_link(fuse_req_t req, fuse_ino_t ino,
+ fuse_ino_t newparent_ino, const char *newname)
{
struct bch_fs *c = fuse_req_userdata(req);
- struct bch_inode_unpacked inode_u;
+ struct bch_inode_unpacked dir_u, inode_u;
struct qstr qstr = QSTR(newname);
+ subvol_inum newparent = map_root_ino(newparent_ino);
+ subvol_inum inum = map_root_ino(ino);
int ret;
- ret = bch2_trans_do(c, NULL, BTREE_INSERT_ATOMIC,
- bch2_link_trans(&trans, newparent,
+ fuse_log(FUSE_LOG_DEBUG, "bcachefs_fuse_link(%llu, %llu, %s)\n",
+ inum.inum, newparent.inum, newname);
+
+ ret = bch2_trans_do(c, NULL, NULL, 0,
+ bch2_link_trans(trans, newparent, &dir_u,
inum, &inode_u, &qstr));
if (!ret) {
/*
 * Set up a single-vec bio over a userspace buffer.
 *
 * NOTE(review): bv_page is assigned the raw buffer pointer, not a
 * struct page -- this relies on the userspace bio shim treating
 * bv_page as an address; confirm against the tools' bio emulation.
 */
static void userbio_init(struct bio *bio, struct bio_vec *bv,
			 void *buf, size_t size)
{
	bio_init(bio, NULL, bv, 1, 0);
	bio->bi_iter.bi_size	= size;

	bv->bv_page	= buf;
	bv->bv_len	= size;
	bv->bv_offset	= 0;
}
-static int get_inode_io_opts(struct bch_fs *c, u64 inum,
- struct bch_io_opts *opts)
+static int get_inode_io_opts(struct bch_fs *c, subvol_inum inum, struct bch_io_opts *opts)
{
struct bch_inode_unpacked inode;
if (bch2_inode_find_by_inum(c, inum, &inode))
return -EINVAL;
- *opts = bch2_opts_to_inode_opts(c->opts);
- bch2_io_opts_apply(opts, bch2_inode_opts_get(&inode));
+ bch2_inode_opts_get(opts, c, &inode);
return 0;
}
closure_put(bio->bi_private);
}
-static void bcachefs_fuse_read(fuse_req_t req, fuse_ino_t inum,
- size_t size, off_t offset,
- struct fuse_file_info *fi)
+
+static void bcachefs_fuse_write_endio(struct bch_write_op *op)
{
- struct bch_fs *c = fuse_req_userdata(req);
+ struct write_aligned_op_t *w = container_of(op,struct write_aligned_op_t,op);
+ closure_put(&w->cl);
+}
- if ((size|offset) & (block_bytes(c) - 1)) {
- fuse_log(FUSE_LOG_DEBUG,
- "bcachefs_fuse_read: unaligned io not supported.\n");
- fuse_reply_err(req, EINVAL);
- return;
- }
- struct bch_io_opts io_opts;
- if (get_inode_io_opts(c, inum, &io_opts)) {
- fuse_reply_err(req, ENOENT);
- return;
- }
/*
 * Describes how an arbitrary (offset, size) IO maps onto block-aligned
 * IO: [start, end) is the aligned range, pad_start/pad_end are the
 * slack bytes before/after the caller's data, size = end - start.
 */
struct fuse_align_io {
	off_t	start;		/* aligned start offset */
	size_t	pad_start;	/* bytes of padding before the payload */
	off_t	end;		/* aligned end offset */
	size_t	pad_end;	/* bytes of padding after the payload */
	size_t	size;		/* total aligned length */
};
- void *buf = aligned_alloc(max(PAGE_SIZE, size), size);
- if (!buf) {
- fuse_reply_err(req, ENOMEM);
- return;
+/* Handle unaligned start and end */
+/* TODO: align to block_bytes, sector size, or page size? */
+static struct fuse_align_io align_io(const struct bch_fs *c, size_t size,
+ off_t offset)
+{
+ struct fuse_align_io align;
+
+ BUG_ON(offset < 0);
+
+ align.start = round_down(offset, block_bytes(c));
+ align.pad_start = offset - align.start;
+
+ off_t end = offset + size;
+ align.end = round_up(end, block_bytes(c));
+ align.pad_end = align.end - end;
+
+ align.size = align.end - align.start;
+
+ return align;
+}
+
+/*
+ * Given an aligned number of bytes transferred, figure out how many unaligned
+ * bytes were transferred.
+ */
+static size_t align_fix_up_bytes(const struct fuse_align_io *align,
+ size_t align_bytes)
+{
+ size_t bytes = 0;
+
+ if (align_bytes > align->pad_start) {
+ bytes = align_bytes - align->pad_start;
+ bytes = bytes > align->pad_end ? bytes - align->pad_end : 0;
}
- struct bch_read_bio rbio;
- struct bio_vec bv;
- struct closure cl;
+ return bytes;
+}
- closure_init_stack(&cl);
- userbio_init(&rbio.bio, &bv, buf, size);
+/*
+ * Read aligned data.
+ */
+static int read_aligned(struct bch_fs *c, subvol_inum inum, size_t aligned_size,
+ off_t aligned_offset, void *buf)
+{
+ BUG_ON(aligned_size & (block_bytes(c) - 1));
+ BUG_ON(aligned_offset & (block_bytes(c) - 1));
+
+ struct bch_io_opts io_opts;
+ if (get_inode_io_opts(c, inum, &io_opts))
+ return -ENOENT;
+
+ struct bch_read_bio rbio;
+ struct bio_vec bv;
+ userbio_init(&rbio.bio, &bv, buf, aligned_size);
bio_set_op_attrs(&rbio.bio, REQ_OP_READ, REQ_SYNC);
- rbio.bio.bi_iter.bi_sector = offset >> 9;
+ rbio.bio.bi_iter.bi_sector = aligned_offset >> 9;
+
+ struct closure cl;
+ closure_init_stack(&cl);
+
closure_get(&cl);
rbio.bio.bi_end_io = bcachefs_fuse_read_endio;
rbio.bio.bi_private = &cl;
closure_sync(&cl);
- if (likely(!rbio.bio.bi_status)) {
- fuse_reply_buf(req, buf, size);
- } else {
- fuse_reply_err(req, -blk_status_to_errno(rbio.bio.bi_status));
+ return -blk_status_to_errno(rbio.bio.bi_status);
+}
+
+static void bcachefs_fuse_read(fuse_req_t req, fuse_ino_t ino,
+ size_t size, off_t offset,
+ struct fuse_file_info *fi)
+{
+ subvol_inum inum = map_root_ino(ino);
+ struct bch_fs *c = fuse_req_userdata(req);
+
+ fuse_log(FUSE_LOG_DEBUG, "bcachefs_fuse_read(%llu, %zd, %lld)\n",
+ inum, size, offset);
+
+ /* Check inode size. */
+ struct bch_inode_unpacked bi;
+ int ret = bch2_inode_find_by_inum(c, inum, &bi);
+ if (ret) {
+ fuse_reply_err(req, -ret);
+ return;
}
+ off_t end = min_t(u64, bi.bi_size, offset + size);
+ if (end <= offset) {
+ fuse_reply_buf(req, NULL, 0);
+ return;
+ }
+ size = end - offset;
+
+ struct fuse_align_io align = align_io(c, size, offset);
+
+ void *buf = aligned_alloc(PAGE_SIZE, align.size);
+ if (!buf) {
+ fuse_reply_err(req, ENOMEM);
+ return;
+ }
+
+ ret = read_aligned(c, inum, align.size, align.start, buf);
+
+ if (likely(!ret))
+ fuse_reply_buf(req, buf + align.pad_start, size);
+ else
+ fuse_reply_err(req, -ret);
+
free(buf);
}
-static int write_set_inode(struct bch_fs *c, fuse_ino_t inum, off_t new_size)
+static int inode_update_times(struct bch_fs *c, subvol_inum inum)
{
- struct btree_trans trans;
- struct btree_iter *iter;
+ struct btree_trans *trans;
+ struct btree_iter iter;
struct bch_inode_unpacked inode_u;
int ret = 0;
u64 now;
- bch2_trans_init(&trans, c, 0, 0);
+ trans = bch2_trans_get(c);
retry:
- bch2_trans_begin(&trans);
+ bch2_trans_begin(trans);
now = bch2_current_time(c);
- iter = bch2_inode_peek(&trans, &inode_u, inum, BTREE_ITER_INTENT);
- ret = PTR_ERR_OR_ZERO(iter);
+ ret = bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_INTENT);
if (ret)
goto err;
- inode_u.bi_size = max_t(u64, inode_u.bi_size, new_size);
inode_u.bi_mtime = now;
inode_u.bi_ctime = now;
- ret = bch2_inode_write(&trans, iter, &inode_u);
+ ret = bch2_inode_write(trans, &iter, &inode_u);
if (ret)
goto err;
- ret = bch2_trans_commit(&trans, NULL, NULL,
- BTREE_INSERT_ATOMIC|BTREE_INSERT_NOFAIL);
-
+ ret = bch2_trans_commit(trans, NULL, NULL,
+ BCH_TRANS_COMMIT_no_enospc);
err:
+ bch2_trans_iter_exit(trans, &iter);
if (ret == -EINTR)
goto retry;
- bch2_trans_exit(&trans);
+ bch2_trans_put(trans);
return ret;
}
-static void bcachefs_fuse_write(fuse_req_t req, fuse_ino_t inum,
+static int write_aligned(struct bch_fs *c, subvol_inum inum,
+ struct bch_io_opts io_opts, void *buf,
+ size_t aligned_size, off_t aligned_offset,
+ off_t new_i_size, size_t *written_out)
+{
+
+ struct write_aligned_op_t w = { 0 }
+;
+ struct bch_write_op *op = &w.op;
+ struct bio_vec bv;
+
+ BUG_ON(aligned_size & (block_bytes(c) - 1));
+ BUG_ON(aligned_offset & (block_bytes(c) - 1));
+
+ *written_out = 0;
+
+ closure_init_stack(&w.cl);
+
+ bch2_write_op_init(op, c, io_opts); /* XXX reads from op?! */
+ op->write_point = writepoint_hashed(0);
+ op->nr_replicas = io_opts.data_replicas;
+ op->target = io_opts.foreground_target;
+ op->subvol = inum.subvol;
+ op->pos = POS(inum.inum, aligned_offset >> 9);
+ op->new_i_size = new_i_size;
+ op->end_io = bcachefs_fuse_write_endio;
+
+ userbio_init(&op->wbio.bio, &bv, buf, aligned_size);
+ bio_set_op_attrs(&op->wbio.bio, REQ_OP_WRITE, REQ_SYNC);
+
+ if (bch2_disk_reservation_get(c, &op->res, aligned_size >> 9,
+ op->nr_replicas, 0)) {
+ /* XXX: use check_range_allocated like dio write path */
+ return -ENOSPC;
+ }
+
+ closure_get(&w.cl);
+
+ closure_call(&op->cl, bch2_write, NULL, NULL);
+
+ closure_sync(&w.cl);
+
+ if (!op->error)
+ *written_out = op->written << 9;
+
+ return op->error;
+}
+
+static void bcachefs_fuse_write(fuse_req_t req, fuse_ino_t ino,
const char *buf, size_t size,
off_t offset,
struct fuse_file_info *fi)
{
+ subvol_inum inum = map_root_ino(ino);
struct bch_fs *c = fuse_req_userdata(req);
struct bch_io_opts io_opts;
- struct bch_write_op op;
- struct bio_vec bv;
- struct closure cl;
+ size_t aligned_written;
+ int ret = 0;
- if ((size|offset) & (block_bytes(c) - 1)) {
- fuse_log(FUSE_LOG_DEBUG,
- "bcachefs_fuse_write: unaligned io not supported.\n");
- fuse_reply_err(req, EINVAL);
- return;
- }
+ fuse_log(FUSE_LOG_DEBUG, "bcachefs_fuse_write(%llu, %zd, %lld)\n",
+ inum, size, offset);
- closure_init_stack(&cl);
+ struct fuse_align_io align = align_io(c, size, offset);
+ void *aligned_buf = aligned_alloc(PAGE_SIZE, align.size);
+ BUG_ON(!aligned_buf);
if (get_inode_io_opts(c, inum, &io_opts)) {
- fuse_reply_err(req, ENOENT);
- return;
+ ret = -ENOENT;
+ goto err;
}
- bch2_write_op_init(&op, c, io_opts);
- op.write_point = writepoint_hashed(0);
- op.nr_replicas = io_opts.data_replicas;
- op.target = io_opts.foreground_target;
- op.pos = POS(inum, offset >> 9);
+ /* Realign the data and read in start and end, if needed */
- userbio_init(&op.wbio.bio, &bv, (void *) buf, size);
- bio_set_op_attrs(&op.wbio.bio, REQ_OP_WRITE, REQ_SYNC);
+ /* Read partial start data. */
+ if (align.pad_start) {
+ memset(aligned_buf, 0, block_bytes(c));
- if (bch2_disk_reservation_get(c, &op.res, size >> 9,
- op.nr_replicas, 0)) {
- /* XXX: use check_range_allocated like dio write path */
- fuse_reply_err(req, ENOSPC);
- return;
+ ret = read_aligned(c, inum, block_bytes(c), align.start,
+ aligned_buf);
+ if (ret)
+ goto err;
}
- closure_call(&op.cl, bch2_write, NULL, &cl);
- closure_sync(&cl);
+ /*
+ * Read partial end data. If the whole write fits in one block, the
+ * start data and the end data are the same so this isn't needed.
+ */
+ if (align.pad_end &&
+ !(align.pad_start && align.size == block_bytes(c))) {
+ off_t partial_end_start = align.end - block_bytes(c);
+ size_t buf_offset = align.size - block_bytes(c);
+
+ memset(aligned_buf + buf_offset, 0, block_bytes(c));
+
+ ret = read_aligned(c, inum, block_bytes(c), partial_end_start,
+ aligned_buf + buf_offset);
+ if (ret)
+ goto err;
+ }
+
+ /* Overlay what we want to write. */
+ memcpy(aligned_buf + align.pad_start, buf, size);
+
+ /* Actually write. */
+ ret = write_aligned(c, inum, io_opts, aligned_buf,
+ align.size, align.start,
+ offset + size, &aligned_written);
+
+ /* Figure out how many unaligned bytes were written. */
+ size_t written = align_fix_up_bytes(&align, aligned_written);
+ BUG_ON(written > size);
+
+ fuse_log(FUSE_LOG_DEBUG, "bcachefs_fuse_write: wrote %zd bytes\n",
+ written);
+
+ if (written > 0)
+ ret = 0;
/*
- * Update inode data.
- * TODO: could possibly do asynchronously.
- * TODO: could also possibly do atomically with the extents.
+ * Update inode times.
+ * TODO: Integrate with bch2_extent_update()
*/
- if (!op.error)
- op.error = write_set_inode(c, inum, offset + size);
+ if (!ret)
+ ret = inode_update_times(c, inum);
- if (!op.error) {
- BUG_ON(op.written == 0);
- fuse_reply_write(req, (size_t) op.written << 9);
- } else {
- BUG_ON(!op.error);
- fuse_reply_err(req, -op.error);
+ if (!ret) {
+ BUG_ON(written == 0);
+ fuse_reply_write(req, written);
+ free(aligned_buf);
+ return;
}
+
+err:
+ fuse_reply_err(req, -ret);
+ free(aligned_buf);
+}
+
+static void bcachefs_fuse_symlink(fuse_req_t req, const char *link,
+ fuse_ino_t dir_ino, const char *name)
+{
+ subvol_inum dir = map_root_ino(dir_ino);
+ struct bch_fs *c = fuse_req_userdata(req);
+ struct bch_inode_unpacked new_inode;
+ size_t link_len = strlen(link);
+ int ret;
+
+ fuse_log(FUSE_LOG_DEBUG, "bcachefs_fuse_symlink(%s, %llu, %s)\n",
+ link, dir.inum, name);
+
+ ret = do_create(c, dir, name, S_IFLNK|S_IRWXUGO, 0, &new_inode);
+ if (ret)
+ goto err;
+
+ struct bch_io_opts io_opts;
+ ret = get_inode_io_opts(c, dir, &io_opts);
+ if (ret)
+ goto err;
+
+ struct fuse_align_io align = align_io(c, link_len + 1, 0);
+
+ void *aligned_buf = aligned_alloc(PAGE_SIZE, align.size);
+ BUG_ON(!aligned_buf);
+
+ memset(aligned_buf, 0, align.size);
+ memcpy(aligned_buf, link, link_len); /* already terminated */
+
+ subvol_inum inum = (subvol_inum) { dir.subvol, new_inode.bi_inum };
+
+ size_t aligned_written;
+ ret = write_aligned(c, inum, io_opts, aligned_buf,
+ align.size, align.start, link_len + 1,
+ &aligned_written);
+ free(aligned_buf);
+
+ if (ret)
+ goto err;
+
+ size_t written = align_fix_up_bytes(&align, aligned_written);
+ BUG_ON(written != link_len + 1); // TODO: handle short
+
+ ret = inode_update_times(c, inum);
+ if (ret)
+ goto err;
+
+ new_inode.bi_size = written;
+
+ struct fuse_entry_param e = inode_to_entry(c, &new_inode);
+ fuse_reply_entry(req, &e);
+ return;
+
+err:
+ fuse_reply_err(req, -ret);
+}
+
+static void bcachefs_fuse_readlink(fuse_req_t req, fuse_ino_t ino)
+{
+ subvol_inum inum = map_root_ino(ino);
+ struct bch_fs *c = fuse_req_userdata(req);
+ char *buf = NULL;
+
+ fuse_log(FUSE_LOG_DEBUG, "bcachefs_fuse_readlink(%llu)\n", inum.inum);
+
+ struct bch_inode_unpacked bi;
+ int ret = bch2_inode_find_by_inum(c, inum, &bi);
+ if (ret)
+ goto err;
+
+ struct fuse_align_io align = align_io(c, bi.bi_size, 0);
+
+ ret = -ENOMEM;
+ buf = aligned_alloc(PAGE_SIZE, align.size);
+ if (!buf)
+ goto err;
+
+ ret = read_aligned(c, inum, align.size, align.start, buf);
+ if (ret)
+ goto err;
+
+ BUG_ON(buf[align.size - 1] != 0);
+
+ fuse_reply_readlink(req, buf);
+
+err:
+ if (ret)
+ fuse_reply_err(req, -ret);
+
+ free(buf);
}
#if 0
}
#endif
-struct fuse_dir_entry {
- u64 ino;
- unsigned type;
- char name[0];
-};
-
/*
 * State threaded from bcachefs_fuse_readdir() through bch2_readdir()
 * into fuse_filldir(); buf/bufsize track the remaining reply space.
 */
struct fuse_dir_context {
	struct dir_context	ctx;
	fuse_req_t		req;
	char			*buf;
	size_t			bufsize;
};
-static int fuse_send_dir_entry(struct fuse_dir_context *ctx, loff_t pos)
-{
- struct fuse_dir_entry *de = ctx->prev;
- ctx->prev = NULL;
+struct fuse_dirent {
+ uint64_t ino;
+ uint64_t off;
+ uint32_t namelen;
+ uint32_t type;
+ char name[];
+};
- struct stat statbuf = {
- .st_ino = unmap_root_ino(de->ino),
- .st_mode = de->type << 12,
- };
+#define FUSE_NAME_OFFSET offsetof(struct fuse_dirent, name)
+#define FUSE_DIRENT_ALIGN(x) \
+ (((x) + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1))
- size_t len = fuse_add_direntry(ctx->req, ctx->buf, ctx->bufsize,
- de->name, &statbuf, pos);
+static size_t fuse_add_direntry2(char *buf, size_t bufsize,
+ const char *name, int namelen,
+ const struct stat *stbuf, off_t off)
+{
+ size_t entlen = FUSE_NAME_OFFSET + namelen;
+ size_t entlen_padded = FUSE_DIRENT_ALIGN(entlen);
+ struct fuse_dirent *dirent = (struct fuse_dirent *) buf;
- free(de);
+ if ((buf == NULL) || (entlen_padded > bufsize))
+ return entlen_padded;
- if (len > ctx->bufsize)
- return -EINVAL;
+ dirent->ino = stbuf->st_ino;
+ dirent->off = off;
+ dirent->namelen = namelen;
+ dirent->type = (stbuf->st_mode & S_IFMT) >> 12;
+ memcpy(dirent->name, name, namelen);
+ memset(dirent->name + namelen, 0, entlen_padded - entlen);
- ctx->buf += len;
- ctx->bufsize -= len;
-
- return 0;
+ return entlen_padded;
}
static int fuse_filldir(struct dir_context *_ctx,
struct fuse_dir_context *ctx =
container_of(_ctx, struct fuse_dir_context, ctx);
- fuse_log(FUSE_LOG_DEBUG, "fuse_filldir(ctx={.ctx={.pos=%llu}}, "
- "name=%s, namelen=%d, pos=%lld, dir=%llu, type=%u)\n",
- ctx->ctx.pos, name, namelen, pos, ino, type);
+ struct stat statbuf = {
+ .st_ino = unmap_root_ino(ino),
+ .st_mode = type << 12,
+ };
- /*
- * We have to emit directory entries after reading the next entry,
- * because the previous entry contains a pointer to next.
- */
- if (ctx->prev) {
- int ret = fuse_send_dir_entry(ctx, pos);
- if (ret)
- return ret;
- }
+ fuse_log(FUSE_LOG_DEBUG, "fuse_filldir(name=%s inum=%llu pos=%llu)\n",
+ name, statbuf.st_ino, pos);
- struct fuse_dir_entry *cur = malloc(sizeof *cur + namelen + 1);
- cur->ino = ino;
- cur->type = type;
- memcpy(cur->name, name, namelen);
- cur->name[namelen] = 0;
+ size_t len = fuse_add_direntry2(ctx->buf,
+ ctx->bufsize,
+ name,
+ namelen,
+ &statbuf,
+ pos + 1);
- ctx->prev = cur;
+ if (len > ctx->bufsize)
+ return -1;
+
+ ctx->buf += len;
+ ctx->bufsize -= len;
return 0;
}
/*
 * Emit the synthetic "." and ".." entries at the start of a readdir.
 * Returns false when the reply buffer filled up before both fit.
 */
static bool handle_dots(struct fuse_dir_context *ctx, fuse_ino_t dir)
{
	struct dir_context *dctx = &ctx->ctx;

	if (dctx->pos == 0) {
		if (fuse_filldir(dctx, ".", 1, dctx->pos,
				 dir, DT_DIR) < 0)
			return false;
		dctx->pos = 1;
	}

	if (dctx->pos == 1) {
		if (fuse_filldir(dctx, "..", 2, dctx->pos,
				 /*TODO: parent*/ 1, DT_DIR) < 0)
			return false;
		dctx->pos = 2;
	}

	return true;
}
-static void bcachefs_fuse_readdir(fuse_req_t req, fuse_ino_t dir,
+static void bcachefs_fuse_readdir(fuse_req_t req, fuse_ino_t dir_ino,
size_t size, off_t off,
struct fuse_file_info *fi)
{
+ subvol_inum dir = map_root_ino(dir_ino);
struct bch_fs *c = fuse_req_userdata(req);
struct bch_inode_unpacked bi;
char *buf = calloc(size, 1);
int ret = 0;
fuse_log(FUSE_LOG_DEBUG, "bcachefs_fuse_readdir(dir=%llu, size=%zu, "
- "off=%lld)\n", dir, size, off);
-
- dir = map_root_ino(dir);
+ "off=%lld)\n", dir.inum, size, off);
ret = bch2_inode_find_by_inum(c, dir, &bi);
if (ret)
goto reply;
}
- if (!handle_dots(&ctx, dir))
+ if (!handle_dots(&ctx, dir.inum))
goto reply;
ret = bch2_readdir(c, dir, &ctx.ctx);
-
reply:
- /*
- * If we have something to send, the error above doesn't matter.
- *
- * Alternatively, if this send fails, but we previously sent something,
- * then this is a success.
- */
- if (ctx.prev) {
- ret = fuse_send_dir_entry(&ctx, ctx.ctx.pos);
- if (ret && ctx.buf != buf)
- ret = 0;
- }
-
if (!ret) {
fuse_log(FUSE_LOG_DEBUG, "bcachefs_fuse_readdir reply %zd\n",
ctx.buf - buf);
}
#endif
-static void bcachefs_fuse_create(fuse_req_t req, fuse_ino_t dir,
+static void bcachefs_fuse_create(fuse_req_t req, fuse_ino_t dir_ino,
const char *name, mode_t mode,
struct fuse_file_info *fi)
{
+ subvol_inum dir = map_root_ino(dir_ino);
struct bch_fs *c = fuse_req_userdata(req);
struct bch_inode_unpacked new_inode;
int ret;
+ fuse_log(FUSE_LOG_DEBUG, "bcachefs_fuse_create(%llu, %s, %x)\n",
+ dir.inum, name, mode);
+
ret = do_create(c, dir, name, mode, 0, &new_inode);
if (ret)
goto err;
return;
err:
fuse_reply_err(req, -ret);
-
}
#if 0
.mkdir = bcachefs_fuse_mkdir,
.unlink = bcachefs_fuse_unlink,
.rmdir = bcachefs_fuse_rmdir,
- //.symlink = bcachefs_fuse_symlink,
+ .symlink = bcachefs_fuse_symlink,
.rename = bcachefs_fuse_rename,
.link = bcachefs_fuse_link,
.open = bcachefs_fuse_open,
}
tokenize_devices(&ctx);
+ struct printbuf fsname = PRINTBUF;
+ prt_printf(&fsname, "fsname=");
+ for (i = 0; i < ctx.nr_devices; ++i) {
+ if (i)
+ prt_str(&fsname, ":");
+ prt_str(&fsname, ctx.devices[i]);
+ }
+
+ fuse_opt_add_arg(&args, "-o");
+ fuse_opt_add_arg(&args, fsname.buf);
+
/* Open bch */
printf("Opening bcachefs filesystem on:\n");
for (i = 0; i < ctx.nr_devices; ++i)
c = bch2_fs_open(ctx.devices, ctx.nr_devices, bch_opts);
if (IS_ERR(c))
die("error opening %s: %s", ctx.devices_str,
- strerror(-PTR_ERR(c)));
+ bch2_err_str(PTR_ERR(c)));
/* Fuse */
struct fuse_session *se =
if (fuse_session_mount(se, fuse_opts.mountpoint))
die("fuse_mount err: %m");
+ /* This print statement is a trigger for tests. */
+ printf("Fuse mount initialized.\n");
+
+ if (fuse_opts.foreground == 0){
+ printf("Fuse forcing to foreground mode, due gcc constructors usage.\n");
+ fuse_opts.foreground = 1;
+ }
+
fuse_daemonize(fuse_opts.foreground);
ret = fuse_session_loop(se);
return ret ? 1 : 0;
}
+
+#endif /* BCACHEFS_FUSE */