// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_FS

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "fs.h"
#include "fs-io.h"
#include "fs-io-direct.h"
#include "fs-io-pagecache.h"
#include "io_read.h"
#include "io_write.h"

#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/prefetch.h>
#include <linux/task_io_accounting_ops.h>
/* O_DIRECT reads */

struct dio_read {
	struct closure		cl;
	struct kiocb		*req;
	long			ret;
	bool			should_dirty;
	struct bch_read_bio	rbio;
};

static void bio_check_or_release(struct bio *bio, bool check_dirty)
{
	if (check_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

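/*
 * Completion plumbing for O_DIRECT reads: each in-flight bio holds a ref on
 * dio->cl; split bios drop theirs in bch2_direct_IO_read_split_endio(), and
 * when the last ref goes away the async path completes the kiocb via
 * bch2_dio_read_complete() while the sync path wakes the submitter waiting
 * in closure_sync().
 */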
static CLOSURE_CALLBACK(bch2_dio_read_complete)
{
	closure_type(dio, struct dio_read, cl);

	dio->req->ki_complete(dio->req, dio->ret);
	bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
}

static void bch2_direct_IO_read_endio(struct bio *bio)
{
	struct dio_read *dio = bio->bi_private;

	if (bio->bi_status)
		dio->ret = blk_status_to_errno(bio->bi_status);

	closure_put(&dio->cl);
}

static void bch2_direct_IO_read_split_endio(struct bio *bio)
{
	struct dio_read *dio = bio->bi_private;
	bool should_dirty = dio->should_dirty;

	bch2_direct_IO_read_endio(bio);
	bio_check_or_release(bio, should_dirty);
}

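/*
 * Submission path: the request is clamped to i_size, rounded up to the
 * filesystem block size, and issued as one or more bios; the first bio is
 * embedded in the dio_read (allocated from c->dio_read_bioset), any overflow
 * goes into additional split bios.
 */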
static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
{
	struct file *file = req->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts;
	struct dio_read *dio;
	struct bio *bio;
	loff_t offset = req->ki_pos;
	bool sync = is_sync_kiocb(req);
	size_t shorten;
	ssize_t ret;

	bch2_inode_opts_get(&opts, c, &inode->ei_inode);

	/* Clamp the read to i_size, rounded up to the filesystem block size: */
	ret = min_t(loff_t, iter->count,
		    max_t(loff_t, 0, i_size_read(&inode->v) - offset));
	if (!ret)
		return ret;

	shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
	iter->count -= shorten;

	bio = bio_alloc_bioset(NULL,
			bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
			REQ_OP_READ,
			GFP_KERNEL,
			&c->dio_read_bioset);

	bio->bi_end_io = bch2_direct_IO_read_endio;

	dio = container_of(bio, struct dio_read, rbio.bio);
	closure_init(&dio->cl, NULL);

	/*
	 * this is a _really_ horrible hack just to avoid an atomic sub at the
	 * end:
	 */
	if (!sync) {
		set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER -
			   CLOSURE_RUNNING +
			   CLOSURE_DESTRUCTOR);
	} else {
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER + 1);
		dio->cl.closure_get_happened = true;
	}

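	/*
	 * From here on the closure refcount tracks in-flight bios: in the
	 * async case the final closure_put() runs bch2_dio_read_complete(),
	 * in the sync case we hold an extra ref and wait in closure_sync()
	 * below.
	 */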
	dio->req	= req;
	dio->ret	= ret;
	/*
	 * This is one of the sketchier things I've encountered: we have to skip
	 * the dirtying of requests that are internal from the kernel (i.e. from
	 * loopback), because we'll deadlock on page_lock.
	 */
	dio->should_dirty = iter_is_iovec(iter);

	goto start;
	while (iter->count) {
		bio = bio_alloc_bioset(NULL,
				bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
				REQ_OP_READ,
				GFP_KERNEL,
				&c->bio_read);
		bio->bi_end_io = bch2_direct_IO_read_split_endio;
start:
		bio->bi_opf		= REQ_OP_READ|REQ_SYNC;
		bio->bi_iter.bi_sector	= offset >> 9;
		bio->bi_private		= dio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret < 0)) {
			/* XXX: fault inject this path */
			bio->bi_status = BLK_STS_RESOURCE;
			bio_endio(bio);
			break;
		}

		offset += bio->bi_iter.bi_size;

		if (dio->should_dirty)
			bio_set_pages_dirty(bio);

		if (iter->count)
			closure_get(&dio->cl);

		bch2_read(c, rbio_init(bio, opts), inode_inum(inode));
	}

	iter->count += shorten;

	if (sync) {
		closure_sync(&dio->cl);
		closure_debug_destroy(&dio->cl);
		ret = dio->ret;
		bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
		return ret;
	} else {
		return -EIOCBQUEUED;
	}
}

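/*
 * Read entry point: O_DIRECT reads first flush any dirty pagecache in the
 * range, then bypass the page cache entirely (under a block plug); buffered
 * reads take the inode's pagecache-add lock around generic_file_read_iter().
 */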
ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct address_space *mapping = file->f_mapping;
	size_t count = iov_iter_count(iter);
	ssize_t ret = 0;

	if (!count)
		return 0; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct blk_plug plug;

		if (unlikely(mapping->nrpages)) {
			ret = filemap_write_and_wait_range(mapping,
						iocb->ki_pos,
						iocb->ki_pos + count - 1);
			if (ret < 0)
				goto out;
		}

		file_accessed(file);

		blk_start_plug(&plug);
		ret = bch2_direct_IO_read(iocb, iter);
		blk_finish_plug(&plug);

		if (ret >= 0)
			iocb->ki_pos += ret;
	} else {
		bch2_pagecache_add_get(inode);
		ret = generic_file_read_iter(iocb, iter);
		bch2_pagecache_add_put(inode);
	}
out:
	return bch2_err_class(ret);
}

/* O_DIRECT writes */

struct dio_write {
	struct kiocb		*req;
	struct address_space	*mapping;
	struct bch_inode_info	*inode;
	struct mm_struct	*mm;
	const struct iovec	*iov;
	unsigned		loop:1,
				extending:1,
				sync:1,
				flush:1;
	struct quota_res	quota_res;
	u64			written;

	struct iov_iter		iter;
	struct iovec		inline_vecs[2];

	/* must be last: */
	struct bch_write_op	op;
};

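/*
 * The write op must be the last member: O_DIRECT write bios are allocated
 * from c->dio_write_bioset with the bio embedded at
 * offsetof(struct dio_write, op.wbio.bio), so container_of() on the bio
 * recovers the enclosing dio_write.
 */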
static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum,
				       u64 offset, u64 size,
				       unsigned nr_replicas, bool compressed)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 end = offset + size;
	u32 snapshot;
	bool ret = true;
	int err;
retry:
	bch2_trans_begin(trans);

	err = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
	if (err)
		goto err;

	for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
			   SPOS(inum.inum, offset, snapshot),
			   BTREE_ITER_SLOTS, k, err) {
		if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end)))
			break;

		if (k.k->p.snapshot != snapshot ||
		    nr_replicas > bch2_bkey_replicas(c, k) ||
		    (!compressed && bch2_bkey_sectors_compressed(k))) {
			ret = false;
			break;
		}
	}

	offset = iter.pos.offset;
	bch2_trans_iter_exit(trans, &iter);
err:
	if (bch2_err_matches(err, BCH_ERR_transaction_restart))
		goto retry;
	bch2_trans_put(trans);

	return err ? false : ret;
}

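/*
 * Called when a disk reservation can't be obtained (-ENOSPC): an O_DIRECT
 * overwrite of a range that's already allocated, uncompressed and at
 * sufficient replication won't consume new space, so the write may proceed
 * without the reservation.
 */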
static noinline bool bch2_dio_write_check_allocated(struct dio_write *dio)
{
	struct bch_fs *c = dio->op.c;
	struct bch_inode_info *inode = dio->inode;
	struct bio *bio = &dio->op.wbio.bio;

	return bch2_check_range_allocated(c, inode_inum(inode),
				dio->op.pos.offset, bio_sectors(bio),
				dio->op.opts.data_replicas,
				dio->op.opts.compression != 0);
}

static void bch2_dio_write_loop_async(struct bch_write_op *);
static __always_inline long bch2_dio_write_done(struct dio_write *dio);

/*
 * We're going to return -EIOCBQUEUED, but we haven't finished consuming the
 * iov_iter yet, so we need to stash a copy of the iovec: it might be on the
 * caller's stack, we're not guaranteed that it will live for the duration of
 * the IO:
 */
static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
{
	struct iovec *iov = dio->inline_vecs;

	/*
	 * iov_iter has a single embedded iovec - nothing to do:
	 */
	if (iter_is_ubuf(&dio->iter))
		return 0;

	/*
	 * We don't currently handle non-iovec iov_iters here - return an error,
	 * and we'll fall back to doing the IO synchronously:
	 */
	if (!iter_is_iovec(&dio->iter))
		return -1;

	if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
		dio->iov = iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
					       GFP_KERNEL);
		if (unlikely(!iov))
			return -ENOMEM;
	}

	memcpy(iov, dio->iter.__iov, dio->iter.nr_segs * sizeof(*iov));
	dio->iter.__iov = iov;
	return 0;
}

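/*
 * The copied iovec array (dio->iov, when it doesn't fit in inline_vecs) is
 * freed when the write completes, in bch2_dio_write_done().
 */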
static CLOSURE_CALLBACK(bch2_dio_write_flush_done)
{
	closure_type(dio, struct dio_write, op.cl);
	struct bch_fs *c = dio->op.c;

	closure_debug_destroy(cl);

	dio->op.error = bch2_journal_error(&c->journal);

	bch2_dio_write_done(dio);
}

static noinline void bch2_dio_write_flush(struct dio_write *dio)
{
	struct bch_fs *c = dio->op.c;
	struct bch_inode_unpacked inode;
	int ret;

	dio->flush = 0;

	closure_init(&dio->op.cl, NULL);

	if (!dio->op.error) {
		ret = bch2_inode_find_by_inum(c, inode_inum(dio->inode), &inode);
		if (ret) {
			dio->op.error = ret;
		} else {
			bch2_journal_flush_seq_async(&c->journal, inode.bi_journal_seq,
						     &dio->op.cl);
			bch2_inode_flush_nocow_writes_async(c, dio->inode, &dio->op.cl);
		}
	}

	if (dio->sync) {
		closure_sync(&dio->op.cl);
		closure_debug_destroy(&dio->op.cl);
	} else {
		continue_at(&dio->op.cl, bch2_dio_write_flush_done, NULL);
	}
}

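/*
 * For dsync kiocbs (unless journal_flush_disabled), completion also flushes
 * the journal up to the inode's bi_journal_seq and issues cache flushes to
 * devices that received nocow writes for this inode; the sync path waits
 * here, the async path continues via bch2_dio_write_flush_done().
 */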
static __always_inline long bch2_dio_write_done(struct dio_write *dio)
{
	struct kiocb *req = dio->req;
	struct bch_inode_info *inode = dio->inode;
	bool sync = dio->sync;
	long ret;

	if (unlikely(dio->flush)) {
		bch2_dio_write_flush(dio);
		if (!sync)
			return -1;
	}

	bch2_pagecache_block_put(inode);

	kfree(dio->iov);

	ret = dio->op.error ?: ((long) dio->written << 9);
	bio_put(&dio->op.wbio.bio);

	/* inode->i_dio_count is our ref on inode and thus bch_fs */
	inode_dio_end(&inode->v);

	if (ret < 0)
		ret = bch2_err_class(ret);

	if (!sync) {
		req->ki_complete(req, ret);
		ret = -EIOCBQUEUED;
	}
	return ret;
}

static __always_inline void bch2_dio_write_end(struct dio_write *dio)
{
	struct bch_fs *c = dio->op.c;
	struct kiocb *req = dio->req;
	struct bch_inode_info *inode = dio->inode;
	struct bio *bio = &dio->op.wbio.bio;

	req->ki_pos	+= (u64) dio->op.written << 9;
	dio->written	+= dio->op.written;

	if (dio->extending) {
		spin_lock(&inode->v.i_lock);
		if (req->ki_pos > inode->v.i_size)
			i_size_write(&inode->v, req->ki_pos);
		spin_unlock(&inode->v.i_lock);
	}

	if (dio->op.i_sectors_delta || dio->quota_res.sectors) {
		mutex_lock(&inode->ei_quota_lock);
		__bch2_i_sectors_acct(c, inode, &dio->quota_res, dio->op.i_sectors_delta);
		__bch2_quota_reservation_put(c, inode, &dio->quota_res);
		mutex_unlock(&inode->ei_quota_lock);
	}

	bio_release_pages(bio, false);

	if (unlikely(dio->op.error))
		set_bit(EI_INODE_ERROR, &inode->ei_flags);
}

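/*
 * Main write loop: pin user pages with page faults on our own mapping
 * disabled (faulting in a user buffer that is an mmap of this same file
 * could otherwise deadlock on the pagecache lock), trim the bio to a block
 * boundary, then set up and issue a bch_write_op; repeat until the iov_iter
 * is drained or an error occurs.
 */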
static __always_inline long bch2_dio_write_loop(struct dio_write *dio)
{
	struct bch_fs *c = dio->op.c;
	struct kiocb *req = dio->req;
	struct address_space *mapping = dio->mapping;
	struct bch_inode_info *inode = dio->inode;
	struct bch_io_opts opts;
	struct bio *bio = &dio->op.wbio.bio;
	unsigned unaligned, iter_count;
	bool sync = dio->sync, dropped_locks;
	long ret;

	bch2_inode_opts_get(&opts, c, &inode->ei_inode);

	while (1) {
		iter_count = dio->iter.count;

		EBUG_ON(current->faults_disabled_mapping);
		current->faults_disabled_mapping = mapping;

		ret = bio_iov_iter_get_pages(bio, &dio->iter);

		dropped_locks = fdm_dropped_locks();

		current->faults_disabled_mapping = NULL;

		/*
		 * If the fault handler returned an error but also signalled
		 * that it dropped & retook ei_pagecache_lock, we just need to
		 * re-shoot down the page cache and retry:
		 */
		if (dropped_locks && ret)
			ret = 0;

		if (unlikely(ret < 0))
			goto err;

		if (unlikely(dropped_locks)) {
			ret = bch2_write_invalidate_inode_pages_range(mapping,
					req->ki_pos,
					req->ki_pos + iter_count - 1);
			if (unlikely(ret))
				goto err;

			if (!bio->bi_iter.bi_size)
				continue;
		}

		unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
		bio->bi_iter.bi_size -= unaligned;
		iov_iter_revert(&dio->iter, unaligned);

		if (!bio->bi_iter.bi_size) {
			/*
			 * bio_iov_iter_get_pages was only able to get <
			 * blocksize worth of pages:
			 */
			ret = -EFAULT;
			goto err;
		}

		bch2_write_op_init(&dio->op, c, opts);
		dio->op.end_io = sync
			? NULL
			: bch2_dio_write_loop_async;
		dio->op.target		= dio->op.opts.foreground_target;
		dio->op.write_point	= writepoint_hashed((unsigned long) current);
		dio->op.nr_replicas	= dio->op.opts.data_replicas;
		dio->op.subvol		= inode->ei_subvol;
		dio->op.pos		= POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
		dio->op.devs_need_flush	= &inode->ei_devs_need_flush;

		if (sync)
			dio->op.flags |= BCH_WRITE_SYNC;
		dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;

		ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
						 bio_sectors(bio), true);
		if (unlikely(ret))
			goto err;

		ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
						dio->op.opts.data_replicas, 0);
		if (unlikely(ret) &&
		    !bch2_dio_write_check_allocated(dio))
			goto err;

		task_io_account_write(bio->bi_iter.bi_size);

		if (unlikely(dio->iter.count) &&
		    !dio->sync &&
		    !dio->loop &&
		    bch2_dio_write_copy_iov(dio))
			dio->sync = sync = true;

		dio->loop = true;
		closure_call(&dio->op.cl, bch2_write, NULL, NULL);

		if (!sync)
			return -EIOCBQUEUED;

		bch2_dio_write_end(dio);

		if (likely(!dio->iter.count) || dio->op.error)
			break;

		bio_reset(bio, NULL, REQ_OP_WRITE);
	}
out:
	return bch2_dio_write_done(dio);
err:
	dio->op.error = ret;

	bio_release_pages(bio, false);

	bch2_quota_reservation_put(c, inode, &dio->quota_res);
	goto out;
}

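/*
 * Async continuation runs from the write op's completion context, not the
 * submitting task, so the original mm (saved in dio->mm) has to be adopted
 * with kthread_use_mm() before bio_iov_iter_get_pages() can pin more of the
 * user buffer.
 */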
static noinline __cold void bch2_dio_write_continue(struct dio_write *dio)
{
	struct mm_struct *mm = dio->mm;

	bio_reset(&dio->op.wbio.bio, NULL, REQ_OP_WRITE);

	if (mm)
		kthread_use_mm(mm);
	bch2_dio_write_loop(dio);
	if (mm)
		kthread_unuse_mm(mm);
}

static void bch2_dio_write_loop_async(struct bch_write_op *op)
{
	struct dio_write *dio = container_of(op, struct dio_write, op);

	bch2_dio_write_end(dio);

	if (likely(!dio->iter.count) || dio->op.error)
		bch2_dio_write_done(dio);
	else
		bch2_dio_write_continue(dio);
}

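/*
 * Write entry point: writes must be block aligned; non-extending writes drop
 * the inode lock before issuing IO, while size-extending writes keep it held
 * (and are forced synchronous).
 */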
ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
{
	struct file *file = req->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct dio_write *dio;
	struct bio *bio;
	bool locked = true, extending;
	ssize_t ret;

	prefetch(&c->opts);
	prefetch((void *) &c->opts + 64);
	prefetch(&inode->ei_inode);
	prefetch((void *) &inode->ei_inode + 64);

	inode_lock(&inode->v);

	ret = generic_write_checks(req, iter);
	if (unlikely(ret <= 0))
		goto err;

	ret = file_remove_privs(file);
	if (unlikely(ret))
		goto err;

	ret = file_update_time(file);
	if (unlikely(ret))
		goto err;

	ret = -EINVAL;
	if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
		goto err;

	inode_dio_begin(&inode->v);
	bch2_pagecache_block_get(inode);

	extending = req->ki_pos + iter->count > inode->v.i_size;
	if (!extending) {
		inode_unlock(&inode->v);
		locked = false;
	}

	bio = bio_alloc_bioset(NULL,
			bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
			REQ_OP_WRITE | REQ_SYNC | REQ_IDLE,
			GFP_KERNEL,
			&c->dio_write_bioset);
	dio = container_of(bio, struct dio_write, op.wbio.bio);
	dio->req		= req;
	dio->mapping		= mapping;
	dio->inode		= inode;
	dio->mm			= current->mm;
	dio->iov		= NULL;
	dio->loop		= false;
	dio->extending		= extending;
	dio->sync		= is_sync_kiocb(req) || extending;
	dio->flush		= iocb_is_dsync(req) && !c->opts.journal_flush_disabled;
	dio->quota_res.sectors	= 0;
	dio->written		= 0;
	dio->iter		= *iter;
	dio->op.c		= c;

	if (unlikely(mapping->nrpages)) {
		ret = bch2_write_invalidate_inode_pages_range(mapping,
					req->ki_pos,
					req->ki_pos + iter->count - 1);
		if (unlikely(ret))
			goto err_put_bio;
	}

	ret = bch2_dio_write_loop(dio);
err:
	if (locked)
		inode_unlock(&inode->v);
	return ret;
err_put_bio:
	bch2_pagecache_block_put(inode);
	bio_put(bio);
	inode_dio_end(&inode->v);
	goto err;
}

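/*
 * Filesystem init/exit hooks: the dio_read/dio_write biosets back the
 * embedded-bio allocations used by the read and write paths above.
 */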
void bch2_fs_fs_io_direct_exit(struct bch_fs *c)
{
	bioset_exit(&c->dio_write_bioset);
	bioset_exit(&c->dio_read_bioset);
}

int bch2_fs_fs_io_direct_init(struct bch_fs *c)
{
	if (bioset_init(&c->dio_read_bioset,
			4, offsetof(struct dio_read, rbio.bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_dio_read_bioset_init;

	if (bioset_init(&c->dio_write_bioset,
			4, offsetof(struct dio_write, op.wbio.bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_dio_write_bioset_init;

	return 0;
}

#endif /* NO_BCACHEFS_FS */