// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_FS

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "fs.h"
#include "fs-io.h"
#include "fs-io-direct.h"
#include "fs-io-pagecache.h"
#include "io_read.h"
#include "io_write.h"
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/prefetch.h>
#include <linux/task_io_accounting_ops.h>

/* O_DIRECT reads */

struct dio_read {
	struct closure		cl;
	struct kiocb		*req;
	ssize_t			ret;
	bool			should_dirty;
	struct bch_read_bio	rbio;
};

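/*
 * Helper for completing a dio read: when we dirtied the user pages ourselves,
 * completion goes through bio_check_pages_dirty(), which (as I understand it)
 * re-dirties any pages that got cleaned while the IO was in flight and then
 * drops the bio; otherwise we just release the pages and put the bio.
 */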
static void bio_check_or_release(struct bio *bio, bool check_dirty)
{
	if (check_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

static CLOSURE_CALLBACK(bch2_dio_read_complete)
{
	closure_type(dio, struct dio_read, cl);

	dio->req->ki_complete(dio->req, dio->ret);
	bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
}

static void bch2_direct_IO_read_endio(struct bio *bio)
{
	struct dio_read *dio = bio->bi_private;

	if (bio->bi_status)
		dio->ret = blk_status_to_errno(bio->bi_status);

	closure_put(&dio->cl);
}

static void bch2_direct_IO_read_split_endio(struct bio *bio)
{
	struct dio_read *dio = bio->bi_private;
	bool should_dirty = dio->should_dirty;

	bch2_direct_IO_read_endio(bio);
	bio_check_or_release(bio, should_dirty);
}

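/*
 * A direct read may need more than one bio: the first is the one embedded in
 * struct dio_read (from dio_read_bioset); any further split bios come from
 * c->bio_read and complete through the split endio above. The dio's closure
 * refcount ties the splits together.
 */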
static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
{
	struct file *file = req->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts;
	struct dio_read *dio;
	struct bio *bio;
	loff_t offset = req->ki_pos;
	bool sync = is_sync_kiocb(req);
	size_t shorten;
	ssize_t ret;

	bch2_inode_opts_get(&opts, c, &inode->ei_inode);

	/* bios must be 512 byte aligned: */
	if ((offset|iter->count) & (SECTOR_SIZE - 1))
		return -EINVAL;

	/* Clamp the read to i_size: */
	ret = min_t(loff_t, iter->count,
		    max_t(loff_t, 0, i_size_read(&inode->v) - offset));

	if (!ret)
		return ret;

	/*
	 * Shorten the iter to the clamped length, rounded up to the
	 * filesystem block size; restored before we return:
	 */
	shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
	iter->count -= shorten;

	bio = bio_alloc_bioset(NULL,
			       bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
			       REQ_OP_READ,
			       GFP_KERNEL,
			       &c->dio_read_bioset);
	bio->bi_end_io = bch2_direct_IO_read_endio;

	dio = container_of(bio, struct dio_read, rbio.bio);
	closure_init(&dio->cl, NULL);

	/*
	 * this is a _really_ horrible hack just to avoid an atomic sub at the
	 * end:
	 */
	if (!sync) {
		set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER -
			   CLOSURE_RUNNING +
			   CLOSURE_DESTRUCTOR);
	} else {
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER + 1);
		dio->cl.closure_get_happened = true;
	}

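	/*
	 * What the hack above appears to buy us: in the async case the
	 * closure is armed so the final closure_put() runs
	 * bch2_dio_read_complete() as a destructor, with no extra atomic op
	 * to drop the initial ref; in the sync case we hold an extra ref so
	 * closure_sync() below can wait for every split to finish.
	 */
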
	dio->req	= req;
	dio->ret	= ret;
	/*
	 * This is one of the sketchier things I've encountered: we have to skip
	 * the dirtying of requests that are internal from the kernel (i.e. from
	 * loopback), because we'll deadlock on page_lock.
	 */
	dio->should_dirty = iter_is_iovec(iter);

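	/*
	 * Note the jump into the middle of the loop: the first iteration uses
	 * the bio embedded in the dio, allocated above; later iterations
	 * allocate fresh split bios.
	 */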
	goto start;
	while (iter->count) {
		bio = bio_alloc_bioset(NULL,
				       bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
				       REQ_OP_READ,
				       GFP_KERNEL,
				       &c->bio_read);
		bio->bi_end_io		= bch2_direct_IO_read_split_endio;
start:
		bio->bi_opf		= REQ_OP_READ|REQ_SYNC;
		bio->bi_iter.bi_sector	= offset >> 9;
		bio->bi_private		= dio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (ret < 0) {
			/* XXX: fault inject this path */
			bio->bi_status = BLK_STS_RESOURCE;
			bio_endio(bio);
			break;
		}

		offset += bio->bi_iter.bi_size;

		if (dio->should_dirty)
			bio_set_pages_dirty(bio);

		if (iter->count)
			closure_get(&dio->cl);

		bch2_read(c, rbio_init(bio, opts), inode_inum(inode));
	}

	iter->count += shorten;

	if (sync) {
		closure_sync(&dio->cl);
		closure_debug_destroy(&dio->cl);
		ret = dio->ret;
		bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
		return ret;
	} else {
		return -EIOCBQUEUED;
	}
}

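/*
 * ->read_iter: for O_DIRECT, first write back and wait on any dirty pagecache
 * over the range so the direct read sees current data. Buffered reads take
 * the pagecache add lock, which (presumably) coordinates with paths like
 * direct writes and truncate that block page cache additions.
 */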
ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct address_space *mapping = file->f_mapping;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	if (!count)
		return 0; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct blk_plug plug;

		if (unlikely(mapping->nrpages)) {
			ret = filemap_write_and_wait_range(mapping,
						iocb->ki_pos,
						iocb->ki_pos + count - 1);
			if (ret < 0)
				goto out;
		}

		file_accessed(file);

		blk_start_plug(&plug);
		ret = bch2_direct_IO_read(iocb, iter);
		blk_finish_plug(&plug);

		if (ret >= 0)
			iocb->ki_pos += ret;
	} else {
		bch2_pagecache_add_get(inode);
		ret = generic_file_read_iter(iocb, iter);
		bch2_pagecache_add_put(inode);
	}
out:
	return bch2_err_class(ret);
}

/* O_DIRECT writes */

struct dio_write {
	struct kiocb			*req;
	struct address_space		*mapping;
	struct bch_inode_info		*inode;
	struct mm_struct		*mm;
	const struct iovec		*iov;
	unsigned			loop:1,
					extending:1,
					sync:1,
					flush:1;

	struct quota_res		quota_res;
	u64				written;

	struct iov_iter			iter;
	struct iovec			inline_vecs[2];

	/* must be last: */
	struct bch_write_op		op;
};

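/*
 * Walk the extents btree over [offset, offset + size) and check whether every
 * slot is already allocated with at least nr_replicas replicas and (when the
 * write isn't compressed) isn't compressed on disk: if so, an O_DIRECT
 * overwrite won't consume new space, and we can proceed even when the disk
 * reservation failed.
 */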
static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum,
				       u64 offset, u64 size,
				       unsigned nr_replicas, bool compressed)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 end = offset + size;
	u32 snapshot;
	bool ret = true;
	int err;
retry:
	bch2_trans_begin(trans);

	err = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
	if (err)
		goto err;

	for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
			   SPOS(inum.inum, offset, snapshot),
			   BTREE_ITER_SLOTS, k, err) {
		if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end)))
			break;

		if (k.k->p.snapshot != snapshot ||
		    nr_replicas > bch2_bkey_replicas(c, k) ||
		    (!compressed && bch2_bkey_sectors_compressed(k))) {
			ret = false;
			break;
		}
	}

	offset = iter.pos.offset;
	bch2_trans_iter_exit(trans, &iter);
err:
	if (bch2_err_matches(err, BCH_ERR_transaction_restart))
		goto retry;
	bch2_trans_put(trans);

	return err ? false : ret;
}

static noinline bool bch2_dio_write_check_allocated(struct dio_write *dio)
{
	struct bch_fs *c = dio->op.c;
	struct bch_inode_info *inode = dio->inode;
	struct bio *bio = &dio->op.wbio.bio;

	return bch2_check_range_allocated(c, inode_inum(inode),
				dio->op.pos.offset, bio_sectors(bio),
				dio->op.opts.data_replicas,
				dio->op.opts.compression != 0);
}

static void bch2_dio_write_loop_async(struct bch_write_op *);
static __always_inline long bch2_dio_write_done(struct dio_write *dio);

/*
 * We're going to return -EIOCBQUEUED, but we haven't finished consuming the
 * iov_iter yet, so we need to stash a copy of the iovec: it might be on the
 * caller's stack, and we're not guaranteed that it will live for the duration
 * of the IO:
 */
static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
{
	struct iovec *iov = dio->inline_vecs;

	/* iov_iter has a single embedded iovec - nothing to do: */
	if (iter_is_ubuf(&dio->iter))
		return 0;

	/*
	 * We don't currently handle non-iovec iov_iters here - return an error,
	 * and we'll fall back to doing the IO synchronously:
	 */
	if (!iter_is_iovec(&dio->iter))
		return -1;

	if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
		dio->iov = iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
					       GFP_KERNEL);
		if (unlikely(!iov))
			return -ENOMEM;
	}

	memcpy(iov, dio->iter.__iov, dio->iter.nr_segs * sizeof(*iov));
	dio->iter.__iov = iov;
	return 0;
}

static CLOSURE_CALLBACK(bch2_dio_write_flush_done)
{
	closure_type(dio, struct dio_write, op.cl);
	struct bch_fs *c = dio->op.c;

	closure_debug_destroy(cl);

	dio->op.error = bch2_journal_error(&c->journal);

	bch2_dio_write_done(dio);
}

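/*
 * For O_DSYNC writes: once the data write has completed, flush the journal up
 * to the inode's journal sequence number (and flush any devices holding nocow
 * writes) before reporting completion.
 */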
static noinline void bch2_dio_write_flush(struct dio_write *dio)
{
	struct bch_fs *c = dio->op.c;
	struct bch_inode_unpacked inode;
	int ret;

	dio->flush = 0;

	closure_init(&dio->op.cl, NULL);

	if (!dio->op.error) {
		ret = bch2_inode_find_by_inum(c, inode_inum(dio->inode), &inode);
		if (ret) {
			dio->op.error = ret;
		} else {
			bch2_journal_flush_seq_async(&c->journal, inode.bi_journal_seq,
						     &dio->op.cl);
			bch2_inode_flush_nocow_writes_async(c, dio->inode, &dio->op.cl);
		}
	}

	if (dio->sync) {
		closure_sync(&dio->op.cl);
		closure_debug_destroy(&dio->op.cl);
	} else {
		continue_at(&dio->op.cl, bch2_dio_write_flush_done, NULL);
	}
}

static __always_inline long bch2_dio_write_done(struct dio_write *dio)
{
	struct kiocb *req = dio->req;
	struct bch_inode_info *inode = dio->inode;
	bool sync = dio->sync;
	long ret;

	if (unlikely(dio->flush)) {
		bch2_dio_write_flush(dio);
		if (!sync)
			return -EIOCBQUEUED;
	}

	bch2_pagecache_block_put(inode);

	kfree(dio->iov);

	ret = dio->op.error ?: ((long) dio->written << 9);
	bio_put(&dio->op.wbio.bio);

	/* inode->i_dio_count is our ref on inode and thus bch_fs */
	inode_dio_end(&inode->v);

	if (ret < 0)
		ret = bch2_err_class(ret);

	if (!sync) {
		req->ki_complete(req, ret);
		ret = -EIOCBQUEUED;
	}
	return ret;
}

static __always_inline void bch2_dio_write_end(struct dio_write *dio)
{
	struct bch_fs *c = dio->op.c;
	struct kiocb *req = dio->req;
	struct bch_inode_info *inode = dio->inode;
	struct bio *bio = &dio->op.wbio.bio;

	req->ki_pos	+= (u64) dio->op.written << 9;
	dio->written	+= dio->op.written;

	if (dio->extending) {
		spin_lock(&inode->v.i_lock);
		if (req->ki_pos > inode->v.i_size)
			i_size_write(&inode->v, req->ki_pos);
		spin_unlock(&inode->v.i_lock);
	}

	if (dio->op.i_sectors_delta || dio->quota_res.sectors) {
		mutex_lock(&inode->ei_quota_lock);
		__bch2_i_sectors_acct(c, inode, &dio->quota_res, dio->op.i_sectors_delta);
		__bch2_quota_reservation_put(c, inode, &dio->quota_res);
		mutex_unlock(&inode->ei_quota_lock);
	}

	bio_release_pages(bio, false);

	if (unlikely(dio->op.error))
		set_bit(EI_INODE_ERROR, &inode->ei_flags);
}

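/*
 * Core write loop: each pass pins the next chunk of the iov_iter into the
 * bio, reserves quota and disk space, and submits via bch2_write(). Sync
 * writes iterate here until the iter is drained; async writes return
 * -EIOCBQUEUED after the first submission and continue from the write op's
 * end_io callback.
 */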
static __always_inline long bch2_dio_write_loop(struct dio_write *dio)
{
	struct bch_fs *c = dio->op.c;
	struct kiocb *req = dio->req;
	struct address_space *mapping = dio->mapping;
	struct bch_inode_info *inode = dio->inode;
	struct bch_io_opts opts;
	struct bio *bio = &dio->op.wbio.bio;
	unsigned unaligned, iter_count;
	bool sync = dio->sync, dropped_locks;
	long ret;

	bch2_inode_opts_get(&opts, c, &inode->ei_inode);

	while (1) {
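		/*
		 * We're holding the pagecache block lock here, and faulting in
		 * userspace pages below could recurse into our own mapping and
		 * deadlock: faults_disabled_mapping lets the fault handler
		 * detect that case, back off, and signal that it dropped
		 * locks.
		 */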
		iter_count = dio->iter.count;

		EBUG_ON(current->faults_disabled_mapping);
		current->faults_disabled_mapping = mapping;

		ret = bio_iov_iter_get_pages(bio, &dio->iter);

		dropped_locks = fdm_dropped_locks();

		current->faults_disabled_mapping = NULL;

		/*
		 * If the fault handler returned an error but also signalled
		 * that it dropped & retook ei_pagecache_lock, we just need to
		 * re-shoot down the page cache and retry:
		 */
		if (dropped_locks && ret)
			ret = 0;

		if (unlikely(ret < 0))
			goto err;

		if (unlikely(dropped_locks)) {
			ret = bch2_write_invalidate_inode_pages_range(mapping,
					req->ki_pos,
					req->ki_pos + iter_count - 1);
			if (unlikely(ret))
				goto err;

			if (!bio->bi_iter.bi_size)
				continue;
		}

		/* The write must cover a whole number of filesystem blocks: */
		unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
		bio->bi_iter.bi_size -= unaligned;
		iov_iter_revert(&dio->iter, unaligned);

		if (!bio->bi_iter.bi_size) {
			/*
			 * bio_iov_iter_get_pages was only able to get <
			 * blocksize worth of pages:
			 */
			ret = -EFAULT;
			goto err;
		}

		bch2_write_op_init(&dio->op, c, opts);
		dio->op.end_io		= sync
			? NULL
			: bch2_dio_write_loop_async;
		dio->op.target		= dio->op.opts.foreground_target;
		dio->op.write_point	= writepoint_hashed((unsigned long) current);
		dio->op.nr_replicas	= dio->op.opts.data_replicas;
		dio->op.subvol		= inode->ei_subvol;
		dio->op.pos		= POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
		dio->op.devs_need_flush	= &inode->ei_devs_need_flush;

		if (sync)
			dio->op.flags |= BCH_WRITE_SYNC;
		dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;

		ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
						 bio_sectors(bio), true);
		if (unlikely(ret))
			goto err;

		/*
		 * If we can't get a full disk reservation, the write can still
		 * proceed if it's entirely overwriting existing allocations:
		 */
		ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
						dio->op.opts.data_replicas, 0);
		if (unlikely(ret) &&
		    !bch2_dio_write_check_allocated(dio))
			goto err;

		task_io_account_write(bio->bi_iter.bi_size);

		if (unlikely(dio->iter.count) &&
		    !dio->sync &&
		    !dio->loop &&
		    bch2_dio_write_copy_iov(dio))
			dio->sync = sync = true;

		dio->loop = true;
		closure_call(&dio->op.cl, bch2_write, NULL, NULL);

		if (!sync)
			return -EIOCBQUEUED;

		bch2_dio_write_end(dio);

		if (likely(!dio->iter.count) || dio->op.error)
			break;

		bio_reset(bio, NULL, REQ_OP_WRITE);
	}
out:
	return bch2_dio_write_done(dio);
err:
	dio->op.error = ret;

	bio_release_pages(bio, false);

	bch2_quota_reservation_put(c, inode, &dio->quota_res);
	goto out;
}

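/*
 * Async continuation runs from the write op's completion path, i.e. a kernel
 * worker with no user address space: attach the submitter's mm (if any) so
 * bio_iov_iter_get_pages() can pin the remaining user pages.
 */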
static noinline __cold void bch2_dio_write_continue(struct dio_write *dio)
{
	struct mm_struct *mm = dio->mm;

	bio_reset(&dio->op.wbio.bio, NULL, REQ_OP_WRITE);

	if (mm)
		kthread_use_mm(mm);
	bch2_dio_write_loop(dio);
	if (mm)
		kthread_unuse_mm(mm);
}

static void bch2_dio_write_loop_async(struct bch_write_op *op)
{
	struct dio_write *dio = container_of(op, struct dio_write, op);

	bch2_dio_write_end(dio);

	if (likely(!dio->iter.count) || dio->op.error)
		bch2_dio_write_done(dio);
	else
		bch2_dio_write_continue(dio);
}

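/*
 * O_DIRECT write entry point. Note the locking: i_rwsem is held across the
 * whole write only when we're extending the file (and thus updating i_size);
 * otherwise it's dropped before the IO, and the pagecache block lock is what
 * keeps the page cache coherent for the duration.
 */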
ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
{
	struct file *file = req->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct dio_write *dio;
	struct bio *bio;
	bool locked = true, extending;
	ssize_t ret;

	prefetch(&c->opts);
	prefetch((void *) &c->opts + 64);
	prefetch(&inode->ei_inode);
	prefetch((void *) &inode->ei_inode + 64);

	inode_lock(&inode->v);

	ret = generic_write_checks(req, iter);
	if (unlikely(ret <= 0))
		goto err;

	ret = file_remove_privs(file);
	if (unlikely(ret))
		goto err;

	ret = file_update_time(file);
	if (unlikely(ret))
		goto err;

	ret = -EINVAL;
	if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
		goto err;

	inode_dio_begin(&inode->v);
	bch2_pagecache_block_get(inode);

	extending = req->ki_pos + iter->count > inode->v.i_size;
	if (!extending) {
		inode_unlock(&inode->v);
		locked = false;
	}

	bio = bio_alloc_bioset(NULL,
			       bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
			       REQ_OP_WRITE,
			       GFP_KERNEL,
			       &c->dio_write_bioset);
	dio = container_of(bio, struct dio_write, op.wbio.bio);
	dio->req		= req;
	dio->mapping		= mapping;
	dio->inode		= inode;
	dio->mm			= current->mm;
	dio->iov		= NULL;
	dio->loop		= false;
	dio->extending		= extending;
	dio->sync		= is_sync_kiocb(req) || extending;
	dio->flush		= iocb_is_dsync(req) && !c->opts.journal_flush_disabled;
	dio->quota_res.sectors	= 0;
	dio->written		= 0;
	dio->iter		= *iter;
	dio->op.c		= c;

	if (unlikely(mapping->nrpages)) {
		/* Shoot down cached pages over the range before the write: */
		ret = bch2_write_invalidate_inode_pages_range(mapping,
					req->ki_pos,
					req->ki_pos + iter->count - 1);
		if (unlikely(ret))
			goto err_put_bio;
	}

	ret = bch2_dio_write_loop(dio);
err:
	if (locked)
		inode_unlock(&inode->v);
	return ret;
err_put_bio:
	bch2_pagecache_block_put(inode);
	bio_put(bio);
	inode_dio_end(&inode->v);
	goto err;
}

void bch2_fs_fs_io_direct_exit(struct bch_fs *c)
{
	bioset_exit(&c->dio_write_bioset);
	bioset_exit(&c->dio_read_bioset);
}

int bch2_fs_fs_io_direct_init(struct bch_fs *c)
{
	if (bioset_init(&c->dio_read_bioset,
			4, offsetof(struct dio_read, rbio.bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_dio_read_bioset_init;

	if (bioset_init(&c->dio_write_bioset,
			4, offsetof(struct dio_write, op.wbio.bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_dio_write_bioset_init;

	return 0;
}

#endif /* NO_BCACHEFS_FS */