/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_IO_H
#define _BCACHEFS_IO_H

#include "checksum.h"
#include "bkey_buf.h"
#include "io_types.h"
#define to_wbio(_bio)			\
	container_of((_bio), struct bch_write_bio, bio)

#define to_rbio(_bio)			\
	container_of((_bio), struct bch_read_bio, bio)

void bch2_bio_free_pages_pool(struct bch_fs *, struct bio *);
void bch2_bio_alloc_pages_pool(struct bch_fs *, struct bio *, size_t);

void bch2_latency_acct(struct bch_dev *, u64, int);

void bch2_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *,
			       enum bch_data_type, const struct bkey_i *);

#define BLK_STS_REMOVED		((__force blk_status_t)128)

const char *bch2_blk_status_to_str(blk_status_t);
enum bch_write_flags {
	BCH_WRITE_ALLOC_NOWAIT		= (1 << 0),
	BCH_WRITE_CACHED		= (1 << 1),
	BCH_WRITE_FLUSH			= (1 << 2),
	BCH_WRITE_DATA_ENCODED		= (1 << 3),
	BCH_WRITE_PAGES_STABLE		= (1 << 4),
	BCH_WRITE_PAGES_OWNED		= (1 << 5),
	BCH_WRITE_ONLY_SPECIFIED_DEVS	= (1 << 6),
	BCH_WRITE_WROTE_DATA_INLINE	= (1 << 7),
	BCH_WRITE_FROM_INTERNAL		= (1 << 8),
	BCH_WRITE_CHECK_ENOSPC		= (1 << 9),

	/* Internal: */
	BCH_WRITE_JOURNAL_SEQ_PTR	= (1 << 10),
	BCH_WRITE_SKIP_CLOSURE_PUT	= (1 << 11),
	BCH_WRITE_DONE			= (1 << 12),
};
static inline u64 *op_journal_seq(struct bch_write_op *op)
{
	return (op->flags & BCH_WRITE_JOURNAL_SEQ_PTR)
		? op->journal_seq_p : &op->journal_seq;
}

static inline void op_journal_seq_set(struct bch_write_op *op, u64 *journal_seq)
{
	op->journal_seq_p = journal_seq;
	op->flags |= BCH_WRITE_JOURNAL_SEQ_PTR;
}
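
/*
 * Illustrative sketch (not part of this header): a caller that wants the
 * journal sequence number of the index update reported into its own variable
 * can redirect it before submitting the write, then flush that sequence:
 *
 *	u64 journal_seq = 0;
 *
 *	op_journal_seq_set(&op, &journal_seq);
 *	... submit the write and wait for completion ...
 *	ret = bch2_journal_flush_seq(&c->journal, journal_seq);
 */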
static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op)
{
	return op->alloc_reserve == RESERVE_MOVINGGC
		? op->c->copygc_wq
		: op->c->btree_update_wq;
}
int bch2_sum_sector_overwrites(struct btree_trans *, struct btree_iter *,
			       struct bkey_i *, bool *, bool *, s64 *, s64 *);
int bch2_extent_update(struct btree_trans *, struct btree_iter *,
		       struct bkey_i *, struct disk_reservation *,
		       u64 *, u64, s64 *, bool);
int bch2_fpunch_at(struct btree_trans *, struct btree_iter *,
		   struct bpos, u64 *, s64 *);
int bch2_fpunch(struct bch_fs *c, u64, u64, u64, u64 *, s64 *);

int bch2_write_index_default(struct bch_write_op *);
static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
				      struct bch_io_opts opts)
{
	op->c			= c;
	op->flags		= 0;
	op->written		= 0;
	op->error		= 0;
	op->csum_type		= bch2_data_checksum_type(c, opts.data_checksum);
	op->compression_type	= bch2_compression_opt_to_type[opts.compression];
	op->nr_replicas		= 0;
	op->nr_replicas_required = c->opts.data_replicas_required;
	op->alloc_reserve	= RESERVE_NONE;
	op->incompressible	= 0;
	op->open_buckets.nr	= 0;
	op->devs_have.nr	= 0;
	op->target		= 0;
	op->opts		= opts;
	op->pos			= POS_MAX;
	op->version		= ZERO_VERSION;
	op->write_point		= (struct write_point_specifier) { 0 };
	op->res			= (struct disk_reservation) { 0 };
	op->journal_seq		= 0;
	op->new_i_size		= U64_MAX;
	op->i_sectors_delta	= 0;
	op->index_update_fn	= bch2_write_index_default;
}
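
/*
 * Rough sketch of how the write path is driven from a caller's point of view
 * (the embedding structure, write point and bio setup below are illustrative,
 * not taken from this file):
 *
 *	struct bch_write_op op;
 *
 *	bch2_write_op_init(&op, c, io_opts);
 *	op.nr_replicas	= io_opts.data_replicas;
 *	op.pos		= POS(inum, sector);
 *	... set up op.wbio.bio to cover the pages being written ...
 *	closure_call(&op.cl, bch2_write, NULL, &parent_cl);
 */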
void bch2_write(struct closure *);

static inline struct bch_write_bio *wbio_init(struct bio *bio)
{
	struct bch_write_bio *wbio = to_wbio(bio);

	memset(wbio, 0, offsetof(struct bch_write_bio, bio));
	return wbio;
}
struct bch_devs_mask;
struct cache_promote_op;
struct extent_ptr_decoded;

int __bch2_read_indirect_extent(struct btree_trans *, unsigned *,
				struct bkey_buf *);

static inline int bch2_read_indirect_extent(struct btree_trans *trans,
					    enum btree_id *data_btree,
					    unsigned *offset_into_extent,
					    struct bkey_buf *k)
{
	if (k->k->k.type != KEY_TYPE_reflink_p)
		return 0;

	*data_btree = BTREE_ID_reflink;
	return __bch2_read_indirect_extent(trans, offset_into_extent, k);
}
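
/*
 * Reads that land on a KEY_TYPE_reflink_p key are redirected into the reflink
 * btree: the helper above leaves ordinary extents untouched, while
 * __bch2_read_indirect_extent() swaps @k for the indirect extent it points to
 * and adjusts @offset_into_extent to match (behaviour summarized here for
 * context; the out-of-line part is defined elsewhere).
 */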
enum bch_read_flags {
	BCH_READ_RETRY_IF_STALE		= 1 << 0,
	BCH_READ_MAY_PROMOTE		= 1 << 1,
	BCH_READ_USER_MAPPED		= 1 << 2,
	BCH_READ_NODECODE		= 1 << 3,
	BCH_READ_LAST_FRAGMENT		= 1 << 4,

	/* internal: */
	BCH_READ_MUST_BOUNCE		= 1 << 5,
	BCH_READ_MUST_CLONE		= 1 << 6,
	BCH_READ_IN_RETRY		= 1 << 7,
};
int __bch2_read_extent(struct btree_trans *, struct bch_read_bio *,
		       struct bvec_iter, struct bpos, enum btree_id,
		       struct bkey_s_c, unsigned,
		       struct bch_io_failures *, unsigned);

static inline void bch2_read_extent(struct btree_trans *trans,
			struct bch_read_bio *rbio, struct bpos read_pos,
			enum btree_id data_btree, struct bkey_s_c k,
			unsigned offset_into_extent, unsigned flags)
{
	__bch2_read_extent(trans, rbio, rbio->bio.bi_iter, read_pos,
			   data_btree, k, offset_into_extent, NULL, flags);
}
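
/*
 * Sketch of how these helpers fit together in one read iteration (simplified
 * and illustrative; the real loop lives in __bch2_read()):
 *
 *	bch2_bkey_buf_reassemble(&sk, c, k);
 *	offset_into_extent = iter.pos.offset - bkey_start_offset(k.k);
 *	ret = bch2_read_indirect_extent(trans, &data_btree,
 *					&offset_into_extent, &sk);
 *	if (!ret)
 *		bch2_read_extent(trans, rbio, iter.pos, data_btree,
 *				 bkey_i_to_s_c(sk.k), offset_into_extent,
 *				 BCH_READ_RETRY_IF_STALE|BCH_READ_MAY_PROMOTE);
 */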
void __bch2_read(struct bch_fs *, struct bch_read_bio *, struct bvec_iter,
		 u64, struct bch_io_failures *, unsigned flags);

static inline void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
			     u64 inode)
{
	struct bch_io_failures failed = { .nr = 0 };

	BUG_ON(rbio->_state);

	rbio->c = c;
	rbio->start_time = local_clock();

	__bch2_read(c, rbio, rbio->bio.bi_iter, inode, &failed,
		    BCH_READ_RETRY_IF_STALE|
		    BCH_READ_MAY_PROMOTE|
		    BCH_READ_USER_MAPPED);
}
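
/*
 * Illustrative use of the full-inode read entry point (the caller-side setup
 * is a sketch, not taken from this header): the caller builds a bio covering
 * the destination buffer, wraps it with rbio_init(), and hands it off:
 *
 *	struct bch_read_bio *rbio = rbio_init(bio, io_opts);
 *
 *	rbio->bio.bi_end_io = my_read_endio;	(hypothetical completion hook)
 *	bch2_read(c, rbio, inum);
 */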
static inline struct bch_read_bio *rbio_init(struct bio *bio,
					     struct bch_io_opts opts)
{
	struct bch_read_bio *rbio = to_rbio(bio);

	rbio->_state	= 0;
	rbio->promote	= NULL;
	rbio->opts	= opts;
	return rbio;
}
void bch2_fs_io_exit(struct bch_fs *);
int bch2_fs_io_init(struct bch_fs *);

#endif /* _BCACHEFS_IO_H */