1 /* SPDX-License-Identifier: GPL-2.0 */
9 #define to_wbio(_bio) \
10 container_of((_bio), struct bch_write_bio, bio)
12 #define to_rbio(_bio) \
13 container_of((_bio), struct bch_read_bio, bio)
15 void bch2_bio_free_pages_pool(struct bch_fs *, struct bio *);
16 void bch2_bio_alloc_pages_pool(struct bch_fs *, struct bio *, size_t);
18 #ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
19 void bch2_latency_acct(struct bch_dev *, u64, int);
21 static inline void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw) {}
24 void bch2_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *,
25 enum bch_data_type, const struct bkey_i *, bool);
27 #define BLK_STS_REMOVED ((__force blk_status_t)128)
29 const char *bch2_blk_status_to_str(blk_status_t);
/* Bit numbers for the BCH_WRITE_* flag masks defined below */
enum bch_write_flags {
	__BCH_WRITE_ALLOC_NOWAIT,
	__BCH_WRITE_CACHED,
	__BCH_WRITE_DATA_ENCODED,
	__BCH_WRITE_PAGES_STABLE,
	__BCH_WRITE_PAGES_OWNED,
	__BCH_WRITE_ONLY_SPECIFIED_DEVS,
	__BCH_WRITE_WROTE_DATA_INLINE,
	__BCH_WRITE_FROM_INTERNAL,
	__BCH_WRITE_CHECK_ENOSPC,
	__BCH_WRITE_SYNC,
	__BCH_WRITE_MOVE,

	/* Internal: */
	__BCH_WRITE_IN_WORKER,
	__BCH_WRITE_DONE,
	__BCH_WRITE_IO_ERROR,
	__BCH_WRITE_CONVERT_UNWRITTEN,
};
/* Flag masks built from the bit numbers in enum bch_write_flags */
#define BCH_WRITE_ALLOC_NOWAIT		(1U << __BCH_WRITE_ALLOC_NOWAIT)
#define BCH_WRITE_CACHED		(1U << __BCH_WRITE_CACHED)
#define BCH_WRITE_DATA_ENCODED		(1U << __BCH_WRITE_DATA_ENCODED)
#define BCH_WRITE_PAGES_STABLE		(1U << __BCH_WRITE_PAGES_STABLE)
#define BCH_WRITE_PAGES_OWNED		(1U << __BCH_WRITE_PAGES_OWNED)
#define BCH_WRITE_ONLY_SPECIFIED_DEVS	(1U << __BCH_WRITE_ONLY_SPECIFIED_DEVS)
#define BCH_WRITE_WROTE_DATA_INLINE	(1U << __BCH_WRITE_WROTE_DATA_INLINE)
#define BCH_WRITE_FROM_INTERNAL		(1U << __BCH_WRITE_FROM_INTERNAL)
#define BCH_WRITE_CHECK_ENOSPC		(1U << __BCH_WRITE_CHECK_ENOSPC)
#define BCH_WRITE_SYNC			(1U << __BCH_WRITE_SYNC)
#define BCH_WRITE_MOVE			(1U << __BCH_WRITE_MOVE)

/* Internal: */
#define BCH_WRITE_IN_WORKER		(1U << __BCH_WRITE_IN_WORKER)
#define BCH_WRITE_DONE			(1U << __BCH_WRITE_DONE)
#define BCH_WRITE_IO_ERROR		(1U << __BCH_WRITE_IO_ERROR)
#define BCH_WRITE_CONVERT_UNWRITTEN	(1U << __BCH_WRITE_CONVERT_UNWRITTEN)
67 static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op)
69 return op->alloc_reserve == RESERVE_movinggc
71 : op->c->btree_update_wq;
74 int bch2_sum_sector_overwrites(struct btree_trans *, struct btree_iter *,
75 struct bkey_i *, bool *, s64 *, s64 *);
76 int bch2_extent_update(struct btree_trans *, subvol_inum,
77 struct btree_iter *, struct bkey_i *,
78 struct disk_reservation *, u64, s64 *, bool);
79 int bch2_extent_fallocate(struct btree_trans *, subvol_inum, struct btree_iter *,
80 unsigned, struct bch_io_opts, s64 *,
81 struct write_point_specifier);
83 int bch2_fpunch_at(struct btree_trans *, struct btree_iter *,
84 subvol_inum, u64, s64 *);
85 int bch2_fpunch(struct bch_fs *c, subvol_inum, u64, u64, s64 *);
87 static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
88 struct bch_io_opts opts)
95 op->csum_type = bch2_data_checksum_type(c, opts);
96 op->compression_type = bch2_compression_opt_to_type[opts.compression];
98 op->nr_replicas_required = c->opts.data_replicas_required;
99 op->alloc_reserve = RESERVE_none;
100 op->incompressible = 0;
101 op->open_buckets.nr = 0;
102 op->devs_have.nr = 0;
107 op->version = ZERO_VERSION;
108 op->write_point = (struct write_point_specifier) { 0 };
109 op->res = (struct disk_reservation) { 0 };
110 op->new_i_size = U64_MAX;
111 op->i_sectors_delta = 0;
112 op->devs_need_flush = NULL;
void bch2_write(struct closure *);

void bch2_write_point_do_index_updates(struct work_struct *);
119 static inline struct bch_write_bio *wbio_init(struct bio *bio)
121 struct bch_write_bio *wbio = to_wbio(bio);
123 memset(wbio, 0, offsetof(struct bch_write_bio, bio));
struct bch_devs_mask;
struct cache_promote_op;
struct extent_ptr_decoded;

int __bch2_read_indirect_extent(struct btree_trans *, unsigned *,
				struct bkey_buf *);
134 static inline int bch2_read_indirect_extent(struct btree_trans *trans,
135 enum btree_id *data_btree,
136 unsigned *offset_into_extent,
139 if (k->k->k.type != KEY_TYPE_reflink_p)
142 *data_btree = BTREE_ID_reflink;
143 return __bch2_read_indirect_extent(trans, offset_into_extent, k);
enum bch_read_flags {
	BCH_READ_RETRY_IF_STALE		= 1 << 0,
	BCH_READ_MAY_PROMOTE		= 1 << 1,
	BCH_READ_USER_MAPPED		= 1 << 2,
	BCH_READ_NODECODE		= 1 << 3,
	BCH_READ_LAST_FRAGMENT		= 1 << 4,

	/* Internal: */
	BCH_READ_MUST_BOUNCE		= 1 << 5,
	BCH_READ_MUST_CLONE		= 1 << 6,
	BCH_READ_IN_RETRY		= 1 << 7,
};
int __bch2_read_extent(struct btree_trans *, struct bch_read_bio *,
		       struct bvec_iter, struct bpos, enum btree_id,
		       struct bkey_s_c, unsigned,
		       struct bch_io_failures *, unsigned);
164 static inline void bch2_read_extent(struct btree_trans *trans,
165 struct bch_read_bio *rbio, struct bpos read_pos,
166 enum btree_id data_btree, struct bkey_s_c k,
167 unsigned offset_into_extent, unsigned flags)
169 __bch2_read_extent(trans, rbio, rbio->bio.bi_iter, read_pos,
170 data_btree, k, offset_into_extent, NULL, flags);
173 void __bch2_read(struct bch_fs *, struct bch_read_bio *, struct bvec_iter,
174 subvol_inum, struct bch_io_failures *, unsigned flags);
176 static inline void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
179 struct bch_io_failures failed = { .nr = 0 };
181 BUG_ON(rbio->_state);
184 rbio->start_time = local_clock();
185 rbio->subvol = inum.subvol;
187 __bch2_read(c, rbio, rbio->bio.bi_iter, inum, &failed,
188 BCH_READ_RETRY_IF_STALE|
189 BCH_READ_MAY_PROMOTE|
190 BCH_READ_USER_MAPPED);
193 static inline struct bch_read_bio *rbio_init(struct bio *bio,
194 struct bch_io_opts opts)
196 struct bch_read_bio *rbio = to_rbio(bio);
199 rbio->promote = NULL;
/* Setup/teardown of fs-wide io path state (bioset/mempool allocation, presumably — see io.c) */
void bch2_fs_io_exit(struct bch_fs *);
int bch2_fs_io_init(struct bch_fs *);
207 #endif /* _BCACHEFS_IO_H */