4 #include <linux/hash.h>
/*
 * Recover the enclosing bcachefs read/write bio wrapper from the
 * embedded struct bio (the bio is a member of the wrapper struct).
 */
#define to_wbio(_bio)			\
	container_of((_bio), struct bch_write_bio, bio)

#define to_rbio(_bio)			\
	container_of((_bio), struct bch_read_bio, bio)
15 void bch2_bio_free_pages_pool(struct bch_fs *, struct bio *);
16 void bch2_bio_alloc_pages_pool(struct bch_fs *, struct bio *, size_t);
17 void bch2_bio_alloc_more_pages_pool(struct bch_fs *, struct bio *, size_t);
19 void bch2_latency_acct(struct bch_dev *, unsigned, int);
21 void bch2_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *,
22 enum bch_data_type, const struct bkey_i *);
24 #define BLK_STS_REMOVED ((__force blk_status_t)128)
/* Flags for bch_write_op->flags: */
enum bch_write_flags {
	BCH_WRITE_ALLOC_NOWAIT		= (1 << 0),
	BCH_WRITE_CACHED		= (1 << 1),
	BCH_WRITE_FLUSH			= (1 << 2),
	BCH_WRITE_DATA_ENCODED		= (1 << 3),
	BCH_WRITE_PAGES_STABLE		= (1 << 4),
	BCH_WRITE_PAGES_OWNED		= (1 << 5),
	BCH_WRITE_ONLY_SPECIFIED_DEVS	= (1 << 6),
	BCH_WRITE_NOPUT_RESERVATION	= (1 << 7),
	BCH_WRITE_NOMARK_REPLICAS	= (1 << 8),

	/* Internal state flags, set by the write path itself: */
	BCH_WRITE_JOURNAL_SEQ_PTR	= (1 << 9),
	BCH_WRITE_DONE			= (1 << 10),
	BCH_WRITE_LOOPED		= (1 << 11),
};
43 static inline u64 *op_journal_seq(struct bch_write_op *op)
45 return (op->flags & BCH_WRITE_JOURNAL_SEQ_PTR)
46 ? op->journal_seq_p : &op->journal_seq;
49 static inline void op_journal_seq_set(struct bch_write_op *op, u64 *journal_seq)
51 op->journal_seq_p = journal_seq;
52 op->flags |= BCH_WRITE_JOURNAL_SEQ_PTR;
55 static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op)
57 return op->alloc_reserve == RESERVE_MOVINGGC
/* Default index_update_fn for write ops (see bch2_write_op_init()): */
int bch2_write_index_default(struct bch_write_op *);
64 static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c)
67 op->io_wq = index_update_wq(op);
71 op->csum_type = bch2_data_checksum_type(c, c->opts.data_checksum);
72 op->compression_type =
73 bch2_compression_opt_to_type[c->opts.compression];
75 op->nr_replicas_required = c->opts.data_replicas_required;
76 op->alloc_reserve = RESERVE_NONE;
77 op->open_buckets_nr = 0;
80 op->version = ZERO_VERSION;
82 op->write_point = (struct write_point_specifier) { 0 };
83 op->res = (struct disk_reservation) { 0 };
85 op->index_update_fn = bch2_write_index_default;
/* Main write path entry point; op is embedded in the closure: */
void bch2_write(struct closure *);
90 static inline struct bch_write_bio *wbio_init(struct bio *bio)
92 struct bch_write_bio *wbio = to_wbio(bio);
94 memset(wbio, 0, offsetof(struct bch_write_bio, bio));
99 struct cache_promote_op;
100 struct extent_pick_ptr;
102 int __bch2_read_extent(struct bch_fs *, struct bch_read_bio *, struct bvec_iter,
103 struct bkey_s_c_extent e, struct extent_pick_ptr *,
105 void __bch2_read(struct bch_fs *, struct bch_read_bio *, struct bvec_iter,
106 u64, struct bch_devs_mask *, unsigned);
/* Flags accepted by the read path (__bch2_read_extent() et al.): */
enum bch_read_flags {
	BCH_READ_RETRY_IF_STALE		= 1 << 0,
	BCH_READ_MAY_PROMOTE		= 1 << 1,
	BCH_READ_USER_MAPPED		= 1 << 2,
	BCH_READ_NODECODE		= 1 << 3,

	/* Internal state flags, set by the read path itself: */
	BCH_READ_MUST_BOUNCE		= 1 << 4,
	BCH_READ_MUST_CLONE		= 1 << 5,
	BCH_READ_IN_RETRY		= 1 << 6,
};
120 static inline void bch2_read_extent(struct bch_fs *c,
121 struct bch_read_bio *rbio,
122 struct bkey_s_c_extent e,
123 struct extent_pick_ptr *pick,
126 __bch2_read_extent(c, rbio, rbio->bio.bi_iter, e, pick, flags);
129 static inline void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
132 BUG_ON(rbio->_state);
133 __bch2_read(c, rbio, rbio->bio.bi_iter, inode, NULL,
134 BCH_READ_RETRY_IF_STALE|
135 BCH_READ_MAY_PROMOTE|
136 BCH_READ_USER_MAPPED);
139 static inline struct bch_read_bio *rbio_init(struct bio *bio,
140 struct bch_io_opts opts)
142 struct bch_read_bio *rbio = to_rbio(bio);
145 rbio->promote = NULL;
150 #endif /* _BCACHEFS_IO_H */