#ifndef _BCACHEFS_IO_H
#define _BCACHEFS_IO_H

#include <linux/hash.h>
#include "alloc.h"
#include "checksum.h"
#include "io_types.h"

/*
 * struct bch_write_bio and struct bch_read_bio wrap an embedded struct bio;
 * these macros recover the wrapper from the embedded bio.
 */
#define to_wbio(_bio)                   \
        container_of((_bio), struct bch_write_bio, bio)

#define to_rbio(_bio)                   \
        container_of((_bio), struct bch_read_bio, bio)

/* Allocate/free bio data pages from the filesystem's bounce page pool: */
void bch2_bio_free_pages_pool(struct bch_fs *, struct bio *);
void bch2_bio_alloc_pages_pool(struct bch_fs *, struct bio *, size_t);
void bch2_bio_alloc_more_pages_pool(struct bch_fs *, struct bio *, size_t);

/* Per-device IO latency accounting: */
void bch2_latency_acct(struct bch_dev *, unsigned, int);

/* Submit a write bio to every device the key's extent points to: */
void bch2_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *,
                               enum bch_data_type, const struct bkey_i *);

/* Private blk_status_t value, used for IO to a device that has gone away: */
#define BLK_STS_REMOVED         ((__force blk_status_t)128)

enum bch_write_flags {
        BCH_WRITE_ALLOC_NOWAIT          = (1 << 0),
        BCH_WRITE_CACHED                = (1 << 1),
        BCH_WRITE_FLUSH                 = (1 << 2),
        BCH_WRITE_DATA_ENCODED          = (1 << 3),
        BCH_WRITE_PAGES_STABLE          = (1 << 4),
        BCH_WRITE_PAGES_OWNED           = (1 << 5),
        BCH_WRITE_ONLY_SPECIFIED_DEVS   = (1 << 6),
        BCH_WRITE_NOPUT_RESERVATION     = (1 << 7),

        /* Internal: */
        BCH_WRITE_JOURNAL_SEQ_PTR       = (1 << 8),
        BCH_WRITE_DONE                  = (1 << 9),
        BCH_WRITE_LOOPED                = (1 << 10),
};

/*
 * The journal sequence number of a write's index update is recorded either
 * in the op itself or, if the caller supplied a pointer via
 * op_journal_seq_set(), in an external location:
 */
static inline u64 *op_journal_seq(struct bch_write_op *op)
{
        return (op->flags & BCH_WRITE_JOURNAL_SEQ_PTR)
                ? op->journal_seq_p : &op->journal_seq;
}

static inline void op_journal_seq_set(struct bch_write_op *op, u64 *journal_seq)
{
        op->journal_seq_p = journal_seq;
        op->flags |= BCH_WRITE_JOURNAL_SEQ_PTR;
}
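
/*
 * Illustrative sketch (not code from this file): an fsync-style caller that
 * wants the journal sequence number recorded somewhere it can flush later
 * might do something like the following; inode_journal_seq and
 * bch2_journal_flush_seq() live outside this header and are assumptions:
 *
 *      op_journal_seq_set(&op, &inode_journal_seq);
 *      // ... submit the write, then on fsync:
 *      bch2_journal_flush_seq(&c->journal, inode_journal_seq);
 */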

/*
 * Index updates for copygc writes run on the dedicated copygc workqueue;
 * everything else uses the filesystem's general workqueue:
 */
static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op)
{
        return op->alloc_reserve == RESERVE_MOVINGGC
                ? op->c->copygc_wq
                : op->c->wq;
}

/* Default index_update_fn: insert the op's new extent keys into the btree. */
int bch2_write_index_default(struct bch_write_op *);

/* Initialize a write op with defaults taken from the filesystem options: */
static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c)
{
        op->c                   = c;
        op->flags               = 0;
        op->written             = 0;
        op->error               = 0;
        op->csum_type           = bch2_data_checksum_type(c, c->opts.data_checksum);
        op->compression_type    =
                bch2_compression_opt_to_type(c->opts.compression);
        op->nr_replicas         = 0;
        op->nr_replicas_required = c->opts.data_replicas_required;
        op->alloc_reserve       = RESERVE_NONE;
        /* index_update_wq() reads alloc_reserve, so assign io_wq after it: */
        op->io_wq               = index_update_wq(op);
        op->open_buckets_nr     = 0;
        op->devs_have.nr        = 0;
        op->pos                 = POS_MAX;
        op->version             = ZERO_VERSION;
        op->devs                = NULL;
        op->write_point         = (struct write_point_specifier) { 0 };
        op->res                 = (struct disk_reservation) { 0 };
        op->journal_seq         = 0;
        op->index_update_fn     = bch2_write_index_default;
}

/* Write path entry point; runs as a closure function: */
void bch2_write(struct closure *);

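/*
 * Illustrative sketch (under assumptions, not code from this file): a
 * caller might prepare and submit a write roughly as follows.  This assumes
 * bch_write_op embeds a closure (op.cl) and a bch_write_bio (op.wbio) as
 * declared in io_types.h, that the data pages are already attached to
 * op.wbio.bio, and that writepoint_hashed() from alloc.h picks the write
 * point; parent_cl is the caller's own closure:
 *
 *      struct bch_write_op op;
 *
 *      bch2_write_op_init(&op, c);
 *      op.pos          = POS(inode_nr, start_sector);
 *      op.nr_replicas  = c->opts.data_replicas;
 *      op.write_point  = writepoint_hashed((unsigned long) current);
 *
 *      closure_call(&op.cl, bch2_write, NULL, &parent_cl);
 *
 * The write completes asynchronously; op.error holds the result once the
 * parent closure sees the completion.
 */
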
static inline struct bch_write_bio *wbio_init(struct bio *bio)
{
        struct bch_write_bio *wbio = to_wbio(bio);

        /* Zero everything in the wrapper that precedes the embedded bio: */
        memset(wbio, 0, offsetof(struct bch_write_bio, bio));
        return wbio;
}

struct bch_devs_mask;
struct cache_promote_op;
struct extent_pick_ptr;

int __bch2_read_extent(struct bch_fs *, struct bch_read_bio *, struct bvec_iter,
                       struct bkey_s_c_extent e, struct extent_pick_ptr *,
                       unsigned);
void __bch2_read(struct bch_fs *, struct bch_read_bio *, struct bvec_iter,
                 u64, struct bch_devs_mask *, unsigned);

enum bch_read_flags {
        BCH_READ_RETRY_IF_STALE         = 1 << 0,
        BCH_READ_MAY_PROMOTE            = 1 << 1,
        BCH_READ_USER_MAPPED            = 1 << 2,
        BCH_READ_NODECODE               = 1 << 3,

        /* internal: */
        BCH_READ_MUST_BOUNCE            = 1 << 4,
        BCH_READ_MUST_CLONE             = 1 << 5,
        BCH_READ_IN_RETRY               = 1 << 6,
};

/* Read a single extent into rbio, using the bio's current iter: */
static inline void bch2_read_extent(struct bch_fs *c,
                                    struct bch_read_bio *rbio,
                                    struct bkey_s_c_extent e,
                                    struct extent_pick_ptr *pick,
                                    unsigned flags)
{
        __bch2_read_extent(c, rbio, rbio->bio.bi_iter, e, pick, flags);
}

/*
 * Top level read path: read the extents covering rbio's iter for the given
 * inode, retrying stale pointers and promoting where allowed:
 */
static inline void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
                             u64 inode)
{
        BUG_ON(rbio->_state);
        __bch2_read(c, rbio, rbio->bio.bi_iter, inode, NULL,
                    BCH_READ_RETRY_IF_STALE|
                    BCH_READ_MAY_PROMOTE|
                    BCH_READ_USER_MAPPED);
}

/* Initialize the bch_read_bio wrapper around an already-allocated bio: */
static inline struct bch_read_bio *rbio_init(struct bio *bio,
                                             struct bch_io_opts opts)
{
        struct bch_read_bio *rbio = to_rbio(bio);

        rbio->_state    = 0;
        rbio->promote   = NULL;
        rbio->opts      = opts;
        return rbio;
}
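
/*
 * Illustrative sketch (under assumptions, not code from this file): a
 * caller that owns a struct bio whose iter already describes the sectors to
 * read, and whose allocation has room for a bch_read_bio in front of the
 * bio, might issue a read like this; io_opts and read_endio stand in for
 * whatever the caller actually uses:
 *
 *      struct bch_read_bio *rbio = rbio_init(bio, io_opts);
 *
 *      bio->bi_end_io = read_endio;
 *      bch2_read(c, rbio, inode_nr);
 *
 * bch2_read() picks extents for the requested range, submits the IO
 * (bouncing, retrying or promoting as needed) and completes the bio when
 * everything is done.
 */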

#endif /* _BCACHEFS_IO_H */