#ifndef _BCACHEFS_IO_H
#define _BCACHEFS_IO_H

#include <linux/hash.h>
#include "alloc.h"
#include "checksum.h"
#include "io_types.h"

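/*
 * Get from an embedded struct bio back to the bch_write_bio/bch_read_bio
 * that contains it (e.g. in a bi_end_io completion callback).
 */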
#define to_wbio(_bio)					\
	container_of((_bio), struct bch_write_bio, bio)

#define to_rbio(_bio)					\
	container_of((_bio), struct bch_read_bio, bio)

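/*
 * Bio page pool helpers: fill a bio's bvecs with pages from the
 * filesystem's page mempool (and return them to it), presumably for
 * bounce buffers in the compression/checksum paths.
 */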
void bch2_bio_free_pages_pool(struct bch_fs *, struct bio *);
void bch2_bio_alloc_pages_pool(struct bch_fs *, struct bio *, size_t);
void bch2_bio_alloc_more_pages_pool(struct bch_fs *, struct bio *, size_t);

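/*
 * Per-device IO latency accounting; the unsigned and int arguments are
 * presumably the submission time and the read/write direction (an
 * assumption - they are unnamed here).
 */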
void bch2_latency_acct(struct bch_dev *, unsigned, int);

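/*
 * Submit a write bio to every device the key says a replica belongs on,
 * presumably cloning the bio once per pointer in the key.
 */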
void bch2_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *,
			       enum bch_data_type, const struct bkey_i *);

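/*
 * bcachefs-private blk_status_t value; 128 is well outside the codes
 * defined by the block layer, and the name suggests it marks IO to a
 * device that has been removed.
 */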
#define BLK_STS_REMOVED		((__force blk_status_t)128)

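/*
 * Flags for bch_write_op.flags; the ones below the "Internal:" marker
 * are managed by the write path itself rather than set by callers.
 */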
enum bch_write_flags {
	BCH_WRITE_ALLOC_NOWAIT		= (1 << 0),
	BCH_WRITE_CACHED		= (1 << 1),
	BCH_WRITE_FLUSH			= (1 << 2),
	BCH_WRITE_DATA_ENCODED		= (1 << 3),
	BCH_WRITE_PAGES_STABLE		= (1 << 4),
	BCH_WRITE_PAGES_OWNED		= (1 << 5),
	BCH_WRITE_ONLY_SPECIFIED_DEVS	= (1 << 6),
	BCH_WRITE_NOPUT_RESERVATION	= (1 << 7),
	BCH_WRITE_NOMARK_REPLICAS	= (1 << 8),

	/* Internal: */
	BCH_WRITE_JOURNAL_SEQ_PTR	= (1 << 9),
	BCH_WRITE_DONE			= (1 << 10),
	BCH_WRITE_LOOPED		= (1 << 11),
};

static inline u64 *op_journal_seq(struct bch_write_op *op)
{
	return (op->flags & BCH_WRITE_JOURNAL_SEQ_PTR)
		? op->journal_seq_p : &op->journal_seq;
}

static inline void op_journal_seq_set(struct bch_write_op *op, u64 *journal_seq)
{
	op->journal_seq_p = journal_seq;
	op->flags |= BCH_WRITE_JOURNAL_SEQ_PTR;
}
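
/*
 * Usage sketch (caller-side names are hypothetical): a caller that wants
 * the journal sequence number the write lands in stored in a variable of
 * its own does
 *
 *	op_journal_seq_set(&op, &my_journal_seq);
 *
 * before starting the write; the write path then reads and writes the
 * sequence number through op_journal_seq(&op), which returns either that
 * external pointer or the op's embedded journal_seq field.
 */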

static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op)
{
	return op->alloc_reserve == RESERVE_MOVINGGC
		? op->c->copygc_wq
		: op->c->wq;
}

int bch2_write_index_default(struct bch_write_op *);

static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c)
{
	op->c			= c;
	/* index_update_wq() reads alloc_reserve, so it must be set first: */
	op->alloc_reserve	= RESERVE_NONE;
	op->io_wq		= index_update_wq(op);
	op->flags		= 0;
	op->written		= 0;
	op->error		= 0;
	op->csum_type		= bch2_data_checksum_type(c, c->opts.data_checksum);
	op->compression_type	=
		bch2_compression_opt_to_type(c->opts.compression);
	op->nr_replicas		= 0;
	op->nr_replicas_required = c->opts.data_replicas_required;
	op->open_buckets_nr	= 0;
	op->devs_have.nr	= 0;
	op->pos			= POS_MAX;
	op->version		= ZERO_VERSION;
	op->devs		= NULL;
	op->write_point		= (struct write_point_specifier) { 0 };
	op->res			= (struct disk_reservation) { 0 };
	op->journal_seq		= 0;
	op->index_update_fn	= bch2_write_index_default;
}

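/*
 * bch2_write() is the asynchronous write entry point; it takes the
 * closure embedded in struct bch_write_op (see io_types.h). A minimal
 * caller sketch - 'parent_cl' and the field values are hypothetical, and
 * the caller is assumed to have attached its data pages to op.wbio.bio
 * and taken any disk reservation it needs in op.res:
 *
 *	bch2_write_op_init(&op, c);
 *	op.pos		= POS(inum, start_sector);
 *	op.nr_replicas	= c->opts.data_replicas;
 *	closure_call(&op.cl, bch2_write, NULL, &parent_cl);
 */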
void bch2_write(struct closure *);

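/*
 * Zero every bch_write_bio field that precedes the embedded struct bio;
 * the bio itself (typically already set up by the caller) is left alone.
 */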
static inline struct bch_write_bio *wbio_init(struct bio *bio)
{
	struct bch_write_bio *wbio = to_wbio(bio);

	memset(wbio, 0, offsetof(struct bch_write_bio, bio));
	return wbio;
}

struct bch_devs_mask;
struct cache_promote_op;
struct extent_pick_ptr;

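/*
 * Low-level read entry points: both take an explicit bvec_iter rather
 * than using the bio's own, and __bch2_read() additionally takes a
 * device mask (presumably devices to avoid, e.g. on retry). The inline
 * wrappers further down supply the bio's iterator and default flags.
 */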
int __bch2_read_extent(struct bch_fs *, struct bch_read_bio *, struct bvec_iter,
		       struct bkey_s_c_extent e, struct extent_pick_ptr *,
		       unsigned);
void __bch2_read(struct bch_fs *, struct bch_read_bio *, struct bvec_iter,
		 u64, struct bch_devs_mask *, unsigned);

enum bch_read_flags {
	BCH_READ_RETRY_IF_STALE		= 1 << 0,
	BCH_READ_MAY_PROMOTE		= 1 << 1,
	BCH_READ_USER_MAPPED		= 1 << 2,
	BCH_READ_NODECODE		= 1 << 3,

	/* internal: */
	BCH_READ_MUST_BOUNCE		= 1 << 4,
	BCH_READ_MUST_CLONE		= 1 << 5,
	BCH_READ_IN_RETRY		= 1 << 6,
};

static inline void bch2_read_extent(struct bch_fs *c,
				    struct bch_read_bio *rbio,
				    struct bkey_s_c_extent e,
				    struct extent_pick_ptr *pick,
				    unsigned flags)
{
	__bch2_read_extent(c, rbio, rbio->bio.bi_iter, e, pick, flags);
}

static inline void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
			     u64 inode)
{
	BUG_ON(rbio->_state);
	__bch2_read(c, rbio, rbio->bio.bi_iter, inode, NULL,
		    BCH_READ_RETRY_IF_STALE|
		    BCH_READ_MAY_PROMOTE|
		    BCH_READ_USER_MAPPED);
}

static inline struct bch_read_bio *rbio_init(struct bio *bio,
					     struct bch_io_opts opts)
{
	struct bch_read_bio *rbio = to_rbio(bio);

	rbio->_state	= 0;
	rbio->promote	= NULL;
	rbio->opts	= opts;
	return rbio;
}
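
/*
 * Read-path usage sketch (caller-side names are hypothetical; assumes the
 * bio already describes the destination pages and the on-disk range in
 * its bi_iter):
 *
 *	struct bch_read_bio *rbio = rbio_init(bio, io_opts);
 *
 *	rbio->bio.bi_end_io = my_read_endio;
 *	bch2_read(c, rbio, inum);
 *
 * bch2_read() BUG()s if rbio->_state is nonzero, so the rbio must come
 * from rbio_init() (or otherwise have _state cleared) first.
 */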

#endif /* _BCACHEFS_IO_H */