/* libbcachefs/io.h — read/write path interface (bcachefs-tools) */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_IO_H
3 #define _BCACHEFS_IO_H
4
5 #include "checksum.h"
6 #include "bkey_buf.h"
7 #include "io_types.h"
8
/* Recover the enclosing bch_write_bio from its embedded struct bio. */
#define to_wbio(_bio)			\
	container_of((_bio), struct bch_write_bio, bio)

/* Recover the enclosing bch_read_bio from its embedded struct bio. */
#define to_rbio(_bio)			\
	container_of((_bio), struct bch_read_bio, bio)
/* Mempool-backed page allocation for bounce bios: */
void bch2_bio_free_pages_pool(struct bch_fs *, struct bio *);
void bch2_bio_alloc_pages_pool(struct bch_fs *, struct bio *, size_t);

/*
 * Account an observed per-device IO latency sample; the int argument is
 * presumably the READ/WRITE direction — confirm against io.c.
 */
void bch2_latency_acct(struct bch_dev *, u64, int);

/* Clone @wbio and submit one copy per replica pointer in the key: */
void bch2_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *,
			       enum bch_data_type, const struct bkey_i *);

/* bcachefs-private bio status: the target device was removed mid-IO. */
#define BLK_STS_REMOVED		((__force blk_status_t)128)

/* Human-readable name for a blk_status_t (including BLK_STS_REMOVED). */
const char *bch2_blk_status_to_str(blk_status_t);
/*
 * Behaviour flags for a bch_write_op, stored in op->flags.  The first
 * group may be set by callers; the "Internal" group is managed by the
 * write path itself.
 */
enum bch_write_flags {
	BCH_WRITE_ALLOC_NOWAIT		= (1 << 0),
	BCH_WRITE_CACHED		= (1 << 1),
	BCH_WRITE_FLUSH			= (1 << 2),
	BCH_WRITE_DATA_ENCODED		= (1 << 3),
	BCH_WRITE_PAGES_STABLE		= (1 << 4),
	BCH_WRITE_PAGES_OWNED		= (1 << 5),
	BCH_WRITE_ONLY_SPECIFIED_DEVS	= (1 << 6),
	BCH_WRITE_WROTE_DATA_INLINE	= (1 << 7),
	BCH_WRITE_FROM_INTERNAL		= (1 << 8),
	BCH_WRITE_CHECK_ENOSPC		= (1 << 9),

	/* Internal: */
	BCH_WRITE_JOURNAL_SEQ_PTR	= (1 << 10),	/* op->journal_seq_p is valid — see op_journal_seq() */
	BCH_WRITE_SKIP_CLOSURE_PUT	= (1 << 11),
	BCH_WRITE_DONE			= (1 << 12),
};
44
45 static inline u64 *op_journal_seq(struct bch_write_op *op)
46 {
47         return (op->flags & BCH_WRITE_JOURNAL_SEQ_PTR)
48                 ? op->journal_seq_p : &op->journal_seq;
49 }
50
/*
 * Redirect journal sequence reporting for @op to an external variable.
 * The pointer is stored before the flag is set, so a subsequent
 * op_journal_seq() that sees the flag reads a valid pointer.
 */
static inline void op_journal_seq_set(struct bch_write_op *op, u64 *journal_seq)
{
	op->journal_seq_p = journal_seq;
	op->flags |= BCH_WRITE_JOURNAL_SEQ_PTR;
}
56
57 static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op)
58 {
59         return op->alloc_reserve == RESERVE_MOVINGGC
60                 ? op->c->copygc_wq
61                 : op->c->btree_update_wq;
62 }
63
/* Extent update / punch-hole primitives (implemented in io.c): */
int bch2_sum_sector_overwrites(struct btree_trans *, struct btree_iter *,
			       struct bkey_i *, bool *, bool *, s64 *, s64 *);
int bch2_extent_update(struct btree_trans *, struct btree_iter *,
		       struct bkey_i *, struct disk_reservation *,
		       u64 *, u64, s64 *, bool);
int bch2_fpunch_at(struct btree_trans *, struct btree_iter *,
		   struct bpos, u64 *, s64 *);
int bch2_fpunch(struct bch_fs *c, u64, u64, u64, u64 *, s64 *);

/* Default op->index_update_fn used by bch2_write_op_init(): */
int bch2_write_index_default(struct bch_write_op *);
74
/*
 * Initialize @op with defaults derived from @c and @opts.  Callers are
 * expected to override the relevant fields (pos, nr_replicas, write_point,
 * end_io, ...) before submitting the op via bch2_write().
 */
static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
				      struct bch_io_opts opts)
{
	op->c			= c;
	op->end_io		= NULL;
	op->flags		= 0;
	op->written		= 0;
	op->error		= 0;
	op->csum_type		= bch2_data_checksum_type(c, opts.data_checksum);
	op->compression_type	= bch2_compression_opt_to_type[opts.compression];
	op->nr_replicas		= 0;
	op->nr_replicas_required = c->opts.data_replicas_required;
	op->alloc_reserve	= RESERVE_NONE;
	op->incompressible	= 0;
	op->open_buckets.nr	= 0;
	op->devs_have.nr	= 0;
	op->target		= 0;
	op->opts		= opts;
	op->pos			= POS_MAX;	/* sentinel — caller presumably sets the real pos; confirm */
	op->version		= ZERO_VERSION;
	op->write_point		= (struct write_point_specifier) { 0 };
	op->res			= (struct disk_reservation) { 0 };
	op->journal_seq		= 0;
	op->new_i_size		= U64_MAX;
	op->i_sectors_delta	= 0;
	op->index_update_fn	= bch2_write_index_default;
}
102
/* Entry point of the write path; takes a closure embedded in a bch_write_op. */
void bch2_write(struct closure *);
105 static inline struct bch_write_bio *wbio_init(struct bio *bio)
106 {
107         struct bch_write_bio *wbio = to_wbio(bio);
108
109         memset(wbio, 0, offsetof(struct bch_write_bio, bio));
110         return wbio;
111 }
112
struct bch_devs_mask;
struct cache_promote_op;
struct extent_ptr_decoded;

/* Resolve a reflink pointer to the indirect extent it references: */
int __bch2_read_indirect_extent(struct btree_trans *, unsigned *,
				struct bkey_buf *);
120 static inline int bch2_read_indirect_extent(struct btree_trans *trans,
121                                             enum btree_id *data_btree,
122                                             unsigned *offset_into_extent,
123                                             struct bkey_buf *k)
124 {
125         if (k->k->k.type != KEY_TYPE_reflink_p)
126                 return 0;
127
128         *data_btree = BTREE_ID_reflink;
129         return __bch2_read_indirect_extent(trans, offset_into_extent, k);
130 }
131
/*
 * Behaviour flags for the read path (__bch2_read()/__bch2_read_extent()).
 * The first group may be set by callers; the "internal" group is managed
 * by the read path itself.
 */
enum bch_read_flags {
	BCH_READ_RETRY_IF_STALE		= 1 << 0,
	BCH_READ_MAY_PROMOTE		= 1 << 1,
	BCH_READ_USER_MAPPED		= 1 << 2,
	BCH_READ_NODECODE		= 1 << 3,
	BCH_READ_LAST_FRAGMENT		= 1 << 4,

	/* internal: */
	BCH_READ_MUST_BOUNCE		= 1 << 5,
	BCH_READ_MUST_CLONE		= 1 << 6,
	BCH_READ_IN_RETRY		= 1 << 7,
};
144
/* Read one extent; flags are from enum bch_read_flags: */
int __bch2_read_extent(struct btree_trans *, struct bch_read_bio *,
		       struct bvec_iter, struct bpos, enum btree_id,
		       struct bkey_s_c, unsigned,
		       struct bch_io_failures *, unsigned);
149
/*
 * Convenience wrapper around __bch2_read_extent(): reads into @rbio using
 * its current bi_iter and no failure list.  Note __bch2_read_extent()'s
 * int return is discarded here.
 */
static inline void bch2_read_extent(struct btree_trans *trans,
			struct bch_read_bio *rbio, struct bpos read_pos,
			enum btree_id data_btree, struct bkey_s_c k,
			unsigned offset_into_extent, unsigned flags)
{
	__bch2_read_extent(trans, rbio, rbio->bio.bi_iter, read_pos,
			   data_btree, k, offset_into_extent, NULL, flags);
}
158
/* Full read path for an inode's data; flags are from enum bch_read_flags: */
void __bch2_read(struct bch_fs *, struct bch_read_bio *, struct bvec_iter,
		 u64, struct bch_io_failures *, unsigned flags);
161
162 static inline void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
163                              u64 inode)
164 {
165         struct bch_io_failures failed = { .nr = 0 };
166
167         BUG_ON(rbio->_state);
168
169         rbio->c = c;
170         rbio->start_time = local_clock();
171
172         __bch2_read(c, rbio, rbio->bio.bi_iter, inode, &failed,
173                     BCH_READ_RETRY_IF_STALE|
174                     BCH_READ_MAY_PROMOTE|
175                     BCH_READ_USER_MAPPED);
176 }
177
178 static inline struct bch_read_bio *rbio_init(struct bio *bio,
179                                              struct bch_io_opts opts)
180 {
181         struct bch_read_bio *rbio = to_rbio(bio);
182
183         rbio->_state    = 0;
184         rbio->promote   = NULL;
185         rbio->opts      = opts;
186         return rbio;
187 }
188
/* Filesystem-lifetime setup/teardown of IO path state (mempools etc.): */
void bch2_fs_io_exit(struct bch_fs *);
int bch2_fs_io_init(struct bch_fs *);
191
192 #endif /* _BCACHEFS_IO_H */