/* libbcachefs/io.h — bcachefs read/write path interface (bcachefs-tools) */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_IO_H
3 #define _BCACHEFS_IO_H
4
5 #include "checksum.h"
6 #include "bkey_buf.h"
7 #include "io_types.h"
8
/*
 * container_of helpers: recover the bcachefs write/read bio wrapper from its
 * embedded struct bio.  Only valid for bios that really are embedded in a
 * bch_write_bio/bch_read_bio.
 */
#define to_wbio(_bio)                   \
	container_of((_bio), struct bch_write_bio, bio)

#define to_rbio(_bio)                   \
	container_of((_bio), struct bch_read_bio, bio)

/* Bio page-pool alloc/free (presumably a per-fs mempool — see io.c): */
void bch2_bio_free_pages_pool(struct bch_fs *, struct bio *);
void bch2_bio_alloc_pages_pool(struct bch_fs *, struct bio *, size_t);

#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
void bch2_latency_acct(struct bch_dev *, u64, int);
#else
/* Latency accounting compiled out: stub keeps call sites unconditional */
static inline void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw) {}
#endif

void bch2_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *,
			       enum bch_data_type, const struct bkey_i *, bool);

/* bcachefs-private block status code, outside the range blk-core uses: */
#define BLK_STS_REMOVED         ((__force blk_status_t)128)

const char *bch2_blk_status_to_str(blk_status_t);
30
/*
 * Write path flags: the __BCH_WRITE_* enum assigns bit numbers, the
 * BCH_WRITE_* defines below are the corresponding single-bit masks used in
 * bch_write_op::flags.
 */
enum bch_write_flags {
	__BCH_WRITE_ALLOC_NOWAIT,
	__BCH_WRITE_CACHED,
	__BCH_WRITE_DATA_ENCODED,
	__BCH_WRITE_PAGES_STABLE,
	__BCH_WRITE_PAGES_OWNED,
	__BCH_WRITE_ONLY_SPECIFIED_DEVS,
	__BCH_WRITE_WROTE_DATA_INLINE,
	__BCH_WRITE_FROM_INTERNAL,
	__BCH_WRITE_CHECK_ENOSPC,
	__BCH_WRITE_SYNC,
	__BCH_WRITE_MOVE,
	__BCH_WRITE_IN_WORKER,
	__BCH_WRITE_DONE,
	__BCH_WRITE_IO_ERROR,
	__BCH_WRITE_CONVERT_UNWRITTEN,
};

/* Caller-settable flags: */
#define BCH_WRITE_ALLOC_NOWAIT          (1U << __BCH_WRITE_ALLOC_NOWAIT)
#define BCH_WRITE_CACHED                (1U << __BCH_WRITE_CACHED)
#define BCH_WRITE_DATA_ENCODED          (1U << __BCH_WRITE_DATA_ENCODED)
#define BCH_WRITE_PAGES_STABLE          (1U << __BCH_WRITE_PAGES_STABLE)
#define BCH_WRITE_PAGES_OWNED           (1U << __BCH_WRITE_PAGES_OWNED)
#define BCH_WRITE_ONLY_SPECIFIED_DEVS   (1U << __BCH_WRITE_ONLY_SPECIFIED_DEVS)
#define BCH_WRITE_WROTE_DATA_INLINE     (1U << __BCH_WRITE_WROTE_DATA_INLINE)
#define BCH_WRITE_FROM_INTERNAL         (1U << __BCH_WRITE_FROM_INTERNAL)
#define BCH_WRITE_CHECK_ENOSPC          (1U << __BCH_WRITE_CHECK_ENOSPC)
#define BCH_WRITE_SYNC                  (1U << __BCH_WRITE_SYNC)
#define BCH_WRITE_MOVE                  (1U << __BCH_WRITE_MOVE)

/* Internal: set by the write path itself, not by callers */
#define BCH_WRITE_IN_WORKER             (1U << __BCH_WRITE_IN_WORKER)
#define BCH_WRITE_DONE                  (1U << __BCH_WRITE_DONE)
#define BCH_WRITE_IO_ERROR              (1U << __BCH_WRITE_IO_ERROR)
#define BCH_WRITE_CONVERT_UNWRITTEN     (1U << __BCH_WRITE_CONVERT_UNWRITTEN)
66
67 static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op)
68 {
69         return op->alloc_reserve == RESERVE_movinggc
70                 ? op->c->copygc_wq
71                 : op->c->btree_update_wq;
72 }
73
/* Extent update/insert helpers (transactional; return 0 or -errno): */
int bch2_sum_sector_overwrites(struct btree_trans *, struct btree_iter *,
			       struct bkey_i *, bool *, s64 *, s64 *);
int bch2_extent_update(struct btree_trans *, subvol_inum,
		       struct btree_iter *, struct bkey_i *,
		       struct disk_reservation *, u64, s64 *, bool);
int bch2_extent_fallocate(struct btree_trans *, subvol_inum, struct btree_iter *,
			  unsigned, struct bch_io_opts, s64 *,
			  struct write_point_specifier);

/* Punch (delete) extents in a range; s64 * returns the i_sectors delta: */
int bch2_fpunch_at(struct btree_trans *, struct btree_iter *,
		   subvol_inum, u64, s64 *);
int bch2_fpunch(struct bch_fs *c, subvol_inum, u64, u64, s64 *);
86
87 static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
88                                       struct bch_io_opts opts)
89 {
90         op->c                   = c;
91         op->end_io              = NULL;
92         op->flags               = 0;
93         op->written             = 0;
94         op->error               = 0;
95         op->csum_type           = bch2_data_checksum_type(c, opts);
96         op->compression_type    = bch2_compression_opt_to_type[opts.compression];
97         op->nr_replicas         = 0;
98         op->nr_replicas_required = c->opts.data_replicas_required;
99         op->alloc_reserve       = RESERVE_none;
100         op->incompressible      = 0;
101         op->open_buckets.nr     = 0;
102         op->devs_have.nr        = 0;
103         op->target              = 0;
104         op->opts                = opts;
105         op->subvol              = 0;
106         op->pos                 = POS_MAX;
107         op->version             = ZERO_VERSION;
108         op->write_point         = (struct write_point_specifier) { 0 };
109         op->res                 = (struct disk_reservation) { 0 };
110         op->new_i_size          = U64_MAX;
111         op->i_sectors_delta     = 0;
112         op->devs_need_flush     = NULL;
113 }
114
/*
 * Write path entry point; argument is presumably the closure embedded in
 * struct bch_write_op (NOTE(review): confirm against io.c).
 */
void bch2_write(struct closure *);

/* Worker for deferred index updates after data writes complete: */
void bch2_write_point_do_index_updates(struct work_struct *);
118
119 static inline struct bch_write_bio *wbio_init(struct bio *bio)
120 {
121         struct bch_write_bio *wbio = to_wbio(bio);
122
123         memset(wbio, 0, offsetof(struct bch_write_bio, bio));
124         return wbio;
125 }
126
/* Forward declarations (pointer-only uses; definitions live elsewhere): */
struct bch_devs_mask;
struct cache_promote_op;
struct extent_ptr_decoded;

/* Slow path of bch2_read_indirect_extent(), for reflink pointers: */
int __bch2_read_indirect_extent(struct btree_trans *, unsigned *,
				struct bkey_buf *);
133
134 static inline int bch2_read_indirect_extent(struct btree_trans *trans,
135                                             enum btree_id *data_btree,
136                                             unsigned *offset_into_extent,
137                                             struct bkey_buf *k)
138 {
139         if (k->k->k.type != KEY_TYPE_reflink_p)
140                 return 0;
141
142         *data_btree = BTREE_ID_reflink;
143         return __bch2_read_indirect_extent(trans, offset_into_extent, k);
144 }
145
/* Read path flags (bitmask values, not bit numbers): */
enum bch_read_flags {
	BCH_READ_RETRY_IF_STALE         = 1 << 0,
	BCH_READ_MAY_PROMOTE            = 1 << 1,
	BCH_READ_USER_MAPPED            = 1 << 2,
	BCH_READ_NODECODE               = 1 << 3,
	BCH_READ_LAST_FRAGMENT          = 1 << 4,

	/* internal: set by the read path itself, not by callers */
	BCH_READ_MUST_BOUNCE            = 1 << 5,
	BCH_READ_MUST_CLONE             = 1 << 6,
	BCH_READ_IN_RETRY               = 1 << 7,
};

/* Full-argument read of a single extent; see bch2_read_extent() wrapper: */
int __bch2_read_extent(struct btree_trans *, struct bch_read_bio *,
		       struct bvec_iter, struct bpos, enum btree_id,
		       struct bkey_s_c, unsigned,
		       struct bch_io_failures *, unsigned);
163
164 static inline void bch2_read_extent(struct btree_trans *trans,
165                         struct bch_read_bio *rbio, struct bpos read_pos,
166                         enum btree_id data_btree, struct bkey_s_c k,
167                         unsigned offset_into_extent, unsigned flags)
168 {
169         __bch2_read_extent(trans, rbio, rbio->bio.bi_iter, read_pos,
170                            data_btree, k, offset_into_extent, NULL, flags);
171 }
172
/* Full read path entry point; see bch2_read() for the common-case wrapper: */
void __bch2_read(struct bch_fs *, struct bch_read_bio *, struct bvec_iter,
		 subvol_inum, struct bch_io_failures *, unsigned flags);
175
176 static inline void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
177                              subvol_inum inum)
178 {
179         struct bch_io_failures failed = { .nr = 0 };
180
181         BUG_ON(rbio->_state);
182
183         rbio->c = c;
184         rbio->start_time = local_clock();
185         rbio->subvol = inum.subvol;
186
187         __bch2_read(c, rbio, rbio->bio.bi_iter, inum, &failed,
188                     BCH_READ_RETRY_IF_STALE|
189                     BCH_READ_MAY_PROMOTE|
190                     BCH_READ_USER_MAPPED);
191 }
192
193 static inline struct bch_read_bio *rbio_init(struct bio *bio,
194                                              struct bch_io_opts opts)
195 {
196         struct bch_read_bio *rbio = to_rbio(bio);
197
198         rbio->_state    = 0;
199         rbio->promote   = NULL;
200         rbio->opts      = opts;
201         return rbio;
202 }
203
/* Filesystem-lifetime setup/teardown for the io subsystem: */
void bch2_fs_io_exit(struct bch_fs *);
int bch2_fs_io_init(struct bch_fs *);

#endif /* _BCACHEFS_IO_H */