/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_IO_H
#define _BCACHEFS_IO_H

#include "checksum.h"
#include "bkey_buf.h"
#include "io_types.h"

#define to_wbio(_bio)                   \
        container_of((_bio), struct bch_write_bio, bio)

#define to_rbio(_bio)                   \
        container_of((_bio), struct bch_read_bio, bio)

void bch2_bio_free_pages_pool(struct bch_fs *, struct bio *);
void bch2_bio_alloc_pages_pool(struct bch_fs *, struct bio *, size_t);

void bch2_latency_acct(struct bch_dev *, u64, int);

void bch2_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *,
                               enum bch_data_type, const struct bkey_i *);

#define BLK_STS_REMOVED         ((__force blk_status_t)128)

const char *bch2_blk_status_to_str(blk_status_t);

enum bch_write_flags {
        BCH_WRITE_ALLOC_NOWAIT          = (1 << 0),
        BCH_WRITE_CACHED                = (1 << 1),
        BCH_WRITE_FLUSH                 = (1 << 2),
        BCH_WRITE_DATA_ENCODED          = (1 << 3),
        BCH_WRITE_PAGES_STABLE          = (1 << 4),
        BCH_WRITE_PAGES_OWNED           = (1 << 5),
        BCH_WRITE_ONLY_SPECIFIED_DEVS   = (1 << 6),
        BCH_WRITE_WROTE_DATA_INLINE     = (1 << 7),
        BCH_WRITE_FROM_INTERNAL         = (1 << 8),
        BCH_WRITE_CHECK_ENOSPC          = (1 << 9),

        /* Internal: */
        BCH_WRITE_JOURNAL_SEQ_PTR       = (1 << 10),
        BCH_WRITE_SKIP_CLOSURE_PUT      = (1 << 11),
        BCH_WRITE_DONE                  = (1 << 12),
};

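/*
 * Returns where the journal sequence number for this write should be stored:
 * the caller-supplied pointer if BCH_WRITE_JOURNAL_SEQ_PTR is set, otherwise
 * the op's own journal_seq field.
 */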
static inline u64 *op_journal_seq(struct bch_write_op *op)
{
        return (op->flags & BCH_WRITE_JOURNAL_SEQ_PTR)
                ? op->journal_seq_p : &op->journal_seq;
}

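/*
 * Index updates for copygc writes run out of the dedicated copygc workqueue;
 * all other writes use the regular btree update workqueue.
 */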
static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op)
{
        return op->alloc_reserve == RESERVE_MOVINGGC
                ? op->c->copygc_wq
                : op->c->btree_update_wq;
}

int bch2_sum_sector_overwrites(struct btree_trans *, struct btree_iter *,
                               struct bkey_i *, bool *, s64 *, s64 *);
int bch2_extent_update(struct btree_trans *, subvol_inum,
                       struct btree_iter *, struct bkey_i *,
                       struct disk_reservation *, u64 *, u64, s64 *, bool);

int bch2_fpunch_at(struct btree_trans *, struct btree_iter *,
                   subvol_inum, u64, s64 *);
int bch2_fpunch(struct bch_fs *c, subvol_inum, u64, u64, s64 *);

int bch2_write_index_default(struct bch_write_op *);

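/* Initialize a write op with default values derived from @c and @opts: */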
static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
                                      struct bch_io_opts opts)
{
        op->c                   = c;
        op->end_io              = NULL;
        op->flags               = 0;
        op->written             = 0;
        op->error               = 0;
        op->csum_type           = bch2_data_checksum_type(c, opts.data_checksum);
        op->compression_type    = bch2_compression_opt_to_type[opts.compression];
        op->nr_replicas         = 0;
        op->nr_replicas_required = c->opts.data_replicas_required;
        op->alloc_reserve       = RESERVE_NONE;
        op->incompressible      = 0;
        op->open_buckets.nr     = 0;
        op->devs_have.nr        = 0;
        op->target              = 0;
        op->opts                = opts;
        op->subvol              = 0;
        op->pos                 = POS_MAX;
        op->version             = ZERO_VERSION;
        op->write_point         = (struct write_point_specifier) { 0 };
        op->res                 = (struct disk_reservation) { 0 };
        op->journal_seq         = 0;
        op->new_i_size          = U64_MAX;
        op->i_sectors_delta     = 0;
        op->index_update_fn     = bch2_write_index_default;
}

void bch2_write(struct closure *);

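/* Zero the bch_write_bio fields that precede the embedded struct bio: */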
static inline struct bch_write_bio *wbio_init(struct bio *bio)
{
        struct bch_write_bio *wbio = to_wbio(bio);

        memset(wbio, 0, offsetof(struct bch_write_bio, bio));
        return wbio;
}

struct bch_devs_mask;
struct cache_promote_op;
struct extent_ptr_decoded;

int __bch2_read_indirect_extent(struct btree_trans *, unsigned *,
                                struct bkey_buf *);

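/*
 * Reflink pointers are indirect: redirect the lookup to the reflink btree and
 * resolve the indirection. Other key types are read from @data_btree as is.
 */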
static inline int bch2_read_indirect_extent(struct btree_trans *trans,
                                            enum btree_id *data_btree,
                                            unsigned *offset_into_extent,
                                            struct bkey_buf *k)
{
        if (k->k->k.type != KEY_TYPE_reflink_p)
                return 0;

        *data_btree = BTREE_ID_reflink;
        return __bch2_read_indirect_extent(trans, offset_into_extent, k);
}

enum bch_read_flags {
        BCH_READ_RETRY_IF_STALE         = 1 << 0,
        BCH_READ_MAY_PROMOTE            = 1 << 1,
        BCH_READ_USER_MAPPED            = 1 << 2,
        BCH_READ_NODECODE               = 1 << 3,
        BCH_READ_LAST_FRAGMENT          = 1 << 4,

        /* internal: */
        BCH_READ_MUST_BOUNCE            = 1 << 5,
        BCH_READ_MUST_CLONE             = 1 << 6,
        BCH_READ_IN_RETRY               = 1 << 7,
};

int __bch2_read_extent(struct btree_trans *, struct bch_read_bio *,
                       struct bvec_iter, struct bpos, enum btree_id,
                       struct bkey_s_c, unsigned,
                       struct bch_io_failures *, unsigned);

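/* Read an extent using the bio's own iterator, with no failure tracking: */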
static inline void bch2_read_extent(struct btree_trans *trans,
                        struct bch_read_bio *rbio, struct bpos read_pos,
                        enum btree_id data_btree, struct bkey_s_c k,
                        unsigned offset_into_extent, unsigned flags)
{
        __bch2_read_extent(trans, rbio, rbio->bio.bi_iter, read_pos,
                           data_btree, k, offset_into_extent, NULL, flags);
}

void __bch2_read(struct bch_fs *, struct bch_read_bio *, struct bvec_iter,
                 subvol_inum, struct bch_io_failures *, unsigned flags);

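/*
 * High level read entry point: reads with retry-if-stale, promotion and
 * user-mapped flags set, tracking failures in a local bch_io_failures.
 */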
static inline void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
                             subvol_inum inum)
{
        struct bch_io_failures failed = { .nr = 0 };

        BUG_ON(rbio->_state);

        rbio->c = c;
        rbio->start_time = local_clock();
        rbio->subvol = inum.subvol;

        __bch2_read(c, rbio, rbio->bio.bi_iter, inum, &failed,
                    BCH_READ_RETRY_IF_STALE|
                    BCH_READ_MAY_PROMOTE|
                    BCH_READ_USER_MAPPED);
}

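/* Minimal initialization of the bch_read_bio wrapping @bio: */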
static inline struct bch_read_bio *rbio_init(struct bio *bio,
                                             struct bch_io_opts opts)
{
        struct bch_read_bio *rbio = to_rbio(bio);

        rbio->_state    = 0;
        rbio->promote   = NULL;
        rbio->opts      = opts;
        return rbio;
}

void bch2_fs_io_exit(struct bch_fs *);
int bch2_fs_io_init(struct bch_fs *);

#endif /* _BCACHEFS_IO_H */