]> git.sesse.net Git - bcachefs-tools-debian/blob - libbcache/io_types.h
bcache in userspace; userspace fsck
[bcachefs-tools-debian] / libbcache / io_types.h
1 #ifndef _BCACHE_IO_TYPES_H
2 #define _BCACHE_IO_TYPES_H
3
4 #include "btree_types.h"
5 #include "buckets_types.h"
6 #include "keylist_types.h"
7
8 #include <linux/llist.h>
9 #include <linux/workqueue.h>
10
/*
 * Per-read private state wrapping a struct bio.  Tracks the (possibly
 * split) parent read, the extent crc/pointer being read from, and the
 * bookkeeping needed to retry or bounce the read.  The embedded bio is
 * the last member, so the wrapper can be recovered from the bio -
 * presumably via container_of(); verify against the read path.
 */
struct bch_read_bio {
	/*
	 * Reads will often have to be split, and if the extent being read from
	 * was checksummed or compressed we'll also have to allocate bounce
	 * buffers and copy the data back into the original bio.
	 *
	 * If we didn't have to split, we have to save and restore the original
	 * bi_end_io - @split below indicates which:
	 */
	union {
	struct bch_read_bio	*parent;
	bio_end_io_t		*orig_bi_end_io;
	};

	/*
	 * Saved copy of parent->bi_iter, from submission time - allows us to
	 * resubmit on IO error, and also to copy data back to the original bio
	 * when we're bouncing:
	 */
	struct bvec_iter	parent_iter;

	/*
	 * If we have to retry the read (IO error, checksum failure, read stale
	 * data (raced with allocator), we retry the portion of the parent bio
	 * that failed (i.e. this bio's portion, parent_iter).
	 *
	 * But we need to stash the inode somewhere:
	 */
	u64			inode;

	/* submission timestamp - per the _us suffix, presumably microseconds,
	 * for IO latency accounting; confirm against the submit path */
	unsigned		submit_time_us;
	u16			flags;
	u8			bounce:1,	/* data is in a bounce buffer */
				split:1;	/* selects @parent vs @orig_bi_end_io above */

	/* crc (checksum/compression) info of the extent being read */
	struct bch_extent_crc64	crc;
	/* the specific replica pointer this read was issued against */
	struct bch_extent_ptr	ptr;
	struct cache		*ca;

	/* non-NULL when this read also promotes data - TODO confirm semantics
	 * against cache_promote_op users */
	struct cache_promote_op	*promote;

	/* bio_decompress_worker list */
	struct llist_node	list;

	/* must be last - the caller's bio is embedded here */
	struct bio		bio;
};
57
58 static inline struct bch_read_bio *
59 bch_rbio_parent(struct bch_read_bio *rbio)
60 {
61         return rbio->split ? rbio->parent : rbio;
62 }
63
/*
 * Per-device write state wrapping a struct bio.  The embedded bio is the
 * last member, so the wrapper can be recovered from the completed bio -
 * presumably via container_of(); verify against the write completion path.
 */
struct bch_write_bio {
	struct cache_set	*c;
	struct cache		*ca;	/* device this bio was submitted to */
	union {
		struct bio	*orig;	/* original caller bio, when split */
		struct closure	*cl;
	};

	/* submission timestamp - per the _us suffix, presumably microseconds */
	unsigned		submit_time_us;
	unsigned		split:1,	/* selects @orig vs @cl above - TODO confirm */
				bounce:1,	/* data was copied to a bounce buffer */
				put_bio:1;	/* drop a bio ref on completion - verify */

	/* Only for btree writes: */
	unsigned		used_mempool:1;
	u8			order;		/* allocation order of the buffer - verify */

	/* must be last - the wrapped bio is embedded here */
	struct bio		bio;
};
83
/*
 * State for a conditional (compare-and-replace style) extent insertion:
 * @hook is invoked by the extent insert path, and the counters record
 * how many of the attempted insertions took effect.
 */
struct bch_replace_info {
	struct extent_insert_hook	hook;
	/* How many insertions succeeded */
	unsigned			successes;
	/* How many insertions failed */
	unsigned			failures;
	/* the key being inserted, padded to the maximum on-stack key size */
	BKEY_PADDED(key);
};
92
/*
 * State for one logical write: carries the data bio, allocation and
 * replication parameters, the disk space reservation, and the keys to be
 * inserted into the extents btree once the data writes complete.  Driven
 * asynchronously via the embedded closure.
 */
struct bch_write_op {
	struct closure		cl;	/* async control flow for this write */
	struct cache_set	*c;
	struct workqueue_struct	*io_wq;
	struct bch_write_bio	*bio;	/* data being written */

	unsigned		written; /* sectors */

	short			error;	/* first error encountered, if any */

	u16			flags;	/* BCH_WRITE_* flags - TODO confirm */
	unsigned		compression_type:4;
	unsigned		nr_replicas:4;	/* replicas to write */
	unsigned		alloc_reserve:4; /* which allocation reserve to use */

	struct bpos		pos;	/* position this write starts at */
	unsigned		version;

	/* For BCH_WRITE_DATA_COMPRESSED: */
	struct bch_extent_crc64	crc;
	unsigned		size;

	/* disk space reserved up front for this write */
	struct disk_reservation	res;

	struct write_point	*wp;	/* where new data is being allocated from */

	/*
	 * Two mutually exclusive uses of the same storage - presumably
	 * open_buckets while the write is in flight, next/expires when the
	 * op is queued/throttled; TODO confirm against the write path:
	 */
	union {
	u8			open_buckets[16];
	struct {
	struct bch_write_op	*next;
	unsigned long		expires;
	};
	};

	/*
	 * If caller wants to flush but hasn't passed us a journal_seq ptr, we
	 * still need to stash the journal_seq somewhere:
	 */
	union {
		u64			*journal_seq_p;
		u64			journal_seq;
	};

	/* hook invoked to insert the completed write's keys into the btree */
	int			(*index_update_fn)(struct bch_write_op *);

	/* keys to insert on completion; inline_keys is their initial
	 * (stack-avoiding) backing storage */
	struct keylist		insert_keys;
	u64			inline_keys[BKEY_EXTENT_U64s_MAX * 2];
};
141
/*
 * Per-worker context for deferred read decompression: completed read bios
 * are pushed onto the lock-free @bio_list (matching bch_read_bio.list)
 * and drained by @work.
 */
struct bio_decompress_worker {
	struct cache_set		*c;
	struct work_struct		work;	/* drains bio_list */
	struct llist_head		bio_list; /* pending bch_read_bios, via ->list */
};
147
148 #endif /* _BCACHE_IO_TYPES_H */