1 #ifndef _BCACHE_IO_TYPES_H
2 #define _BCACHE_IO_TYPES_H
4 #include "btree_types.h"
5 #include "buckets_types.h"
6 #include "keylist_types.h"
8 #include <linux/llist.h>
9 #include <linux/workqueue.h>
13 * Reads will often have to be split, and if the extent being read from
14 * was checksummed or compressed we'll also have to allocate bounce
15 * buffers and copy the data back into the original bio.
17 * If we didn't have to split, we have to save and restore the original
18 * bi_end_io - @split below indicates which:
	/*
	 * NOTE(review): per the comment above, @split selects which of these
	 * two is live (they appear to be union alternatives; the surrounding
	 * union braces are not visible in this chunk — confirm in full file):
	 */
	struct bch_read_bio *parent;
	bio_end_io_t *orig_bi_end_io;

	/*
	 * Saved copy of parent->bi_iter, from submission time - allows us to
	 * resubmit on IO error, and also to copy data back to the original bio
	 * when we're bouncing:
	 */
	struct bvec_iter parent_iter;

	/* submission timestamp, presumably microseconds — TODO confirm units */
	unsigned submit_time_us;

	/* location, checksum/compression descriptor and version of the extent read */
	struct bch_extent_ptr ptr;
	struct bch_extent_crc128 crc;
	struct bversion version;

	/* in-flight promote op triggered by this read, if any — confirm lifetime */
	struct cache_promote_op *promote;

	/*
	 * If we have to retry the read (IO error, checksum failure, read stale
	 * data (raced with allocator)), we retry the portion of the parent bio
	 * that failed (i.e. this bio's portion, parent_iter).
	 *
	 * But we need to stash the inode somewhere:
	 */

	/* deferred completion work (workqueue context) */
	struct work_struct work;
59 static inline struct bch_read_bio *
60 bch2_rbio_parent(struct bch_read_bio *rbio)
62 return rbio->split ? rbio->parent : rbio;
struct bch_write_bio {
	/* submission timestamp, presumably microseconds — TODO confirm units */
	unsigned submit_time_us;

	/* Only for btree writes: */
	/*
	 * NOTE(review): presumably set when the bio's buffer came from a
	 * mempool so completion returns it there — confirm against free path.
	 */
	unsigned used_mempool:1;
/*
 * Bookkeeping for hook-mediated extent insertions.
 * NOTE(review): the counter fields the comments below describe are not
 * visible in this chunk — confirm names/types in the full file.
 */
struct bch_replace_info {
	struct extent_insert_hook hook;
	/* How many insertions succeeded */
	/* How many insertions failed */
	/* workqueue the write's IO completions run on */
	struct workqueue_struct *io_wq;
	struct bch_write_bio *bio;

	unsigned written; /* sectors */

	/* per-write knobs, 4 bits each */
	unsigned csum_type:4;
	unsigned compression_type:4;
	unsigned nr_replicas:4;
	unsigned alloc_reserve:4;

	struct bversion version;

	/* For BCH_WRITE_DATA_COMPRESSED: */
	struct bch_extent_crc128 crc;

	/* disk space reserved up front for this write */
	struct disk_reservation res;

	/* allocation target the write's buckets come from */
	struct write_point *wp;

	/*
	 * NOTE(review): next/expires look like intrusive-list linkage plus a
	 * deadline (jiffies?) for deferred writes — usage not visible here,
	 * confirm against the throttle/flush code.
	 */
	struct bch_write_op *next;
	unsigned long expires;

	/*
	 * If caller wants to flush but hasn't passed us a journal_seq ptr, we
	 * still need to stash the journal_seq somewhere:
	 */

	/*
	 * Hook invoked with this op to update the index once data is written —
	 * presumably inserting @insert_keys; confirm default implementation.
	 */
	int (*index_update_fn)(struct bch_write_op *);

	/* keys to insert, with inline storage sized for two max-size extent keys */
	struct keylist insert_keys;
	u64 inline_keys[BKEY_EXTENT_U64s_MAX * 2];
145 #endif /* _BCACHE_IO_TYPES_H */