1 #ifndef _BCACHE_BLOCKDEV_H
2 #define _BCACHE_BLOCKDEV_H
4 #include "blockdev_types.h"
/*
 * NOTE(review): this region is a fragment of a struct definition -- the
 * opening "struct ... {" and closing "};" fall outside the visible chunk,
 * and each line carries a stray integer that looks like a line number from
 * a pasted listing rather than a C token.  Presumably this is the per-request
 * stack-frame state for blockdev I/O -- TODO confirm against the full file.
 */
8 /* Stack frame for bio_complete */
/* read-side and write-side bio wrappers for this request */
12 struct bch_read_bio rbio;
13 struct bch_write_bio wbio;
/* the bcache device this request targets */
17 struct bcache_device *d;
22 /* Flags only used for reads */
23 unsigned recoverable:1;
24 unsigned read_dirty_data:1;
25 unsigned cache_miss:1;
/* NOTE(review): comment fragment -- its /​* and *​/ delimiters are outside this chunk */
28 * For reads: bypass read from cache and insertion into cache
29 * For writes: discard key range from cache, sending the write to
30 * the backing device (if there is a backing device)
/* when the request started (units not visible here -- presumably jiffies; confirm) */
34 unsigned long start_time;
37 * Mostly only used for writes. For reads, we still make use of
38 * some trivial fields:
42 struct bch_write_op iop;
45 #ifndef NO_BCACHE_BLOCKDEV
47 extern struct kobj_type bch_cached_dev_ktype;
48 extern struct kobj_type bch_blockdev_volume_ktype;
50 void bch_write_bdev_super(struct cached_dev *, struct closure *);
52 void bch_cached_dev_release(struct kobject *);
53 void bch_blockdev_volume_release(struct kobject *);
55 int bch_cached_dev_attach(struct cached_dev *, struct cache_set *);
56 void bch_attach_backing_devs(struct cache_set *);
58 void bch_cached_dev_detach(struct cached_dev *);
59 void bch_cached_dev_run(struct cached_dev *);
60 void bch_blockdev_stop(struct bcache_device *);
62 bool bch_is_open_backing_dev(struct block_device *);
63 const char *bch_backing_dev_register(struct bcache_superblock *);
65 int bch_blockdev_volume_create(struct cache_set *, u64);
66 int bch_blockdev_volumes_start(struct cache_set *);
68 void bch_blockdevs_stop(struct cache_set *);
70 void bch_fs_blockdev_exit(struct cache_set *);
71 int bch_fs_blockdev_init(struct cache_set *);
72 void bch_blockdev_exit(void);
73 int bch_blockdev_init(void);
77 static inline void bch_write_bdev_super(struct cached_dev *dc,
78 struct closure *cl) {}
80 static inline void bch_cached_dev_release(struct kobject *kobj) {}
81 static inline void bch_blockdev_volume_release(struct kobject *kobj) {}
83 static inline int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
87 static inline void bch_attach_backing_devs(struct cache_set *c) {}
89 static inline void bch_cached_dev_detach(struct cached_dev *dc) {}
90 static inline void bch_cached_dev_run(struct cached_dev *dc) {}
91 static inline void bch_blockdev_stop(struct bcache_device *d) {}
93 static inline bool bch_is_open_backing_dev(struct block_device *bdev)
97 static inline const char *bch_backing_dev_register(struct bcache_superblock *sb)
99 return "not implemented";
102 static inline int bch_blockdev_volume_create(struct cache_set *c, u64 s) { return 0; }
103 static inline int bch_blockdev_volumes_start(struct cache_set *c) { return 0; }
105 static inline void bch_blockdevs_stop(struct cache_set *c) {}
106 static inline void bch_fs_blockdev_exit(struct cache_set *c) {}
107 static inline int bch_fs_blockdev_init(struct cache_set *c) { return 0; }
108 static inline void bch_blockdev_exit(void) {}
109 static inline int bch_blockdev_init(void) { return 0; }
113 static inline void cached_dev_put(struct cached_dev *dc)
115 if (atomic_dec_and_test(&dc->count))
116 schedule_work(&dc->detach);
119 static inline bool cached_dev_get(struct cached_dev *dc)
121 if (!atomic_inc_not_zero(&dc->count))
124 /* Paired with the mb in cached_dev_attach */
125 smp_mb__after_atomic();
129 static inline u64 bcache_dev_inum(struct bcache_device *d)
131 return d->inode.k.p.inode;
134 static inline struct bcache_device *bch_dev_find(struct cache_set *c, u64 inode)
136 return radix_tree_lookup(&c->devices, inode);
139 #endif /* _BCACHE_BLOCKDEV_H */