libbcache/writeback.h
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

#include "blockdev.h"
#include "buckets.h"

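/*
 * Thresholds on available cache space, as a percentage of total capacity:
 * below CUTOFF_WRITEBACK_SYNC, writeback caching is disabled entirely; below
 * CUTOFF_WRITEBACK, all writes (not just REQ_SYNC ones) are written back.
 * See should_writeback().
 */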
#define CUTOFF_WRITEBACK        60
#define CUTOFF_WRITEBACK_SYNC   30

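/* Total dirty sectors on a backing device: the sum of its per-stripe counts. */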
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
        uint64_t i, ret = 0;

        for (i = 0; i < d->nr_stripes; i++)
                ret += atomic_read(d->stripe_sectors_dirty + i);

        return ret;
}

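/*
 * Map a device offset (in sectors) to the index of the stripe containing it:
 * do_div() divides offset by stripe_size in place, leaving the quotient.
 */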
static inline unsigned offset_to_stripe(struct bcache_device *d,
                                        uint64_t offset)
{
        do_div(offset, d->stripe_size);
        return offset;
}

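/*
 * True if any stripe touched by the next nr_sectors starting at offset has
 * dirty sectors pending writeback.
 */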
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
                                           uint64_t offset,
                                           unsigned nr_sectors)
{
        unsigned stripe = offset_to_stripe(&dc->disk, offset);

        while (1) {
                if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
                        return true;

                if (nr_sectors <= dc->disk.stripe_size)
                        return false;

                nr_sectors -= dc->disk.stripe_size;
                stripe++;
        }
}

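/*
 * Decide whether a write bio should be cached (written back) rather than
 * passed through to the backing device:
 *
 * - never, unless the device is in writeback mode, and not while detaching
 *   or when available space is below CUTOFF_WRITEBACK_SYNC percent;
 * - always, if partial stripe writes are expensive and the bio overlaps an
 *   already-dirty stripe;
 * - not if the caller would otherwise skip the cache (would_skip);
 * - otherwise only for REQ_SYNC bios, or once available space drops below
 *   CUTOFF_WRITEBACK percent.
 */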
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
                                    unsigned cache_mode, bool would_skip)
{
        struct cache_set *c = dc->disk.c;
        u64 available = sectors_available(c);

        if (cache_mode != CACHE_MODE_WRITEBACK ||
            test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
            available * 100 < c->capacity * CUTOFF_WRITEBACK_SYNC)
                return false;

        if (dc->partial_stripes_expensive &&
            bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
                                    bio_sectors(bio)))
                return true;

        if (would_skip)
                return false;

        return (bio->bi_opf & REQ_SYNC) ||
                available * 100 < c->capacity * CUTOFF_WRITEBACK;
}

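/* Kick the writeback thread, if it was successfully created. */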
static inline void bch_writeback_queue(struct cached_dev *dc)
{
        if (!IS_ERR_OR_NULL(dc->writeback_thread))
                wake_up_process(dc->writeback_thread);
}

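/*
 * Note that the device now has dirty data: on the 0 -> 1 transition of
 * has_dirty (the atomic_xchg() makes the transition race-free), take a ref
 * on the device, mark the backing superblock dirty, and wake the writeback
 * thread.
 */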
static inline void bch_writeback_add(struct cached_dev *dc)
{
        if (!atomic_read(&dc->has_dirty) &&
            !atomic_xchg(&dc->has_dirty, 1)) {
                atomic_inc(&dc->count);

                if (BDEV_STATE(dc->disk_sb.sb) != BDEV_STATE_DIRTY) {
                        SET_BDEV_STATE(dc->disk_sb.sb, BDEV_STATE_DIRTY);
                        /* XXX: should do this synchronously */
                        bch_write_bdev_super(dc, NULL);
                }

                bch_writeback_queue(dc);
        }
}

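/* Dirty-tracking setup and writeback thread lifecycle: */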
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, u64, int);

void bch_writeback_recalc_oldest_gens(struct cache_set *);
void bch_sectors_dirty_init(struct cached_dev *, struct cache_set *);

void bch_cached_dev_writeback_stop(struct cached_dev *);
void bch_cached_dev_writeback_free(struct cached_dev *);
int bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);

#endif /* _BCACHE_WRITEBACK_H */