1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_ALLOC_BACKGROUND_H
3 #define _BCACHEFS_ALLOC_BACKGROUND_H
6 #include "alloc_types.h"
9 extern const char * const bch2_allocator_states[];
/*
 * Unpacked, in-memory representation of an alloc btree key: the x-macro
 * below generates one fixed-width integer member (u<_bits> _name) per
 * alloc field.
 * NOTE(review): the field-list expansion and the closing brace fall
 * outside the visible chunk — presumably BCH_ALLOC_FIELDS_V2() followed
 * by #undef x; confirm against the full file.
 */
11 struct bkey_alloc_unpacked {
18 #define x(_name, _bits) u##_bits _name;
/*
 * Scratch buffer large enough to hold any packed alloc key: a struct
 * bch_alloc_v3 header plus worst-case space for every variable-width
 * alloc field.  Packed and 8-byte aligned so it can be written out as-is.
 */
23 struct bkey_alloc_buf {
25 struct bch_alloc_v3 v;
/* _pad is sized by summing _bits / 8 bytes for each alloc field: */
27 #define x(_name, _bits) + _bits / 8
28 u8 _pad[0 + BCH_ALLOC_FIELDS_V2()];
30 } __attribute__((packed, aligned(8)));
32 /* How out of date a pointer gen is allowed to be: */
33 #define BUCKET_GC_GEN_MAX 96U
/*
 * Field-by-field comparison of two unpacked alloc keys.  Despite the
 * "cmp" name this returns true when the keys DIFFER in any field:
 * gen, oldest_gen, data_type, plus one clause per x-macro alloc field.
 */
36 static inline bool bkey_alloc_unpacked_cmp(struct bkey_alloc_unpacked l,
37 struct bkey_alloc_unpacked r)
39 return l.gen != r.gen ||
40 l.oldest_gen != r.oldest_gen ||
41 l.data_type != r.data_type
/* Appends "|| l.<field> != r.<field>" for each remaining alloc field: */
42 #define x(_name, ...) || l._name != r._name
/* Convert between the on-disk (packed) and in-memory (unpacked) forms: */
48 struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c);
49 void bch2_alloc_pack(struct bch_fs *, struct bkey_alloc_buf *,
50 const struct bkey_alloc_unpacked,
/*
 * Update a bucket's last-I/O time in a transaction.
 * NOTE(review): the unnamed (unsigned, size_t, int) parameters are
 * presumably (dev, bucket, rw) — confirm against the definition.
 */
52 int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int);
/*
 * Build an unpacked alloc key from in-memory bucket state: the device
 * index and bucket number come from the iterator position (pos.inode /
 * pos.offset), the remaining fields from struct bucket / bucket_mark.
 * NOTE(review): at least one initializer line (likely .gen) is outside
 * the visible chunk — confirm in the full file.
 */
54 static inline struct bkey_alloc_unpacked
55 alloc_mem_to_key(struct btree_iter *iter,
56 struct bucket *g, struct bucket_mark m)
58 return (struct bkey_alloc_unpacked) {
59 .dev = iter->pos.inode,
60 .bucket = iter->pos.offset,
62 .oldest_gen = g->oldest_gen,
63 .data_type = m.data_type,
64 .dirty_sectors = m.dirty_sectors,
65 .cached_sectors = m.cached_sectors,
66 .read_time = g->io_time[READ],
67 .write_time = g->io_time[WRITE],
/* Buckets per alloc-scan batch: 1/512th of the device's buckets, min 1. */
71 #define ALLOC_SCAN_BATCH(ca) max_t(size_t, 1, (ca)->mi.nbuckets >> 9)
/*
 * Per-version alloc key validation (returns an error string; presumably
 * NULL when the key is valid — confirm against the definitions) and a
 * shared human-readable formatter:
 */
73 const char *bch2_alloc_v1_invalid(const struct bch_fs *, struct bkey_s_c);
74 const char *bch2_alloc_v2_invalid(const struct bch_fs *, struct bkey_s_c);
75 const char *bch2_alloc_v3_invalid(const struct bch_fs *, struct bkey_s_c);
76 void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
/* Operations table for KEY_TYPE_alloc (v1): validation + formatting. */
78 #define bch2_bkey_ops_alloc (struct bkey_ops) { \
79 .key_invalid = bch2_alloc_v1_invalid, \
80 .val_to_text = bch2_alloc_to_text, \
/* Operations table for KEY_TYPE_alloc_v2: validation + formatting. */
83 #define bch2_bkey_ops_alloc_v2 (struct bkey_ops) { \
84 .key_invalid = bch2_alloc_v2_invalid, \
85 .val_to_text = bch2_alloc_to_text, \
/* Operations table for KEY_TYPE_alloc_v3: validation + formatting. */
88 #define bch2_bkey_ops_alloc_v3 (struct bkey_ops) { \
89 .key_invalid = bch2_alloc_v3_invalid, \
90 .val_to_text = bch2_alloc_to_text, \
/* True if @k is any version of an alloc key (v1, v2 or v3). */
93 static inline bool bkey_is_alloc(const struct bkey *k)
95 return k->type == KEY_TYPE_alloc ||
96 k->type == KEY_TYPE_alloc_v2 ||
97 k->type == KEY_TYPE_alloc_v3;
100 int bch2_alloc_read(struct bch_fs *);
/*
 * Wake the per-device allocator thread, if one is running.
 * NOTE(review): ca->alloc_thread is read with rcu_dereference(), so this
 * must execute under rcu_read_lock(); the lock/unlock and the actual
 * wakeup call are outside the visible lines — confirm in the full file.
 */
102 static inline void bch2_wake_allocator(struct bch_dev *ca)
104 struct task_struct *p;
107 p = rcu_dereference(ca->alloc_thread);
/*
 * Debug-only sanity check: walk every per-reserve freelist fifo plus
 * free_inc, checking whether a given bucket appears on any of them.
 * Compiled away unless expensive debug checks are enabled.
 * NOTE(review): the bucket parameter and the assertion inside the loops
 * (presumably a BUG_ON on a match) are not visible in this chunk.
 */
113 static inline void verify_not_on_freelist(struct bch_fs *c, struct bch_dev *ca,
116 if (bch2_expensive_debug_checks) {
/* Scan each reserve's freelist, then the free_inc fifo: */
121 for (j = 0; j < RESERVE_NR; j++)
122 fifo_for_each_entry(i, &ca->free[j], iter)
124 fifo_for_each_entry(i, &ca->free_inc, iter)
/* Recompute filesystem capacity (names suggest: after device changes): */
129 void bch2_recalc_capacity(struct bch_fs *);
/* Add/remove a device from the set allocations may be made from: */
131 void bch2_dev_allocator_remove(struct bch_fs *, struct bch_dev *);
132 void bch2_dev_allocator_add(struct bch_fs *, struct bch_dev *);
/* Per-device allocator thread lifecycle: quiesce, stop, (re)start. */
134 void bch2_dev_allocator_quiesce(struct bch_fs *, struct bch_dev *);
135 void bch2_dev_allocator_stop(struct bch_dev *);
136 int bch2_dev_allocator_start(struct bch_dev *);
/* Persist alloc info and set up background allocator state at mount: */
138 int bch2_alloc_write(struct bch_fs *, unsigned);
139 void bch2_fs_allocator_background_init(struct bch_fs *);
/* Dump open-bucket state into @printbuf for debugfs/sysfs consumption: */
141 void bch2_open_buckets_to_text(struct printbuf *, struct bch_fs *);
143 #endif /* _BCACHEFS_ALLOC_BACKGROUND_H */