1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_ALLOC_BACKGROUND_H
3 #define _BCACHEFS_ALLOC_BACKGROUND_H
6 #include "alloc_types.h"
/*
 * In-memory, unpacked representation of an on-disk alloc key.
 * The fields are generated by expanding the x() macro below over the
 * alloc field list.  NOTE(review): the expansion site and the rest of
 * the struct body are not visible in this chunk — confirm against the
 * full header.
 */
11 struct bkey_alloc_unpacked {
20 #define x(_name, _bits) u##_bits _name;
25 /* How out of date a pointer gen is allowed to be: */
/*
 * Once alloc_gc_gen() reaches this bound, bucket_state() below reports
 * the bucket as BUCKET_need_gc_gens.
 */
26 #define BUCKET_GC_GEN_MAX 96U
/*
 * Gen staleness of a bucket: the distance between its current gen and
 * its oldest_gen.  The subtraction is done in u8, so it wraps —
 * NOTE(review): presumably intentional to handle gen wraparound; confirm.
 */
28 static inline u8 alloc_gc_gen(struct bkey_alloc_unpacked a)
30 return a.gen - a.oldest_gen;
/* Human-readable names for the bucket_state enum values. */
41 extern const char * const bch2_bucket_states[];
/*
 * Classify a bucket from its unpacked alloc info.  Checks visible here:
 * dirty_sectors/stripe, a need-discard case, and gc-gen staleness
 * (alloc_gc_gen() >= BUCKET_GC_GEN_MAX -> BUCKET_need_gc_gens).
 * NOTE(review): several branches and return statements are not visible
 * in this chunk — do not assume this is the complete classification.
 */
43 static inline enum bucket_state bucket_state(struct bkey_alloc_unpacked a)
45 if (a.dirty_sectors || a.stripe)
51 return BUCKET_need_discard;
52 if (alloc_gc_gen(a) >= BUCKET_GC_GEN_MAX)
53 return BUCKET_need_gc_gens;
/*
 * LRU sort key for a bucket: cached buckets use their read_time,
 * every other bucket state maps to 0.
 */
57 static inline u64 alloc_lru_idx(struct bkey_alloc_unpacked a)
59 return bucket_state(a) == BUCKET_cached ? a.read_time : 0;
/*
 * High bits of a freespace btree key: the bucket's gc gen with the low
 * 4 bits dropped, shifted into bits 56+ of the offset.  Presumably this
 * groups freespace keys by gen staleness — NOTE(review): confirm against
 * the freespace btree code.
 */
62 static inline u64 alloc_freespace_genbits(struct bkey_alloc_unpacked a)
64 return ((u64) alloc_gc_gen(a) >> 4) << 56;
/*
 * Position of a bucket in the freespace btree: POS(device, bucket)
 * with alloc_freespace_genbits() OR'd into the high offset bits.
 */
67 static inline struct bpos alloc_freespace_pos(struct bkey_alloc_unpacked a)
69 return POS(a.dev, a.bucket | alloc_freespace_genbits(a));
72 /* returns true if not equal */
/*
 * Field-by-field inequality of two unpacked alloc keys: the fixed
 * fields (gen, oldest_gen, data_type) are compared explicitly, the
 * remaining fields via expansion of the x() macro.  NOTE(review): the
 * macro expansion site is not visible in this chunk.
 */
73 static inline bool bkey_alloc_unpacked_cmp(struct bkey_alloc_unpacked l,
74 struct bkey_alloc_unpacked r)
76 return l.gen != r.gen ||
77 l.oldest_gen != r.oldest_gen ||
78 l.data_type != r.data_type
79 #define x(_name, ...) || l._name != r._name
/*
 * Buffer guaranteed large enough to hold a packed alloc key: a
 * bch_alloc_v3 value plus pad bytes for the worst-case packed size of
 * the v2 field list (sum of each field's _bits / 8 via the x() macro).
 * Packed and 8-byte aligned.  NOTE(review): at least one member line is
 * not visible in this chunk.
 */
85 struct bkey_alloc_buf {
87 struct bch_alloc_v3 v;
89 #define x(_name, _bits) + _bits / 8
90 u8 _pad[0 + BCH_ALLOC_FIELDS_V2()];
92 } __attribute__((packed, aligned(8)));
/* Unpack an alloc key (any of the three versions below) into the common in-memory form. */
94 struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c);
/* Pack an unpacked alloc key into a bkey_alloc_buf.  NOTE(review): allocation/ownership of the returned buffer not visible here. */
95 struct bkey_alloc_buf *bch2_alloc_pack(struct btree_trans *,
96 const struct bkey_alloc_unpacked);
97 int bch2_alloc_write(struct btree_trans *, struct btree_iter *,
98 struct bkey_alloc_unpacked *, unsigned);
100 int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int);
/* Allocator scan batch size: 1/512 (>> 9) of the device's buckets, at least 1. */
102 #define ALLOC_SCAN_BATCH(ca) max_t(size_t, 1, (ca)->mi.nbuckets >> 9)
/* Per-version validity checks plus a shared to_text formatter; NULL return from *_invalid means valid, otherwise an error string. NOTE(review): return convention inferred — confirm against bkey_ops users. */
104 const char *bch2_alloc_v1_invalid(const struct bch_fs *, struct bkey_s_c);
105 const char *bch2_alloc_v2_invalid(const struct bch_fs *, struct bkey_s_c);
106 const char *bch2_alloc_v3_invalid(const struct bch_fs *, struct bkey_s_c);
107 void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
/*
 * bkey_ops for the three alloc key versions: each gets its own
 * key_invalid check, all three share the same to_text and trigger
 * hooks.  NOTE(review): the closing braces of these initializers are
 * not visible in this chunk.
 */
109 #define bch2_bkey_ops_alloc (struct bkey_ops) { \
110 .key_invalid = bch2_alloc_v1_invalid, \
111 .val_to_text = bch2_alloc_to_text, \
112 .trans_trigger = bch2_trans_mark_alloc, \
113 .atomic_trigger = bch2_mark_alloc, \
116 #define bch2_bkey_ops_alloc_v2 (struct bkey_ops) { \
117 .key_invalid = bch2_alloc_v2_invalid, \
118 .val_to_text = bch2_alloc_to_text, \
119 .trans_trigger = bch2_trans_mark_alloc, \
120 .atomic_trigger = bch2_mark_alloc, \
123 #define bch2_bkey_ops_alloc_v3 (struct bkey_ops) { \
124 .key_invalid = bch2_alloc_v3_invalid, \
125 .val_to_text = bch2_alloc_to_text, \
126 .trans_trigger = bch2_trans_mark_alloc, \
127 .atomic_trigger = bch2_mark_alloc, \
/* True if the key is any version of alloc key (v1, v2 or v3). */
130 static inline bool bkey_is_alloc(const struct bkey *k)
132 return k->type == KEY_TYPE_alloc ||
133 k->type == KEY_TYPE_alloc_v2 ||
134 k->type == KEY_TYPE_alloc_v3;
/* Read alloc info into memory at startup.  NOTE(review): inferred from name; body not visible. */
137 int bch2_alloc_read(struct bch_fs *);
/* Transactional trigger for alloc key updates; referenced by the bkey_ops initializers above. */
139 int bch2_trans_mark_alloc(struct btree_trans *, struct bkey_s_c,
140 struct bkey_i *, unsigned);
141 int bch2_check_alloc_info(struct bch_fs *, bool);
142 void bch2_do_discards(struct bch_fs *);
/*
 * True when the device has buckets holding cached data and the total of
 * unavailable + cached buckets has fallen below 1/128 (>> 7) of the
 * device's buckets — the signal to start invalidating cached data
 * (see bch2_do_invalidates()).
 */
144 static inline bool should_invalidate_buckets(struct bch_dev *ca)
146 struct bch_dev_usage u = bch2_dev_usage_read(ca);
148 return u.d[BCH_DATA_cached].buckets &&
149 u.buckets_unavailable + u.d[BCH_DATA_cached].buckets <
150 ca->mi.nbuckets >> 7;
153 void bch2_do_invalidates(struct bch_fs *);
155 int bch2_fs_freespace_init(struct bch_fs *);
/* Recalculate filesystem-wide capacity.  NOTE(review): semantics inferred from name; body not visible. */
157 void bch2_recalc_capacity(struct bch_fs *);
/* Remove/add a device from/to the set the allocator draws from. */
159 void bch2_dev_allocator_remove(struct bch_fs *, struct bch_dev *);
160 void bch2_dev_allocator_add(struct bch_fs *, struct bch_dev *);
162 void bch2_fs_allocator_background_init(struct bch_fs *);
164 #endif /* _BCACHEFS_ALLOC_BACKGROUND_H */