1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_ALLOC_BACKGROUND_H
3 #define _BCACHEFS_ALLOC_BACKGROUND_H
6 #include "alloc_types.h"
11 /* How out of date a pointer gen is allowed to be: */
12 #define BUCKET_GC_GEN_MAX 96U
14 static inline u8 alloc_gc_gen(struct bch_alloc_v4 a)
16 return a.gen - a.oldest_gen;
27 extern const char * const bch2_bucket_states[];
29 static inline enum bucket_state bucket_state(struct bch_alloc_v4 a)
31 if (a.dirty_sectors || a.stripe)
36 if (BCH_ALLOC_V4_NEED_DISCARD(&a))
37 return BUCKET_need_discard;
38 if (alloc_gc_gen(a) >= BUCKET_GC_GEN_MAX)
39 return BUCKET_need_gc_gens;
43 static inline u64 alloc_lru_idx(struct bch_alloc_v4 a)
45 return bucket_state(a) == BUCKET_cached ? a.io_time[READ] : 0;
48 static inline u64 alloc_freespace_genbits(struct bch_alloc_v4 a)
50 return ((u64) alloc_gc_gen(a) >> 4) << 56;
53 static inline struct bpos alloc_freespace_pos(struct bpos pos, struct bch_alloc_v4 a)
55 pos.offset |= alloc_freespace_genbits(a);
59 struct bkey_i_alloc_v4 *
60 bch2_trans_start_alloc_update(struct btree_trans *, struct btree_iter *, struct bpos);
62 void bch2_alloc_to_v4(struct bkey_s_c, struct bch_alloc_v4 *);
63 struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *, struct bkey_s_c);
65 int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int);
67 #define ALLOC_SCAN_BATCH(ca) max_t(size_t, 1, (ca)->mi.nbuckets >> 9)
69 const char *bch2_alloc_v1_invalid(const struct bch_fs *, struct bkey_s_c);
70 const char *bch2_alloc_v2_invalid(const struct bch_fs *, struct bkey_s_c);
71 const char *bch2_alloc_v3_invalid(const struct bch_fs *, struct bkey_s_c);
72 const char *bch2_alloc_v4_invalid(const struct bch_fs *, struct bkey_s_c k);
73 void bch2_alloc_v4_swab(struct bkey_s);
74 void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
/* bkey ops for KEY_TYPE_alloc (v1 on-disk format). */
#define bch2_bkey_ops_alloc (struct bkey_ops) {		\
	.key_invalid	= bch2_alloc_v1_invalid,	\
	.val_to_text	= bch2_alloc_to_text,		\
	.trans_trigger	= bch2_trans_mark_alloc,	\
	.atomic_trigger	= bch2_mark_alloc,		\
}
/* bkey ops for KEY_TYPE_alloc_v2. */
#define bch2_bkey_ops_alloc_v2 (struct bkey_ops) {	\
	.key_invalid	= bch2_alloc_v2_invalid,	\
	.val_to_text	= bch2_alloc_to_text,		\
	.trans_trigger	= bch2_trans_mark_alloc,	\
	.atomic_trigger	= bch2_mark_alloc,		\
}
/* bkey ops for KEY_TYPE_alloc_v3. */
#define bch2_bkey_ops_alloc_v3 (struct bkey_ops) {	\
	.key_invalid	= bch2_alloc_v3_invalid,	\
	.val_to_text	= bch2_alloc_to_text,		\
	.trans_trigger	= bch2_trans_mark_alloc,	\
	.atomic_trigger	= bch2_mark_alloc,		\
}
/* bkey ops for KEY_TYPE_alloc_v4; the only version with a swab hook. */
#define bch2_bkey_ops_alloc_v4 (struct bkey_ops) {	\
	.key_invalid	= bch2_alloc_v4_invalid,	\
	.val_to_text	= bch2_alloc_to_text,		\
	.swab		= bch2_alloc_v4_swab,		\
	.trans_trigger	= bch2_trans_mark_alloc,	\
	.atomic_trigger	= bch2_mark_alloc,		\
}
105 static inline bool bkey_is_alloc(const struct bkey *k)
107 return k->type == KEY_TYPE_alloc ||
108 k->type == KEY_TYPE_alloc_v2 ||
109 k->type == KEY_TYPE_alloc_v3;
112 int bch2_alloc_read(struct bch_fs *);
114 int bch2_trans_mark_alloc(struct btree_trans *, struct bkey_s_c,
115 struct bkey_i *, unsigned);
116 int bch2_check_alloc_info(struct bch_fs *, bool);
117 void bch2_do_discards(struct bch_fs *);
119 static inline bool should_invalidate_buckets(struct bch_dev *ca)
121 struct bch_dev_usage u = bch2_dev_usage_read(ca);
123 return u.d[BCH_DATA_cached].buckets &&
124 u.buckets_unavailable + u.d[BCH_DATA_cached].buckets <
125 ca->mi.nbuckets >> 7;
128 void bch2_do_invalidates(struct bch_fs *);
130 int bch2_fs_freespace_init(struct bch_fs *);
132 void bch2_recalc_capacity(struct bch_fs *);
134 void bch2_dev_allocator_remove(struct bch_fs *, struct bch_dev *);
135 void bch2_dev_allocator_add(struct bch_fs *, struct bch_dev *);
137 void bch2_fs_allocator_background_init(struct bch_fs *);
139 #endif /* _BCACHEFS_ALLOC_BACKGROUND_H */