1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_ALLOC_BACKGROUND_H
3 #define _BCACHEFS_ALLOC_BACKGROUND_H
6 #include "alloc_types.h"
11 extern const char * const bch2_allocator_states[];
/*
 * In-memory, unpacked representation of an alloc key; the field list is
 * generated by an x-macro (one u<_bits> member per field).
 * NOTE(review): the x-macro invocation and closing brace are elided from
 * this view — confirm against the full source.
 */
13 struct bkey_alloc_unpacked {
20 #define x(_name, _bits) u##_bits _name;
25 /* How out of date a pointer gen is allowed to be: */
26 #define BUCKET_GC_GEN_MAX 96U
28 /* returns true if not equal */
29 static inline bool bkey_alloc_unpacked_cmp(struct bkey_alloc_unpacked l,
30 struct bkey_alloc_unpacked r)
32 return l.gen != r.gen ||
33 l.oldest_gen != r.oldest_gen ||
34 l.data_type != r.data_type
/*
 * Remaining per-field comparisons are appended by redefining x() and
 * expanding the field list; the expansion/#undef lines are elided here.
 */
35 #define x(_name, ...) || l._name != r._name
/*
 * Stack/transaction buffer big enough to hold a packed alloc key of the
 * newest version (bch_alloc_v3): _pad is sized by summing the byte widths
 * of the BCH_ALLOC_FIELDS_V2() field list via the x() macro.
 */
41 struct bkey_alloc_buf {
43 struct bch_alloc_v3 v;
45 #define x(_name, _bits) + _bits / 8
46 u8 _pad[0 + BCH_ALLOC_FIELDS_V2()];
48 } __attribute__((packed, aligned(8)));
/* Unpack any on-disk alloc key version into the common in-memory form. */
50 struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c);
/* Pack the unpacked form into a transaction-owned bkey_alloc_buf. */
51 struct bkey_alloc_buf *bch2_alloc_pack(struct btree_trans *,
52 const struct bkey_alloc_unpacked);
53 int bch2_alloc_write(struct btree_trans *, struct btree_iter *,
54 struct bkey_alloc_unpacked *, unsigned);
56 int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int);
/* Allocator scan batch: nbuckets / 512, clamped to at least 1. */
58 #define ALLOC_SCAN_BATCH(ca) max_t(size_t, 1, (ca)->mi.nbuckets >> 9)
/*
 * Per-version key validators and the shared to-text formatter.
 * NOTE(review): the "_invalid" naming suggests NULL-return == valid,
 * error string otherwise — confirm against the implementations.
 */
60 const char *bch2_alloc_v1_invalid(const struct bch_fs *, struct bkey_s_c);
61 const char *bch2_alloc_v2_invalid(const struct bch_fs *, struct bkey_s_c);
62 const char *bch2_alloc_v3_invalid(const struct bch_fs *, struct bkey_s_c);
63 void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
/*
 * bkey_ops tables for the three alloc key versions: each version gets its
 * own validator, while val_to_text and atomic_trigger are shared.
 * (Comments kept outside the macro bodies: inserting lines between the
 * backslash-continued lines would break the macro definitions; the closing
 * brace lines are elided from this view.)
 */
65 #define bch2_bkey_ops_alloc (struct bkey_ops) { \
66 .key_invalid = bch2_alloc_v1_invalid, \
67 .val_to_text = bch2_alloc_to_text, \
68 .atomic_trigger = bch2_mark_alloc, \
71 #define bch2_bkey_ops_alloc_v2 (struct bkey_ops) { \
72 .key_invalid = bch2_alloc_v2_invalid, \
73 .val_to_text = bch2_alloc_to_text, \
74 .atomic_trigger = bch2_mark_alloc, \
77 #define bch2_bkey_ops_alloc_v3 (struct bkey_ops) { \
78 .key_invalid = bch2_alloc_v3_invalid, \
79 .val_to_text = bch2_alloc_to_text, \
80 .atomic_trigger = bch2_mark_alloc, \
/* True if the key is any version of an alloc key (v1, v2, or v3). */
83 static inline bool bkey_is_alloc(const struct bkey *k)
85 return k->type == KEY_TYPE_alloc ||
86 k->type == KEY_TYPE_alloc_v2 ||
87 k->type == KEY_TYPE_alloc_v3;
/* Read alloc info at startup; bool flags' semantics not visible here —
 * check the definition/callers. */
90 int bch2_alloc_read(struct bch_fs *, bool, bool);
/*
 * Wake the device's allocator thread.
 * ca->alloc_thread is read with rcu_dereference(); the RCU read-side
 * lock and the actual wake_up call are elided from this view —
 * NOTE(review): confirm the surrounding rcu_read_lock()/unlock() pairing
 * in the full source.
 */
92 static inline void bch2_wake_allocator(struct bch_dev *ca)
94 struct task_struct *p;
97 p = rcu_dereference(ca->alloc_thread);
/*
 * Debug aid: scan every reserve fifo and free_inc for a bucket.
 * Only does work when bch2_expensive_debug_checks is set.
 * NOTE(review): the assertion inside the fifo loops (and the bucket
 * parameter) are elided from this view; the name implies it BUGs/asserts
 * if the bucket is found on a freelist — confirm in the full source.
 */
103 static inline void verify_not_on_freelist(struct bch_fs *c, struct bch_dev *ca,
106 if (bch2_expensive_debug_checks) {
111 for (j = 0; j < RESERVE_NR; j++)
112 fifo_for_each_entry(i, &ca->free[j], iter)
114 fifo_for_each_entry(i, &ca->free_inc, iter)
/* Capacity accounting and per-device allocator thread lifecycle. */
119 void bch2_recalc_capacity(struct bch_fs *);
121 void bch2_dev_allocator_remove(struct bch_fs *, struct bch_dev *);
122 void bch2_dev_allocator_add(struct bch_fs *, struct bch_dev *);
124 void bch2_dev_allocator_quiesce(struct bch_fs *, struct bch_dev *);
125 void bch2_dev_allocator_stop(struct bch_dev *);
126 int bch2_dev_allocator_start(struct bch_dev *);
/* One-time init of filesystem-wide allocator background state. */
128 void bch2_fs_allocator_background_init(struct bch_fs *);
130 #endif /* _BCACHEFS_ALLOC_BACKGROUND_H */