/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_GC_H
#define _BCACHEFS_BTREE_GC_H

#include "btree_types.h"

void bch2_coalesce(struct bch_fs *);

int bch2_gc(struct bch_fs *, struct journal_keys *, bool, bool);
int bch2_gc_gens(struct bch_fs *);
void bch2_gc_thread_stop(struct bch_fs *);
int bch2_gc_thread_start(struct bch_fs *);
void bch2_mark_dev_superblock(struct bch_fs *, struct bch_dev *, unsigned);

/*
 * For concurrent mark and sweep (with other index updates), we define a total
 * ordering of _all_ references GC walks:
 *
 * Note that some references will have the same GC position as others - e.g.
 * everything within the same btree node; in those cases we're relying on
 * whatever locking exists for where those references live, i.e. the write lock
 * on a btree node.
 *
 * That locking is also required to ensure GC doesn't pass the updater in
 * between the updater adding/removing the reference and updating the GC marks;
 * without that, we would at best double count sometimes.
 *
 * That part is important - whenever calling bch2_mark_pointers(), a lock _must_
 * be held that prevents GC from passing the position the updater is at.
 *
 * (What about the start of gc, when we're clearing all the marks? GC clears the
 * mark with the gc pos seqlock held, and bch_mark_bucket checks against the gc
 * position inside its cmpxchg loop, so crap magically works).
 */
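
/*
 * Illustrative sketch of the updater-side rule above (hypothetical pseudocode,
 * not this file's API): with a btree node b write locked, GC cannot advance
 * past gc_pos_btree_node(b), so an update is safe to do as:
 *
 *	// write lock held: GC is pinned before this node's position
 *	bch2_mark_pointers(...);	// update the marks (and GC's copy, if visited)
 *	// ...add/remove the reference itself...
 *	// write lock dropped: GC may now pass this position
 */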

/* Position of (the start of) a gc phase: */
static inline struct gc_pos gc_phase(enum gc_phase phase)
{
	return (struct gc_pos) {
		.phase	= phase,
		.pos	= POS_MIN,
		.level	= 0,
	};
}

static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
{
	if (l.phase != r.phase)
		return l.phase < r.phase ? -1 : 1;
	if (bkey_cmp(l.pos, r.pos))
		return bkey_cmp(l.pos, r.pos);
	if (l.level != r.level)
		return l.level < r.level ? -1 : 1;
	return 0;
}
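
/*
 * For example (a sketch, assuming an EXTENTS btree id): positions compare
 * first by phase, then key position, then level, so within one btree phase an
 * earlier key orders before a later one:
 *
 *	gc_pos_cmp(gc_pos_btree(BTREE_ID_EXTENTS, POS_MIN, 0),
 *		   gc_pos_btree(BTREE_ID_EXTENTS, POS_MAX, 0)) < 0
 */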

static inline enum gc_phase btree_id_to_gc_phase(enum btree_id id)
{
	switch (id) {
#define x(n, v, s) case BTREE_ID_##n: return GC_PHASE_BTREE_##n;
	BCH_BTREE_IDS()
#undef x
	default:
		BUG();
	}
}

static inline struct gc_pos gc_pos_btree(enum btree_id id,
					 struct bpos pos, unsigned level)
{
	return (struct gc_pos) {
		.phase	= btree_id_to_gc_phase(id),
		.pos	= pos,
		.level	= level,
	};
}

/*
 * GC position of the pointers within a btree node: note, _not_ for &b->key
 * itself, that lives in the parent node:
 */
static inline struct gc_pos gc_pos_btree_node(struct btree *b)
{
	return gc_pos_btree(b->c.btree_id, b->key.k.p, b->c.level);
}

/*
 * GC position of the pointer to a btree root: we don't use
 * gc_pos_pointer_to_btree_node() here to avoid a potential race with
 * btree_split() increasing the tree depth - the new root will have level > the
 * old root and thus have a greater gc position than the old root, but that
 * would be incorrect since once gc has marked the root it's not coming back.
 */
static inline struct gc_pos gc_pos_btree_root(enum btree_id id)
{
	return gc_pos_btree(id, POS_MAX, BTREE_MAX_DEPTH);
}

static inline struct gc_pos gc_pos_alloc(struct bch_fs *c, struct open_bucket *ob)
{
	return (struct gc_pos) {
		.phase	= GC_PHASE_ALLOC,
		.pos	= POS(ob ? ob - c->open_buckets : 0, 0),
	};
}

/*
 * Has GC's current pass already walked past @pos? Reads c->gc_pos under the
 * seqcount lock so we see a consistent position:
 */
static inline bool gc_visited(struct bch_fs *c, struct gc_pos pos)
{
	unsigned seq;
	bool ret;

	do {
		seq = read_seqcount_begin(&c->gc_pos_lock);
		ret = gc_pos_cmp(pos, c->gc_pos) <= 0;
	} while (read_seqcount_retry(&c->gc_pos_lock, seq));

	return ret;
}
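
/*
 * A minimal usage sketch (hypothetical caller and helper): an updater that
 * changes a reference GC has already walked past must update the GC-side
 * marks itself, roughly:
 *
 *	if (gc_visited(c, gc_pos_btree_node(b)))
 *		update_gc_copy_of_marks();	// hypothetical helper
 */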

#endif /* _BCACHEFS_BTREE_GC_H */