#ifndef _BCACHEFS_BTREE_GC_H
#define _BCACHEFS_BTREE_GC_H

#include "btree_types.h"

void bch2_coalesce(struct bch_fs *);
int bch2_gc(struct bch_fs *, struct list_head *, bool);
void bch2_gc_thread_stop(struct bch_fs *);
int bch2_gc_thread_start(struct bch_fs *);
int bch2_initial_gc(struct bch_fs *, struct list_head *);
void bch2_mark_dev_superblock(struct bch_fs *, struct bch_dev *, unsigned);

/*
 * For concurrent mark and sweep (with other index updates), we define a total
 * ordering of _all_ references GC walks:
 *
 * Note that some references will have the same GC position as others - e.g.
 * everything within the same btree node; in those cases we're relying on
 * whatever locking exists for where those references live, i.e. the write lock
 * on a btree node.
 *
 * That locking is also required to ensure GC doesn't pass the updater in
 * between the updater adding/removing the reference and updating the GC marks;
 * without that, we would at best double count sometimes.
 *
 * That part is important - whenever calling bch2_mark_pointers(), a lock _must_
 * be held that prevents GC from passing the position the updater is at.
 *
 * (What about the start of gc, when we're clearing all the marks? GC clears the
 * mark with the gc pos seqlock held, and bch_mark_bucket checks against the gc
 * position inside its cmpxchg loop, so crap magically works).
 */
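
/*
 * Illustrative sketch of the convention described above - an assumption about
 * how callers are structured, not a definition of the actual update path: an
 * updater modifying references in a btree node @b relies on the node's write
 * lock to keep GC from passing gc_pos_btree_node(b) while the marks are
 * updated, and uses gc_visited() to decide whether GC has already walked its
 * position (in which case GC's copy of the marks must be updated as well):
 *
 *	// write lock on @b already held by the btree update path
 *	bch2_mark_pointers(...);		// update bucket marks
 *	if (gc_visited(c, gc_pos_btree_node(b)))
 *		...				// GC already passed this node;
 *						// also fix up GC's view of the marks
 */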

/* Position of (the start of) a gc phase: */
static inline struct gc_pos gc_phase(enum gc_phase phase)
{
	return (struct gc_pos) {
		.phase	= phase,
		.pos	= POS_MIN,
		.level	= 0,
	};
}

static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
{
	if (l.phase != r.phase)
		return l.phase < r.phase ? -1 : 1;
	if (bkey_cmp(l.pos, r.pos))
		return bkey_cmp(l.pos, r.pos);
	if (l.level != r.level)
		return l.level < r.level ? -1 : 1;
	return 0;
}
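
/*
 * Worked example of the ordering (illustrative, derived from the definitions
 * in this file; the btree id is only an example): positions are compared by
 * phase first, then bkey, then level, so e.g.
 *
 *	gc_pos_cmp(gc_pos_btree(BTREE_ID_EXTENTS, POS(1, 0), 0),
 *		   gc_pos_btree_root(BTREE_ID_EXTENTS)) < 0
 *
 * because the root position below uses POS_MAX/BTREE_MAX_DEPTH, and any
 * position within a btree sorts before any GC_PHASE_ALLOC position because
 * the phases differ.
 */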

static inline enum gc_phase btree_id_to_gc_phase(enum btree_id id)
{
	switch (id) {
#define x(n, v, s) case BTREE_ID_##n: return GC_PHASE_BTREE_##n;
	BCH_BTREE_IDS()
#undef x
	default:
		BUG();
	}
}

static inline struct gc_pos gc_pos_btree(enum btree_id id,
					 struct bpos pos, unsigned level)
{
	return (struct gc_pos) {
		.phase	= btree_id_to_gc_phase(id),
		.pos	= pos,
		.level	= level,
	};
}

/*
 * GC position of the pointers within a btree node: note, _not_ for &b->key
 * itself, that lives in the parent node:
 */
static inline struct gc_pos gc_pos_btree_node(struct btree *b)
{
	return gc_pos_btree(b->btree_id, b->key.k.p, b->level);
}

/*
 * GC position of the pointer to a btree root: we don't use
 * gc_pos_pointer_to_btree_node() here to avoid a potential race with
 * btree_split() increasing the tree depth - the new root will have level > the
 * old root and thus have a greater gc position than the old root, but that
 * would be incorrect since once gc has marked the root it's not coming back.
 */
static inline struct gc_pos gc_pos_btree_root(enum btree_id id)
{
	return gc_pos_btree(id, POS_MAX, BTREE_MAX_DEPTH);
}

static inline struct gc_pos gc_pos_alloc(struct bch_fs *c, struct open_bucket *ob)
{
	return (struct gc_pos) {
		.phase	= GC_PHASE_ALLOC,
		.pos	= POS(ob ? ob - c->open_buckets : 0, 0),
	};
}

static inline bool gc_visited(struct bch_fs *c, struct gc_pos pos)
{
	unsigned seq;
	bool ret;

	do {
		seq = read_seqcount_begin(&c->gc_pos_lock);
		ret = gc_pos_cmp(pos, c->gc_pos) <= 0;
	} while (read_seqcount_retry(&c->gc_pos_lock, seq));

	return ret;
}
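
/*
 * Illustrative use (an assumption about callers, not taken from the original):
 * code marking an open bucket can compute its position in the GC ordering and
 * check whether GC has already walked past it, e.g.
 *
 *	if (gc_visited(c, gc_pos_alloc(c, ob)))
 *		...	// GC already marked this bucket; update GC's copy too
 */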

#endif /* _BCACHEFS_BTREE_GC_H */