4 #include "btree_types.h"
8 void bch_coalesce(struct cache_set *);
9 void bch_gc(struct cache_set *);
10 void bch_gc_thread_stop(struct cache_set *);
11 int bch_gc_thread_start(struct cache_set *);
12 int bch_initial_gc(struct cache_set *, struct list_head *);
13 u8 bch_btree_key_recalc_oldest_gen(struct cache_set *, struct bkey_s_c);
14 u8 bch_btree_mark_key_initial(struct cache_set *, enum bkey_type,
18 * For concurrent mark and sweep (with other index updates), we define a total
19 * ordering of _all_ references GC walks:
21 * Note that some references will have the same GC position as others - e.g.
22 * everything within the same btree node; in those cases we're relying on
23 * whatever locking exists for where those references live, i.e. the write lock
26 * That locking is also required to ensure GC doesn't pass the updater in
27 * between the updater adding/removing the reference and updating the GC marks;
28 * without that, we would at best double count sometimes.
30 * That part is important - whenever calling bch_mark_pointers(), a lock _must_
31 * be held that prevents GC from passing the position the updater is at.
33 * (What about the start of gc, when we're clearing all the marks? GC clears the
34 * mark with the gc pos seqlock held, and bch_mark_bucket checks against the gc
 * position inside its cmpxchg loop, so the combination is correct without
 * any additional synchronization).
38 /* Position of (the start of) a gc phase: */
39 static inline struct gc_pos gc_phase(enum gc_phase phase)
41 return (struct gc_pos) {
48 #define GC_POS_MIN gc_phase(0)
50 static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
52 if (l.phase != r.phase)
53 return l.phase < r.phase ? -1 : 1;
54 if (bkey_cmp(l.pos, r.pos))
55 return bkey_cmp(l.pos, r.pos);
56 if (l.level != r.level)
57 return l.level < r.level ? -1 : 1;
62 * GC position of the pointers within a btree node: note, _not_ for &b->key
63 * itself, that lives in the parent node:
65 static inline struct gc_pos gc_pos_btree_node(struct btree *b)
67 return (struct gc_pos) {
75 * GC position of the pointer to a btree root: we don't use
76 * gc_pos_pointer_to_btree_node() here to avoid a potential race with
77 * btree_split() increasing the tree depth - the new root will have level > the
78 * old root and thus have a greater gc position than the old root, but that
79 * would be incorrect since once gc has marked the root it's not coming back.
81 static inline struct gc_pos gc_pos_btree_root(enum btree_id id)
83 return (struct gc_pos) {
90 static inline bool gc_will_visit(struct cache_set *c, struct gc_pos pos)
96 seq = read_seqcount_begin(&c->gc_pos_lock);
97 ret = gc_pos_cmp(c->gc_pos, pos) < 0;
98 } while (read_seqcount_retry(&c->gc_pos_lock, seq));