#include "btree_iter.h"

#include <trace/events/bcache.h>

/*
 * For buffered iteration over the btree, with predicates and ratelimiting and
 * whatnot.
 */

static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
{
	/* Overlapping keys compare equal */
	if (bkey_cmp(l->key.k.p, bkey_start_pos(&r->key.k)) <= 0)
		return -1;
	if (bkey_cmp(bkey_start_pos(&l->key.k), r->key.k.p) >= 0)
		return 1;
	return 0;
}

static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
					    struct keybuf_key *r)
{
	return clamp_t(s64, bkey_cmp(l->key.k.p, r->key.k.p), -1, 1);
}

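/*
 * Scan the btree from buf->last_scanned towards @end, buffering keys that
 * match @pred until the freelist is exhausted or @end is reached, then
 * update buf->start/buf->end to the range of keys currently buffered.
 */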
void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
		       struct bpos end, keybuf_pred_fn *pred)
{
	struct bpos start = buf->last_scanned;
	struct btree_iter iter;
	struct bkey_s_c k;
	unsigned nr_found = 0;

	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, buf->last_scanned, k) {
		if (bkey_cmp(k.k->p, end) >= 0) {
			buf->last_scanned = k.k->p;
			goto done;
		}

		if (pred(buf, k)) {
			struct keybuf_key *w;

			spin_lock(&buf->lock);

			w = array_alloc(&buf->freelist);
			if (!w) {
				spin_unlock(&buf->lock);
				goto done;
			}

			bkey_reassemble(&w->key, k);
			atomic_set(&w->ref, -1); /* -1 means hasn't started */

			if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
				array_free(&buf->freelist, w);
			else
				nr_found++;

			spin_unlock(&buf->lock);
		}

		buf->last_scanned = k.k->p;
		bch_btree_iter_cond_resched(&iter);
	}

	/* If we end up here, it means:
	 * - the map_fn didn't fill up the keybuf
	 * - the map_fn didn't see the end key
	 * - there were no more keys to map over
	 * Therefore, we are at the end of the key space */
	buf->last_scanned = POS_MAX;
done:
	bch_btree_iter_unlock(&iter);

	trace_bcache_keyscan(nr_found,
			     start.inode, start.offset,
			     buf->last_scanned.inode,
			     buf->last_scanned.offset);

	spin_lock(&buf->lock);

	if (!RB_EMPTY_ROOT(&buf->keys)) {
		struct keybuf_key *w;

		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
		buf->start = bkey_start_pos(&w->key.k);

		w = RB_LAST(&buf->keys, struct keybuf_key, node);
		buf->end = w->key.k.p;
	} else {
		buf->start = POS_MAX;
		buf->end = POS_MAX;
	}

	spin_unlock(&buf->lock);
}

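/* Remove a key from the buffer; caller must hold buf->lock */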
static void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	rb_erase(&w->node, &buf->keys);
	array_free(&buf->freelist, w);
}

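/*
 * Drop a reference taken by bch_keybuf_next(); when the last reference is
 * put, the key is removed from the buffer and an in_flight slot is released.
 */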
void bch_keybuf_put(struct keybuf *buf, struct keybuf_key *w)
{
	BUG_ON(atomic_read(&w->ref) <= 0);

	if (atomic_dec_and_test(&w->ref)) {
		up(&buf->in_flight);

		spin_lock(&buf->lock);
		bch_keybuf_del(buf, w);
		spin_unlock(&buf->lock);
	}
}

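/*
 * Recalculate the oldest gen for every key currently held in the buffer,
 * under buf->lock.
 */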
void bch_keybuf_recalc_oldest_gens(struct cache_set *c, struct keybuf *buf)
{
	struct keybuf_key *w, *n;

	spin_lock(&buf->lock);
	rbtree_postorder_for_each_entry_safe(w, n, &buf->keys, node)
		bch_btree_key_recalc_oldest_gen(c, bkey_i_to_s_c(&w->key));
	spin_unlock(&buf->lock);
}

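/*
 * Check whether [start, end) overlaps any buffered key: keys that haven't
 * been started yet (ref == -1) are dropped from the buffer; returns true if
 * an in-flight key overlaps the range.
 */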
bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bpos start,
				  struct bpos end)
{
	bool ret = false;
	struct keybuf_key *w, *next, s = { .key.k.p = start };

	if (bkey_cmp(end, buf->start) <= 0 ||
	    bkey_cmp(start, buf->end) >= 0)
		return false;

	spin_lock(&buf->lock);

	for (w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
	     w && bkey_cmp(bkey_start_pos(&w->key.k), end) < 0;
	     w = next) {
		next = RB_NEXT(w, node);

		if (atomic_read(&w->ref) == -1)
			bch_keybuf_del(buf, w);
		else
			ret = true;
	}

	spin_unlock(&buf->lock);
	return ret;
}

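/*
 * Return the first buffered key that hasn't been started yet, taking a
 * reference on it, or NULL if there is none; throttled by the in_flight
 * semaphore.
 */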
struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
{
	struct keybuf_key *w;

	spin_lock(&buf->lock);
	w = RB_FIRST(&buf->keys, struct keybuf_key, node);

	while (w && atomic_read(&w->ref) != -1)
		w = RB_NEXT(w, node);

	if (!w) {
		spin_unlock(&buf->lock);
		return NULL;
	}

	atomic_set(&w->ref, 1);
	spin_unlock(&buf->lock);

	down(&buf->in_flight);

	return w;
}

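/*
 * Initialize an empty keybuf; in_flight allows at most KEYBUF_REFILL_BATCH / 2
 * keys to be outstanding at once.
 */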
void bch_keybuf_init(struct keybuf *buf)
{
	sema_init(&buf->in_flight, KEYBUF_REFILL_BATCH / 2);

	buf->last_scanned = POS_MAX;
	buf->start = POS_MIN;
	buf->end = POS_MIN;
	buf->keys = RB_ROOT;

	spin_lock_init(&buf->lock);
	array_allocator_init(&buf->freelist);
}