// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "errcode.h"
#include "error.h"
#include "journal.h"
#include "journal_reclaim.h"

#include <linux/sched/mm.h>
#include <trace/events/bcachefs.h>

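/*
 * The btree key cache is a write-back cache of individual keys, indexed by
 * (btree_id, pos) in an rhashtable of struct bkey_cached, so that hot keys
 * can be updated without immediately modifying the btree itself. Dirty
 * entries hold a journal pin and are written back to the btree by journal
 * reclaim, via bch2_btree_key_cache_journal_flush().
 */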
static inline bool btree_uses_pcpu_readers(enum btree_id id)
{
        return id == BTREE_ID_subvolumes;
}

static struct kmem_cache *bch2_key_cache;

static int bch2_btree_key_cache_cmp_fn(struct rhashtable_compare_arg *arg,
                                       const void *obj)
{
        const struct bkey_cached *ck = obj;
        const struct bkey_cached_key *key = arg->key;

        return cmp_int(ck->key.btree_id, key->btree_id) ?:
                bpos_cmp(ck->key.pos, key->pos);
}

static const struct rhashtable_params bch2_btree_key_cache_params = {
        .head_offset    = offsetof(struct bkey_cached, hash),
        .key_offset     = offsetof(struct bkey_cached, key),
        .key_len        = sizeof(struct bkey_cached_key),
        .obj_cmpfn      = bch2_btree_key_cache_cmp_fn,
};

__flatten
inline struct bkey_cached *
bch2_btree_key_cache_find(struct bch_fs *c, enum btree_id btree_id, struct bpos pos)
{
        struct bkey_cached_key key = {
                .btree_id       = btree_id,
                .pos            = pos,
        };

        return rhashtable_lookup_fast(&c->btree_key_cache.table, &key,
                                      bch2_btree_key_cache_params);
}

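/*
 * Try to take the intent and write locks needed to evict an entry; fails
 * without blocking if either trylock fails or the entry is dirty.
 */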
static bool bkey_cached_lock_for_evict(struct bkey_cached *ck)
{
        if (!six_trylock_intent(&ck->c.lock))
                return false;

        if (!six_trylock_write(&ck->c.lock)) {
                six_unlock_intent(&ck->c.lock);
                return false;
        }

        if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                six_unlock_write(&ck->c.lock);
                six_unlock_intent(&ck->c.lock);
                return false;
        }

        return true;
}

static void bkey_cached_evict(struct btree_key_cache *c,
                              struct bkey_cached *ck)
{
        BUG_ON(rhashtable_remove_fast(&c->table, &ck->hash,
                                      bch2_btree_key_cache_params));
        memset(&ck->key, ~0, sizeof(ck->key));

        atomic_long_dec(&c->nr_keys);
}

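/*
 * Freed entries aren't returned to the slab right away: they're parked on
 * the freed_pcpu/freed_nonpcpu lists and only destroyed by the shrinker once
 * their SRCU grace period (btree_trans_barrier) has expired, so that lookups
 * still in flight under SRCU don't see reused memory.
 */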
static void bkey_cached_free(struct btree_key_cache *bc,
                             struct bkey_cached *ck)
{
        struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);

        BUG_ON(test_bit(BKEY_CACHED_DIRTY, &ck->flags));

        ck->btree_trans_barrier_seq =
                start_poll_synchronize_srcu(&c->btree_trans_barrier);

        if (ck->c.lock.readers)
                list_move_tail(&ck->list, &bc->freed_pcpu);
        else
                list_move_tail(&ck->list, &bc->freed_nonpcpu);
        atomic_long_inc(&bc->nr_freed);

        kfree(ck->k);
        ck->k           = NULL;
        ck->u64s        = 0;

        six_unlock_write(&ck->c.lock);
        six_unlock_intent(&ck->c.lock);
}

static void bkey_cached_move_to_freelist(struct btree_key_cache *bc,
                                         struct bkey_cached *ck)
{
        struct btree_key_cache_freelist *f;
        bool freed = false;

        BUG_ON(test_bit(BKEY_CACHED_DIRTY, &ck->flags));

        if (!ck->c.lock.readers) {
#ifdef __KERNEL__
                preempt_disable();
                f = this_cpu_ptr(bc->pcpu_freed);

                if (f->nr < ARRAY_SIZE(f->objs)) {
                        f->objs[f->nr++] = ck;
                        freed = true;
                }
                preempt_enable();

                if (!freed) {
                        mutex_lock(&bc->lock);
                        preempt_disable();
                        f = this_cpu_ptr(bc->pcpu_freed);

                        while (f->nr > ARRAY_SIZE(f->objs) / 2) {
                                struct bkey_cached *ck2 = f->objs[--f->nr];

                                list_move_tail(&ck2->list, &bc->freed_nonpcpu);
                        }
                        preempt_enable();

                        list_move_tail(&ck->list, &bc->freed_nonpcpu);
                        mutex_unlock(&bc->lock);
                }
#else
                mutex_lock(&bc->lock);
                list_move_tail(&ck->list, &bc->freed_nonpcpu);
                mutex_unlock(&bc->lock);
#endif
        } else {
                mutex_lock(&bc->lock);
                list_move_tail(&ck->list, &bc->freed_pcpu);
                mutex_unlock(&bc->lock);
        }
}

static void bkey_cached_free_fast(struct btree_key_cache *bc,
                                  struct bkey_cached *ck)
{
        struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);

        ck->btree_trans_barrier_seq =
                start_poll_synchronize_srcu(&c->btree_trans_barrier);

        list_del_init(&ck->list);
        atomic_long_inc(&bc->nr_freed);

        kfree(ck->k);
        ck->k           = NULL;
        ck->u64s        = 0;

        bkey_cached_move_to_freelist(bc, ck);

        six_unlock_write(&ck->c.lock);
        six_unlock_intent(&ck->c.lock);
}

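/*
 * Allocate a bkey_cached, preferring the percpu and global freelists over
 * the slab. On success the entry is returned intent- and write-locked; may
 * return an ERR_PTR (transaction restart) if taking those locks fails.
 */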
static struct bkey_cached *
bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path)
{
        struct bch_fs *c = trans->c;
        struct btree_key_cache *bc = &c->btree_key_cache;
        struct bkey_cached *ck = NULL;
        struct btree_key_cache_freelist *f;
        bool pcpu_readers = btree_uses_pcpu_readers(path->btree_id);

        if (!pcpu_readers) {
#ifdef __KERNEL__
                preempt_disable();
                f = this_cpu_ptr(bc->pcpu_freed);
                if (f->nr)
                        ck = f->objs[--f->nr];
                preempt_enable();

                if (!ck) {
                        mutex_lock(&bc->lock);
                        preempt_disable();
                        f = this_cpu_ptr(bc->pcpu_freed);

                        while (!list_empty(&bc->freed_nonpcpu) &&
                               f->nr < ARRAY_SIZE(f->objs) / 2) {
                                ck = list_last_entry(&bc->freed_nonpcpu, struct bkey_cached, list);
                                list_del_init(&ck->list);
                                f->objs[f->nr++] = ck;
                        }

                        ck = f->nr ? f->objs[--f->nr] : NULL;
                        preempt_enable();
                        mutex_unlock(&bc->lock);
                }
#else
                mutex_lock(&bc->lock);
                if (!list_empty(&bc->freed_nonpcpu)) {
                        ck = list_last_entry(&bc->freed_nonpcpu, struct bkey_cached, list);
                        list_del_init(&ck->list);
                }
                mutex_unlock(&bc->lock);
#endif
        } else {
                mutex_lock(&bc->lock);
                if (!list_empty(&bc->freed_pcpu)) {
                        ck = list_last_entry(&bc->freed_pcpu, struct bkey_cached, list);
                        list_del_init(&ck->list);
                }
                mutex_unlock(&bc->lock);
        }

        if (ck) {
                int ret;

                ret = btree_node_lock_nopath(trans, &ck->c, SIX_LOCK_intent);
                if (unlikely(ret)) {
                        bkey_cached_move_to_freelist(bc, ck);
                        return ERR_PTR(ret);
                }

                path->l[0].b = (void *) ck;
                path->l[0].lock_seq = ck->c.lock.state.seq;
                mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);

                ret = bch2_btree_node_lock_write(trans, path, &ck->c);
                if (unlikely(ret)) {
                        btree_node_unlock(trans, path, 0);
                        bkey_cached_move_to_freelist(bc, ck);
                        return ERR_PTR(ret);
                }

                return ck;
        }

        /* GFP_NOFS because we're holding btree locks: */
        ck = kmem_cache_alloc(bch2_key_cache, GFP_NOFS|__GFP_ZERO);
        if (likely(ck)) {
                INIT_LIST_HEAD(&ck->list);
                __six_lock_init(&ck->c.lock, "b->c.lock", &bch2_btree_node_lock_key);
                if (pcpu_readers)
                        six_lock_pcpu_alloc(&ck->c.lock);

                ck->c.cached = true;
                BUG_ON(!six_trylock_intent(&ck->c.lock));
                BUG_ON(!six_trylock_write(&ck->c.lock));
                return ck;
        }

        return NULL;
}

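/*
 * Called when bkey_cached_alloc() returned NULL (freelists empty and the
 * slab allocation failed): walk the hash table and steal the first clean
 * entry we can trylock, evicting it so it can be reused for a new key.
 */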
static struct bkey_cached *
bkey_cached_reuse(struct btree_key_cache *c)
{
        struct bucket_table *tbl;
        struct rhash_head *pos;
        struct bkey_cached *ck;
        unsigned i;

        mutex_lock(&c->lock);
        rcu_read_lock();
        tbl = rht_dereference_rcu(c->table.tbl, &c->table);
        for (i = 0; i < tbl->size; i++)
                rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
                        if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
                            bkey_cached_lock_for_evict(ck)) {
                                bkey_cached_evict(c, ck);
                                goto out;
                        }
                }
        ck = NULL;
out:
        rcu_read_unlock();
        mutex_unlock(&c->lock);
        return ck;
}

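/*
 * Allocate (or steal) a cache entry for path's position and insert it into
 * the hash table. Returns NULL if we raced with another thread inserting the
 * same key, in which case the caller simply retries the lookup.
 */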
static struct bkey_cached *
btree_key_cache_create(struct btree_trans *trans, struct btree_path *path)
{
        struct bch_fs *c = trans->c;
        struct btree_key_cache *bc = &c->btree_key_cache;
        struct bkey_cached *ck;
        bool was_new = true;

        ck = bkey_cached_alloc(trans, path);
        if (unlikely(IS_ERR(ck)))
                return ck;

        if (unlikely(!ck)) {
                ck = bkey_cached_reuse(bc);
                if (unlikely(!ck)) {
                        bch_err(c, "error allocating memory for key cache item, btree %s",
                                bch2_btree_ids[path->btree_id]);
                        return ERR_PTR(-ENOMEM);
                }

                mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);
                was_new = false;
        } else {
                if (path->btree_id == BTREE_ID_subvolumes)
                        six_lock_pcpu_alloc(&ck->c.lock);
        }

        ck->c.level             = 0;
        ck->c.btree_id          = path->btree_id;
        ck->key.btree_id        = path->btree_id;
        ck->key.pos             = path->pos;
        ck->valid               = false;
        ck->flags               = 1U << BKEY_CACHED_ACCESSED;

        if (unlikely(rhashtable_lookup_insert_fast(&bc->table,
                                          &ck->hash,
                                          bch2_btree_key_cache_params))) {
                /* We raced with another fill: */

                if (likely(was_new)) {
                        six_unlock_write(&ck->c.lock);
                        six_unlock_intent(&ck->c.lock);
                        mark_btree_node_locked(trans, path, 0, BTREE_NODE_UNLOCKED);
                        kfree(ck);
                } else {
                        bkey_cached_free_fast(bc, ck);
                }

                return NULL;
        }

        atomic_long_inc(&bc->nr_keys);

        six_unlock_write(&ck->c.lock);

        return ck;
}

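/*
 * Populate a cache entry by reading the key from the btree proper, sizing
 * the entry's buffer with some slack so the commit path is less likely to
 * need a reallocation (which would mean a transaction restart).
 */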
static int btree_key_cache_fill(struct btree_trans *trans,
                                struct btree_path *ck_path,
                                struct bkey_cached *ck)
{
        struct btree_path *path;
        struct bkey_s_c k;
        unsigned new_u64s = 0;
        struct bkey_i *new_k = NULL;
        struct bkey u;
        int ret;

        path = bch2_path_get(trans, ck->key.btree_id,
                             ck->key.pos, 0, 0, 0, _THIS_IP_);
        ret = bch2_btree_path_traverse(trans, path, 0);
        if (ret)
                goto err;

        k = bch2_btree_path_peek_slot(path, &u);

        if (!bch2_btree_node_relock(trans, ck_path, 0)) {
                trace_and_count(trans->c, trans_restart_relock_key_cache_fill, trans, _THIS_IP_, ck_path);
                ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
                goto err;
        }

        /*
         * bch2_varint_decode can read past the end of the buffer by at
         * most 7 bytes (it won't be used):
         */
        new_u64s = k.k->u64s + 1;

        /*
         * Allocate some extra space so that the transaction commit path is less
         * likely to have to reallocate, since that requires a transaction
         * restart:
         */
        new_u64s = min(256U, (new_u64s * 3) / 2);

        if (new_u64s > ck->u64s) {
                new_u64s = roundup_pow_of_two(new_u64s);
                new_k = kmalloc(new_u64s * sizeof(u64), GFP_NOFS);
                if (!new_k) {
                        bch_err(trans->c, "error allocating memory for key cache key, btree %s u64s %u",
                                bch2_btree_ids[ck->key.btree_id], new_u64s);
                        ret = -ENOMEM;
                        goto err;
                }
        }

        ret = bch2_btree_node_lock_write(trans, ck_path, &ck_path->l[0].b->c);
        if (ret) {
                kfree(new_k);
                goto err;
        }

        if (new_k) {
                kfree(ck->k);
                ck->u64s = new_u64s;
                ck->k = new_k;
        }

        bkey_reassemble(ck->k, k);
        ck->valid = true;
        bch2_btree_node_unlock_write(trans, ck_path, ck_path->l[0].b);

        /* We're not likely to need this iterator again: */
        path->preserve = false;
err:
        bch2_path_put(trans, path, 0);
        return ret;
}

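/*
 * Slowpath for traversing to a cached key: creates the cache entry if it
 * doesn't exist yet, and fills it from the btree if it isn't valid.
 */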
noinline static int
bch2_btree_path_traverse_cached_slowpath(struct btree_trans *trans, struct btree_path *path,
                                         unsigned flags)
{
        struct bch_fs *c = trans->c;
        struct bkey_cached *ck;
        int ret = 0;

        BUG_ON(path->level);

        path->l[1].b = NULL;

        if (bch2_btree_node_relock(trans, path, 0)) {
                ck = (void *) path->l[0].b;
                goto fill;
        }
retry:
        ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos);
        if (!ck) {
                ck = btree_key_cache_create(trans, path);
                ret = PTR_ERR_OR_ZERO(ck);
                if (ret)
                        goto err;
                if (!ck)
                        goto retry;

                mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);
                path->locks_want = 1;
        } else {
                enum six_lock_type lock_want = __btree_lock_want(path, 0);

                ret = btree_node_lock(trans, path, (void *) ck, 0,
                                      lock_want, _THIS_IP_);
                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                        goto err;

                BUG_ON(ret);

                if (ck->key.btree_id != path->btree_id ||
                    bpos_cmp(ck->key.pos, path->pos)) {
                        six_unlock_type(&ck->c.lock, lock_want);
                        goto retry;
                }

                mark_btree_node_locked(trans, path, 0, lock_want);
        }

        path->l[0].lock_seq     = ck->c.lock.state.seq;
        path->l[0].b            = (void *) ck;
fill:
        if (!ck->valid) {
                /*
                 * Using the underscore version because we haven't set
                 * path->uptodate yet:
                 */
                if (!path->locks_want &&
                    !__bch2_btree_path_upgrade(trans, path, 1)) {
                        trace_and_count(trans->c, trans_restart_key_cache_upgrade, trans, _THIS_IP_);
                        ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_upgrade);
                        goto err;
                }

                ret = btree_key_cache_fill(trans, path, ck);
                if (ret)
                        goto err;
        }

        if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
                set_bit(BKEY_CACHED_ACCESSED, &ck->flags);

        path->uptodate = BTREE_ITER_UPTODATE;
        BUG_ON(!ck->valid);
        BUG_ON(btree_node_locked_type(path, 0) != btree_lock_want(path, 0));

        return ret;
err:
        if (!bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
                btree_node_unlock(trans, path, 0);
                path->l[0].b = ERR_PTR(ret);
        }
        return ret;
}

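/*
 * Fastpath: the entry already exists and is valid, so only locking is
 * required; anything more involved falls back to the slowpath above.
 */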
int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path *path,
                                    unsigned flags)
{
        struct bch_fs *c = trans->c;
        struct bkey_cached *ck;
        int ret = 0;

        EBUG_ON(path->level);

        path->l[1].b = NULL;

        if (bch2_btree_node_relock(trans, path, 0)) {
                ck = (void *) path->l[0].b;
                goto fill;
        }
retry:
        ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos);
        if (!ck) {
                return bch2_btree_path_traverse_cached_slowpath(trans, path, flags);
        } else {
                enum six_lock_type lock_want = __btree_lock_want(path, 0);

                ret = btree_node_lock(trans, path, (void *) ck, 0,
                                      lock_want, _THIS_IP_);
                EBUG_ON(ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart));

                if (ret)
                        return ret;

                if (ck->key.btree_id != path->btree_id ||
                    bpos_cmp(ck->key.pos, path->pos)) {
                        six_unlock_type(&ck->c.lock, lock_want);
                        goto retry;
                }

                mark_btree_node_locked(trans, path, 0, lock_want);
        }

        path->l[0].lock_seq     = ck->c.lock.state.seq;
        path->l[0].b            = (void *) ck;
fill:
        if (!ck->valid)
                return bch2_btree_path_traverse_cached_slowpath(trans, path, flags);

        if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
                set_bit(BKEY_CACHED_ACCESSED, &ck->flags);

        path->uptodate = BTREE_ITER_UPTODATE;
        EBUG_ON(!ck->valid);
        EBUG_ON(btree_node_locked_type(path, 0) != btree_lock_want(path, 0));

        return ret;
}

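/*
 * Write a dirty cache entry back to the btree, optionally evicting it from
 * the cache afterwards. Called from journal reclaim (with a specific
 * journal_seq) and from bch2_btree_key_cache_flush().
 */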
static int btree_key_cache_flush_pos(struct btree_trans *trans,
                                     struct bkey_cached_key key,
                                     u64 journal_seq,
                                     unsigned commit_flags,
                                     bool evict)
{
        struct bch_fs *c = trans->c;
        struct journal *j = &c->journal;
        struct btree_iter c_iter, b_iter;
        struct bkey_cached *ck = NULL;
        int ret;

        bch2_trans_iter_init(trans, &b_iter, key.btree_id, key.pos,
                             BTREE_ITER_SLOTS|
                             BTREE_ITER_INTENT|
                             BTREE_ITER_ALL_SNAPSHOTS);
        bch2_trans_iter_init(trans, &c_iter, key.btree_id, key.pos,
                             BTREE_ITER_CACHED|
                             BTREE_ITER_INTENT);
        b_iter.flags &= ~BTREE_ITER_WITH_KEY_CACHE;

        ret = bch2_btree_iter_traverse(&c_iter);
        if (ret)
                goto out;

        ck = (void *) c_iter.path->l[0].b;
        if (!ck)
                goto out;

        if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                if (evict)
                        goto evict;
                goto out;
        }

        BUG_ON(!ck->valid);

        if (journal_seq && ck->journal.seq != journal_seq)
                goto out;

        /*
         * Since journal reclaim depends on us making progress here, and the
         * allocator/copygc depend on journal reclaim making progress, we need
         * to be using alloc reserves:
         */
        ret   = bch2_btree_iter_traverse(&b_iter) ?:
                bch2_trans_update(trans, &b_iter, ck->k,
                                  BTREE_UPDATE_KEY_CACHE_RECLAIM|
                                  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
                                  BTREE_TRIGGER_NORUN) ?:
                bch2_trans_commit(trans, NULL, NULL,
                                  BTREE_INSERT_NOCHECK_RW|
                                  BTREE_INSERT_NOFAIL|
                                  BTREE_INSERT_USE_RESERVE|
                                  (ck->journal.seq == journal_last_seq(j)
                                   ? JOURNAL_WATERMARK_reserved
                                   : 0)|
                                  commit_flags);

        bch2_fs_fatal_err_on(ret &&
                             !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
                             !bch2_err_matches(ret, BCH_ERR_journal_reclaim_would_deadlock) &&
                             !bch2_journal_error(j), c,
                             "error flushing key cache: %s", bch2_err_str(ret));
        if (ret)
                goto out;

        bch2_journal_pin_drop(j, &ck->journal);
        bch2_journal_preres_put(j, &ck->res);

        BUG_ON(!btree_node_locked(c_iter.path, 0));

        if (!evict) {
                if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                        clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
                        atomic_long_dec(&c->btree_key_cache.nr_dirty);
                }
        } else {
                struct btree_path *path2;
evict:
                trans_for_each_path(trans, path2)
                        if (path2 != c_iter.path)
                                __bch2_btree_path_unlock(trans, path2);

                bch2_btree_node_lock_write_nofail(trans, c_iter.path, &ck->c);

                if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                        clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
                        atomic_long_dec(&c->btree_key_cache.nr_dirty);
                }

                mark_btree_node_locked_noreset(c_iter.path, 0, BTREE_NODE_UNLOCKED);
                bkey_cached_evict(&c->btree_key_cache, ck);
                bkey_cached_free_fast(&c->btree_key_cache, ck);
        }
out:
        bch2_trans_iter_exit(trans, &b_iter);
        bch2_trans_iter_exit(trans, &c_iter);
        return ret;
}

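/*
 * journal_entry_pin flush callback: flush the cache entry pinning journal
 * sequence number @seq, unless it has already been flushed or re-dirtied at
 * a different seq.
 */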
int bch2_btree_key_cache_journal_flush(struct journal *j,
                                struct journal_entry_pin *pin, u64 seq)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bkey_cached *ck =
                container_of(pin, struct bkey_cached, journal);
        struct bkey_cached_key key;
        struct btree_trans trans;
        int srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
        int ret = 0;

        bch2_trans_init(&trans, c, 0, 0);

        btree_node_lock_nopath_nofail(&trans, &ck->c, SIX_LOCK_read);
        key = ck->key;

        if (ck->journal.seq != seq ||
            !test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                six_unlock_read(&ck->c.lock);
                goto unlock;
        }
        six_unlock_read(&ck->c.lock);

        ret = commit_do(&trans, NULL, NULL, 0,
                btree_key_cache_flush_pos(&trans, key, seq,
                                BTREE_INSERT_JOURNAL_RECLAIM, false));
unlock:
        srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);

        bch2_trans_exit(&trans);
        return ret;
}

/*
 * Flush and evict a key from the key cache:
 */
int bch2_btree_key_cache_flush(struct btree_trans *trans,
                               enum btree_id id, struct bpos pos)
{
        struct bch_fs *c = trans->c;
        struct bkey_cached_key key = { id, pos };

        /* Fastpath - assume it won't be found: */
        if (!bch2_btree_key_cache_find(c, id, pos))
                return 0;

        return btree_key_cache_flush_pos(trans, key, 0, 0, true);
}

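/*
 * Commit path: copy @insert into the cache entry, transfer journal
 * prereservation from the transaction to the entry, and mark it dirty with a
 * journal pin so reclaim will eventually flush it.
 */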
bool bch2_btree_insert_key_cached(struct btree_trans *trans,
                                  struct btree_path *path,
                                  struct bkey_i *insert)
{
        struct bch_fs *c = trans->c;
        struct bkey_cached *ck = (void *) path->l[0].b;
        bool kick_reclaim = false;

        BUG_ON(insert->u64s > ck->u64s);

        if (likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))) {
                int difference;

                BUG_ON(jset_u64s(insert->u64s) > trans->journal_preres.u64s);

                difference = jset_u64s(insert->u64s) - ck->res.u64s;
                if (difference > 0) {
                        trans->journal_preres.u64s      -= difference;
                        ck->res.u64s                    += difference;
                }
        }

        bkey_copy(ck->k, insert);
        ck->valid = true;

        if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                set_bit(BKEY_CACHED_DIRTY, &ck->flags);
                atomic_long_inc(&c->btree_key_cache.nr_dirty);

                if (bch2_nr_btree_keys_need_flush(c))
                        kick_reclaim = true;
        }

        bch2_journal_pin_update(&c->journal, trans->journal_res.seq,
                                &ck->journal, bch2_btree_key_cache_journal_flush);

        if (kick_reclaim)
                journal_reclaim_kick(&c->journal);
        return true;
}

void bch2_btree_key_cache_drop(struct btree_trans *trans,
                               struct btree_path *path)
{
        struct bch_fs *c = trans->c;
        struct bkey_cached *ck = (void *) path->l[0].b;

        BUG_ON(!ck->valid);

        /*
         * We just did an update to the btree, bypassing the key cache: the key
         * cache key is now stale and must be dropped, even if dirty:
         */
        if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
                atomic_long_dec(&c->btree_key_cache.nr_dirty);
                bch2_journal_pin_drop(&c->journal, &ck->journal);
        }

        ck->valid = false;
}

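/*
 * Shrinker: first reap entries on the freed lists whose SRCU grace period
 * has expired, then walk the hash table evicting clean entries that haven't
 * been accessed since the previous scan (BKEY_CACHED_ACCESSED gives each
 * entry a second chance).
 */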
static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
                                           struct shrink_control *sc)
{
        struct bch_fs *c = container_of(shrink, struct bch_fs,
                                        btree_key_cache.shrink);
        struct btree_key_cache *bc = &c->btree_key_cache;
        struct bucket_table *tbl;
        struct bkey_cached *ck, *t;
        size_t scanned = 0, freed = 0, nr = sc->nr_to_scan;
        unsigned start, flags;
        int srcu_idx;

        mutex_lock(&bc->lock);
        srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
        flags = memalloc_nofs_save();

        /*
         * Newest freed entries are at the end of the list - once we hit one
         * that's too new to be freed, we can bail out:
         */
        list_for_each_entry_safe(ck, t, &bc->freed_nonpcpu, list) {
                if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
                                                 ck->btree_trans_barrier_seq))
                        break;

                list_del(&ck->list);
                six_lock_pcpu_free(&ck->c.lock);
                kmem_cache_free(bch2_key_cache, ck);
                atomic_long_dec(&bc->nr_freed);
                scanned++;
                freed++;
        }

        if (scanned >= nr)
                goto out;

        list_for_each_entry_safe(ck, t, &bc->freed_pcpu, list) {
                if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
                                                 ck->btree_trans_barrier_seq))
                        break;

                list_del(&ck->list);
                six_lock_pcpu_free(&ck->c.lock);
                kmem_cache_free(bch2_key_cache, ck);
                atomic_long_dec(&bc->nr_freed);
                scanned++;
                freed++;
        }

        if (scanned >= nr)
                goto out;

        rcu_read_lock();
        tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
        if (bc->shrink_iter >= tbl->size)
                bc->shrink_iter = 0;
        start = bc->shrink_iter;

        do {
                struct rhash_head *pos, *next;

                pos = rht_ptr_rcu(rht_bucket(tbl, bc->shrink_iter));

                while (!rht_is_a_nulls(pos)) {
                        next = rht_dereference_bucket_rcu(pos->next, tbl, bc->shrink_iter);
                        ck = container_of(pos, struct bkey_cached, hash);

                        if (test_bit(BKEY_CACHED_DIRTY, &ck->flags))
                                goto next;

                        if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
                                clear_bit(BKEY_CACHED_ACCESSED, &ck->flags);
                        else if (bkey_cached_lock_for_evict(ck)) {
                                bkey_cached_evict(bc, ck);
                                bkey_cached_free(bc, ck);
                        }

                        scanned++;
                        if (scanned >= nr)
                                break;
next:
                        pos = next;
                }

                bc->shrink_iter++;
                if (bc->shrink_iter >= tbl->size)
                        bc->shrink_iter = 0;
        } while (scanned < nr && bc->shrink_iter != start);

        rcu_read_unlock();
out:
        memalloc_nofs_restore(flags);
        srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);
        mutex_unlock(&bc->lock);

        return freed;
}

static unsigned long bch2_btree_key_cache_count(struct shrinker *shrink,
                                            struct shrink_control *sc)
{
        struct bch_fs *c = container_of(shrink, struct bch_fs,
                                        btree_key_cache.shrink);
        struct btree_key_cache *bc = &c->btree_key_cache;
        long nr = atomic_long_read(&bc->nr_keys) -
                atomic_long_read(&bc->nr_dirty);

        return max(0L, nr);
}

void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
{
        struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
        struct bucket_table *tbl;
        struct bkey_cached *ck, *n;
        struct rhash_head *pos;
        unsigned i;
#ifdef __KERNEL__
        int cpu;
#endif

        if (bc->shrink.list.next)
                unregister_shrinker(&bc->shrink);

        mutex_lock(&bc->lock);

        rcu_read_lock();
        tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
        if (tbl)
                for (i = 0; i < tbl->size; i++)
                        rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
                                bkey_cached_evict(bc, ck);
                                list_add(&ck->list, &bc->freed_nonpcpu);
                        }
        rcu_read_unlock();

#ifdef __KERNEL__
        for_each_possible_cpu(cpu) {
                struct btree_key_cache_freelist *f =
                        per_cpu_ptr(bc->pcpu_freed, cpu);

                for (i = 0; i < f->nr; i++) {
                        ck = f->objs[i];
                        list_add(&ck->list, &bc->freed_nonpcpu);
                }
        }
#endif

        list_splice(&bc->freed_pcpu, &bc->freed_nonpcpu);

        list_for_each_entry_safe(ck, n, &bc->freed_nonpcpu, list) {
                cond_resched();

                bch2_journal_pin_drop(&c->journal, &ck->journal);
                bch2_journal_preres_put(&c->journal, &ck->res);

                list_del(&ck->list);
                kfree(ck->k);
                six_lock_pcpu_free(&ck->c.lock);
                kmem_cache_free(bch2_key_cache, ck);
        }

        if (atomic_long_read(&bc->nr_dirty) &&
            !bch2_journal_error(&c->journal) &&
            test_bit(BCH_FS_WAS_RW, &c->flags))
                panic("btree key cache shutdown error: nr_dirty nonzero (%li)\n",
                      atomic_long_read(&bc->nr_dirty));

        if (atomic_long_read(&bc->nr_keys))
                panic("btree key cache shutdown error: nr_keys nonzero (%li)\n",
                      atomic_long_read(&bc->nr_keys));

        mutex_unlock(&bc->lock);

        if (bc->table_init_done)
                rhashtable_destroy(&bc->table);

        free_percpu(bc->pcpu_freed);
}

void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *c)
{
        mutex_init(&c->lock);
        INIT_LIST_HEAD(&c->freed_pcpu);
        INIT_LIST_HEAD(&c->freed_nonpcpu);
}

static void bch2_btree_key_cache_shrinker_to_text(struct printbuf *out, struct shrinker *shrink)
{
        struct btree_key_cache *bc =
                container_of(shrink, struct btree_key_cache, shrink);

        bch2_btree_key_cache_to_text(out, bc);
}

int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
{
        struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
        int ret;

#ifdef __KERNEL__
        bc->pcpu_freed = alloc_percpu(struct btree_key_cache_freelist);
        if (!bc->pcpu_freed)
                return -ENOMEM;
#endif

        ret = rhashtable_init(&bc->table, &bch2_btree_key_cache_params);
        if (ret)
                return ret;

        bc->table_init_done = true;

        bc->shrink.seeks                = 1;
        bc->shrink.count_objects        = bch2_btree_key_cache_count;
        bc->shrink.scan_objects         = bch2_btree_key_cache_scan;
        bc->shrink.to_text              = bch2_btree_key_cache_shrinker_to_text;
        return register_shrinker(&bc->shrink, "%s/btree_key_cache", c->name);
}

void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *c)
{
        prt_printf(out, "nr_freed:\t%lu\n",     atomic_long_read(&c->nr_freed));
        prt_printf(out, "nr_keys:\t%lu\n",      atomic_long_read(&c->nr_keys));
        prt_printf(out, "nr_dirty:\t%lu\n",     atomic_long_read(&c->nr_dirty));
}

void bch2_btree_key_cache_exit(void)
{
        if (bch2_key_cache)
                kmem_cache_destroy(bch2_key_cache);
}

int __init bch2_btree_key_cache_init(void)
{
        bch2_key_cache = KMEM_CACHE(bkey_cached, 0);
        if (!bch2_key_cache)
                return -ENOMEM;

        return 0;
}