// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "errcode.h"
#include "error.h"
#include "journal.h"
#include "journal_reclaim.h"

#include <linux/sched/mm.h>
#include <trace/events/bcachefs.h>

static inline bool btree_uses_pcpu_readers(enum btree_id id)
{
        return id == BTREE_ID_subvolumes;
}

static struct kmem_cache *bch2_key_cache;

static int bch2_btree_key_cache_cmp_fn(struct rhashtable_compare_arg *arg,
                                       const void *obj)
{
        const struct bkey_cached *ck = obj;
        const struct bkey_cached_key *key = arg->key;

        return cmp_int(ck->key.btree_id, key->btree_id) ?:
                bpos_cmp(ck->key.pos, key->pos);
}

static const struct rhashtable_params bch2_btree_key_cache_params = {
        .head_offset    = offsetof(struct bkey_cached, hash),
        .key_offset     = offsetof(struct bkey_cached, key),
        .key_len        = sizeof(struct bkey_cached_key),
        .obj_cmpfn      = bch2_btree_key_cache_cmp_fn,
};

__flatten
inline struct bkey_cached *
bch2_btree_key_cache_find(struct bch_fs *c, enum btree_id btree_id, struct bpos pos)
{
        struct bkey_cached_key key = {
                .btree_id       = btree_id,
                .pos            = pos,
        };

        return rhashtable_lookup_fast(&c->btree_key_cache.table, &key,
                                      bch2_btree_key_cache_params);
}

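/*
 * Try to take the intent and write locks on a cached key without blocking, for
 * eviction; fails if either lock is contended or the entry is dirty.
 */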
static bool bkey_cached_lock_for_evict(struct bkey_cached *ck)
{
        if (!six_trylock_intent(&ck->c.lock))
                return false;

        if (!six_trylock_write(&ck->c.lock)) {
                six_unlock_intent(&ck->c.lock);
                return false;
        }

        if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                six_unlock_write(&ck->c.lock);
                six_unlock_intent(&ck->c.lock);
                return false;
        }

        return true;
}

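/*
 * Remove a cached key from the hash table and poison its key field; the caller
 * must hold the entry's write lock.
 */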
static void bkey_cached_evict(struct btree_key_cache *c,
                              struct bkey_cached *ck)
{
        BUG_ON(rhashtable_remove_fast(&c->table, &ck->hash,
                                      bch2_btree_key_cache_params));
        memset(&ck->key, ~0, sizeof(ck->key));

        atomic_long_dec(&c->nr_keys);
}

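/*
 * Move a clean, evicted entry to the appropriate freed list. The entry isn't
 * returned to the slab until an SRCU grace period (btree_trans_barrier) has
 * elapsed, so lockless lookups still in flight remain safe.
 */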
static void bkey_cached_free(struct btree_key_cache *bc,
                             struct bkey_cached *ck)
{
        struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);

        BUG_ON(test_bit(BKEY_CACHED_DIRTY, &ck->flags));

        ck->btree_trans_barrier_seq =
                start_poll_synchronize_srcu(&c->btree_trans_barrier);

        if (ck->c.lock.readers)
                list_move_tail(&ck->list, &bc->freed_pcpu);
        else
                list_move_tail(&ck->list, &bc->freed_nonpcpu);
        atomic_long_inc(&bc->nr_freed);

        kfree(ck->k);
        ck->k           = NULL;
        ck->u64s        = 0;

        six_unlock_write(&ck->c.lock);
        six_unlock_intent(&ck->c.lock);
}

#ifdef __KERNEL__
static void __bkey_cached_move_to_freelist_ordered(struct btree_key_cache *bc,
                                                   struct bkey_cached *ck)
{
        struct bkey_cached *pos;

        list_for_each_entry_reverse(pos, &bc->freed_nonpcpu, list) {
                if (ULONG_CMP_GE(ck->btree_trans_barrier_seq,
                                 pos->btree_trans_barrier_seq)) {
                        list_move(&ck->list, &pos->list);
                        return;
                }
        }

        list_move(&ck->list, &bc->freed_nonpcpu);
}
#endif

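/*
 * Stash a freed entry: non-pcpu entries go onto this CPU's percpu freelist
 * when there's room, spilling half of it (in SRCU-sequence order) to the
 * shared freed_nonpcpu list when full; pcpu-reader entries go to freed_pcpu.
 */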
static void bkey_cached_move_to_freelist(struct btree_key_cache *bc,
                                         struct bkey_cached *ck)
{
        BUG_ON(test_bit(BKEY_CACHED_DIRTY, &ck->flags));

        if (!ck->c.lock.readers) {
#ifdef __KERNEL__
                struct btree_key_cache_freelist *f;
                bool freed = false;

                preempt_disable();
                f = this_cpu_ptr(bc->pcpu_freed);

                if (f->nr < ARRAY_SIZE(f->objs)) {
                        f->objs[f->nr++] = ck;
                        freed = true;
                }
                preempt_enable();

                if (!freed) {
                        mutex_lock(&bc->lock);
                        preempt_disable();
                        f = this_cpu_ptr(bc->pcpu_freed);

                        while (f->nr > ARRAY_SIZE(f->objs) / 2) {
                                struct bkey_cached *ck2 = f->objs[--f->nr];

                                __bkey_cached_move_to_freelist_ordered(bc, ck2);
                        }
                        preempt_enable();

                        __bkey_cached_move_to_freelist_ordered(bc, ck);
                        mutex_unlock(&bc->lock);
                }
#else
                mutex_lock(&bc->lock);
                list_move_tail(&ck->list, &bc->freed_nonpcpu);
                mutex_unlock(&bc->lock);
#endif
        } else {
                mutex_lock(&bc->lock);
                list_move_tail(&ck->list, &bc->freed_pcpu);
                mutex_unlock(&bc->lock);
        }
}

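/*
 * Free an entry that has just been evicted: release its key buffer, then
 * return it via the bkey_cached_move_to_freelist() fast path.
 */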
static void bkey_cached_free_fast(struct btree_key_cache *bc,
                                  struct bkey_cached *ck)
{
        struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);

        ck->btree_trans_barrier_seq =
                start_poll_synchronize_srcu(&c->btree_trans_barrier);

        list_del_init(&ck->list);
        atomic_long_inc(&bc->nr_freed);

        kfree(ck->k);
        ck->k           = NULL;
        ck->u64s        = 0;

        bkey_cached_move_to_freelist(bc, ck);

        six_unlock_write(&ck->c.lock);
        six_unlock_intent(&ck->c.lock);
}

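/*
 * Allocate a bkey_cached, preferring previously freed entries: first this
 * CPU's percpu freelist (refilled from freed_nonpcpu), or freed_pcpu for
 * btrees that use percpu reader locks, falling back to the slab allocator.
 * Returns the entry intent- and write-locked, or an ERR_PTR on lock restart.
 */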
static struct bkey_cached *
bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path)
{
        struct bch_fs *c = trans->c;
        struct btree_key_cache *bc = &c->btree_key_cache;
        struct bkey_cached *ck = NULL;
        bool pcpu_readers = btree_uses_pcpu_readers(path->btree_id);

        if (!pcpu_readers) {
#ifdef __KERNEL__
                struct btree_key_cache_freelist *f;

                preempt_disable();
                f = this_cpu_ptr(bc->pcpu_freed);
                if (f->nr)
                        ck = f->objs[--f->nr];
                preempt_enable();

                if (!ck) {
                        mutex_lock(&bc->lock);
                        preempt_disable();
                        f = this_cpu_ptr(bc->pcpu_freed);

                        while (!list_empty(&bc->freed_nonpcpu) &&
                               f->nr < ARRAY_SIZE(f->objs) / 2) {
                                ck = list_last_entry(&bc->freed_nonpcpu, struct bkey_cached, list);
                                list_del_init(&ck->list);
                                f->objs[f->nr++] = ck;
                        }

                        ck = f->nr ? f->objs[--f->nr] : NULL;
                        preempt_enable();
                        mutex_unlock(&bc->lock);
                }
#else
                mutex_lock(&bc->lock);
                if (!list_empty(&bc->freed_nonpcpu)) {
                        ck = list_last_entry(&bc->freed_nonpcpu, struct bkey_cached, list);
                        list_del_init(&ck->list);
                }
                mutex_unlock(&bc->lock);
#endif
        } else {
                mutex_lock(&bc->lock);
                if (!list_empty(&bc->freed_pcpu)) {
                        ck = list_last_entry(&bc->freed_pcpu, struct bkey_cached, list);
                        list_del_init(&ck->list);
                }
                mutex_unlock(&bc->lock);
        }

        if (ck) {
                int ret;

                ret = btree_node_lock_nopath(trans, &ck->c, SIX_LOCK_intent);
                if (unlikely(ret)) {
                        bkey_cached_move_to_freelist(bc, ck);
                        return ERR_PTR(ret);
                }

                path->l[0].b = (void *) ck;
                path->l[0].lock_seq = ck->c.lock.state.seq;
                mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);

                ret = bch2_btree_node_lock_write(trans, path, &ck->c);
                if (unlikely(ret)) {
                        btree_node_unlock(trans, path, 0);
                        bkey_cached_move_to_freelist(bc, ck);
                        return ERR_PTR(ret);
                }

                return ck;
        }

        /* GFP_NOFS because we're holding btree locks: */
        ck = kmem_cache_alloc(bch2_key_cache, GFP_NOFS|__GFP_ZERO);
        if (likely(ck)) {
                INIT_LIST_HEAD(&ck->list);
                __six_lock_init(&ck->c.lock, "b->c.lock", &bch2_btree_node_lock_key);
                if (pcpu_readers)
                        six_lock_pcpu_alloc(&ck->c.lock);

                ck->c.cached = true;
                BUG_ON(!six_trylock_intent(&ck->c.lock));
                BUG_ON(!six_trylock_write(&ck->c.lock));
                return ck;
        }

        return NULL;
}

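/*
 * When allocation fails, steal a clean entry that nobody else has locked
 * straight out of the hash table.
 */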
static struct bkey_cached *
bkey_cached_reuse(struct btree_key_cache *c)
{
        struct bucket_table *tbl;
        struct rhash_head *pos;
        struct bkey_cached *ck;
        unsigned i;

        mutex_lock(&c->lock);
        rcu_read_lock();
        tbl = rht_dereference_rcu(c->table.tbl, &c->table);
        for (i = 0; i < tbl->size; i++)
                rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
                        if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
                            bkey_cached_lock_for_evict(ck)) {
                                bkey_cached_evict(c, ck);
                                goto out;
                        }
                }
        ck = NULL;
out:
        rcu_read_unlock();
        mutex_unlock(&c->lock);
        return ck;
}

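/*
 * Allocate and hash a new key cache entry for path's btree/pos. Returns NULL
 * if we raced with another thread inserting the same key, an ERR_PTR on
 * error, or the new entry with the write lock dropped and intent lock held.
 */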
static struct bkey_cached *
btree_key_cache_create(struct btree_trans *trans, struct btree_path *path)
{
        struct bch_fs *c = trans->c;
        struct btree_key_cache *bc = &c->btree_key_cache;
        struct bkey_cached *ck;
        bool was_new = true;

        ck = bkey_cached_alloc(trans, path);
        if (IS_ERR(ck))
                return ck;

        if (unlikely(!ck)) {
                ck = bkey_cached_reuse(bc);
                if (unlikely(!ck)) {
                        bch_err(c, "error allocating memory for key cache item, btree %s",
                                bch2_btree_ids[path->btree_id]);
                        return ERR_PTR(-ENOMEM);
                }

                mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);
                was_new = false;
        } else {
                if (path->btree_id == BTREE_ID_subvolumes)
                        six_lock_pcpu_alloc(&ck->c.lock);
        }

        ck->c.level             = 0;
        ck->c.btree_id          = path->btree_id;
        ck->key.btree_id        = path->btree_id;
        ck->key.pos             = path->pos;
        ck->valid               = false;
        ck->flags               = 1U << BKEY_CACHED_ACCESSED;

        if (unlikely(rhashtable_lookup_insert_fast(&bc->table,
                                          &ck->hash,
                                          bch2_btree_key_cache_params))) {
                /* We raced with another fill: */

                if (likely(was_new)) {
                        six_unlock_write(&ck->c.lock);
                        six_unlock_intent(&ck->c.lock);
                        mark_btree_node_locked(trans, path, 0, BTREE_NODE_UNLOCKED);
                        kfree(ck);
                } else {
                        bkey_cached_free_fast(bc, ck);
                }

                return NULL;
        }

        atomic_long_inc(&bc->nr_keys);

        six_unlock_write(&ck->c.lock);

        return ck;
}

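/*
 * Read the key at ck->key.pos from the underlying btree and copy it into the
 * cached entry, (re)allocating ck->k with some slack so later updates are less
 * likely to need a reallocation (and thus a transaction restart).
 */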
static int btree_key_cache_fill(struct btree_trans *trans,
                                struct btree_path *ck_path,
                                struct bkey_cached *ck)
{
        struct btree_path *path;
        struct bkey_s_c k;
        unsigned new_u64s = 0;
        struct bkey_i *new_k = NULL;
        struct bkey u;
        int ret;

        path = bch2_path_get(trans, ck->key.btree_id,
                             ck->key.pos, 0, 0, 0, _THIS_IP_);
        ret = bch2_btree_path_traverse(trans, path, 0);
        if (ret)
                goto err;

        k = bch2_btree_path_peek_slot(path, &u);

        if (!bch2_btree_node_relock(trans, ck_path, 0)) {
                trace_and_count(trans->c, trans_restart_relock_key_cache_fill, trans, _THIS_IP_, ck_path);
                ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
                goto err;
        }

        /*
         * bch2_varint_decode can read past the end of the buffer by at
         * most 7 bytes (it won't be used):
         */
        new_u64s = k.k->u64s + 1;

        /*
         * Allocate some extra space so that the transaction commit path is less
         * likely to have to reallocate, since that requires a transaction
         * restart:
         */
        new_u64s = min(256U, (new_u64s * 3) / 2);

        if (new_u64s > ck->u64s) {
                new_u64s = roundup_pow_of_two(new_u64s);
                new_k = kmalloc(new_u64s * sizeof(u64), GFP_NOFS);
                if (!new_k) {
                        bch_err(trans->c, "error allocating memory for key cache key, btree %s u64s %u",
                                bch2_btree_ids[ck->key.btree_id], new_u64s);
                        ret = -ENOMEM;
                        goto err;
                }
        }

        ret = bch2_btree_node_lock_write(trans, ck_path, &ck_path->l[0].b->c);
        if (ret) {
                kfree(new_k);
                goto err;
        }

        if (new_k) {
                kfree(ck->k);
                ck->u64s = new_u64s;
                ck->k = new_k;
        }

        bkey_reassemble(ck->k, k);
        ck->valid = true;
        bch2_btree_node_unlock_write(trans, ck_path, ck_path->l[0].b);

        /* We're not likely to need this iterator again: */
        path->preserve = false;
err:
        bch2_path_put(trans, path, 0);
        return ret;
}

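/*
 * Slow path for traversing to a cached key: creates the key cache entry if it
 * doesn't exist yet and fills it from the btree if it isn't valid.
 */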
static noinline int
bch2_btree_path_traverse_cached_slowpath(struct btree_trans *trans, struct btree_path *path,
                                         unsigned flags)
{
        struct bch_fs *c = trans->c;
        struct bkey_cached *ck;
        int ret = 0;

        BUG_ON(path->level);

        path->l[1].b = NULL;

        if (bch2_btree_node_relock(trans, path, 0)) {
                ck = (void *) path->l[0].b;
                goto fill;
        }
retry:
        ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos);
        if (!ck) {
                ck = btree_key_cache_create(trans, path);
                ret = PTR_ERR_OR_ZERO(ck);
                if (ret)
                        goto err;
                if (!ck)
                        goto retry;

                mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);
                path->locks_want = 1;
        } else {
                enum six_lock_type lock_want = __btree_lock_want(path, 0);

                ret = btree_node_lock(trans, path, (void *) ck, 0,
                                      lock_want, _THIS_IP_);
                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                        goto err;

                BUG_ON(ret);

                if (ck->key.btree_id != path->btree_id ||
                    bpos_cmp(ck->key.pos, path->pos)) {
                        six_unlock_type(&ck->c.lock, lock_want);
                        goto retry;
                }

                mark_btree_node_locked(trans, path, 0, lock_want);
        }

        path->l[0].lock_seq     = ck->c.lock.state.seq;
        path->l[0].b            = (void *) ck;
fill:
        if (!ck->valid) {
                /*
                 * Using the underscore version because we haven't set
                 * path->uptodate yet:
                 */
                if (!path->locks_want &&
                    !__bch2_btree_path_upgrade(trans, path, 1)) {
                        trace_and_count(trans->c, trans_restart_key_cache_upgrade, trans, _THIS_IP_);
                        ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_upgrade);
                        goto err;
                }

                ret = btree_key_cache_fill(trans, path, ck);
                if (ret)
                        goto err;
        }

        if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
                set_bit(BKEY_CACHED_ACCESSED, &ck->flags);

        path->uptodate = BTREE_ITER_UPTODATE;
        BUG_ON(!ck->valid);
        BUG_ON(btree_node_locked_type(path, 0) != btree_lock_want(path, 0));

        return ret;
err:
        if (!bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
                btree_node_unlock(trans, path, 0);
                path->l[0].b = ERR_PTR(ret);
        }
        return ret;
}

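/*
 * Fast path: look up an existing, valid cached key and lock it; anything that
 * needs to allocate or fill an entry drops down to the slowpath above.
 */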
int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path *path,
                                    unsigned flags)
{
        struct bch_fs *c = trans->c;
        struct bkey_cached *ck;
        int ret = 0;

        EBUG_ON(path->level);

        path->l[1].b = NULL;

        if (bch2_btree_node_relock(trans, path, 0)) {
                ck = (void *) path->l[0].b;
                goto fill;
        }
retry:
        ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos);
        if (!ck) {
                return bch2_btree_path_traverse_cached_slowpath(trans, path, flags);
        } else {
                enum six_lock_type lock_want = __btree_lock_want(path, 0);

                ret = btree_node_lock(trans, path, (void *) ck, 0,
                                      lock_want, _THIS_IP_);
                EBUG_ON(ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart));

                if (ret)
                        return ret;

                if (ck->key.btree_id != path->btree_id ||
                    bpos_cmp(ck->key.pos, path->pos)) {
                        six_unlock_type(&ck->c.lock, lock_want);
                        goto retry;
                }

                mark_btree_node_locked(trans, path, 0, lock_want);
        }

        path->l[0].lock_seq     = ck->c.lock.state.seq;
        path->l[0].b            = (void *) ck;
fill:
        if (!ck->valid)
                return bch2_btree_path_traverse_cached_slowpath(trans, path, flags);

        if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
                set_bit(BKEY_CACHED_ACCESSED, &ck->flags);

        path->uptodate = BTREE_ITER_UPTODATE;
        EBUG_ON(!ck->valid);
        EBUG_ON(btree_node_locked_type(path, 0) != btree_lock_want(path, 0));

        return ret;
}

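/*
 * Write a dirty cached key back to the btree it shadows, then clear its dirty
 * bit and drop its journal pin; if @evict is set the entry is also removed
 * from the key cache.
 */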
static int btree_key_cache_flush_pos(struct btree_trans *trans,
                                     struct bkey_cached_key key,
                                     u64 journal_seq,
                                     unsigned commit_flags,
                                     bool evict)
{
        struct bch_fs *c = trans->c;
        struct journal *j = &c->journal;
        struct btree_iter c_iter, b_iter;
        struct bkey_cached *ck = NULL;
        int ret;

        bch2_trans_iter_init(trans, &b_iter, key.btree_id, key.pos,
                             BTREE_ITER_SLOTS|
                             BTREE_ITER_INTENT|
                             BTREE_ITER_ALL_SNAPSHOTS);
        bch2_trans_iter_init(trans, &c_iter, key.btree_id, key.pos,
                             BTREE_ITER_CACHED|
                             BTREE_ITER_INTENT);
        b_iter.flags &= ~BTREE_ITER_WITH_KEY_CACHE;

        ret = bch2_btree_iter_traverse(&c_iter);
        if (ret)
                goto out;

        ck = (void *) c_iter.path->l[0].b;
        if (!ck)
                goto out;

        if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                if (evict)
                        goto evict;
                goto out;
        }

        BUG_ON(!ck->valid);

        if (journal_seq && ck->journal.seq != journal_seq)
                goto out;

        /*
         * Since journal reclaim depends on us making progress here, and the
         * allocator/copygc depend on journal reclaim making progress, we need
         * to be using alloc reserves:
         */
        ret   = bch2_btree_iter_traverse(&b_iter) ?:
                bch2_trans_update(trans, &b_iter, ck->k,
                                  BTREE_UPDATE_KEY_CACHE_RECLAIM|
                                  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
                                  BTREE_TRIGGER_NORUN) ?:
                bch2_trans_commit(trans, NULL, NULL,
                                  BTREE_INSERT_NOCHECK_RW|
                                  BTREE_INSERT_NOFAIL|
                                  BTREE_INSERT_USE_RESERVE|
                                  (ck->journal.seq == journal_last_seq(j)
                                   ? JOURNAL_WATERMARK_reserved
                                   : 0)|
                                  commit_flags);

        bch2_fs_fatal_err_on(ret &&
                             !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
                             !bch2_err_matches(ret, BCH_ERR_journal_reclaim_would_deadlock) &&
                             !bch2_journal_error(j), c,
                             "error flushing key cache: %s", bch2_err_str(ret));
        if (ret)
                goto out;

        bch2_journal_pin_drop(j, &ck->journal);
        bch2_journal_preres_put(j, &ck->res);

        BUG_ON(!btree_node_locked(c_iter.path, 0));

        if (!evict) {
                if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                        clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
                        atomic_long_dec(&c->btree_key_cache.nr_dirty);
                }
        } else {
                struct btree_path *path2;
evict:
                trans_for_each_path(trans, path2)
                        if (path2 != c_iter.path)
                                __bch2_btree_path_unlock(trans, path2);

                bch2_btree_node_lock_write_nofail(trans, c_iter.path, &ck->c);

                if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                        clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
                        atomic_long_dec(&c->btree_key_cache.nr_dirty);
                }

                mark_btree_node_locked_noreset(c_iter.path, 0, BTREE_NODE_UNLOCKED);
                bkey_cached_evict(&c->btree_key_cache, ck);
                bkey_cached_free_fast(&c->btree_key_cache, ck);
        }
out:
        bch2_trans_iter_exit(trans, &b_iter);
        bch2_trans_iter_exit(trans, &c_iter);
        return ret;
}

int bch2_btree_key_cache_journal_flush(struct journal *j,
                                struct journal_entry_pin *pin, u64 seq)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bkey_cached *ck =
                container_of(pin, struct bkey_cached, journal);
        struct bkey_cached_key key;
        struct btree_trans trans;
        int srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
        int ret = 0;

        bch2_trans_init(&trans, c, 0, 0);

        btree_node_lock_nopath_nofail(&trans, &ck->c, SIX_LOCK_read);
        key = ck->key;

        if (ck->journal.seq != seq ||
            !test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                six_unlock_read(&ck->c.lock);
                goto unlock;
        }
        six_unlock_read(&ck->c.lock);

        ret = commit_do(&trans, NULL, NULL, 0,
                btree_key_cache_flush_pos(&trans, key, seq,
                                BTREE_INSERT_JOURNAL_RECLAIM, false));
unlock:
        srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);

        bch2_trans_exit(&trans);
        return ret;
}

/*
 * Flush and evict a key from the key cache:
 */
int bch2_btree_key_cache_flush(struct btree_trans *trans,
                               enum btree_id id, struct bpos pos)
{
        struct bch_fs *c = trans->c;
        struct bkey_cached_key key = { id, pos };

        /* Fastpath - assume it won't be found: */
        if (!bch2_btree_key_cache_find(c, id, pos))
                return 0;

        return btree_key_cache_flush_pos(trans, key, 0, 0, true);
}

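/*
 * Update a cached key in place at transaction commit time: copy in the new
 * value, mark it dirty, take a journal pin so reclaim will flush it, and kick
 * journal reclaim if too many dirty keys have accumulated.
 */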
bool bch2_btree_insert_key_cached(struct btree_trans *trans,
                                  struct btree_path *path,
                                  struct bkey_i *insert)
{
        struct bch_fs *c = trans->c;
        struct bkey_cached *ck = (void *) path->l[0].b;
        bool kick_reclaim = false;

        BUG_ON(insert->u64s > ck->u64s);

        if (likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))) {
                int difference;

                BUG_ON(jset_u64s(insert->u64s) > trans->journal_preres.u64s);

                difference = jset_u64s(insert->u64s) - ck->res.u64s;
                if (difference > 0) {
                        trans->journal_preres.u64s      -= difference;
                        ck->res.u64s                    += difference;
                }
        }

        bkey_copy(ck->k, insert);
        ck->valid = true;

        if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                set_bit(BKEY_CACHED_DIRTY, &ck->flags);
                atomic_long_inc(&c->btree_key_cache.nr_dirty);

                if (bch2_nr_btree_keys_need_flush(c))
                        kick_reclaim = true;
        }

        bch2_journal_pin_update(&c->journal, trans->journal_res.seq,
                                &ck->journal, bch2_btree_key_cache_journal_flush);

        if (kick_reclaim)
                journal_reclaim_kick(&c->journal);
        return true;
}

void bch2_btree_key_cache_drop(struct btree_trans *trans,
                               struct btree_path *path)
{
        struct bch_fs *c = trans->c;
        struct bkey_cached *ck = (void *) path->l[0].b;

        BUG_ON(!ck->valid);

        /*
         * We just did an update to the btree, bypassing the key cache: the key
         * cache key is now stale and must be dropped, even if dirty:
         */
        if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
                atomic_long_dec(&c->btree_key_cache.nr_dirty);
                bch2_journal_pin_drop(&c->journal, &ck->journal);
        }

        ck->valid = false;
}

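/*
 * Shrinker scan callback: free entries from the freed lists whose SRCU grace
 * period has elapsed, then walk the hash table evicting clean entries that
 * haven't been accessed since the last pass (second chance via the ACCESSED
 * bit).
 */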
static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
                                           struct shrink_control *sc)
{
        struct bch_fs *c = container_of(shrink, struct bch_fs,
                                        btree_key_cache.shrink);
        struct btree_key_cache *bc = &c->btree_key_cache;
        struct bucket_table *tbl;
        struct bkey_cached *ck, *t;
        size_t scanned = 0, freed = 0, nr = sc->nr_to_scan;
        unsigned start, flags;
        int srcu_idx;

        mutex_lock(&bc->lock);
        srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
        flags = memalloc_nofs_save();

        /*
         * Newest freed entries are at the end of the list - once we hit one
         * that's too new to be freed, we can bail out:
         */
        list_for_each_entry_safe(ck, t, &bc->freed_nonpcpu, list) {
                if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
                                                 ck->btree_trans_barrier_seq))
                        break;

                list_del(&ck->list);
                six_lock_pcpu_free(&ck->c.lock);
                kmem_cache_free(bch2_key_cache, ck);
                atomic_long_dec(&bc->nr_freed);
                scanned++;
                freed++;
        }

        if (scanned >= nr)
                goto out;

        list_for_each_entry_safe(ck, t, &bc->freed_pcpu, list) {
                if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
                                                 ck->btree_trans_barrier_seq))
                        break;

                list_del(&ck->list);
                six_lock_pcpu_free(&ck->c.lock);
                kmem_cache_free(bch2_key_cache, ck);
                atomic_long_dec(&bc->nr_freed);
                scanned++;
                freed++;
        }

        if (scanned >= nr)
                goto out;

        rcu_read_lock();
        tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
        if (bc->shrink_iter >= tbl->size)
                bc->shrink_iter = 0;
        start = bc->shrink_iter;

        do {
                struct rhash_head *pos, *next;

                pos = rht_ptr_rcu(rht_bucket(tbl, bc->shrink_iter));

                while (!rht_is_a_nulls(pos)) {
                        next = rht_dereference_bucket_rcu(pos->next, tbl, bc->shrink_iter);
                        ck = container_of(pos, struct bkey_cached, hash);

                        if (test_bit(BKEY_CACHED_DIRTY, &ck->flags))
                                goto next;

                        if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
                                clear_bit(BKEY_CACHED_ACCESSED, &ck->flags);
                        else if (bkey_cached_lock_for_evict(ck)) {
                                bkey_cached_evict(bc, ck);
                                bkey_cached_free(bc, ck);
                        }

                        scanned++;
                        if (scanned >= nr)
                                break;
next:
                        pos = next;
                }

                bc->shrink_iter++;
                if (bc->shrink_iter >= tbl->size)
                        bc->shrink_iter = 0;
        } while (scanned < nr && bc->shrink_iter != start);

        rcu_read_unlock();
out:
        memalloc_nofs_restore(flags);
        srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);
        mutex_unlock(&bc->lock);

        return freed;
}

static unsigned long bch2_btree_key_cache_count(struct shrinker *shrink,
                                            struct shrink_control *sc)
{
        struct bch_fs *c = container_of(shrink, struct bch_fs,
                                        btree_key_cache.shrink);
        struct btree_key_cache *bc = &c->btree_key_cache;
        long nr = atomic_long_read(&bc->nr_keys) -
                atomic_long_read(&bc->nr_dirty);

        return max(0L, nr);
}

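/*
 * Tear down the key cache at filesystem shutdown: evict everything still in
 * the hash table, drain the percpu and shared freed lists, and complain if
 * any dirty keys were left behind.
 */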
void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
{
        struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
        struct bucket_table *tbl;
        struct bkey_cached *ck, *n;
        struct rhash_head *pos;
        unsigned i;
#ifdef __KERNEL__
        int cpu;
#endif

        if (bc->shrink.list.next)
                unregister_shrinker(&bc->shrink);

        mutex_lock(&bc->lock);

        /*
         * The loop is needed to guard against racing with rehash:
         */
        while (atomic_long_read(&bc->nr_keys)) {
                rcu_read_lock();
                tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
                if (tbl)
                        for (i = 0; i < tbl->size; i++)
                                rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
                                        bkey_cached_evict(bc, ck);
                                        list_add(&ck->list, &bc->freed_nonpcpu);
                                }
                rcu_read_unlock();
        }

#ifdef __KERNEL__
        for_each_possible_cpu(cpu) {
                struct btree_key_cache_freelist *f =
                        per_cpu_ptr(bc->pcpu_freed, cpu);

                for (i = 0; i < f->nr; i++) {
                        ck = f->objs[i];
                        list_add(&ck->list, &bc->freed_nonpcpu);
                }
        }
#endif

        list_splice(&bc->freed_pcpu, &bc->freed_nonpcpu);

        list_for_each_entry_safe(ck, n, &bc->freed_nonpcpu, list) {
                cond_resched();

                bch2_journal_pin_drop(&c->journal, &ck->journal);
                bch2_journal_preres_put(&c->journal, &ck->res);

                list_del(&ck->list);
                kfree(ck->k);
                six_lock_pcpu_free(&ck->c.lock);
                kmem_cache_free(bch2_key_cache, ck);
        }

        if (atomic_long_read(&bc->nr_dirty) &&
            !bch2_journal_error(&c->journal) &&
            test_bit(BCH_FS_WAS_RW, &c->flags))
                panic("btree key cache shutdown error: nr_dirty nonzero (%li)\n",
                      atomic_long_read(&bc->nr_dirty));

        if (atomic_long_read(&bc->nr_keys))
                panic("btree key cache shutdown error: nr_keys nonzero (%li)\n",
                      atomic_long_read(&bc->nr_keys));

        mutex_unlock(&bc->lock);

        if (bc->table_init_done)
                rhashtable_destroy(&bc->table);

        free_percpu(bc->pcpu_freed);
}

void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *c)
{
        mutex_init(&c->lock);
        INIT_LIST_HEAD(&c->freed_pcpu);
        INIT_LIST_HEAD(&c->freed_nonpcpu);
}

static void bch2_btree_key_cache_shrinker_to_text(struct printbuf *out, struct shrinker *shrink)
{
        struct btree_key_cache *bc =
                container_of(shrink, struct btree_key_cache, shrink);

        bch2_btree_key_cache_to_text(out, bc);
}

int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
{
        struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
        int ret;

#ifdef __KERNEL__
        bc->pcpu_freed = alloc_percpu(struct btree_key_cache_freelist);
        if (!bc->pcpu_freed)
                return -ENOMEM;
#endif

        ret = rhashtable_init(&bc->table, &bch2_btree_key_cache_params);
        if (ret)
                return ret;

        bc->table_init_done = true;

        bc->shrink.seeks                = 0;
        bc->shrink.count_objects        = bch2_btree_key_cache_count;
        bc->shrink.scan_objects         = bch2_btree_key_cache_scan;
        bc->shrink.to_text              = bch2_btree_key_cache_shrinker_to_text;
        return register_shrinker(&bc->shrink, "%s/btree_key_cache", c->name);
}

void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *c)
{
        prt_printf(out, "nr_freed:\t%zu",       atomic_long_read(&c->nr_freed));
        prt_newline(out);
        prt_printf(out, "nr_keys:\t%lu",        atomic_long_read(&c->nr_keys));
        prt_newline(out);
        prt_printf(out, "nr_dirty:\t%lu",       atomic_long_read(&c->nr_dirty));
        prt_newline(out);
}

void bch2_btree_key_cache_exit(void)
{
        kmem_cache_destroy(bch2_key_cache);
}

int __init bch2_btree_key_cache_init(void)
{
        bch2_key_cache = KMEM_CACHE(bkey_cached, 0);
        if (!bch2_key_cache)
                return -ENOMEM;

        return 0;
}