
#include "bcachefs.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "error.h"
#include "journal.h"
#include "journal_reclaim.h"

#include <linux/sched/mm.h>
#include <trace/events/bcachefs.h>

static struct kmem_cache *bch2_key_cache;

static int bch2_btree_key_cache_cmp_fn(struct rhashtable_compare_arg *arg,
                                       const void *obj)
{
        const struct bkey_cached *ck = obj;
        const struct bkey_cached_key *key = arg->key;

        return cmp_int(ck->key.btree_id, key->btree_id) ?:
                bkey_cmp(ck->key.pos, key->pos);
}

static const struct rhashtable_params bch2_btree_key_cache_params = {
        .head_offset    = offsetof(struct bkey_cached, hash),
        .key_offset     = offsetof(struct bkey_cached, key),
        .key_len        = sizeof(struct bkey_cached_key),
        .obj_cmpfn      = bch2_btree_key_cache_cmp_fn,
};

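/*
 * Lookup in the key cache hash table by (btree_id, pos); returns the entry
 * without taking a lock or reference on it.
 */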
__flatten
inline struct bkey_cached *
bch2_btree_key_cache_find(struct bch_fs *c, enum btree_id btree_id, struct bpos pos)
{
        struct bkey_cached_key key = {
                .btree_id       = btree_id,
                .pos            = pos,
        };

        return rhashtable_lookup_fast(&c->btree_key_cache.table, &key,
                                      bch2_btree_key_cache_params);
}

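/*
 * Attempt to take intent + write locks on @ck so it can be evicted; fails if
 * either trylock fails or if the entry is dirty.
 */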
static bool bkey_cached_lock_for_evict(struct bkey_cached *ck)
{
        if (!six_trylock_intent(&ck->c.lock))
                return false;

        if (!six_trylock_write(&ck->c.lock)) {
                six_unlock_intent(&ck->c.lock);
                return false;
        }

        if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                six_unlock_write(&ck->c.lock);
                six_unlock_intent(&ck->c.lock);
                return false;
        }

        return true;
}

static void bkey_cached_evict(struct btree_key_cache *c,
                              struct bkey_cached *ck)
{
        BUG_ON(rhashtable_remove_fast(&c->table, &ck->hash,
                                      bch2_btree_key_cache_params));
        memset(&ck->key, ~0, sizeof(ck->key));

        c->nr_keys--;
}

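/*
 * Move @ck to the freed list and free its key buffer. The bkey_cached struct
 * itself isn't freed here: that's deferred to the shrinker, after an SRCU
 * grace period on btree_trans_barrier has elapsed.
 */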
static void bkey_cached_free(struct btree_key_cache *bc,
                             struct bkey_cached *ck)
{
        struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);

        BUG_ON(test_bit(BKEY_CACHED_DIRTY, &ck->flags));

        ck->btree_trans_barrier_seq =
                start_poll_synchronize_srcu(&c->btree_trans_barrier);

        list_move_tail(&ck->list, &bc->freed);
        bc->nr_freed++;

        kfree(ck->k);
        ck->k           = NULL;
        ck->u64s        = 0;

        six_unlock_write(&ck->c.lock);
        six_unlock_intent(&ck->c.lock);
}

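/*
 * Allocate a bkey_cached, preferring to reuse an entry from the freed list,
 * then a fresh slab allocation, then evicting a clean entry. The returned
 * entry is locked for intent and write; returns NULL if all three fail.
 */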
static struct bkey_cached *
bkey_cached_alloc(struct btree_key_cache *c)
{
        struct bkey_cached *ck;

        list_for_each_entry_reverse(ck, &c->freed, list)
                if (bkey_cached_lock_for_evict(ck)) {
                        c->nr_freed--;
                        return ck;
                }

        ck = kmem_cache_alloc(bch2_key_cache, GFP_NOFS|__GFP_ZERO);
        if (likely(ck)) {
                INIT_LIST_HEAD(&ck->list);
                six_lock_init(&ck->c.lock);
                BUG_ON(!six_trylock_intent(&ck->c.lock));
                BUG_ON(!six_trylock_write(&ck->c.lock));
                return ck;
        }

        list_for_each_entry(ck, &c->clean, list)
                if (bkey_cached_lock_for_evict(ck)) {
                        bkey_cached_evict(c, ck);
                        return ck;
                }

        return NULL;
}

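/*
 * Allocate a cached key for (btree_id, pos) and insert it into the hash
 * table. Returns NULL if we raced with another insert, an ERR_PTR on
 * allocation failure; on success the new entry is returned intent locked.
 */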
static struct bkey_cached *
btree_key_cache_create(struct btree_key_cache *c,
                       enum btree_id btree_id,
                       struct bpos pos)
{
        struct bkey_cached *ck;

        ck = bkey_cached_alloc(c);
        if (!ck)
                return ERR_PTR(-ENOMEM);

        ck->c.level             = 0;
        ck->c.btree_id          = btree_id;
        ck->key.btree_id        = btree_id;
        ck->key.pos             = pos;
        ck->valid               = false;
        ck->flags               = 1U << BKEY_CACHED_ACCESSED;

        if (rhashtable_lookup_insert_fast(&c->table,
                                          &ck->hash,
                                          bch2_btree_key_cache_params)) {
                /* We raced with another fill: */
                bkey_cached_free(c, ck);
                return NULL;
        }

        c->nr_keys++;

        list_move(&ck->list, &c->clean);
        six_unlock_write(&ck->c.lock);

        return ck;
}

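/*
 * Read the key at ck->key from the btree and copy it into the cached entry,
 * reallocating ck->k first if the existing buffer is too small.
 */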
static int btree_key_cache_fill(struct btree_trans *trans,
                                struct btree_iter *ck_iter,
                                struct bkey_cached *ck)
{
        struct btree_iter *iter;
        struct bkey_s_c k;
        unsigned new_u64s = 0;
        struct bkey_i *new_k = NULL;
        int ret;

        iter = bch2_trans_get_iter(trans, ck->key.btree_id,
                                   ck->key.pos, BTREE_ITER_SLOTS);
        if (IS_ERR(iter))
                return PTR_ERR(iter);

        k = bch2_btree_iter_peek_slot(iter);
        ret = bkey_err(k);
        if (ret) {
                bch2_trans_iter_put(trans, iter);
                return ret;
        }

        if (!bch2_btree_node_relock(ck_iter, 0)) {
                bch2_trans_iter_put(trans, iter);
                trace_transaction_restart_ip(trans->ip, _THIS_IP_);
                return -EINTR;
        }

        if (k.k->u64s > ck->u64s) {
                new_u64s = roundup_pow_of_two(k.k->u64s);
                new_k = kmalloc(new_u64s * sizeof(u64), GFP_NOFS);
                if (!new_k) {
                        bch2_trans_iter_put(trans, iter);
                        return -ENOMEM;
                }
        }

        bch2_btree_node_lock_write(ck_iter->l[0].b, ck_iter);
        if (new_k) {
                kfree(ck->k);
                ck->u64s = new_u64s;
                ck->k = new_k;
        }

        bkey_reassemble(ck->k, k);
        ck->valid = true;
        bch2_btree_node_unlock_write(ck_iter->l[0].b, ck_iter);

        /* We're not likely to need this iterator again: */
        bch2_trans_iter_free(trans, iter);

        return 0;
}

static int bkey_cached_check_fn(struct six_lock *lock, void *p)
{
        struct bkey_cached *ck = container_of(lock, struct bkey_cached, c.lock);
        const struct btree_iter *iter = p;

        return ck->key.btree_id == iter->btree_id &&
                !bkey_cmp(ck->key.pos, iter->pos) ? 0 : -1;
}

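/*
 * Traverse path for cached iterators (BTREE_ITER_CACHED): find or create
 * (unless BTREE_ITER_CACHED_NOCREATE) the cached key at the iterator's
 * position, lock it, and fill it from the btree if it isn't valid yet
 * (unless BTREE_ITER_CACHED_NOFILL). Returns -EINTR when locking requires a
 * transaction restart.
 */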
__flatten
int bch2_btree_iter_traverse_cached(struct btree_iter *iter)
{
        struct btree_trans *trans = iter->trans;
        struct bch_fs *c = trans->c;
        struct bkey_cached *ck;
        int ret = 0;

        BUG_ON(iter->level);

        if (btree_node_locked(iter, 0)) {
                ck = (void *) iter->l[0].b;
                goto fill;
        }
retry:
        ck = bch2_btree_key_cache_find(c, iter->btree_id, iter->pos);
        if (!ck) {
                if (iter->flags & BTREE_ITER_CACHED_NOCREATE) {
                        iter->l[0].b = NULL;
                        return 0;
                }

                mutex_lock(&c->btree_key_cache.lock);
                ck = btree_key_cache_create(&c->btree_key_cache,
                                            iter->btree_id, iter->pos);
                mutex_unlock(&c->btree_key_cache.lock);

                ret = PTR_ERR_OR_ZERO(ck);
                if (ret)
                        goto err;
                if (!ck)
                        goto retry;

                mark_btree_node_locked(iter, 0, SIX_LOCK_intent);
                iter->locks_want = 1;
        } else {
                enum six_lock_type lock_want = __btree_lock_want(iter, 0);

                if (!btree_node_lock((void *) ck, iter->pos, 0, iter, lock_want,
                                     bkey_cached_check_fn, iter, _THIS_IP_)) {
                        if (ck->key.btree_id != iter->btree_id ||
                            bkey_cmp(ck->key.pos, iter->pos)) {
                                goto retry;
                        }

                        trace_transaction_restart_ip(trans->ip, _THIS_IP_);
                        ret = -EINTR;
                        goto err;
                }

                if (ck->key.btree_id != iter->btree_id ||
                    bkey_cmp(ck->key.pos, iter->pos)) {
                        six_unlock_type(&ck->c.lock, lock_want);
                        goto retry;
                }

                mark_btree_node_locked(iter, 0, lock_want);
        }

        iter->l[0].lock_seq     = ck->c.lock.state.seq;
        iter->l[0].b            = (void *) ck;
fill:
        if (!ck->valid && !(iter->flags & BTREE_ITER_CACHED_NOFILL)) {
                if (!btree_node_intent_locked(iter, 0))
                        bch2_btree_iter_upgrade(iter, 1);
                if (!btree_node_intent_locked(iter, 0)) {
                        trace_transaction_restart_ip(trans->ip, _THIS_IP_);
                        ret = -EINTR;
                        goto err;
                }

                ret = btree_key_cache_fill(trans, iter, ck);
                if (ret)
                        goto err;
        }

        if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
                set_bit(BKEY_CACHED_ACCESSED, &ck->flags);

        iter->uptodate = BTREE_ITER_NEED_PEEK;
        bch2_btree_iter_downgrade(iter);
        return ret;
err:
        if (ret != -EINTR) {
                btree_node_unlock(iter, 0);
                iter->flags |= BTREE_ITER_ERROR;
                iter->l[0].b = BTREE_ITER_NO_NODE_ERROR;
        }
        return ret;
}

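/*
 * Flush a single cached key back to the btree: commit it (with
 * BTREE_TRIGGER_NORUN), drop its journal pin and preres, then either move it
 * to the clean list or, if @evict, evict and free it.
 */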
static int btree_key_cache_flush_pos(struct btree_trans *trans,
                                     struct bkey_cached_key key,
                                     u64 journal_seq,
                                     bool evict)
{
        struct bch_fs *c = trans->c;
        struct journal *j = &c->journal;
        struct btree_iter *c_iter = NULL, *b_iter = NULL;
        struct bkey_cached *ck;
        int ret;

        b_iter = bch2_trans_get_iter(trans, key.btree_id, key.pos,
                                     BTREE_ITER_SLOTS|
                                     BTREE_ITER_INTENT);
        ret = PTR_ERR_OR_ZERO(b_iter);
        if (ret)
                goto out;

        c_iter = bch2_trans_get_iter(trans, key.btree_id, key.pos,
                                     BTREE_ITER_CACHED|
                                     BTREE_ITER_CACHED_NOFILL|
                                     BTREE_ITER_CACHED_NOCREATE|
                                     BTREE_ITER_INTENT);
        ret = PTR_ERR_OR_ZERO(c_iter);
        if (ret)
                goto out;
retry:
        ret = bch2_btree_iter_traverse(c_iter);
        if (ret)
                goto err;

        ck = (void *) c_iter->l[0].b;
        if (!ck ||
            (journal_seq && ck->journal.seq != journal_seq))
                goto out;

        if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                if (!evict)
                        goto out;
                goto evict;
        }

        ret   = bch2_btree_iter_traverse(b_iter) ?:
                bch2_trans_update(trans, b_iter, ck->k, BTREE_TRIGGER_NORUN) ?:
                bch2_trans_commit(trans, NULL, NULL,
                                  BTREE_INSERT_NOUNLOCK|
                                  BTREE_INSERT_NOCHECK_RW|
                                  BTREE_INSERT_NOFAIL|
                                  BTREE_INSERT_USE_RESERVE|
                                  BTREE_INSERT_USE_ALLOC_RESERVE|
                                  BTREE_INSERT_JOURNAL_RESERVED|
                                  BTREE_INSERT_JOURNAL_RECLAIM);
err:
        if (ret == -EINTR)
                goto retry;

        BUG_ON(ret && !bch2_journal_error(j));

        if (ret)
                goto out;

        bch2_journal_pin_drop(j, &ck->journal);
        bch2_journal_preres_put(j, &ck->res);

        if (!evict) {
                mutex_lock(&c->btree_key_cache.lock);
                if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                        clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
                        c->btree_key_cache.nr_dirty--;
                }

                list_move_tail(&ck->list, &c->btree_key_cache.clean);
                mutex_unlock(&c->btree_key_cache.lock);
        } else {
evict:
                BUG_ON(!btree_node_intent_locked(c_iter, 0));

                mark_btree_node_unlocked(c_iter, 0);
                c_iter->l[0].b = NULL;

                six_lock_write(&ck->c.lock, NULL, NULL);

                mutex_lock(&c->btree_key_cache.lock);
                if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                        clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
                        c->btree_key_cache.nr_dirty--;
                }

                bkey_cached_evict(&c->btree_key_cache, ck);
                bkey_cached_free(&c->btree_key_cache, ck);
                mutex_unlock(&c->btree_key_cache.lock);
        }
out:
        bch2_trans_iter_put(trans, b_iter);
        bch2_trans_iter_put(trans, c_iter);
        return ret;
}

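/*
 * Journal pin flush callback: flushes the cached key pinning journal
 * sequence number @seq, unless it's already been flushed or has since been
 * redirtied at a newer sequence number.
 */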
static void btree_key_cache_journal_flush(struct journal *j,
                                          struct journal_entry_pin *pin,
                                          u64 seq)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bkey_cached *ck =
                container_of(pin, struct bkey_cached, journal);
        struct bkey_cached_key key;
        struct btree_trans trans;

        int srcu_idx = srcu_read_lock(&c->btree_trans_barrier);

        six_lock_read(&ck->c.lock, NULL, NULL);
        key = ck->key;

        if (ck->journal.seq != seq ||
            !test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                six_unlock_read(&ck->c.lock);
                goto unlock;
        }
        six_unlock_read(&ck->c.lock);

        bch2_trans_init(&trans, c, 0, 0);
        btree_key_cache_flush_pos(&trans, key, seq, false);
        bch2_trans_exit(&trans);
unlock:
        srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);
}

/*
 * Flush and evict a key from the key cache:
 */
int bch2_btree_key_cache_flush(struct btree_trans *trans,
                               enum btree_id id, struct bpos pos)
{
        struct bch_fs *c = trans->c;
        struct bkey_cached_key key = { id, pos };

        /* Fastpath - assume it won't be found: */
        if (!bch2_btree_key_cache_find(c, id, pos))
                return 0;

        return btree_key_cache_flush_pos(trans, key, 0, true);
}

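/*
 * Update a cached key in place from the transaction commit path: copy in the
 * new value, transfer journal preres from the transaction to the entry, mark
 * it dirty and (re)pin the journal sequence number it was journalled at.
 */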
bool bch2_btree_insert_key_cached(struct btree_trans *trans,
                                  struct btree_iter *iter,
                                  struct bkey_i *insert)
{
        struct bch_fs *c = trans->c;
        struct bkey_cached *ck = (void *) iter->l[0].b;
        bool kick_reclaim = false;

        BUG_ON(insert->u64s > ck->u64s);

        if (likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))) {
                int difference;

                BUG_ON(jset_u64s(insert->u64s) > trans->journal_preres.u64s);

                difference = jset_u64s(insert->u64s) - ck->res.u64s;
                if (difference > 0) {
                        trans->journal_preres.u64s      -= difference;
                        ck->res.u64s                    += difference;
                }
        }

        bkey_copy(ck->k, insert);
        ck->valid = true;

        if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                mutex_lock(&c->btree_key_cache.lock);
                list_move(&ck->list, &c->btree_key_cache.dirty);

                set_bit(BKEY_CACHED_DIRTY, &ck->flags);
                c->btree_key_cache.nr_dirty++;

                if (bch2_nr_btree_keys_need_flush(c))
                        kick_reclaim = true;

                mutex_unlock(&c->btree_key_cache.lock);
        }

        bch2_journal_pin_update(&c->journal, trans->journal_res.seq,
                                &ck->journal, btree_key_cache_journal_flush);

        if (kick_reclaim)
                journal_reclaim_kick(&c->journal);
        return true;
}

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_key_cache_verify_clean(struct btree_trans *trans,
                               enum btree_id id, struct bpos pos)
{
        BUG_ON(bch2_btree_key_cache_find(trans->c, id, pos));
}
#endif

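/*
 * Shrinker scan: free entries on the freed list whose SRCU grace period has
 * elapsed, then evict clean entries that haven't been accessed since the
 * last scan (second chance via the BKEY_CACHED_ACCESSED bit).
 */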
static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
                                               struct shrink_control *sc)
{
        struct bch_fs *c = container_of(shrink, struct bch_fs,
                                        btree_key_cache.shrink);
        struct btree_key_cache *bc = &c->btree_key_cache;
        struct bkey_cached *ck, *t;
        size_t scanned = 0, freed = 0, nr = sc->nr_to_scan;
        unsigned flags;

        /* Return -1 if we can't do anything right now */
        if (sc->gfp_mask & __GFP_FS)
                mutex_lock(&bc->lock);
        else if (!mutex_trylock(&bc->lock))
                return -1;

        flags = memalloc_nofs_save();

        /*
         * Newest freed entries are at the end of the list - once we hit one
         * that's too new to be freed, we can bail out:
         */
        list_for_each_entry_safe(ck, t, &bc->freed, list) {
                if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
                                                 ck->btree_trans_barrier_seq))
                        break;

                list_del(&ck->list);
                kmem_cache_free(bch2_key_cache, ck);
                bc->nr_freed--;
                scanned++;
                freed++;
        }

        if (scanned >= nr)
                goto out;

        list_for_each_entry_safe(ck, t, &bc->clean, list) {
                if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
                        clear_bit(BKEY_CACHED_ACCESSED, &ck->flags);
                else if (bkey_cached_lock_for_evict(ck)) {
                        bkey_cached_evict(bc, ck);
                        bkey_cached_free(bc, ck);
                }

                scanned++;
                if (scanned >= nr) {
                        if (&t->list != &bc->clean)
                                list_move_tail(&bc->clean, &t->list);
                        goto out;
                }
        }
out:
        memalloc_nofs_restore(flags);
        mutex_unlock(&bc->lock);

        return freed;
}

static unsigned long bch2_btree_key_cache_count(struct shrinker *shrink,
                                                struct shrink_control *sc)
{
        struct bch_fs *c = container_of(shrink, struct bch_fs,
                                        btree_key_cache.shrink);
        struct btree_key_cache *bc = &c->btree_key_cache;

        return bc->nr_keys;
}

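/*
 * Filesystem shutdown: unregister the shrinker and free every cached key.
 * Dirty entries remaining at this point are only expected after a journal
 * error.
 */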
void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
{
        struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
        struct bkey_cached *ck, *n;

        if (bc->shrink.list.next)
                unregister_shrinker(&bc->shrink);

        mutex_lock(&bc->lock);
        list_splice(&bc->dirty, &bc->clean);

        list_for_each_entry_safe(ck, n, &bc->clean, list) {
                bch2_journal_pin_drop(&c->journal, &ck->journal);
                bch2_journal_preres_put(&c->journal, &ck->res);

                kfree(ck->k);
                list_del(&ck->list);
                kmem_cache_free(bch2_key_cache, ck);
                bc->nr_keys--;
        }

        BUG_ON(bc->nr_dirty && !bch2_journal_error(&c->journal));
        BUG_ON(bc->nr_keys);

        list_for_each_entry_safe(ck, n, &bc->freed, list) {
                list_del(&ck->list);
                kmem_cache_free(bch2_key_cache, ck);
        }
        mutex_unlock(&bc->lock);

        if (bc->table_init_done)
                rhashtable_destroy(&bc->table);
}

void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *c)
{
        mutex_init(&c->lock);
        INIT_LIST_HEAD(&c->freed);
        INIT_LIST_HEAD(&c->clean);
        INIT_LIST_HEAD(&c->dirty);
}

int bch2_fs_btree_key_cache_init(struct btree_key_cache *c)
{
        int ret;

        c->shrink.seeks                 = 1;
        c->shrink.count_objects         = bch2_btree_key_cache_count;
        c->shrink.scan_objects          = bch2_btree_key_cache_scan;

        ret = register_shrinker(&c->shrink);
        if (ret)
                return ret;

        ret = rhashtable_init(&c->table, &bch2_btree_key_cache_params);
        if (ret)
                return ret;

        c->table_init_done = true;
        return 0;
}

void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *c)
{
        pr_buf(out, "nr_freed:\t%zu\n", c->nr_freed);
        pr_buf(out, "nr_keys:\t%zu\n",  c->nr_keys);
        pr_buf(out, "nr_dirty:\t%zu\n", c->nr_dirty);
}

void bch2_btree_key_cache_exit(void)
{
        if (bch2_key_cache)
                kmem_cache_destroy(bch2_key_cache);
}

int __init bch2_btree_key_cache_init(void)
{
        bch2_key_cache = KMEM_CACHE(bkey_cached, 0);
        if (!bch2_key_cache)
                return -ENOMEM;

        return 0;
}