/*
 * libbcachefs/buckets_waiting_for_journal.c
 * From bcachefs-tools (git.sesse.net), update to ba398d2906
 * ("bcachefs: Fix reflink repair code").
 */
1 // SPDX-License-Identifier: GPL-2.0
2
3 #include "bcachefs.h"
4 #include "buckets_waiting_for_journal.h"
5 #include <linux/random.h>
6
7 static inline struct bucket_hashed *
8 bucket_hash(struct buckets_waiting_for_journal_table *t,
9             unsigned hash_seed_idx, u64 dev_bucket)
10 {
11         unsigned h = siphash_1u64(dev_bucket, &t->hash_seeds[hash_seed_idx]);
12
13         BUG_ON(!is_power_of_2(t->size));
14
15         return t->d + (h & (t->size - 1));
16 }
17
18 static void bucket_table_init(struct buckets_waiting_for_journal_table *t, size_t size)
19 {
20         unsigned i;
21
22         t->size = size;
23         for (i = 0; i < ARRAY_SIZE(t->hash_seeds); i++)
24                 get_random_bytes(&t->hash_seeds[i], sizeof(t->hash_seeds[i]));
25         memset(t->d, 0, sizeof(t->d[0]) * size);
26 }
27
28 bool bch2_bucket_needs_journal_commit(struct buckets_waiting_for_journal *b,
29                                       u64 flushed_seq,
30                                       unsigned dev, u64 bucket)
31 {
32         struct buckets_waiting_for_journal_table *t;
33         u64 dev_bucket = (u64) dev << 56 | bucket;
34         bool ret = false;
35         unsigned i;
36
37         mutex_lock(&b->lock);
38         t = b->t;
39
40         for (i = 0; i < ARRAY_SIZE(t->hash_seeds); i++) {
41                 struct bucket_hashed *h = bucket_hash(t, i, dev_bucket);
42
43                 if (h->dev_bucket == dev_bucket) {
44                         ret = h->journal_seq > flushed_seq;
45                         break;
46                 }
47         }
48
49         mutex_unlock(&b->lock);
50
51         return ret;
52 }
53
/*
 * Cuckoo-hashing insert: try to place @new into table @t.
 *
 * Each entry has ARRAY_SIZE(t->hash_seeds) candidate slots, one per seed.
 * A slot may be taken if it already holds the same dev_bucket or if its
 * entry has been flushed (journal_seq <= @flushed_seq).  Otherwise one
 * occupant is evicted and re-inserted via its other candidate slots, for
 * a bounded number of attempts.
 *
 * Returns false if insertion failed and the table needs rehashing; note
 * that in that case @new has been clobbered by the swaps below and holds
 * the entry that could not be placed.
 */
static bool bucket_table_insert(struct buckets_waiting_for_journal_table *t,
				struct bucket_hashed *new,
				u64 flushed_seq)
{
	struct bucket_hashed *last_evicted = NULL;
	unsigned tries, i;

	for (tries = 0; tries < 10; tries++) {
		struct bucket_hashed *old, *victim = NULL;

		for (i = 0; i < ARRAY_SIZE(t->hash_seeds); i++) {
			old = bucket_hash(t, i, new->dev_bucket);

			/* Slot reusable: same key, or entry already flushed: */
			if (old->dev_bucket == new->dev_bucket ||
			    old->journal_seq <= flushed_seq) {
				*old = *new;
				return true;
			}

			/*
			 * Never evict the entry we evicted on the previous
			 * iteration — that would just ping-pong:
			 */
			if (last_evicted != old)
				victim = old;
		}

		/* hashed to same slot 3 times: */
		if (!victim)
			break;

		/* Failed to find an empty slot: */
		swap(*new, *victim);
		last_evicted = victim;
	}

	return false;
}
88
/*
 * Record that @bucket on device @dev is dirty at @journal_seq: it must not
 * be reused until the journal has been flushed past that sequence number.
 *
 * Inserts into the cuckoo hash table, rehashing — and growing, if needed —
 * when insertion fails.  Returns 0 on success, -ENOMEM if a replacement
 * table could not be allocated.
 */
int bch2_set_bucket_needs_journal_commit(struct buckets_waiting_for_journal *b,
					 u64 flushed_seq,
					 unsigned dev, u64 bucket,
					 u64 journal_seq)
{
	struct buckets_waiting_for_journal_table *t, *n;
	struct bucket_hashed tmp, new = {
		/* Key layout matches bch2_bucket_needs_journal_commit(): */
		.dev_bucket	= (u64) dev << 56 | bucket,
		.journal_seq	= journal_seq,
	};
	size_t i, new_size, nr_elements = 1, nr_rehashes = 0;
	int ret = 0;

	mutex_lock(&b->lock);

	/* Common case: the entry fits in the current table: */
	if (likely(bucket_table_insert(b->t, &new, flushed_seq)))
		goto out;

	/* Count live (unflushed) entries to decide whether to grow: */
	t = b->t;
	for (i = 0; i < t->size; i++)
		nr_elements += t->d[i].journal_seq > flushed_seq;

	/*
	 * If the table is mostly stale entries, rehash at the same size
	 * (dropping the flushed ones); otherwise double it:
	 */
	new_size = nr_elements < t->size / 3 ? t->size : t->size * 2;

	n = kvmalloc(sizeof(*n) + sizeof(n->d[0]) * new_size, GFP_KERNEL);
	if (!n) {
		ret = -ENOMEM;
		goto out;
	}

retry_rehash:
	/* bucket_table_init() picks fresh seeds, so each retry differs: */
	nr_rehashes++;
	bucket_table_init(n, new_size);

	tmp = new;
	BUG_ON(!bucket_table_insert(n, &tmp, flushed_seq));

	/*
	 * Reinsert surviving entries; bucket_table_insert() clobbers its
	 * argument on failure, so always insert from a copy:
	 */
	for (i = 0; i < t->size; i++) {
		if (t->d[i].journal_seq <= flushed_seq)
			continue;

		tmp = t->d[i];
		if (!bucket_table_insert(n, &tmp, flushed_seq))
			goto retry_rehash;
	}

	/* Publish the new table (still under b->lock) and free the old: */
	b->t = n;
	kvfree(t);

	pr_debug("took %zu rehashes, table at %zu/%zu elements",
		 nr_rehashes, nr_elements, b->t->size);
out:
	mutex_unlock(&b->lock);

	return ret;
}
145
146 void bch2_fs_buckets_waiting_for_journal_exit(struct bch_fs *c)
147 {
148         struct buckets_waiting_for_journal *b = &c->buckets_waiting_for_journal;
149
150         kvfree(b->t);
151 }
152
153 #define INITIAL_TABLE_SIZE      8
154
155 int bch2_fs_buckets_waiting_for_journal_init(struct bch_fs *c)
156 {
157         struct buckets_waiting_for_journal *b = &c->buckets_waiting_for_journal;
158
159         mutex_init(&b->lock);
160
161         b->t = kvmalloc(sizeof(*b->t) + sizeof(b->t->d[0]) * INITIAL_TABLE_SIZE, GFP_KERNEL);
162         if (!b->t)
163                 return -ENOMEM;
164
165         bucket_table_init(b->t, INITIAL_TABLE_SIZE);
166         return 0;
167 }