// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "buckets_waiting_for_journal.h"
#include <linux/hash.h>
#include <linux/random.h>

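/*
 * Each entry maps a packed (device, bucket) key - the device index in the
 * top byte, the bucket number in the low bits - to the journal sequence
 * number that bucket is waiting on.  The table is a small cuckoo hash:
 * every key has one candidate slot per hash seed, and a slot whose
 * journal_seq has already been flushed counts as empty.
 */

/* Candidate slot for @dev_bucket under hash seed @hash_seed_idx: */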
static inline struct bucket_hashed *
bucket_hash(struct buckets_waiting_for_journal_table *t,
            unsigned hash_seed_idx, u64 dev_bucket)
{
        return t->d + hash_64(dev_bucket ^ t->hash_seeds[hash_seed_idx], t->bits);
}

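/*
 * (Re)seed the hash functions and clear every slot; fresh seeds mean a
 * rehash gets a different slot assignment from the previous attempt.
 */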
static void bucket_table_init(struct buckets_waiting_for_journal_table *t, size_t bits)
{
        unsigned i;

        t->bits = bits;
        for (i = 0; i < ARRAY_SIZE(t->hash_seeds); i++)
                get_random_bytes(&t->hash_seeds[i], sizeof(t->hash_seeds[i]));
        memset(t->d, 0, sizeof(t->d[0]) << t->bits);
}

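/*
 * Returns true if @bucket on @dev is still waiting on a journal write newer
 * than @flushed_seq, i.e. its recorded journal_seq has not been flushed yet.
 */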
bool bch2_bucket_needs_journal_commit(struct buckets_waiting_for_journal *b,
                                      u64 flushed_seq,
                                      unsigned dev, u64 bucket)
{
        struct buckets_waiting_for_journal_table *t;
        u64 dev_bucket = (u64) dev << 56 | bucket;
        bool ret = false;
        unsigned i;

        mutex_lock(&b->lock);
        t = b->t;

        for (i = 0; i < ARRAY_SIZE(t->hash_seeds); i++) {
                struct bucket_hashed *h = bucket_hash(t, i, dev_bucket);

                if (h->dev_bucket == dev_bucket) {
                        ret = h->journal_seq > flushed_seq;
                        break;
                }
        }

        mutex_unlock(&b->lock);

        return ret;
}

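/*
 * Cuckoo-style insert: try each of @new's candidate slots, reusing any slot
 * that already holds this key or an entry that has since been flushed.  If
 * every candidate is live, evict one (avoiding the slot we were just evicted
 * from, so we don't ping-pong) and carry on with the evicted entry, for at
 * most 10 hops.  Returns false if no slot was found - the caller must then
 * grow and/or rehash the table.
 */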
static bool bucket_table_insert(struct buckets_waiting_for_journal_table *t,
                                struct bucket_hashed *new,
                                u64 flushed_seq)
{
        struct bucket_hashed *last_evicted = NULL;
        unsigned tries, i;

        for (tries = 0; tries < 10; tries++) {
                struct bucket_hashed *old, *victim = NULL;

                for (i = 0; i < ARRAY_SIZE(t->hash_seeds); i++) {
                        old = bucket_hash(t, i, new->dev_bucket);

                        if (old->dev_bucket == new->dev_bucket ||
                            old->journal_seq <= flushed_seq) {
                                *old = *new;
                                return true;
                        }

                        if (last_evicted != old)
                                victim = old;
                }

                /* hashed to same slot 3 times: */
                if (!victim)
                        break;

                /* Failed to find an empty slot: */
                swap(*new, *victim);
                last_evicted = victim;
        }

        return false;
}

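/*
 * Record that @bucket on @dev is waiting on @journal_seq.  Fast path: insert
 * into the current table.  If that fails, count the entries that are still
 * unflushed, allocate a new table (double the size if the old one was more
 * than a third full) and rehash - with fresh seeds on every attempt - until
 * the insert succeeds.
 */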
int bch2_set_bucket_needs_journal_commit(struct buckets_waiting_for_journal *b,
                                         u64 flushed_seq,
                                         unsigned dev, u64 bucket,
                                         u64 journal_seq)
{
        struct buckets_waiting_for_journal_table *t, *n;
        struct bucket_hashed tmp, new = {
                .dev_bucket     = (u64) dev << 56 | bucket,
                .journal_seq    = journal_seq,
        };
        size_t i, size, new_bits, nr_elements = 1, nr_rehashes = 0;
        int ret = 0;

        mutex_lock(&b->lock);

        if (likely(bucket_table_insert(b->t, &new, flushed_seq)))
                goto out;

        t = b->t;
        size = 1UL << t->bits;
        for (i = 0; i < size; i++)
                nr_elements += t->d[i].journal_seq > flushed_seq;

        new_bits = t->bits + (nr_elements * 3 > size);

        n = kvmalloc(sizeof(*n) + (sizeof(n->d[0]) << new_bits), GFP_KERNEL);
        if (!n) {
                ret = -BCH_ERR_ENOMEM_buckets_waiting_for_journal_set;
                goto out;
        }

retry_rehash:
        nr_rehashes++;
        bucket_table_init(n, new_bits);

        tmp = new;
        BUG_ON(!bucket_table_insert(n, &tmp, flushed_seq));

        for (i = 0; i < 1UL << t->bits; i++) {
                if (t->d[i].journal_seq <= flushed_seq)
                        continue;

                tmp = t->d[i];
                if (!bucket_table_insert(n, &tmp, flushed_seq))
                        goto retry_rehash;
        }

        b->t = n;
        kvfree(t);

        pr_debug("took %zu rehashes, table at %zu/%lu elements",
                 nr_rehashes, nr_elements, 1UL << b->t->bits);
out:
        mutex_unlock(&b->lock);

        return ret;
}

void bch2_fs_buckets_waiting_for_journal_exit(struct bch_fs *c)
{
        struct buckets_waiting_for_journal *b = &c->buckets_waiting_for_journal;

        kvfree(b->t);
}

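/*
 * Start with a small table (1 << 3 = 8 slots); it is grown on demand by
 * bch2_set_bucket_needs_journal_commit().
 */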
#define INITIAL_TABLE_BITS              3

int bch2_fs_buckets_waiting_for_journal_init(struct bch_fs *c)
{
        struct buckets_waiting_for_journal *b = &c->buckets_waiting_for_journal;

        mutex_init(&b->lock);

        b->t = kvmalloc(sizeof(*b->t) +
                        (sizeof(b->t->d[0]) << INITIAL_TABLE_BITS), GFP_KERNEL);
        if (!b->t)
                return -BCH_ERR_ENOMEM_buckets_waiting_for_journal_init;

        bucket_table_init(b->t, INITIAL_TABLE_BITS);
        return 0;
}