1 #ifndef _BCACHE_STR_HASH_H
2 #define _BCACHE_STR_HASH_H
4 #include "btree_iter.h"
5 #include "btree_update.h"
12 #include <linux/crc32c.h>
13 #include <crypto/hash.h>
/*
 * Per-inode parameters for computing string hashes (dirents/xattrs).
 *
 * NOTE(review): part of this struct is elided from this view; judging by
 * bch2_hash_info_init() below it also carries a hash `type` and a `crc_key`.
 */
15 struct bch_hash_info {
/* key material for the SipHash24 hash variant */
19 SIPHASH_KEY siphash_key;
/*
 * Build a bch_hash_info from an unpacked inode: pull the string-hash type out
 * of its i_flags bitfield and derive the per-type key material from
 * i_hash_seed.
 */
23 static inline struct bch_hash_info
24 bch2_hash_info_init(struct bch_fs *c,
25 const struct bch_inode_unpacked *bi)
/* hash type is stored in i_flags at INODE_STR_HASH_OFFSET, INODE_STR_HASH_BITS wide */
28 struct bch_hash_info info = {
29 .type = (bi->i_flags >> INODE_STR_HASH_OFFSET) &
30 ~(~0U << INODE_STR_HASH_BITS)
/* CRC variants use the raw seed directly as the key */
34 case BCH_STR_HASH_CRC32C:
35 case BCH_STR_HASH_CRC64:
36 info.crc_key = bi->i_hash_seed;
38 case BCH_STR_HASH_SIPHASH: {
/*
 * For SipHash, derive the key by hashing the seed with SHA-256 and
 * truncating the digest to the SipHash key size.
 * NOTE(review): the return value of crypto_shash_digest() is not
 * checked in the visible code — presumably c->sha256 cannot fail here;
 * confirm.
 */
39 SHASH_DESC_ON_STACK(desc, c->sha256);
40 u8 digest[crypto_shash_digestsize(c->sha256)];
42 desc->tfm = c->sha256;
45 crypto_shash_digest(desc, (void *) &bi->i_hash_seed,
46 sizeof(bi->i_hash_seed), digest);
47 memcpy(&info.siphash_key, digest, sizeof(info.siphash_key));
/*
 * Incremental string-hash state, used via init/update/end below.
 * NOTE(review): members are elided from this view; the users access
 * ->crc32c, ->crc64 and ->siphash — presumably a union of the per-type
 * states.
 */
57 struct bch_str_hash_ctx {
/*
 * Seed the incremental hash state according to info->type, folding in the
 * per-inode key so identical names hash differently per inode.
 */
65 static inline void bch2_str_hash_init(struct bch_str_hash_ctx *ctx,
66 const struct bch_hash_info *info)
69 case BCH_STR_HASH_CRC32C:
70 ctx->crc32c = crc32c(~0, &info->crc_key, sizeof(info->crc_key));
72 case BCH_STR_HASH_CRC64:
73 ctx->crc64 = bch2_crc64_update(~0, &info->crc_key, sizeof(info->crc_key));
75 case BCH_STR_HASH_SIPHASH:
76 SipHash24_Init(&ctx->siphash, &info->siphash_key);
/*
 * Fold @len bytes of @data into the running hash state; may be called
 * repeatedly between init and end.
 */
83 static inline void bch2_str_hash_update(struct bch_str_hash_ctx *ctx,
84 const struct bch_hash_info *info,
85 const void *data, size_t len)
88 case BCH_STR_HASH_CRC32C:
89 ctx->crc32c = crc32c(ctx->crc32c, data, len);
91 case BCH_STR_HASH_CRC64:
92 ctx->crc64 = bch2_crc64_update(ctx->crc64, data, len);
94 case BCH_STR_HASH_SIPHASH:
95 SipHash24_Update(&ctx->siphash, data, len);
/*
 * Finalize the hash and return a u64 offset value.  The visible variants
 * shift right by one, keeping the result out of the top bit — presumably so
 * hash values fit the valid bkey offset range; confirm against callers.
 * NOTE(review): the CRC32C return statement is elided from this view.
 */
102 static inline u64 bch2_str_hash_end(struct bch_str_hash_ctx *ctx,
103 const struct bch_hash_info *info)
105 switch (info->type) {
106 case BCH_STR_HASH_CRC32C:
108 case BCH_STR_HASH_CRC64:
109 return ctx->crc64 >> 1;
110 case BCH_STR_HASH_SIPHASH:
111 return SipHash24_End(&ctx->siphash) >> 1;
/*
 * Per-keyspace hash-table operations: which btree the table lives in, plus
 * hash and compare callbacks.  The *_key variants take a raw search key (e.g.
 * a name); the *_bkey variants take an existing bkey from the btree.
 * NOTE(review): additional members (key_type/whiteout_type, used by the
 * lookup helpers below) are elided from this view.
 */
117 struct bch_hash_desc {
118 enum btree_id btree_id;
/* hash a raw search key / an existing bkey to a table offset */
122 u64 (*hash_key)(const struct bch_hash_info *, const void *);
123 u64 (*hash_bkey)(const struct bch_hash_info *, struct bkey_s_c);
/* compare a bkey against a raw search key / another bkey; false == match (see lookup loops) */
124 bool (*cmp_key)(struct bkey_s_c, const void *);
125 bool (*cmp_bkey)(struct bkey_s_c, struct bkey_s_c);
/*
 * Linear-probe from the iterator's current (hashed) position within the same
 * inode: skip hash collisions (keys of desc.key_type that don't compare
 * equal) and whiteouts, stop at a matching key (found), a hole (not found),
 * or the end of the inode's range.  Returns the key, or bkey_s_c_err.
 */
128 static inline struct bkey_s_c
129 bch2_hash_lookup_at(const struct bch_hash_desc desc,
130 const struct bch_hash_info *info,
131 struct btree_iter *iter, const void *search)
133 u64 inode = iter->pos.inode;
136 struct bkey_s_c k = bch2_btree_iter_peek_with_holes(iter);
/* propagate btree iterator errors (elided return in this view) */
138 if (btree_iter_err(k))
141 if (k.k->type == desc.key_type) {
/* cmp_key() == false means a match (elided return of k) */
142 if (!desc.cmp_key(k, search))
144 } else if (k.k->type == desc.whiteout_type) {
147 /* hole, not found */
/* collision or whiteout: keep probing the chain */
151 bch2_btree_iter_advance_pos(iter);
152 } while (iter->pos.inode == inode);
154 return bkey_s_c_err(-ENOENT);
/*
 * Same probe loop as bch2_hash_lookup_at(), but the search key is an
 * existing bkey, compared with desc.cmp_bkey() instead of desc.cmp_key().
 * Used by bch2_hash_set() to find a previous key being replaced.
 */
157 static inline struct bkey_s_c
158 bch2_hash_lookup_bkey_at(const struct bch_hash_desc desc,
159 const struct bch_hash_info *info,
160 struct btree_iter *iter, struct bkey_s_c search)
162 u64 inode = iter->pos.inode;
165 struct bkey_s_c k = bch2_btree_iter_peek_with_holes(iter);
/* propagate btree iterator errors (elided return in this view) */
167 if (btree_iter_err(k))
170 if (k.k->type == desc.key_type) {
/* cmp_bkey() == false means a match (elided return of k) */
171 if (!desc.cmp_bkey(k, search))
173 } else if (k.k->type == desc.whiteout_type) {
176 /* hole, not found */
180 bch2_btree_iter_advance_pos(iter);
181 } while (iter->pos.inode == inode);
183 return bkey_s_c_err(-ENOENT);
/*
 * Read-only lookup: position @iter at (inode, hash(key)) in the hash table's
 * btree and probe for @key.  Caller owns @iter and must unlock it.
 */
186 static inline struct bkey_s_c
187 bch2_hash_lookup(const struct bch_hash_desc desc,
188 const struct bch_hash_info *info,
189 struct bch_fs *c, u64 inode,
190 struct btree_iter *iter, const void *key)
192 bch2_btree_iter_init(iter, c, desc.btree_id,
193 POS(inode, desc.hash_key(info, key)));
195 return bch2_hash_lookup_at(desc, info, iter, key);
/*
 * As bch2_hash_lookup(), but takes intent locks — for callers that will go
 * on to modify the key they find.
 */
198 static inline struct bkey_s_c
199 bch2_hash_lookup_intent(const struct bch_hash_desc desc,
200 const struct bch_hash_info *info,
201 struct bch_fs *c, u64 inode,
202 struct btree_iter *iter, const void *key)
204 bch2_btree_iter_init_intent(iter, c, desc.btree_id,
205 POS(inode, desc.hash_key(info, key)));
207 return bch2_hash_lookup_at(desc, info, iter, key);
/*
 * Probe forward from the iterator's current position for the first slot not
 * occupied by a live key of desc.key_type (i.e. a hole or whiteout usable
 * for insertion).  Returns -ENOENT if probing runs past the inode.
 */
210 static inline struct bkey_s_c
211 bch2_hash_hole_at(const struct bch_hash_desc desc, struct btree_iter *iter)
214 struct bkey_s_c k = bch2_btree_iter_peek_with_holes(iter);
/* propagate btree iterator errors (elided return in this view) */
216 if (btree_iter_err(k))
/* anything that isn't a live key is a usable slot (elided return of k) */
219 if (k.k->type != desc.key_type)
222 /* hash collision, keep going */
223 bch2_btree_iter_advance_pos(iter);
224 if (iter->pos.inode != k.k->p.inode)
225 return bkey_s_c_err(-ENOENT);
/*
 * Find an empty slot for @key: position an intent-locked iterator at
 * (inode, hash(key)) and probe for the first free slot.
 * NOTE(review): the parameter line declaring `key` is elided from this view.
 */
229 static inline struct bkey_s_c bch2_hash_hole(const struct bch_hash_desc desc,
230 const struct bch_hash_info *info,
231 struct bch_fs *c, u64 inode,
232 struct btree_iter *iter,
235 bch2_btree_iter_init_intent(iter, c, desc.btree_id,
236 POS(inode, desc.hash_key(info, key)));
238 return bch2_hash_hole_at(desc, iter);
/*
 * Decide whether deleting the key at @start requires leaving a whiteout:
 * scan forward from the slot after @start; if a later live key hashed to a
 * position at or before @start, lookups for it probe through @start's slot,
 * so a plain hole there would break the chain.
 * NOTE(review): the return statements are elided from this view — presumably
 * 0 when the chain ends (hole) and nonzero when a dependent key is found;
 * negative on iterator error.
 */
241 static inline int bch2_hash_needs_whiteout(const struct bch_hash_desc desc,
242 const struct bch_hash_info *info,
243 struct btree_iter *iter,
244 struct btree_iter *start)
/* start scanning at the slot immediately after the one being deleted */
246 bch2_btree_iter_set_pos(iter,
247 btree_type_successor(start->btree_id, start->pos));
250 struct bkey_s_c k = bch2_btree_iter_peek_with_holes(iter);
251 int ret = btree_iter_err(k);
/* hole: probe chain ends here, no whiteout needed (elided return) */
256 if (k.k->type != desc.key_type &&
257 k.k->type != desc.whiteout_type)
/* a live key that hashed at/before start depends on this slot (elided return) */
260 if (k.k->type == desc.key_type &&
261 desc.hash_bkey(info, k) <= start->pos.offset)
264 bch2_btree_iter_advance_pos(iter);
/* insert must create a new entry (fail if one exists) */
268 #define BCH_HASH_SET_MUST_CREATE 1
/* insert must replace an existing entry (fail if none exists) */
269 #define BCH_HASH_SET_MUST_REPLACE 2
/*
 * Insert @insert into the hash table for @inode, honoring the
 * BCH_HASH_SET_* flags.  Probes from the hashed slot for an existing match;
 * on -ENOENT re-probes for the first free slot, then inserts there.
 * NOTE(review): several lines (journal_seq parameter, error-path gotos,
 * -EINTR retry handling) are elided from this view.
 */
271 static inline int bch2_hash_set(const struct bch_hash_desc desc,
272 const struct bch_hash_info *info,
273 struct bch_fs *c, u64 inode,
275 struct bkey_i *insert, int flags)
277 struct btree_iter iter, hashed_slot;
/* two linked iterators: one pinned on the hashed slot, one that probes */
281 bch2_btree_iter_init_intent(&hashed_slot, c, desc.btree_id,
282 POS(inode, desc.hash_bkey(info, bkey_i_to_s_c(insert))));
283 bch2_btree_iter_init_intent(&iter, c, desc.btree_id, hashed_slot.pos);
284 bch2_btree_iter_link(&hashed_slot, &iter);
287 * On hash collision, we have to keep the slot we hashed to locked while
288 * we do the insert - to avoid racing with another thread deleting
289 * whatever's in the slot we hashed to:
291 ret = bch2_btree_iter_traverse(&hashed_slot);
296 * On -EINTR/retry, we dropped locks - always restart from the slot we
/* restart the probe from the hashed slot */
299 bch2_btree_iter_copy(&iter, &hashed_slot);
301 k = bch2_hash_lookup_bkey_at(desc, info, &iter, bkey_i_to_s_c(insert));
303 ret = btree_iter_err(k);
304 if (ret == -ENOENT) {
/* no existing entry: MUST_REPLACE fails (elided error path) */
305 if (flags & BCH_HASH_SET_MUST_REPLACE) {
311 * Not found, so we're now looking for any open
312 * slot - we might have skipped over a whiteout
313 * that we could have used, so restart from the
316 bch2_btree_iter_copy(&iter, &hashed_slot);
317 k = bch2_hash_hole_at(desc, &iter);
318 if ((ret = btree_iter_err(k)))
/* found an existing entry: MUST_CREATE fails (elided error path) */
321 if (flags & BCH_HASH_SET_MUST_CREATE) {
/* insert at whichever slot the probe settled on */
329 insert->k.p = iter.pos;
330 ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq,
332 BTREE_INSERT_ENTRY(&iter, insert));
338 * On successful insert, we don't want to clobber ret with error from
341 bch2_btree_iter_unlock(&iter);
342 bch2_btree_iter_unlock(&hashed_slot);
/*
 * Delete the entry @iter currently points at.  First decides (via a linked
 * scratch iterator) whether a whiteout is needed to keep later entries'
 * probe chains intact; then inserts either a whiteout or a plain deleted
 * key at that position.
 * NOTE(review): the journal_seq parameter line and some error-path lines are
 * elided from this view.
 */
346 static inline int bch2_hash_delete_at(const struct bch_hash_desc desc,
347 const struct bch_hash_info *info,
348 struct btree_iter *iter,
351 struct btree_iter whiteout_iter;
352 struct bkey_i delete;
355 bch2_btree_iter_init(&whiteout_iter, iter->c, desc.btree_id,
357 bch2_btree_iter_link(iter, &whiteout_iter);
/* nonzero ret here means "whiteout required", negative means error */
359 ret = bch2_hash_needs_whiteout(desc, info, &whiteout_iter, iter);
363 bkey_init(&delete.k);
364 delete.k.p = iter->pos;
/* whiteout preserves the probe chain; KEY_TYPE_DELETED leaves a real hole */
365 delete.k.type = ret ? desc.whiteout_type : KEY_TYPE_DELETED;
367 ret = bch2_btree_insert_at(iter->c, NULL, NULL, journal_seq,
370 BTREE_INSERT_ENTRY(iter, &delete));
372 bch2_btree_iter_unlink(&whiteout_iter);
/*
 * Convenience wrapper: look up @key in @inode's hash table and delete it.
 * Sets up an intent iterator for the lookup plus a linked read iterator for
 * the whiteout scan, and unlocks both before returning.
 * NOTE(review): error-path gotos/returns are elided from this view.
 */
376 static inline int bch2_hash_delete(const struct bch_hash_desc desc,
377 const struct bch_hash_info *info,
378 struct bch_fs *c, u64 inode,
379 u64 *journal_seq, const void *key)
381 struct btree_iter iter, whiteout_iter;
385 bch2_btree_iter_init_intent(&iter, c, desc.btree_id,
386 POS(inode, desc.hash_key(info, key)));
387 bch2_btree_iter_init(&whiteout_iter, c, desc.btree_id,
388 POS(inode, desc.hash_key(info, key)));
389 bch2_btree_iter_link(&iter, &whiteout_iter);
391 k = bch2_hash_lookup_at(desc, info, &iter, key);
392 if ((ret = btree_iter_err(k)))
395 ret = bch2_hash_delete_at(desc, info, &iter, journal_seq);
400 bch2_btree_iter_unlock(&whiteout_iter);
401 bch2_btree_iter_unlock(&iter);
405 #endif /* _BCACHE_STR_HASH_H */