1 #ifndef _BCACHE_STR_HASH_H
2 #define _BCACHE_STR_HASH_H
4 #include "btree_iter.h"
5 #include "btree_update.h"
12 #include <linux/crc32c.h>
13 #include <crypto/hash.h>
/*
 * Per-inode string hash parameters: the hash type plus whichever seed/key
 * that type needs.  NOTE(review): this excerpt is truncated — the type
 * field and the crc_key member used by bch2_hash_info_init() below are not
 * visible here.
 */
15 struct bch_hash_info {
/* 128-bit SipHash-2-4 key; derived in bch2_hash_info_init() */
19 SIPHASH_KEY siphash_key;
/*
 * Build the string hash parameters for inode @bi.
 *
 * The hash type is a bitfield in bi->i_flags.  Depending on the type, the
 * per-inode seed (bi->i_hash_seed) is either used directly as the crc key
 * or, for siphash, stretched into a 128-bit key by SHA-256-hashing it.
 *
 * NOTE(review): the switch header, break statements and return are not
 * visible in this excerpt.
 */
23 static inline struct bch_hash_info
24 bch2_hash_info_init(struct bch_fs *c,
25 const struct bch_inode_unpacked *bi)
28 struct bch_hash_info info = {
/* extract the INODE_STR_HASH bitfield from i_flags */
29 .type = (bi->i_flags >> INODE_STR_HASH_OFFSET) &
30 ~(~0U << INODE_STR_HASH_BITS)
/* crc32c and crc64 share the same key setup: the seed is used directly */
34 case BCH_STR_HASH_CRC32C:
35 case BCH_STR_HASH_CRC64:
36 info.crc_key = bi->i_hash_seed;
38 case BCH_STR_HASH_SIPHASH: {
39 SHASH_DESC_ON_STACK(desc, c->sha256);
40 u8 digest[crypto_shash_digestsize(c->sha256)];
42 desc->tfm = c->sha256;
/*
 * Derive the siphash key from the 64-bit seed: SHA-256 the seed and
 * keep the leading sizeof(siphash_key) bytes of the digest.
 */
45 crypto_shash_digest(desc, (void *) &bi->i_hash_seed,
46 sizeof(bi->i_hash_seed), digest);
47 memcpy(&info.siphash_key, digest, sizeof(info.siphash_key));
/*
 * In-progress string hash state; which member is live is selected by
 * bch_hash_info::type.  NOTE(review): the members (crc32c, crc64, siphash
 * state — per the users below) are not visible in this excerpt.
 */
57 struct bch_str_hash_ctx {
/*
 * Begin a string hash computation, seeding the state from @info.
 * The crc variants fold the per-inode crc_key into an all-ones initial
 * value; siphash is keyed directly with info->siphash_key.
 * NOTE(review): switch header and break statements not visible here.
 */
65 static inline void bch2_str_hash_init(struct bch_str_hash_ctx *ctx,
66 const struct bch_hash_info *info)
69 case BCH_STR_HASH_CRC32C:
70 ctx->crc32c = crc32c(~0, &info->crc_key, sizeof(info->crc_key));
72 case BCH_STR_HASH_CRC64:
73 ctx->crc64 = bch2_crc64_update(~0, &info->crc_key, sizeof(info->crc_key));
75 case BCH_STR_HASH_SIPHASH:
76 SipHash24_Init(&ctx->siphash, &info->siphash_key);
/*
 * Feed @len bytes at @data into the in-progress hash @ctx.
 * @info->type selects which member of the context union is live.
 * NOTE(review): switch header and break statements not visible here.
 */
83 static inline void bch2_str_hash_update(struct bch_str_hash_ctx *ctx,
84 const struct bch_hash_info *info,
85 const void *data, size_t len)
88 case BCH_STR_HASH_CRC32C:
89 ctx->crc32c = crc32c(ctx->crc32c, data, len);
91 case BCH_STR_HASH_CRC64:
92 ctx->crc64 = bch2_crc64_update(ctx->crc64, data, len);
94 case BCH_STR_HASH_SIPHASH:
95 SipHash24_Update(&ctx->siphash, data, len);
/*
 * Finish the hash computation and return the value as a u64.
 *
 * The 64-bit results are shifted right by one — presumably to keep the
 * top bit of the keyspace free for other uses; confirm against the btree
 * key layout.  NOTE(review): the crc32c return statement is not visible
 * in this excerpt.
 */
102 static inline u64 bch2_str_hash_end(struct bch_str_hash_ctx *ctx,
103 const struct bch_hash_info *info)
105 switch (info->type) {
106 case BCH_STR_HASH_CRC32C:
108 case BCH_STR_HASH_CRC64:
109 return ctx->crc64 >> 1;
110 case BCH_STR_HASH_SIPHASH:
111 return SipHash24_End(&ctx->siphash) >> 1;
/*
 * Description of one on-disk hash table (e.g. dirents or xattrs): which
 * btree it lives in, plus ops to hash a caller-supplied search key or an
 * existing bkey, and to compare candidates while walking a probe chain.
 * NOTE(review): the key_type/whiteout_type members used by the lookup
 * helpers below are not visible in this excerpt.
 */
117 struct bch_hash_desc {
118 enum btree_id btree_id;
/* hash a raw search key / an existing bkey to a slot offset */
122 u64 (*hash_key)(const struct bch_hash_info *, const void *);
123 u64 (*hash_bkey)(const struct bch_hash_info *, struct bkey_s_c);
/*
 * cmp_* appear to follow memcmp convention (false == match), per the
 * !desc.cmp_key()/!desc.cmp_bkey() tests in the lookup helpers — confirm.
 */
124 bool (*cmp_key)(struct bkey_s_c, const void *);
125 bool (*cmp_bkey)(struct bkey_s_c, struct bkey_s_c);
/*
 * Walk the probe chain starting at @iter's current position looking for a
 * live key matching @search.
 *
 * Linear probing: a key of the wrong contents or a whiteout means keep
 * advancing; a plain hole terminates the chain (see the original comment
 * below).  Returns the matching key, an iterator error, or -ENOENT once
 * we cross into the next inode.  NOTE(review): loop head, returns on
 * match/hole, and closing braces are not visible in this excerpt.
 */
128 static inline struct bkey_s_c
129 bch2_hash_lookup_at(const struct bch_hash_desc desc,
130 const struct bch_hash_info *info,
131 struct btree_iter *iter, const void *search)
/* remember the inode so the scan stops at its end */
133 u64 inode = iter->pos.inode;
136 struct bkey_s_c k = bch2_btree_iter_peek_with_holes(iter);
138 if (btree_iter_err(k))
141 if (k.k->type == desc.key_type) {
/* cmp_key() false means equal (memcmp convention — confirm) */
142 if (!desc.cmp_key(k, search))
144 } else if (k.k->type == desc.whiteout_type) {
147 /* hole, not found */
/* collision or whiteout: probe the next slot */
151 bch2_btree_iter_advance_pos(iter);
152 } while (iter->pos.inode == inode);
154 return bkey_s_c_err(-ENOENT);
/*
 * Same probe-chain walk as bch2_hash_lookup_at(), but the needle is an
 * existing bkey compared with desc.cmp_bkey() rather than a raw search
 * key — used by bch2_hash_set() to find an entry to replace.
 * NOTE(review): loop head, returns on match/hole, and closing braces are
 * not visible in this excerpt.
 */
157 static inline struct bkey_s_c
158 bch2_hash_lookup_bkey_at(const struct bch_hash_desc desc,
159 const struct bch_hash_info *info,
160 struct btree_iter *iter, struct bkey_s_c search)
/* remember the inode so the scan stops at its end */
162 u64 inode = iter->pos.inode;
165 struct bkey_s_c k = bch2_btree_iter_peek_with_holes(iter);
167 if (btree_iter_err(k))
170 if (k.k->type == desc.key_type) {
/* cmp_bkey() false means equal (memcmp convention — confirm) */
171 if (!desc.cmp_bkey(k, search))
173 } else if (k.k->type == desc.whiteout_type) {
176 /* hole, not found */
/* collision or whiteout: probe the next slot */
180 bch2_btree_iter_advance_pos(iter);
181 } while (iter->pos.inode == inode);
183 return bkey_s_c_err(-ENOENT);
/*
 * Look up @key in @inode's hash table: position @iter at the slot the key
 * hashes to (read-only iterator, flags == 0) and walk the probe chain.
 * Caller owns @iter and is responsible for unlocking it.
 */
186 static inline struct bkey_s_c
187 bch2_hash_lookup(const struct bch_hash_desc desc,
188 const struct bch_hash_info *info,
189 struct bch_fs *c, u64 inode,
190 struct btree_iter *iter, const void *key)
192 bch2_btree_iter_init(iter, c, desc.btree_id,
193 POS(inode, desc.hash_key(info, key)), 0);
195 return bch2_hash_lookup_at(desc, info, iter, key);
/*
 * As bch2_hash_lookup(), but the iterator is initialized for a subsequent
 * update — presumably with intent-lock flags; the flags argument line is
 * not visible in this excerpt, confirm against the full source.
 */
198 static inline struct bkey_s_c
199 bch2_hash_lookup_intent(const struct bch_hash_desc desc,
200 const struct bch_hash_info *info,
201 struct bch_fs *c, u64 inode,
202 struct btree_iter *iter, const void *key)
204 bch2_btree_iter_init(iter, c, desc.btree_id,
205 POS(inode, desc.hash_key(info, key)),
208 return bch2_hash_lookup_at(desc, info, iter, key);
/*
 * From @iter's current position, probe forward for the first slot not
 * occupied by a live key of desc.key_type — i.e. a place a new entry can
 * be inserted (a whiteout or hole both qualify).  Returns -ENOENT if the
 * probe walks past the end of the inode, an iterator error, or the empty
 * slot's key.  NOTE(review): loop head, the return-on-hole and closing
 * braces are not visible in this excerpt.
 */
211 static inline struct bkey_s_c
212 bch2_hash_hole_at(const struct bch_hash_desc desc, struct btree_iter *iter)
215 struct bkey_s_c k = bch2_btree_iter_peek_with_holes(iter);
217 if (btree_iter_err(k))
/* anything that isn't a live entry is usable as an insert slot */
220 if (k.k->type != desc.key_type)
223 /* hash collision, keep going */
224 bch2_btree_iter_advance_pos(iter);
/* crossed into the next inode: table is full along this chain */
225 if (iter->pos.inode != k.k->p.inode)
226 return bkey_s_c_err(-ENOENT);
/*
 * Find an empty slot for @key in @inode's hash table: initialize @iter at
 * the hashed position (iterator flags line not visible here — presumably
 * intent, confirm) and probe forward with bch2_hash_hole_at().
 * NOTE(review): the final parameter declaration (@key) is not visible in
 * this excerpt.
 */
230 static inline struct bkey_s_c bch2_hash_hole(const struct bch_hash_desc desc,
231 const struct bch_hash_info *info,
232 struct bch_fs *c, u64 inode,
233 struct btree_iter *iter,
236 bch2_btree_iter_init(iter, c, desc.btree_id,
237 POS(inode, desc.hash_key(info, key)),
240 return bch2_hash_hole_at(desc, iter);
/*
 * Decide whether deleting the entry at @start must leave a whiteout.
 *
 * Scan forward from the slot just past @start: if we first reach a slot
 * that is neither a live key nor a whiteout, the probe chain ends there
 * and no whiteout is needed; if we find a live key whose hash places it
 * at or before @start's slot, later lookups must probe through @start, so
 * a whiteout is required.  NOTE(review): the loop head and the return
 * statements for both outcomes are not visible in this excerpt.
 */
243 static inline int bch2_hash_needs_whiteout(const struct bch_hash_desc desc,
244 const struct bch_hash_info *info,
245 struct btree_iter *iter,
246 struct btree_iter *start)
/* begin at the slot immediately after @start */
248 bch2_btree_iter_set_pos(iter,
249 btree_type_successor(start->btree_id, start->pos));
252 struct bkey_s_c k = bch2_btree_iter_peek_with_holes(iter);
253 int ret = btree_iter_err(k);
/* neither live key nor whiteout: chain is broken here, no whiteout needed */
258 if (k.k->type != desc.key_type &&
259 k.k->type != desc.whiteout_type)
/* live key that hashed at/before @start: its chain runs through @start */
262 if (k.k->type == desc.key_type &&
263 desc.hash_bkey(info, k) <= start->pos.offset)
266 bch2_btree_iter_advance_pos(iter);
/*
 * bch2_hash_set() behaviour flags.  They start at bit 4 because the low
 * bits of @flags are passed straight through to bch2_btree_insert_at()
 * (see the BTREE_INSERT_ATOMIC|flags call site) — presumably to avoid
 * colliding with BTREE_INSERT_* flags; confirm against the full source.
 */
270 #define BCH_HASH_SET_MUST_CREATE (1 << 4)
271 #define BCH_HASH_SET_MUST_REPLACE (1 << 5)
/*
 * Insert @insert into @inode's hash table, resolving collisions by linear
 * probing.
 *
 * Two linked iterators are used: @hashed_slot stays pinned at the slot the
 * key hashes to (so another thread can't delete the entry we probed past
 * while we insert — see the original comment below), while @iter walks the
 * chain.  MUST_REPLACE requires an existing matching entry; MUST_CREATE
 * requires that none exist.  Remaining @flags bits are forwarded to
 * bch2_btree_insert_at().
 *
 * NOTE(review): this excerpt omits several lines — the journal_seq
 * parameter, iterator flag arguments, the -EINTR retry loop head, error
 * gotos, and the final return.
 */
273 static inline int bch2_hash_set(const struct bch_hash_desc desc,
274 const struct bch_hash_info *info,
275 struct bch_fs *c, u64 inode,
277 struct bkey_i *insert, int flags)
279 struct btree_iter iter, hashed_slot;
283 bch2_btree_iter_init(&hashed_slot, c, desc.btree_id,
284 POS(inode, desc.hash_bkey(info, bkey_i_to_s_c(insert))),
286 bch2_btree_iter_init(&iter, c, desc.btree_id, hashed_slot.pos,
/* link the iterators so they share locks instead of deadlocking */
288 bch2_btree_iter_link(&hashed_slot, &iter);
291 * On hash collision, we have to keep the slot we hashed to locked while
292 * we do the insert - to avoid racing with another thread deleting
293 * whatever's in the slot we hashed to:
295 ret = bch2_btree_iter_traverse(&hashed_slot);
300 * On -EINTR/retry, we dropped locks - always restart from the slot we
303 bch2_btree_iter_copy(&iter, &hashed_slot);
/* is there already an entry equal to the one being inserted? */
305 k = bch2_hash_lookup_bkey_at(desc, info, &iter, bkey_i_to_s_c(insert));
307 ret = btree_iter_err(k);
308 if (ret == -ENOENT) {
309 if (flags & BCH_HASH_SET_MUST_REPLACE) {
315 * Not found, so we're now looking for any open
316 * slot - we might have skipped over a whiteout
317 * that we could have used, so restart from the
320 bch2_btree_iter_copy(&iter, &hashed_slot);
321 k = bch2_hash_hole_at(desc, &iter);
322 if ((ret = btree_iter_err(k)))
325 if (flags & BCH_HASH_SET_MUST_CREATE) {
/* place the new key in the slot the probe settled on */
333 insert->k.p = iter.pos;
334 ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq,
335 BTREE_INSERT_ATOMIC|flags,
336 BTREE_INSERT_ENTRY(&iter, insert));
342 * On successful insert, we don't want to clobber ret with error from
345 bch2_btree_iter_unlock(&iter);
346 bch2_btree_iter_unlock(&hashed_slot);
/*
 * Delete the entry @iter currently points at.
 *
 * First asks (via a linked iterator) whether later entries in the probe
 * chain hash through this slot; if so, the slot is replaced by a whiteout
 * so lookups keep probing past it, otherwise by a plain deleted key.
 * NOTE(review): the journal_seq parameter, the whiteout_iter position
 * argument, insert flags and error/return lines are not visible in this
 * excerpt.
 */
350 static inline int bch2_hash_delete_at(const struct bch_hash_desc desc,
351 const struct bch_hash_info *info,
352 struct btree_iter *iter,
355 struct btree_iter whiteout_iter;
356 struct bkey_i delete;
359 bch2_btree_iter_init(&whiteout_iter, iter->c, desc.btree_id,
361 bch2_btree_iter_link(iter, &whiteout_iter);
363 ret = bch2_hash_needs_whiteout(desc, info, &whiteout_iter, iter);
367 bkey_init(&delete.k);
368 delete.k.p = iter->pos;
/* positive ret from needs_whiteout means the chain runs through us */
369 delete.k.type = ret ? desc.whiteout_type : KEY_TYPE_DELETED;
371 ret = bch2_btree_insert_at(iter->c, NULL, NULL, journal_seq,
374 BTREE_INSERT_ENTRY(iter, &delete));
376 bch2_btree_iter_unlink(&whiteout_iter);
/*
 * Look up @key in @inode's hash table and delete it.
 *
 * Sets up two linked iterators (the second for the whiteout scan done by
 * bch2_hash_delete_at()), finds the entry, deletes it, then unlocks both.
 * NOTE(review): iterator flag arguments, the error goto target, and the
 * final return are not visible in this excerpt.
 */
380 static inline int bch2_hash_delete(const struct bch_hash_desc desc,
381 const struct bch_hash_info *info,
382 struct bch_fs *c, u64 inode,
383 u64 *journal_seq, const void *key)
385 struct btree_iter iter, whiteout_iter;
389 bch2_btree_iter_init(&iter, c, desc.btree_id,
390 POS(inode, desc.hash_key(info, key)),
392 bch2_btree_iter_init(&whiteout_iter, c, desc.btree_id,
393 POS(inode, desc.hash_key(info, key)), 0);
/* link so the two iterators share locks rather than deadlock */
394 bch2_btree_iter_link(&iter, &whiteout_iter);
396 k = bch2_hash_lookup_at(desc, info, &iter, key);
397 if ((ret = btree_iter_err(k)))
400 ret = bch2_hash_delete_at(desc, info, &iter, journal_seq);
405 bch2_btree_iter_unlock(&whiteout_iter);
406 bch2_btree_iter_unlock(&iter);
410 #endif /* _BCACHE_STR_HASH_H */