1 #ifndef _BCACHEFS_STR_HASH_H
2 #define _BCACHEFS_STR_HASH_H
4 #include "btree_iter.h"
5 #include "btree_update.h"
12 #include <linux/crc32c.h>
13 #include <crypto/hash.h>
/*
 * Per-inode hashing parameters: which hash algorithm to use and the
 * key material it is seeded with.
 *
 * NOTE(review): this view is elided — the hash-type field and the
 * crc_key member written by bch2_hash_info_init() below are not visible
 * here; confirm against the full header.
 */
15 struct bch_hash_info {
19 SIPHASH_KEY siphash_key;
/*
 * Build a bch_hash_info from an unpacked inode.
 *
 * The hash type is extracted from the inode flags
 * (INODE_STR_HASH_OFFSET/BITS bitfield).  For the CRC variants the
 * inode's hash seed is used directly as the key; for siphash, the key
 * is derived by hashing the seed with SHA-256 and taking a prefix of
 * the digest (so the raw seed is never used as the siphash key).
 *
 * NOTE(review): several lines (switch header, break statements, the
 * final return) are elided from this view.
 */
23 static inline struct bch_hash_info
24 bch2_hash_info_init(struct bch_fs *c,
25 const struct bch_inode_unpacked *bi)
28 struct bch_hash_info info = {
/* Extract the str_hash bitfield from bi_flags: */
29 .type = (bi->bi_flags >> INODE_STR_HASH_OFFSET) &
30 ~(~0U << INODE_STR_HASH_BITS)
/* CRC32C and CRC64 share the same key setup — intentional fallthrough: */
34 case BCH_STR_HASH_CRC32C:
35 case BCH_STR_HASH_CRC64:
36 info.crc_key = bi->bi_hash_seed;
38 case BCH_STR_HASH_SIPHASH: {
/* Kernel crypto shash: on-stack descriptor for the fs's sha256 tfm: */
39 SHASH_DESC_ON_STACK(desc, c->sha256);
40 u8 digest[crypto_shash_digestsize(c->sha256)];
42 desc->tfm = c->sha256;
/* Derive the siphash key as SHA-256(seed), truncated to key size: */
45 crypto_shash_digest(desc, (void *) &bi->bi_hash_seed,
46 sizeof(bi->bi_hash_seed), digest);
47 memcpy(&info.siphash_key, digest, sizeof(info.siphash_key));
/*
 * Incremental string-hash state, one member per supported algorithm
 * (crc32c / crc64 / siphash — see the init/update/end helpers below).
 *
 * NOTE(review): the members themselves are elided from this view;
 * presumably a union keyed by bch_hash_info::type — confirm.
 */
57 struct bch_str_hash_ctx {
/*
 * Start an incremental hash: seed the per-type state.  The CRC variants
 * are seeded with ~0 and immediately fed the per-inode crc_key, so two
 * inodes with different seeds hash the same name differently.
 *
 * NOTE(review): the switch header and break statements are elided here.
 */
65 static inline void bch2_str_hash_init(struct bch_str_hash_ctx *ctx,
66 const struct bch_hash_info *info)
69 case BCH_STR_HASH_CRC32C:
70 ctx->crc32c = crc32c(~0, &info->crc_key, sizeof(info->crc_key));
72 case BCH_STR_HASH_CRC64:
73 ctx->crc64 = bch2_crc64_update(~0, &info->crc_key, sizeof(info->crc_key));
75 case BCH_STR_HASH_SIPHASH:
76 SipHash24_Init(&ctx->siphash, &info->siphash_key);
/*
 * Feed @len bytes at @data into an in-progress hash; dispatches on
 * info->type to the matching algorithm's update primitive.
 *
 * NOTE(review): the switch header and break statements are elided here.
 */
83 static inline void bch2_str_hash_update(struct bch_str_hash_ctx *ctx,
84 const struct bch_hash_info *info,
85 const void *data, size_t len)
88 case BCH_STR_HASH_CRC32C:
89 ctx->crc32c = crc32c(ctx->crc32c, data, len);
91 case BCH_STR_HASH_CRC64:
92 ctx->crc64 = bch2_crc64_update(ctx->crc64, data, len);
94 case BCH_STR_HASH_SIPHASH:
95 SipHash24_Update(&ctx->siphash, data, len);
/*
 * Finish an incremental hash and return the value used as the btree key
 * offset.  The 64-bit results are shifted right by one — presumably to
 * keep the top bit of the offset clear for the btree key encoding;
 * confirm against the key format.
 *
 * NOTE(review): the crc32c return statement and the default case are
 * elided from this view.
 */
102 static inline u64 bch2_str_hash_end(struct bch_str_hash_ctx *ctx,
103 const struct bch_hash_info *info)
105 switch (info->type) {
106 case BCH_STR_HASH_CRC32C:
108 case BCH_STR_HASH_CRC64:
109 return ctx->crc64 >> 1;
110 case BCH_STR_HASH_SIPHASH:
111 return SipHash24_End(&ctx->siphash) >> 1;
/*
 * Descriptor for one hashed-btree namespace (which btree it lives in,
 * how to hash a search key or an existing bkey, and how to compare
 * them).  Users supply one of these per key type — presumably dirents
 * and xattrs; confirm against the callers.
 *
 * NOTE(review): key_type / whiteout_type members used by the helpers
 * below are elided from this view.
 */
117 struct bch_hash_desc {
118 enum btree_id btree_id;
/* Hash a caller-supplied search key: */
122 u64 (*hash_key)(const struct bch_hash_info *, const void *);
/* Hash an existing bkey already in the btree: */
123 u64 (*hash_bkey)(const struct bch_hash_info *, struct bkey_s_c);
/* Return true when the bkey does NOT match (callers test !cmp): */
124 bool (*cmp_key)(struct bkey_s_c, const void *);
125 bool (*cmp_bkey)(struct bkey_s_c, struct bkey_s_c);
/*
 * Linear-probe from the iterator's current (hashed) position: walk
 * slots within the same inode; a matching key returns it, a whiteout
 * means "keep probing", and a hole terminates the probe (hash chains
 * are contiguous, so a hole proves the key is absent).
 *
 * Returns the matching key, a bkey error from the iterator, or
 * -ENOENT.  NOTE(review): break/return statements inside the loop are
 * elided from this view.
 */
128 static inline struct bkey_s_c
129 bch2_hash_lookup_at(const struct bch_hash_desc desc,
130 const struct bch_hash_info *info,
131 struct btree_iter *iter, const void *search)
133 u64 inode = iter->pos.inode;
136 for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
/* Hash chains never span inodes — stop at the boundary: */
137 if (iter->pos.inode != inode)
140 if (k.k->type == desc.key_type) {
/* cmp_key() returns false on a match: */
141 if (!desc.cmp_key(k, search))
143 } else if (k.k->type == desc.whiteout_type) {
146 /* hole, not found */
/* Propagate iterator errors; otherwise report not-found: */
150 return btree_iter_err(k) ? k : bkey_s_c_err(-ENOENT);
/*
 * Identical probe loop to bch2_hash_lookup_at(), but the needle is an
 * existing bkey rather than a raw search key, so cmp_bkey() is used —
 * needed when checking whether a key being inserted collides with one
 * already present.
 *
 * NOTE(review): break/return statements inside the loop are elided
 * from this view.
 */
153 static inline struct bkey_s_c
154 bch2_hash_lookup_bkey_at(const struct bch_hash_desc desc,
155 const struct bch_hash_info *info,
156 struct btree_iter *iter, struct bkey_s_c search)
158 u64 inode = iter->pos.inode;
161 for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
/* Stop at the inode boundary — chains never cross it: */
162 if (iter->pos.inode != inode)
165 if (k.k->type == desc.key_type) {
166 if (!desc.cmp_bkey(k, search))
168 } else if (k.k->type == desc.whiteout_type) {
171 /* hole, not found */
175 return btree_iter_err(k) ? k : bkey_s_c_err(-ENOENT);
/*
 * Top-level lookup: position @iter at POS(inode, hash(key)) in the
 * descriptor's btree (read lock; the elided flags argument on the
 * init call presumably passes BTREE_ITER_SLOTS — confirm), then probe
 * with bch2_hash_lookup_at().  Caller owns the iterator and must
 * unlock it.
 */
178 static inline struct bkey_s_c
179 bch2_hash_lookup(const struct bch_hash_desc desc,
180 const struct bch_hash_info *info,
181 struct bch_fs *c, u64 inode,
182 struct btree_iter *iter, const void *key)
184 bch2_btree_iter_init(iter, c, desc.btree_id,
185 POS(inode, desc.hash_key(info, key)),
188 return bch2_hash_lookup_at(desc, info, iter, key);
/*
 * Same as bch2_hash_lookup() but takes intent locks
 * (BTREE_ITER_INTENT), for callers that will update the key they find.
 * Caller owns the iterator and must unlock it.
 */
191 static inline struct bkey_s_c
192 bch2_hash_lookup_intent(const struct bch_hash_desc desc,
193 const struct bch_hash_info *info,
194 struct bch_fs *c, u64 inode,
195 struct btree_iter *iter, const void *key)
197 bch2_btree_iter_init(iter, c, desc.btree_id,
198 POS(inode, desc.hash_key(info, key)),
199 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
201 return bch2_hash_lookup_at(desc, info, iter, key);
/*
 * From the iterator's current position, find the first slot within the
 * same inode that is NOT occupied by a live key of desc.key_type —
 * i.e. an empty slot or whiteout usable for an insert.
 *
 * NOTE(review): the break/return inside the loop are elided from this
 * view; the final line propagates iterator errors or reports -ENOENT
 * when the inode boundary was hit first.
 */
204 static inline struct bkey_s_c
205 bch2_hash_hole_at(const struct bch_hash_desc desc, struct btree_iter *iter)
207 u64 inode = iter->pos.inode;
210 for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
211 if (iter->pos.inode != inode)
214 if (k.k->type != desc.key_type)
217 return btree_iter_err(k) ? k : bkey_s_c_err(-ENOENT);
/*
 * Position @iter at the key's hashed slot with intent locks, then find
 * the first free slot from there via bch2_hash_hole_at().  Caller owns
 * the iterator.  NOTE(review): the @key parameter declaration is on an
 * elided line.
 */
220 static inline struct bkey_s_c bch2_hash_hole(const struct bch_hash_desc desc,
221 const struct bch_hash_info *info,
222 struct bch_fs *c, u64 inode,
223 struct btree_iter *iter,
226 bch2_btree_iter_init(iter, c, desc.btree_id,
227 POS(inode, desc.hash_key(info, key)),
228 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
230 return bch2_hash_hole_at(desc, iter);
/*
 * Decide whether deleting the key at @start requires leaving a
 * whiteout instead of a plain hole: scan the slots after @start; if
 * any later live key hashes to a slot at or before @start's, punching
 * a hole there would break that key's probe chain, so a whiteout is
 * needed.
 *
 * Returns nonzero (via the elided returns — confirm) when a whiteout
 * is required, 0 when a plain delete is safe, or an iterator error.
 */
233 static inline int bch2_hash_needs_whiteout(const struct bch_hash_desc desc,
234 const struct bch_hash_info *info,
235 struct btree_iter *iter,
236 struct btree_iter *start)
/* Scan begins at the slot AFTER the one being deleted: */
240 bch2_btree_iter_copy(iter, start);
241 bch2_btree_iter_next_slot(iter);
243 for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
/* A hole (neither live key nor whiteout) ends every probe chain: */
244 if (k.k->type != desc.key_type &&
245 k.k->type != desc.whiteout_type)
/* A later key that hashed at/before start would be orphaned: */
248 if (k.k->type == desc.key_type &&
249 desc.hash_bkey(info, k) <= start->pos.offset)
252 return btree_iter_err(k);
/*
 * Insert (or replace) @insert in the hashed btree for @inode.
 *
 * Two linked iterators are used: @hashed_slot stays pinned on the slot
 * the key hashes to, @iter probes forward from it.  Keeping the hashed
 * slot locked for the whole operation prevents a concurrent delete in
 * that slot from racing with our probe/insert (see comment below).
 *
 * Honors BCH_HASH_SET_MUST_REPLACE / MUST_CREATE flags; the insert is
 * atomic (BTREE_INSERT_ATOMIC) and retried from the hashed slot on
 * -EINTR.  Returns 0 or a negative error.  NOTE(review): the retry
 * goto targets and several error paths are elided from this view.
 */
255 static inline int bch2_hash_set(const struct bch_hash_desc desc,
256 const struct bch_hash_info *info,
257 struct bch_fs *c, u64 inode,
259 struct bkey_i *insert, int flags)
261 struct btree_iter iter, hashed_slot;
265 bch2_btree_iter_init(&hashed_slot, c, desc.btree_id,
266 POS(inode, desc.hash_bkey(info, bkey_i_to_s_c(insert))),
267 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
268 bch2_btree_iter_init(&iter, c, desc.btree_id, hashed_slot.pos,
269 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
/* Link so both iterators share locks and stay coherent: */
270 bch2_btree_iter_link(&hashed_slot, &iter);
273 * On hash collision, we have to keep the slot we hashed to locked while
274 * we do the insert - to avoid racing with another thread deleting
275 * whatever's in the slot we hashed to:
277 ret = bch2_btree_iter_traverse(&hashed_slot);
282 * On -EINTR/retry, we dropped locks - always restart from the slot we
285 bch2_btree_iter_copy(&iter, &hashed_slot);
/* Does an equal key already exist in the probe chain? */
287 k = bch2_hash_lookup_bkey_at(desc, info, &iter, bkey_i_to_s_c(insert));
289 ret = btree_iter_err(k);
290 if (ret == -ENOENT) {
291 if (flags & BCH_HASH_SET_MUST_REPLACE) {
297 * Not found, so we're now looking for any open
298 * slot - we might have skipped over a whiteout
299 * that we could have used, so restart from the
302 bch2_btree_iter_copy(&iter, &hashed_slot);
303 k = bch2_hash_hole_at(desc, &iter);
304 if ((ret = btree_iter_err(k)))
/* Found an existing match — creation-only callers must fail: */
307 if (flags & BCH_HASH_SET_MUST_CREATE) {
/* Insert lands wherever the probe/hole search stopped: */
315 insert->k.p = iter.pos;
316 ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq,
317 BTREE_INSERT_ATOMIC|flags,
318 BTREE_INSERT_ENTRY(&iter, insert));
324 * On successful insert, we don't want to clobber ret with error from
327 bch2_btree_iter_unlock(&iter);
328 bch2_btree_iter_unlock(&hashed_slot);
/*
 * Delete the key @iter currently points at.  First checks (with a
 * linked scratch iterator) whether later keys in the probe chain
 * require a whiteout here; then inserts either a whiteout or a plain
 * deleted key accordingly.  Returns 0 or a negative error.
 *
 * NOTE(review): the journal_seq parameter declaration, the error check
 * after bch2_hash_needs_whiteout(), and the final return are on elided
 * lines.
 */
332 static inline int bch2_hash_delete_at(const struct bch_hash_desc desc,
333 const struct bch_hash_info *info,
334 struct btree_iter *iter,
337 struct btree_iter whiteout_iter;
338 struct bkey_i delete;
341 bch2_btree_iter_init(&whiteout_iter, iter->c, desc.btree_id,
342 iter->pos, BTREE_ITER_SLOTS);
343 bch2_btree_iter_link(iter, &whiteout_iter);
/* ret > 0 means a later key's probe chain passes through this slot: */
345 ret = bch2_hash_needs_whiteout(desc, info, &whiteout_iter, iter);
349 bkey_init(&delete.k);
350 delete.k.p = iter->pos;
351 delete.k.type = ret ? desc.whiteout_type : KEY_TYPE_DELETED;
353 ret = bch2_btree_insert_at(iter->c, NULL, NULL, journal_seq,
356 BTREE_INSERT_ENTRY(iter, &delete));
358 bch2_btree_iter_unlink(&whiteout_iter);
/*
 * Look up @key in @inode's hash chain and delete it: positions an
 * intent iterator at the hashed slot, probes with
 * bch2_hash_lookup_at(), then delegates to bch2_hash_delete_at().
 * Both iterators are unlocked on all paths.  Returns 0, -ENOENT if
 * the key is absent, or another negative error.
 *
 * NOTE(review): the retry/err labels and whiteout_iter's init flags
 * are on elided lines.
 */
362 static inline int bch2_hash_delete(const struct bch_hash_desc desc,
363 const struct bch_hash_info *info,
364 struct bch_fs *c, u64 inode,
365 u64 *journal_seq, const void *key)
367 struct btree_iter iter, whiteout_iter;
371 bch2_btree_iter_init(&iter, c, desc.btree_id,
372 POS(inode, desc.hash_key(info, key)),
373 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
374 bch2_btree_iter_init(&whiteout_iter, c, desc.btree_id,
375 POS(inode, desc.hash_key(info, key)),
/* Linked so delete_at's whiteout scan shares our locks: */
377 bch2_btree_iter_link(&iter, &whiteout_iter);
379 k = bch2_hash_lookup_at(desc, info, &iter, key);
380 if ((ret = btree_iter_err(k)))
383 ret = bch2_hash_delete_at(desc, info, &iter, journal_seq);
388 bch2_btree_iter_unlock(&whiteout_iter);
389 bch2_btree_iter_unlock(&iter);
393 #endif /* _BCACHEFS_STR_HASH_H */