1 #ifndef _BCACHEFS_STR_HASH_H
2 #define _BCACHEFS_STR_HASH_H
4 #include "btree_iter.h"
5 #include "btree_update.h"
12 #include <linux/crc32c.h>
13 #include <crypto/hash.h>
/*
 * Per-inode string-hash parameters: which hash function this inode uses
 * plus the seed material for it.  Filled in by bch2_hash_info_init().
 *
 * NOTE(review): this listing is missing lines — the hash-type member,
 * the crc seed member (crc_key is assigned in bch2_hash_info_init) and
 * the closing brace are not visible here.
 */
15 struct bch_hash_info {
/* key used when .type is BCH_STR_HASH_SIPHASH */
19 SIPHASH_KEY siphash_key;
/*
 * bch2_hash_info_init() - build the per-inode hash parameters from an
 * unpacked inode.
 *
 * The hash type is extracted from the INODE_STR_HASH_OFFSET /
 * INODE_STR_HASH_BITS bitfield of bi->bi_flags.  Seed material depends
 * on the type: the crc variants use bi_hash_seed directly, while
 * siphash derives its key by sha256-hashing bi_hash_seed and keeping a
 * key-sized prefix of the digest.
 *
 * NOTE(review): lines are missing from this listing (the switch header,
 * break statements and the final return are not visible).
 */
23 static inline struct bch_hash_info
24 bch2_hash_info_init(struct bch_fs *c,
25 const struct bch_inode_unpacked *bi)
28 struct bch_hash_info info = {
29 .type = (bi->bi_flags >> INODE_STR_HASH_OFFSET) &
30 ~(~0U << INODE_STR_HASH_BITS)
/* crc32c/crc64: the inode's 64-bit seed is used as the crc key verbatim */
34 case BCH_STR_HASH_CRC32C:
35 case BCH_STR_HASH_CRC64:
36 info.crc_key = bi->bi_hash_seed;
/*
 * siphash: expand the 64-bit seed into a full SIPHASH_KEY by hashing it
 * with sha256; only sizeof(info.siphash_key) bytes of the digest are kept.
 */
38 case BCH_STR_HASH_SIPHASH: {
39 SHASH_DESC_ON_STACK(desc, c->sha256);
40 u8 digest[crypto_shash_digestsize(c->sha256)];
42 desc->tfm = c->sha256;
45 crypto_shash_digest(desc, (void *) &bi->bi_hash_seed,
46 sizeof(bi->bi_hash_seed), digest);
47 memcpy(&info.siphash_key, digest, sizeof(info.siphash_key));
/*
 * Incremental string-hash state for init/update/end below; which member
 * is live is selected by bch_hash_info::type.  NOTE(review): the union
 * members (crc32c, crc64, siphash — used by the helpers below) and the
 * closing brace are missing from this listing.
 */
57 struct bch_str_hash_ctx {
/*
 * bch2_str_hash_init() - start an incremental hash using the parameters
 * in @info.  The crc variants are seeded with ~0 and the inode's crc_key;
 * siphash is keyed with info->siphash_key.
 *
 * NOTE(review): the switch header and break statements are missing from
 * this listing.
 */
65 static inline void bch2_str_hash_init(struct bch_str_hash_ctx *ctx,
66 const struct bch_hash_info *info)
69 case BCH_STR_HASH_CRC32C:
70 ctx->crc32c = crc32c(~0, &info->crc_key, sizeof(info->crc_key));
72 case BCH_STR_HASH_CRC64:
73 ctx->crc64 = bch2_crc64_update(~0, &info->crc_key, sizeof(info->crc_key));
75 case BCH_STR_HASH_SIPHASH:
76 SipHash24_Init(&ctx->siphash, &info->siphash_key);
/*
 * bch2_str_hash_update() - fold @len bytes at @data into the in-progress
 * hash, dispatching on info->type to the matching primitive.
 *
 * NOTE(review): the switch header and break statements are missing from
 * this listing.
 */
83 static inline void bch2_str_hash_update(struct bch_str_hash_ctx *ctx,
84 const struct bch_hash_info *info,
85 const void *data, size_t len)
88 case BCH_STR_HASH_CRC32C:
89 ctx->crc32c = crc32c(ctx->crc32c, data, len);
91 case BCH_STR_HASH_CRC64:
92 ctx->crc64 = bch2_crc64_update(ctx->crc64, data, len);
94 case BCH_STR_HASH_SIPHASH:
95 SipHash24_Update(&ctx->siphash, data, len);
/*
 * bch2_str_hash_end() - finish the hash and return it as a u64.
 *
 * The 64-bit variants are shifted right by one — presumably to keep the
 * top bit of the hash value clear for use by callers; TODO(review)
 * confirm against the btree-key offset encoding.
 *
 * NOTE(review): the crc32c return line and the default case/closing
 * brace are missing from this listing.
 */
102 static inline u64 bch2_str_hash_end(struct bch_str_hash_ctx *ctx,
103 const struct bch_hash_info *info)
105 switch (info->type) {
106 case BCH_STR_HASH_CRC32C:
108 case BCH_STR_HASH_CRC64:
109 return ctx->crc64 >> 1;
110 case BCH_STR_HASH_SIPHASH:
111 return SipHash24_End(&ctx->siphash) >> 1;
/*
 * Descriptor for one hash-table user (e.g. dirents or xattrs): which
 * btree it lives in and the per-type operations the generic lookup /
 * set / delete helpers below dispatch through.
 *
 * NOTE(review): the key_type/whiteout_type members referenced by the
 * helpers below are missing from this listing.
 */
117 struct bch_hash_desc {
118 enum btree_id btree_id;
/* hash a search key / an existing bkey to its table slot */
122 u64 (*hash_key)(const struct bch_hash_info *, const void *);
123 u64 (*hash_bkey)(const struct bch_hash_info *, struct bkey_s_c);
/* equality checks; note they return false on a match per the callers below */
124 bool (*cmp_key)(struct bkey_s_c, const void *);
125 bool (*cmp_bkey)(struct bkey_s_c, struct bkey_s_c);
/*
 * bch2_hash_lookup_at() - linear-probe from @iter's current (hashed)
 * position for an entry matching @search.
 *
 * Walks slots within the same inode: a key of desc.key_type is compared
 * with desc.cmp_key (zero return = match); a whiteout is skipped and
 * probing continues; anything else is a hole, terminating the probe.
 * Returns the matching key, or an error key — -ENOENT when not found,
 * or the iterator error if there was one.
 *
 * NOTE(review): break/return lines inside the loop are missing from
 * this listing.
 */
128 static inline struct bkey_s_c
129 bch2_hash_lookup_at(const struct bch_hash_desc desc,
130 const struct bch_hash_info *info,
131 struct btree_iter *iter, const void *search)
133 u64 inode = iter->pos.inode;
136 for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
/* never probe past the end of this inode's slots */
137 if (iter->pos.inode != inode)
140 if (k.k->type == desc.key_type) {
141 if (!desc.cmp_key(k, search))
143 } else if (k.k->type == desc.whiteout_type) {
146 /* hole, not found */
150 return btree_iter_err(k) ? k : bkey_s_c_err(-ENOENT);
/*
 * bch2_hash_lookup_bkey_at() - identical probe loop to
 * bch2_hash_lookup_at(), except the needle is an existing bkey and
 * comparison goes through desc.cmp_bkey.  Used by bch2_hash_set() to
 * find an entry colliding with the key being inserted.
 *
 * NOTE(review): break/return lines inside the loop are missing from
 * this listing.
 */
153 static inline struct bkey_s_c
154 bch2_hash_lookup_bkey_at(const struct bch_hash_desc desc,
155 const struct bch_hash_info *info,
156 struct btree_iter *iter, struct bkey_s_c search)
158 u64 inode = iter->pos.inode;
161 for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
/* never probe past the end of this inode's slots */
162 if (iter->pos.inode != inode)
165 if (k.k->type == desc.key_type) {
166 if (!desc.cmp_bkey(k, search))
168 } else if (k.k->type == desc.whiteout_type) {
171 /* hole, not found */
175 return btree_iter_err(k) ? k : bkey_s_c_err(-ENOENT);
/*
 * bch2_hash_lookup() - top-level lookup: initialize @iter at the slot
 * @key hashes to within @inode, then probe with bch2_hash_lookup_at().
 * Caller owns @iter and must unlock it when done with the result.
 *
 * NOTE(review): the iterator-flags argument line of
 * bch2_btree_iter_init() is missing from this listing (the _intent
 * variant below passes BTREE_ITER_SLOTS|BTREE_ITER_INTENT).
 */
178 static inline struct bkey_s_c
179 bch2_hash_lookup(const struct bch_hash_desc desc,
180 const struct bch_hash_info *info,
181 struct bch_fs *c, u64 inode,
182 struct btree_iter *iter, const void *key)
184 bch2_btree_iter_init(iter, c, desc.btree_id,
185 POS(inode, desc.hash_key(info, key)),
188 return bch2_hash_lookup_at(desc, info, iter, key);
/*
 * bch2_hash_lookup_intent() - as bch2_hash_lookup(), but the iterator
 * takes intent locks (BTREE_ITER_INTENT) so the caller can go on to
 * update the entry it finds without re-locking.
 */
191 static inline struct bkey_s_c
192 bch2_hash_lookup_intent(const struct bch_hash_desc desc,
193 const struct bch_hash_info *info,
194 struct bch_fs *c, u64 inode,
195 struct btree_iter *iter, const void *key)
197 bch2_btree_iter_init(iter, c, desc.btree_id,
198 POS(inode, desc.hash_key(info, key)),
199 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
201 return bch2_hash_lookup_at(desc, info, iter, key);
/*
 * bch2_hash_hole_at() - probe forward from @iter's current position for
 * the first slot not occupied by a live entry (any type other than
 * desc.key_type, i.e. empty or whiteout), staying within the current
 * inode.  Used to find an insertion slot after a failed lookup.
 * Returns the key at the free slot, or an error key (-ENOENT if the
 * inode's slots are exhausted; the iterator error if there was one).
 *
 * NOTE(review): break/return lines inside the loop are missing from
 * this listing.
 */
204 static inline struct bkey_s_c
205 bch2_hash_hole_at(const struct bch_hash_desc desc, struct btree_iter *iter)
207 u64 inode = iter->pos.inode;
210 for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
211 if (iter->pos.inode != inode)
214 if (k.k->type != desc.key_type)
217 return btree_iter_err(k) ? k : bkey_s_c_err(-ENOENT);
/*
 * bch2_hash_hole() - top-level free-slot search: position @iter (with
 * intent locks) at the slot @key hashes to, then scan forward with
 * bch2_hash_hole_at().
 *
 * NOTE(review): the final parameter line of the prototype (presumably
 * `const void *key`, used below) is missing from this listing.
 */
220 static inline struct bkey_s_c bch2_hash_hole(const struct bch_hash_desc desc,
221 const struct bch_hash_info *info,
222 struct bch_fs *c, u64 inode,
223 struct btree_iter *iter,
226 bch2_btree_iter_init(iter, c, desc.btree_id,
227 POS(inode, desc.hash_key(info, key)),
228 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
230 return bch2_hash_hole_at(desc, iter);
/*
 * bch2_hash_needs_whiteout() - decide whether deleting the entry at
 * @start can leave a plain hole, or must leave a whiteout so that later
 * entries displaced past @start by collisions remain reachable.
 *
 * Scans forward from the slot after @iter: the probe chain ends at the
 * first slot that is neither a live entry nor a whiteout; a live entry
 * whose natural hash position is at or before @start's offset means a
 * whiteout is required.  Presumably returns nonzero in that case and 0
 * otherwise — TODO(review): the return lines inside the loop are
 * missing from this listing; confirm.  On iterator error, returns that
 * error (final line).
 */
233 static inline int bch2_hash_needs_whiteout(const struct bch_hash_desc desc,
234 const struct bch_hash_info *info,
235 struct btree_iter *iter,
236 struct btree_iter *start)
/* begin scanning at the slot following the one being deleted */
240 bch2_btree_iter_next_slot(iter);
242 for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
243 if (k.k->type != desc.key_type &&
244 k.k->type != desc.whiteout_type)
247 if (k.k->type == desc.key_type &&
248 desc.hash_bkey(info, k) <= start->pos.offset)
251 return btree_iter_err(k);
/*
 * bch2_hash_set() - insert (or replace) @insert in @inode's hash table.
 *
 * Two linked iterators are used: @hashed_slot stays pinned on the slot
 * the key hashes to, @iter does the probing.  First an existing match
 * is searched for with bch2_hash_lookup_bkey_at(); on -ENOENT a free
 * slot is found with bch2_hash_hole_at() (restarting from the hashed
 * slot so a skipped whiteout can be reused).  @flags supports
 * BCH_HASH_SET_MUST_REPLACE / BCH_HASH_SET_MUST_CREATE to turn the
 * found/not-found cases into errors.  The insert itself goes through
 * bch2_btree_insert_at() with BTREE_INSERT_ATOMIC.
 *
 * NOTE(review): many lines (journal_seq parameter, retry/error labels,
 * -EINTR loop structure, final return) are missing from this listing.
 */
254 static inline int bch2_hash_set(const struct bch_hash_desc desc,
255 const struct bch_hash_info *info,
256 struct bch_fs *c, u64 inode,
258 struct bkey_i *insert, int flags)
260 struct btree_iter iter, hashed_slot;
264 bch2_btree_iter_init(&hashed_slot, c, desc.btree_id,
265 POS(inode, desc.hash_bkey(info, bkey_i_to_s_c(insert))),
266 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
267 bch2_btree_iter_init(&iter, c, desc.btree_id, hashed_slot.pos,
268 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
/* link so the two iterators share locks instead of deadlocking */
269 bch2_btree_iter_link(&hashed_slot, &iter);
272 * On hash collision, we have to keep the slot we hashed to locked while
273 * we do the insert - to avoid racing with another thread deleting
274 * whatever's in the slot we hashed to:
276 ret = bch2_btree_iter_traverse(&hashed_slot);
281 * On -EINTR/retry, we dropped locks - always restart from the slot we
284 bch2_btree_iter_copy(&iter, &hashed_slot);
286 k = bch2_hash_lookup_bkey_at(desc, info, &iter, bkey_i_to_s_c(insert));
288 ret = btree_iter_err(k);
289 if (ret == -ENOENT) {
/* MUST_REPLACE but nothing to replace: error out (line missing here) */
290 if (flags & BCH_HASH_SET_MUST_REPLACE) {
296 * Not found, so we're now looking for any open
297 * slot - we might have skipped over a whiteout
298 * that we could have used, so restart from the
301 bch2_btree_iter_copy(&iter, &hashed_slot);
302 k = bch2_hash_hole_at(desc, &iter);
303 if ((ret = btree_iter_err(k)))
/* found an existing entry but MUST_CREATE: error out (line missing here) */
306 if (flags & BCH_HASH_SET_MUST_CREATE) {
/* insert at whichever slot the probe settled on */
314 insert->k.p = iter.pos;
315 ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq,
316 BTREE_INSERT_ATOMIC|flags,
317 BTREE_INSERT_ENTRY(&iter, insert));
323 * On successful insert, we don't want to clobber ret with error from
326 bch2_btree_iter_unlock(&iter);
327 bch2_btree_iter_unlock(&hashed_slot);
/*
 * bch2_hash_delete_at() - delete the entry @iter currently points at.
 *
 * Uses a second, linked iterator to run bch2_hash_needs_whiteout(); the
 * deletion key's type is then the whiteout type when later probe-chain
 * entries still depend on this slot, or KEY_TYPE_DELETED for a plain
 * hole.  The delete goes through bch2_btree_insert_at().
 *
 * NOTE(review): the journal_seq parameter line, error-path lines and
 * the final return are missing from this listing.
 */
331 static inline int bch2_hash_delete_at(const struct bch_hash_desc desc,
332 const struct bch_hash_info *info,
333 struct btree_iter *iter,
336 struct btree_iter whiteout_iter;
337 struct bkey_i delete;
340 bch2_btree_iter_init(&whiteout_iter, iter->c, desc.btree_id,
341 iter->pos, BTREE_ITER_SLOTS);
342 bch2_btree_iter_link(iter, &whiteout_iter);
344 ret = bch2_hash_needs_whiteout(desc, info, &whiteout_iter, iter);
348 bkey_init(&delete.k);
349 delete.k.p = iter->pos;
/* nonzero ret from needs_whiteout => must leave a whiteout behind */
350 delete.k.type = ret ? desc.whiteout_type : KEY_TYPE_DELETED;
352 ret = bch2_btree_insert_at(iter->c, NULL, NULL, journal_seq,
355 BTREE_INSERT_ENTRY(iter, &delete));
357 bch2_btree_iter_unlink(&whiteout_iter);
/*
 * bch2_hash_delete() - top-level delete: look up @key in @inode's hash
 * table and remove it via bch2_hash_delete_at().
 *
 * Sets up two linked iterators up front (the lookup iterator with
 * intent locks, plus a plain-slots iterator for the whiteout scan —
 * NOTE(review): bch2_hash_delete_at() as shown creates its own
 * whiteout iterator; the missing lines presumably reconcile this, or
 * this whiteout_iter serves the retry loop — confirm against the full
 * file).  Error/retry lines and the final return are missing from this
 * listing.
 */
361 static inline int bch2_hash_delete(const struct bch_hash_desc desc,
362 const struct bch_hash_info *info,
363 struct bch_fs *c, u64 inode,
364 u64 *journal_seq, const void *key)
366 struct btree_iter iter, whiteout_iter;
370 bch2_btree_iter_init(&iter, c, desc.btree_id,
371 POS(inode, desc.hash_key(info, key)),
372 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
373 bch2_btree_iter_init(&whiteout_iter, c, desc.btree_id,
374 POS(inode, desc.hash_key(info, key)),
376 bch2_btree_iter_link(&iter, &whiteout_iter);
378 k = bch2_hash_lookup_at(desc, info, &iter, key);
379 if ((ret = btree_iter_err(k)))
382 ret = bch2_hash_delete_at(desc, info, &iter, journal_seq);
387 bch2_btree_iter_unlock(&whiteout_iter);
388 bch2_btree_iter_unlock(&iter);
392 #endif /* _BCACHEFS_STR_HASH_H */