1 #ifndef _BCACHE_STR_HASH_H
2 #define _BCACHE_STR_HASH_H
4 #include "btree_iter.h"
10 #include <linux/crc32c.h>
11 #include <crypto/hash.h>
13 struct bch_hash_info {
/*
 * Per-inode string-hash parameters, filled in by bch2_hash_info_init()
 * below from the inode's flags and hash seed.
 *
 * NOTE(review): this extract is elided — the hash-type field and the
 * crc_key member (both referenced by the helpers below) are not visible
 * here; presumably crc_key and siphash_key share a union. Confirm
 * against the full header.
 */
17 SIPHASH_KEY siphash_key;
21 static inline struct bch_hash_info
22 bch2_hash_info_init(struct bch_fs *c,
23 const struct bch_inode_unpacked *bi)
/*
 * Build the hash parameters for inode @bi: unpack the hash type from
 * the inode flags, then derive the per-type key material from
 * bi->i_hash_seed.
 *
 * NOTE(review): extract is elided here — the opening brace, the
 * "switch (...)" statement over the hash type, the break statements,
 * and the final "return info;" are not visible in this view.
 */
26 struct bch_hash_info info = {
/*
 * Hash type is stored in INODE_STR_HASH_BITS bits of i_flags at
 * INODE_STR_HASH_OFFSET. NOTE(review): "~0" here is a signed int —
 * "~0U" would be the safer spelling for the shift; confirm upstream.
 */
27 .type = (bi->i_flags >> INODE_STR_HASH_OFFSET) &
28 ~(~0 << INODE_STR_HASH_BITS)
/* crc32c/crc64: the raw seed is used directly as the hash key */
32 case BCH_STR_HASH_CRC32C:
33 case BCH_STR_HASH_CRC64:
34 info.crc_key = bi->i_hash_seed;
/*
 * siphash: derive a fixed-size key by digesting the seed with the
 * filesystem's sha256 transform.
 */
36 case BCH_STR_HASH_SIPHASH: {
37 SHASH_DESC_ON_STACK(desc, c->sha256);
/* VLA sized by the digest size of c->sha256 (32 bytes for sha256) */
38 u8 digest[crypto_shash_digestsize(c->sha256)];
40 desc->tfm = c->sha256;
43 crypto_shash_digest(desc, (void *) &bi->i_hash_seed,
44 sizeof(bi->i_hash_seed), digest);
/* the siphash key is a prefix of the digest (sizeof(SIPHASH_KEY) bytes) */
45 memcpy(&info.siphash_key, digest, sizeof(info.siphash_key));
/*
 * Incremental string-hash state; which member is live depends on
 * bch_hash_info::type (see bch2_str_hash_init/update/end below).
 *
 * NOTE(review): members elided in this extract — from the users below
 * it appears to contain crc32c (u32), crc64 (u64) and siphash
 * (SIPHASH_CTX) state, presumably in a union; confirm against the
 * full header.
 */
55 struct bch_str_hash_ctx {
63 static inline void bch2_str_hash_init(struct bch_str_hash_ctx *ctx,
64 const struct bch_hash_info *info)
/*
 * Seed the incremental hash state for info->type, folding the
 * per-inode key into the initial state.
 *
 * NOTE(review): extract is elided — the "switch (info->type)" opener,
 * break statements, default case, and closing braces are not visible.
 */
/* crc variants start from an all-ones seed, then mix in the inode key */
67 case BCH_STR_HASH_CRC32C:
68 ctx->crc32c = crc32c(~0, &info->crc_key, sizeof(info->crc_key));
70 case BCH_STR_HASH_CRC64:
71 ctx->crc64 = bch2_crc64_update(~0, &info->crc_key, sizeof(info->crc_key));
/* siphash is keyed directly by the derived 128-bit key */
73 case BCH_STR_HASH_SIPHASH:
74 SipHash24_Init(&ctx->siphash, &info->siphash_key);
81 static inline void bch2_str_hash_update(struct bch_str_hash_ctx *ctx,
82 const struct bch_hash_info *info,
83 const void *data, size_t len)
/*
 * Feed @len bytes at @data into the in-progress hash; dispatches on the
 * same info->type chosen at bch2_str_hash_init() time.
 *
 * NOTE(review): extract is elided — the "switch (info->type)" opener,
 * break statements and closing braces are not visible in this view.
 */
86 case BCH_STR_HASH_CRC32C:
87 ctx->crc32c = crc32c(ctx->crc32c, data, len);
89 case BCH_STR_HASH_CRC64:
90 ctx->crc64 = bch2_crc64_update(ctx->crc64, data, len);
92 case BCH_STR_HASH_SIPHASH:
93 SipHash24_Update(&ctx->siphash, data, len);
100 static inline u64 bch2_str_hash_end(struct bch_str_hash_ctx *ctx,
101 const struct bch_hash_info *info)
/*
 * Finalize the hash and return the value used as a btree key offset.
 * The 64-bit results are shifted right by one, presumably so the top
 * bit is free / the offset stays in a valid range — TODO confirm why.
 *
 * NOTE(review): extract is elided — the crc32c return statement
 * (between the two cases below) and the default case are not visible.
 */
103 switch (info->type) {
104 case BCH_STR_HASH_CRC32C:
106 case BCH_STR_HASH_CRC64:
107 return ctx->crc64 >> 1;
108 case BCH_STR_HASH_SIPHASH:
109 return SipHash24_End(&ctx->siphash) >> 1;
/*
 * Descriptor for one on-disk hash table (e.g. dirents, xattrs):
 * which btree it lives in plus the per-type hash/compare ops used by
 * the generic lookup/set/delete helpers below.
 *
 * NOTE(review): extract is elided — the key_type/whiteout_type members
 * referenced by the helpers below are not visible here.
 */
115 struct bch_hash_desc {
116 enum btree_id btree_id;
/* hash a caller-supplied lookup key / an existing bkey */
120 u64 (*hash_key)(const struct bch_hash_info *, const void *);
121 u64 (*hash_bkey)(const struct bch_hash_info *, struct bkey_s_c);
/* return false on a match (see the !cmp_* tests in the lookup helpers) */
122 bool (*cmp_key)(struct bkey_s_c, const void *);
123 bool (*cmp_bkey)(struct bkey_s_c, struct bkey_s_c);
126 static inline struct bkey_s_c
127 bch2_hash_lookup_at(const struct bch_hash_desc desc,
128 const struct bch_hash_info *info,
129 struct btree_iter *iter, const void *search)
/*
 * Linear-probe from @iter's current position (the hashed slot) looking
 * for @search: skip whiteouts and colliding keys, stop on a hole or on
 * crossing into another inode. Returns the matching key, an iterator
 * error, or -ENOENT.
 *
 * NOTE(review): extract is elided — the do-loop opener, the error and
 * match return statements, and the hole-handling return are not
 * visible in this view.
 */
131 u64 inode = iter->pos.inode;
134 struct bkey_s_c k = bch2_btree_iter_peek_with_holes(iter);
136 if (btree_iter_err(k))
/* cmp_key() returning false means "matches @search" */
139 if (k.k->type == desc.key_type) {
140 if (!desc.cmp_key(k, search))
/* whiteout: slot was deleted but probe chain continues past it */
142 } else if (k.k->type == desc.whiteout_type) {
145 /* hole, not found */
/* collision or whiteout: keep probing the next slot */
149 bch2_btree_iter_advance_pos(iter);
150 } while (iter->pos.inode == inode);
/* ran off the end of this inode's keyspace */
152 return bkey_s_c_err(-ENOENT);
155 static inline struct bkey_s_c
156 bch2_hash_lookup_bkey_at(const struct bch_hash_desc desc,
157 const struct bch_hash_info *info,
158 struct btree_iter *iter, struct bkey_s_c search)
/*
 * Identical probe loop to bch2_hash_lookup_at(), except @search is an
 * existing bkey and matching uses desc.cmp_bkey() instead of
 * desc.cmp_key(). Used by bch2_hash_set() to find a key equal to the
 * one being inserted.
 *
 * NOTE(review): extract is elided — the do-loop opener and the
 * error/match/hole return statements are not visible in this view.
 */
160 u64 inode = iter->pos.inode;
163 struct bkey_s_c k = bch2_btree_iter_peek_with_holes(iter);
165 if (btree_iter_err(k))
168 if (k.k->type == desc.key_type) {
169 if (!desc.cmp_bkey(k, search))
171 } else if (k.k->type == desc.whiteout_type) {
174 /* hole, not found */
/* collision or whiteout: keep probing the next slot */
178 bch2_btree_iter_advance_pos(iter);
179 } while (iter->pos.inode == inode);
181 return bkey_s_c_err(-ENOENT);
184 static inline struct bkey_s_c
185 bch2_hash_lookup(const struct bch_hash_desc desc,
186 const struct bch_hash_info *info,
187 struct bch_fs *c, u64 inode,
188 struct btree_iter *iter, const void *key)
/*
 * Read-lookup entry point: position @iter at @key's hashed slot within
 * @inode, then probe with bch2_hash_lookup_at(). Caller owns @iter and
 * must unlock it when done.
 */
190 bch2_btree_iter_init(iter, c, desc.btree_id,
191 POS(inode, desc.hash_key(info, key)));
193 return bch2_hash_lookup_at(desc, info, iter, key);
196 static inline struct bkey_s_c
197 bch2_hash_lookup_intent(const struct bch_hash_desc desc,
198 const struct bch_hash_info *info,
199 struct bch_fs *c, u64 inode,
200 struct btree_iter *iter, const void *key)
/*
 * Same as bch2_hash_lookup(), but initializes the iterator with intent
 * (write) locks — for callers that will go on to update the key.
 */
202 bch2_btree_iter_init_intent(iter, c, desc.btree_id,
203 POS(inode, desc.hash_key(info, key)));
205 return bch2_hash_lookup_at(desc, info, iter, key);
208 static inline struct bkey_s_c
209 bch2_hash_hole_at(const struct bch_hash_desc desc, struct btree_iter *iter)
/*
 * Probe forward from @iter's current position for the first slot not
 * occupied by a live key of desc.key_type — i.e. a free slot (hole or
 * whiteout) usable for insertion. -ENOENT once the probe leaves the
 * starting inode.
 *
 * NOTE(review): extract is elided — the loop construct, error return,
 * and the return of the free slot are not visible in this view.
 */
212 struct bkey_s_c k = bch2_btree_iter_peek_with_holes(iter);
214 if (btree_iter_err(k))
/* anything that isn't a live key (hole or whiteout) is usable */
217 if (k.k->type != desc.key_type)
220 /* hash collision, keep going */
221 bch2_btree_iter_advance_pos(iter);
222 if (iter->pos.inode != k.k->p.inode)
223 return bkey_s_c_err(-ENOENT);
227 static inline struct bkey_s_c bch2_hash_hole(const struct bch_hash_desc desc,
228 const struct bch_hash_info *info,
229 struct bch_fs *c, u64 inode,
230 struct btree_iter *iter,
/*
 * Position @iter at the hashed slot for a key and return the first
 * free slot in its probe chain (see bch2_hash_hole_at()). Initialized
 * with intent locks since the caller will insert there.
 *
 * NOTE(review): extract is elided — the final parameter (the lookup
 * key, judging from the desc.hash_key(info, key) call below) and the
 * function body braces are not visible in this view.
 */
233 bch2_btree_iter_init_intent(iter, c, desc.btree_id,
234 POS(inode, desc.hash_key(info, key)));
236 return bch2_hash_hole_at(desc, iter);
239 static inline int bch2_hash_needs_whiteout(const struct bch_hash_desc desc,
240 const struct bch_hash_info *info,
241 struct btree_iter *iter,
242 struct btree_iter *start)
/*
 * Decide whether deleting the key at @start must leave a whiteout:
 * scan forward from the slot after @start; if a later live key hashed
 * to a slot at or before @start's, its probe chain passes through
 * @start and a whiteout is needed (presumably returns true there —
 * the return statements are elided; confirm against the full header).
 * A hole (neither key nor whiteout) ends the chain, so no whiteout is
 * needed. May also return an iterator error.
 *
 * NOTE(review): extract is elided — the loop construct and all return
 * statements are not visible in this view.
 */
244 bch2_btree_iter_set_pos(iter,
245 btree_type_successor(start->btree_id, start->pos));
248 struct bkey_s_c k = bch2_btree_iter_peek_with_holes(iter);
249 int ret = btree_iter_err(k);
/* hole: probe chain ends here, nothing after @start depends on it */
254 if (k.k->type != desc.key_type &&
255 k.k->type != desc.whiteout_type)
/* a later key whose home slot is at/before @start: chain crosses @start */
258 if (k.k->type == desc.key_type &&
259 desc.hash_bkey(info, k) <= start->pos.offset)
262 bch2_btree_iter_advance_pos(iter);
/*
 * Flags for bch2_hash_set():
 *  MUST_CREATE  — fail if the key already exists (pure insert)
 *  MUST_REPLACE — fail if the key does not exist (pure update)
 * Without either flag, set() upserts.
 */
266 #define BCH_HASH_SET_MUST_CREATE 1
267 #define BCH_HASH_SET_MUST_REPLACE 2
269 static inline int bch2_hash_set(const struct bch_hash_desc desc,
270 const struct bch_hash_info *info,
271 struct bch_fs *c, u64 inode,
273 struct bkey_i *insert, int flags)
/*
 * Insert or replace @insert in the hash table, honoring the
 * BCH_HASH_SET_* flags. Uses two linked iterators: @hashed_slot stays
 * pinned on the home slot, @iter probes from it.
 *
 * NOTE(review): extract is heavily elided — the parameter carrying
 * journal_seq (used below), local declarations for k/ret, the retry
 * label, several error-handling branches and the final return are not
 * visible in this view.
 */
275 struct btree_iter iter, hashed_slot;
279 bch2_btree_iter_init_intent(&hashed_slot, c, desc.btree_id,
280 POS(inode, desc.hash_bkey(info, bkey_i_to_s_c(insert))));
281 bch2_btree_iter_init_intent(&iter, c, desc.btree_id, hashed_slot.pos);
/* link so the two iterators share locks and don't deadlock each other */
282 bch2_btree_iter_link(&hashed_slot, &iter);
285 * On hash collision, we have to keep the slot we hashed to locked while
286 * we do the insert - to avoid racing with another thread deleting
287 * whatever's in the slot we hashed to:
289 ret = bch2_btree_iter_traverse(&hashed_slot);
294 * On -EINTR/retry, we dropped locks - always restart from the slot we
/* restart the probe from the home slot */
297 bch2_btree_iter_copy(&iter, &hashed_slot);
299 k = bch2_hash_lookup_bkey_at(desc, info, &iter, bkey_i_to_s_c(insert));
301 ret = btree_iter_err(k);
302 if (ret == -ENOENT) {
/* key absent: MUST_REPLACE fails here (error path elided) */
303 if (flags & BCH_HASH_SET_MUST_REPLACE) {
309 * Not found, so we're now looking for any open
310 * slot - we might have skipped over a whiteout
311 * that we could have used, so restart from the
/* re-probe from home slot for the first free slot */
314 bch2_btree_iter_copy(&iter, &hashed_slot);
315 k = bch2_hash_hole_at(desc, &iter);
316 if ((ret = btree_iter_err(k)))
/* key present: MUST_CREATE fails here (error path elided) */
319 if (flags & BCH_HASH_SET_MUST_CREATE) {
/* insert at whichever slot the probe settled on */
327 insert->k.p = iter.pos;
328 ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq,
330 BTREE_INSERT_ENTRY(&iter, insert));
336 * On successful insert, we don't want to clobber ret with error from
339 bch2_btree_iter_unlock(&iter);
340 bch2_btree_iter_unlock(&hashed_slot);
344 static inline int bch2_hash_delete(const struct bch_hash_desc desc,
345 const struct bch_hash_info *info,
346 struct bch_fs *c, u64 inode,
347 u64 *journal_seq, const void *key)
/*
 * Delete @key from the hash table: find it, decide whether the slot
 * must become a whiteout (to keep later keys' probe chains intact, see
 * bch2_hash_needs_whiteout()) or can become a plain deleted key, and
 * commit the tombstone.
 *
 * NOTE(review): extract is elided — local declarations for k/ret, the
 * error-path gotos, the assignment of delete.k.p, and the final return
 * are not visible in this view.
 */
349 struct btree_iter iter, whiteout_iter;
351 struct bkey_i delete;
/* intent iter for the update; read-only iter for the whiteout scan */
354 bch2_btree_iter_init_intent(&iter, c, desc.btree_id,
355 POS(inode, desc.hash_key(info, key)));
356 bch2_btree_iter_init(&whiteout_iter, c, desc.btree_id,
357 POS(inode, desc.hash_key(info, key)));
/* link so the two iterators share locks and don't deadlock each other */
358 bch2_btree_iter_link(&iter, &whiteout_iter);
360 k = bch2_hash_lookup_at(desc, info, &iter, key);
361 if ((ret = btree_iter_err(k)))
/* nonzero ret here means "later keys probe through this slot" */
364 ret = bch2_hash_needs_whiteout(desc, info, &whiteout_iter, &iter);
368 bkey_init(&delete.k);
/* whiteout keeps the probe chain walkable; KEY_TYPE_DELETED is a hole */
370 delete.k.type = ret ? desc.whiteout_type : KEY_TYPE_DELETED;
372 ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq,
375 BTREE_INSERT_ENTRY(&iter, &delete));
380 bch2_btree_iter_unlock(&whiteout_iter);
381 bch2_btree_iter_unlock(&iter);
385 #endif /* _BCACHE_STR_HASH_H */