#include <linux/crc32c.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
struct bch_hash_info {
u8 type;
break;
case BCH_STR_HASH_SIPHASH: {
SHASH_DESC_ON_STACK(desc, c->sha256);
- u8 digest[crypto_shash_digestsize(c->sha256)];
+ u8 digest[SHA256_DIGEST_SIZE];
desc->tfm = c->sha256;
desc->flags = 0;
struct bch_hash_desc {
enum btree_id btree_id;
u8 key_type;
- u8 whiteout_type;
u64 (*hash_key)(const struct bch_hash_info *, const void *);
u64 (*hash_bkey)(const struct bch_hash_info *, struct bkey_s_c);
bool (*cmp_bkey)(struct bkey_s_c, struct bkey_s_c);
};
-static inline struct bkey_s_c
-bch2_hash_lookup_at(const struct bch_hash_desc desc,
- const struct bch_hash_info *info,
- struct btree_iter *iter, const void *search)
+static inline struct btree_iter *
+bch2_hash_lookup(struct btree_trans *trans,
+ const struct bch_hash_desc desc,
+ const struct bch_hash_info *info,
+ u64 inode, const void *key,
+ unsigned flags)
{
- u64 inode = iter->pos.inode;
+ struct btree_iter *iter;
struct bkey_s_c k;
- for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
- if (iter->pos.inode != inode)
- break;
-
- if (k.k->type == desc.key_type) {
- if (!desc.cmp_key(k, search))
- return k;
- } else if (k.k->type == desc.whiteout_type) {
- ;
- } else {
- /* hole, not found */
- break;
- }
- }
- return btree_iter_err(k) ? k : bkey_s_c_err(-ENOENT);
-}
-
-static inline struct bkey_s_c
-bch2_hash_lookup_bkey_at(const struct bch_hash_desc desc,
- const struct bch_hash_info *info,
- struct btree_iter *iter, struct bkey_s_c search)
-{
- u64 inode = iter->pos.inode;
- struct bkey_s_c k;
+ iter = bch2_trans_get_iter(trans, desc.btree_id,
+ POS(inode, desc.hash_key(info, key)),
+ BTREE_ITER_SLOTS|flags);
+ if (IS_ERR(iter))
+ return iter;
for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
if (iter->pos.inode != inode)
break;
if (k.k->type == desc.key_type) {
- if (!desc.cmp_bkey(k, search))
- return k;
- } else if (k.k->type == desc.whiteout_type) {
+ if (!desc.cmp_key(k, key))
+ return iter;
+ } else if (k.k->type == KEY_TYPE_whiteout) {
;
} else {
/* hole, not found */
break;
}
}
- return btree_iter_err(k) ? k : bkey_s_c_err(-ENOENT);
-}
-
-static inline struct bkey_s_c
-bch2_hash_lookup(const struct bch_hash_desc desc,
- const struct bch_hash_info *info,
- struct bch_fs *c, u64 inode,
- struct btree_iter *iter, const void *key)
-{
- bch2_btree_iter_init(iter, c, desc.btree_id,
- POS(inode, desc.hash_key(info, key)),
- BTREE_ITER_SLOTS);
-
- return bch2_hash_lookup_at(desc, info, iter, key);
-}
-
-static inline struct bkey_s_c
-bch2_hash_lookup_intent(const struct bch_hash_desc desc,
- const struct bch_hash_info *info,
- struct bch_fs *c, u64 inode,
- struct btree_iter *iter, const void *key)
-{
- bch2_btree_iter_init(iter, c, desc.btree_id,
- POS(inode, desc.hash_key(info, key)),
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
- return bch2_hash_lookup_at(desc, info, iter, key);
+ return IS_ERR(k.k) ? ERR_CAST(k.k) : ERR_PTR(-ENOENT);
}
-static inline struct bkey_s_c
-bch2_hash_hole_at(const struct bch_hash_desc desc, struct btree_iter *iter)
+static inline struct btree_iter *
+bch2_hash_hole(struct btree_trans *trans,
+ const struct bch_hash_desc desc,
+ const struct bch_hash_info *info,
+ u64 inode, const void *key)
{
- u64 inode = iter->pos.inode;
+ struct btree_iter *iter;
struct bkey_s_c k;
+ iter = bch2_trans_get_iter(trans, desc.btree_id,
+ POS(inode, desc.hash_key(info, key)),
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+ if (IS_ERR(iter))
+ return iter;
+
for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
if (iter->pos.inode != inode)
break;
if (k.k->type != desc.key_type)
- return k;
+ return iter;
}
- return btree_iter_err(k) ? k : bkey_s_c_err(-ENOENT);
-}
-
-static inline struct bkey_s_c bch2_hash_hole(const struct bch_hash_desc desc,
- const struct bch_hash_info *info,
- struct bch_fs *c, u64 inode,
- struct btree_iter *iter,
- const void *key)
-{
- bch2_btree_iter_init(iter, c, desc.btree_id,
- POS(inode, desc.hash_key(info, key)),
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
- return bch2_hash_hole_at(desc, iter);
+ return IS_ERR(k.k) ? ERR_CAST(k.k) : ERR_PTR(-ENOSPC);
}
-static inline int bch2_hash_needs_whiteout(const struct bch_hash_desc desc,
+static inline int bch2_hash_needs_whiteout(struct btree_trans *trans,
+ const struct bch_hash_desc desc,
const struct bch_hash_info *info,
- struct btree_iter *iter,
struct btree_iter *start)
{
+ struct btree_iter *iter;
struct bkey_s_c k;
- bch2_btree_iter_copy(iter, start);
+ iter = bch2_trans_copy_iter(trans, start);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
bch2_btree_iter_next_slot(iter);
for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
if (k.k->type != desc.key_type &&
- k.k->type != desc.whiteout_type)
+ k.k->type != KEY_TYPE_whiteout)
return false;
if (k.k->type == desc.key_type &&
return btree_iter_err(k);
}
-static inline int bch2_hash_set(const struct bch_hash_desc desc,
- const struct bch_hash_info *info,
- struct bch_fs *c, u64 inode,
- u64 *journal_seq,
- struct bkey_i *insert, int flags)
+static inline int __bch2_hash_set(struct btree_trans *trans,
+ const struct bch_hash_desc desc,
+ const struct bch_hash_info *info,
+ u64 inode, struct bkey_i *insert, int flags)
{
- struct btree_iter iter, hashed_slot;
+ struct btree_iter *iter, *slot = NULL;
struct bkey_s_c k;
- int ret;
- bch2_btree_iter_init(&hashed_slot, c, desc.btree_id,
- POS(inode, desc.hash_bkey(info, bkey_i_to_s_c(insert))),
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
- bch2_btree_iter_init(&iter, c, desc.btree_id, hashed_slot.pos,
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
- bch2_btree_iter_link(&hashed_slot, &iter);
-retry:
- /*
- * On hash collision, we have to keep the slot we hashed to locked while
- * we do the insert - to avoid racing with another thread deleting
- * whatever's in the slot we hashed to:
- */
- ret = bch2_btree_iter_traverse(&hashed_slot);
- if (ret)
- goto err;
-
- /*
- * On -EINTR/retry, we dropped locks - always restart from the slot we
- * hashed to:
- */
- bch2_btree_iter_copy(&iter, &hashed_slot);
-
- k = bch2_hash_lookup_bkey_at(desc, info, &iter, bkey_i_to_s_c(insert));
-
- ret = btree_iter_err(k);
- if (ret == -ENOENT) {
- if (flags & BCH_HASH_SET_MUST_REPLACE) {
- ret = -ENOENT;
- goto err;
+ iter = bch2_trans_get_iter(trans, desc.btree_id,
+ POS(inode, desc.hash_bkey(info, bkey_i_to_s_c(insert))),
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
+ if (iter->pos.inode != inode)
+ break;
+
+ if (k.k->type == desc.key_type) {
+ if (!desc.cmp_bkey(k, bkey_i_to_s_c(insert)))
+ goto found;
+
+ /* hash collision: */
+ continue;
}
- /*
- * Not found, so we're now looking for any open
- * slot - we might have skipped over a whiteout
- * that we could have used, so restart from the
- * slot we hashed to:
- */
- bch2_btree_iter_copy(&iter, &hashed_slot);
- k = bch2_hash_hole_at(desc, &iter);
- if ((ret = btree_iter_err(k)))
- goto err;
- } else if (!ret) {
- if (flags & BCH_HASH_SET_MUST_CREATE) {
- ret = -EEXIST;
- goto err;
+ if (!slot &&
+ !(flags & BCH_HASH_SET_MUST_REPLACE)) {
+ slot = bch2_trans_copy_iter(trans, iter);
+ if (IS_ERR(slot))
+ return PTR_ERR(slot);
}
- } else {
- goto err;
+
+ if (k.k->type != KEY_TYPE_whiteout)
+ goto not_found;
}
- insert->k.p = iter.pos;
- ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq,
- BTREE_INSERT_ATOMIC|flags,
- BTREE_INSERT_ENTRY(&iter, insert));
-err:
- if (ret == -EINTR)
- goto retry;
-
- /*
- * On successful insert, we don't want to clobber ret with error from
- * iter:
- */
- bch2_btree_iter_unlock(&iter);
- bch2_btree_iter_unlock(&hashed_slot);
- return ret;
+ return btree_iter_err(k) ?: -ENOSPC;
+not_found:
+ if (flags & BCH_HASH_SET_MUST_REPLACE)
+ return -ENOENT;
+
+ insert->k.p = slot->pos;
+ bch2_trans_update(trans, BTREE_INSERT_ENTRY(slot, insert));
+ return 0;
+found:
+ if (flags & BCH_HASH_SET_MUST_CREATE)
+ return -EEXIST;
+
+ insert->k.p = iter->pos;
+ bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, insert));
+ return 0;
}
-static inline int bch2_hash_delete_at(const struct bch_hash_desc desc,
- const struct bch_hash_info *info,
- struct btree_iter *iter,
- u64 *journal_seq)
+static inline int bch2_hash_set(const struct bch_hash_desc desc,
+ const struct bch_hash_info *info,
+ struct bch_fs *c, u64 inode,
+ u64 *journal_seq,
+ struct bkey_i *insert, int flags)
{
- struct btree_iter whiteout_iter;
- struct bkey_i delete;
- int ret = -ENOENT;
+ return bch2_trans_do(c, journal_seq, flags|BTREE_INSERT_ATOMIC,
+ __bch2_hash_set(&trans, desc, info,
+ inode, insert, flags));
+}
- bch2_btree_iter_init(&whiteout_iter, iter->c, desc.btree_id,
- iter->pos, BTREE_ITER_SLOTS);
- bch2_btree_iter_link(iter, &whiteout_iter);
+static inline int bch2_hash_delete_at(struct btree_trans *trans,
+ const struct bch_hash_desc desc,
+ const struct bch_hash_info *info,
+ struct btree_iter *iter)
+{
+ struct bkey_i *delete;
+ int ret;
- ret = bch2_hash_needs_whiteout(desc, info, &whiteout_iter, iter);
+ ret = bch2_hash_needs_whiteout(trans, desc, info, iter);
if (ret < 0)
- goto err;
-
- bkey_init(&delete.k);
- delete.k.p = iter->pos;
- delete.k.type = ret ? desc.whiteout_type : KEY_TYPE_DELETED;
-
- ret = bch2_btree_insert_at(iter->c, NULL, NULL, journal_seq,
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_ATOMIC,
- BTREE_INSERT_ENTRY(iter, &delete));
-err:
- bch2_btree_iter_unlink(&whiteout_iter);
- return ret;
+ return ret;
+
+ delete = bch2_trans_kmalloc(trans, sizeof(*delete));
+ if (IS_ERR(delete))
+ return PTR_ERR(delete);
+
+ bkey_init(&delete->k);
+ delete->k.p = iter->pos;
+ delete->k.type = ret ? KEY_TYPE_whiteout : KEY_TYPE_deleted;
+
+ bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, delete));
+ return 0;
}
-static inline int bch2_hash_delete(const struct bch_hash_desc desc,
- const struct bch_hash_info *info,
- struct bch_fs *c, u64 inode,
- u64 *journal_seq, const void *key)
+static inline int bch2_hash_delete(struct btree_trans *trans,
+ const struct bch_hash_desc desc,
+ const struct bch_hash_info *info,
+ u64 inode, const void *key)
{
- struct btree_iter iter, whiteout_iter;
- struct bkey_s_c k;
- int ret = -ENOENT;
-
- bch2_btree_iter_init(&iter, c, desc.btree_id,
- POS(inode, desc.hash_key(info, key)),
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
- bch2_btree_iter_init(&whiteout_iter, c, desc.btree_id,
- POS(inode, desc.hash_key(info, key)),
- BTREE_ITER_SLOTS);
- bch2_btree_iter_link(&iter, &whiteout_iter);
-retry:
- k = bch2_hash_lookup_at(desc, info, &iter, key);
- if ((ret = btree_iter_err(k)))
- goto err;
-
- ret = bch2_hash_delete_at(desc, info, &iter, journal_seq);
-err:
- if (ret == -EINTR)
- goto retry;
-
- bch2_btree_iter_unlock(&whiteout_iter);
- bch2_btree_iter_unlock(&iter);
- return ret;
+ struct btree_iter *iter;
+
+ iter = bch2_hash_lookup(trans, desc, info, inode, key,
+ BTREE_ITER_INTENT);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ return bch2_hash_delete_at(trans, desc, info, iter);
}
#endif /* _BCACHEFS_STR_HASH_H */