-380885b0b8c38dc770c48602325de77171acc419
+cddca21efc74d10223f6e2e149dfa79eeb67fdce
sb.sb->time_base_lo = cpu_to_le64(now.tv_sec * NSEC_PER_SEC + now.tv_nsec);
sb.sb->time_precision = cpu_to_le32(1);
+ sb.sb->features[0] |= 1ULL << BCH_FEATURE_NEW_SIPHASH;
+
/* Member info: */
mi = bch2_sb_resize_members(&sb,
(sizeof(*mi) + sizeof(struct bch_member) *
return ret == -ENOENT ? 0 : ret;
}
-static int inode_update_for_set_acl_fn(struct bch_inode_info *inode,
- struct bch_inode_unpacked *bi,
- void *p)
-{
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- umode_t mode = (unsigned long) p;
-
- bi->bi_ctime = bch2_current_time(c);
- bi->bi_mode = mode;
- return 0;
-}
-
-int bch2_set_acl(struct inode *vinode, struct posix_acl *acl, int type)
+int bch2_set_acl(struct inode *vinode, struct posix_acl *_acl, int type)
{
struct bch_inode_info *inode = to_bch_ei(vinode);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct btree_trans trans;
+ struct btree_iter *inode_iter;
struct bch_inode_unpacked inode_u;
- umode_t mode = inode->v.i_mode;
+ struct posix_acl *acl;
+ umode_t mode;
int ret;
mutex_lock(&inode->ei_update_lock);
bch2_trans_init(&trans, c, 0, 0);
+retry:
+ bch2_trans_begin(&trans);
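+	/* posix_acl_update_mode() may modify acl - reset it on each retry: */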
+ acl = _acl;
- if (type == ACL_TYPE_ACCESS && acl) {
+ inode_iter = bch2_inode_peek(&trans, &inode_u, inode->v.i_ino,
+ BTREE_ITER_INTENT);
+ ret = PTR_ERR_OR_ZERO(inode_iter);
+ if (ret)
+ goto btree_err;
+
+ mode = inode_u.bi_mode;
+
+ if (type == ACL_TYPE_ACCESS) {
ret = posix_acl_update_mode(&inode->v, &mode, &acl);
if (ret)
goto err;
}
-retry:
- bch2_trans_begin(&trans);
- ret = bch2_set_acl_trans(&trans,
- &inode->ei_inode,
- &inode->ei_str_hash,
- acl, type) ?:
- bch2_write_inode_trans(&trans, inode, &inode_u,
- inode_update_for_set_acl_fn,
- (void *)(unsigned long) mode) ?:
+ ret = bch2_set_acl_trans(&trans, &inode_u,
+ &inode->ei_str_hash,
+ acl, type);
+ if (ret)
+ goto btree_err;
+
+ inode_u.bi_ctime = bch2_current_time(c);
+ inode_u.bi_mode = mode;
+
+ ret = bch2_inode_write(&trans, inode_iter, &inode_u) ?:
bch2_trans_commit(&trans, NULL,
&inode->ei_journal_seq,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOUNLOCK);
+btree_err:
if (ret == -EINTR)
goto retry;
if (unlikely(ret))
BCH_FEATURE_EC = 4,
BCH_FEATURE_JOURNAL_SEQ_BLACKLIST_V3 = 5,
BCH_FEATURE_REFLINK = 6,
+ BCH_FEATURE_NEW_SIPHASH = 7,
BCH_FEATURE_NR,
};
BCH_CSUM_OPT_NR = 3,
};
-enum bch_str_hash_opts {
+enum bch_str_hash_type {
BCH_STR_HASH_CRC32C = 0,
BCH_STR_HASH_CRC64 = 1,
- BCH_STR_HASH_SIPHASH = 2,
- BCH_STR_HASH_NR = 3,
+ BCH_STR_HASH_SIPHASH_OLD = 2,
+ BCH_STR_HASH_SIPHASH = 3,
+ BCH_STR_HASH_NR = 4,
+};
+
+enum bch_str_hash_opts {
+ BCH_STR_HASH_OPT_CRC32C = 0,
+ BCH_STR_HASH_OPT_CRC64 = 1,
+ BCH_STR_HASH_OPT_SIPHASH = 2,
+ BCH_STR_HASH_OPT_NR = 3,
};
#define BCH_COMPRESSION_TYPES() \
if (unlikely(ret == -EIO)) {
trans->error = true;
- orig_iter->flags |= BTREE_ITER_ERROR;
- orig_iter->l[orig_iter->level].b = BTREE_ITER_NO_NODE_ERROR;
+ if (orig_iter) {
+ orig_iter->flags |= BTREE_ITER_ERROR;
+ orig_iter->l[orig_iter->level].b =
+ BTREE_ITER_NO_NODE_ERROR;
+ }
goto out;
}
if (ret)
return ret;
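+	/* No stripes - nothing to preallocate: */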
+ if (!idx)
+ return 0;
+
if (!gc &&
!init_heap(&c->ec_stripes_heap, roundup_pow_of_two(idx),
GFP_KERNEL))
if (*nr_iters >= max_iters) {
*end = bpos_min(*end, k.k->p);
- return 0;
+ ret = 1;
}
break;
pos.offset += r_k.k->p.offset - idx;
*end = bpos_min(*end, pos);
+ ret = 1;
break;
}
}
return ret;
}
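+/* Extent updates are trimmed so they need at most this many btree iterators: */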
+#define EXTENT_ITERS_MAX (BTREE_ITER_MAX / 3)
+
int bch2_extent_atomic_end(struct btree_iter *iter,
struct bkey_i *insert,
struct bpos *end)
struct btree *b = iter->l[0].b;
struct btree_node_iter node_iter = iter->l[0].iter;
struct bkey_packed *_k;
- unsigned nr_iters =
- bch2_bkey_nr_alloc_ptrs(bkey_i_to_s_c(insert));
- int ret = 0;
+ unsigned nr_iters = 0;
+ int ret;
BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
BUG_ON(bkey_cmp(bkey_start_pos(&insert->k), b->data->min_key) < 0);
*end = bpos_min(insert->k.p, b->key.k.p);
- ret = count_iters_for_insert(trans, bkey_i_to_s_c(insert),
- 0, end, &nr_iters, 10, false);
- if (ret)
+ ret = count_iters_for_insert(trans, bkey_i_to_s_c(insert), 0, end,
+ &nr_iters, EXTENT_ITERS_MAX / 2, false);
+ if (ret < 0)
return ret;
- while (nr_iters < 20 &&
- (_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
+ while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
KEY_TYPE_discard))) {
struct bkey unpacked;
struct bkey_s_c k = bkey_disassemble(b, _k, &unpacked);
offset = bkey_start_offset(&insert->k) -
bkey_start_offset(k.k);
- ret = count_iters_for_insert(trans, k, offset,
- end, &nr_iters, 20, true);
+ ret = count_iters_for_insert(trans, k, offset, end,
+ &nr_iters, EXTENT_ITERS_MAX, true);
if (ret)
- return ret;
-
- if (nr_iters >= 20)
break;
bch2_btree_node_iter_advance(&node_iter, b);
}
- return 0;
+ return ret < 0 ? ret : 0;
}
int bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
static int sum_sector_overwrites(struct btree_trans *trans,
struct btree_iter *extent_iter,
- struct bkey_i *new, bool *allocating,
+ struct bkey_i *new,
+ bool may_allocate,
s64 *delta)
{
struct btree_iter *iter;
struct bkey_s_c old;
+ int ret = 0;
*delta = 0;
if (IS_ERR(iter))
return PTR_ERR(iter);
- old = bch2_btree_iter_peek_slot(iter);
-
- while (1) {
- /*
- * should not be possible to get an error here, since we're
- * carefully not advancing past @new and thus whatever leaf node
- * @_iter currently points to:
- */
- BUG_ON(bkey_err(old));
-
- if (allocating &&
- !*allocating &&
+ for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, old, ret) {
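+		/* Bail out if this write would have to allocate and we can't: */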
+ if (!may_allocate &&
bch2_bkey_nr_ptrs_allocated(old) <
- bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(new)))
- *allocating = true;
+ bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(new))) {
+ ret = -ENOSPC;
+ break;
+ }
*delta += (min(new->k.p.offset,
old.k->p.offset) -
if (bkey_cmp(old.k->p, new->k.p) >= 0)
break;
-
- old = bch2_btree_iter_next_slot(iter);
}
bch2_trans_iter_put(trans, iter);
- return 0;
+ return ret;
}
int bch2_extent_update(struct btree_trans *trans,
struct btree_iter *inode_iter = NULL;
struct bch_inode_unpacked inode_u;
struct bkey_inode_buf inode_p;
- bool allocating = false;
bool extended = false;
- bool inode_locked = false;
s64 i_sectors_delta;
int ret;
if (ret)
return ret;
- ret = sum_sector_overwrites(trans, extent_iter,
- k, &allocating,
- &i_sectors_delta);
+ ret = sum_sector_overwrites(trans, extent_iter, k,
+ may_allocate, &i_sectors_delta);
if (ret)
return ret;
- if (!may_allocate && allocating)
- return -ENOSPC;
-
bch2_trans_update(trans, extent_iter, k);
new_i_size = min(k->k.p.offset << 9, new_i_size);
/* XXX: inode->i_size locking */
if (i_sectors_delta ||
new_i_size > inode->ei_inode.bi_size) {
- inode_iter = bch2_trans_get_iter(trans,
- BTREE_ID_INODES,
- POS(k->k.p.inode, 0),
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+ inode_iter = bch2_inode_peek(trans, &inode_u,
+ k->k.p.inode, BTREE_ITER_INTENT);
if (IS_ERR(inode_iter))
return PTR_ERR(inode_iter);
- ret = bch2_btree_iter_traverse(inode_iter);
- if (ret)
- goto err;
-
- inode_u = inode->ei_inode;
inode_u.bi_sectors += i_sectors_delta;
- /* XXX: this is slightly suspect */
+ /*
+ * XXX: can BCH_INODE_I_SIZE_DIRTY be true here? i.e. can we
+ * race with truncate?
+ */
if (!(inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
new_i_size > inode_u.bi_size) {
inode_u.bi_size = new_i_size;
extended = true;
}
- bch2_inode_pack(&inode_p, &inode_u);
- bch2_trans_update(trans, inode_iter, &inode_p.inode.k_i);
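+		/* Only rewrite the inode key if something actually changed: */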
+ if (i_sectors_delta || extended) {
+ bch2_inode_pack(&inode_p, &inode_u);
+ bch2_trans_update(trans, inode_iter,
+ &inode_p.inode.k_i);
+ }
}
ret = bch2_trans_commit(trans, disk_res,
if (ret)
goto err;
- inode->ei_inode.bi_sectors += i_sectors_delta;
-
- EBUG_ON(i_sectors_delta &&
- inode->ei_inode.bi_sectors != inode_u.bi_sectors);
-
- if (extended) {
- inode->ei_inode.bi_size = new_i_size;
-
- if (direct) {
- spin_lock(&inode->v.i_lock);
- if (new_i_size > inode->v.i_size)
- i_size_write(&inode->v, new_i_size);
- spin_unlock(&inode->v.i_lock);
- }
+ if (i_sectors_delta || extended) {
+ inode->ei_inode.bi_sectors = inode_u.bi_sectors;
+ inode->ei_inode.bi_size = inode_u.bi_size;
}
if (direct)
i_sectors_acct(c, inode, quota_res, i_sectors_delta);
+ if (direct && extended) {
+ spin_lock(&inode->v.i_lock);
+ if (new_i_size > inode->v.i_size)
+ i_size_write(&inode->v, new_i_size);
+ spin_unlock(&inode->v.i_lock);
+ }
if (total_delta)
*total_delta += i_sectors_delta;
err:
if (!IS_ERR_OR_NULL(inode_iter))
bch2_trans_iter_put(trans, inode_iter);
- if (inode_locked)
- mutex_unlock(&inode->ei_update_lock);
-
return ret;
}
pages[i] = grab_cache_page_write_begin(mapping, index + i, 0);
if (!pages[i]) {
nr_pages = i;
- ret = -ENOMEM;
- goto out;
+ if (!i) {
+ ret = -ENOMEM;
+ goto out;
+ }
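+			/* Got some pages: shorten the write rather than failing */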
+ len = min_t(unsigned, len,
+ nr_pages * PAGE_SIZE - offset);
+ break;
}
}
void *p)
{
struct btree_iter *iter = NULL;
- struct bkey_inode_buf *inode_p;
- int ret;
-
- lockdep_assert_held(&inode->ei_update_lock);
-
- iter = bch2_trans_get_iter(trans, BTREE_ID_INODES,
- POS(inode->v.i_ino, 0),
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
- if (IS_ERR(iter))
- return PTR_ERR(iter);
+ int ret = 0;
- /* The btree node lock is our lock on the inode: */
- ret = bch2_btree_iter_traverse(iter);
+ iter = bch2_inode_peek(trans, inode_u, inode->v.i_ino,
+ BTREE_ITER_INTENT);
+ ret = PTR_ERR_OR_ZERO(iter);
if (ret)
return ret;
- *inode_u = inode->ei_inode;
-
- if (set) {
- ret = set(inode, inode_u, p);
- if (ret)
- return ret;
- }
-
- inode_p = bch2_trans_kmalloc(trans, sizeof(*inode_p));
- if (IS_ERR(inode_p))
- return PTR_ERR(inode_p);
-
- bch2_inode_pack(inode_p, inode_u);
- bch2_trans_update(trans, iter, &inode_p->inode.k_i);
+ ret = set ? set(inode, inode_u, p) : 0;
+ if (ret)
+ return ret;
- return 0;
+ return bch2_inode_write(trans, iter, inode_u);
}
int __must_check bch2_write_inode(struct bch_fs *c,
return 0;
}
-static int inode_update_for_link_fn(struct bch_inode_info *inode,
- struct bch_inode_unpacked *bi,
- void *p)
-{
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
-
- bi->bi_ctime = bch2_current_time(c);
- bch2_inode_nlink_inc(bi);
- return 0;
-}
-
static int __bch2_link(struct bch_fs *c,
struct bch_inode_info *inode,
struct bch_inode_info *dir,
struct dentry *dentry)
{
struct btree_trans trans;
+ struct btree_iter *inode_iter;
struct bch_inode_unpacked inode_u;
int ret;
bch2_trans_init(&trans, c, 4, 1024);
retry:
bch2_trans_begin(&trans);
-
ret = __bch2_dirent_create(&trans, dir->v.i_ino,
&dir->ei_str_hash,
mode_to_type(inode->v.i_mode),
&dentry->d_name,
inode->v.i_ino,
- BCH_HASH_SET_MUST_CREATE) ?:
- bch2_write_inode_trans(&trans, inode, &inode_u,
- inode_update_for_link_fn,
- NULL) ?:
- bch2_trans_commit(&trans, NULL,
- &inode->ei_journal_seq,
- BTREE_INSERT_ATOMIC|
- BTREE_INSERT_NOUNLOCK);
+ BCH_HASH_SET_MUST_CREATE);
+ if (ret)
+ goto err;
+
+ inode_iter = bch2_inode_peek(&trans, &inode_u, inode->v.i_ino,
+ BTREE_ITER_INTENT);
+ ret = PTR_ERR_OR_ZERO(inode_iter);
+ if (ret)
+ goto err;
+ inode_u.bi_ctime = bch2_current_time(c);
+ bch2_inode_nlink_inc(&inode_u);
+
+ ret = bch2_inode_write(&trans, inode_iter, &inode_u) ?:
+ bch2_trans_commit(&trans, NULL,
+ &inode->ei_journal_seq,
+ BTREE_INSERT_ATOMIC|
+ BTREE_INSERT_NOUNLOCK);
+err:
if (ret == -EINTR)
goto retry;
return 0;
}
-static int inode_update_dir_for_unlink_fn(struct bch_inode_info *inode,
- struct bch_inode_unpacked *bi,
- void *p)
-{
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch_inode_info *unlink_inode = p;
-
- bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
-
- bi->bi_nlink -= S_ISDIR(unlink_inode->v.i_mode);
-
- return 0;
-}
-
-static int inode_update_for_unlink_fn(struct bch_inode_info *inode,
- struct bch_inode_unpacked *bi,
- void *p)
-{
- struct bch_fs *c = inode->v.i_sb->s_fs_info;
-
- bi->bi_ctime = bch2_current_time(c);
- bch2_inode_nlink_dec(bi);
- return 0;
-}
-
static int bch2_unlink(struct inode *vdir, struct dentry *dentry)
{
struct bch_fs *c = vdir->i_sb->s_fs_info;
struct bch_inode_info *dir = to_bch_ei(vdir);
struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
+ struct btree_iter *dir_iter, *inode_iter;
struct bch_inode_unpacked dir_u, inode_u;
struct btree_trans trans;
int ret;
ret = __bch2_dirent_delete(&trans, dir->v.i_ino,
&dir->ei_str_hash,
- &dentry->d_name) ?:
- bch2_write_inode_trans(&trans, dir, &dir_u,
- inode_update_dir_for_unlink_fn,
- inode) ?:
- bch2_write_inode_trans(&trans, inode, &inode_u,
- inode_update_for_unlink_fn,
- NULL) ?:
+ &dentry->d_name);
+ if (ret)
+ goto btree_err;
+
+ dir_iter = bch2_inode_peek(&trans, &dir_u, dir->v.i_ino,
+ BTREE_ITER_INTENT);
+ ret = PTR_ERR_OR_ZERO(dir_iter);
+ if (ret)
+ goto btree_err;
+
+ inode_iter = bch2_inode_peek(&trans, &inode_u, inode->v.i_ino,
+ BTREE_ITER_INTENT);
+ ret = PTR_ERR_OR_ZERO(inode_iter);
+ if (ret)
+ goto btree_err;
+
+ dir_u.bi_mtime = dir_u.bi_ctime = inode_u.bi_ctime =
+ bch2_current_time(c);
+
+ dir_u.bi_nlink -= S_ISDIR(inode_u.bi_mode);
+ bch2_inode_nlink_dec(&inode_u);
+
+ ret = bch2_inode_write(&trans, dir_iter, &dir_u) ?:
+ bch2_inode_write(&trans, inode_iter, &inode_u) ?:
bch2_trans_commit(&trans, NULL,
&dir->ei_journal_seq,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOUNLOCK|
BTREE_INSERT_NOFAIL);
+btree_err:
if (ret == -EINTR)
goto retry;
if (ret)
goto err;
- if (dir->ei_journal_seq > inode->ei_journal_seq)
- inode->ei_journal_seq = dir->ei_journal_seq;
+ journal_seq_copy(inode, dir->ei_journal_seq);
bch2_inode_update_after_write(c, dir, &dir_u,
ATTR_MTIME|ATTR_CTIME);
return ret;
}
-static int inode_update_for_setattr_fn(struct bch_inode_info *inode,
- struct bch_inode_unpacked *bi,
- void *p)
+void bch2_setattr_copy(struct bch_inode_info *inode,
+ struct bch_inode_unpacked *bi,
+ struct iattr *attr)
{
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct iattr *attr = p;
unsigned int ia_valid = attr->ia_valid;
if (ia_valid & ATTR_UID)
- bi->bi_uid = from_kuid(inode->v.i_sb->s_user_ns, attr->ia_uid);
+ bi->bi_uid = from_kuid(c->vfs_sb->s_user_ns, attr->ia_uid);
if (ia_valid & ATTR_GID)
- bi->bi_gid = from_kgid(inode->v.i_sb->s_user_ns, attr->ia_gid);
+ bi->bi_gid = from_kgid(c->vfs_sb->s_user_ns, attr->ia_gid);
if (ia_valid & ATTR_ATIME)
bi->bi_atime = timespec_to_bch2_time(c, attr->ia_atime);
mode &= ~S_ISGID;
bi->bi_mode = mode;
}
-
- return 0;
}
-static int bch2_setattr_nonsize(struct bch_inode_info *inode, struct iattr *iattr)
+static int bch2_setattr_nonsize(struct bch_inode_info *inode,
+ struct iattr *attr)
{
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_qid qid;
struct btree_trans trans;
+ struct btree_iter *inode_iter;
struct bch_inode_unpacked inode_u;
struct posix_acl *acl = NULL;
int ret;
qid = inode->ei_qid;
- if (iattr->ia_valid & ATTR_UID)
- qid.q[QTYP_USR] = from_kuid(&init_user_ns, iattr->ia_uid);
+ if (attr->ia_valid & ATTR_UID)
+ qid.q[QTYP_USR] = from_kuid(&init_user_ns, attr->ia_uid);
- if (iattr->ia_valid & ATTR_GID)
- qid.q[QTYP_GRP] = from_kgid(&init_user_ns, iattr->ia_gid);
+ if (attr->ia_valid & ATTR_GID)
+ qid.q[QTYP_GRP] = from_kgid(&init_user_ns, attr->ia_gid);
ret = bch2_fs_quota_transfer(c, inode, qid, ~0,
KEY_TYPE_QUOTA_PREALLOC);
kfree(acl);
acl = NULL;
- ret = bch2_write_inode_trans(&trans, inode, &inode_u,
- inode_update_for_setattr_fn, iattr) ?:
- (iattr->ia_valid & ATTR_MODE
- ? bch2_acl_chmod(&trans, inode, iattr->ia_mode, &acl)
- : 0) ?:
+ inode_iter = bch2_inode_peek(&trans, &inode_u, inode->v.i_ino,
+ BTREE_ITER_INTENT);
+ ret = PTR_ERR_OR_ZERO(inode_iter);
+ if (ret)
+ goto btree_err;
+
+ bch2_setattr_copy(inode, &inode_u, attr);
+
+ if (attr->ia_valid & ATTR_MODE) {
+ ret = bch2_acl_chmod(&trans, inode, inode_u.bi_mode, &acl);
+ if (ret)
+ goto btree_err;
+ }
+
+ ret = bch2_inode_write(&trans, inode_iter, &inode_u) ?:
bch2_trans_commit(&trans, NULL,
&inode->ei_journal_seq,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOUNLOCK|
BTREE_INSERT_NOFAIL);
+btree_err:
if (ret == -EINTR)
goto retry;
if (unlikely(ret))
goto err_trans;
- bch2_inode_update_after_write(c, inode, &inode_u, iattr->ia_valid);
+ bch2_inode_update_after_write(c, inode, &inode_u, attr->ia_valid);
if (acl)
set_cached_acl(&inode->v, ACL_TYPE_ACCESS, acl);
#include "error.h"
#include "extents.h"
#include "inode.h"
-#include "io.h"
-#include "keylist.h"
+#include "str_hash.h"
#include <linux/random.h>
return 0;
}
+struct btree_iter *bch2_inode_peek(struct btree_trans *trans,
+ struct bch_inode_unpacked *inode,
+ u64 inum, unsigned flags)
+{
+ struct btree_iter *iter;
+ struct bkey_s_c k;
+ int ret;
+
+ iter = bch2_trans_get_iter(trans, BTREE_ID_INODES, POS(inum, 0),
+ BTREE_ITER_SLOTS|flags);
+ if (IS_ERR(iter))
+ return iter;
+
+ k = bch2_btree_iter_peek_slot(iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
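+	/* Callers expect the inode to exist - a hole or non-inode key here indicates an inconsistency: */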
+ ret = k.k->type == KEY_TYPE_inode ? 0 : -EIO;
+ if (ret)
+ goto err;
+
+ ret = bch2_inode_unpack(bkey_s_c_to_inode(k), inode);
+ if (ret)
+ goto err;
+
+ return iter;
+err:
+ bch2_trans_iter_put(trans, iter);
+ return ERR_PTR(ret);
+}
+
+int bch2_inode_write(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bch_inode_unpacked *inode)
+{
+ struct bkey_inode_buf *inode_p;
+
+ inode_p = bch2_trans_kmalloc(trans, sizeof(*inode_p));
+ if (IS_ERR(inode_p))
+ return PTR_ERR(inode_p);
+
+ bch2_inode_pack(inode_p, inode);
+ bch2_trans_update(trans, iter, &inode_p->inode.k_i);
+ return 0;
+}
+
const char *bch2_inode_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
struct bkey_s_c_inode inode = bkey_s_c_to_inode(k);
struct bch_inode_unpacked *parent)
{
s64 now = bch2_current_time(c);
+ enum bch_str_hash_type str_hash =
+ bch2_str_hash_opt_to_type(c, c->opts.str_hash);
memset(inode_u, 0, sizeof(*inode_u));
/* ick */
- inode_u->bi_flags |= c->opts.str_hash << INODE_STR_HASH_OFFSET;
+ inode_u->bi_flags |= str_hash << INODE_STR_HASH_OFFSET;
get_random_bytes(&inode_u->bi_hash_seed,
sizeof(inode_u->bi_hash_seed));
void bch2_inode_pack(struct bkey_inode_buf *, const struct bch_inode_unpacked *);
int bch2_inode_unpack(struct bkey_s_c_inode, struct bch_inode_unpacked *);
+struct btree_iter *bch2_inode_peek(struct btree_trans *,
+ struct bch_inode_unpacked *, u64, unsigned);
+int bch2_inode_write(struct btree_trans *, struct btree_iter *,
+ struct bch_inode_unpacked *);
+
void bch2_inode_init(struct bch_fs *, struct bch_inode_unpacked *,
uid_t, gid_t, umode_t, dev_t,
struct bch_inode_unpacked *);
if (!bkey_extent_is_direct_data(k.k))
goto next_nondata;
- if (cur_inum != k.k->p.inode) {
+ if (btree_id == BTREE_ID_EXTENTS &&
+ cur_inum != k.k->p.inode) {
struct bch_inode_unpacked inode;
/* don't hold btree locks while looking up inode: */
x(str_hash, u8, \
OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
OPT_STR(bch2_str_hash_types), \
- BCH_SB_STR_HASH_TYPE, BCH_STR_HASH_SIPHASH, \
+ BCH_SB_STR_HASH_TYPE, BCH_STR_HASH_OPT_SIPHASH, \
NULL, "Hash function for directory entries and xattrs")\
x(foreground_target, u16, \
OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME|OPT_INODE, \
#include <crypto/hash.h>
#include <crypto/sha.h>
+static inline enum bch_str_hash_type
+bch2_str_hash_opt_to_type(struct bch_fs *c, enum bch_str_hash_opts opt)
+{
+ switch (opt) {
+ case BCH_STR_HASH_OPT_CRC32C:
+ return BCH_STR_HASH_CRC32C;
+ case BCH_STR_HASH_OPT_CRC64:
+ return BCH_STR_HASH_CRC64;
+ case BCH_STR_HASH_OPT_SIPHASH:
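+		/*
+		 * Existing filesystems without BCH_FEATURE_NEW_SIPHASH keep the
+		 * old variant so previously hashed keys still match:
+		 */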
+ return c->sb.features & (1ULL << BCH_FEATURE_NEW_SIPHASH)
+ ? BCH_STR_HASH_SIPHASH
+ : BCH_STR_HASH_SIPHASH_OLD;
+ default:
+ BUG();
+ }
+}
+
struct bch_hash_info {
u8 type;
union {
};
static inline struct bch_hash_info
-bch2_hash_info_init(struct bch_fs *c,
- const struct bch_inode_unpacked *bi)
+bch2_hash_info_init(struct bch_fs *c, const struct bch_inode_unpacked *bi)
{
/* XXX ick */
struct bch_hash_info info = {
.type = (bi->bi_flags >> INODE_STR_HASH_OFFSET) &
- ~(~0U << INODE_STR_HASH_BITS)
+ ~(~0U << INODE_STR_HASH_BITS),
+ .crc_key = bi->bi_hash_seed,
};
- switch (info.type) {
- case BCH_STR_HASH_CRC32C:
- case BCH_STR_HASH_CRC64:
- info.crc_key = bi->bi_hash_seed;
- break;
- case BCH_STR_HASH_SIPHASH: {
+ if (unlikely(info.type == BCH_STR_HASH_SIPHASH_OLD)) {
SHASH_DESC_ON_STACK(desc, c->sha256);
u8 digest[SHA256_DIGEST_SIZE];
crypto_shash_digest(desc, (void *) &bi->bi_hash_seed,
sizeof(bi->bi_hash_seed), digest);
memcpy(&info.siphash_key, digest, sizeof(info.siphash_key));
- break;
- }
- default:
- BUG();
}
return info;
case BCH_STR_HASH_CRC64:
ctx->crc64 = crc64_be(~0, &info->crc_key, sizeof(info->crc_key));
break;
+ case BCH_STR_HASH_SIPHASH_OLD:
case BCH_STR_HASH_SIPHASH:
SipHash24_Init(&ctx->siphash, &info->siphash_key);
break;
case BCH_STR_HASH_CRC64:
ctx->crc64 = crc64_be(ctx->crc64, data, len);
break;
+ case BCH_STR_HASH_SIPHASH_OLD:
case BCH_STR_HASH_SIPHASH:
SipHash24_Update(&ctx->siphash, data, len);
break;
return ctx->crc32c;
case BCH_STR_HASH_CRC64:
return ctx->crc64 >> 1;
+ case BCH_STR_HASH_SIPHASH_OLD:
case BCH_STR_HASH_SIPHASH:
return SipHash24_End(&ctx->siphash) >> 1;
default:
if (!ret)
ret = -ENOSPC;
out:
- if (slot)
+ if (!IS_ERR_OR_NULL(slot))
bch2_trans_iter_put(trans, slot);
- bch2_trans_iter_put(trans, iter);
+ if (!IS_ERR_OR_NULL(iter))
+ bch2_trans_iter_put(trans, iter);
return ret;
found: