1 // SPDX-License-Identifier: GPL-2.0
2 #ifdef CONFIG_BCACHEFS_POSIX_ACL
7 #include <linux/posix_acl.h>
8 #include <linux/posix_acl_xattr.h>
9 #include <linux/sched.h>
10 #include <linux/slab.h>
/*
 * bch2_acl_size - bytes needed for the on-disk ACL xattr payload:
 * one header, plus @nr_short short entries (no e_id field) and
 * @nr_long long entries (with e_id).
 * NOTE(review): the opening/closing braces are elided from this
 * listing; only the expression body is visible.
 */
16 static inline size_t bch2_acl_size(unsigned nr_short, unsigned nr_long)
18 return sizeof(bch_acl_header) +
19 sizeof(bch_acl_entry_short) * nr_short +
20 sizeof(bch_acl_entry) * nr_long;
/*
 * acl_to_xattr_type - map a POSIX ACL type to the bcachefs xattr
 * key-type index under which that ACL is stored.
 * NOTE(review): the switch header and the ACL_TYPE_ACCESS case label
 * are elided from this listing; presumably the first return is the
 * ACL_TYPE_ACCESS arm — confirm against the full source.
 */
23 static inline int acl_to_xattr_type(int type)
27 return KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS;
28 case ACL_TYPE_DEFAULT:
29 return KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT;
36 * Convert from filesystem to in-memory representation.
/*
 * bch2_acl_from_disk - parse an on-disk ACL xattr value into a
 * struct posix_acl.
 *
 * Checks the header size/version, then walks the entries twice:
 * a first pass that bounds-checks each entry and advances by its
 * (short or long) size, and a second pass that decodes tag, perm,
 * and — for long entries — the uid/gid into in-memory form.
 * Returns ERR_PTR(-EINVAL) on malformed data and ERR_PTR(-ENOMEM)
 * on allocation failure.
 * NOTE(review): many interior lines (case labels, goto targets,
 * count accounting) are elided from this listing; comments below
 * describe only the visible code.
 */
38 static struct posix_acl *bch2_acl_from_disk(struct btree_trans *trans,
39 const void *value, size_t size)
41 const void *p, *end = value + size;
42 struct posix_acl *acl;
43 struct posix_acl_entry *out;
/* must contain at least a header, with the expected version */
49 if (size < sizeof(bch_acl_header))
51 if (((bch_acl_header *)value)->a_version !=
52 cpu_to_le32(BCH_ACL_VERSION))
/* pass 1: validate and size each entry, starting past the header */
55 p = value + sizeof(bch_acl_header);
57 const bch_acl_entry *entry = p;
/* every entry is at least a short entry; reject truncated data */
59 if (p + sizeof(bch_acl_entry_short) > end)
62 switch (le16_to_cpu(entry->e_tag)) {
/* short entry: tag+perm only, no e_id (case labels elided) */
67 p += sizeof(bch_acl_entry_short);
/* long entry: carries an e_id as well (case labels elided) */
71 p += sizeof(bch_acl_entry);
/*
 * allocate_dropping_locks: presumably drops btree locks around the
 * allocation so posix_acl_alloc can block safely — TODO confirm
 */
86 acl = allocate_dropping_locks(trans, ret,
87 posix_acl_alloc(count, _gfp));
89 return ERR_PTR(-ENOMEM);
/* pass 2: decode entries into acl->a_entries via 'out' */
97 p = value + sizeof(bch_acl_header);
99 const bch_acl_entry *in = p;
101 out->e_tag = le16_to_cpu(in->e_tag);
102 out->e_perm = le16_to_cpu(in->e_perm);
104 switch (out->e_tag) {
109 p += sizeof(bch_acl_entry_short);
/* uid stored on disk in the initial user namespace */
112 out->e_uid = make_kuid(&init_user_ns,
113 le32_to_cpu(in->e_id));
114 p += sizeof(bch_acl_entry);
/* gid stored on disk in the initial user namespace */
117 out->e_gid = make_kgid(&init_user_ns,
118 le32_to_cpu(in->e_id));
119 p += sizeof(bch_acl_entry);
/* both passes must agree on the number of entries */
126 BUG_ON(out != acl->a_entries + acl->a_count);
/* invalid-entry error path (label elided from this listing) */
130 pr_err("invalid acl entry");
131 return ERR_PTR(-EINVAL);
/*
 * acl_for_each_entry - iterate @acl_e over every entry of an
 * in-memory posix_acl.
 * NOTE(review): the loop's increment clause is elided from this
 * listing; no comments are inserted between the continuation lines
 * below so the macro's line splices stay intact.
 */
134 #define acl_for_each_entry(acl, acl_e) \
135 for (acl_e = acl->a_entries; \
136 acl_e < acl->a_entries + acl->a_count; \
140 * Convert from in-memory to filesystem representation.
/*
 * bch2_acl_to_xattr - serialize a posix_acl into a bkey_i_xattr
 * allocated from the transaction (bch2_trans_kmalloc), ready to be
 * inserted into the xattr btree.
 *
 * First counts short vs. long entries to size the value, then emits
 * the header and each entry in little-endian on-disk form.
 * Returns ERR_PTR(-EINVAL) for an unrecognized tag and
 * ERR_PTR(-E2BIG) when the key would exceed the size limit.
 * NOTE(review): case labels, the count increments, and some error
 * checks are elided from this listing.
 */
142 static struct bkey_i_xattr *
143 bch2_acl_to_xattr(struct btree_trans *trans,
144 const struct posix_acl *acl,
147 struct bkey_i_xattr *xattr;
148 bch_acl_header *acl_header;
149 const struct posix_acl_entry *acl_e;
151 unsigned nr_short = 0, nr_long = 0, acl_len, u64s;
/* pass 1: classify entries to compute the serialized length */
153 acl_for_each_entry(acl, acl_e) {
154 switch (acl_e->e_tag) {
166 return ERR_PTR(-EINVAL);
170 acl_len = bch2_acl_size(nr_short, nr_long);
/* ACL is stored as an xattr value with an empty name */
171 u64s = BKEY_U64s + xattr_val_u64s(0, acl_len);
174 return ERR_PTR(-E2BIG);
176 xattr = bch2_trans_kmalloc(trans, u64s * sizeof(u64));
180 bkey_xattr_init(&xattr->k_i);
181 xattr->k.u64s = u64s;
182 xattr->v.x_type = acl_to_xattr_type(type);
183 xattr->v.x_name_len = 0;
184 xattr->v.x_val_len = cpu_to_le16(acl_len);
186 acl_header = xattr_val(&xattr->v);
187 acl_header->a_version = cpu_to_le32(BCH_ACL_VERSION);
189 outptr = (void *) acl_header + sizeof(*acl_header);
/* pass 2: emit each entry after the header */
191 acl_for_each_entry(acl, acl_e) {
192 bch_acl_entry *entry = outptr;
194 entry->e_tag = cpu_to_le16(acl_e->e_tag);
195 entry->e_perm = cpu_to_le16(acl_e->e_perm);
196 switch (acl_e->e_tag) {
/* long entry: uid mapped back through the initial user ns */
198 entry->e_id = cpu_to_le32(
199 from_kuid(&init_user_ns, acl_e->e_uid));
200 outptr += sizeof(bch_acl_entry);
/* long entry: gid mapped back through the initial user ns */
203 entry->e_id = cpu_to_le32(
204 from_kgid(&init_user_ns, acl_e->e_gid));
205 outptr += sizeof(bch_acl_entry);
/* short entry: no e_id written */
212 outptr += sizeof(bch_acl_entry_short);
/* serialized length must match what pass 1 computed */
217 BUG_ON(outptr != xattr_val(&xattr->v) + acl_len);
/*
 * bch2_get_acl - ->get_acl handler: read the POSIX ACL of @dentry's
 * inode (access or default, per @type) from the xattr btree.
 *
 * Looks the ACL xattr up by hash inside a btree transaction, decodes
 * it with bch2_acl_from_disk, and caches the result on the VFS inode
 * via set_cached_acl.  On transaction restart the lookup is retried
 * (the bch2_trans_begin at the top is presumably a retry target —
 * the goto/label lines are elided from this listing).
 * A missing xattr (ENOENT) is not an error: the inode has no ACL.
 */
222 struct posix_acl *bch2_get_acl(struct mnt_idmap *idmap,
223 struct dentry *dentry, int type)
225 struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
226 struct bch_fs *c = inode->v.i_sb->s_fs_info;
227 struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode);
228 struct btree_trans trans;
229 struct btree_iter iter = { NULL };
230 struct bkey_s_c_xattr xattr;
231 struct posix_acl *acl = NULL;
235 bch2_trans_init(&trans, c, 0, 0);
237 bch2_trans_begin(&trans);
/* hash lookup keyed by xattr type with an empty name */
239 ret = bch2_hash_lookup(&trans, &iter, bch2_xattr_hash_desc,
240 &hash, inode_inum(inode),
241 &X_SEARCH(acl_to_xattr_type(type), "", 0),
/* ENOENT -> no ACL stored; any other error is real */
244 if (!bch2_err_matches(ret, ENOENT))
249 k = bch2_btree_iter_peek_slot(&iter);
256 xattr = bkey_s_c_to_xattr(k);
257 acl = bch2_acl_from_disk(&trans, xattr_val(xattr.v),
258 le16_to_cpu(xattr.v->x_val_len));
261 set_cached_acl(&inode->v, type, acl);
/* decode may have restarted the transaction; retry if so */
263 if (bch2_err_matches(PTR_ERR_OR_ZERO(acl), BCH_ERR_transaction_restart))
266 bch2_trans_iter_exit(&trans, &iter);
267 bch2_trans_exit(&trans);
/*
 * bch2_set_acl_trans - set or clear an inode's ACL within an existing
 * btree transaction (no commit here; caller commits).
 *
 * A default ACL on a non-directory is rejected with -EACCES when
 * non-NULL, silently ignored when NULL.  A non-NULL @acl is
 * serialized and inserted via the xattr hash; a NULL @acl deletes
 * the corresponding xattr, with ENOENT from the delete treated as
 * success (nothing to remove).
 * NOTE(review): the if/else structure joining the set and delete
 * paths is partially elided from this listing.
 */
271 int bch2_set_acl_trans(struct btree_trans *trans, subvol_inum inum,
272 struct bch_inode_unpacked *inode_u,
273 struct posix_acl *acl, int type)
275 struct bch_hash_info hash_info = bch2_hash_info_init(trans->c, inode_u);
/* default ACLs only make sense on directories */
278 if (type == ACL_TYPE_DEFAULT &&
279 !S_ISDIR(inode_u->bi_mode))
280 return acl ? -EACCES : 0;
283 struct bkey_i_xattr *xattr =
284 bch2_acl_to_xattr(trans, acl, type);
286 return PTR_ERR(xattr);
288 ret = bch2_hash_set(trans, bch2_xattr_hash_desc, &hash_info,
289 inum, &xattr->k_i, 0);
/* acl == NULL: remove any existing ACL xattr of this type */
291 struct xattr_search_key search =
292 X_SEARCH(acl_to_xattr_type(type), "", 0);
294 ret = bch2_hash_delete(trans, bch2_xattr_hash_desc, &hash_info,
/* deleting an absent ACL is fine */
298 return bch2_err_matches(ret, ENOENT) ? 0 : ret;
/*
 * bch2_set_acl - ->set_acl handler: update @dentry's inode's ACL.
 *
 * Under the inode's ei_update_lock, runs a btree transaction that:
 * reads the packed inode, (for ACL_TYPE_ACCESS) recomputes the file
 * mode with posix_acl_update_mode — which may also NULL out the ACL —
 * writes the ACL xattr via bch2_set_acl_trans, updates ctime/mode on
 * the inode, and commits.  The whole sequence retries on transaction
 * restart (retry label/goto elided from this listing).  On success
 * the VFS inode and the cached ACL are updated to match.
 */
301 int bch2_set_acl(struct mnt_idmap *idmap,
302 struct dentry *dentry,
303 struct posix_acl *_acl, int type)
305 struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
306 struct bch_fs *c = inode->v.i_sb->s_fs_info;
307 struct btree_trans trans;
308 struct btree_iter inode_iter = { NULL };
309 struct bch_inode_unpacked inode_u;
310 struct posix_acl *acl;
/* serialize ACL/inode updates against other metadata updates */
314 mutex_lock(&inode->ei_update_lock);
315 bch2_trans_init(&trans, c, 0, 0);
317 bch2_trans_begin(&trans);
320 ret = bch2_inode_peek(&trans, &inode_iter, &inode_u, inode_inum(inode),
325 mode = inode_u.bi_mode;
/* access ACL may fold permission bits into the file mode */
327 if (type == ACL_TYPE_ACCESS) {
328 ret = posix_acl_update_mode(idmap, &inode->v, &mode, &acl);
333 ret = bch2_set_acl_trans(&trans, inode_inum(inode), &inode_u, acl, type);
337 inode_u.bi_ctime = bch2_current_time(c);
338 inode_u.bi_mode = mode;
340 ret = bch2_inode_write(&trans, &inode_iter, &inode_u) ?:
341 bch2_trans_commit(&trans, NULL, NULL, 0);
343 bch2_trans_iter_exit(&trans, &inode_iter);
/* restart: retry the whole read-modify-write (target elided) */
345 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
/* propagate the committed ctime/mode to the VFS inode */
350 bch2_inode_update_after_write(&trans, inode, &inode_u,
351 ATTR_CTIME|ATTR_MODE);
353 set_cached_acl(&inode->v, type, acl);
355 bch2_trans_exit(&trans);
356 mutex_unlock(&inode->ei_update_lock);
/*
 * bch2_acl_chmod - rewrite an inode's access ACL to match a new file
 * @mode (chmod path), inside the caller's transaction.
 *
 * Looks up the access ACL xattr, decodes it, applies
 * __posix_acl_chmod (allocating with btree locks dropped via
 * allocate_dropping_locks_errcode), re-serializes it, and queues the
 * updated xattr with bch2_trans_update.  No ACL present (ENOENT) is
 * success.  On the visible success path *new_acl presumably receives
 * the updated ACL — the assignment line is elided from this listing;
 * confirm against the full source.
 */
361 int bch2_acl_chmod(struct btree_trans *trans, subvol_inum inum,
362 struct bch_inode_unpacked *inode,
364 struct posix_acl **new_acl)
366 struct bch_hash_info hash_info = bch2_hash_info_init(trans->c, inode)
367 struct btree_iter iter;
368 struct bkey_s_c_xattr xattr;
369 struct bkey_i_xattr *new;
370 struct posix_acl *acl;
374 ret = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
376 &X_SEARCH(KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS, "", 0),
/* no access ACL stored -> nothing to chmod */
379 return bch2_err_matches(ret, ENOENT) ? 0 : ret;
381 k = bch2_btree_iter_peek_slot(&iter);
382 xattr = bkey_s_c_to_xattr(k);
386 acl = bch2_acl_from_disk(trans, xattr_val(xattr.v),
387 le16_to_cpu(xattr.v->x_val_len));
388 ret = PTR_ERR_OR_ZERO(acl);
389 if (IS_ERR_OR_NULL(acl))
/* recompute the ACL's mask/group entries for the new mode */
392 ret = allocate_dropping_locks_errcode(trans,
393 __posix_acl_chmod(&acl, _gfp, mode));
397 new = bch2_acl_to_xattr(trans, acl, ACL_TYPE_ACCESS);
404 ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
408 bch2_trans_iter_exit(trans, &iter);
/* drop our reference unless decode failed/returned nothing */
409 if (!IS_ERR_OR_NULL(acl))
414 #endif /* CONFIG_BCACHEFS_POSIX_ACL */