bcachefs-tools-debian: libbcachefs/fs.c
(bcachefs sources at commit 0a9be96b50, "bcachefs: BSET_OFFSET()")
1 // SPDX-License-Identifier: GPL-2.0
2 #ifndef NO_BCACHEFS_FS
3
4 #include "bcachefs.h"
5 #include "acl.h"
6 #include "bkey_buf.h"
7 #include "btree_update.h"
8 #include "buckets.h"
9 #include "chardev.h"
10 #include "dirent.h"
11 #include "extents.h"
12 #include "fs.h"
13 #include "fs-common.h"
14 #include "fs-io.h"
15 #include "fs-ioctl.h"
16 #include "fsck.h"
17 #include "inode.h"
18 #include "io.h"
19 #include "journal.h"
20 #include "keylist.h"
21 #include "quota.h"
22 #include "super.h"
23 #include "xattr.h"
24
25 #include <linux/aio.h>
26 #include <linux/backing-dev.h>
27 #include <linux/exportfs.h>
28 #include <linux/fiemap.h>
29 #include <linux/module.h>
30 #include <linux/pagemap.h>
31 #include <linux/posix_acl.h>
32 #include <linux/random.h>
33 #include <linux/statfs.h>
34 #include <linux/string.h>
35 #include <linux/xattr.h>
36
37 static struct kmem_cache *bch2_inode_cache;
38
39 static void bch2_vfs_inode_init(struct bch_fs *,
40                                 struct bch_inode_info *,
41                                 struct bch_inode_unpacked *);
42
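/*
 * propagate a journal sequence number to an inode: advance
 * dst->ei_journal_seq to @journal_seq if it's newer (lock-free max via
 * cmpxchg), then record dst's inode number against that sequence with
 * bch2_journal_set_has_inum():
 */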
43 static void journal_seq_copy(struct bch_fs *c,
44                              struct bch_inode_info *dst,
45                              u64 journal_seq)
46 {
47         /*
48          * atomic64_cmpxchg has a fallback for archs that don't support it,
49          * cmpxchg does not:
50          */
51         atomic64_t *dst_seq = (void *) &dst->ei_journal_seq;
52         u64 old, v = READ_ONCE(dst->ei_journal_seq);
53
54         do {
55                 old = v;
56
57                 if (old >= journal_seq)
58                         break;
59         } while ((v = atomic64_cmpxchg(dst_seq, old, journal_seq)) != old);
60
61         bch2_journal_set_has_inum(&c->journal, dst->v.i_ino, journal_seq);
62 }
63
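/*
 * two-mode page cache lock: "add" holders take the count positive,
 * "block" holders take it negative; the two modes exclude each other but
 * nest freely within themselves. callers that add pages to the page
 * cache take it in add mode, callers that need to keep new pages out of
 * the page cache take it in block mode:
 */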
64 static void __pagecache_lock_put(struct pagecache_lock *lock, long i)
65 {
66         BUG_ON(atomic_long_read(&lock->v) == 0);
67
68         if (atomic_long_sub_return_release(i, &lock->v) == 0)
69                 wake_up_all(&lock->wait);
70 }
71
72 static bool __pagecache_lock_tryget(struct pagecache_lock *lock, long i)
73 {
74         long v = atomic_long_read(&lock->v), old;
75
76         do {
77                 old = v;
78
79                 if (i > 0 ? v < 0 : v > 0)
80                         return false;
81         } while ((v = atomic_long_cmpxchg_acquire(&lock->v,
82                                         old, old + i)) != old);
83         return true;
84 }
85
86 static void __pagecache_lock_get(struct pagecache_lock *lock, long i)
87 {
88         wait_event(lock->wait, __pagecache_lock_tryget(lock, i));
89 }
90
91 void bch2_pagecache_add_put(struct pagecache_lock *lock)
92 {
93         __pagecache_lock_put(lock, 1);
94 }
95
96 bool bch2_pagecache_add_tryget(struct pagecache_lock *lock)
97 {
98         return __pagecache_lock_tryget(lock, 1);
99 }
100
101 void bch2_pagecache_add_get(struct pagecache_lock *lock)
102 {
103         __pagecache_lock_get(lock, 1);
104 }
105
106 void bch2_pagecache_block_put(struct pagecache_lock *lock)
107 {
108         __pagecache_lock_put(lock, -1);
109 }
110
111 void bch2_pagecache_block_get(struct pagecache_lock *lock)
112 {
113         __pagecache_lock_get(lock, -1);
114 }
115
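/*
 * sync the VFS inode with the unpacked btree inode @bi: ownership, mode
 * and link count are always copied, timestamps only for the ATTR_* bits
 * set in @fields:
 */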
116 void bch2_inode_update_after_write(struct bch_fs *c,
117                                    struct bch_inode_info *inode,
118                                    struct bch_inode_unpacked *bi,
119                                    unsigned fields)
120 {
121         set_nlink(&inode->v, bch2_inode_nlink_get(bi));
122         i_uid_write(&inode->v, bi->bi_uid);
123         i_gid_write(&inode->v, bi->bi_gid);
124         inode->v.i_mode = bi->bi_mode;
125
126         if (fields & ATTR_ATIME)
127                 inode->v.i_atime = bch2_time_to_timespec(c, bi->bi_atime);
128         if (fields & ATTR_MTIME)
129                 inode->v.i_mtime = bch2_time_to_timespec(c, bi->bi_mtime);
130         if (fields & ATTR_CTIME)
131                 inode->v.i_ctime = bch2_time_to_timespec(c, bi->bi_ctime);
132
133         inode->ei_inode         = *bi;
134
135         bch2_inode_flags_to_vfs(inode);
136 }
137
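/*
 * read the inode from the btree, apply @set (if non NULL) to the
 * unpacked inode and write it back, all in one transaction, retrying on
 * -EINTR (transaction restart); on success the VFS inode is updated to
 * match:
 */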
138 int __must_check bch2_write_inode(struct bch_fs *c,
139                                   struct bch_inode_info *inode,
140                                   inode_set_fn set,
141                                   void *p, unsigned fields)
142 {
143         struct btree_trans trans;
144         struct btree_iter *iter;
145         struct bch_inode_unpacked inode_u;
146         int ret;
147
148         bch2_trans_init(&trans, c, 0, 512);
149 retry:
150         bch2_trans_begin(&trans);
151
152         iter = bch2_inode_peek(&trans, &inode_u, inode->v.i_ino,
153                                BTREE_ITER_INTENT);
154         ret   = PTR_ERR_OR_ZERO(iter) ?:
155                 (set ? set(inode, &inode_u, p) : 0) ?:
156                 bch2_inode_write(&trans, iter, &inode_u) ?:
157                 bch2_trans_commit(&trans, NULL,
158                                   &inode->ei_journal_seq,
159                                   BTREE_INSERT_NOUNLOCK|
160                                   BTREE_INSERT_NOFAIL);
161
162         /*
163          * the btree node lock protects inode->ei_inode, not ei_update_lock;
164          * this is important for inode updates via bchfs_write_index_update
165          */
166         if (!ret)
167                 bch2_inode_update_after_write(c, inode, &inode_u, fields);
168
169         bch2_trans_iter_put(&trans, iter);
170
171         if (ret == -EINTR)
172                 goto retry;
173
174         bch2_trans_exit(&trans);
175         return ret < 0 ? ret : 0;
176 }
177
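/*
 * move this inode's quota usage (i_blocks plus reservations) to @new_qid
 * for the quota types that are enabled and actually changing; on success
 * ei_qid is updated to match:
 */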
178 int bch2_fs_quota_transfer(struct bch_fs *c,
179                            struct bch_inode_info *inode,
180                            struct bch_qid new_qid,
181                            unsigned qtypes,
182                            enum quota_acct_mode mode)
183 {
184         unsigned i;
185         int ret;
186
187         qtypes &= enabled_qtypes(c);
188
189         for (i = 0; i < QTYP_NR; i++)
190                 if (new_qid.q[i] == inode->ei_qid.q[i])
191                         qtypes &= ~(1U << i);
192
193         if (!qtypes)
194                 return 0;
195
196         mutex_lock(&inode->ei_quota_lock);
197
198         ret = bch2_quota_transfer(c, qtypes, new_qid,
199                                   inode->ei_qid,
200                                   inode->v.i_blocks +
201                                   inode->ei_quota_reserved,
202                                   mode);
203         if (!ret)
204                 for (i = 0; i < QTYP_NR; i++)
205                         if (qtypes & (1 << i))
206                                 inode->ei_qid.q[i] = new_qid.q[i];
207
208         mutex_unlock(&inode->ei_quota_lock);
209
210         return ret;
211 }
212
213 struct inode *bch2_vfs_inode_get(struct bch_fs *c, u64 inum)
214 {
215         struct bch_inode_unpacked inode_u;
216         struct bch_inode_info *inode;
217         int ret;
218
219         inode = to_bch_ei(iget_locked(c->vfs_sb, inum));
220         if (unlikely(!inode))
221                 return ERR_PTR(-ENOMEM);
222         if (!(inode->v.i_state & I_NEW))
223                 return &inode->v;
224
225         ret = bch2_inode_find_by_inum(c, inum, &inode_u);
226         if (ret) {
227                 iget_failed(&inode->v);
228                 return ERR_PTR(ret);
229         }
230
231         bch2_vfs_inode_init(c, inode, &inode_u);
232
233         inode->ei_journal_seq = bch2_inode_journal_seq(&c->journal, inum);
234
235         unlock_new_inode(&inode->v);
236
237         return &inode->v;
238 }
239
240 static int inum_test(struct inode *inode, void *p)
241 {
242         unsigned long *ino = p;
243
244         return *ino == inode->i_ino;
245 }
246
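/*
 * common create path for create/mknod/mkdir/tmpfile and symlink: the VFS
 * inode and ACLs are allocated up front, the new inode (and its dirent,
 * unless @tmpfile) is created in a single btree transaction, and the
 * result is inserted into the inode cache before btree locks are
 * dropped:
 */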
247 static struct bch_inode_info *
248 __bch2_create(struct user_namespace *mnt_userns,
249               struct bch_inode_info *dir, struct dentry *dentry,
250               umode_t mode, dev_t rdev, bool tmpfile)
251 {
252         struct bch_fs *c = dir->v.i_sb->s_fs_info;
253         struct btree_trans trans;
254         struct bch_inode_unpacked dir_u;
255         struct bch_inode_info *inode, *old;
256         struct bch_inode_unpacked inode_u;
257         struct posix_acl *default_acl = NULL, *acl = NULL;
258         u64 journal_seq = 0;
259         int ret;
260
261         /*
262          * preallocate acls + vfs inode before btree transaction, so that
263          * nothing can fail after the transaction succeeds:
264          */
265 #ifdef CONFIG_BCACHEFS_POSIX_ACL
266         ret = posix_acl_create(&dir->v, &mode, &default_acl, &acl);
267         if (ret)
268                 return ERR_PTR(ret);
269 #endif
270         inode = to_bch_ei(new_inode(c->vfs_sb));
271         if (unlikely(!inode)) {
272                 inode = ERR_PTR(-ENOMEM);
273                 goto err;
274         }
275
276         bch2_inode_init_early(c, &inode_u);
277
278         if (!tmpfile)
279                 mutex_lock(&dir->ei_update_lock);
280
281         bch2_trans_init(&trans, c, 8,
282                         2048 + (!tmpfile ? dentry->d_name.len : 0));
283 retry:
284         bch2_trans_begin(&trans);
285
286         ret   = bch2_create_trans(&trans, dir->v.i_ino, &dir_u, &inode_u,
287                                   !tmpfile ? &dentry->d_name : NULL,
288                                   from_kuid(mnt_userns, current_fsuid()),
289                                   from_kgid(mnt_userns, current_fsgid()),
290                                   mode, rdev,
291                                   default_acl, acl) ?:
292                 bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, 1,
293                                 KEY_TYPE_QUOTA_PREALLOC);
294         if (unlikely(ret))
295                 goto err_before_quota;
296
297         ret   = bch2_trans_commit(&trans, NULL, &journal_seq,
298                                   BTREE_INSERT_NOUNLOCK);
299         if (unlikely(ret)) {
300                 bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, -1,
301                                 KEY_TYPE_QUOTA_WARN);
302 err_before_quota:
303                 if (ret == -EINTR)
304                         goto retry;
305                 goto err_trans;
306         }
307
308         if (!tmpfile) {
309                 bch2_inode_update_after_write(c, dir, &dir_u,
310                                               ATTR_MTIME|ATTR_CTIME);
311                 journal_seq_copy(c, dir, journal_seq);
312                 mutex_unlock(&dir->ei_update_lock);
313         }
314
315         bch2_vfs_inode_init(c, inode, &inode_u);
316         journal_seq_copy(c, inode, journal_seq);
317
318         set_cached_acl(&inode->v, ACL_TYPE_ACCESS, acl);
319         set_cached_acl(&inode->v, ACL_TYPE_DEFAULT, default_acl);
320
321         /*
322          * we must insert the new inode into the inode cache before calling
323          * bch2_trans_exit() and dropping locks, else we could race with another
324          * thread pulling the inode in and modifying it:
325          */
326
327         inode->v.i_state |= I_CREATING;
328         old = to_bch_ei(inode_insert5(&inode->v, inode->v.i_ino,
329                                       inum_test, NULL, &inode->v.i_ino));
330         BUG_ON(!old);
331
332         if (unlikely(old != inode)) {
333                 /*
334                  * We raced, another process pulled the new inode into cache
335                  * before us:
336                  */
337                 journal_seq_copy(c, old, journal_seq);
338                 make_bad_inode(&inode->v);
339                 iput(&inode->v);
340
341                 inode = old;
342         } else {
343                 /*
344                  * we really don't want insert_inode_locked2() to be setting
345                  * I_NEW...
346                  */
347                 unlock_new_inode(&inode->v);
348         }
349
350         bch2_trans_exit(&trans);
351 err:
352         posix_acl_release(default_acl);
353         posix_acl_release(acl);
354         return inode;
355 err_trans:
356         if (!tmpfile)
357                 mutex_unlock(&dir->ei_update_lock);
358
359         bch2_trans_exit(&trans);
360         make_bad_inode(&inode->v);
361         iput(&inode->v);
362         inode = ERR_PTR(ret);
363         goto err;
364 }
365
366 /* methods */
367
368 static struct dentry *bch2_lookup(struct inode *vdir, struct dentry *dentry,
369                                   unsigned int flags)
370 {
371         struct bch_fs *c = vdir->i_sb->s_fs_info;
372         struct bch_inode_info *dir = to_bch_ei(vdir);
373         struct bch_hash_info hash = bch2_hash_info_init(c, &dir->ei_inode);
374         struct inode *vinode = NULL;
375         u64 inum;
376
377         inum = bch2_dirent_lookup(c, dir->v.i_ino, &hash,
378                                   &dentry->d_name);
379
380         if (inum)
381                 vinode = bch2_vfs_inode_get(c, inum);
382
383         return d_splice_alias(vinode, dentry);
384 }
385
386 static int bch2_mknod(struct user_namespace *mnt_userns,
387                       struct inode *vdir, struct dentry *dentry,
388                       umode_t mode, dev_t rdev)
389 {
390         struct bch_inode_info *inode =
391                 __bch2_create(mnt_userns, to_bch_ei(vdir), dentry, mode, rdev, false);
392
393         if (IS_ERR(inode))
394                 return PTR_ERR(inode);
395
396         d_instantiate(dentry, &inode->v);
397         return 0;
398 }
399
400 static int bch2_create(struct user_namespace *mnt_userns,
401                        struct inode *vdir, struct dentry *dentry,
402                        umode_t mode, bool excl)
403 {
404         return bch2_mknod(mnt_userns, vdir, dentry, mode|S_IFREG, 0);
405 }
406
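/*
 * shared by link() and symlink(): creates the dirent and updates the
 * target inode's link count in one transaction, then brings both VFS
 * inodes up to date:
 */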
407 static int __bch2_link(struct bch_fs *c,
408                        struct bch_inode_info *inode,
409                        struct bch_inode_info *dir,
410                        struct dentry *dentry)
411 {
412         struct btree_trans trans;
413         struct bch_inode_unpacked dir_u, inode_u;
414         int ret;
415
416         mutex_lock(&inode->ei_update_lock);
417         bch2_trans_init(&trans, c, 4, 1024);
418
419         ret = __bch2_trans_do(&trans, NULL, &inode->ei_journal_seq,
420                               BTREE_INSERT_NOUNLOCK,
421                         bch2_link_trans(&trans,
422                                         dir->v.i_ino,
423                                         inode->v.i_ino, &dir_u, &inode_u,
424                                         &dentry->d_name));
425
426         if (likely(!ret)) {
427                 BUG_ON(inode_u.bi_inum != inode->v.i_ino);
428
429                 journal_seq_copy(c, inode, dir->ei_journal_seq);
430                 bch2_inode_update_after_write(c, dir, &dir_u,
431                                               ATTR_MTIME|ATTR_CTIME);
432                 bch2_inode_update_after_write(c, inode, &inode_u, ATTR_CTIME);
433         }
434
435         bch2_trans_exit(&trans);
436         mutex_unlock(&inode->ei_update_lock);
437         return ret;
438 }
439
440 static int bch2_link(struct dentry *old_dentry, struct inode *vdir,
441                      struct dentry *dentry)
442 {
443         struct bch_fs *c = vdir->i_sb->s_fs_info;
444         struct bch_inode_info *dir = to_bch_ei(vdir);
445         struct bch_inode_info *inode = to_bch_ei(old_dentry->d_inode);
446         int ret;
447
448         lockdep_assert_held(&inode->v.i_rwsem);
449
450         ret = __bch2_link(c, inode, dir, dentry);
451         if (unlikely(ret))
452                 return ret;
453
454         ihold(&inode->v);
455         d_instantiate(dentry, &inode->v);
456         return 0;
457 }
458
459 static int bch2_unlink(struct inode *vdir, struct dentry *dentry)
460 {
461         struct bch_fs *c = vdir->i_sb->s_fs_info;
462         struct bch_inode_info *dir = to_bch_ei(vdir);
463         struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
464         struct bch_inode_unpacked dir_u, inode_u;
465         struct btree_trans trans;
466         int ret;
467
468         bch2_lock_inodes(INODE_UPDATE_LOCK, dir, inode);
469         bch2_trans_init(&trans, c, 4, 1024);
470
471         ret = __bch2_trans_do(&trans, NULL, &dir->ei_journal_seq,
472                               BTREE_INSERT_NOUNLOCK|
473                               BTREE_INSERT_NOFAIL,
474                         bch2_unlink_trans(&trans,
475                                           dir->v.i_ino, &dir_u,
476                                           &inode_u, &dentry->d_name));
477
478         if (likely(!ret)) {
479                 BUG_ON(inode_u.bi_inum != inode->v.i_ino);
480
481                 journal_seq_copy(c, inode, dir->ei_journal_seq);
482                 bch2_inode_update_after_write(c, dir, &dir_u,
483                                               ATTR_MTIME|ATTR_CTIME);
484                 bch2_inode_update_after_write(c, inode, &inode_u,
485                                               ATTR_MTIME);
486         }
487
488         bch2_trans_exit(&trans);
489         bch2_unlock_inodes(INODE_UPDATE_LOCK, dir, inode);
490
491         return ret;
492 }
493
494 static int bch2_symlink(struct user_namespace *mnt_userns,
495                         struct inode *vdir, struct dentry *dentry,
496                         const char *symname)
497 {
498         struct bch_fs *c = vdir->i_sb->s_fs_info;
499         struct bch_inode_info *dir = to_bch_ei(vdir), *inode;
500         int ret;
501
502         inode = __bch2_create(mnt_userns, dir, dentry, S_IFLNK|S_IRWXUGO, 0, true);
503         if (unlikely(IS_ERR(inode)))
504                 return PTR_ERR(inode);
505
506         inode_lock(&inode->v);
507         ret = page_symlink(&inode->v, symname, strlen(symname) + 1);
508         inode_unlock(&inode->v);
509
510         if (unlikely(ret))
511                 goto err;
512
513         ret = filemap_write_and_wait_range(inode->v.i_mapping, 0, LLONG_MAX);
514         if (unlikely(ret))
515                 goto err;
516
517         journal_seq_copy(c, dir, inode->ei_journal_seq);
518
519         ret = __bch2_link(c, inode, dir, dentry);
520         if (unlikely(ret))
521                 goto err;
522
523         d_instantiate(dentry, &inode->v);
524         return 0;
525 err:
526         iput(&inode->v);
527         return ret;
528 }
529
530 static int bch2_mkdir(struct user_namespace *mnt_userns,
531                       struct inode *vdir, struct dentry *dentry, umode_t mode)
532 {
533         return bch2_mknod(mnt_userns, vdir, dentry, mode|S_IFDIR, 0);
534 }
535
536 static int bch2_rename2(struct user_namespace *mnt_userns,
537                         struct inode *src_vdir, struct dentry *src_dentry,
538                         struct inode *dst_vdir, struct dentry *dst_dentry,
539                         unsigned flags)
540 {
541         struct bch_fs *c = src_vdir->i_sb->s_fs_info;
542         struct bch_inode_info *src_dir = to_bch_ei(src_vdir);
543         struct bch_inode_info *dst_dir = to_bch_ei(dst_vdir);
544         struct bch_inode_info *src_inode = to_bch_ei(src_dentry->d_inode);
545         struct bch_inode_info *dst_inode = to_bch_ei(dst_dentry->d_inode);
546         struct bch_inode_unpacked dst_dir_u, src_dir_u;
547         struct bch_inode_unpacked src_inode_u, dst_inode_u;
548         struct btree_trans trans;
549         enum bch_rename_mode mode = flags & RENAME_EXCHANGE
550                 ? BCH_RENAME_EXCHANGE
551                 : dst_dentry->d_inode
552                 ? BCH_RENAME_OVERWRITE : BCH_RENAME;
553         u64 journal_seq = 0;
554         int ret;
555
556         if (flags & ~(RENAME_NOREPLACE|RENAME_EXCHANGE))
557                 return -EINVAL;
558
559         if (mode == BCH_RENAME_OVERWRITE) {
560                 ret = filemap_write_and_wait_range(src_inode->v.i_mapping,
561                                                    0, LLONG_MAX);
562                 if (ret)
563                         return ret;
564         }
565
566         bch2_trans_init(&trans, c, 8, 2048);
567
568         bch2_lock_inodes(INODE_UPDATE_LOCK,
569                          src_dir,
570                          dst_dir,
571                          src_inode,
572                          dst_inode);
573
574         if (inode_attr_changing(dst_dir, src_inode, Inode_opt_project)) {
575                 ret = bch2_fs_quota_transfer(c, src_inode,
576                                              dst_dir->ei_qid,
577                                              1 << QTYP_PRJ,
578                                              KEY_TYPE_QUOTA_PREALLOC);
579                 if (ret)
580                         goto err;
581         }
582
583         if (mode == BCH_RENAME_EXCHANGE &&
584             inode_attr_changing(src_dir, dst_inode, Inode_opt_project)) {
585                 ret = bch2_fs_quota_transfer(c, dst_inode,
586                                              src_dir->ei_qid,
587                                              1 << QTYP_PRJ,
588                                              KEY_TYPE_QUOTA_PREALLOC);
589                 if (ret)
590                         goto err;
591         }
592
593         ret = __bch2_trans_do(&trans, NULL, &journal_seq,
594                               BTREE_INSERT_NOUNLOCK,
595                         bch2_rename_trans(&trans,
596                                           src_dir->v.i_ino, &src_dir_u,
597                                           dst_dir->v.i_ino, &dst_dir_u,
598                                           &src_inode_u,
599                                           &dst_inode_u,
600                                           &src_dentry->d_name,
601                                           &dst_dentry->d_name,
602                                           mode));
603         if (unlikely(ret))
604                 goto err;
605
606         BUG_ON(src_inode->v.i_ino != src_inode_u.bi_inum);
607         BUG_ON(dst_inode &&
608                dst_inode->v.i_ino != dst_inode_u.bi_inum);
609
610         bch2_inode_update_after_write(c, src_dir, &src_dir_u,
611                                       ATTR_MTIME|ATTR_CTIME);
612         journal_seq_copy(c, src_dir, journal_seq);
613
614         if (src_dir != dst_dir) {
615                 bch2_inode_update_after_write(c, dst_dir, &dst_dir_u,
616                                               ATTR_MTIME|ATTR_CTIME);
617                 journal_seq_copy(c, dst_dir, journal_seq);
618         }
619
620         bch2_inode_update_after_write(c, src_inode, &src_inode_u,
621                                       ATTR_CTIME);
622         journal_seq_copy(c, src_inode, journal_seq);
623
624         if (dst_inode) {
625                 bch2_inode_update_after_write(c, dst_inode, &dst_inode_u,
626                                               ATTR_CTIME);
627                 journal_seq_copy(c, dst_inode, journal_seq);
628         }
629 err:
630         bch2_trans_exit(&trans);
631
632         bch2_fs_quota_transfer(c, src_inode,
633                                bch_qid(&src_inode->ei_inode),
634                                1 << QTYP_PRJ,
635                                KEY_TYPE_QUOTA_NOCHECK);
636         if (dst_inode)
637                 bch2_fs_quota_transfer(c, dst_inode,
638                                        bch_qid(&dst_inode->ei_inode),
639                                        1 << QTYP_PRJ,
640                                        KEY_TYPE_QUOTA_NOCHECK);
641
642         bch2_unlock_inodes(INODE_UPDATE_LOCK,
643                            src_dir,
644                            dst_dir,
645                            src_inode,
646                            dst_inode);
647
648         return ret;
649 }
650
651 static void bch2_setattr_copy(struct user_namespace *mnt_userns,
652                               struct bch_inode_info *inode,
653                               struct bch_inode_unpacked *bi,
654                               struct iattr *attr)
655 {
656         struct bch_fs *c = inode->v.i_sb->s_fs_info;
657         unsigned int ia_valid = attr->ia_valid;
658
659         if (ia_valid & ATTR_UID)
660                 bi->bi_uid = from_kuid(mnt_userns, attr->ia_uid);
661         if (ia_valid & ATTR_GID)
662                 bi->bi_gid = from_kgid(mnt_userns, attr->ia_gid);
663
664         if (ia_valid & ATTR_SIZE)
665                 bi->bi_size = attr->ia_size;
666
667         if (ia_valid & ATTR_ATIME)
668                 bi->bi_atime = timespec_to_bch2_time(c, attr->ia_atime);
669         if (ia_valid & ATTR_MTIME)
670                 bi->bi_mtime = timespec_to_bch2_time(c, attr->ia_mtime);
671         if (ia_valid & ATTR_CTIME)
672                 bi->bi_ctime = timespec_to_bch2_time(c, attr->ia_ctime);
673
674         if (ia_valid & ATTR_MODE) {
675                 umode_t mode = attr->ia_mode;
676                 kgid_t gid = ia_valid & ATTR_GID
677                         ? attr->ia_gid
678                         : inode->v.i_gid;
679
680                 if (!in_group_p(gid) &&
681                     !capable_wrt_inode_uidgid(mnt_userns, &inode->v, CAP_FSETID))
682                         mode &= ~S_ISGID;
683                 bi->bi_mode = mode;
684         }
685 }
686
687 int bch2_setattr_nonsize(struct user_namespace *mnt_userns,
688                          struct bch_inode_info *inode,
689                          struct iattr *attr)
690 {
691         struct bch_fs *c = inode->v.i_sb->s_fs_info;
692         struct bch_qid qid;
693         struct btree_trans trans;
694         struct btree_iter *inode_iter;
695         struct bch_inode_unpacked inode_u;
696         struct posix_acl *acl = NULL;
697         int ret;
698
699         mutex_lock(&inode->ei_update_lock);
700
701         qid = inode->ei_qid;
702
703         if (attr->ia_valid & ATTR_UID)
704                 qid.q[QTYP_USR] = from_kuid(&init_user_ns, attr->ia_uid);
705
706         if (attr->ia_valid & ATTR_GID)
707                 qid.q[QTYP_GRP] = from_kgid(&init_user_ns, attr->ia_gid);
708
709         ret = bch2_fs_quota_transfer(c, inode, qid, ~0,
710                                      KEY_TYPE_QUOTA_PREALLOC);
711         if (ret)
712                 goto err;
713
714         bch2_trans_init(&trans, c, 0, 0);
715 retry:
716         bch2_trans_begin(&trans);
717         kfree(acl);
718         acl = NULL;
719
720         inode_iter = bch2_inode_peek(&trans, &inode_u, inode->v.i_ino,
721                                      BTREE_ITER_INTENT);
722         ret = PTR_ERR_OR_ZERO(inode_iter);
723         if (ret)
724                 goto btree_err;
725
726         bch2_setattr_copy(mnt_userns, inode, &inode_u, attr);
727
728         if (attr->ia_valid & ATTR_MODE) {
729                 ret = bch2_acl_chmod(&trans, &inode_u, inode_u.bi_mode, &acl);
730                 if (ret)
731                         goto btree_err;
732         }
733
734         ret =   bch2_inode_write(&trans, inode_iter, &inode_u) ?:
735                 bch2_trans_commit(&trans, NULL,
736                                   &inode->ei_journal_seq,
737                                   BTREE_INSERT_NOUNLOCK|
738                                   BTREE_INSERT_NOFAIL);
739 btree_err:
740         bch2_trans_iter_put(&trans, inode_iter);
741
742         if (ret == -EINTR)
743                 goto retry;
744         if (unlikely(ret))
745                 goto err_trans;
746
747         bch2_inode_update_after_write(c, inode, &inode_u, attr->ia_valid);
748
749         if (acl)
750                 set_cached_acl(&inode->v, ACL_TYPE_ACCESS, acl);
751 err_trans:
752         bch2_trans_exit(&trans);
753 err:
754         mutex_unlock(&inode->ei_update_lock);
755
756         return ret;
757 }
758
759 static int bch2_getattr(struct user_namespace *mnt_userns,
760                         const struct path *path, struct kstat *stat,
761                         u32 request_mask, unsigned query_flags)
762 {
763         struct bch_inode_info *inode = to_bch_ei(d_inode(path->dentry));
764         struct bch_fs *c = inode->v.i_sb->s_fs_info;
765
766         stat->dev       = inode->v.i_sb->s_dev;
767         stat->ino       = inode->v.i_ino;
768         stat->mode      = inode->v.i_mode;
769         stat->nlink     = inode->v.i_nlink;
770         stat->uid       = inode->v.i_uid;
771         stat->gid       = inode->v.i_gid;
772         stat->rdev      = inode->v.i_rdev;
773         stat->size      = i_size_read(&inode->v);
774         stat->atime     = inode->v.i_atime;
775         stat->mtime     = inode->v.i_mtime;
776         stat->ctime     = inode->v.i_ctime;
777         stat->blksize   = block_bytes(c);
778         stat->blocks    = inode->v.i_blocks;
779
780         if (request_mask & STATX_BTIME) {
781                 stat->result_mask |= STATX_BTIME;
782                 stat->btime = bch2_time_to_timespec(c, inode->ei_inode.bi_otime);
783         }
784
785         if (inode->ei_inode.bi_flags & BCH_INODE_IMMUTABLE)
786                 stat->attributes |= STATX_ATTR_IMMUTABLE;
787         stat->attributes_mask    |= STATX_ATTR_IMMUTABLE;
788
789         if (inode->ei_inode.bi_flags & BCH_INODE_APPEND)
790                 stat->attributes |= STATX_ATTR_APPEND;
791         stat->attributes_mask    |= STATX_ATTR_APPEND;
792
793         if (inode->ei_inode.bi_flags & BCH_INODE_NODUMP)
794                 stat->attributes |= STATX_ATTR_NODUMP;
795         stat->attributes_mask    |= STATX_ATTR_NODUMP;
796
797         return 0;
798 }
799
800 static int bch2_setattr(struct user_namespace *mnt_userns,
801                         struct dentry *dentry, struct iattr *iattr)
802 {
803         struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
804         int ret;
805
806         lockdep_assert_held(&inode->v.i_rwsem);
807
808         ret = setattr_prepare(mnt_userns, dentry, iattr);
809         if (ret)
810                 return ret;
811
812         return iattr->ia_valid & ATTR_SIZE
813                 ? bch2_truncate(mnt_userns, inode, iattr)
814                 : bch2_setattr_nonsize(mnt_userns, inode, iattr);
815 }
816
817 static int bch2_tmpfile(struct user_namespace *mnt_userns,
818                         struct inode *vdir, struct dentry *dentry, umode_t mode)
819 {
820         struct bch_inode_info *inode =
821                 __bch2_create(mnt_userns, to_bch_ei(vdir), dentry, mode, 0, true);
822
823         if (IS_ERR(inode))
824                 return PTR_ERR(inode);
825
826         d_mark_tmpfile(dentry, &inode->v);
827         d_instantiate(dentry, &inode->v);
828         return 0;
829 }
830
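/*
 * translate one bcachefs extent key into fiemap extents: one entry per
 * pointer for ordinary extents, a single entry for inline data and
 * reservations; offsets and sizes are converted from 512 byte sectors to
 * bytes (<< 9):
 */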
831 static int bch2_fill_extent(struct bch_fs *c,
832                             struct fiemap_extent_info *info,
833                             struct bkey_s_c k, unsigned flags)
834 {
835         if (bkey_extent_is_direct_data(k.k)) {
836                 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
837                 const union bch_extent_entry *entry;
838                 struct extent_ptr_decoded p;
839                 int ret;
840
841                 if (k.k->type == KEY_TYPE_reflink_v)
842                         flags |= FIEMAP_EXTENT_SHARED;
843
844                 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
845                         int flags2 = 0;
846                         u64 offset = p.ptr.offset;
847
848                         if (p.crc.compression_type)
849                                 flags2 |= FIEMAP_EXTENT_ENCODED;
850                         else
851                                 offset += p.crc.offset;
852
853                         if ((offset & (c->opts.block_size - 1)) ||
854                             (k.k->size & (c->opts.block_size - 1)))
855                                 flags2 |= FIEMAP_EXTENT_NOT_ALIGNED;
856
857                         ret = fiemap_fill_next_extent(info,
858                                                 bkey_start_offset(k.k) << 9,
859                                                 offset << 9,
860                                                 k.k->size << 9, flags|flags2);
861                         if (ret)
862                                 return ret;
863                 }
864
865                 return 0;
866         } else if (bkey_extent_is_inline_data(k.k)) {
867                 return fiemap_fill_next_extent(info,
868                                                bkey_start_offset(k.k) << 9,
869                                                0, k.k->size << 9,
870                                                flags|
871                                                FIEMAP_EXTENT_DATA_INLINE);
872         } else if (k.k->type == KEY_TYPE_reservation) {
873                 return fiemap_fill_next_extent(info,
874                                                bkey_start_offset(k.k) << 9,
875                                                0, k.k->size << 9,
876                                                flags|
877                                                FIEMAP_EXTENT_DELALLOC|
878                                                FIEMAP_EXTENT_UNWRITTEN);
879         } else {
880                 BUG();
881         }
882 }
883
884 static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
885                        u64 start, u64 len)
886 {
887         struct bch_fs *c = vinode->i_sb->s_fs_info;
888         struct bch_inode_info *ei = to_bch_ei(vinode);
889         struct btree_trans trans;
890         struct btree_iter *iter;
891         struct bkey_s_c k;
892         struct bkey_buf cur, prev;
893         struct bpos end = POS(ei->v.i_ino, (start + len) >> 9);
894         unsigned offset_into_extent, sectors;
895         bool have_extent = false;
896         int ret = 0;
897
898         ret = fiemap_prep(&ei->v, info, start, &len, FIEMAP_FLAG_SYNC);
899         if (ret)
900                 return ret;
901
902         if (start + len < start)
903                 return -EINVAL;
904
905         bch2_bkey_buf_init(&cur);
906         bch2_bkey_buf_init(&prev);
907         bch2_trans_init(&trans, c, 0, 0);
908
909         iter = bch2_trans_get_iter(&trans, BTREE_ID_extents,
910                                    POS(ei->v.i_ino, start >> 9), 0);
911 retry:
912         while ((k = bch2_btree_iter_peek(iter)).k &&
913                !(ret = bkey_err(k)) &&
914                bkey_cmp(iter->pos, end) < 0) {
915                 enum btree_id data_btree = BTREE_ID_extents;
916
917                 if (!bkey_extent_is_data(k.k) &&
918                     k.k->type != KEY_TYPE_reservation) {
919                         bch2_btree_iter_advance(iter);
920                         continue;
921                 }
922
923                 offset_into_extent      = iter->pos.offset -
924                         bkey_start_offset(k.k);
925                 sectors                 = k.k->size - offset_into_extent;
926
927                 bch2_bkey_buf_reassemble(&cur, c, k);
928
929                 ret = bch2_read_indirect_extent(&trans, &data_btree,
930                                         &offset_into_extent, &cur);
931                 if (ret)
932                         break;
933
934                 k = bkey_i_to_s_c(cur.k);
935                 bch2_bkey_buf_realloc(&prev, c, k.k->u64s);
936
937                 sectors = min(sectors, k.k->size - offset_into_extent);
938
939                 bch2_cut_front(POS(k.k->p.inode,
940                                    bkey_start_offset(k.k) +
941                                    offset_into_extent),
942                                cur.k);
943                 bch2_key_resize(&cur.k->k, sectors);
944                 cur.k->k.p = iter->pos;
945                 cur.k->k.p.offset += cur.k->k.size;
946
947                 if (have_extent) {
948                         ret = bch2_fill_extent(c, info,
949                                         bkey_i_to_s_c(prev.k), 0);
950                         if (ret)
951                                 break;
952                 }
953
954                 bkey_copy(prev.k, cur.k);
955                 have_extent = true;
956
957                 bch2_btree_iter_set_pos(iter,
958                         POS(iter->pos.inode, iter->pos.offset + sectors));
959         }
960
961         if (ret == -EINTR)
962                 goto retry;
963
964         if (!ret && have_extent)
965                 ret = bch2_fill_extent(c, info, bkey_i_to_s_c(prev.k),
966                                        FIEMAP_EXTENT_LAST);
967
968         bch2_trans_iter_put(&trans, iter);
969         ret = bch2_trans_exit(&trans) ?: ret;
970         bch2_bkey_buf_exit(&cur, c);
971         bch2_bkey_buf_exit(&prev, c);
972         return ret < 0 ? ret : 0;
973 }
974
975 static const struct vm_operations_struct bch_vm_ops = {
976         .fault          = bch2_page_fault,
977         .map_pages      = filemap_map_pages,
978         .page_mkwrite   = bch2_page_mkwrite,
979 };
980
981 static int bch2_mmap(struct file *file, struct vm_area_struct *vma)
982 {
983         file_accessed(file);
984
985         vma->vm_ops = &bch_vm_ops;
986         return 0;
987 }
988
989 /* Directories: */
990
991 static loff_t bch2_dir_llseek(struct file *file, loff_t offset, int whence)
992 {
993         return generic_file_llseek_size(file, offset, whence,
994                                         S64_MAX, S64_MAX);
995 }
996
997 static int bch2_vfs_readdir(struct file *file, struct dir_context *ctx)
998 {
999         struct bch_inode_info *inode = file_bch_inode(file);
1000         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1001
1002         if (!dir_emit_dots(file, ctx))
1003                 return 0;
1004
1005         return bch2_readdir(c, inode->v.i_ino, ctx);
1006 }
1007
1008 static const struct file_operations bch_file_operations = {
1009         .llseek         = bch2_llseek,
1010         .read_iter      = bch2_read_iter,
1011         .write_iter     = bch2_write_iter,
1012         .mmap           = bch2_mmap,
1013         .open           = generic_file_open,
1014         .fsync          = bch2_fsync,
1015         .splice_read    = generic_file_splice_read,
1016         .splice_write   = iter_file_splice_write,
1017         .fallocate      = bch2_fallocate_dispatch,
1018         .unlocked_ioctl = bch2_fs_file_ioctl,
1019 #ifdef CONFIG_COMPAT
1020         .compat_ioctl   = bch2_compat_fs_ioctl,
1021 #endif
1022         .remap_file_range = bch2_remap_file_range,
1023 };
1024
1025 static const struct inode_operations bch_file_inode_operations = {
1026         .getattr        = bch2_getattr,
1027         .setattr        = bch2_setattr,
1028         .fiemap         = bch2_fiemap,
1029         .listxattr      = bch2_xattr_list,
1030 #ifdef CONFIG_BCACHEFS_POSIX_ACL
1031         .get_acl        = bch2_get_acl,
1032         .set_acl        = bch2_set_acl,
1033 #endif
1034 };
1035
1036 static const struct inode_operations bch_dir_inode_operations = {
1037         .lookup         = bch2_lookup,
1038         .create         = bch2_create,
1039         .link           = bch2_link,
1040         .unlink         = bch2_unlink,
1041         .symlink        = bch2_symlink,
1042         .mkdir          = bch2_mkdir,
1043         .rmdir          = bch2_unlink,
1044         .mknod          = bch2_mknod,
1045         .rename         = bch2_rename2,
1046         .getattr        = bch2_getattr,
1047         .setattr        = bch2_setattr,
1048         .tmpfile        = bch2_tmpfile,
1049         .listxattr      = bch2_xattr_list,
1050 #ifdef CONFIG_BCACHEFS_POSIX_ACL
1051         .get_acl        = bch2_get_acl,
1052         .set_acl        = bch2_set_acl,
1053 #endif
1054 };
1055
1056 static const struct file_operations bch_dir_file_operations = {
1057         .llseek         = bch2_dir_llseek,
1058         .read           = generic_read_dir,
1059         .iterate_shared = bch2_vfs_readdir,
1060         .fsync          = bch2_fsync,
1061         .unlocked_ioctl = bch2_fs_file_ioctl,
1062 #ifdef CONFIG_COMPAT
1063         .compat_ioctl   = bch2_compat_fs_ioctl,
1064 #endif
1065 };
1066
1067 static const struct inode_operations bch_symlink_inode_operations = {
1068         .get_link       = page_get_link,
1069         .getattr        = bch2_getattr,
1070         .setattr        = bch2_setattr,
1071         .listxattr      = bch2_xattr_list,
1072 #ifdef CONFIG_BCACHEFS_POSIX_ACL
1073         .get_acl        = bch2_get_acl,
1074         .set_acl        = bch2_set_acl,
1075 #endif
1076 };
1077
1078 static const struct inode_operations bch_special_inode_operations = {
1079         .getattr        = bch2_getattr,
1080         .setattr        = bch2_setattr,
1081         .listxattr      = bch2_xattr_list,
1082 #ifdef CONFIG_BCACHEFS_POSIX_ACL
1083         .get_acl        = bch2_get_acl,
1084         .set_acl        = bch2_set_acl,
1085 #endif
1086 };
1087
1088 static const struct address_space_operations bch_address_space_operations = {
1089         .writepage      = bch2_writepage,
1090         .readpage       = bch2_readpage,
1091         .writepages     = bch2_writepages,
1092         .readahead      = bch2_readahead,
1093         .set_page_dirty = __set_page_dirty_nobuffers,
1094         .write_begin    = bch2_write_begin,
1095         .write_end      = bch2_write_end,
1096         .invalidatepage = bch2_invalidatepage,
1097         .releasepage    = bch2_releasepage,
1098         .direct_IO      = noop_direct_IO,
1099 #ifdef CONFIG_MIGRATION
1100         .migratepage    = bch2_migrate_page,
1101 #endif
1102         .error_remove_page = generic_error_remove_page,
1103 };
1104
1105 static struct inode *bch2_nfs_get_inode(struct super_block *sb,
1106                 u64 ino, u32 generation)
1107 {
1108         struct bch_fs *c = sb->s_fs_info;
1109         struct inode *vinode;
1110
1111         if (ino < BCACHEFS_ROOT_INO)
1112                 return ERR_PTR(-ESTALE);
1113
1114         vinode = bch2_vfs_inode_get(c, ino);
1115         if (IS_ERR(vinode))
1116                 return ERR_CAST(vinode);
1117         if (generation && vinode->i_generation != generation) {
1118                 /* we didn't find the right inode */
1119                 iput(vinode);
1120                 return ERR_PTR(-ESTALE);
1121         }
1122         return vinode;
1123 }
1124
1125 static struct dentry *bch2_fh_to_dentry(struct super_block *sb, struct fid *fid,
1126                 int fh_len, int fh_type)
1127 {
1128         return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
1129                                     bch2_nfs_get_inode);
1130 }
1131
1132 static struct dentry *bch2_fh_to_parent(struct super_block *sb, struct fid *fid,
1133                 int fh_len, int fh_type)
1134 {
1135         return generic_fh_to_parent(sb, fid, fh_len, fh_type,
1136                                     bch2_nfs_get_inode);
1137 }
1138
1139 static const struct export_operations bch_export_ops = {
1140         .fh_to_dentry   = bch2_fh_to_dentry,
1141         .fh_to_parent   = bch2_fh_to_parent,
1142         //.get_parent   = bch2_get_parent,
1143 };
1144
1145 static void bch2_vfs_inode_init(struct bch_fs *c,
1146                                 struct bch_inode_info *inode,
1147                                 struct bch_inode_unpacked *bi)
1148 {
1149         bch2_inode_update_after_write(c, inode, bi, ~0);
1150
1151         inode->v.i_blocks       = bi->bi_sectors;
1152         inode->v.i_ino          = bi->bi_inum;
1153         inode->v.i_rdev         = bi->bi_dev;
1154         inode->v.i_generation   = bi->bi_generation;
1155         inode->v.i_size         = bi->bi_size;
1156
1157         inode->ei_flags         = 0;
1158         inode->ei_journal_seq   = 0;
1159         inode->ei_quota_reserved = 0;
1160         inode->ei_qid           = bch_qid(bi);
1161
1162         inode->v.i_mapping->a_ops = &bch_address_space_operations;
1163
1164         switch (inode->v.i_mode & S_IFMT) {
1165         case S_IFREG:
1166                 inode->v.i_op   = &bch_file_inode_operations;
1167                 inode->v.i_fop  = &bch_file_operations;
1168                 break;
1169         case S_IFDIR:
1170                 inode->v.i_op   = &bch_dir_inode_operations;
1171                 inode->v.i_fop  = &bch_dir_file_operations;
1172                 break;
1173         case S_IFLNK:
1174                 inode_nohighmem(&inode->v);
1175                 inode->v.i_op   = &bch_symlink_inode_operations;
1176                 break;
1177         default:
1178                 init_special_inode(&inode->v, inode->v.i_mode, inode->v.i_rdev);
1179                 inode->v.i_op   = &bch_special_inode_operations;
1180                 break;
1181         }
1182 }
1183
1184 static struct inode *bch2_alloc_inode(struct super_block *sb)
1185 {
1186         struct bch_inode_info *inode;
1187
1188         inode = kmem_cache_alloc(bch2_inode_cache, GFP_NOFS);
1189         if (!inode)
1190                 return NULL;
1191
1192         inode_init_once(&inode->v);
1193         mutex_init(&inode->ei_update_lock);
1194         pagecache_lock_init(&inode->ei_pagecache_lock);
1195         mutex_init(&inode->ei_quota_lock);
1196         inode->ei_journal_seq = 0;
1197
1198         return &inode->v;
1199 }
1200
1201 static void bch2_i_callback(struct rcu_head *head)
1202 {
1203         struct inode *vinode = container_of(head, struct inode, i_rcu);
1204         struct bch_inode_info *inode = to_bch_ei(vinode);
1205
1206         kmem_cache_free(bch2_inode_cache, inode);
1207 }
1208
1209 static void bch2_destroy_inode(struct inode *vinode)
1210 {
1211         call_rcu(&vinode->i_rcu, bch2_i_callback);
1212 }
1213
1214 static int inode_update_times_fn(struct bch_inode_info *inode,
1215                                  struct bch_inode_unpacked *bi,
1216                                  void *p)
1217 {
1218         struct bch_fs *c = inode->v.i_sb->s_fs_info;
1219
1220         bi->bi_atime    = timespec_to_bch2_time(c, inode->v.i_atime);
1221         bi->bi_mtime    = timespec_to_bch2_time(c, inode->v.i_mtime);
1222         bi->bi_ctime    = timespec_to_bch2_time(c, inode->v.i_ctime);
1223
1224         return 0;
1225 }
1226
1227 static int bch2_vfs_write_inode(struct inode *vinode,
1228                                 struct writeback_control *wbc)
1229 {
1230         struct bch_fs *c = vinode->i_sb->s_fs_info;
1231         struct bch_inode_info *inode = to_bch_ei(vinode);
1232         int ret;
1233
1234         mutex_lock(&inode->ei_update_lock);
1235         ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
1236                                ATTR_ATIME|ATTR_MTIME|ATTR_CTIME);
1237         mutex_unlock(&inode->ei_update_lock);
1238
1239         return ret;
1240 }
1241
1242 static void bch2_evict_inode(struct inode *vinode)
1243 {
1244         struct bch_fs *c = vinode->i_sb->s_fs_info;
1245         struct bch_inode_info *inode = to_bch_ei(vinode);
1246
1247         truncate_inode_pages_final(&inode->v.i_data);
1248
1249         clear_inode(&inode->v);
1250
1251         BUG_ON(!is_bad_inode(&inode->v) && inode->ei_quota_reserved);
1252
1253         if (!inode->v.i_nlink && !is_bad_inode(&inode->v)) {
1254                 bch2_quota_acct(c, inode->ei_qid, Q_SPC, -((s64) inode->v.i_blocks),
1255                                 KEY_TYPE_QUOTA_WARN);
1256                 bch2_quota_acct(c, inode->ei_qid, Q_INO, -1,
1257                                 KEY_TYPE_QUOTA_WARN);
1258                 bch2_inode_rm(c, inode->v.i_ino, true);
1259         }
1260 }
1261
1262 static int bch2_statfs(struct dentry *dentry, struct kstatfs *buf)
1263 {
1264         struct super_block *sb = dentry->d_sb;
1265         struct bch_fs *c = sb->s_fs_info;
1266         struct bch_fs_usage_short usage = bch2_fs_usage_read_short(c);
1267         unsigned shift = sb->s_blocksize_bits - 9;
1268         /*
1269          * this assumes inodes take up 64 bytes, which is a decent average
1270          * number; capacity and used are in 512 byte sectors, hence << 3:
1271          */
1272         u64 avail_inodes = ((usage.capacity - usage.used) << 3);
1273         u64 fsid;
1274
1275         buf->f_type     = BCACHEFS_STATFS_MAGIC;
1276         buf->f_bsize    = sb->s_blocksize;
1277         buf->f_blocks   = usage.capacity >> shift;
1278         buf->f_bfree    = usage.free >> shift;
1279         buf->f_bavail   = avail_factor(usage.free) >> shift;
1280
1281         buf->f_files    = usage.nr_inodes + avail_inodes;
1282         buf->f_ffree    = avail_inodes;
1283
1284         fsid = le64_to_cpup((void *) c->sb.user_uuid.b) ^
1285                le64_to_cpup((void *) c->sb.user_uuid.b + sizeof(u64));
1286         buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
1287         buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
1288         buf->f_namelen  = BCH_NAME_MAX;
1289
1290         return 0;
1291 }
1292
1293 static int bch2_sync_fs(struct super_block *sb, int wait)
1294 {
1295         struct bch_fs *c = sb->s_fs_info;
1296
1297         if (c->opts.journal_flush_disabled)
1298                 return 0;
1299
1300         if (!wait) {
1301                 bch2_journal_flush_async(&c->journal, NULL);
1302                 return 0;
1303         }
1304
1305         return bch2_journal_flush(&c->journal);
1306 }
1307
1308 static struct bch_fs *bch2_path_to_fs(const char *path)
1309 {
1310         struct bch_fs *c;
1311         dev_t dev;
1312         int ret;
1313
1314         ret = lookup_bdev(path, &dev);
1315         if (ret)
1316                 return ERR_PTR(ret);
1317
1318         c = bch2_dev_to_fs(dev);
1319         if (c)
1320                 closure_put(&c->cl);
1321         return c ?: ERR_PTR(-ENOENT);
1322 }
1323
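/*
 * split a colon separated device list ("dev1:dev2:...") into a NULL
 * terminated array of strings; the strings point into a single
 * kstrdup()ed buffer, so the caller frees devs[0] and then devs itself:
 */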
1324 static char **split_devs(const char *_dev_name, unsigned *nr)
1325 {
1326         char *dev_name = NULL, **devs = NULL, *s;
1327         size_t i, nr_devs = 0;
1328
1329         dev_name = kstrdup(_dev_name, GFP_KERNEL);
1330         if (!dev_name)
1331                 return NULL;
1332
1333         for (s = dev_name; s; s = strchr(s + 1, ':'))
1334                 nr_devs++;
1335
1336         devs = kcalloc(nr_devs + 1, sizeof(const char *), GFP_KERNEL);
1337         if (!devs) {
1338                 kfree(dev_name);
1339                 return NULL;
1340         }
1341
1342         for (i = 0, s = dev_name;
1343              s;
1344              (s = strchr(s, ':')) && (*s++ = '\0'))
1345                 devs[i++] = s;
1346
1347         *nr = nr_devs;
1348         return devs;
1349 }
1350
1351 static int bch2_remount(struct super_block *sb, int *flags, char *data)
1352 {
1353         struct bch_fs *c = sb->s_fs_info;
1354         struct bch_opts opts = bch2_opts_empty();
1355         int ret;
1356
1357         opt_set(opts, read_only, (*flags & SB_RDONLY) != 0);
1358
1359         ret = bch2_parse_mount_opts(c, &opts, data);
1360         if (ret)
1361                 return ret;
1362
1363         if (opts.read_only != c->opts.read_only) {
1364                 down_write(&c->state_lock);
1365
1366                 if (opts.read_only) {
1367                         bch2_fs_read_only(c);
1368
1369                         sb->s_flags |= SB_RDONLY;
1370                 } else {
1371                         ret = bch2_fs_read_write(c);
1372                         if (ret) {
1373                                 bch_err(c, "error going rw: %i", ret);
1374                                 up_write(&c->state_lock);
1375                                 return -EINVAL;
1376                         }
1377
1378                         sb->s_flags &= ~SB_RDONLY;
1379                 }
1380
1381                 c->opts.read_only = opts.read_only;
1382
1383                 up_write(&c->state_lock);
1384         }
1385
1386         if (opts.errors >= 0)
1387                 c->opts.errors = opts.errors;
1388
1389         return ret;
1390 }
1391
1392 static int bch2_show_devname(struct seq_file *seq, struct dentry *root)
1393 {
1394         struct bch_fs *c = root->d_sb->s_fs_info;
1395         struct bch_dev *ca;
1396         unsigned i;
1397         bool first = true;
1398
1399         for_each_online_member(ca, c, i) {
1400                 if (!first)
1401                         seq_putc(seq, ':');
1402                 first = false;
1403                 seq_puts(seq, "/dev/");
1404                 seq_puts(seq, ca->name);
1405         }
1406
1407         return 0;
1408 }
1409
1410 static int bch2_show_options(struct seq_file *seq, struct dentry *root)
1411 {
1412         struct bch_fs *c = root->d_sb->s_fs_info;
1413         enum bch_opt_id i;
1414         char buf[512];
1415
1416         for (i = 0; i < bch2_opts_nr; i++) {
1417                 const struct bch_option *opt = &bch2_opt_table[i];
1418                 u64 v = bch2_opt_get_by_id(&c->opts, i);
1419
1420                 if (!(opt->mode & OPT_MOUNT))
1421                         continue;
1422
1423                 if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
1424                         continue;
1425
1426                 bch2_opt_to_text(&PBUF(buf), c, opt, v,
1427                                  OPT_SHOW_MOUNT_STYLE);
1428                 seq_putc(seq, ',');
1429                 seq_puts(seq, buf);
1430         }
1431
1432         return 0;
1433 }
1434
1435 static void bch2_put_super(struct super_block *sb)
1436 {
1437         struct bch_fs *c = sb->s_fs_info;
1438
1439         __bch2_fs_stop(c);
1440 }
1441
1442 static const struct super_operations bch_super_operations = {
1443         .alloc_inode    = bch2_alloc_inode,
1444         .destroy_inode  = bch2_destroy_inode,
1445         .write_inode    = bch2_vfs_write_inode,
1446         .evict_inode    = bch2_evict_inode,
1447         .sync_fs        = bch2_sync_fs,
1448         .statfs         = bch2_statfs,
1449         .show_devname   = bch2_show_devname,
1450         .show_options   = bch2_show_options,
1451         .remount_fs     = bch2_remount,
1452         .put_super      = bch2_put_super,
1453 #if 0
1454         .freeze_fs      = bch2_freeze,
1455         .unfreeze_fs    = bch2_unfreeze,
1456 #endif
1457 };
1458
1459 static int bch2_set_super(struct super_block *s, void *data)
1460 {
1461         s->s_fs_info = data;
1462         return 0;
1463 }
1464
1465 static int bch2_noset_super(struct super_block *s, void *data)
1466 {
1467         return -EBUSY;
1468 }
1469
1470 static int bch2_test_super(struct super_block *s, void *data)
1471 {
1472         struct bch_fs *c = s->s_fs_info;
1473         struct bch_fs **devs = data;
1474         unsigned i;
1475
1476         if (!c)
1477                 return false;
1478
1479         for (i = 0; devs[i]; i++)
1480                 if (c != devs[i])
1481                         return false;
1482         return true;
1483 }
1484
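/*
 * mount entry point: if the named devices already belong to a running
 * bch_fs we reuse its superblock via sget(), otherwise we open the
 * filesystem, apply the mount options and set up a new superblock and
 * root dentry:
 */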
1485 static struct dentry *bch2_mount(struct file_system_type *fs_type,
1486                                  int flags, const char *dev_name, void *data)
1487 {
1488         struct bch_fs *c;
1489         struct bch_dev *ca;
1490         struct super_block *sb;
1491         struct inode *vinode;
1492         struct bch_opts opts = bch2_opts_empty();
1493         char **devs;
1494         struct bch_fs **devs_to_fs = NULL;
1495         unsigned i, nr_devs;
1496         int ret;
1497
1498         opt_set(opts, read_only, (flags & SB_RDONLY) != 0);
1499
1500         ret = bch2_parse_mount_opts(NULL, &opts, data);
1501         if (ret)
1502                 return ERR_PTR(ret);
1503
1504         if (!dev_name || strlen(dev_name) == 0)
1505                 return ERR_PTR(-EINVAL);
1506
1507         devs = split_devs(dev_name, &nr_devs);
1508         if (!devs)
1509                 return ERR_PTR(-ENOMEM);
1510
1511         devs_to_fs = kcalloc(nr_devs + 1, sizeof(void *), GFP_KERNEL);
1512         if (!devs_to_fs) {
1513                 sb = ERR_PTR(-ENOMEM);
1514                 goto got_sb;
1515         }
1516
1517         for (i = 0; i < nr_devs; i++)
1518                 devs_to_fs[i] = bch2_path_to_fs(devs[i]);
1519
1520         sb = sget(fs_type, bch2_test_super, bch2_noset_super,
1521                   flags|SB_NOSEC, devs_to_fs);
1522         if (!IS_ERR(sb))
1523                 goto got_sb;
1524
1525         c = bch2_fs_open(devs, nr_devs, opts);
1526         if (IS_ERR(c)) {
1527                 sb = ERR_CAST(c);
1528                 goto got_sb;
1529         }
1530
1531         /* Some options can't be parsed until after the fs is started: */
1532         ret = bch2_parse_mount_opts(c, &opts, data);
1533         if (ret) {
1534                 bch2_fs_stop(c);
1535                 sb = ERR_PTR(ret);
1536                 goto got_sb;
1537         }
1538
1539         bch2_opts_apply(&c->opts, opts);
1540
1541         sb = sget(fs_type, NULL, bch2_set_super, flags|SB_NOSEC, c);
1542         if (IS_ERR(sb))
1543                 bch2_fs_stop(c);
1544 got_sb:
1545         kfree(devs_to_fs);
1546         kfree(devs[0]);
1547         kfree(devs);
1548
1549         if (IS_ERR(sb))
1550                 return ERR_CAST(sb);
1551
1552         c = sb->s_fs_info;
1553
1554         if (sb->s_root) {
1555                 if ((flags ^ sb->s_flags) & SB_RDONLY) {
1556                         ret = -EBUSY;
1557                         goto err_put_super;
1558                 }
1559                 goto out;
1560         }
1561
1562         sb->s_blocksize         = block_bytes(c);
1563         sb->s_blocksize_bits    = ilog2(block_bytes(c));
1564         sb->s_maxbytes          = MAX_LFS_FILESIZE;
1565         sb->s_op                = &bch_super_operations;
1566         sb->s_export_op         = &bch_export_ops;
1567 #ifdef CONFIG_BCACHEFS_QUOTA
1568         sb->s_qcop              = &bch2_quotactl_operations;
1569         sb->s_quota_types       = QTYPE_MASK_USR|QTYPE_MASK_GRP|QTYPE_MASK_PRJ;
1570 #endif
1571         sb->s_xattr             = bch2_xattr_handlers;
1572         sb->s_magic             = BCACHEFS_STATFS_MAGIC;
1573         sb->s_time_gran         = c->sb.nsec_per_time_unit;
1574         sb->s_time_min          = div_s64(S64_MIN, c->sb.time_units_per_sec) + 1;
1575         sb->s_time_max          = div_s64(S64_MAX, c->sb.time_units_per_sec);
1576         c->vfs_sb               = sb;
1577         strlcpy(sb->s_id, c->name, sizeof(sb->s_id));
1578
1579         ret = super_setup_bdi(sb);
1580         if (ret)
1581                 goto err_put_super;
1582
1583         sb->s_bdi->ra_pages             = VM_READAHEAD_PAGES;
1584
1585         for_each_online_member(ca, c, i) {
1586                 struct block_device *bdev = ca->disk_sb.bdev;
1587
1588                 /* XXX: create an anonymous device for multi device filesystems */
1589                 sb->s_bdev      = bdev;
1590                 sb->s_dev       = bdev->bd_dev;
1591                 percpu_ref_put(&ca->io_ref);
1592                 break;
1593         }
1594
1595         c->dev = sb->s_dev;
1596
1597 #ifdef CONFIG_BCACHEFS_POSIX_ACL
1598         if (c->opts.acl)
1599                 sb->s_flags     |= SB_POSIXACL;
1600 #endif
1601
1602         vinode = bch2_vfs_inode_get(c, BCACHEFS_ROOT_INO);
1603         if (IS_ERR(vinode)) {
1604                 bch_err(c, "error mounting: error getting root inode %i",
1605                         (int) PTR_ERR(vinode));
1606                 ret = PTR_ERR(vinode);
1607                 goto err_put_super;
1608         }
1609
1610         sb->s_root = d_make_root(vinode);
1611         if (!sb->s_root) {
1612                 bch_err(c, "error mounting: error allocating root dentry");
1613                 ret = -ENOMEM;
1614                 goto err_put_super;
1615         }
1616
1617         sb->s_flags |= SB_ACTIVE;
1618 out:
1619         return dget(sb->s_root);
1620
1621 err_put_super:
1622         deactivate_locked_super(sb);
1623         return ERR_PTR(ret);
1624 }
1625
1626 static void bch2_kill_sb(struct super_block *sb)
1627 {
1628         struct bch_fs *c = sb->s_fs_info;
1629
1630         generic_shutdown_super(sb);
1631         bch2_fs_free(c);
1632 }
1633
1634 static struct file_system_type bcache_fs_type = {
1635         .owner          = THIS_MODULE,
1636         .name           = "bcachefs",
1637         .mount          = bch2_mount,
1638         .kill_sb        = bch2_kill_sb,
1639         .fs_flags       = FS_REQUIRES_DEV,
1640 };
1641
1642 MODULE_ALIAS_FS("bcachefs");
1643
1644 void bch2_vfs_exit(void)
1645 {
1646         unregister_filesystem(&bcache_fs_type);
1647         if (bch2_inode_cache)
1648                 kmem_cache_destroy(bch2_inode_cache);
1649 }
1650
1651 int __init bch2_vfs_init(void)
1652 {
1653         int ret = -ENOMEM;
1654
1655         bch2_inode_cache = KMEM_CACHE(bch_inode_info, 0);
1656         if (!bch2_inode_cache)
1657                 goto err;
1658
1659         ret = register_filesystem(&bcache_fs_type);
1660         if (ret)
1661                 goto err;
1662
1663         return 0;
1664 err:
1665         bch2_vfs_exit();
1666         return ret;
1667 }
1668
1669 #endif /* NO_BCACHEFS_FS */