2 #include "btree_update.h"
/*
 * bkey validation hook for quota keys.
 *
 * The key position encodes the quota type in p.inode (must be below
 * QTYP_NR) and the quota id in p.offset; the value must be exactly one
 * struct bch_quota.  Returns a static error string describing the
 * problem, or (presumably, on an elided line) NULL on success.
 *
 * NOTE(review): several interior lines are elided in this view; the
 * trailing "invalid type" return likely handles an unrecognized key
 * type in a switch — confirm against the full source.
 */
7 static const char *bch2_quota_invalid(const struct bch_fs *c, struct bkey_s_c k)
9 struct bkey_s_c_quota dq;
/* quota type (user/group/project) lives in the inode field of the pos */
11 if (k.k->p.inode >= QTYP_NR)
12 return "invalid quota type";
16 dq = bkey_s_c_to_quota(k);
/* value must be exactly one on-disk quota record */
18 if (bkey_val_bytes(k.k) != sizeof(struct bch_quota))
19 return "incorrect value size";
24 return "invalid type";
28 static const char * const bch2_quota_counters[] = {
/*
 * Pretty-print a quota key's hard/soft limits for each counter into
 * @buf (at most @size bytes; scnprintf bounds every write).
 *
 * NOTE(review): the loop variable `i` is declared on an elided line;
 * the counters are printed back-to-back with no separator between
 * iterations — confirm that is intended in the full source.
 */
33 static void bch2_quota_to_text(struct bch_fs *c, char *buf,
34 size_t size, struct bkey_s_c k)
36 char *out = buf, *end= buf + size;
37 struct bkey_s_c_quota dq;
42 dq = bkey_s_c_to_quota(k);
44 for (i = 0; i < Q_COUNTERS; i++)
45 out += scnprintf(out, end - out, "%s hardlimit %llu softlimit %llu",
46 bch2_quota_counters[i],
47 le64_to_cpu(dq.v->c[i].hardlimit),
48 le64_to_cpu(dq.v->c[i].softlimit));
/* bkey method table wiring the validate/print hooks above into the btree code. */
53 const struct bkey_ops bch2_bkey_quota_ops = {
54 .key_invalid = bch2_quota_invalid,
55 .val_to_text = bch2_quota_to_text,
58 #ifdef CONFIG_BCACHEFS_QUOTA
60 #include <linux/cred.h>
62 #include <linux/quota.h>
/*
 * Return the index of the next enabled quota type at or after @i in the
 * @qtypes bitmask, or QTYP_NR when none remain.
 *
 * NOTE(review): the full source presumably shifts @qtypes right by @i on
 * an elided line before this return (otherwise __ffs would not be
 * relative to @i) — confirm.
 */
64 static inline unsigned __next_qtype(unsigned i, unsigned qtypes)
67 return qtypes ? i + __ffs(qtypes) : QTYP_NR;
/*
 * Iterate over each quota type whose bit is set in _qtypes, setting _i
 * to the type index and _q to the corresponding in-memory quota table.
 * (The surrounding for(...) skeleton of the macro is elided here.)
 */
70 #define for_each_set_qtype(_c, _i, _q, _qtypes) \
72 (_i = __next_qtype(_i, _qtypes), \
73 _q = &(_c)->quotas[_i], \
/*
 * Build a bitmask of the quota types enabled by mount options; each
 * opts field is a 0/1 flag shifted into its QTYP_* bit position.
 */
77 static inline unsigned enabled_qtypes(struct bch_fs *c)
79 return ((c->opts.usrquota << QTYP_USR)|
80 (c->opts.grpquota << QTYP_GRP)|
81 (c->opts.prjquota << QTYP_PRJ));
/*
 * Decide whether hard limits should be bypassed for the current task:
 * CAP_SYS_RESOURCE grants an exemption.
 *
 * NOTE(review): the lines below referencing `dquot` cannot compile as
 * shown (`dquot` is not declared in this function) — they appear to be
 * disabled/dead code carried over from the VFS dquot layer, likely
 * inside an #if 0 in the full source.  Confirm before relying on them.
 */
84 static bool ignore_hardlimit(struct bch_memquota_type *q)
86 if (capable(CAP_SYS_RESOURCE))
89 struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
91 return capable(CAP_SYS_RESOURCE) &&
92 (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
93 !(info->dqi_flags & DQF_ROOT_SQUASH));
/*
 * Quota warning message types; used as the first index into the
 * quota_nl[] netlink code table below.  (The enum's opening declaration
 * is elided from this view.)
 */
99 SOFTWARN, /* Softlimit reached */
100 SOFTLONGWARN, /* Grace time expired */
101 HARDWARN, /* Hardlimit reached */
103 HARDBELOW, /* Usage got below inode hardlimit */
104 SOFTBELOW, /* Usage got below inode softlimit */
/*
 * Map [quota_msg type][counter] to the QUOTA_NL_* codes understood by
 * the VFS quota netlink warning interface (quota_send_warning()).
 */
107 static int quota_nl[][Q_COUNTERS] = {
108 [HARDWARN][Q_SPC] = QUOTA_NL_BHARDWARN,
109 [SOFTLONGWARN][Q_SPC] = QUOTA_NL_BSOFTLONGWARN,
110 [SOFTWARN][Q_SPC] = QUOTA_NL_BSOFTWARN,
111 [HARDBELOW][Q_SPC] = QUOTA_NL_BHARDBELOW,
112 [SOFTBELOW][Q_SPC] = QUOTA_NL_BSOFTBELOW,
114 [HARDWARN][Q_INO] = QUOTA_NL_IHARDWARN,
115 [SOFTLONGWARN][Q_INO] = QUOTA_NL_ISOFTLONGWARN,
116 [SOFTWARN][Q_INO] = QUOTA_NL_ISOFTWARN,
117 [HARDBELOW][Q_INO] = QUOTA_NL_IHARDBELOW,
118 [SOFTBELOW][Q_INO] = QUOTA_NL_ISOFTBELOW,
/*
 * Tail of struct quota_msgs (its head, including the `nr` count field
 * referenced by prepare_msg() below, is elided from this view): a
 * fixed-size buffer of pending warnings, one slot per qtype/counter pair.
 */
126 } m[QTYP_NR * Q_COUNTERS];
/*
 * Queue one quota warning (translated to its netlink code via
 * quota_nl[]) into @msgs for later delivery by flush_warnings().
 * The increment of msgs->nr is on an elided line — presumably it
 * follows the two assignments below.
 */
129 static void prepare_msg(unsigned qtype,
130 enum quota_counters counter,
131 struct quota_msgs *msgs,
132 enum quota_msg msg_type)
134 BUG_ON(msgs->nr >= ARRAY_SIZE(msgs->m));
136 msgs->m[msgs->nr].qtype = qtype;
137 msgs->m[msgs->nr].msg = quota_nl[msg_type][counter];
/*
 * Queue a warning only once per message type: if the bit for @msg_type
 * is already set in qc->warning_issued, skip (the early return and the
 * bit-set are on elided lines).  The `qtype` parameter's declaration
 * (original line 142) is also elided.
 */
141 static void prepare_warning(struct memquota_counter *qc,
143 enum quota_counters counter,
144 struct quota_msgs *msgs,
145 enum quota_msg msg_type)
147 if (qc->warning_issued & (1 << msg_type))
150 prepare_msg(qtype, counter, msgs, msg_type);
/*
 * Deliver all queued quota warnings to userspace via the VFS quota
 * netlink interface.
 *
 * NOTE(review): qid.q[] is indexed by the message number @i, not by the
 * message's own qtype (msgs->m[i].qtype).  That only lines up if
 * messages are queued in qtype order, at most one per qtype — verify
 * against the callers before assuming this is correct.
 */
153 static void flush_warnings(struct bch_qid qid,
154 struct super_block *sb,
155 struct quota_msgs *msgs)
159 for (i = 0; i < msgs->nr; i++)
160 quota_send_warning(make_kqid(&init_user_ns, msgs->m[i].qtype, qid.q[i]),
161 sb->s_dev, msgs->m[i].msg);
/*
 * Check a proposed new usage value against the soft/hard limits of one
 * counter for one quota type, queueing warnings and arming the grace
 * timer as appropriate.  With BCH_QUOTA_PREALLOC the check is enforcing
 * (presumably returning -EDQUOT on elided lines); BCH_QUOTA_NOCHECK
 * skips limit checks entirely.
 *
 * NOTE(review): the `qtype` and `n` (new value, compared against the
 * limits below) parameters and several guard conditions are on elided
 * lines — the structure below is the below-limit / hardlimit /
 * softlimit-grace-expired / softlimit cascade; confirm exact conditions
 * against the full source.
 */
164 static int bch2_quota_check_limit(struct bch_fs *c,
166 struct bch_memquota *mq,
167 struct quota_msgs *msgs,
168 enum quota_counters counter,
170 enum quota_acct_mode mode)
172 struct bch_memquota_type *q = &c->quotas[qtype];
173 struct memquota_counter *qc = &mq->c[counter];
178 if (mode == BCH_QUOTA_NOCHECK)
/* dropped back below the hard limit: clear flag, notify userspace */
182 if (n < qc->hardlimit &&
183 (qc->warning_issued & (1 << HARDWARN))) {
184 qc->warning_issued &= ~(1 << HARDWARN);
185 prepare_msg(qtype, counter, msgs, HARDBELOW);
/* dropped back below the soft limit: clear flag, notify userspace */
188 if (n < qc->softlimit &&
189 (qc->warning_issued & (1 << SOFTWARN))) {
190 qc->warning_issued &= ~(1 << SOFTWARN);
191 prepare_msg(qtype, counter, msgs, SOFTBELOW);
194 qc->warning_issued = 0;
/* over the hard limit (condition partly elided) */
200 !ignore_hardlimit(q)) {
201 if (mode == BCH_QUOTA_PREALLOC)
204 prepare_warning(qc, qtype, counter, msgs, HARDWARN);
/* over the soft limit and the grace period has expired */
210 ktime_get_real_seconds() >= qc->timer &&
211 !ignore_hardlimit(q)) {
212 if (mode == BCH_QUOTA_PREALLOC)
215 prepare_warning(qc, qtype, counter, msgs, SOFTLONGWARN);
/* newly over the soft limit: warn and start the grace timer */
221 if (mode == BCH_QUOTA_PREALLOC)
224 prepare_warning(qc, qtype, counter, msgs, SOFTWARN);
226 /* XXX is this the right one? */
227 qc->timer = ktime_get_real_seconds() +
228 q->limits[counter].warnlimit;
/*
 * Account a usage delta @v of @counter against every enabled quota type
 * for @qid: lock all relevant per-type tables (nested, in qtype order,
 * to give lockdep distinct classes), check limits, apply the delta,
 * then unlock and flush any queued warnings.
 *
 * NOTE(review): error handling between the check and apply loops
 * (e.g. bailing out when a limit check fails or allocation returns
 * NULL) is on elided lines — confirm the delta is not applied after a
 * failed check.
 */
234 int bch2_quota_acct(struct bch_fs *c, struct bch_qid qid,
235 enum quota_counters counter, s64 v,
236 enum quota_acct_mode mode)
238 unsigned qtypes = enabled_qtypes(c);
239 struct bch_memquota_type *q;
240 struct bch_memquota *mq[QTYP_NR];
241 struct quota_msgs msgs;
245 memset(&msgs, 0, sizeof(msgs));
247 for_each_set_qtype(c, i, q, qtypes)
248 mutex_lock_nested(&q->lock, i);
250 for_each_set_qtype(c, i, q, qtypes) {
/* GFP_NOFS: may be called from reclaim-sensitive fs context */
251 mq[i] = genradix_ptr_alloc(&q->table, qid.q[i], GFP_NOFS);
257 ret = bch2_quota_check_limit(c, i, mq[i], &msgs, counter, v, mode);
262 for_each_set_qtype(c, i, q, qtypes)
263 mq[i]->c[counter].v += v;
265 for_each_set_qtype(c, i, q, qtypes)
266 mutex_unlock(&q->lock);
268 flush_warnings(qid, c->vfs_sb, &msgs);
/*
 * Move @v units of @counter from @src_q to @dst_q.  Callers must hold
 * the relevant quota locks.  The BUG_ONs assert the source has enough
 * to give and that the destination does not overflow.
 */
273 static void __bch2_quota_transfer(struct bch_memquota *src_q,
274 struct bch_memquota *dst_q,
275 enum quota_counters counter, s64 v)
277 BUG_ON(v > src_q->c[counter].v);
278 BUG_ON(v + dst_q->c[counter].v < v);
280 src_q->c[counter].v -= v;
281 dst_q->c[counter].v += v;
/*
 * Transfer @space sectors and one inode of accounting from @src to
 * @dst (chown/chgrp/project change): check the destination's limits
 * for both counters, then move the usage under the per-type locks and
 * flush warnings against the destination id.
 *
 * NOTE(review): the `dst` parameter declaration (original line 285) and
 * the error-unwind between check and transfer are on elided lines; the
 * src_q/dst_q arrays are sized 3, presumably matching QTYP_NR — confirm.
 */
284 int bch2_quota_transfer(struct bch_fs *c, unsigned qtypes,
286 struct bch_qid src, u64 space)
288 struct bch_memquota_type *q;
289 struct bch_memquota *src_q[3], *dst_q[3];
290 struct quota_msgs msgs;
294 qtypes &= enabled_qtypes(c);
296 memset(&msgs, 0, sizeof(msgs));
/* nested lock order by qtype index, as in bch2_quota_acct() */
298 for_each_set_qtype(c, i, q, qtypes)
299 mutex_lock_nested(&q->lock, i);
301 for_each_set_qtype(c, i, q, qtypes) {
302 src_q[i] = genradix_ptr_alloc(&q->table, src.q[i], GFP_NOFS);
303 dst_q[i] = genradix_ptr_alloc(&q->table, dst.q[i], GFP_NOFS);
305 if (!src_q[i] || !dst_q[i]) {
/* would the destination exceed its space / inode limits? */
310 ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_SPC,
311 dst_q[i]->c[Q_SPC].v + space,
316 ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_INO,
317 dst_q[i]->c[Q_INO].v + 1,
323 for_each_set_qtype(c, i, q, qtypes) {
324 __bch2_quota_transfer(src_q[i], dst_q[i], Q_SPC, space);
325 __bch2_quota_transfer(src_q[i], dst_q[i], Q_INO, 1);
329 for_each_set_qtype(c, i, q, qtypes)
330 mutex_unlock(&q->lock);
332 flush_warnings(dst, c->vfs_sb, &msgs);
/*
 * Load one on-disk quota key into the in-memory quota tables: look up
 * (allocating if needed) the slot for this id and copy the per-counter
 * hard/soft limits.  The NULL-allocation check after genradix_ptr_alloc
 * is on an elided line (the early mutex_unlock at original line 354 is
 * its error path).
 */
337 static int __bch2_quota_set(struct bch_fs *c, struct bkey_s_c k)
339 struct bkey_s_c_quota dq;
340 struct bch_memquota_type *q;
341 struct bch_memquota *mq;
/* callers guarantee the key passed bch2_quota_invalid() */
344 BUG_ON(k.k->p.inode >= QTYP_NR);
348 dq = bkey_s_c_to_quota(k);
349 q = &c->quotas[k.k->p.inode];
351 mutex_lock(&q->lock);
352 mq = genradix_ptr_alloc(&q->table, k.k->p.offset, GFP_KERNEL);
354 mutex_unlock(&q->lock);
358 for (i = 0; i < Q_COUNTERS; i++) {
359 mq->c[i].hardlimit = le64_to_cpu(dq.v->c[i].hardlimit);
360 mq->c[i].softlimit = le64_to_cpu(dq.v->c[i].softlimit);
363 mutex_unlock(&q->lock);
/*
 * Walk the quotas btree for one quota type and populate the in-memory
 * table from every key found.  Keys are namespaced by type in the inode
 * field of the position, so iteration stops at the first key of a
 * different type.  Returns the iterator's error, if any, else @ret.
 */
369 static int bch2_quota_init_type(struct bch_fs *c, enum quota_types type)
371 struct btree_iter iter;
375 for_each_btree_key(&iter, c, BTREE_ID_QUOTAS, POS(type, 0),
376 BTREE_ITER_PREFETCH, k) {
377 if (k.k->p.inode != type)
380 ret = __bch2_quota_set(c, k);
385 return bch2_btree_iter_unlock(&iter) ?: ret;
/* Free the in-memory quota tables for every quota type on shutdown. */
388 void bch2_fs_quota_exit(struct bch_fs *c)
392 for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
393 genradix_free(&c->quotas[i].table);
/* Initialize the per-quota-type locks at filesystem setup. */
396 void bch2_fs_quota_init(struct bch_fs *c)
400 for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
401 mutex_init(&c->quotas[i].lock);
/*
 * Copy per-type, per-counter grace-time and warning limits from the
 * superblock quota field into the in-memory quota tables.  Caller holds
 * c->sb_lock.  The NULL check on sb_quota is on an elided line —
 * presumably this returns early if the sb field is absent.
 */
404 static void bch2_sb_quota_read(struct bch_fs *c)
406 struct bch_sb_field_quota *sb_quota;
409 sb_quota = bch2_sb_get_quota(c->disk_sb);
413 for (i = 0; i < QTYP_NR; i++) {
414 struct bch_memquota_type *q = &c->quotas[i];
416 for (j = 0; j < Q_COUNTERS; j++) {
417 q->limits[j].timelimit =
418 le32_to_cpu(sb_quota->q[i].c[j].timelimit);
419 q->limits[j].warnlimit =
420 le32_to_cpu(sb_quota->q[i].c[j].warnlimit);
/*
 * Mount-time quota initialization: read limits from the superblock,
 * load the quota btree for each enabled type, then walk every inode
 * and rebuild usage accounting (sectors and inode counts) from scratch.
 * The quota_acct calls presumably use a no-check mode (arguments are on
 * elided lines) since this is reconstruction, not new allocation.
 */
425 int bch2_fs_quota_read(struct bch_fs *c)
427 unsigned i, qtypes = enabled_qtypes(c);
428 struct bch_memquota_type *q;
429 struct btree_iter iter;
430 struct bch_inode_unpacked u;
434 mutex_lock(&c->sb_lock);
435 bch2_sb_quota_read(c);
436 mutex_unlock(&c->sb_lock);
438 for_each_set_qtype(c, i, q, qtypes) {
439 ret = bch2_quota_init_type(c, i);
444 for_each_btree_key(&iter, c, BTREE_ID_INODES, POS_MIN,
445 BTREE_ITER_PREFETCH, k) {
448 ret = bch2_inode_unpack(bkey_s_c_to_inode(k), &u);
452 bch2_quota_acct(c, bch_qid(&u), Q_SPC, u.bi_sectors,
454 bch2_quota_acct(c, bch_qid(&u), Q_INO, 1,
458 return bch2_btree_iter_unlock(&iter) ?: ret;
461 /* Enable/disable/delete quotas for an entire filesystem: */
/*
 * quotactl_ops.quota_enable: turn on quota enforcement flags in the
 * superblock.  Accounting itself cannot be toggled at runtime (it is a
 * mount option), and project quota enforcement is rejected outright.
 * The error returns after each guard and the sb write/commit are on
 * elided lines.
 */
463 static int bch2_quota_enable(struct super_block *sb, unsigned uflags)
465 struct bch_fs *c = sb->s_fs_info;
467 if (sb->s_flags & MS_RDONLY)
470 /* Accounting must be enabled at mount time: */
471 if (uflags & (FS_QUOTA_UDQ_ACCT|FS_QUOTA_GDQ_ACCT|FS_QUOTA_PDQ_ACCT))
474 /* Can't enable enforcement without accounting: */
475 if ((uflags & FS_QUOTA_UDQ_ENFD) && !c->opts.usrquota)
478 if ((uflags & FS_QUOTA_GDQ_ENFD) && !c->opts.grpquota)
481 if (uflags & FS_QUOTA_PDQ_ENFD)
484 mutex_lock(&c->sb_lock);
485 if (uflags & FS_QUOTA_UDQ_ENFD)
486 SET_BCH_SB_USRQUOTA(c->disk_sb, true);
488 if (uflags & FS_QUOTA_GDQ_ENFD)
489 SET_BCH_SB_GRPQUOTA(c->disk_sb, true);
491 if (uflags & FS_QUOTA_PDQ_ENFD)
492 SET_BCH_SB_PRJQUOTA(c->disk_sb, true);
496 mutex_unlock(&c->sb_lock);
/*
 * quotactl_ops.quota_disable: clear quota enforcement flags in the
 * superblock (mirror of bch2_quota_enable; sb write/commit elided).
 */
501 static int bch2_quota_disable(struct super_block *sb, unsigned uflags)
503 struct bch_fs *c = sb->s_fs_info;
505 if (sb->s_flags & MS_RDONLY)
508 mutex_lock(&c->sb_lock);
509 if (uflags & FS_QUOTA_UDQ_ENFD)
510 SET_BCH_SB_USRQUOTA(c->disk_sb, false);
512 if (uflags & FS_QUOTA_GDQ_ENFD)
513 SET_BCH_SB_GRPQUOTA(c->disk_sb, false);
515 if (uflags & FS_QUOTA_PDQ_ENFD)
516 SET_BCH_SB_PRJQUOTA(c->disk_sb, false);
519 mutex_unlock(&c->sb_lock);
/*
 * quotactl_ops.rm_xquota: delete all on-disk quota keys for the
 * requested types by removing each type's range [POS(type,0),
 * POS(type+1,0)) from the quotas btree.
 *
 * NOTE(review): each `if (c->opts.usrquota)` style guard presumably
 * rejects removal while that quota type is still active (error return
 * on an elided line) — confirm against the full source.
 */
524 static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
526 struct bch_fs *c = sb->s_fs_info;
529 if (sb->s_flags & MS_RDONLY)
532 if (uflags & FS_USER_QUOTA) {
533 if (c->opts.usrquota)
536 ret = bch2_btree_delete_range(c, BTREE_ID_QUOTAS,
538 POS(QTYP_USR + 1, 0),
539 ZERO_VERSION, NULL, NULL, NULL);
544 if (uflags & FS_GROUP_QUOTA) {
545 if (c->opts.grpquota)
548 ret = bch2_btree_delete_range(c, BTREE_ID_QUOTAS,
550 POS(QTYP_GRP + 1, 0),
551 ZERO_VERSION, NULL, NULL, NULL);
556 if (uflags & FS_PROJ_QUOTA) {
557 if (c->opts.prjquota)
560 ret = bch2_btree_delete_range(c, BTREE_ID_QUOTAS,
562 POS(QTYP_PRJ + 1, 0),
563 ZERO_VERSION, NULL, NULL, NULL);
572  * Return quota status information, such as enforcement flags and quota file inodes.
/*
 * quotactl_ops.get_state: report per-type quota status.  Every type is
 * marked QCI_SYSFILE (quotas live in a system btree, not a visible
 * file); enabled types additionally report accounting (and, on elided
 * lines, presumably enforcement) flags plus their grace/warn limits.
 */
575 static int bch2_quota_get_state(struct super_block *sb, struct qc_state *state)
577 struct bch_fs *c = sb->s_fs_info;
578 unsigned qtypes = enabled_qtypes(c);
581 memset(state, 0, sizeof(*state));
583 for (i = 0; i < QTYP_NR; i++) {
584 state->s_state[i].flags |= QCI_SYSFILE;
586 if (!(qtypes & (1 << i)))
589 state->s_state[i].flags |= QCI_ACCT_ENABLED;
591 state->s_state[i].spc_timelimit = c->quotas[i].limits[Q_SPC].timelimit;
592 state->s_state[i].spc_warnlimit = c->quotas[i].limits[Q_SPC].warnlimit;
594 state->s_state[i].ino_timelimit = c->quotas[i].limits[Q_INO].timelimit;
595 state->s_state[i].ino_warnlimit = c->quotas[i].limits[Q_INO].warnlimit;
602 * Adjust quota timers & warnings
/*
 * quotactl_ops.set_info: update grace-time and warning limits for one
 * quota type.  Only the timer/warn fields are supported; anything else
 * in i_fieldmask is rejected.  The new values are written to the
 * superblock quota field (resizing it if absent), re-read into the
 * in-memory tables, and (on an elided line, presumably) committed with
 * a superblock write.
 */
604 static int bch2_quota_set_info(struct super_block *sb, int type,
605 struct qc_info *info)
607 struct bch_fs *c = sb->s_fs_info;
608 struct bch_sb_field_quota *sb_quota;
609 struct bch_memquota_type *q;
611 if (sb->s_flags & MS_RDONLY)
/* type must be one of the quota types enabled at mount */
617 if (!((1 << type) & enabled_qtypes(c)))
620 if (info->i_fieldmask &
621 ~(QC_SPC_TIMER|QC_INO_TIMER|QC_SPC_WARNS|QC_INO_WARNS))
624 q = &c->quotas[type];
626 mutex_lock(&c->sb_lock);
627 sb_quota = bch2_sb_get_quota(c->disk_sb);
/* sb field missing: allocate it (NULL check presumably elided) */
629 sb_quota = bch2_fs_sb_resize_quota(c, sizeof(*sb_quota) / sizeof(u64));
634 if (info->i_fieldmask & QC_SPC_TIMER)
635 sb_quota->q[type].c[Q_SPC].timelimit =
636 cpu_to_le32(info->i_spc_timelimit);
638 if (info->i_fieldmask & QC_SPC_WARNS)
639 sb_quota->q[type].c[Q_SPC].warnlimit =
640 cpu_to_le32(info->i_spc_warnlimit);
642 if (info->i_fieldmask & QC_INO_TIMER)
643 sb_quota->q[type].c[Q_INO].timelimit =
644 cpu_to_le32(info->i_ino_timelimit);
646 if (info->i_fieldmask & QC_INO_WARNS)
647 sb_quota->q[type].c[Q_INO].warnlimit =
648 cpu_to_le32(info->i_ino_warnlimit);
/* refresh the in-memory limits from the updated sb field */
650 bch2_sb_quota_read(c);
653 mutex_unlock(&c->sb_lock);
658 /* Get/set individual quotas: */
/*
 * Fill a VFS qc_dqblk from an in-memory quota entry.  Space values are
 * stored in 512-byte sectors internally; << 9 converts them to the
 * byte units the VFS expects.  Inode counts are passed through as-is.
 */
660 static void __bch2_quota_get(struct qc_dqblk *dst, struct bch_memquota *src)
662 dst->d_space = src->c[Q_SPC].v << 9;
663 dst->d_spc_hardlimit = src->c[Q_SPC].hardlimit << 9;
664 dst->d_spc_softlimit = src->c[Q_SPC].softlimit << 9;
665 dst->d_spc_timer = src->c[Q_SPC].timer;
666 dst->d_spc_warns = src->c[Q_SPC].warns;
668 dst->d_ino_count = src->c[Q_INO].v;
669 dst->d_ino_hardlimit = src->c[Q_INO].hardlimit;
670 dst->d_ino_softlimit = src->c[Q_INO].softlimit;
671 dst->d_ino_timer = src->c[Q_INO].timer;
672 dst->d_ino_warns = src->c[Q_INO].warns;
/*
 * quotactl_ops.get_dqblk: look up one quota entry by kqid and copy it
 * out; @qdq is zeroed first so a missing entry reads as all-zero.
 * NOTE(review): genradix_ptr() can return NULL — the `if (mq)` guard
 * before __bch2_quota_get() is presumably on an elided line; confirm.
 */
675 static int bch2_get_quota(struct super_block *sb, struct kqid kqid,
676 struct qc_dqblk *qdq)
678 struct bch_fs *c = sb->s_fs_info;
679 struct bch_memquota_type *q = &c->quotas[kqid.type];
680 qid_t qid = from_kqid(&init_user_ns, kqid);
681 struct bch_memquota *mq;
683 memset(qdq, 0, sizeof(*qdq));
685 mutex_lock(&q->lock);
686 mq = genradix_ptr(&q->table, qid);
688 __bch2_quota_get(qdq, mq);
689 mutex_unlock(&q->lock);
/*
 * quotactl_ops.get_nextdqblk: scan forward from *kqid for the next
 * quota entry that is not all-zero (compared against ZERO_PAGE as a
 * cheap zero-block check), copy it out, and update *kqid to its id.
 * Returns (on elided lines, presumably) -ENOENT when the scan runs off
 * the end of the table.
 *
 * NOTE(review): the incoming id is resolved with init_user_ns but the
 * outgoing kqid is built with current_user_ns() — verify this asymmetry
 * is intentional.
 */
694 static int bch2_get_next_quota(struct super_block *sb, struct kqid *kqid,
695 struct qc_dqblk *qdq)
697 struct bch_fs *c = sb->s_fs_info;
698 struct bch_memquota_type *q = &c->quotas[kqid->type];
699 qid_t qid = from_kqid(&init_user_ns, *kqid);
700 struct genradix_iter iter = genradix_iter_init(&q->table, qid);
701 struct bch_memquota *mq;
704 mutex_lock(&q->lock);
706 while ((mq = genradix_iter_peek(&iter, &q->table))) {
707 if (memcmp(mq, page_address(ZERO_PAGE(0)), sizeof(*mq))) {
708 __bch2_quota_get(qdq, mq);
709 *kqid = make_kqid(current_user_ns(), kqid->type, iter.pos);
713 genradix_iter_advance(&iter, &q->table);
718 mutex_unlock(&q->lock);
/*
 * quotactl_ops.set_dqblk: update a quota entry's limits.  Reads the
 * existing on-disk key (if any) so unspecified fields are preserved,
 * applies the fields selected by d_fieldmask (byte limits are converted
 * to 512-byte sectors with >> 9), writes the key back via a btree
 * insert, then mirrors the new limits into the in-memory table with
 * __bch2_quota_set().  Error returns between steps are on elided lines.
 */
722 static int bch2_set_quota(struct super_block *sb, struct kqid qid,
723 struct qc_dqblk *qdq)
725 struct bch_fs *c = sb->s_fs_info;
726 struct btree_iter iter;
728 struct bkey_i_quota new_quota;
731 if (sb->s_flags & MS_RDONLY)
734 bkey_quota_init(&new_quota.k_i);
/* key position: quota type in inode, id in offset */
735 new_quota.k.p = POS(qid.type, from_kqid(&init_user_ns, qid));
737 bch2_btree_iter_init(&iter, c, BTREE_ID_QUOTAS, new_quota.k.p,
738 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
739 k = bch2_btree_iter_peek_slot(&iter);
741 ret = btree_iter_err(k);
/* start from the existing value so untouched fields carry over */
747 new_quota.v = *bkey_s_c_to_quota(k).v;
751 if (qdq->d_fieldmask & QC_SPC_SOFT)
752 new_quota.v.c[Q_SPC].softlimit = cpu_to_le64(qdq->d_spc_softlimit >> 9);
753 if (qdq->d_fieldmask & QC_SPC_HARD)
754 new_quota.v.c[Q_SPC].hardlimit = cpu_to_le64(qdq->d_spc_hardlimit >> 9);
756 if (qdq->d_fieldmask & QC_INO_SOFT)
757 new_quota.v.c[Q_INO].softlimit = cpu_to_le64(qdq->d_ino_softlimit);
758 if (qdq->d_fieldmask & QC_INO_HARD)
759 new_quota.v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit);
761 ret = bch2_btree_insert_at(c, NULL, NULL, NULL, 0,
762 BTREE_INSERT_ENTRY(&iter, &new_quota.k_i));
763 bch2_btree_iter_unlock(&iter);
768 ret = __bch2_quota_set(c, bkey_i_to_s_c(&new_quota.k_i));
/* VFS quotactl method table exported to the bcachefs super_block. */
773 const struct quotactl_ops bch2_quotactl_operations = {
774 .quota_enable = bch2_quota_enable,
775 .quota_disable = bch2_quota_disable,
776 .rm_xquota = bch2_quota_remove,
778 .get_state = bch2_quota_get_state,
779 .set_info = bch2_quota_set_info,
781 .get_dqblk = bch2_get_quota,
782 .get_nextdqblk = bch2_get_next_quota,
783 .set_dqblk = bch2_set_quota,
786 #endif /* CONFIG_BCACHEFS_QUOTA */