1 // SPDX-License-Identifier: GPL-2.0
3 #include "btree_update.h"
/*
 * Superblock-field validator for the quota section: the field must be exactly
 * sizeof(struct bch_sb_field_quota) bytes.  Returns an error string on
 * failure; the NULL-on-success return is not visible in this excerpt
 * (lines appear elided).
 */
8 static const char *bch2_sb_validate_quota(struct bch_sb *sb,
9 struct bch_sb_field *f)
11 struct bch_sb_field_quota *q = field_to_type(f, quota);
13 if (vstruct_bytes(&q->field) != sizeof(*q))
14 return "invalid field quota: wrong size";
/* Ops vtable hooking the quota validator into the superblock-field machinery. */
19 const struct bch_sb_field_ops bch_sb_field_ops_quota = {
20 .validate = bch2_sb_validate_quota,
/*
 * Bkey validator for quota keys: p.inode encodes the quota type (must be a
 * valid enum quota_types) and the value must be a struct bch_quota.  Returns
 * an error string describing the problem; the NULL-on-success return is not
 * visible in this excerpt.
 */
23 const char *bch2_quota_invalid(const struct bch_fs *c, struct bkey_s_c k)
25 if (k.k->p.inode >= QTYP_NR)
26 return "invalid quota type";
28 if (bkey_val_bytes(k.k) != sizeof(struct bch_quota))
29 return "incorrect value size";
/* Human-readable names for the Q_* counters; initializer entries elided here. */
34 static const char * const bch2_quota_counters[] = {
/*
 * Pretty-print a quota key: one "<counter> hardlimit <n> softlimit <n>"
 * clause per counter.  NOTE(review): the format string has no trailing
 * separator, so successive loop iterations run together in the output --
 * confirm this is intended.
 */
39 void bch2_quota_to_text(struct printbuf *out, struct bch_fs *c,
42 struct bkey_s_c_quota dq = bkey_s_c_to_quota(k);
45 for (i = 0; i < Q_COUNTERS; i++)
46 pr_buf(out, "%s hardlimit %llu softlimit %llu",
47 bch2_quota_counters[i],
48 le64_to_cpu(dq.v->c[i].hardlimit),
49 le64_to_cpu(dq.v->c[i].softlimit));
52 #ifdef CONFIG_BCACHEFS_QUOTA
54 #include <linux/cred.h>
56 #include <linux/quota.h>
/*
 * Return the index of the next enabled quota type at or after i, or QTYP_NR
 * when none remain.  Assumes qtypes has already been shifted so bit 0
 * corresponds to type i -- the shift is not visible in this excerpt; confirm
 * against the full source.
 */
58 static inline unsigned __next_qtype(unsigned i, unsigned qtypes)
61 return qtypes ? i + __ffs(qtypes) : QTYP_NR;
/*
 * Iterate _i over the quota types whose bit is set in _qtypes, pointing _q at
 * the matching (_c)->quotas[] slot.  The surrounding for-loop header and
 * termination condition are elided from this excerpt.
 */
64 #define for_each_set_qtype(_c, _i, _q, _qtypes) \
66 (_i = __next_qtype(_i, _qtypes), \
67 _q = &(_c)->quotas[_i], \
/*
 * Whether hard limits should be bypassed for the current task (i.e. a task
 * with CAP_SYS_RESOURCE).  NOTE(review): 'dquot' is not declared anywhere in
 * this scope -- as shown this cannot compile; upstream keeps the dqi_flags
 * branch compiled out, so the surrounding #if 0/#endif lines were presumably
 * lost in extraction.  Confirm against the full source.
 */
71 static bool ignore_hardlimit(struct bch_memquota_type *q)
73 if (capable(CAP_SYS_RESOURCE))
76 struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
78 return capable(CAP_SYS_RESOURCE) &&
79 (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
80 !(info->dqi_flags & DQF_ROOT_SQUASH));
/*
 * Warning classes raised when usage crosses a limit (or drops back below
 * one); forwarded to userspace via the quota netlink codes in quota_nl[].
 * The enum's opening lines are elided from this excerpt.
 */
86 SOFTWARN, /* Softlimit reached */
87 SOFTLONGWARN, /* Grace time expired */
88 HARDWARN, /* Hardlimit reached */
90 HARDBELOW, /* Usage got below inode hardlimit */
91 SOFTBELOW, /* Usage got below inode softlimit */
/* Map (warning class, counter) to the QUOTA_NL_* netlink warning code. */
94 static int quota_nl[][Q_COUNTERS] = {
95 [HARDWARN][Q_SPC] = QUOTA_NL_BHARDWARN,
96 [SOFTLONGWARN][Q_SPC] = QUOTA_NL_BSOFTLONGWARN,
97 [SOFTWARN][Q_SPC] = QUOTA_NL_BSOFTWARN,
98 [HARDBELOW][Q_SPC] = QUOTA_NL_BHARDBELOW,
99 [SOFTBELOW][Q_SPC] = QUOTA_NL_BSOFTBELOW,
101 [HARDWARN][Q_INO] = QUOTA_NL_IHARDWARN,
102 [SOFTLONGWARN][Q_INO] = QUOTA_NL_ISOFTLONGWARN,
103 [SOFTWARN][Q_INO] = QUOTA_NL_ISOFTWARN,
104 [HARDBELOW][Q_INO] = QUOTA_NL_IHARDBELOW,
105 [SOFTBELOW][Q_INO] = QUOTA_NL_ISOFTBELOW,
/*
 * Tail of struct quota_msgs (opening lines elided): one pending message slot
 * per (quota type, counter) pair -- the worst case for a single operation.
 */
113 } m[QTYP_NR * Q_COUNTERS];
/*
 * Append one netlink warning message to msgs.  NOTE(review): the msgs->nr
 * increment is not visible in this excerpt; without it the BUG_ON and slot
 * indexing would be meaningless -- presumed elided.
 */
116 static void prepare_msg(unsigned qtype,
117 enum quota_counters counter,
118 struct quota_msgs *msgs,
119 enum quota_msg msg_type)
121 BUG_ON(msgs->nr >= ARRAY_SIZE(msgs->m));
123 msgs->m[msgs->nr].qtype = qtype;
124 msgs->m[msgs->nr].msg = quota_nl[msg_type][counter];
/*
 * Queue a warning unless this class was already issued for the counter
 * (debounced via qc->warning_issued).  The line that sets the issued bit
 * is not visible in this excerpt.
 */
128 static void prepare_warning(struct memquota_counter *qc,
130 enum quota_counters counter,
131 struct quota_msgs *msgs,
132 enum quota_msg msg_type)
134 if (qc->warning_issued & (1 << msg_type))
137 prepare_msg(qtype, counter, msgs, msg_type);
140 static void flush_warnings(struct bch_qid qid,
141 struct super_block *sb,
142 struct quota_msgs *msgs)
146 for (i = 0; i < msgs->nr; i++)
147 quota_send_warning(make_kqid(&init_user_ns, msgs->m[i].qtype, qid.q[i]),
148 sb->s_dev, msgs->m[i].msg);
/*
 * Check a proposed new counter value against the in-memory soft/hard limits
 * for one quota type, queueing warnings and clearing below-limit state as
 * usage drops.  Returns 0 normally; for KEY_TYPE_QUOTA_PREALLOC the
 * over-limit branches fail the allocation (return statements elided from
 * this excerpt).  Caller must hold q->lock.
 */
151 static int bch2_quota_check_limit(struct bch_fs *c,
153 struct bch_memquota *mq,
154 struct quota_msgs *msgs,
155 enum quota_counters counter,
157 enum quota_acct_mode mode)
159 struct bch_memquota_type *q = &c->quotas[qtype];
160 struct memquota_counter *qc = &mq->c[counter];
165 if (mode == KEY_TYPE_QUOTA_NOCHECK)
/* Usage dropped back below the hard limit: notify and clear the flag. */
169 if (n < qc->hardlimit &&
170 (qc->warning_issued & (1 << HARDWARN))) {
171 qc->warning_issued &= ~(1 << HARDWARN);
172 prepare_msg(qtype, counter, msgs, HARDBELOW);
/* Likewise for the soft limit. */
175 if (n < qc->softlimit &&
176 (qc->warning_issued & (1 << SOFTWARN))) {
177 qc->warning_issued &= ~(1 << SOFTWARN);
178 prepare_msg(qtype, counter, msgs, SOFTBELOW);
181 qc->warning_issued = 0;
187 !ignore_hardlimit(q)) {
188 if (mode == KEY_TYPE_QUOTA_PREALLOC)
191 prepare_warning(qc, qtype, counter, msgs, HARDWARN);
/* Soft limit exceeded and the grace period has expired. */
197 ktime_get_real_seconds() >= qc->timer &&
198 !ignore_hardlimit(q)) {
199 if (mode == KEY_TYPE_QUOTA_PREALLOC)
202 prepare_warning(qc, qtype, counter, msgs, SOFTLONGWARN);
/* Soft limit newly exceeded: warn and start the grace timer. */
208 if (mode == KEY_TYPE_QUOTA_PREALLOC)
211 prepare_warning(qc, qtype, counter, msgs, SOFTWARN);
213 /* XXX is this the right one? */
214 qc->timer = ktime_get_real_seconds() +
215 q->limits[counter].warnlimit;
/*
 * Account v units of @counter to @qid for every enabled quota type, checking
 * limits first.  Per-type mutexes are taken in ascending qtype order via
 * mutex_lock_nested() to satisfy lockdep; queued warnings are flushed only
 * after all locks are dropped.  Allocation-failure and limit-failure
 * unwinding lines are elided from this excerpt.
 */
221 int bch2_quota_acct(struct bch_fs *c, struct bch_qid qid,
222 enum quota_counters counter, s64 v,
223 enum quota_acct_mode mode)
225 unsigned qtypes = enabled_qtypes(c);
226 struct bch_memquota_type *q;
227 struct bch_memquota *mq[QTYP_NR];
228 struct quota_msgs msgs;
232 memset(&msgs, 0, sizeof(msgs));
234 for_each_set_qtype(c, i, q, qtypes)
235 mutex_lock_nested(&q->lock, i);
237 for_each_set_qtype(c, i, q, qtypes) {
238 mq[i] = genradix_ptr_alloc(&q->table, qid.q[i], GFP_NOFS);
244 ret = bch2_quota_check_limit(c, i, mq[i], &msgs, counter, v, mode);
/* All checks passed: apply the delta to every type. */
249 for_each_set_qtype(c, i, q, qtypes)
250 mq[i]->c[counter].v += v;
252 for_each_set_qtype(c, i, q, qtypes)
253 mutex_unlock(&q->lock);
255 flush_warnings(qid, c->vfs_sb, &msgs);
/*
 * Move v units of one counter from src_q to dst_q.  The BUG_ONs assert no
 * underflow of the source and no overflow of the destination; callers must
 * hold the relevant per-type lock.
 */
260 static void __bch2_quota_transfer(struct bch_memquota *src_q,
261 struct bch_memquota *dst_q,
262 enum quota_counters counter, s64 v)
264 BUG_ON(v > src_q->c[counter].v);
265 BUG_ON(v + dst_q->c[counter].v < v);
267 src_q->c[counter].v -= v;
268 dst_q->c[counter].v += v;
/*
 * Transfer 'space' sectors plus one inode from src to dst for each type in
 * qtypes (e.g. on chown/chgrp).  Limits are checked only on the destination.
 * NOTE(review): the 'dst' parameter declaration and the error-unwinding
 * lines are elided from this excerpt -- confirm against the full source.
 */
271 int bch2_quota_transfer(struct bch_fs *c, unsigned qtypes,
273 struct bch_qid src, u64 space,
274 enum quota_acct_mode mode)
276 struct bch_memquota_type *q;
277 struct bch_memquota *src_q[3], *dst_q[3];
278 struct quota_msgs msgs;
282 qtypes &= enabled_qtypes(c);
284 memset(&msgs, 0, sizeof(msgs));
/* Lock in ascending qtype order for lockdep, as in bch2_quota_acct(). */
286 for_each_set_qtype(c, i, q, qtypes)
287 mutex_lock_nested(&q->lock, i);
289 for_each_set_qtype(c, i, q, qtypes) {
290 src_q[i] = genradix_ptr_alloc(&q->table, src.q[i], GFP_NOFS);
291 dst_q[i] = genradix_ptr_alloc(&q->table, dst.q[i], GFP_NOFS);
293 if (!src_q[i] || !dst_q[i]) {
/* Check destination limits for both space and inode count before moving. */
298 ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_SPC,
299 dst_q[i]->c[Q_SPC].v + space,
304 ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_INO,
305 dst_q[i]->c[Q_INO].v + 1,
311 for_each_set_qtype(c, i, q, qtypes) {
312 __bch2_quota_transfer(src_q[i], dst_q[i], Q_SPC, space);
313 __bch2_quota_transfer(src_q[i], dst_q[i], Q_INO, 1);
317 for_each_set_qtype(c, i, q, qtypes)
318 mutex_unlock(&q->lock);
320 flush_warnings(dst, c->vfs_sb, &msgs);
/*
 * Load one on-disk quota key's limits into the in-memory genradix table for
 * its type.  The allocation-failure branch (between the genradix_ptr_alloc
 * and the copy loop) is elided from this excerpt; both visible paths unlock
 * q->lock.
 */
325 static int __bch2_quota_set(struct bch_fs *c, struct bkey_s_c k)
327 struct bkey_s_c_quota dq;
328 struct bch_memquota_type *q;
329 struct bch_memquota *mq;
/* Callers only pass keys already validated by bch2_quota_invalid(). */
332 BUG_ON(k.k->p.inode >= QTYP_NR);
336 dq = bkey_s_c_to_quota(k);
337 q = &c->quotas[k.k->p.inode];
339 mutex_lock(&q->lock);
340 mq = genradix_ptr_alloc(&q->table, k.k->p.offset, GFP_KERNEL);
342 mutex_unlock(&q->lock);
346 for (i = 0; i < Q_COUNTERS; i++) {
347 mq->c[i].hardlimit = le64_to_cpu(dq.v->c[i].hardlimit);
348 mq->c[i].softlimit = le64_to_cpu(dq.v->c[i].softlimit);
351 mutex_unlock(&q->lock);
/*
 * Walk the quotas btree for one quota type (keys with p.inode == type) and
 * populate the in-memory table; stops at the first key of the next type.
 */
357 static int bch2_quota_init_type(struct bch_fs *c, enum quota_types type)
359 struct btree_trans trans;
360 struct btree_iter *iter;
364 bch2_trans_init(&trans, c, 0, 0);
366 for_each_btree_key(&trans, iter, BTREE_ID_QUOTAS, POS(type, 0),
367 BTREE_ITER_PREFETCH, k, ret) {
368 if (k.k->p.inode != type)
371 ret = __bch2_quota_set(c, k);
376 return bch2_trans_exit(&trans) ?: ret;
/* Free the per-type in-memory quota tables at filesystem teardown. */
379 void bch2_fs_quota_exit(struct bch_fs *c)
383 for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
384 genradix_free(&c->quotas[i].table);
/* Initialize the per-type quota mutexes; called during fs allocation. */
387 void bch2_fs_quota_init(struct bch_fs *c)
391 for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
392 mutex_init(&c->quotas[i].lock);
/*
 * Copy the time/warn limits from the superblock quota field into the
 * in-memory per-type limits.  The early return when the sb field is absent
 * is elided from this excerpt.  Caller must hold c->sb_lock.
 */
395 static void bch2_sb_quota_read(struct bch_fs *c)
397 struct bch_sb_field_quota *sb_quota;
400 sb_quota = bch2_sb_get_quota(c->disk_sb.sb);
404 for (i = 0; i < QTYP_NR; i++) {
405 struct bch_memquota_type *q = &c->quotas[i];
407 for (j = 0; j < Q_COUNTERS; j++) {
408 q->limits[j].timelimit =
409 le32_to_cpu(sb_quota->q[i].c[j].timelimit);
410 q->limits[j].warnlimit =
411 le32_to_cpu(sb_quota->q[i].c[j].warnlimit);
/*
 * Mount-time setup: read limits from the superblock, load quota keys for
 * every enabled type, then walk all inodes accounting their sector and inode
 * usage.  NOCHECK mode means accounting can never fail against limits here.
 */
416 int bch2_fs_quota_read(struct bch_fs *c)
418 unsigned i, qtypes = enabled_qtypes(c);
419 struct bch_memquota_type *q;
420 struct btree_trans trans;
421 struct btree_iter *iter;
422 struct bch_inode_unpacked u;
426 mutex_lock(&c->sb_lock);
427 bch2_sb_quota_read(c);
428 mutex_unlock(&c->sb_lock);
430 for_each_set_qtype(c, i, q, qtypes) {
431 ret = bch2_quota_init_type(c, i);
436 bch2_trans_init(&trans, c, 0, 0);
438 for_each_btree_key(&trans, iter, BTREE_ID_INODES, POS_MIN,
439 BTREE_ITER_PREFETCH, k, ret) {
442 ret = bch2_inode_unpack(bkey_s_c_to_inode(k), &u);
446 bch2_quota_acct(c, bch_qid(&u), Q_SPC, u.bi_sectors,
447 KEY_TYPE_QUOTA_NOCHECK);
448 bch2_quota_acct(c, bch_qid(&u), Q_INO, 1,
449 KEY_TYPE_QUOTA_NOCHECK);
452 return bch2_trans_exit(&trans) ?: ret;
455 /* Enable/disable/delete quotas for an entire filesystem: */
/*
 * Q_QUOTAON handler.  Quota accounting can only be enabled via mount
 * options; this just turns on enforcement bits in the superblock for types
 * whose accounting is already active.  The error-return lines between the
 * visible checks, and the sb write before unlock, are elided here.
 */
457 static int bch2_quota_enable(struct super_block *sb, unsigned uflags)
459 struct bch_fs *c = sb->s_fs_info;
461 if (sb->s_flags & SB_RDONLY)
464 /* Accounting must be enabled at mount time: */
465 if (uflags & (FS_QUOTA_UDQ_ACCT|FS_QUOTA_GDQ_ACCT|FS_QUOTA_PDQ_ACCT))
468 /* Can't enable enforcement without accounting: */
469 if ((uflags & FS_QUOTA_UDQ_ENFD) && !c->opts.usrquota)
472 if ((uflags & FS_QUOTA_GDQ_ENFD) && !c->opts.grpquota)
475 if (uflags & FS_QUOTA_PDQ_ENFD && !c->opts.prjquota)
478 mutex_lock(&c->sb_lock);
479 if (uflags & FS_QUOTA_UDQ_ENFD)
480 SET_BCH_SB_USRQUOTA(c->disk_sb.sb, true);
482 if (uflags & FS_QUOTA_GDQ_ENFD)
483 SET_BCH_SB_GRPQUOTA(c->disk_sb.sb, true);
485 if (uflags & FS_QUOTA_PDQ_ENFD)
486 SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, true);
489 mutex_unlock(&c->sb_lock);
/*
 * Q_QUOTAOFF handler: clear the requested enforcement bits in the
 * superblock.  The sb write before unlock is elided from this excerpt.
 */
494 static int bch2_quota_disable(struct super_block *sb, unsigned uflags)
496 struct bch_fs *c = sb->s_fs_info;
498 if (sb->s_flags & SB_RDONLY)
501 mutex_lock(&c->sb_lock);
502 if (uflags & FS_QUOTA_UDQ_ENFD)
503 SET_BCH_SB_USRQUOTA(c->disk_sb.sb, false);
505 if (uflags & FS_QUOTA_GDQ_ENFD)
506 SET_BCH_SB_GRPQUOTA(c->disk_sb.sb, false);
508 if (uflags & FS_QUOTA_PDQ_ENFD)
509 SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, false);
512 mutex_unlock(&c->sb_lock);
/*
 * Q_XQUOTARM handler: delete all on-disk quota keys of the requested types
 * from the quotas btree.  The visible c->opts checks appear to refuse
 * removal while a type is still mounted (their return lines are elided), and
 * the start-of-range POS arguments to each delete are likewise elided.
 */
517 static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
519 struct bch_fs *c = sb->s_fs_info;
522 if (sb->s_flags & SB_RDONLY)
525 if (uflags & FS_USER_QUOTA) {
526 if (c->opts.usrquota)
529 ret = bch2_btree_delete_range(c, BTREE_ID_QUOTAS,
531 POS(QTYP_USR + 1, 0),
537 if (uflags & FS_GROUP_QUOTA) {
538 if (c->opts.grpquota)
541 ret = bch2_btree_delete_range(c, BTREE_ID_QUOTAS,
543 POS(QTYP_GRP + 1, 0),
549 if (uflags & FS_PROJ_QUOTA) {
550 if (c->opts.prjquota)
553 ret = bch2_btree_delete_range(c, BTREE_ID_QUOTAS,
555 POS(QTYP_PRJ + 1, 0),
565 * Return quota status information, such as enforcements, quota file inode
/*
 * Q_XGETQSTAT handler: report which quota types are accounted and the
 * per-type timer/warn limits.  QCI_SYSFILE is always set since quotas live
 * in an internal btree rather than a visible quota file.
 */
568 static int bch2_quota_get_state(struct super_block *sb, struct qc_state *state)
570 struct bch_fs *c = sb->s_fs_info;
571 unsigned qtypes = enabled_qtypes(c);
574 memset(state, 0, sizeof(*state));
576 for (i = 0; i < QTYP_NR; i++) {
577 state->s_state[i].flags |= QCI_SYSFILE;
579 if (!(qtypes & (1 << i)))
582 state->s_state[i].flags |= QCI_ACCT_ENABLED;
584 state->s_state[i].spc_timelimit = c->quotas[i].limits[Q_SPC].timelimit;
585 state->s_state[i].spc_warnlimit = c->quotas[i].limits[Q_SPC].warnlimit;
587 state->s_state[i].ino_timelimit = c->quotas[i].limits[Q_INO].timelimit;
588 state->s_state[i].ino_warnlimit = c->quotas[i].limits[Q_INO].warnlimit;
595 * Adjust quota timers & warnings
/*
 * Q_XSETQLIM info path: update timer/warn limits for one quota type in the
 * superblock quota field (allocating/resizing the field if absent), then
 * refresh the in-memory copies via bch2_sb_quota_read().  The superblock
 * write call before unlock is elided from this excerpt.
 */
597 static int bch2_quota_set_info(struct super_block *sb, int type,
598 struct qc_info *info)
600 struct bch_fs *c = sb->s_fs_info;
601 struct bch_sb_field_quota *sb_quota;
602 struct bch_memquota_type *q;
604 if (sb->s_flags & SB_RDONLY)
610 if (!((1 << type) & enabled_qtypes(c)))
/* Only the timer/warn fields are settable; reject anything else. */
613 if (info->i_fieldmask &
614 ~(QC_SPC_TIMER|QC_INO_TIMER|QC_SPC_WARNS|QC_INO_WARNS))
617 q = &c->quotas[type];
619 mutex_lock(&c->sb_lock);
620 sb_quota = bch2_sb_get_quota(c->disk_sb.sb);
622 sb_quota = bch2_sb_resize_quota(&c->disk_sb,
623 sizeof(*sb_quota) / sizeof(u64));
628 if (info->i_fieldmask & QC_SPC_TIMER)
629 sb_quota->q[type].c[Q_SPC].timelimit =
630 cpu_to_le32(info->i_spc_timelimit);
632 if (info->i_fieldmask & QC_SPC_WARNS)
633 sb_quota->q[type].c[Q_SPC].warnlimit =
634 cpu_to_le32(info->i_spc_warnlimit);
636 if (info->i_fieldmask & QC_INO_TIMER)
637 sb_quota->q[type].c[Q_INO].timelimit =
638 cpu_to_le32(info->i_ino_timelimit);
640 if (info->i_fieldmask & QC_INO_WARNS)
641 sb_quota->q[type].c[Q_INO].warnlimit =
642 cpu_to_le32(info->i_ino_warnlimit);
644 bch2_sb_quota_read(c);
647 mutex_unlock(&c->sb_lock);
652 /* Get/set individual quotas: */
/*
 * Fill a VFS qc_dqblk from the in-memory counters.  Space values are stored
 * internally in 512-byte sectors, hence the << 9 conversion to bytes.
 */
654 static void __bch2_quota_get(struct qc_dqblk *dst, struct bch_memquota *src)
656 dst->d_space = src->c[Q_SPC].v << 9;
657 dst->d_spc_hardlimit = src->c[Q_SPC].hardlimit << 9;
658 dst->d_spc_softlimit = src->c[Q_SPC].softlimit << 9;
659 dst->d_spc_timer = src->c[Q_SPC].timer;
660 dst->d_spc_warns = src->c[Q_SPC].warns;
662 dst->d_ino_count = src->c[Q_INO].v;
663 dst->d_ino_hardlimit = src->c[Q_INO].hardlimit;
664 dst->d_ino_softlimit = src->c[Q_INO].softlimit;
665 dst->d_ino_timer = src->c[Q_INO].timer;
666 dst->d_ino_warns = src->c[Q_INO].warns;
/*
 * Q_XGETQUOTA handler: look up the id in the per-type genradix (genradix_ptr
 * does not allocate) and copy it out; an absent entry leaves the zeroed qdq.
 */
669 static int bch2_get_quota(struct super_block *sb, struct kqid kqid,
670 struct qc_dqblk *qdq)
672 struct bch_fs *c = sb->s_fs_info;
673 struct bch_memquota_type *q = &c->quotas[kqid.type];
674 qid_t qid = from_kqid(&init_user_ns, kqid);
675 struct bch_memquota *mq;
677 memset(qdq, 0, sizeof(*qdq));
679 mutex_lock(&q->lock);
680 mq = genradix_ptr(&q->table, qid);
682 __bch2_quota_get(qdq, mq);
683 mutex_unlock(&q->lock);
/*
 * Q_XGETNEXTQUOTA handler: scan forward from qid for the first entry that is
 * not all-zero (compared against ZERO_PAGE, since genradix pages start
 * zero-filled), returning its id through *kqid.  The ENOENT path when no
 * entry is found is elided from this excerpt.
 */
688 static int bch2_get_next_quota(struct super_block *sb, struct kqid *kqid,
689 struct qc_dqblk *qdq)
691 struct bch_fs *c = sb->s_fs_info;
692 struct bch_memquota_type *q = &c->quotas[kqid->type];
693 qid_t qid = from_kqid(&init_user_ns, *kqid);
694 struct genradix_iter iter;
695 struct bch_memquota *mq;
698 mutex_lock(&q->lock);
700 genradix_for_each_from(&q->table, iter, mq, qid)
701 if (memcmp(mq, page_address(ZERO_PAGE(0)), sizeof(*mq))) {
702 __bch2_quota_get(qdq, mq);
703 *kqid = make_kqid(current_user_ns(), kqid->type, iter.pos);
709 mutex_unlock(&q->lock);
/*
 * Transactional helper for Q_XSETQLIM: read the existing quota key (if any)
 * into new_quota, merge in the fields selected by qdq->d_fieldmask (space
 * limits converted from bytes to sectors via >> 9), and queue the update.
 * The bkey_err() check between peek and use is elided from this excerpt.
 */
713 static int bch2_set_quota_trans(struct btree_trans *trans,
714 struct bkey_i_quota *new_quota,
715 struct qc_dqblk *qdq)
717 struct btree_iter *iter;
721 iter = bch2_trans_get_iter(trans, BTREE_ID_QUOTAS, new_quota->k.p,
722 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
723 k = bch2_btree_iter_peek_slot(iter);
/* Start from the existing on-disk value so unselected fields persist. */
729 if (k.k->type == KEY_TYPE_quota)
730 new_quota->v = *bkey_s_c_to_quota(k).v;
732 if (qdq->d_fieldmask & QC_SPC_SOFT)
733 new_quota->v.c[Q_SPC].softlimit = cpu_to_le64(qdq->d_spc_softlimit >> 9);
734 if (qdq->d_fieldmask & QC_SPC_HARD)
735 new_quota->v.c[Q_SPC].hardlimit = cpu_to_le64(qdq->d_spc_hardlimit >> 9);
737 if (qdq->d_fieldmask & QC_INO_SOFT)
738 new_quota->v.c[Q_INO].softlimit = cpu_to_le64(qdq->d_ino_softlimit);
739 if (qdq->d_fieldmask & QC_INO_HARD)
740 new_quota->v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit);
742 return bch2_trans_update(trans, iter, &new_quota->k_i, 0);
/*
 * Q_XSETQLIM entry point: run the btree update in a transaction, then (only
 * on success, via the ?: chain) mirror the new limits into the in-memory
 * table with __bch2_quota_set().
 */
745 static int bch2_set_quota(struct super_block *sb, struct kqid qid,
746 struct qc_dqblk *qdq)
748 struct bch_fs *c = sb->s_fs_info;
749 struct btree_trans trans;
750 struct bkey_i_quota new_quota;
753 if (sb->s_flags & SB_RDONLY)
756 bkey_quota_init(&new_quota.k_i);
757 new_quota.k.p = POS(qid.type, from_kqid(&init_user_ns, qid));
759 bch2_trans_init(&trans, c, 0, 0);
761 ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_NOUNLOCK,
762 bch2_set_quota_trans(&trans, &new_quota, qdq)) ?:
763 __bch2_quota_set(c, bkey_i_to_s_c(&new_quota.k_i));
765 bch2_trans_exit(&trans);
/* quotactl_ops vtable exposed to the VFS quotactl(2) machinery. */
770 const struct quotactl_ops bch2_quotactl_operations = {
771 .quota_enable = bch2_quota_enable,
772 .quota_disable = bch2_quota_disable,
773 .rm_xquota = bch2_quota_remove,
775 .get_state = bch2_quota_get_state,
776 .set_info = bch2_quota_set_info,
778 .get_dqblk = bch2_get_quota,
779 .get_nextdqblk = bch2_get_next_quota,
780 .set_dqblk = bch2_set_quota,
783 #endif /* CONFIG_BCACHEFS_QUOTA */