1 // SPDX-License-Identifier: GPL-2.0
3 #include "btree_update.h"
/* Human-readable names of the quota types, indexed by QTYP_USR/GRP/PRJ. */
static const char * const bch2_quota_types[] = {
	"user",
	"group",
	"project",
};
/* Names of the two quota counters: disk space (Q_SPC) and inodes (Q_INO). */
static const char * const bch2_quota_counters[] = {
	"space",
	"inodes",
};
/*
 * Validate the quota section of the superblock: it must be at least
 * sizeof(struct bch_sb_field_quota).  On failure a description is written
 * to @err.
 *
 * NOTE(review): this chunk appears truncated — the @err parameter line,
 * braces and the return statements are missing from the visible text.
 */
static int bch2_sb_quota_validate(struct bch_sb *sb, struct bch_sb_field *f,
	struct bch_sb_field_quota *q = field_to_type(f, quota);

	/* Reject sections smaller than the fixed-size quota field. */
	if (vstruct_bytes(&q->field) < sizeof(*q)) {
		prt_printf(err, "wrong size (got %zu should be %zu)",
			   vstruct_bytes(&q->field), sizeof(*q));
/*
 * Print the superblock quota section: for each quota type, its flags word
 * followed by the time/warn limits of every counter.
 *
 * NOTE(review): this chunk appears truncated — opening/closing braces are
 * missing from the visible text.
 */
static void bch2_sb_quota_to_text(struct printbuf *out, struct bch_sb *sb,
				  struct bch_sb_field *f)
	struct bch_sb_field_quota *q = field_to_type(f, quota);
	unsigned qtyp, counter;

	/* One entry per quota type (user/group/project)... */
	for (qtyp = 0; qtyp < ARRAY_SIZE(q->q); qtyp++) {
		prt_printf(out, "%s: flags %llx",
			   bch2_quota_types[qtyp],
			   le64_to_cpu(q->q[qtyp].flags));

		/* ...followed by the limits for each counter (space, inodes). */
		for (counter = 0; counter < Q_COUNTERS; counter++)
			prt_printf(out, " %s timelimit %u warnlimit %u",
				   bch2_quota_counters[counter],
				   le32_to_cpu(q->q[qtyp].c[counter].timelimit),
				   le32_to_cpu(q->q[qtyp].c[counter].warnlimit));
56 const struct bch_sb_field_ops bch_sb_field_ops_quota = {
57 .validate = bch2_sb_quota_validate,
58 .to_text = bch2_sb_quota_to_text,
/*
 * Validate a quota bkey: the key's inode field encodes the quota type and
 * must be a valid QTYP_*, and the value must be exactly one struct
 * bch_quota.  A description of any failure is written to @err.
 *
 * NOTE(review): this chunk appears truncated — braces and the return
 * statements are missing from the visible text.
 */
int bch2_quota_invalid(const struct bch_fs *c, struct bkey_s_c k,
		       int rw, struct printbuf *err)
	if (k.k->p.inode >= QTYP_NR) {
		prt_printf(err, "invalid quota type (%llu >= %u)",
			   k.k->p.inode, QTYP_NR);

	if (bkey_val_bytes(k.k) != sizeof(struct bch_quota)) {
		prt_printf(err, "incorrect value size (%zu != %zu)",
			   bkey_val_bytes(k.k), sizeof(struct bch_quota));
/*
 * Print a quota key's hard/soft limits for each counter (space, inodes).
 *
 * NOTE(review): this chunk appears truncated — the bkey parameter line,
 * the declaration of i and braces are missing from the visible text.
 */
void bch2_quota_to_text(struct printbuf *out, struct bch_fs *c,
	struct bkey_s_c_quota dq = bkey_s_c_to_quota(k);

	for (i = 0; i < Q_COUNTERS; i++)
		prt_printf(out, "%s hardlimit %llu softlimit %llu",
			   bch2_quota_counters[i],
			   le64_to_cpu(dq.v->c[i].hardlimit),
			   le64_to_cpu(dq.v->c[i].softlimit));
92 #ifdef CONFIG_BCACHEFS_QUOTA
94 #include <linux/cred.h>
96 #include <linux/quota.h>
98 static inline unsigned __next_qtype(unsigned i, unsigned qtypes)
101 return qtypes ? i + __ffs(qtypes) : QTYP_NR;
/*
 * Iterate over the quota types whose bits are set in _qtypes, setting _i
 * to the type index and _q to the corresponding in-memory quota table.
 */
#define for_each_set_qtype(_c, _i, _q, _qtypes)				\
	for (_i = 0;							\
	     (_i = __next_qtype(_i, _qtypes),				\
	      _q = &(_c)->quotas[_i],					\
	      _i < QTYP_NR);						\
	     _i++)
111 static bool ignore_hardlimit(struct bch_memquota_type *q)
113 if (capable(CAP_SYS_RESOURCE))
116 struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
118 return capable(CAP_SYS_RESOURCE) &&
119 (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
120 !(info->dqi_flags & DQF_ROOT_SQUASH));
/* Warning classes, used to index quota_nl[] for netlink notifications. */
enum quota_msg {
	SOFTWARN,	/* Softlimit reached */
	SOFTLONGWARN,	/* Grace time expired */
	HARDWARN,	/* Hardlimit reached */

	HARDBELOW,	/* Usage got below inode hardlimit */
	SOFTBELOW,	/* Usage got below inode softlimit */
};
134 static int quota_nl[][Q_COUNTERS] = {
135 [HARDWARN][Q_SPC] = QUOTA_NL_BHARDWARN,
136 [SOFTLONGWARN][Q_SPC] = QUOTA_NL_BSOFTLONGWARN,
137 [SOFTWARN][Q_SPC] = QUOTA_NL_BSOFTWARN,
138 [HARDBELOW][Q_SPC] = QUOTA_NL_BHARDBELOW,
139 [SOFTBELOW][Q_SPC] = QUOTA_NL_BSOFTBELOW,
141 [HARDWARN][Q_INO] = QUOTA_NL_IHARDWARN,
142 [SOFTLONGWARN][Q_INO] = QUOTA_NL_ISOFTLONGWARN,
143 [SOFTWARN][Q_INO] = QUOTA_NL_ISOFTWARN,
144 [HARDBELOW][Q_INO] = QUOTA_NL_IHARDBELOW,
145 [SOFTBELOW][Q_INO] = QUOTA_NL_ISOFTBELOW,
153 } m[QTYP_NR * Q_COUNTERS];
156 static void prepare_msg(unsigned qtype,
157 enum quota_counters counter,
158 struct quota_msgs *msgs,
159 enum quota_msg msg_type)
161 BUG_ON(msgs->nr >= ARRAY_SIZE(msgs->m));
163 msgs->m[msgs->nr].qtype = qtype;
164 msgs->m[msgs->nr].msg = quota_nl[msg_type][counter];
168 static void prepare_warning(struct memquota_counter *qc,
170 enum quota_counters counter,
171 struct quota_msgs *msgs,
172 enum quota_msg msg_type)
174 if (qc->warning_issued & (1 << msg_type))
177 prepare_msg(qtype, counter, msgs, msg_type);
180 static void flush_warnings(struct bch_qid qid,
181 struct super_block *sb,
182 struct quota_msgs *msgs)
186 for (i = 0; i < msgs->nr; i++)
187 quota_send_warning(make_kqid(&init_user_ns, msgs->m[i].qtype, qid.q[i]),
188 sb->s_dev, msgs->m[i].msg);
/*
 * Check one counter of one quota type after applying a delta, queueing
 * netlink warnings into @msgs as limits are crossed or dropped back under.
 * In PREALLOC mode an over-limit allocation is refused rather than merely
 * warned about; NOCHECK bypasses enforcement entirely.
 *
 * NOTE(review): this chunk appears truncated — the qtype/v parameter
 * lines, the computation of the new value `n`, braces and the return
 * statements are missing from the visible text.
 */
static int bch2_quota_check_limit(struct bch_fs *c,
				  struct bch_memquota *mq,
				  struct quota_msgs *msgs,
				  enum quota_counters counter,
				  enum quota_acct_mode mode)
	struct bch_memquota_type *q = &c->quotas[qtype];
	struct memquota_counter *qc = &mq->c[counter];

	/* NOCHECK mode does raw accounting with no enforcement or warnings. */
	if (mode == KEY_TYPE_QUOTA_NOCHECK)

	/* Usage dropped back below the hard limit: clear and notify. */
	if (n < qc->hardlimit &&
	    (qc->warning_issued & (1 << HARDWARN))) {
		qc->warning_issued &= ~(1 << HARDWARN);
		prepare_msg(qtype, counter, msgs, HARDBELOW);

	/* Usage dropped back below the soft limit: clear and notify. */
	if (n < qc->softlimit &&
	    (qc->warning_issued & (1 << SOFTWARN))) {
		qc->warning_issued &= ~(1 << SOFTWARN);
		prepare_msg(qtype, counter, msgs, SOFTBELOW);

	qc->warning_issued = 0;

	/* Hard limit exceeded (unless privileged — see ignore_hardlimit()). */
	    !ignore_hardlimit(q)) {
		if (mode == KEY_TYPE_QUOTA_PREALLOC)

		prepare_warning(qc, qtype, counter, msgs, HARDWARN);

	/* Soft limit exceeded and the grace timer expired: treat like hard. */
	    ktime_get_real_seconds() >= qc->timer &&
	    !ignore_hardlimit(q)) {
		if (mode == KEY_TYPE_QUOTA_PREALLOC)

		prepare_warning(qc, qtype, counter, msgs, SOFTLONGWARN);

	/* Soft limit newly exceeded: warn and start the grace timer. */
		if (mode == KEY_TYPE_QUOTA_PREALLOC)

		prepare_warning(qc, qtype, counter, msgs, SOFTWARN);

		/* XXX is this the right one? */
		qc->timer = ktime_get_real_seconds() +
			q->limits[counter].warnlimit;
/*
 * Account a delta @v against @counter for every quota type enabled for
 * @qid, enforcing limits according to @mode.  All affected per-type locks
 * are taken (lockdep-nested in type order) before any counter changes, and
 * queued warnings are flushed only after the locks are dropped.
 *
 * NOTE(review): this chunk appears truncated — the declarations of i/ret,
 * the allocation-failure and limit-failure error paths, and the final
 * return are missing from the visible text.
 */
int bch2_quota_acct(struct bch_fs *c, struct bch_qid qid,
		    enum quota_counters counter, s64 v,
		    enum quota_acct_mode mode)
	unsigned qtypes = enabled_qtypes(c);
	struct bch_memquota_type *q;
	struct bch_memquota *mq[QTYP_NR];
	struct quota_msgs msgs;

	memset(&msgs, 0, sizeof(msgs));

	/* Lock every enabled quota type; subclass = type index. */
	for_each_set_qtype(c, i, q, qtypes)
		mutex_lock_nested(&q->lock, i);

	/* Look up (allocating on demand) each entry and check its limits. */
	for_each_set_qtype(c, i, q, qtypes) {
		mq[i] = genradix_ptr_alloc(&q->table, qid.q[i], GFP_NOFS);

		ret = bch2_quota_check_limit(c, i, mq[i], &msgs, counter, v, mode);

	/* All checks passed: apply the delta under every quota type. */
	for_each_set_qtype(c, i, q, qtypes)
		mq[i]->c[counter].v += v;

	for_each_set_qtype(c, i, q, qtypes)
		mutex_unlock(&q->lock);

	/* Send netlink warnings only after dropping the locks. */
	flush_warnings(qid, c->vfs_sb, &msgs);
300 static void __bch2_quota_transfer(struct bch_memquota *src_q,
301 struct bch_memquota *dst_q,
302 enum quota_counters counter, s64 v)
304 BUG_ON(v > src_q->c[counter].v);
305 BUG_ON(v + dst_q->c[counter].v < v);
307 src_q->c[counter].v -= v;
308 dst_q->c[counter].v += v;
/*
 * Transfer @space sectors plus one inode's worth of accounting from @src
 * to @dst for every enabled quota type in @qtypes (e.g. on chown).  The
 * destination is limit-checked before anything is moved.
 *
 * NOTE(review): this chunk appears truncated — the dst parameter line,
 * the declarations of i/ret, error paths and the final return are missing
 * from the visible text.
 */
int bch2_quota_transfer(struct bch_fs *c, unsigned qtypes,
			struct bch_qid src, u64 space,
			enum quota_acct_mode mode)
	struct bch_memquota_type *q;
	struct bch_memquota *src_q[3], *dst_q[3];
	struct quota_msgs msgs;

	qtypes &= enabled_qtypes(c);

	memset(&msgs, 0, sizeof(msgs));

	/* Lock every affected quota type; subclass = type index. */
	for_each_set_qtype(c, i, q, qtypes)
		mutex_lock_nested(&q->lock, i);

	for_each_set_qtype(c, i, q, qtypes) {
		src_q[i] = genradix_ptr_alloc(&q->table, src.q[i], GFP_NOFS);
		dst_q[i] = genradix_ptr_alloc(&q->table, dst.q[i], GFP_NOFS);

		if (!src_q[i] || !dst_q[i]) {

		/* Check the destination can absorb the space... */
		ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_SPC,
					     dst_q[i]->c[Q_SPC].v + space,

		/* ...and one more inode. */
		ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_INO,
					     dst_q[i]->c[Q_INO].v + 1,

	/* All checks passed: move the counters. */
	for_each_set_qtype(c, i, q, qtypes) {
		__bch2_quota_transfer(src_q[i], dst_q[i], Q_SPC, space);
		__bch2_quota_transfer(src_q[i], dst_q[i], Q_INO, 1);

	for_each_set_qtype(c, i, q, qtypes)
		mutex_unlock(&q->lock);

	/* Send netlink warnings only after dropping the locks. */
	flush_warnings(dst, c->vfs_sb, &msgs);
/*
 * Load one quota btree key into the in-memory table for its quota type
 * (the key's inode field encodes the type, the offset the qid).
 *
 * NOTE(review): this chunk appears truncated — early returns, the
 * allocation-failure branch (the first mutex_unlock below presumably
 * belongs to it — verify) and closing braces are missing from the visible
 * text.
 */
static int __bch2_quota_set(struct bch_fs *c, struct bkey_s_c k)
	struct bkey_s_c_quota dq;
	struct bch_memquota_type *q;
	struct bch_memquota *mq;

	BUG_ON(k.k->p.inode >= QTYP_NR);

	/* Nothing to do for quota types that aren't enabled. */
	if (!((1U << k.k->p.inode) & enabled_qtypes(c)))

	dq = bkey_s_c_to_quota(k);
	q = &c->quotas[k.k->p.inode];

	mutex_lock(&q->lock);
	mq = genradix_ptr_alloc(&q->table, k.k->p.offset, GFP_KERNEL);
	mutex_unlock(&q->lock);

	/* Copy the on-disk limits into the in-memory counters. */
	for (i = 0; i < Q_COUNTERS; i++) {
		mq->c[i].hardlimit = le64_to_cpu(dq.v->c[i].hardlimit);
		mq->c[i].softlimit = le64_to_cpu(dq.v->c[i].softlimit);

	mutex_unlock(&q->lock);
400 void bch2_fs_quota_exit(struct bch_fs *c)
404 for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
405 genradix_free(&c->quotas[i].table);
408 void bch2_fs_quota_init(struct bch_fs *c)
412 for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
413 mutex_init(&c->quotas[i].lock);
/*
 * Copy the time/warn limits from the superblock quota section into the
 * in-memory per-type limit tables.  Callers hold c->sb_lock (see
 * bch2_fs_quota_read() and bch2_quota_set_info()).
 *
 * NOTE(review): this chunk appears truncated — the NULL check on sb_quota,
 * the declarations of i/j and closing braces are missing from the visible
 * text.
 */
static void bch2_sb_quota_read(struct bch_fs *c)
	struct bch_sb_field_quota *sb_quota;

	sb_quota = bch2_sb_get_quota(c->disk_sb.sb);

	for (i = 0; i < QTYP_NR; i++) {
		struct bch_memquota_type *q = &c->quotas[i];

		for (j = 0; j < Q_COUNTERS; j++) {
			q->limits[j].timelimit =
				le32_to_cpu(sb_quota->q[i].c[j].timelimit);
			q->limits[j].warnlimit =
				le32_to_cpu(sb_quota->q[i].c[j].warnlimit);
/*
 * Per-key helper for bch2_fs_quota_read(): charge an inode's sectors and
 * its existence to the quota counters, skipping inodes that live in
 * snapshots.  Finally advances @iter to the next inode number, past any
 * remaining snapshot variants of this one.
 *
 * NOTE(review): this chunk appears truncated — the bkey parameter line,
 * error returns and braces are missing from the visible text.
 */
static int bch2_fs_quota_read_inode(struct btree_trans *trans,
				    struct btree_iter *iter,
	struct bch_fs *c = trans->c;
	struct bch_inode_unpacked u;
	struct bch_subvolume subvolume;

	ret = bch2_snapshot_get_subvol(trans, k.k->p.snapshot, &subvolume);

	/*
	 * We don't do quota accounting in snapshots:
	 */
	if (BCH_SUBVOLUME_SNAP(&subvolume))

	if (!bkey_is_inode(k.k))

	ret = bch2_inode_unpack(k, &u);

	/* NOCHECK: initial accounting must never be refused by limits. */
	bch2_quota_acct(c, bch_qid(&u), Q_SPC, u.bi_sectors,
			KEY_TYPE_QUOTA_NOCHECK);
	bch2_quota_acct(c, bch_qid(&u), Q_INO, 1,
			KEY_TYPE_QUOTA_NOCHECK);

	bch2_btree_iter_set_pos(iter, POS(iter->pos.inode, iter->pos.offset + 1));
/*
 * Populate in-memory quota state at mount time: read limits from the
 * superblock, load every key from the quotas btree, then walk all inodes
 * (across all snapshots) to rebuild usage counters.
 *
 * NOTE(review): this chunk appears truncated — the declarations of k/ret
 * and the final return are missing from the visible text.
 */
int bch2_fs_quota_read(struct bch_fs *c)
	struct btree_trans trans;
	struct btree_iter iter;

	mutex_lock(&c->sb_lock);
	bch2_sb_quota_read(c);
	mutex_unlock(&c->sb_lock);

	bch2_trans_init(&trans, c, 0, 0);

	/* Load stored quota limits, then account every existing inode. */
	ret = for_each_btree_key2(&trans, iter, BTREE_ID_quotas,
			POS_MIN, BTREE_ITER_PREFETCH, k,
		__bch2_quota_set(c, k)) ?:
	      for_each_btree_key2(&trans, iter, BTREE_ID_inodes,
			POS_MIN, BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
		bch2_fs_quota_read_inode(&trans, &iter, k));
	/* presumably guarded by `if (ret)` on the elided line above — verify */
	bch_err(c, "err in quota_read: %s", bch2_err_str(ret));

	bch2_trans_exit(&trans);
498 /* Enable/disable/delete quotas for an entire filesystem: */
/*
 * Q_QUOTAON: turn on quota enforcement.  Accounting itself can only be
 * chosen at mount time via the usrquota/grpquota/prjquota options; this
 * just flips the enforcement bits in the superblock.
 *
 * NOTE(review): this chunk appears truncated — the error returns for the
 * guard clauses, the superblock write before unlocking and the final
 * return are missing from the visible text.
 */
static int bch2_quota_enable(struct super_block *sb, unsigned uflags)
	struct bch_fs *c = sb->s_fs_info;

	if (sb->s_flags & SB_RDONLY)

	/* Accounting must be enabled at mount time: */
	if (uflags & (FS_QUOTA_UDQ_ACCT|FS_QUOTA_GDQ_ACCT|FS_QUOTA_PDQ_ACCT))

	/* Can't enable enforcement without accounting: */
	if ((uflags & FS_QUOTA_UDQ_ENFD) && !c->opts.usrquota)

	if ((uflags & FS_QUOTA_GDQ_ENFD) && !c->opts.grpquota)

	if (uflags & FS_QUOTA_PDQ_ENFD && !c->opts.prjquota)

	/* Persist the requested enforcement bits in the superblock. */
	mutex_lock(&c->sb_lock);
	if (uflags & FS_QUOTA_UDQ_ENFD)
		SET_BCH_SB_USRQUOTA(c->disk_sb.sb, true);

	if (uflags & FS_QUOTA_GDQ_ENFD)
		SET_BCH_SB_GRPQUOTA(c->disk_sb.sb, true);

	if (uflags & FS_QUOTA_PDQ_ENFD)
		SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, true);

	mutex_unlock(&c->sb_lock);
/*
 * Q_QUOTAOFF: clear the requested enforcement bits in the superblock.
 *
 * NOTE(review): this chunk appears truncated — the read-only error return,
 * the superblock write before unlocking and the final return are missing
 * from the visible text.
 */
static int bch2_quota_disable(struct super_block *sb, unsigned uflags)
	struct bch_fs *c = sb->s_fs_info;

	if (sb->s_flags & SB_RDONLY)

	mutex_lock(&c->sb_lock);
	if (uflags & FS_QUOTA_UDQ_ENFD)
		SET_BCH_SB_USRQUOTA(c->disk_sb.sb, false);

	if (uflags & FS_QUOTA_GDQ_ENFD)
		SET_BCH_SB_GRPQUOTA(c->disk_sb.sb, false);

	if (uflags & FS_QUOTA_PDQ_ENFD)
		SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, false);

	mutex_unlock(&c->sb_lock);
/*
 * Q_XQUOTARM: delete stored quota keys for the requested types.  The
 * visible guards suggest this is only legal for types whose accounting is
 * not currently enabled.
 *
 * NOTE(review): this chunk appears truncated — each delete_range call's
 * start position and trailing arguments, the per-branch error checks and
 * the final return are missing from the visible text.  The visible
 * POS(QTYP_* + 1, 0) is presumably the end bound of the deleted range.
 */
static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
	struct bch_fs *c = sb->s_fs_info;

	if (sb->s_flags & SB_RDONLY)

	if (uflags & FS_USER_QUOTA) {
		if (c->opts.usrquota)

		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
					      POS(QTYP_USR + 1, 0),

	if (uflags & FS_GROUP_QUOTA) {
		if (c->opts.grpquota)

		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
					      POS(QTYP_GRP + 1, 0),

	if (uflags & FS_PROJ_QUOTA) {
		if (c->opts.prjquota)

		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
					      POS(QTYP_PRJ + 1, 0),
/*
 * Return quota status information, such as enforcements, quota file inode
 * numbers etc.
 */
/*
 * Q_GETXSTATE: report which quota types are accounted, plus their
 * time/warn limits.  Quotas live in a btree rather than visible quota
 * files, hence QCI_SYSFILE on every type.
 *
 * NOTE(review): this chunk appears truncated — the declaration of i,
 * enforcement-flag reporting, braces and the return are missing from the
 * visible text.
 */
static int bch2_quota_get_state(struct super_block *sb, struct qc_state *state)
	struct bch_fs *c = sb->s_fs_info;
	unsigned qtypes = enabled_qtypes(c);

	memset(state, 0, sizeof(*state));

	for (i = 0; i < QTYP_NR; i++) {
		state->s_state[i].flags |= QCI_SYSFILE;

		/* Skip quota types without accounting enabled. */
		if (!(qtypes & (1 << i)))

		state->s_state[i].flags |= QCI_ACCT_ENABLED;

		state->s_state[i].spc_timelimit = c->quotas[i].limits[Q_SPC].timelimit;
		state->s_state[i].spc_warnlimit = c->quotas[i].limits[Q_SPC].warnlimit;

		state->s_state[i].ino_timelimit = c->quotas[i].limits[Q_INO].timelimit;
		state->s_state[i].ino_warnlimit = c->quotas[i].limits[Q_INO].warnlimit;
/*
 * Adjust quota timers & warnings
 */
/*
 * Q_XSETQLIM (info part): update the time/warn limits of one quota type in
 * the superblock quota section — creating the section if absent — then
 * refresh the in-memory copies via bch2_sb_quota_read().
 *
 * NOTE(review): this chunk appears truncated — early returns, the NULL
 * checks around sb_quota and the superblock write are missing from the
 * visible text.  Also note the bare `return -BCH_ERR_ENOSPC_sb_quota`
 * apparently while c->sb_lock is held — verify it doesn't leak the lock.
 */
static int bch2_quota_set_info(struct super_block *sb, int type,
			       struct qc_info *info)
	struct bch_fs *c = sb->s_fs_info;
	struct bch_sb_field_quota *sb_quota;
	struct bch_memquota_type *q;

	if (sb->s_flags & SB_RDONLY)

	if (!((1 << type) & enabled_qtypes(c)))

	/* Only the four limit fields may be set through this interface. */
	if (info->i_fieldmask &
	    ~(QC_SPC_TIMER|QC_INO_TIMER|QC_SPC_WARNS|QC_INO_WARNS))

	q = &c->quotas[type];

	mutex_lock(&c->sb_lock);
	sb_quota = bch2_sb_get_quota(c->disk_sb.sb);
		/* No quota section yet: allocate one in the superblock. */
		sb_quota = bch2_sb_resize_quota(&c->disk_sb,
						sizeof(*sb_quota) / sizeof(u64));
			return -BCH_ERR_ENOSPC_sb_quota;

	if (info->i_fieldmask & QC_SPC_TIMER)
		sb_quota->q[type].c[Q_SPC].timelimit =
			cpu_to_le32(info->i_spc_timelimit);

	if (info->i_fieldmask & QC_SPC_WARNS)
		sb_quota->q[type].c[Q_SPC].warnlimit =
			cpu_to_le32(info->i_spc_warnlimit);

	if (info->i_fieldmask & QC_INO_TIMER)
		sb_quota->q[type].c[Q_INO].timelimit =
			cpu_to_le32(info->i_ino_timelimit);

	if (info->i_fieldmask & QC_INO_WARNS)
		sb_quota->q[type].c[Q_INO].warnlimit =
			cpu_to_le32(info->i_ino_warnlimit);

	/* Refresh the in-memory limits from the section just updated. */
	bch2_sb_quota_read(c);

	mutex_unlock(&c->sb_lock);
695 /* Get/set individual quotas: */
697 static void __bch2_quota_get(struct qc_dqblk *dst, struct bch_memquota *src)
699 dst->d_space = src->c[Q_SPC].v << 9;
700 dst->d_spc_hardlimit = src->c[Q_SPC].hardlimit << 9;
701 dst->d_spc_softlimit = src->c[Q_SPC].softlimit << 9;
702 dst->d_spc_timer = src->c[Q_SPC].timer;
703 dst->d_spc_warns = src->c[Q_SPC].warns;
705 dst->d_ino_count = src->c[Q_INO].v;
706 dst->d_ino_hardlimit = src->c[Q_INO].hardlimit;
707 dst->d_ino_softlimit = src->c[Q_INO].softlimit;
708 dst->d_ino_timer = src->c[Q_INO].timer;
709 dst->d_ino_warns = src->c[Q_INO].warns;
/*
 * Q_XGETQUOTA: read one qid's usage and limits.  @qdq is zeroed first, so
 * an absent table entry reads back as all zeroes.
 *
 * NOTE(review): this chunk appears truncated — braces, the NULL check on
 * mq and the return are missing from the visible text.
 */
static int bch2_get_quota(struct super_block *sb, struct kqid kqid,
			  struct qc_dqblk *qdq)
	struct bch_fs *c = sb->s_fs_info;
	struct bch_memquota_type *q = &c->quotas[kqid.type];
	qid_t qid = from_kqid(&init_user_ns, kqid);
	struct bch_memquota *mq;

	memset(qdq, 0, sizeof(*qdq));

	mutex_lock(&q->lock);
	mq = genradix_ptr(&q->table, qid);
		__bch2_quota_get(qdq, mq);
	mutex_unlock(&q->lock);
/*
 * Q_XGETNEXTQUOTA: find the first populated quota entry at or after *kqid,
 * fill @qdq from it and update *kqid to the id actually found.
 *
 * NOTE(review): this chunk appears truncated — the not-found result
 * (presumably -ENOENT), loop braces and the return are missing from the
 * visible text.
 */
static int bch2_get_next_quota(struct super_block *sb, struct kqid *kqid,
			       struct qc_dqblk *qdq)
	struct bch_fs *c = sb->s_fs_info;
	struct bch_memquota_type *q = &c->quotas[kqid->type];
	qid_t qid = from_kqid(&init_user_ns, *kqid);
	struct genradix_iter iter;
	struct bch_memquota *mq;

	mutex_lock(&q->lock);

	/* Skip all-zero (never-used) entries by comparing against ZERO_PAGE. */
	genradix_for_each_from(&q->table, iter, mq, qid)
		if (memcmp(mq, page_address(ZERO_PAGE(0)), sizeof(*mq))) {
			__bch2_quota_get(qdq, mq);
			*kqid = make_kqid(current_user_ns(), kqid->type, iter.pos);

	mutex_unlock(&q->lock);
/*
 * Transactional helper for bch2_set_quota(): read the existing quota key
 * at new_quota->k.p (if any), merge in the fields selected by
 * qdq->d_fieldmask — space limits converted from bytes to 512-byte
 * sectors (>> 9) — and write the result back.
 *
 * NOTE(review): this chunk appears truncated — the declarations of k/ret,
 * the error check after peek_slot and the final return are missing from
 * the visible text.
 */
static int bch2_set_quota_trans(struct btree_trans *trans,
				struct bkey_i_quota *new_quota,
				struct qc_dqblk *qdq)
	struct btree_iter iter;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_quotas, new_quota->k.p,
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
	k = bch2_btree_iter_peek_slot(&iter);

	/* Start from the existing on-disk limits, if the key exists. */
	if (k.k->type == KEY_TYPE_quota)
		new_quota->v = *bkey_s_c_to_quota(k).v;

	if (qdq->d_fieldmask & QC_SPC_SOFT)
		new_quota->v.c[Q_SPC].softlimit = cpu_to_le64(qdq->d_spc_softlimit >> 9);
	if (qdq->d_fieldmask & QC_SPC_HARD)
		new_quota->v.c[Q_SPC].hardlimit = cpu_to_le64(qdq->d_spc_hardlimit >> 9);

	if (qdq->d_fieldmask & QC_INO_SOFT)
		new_quota->v.c[Q_INO].softlimit = cpu_to_le64(qdq->d_ino_softlimit);
	if (qdq->d_fieldmask & QC_INO_HARD)
		new_quota->v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit);

	ret = bch2_trans_update(trans, &iter, &new_quota->k_i, 0);
	bch2_trans_iter_exit(trans, &iter);
/*
 * Q_XSETQLIM: set limits for one id.  Persist via a btree transaction,
 * then update the in-memory table from the key just written.
 *
 * NOTE(review): this chunk appears truncated — the read-only error return,
 * the declaration of ret and the final return are missing from the
 * visible text.
 */
static int bch2_set_quota(struct super_block *sb, struct kqid qid,
			  struct qc_dqblk *qdq)
	struct bch_fs *c = sb->s_fs_info;
	struct bkey_i_quota new_quota;

	if (sb->s_flags & SB_RDONLY)

	bkey_quota_init(&new_quota.k_i);
	/* Key position encodes (quota type, qid). */
	new_quota.k.p = POS(qid.type, from_kqid(&init_user_ns, qid));

	ret = bch2_trans_do(c, NULL, NULL, 0,
			    bch2_set_quota_trans(&trans, &new_quota, qdq)) ?:
	      __bch2_quota_set(c, bkey_i_to_s_c(&new_quota.k_i));
810 const struct quotactl_ops bch2_quotactl_operations = {
811 .quota_enable = bch2_quota_enable,
812 .quota_disable = bch2_quota_disable,
813 .rm_xquota = bch2_quota_remove,
815 .get_state = bch2_quota_get_state,
816 .set_info = bch2_quota_set_info,
818 .get_dqblk = bch2_get_quota,
819 .get_nextdqblk = bch2_get_next_quota,
820 .set_dqblk = bch2_set_quota,
823 #endif /* CONFIG_BCACHEFS_QUOTA */