1 // SPDX-License-Identifier: GPL-2.0
3 #include "btree_update.h"
/*
 * Human-readable names for quota types (user/group/project) and quota
 * counters (space/inodes), used by the *_to_text() printers below.
 * NOTE(review): array initializers are not visible in this excerpt.
 */
11 static const char * const bch2_quota_types[] = {
17 static const char * const bch2_quota_counters[] = {
/*
 * Validate the quota superblock field: it must be at least as large as
 * struct bch_sb_field_quota, otherwise report the size mismatch into @err
 * and fail with -BCH_ERR_invalid_sb_quota.
 */
22 static int bch2_sb_quota_validate(struct bch_sb *sb, struct bch_sb_field *f,
25 	struct bch_sb_field_quota *q = field_to_type(f, quota);
27 	if (vstruct_bytes(&q->field) < sizeof(*q)) {
28 		prt_printf(err, "wrong size (got %zu should be %zu)",
29 			   vstruct_bytes(&q->field), sizeof(*q));
30 		return -BCH_ERR_invalid_sb_quota;
/*
 * Print the quota superblock field: for each quota type, its flags plus the
 * per-counter timelimit/warnlimit values (converted from little-endian).
 */
36 static void bch2_sb_quota_to_text(struct printbuf *out, struct bch_sb *sb,
37 				  struct bch_sb_field *f)
39 	struct bch_sb_field_quota *q = field_to_type(f, quota);
40 	unsigned qtyp, counter;
42 	for (qtyp = 0; qtyp < ARRAY_SIZE(q->q); qtyp++) {
43 		prt_printf(out, "%s: flags %llx",
44 			   bch2_quota_types[qtyp],
45 			   le64_to_cpu(q->q[qtyp].flags));
47 		for (counter = 0; counter < Q_COUNTERS; counter++)
48 			prt_printf(out, " %s timelimit %u warnlimit %u",
49 				   bch2_quota_counters[counter],
50 				   le32_to_cpu(q->q[qtyp].c[counter].timelimit),
51 				   le32_to_cpu(q->q[qtyp].c[counter].warnlimit));
/* Superblock field ops hooking validation and pretty-printing for quota. */
57 const struct bch_sb_field_ops bch_sb_field_ops_quota = {
58 	.validate	= bch2_sb_quota_validate,
59 	.to_text	= bch2_sb_quota_to_text,
/*
 * bkey validation for KEY_TYPE_quota: the key's inode field encodes the
 * quota type and must be < QTYP_NR; otherwise report into @err and fail.
 */
62 int bch2_quota_invalid(const struct bch_fs *c, struct bkey_s_c k,
63 		       enum bkey_invalid_flags flags,
66 	if (k.k->p.inode >= QTYP_NR) {
67 		prt_printf(err, "invalid quota type (%llu >= %u)",
68 			   k.k->p.inode, QTYP_NR);
69 		return -BCH_ERR_invalid_bkey;
/*
 * Pretty-print a quota bkey: per-counter hard/soft limits (little-endian
 * on disk, converted for display).
 */
75 void bch2_quota_to_text(struct printbuf *out, struct bch_fs *c,
78 	struct bkey_s_c_quota dq = bkey_s_c_to_quota(k);
81 	for (i = 0; i < Q_COUNTERS; i++)
82 		prt_printf(out, "%s hardlimit %llu softlimit %llu",
83 			   bch2_quota_counters[i],
84 			   le64_to_cpu(dq.v->c[i].hardlimit),
85 			   le64_to_cpu(dq.v->c[i].softlimit));
88 #ifdef CONFIG_BCACHEFS_QUOTA
90 #include <linux/cred.h>
92 #include <linux/quota.h>
/*
 * Debug dump of a struct qc_info (VFS quota-info request) into a printbuf,
 * one field per line, aligned with a 20-column tabstop.
 */
94 static void qc_info_to_text(struct printbuf *out, struct qc_info *i)
96 	printbuf_tabstops_reset(out);
97 	printbuf_tabstop_push(out, 20);
99 	prt_str(out, "i_fieldmask");
101 	prt_printf(out, "%x", i->i_fieldmask);
104 	prt_str(out, "i_flags");
106 	prt_printf(out, "%u", i->i_flags);
109 	prt_str(out, "i_spc_timelimit");
111 	prt_printf(out, "%u", i->i_spc_timelimit);
114 	prt_str(out, "i_ino_timelimit");
116 	prt_printf(out, "%u", i->i_ino_timelimit);
119 	prt_str(out, "i_rt_spc_timelimit");
121 	prt_printf(out, "%u", i->i_rt_spc_timelimit);
124 	prt_str(out, "i_spc_warnlimit");
126 	prt_printf(out, "%u", i->i_spc_warnlimit);
129 	prt_str(out, "i_ino_warnlimit");
131 	prt_printf(out, "%u", i->i_ino_warnlimit);
134 	prt_str(out, "i_rt_spc_warnlimit");
136 	prt_printf(out, "%u", i->i_rt_spc_warnlimit);
/*
 * Debug dump of a struct qc_dqblk (per-id quota limits/usage) into a
 * printbuf, one field per line, aligned with a 20-column tabstop.
 */
140 static void qc_dqblk_to_text(struct printbuf *out, struct qc_dqblk *q)
142 	printbuf_tabstops_reset(out);
143 	printbuf_tabstop_push(out, 20);
145 	prt_str(out, "d_fieldmask");
147 	prt_printf(out, "%x", q->d_fieldmask);
150 	prt_str(out, "d_spc_hardlimit");
152 	prt_printf(out, "%llu", q->d_spc_hardlimit);
155 	prt_str(out, "d_spc_softlimit");
157 	prt_printf(out, "%llu", q->d_spc_softlimit);
160 	prt_str(out, "d_ino_hardlimit");
162 	prt_printf(out, "%llu", q->d_ino_hardlimit);
165 	prt_str(out, "d_ino_softlimit");
167 	prt_printf(out, "%llu", q->d_ino_softlimit);
170 	prt_str(out, "d_space");
172 	prt_printf(out, "%llu", q->d_space);
175 	prt_str(out, "d_ino_count");
177 	prt_printf(out, "%llu", q->d_ino_count);
180 	prt_str(out, "d_ino_timer");
182 	prt_printf(out, "%llu", q->d_ino_timer);
185 	prt_str(out, "d_spc_timer");
187 	prt_printf(out, "%llu", q->d_spc_timer);
190 	prt_str(out, "d_ino_warns");
192 	prt_printf(out, "%i", q->d_ino_warns);
195 	prt_str(out, "d_spc_warns");
197 	prt_printf(out, "%i", q->d_spc_warns);
/*
 * Advance to the next enabled quota type: with @qtypes as the remaining
 * bitmask, return i + index of its lowest set bit, or QTYP_NR (the
 * iteration terminator) when no types remain.
 */
201 static inline unsigned __next_qtype(unsigned i, unsigned qtypes)
204 	return qtypes ? i + __ffs(qtypes) : QTYP_NR;
/*
 * Iterate over the quota types set in @_qtypes: each step picks the next
 * enabled type index into @_i and points @_q at c->quotas[_i].
 * NOTE(review): the loop header/termination lines are not visible in this
 * excerpt.
 */
207 #define for_each_set_qtype(_c, _i, _q, _qtypes)				\
209 	     (_i = __next_qtype(_i, _qtypes),				\
210 	      _q = &(_c)->quotas[_i],					\
/*
 * Whether hard limits should be ignored for the current task (root with
 * CAP_SYS_RESOURCE).
 * NOTE(review): lines 219-223 reference `dquot`/`sb_dqopt`, which do not
 * match this function's parameters — they appear to be leftover code from
 * the classic VFS quota implementation; lines are missing from this
 * excerpt, so the exact control flow cannot be confirmed here.
 */
214 static bool ignore_hardlimit(struct bch_memquota_type *q)
216 	if (capable(CAP_SYS_RESOURCE))
219 	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
221 	return capable(CAP_SYS_RESOURCE) &&
222 		(info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
223 		 !(info->dqi_flags & DQF_ROOT_SQUASH));
/* Quota warning message kinds; index the first dimension of quota_nl[]. */
229 	SOFTWARN,	/* Softlimit reached */
230 	SOFTLONGWARN,	/* Grace time expired */
231 	HARDWARN,	/* Hardlimit reached */
233 	HARDBELOW,	/* Usage got below inode hardlimit */
234 	SOFTBELOW,	/* Usage got below inode softlimit */
/*
 * Map (quota_msg kind, counter) -> netlink quota warning type
 * (QUOTA_NL_*), used when sending warnings to userspace.
 */
237 static int quota_nl[][Q_COUNTERS] = {
238 	[HARDWARN][Q_SPC]	= QUOTA_NL_BHARDWARN,
239 	[SOFTLONGWARN][Q_SPC]	= QUOTA_NL_BSOFTLONGWARN,
240 	[SOFTWARN][Q_SPC]	= QUOTA_NL_BSOFTWARN,
241 	[HARDBELOW][Q_SPC]	= QUOTA_NL_BHARDBELOW,
242 	[SOFTBELOW][Q_SPC]	= QUOTA_NL_BSOFTBELOW,
244 	[HARDWARN][Q_INO]	= QUOTA_NL_IHARDWARN,
245 	[SOFTLONGWARN][Q_INO]	= QUOTA_NL_ISOFTLONGWARN,
246 	[SOFTWARN][Q_INO]	= QUOTA_NL_ISOFTWARN,
247 	[HARDBELOW][Q_INO]	= QUOTA_NL_IHARDBELOW,
248 	[SOFTBELOW][Q_INO]	= QUOTA_NL_ISOFTBELOW,
	/* Fixed-capacity message buffer: at most one message per (type, counter). */
256 	} m[QTYP_NR * Q_COUNTERS];
/*
 * Queue one quota warning into @msgs, translating (msg_type, counter)
 * through quota_nl[]. BUG_ON guards against overflowing the fixed m[]
 * array (caller may queue at most one message per type/counter pair).
 */
259 static void prepare_msg(unsigned qtype,
260 			enum quota_counters counter,
261 			struct quota_msgs *msgs,
262 			enum quota_msg msg_type)
264 	BUG_ON(msgs->nr >= ARRAY_SIZE(msgs->m));
266 	msgs->m[msgs->nr].qtype	= qtype;
267 	msgs->m[msgs->nr].msg	= quota_nl[msg_type][counter];
/*
 * Queue a warning unless this msg_type was already issued for @qc
 * (tracked via the warning_issued bitmask, so each warning fires once).
 */
271 static void prepare_warning(struct memquota_counter *qc,
273 			    enum quota_counters counter,
274 			    struct quota_msgs *msgs,
275 			    enum quota_msg msg_type)
277 	if (qc->warning_issued & (1 << msg_type))
280 	prepare_msg(qtype, counter, msgs, msg_type);
/*
 * Send all queued quota warnings to userspace over the quota netlink
 * interface. Must be called outside the per-type quota locks.
 * NOTE(review): qid.q is indexed by msgs->m[i].qtype conceptually but by
 * the loop counter i here — worth confirming against upstream.
 */
283 static void flush_warnings(struct bch_qid qid,
284 			   struct super_block *sb,
285 			   struct quota_msgs *msgs)
289 	for (i = 0; i < msgs->nr; i++)
290 		quota_send_warning(make_kqid(&init_user_ns, msgs->m[i].qtype, qid.q[i]),
291 				   sb->s_dev, msgs->m[i].msg);
/*
 * Check a proposed new counter value against soft/hard limits for one
 * quota type, queueing HARDWARN/SOFTWARN/SOFTLONGWARN as limits are
 * crossed and HARDBELOW/SOFTBELOW as usage drops back under them.
 * KEY_TYPE_QUOTA_NOCHECK skips enforcement. Caller must hold q->lock.
 * NOTE(review): several lines (the limit-exceeded conditions and returns)
 * are missing from this excerpt.
 */
294 static int bch2_quota_check_limit(struct bch_fs *c,
296 				  struct bch_memquota *mq,
297 				  struct quota_msgs *msgs,
298 				  enum quota_counters counter,
300 				  enum quota_acct_mode mode)
302 	struct bch_memquota_type *q = &c->quotas[qtype];
303 	struct memquota_counter *qc = &mq->c[counter];
308 	if (mode == KEY_TYPE_QUOTA_NOCHECK)
	/* Usage dropped below the hard limit: clear flag, notify "below". */
312 	if (n < qc->hardlimit &&
313 	    (qc->warning_issued & (1 << HARDWARN))) {
314 		qc->warning_issued &= ~(1 << HARDWARN);
315 		prepare_msg(qtype, counter, msgs, HARDBELOW);
	/* Usage dropped below the soft limit: clear flag, notify "below". */
318 	if (n < qc->softlimit &&
319 	    (qc->warning_issued & (1 << SOFTWARN))) {
320 		qc->warning_issued &= ~(1 << SOFTWARN);
321 		prepare_msg(qtype, counter, msgs, SOFTBELOW);
324 		qc->warning_issued = 0;
330 	    !ignore_hardlimit(q)) {
331 		prepare_warning(qc, qtype, counter, msgs, HARDWARN);
	/* Soft limit exceeded: start the grace timer on first crossing,
	 * escalate to SOFTLONGWARN once the grace period expires. */
337 		if (qc->timer == 0) {
338 			qc->timer = ktime_get_real_seconds() + q->limits[counter].timelimit;
339 			prepare_warning(qc, qtype, counter, msgs, SOFTWARN);
340 		} else if (ktime_get_real_seconds() >= qc->timer &&
341 			   !ignore_hardlimit(q)) {
342 			prepare_warning(qc, qtype, counter, msgs, SOFTLONGWARN);
/*
 * Account a delta @v against one counter for every enabled quota type of
 * @qid: allocate the in-memory entries, take the per-type locks in a fixed
 * nesting order (mutex_lock_nested by type index, avoiding lockdep false
 * positives), check limits, apply the delta, then flush any queued
 * warnings after dropping the locks.
 */
350 int bch2_quota_acct(struct bch_fs *c, struct bch_qid qid,
351 		    enum quota_counters counter, s64 v,
352 		    enum quota_acct_mode mode)
354 	unsigned qtypes = enabled_qtypes(c);
355 	struct bch_memquota_type *q;
356 	struct bch_memquota *mq[QTYP_NR];
357 	struct quota_msgs msgs;
361 	memset(&msgs, 0, sizeof(msgs));
	/* Allocate entries before locking; GFP_KERNEL may sleep. */
363 	for_each_set_qtype(c, i, q, qtypes) {
364 		mq[i] = genradix_ptr_alloc(&q->table, qid.q[i], GFP_KERNEL);
369 	for_each_set_qtype(c, i, q, qtypes)
370 		mutex_lock_nested(&q->lock, i);
372 	for_each_set_qtype(c, i, q, qtypes) {
373 		ret = bch2_quota_check_limit(c, i, mq[i], &msgs, counter, v, mode);
378 	for_each_set_qtype(c, i, q, qtypes)
379 		mq[i]->c[counter].v += v;
381 	for_each_set_qtype(c, i, q, qtypes)
382 		mutex_unlock(&q->lock);
384 	flush_warnings(qid, c->vfs_sb, &msgs);
/*
 * Move @v units of one counter from src to dst. BUG_ONs catch transferring
 * more than src holds and unsigned-style overflow of dst's total.
 */
389 static void __bch2_quota_transfer(struct bch_memquota *src_q,
390 				  struct bch_memquota *dst_q,
391 				  enum quota_counters counter, s64 v)
393 	BUG_ON(v > src_q->c[counter].v);
394 	BUG_ON(v + dst_q->c[counter].v < v);
396 	src_q->c[counter].v -= v;
397 	dst_q->c[counter].v += v;
/*
 * Transfer an inode's quota charges (@space sectors plus one inode) from
 * @src to @dst ids for each enabled type in @qtypes: pre-check the
 * destination against both space and inode limits under the per-type
 * locks, then move the usage, and finally flush warnings for @dst.
 */
400 int bch2_quota_transfer(struct bch_fs *c, unsigned qtypes,
402 			struct bch_qid src, u64 space,
403 			enum quota_acct_mode mode)
405 	struct bch_memquota_type *q;
406 	struct bch_memquota *src_q[3], *dst_q[3];
407 	struct quota_msgs msgs;
411 	qtypes &= enabled_qtypes(c);
413 	memset(&msgs, 0, sizeof(msgs));
	/* Allocate both endpoints before locking; GFP_KERNEL may sleep. */
415 	for_each_set_qtype(c, i, q, qtypes) {
416 		src_q[i] = genradix_ptr_alloc(&q->table, src.q[i], GFP_KERNEL);
417 		dst_q[i] = genradix_ptr_alloc(&q->table, dst.q[i], GFP_KERNEL);
418 		if (!src_q[i] || !dst_q[i])
	/* Fixed nesting order by type index, same as bch2_quota_acct(). */
422 	for_each_set_qtype(c, i, q, qtypes)
423 		mutex_lock_nested(&q->lock, i);
425 	for_each_set_qtype(c, i, q, qtypes) {
426 		ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_SPC,
427 					     dst_q[i]->c[Q_SPC].v + space,
432 		ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_INO,
433 					     dst_q[i]->c[Q_INO].v + 1,
439 	for_each_set_qtype(c, i, q, qtypes) {
440 		__bch2_quota_transfer(src_q[i], dst_q[i], Q_SPC, space);
441 		__bch2_quota_transfer(src_q[i], dst_q[i], Q_INO, 1);
445 	for_each_set_qtype(c, i, q, qtypes)
446 		mutex_unlock(&q->lock);
448 	flush_warnings(dst, c->vfs_sb, &msgs);
/*
 * Load one on-disk quota key into the in-memory quota table: copy hard and
 * soft limits per counter, and (when @qdq is given, from the setquota
 * path) carry over the timer/warns fields selected by d_fieldmask.
 * Keys for quota types not enabled on this fs are skipped.
 */
453 static int __bch2_quota_set(struct bch_fs *c, struct bkey_s_c k,
454 			    struct qc_dqblk *qdq)
456 	struct bkey_s_c_quota dq;
457 	struct bch_memquota_type *q;
458 	struct bch_memquota *mq;
461 	BUG_ON(k.k->p.inode >= QTYP_NR);
463 	if (!((1U << k.k->p.inode) & enabled_qtypes(c)))
468 		dq = bkey_s_c_to_quota(k);
469 		q = &c->quotas[k.k->p.inode];
471 		mutex_lock(&q->lock);
472 		mq = genradix_ptr_alloc(&q->table, k.k->p.offset, GFP_KERNEL);
474 			mutex_unlock(&q->lock);
478 		for (i = 0; i < Q_COUNTERS; i++) {
479 			mq->c[i].hardlimit = le64_to_cpu(dq.v->c[i].hardlimit);
480 			mq->c[i].softlimit = le64_to_cpu(dq.v->c[i].softlimit);
483 		if (qdq && qdq->d_fieldmask & QC_SPC_TIMER)
484 			mq->c[Q_SPC].timer	= qdq->d_spc_timer;
485 		if (qdq && qdq->d_fieldmask & QC_SPC_WARNS)
486 			mq->c[Q_SPC].warns	= qdq->d_spc_warns;
487 		if (qdq && qdq->d_fieldmask & QC_INO_TIMER)
488 			mq->c[Q_INO].timer	= qdq->d_ino_timer;
489 		if (qdq && qdq->d_fieldmask & QC_INO_WARNS)
490 			mq->c[Q_INO].warns	= qdq->d_ino_warns;
492 		mutex_unlock(&q->lock);
/* Teardown: free the in-memory quota radix trees for all quota types. */
498 void bch2_fs_quota_exit(struct bch_fs *c)
502 	for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
503 		genradix_free(&c->quotas[i].table);
/* Early init: set up the per-quota-type locks. */
506 void bch2_fs_quota_init(struct bch_fs *c)
510 	for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
511 		mutex_init(&c->quotas[i].lock);
/*
 * Return the superblock quota field, creating it if absent. A freshly
 * created field gets a default grace period of one week (7*24*60*60 s)
 * for every type/counter. Caller must hold c->sb_lock.
 */
514 static struct bch_sb_field_quota *bch2_sb_get_or_create_quota(struct bch_sb_handle *sb)
516 	struct bch_sb_field_quota *sb_quota = bch2_sb_get_quota(sb->sb);
521 		sb_quota = bch2_sb_resize_quota(sb, sizeof(*sb_quota) / sizeof(u64));
525 		for (qtype = 0; qtype < QTYP_NR; qtype++)
526 			for (qc = 0; qc < Q_COUNTERS; qc++)
527 				sb_quota->q[qtype].c[qc].timelimit =
528 					cpu_to_le32(7 * 24 * 60 * 60);
/*
 * Copy per-type/per-counter timelimit and warnlimit values from the
 * superblock quota field into the in-memory c->quotas[] limits.
 */
534 static void bch2_sb_quota_read(struct bch_fs *c)
536 	struct bch_sb_field_quota *sb_quota;
539 	sb_quota = bch2_sb_get_quota(c->disk_sb.sb);
543 	for (i = 0; i < QTYP_NR; i++) {
544 		struct bch_memquota_type *q = &c->quotas[i];
546 		for (j = 0; j < Q_COUNTERS; j++) {
547 			q->limits[j].timelimit =
548 				le32_to_cpu(sb_quota->q[i].c[j].timelimit);
549 			q->limits[j].warnlimit =
550 				le32_to_cpu(sb_quota->q[i].c[j].warnlimit);
/*
 * Per-inode-key callback while scanning the inodes btree at quota-read
 * time: resolve the key's snapshot tree, look the inode up in the tree's
 * master subvolume, and charge its sectors and one inode to its quota ids
 * (NOCHECK mode — initial accounting, no enforcement). Advances the
 * iterator past all snapshots of this inode via bpos_nosnap_successor().
 */
555 static int bch2_fs_quota_read_inode(struct btree_trans *trans,
556 				    struct btree_iter *iter,
559 	struct bch_fs *c = trans->c;
560 	struct bch_inode_unpacked u;
561 	struct bch_snapshot_tree s_t;
564 	ret = bch2_snapshot_tree_lookup(trans,
565 			bch2_snapshot_tree(c, k.k->p.snapshot), &s_t);
566 	bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
567 			"%s: snapshot tree %u not found", __func__,
568 			snapshot_t(c, k.k->p.snapshot)->tree);
572 	if (!s_t.master_subvol)
575 	ret = bch2_inode_find_by_inum_trans(trans,
577 				  le32_to_cpu(s_t.master_subvol),
581 	 * Inode might be deleted in this snapshot - the easiest way to handle
582 	 * that is to just skip it here:
584 	if (bch2_err_matches(ret, ENOENT))
590 	bch2_quota_acct(c, bch_qid(&u), Q_SPC, u.bi_sectors,
591 			KEY_TYPE_QUOTA_NOCHECK);
592 	bch2_quota_acct(c, bch_qid(&u), Q_INO, 1,
593 			KEY_TYPE_QUOTA_NOCHECK);
595 	bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
/*
 * Mount-time quota initialization: ensure the superblock quota field
 * exists (failing with ENOSPC_sb_quota if it cannot be created), load the
 * limits, then walk the quotas btree into memory and scan all inodes
 * (across all snapshots) to build initial usage accounting.
 */
599 int bch2_fs_quota_read(struct bch_fs *c)
601 	struct bch_sb_field_quota *sb_quota;
602 	struct btree_trans trans;
603 	struct btree_iter iter;
607 	mutex_lock(&c->sb_lock);
608 	sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
610 		mutex_unlock(&c->sb_lock);
611 		return -BCH_ERR_ENOSPC_sb_quota;
614 	bch2_sb_quota_read(c);
615 	mutex_unlock(&c->sb_lock);
617 	bch2_trans_init(&trans, c, 0, 0);
619 	ret = for_each_btree_key2(&trans, iter, BTREE_ID_quotas,
620 			POS_MIN, BTREE_ITER_PREFETCH, k,
621 			__bch2_quota_set(c, k, NULL)) ?:
622 	      for_each_btree_key2(&trans, iter, BTREE_ID_inodes,
623 			POS_MIN, BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
624 			bch2_fs_quota_read_inode(&trans, &iter, k));
626 	bch2_trans_exit(&trans);
633 /* Enable/disable/delete quotas for an entire filesystem: */
/*
 * quotactl_ops.quota_enable: turn on enforcement flags in the superblock.
 * Accounting itself can only be enabled at mount time (via mount options),
 * so FS_QUOTA_*_ACCT requests are rejected, as is enabling enforcement
 * for a type whose accounting is off. Read-only filesystems refuse.
 */
635 static int bch2_quota_enable(struct super_block	*sb, unsigned uflags)
637 	struct bch_fs *c = sb->s_fs_info;
638 	struct bch_sb_field_quota *sb_quota;
641 	if (sb->s_flags & SB_RDONLY)
644 	/* Accounting must be enabled at mount time: */
645 	if (uflags & (FS_QUOTA_UDQ_ACCT|FS_QUOTA_GDQ_ACCT|FS_QUOTA_PDQ_ACCT))
648 	/* Can't enable enforcement without accounting: */
649 	if ((uflags & FS_QUOTA_UDQ_ENFD) && !c->opts.usrquota)
652 	if ((uflags & FS_QUOTA_GDQ_ENFD) && !c->opts.grpquota)
655 	if (uflags & FS_QUOTA_PDQ_ENFD && !c->opts.prjquota)
658 	mutex_lock(&c->sb_lock);
659 	sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
661 		ret = -BCH_ERR_ENOSPC_sb_quota;
665 	if (uflags & FS_QUOTA_UDQ_ENFD)
666 		SET_BCH_SB_USRQUOTA(c->disk_sb.sb, true);
668 	if (uflags & FS_QUOTA_GDQ_ENFD)
669 		SET_BCH_SB_GRPQUOTA(c->disk_sb.sb, true);
671 	if (uflags & FS_QUOTA_PDQ_ENFD)
672 		SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, true);
	/* Translate internal BCH_ERR codes to standard errnos for the VFS. */
676 	mutex_unlock(&c->sb_lock);
678 	return bch2_err_class(ret);
/*
 * quotactl_ops.quota_disable: clear enforcement flags in the superblock
 * for the requested quota types. Read-only filesystems refuse.
 */
681 static int bch2_quota_disable(struct super_block *sb, unsigned uflags)
683 	struct bch_fs *c = sb->s_fs_info;
685 	if (sb->s_flags & SB_RDONLY)
688 	mutex_lock(&c->sb_lock);
689 	if (uflags & FS_QUOTA_UDQ_ENFD)
690 		SET_BCH_SB_USRQUOTA(c->disk_sb.sb, false);
692 	if (uflags & FS_QUOTA_GDQ_ENFD)
693 		SET_BCH_SB_GRPQUOTA(c->disk_sb.sb, false);
695 	if (uflags & FS_QUOTA_PDQ_ENFD)
696 		SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, false);
699 	mutex_unlock(&c->sb_lock);
/*
 * quotactl_ops.rm_xquota: delete all on-disk quota keys for the requested
 * types. Refused when read-only or when that type's quota accounting is
 * currently enabled (deleting live accounting would corrupt it).
 */
704 static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
706 	struct bch_fs *c = sb->s_fs_info;
709 	if (sb->s_flags & SB_RDONLY)
712 	if (uflags & FS_USER_QUOTA) {
713 		if (c->opts.usrquota)
716 		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
718 					      POS(QTYP_USR, U64_MAX),
724 	if (uflags & FS_GROUP_QUOTA) {
725 		if (c->opts.grpquota)
728 		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
730 					      POS(QTYP_GRP, U64_MAX),
736 	if (uflags & FS_PROJ_QUOTA) {
737 		if (c->opts.prjquota)
740 		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
742 					      POS(QTYP_PRJ, U64_MAX),
752 * Return quota status information, such as enforcements, quota file inode
/*
 * quotactl_ops.get_state: report which quota types are enabled and their
 * current grace-time/warn limits. Every type is flagged QCI_SYSFILE
 * (quota data lives in the filesystem, not a user-visible quota file).
 */
755 static int bch2_quota_get_state(struct super_block *sb, struct qc_state *state)
757 	struct bch_fs *c = sb->s_fs_info;
758 	unsigned qtypes = enabled_qtypes(c);
761 	memset(state, 0, sizeof(*state));
763 	for (i = 0; i < QTYP_NR; i++) {
764 		state->s_state[i].flags |= QCI_SYSFILE;
766 		if (!(qtypes & (1 << i)))
769 		state->s_state[i].flags |= QCI_ACCT_ENABLED;
771 		state->s_state[i].spc_timelimit = c->quotas[i].limits[Q_SPC].timelimit;
772 		state->s_state[i].spc_warnlimit = c->quotas[i].limits[Q_SPC].warnlimit;
774 		state->s_state[i].ino_timelimit = c->quotas[i].limits[Q_INO].timelimit;
775 		state->s_state[i].ino_warnlimit = c->quotas[i].limits[Q_INO].warnlimit;
782 * Adjust quota timers & warnings
/*
 * quotactl_ops.set_info: update grace times and warn limits for one quota
 * type. Only the QC_{SPC,INO}_{TIMER,WARNS} fields are supported; other
 * fieldmask bits are rejected. Changes are written to the superblock
 * quota field under sb_lock and then re-read into the in-memory limits.
 * The printbuf dump (lines 793-796) appears to be debug logging.
 */
784 static int bch2_quota_set_info(struct super_block *sb, int type,
785 			       struct qc_info *info)
787 	struct bch_fs *c = sb->s_fs_info;
788 	struct bch_sb_field_quota *sb_quota;
789 	struct bch_memquota_type *q;
793 	struct printbuf buf = PRINTBUF;
795 	qc_info_to_text(&buf, info);
796 	pr_info("setting:\n%s", buf.buf);
800 	if (sb->s_flags & SB_RDONLY)
806 	if (!((1 << type) & enabled_qtypes(c)))
809 	if (info->i_fieldmask &
810 	    ~(QC_SPC_TIMER|QC_INO_TIMER|QC_SPC_WARNS|QC_INO_WARNS))
813 	q = &c->quotas[type];
815 	mutex_lock(&c->sb_lock);
816 	sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
818 		ret = -BCH_ERR_ENOSPC_sb_quota;
822 	if (info->i_fieldmask & QC_SPC_TIMER)
823 		sb_quota->q[type].c[Q_SPC].timelimit =
824 			cpu_to_le32(info->i_spc_timelimit);
826 	if (info->i_fieldmask & QC_SPC_WARNS)
827 		sb_quota->q[type].c[Q_SPC].warnlimit =
828 			cpu_to_le32(info->i_spc_warnlimit);
830 	if (info->i_fieldmask & QC_INO_TIMER)
831 		sb_quota->q[type].c[Q_INO].timelimit =
832 			cpu_to_le32(info->i_ino_timelimit);
834 	if (info->i_fieldmask & QC_INO_WARNS)
835 		sb_quota->q[type].c[Q_INO].warnlimit =
836 			cpu_to_le32(info->i_ino_warnlimit);
	/* Refresh in-memory limits from the superblock we just edited. */
838 	bch2_sb_quota_read(c);
842 	mutex_unlock(&c->sb_lock);
844 	return bch2_err_class(ret);
847 /* Get/set individual quotas: */
/*
 * Convert an in-memory quota entry to the VFS qc_dqblk representation.
 * Space values are stored in 512-byte sectors internally; the VFS wants
 * bytes, hence the << 9 on the Q_SPC fields.
 */
849 static void __bch2_quota_get(struct qc_dqblk *dst, struct bch_memquota *src)
851 	dst->d_space		= src->c[Q_SPC].v << 9;
852 	dst->d_spc_hardlimit	= src->c[Q_SPC].hardlimit << 9;
853 	dst->d_spc_softlimit	= src->c[Q_SPC].softlimit << 9;
854 	dst->d_spc_timer	= src->c[Q_SPC].timer;
855 	dst->d_spc_warns	= src->c[Q_SPC].warns;
857 	dst->d_ino_count	= src->c[Q_INO].v;
858 	dst->d_ino_hardlimit	= src->c[Q_INO].hardlimit;
859 	dst->d_ino_softlimit	= src->c[Q_INO].softlimit;
860 	dst->d_ino_timer	= src->c[Q_INO].timer;
861 	dst->d_ino_warns	= src->c[Q_INO].warns;
/*
 * quotactl_ops.get_dqblk: look up one quota id in the in-memory table and
 * fill @qdq; an id with no entry yields the zeroed qdq (memset above).
 */
864 static int bch2_get_quota(struct super_block *sb, struct kqid kqid,
865 			  struct qc_dqblk *qdq)
867 	struct bch_fs *c		= sb->s_fs_info;
868 	struct bch_memquota_type *q	= &c->quotas[kqid.type];
869 	qid_t qid			= from_kqid(&init_user_ns, kqid);
870 	struct bch_memquota *mq;
872 	memset(qdq, 0, sizeof(*qdq));
874 	mutex_lock(&q->lock);
875 	mq = genradix_ptr(&q->table, qid);
877 		__bch2_quota_get(qdq, mq);
878 	mutex_unlock(&q->lock);
/*
 * quotactl_ops.get_nextdqblk: scan the radix tree from @kqid's id for the
 * next nonzero entry (the memcmp against ZERO_PAGE skips all-zero slots
 * that genradix hands back for untouched ids), fill @qdq, and update
 * *kqid to the id found.
 */
883 static int bch2_get_next_quota(struct super_block *sb, struct kqid *kqid,
884 			       struct qc_dqblk *qdq)
886 	struct bch_fs *c		= sb->s_fs_info;
887 	struct bch_memquota_type *q	= &c->quotas[kqid->type];
888 	qid_t qid			= from_kqid(&init_user_ns, *kqid);
889 	struct genradix_iter iter;
890 	struct bch_memquota *mq;
893 	mutex_lock(&q->lock);
895 	genradix_for_each_from(&q->table, iter, mq, qid)
896 		if (memcmp(mq, page_address(ZERO_PAGE(0)), sizeof(*mq))) {
897 			__bch2_quota_get(qdq, mq);
898 			*kqid = make_kqid(current_user_ns(), kqid->type, iter.pos);
904 	mutex_unlock(&q->lock);
905 	return bch2_err_class(ret);
/*
 * Transactional helper for setquota: read the existing quota key (SLOTS
 * so an empty slot is returned rather than skipped), start from its value
 * if present, overwrite the limits selected by qdq->d_fieldmask (space
 * limits converted from bytes to 512-byte sectors via >> 9), and queue
 * the update. Iterator is always released before returning.
 */
908 static int bch2_set_quota_trans(struct btree_trans *trans,
909 				struct bkey_i_quota *new_quota,
910 				struct qc_dqblk *qdq)
912 	struct btree_iter iter;
916 	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_quotas, new_quota->k.p,
917 			       BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
922 	if (k.k->type == KEY_TYPE_quota)
923 		new_quota->v = *bkey_s_c_to_quota(k).v;
925 	if (qdq->d_fieldmask & QC_SPC_SOFT)
926 		new_quota->v.c[Q_SPC].softlimit = cpu_to_le64(qdq->d_spc_softlimit >> 9);
927 	if (qdq->d_fieldmask & QC_SPC_HARD)
928 		new_quota->v.c[Q_SPC].hardlimit = cpu_to_le64(qdq->d_spc_hardlimit >> 9);
930 	if (qdq->d_fieldmask & QC_INO_SOFT)
931 		new_quota->v.c[Q_INO].softlimit = cpu_to_le64(qdq->d_ino_softlimit);
932 	if (qdq->d_fieldmask & QC_INO_HARD)
933 		new_quota->v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit);
935 	ret = bch2_trans_update(trans, &iter, &new_quota->k_i, 0);
936 	bch2_trans_iter_exit(trans, &iter);
/*
 * quotactl_ops.set_dqblk: persist new limits for one quota id via a btree
 * transaction, then mirror the committed key into the in-memory table
 * with __bch2_quota_set(). The key position encodes (type, id). Refused
 * on read-only filesystems. The printbuf dump appears to be debug logging.
 */
940 static int bch2_set_quota(struct super_block *sb, struct kqid qid,
941 			  struct qc_dqblk *qdq)
943 	struct bch_fs *c = sb->s_fs_info;
944 	struct bkey_i_quota new_quota;
948 	struct printbuf buf = PRINTBUF;
950 	qc_dqblk_to_text(&buf, qdq);
951 	pr_info("setting:\n%s", buf.buf);
955 	if (sb->s_flags & SB_RDONLY)
958 	bkey_quota_init(&new_quota.k_i);
959 	new_quota.k.p = POS(qid.type, from_kqid(&init_user_ns, qid));
961 	ret = bch2_trans_do(c, NULL, NULL, 0,
962 			    bch2_set_quota_trans(&trans, &new_quota, qdq)) ?:
963 		__bch2_quota_set(c, bkey_i_to_s_c(&new_quota.k_i), qdq);
965 	return bch2_err_class(ret);
/* VFS quotactl entry points for bcachefs (Q_XGETQSTAT, Q_XSETQLIM, ...). */
968 const struct quotactl_ops bch2_quotactl_operations = {
969 	.quota_enable		= bch2_quota_enable,
970 	.quota_disable		= bch2_quota_disable,
971 	.rm_xquota		= bch2_quota_remove,
973 	.get_state		= bch2_quota_get_state,
974 	.set_info		= bch2_quota_set_info,
976 	.get_dqblk		= bch2_get_quota,
977 	.get_nextdqblk		= bch2_get_next_quota,
978 	.set_dqblk		= bch2_set_quota,
981 #endif /* CONFIG_BCACHEFS_QUOTA */