1 // SPDX-License-Identifier: GPL-2.0
3 #include "btree_update.h"
/*
 * Human-readable names for quota types (usr/grp/prj) and counters (spc/ino),
 * indexed by their enum values; used by the *_to_text() printers below.
 * NOTE(review): the array initializers are elided in this excerpt.
 */
10 static const char * const bch2_quota_types[] = {
16 static const char * const bch2_quota_counters[] = {
/*
 * Superblock quota-field validator: reject a bch_sb_field_quota that is too
 * small to contain the structure.  Returns 0 on success or
 * -BCH_ERR_invalid_sb_quota, with a description appended to @err.
 */
21 static int bch2_sb_quota_validate(struct bch_sb *sb, struct bch_sb_field *f,
24 struct bch_sb_field_quota *q = field_to_type(f, quota);
26 if (vstruct_bytes(&q->field) < sizeof(*q)) {
27 prt_printf(err, "wrong size (got %zu should be %zu)",
28 vstruct_bytes(&q->field), sizeof(*q));
29 return -BCH_ERR_invalid_sb_quota;
/*
 * Pretty-print the superblock quota field: for each quota type, its flags
 * plus the per-counter timelimit/warnlimit values (stored little-endian
 * on disk, hence the le*_to_cpu conversions).
 */
35 static void bch2_sb_quota_to_text(struct printbuf *out, struct bch_sb *sb,
36 struct bch_sb_field *f)
38 struct bch_sb_field_quota *q = field_to_type(f, quota);
39 unsigned qtyp, counter;
41 for (qtyp = 0; qtyp < ARRAY_SIZE(q->q); qtyp++) {
42 prt_printf(out, "%s: flags %llx",
43 bch2_quota_types[qtyp],
44 le64_to_cpu(q->q[qtyp].flags));
46 for (counter = 0; counter < Q_COUNTERS; counter++)
47 prt_printf(out, " %s timelimit %u warnlimit %u",
48 bch2_quota_counters[counter],
49 le32_to_cpu(q->q[qtyp].c[counter].timelimit),
50 le32_to_cpu(q->q[qtyp].c[counter].warnlimit));
/* Ops vtable hooking the quota superblock field into the generic sb-field code. */
56 const struct bch_sb_field_ops bch_sb_field_ops_quota = {
57 .validate = bch2_sb_quota_validate,
58 .to_text = bch2_sb_quota_to_text,
/*
 * bkey validator for KEY_TYPE_quota: the key's inode field encodes the quota
 * type and must be < QTYP_NR.  Returns -BCH_ERR_invalid_bkey with an
 * explanation in @err otherwise.
 */
61 int bch2_quota_invalid(const struct bch_fs *c, struct bkey_s_c k,
62 unsigned flags, struct printbuf *err)
64 if (k.k->p.inode >= QTYP_NR) {
65 prt_printf(err, "invalid quota type (%llu >= %u)",
66 k.k->p.inode, QTYP_NR);
67 return -BCH_ERR_invalid_bkey;
/*
 * Pretty-print a quota bkey: hard/soft limits for each counter (space and
 * inodes), converting from the on-disk little-endian representation.
 */
73 void bch2_quota_to_text(struct printbuf *out, struct bch_fs *c,
76 struct bkey_s_c_quota dq = bkey_s_c_to_quota(k);
79 for (i = 0; i < Q_COUNTERS; i++)
80 prt_printf(out, "%s hardlimit %llu softlimit %llu",
81 bch2_quota_counters[i],
82 le64_to_cpu(dq.v->c[i].hardlimit),
83 le64_to_cpu(dq.v->c[i].softlimit));
86 #ifdef CONFIG_BCACHEFS_QUOTA
88 #include <linux/cred.h>
90 #include <linux/quota.h>
/*
 * Debug dump of a struct qc_info (VFS quotactl set_info argument), one
 * "name<tab>value" line per field, using a 20-column tabstop for alignment.
 */
92 static void qc_info_to_text(struct printbuf *out, struct qc_info *i)
94 printbuf_tabstops_reset(out);
95 printbuf_tabstop_push(out, 20);
97 prt_str(out, "i_fieldmask");
99 prt_printf(out, "%x", i->i_fieldmask);
102 prt_str(out, "i_flags");
104 prt_printf(out, "%u", i->i_flags);
107 prt_str(out, "i_spc_timelimit");
109 prt_printf(out, "%u", i->i_spc_timelimit);
112 prt_str(out, "i_ino_timelimit");
114 prt_printf(out, "%u", i->i_ino_timelimit);
117 prt_str(out, "i_rt_spc_timelimit");
119 prt_printf(out, "%u", i->i_rt_spc_timelimit);
122 prt_str(out, "i_spc_warnlimit");
124 prt_printf(out, "%u", i->i_spc_warnlimit);
127 prt_str(out, "i_ino_warnlimit");
129 prt_printf(out, "%u", i->i_ino_warnlimit);
132 prt_str(out, "i_rt_spc_warnlimit");
134 prt_printf(out, "%u", i->i_rt_spc_warnlimit);
/*
 * Debug dump of a struct qc_dqblk (per-id quota limits/usage as seen by the
 * VFS quota interface), mirroring qc_info_to_text() formatting.
 */
138 static void qc_dqblk_to_text(struct printbuf *out, struct qc_dqblk *q)
140 printbuf_tabstops_reset(out);
141 printbuf_tabstop_push(out, 20);
143 prt_str(out, "d_fieldmask");
145 prt_printf(out, "%x", q->d_fieldmask);
148 prt_str(out, "d_spc_hardlimit");
150 prt_printf(out, "%llu", q->d_spc_hardlimit);
153 prt_str(out, "d_spc_softlimit");
155 prt_printf(out, "%llu", q->d_spc_softlimit);
158 prt_str(out, "d_ino_hardlimit");
160 prt_printf(out, "%llu", q->d_ino_hardlimit);
163 prt_str(out, "d_ino_softlimit");
165 prt_printf(out, "%llu", q->d_ino_softlimit);
168 prt_str(out, "d_space");
170 prt_printf(out, "%llu", q->d_space);
173 prt_str(out, "d_ino_count");
175 prt_printf(out, "%llu", q->d_ino_count);
178 prt_str(out, "d_ino_timer");
180 prt_printf(out, "%llu", q->d_ino_timer);
183 prt_str(out, "d_spc_timer");
185 prt_printf(out, "%llu", q->d_spc_timer);
188 prt_str(out, "d_ino_warns");
190 prt_printf(out, "%i", q->d_ino_warns);
193 prt_str(out, "d_spc_warns");
195 prt_printf(out, "%i", q->d_spc_warns);
/*
 * Return the index of the next enabled quota type at or after @i in the
 * bitmask @qtypes, or QTYP_NR when none remain (loop terminator for
 * for_each_set_qtype()).
 * NOTE(review): intermediate lines appear elided here; presumably @qtypes is
 * shifted right by @i before the __ffs() — confirm against the full source.
 */
199 static inline unsigned __next_qtype(unsigned i, unsigned qtypes)
202 return qtypes ? i + __ffs(qtypes) : QTYP_NR;
/*
 * Iterate over the quota types whose bits are set in @_qtypes, binding @_i to
 * the type index and @_q to &c->quotas[_i] on each pass.
 * NOTE(review): the macro's opening `for (...)` line is elided in this excerpt.
 */
205 #define for_each_set_qtype(_c, _i, _q, _qtypes) \
207 (_i = __next_qtype(_i, _qtypes), \
208 _q = &(_c)->quotas[_i], \
/*
 * Whether hard limits should be bypassed for the current task: true for
 * CAP_SYS_RESOURCE holders.
 * NOTE(review): the lines referencing `dquot`/mem_dqinfo below use a variable
 * that is not in scope here — in the full source this is almost certainly
 * disabled (e.g. inside #if 0) legacy-quota logic; confirm before relying on it.
 */
212 static bool ignore_hardlimit(struct bch_memquota_type *q)
214 if (capable(CAP_SYS_RESOURCE))
217 struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
219 return capable(CAP_SYS_RESOURCE) &&
220 (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
221 !(info->dqi_flags & DQF_ROOT_SQUASH));
/*
 * Warning-message kinds used to index quota_nl[] below; the enum's opening
 * line is elided in this excerpt.
 */
227 SOFTWARN, /* Softlimit reached */
228 SOFTLONGWARN, /* Grace time expired */
229 HARDWARN, /* Hardlimit reached */
231 HARDBELOW, /* Usage got below inode hardlimit */
232 SOFTBELOW, /* Usage got below inode softlimit */
/*
 * Map (quota_msg kind, counter) -> netlink quota warning code
 * (QUOTA_NL_B* for the space counter, QUOTA_NL_I* for the inode counter).
 */
235 static int quota_nl[][Q_COUNTERS] = {
236 [HARDWARN][Q_SPC] = QUOTA_NL_BHARDWARN,
237 [SOFTLONGWARN][Q_SPC] = QUOTA_NL_BSOFTLONGWARN,
238 [SOFTWARN][Q_SPC] = QUOTA_NL_BSOFTWARN,
239 [HARDBELOW][Q_SPC] = QUOTA_NL_BHARDBELOW,
240 [SOFTBELOW][Q_SPC] = QUOTA_NL_BSOFTBELOW,
242 [HARDWARN][Q_INO] = QUOTA_NL_IHARDWARN,
243 [SOFTLONGWARN][Q_INO] = QUOTA_NL_ISOFTLONGWARN,
244 [SOFTWARN][Q_INO] = QUOTA_NL_ISOFTWARN,
245 [HARDBELOW][Q_INO] = QUOTA_NL_IHARDBELOW,
246 [SOFTBELOW][Q_INO] = QUOTA_NL_ISOFTBELOW,
/*
 * Tail of struct quota_msgs: fixed-size buffer of pending warnings, one slot
 * per (type, counter) pair max.  The struct's opening lines (message count
 * `nr` and the per-entry fields) are elided in this excerpt.
 */
254 } m[QTYP_NR * Q_COUNTERS];
/*
 * Append one warning to @msgs, translating (msg_type, counter) to the netlink
 * code via quota_nl[].  The buffer is sized so overflow is a logic bug,
 * hence BUG_ON rather than error return.
 */
257 static void prepare_msg(unsigned qtype,
258 enum quota_counters counter,
259 struct quota_msgs *msgs,
260 enum quota_msg msg_type)
262 BUG_ON(msgs->nr >= ARRAY_SIZE(msgs->m));
264 msgs->m[msgs->nr].qtype = qtype;
265 msgs->m[msgs->nr].msg = quota_nl[msg_type][counter];
/*
 * Queue a warning unless this msg_type was already issued for @qc
 * (deduplicated via the warning_issued bitmask).
 */
269 static void prepare_warning(struct memquota_counter *qc,
271 enum quota_counters counter,
272 struct quota_msgs *msgs,
273 enum quota_msg msg_type)
275 if (qc->warning_issued & (1 << msg_type))
278 prepare_msg(qtype, counter, msgs, msg_type);
/*
 * Send all queued warnings to userspace over the quota netlink interface.
 * NOTE(review): qid.q[] is indexed by the message index i, while the kqid type
 * comes from msgs->m[i].qtype — if those can differ this picks the wrong id;
 * verify against the full source / upstream history.
 */
281 static void flush_warnings(struct bch_qid qid,
282 struct super_block *sb,
283 struct quota_msgs *msgs)
287 for (i = 0; i < msgs->nr; i++)
288 quota_send_warning(make_kqid(&init_user_ns, msgs->m[i].qtype, qid.q[i]),
289 sb->s_dev, msgs->m[i].msg);
/*
 * Core limit check for one (quota type, counter) pair at a proposed new usage.
 * Clears below-limit warnings when usage drops back under the hard/soft
 * limits, and issues HARDWARN / SOFTWARN / SOFTLONGWARN as limits are crossed;
 * the soft-limit grace timer is armed on first crossing.  With
 * KEY_TYPE_QUOTA_NOCHECK, enforcement is skipped entirely.
 * Caller must hold q->lock.  NOTE(review): the enforcement/return paths are
 * partially elided in this excerpt.
 */
292 static int bch2_quota_check_limit(struct bch_fs *c,
294 struct bch_memquota *mq,
295 struct quota_msgs *msgs,
296 enum quota_counters counter,
298 enum quota_acct_mode mode)
300 struct bch_memquota_type *q = &c->quotas[qtype];
301 struct memquota_counter *qc = &mq->c[counter];
306 if (mode == KEY_TYPE_QUOTA_NOCHECK)
310 if (n < qc->hardlimit &&
311 (qc->warning_issued & (1 << HARDWARN))) {
312 qc->warning_issued &= ~(1 << HARDWARN);
313 prepare_msg(qtype, counter, msgs, HARDBELOW);
316 if (n < qc->softlimit &&
317 (qc->warning_issued & (1 << SOFTWARN))) {
318 qc->warning_issued &= ~(1 << SOFTWARN);
319 prepare_msg(qtype, counter, msgs, SOFTBELOW);
322 qc->warning_issued = 0;
328 !ignore_hardlimit(q)) {
329 prepare_warning(qc, qtype, counter, msgs, HARDWARN);
335 if (qc->timer == 0) {
336 qc->timer = ktime_get_real_seconds() + q->limits[counter].timelimit;
337 prepare_warning(qc, qtype, counter, msgs, SOFTWARN);
338 } else if (ktime_get_real_seconds() >= qc->timer &&
339 !ignore_hardlimit(q)) {
340 prepare_warning(qc, qtype, counter, msgs, SOFTLONGWARN);
/*
 * Account a delta @v against one counter for every enabled quota type of @qid.
 * Allocates the in-memory quota entries up front, then takes all per-type
 * locks (mutex_lock_nested with the type index as subclass to satisfy
 * lockdep), checks limits, applies the delta, unlocks, and finally emits any
 * queued warnings outside the locks.
 */
348 int bch2_quota_acct(struct bch_fs *c, struct bch_qid qid,
349 enum quota_counters counter, s64 v,
350 enum quota_acct_mode mode)
352 unsigned qtypes = enabled_qtypes(c);
353 struct bch_memquota_type *q;
354 struct bch_memquota *mq[QTYP_NR];
355 struct quota_msgs msgs;
359 memset(&msgs, 0, sizeof(msgs));
361 for_each_set_qtype(c, i, q, qtypes) {
362 mq[i] = genradix_ptr_alloc(&q->table, qid.q[i], GFP_KERNEL);
367 for_each_set_qtype(c, i, q, qtypes)
368 mutex_lock_nested(&q->lock, i);
370 for_each_set_qtype(c, i, q, qtypes) {
371 ret = bch2_quota_check_limit(c, i, mq[i], &msgs, counter, v, mode);
376 for_each_set_qtype(c, i, q, qtypes)
377 mq[i]->c[counter].v += v;
379 for_each_set_qtype(c, i, q, qtypes)
380 mutex_unlock(&q->lock);
382 flush_warnings(qid, c->vfs_sb, &msgs);
/*
 * Move @v units of one counter from @src_q to @dst_q.  The BUG_ONs assert the
 * source has enough to give and that the destination does not overflow.
 * Caller must hold the relevant quota-type lock.
 */
387 static void __bch2_quota_transfer(struct bch_memquota *src_q,
388 struct bch_memquota *dst_q,
389 enum quota_counters counter, s64 v)
391 BUG_ON(v > src_q->c[counter].v);
392 BUG_ON(v + dst_q->c[counter].v < v);
394 src_q->c[counter].v -= v;
395 dst_q->c[counter].v += v;
/*
 * Transfer @space sectors and one inode from @src to @dst for each enabled
 * quota type (used on chown/chgrp/project change).  Limits are checked on the
 * destination first; on success both counters are moved atomically under the
 * per-type locks, and warnings are flushed for the destination afterwards.
 */
398 int bch2_quota_transfer(struct bch_fs *c, unsigned qtypes,
400 struct bch_qid src, u64 space,
401 enum quota_acct_mode mode)
403 struct bch_memquota_type *q;
404 struct bch_memquota *src_q[3], *dst_q[3];
405 struct quota_msgs msgs;
409 qtypes &= enabled_qtypes(c);
411 memset(&msgs, 0, sizeof(msgs));
413 for_each_set_qtype(c, i, q, qtypes) {
414 src_q[i] = genradix_ptr_alloc(&q->table, src.q[i], GFP_KERNEL);
415 dst_q[i] = genradix_ptr_alloc(&q->table, dst.q[i], GFP_KERNEL);
416 if (!src_q[i] || !dst_q[i])
420 for_each_set_qtype(c, i, q, qtypes)
421 mutex_lock_nested(&q->lock, i);
423 for_each_set_qtype(c, i, q, qtypes) {
424 ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_SPC,
425 dst_q[i]->c[Q_SPC].v + space,
430 ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_INO,
431 dst_q[i]->c[Q_INO].v + 1,
437 for_each_set_qtype(c, i, q, qtypes) {
438 __bch2_quota_transfer(src_q[i], dst_q[i], Q_SPC, space);
439 __bch2_quota_transfer(src_q[i], dst_q[i], Q_INO, 1);
443 for_each_set_qtype(c, i, q, qtypes)
444 mutex_unlock(&q->lock);
446 flush_warnings(dst, c->vfs_sb, &msgs);
/*
 * Load one on-disk quota key into the in-memory quota table: copy hard/soft
 * limits for both counters, and optionally apply timer/warns overrides from
 * @qdq (the quotactl set_dqblk argument), guarded by its d_fieldmask bits.
 * NOTE(review): cpu_to_le64() is applied when storing into mq->c[].timer/
 * .warns, yet those in-memory fields are read without conversion elsewhere
 * (e.g. __bch2_quota_get) — looks like an endian-annotation mistake; confirm
 * against the full source.
 */
451 static int __bch2_quota_set(struct bch_fs *c, struct bkey_s_c k,
452 struct qc_dqblk *qdq)
454 struct bkey_s_c_quota dq;
455 struct bch_memquota_type *q;
456 struct bch_memquota *mq;
459 BUG_ON(k.k->p.inode >= QTYP_NR);
461 if (!((1U << k.k->p.inode) & enabled_qtypes(c)))
466 dq = bkey_s_c_to_quota(k);
467 q = &c->quotas[k.k->p.inode];
469 mutex_lock(&q->lock);
470 mq = genradix_ptr_alloc(&q->table, k.k->p.offset, GFP_KERNEL);
472 mutex_unlock(&q->lock);
476 for (i = 0; i < Q_COUNTERS; i++) {
477 mq->c[i].hardlimit = le64_to_cpu(dq.v->c[i].hardlimit);
478 mq->c[i].softlimit = le64_to_cpu(dq.v->c[i].softlimit);
481 if (qdq && qdq->d_fieldmask & QC_SPC_TIMER)
482 mq->c[Q_SPC].timer = cpu_to_le64(qdq->d_spc_timer);
483 if (qdq && qdq->d_fieldmask & QC_SPC_WARNS)
484 mq->c[Q_SPC].warns = cpu_to_le64(qdq->d_spc_warns);
485 if (qdq && qdq->d_fieldmask & QC_INO_TIMER)
486 mq->c[Q_INO].timer = cpu_to_le64(qdq->d_ino_timer);
487 if (qdq && qdq->d_fieldmask & QC_INO_WARNS)
488 mq->c[Q_INO].warns = cpu_to_le64(qdq->d_ino_warns);
490 mutex_unlock(&q->lock);
/* Free the per-type in-memory quota radix trees at filesystem shutdown. */
496 void bch2_fs_quota_exit(struct bch_fs *c)
500 for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
501 genradix_free(&c->quotas[i].table);
/* Initialize the per-quota-type locks; counterpart to bch2_fs_quota_exit(). */
504 void bch2_fs_quota_init(struct bch_fs *c)
508 for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
509 mutex_init(&c->quotas[i].lock);
/*
 * Return the superblock quota field, creating it (sized in u64 units, as
 * bch2_sb_resize_* expects) with a default 7-day grace period for every
 * (type, counter) pair if absent.  Caller must hold c->sb_lock.
 */
512 static struct bch_sb_field_quota *bch2_sb_get_or_create_quota(struct bch_sb_handle *sb)
514 struct bch_sb_field_quota *sb_quota = bch2_sb_get_quota(sb->sb);
519 sb_quota = bch2_sb_resize_quota(sb, sizeof(*sb_quota) / sizeof(u64));
523 for (qtype = 0; qtype < QTYP_NR; qtype++)
524 for (qc = 0; qc < Q_COUNTERS; qc++)
525 sb_quota->q[qtype].c[qc].timelimit =
526 cpu_to_le32(7 * 24 * 60 * 60);
/*
 * Copy time/warn limits from the superblock quota field into the in-memory
 * per-type limits (native endianness).  Caller must hold c->sb_lock.
 */
532 static void bch2_sb_quota_read(struct bch_fs *c)
534 struct bch_sb_field_quota *sb_quota;
537 sb_quota = bch2_sb_get_quota(c->disk_sb.sb);
541 for (i = 0; i < QTYP_NR; i++) {
542 struct bch_memquota_type *q = &c->quotas[i];
544 for (j = 0; j < Q_COUNTERS; j++) {
545 q->limits[j].timelimit =
546 le32_to_cpu(sb_quota->q[i].c[j].timelimit);
547 q->limits[j].warnlimit =
548 le32_to_cpu(sb_quota->q[i].c[j].warnlimit);
/*
 * Per-inode callback for the initial quota scan: look up the inode in its
 * snapshot tree's master subvolume and charge its sectors and one inode to
 * the owning quotas (NOCHECK: accounting only, no enforcement).  Snapshot
 * trees without a master subvolume are skipped; the iterator is advanced to
 * the next inode number so each inode is counted once across snapshots.
 */
553 static int bch2_fs_quota_read_inode(struct btree_trans *trans,
554 struct btree_iter *iter,
557 struct bch_fs *c = trans->c;
558 struct bch_inode_unpacked u;
559 struct bch_snapshot_tree s_t;
562 ret = bch2_snapshot_tree_lookup(trans,
563 snapshot_t(c, k.k->p.snapshot)->tree, &s_t);
567 if (!s_t.master_subvol)
570 ret = bch2_inode_find_by_inum_trans(trans,
572 le32_to_cpu(s_t.master_subvol),
578 bch2_quota_acct(c, bch_qid(&u), Q_SPC, u.bi_sectors,
579 KEY_TYPE_QUOTA_NOCHECK);
580 bch2_quota_acct(c, bch_qid(&u), Q_INO, 1,
581 KEY_TYPE_QUOTA_NOCHECK);
583 bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
/*
 * Mount-time quota initialization: ensure the superblock quota field exists
 * and load its limits (under sb_lock), then walk the quotas btree to populate
 * in-memory limits and the inodes btree (all snapshots) to rebuild usage
 * counters.  Returns a bch2 error code; failure to allocate the sb field maps
 * to -BCH_ERR_ENOSPC_sb_quota.
 */
587 int bch2_fs_quota_read(struct bch_fs *c)
589 struct bch_sb_field_quota *sb_quota;
590 struct btree_trans trans;
591 struct btree_iter iter;
595 mutex_lock(&c->sb_lock);
596 sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
598 mutex_unlock(&c->sb_lock);
599 return -BCH_ERR_ENOSPC_sb_quota;
602 bch2_sb_quota_read(c);
603 mutex_unlock(&c->sb_lock);
605 bch2_trans_init(&trans, c, 0, 0);
607 ret = for_each_btree_key2(&trans, iter, BTREE_ID_quotas,
608 POS_MIN, BTREE_ITER_PREFETCH, k,
609 __bch2_quota_set(c, k, NULL)) ?:
610 for_each_btree_key2(&trans, iter, BTREE_ID_inodes,
611 POS_MIN, BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
612 bch2_fs_quota_read_inode(&trans, &iter, k));
614 bch_err(c, "err in quota_read: %s", bch2_err_str(ret));
616 bch2_trans_exit(&trans);
620 /* Enable/disable/delete quotas for an entire filesystem: */
/*
 * quotactl ->quota_enable: enforcement can only be turned on for types whose
 * accounting was enabled at mount time (usrquota/grpquota/prjquota options);
 * accounting itself cannot be toggled at runtime.  Sets the corresponding
 * superblock enforcement bits under sb_lock.  Rejected read-only.
 */
622 static int bch2_quota_enable(struct super_block *sb, unsigned uflags)
624 struct bch_fs *c = sb->s_fs_info;
625 struct bch_sb_field_quota *sb_quota;
628 if (sb->s_flags & SB_RDONLY)
631 /* Accounting must be enabled at mount time: */
632 if (uflags & (FS_QUOTA_UDQ_ACCT|FS_QUOTA_GDQ_ACCT|FS_QUOTA_PDQ_ACCT))
635 /* Can't enable enforcement without accounting: */
636 if ((uflags & FS_QUOTA_UDQ_ENFD) && !c->opts.usrquota)
639 if ((uflags & FS_QUOTA_GDQ_ENFD) && !c->opts.grpquota)
642 if (uflags & FS_QUOTA_PDQ_ENFD && !c->opts.prjquota)
645 mutex_lock(&c->sb_lock);
646 sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
648 ret = -BCH_ERR_ENOSPC_sb_quota;
652 if (uflags & FS_QUOTA_UDQ_ENFD)
653 SET_BCH_SB_USRQUOTA(c->disk_sb.sb, true);
655 if (uflags & FS_QUOTA_GDQ_ENFD)
656 SET_BCH_SB_GRPQUOTA(c->disk_sb.sb, true);
658 if (uflags & FS_QUOTA_PDQ_ENFD)
659 SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, true);
663 mutex_unlock(&c->sb_lock);
665 return bch2_err_class(ret);
/*
 * quotactl ->quota_disable: clear the requested enforcement bits in the
 * superblock under sb_lock.  Rejected read-only.
 */
668 static int bch2_quota_disable(struct super_block *sb, unsigned uflags)
670 struct bch_fs *c = sb->s_fs_info;
672 if (sb->s_flags & SB_RDONLY)
675 mutex_lock(&c->sb_lock);
676 if (uflags & FS_QUOTA_UDQ_ENFD)
677 SET_BCH_SB_USRQUOTA(c->disk_sb.sb, false);
679 if (uflags & FS_QUOTA_GDQ_ENFD)
680 SET_BCH_SB_GRPQUOTA(c->disk_sb.sb, false);
682 if (uflags & FS_QUOTA_PDQ_ENFD)
683 SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, false);
686 mutex_unlock(&c->sb_lock);
/*
 * quotactl ->rm_xquota: delete all on-disk quota keys for the requested
 * types.  Only permitted while the matching mount option is off (i.e. the
 * quota type is not in active use) and the fs is writable.  Each type's keys
 * occupy its own "inode" range [POS(type,0), POS(type,U64_MAX)] in the
 * quotas btree.
 */
691 static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
693 struct bch_fs *c = sb->s_fs_info;
696 if (sb->s_flags & SB_RDONLY)
699 if (uflags & FS_USER_QUOTA) {
700 if (c->opts.usrquota)
703 ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
705 POS(QTYP_USR, U64_MAX),
711 if (uflags & FS_GROUP_QUOTA) {
712 if (c->opts.grpquota)
715 ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
717 POS(QTYP_GRP, U64_MAX),
723 if (uflags & FS_PROJ_QUOTA) {
724 if (c->opts.prjquota)
727 ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
729 POS(QTYP_PRJ, U64_MAX),
739 * Return quota status information, such as enforcements, quota file inode
/*
 * quotactl ->get_state: report per-type status.  All types are flagged
 * QCI_SYSFILE (quota data lives in a system btree, not a visible file);
 * enabled types additionally get QCI_ACCT_ENABLED plus their current
 * time/warn limits.
 */
742 static int bch2_quota_get_state(struct super_block *sb, struct qc_state *state)
744 struct bch_fs *c = sb->s_fs_info;
745 unsigned qtypes = enabled_qtypes(c);
748 memset(state, 0, sizeof(*state));
750 for (i = 0; i < QTYP_NR; i++) {
751 state->s_state[i].flags |= QCI_SYSFILE;
753 if (!(qtypes & (1 << i)))
756 state->s_state[i].flags |= QCI_ACCT_ENABLED;
758 state->s_state[i].spc_timelimit = c->quotas[i].limits[Q_SPC].timelimit;
759 state->s_state[i].spc_warnlimit = c->quotas[i].limits[Q_SPC].warnlimit;
761 state->s_state[i].ino_timelimit = c->quotas[i].limits[Q_INO].timelimit;
762 state->s_state[i].ino_warnlimit = c->quotas[i].limits[Q_INO].warnlimit;
769 * Adjust quota timers & warnings
/*
 * quotactl ->set_info: update grace-time and warn limits for one quota type.
 * Only the TIMER/WARNS field-mask bits are supported; the change is written
 * to the superblock quota field under sb_lock and then re-read into the
 * in-memory limits via bch2_sb_quota_read().  The qc_info_to_text() dump is
 * debug logging.  Rejected read-only or for types not enabled at mount.
 */
771 static int bch2_quota_set_info(struct super_block *sb, int type,
772 struct qc_info *info)
774 struct bch_fs *c = sb->s_fs_info;
775 struct bch_sb_field_quota *sb_quota;
776 struct bch_memquota_type *q;
780 struct printbuf buf = PRINTBUF;
782 qc_info_to_text(&buf, info);
783 pr_info("setting:\n%s", buf.buf);
787 if (sb->s_flags & SB_RDONLY)
793 if (!((1 << type) & enabled_qtypes(c)))
796 if (info->i_fieldmask &
797 ~(QC_SPC_TIMER|QC_INO_TIMER|QC_SPC_WARNS|QC_INO_WARNS))
800 q = &c->quotas[type];
802 mutex_lock(&c->sb_lock);
803 sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
805 ret = -BCH_ERR_ENOSPC_sb_quota;
809 if (info->i_fieldmask & QC_SPC_TIMER)
810 sb_quota->q[type].c[Q_SPC].timelimit =
811 cpu_to_le32(info->i_spc_timelimit);
813 if (info->i_fieldmask & QC_SPC_WARNS)
814 sb_quota->q[type].c[Q_SPC].warnlimit =
815 cpu_to_le32(info->i_spc_warnlimit);
817 if (info->i_fieldmask & QC_INO_TIMER)
818 sb_quota->q[type].c[Q_INO].timelimit =
819 cpu_to_le32(info->i_ino_timelimit);
821 if (info->i_fieldmask & QC_INO_WARNS)
822 sb_quota->q[type].c[Q_INO].warnlimit =
823 cpu_to_le32(info->i_ino_warnlimit);
825 bch2_sb_quota_read(c);
829 mutex_unlock(&c->sb_lock);
831 return bch2_err_class(ret);
834 /* Get/set individual quotas: */
/*
 * Fill a VFS qc_dqblk from an in-memory quota entry.  Space values are
 * converted from sectors to bytes (<< 9); inode values are passed through.
 */
836 static void __bch2_quota_get(struct qc_dqblk *dst, struct bch_memquota *src)
838 dst->d_space = src->c[Q_SPC].v << 9;
839 dst->d_spc_hardlimit = src->c[Q_SPC].hardlimit << 9;
840 dst->d_spc_softlimit = src->c[Q_SPC].softlimit << 9;
841 dst->d_spc_timer = src->c[Q_SPC].timer;
842 dst->d_spc_warns = src->c[Q_SPC].warns;
844 dst->d_ino_count = src->c[Q_INO].v;
845 dst->d_ino_hardlimit = src->c[Q_INO].hardlimit;
846 dst->d_ino_softlimit = src->c[Q_INO].softlimit;
847 dst->d_ino_timer = src->c[Q_INO].timer;
848 dst->d_ino_warns = src->c[Q_INO].warns;
/*
 * quotactl ->get_dqblk: look up one id in the in-memory table under the
 * type lock.  @qdq is zeroed first, so a missing entry reads as all-zero
 * usage/limits.
 */
851 static int bch2_get_quota(struct super_block *sb, struct kqid kqid,
852 struct qc_dqblk *qdq)
854 struct bch_fs *c = sb->s_fs_info;
855 struct bch_memquota_type *q = &c->quotas[kqid.type];
856 qid_t qid = from_kqid(&init_user_ns, kqid);
857 struct bch_memquota *mq;
859 memset(qdq, 0, sizeof(*qdq));
861 mutex_lock(&q->lock);
862 mq = genradix_ptr(&q->table, qid);
864 __bch2_quota_get(qdq, mq);
865 mutex_unlock(&q->lock);
/*
 * quotactl ->get_nextdqblk: scan the radix tree from @kqid onward for the
 * first entry that is not all-zero (compared against ZERO_PAGE to skip
 * untouched slots), fill @qdq, and update *kqid to the id found.
 * NOTE(review): the not-found return path is elided in this excerpt.
 */
870 static int bch2_get_next_quota(struct super_block *sb, struct kqid *kqid,
871 struct qc_dqblk *qdq)
873 struct bch_fs *c = sb->s_fs_info;
874 struct bch_memquota_type *q = &c->quotas[kqid->type];
875 qid_t qid = from_kqid(&init_user_ns, *kqid);
876 struct genradix_iter iter;
877 struct bch_memquota *mq;
880 mutex_lock(&q->lock);
882 genradix_for_each_from(&q->table, iter, mq, qid)
883 if (memcmp(mq, page_address(ZERO_PAGE(0)), sizeof(*mq))) {
884 __bch2_quota_get(qdq, mq);
885 *kqid = make_kqid(current_user_ns(), kqid->type, iter.pos);
891 mutex_unlock(&q->lock);
/*
 * Transactional helper for set_dqblk: read the existing quota key (SLOTS
 * iterator, so absent keys read as empty), start from its current value,
 * overlay only the limits selected by qdq->d_fieldmask (space limits are
 * converted from bytes to sectors, >> 9), and queue the btree update.
 */
895 static int bch2_set_quota_trans(struct btree_trans *trans,
896 struct bkey_i_quota *new_quota,
897 struct qc_dqblk *qdq)
899 struct btree_iter iter;
903 k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_quotas, new_quota->k.p,
904 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
909 if (k.k->type == KEY_TYPE_quota)
910 new_quota->v = *bkey_s_c_to_quota(k).v;
912 if (qdq->d_fieldmask & QC_SPC_SOFT)
913 new_quota->v.c[Q_SPC].softlimit = cpu_to_le64(qdq->d_spc_softlimit >> 9);
914 if (qdq->d_fieldmask & QC_SPC_HARD)
915 new_quota->v.c[Q_SPC].hardlimit = cpu_to_le64(qdq->d_spc_hardlimit >> 9);
917 if (qdq->d_fieldmask & QC_INO_SOFT)
918 new_quota->v.c[Q_INO].softlimit = cpu_to_le64(qdq->d_ino_softlimit);
919 if (qdq->d_fieldmask & QC_INO_HARD)
920 new_quota->v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit);
922 ret = bch2_trans_update(trans, &iter, &new_quota->k_i, 0);
923 bch2_trans_iter_exit(trans, &iter);
/*
 * quotactl ->set_dqblk: commit the new limits to the quotas btree via
 * bch2_set_quota_trans() and, on success, mirror them into the in-memory
 * table with __bch2_quota_set().  Key position is (quota type, id).
 * The qc_dqblk_to_text() dump is debug logging.  Rejected read-only.
 */
927 static int bch2_set_quota(struct super_block *sb, struct kqid qid,
928 struct qc_dqblk *qdq)
930 struct bch_fs *c = sb->s_fs_info;
931 struct bkey_i_quota new_quota;
935 struct printbuf buf = PRINTBUF;
937 qc_dqblk_to_text(&buf, qdq);
938 pr_info("setting:\n%s", buf.buf);
942 if (sb->s_flags & SB_RDONLY)
945 bkey_quota_init(&new_quota.k_i);
946 new_quota.k.p = POS(qid.type, from_kqid(&init_user_ns, qid));
948 ret = bch2_trans_do(c, NULL, NULL, 0,
949 bch2_set_quota_trans(&trans, &new_quota, qdq)) ?:
950 __bch2_quota_set(c, bkey_i_to_s_c(&new_quota.k_i), qdq);
/* VFS quotactl ops table exposing the handlers above to the quota syscalls. */
955 const struct quotactl_ops bch2_quotactl_operations = {
956 .quota_enable = bch2_quota_enable,
957 .quota_disable = bch2_quota_disable,
958 .rm_xquota = bch2_quota_remove,
960 .get_state = bch2_quota_get_state,
961 .set_info = bch2_quota_set_info,
963 .get_dqblk = bch2_get_quota,
964 .get_nextdqblk = bch2_get_next_quota,
965 .set_dqblk = bch2_set_quota,
968 #endif /* CONFIG_BCACHEFS_QUOTA */