1 // SPDX-License-Identifier: GPL-2.0
3 #include "btree_update.h"
/* Human-readable names for each quota type; presumably "usr"/"grp"/"prj" — initializers not visible in this listing. */
9 static const char * const bch2_quota_types[] = {
/* Human-readable names for each per-type counter; presumably space/inodes — initializers not visible here. */
15 static const char * const bch2_quota_counters[] = {
/*
 * Validate the quota section of the on-disk superblock: reject it when the
 * field is smaller than struct bch_sb_field_quota, reporting the sizes via
 * @err.  (Listing has gaps — the return paths are not visible here.)
 */
20 static int bch2_sb_quota_validate(struct bch_sb *sb, struct bch_sb_field *f,
23 struct bch_sb_field_quota *q = field_to_type(f, quota);
25 if (vstruct_bytes(&q->field) < sizeof(*q)) {
26 prt_printf(err, "wrong size (got %zu should be %zu)",
27 vstruct_bytes(&q->field), sizeof(*q));
/*
 * Pretty-print the superblock quota section: for every quota type, the flags
 * word followed by each counter's timelimit/warnlimit.
 */
34 static void bch2_sb_quota_to_text(struct printbuf *out, struct bch_sb *sb,
35 struct bch_sb_field *f)
37 struct bch_sb_field_quota *q = field_to_type(f, quota);
38 unsigned qtyp, counter;
40 for (qtyp = 0; qtyp < ARRAY_SIZE(q->q); qtyp++) {
41 prt_printf(out, "%s: flags %llx",
42 bch2_quota_types[qtyp],
43 le64_to_cpu(q->q[qtyp].flags));
		/* One " name timelimit N warnlimit N" chunk per counter, appended on the same line. */
45 for (counter = 0; counter < Q_COUNTERS; counter++)
46 prt_printf(out, " %s timelimit %u warnlimit %u",
47 bch2_quota_counters[counter],
48 le32_to_cpu(q->q[qtyp].c[counter].timelimit),
49 le32_to_cpu(q->q[qtyp].c[counter].warnlimit));
/* Superblock field ops hooking the quota section into generic sb validation/printing. */
55 const struct bch_sb_field_ops bch_sb_field_ops_quota = {
56 .validate = bch2_sb_quota_validate,
57 .to_text = bch2_sb_quota_to_text,
/*
 * bkey validation for KEY_TYPE_quota: the key's inode field encodes the quota
 * type (must be < QTYP_NR) and the value must be exactly a struct bch_quota.
 * Error-return lines fall in gaps of this listing.
 */
60 int bch2_quota_invalid(const struct bch_fs *c, struct bkey_s_c k,
61 int rw, struct printbuf *err)
63 if (k.k->p.inode >= QTYP_NR) {
64 prt_printf(err, "invalid quota type (%llu >= %u)",
65 k.k->p.inode, QTYP_NR);
69 if (bkey_val_bytes(k.k) != sizeof(struct bch_quota)) {
70 prt_printf(err, "incorrect value size (%zu != %zu)",
71 bkey_val_bytes(k.k), sizeof(struct bch_quota));
/*
 * Pretty-print a quota bkey: hard/soft limit for each counter of the entry.
 */
78 void bch2_quota_to_text(struct printbuf *out, struct bch_fs *c,
81 struct bkey_s_c_quota dq = bkey_s_c_to_quota(k);
84 for (i = 0; i < Q_COUNTERS; i++)
85 prt_printf(out, "%s hardlimit %llu softlimit %llu",
86 bch2_quota_counters[i],
87 le64_to_cpu(dq.v->c[i].hardlimit),
88 le64_to_cpu(dq.v->c[i].softlimit));
91 #ifdef CONFIG_BCACHEFS_QUOTA
93 #include <linux/cred.h>
95 #include <linux/quota.h>
/*
 * Return the index of the next enabled quota type: @i plus the lowest set bit
 * of the (pre-shifted, presumably) @qtypes mask, or QTYP_NR when no bits
 * remain — used as the loop terminator by for_each_set_qtype().
 */
97 static inline unsigned __next_qtype(unsigned i, unsigned qtypes)
100 return qtypes ? i + __ffs(qtypes) : QTYP_NR;
/*
 * Iterate over the quota types whose bits are set in _qtypes, setting _i to
 * the type index and _q to &_c->quotas[_i] on each pass.  The loop header's
 * init/step lines fall in gaps of this listing.
 */
103 #define for_each_set_qtype(_c, _i, _q, _qtypes) \
105 (_i = __next_qtype(_i, _qtypes), \
106 _q = &(_c)->quotas[_i], \
/*
 * Whether hard limits should be bypassed for the current task: true for
 * CAP_SYS_RESOURCE.  NOTE(review): the trailing lines reference a `dquot`
 * that is not declared in this listing — this looks like disabled/dead code
 * (e.g. under #if 0 in lines this listing omits); confirm against upstream.
 */
110 static bool ignore_hardlimit(struct bch_memquota_type *q)
112 if (capable(CAP_SYS_RESOURCE))
115 struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
117 return capable(CAP_SYS_RESOURCE) &&
118 (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
119 !(info->dqi_flags & DQF_ROOT_SQUASH));
/* Quota warning message kinds; indexes the first dimension of quota_nl[] below. */
125 SOFTWARN, /* Softlimit reached */
126 SOFTLONGWARN, /* Grace time expired */
127 HARDWARN, /* Hardlimit reached */
129 HARDBELOW, /* Usage got below inode hardlimit */
130 SOFTBELOW, /* Usage got below inode softlimit */
/*
 * Map (message kind, counter) to the VFS quota-netlink warning code, so space
 * (Q_SPC) and inode (Q_INO) events raise the matching QUOTA_NL_* warning.
 */
133 static int quota_nl[][Q_COUNTERS] = {
134 [HARDWARN][Q_SPC] = QUOTA_NL_BHARDWARN,
135 [SOFTLONGWARN][Q_SPC] = QUOTA_NL_BSOFTLONGWARN,
136 [SOFTWARN][Q_SPC] = QUOTA_NL_BSOFTWARN,
137 [HARDBELOW][Q_SPC] = QUOTA_NL_BHARDBELOW,
138 [SOFTBELOW][Q_SPC] = QUOTA_NL_BSOFTBELOW,
140 [HARDWARN][Q_INO] = QUOTA_NL_IHARDWARN,
141 [SOFTLONGWARN][Q_INO] = QUOTA_NL_ISOFTLONGWARN,
142 [SOFTWARN][Q_INO] = QUOTA_NL_ISOFTWARN,
143 [HARDBELOW][Q_INO] = QUOTA_NL_IHARDBELOW,
144 [SOFTBELOW][Q_INO] = QUOTA_NL_ISOFTBELOW,
/* Tail of struct quota_msgs (declaration head omitted by this listing): room for one message per (type, counter). */
152 } m[QTYP_NR * Q_COUNTERS];
/*
 * Queue one warning message (qtype + netlink code) into @msgs for later
 * delivery by flush_warnings().  The msgs->nr increment presumably follows in
 * a line this listing omits — confirm.
 */
155 static void prepare_msg(unsigned qtype,
156 enum quota_counters counter,
157 struct quota_msgs *msgs,
158 enum quota_msg msg_type)
160 BUG_ON(msgs->nr >= ARRAY_SIZE(msgs->m));
162 msgs->m[msgs->nr].qtype = qtype;
163 msgs->m[msgs->nr].msg = quota_nl[msg_type][counter];
/*
 * Queue a warning only if this msg_type has not already been issued for the
 * counter (deduplicated via the warning_issued bitmask).
 */
167 static void prepare_warning(struct memquota_counter *qc,
169 enum quota_counters counter,
170 struct quota_msgs *msgs,
171 enum quota_msg msg_type)
173 if (qc->warning_issued & (1 << msg_type))
176 prepare_msg(qtype, counter, msgs, msg_type);
/*
 * Deliver every queued quota warning over the VFS quota netlink interface.
 * NOTE(review): qid.q[] is indexed by the message number i rather than by
 * msgs->m[i].qtype; since m[] can hold QTYP_NR * Q_COUNTERS entries but
 * qid.q[] only QTYP_NR, this can read past qid.q[] and pair the wrong id
 * with a warning — confirm against upstream before relying on it.
 */
179 static void flush_warnings(struct bch_qid qid,
180 struct super_block *sb,
181 struct quota_msgs *msgs)
185 for (i = 0; i < msgs->nr; i++)
186 quota_send_warning(make_kqid(&init_user_ns, msgs->m[i].qtype, qid.q[i]),
187 sb->s_dev, msgs->m[i].msg);
/*
 * Check a proposed new counter value `n` against the in-memory quota limits:
 *  - clears HARDWARN/SOFTWARN state (queueing *BELOW messages) once usage
 *    drops back under the respective limit;
 *  - on exceeding the hard limit, or the soft limit past its grace timer,
 *    queues a warning and (for KEY_TYPE_QUOTA_PREALLOC) presumably fails —
 *    the return statements fall in gaps of this listing;
 *  - on first crossing the soft limit, starts the grace timer.
 * KEY_TYPE_QUOTA_NOCHECK skips enforcement entirely.
 * Caller must hold q->lock (taken by the callers visible below).
 */
190 static int bch2_quota_check_limit(struct bch_fs *c,
192 struct bch_memquota *mq,
193 struct quota_msgs *msgs,
194 enum quota_counters counter,
196 enum quota_acct_mode mode)
198 struct bch_memquota_type *q = &c->quotas[qtype];
199 struct memquota_counter *qc = &mq->c[counter];
204 if (mode == KEY_TYPE_QUOTA_NOCHECK)
	/* Usage fell back below the hard limit: clear state, notify. */
208 if (n < qc->hardlimit &&
209 (qc->warning_issued & (1 << HARDWARN))) {
210 qc->warning_issued &= ~(1 << HARDWARN);
211 prepare_msg(qtype, counter, msgs, HARDBELOW);
	/* Usage fell back below the soft limit: clear state, notify. */
214 if (n < qc->softlimit &&
215 (qc->warning_issued & (1 << SOFTWARN))) {
216 qc->warning_issued &= ~(1 << SOFTWARN);
217 prepare_msg(qtype, counter, msgs, SOFTBELOW);
220 qc->warning_issued = 0;
226 !ignore_hardlimit(q)) {
227 if (mode == KEY_TYPE_QUOTA_PREALLOC)
230 prepare_warning(qc, qtype, counter, msgs, HARDWARN);
	/* Soft limit exceeded AND grace period expired: treat like a hard failure. */
236 ktime_get_real_seconds() >= qc->timer &&
237 !ignore_hardlimit(q)) {
238 if (mode == KEY_TYPE_QUOTA_PREALLOC)
241 prepare_warning(qc, qtype, counter, msgs, SOFTLONGWARN);
247 if (mode == KEY_TYPE_QUOTA_PREALLOC)
250 prepare_warning(qc, qtype, counter, msgs, SOFTWARN);
252 /* XXX is this the right one? */
253 qc->timer = ktime_get_real_seconds() +
254 q->limits[counter].warnlimit;
/*
 * Account a delta @v against @counter for every enabled quota type of @qid:
 * lock each type's mutex (nested by type index to keep lock ordering
 * deterministic), look up / allocate the in-memory entry, enforce limits via
 * bch2_quota_check_limit(), apply the delta, then unlock and send any queued
 * warnings.  Allocation-failure / limit-failure unwinding falls in gaps of
 * this listing.
 */
260 int bch2_quota_acct(struct bch_fs *c, struct bch_qid qid,
261 enum quota_counters counter, s64 v,
262 enum quota_acct_mode mode)
264 unsigned qtypes = enabled_qtypes(c);
265 struct bch_memquota_type *q;
266 struct bch_memquota *mq[QTYP_NR];
267 struct quota_msgs msgs;
271 memset(&msgs, 0, sizeof(msgs));
273 for_each_set_qtype(c, i, q, qtypes)
274 mutex_lock_nested(&q->lock, i);
276 for_each_set_qtype(c, i, q, qtypes) {
277 mq[i] = genradix_ptr_alloc(&q->table, qid.q[i], GFP_NOFS);
283 ret = bch2_quota_check_limit(c, i, mq[i], &msgs, counter, v, mode);
	/* All checks passed: commit the delta to every type. */
288 for_each_set_qtype(c, i, q, qtypes)
289 mq[i]->c[counter].v += v;
291 for_each_set_qtype(c, i, q, qtypes)
292 mutex_unlock(&q->lock);
294 flush_warnings(qid, c->vfs_sb, &msgs);
/*
 * Move @v units of @counter usage from src_q to dst_q.  The BUG_ONs assert
 * the source has enough usage to give up and that the destination does not
 * overflow; caller holds the relevant quota-type locks.
 */
299 static void __bch2_quota_transfer(struct bch_memquota *src_q,
300 struct bch_memquota *dst_q,
301 enum quota_counters counter, s64 v)
303 BUG_ON(v > src_q->c[counter].v);
304 BUG_ON(v + dst_q->c[counter].v < v);
306 src_q->c[counter].v -= v;
307 dst_q->c[counter].v += v;
/*
 * Transfer an inode's accounting (@space sectors plus one inode) from the
 * src ids to the dst ids for each requested-and-enabled quota type: lock all
 * types (nested by index), allocate both entries, check the destination's
 * space and inode limits, then move the usage.  Error unwinding (e.g. on
 * allocation failure) falls in gaps of this listing.
 */
310 int bch2_quota_transfer(struct bch_fs *c, unsigned qtypes,
312 struct bch_qid src, u64 space,
313 enum quota_acct_mode mode)
315 struct bch_memquota_type *q;
316 struct bch_memquota *src_q[3], *dst_q[3];
317 struct quota_msgs msgs;
321 qtypes &= enabled_qtypes(c);
323 memset(&msgs, 0, sizeof(msgs));
325 for_each_set_qtype(c, i, q, qtypes)
326 mutex_lock_nested(&q->lock, i);
328 for_each_set_qtype(c, i, q, qtypes) {
329 src_q[i] = genradix_ptr_alloc(&q->table, src.q[i], GFP_NOFS);
330 dst_q[i] = genradix_ptr_alloc(&q->table, dst.q[i], GFP_NOFS);
332 if (!src_q[i] || !dst_q[i]) {
	/* Would the destination exceed its space / inode limits after the move? */
337 ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_SPC,
338 dst_q[i]->c[Q_SPC].v + space,
343 ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_INO,
344 dst_q[i]->c[Q_INO].v + 1,
350 for_each_set_qtype(c, i, q, qtypes) {
351 __bch2_quota_transfer(src_q[i], dst_q[i], Q_SPC, space);
352 __bch2_quota_transfer(src_q[i], dst_q[i], Q_INO, 1);
356 for_each_set_qtype(c, i, q, qtypes)
357 mutex_unlock(&q->lock);
359 flush_warnings(dst, c->vfs_sb, &msgs);
/*
 * Load one on-disk quota key into the in-memory table: allocate the entry for
 * (type = k.k->p.inode, id = k.k->p.offset) and copy the hard/soft limits.
 * The allocation-failure return falls in a gap of this listing.
 */
364 static int __bch2_quota_set(struct bch_fs *c, struct bkey_s_c k)
366 struct bkey_s_c_quota dq;
367 struct bch_memquota_type *q;
368 struct bch_memquota *mq;
371 BUG_ON(k.k->p.inode >= QTYP_NR);
375 dq = bkey_s_c_to_quota(k);
376 q = &c->quotas[k.k->p.inode];
378 mutex_lock(&q->lock);
379 mq = genradix_ptr_alloc(&q->table, k.k->p.offset, GFP_KERNEL);
381 mutex_unlock(&q->lock);
385 for (i = 0; i < Q_COUNTERS; i++) {
386 mq->c[i].hardlimit = le64_to_cpu(dq.v->c[i].hardlimit);
387 mq->c[i].softlimit = le64_to_cpu(dq.v->c[i].softlimit);
390 mutex_unlock(&q->lock);
/*
 * Walk the quotas btree for one quota @type and populate the in-memory table
 * from every key found, stopping once keys for a different type appear.
 */
396 static int bch2_quota_init_type(struct bch_fs *c, enum quota_types type)
398 struct btree_trans trans;
399 struct btree_iter iter;
403 bch2_trans_init(&trans, c, 0, 0);
405 for_each_btree_key(&trans, iter, BTREE_ID_quotas, POS(type, 0),
406 BTREE_ITER_PREFETCH, k, ret) {
407 if (k.k->p.inode != type)
410 ret = __bch2_quota_set(c, k);
414 bch2_trans_iter_exit(&trans, &iter);
416 bch2_trans_exit(&trans);
/* Tear down quota state: free every per-type in-memory quota radix tree. */
420 void bch2_fs_quota_exit(struct bch_fs *c)
424 for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
425 genradix_free(&c->quotas[i].table);
/* Early init: set up the per-quota-type locks (tables are allocated lazily). */
428 void bch2_fs_quota_init(struct bch_fs *c)
432 for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
433 mutex_init(&c->quotas[i].lock);
/*
 * Copy the per-type per-counter timelimit/warnlimit settings from the
 * superblock quota section into the in-memory c->quotas[] limits.  Caller
 * presumably holds c->sb_lock (all visible callers take it) — confirm.
 * The missing-section early return falls in a gap of this listing.
 */
436 static void bch2_sb_quota_read(struct bch_fs *c)
438 struct bch_sb_field_quota *sb_quota;
441 sb_quota = bch2_sb_get_quota(c->disk_sb.sb);
445 for (i = 0; i < QTYP_NR; i++) {
446 struct bch_memquota_type *q = &c->quotas[i];
448 for (j = 0; j < Q_COUNTERS; j++) {
449 q->limits[j].timelimit =
450 le32_to_cpu(sb_quota->q[i].c[j].timelimit);
451 q->limits[j].warnlimit =
452 le32_to_cpu(sb_quota->q[i].c[j].warnlimit);
/*
 * Account one inode key at the iterator position: skip inodes that live in
 * snapshots (no quota accounting there), otherwise unpack the inode and add
 * its sector and inode counts with KEY_TYPE_QUOTA_NOCHECK (initial scan must
 * never fail on limits).  Finally advance the iterator one offset.
 */
457 static int bch2_fs_quota_read_inode(struct btree_trans *trans,
458 struct btree_iter *iter)
460 struct bch_fs *c = trans->c;
461 struct bch_inode_unpacked u;
462 struct bch_subvolume subvolume;
466 k = bch2_btree_iter_peek(iter);
474 ret = bch2_snapshot_get_subvol(trans, k.k->p.snapshot, &subvolume);
479 * We don't do quota accounting in snapshots:
481 if (BCH_SUBVOLUME_SNAP(&subvolume))
484 if (!bkey_is_inode(k.k))
487 ret = bch2_inode_unpack(k, &u);
491 bch2_quota_acct(c, bch_qid(&u), Q_SPC, u.bi_sectors,
492 KEY_TYPE_QUOTA_NOCHECK);
493 bch2_quota_acct(c, bch_qid(&u), Q_INO, 1,
494 KEY_TYPE_QUOTA_NOCHECK);
496 bch2_btree_iter_set_pos(iter, POS(iter->pos.inode, iter->pos.offset + 1));
/*
 * Mount-time quota bring-up: read limit settings from the superblock, load
 * every enabled quota type's on-disk entries, then scan all inodes (across
 * all snapshots) and account their usage, restarting on transaction
 * restarts via lockrestart_do().
 */
500 int bch2_fs_quota_read(struct bch_fs *c)
502 unsigned i, qtypes = enabled_qtypes(c);
503 struct bch_memquota_type *q;
504 struct btree_trans trans;
505 struct btree_iter iter;
508 mutex_lock(&c->sb_lock);
509 bch2_sb_quota_read(c);
510 mutex_unlock(&c->sb_lock);
512 for_each_set_qtype(c, i, q, qtypes) {
513 ret = bch2_quota_init_type(c, i);
518 bch2_trans_init(&trans, c, 0, 0);
520 bch2_trans_iter_init(&trans, &iter, BTREE_ID_inodes, POS_MIN,
523 BTREE_ITER_ALL_SNAPSHOTS);
525 ret = lockrestart_do(&trans,
526 bch2_fs_quota_read_inode(&trans, &iter));
528 bch2_trans_iter_exit(&trans, &iter);
530 bch2_trans_exit(&trans);
531 return ret < 0 ? ret : 0;
534 /* Enable/disable/delete quotas for an entire filesystem: */
/*
 * quotactl_ops.quota_enable: enforcement can be toggled at runtime, but
 * accounting (the *_ACCT flags) must have been enabled via mount options —
 * reject otherwise.  Sets the matching enforcement bits in the superblock
 * under sb_lock; the sb write presumably happens in a line this listing
 * omits (before the unlock) — confirm.
 */
536 static int bch2_quota_enable(struct super_block *sb, unsigned uflags)
538 struct bch_fs *c = sb->s_fs_info;
540 if (sb->s_flags & SB_RDONLY)
543 /* Accounting must be enabled at mount time: */
544 if (uflags & (FS_QUOTA_UDQ_ACCT|FS_QUOTA_GDQ_ACCT|FS_QUOTA_PDQ_ACCT))
547 /* Can't enable enforcement without accounting: */
548 if ((uflags & FS_QUOTA_UDQ_ENFD) && !c->opts.usrquota)
551 if ((uflags & FS_QUOTA_GDQ_ENFD) && !c->opts.grpquota)
554 if (uflags & FS_QUOTA_PDQ_ENFD && !c->opts.prjquota)
557 mutex_lock(&c->sb_lock);
558 if (uflags & FS_QUOTA_UDQ_ENFD)
559 SET_BCH_SB_USRQUOTA(c->disk_sb.sb, true);
561 if (uflags & FS_QUOTA_GDQ_ENFD)
562 SET_BCH_SB_GRPQUOTA(c->disk_sb.sb, true);
564 if (uflags & FS_QUOTA_PDQ_ENFD)
565 SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, true);
568 mutex_unlock(&c->sb_lock);
/*
 * quotactl_ops.quota_disable: clear the requested enforcement bits in the
 * superblock under sb_lock (mirror image of bch2_quota_enable()).
 */
573 static int bch2_quota_disable(struct super_block *sb, unsigned uflags)
575 struct bch_fs *c = sb->s_fs_info;
577 if (sb->s_flags & SB_RDONLY)
580 mutex_lock(&c->sb_lock);
581 if (uflags & FS_QUOTA_UDQ_ENFD)
582 SET_BCH_SB_USRQUOTA(c->disk_sb.sb, false);
584 if (uflags & FS_QUOTA_GDQ_ENFD)
585 SET_BCH_SB_GRPQUOTA(c->disk_sb.sb, false);
587 if (uflags & FS_QUOTA_PDQ_ENFD)
588 SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, false);
591 mutex_unlock(&c->sb_lock);
/*
 * quotactl_ops.rm_xquota: delete all on-disk quota entries for the requested
 * types by deleting the [POS(type, 0), POS(type + 1, 0)) ranges of the
 * quotas btree.  Refused when the corresponding accounting mount option is
 * still active (can't delete quotas that are in use); the start-of-range
 * arguments and error returns fall in gaps of this listing.
 */
596 static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
598 struct bch_fs *c = sb->s_fs_info;
601 if (sb->s_flags & SB_RDONLY)
604 if (uflags & FS_USER_QUOTA) {
605 if (c->opts.usrquota)
608 ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
610 POS(QTYP_USR + 1, 0),
616 if (uflags & FS_GROUP_QUOTA) {
617 if (c->opts.grpquota)
620 ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
622 POS(QTYP_GRP + 1, 0),
628 if (uflags & FS_PROJ_QUOTA) {
629 if (c->opts.prjquota)
632 ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
634 POS(QTYP_PRJ + 1, 0),
644 * Return quota status information, such as enforcements, quota file inode
647 static int bch2_quota_get_state(struct super_block *sb, struct qc_state *state)
649 struct bch_fs *c = sb->s_fs_info;
650 unsigned qtypes = enabled_qtypes(c);
653 memset(state, 0, sizeof(*state));
655 for (i = 0; i < QTYP_NR; i++) {
		/* Quotas live in the fs btree, not a user-visible file. */
656 state->s_state[i].flags |= QCI_SYSFILE;
658 if (!(qtypes & (1 << i)))
661 state->s_state[i].flags |= QCI_ACCT_ENABLED;
663 state->s_state[i].spc_timelimit = c->quotas[i].limits[Q_SPC].timelimit;
664 state->s_state[i].spc_warnlimit = c->quotas[i].limits[Q_SPC].warnlimit;
666 state->s_state[i].ino_timelimit = c->quotas[i].limits[Q_INO].timelimit;
667 state->s_state[i].ino_warnlimit = c->quotas[i].limits[Q_INO].warnlimit;
674 * Adjust quota timers & warnings
676 static int bch2_quota_set_info(struct super_block *sb, int type,
677 struct qc_info *info)
679 struct bch_fs *c = sb->s_fs_info;
680 struct bch_sb_field_quota *sb_quota;
681 struct bch_memquota_type *q;
683 if (sb->s_flags & SB_RDONLY)
689 if (!((1 << type) & enabled_qtypes(c)))
	/* Only timer and warn-count fields are supported. */
692 if (info->i_fieldmask &
693 ~(QC_SPC_TIMER|QC_INO_TIMER|QC_SPC_WARNS|QC_INO_WARNS))
696 q = &c->quotas[type];
698 mutex_lock(&c->sb_lock);
699 sb_quota = bch2_sb_get_quota(c->disk_sb.sb);
	/* No quota section yet: create one (resize returning NULL is presumably handled in omitted lines — confirm). */
701 sb_quota = bch2_sb_resize_quota(&c->disk_sb,
702 sizeof(*sb_quota) / sizeof(u64));
707 if (info->i_fieldmask & QC_SPC_TIMER)
708 sb_quota->q[type].c[Q_SPC].timelimit =
709 cpu_to_le32(info->i_spc_timelimit);
711 if (info->i_fieldmask & QC_SPC_WARNS)
712 sb_quota->q[type].c[Q_SPC].warnlimit =
713 cpu_to_le32(info->i_spc_warnlimit);
715 if (info->i_fieldmask & QC_INO_TIMER)
716 sb_quota->q[type].c[Q_INO].timelimit =
717 cpu_to_le32(info->i_ino_timelimit);
719 if (info->i_fieldmask & QC_INO_WARNS)
720 sb_quota->q[type].c[Q_INO].warnlimit =
721 cpu_to_le32(info->i_ino_warnlimit);
	/* Refresh the in-memory limits from the just-updated sb section. */
723 bch2_sb_quota_read(c);
726 mutex_unlock(&c->sb_lock);
731 /* Get/set individual quotas: */
/*
 * Fill a VFS qc_dqblk from an in-memory quota entry.  Space values are
 * stored in 512-byte sectors, hence the << 9 conversion to bytes.
 */
733 static void __bch2_quota_get(struct qc_dqblk *dst, struct bch_memquota *src)
735 dst->d_space = src->c[Q_SPC].v << 9;
736 dst->d_spc_hardlimit = src->c[Q_SPC].hardlimit << 9;
737 dst->d_spc_softlimit = src->c[Q_SPC].softlimit << 9;
738 dst->d_spc_timer = src->c[Q_SPC].timer;
739 dst->d_spc_warns = src->c[Q_SPC].warns;
741 dst->d_ino_count = src->c[Q_INO].v;
742 dst->d_ino_hardlimit = src->c[Q_INO].hardlimit;
743 dst->d_ino_softlimit = src->c[Q_INO].softlimit;
744 dst->d_ino_timer = src->c[Q_INO].timer;
745 dst->d_ino_warns = src->c[Q_INO].warns;
/*
 * quotactl_ops.get_dqblk: look up the in-memory entry for @kqid under the
 * type lock and copy it out; qdq is pre-zeroed so a missing entry reads as
 * all-zero limits/usage.
 */
748 static int bch2_get_quota(struct super_block *sb, struct kqid kqid,
749 struct qc_dqblk *qdq)
751 struct bch_fs *c = sb->s_fs_info;
752 struct bch_memquota_type *q = &c->quotas[kqid.type];
753 qid_t qid = from_kqid(&init_user_ns, kqid);
754 struct bch_memquota *mq;
756 memset(qdq, 0, sizeof(*qdq));
758 mutex_lock(&q->lock);
759 mq = genradix_ptr(&q->table, qid);
761 __bch2_quota_get(qdq, mq);
762 mutex_unlock(&q->lock);
/*
 * quotactl_ops.get_nextdqblk: scan the genradix from @kqid onward for the
 * first non-empty entry (detected by memcmp against the shared zero page,
 * since genradix slots are zero-initialized), return it and update *kqid to
 * the id found.  The not-found return falls in a gap of this listing.
 */
767 static int bch2_get_next_quota(struct super_block *sb, struct kqid *kqid,
768 struct qc_dqblk *qdq)
770 struct bch_fs *c = sb->s_fs_info;
771 struct bch_memquota_type *q = &c->quotas[kqid->type];
772 qid_t qid = from_kqid(&init_user_ns, *kqid);
773 struct genradix_iter iter;
774 struct bch_memquota *mq;
777 mutex_lock(&q->lock);
779 genradix_for_each_from(&q->table, iter, mq, qid)
780 if (memcmp(mq, page_address(ZERO_PAGE(0)), sizeof(*mq))) {
781 __bch2_quota_get(qdq, mq);
782 *kqid = make_kqid(current_user_ns(), kqid->type, iter.pos);
788 mutex_unlock(&q->lock);
/*
 * Transactional helper for bch2_set_quota(): read the existing quota key at
 * new_quota->k.p (keeping current limits as the starting point), overwrite
 * only the fields selected by qdq->d_fieldmask (byte limits converted to
 * 512-byte sectors via >> 9), and queue the btree update.
 */
792 static int bch2_set_quota_trans(struct btree_trans *trans,
793 struct bkey_i_quota *new_quota,
794 struct qc_dqblk *qdq)
796 struct btree_iter iter;
800 bch2_trans_iter_init(trans, &iter, BTREE_ID_quotas, new_quota->k.p,
801 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
802 k = bch2_btree_iter_peek_slot(&iter);
808 if (k.k->type == KEY_TYPE_quota)
809 new_quota->v = *bkey_s_c_to_quota(k).v;
811 if (qdq->d_fieldmask & QC_SPC_SOFT)
812 new_quota->v.c[Q_SPC].softlimit = cpu_to_le64(qdq->d_spc_softlimit >> 9);
813 if (qdq->d_fieldmask & QC_SPC_HARD)
814 new_quota->v.c[Q_SPC].hardlimit = cpu_to_le64(qdq->d_spc_hardlimit >> 9);
816 if (qdq->d_fieldmask & QC_INO_SOFT)
817 new_quota->v.c[Q_INO].softlimit = cpu_to_le64(qdq->d_ino_softlimit);
818 if (qdq->d_fieldmask & QC_INO_HARD)
819 new_quota->v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit);
821 ret = bch2_trans_update(trans, &iter, &new_quota->k_i, 0);
822 bch2_trans_iter_exit(trans, &iter);
/*
 * quotactl_ops.set_dqblk: build a quota key at (type, id), commit the limit
 * changes through a btree transaction, and on success mirror them into the
 * in-memory table via __bch2_quota_set().
 */
826 static int bch2_set_quota(struct super_block *sb, struct kqid qid,
827 struct qc_dqblk *qdq)
829 struct bch_fs *c = sb->s_fs_info;
830 struct bkey_i_quota new_quota;
833 if (sb->s_flags & SB_RDONLY)
836 bkey_quota_init(&new_quota.k_i);
837 new_quota.k.p = POS(qid.type, from_kqid(&init_user_ns, qid));
839 ret = bch2_trans_do(c, NULL, NULL, 0,
840 bch2_set_quota_trans(&trans, &new_quota, qdq)) ?:
841 __bch2_quota_set(c, bkey_i_to_s_c(&new_quota.k_i));
/* VFS quotactl entry points for bcachefs (wired up by the mount code). */
846 const struct quotactl_ops bch2_quotactl_operations = {
847 .quota_enable = bch2_quota_enable,
848 .quota_disable = bch2_quota_disable,
849 .rm_xquota = bch2_quota_remove,
851 .get_state = bch2_quota_get_state,
852 .set_info = bch2_quota_set_info,
854 .get_dqblk = bch2_get_quota,
855 .get_nextdqblk = bch2_get_next_quota,
856 .set_dqblk = bch2_set_quota,
859 #endif /* CONFIG_BCACHEFS_QUOTA */