From 494421ee6e85514f90bb316d77e1dd4f7dad3420 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Sat, 15 Oct 2022 05:29:10 -0400
Subject: [PATCH] Update bcachefs sources to 44be8c1da2 fixup! bcachefs: Btree key cache improvements

Signed-off-by: Kent Overstreet
---
 .bcachefs_revision            |  2 +-
 libbcachefs/btree_iter.c      |  7 +++++
 libbcachefs/btree_key_cache.c | 23 ++++++++++-------
 libbcachefs/fs-io.c           | 18 +++++--------
 libbcachefs/journal_io.c      |  5 ----
 libbcachefs/quota.c           | 48 ++++++++++++++++-------------------
 6 files changed, 50 insertions(+), 53 deletions(-)

diff --git a/.bcachefs_revision b/.bcachefs_revision
index 11194e5..da204b9 100644
--- a/.bcachefs_revision
+++ b/.bcachefs_revision
@@ -1 +1 @@
-3e93567c5196ef0c80e2ac3c08295130d858dfd6
+44be8c1da2e1d4edb23d5dcf3b522971c245c3f6
diff --git a/libbcachefs/btree_iter.c b/libbcachefs/btree_iter.c
index 0dfde9f..2aa5655 100644
--- a/libbcachefs/btree_iter.c
+++ b/libbcachefs/btree_iter.c
@@ -3041,6 +3041,13 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
 
 void bch2_fs_btree_iter_exit(struct bch_fs *c)
 {
+	struct btree_transaction_stats *s;
+
+	for (s = c->btree_transaction_stats;
+	     s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
+	     s++)
+		kfree(s->max_paths_text);
+
 	if (c->btree_trans_barrier_initialized)
 		cleanup_srcu_struct(&c->btree_trans_barrier);
 	mempool_exit(&c->btree_trans_mem_pool);
diff --git a/libbcachefs/btree_key_cache.c b/libbcachefs/btree_key_cache.c
index 958feac..b8ed25b 100644
--- a/libbcachefs/btree_key_cache.c
+++ b/libbcachefs/btree_key_cache.c
@@ -891,15 +891,20 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
 
 	mutex_lock(&bc->lock);
 
-	rcu_read_lock();
-	tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
-	if (tbl)
-		for (i = 0; i < tbl->size; i++)
-			rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
-				bkey_cached_evict(bc, ck);
-				list_add(&ck->list, &bc->freed_nonpcpu);
-			}
-	rcu_read_unlock();
+	/*
+	 * The loop is needed to guard against racing with rehash:
+	 */
+	while (atomic_long_read(&bc->nr_keys)) {
+		rcu_read_lock();
+		tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
+		if (tbl)
+			for (i = 0; i < tbl->size; i++)
+				rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
+					bkey_cached_evict(bc, ck);
+					list_add(&ck->list, &bc->freed_nonpcpu);
+				}
+		rcu_read_unlock();
+	}
 
 #ifdef __KERNEL__
 	for_each_possible_cpu(cpu) {
diff --git a/libbcachefs/fs-io.c b/libbcachefs/fs-io.c
index 02ef343..7429206 100644
--- a/libbcachefs/fs-io.c
+++ b/libbcachefs/fs-io.c
@@ -606,7 +606,7 @@ static void bch2_page_reservation_put(struct bch_fs *c,
 static int bch2_page_reservation_get(struct bch_fs *c,
 			struct bch_inode_info *inode, struct page *page,
 			struct bch2_page_reservation *res,
-			unsigned offset, unsigned len, bool check_enospc)
+			unsigned offset, unsigned len)
 {
 	struct bch_page_state *s = bch2_page_state_create(page, 0);
 	unsigned i, disk_sectors = 0, quota_sectors = 0;
@@ -626,19 +626,14 @@ static int bch2_page_reservation_get(struct bch_fs *c,
 	}
 
 	if (disk_sectors) {
-		ret = bch2_disk_reservation_add(c, &res->disk,
-						disk_sectors,
-						!check_enospc
-						? BCH_DISK_RESERVATION_NOFAIL
-						: 0);
+		ret = bch2_disk_reservation_add(c, &res->disk, disk_sectors, 0);
 		if (unlikely(ret))
 			return ret;
 	}
 
 	if (quota_sectors) {
 		ret = bch2_quota_reservation_add(c, inode, &res->quota,
-						 quota_sectors,
-						 check_enospc);
+						 quota_sectors, true);
 		if (unlikely(ret)) {
 			struct disk_reservation tmp = {
 				.sectors = disk_sectors
@@ -822,7 +817,7 @@ vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
 		}
 	}
 
-	if (bch2_page_reservation_get(c, inode, page, &res, 0, len, true)) {
+	if (bch2_page_reservation_get(c, inode, page, &res, 0, len)) {
 		unlock_page(page);
 		ret = VM_FAULT_SIGBUS;
 		goto out;
@@ -1530,8 +1525,7 @@ out:
 		goto err;
 	}
 
-	ret = bch2_page_reservation_get(c, inode, page, res,
-					offset, len, true);
+	ret = bch2_page_reservation_get(c, inode, page, res, offset, len);
 	if (ret) {
 		if (!PageUptodate(page)) {
 			/*
@@ -1673,7 +1667,7 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
 		}
 
 		ret = bch2_page_reservation_get(c, inode, page, &res,
-						pg_offset, pg_len, true);
+						pg_offset, pg_len);
 		if (ret)
 			goto out;
 
diff --git a/libbcachefs/journal_io.c b/libbcachefs/journal_io.c
index 68113a0..c4922c6 100644
--- a/libbcachefs/journal_io.c
+++ b/libbcachefs/journal_io.c
@@ -1162,11 +1162,6 @@ int bch2_journal_read(struct bch_fs *c, u64 *blacklist_seq, u64 *start_seq)
 					 le64_to_cpu(i->j.seq)))
 			i->j.last_seq = i->j.seq;
 
-		pr_info("last flush %llu-%llu csum good %u",
-			le64_to_cpu(i->j.last_seq),
-			le64_to_cpu(i->j.seq),
-			i->csum_good);
-
 		last_seq	= le64_to_cpu(i->j.last_seq);
 		*blacklist_seq	= le64_to_cpu(i->j.seq) + 1;
 		break;
diff --git a/libbcachefs/quota.c b/libbcachefs/quota.c
index ad7130a..db81727 100644
--- a/libbcachefs/quota.c
+++ b/libbcachefs/quota.c
@@ -332,34 +332,20 @@ static int bch2_quota_check_limit(struct bch_fs *c,
 	if (qc->hardlimit &&
 	    qc->hardlimit < n &&
 	    !ignore_hardlimit(q)) {
-		if (mode == KEY_TYPE_QUOTA_PREALLOC)
-			return -EDQUOT;
-
 		prepare_warning(qc, qtype, counter, msgs, HARDWARN);
+		return -EDQUOT;
 	}
 
 	if (qc->softlimit &&
-	    qc->softlimit < n &&
-	    qc->timer &&
-	    ktime_get_real_seconds() >= qc->timer &&
-	    !ignore_hardlimit(q)) {
-		if (mode == KEY_TYPE_QUOTA_PREALLOC)
-			return -EDQUOT;
-
-		prepare_warning(qc, qtype, counter, msgs, SOFTLONGWARN);
-	}
-
-	if (qc->softlimit &&
-	    qc->softlimit < n &&
-	    qc->timer == 0) {
-		if (mode == KEY_TYPE_QUOTA_PREALLOC)
+	    qc->softlimit < n) {
+		if (qc->timer == 0) {
+			qc->timer = ktime_get_real_seconds() + q->limits[counter].timelimit;
+			prepare_warning(qc, qtype, counter, msgs, SOFTWARN);
+		} else if (ktime_get_real_seconds() >= qc->timer &&
+			   !ignore_hardlimit(q)) {
+			prepare_warning(qc, qtype, counter, msgs, SOFTLONGWARN);
 			return -EDQUOT;
-
-		prepare_warning(qc, qtype, counter, msgs, SOFTWARN);
-
-		/* XXX is this the right one? */
-		qc->timer = ktime_get_real_seconds() +
-			q->limits[counter].warnlimit;
+		}
 	}
 
 	return 0;
@@ -469,7 +455,8 @@ err:
 	return ret;
 }
 
-static int __bch2_quota_set(struct bch_fs *c, struct bkey_s_c k)
+static int __bch2_quota_set(struct bch_fs *c, struct bkey_s_c k,
+			    struct qc_dqblk *qdq)
 {
 	struct bkey_s_c_quota dq;
 	struct bch_memquota_type *q;
@@ -498,6 +485,15 @@ static int __bch2_quota_set(struct bch_fs *c, struct bkey_s_c k)
 			mq->c[i].softlimit = le64_to_cpu(dq.v->c[i].softlimit);
 		}
 
+		if (qdq && qdq->d_fieldmask & QC_SPC_TIMER)
+			mq->c[Q_SPC].timer	= cpu_to_le64(qdq->d_spc_timer);
+		if (qdq && qdq->d_fieldmask & QC_SPC_WARNS)
+			mq->c[Q_SPC].warns	= cpu_to_le64(qdq->d_spc_warns);
+		if (qdq && qdq->d_fieldmask & QC_INO_TIMER)
+			mq->c[Q_INO].timer	= cpu_to_le64(qdq->d_ino_timer);
+		if (qdq && qdq->d_fieldmask & QC_INO_WARNS)
+			mq->c[Q_INO].warns	= cpu_to_le64(qdq->d_ino_warns);
+
 		mutex_unlock(&q->lock);
 	}
 
@@ -618,7 +614,7 @@ int bch2_fs_quota_read(struct bch_fs *c)
 
 	ret = for_each_btree_key2(&trans, iter, BTREE_ID_quotas,
 			POS_MIN, BTREE_ITER_PREFETCH, k,
-		__bch2_quota_set(c, k)) ?:
+		__bch2_quota_set(c, k, NULL)) ?:
 	      for_each_btree_key2(&trans, iter, BTREE_ID_inodes,
 			POS_MIN, BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
 		bch2_fs_quota_read_inode(&trans, &iter, k));
@@ -961,7 +957,7 @@ static int bch2_set_quota(struct super_block *sb, struct kqid qid,
 
 	ret = bch2_trans_do(c, NULL, NULL, 0,
 			    bch2_set_quota_trans(&trans, &new_quota, qdq)) ?:
-	      __bch2_quota_set(c, bkey_i_to_s_c(&new_quota.k_i));
+	      __bch2_quota_set(c, bkey_i_to_s_c(&new_quota.k_i), qdq);
 
 	return ret;
 }
-- 
2.39.5