+/*
+ * Map a bkey to the sector state recorded in struct bch_page_state:
+ * reservation keys become SECTOR_RESERVED, allocated extents become
+ * SECTOR_ALLOCATED, everything else is SECTOR_UNALLOCATED.
+ */
+static unsigned bkey_to_sector_state(const struct bkey *k)
+{
+ switch (k->type) {
+ case KEY_TYPE_reservation:
+ return SECTOR_RESERVED;
+ default:
+ return bkey_extent_is_allocation(k)
+ ? SECTOR_ALLOCATED
+ : SECTOR_UNALLOCATED;
+ }
+}
+
+/*
+ * Set the per-sector allocation state and replica count for sectors
+ * [pg_offset, pg_offset + pg_len) of @page, under the page state spinlock.
+ *
+ * If the written range extends to the last sector of the page, the page
+ * state is marked uptodate.  NOTE(review): only the *end* of the range is
+ * checked, not the start — this presumably relies on callers filling pages
+ * front-to-back; confirm against callers.
+ */
+static void __bch2_page_state_set(struct page *page,
+ unsigned pg_offset, unsigned pg_len,
+ unsigned nr_ptrs, unsigned state)
+{
+ /* __GFP_NOFAIL: cannot return NULL, so no error check needed */
+ struct bch_page_state *s = bch2_page_state_create(page, __GFP_NOFAIL);
+ unsigned i;
+
+ BUG_ON(pg_offset >= PAGE_SECTORS);
+ BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
+
+ spin_lock(&s->lock);
+
+ for (i = pg_offset; i < pg_offset + pg_len; i++) {
+ s->s[i].nr_replicas = nr_ptrs;
+ s->s[i].state = state;
+ }
+
+ /* after the loop i == pg_offset + pg_len (pg_len may be 0) */
+ if (i == PAGE_SECTORS)
+ s->uptodate = true;
+
+ spin_unlock(&s->lock);
+}
+
+/*
+ * Initialize page state for @nr_pages pages of @inum by walking the extents
+ * btree and stamping each page's sectors with the state of the extent that
+ * covers them.
+ *
+ * Returns 0 on success or a bch2 error code; transaction restarts are
+ * retried transparently via the retry label.
+ */
+static int bch2_page_state_set(struct bch_fs *c, subvol_inum inum,
+ struct page **pages, unsigned nr_pages)
+{
+ struct btree_trans trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ /* starting file offset in sectors; advanced across restarts (L80) */
+ u64 offset = pages[0]->index << PAGE_SECTORS_SHIFT;
+ unsigned pg_idx = 0;
+ u32 snapshot;
+ int ret;
+
+ bch2_trans_init(&trans, c, 0, 0);
+retry:
+ bch2_trans_begin(&trans);
+
+ ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
+ if (ret)
+ goto err;
+
+ /* BTREE_ITER_SLOTS: also yields holes, so unallocated ranges are seen */
+ for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
+ SPOS(inum.inum, offset, snapshot),
+ BTREE_ITER_SLOTS, k, ret) {
+ unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
+ unsigned state = bkey_to_sector_state(k.k);
+
+ /* apply this extent to every page it overlaps */
+ while (pg_idx < nr_pages) {
+ struct page *page = pages[pg_idx];
+ u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
+ u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
+ /* intersection of extent and page, as page-relative sectors */
+ unsigned pg_offset = max(bkey_start_offset(k.k), pg_start) - pg_start;
+ unsigned pg_len = min(k.k->p.offset, pg_end) - pg_offset - pg_start;
+
+ BUG_ON(k.k->p.offset < pg_start);
+ BUG_ON(bkey_start_offset(k.k) > pg_end);
+
+ /* skip pages whose state was already fully initialized */
+ if (!bch2_page_state_create(page, __GFP_NOFAIL)->uptodate)
+ __bch2_page_state_set(page, pg_offset, pg_len, nr_ptrs, state);
+
+ /* extent ends inside this page: need the next extent */
+ if (k.k->p.offset < pg_end)
+ break;
+ pg_idx++;
+ }
+
+ if (pg_idx == nr_pages)
+ break;
+ }
+
+ /* remember progress so a restart resumes where we left off */
+ offset = iter.pos.offset;
+ bch2_trans_iter_exit(&trans, &iter);
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+ bch2_trans_exit(&trans);
+
+ return ret;
+}
+
+/*
+ * Stamp the page state for every segment of @bio with the sector state and
+ * replica count derived from extent @k.  Reflink pointers report zero
+ * replicas.
+ */
+static void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
+{
+ unsigned state = bkey_to_sector_state(k.k);
+ unsigned nr_ptrs = 0;
+ struct bvec_iter seg_iter;
+ struct bio_vec seg;
+
+ if (k.k->type != KEY_TYPE_reflink_v)
+ nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
+
+ /* bv_offset/bv_len are in bytes; >> 9 converts to 512-byte sectors */
+ bio_for_each_segment(seg, bio, seg_iter)
+ __bch2_page_state_set(seg.bv_page, seg.bv_offset >> 9,
+ seg.bv_len >> 9, nr_ptrs, state);
+}
+
+/*
+ * Walk the pagecache for sectors [start, end) of @inode and zero the
+ * nr_replicas count of each cached sector, i.e. mark them as no longer
+ * backed by on-disk allocations.  Pages with no bch_page_state attached
+ * are left untouched.
+ *
+ * @start, @end are in 512-byte sectors.
+ * NOTE(review): pg_end assumes order-0 (single-page) folios — confirm
+ * large folios cannot appear in this mapping.
+ */
+static void mark_pagecache_unallocated(struct bch_inode_info *inode,
+ u64 start, u64 end)
+{
+ pgoff_t index = start >> PAGE_SECTORS_SHIFT;
+ pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
+ struct folio_batch fbatch;
+ unsigned i, j;
+
+ if (end <= start)
+ return;
+
+ folio_batch_init(&fbatch);
+
+ /* filemap_get_folios() advances index, so this loop makes progress */
+ while (filemap_get_folios(inode->v.i_mapping,
+ &index, end_index, &fbatch)) {
+ for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ struct folio *folio = fbatch.folios[i];
+ u64 pg_start = folio->index << PAGE_SECTORS_SHIFT;
+ u64 pg_end = (folio->index + 1) << PAGE_SECTORS_SHIFT;
+ /* intersection of [start, end) and this folio, folio-relative */
+ unsigned pg_offset = max(start, pg_start) - pg_start;
+ unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
+ struct bch_page_state *s;
+
+ BUG_ON(end <= pg_start);
+ BUG_ON(pg_offset >= PAGE_SECTORS);
+ BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
+
+ folio_lock(folio);
+ s = bch2_page_state(&folio->page);
+
+ /* s may be NULL if no state was ever attached to this page */
+ if (s) {
+ spin_lock(&s->lock);
+ for (j = pg_offset; j < pg_offset + pg_len; j++)
+ s->s[j].nr_replicas = 0;
+ spin_unlock(&s->lock);
+ }
+
+ folio_unlock(folio);
+ }
+ folio_batch_release(&fbatch);
+ cond_resched();
+ }
+}
+
+/*
+ * Walk the pagecache for sectors [start, end) of @inode and promote each
+ * cached sector's state to reflect an on-disk reservation:
+ *   SECTOR_UNALLOCATED -> SECTOR_RESERVED
+ *   SECTOR_DIRTY       -> SECTOR_DIRTY_RESERVED
+ * Other states are left unchanged; pages without attached state are skipped.
+ *
+ * Dirty sectors that become reserved no longer count toward i_sectors (the
+ * reservation already does), so the delta is accumulated and applied once
+ * at the end via i_sectors_acct().
+ *
+ * @start, @end are in 512-byte sectors.
+ * NOTE(review): pg_end assumes order-0 (single-page) folios — confirm
+ * large folios cannot appear in this mapping.
+ */
+static void mark_pagecache_reserved(struct bch_inode_info *inode,
+ u64 start, u64 end)
+{
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ pgoff_t index = start >> PAGE_SECTORS_SHIFT;
+ pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
+ struct folio_batch fbatch;
+ s64 i_sectors_delta = 0;
+ unsigned i, j;
+
+ if (end <= start)
+ return;
+
+ folio_batch_init(&fbatch);
+
+ /* filemap_get_folios() advances index, so this loop makes progress */
+ while (filemap_get_folios(inode->v.i_mapping,
+ &index, end_index, &fbatch)) {
+ for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ struct folio *folio = fbatch.folios[i];
+ u64 pg_start = folio->index << PAGE_SECTORS_SHIFT;
+ u64 pg_end = (folio->index + 1) << PAGE_SECTORS_SHIFT;
+ /* intersection of [start, end) and this folio, folio-relative */
+ unsigned pg_offset = max(start, pg_start) - pg_start;
+ unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
+ struct bch_page_state *s;
+
+ BUG_ON(end <= pg_start);
+ BUG_ON(pg_offset >= PAGE_SECTORS);
+ BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
+
+ folio_lock(folio);
+ s = bch2_page_state(&folio->page);
+
+ /* s may be NULL if no state was ever attached to this page */
+ if (s) {
+ spin_lock(&s->lock);
+ for (j = pg_offset; j < pg_offset + pg_len; j++)
+ switch (s->s[j].state) {
+ case SECTOR_UNALLOCATED:
+ s->s[j].state = SECTOR_RESERVED;
+ break;
+ case SECTOR_DIRTY:
+ s->s[j].state = SECTOR_DIRTY_RESERVED;
+ i_sectors_delta--;
+ break;
+ default:
+ break;
+ }
+ spin_unlock(&s->lock);
+ }
+
+ folio_unlock(folio);
+ }
+ folio_batch_release(&fbatch);
+ cond_resched();
+ }
+
+ i_sectors_acct(c, inode, NULL, i_sectors_delta);
+}
+