git.sesse.net Git - bcachefs-tools-debian/commitdiff
Update bcachefs sources to 6ee8a33cee bcachefs: Call bch2_btree_update_add_new_node...
author Kent Overstreet <kent.overstreet@linux.dev>
Wed, 12 Oct 2022 15:06:50 +0000 (11:06 -0400)
committer Kent Overstreet <kent.overstreet@linux.dev>
Wed, 12 Oct 2022 15:06:50 +0000 (11:06 -0400)
.bcachefs_revision
libbcachefs/btree_iter.c
libbcachefs/btree_update_interior.c
libbcachefs/fs-io.c

index e79243813474af6a5856d902bdf6f8adbc374eae..2c908e6b7d9a5887af285bdd5ccc3559f5545122 100644 (file)
@@ -1 +1 @@
-83edfdeb29c92e0617c2bb0971184944eac09085
+6ee8a33cee5dfb74a1fb6ff348578fd43aae3a14
index 63197e4fa53c634f342114343377665a4ab2d341..af658390bd679f9dd56de95b1e75c327c896c6eb 100644 (file)
@@ -1540,15 +1540,17 @@ struct btree_path *bch2_path_get(struct btree_trans *trans,
 inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
 {
 
+       struct btree_path_level *l = path_l(path);
+       struct bkey_packed *_k;
        struct bkey_s_c k;
 
+       if (unlikely(!l->b))
+               return bkey_s_c_null;
+
        EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
        EBUG_ON(!btree_node_locked(path, path->level));
 
        if (!path->cached) {
-               struct btree_path_level *l = path_l(path);
-               struct bkey_packed *_k;
-
                _k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
                k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
 
index b966140760ff5db351b0ca997c9ef33b9f48fe5b..03c4fd0998945447f4075ddc1ec8ed05c5e1f074 100644 (file)
@@ -429,7 +429,6 @@ static struct btree *__btree_root_alloc(struct btree_update *as,
 
        btree_node_set_format(b, b->data->format);
        bch2_btree_build_aux_trees(b);
-       six_unlock_write(&b->c.lock);
 
        return b;
 }
@@ -1527,6 +1526,9 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
 
                bch2_btree_build_aux_trees(n2);
                bch2_btree_build_aux_trees(n1);
+
+               bch2_btree_update_add_new_node(as, n1);
+               bch2_btree_update_add_new_node(as, n2);
                six_unlock_write(&n2->c.lock);
                six_unlock_write(&n1->c.lock);
 
@@ -1540,9 +1542,6 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
                mark_btree_node_locked(trans, path2, n2->c.level, SIX_LOCK_intent);
                bch2_btree_path_level_init(trans, path2, n2);
 
-               bch2_btree_update_add_new_node(as, n1);
-               bch2_btree_update_add_new_node(as, n2);
-
                /*
                 * Note that on recursive parent_keys == keys, so we
                 * can't start adding new keys to parent_keys before emptying it
@@ -1555,6 +1554,9 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
                        /* Depth increases, make a new root */
                        n3 = __btree_root_alloc(as, trans, b->c.level + 1);
 
+                       bch2_btree_update_add_new_node(as, n3);
+                       six_unlock_write(&n3->c.lock);
+
                        path2->locks_want++;
                        BUG_ON(btree_node_locked(path2, n3->c.level));
                        six_lock_increment(&n3->c.lock, SIX_LOCK_intent);
@@ -1564,14 +1566,13 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
                        n3->sib_u64s[0] = U16_MAX;
                        n3->sib_u64s[1] = U16_MAX;
 
-                       bch2_btree_update_add_new_node(as, n3);
-
                        btree_split_insert_keys(as, trans, path, n3, &as->parent_keys);
                }
        } else {
                trace_and_count(c, btree_node_compact, c, b);
 
                bch2_btree_build_aux_trees(n1);
+               bch2_btree_update_add_new_node(as, n1);
                six_unlock_write(&n1->c.lock);
 
                path1 = get_unlocked_mut_path(trans, path->btree_id, n1->c.level, n1->key.k.p);
@@ -1579,8 +1580,6 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
                mark_btree_node_locked(trans, path1, n1->c.level, SIX_LOCK_intent);
                bch2_btree_path_level_init(trans, path1, n1);
 
-               bch2_btree_update_add_new_node(as, n1);
-
                if (parent)
                        bch2_keylist_add(&as->parent_keys, &n1->key);
        }
@@ -1903,9 +1902,8 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
        bch2_btree_sort_into(c, n, next);
 
        bch2_btree_build_aux_trees(n);
-       six_unlock_write(&n->c.lock);
-
        bch2_btree_update_add_new_node(as, n);
+       six_unlock_write(&n->c.lock);
 
        new_path = get_unlocked_mut_path(trans, path->btree_id, n->c.level, n->key.k.p);
        six_lock_increment(&n->c.lock, SIX_LOCK_intent);
@@ -1979,9 +1977,9 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
        bch2_btree_interior_update_will_free_node(as, b);
 
        n = bch2_btree_node_alloc_replacement(as, trans, b);
-       bch2_btree_update_add_new_node(as, n);
 
        bch2_btree_build_aux_trees(n);
+       bch2_btree_update_add_new_node(as, n);
        six_unlock_write(&n->c.lock);
 
        new_path = get_unlocked_mut_path(trans, iter->btree_id, n->c.level, n->key.k.p);
index 8e025768875521caa6ffa9fbd1a439036f0c5abf..2ea6e79f9f0ac146b3243f91d966dd485e0e9793 100644 (file)
@@ -151,7 +151,7 @@ static void bch2_quota_reservation_put(struct bch_fs *c,
 static int bch2_quota_reservation_add(struct bch_fs *c,
                                      struct bch_inode_info *inode,
                                      struct quota_res *res,
-                                     unsigned sectors,
+                                     u64 sectors,
                                      bool check_enospc)
 {
        int ret;
@@ -3256,6 +3256,62 @@ err:
 
 /* fseek: */
 
+static int page_data_offset(struct page *page, unsigned offset)
+{
+       struct bch_page_state *s = bch2_page_state(page);
+       unsigned i;
+
+       if (s)
+               for (i = offset >> 9; i < PAGE_SECTORS; i++)
+                       if (s->s[i].state >= SECTOR_DIRTY)
+                               return i << 9;
+
+       return -1;
+}
+
+static loff_t bch2_seek_pagecache_data(struct inode *vinode,
+                                      loff_t start_offset,
+                                      loff_t end_offset)
+{
+       struct folio_batch fbatch;
+       pgoff_t start_index     = start_offset >> PAGE_SHIFT;
+       pgoff_t end_index       = end_offset >> PAGE_SHIFT;
+       pgoff_t index           = start_index;
+       unsigned i;
+       loff_t ret;
+       int offset;
+
+       folio_batch_init(&fbatch);
+
+       while (filemap_get_folios(vinode->i_mapping,
+                                 &index, end_index, &fbatch)) {
+               for (i = 0; i < folio_batch_count(&fbatch); i++) {
+                       struct folio *folio = fbatch.folios[i];
+
+                       folio_lock(folio);
+
+                       offset = page_data_offset(&folio->page,
+                                       folio->index == start_index
+                                       ? start_offset & (PAGE_SIZE - 1)
+                                       : 0);
+                       if (offset >= 0) {
+                               ret = clamp(((loff_t) folio->index << PAGE_SHIFT) +
+                                           offset,
+                                           start_offset, end_offset);
+                               folio_unlock(folio);
+                               folio_batch_release(&fbatch);
+                               return ret;
+                       }
+
+                       folio_unlock(folio);
+               }
+               folio_batch_release(&fbatch);
+               cond_resched();
+       }
+
+       return end_offset;
+}
+
 static loff_t bch2_seek_data(struct file *file, u64 offset)
 {
        struct bch_inode_info *inode = file_bch_inode(file);
@@ -3299,13 +3355,9 @@ err:
        if (ret)
                return ret;
 
-       if (next_data > offset) {
-               loff_t pagecache_next_data =
-                       mapping_seek_hole_data(inode->v.i_mapping, offset,
-                                              next_data, SEEK_DATA);
-               if (pagecache_next_data >= 0)
-                       next_data = min_t(u64, next_data, pagecache_next_data);
-       }
+       if (next_data > offset)
+               next_data = bch2_seek_pagecache_data(&inode->v,
+                                                    offset, next_data);
 
        if (next_data >= isize)
                return -ENXIO;