diff --git a/libbcachefs/alloc_background.c b/libbcachefs/alloc_background.c
index 769c37f9e90710c1db2531d6b318c9e819249d71..a09b9d00226a4e1dd510c0c097ac59e7cb7d3c77 100644
--- a/libbcachefs/alloc_background.c
+++ b/libbcachefs/alloc_background.c
@@ -534,14 +534,8 @@ void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bke
 int bch2_bucket_gens_init(struct bch_fs *c)
 {
        struct btree_trans *trans = bch2_trans_get(c);
-       struct btree_iter iter;
-       struct bkey_s_c k;
-       struct bch_alloc_v4 a;
        struct bkey_i_bucket_gens g;
        bool have_bucket_gens_key = false;
-       unsigned offset;
-       struct bpos pos;
-       u8 gen;
        int ret;
 
        ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
@@ -553,8 +547,10 @@ int bch2_bucket_gens_init(struct bch_fs *c)
                if (!bch2_dev_bucket_exists(c, k.k->p))
                        continue;
 
-               gen = bch2_alloc_to_v4(k, &a)->gen;
-               pos = alloc_gens_pos(iter.pos, &offset);
+               struct bch_alloc_v4 a;
+               u8 gen = bch2_alloc_to_v4(k, &a)->gen;
+               unsigned offset;
+               struct bpos pos = alloc_gens_pos(iter.pos, &offset);
 
                if (have_bucket_gens_key && bkey_cmp(iter.pos, pos)) {
                        ret = commit_do(trans, NULL, NULL,
@@ -589,17 +585,11 @@ int bch2_bucket_gens_init(struct bch_fs *c)
 int bch2_alloc_read(struct bch_fs *c)
 {
        struct btree_trans *trans = bch2_trans_get(c);
-       struct btree_iter iter;
-       struct bkey_s_c k;
-       struct bch_dev *ca;
        int ret;
 
        down_read(&c->gc_lock);
 
        if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
-               const struct bch_bucket_gens *g;
-               u64 b;
-
                ret = for_each_btree_key(trans, iter, BTREE_ID_bucket_gens, POS_MIN,
                                         BTREE_ITER_PREFETCH, k, ({
                        u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
@@ -608,7 +598,7 @@ int bch2_alloc_read(struct bch_fs *c)
                        if (k.k->type != KEY_TYPE_bucket_gens)
                                continue;
 
-                       g = bkey_s_c_to_bucket_gens(k).v;
+                       const struct bch_bucket_gens *g = bkey_s_c_to_bucket_gens(k).v;
 
                        /*
                         * Not a fsck error because this is checked/repaired by
@@ -617,17 +607,15 @@ int bch2_alloc_read(struct bch_fs *c)
                        if (!bch2_dev_exists2(c, k.k->p.inode))
                                continue;
 
-                       ca = bch_dev_bkey_exists(c, k.k->p.inode);
+                       struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
 
-                       for (b = max_t(u64, ca->mi.first_bucket, start);
+                       for (u64 b = max_t(u64, ca->mi.first_bucket, start);
                             b < min_t(u64, ca->mi.nbuckets, end);
                             b++)
                                *bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
                        0;
                }));
        } else {
-               struct bch_alloc_v4 a;
-
                ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
                                         BTREE_ITER_PREFETCH, k, ({
                        /*
@@ -637,8 +625,9 @@ int bch2_alloc_read(struct bch_fs *c)
                        if (!bch2_dev_bucket_exists(c, k.k->p))
                                continue;
 
-                       ca = bch_dev_bkey_exists(c, k.k->p.inode);
+                       struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
 
+                       struct bch_alloc_v4 a;
                        *bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
                        0;
                }));
@@ -760,95 +749,177 @@ static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
        return ret;
 }
 
-int bch2_trans_mark_alloc(struct btree_trans *trans,
-                         enum btree_id btree_id, unsigned level,
-                         struct bkey_s_c old, struct bkey_i *new,
-                         unsigned flags)
+int bch2_trigger_alloc(struct btree_trans *trans,
+                      enum btree_id btree, unsigned level,
+                      struct bkey_s_c old, struct bkey_s new,
+                      unsigned flags)
 {
        struct bch_fs *c = trans->c;
-       struct bch_alloc_v4 old_a_convert, *new_a;
-       const struct bch_alloc_v4 *old_a;
-       u64 old_lru, new_lru;
        int ret = 0;
 
-       /*
-        * Deletion only happens in the device removal path, with
-        * BTREE_TRIGGER_NORUN:
-        */
-       BUG_ON(new->k.type != KEY_TYPE_alloc_v4);
+       if (bch2_trans_inconsistent_on(!bch2_dev_bucket_exists(c, new.k->p), trans,
+                                      "alloc key for invalid device or bucket"))
+               return -EIO;
 
-       old_a = bch2_alloc_to_v4(old, &old_a_convert);
-       new_a = &bkey_i_to_alloc_v4(new)->v;
+       struct bch_dev *ca = bch_dev_bkey_exists(c, new.k->p.inode);
 
-       new_a->data_type = alloc_data_type(*new_a, new_a->data_type);
+       struct bch_alloc_v4 old_a_convert;
+       const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);
 
-       if (bch2_bucket_sectors(*new_a) > bch2_bucket_sectors(*old_a)) {
-               new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
-               new_a->io_time[WRITE]= max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
-               SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
-               SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
-       }
+       if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
+               struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
 
-       if (data_type_is_empty(new_a->data_type) &&
-           BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
-           !bch2_bucket_is_open_safe(c, new->k.p.inode, new->k.p.offset)) {
-               new_a->gen++;
-               SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
-       }
+               new_a->data_type = alloc_data_type(*new_a, new_a->data_type);
 
-       if (old_a->data_type != new_a->data_type ||
-           (new_a->data_type == BCH_DATA_free &&
-            alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
-               ret =   bch2_bucket_do_index(trans, old, old_a, false) ?:
-                       bch2_bucket_do_index(trans, bkey_i_to_s_c(new), new_a, true);
-               if (ret)
-                       return ret;
-       }
+               if (bch2_bucket_sectors(*new_a) > bch2_bucket_sectors(*old_a)) {
+                       new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
+                       new_a->io_time[WRITE]= max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
+                       SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
+                       SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
+               }
 
-       if (new_a->data_type == BCH_DATA_cached &&
-           !new_a->io_time[READ])
-               new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
+               if (data_type_is_empty(new_a->data_type) &&
+                   BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
+                   !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset)) {
+                       new_a->gen++;
+                       SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
+               }
 
-       old_lru = alloc_lru_idx_read(*old_a);
-       new_lru = alloc_lru_idx_read(*new_a);
+               if (old_a->data_type != new_a->data_type ||
+                   (new_a->data_type == BCH_DATA_free &&
+                    alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
+                       ret =   bch2_bucket_do_index(trans, old, old_a, false) ?:
+                               bch2_bucket_do_index(trans, new.s_c, new_a, true);
+                       if (ret)
+                               return ret;
+               }
 
-       if (old_lru != new_lru) {
-               ret = bch2_lru_change(trans, new->k.p.inode,
-                                     bucket_to_u64(new->k.p),
-                                     old_lru, new_lru);
-               if (ret)
-                       return ret;
-       }
+               if (new_a->data_type == BCH_DATA_cached &&
+                   !new_a->io_time[READ])
+                       new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
 
-       new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a,
-                                       bch_dev_bkey_exists(c, new->k.p.inode));
+               u64 old_lru = alloc_lru_idx_read(*old_a);
+               u64 new_lru = alloc_lru_idx_read(*new_a);
+               if (old_lru != new_lru) {
+                       ret = bch2_lru_change(trans, new.k->p.inode,
+                                             bucket_to_u64(new.k->p),
+                                             old_lru, new_lru);
+                       if (ret)
+                               return ret;
+               }
 
-       if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
-               ret = bch2_lru_change(trans,
-                               BCH_LRU_FRAGMENTATION_START,
-                               bucket_to_u64(new->k.p),
-                               old_a->fragmentation_lru, new_a->fragmentation_lru);
-               if (ret)
-                       return ret;
+               new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a,
+                                               bch_dev_bkey_exists(c, new.k->p.inode));
+               if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
+                       ret = bch2_lru_change(trans,
+                                       BCH_LRU_FRAGMENTATION_START,
+                                       bucket_to_u64(new.k->p),
+                                       old_a->fragmentation_lru, new_a->fragmentation_lru);
+                       if (ret)
+                               return ret;
+               }
+
+               if (old_a->gen != new_a->gen) {
+                       ret = bch2_bucket_gen_update(trans, new.k->p, new_a->gen);
+                       if (ret)
+                               return ret;
+               }
+
+               /*
+                * need to know if we're getting called from the invalidate path or
+                * not:
+                */
+
+               if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
+                   old_a->cached_sectors) {
+                       ret = bch2_update_cached_sectors_list(trans, new.k->p.inode,
+                                                             -((s64) old_a->cached_sectors));
+                       if (ret)
+                               return ret;
+               }
        }
 
-       if (old_a->gen != new_a->gen) {
-               ret = bch2_bucket_gen_update(trans, new->k.p, new_a->gen);
-               if (ret)
-                       return ret;
+       if (!(flags & BTREE_TRIGGER_TRANSACTIONAL) && (flags & BTREE_TRIGGER_INSERT)) {
+               struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
+               u64 journal_seq = trans->journal_res.seq;
+               u64 bucket_journal_seq = new_a->journal_seq;
+
+               if ((flags & BTREE_TRIGGER_INSERT) &&
+                   data_type_is_empty(old_a->data_type) !=
+                   data_type_is_empty(new_a->data_type) &&
+                   new.k->type == KEY_TYPE_alloc_v4) {
+                       struct bch_alloc_v4 *v = bkey_s_to_alloc_v4(new).v;
+
+                       /*
+                        * If the btree updates referring to a bucket weren't flushed
+                        * before the bucket became empty again, then we don't have
+                        * to wait on a journal flush before we can reuse the bucket:
+                        */
+                       v->journal_seq = bucket_journal_seq =
+                               data_type_is_empty(new_a->data_type) &&
+                               (journal_seq == v->journal_seq ||
+                                bch2_journal_noflush_seq(&c->journal, v->journal_seq))
+                               ? 0 : journal_seq;
+               }
+
+               if (!data_type_is_empty(old_a->data_type) &&
+                   data_type_is_empty(new_a->data_type) &&
+                   bucket_journal_seq) {
+                       ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
+                                       c->journal.flushed_seq_ondisk,
+                                       new.k->p.inode, new.k->p.offset,
+                                       bucket_journal_seq);
+                       if (ret) {
+                               bch2_fs_fatal_error(c,
+                                       "error setting bucket_needs_journal_commit: %i", ret);
+                               return ret;
+                       }
+               }
+
+               percpu_down_read(&c->mark_lock);
+               if (new_a->gen != old_a->gen)
+                       *bucket_gen(ca, new.k->p.offset) = new_a->gen;
+
+               bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, false);
+
+               if (new_a->data_type == BCH_DATA_free &&
+                   (!new_a->journal_seq || new_a->journal_seq < c->journal.flushed_seq_ondisk))
+                       closure_wake_up(&c->freelist_wait);
+
+               if (new_a->data_type == BCH_DATA_need_discard &&
+                   (!bucket_journal_seq || bucket_journal_seq < c->journal.flushed_seq_ondisk))
+                       bch2_do_discards(c);
+
+               if (old_a->data_type != BCH_DATA_cached &&
+                   new_a->data_type == BCH_DATA_cached &&
+                   should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
+                       bch2_do_invalidates(c);
+
+               if (new_a->data_type == BCH_DATA_need_gc_gens)
+                       bch2_do_gc_gens(c);
+               percpu_up_read(&c->mark_lock);
        }
 
-       /*
-        * need to know if we're getting called from the invalidate path or
-        * not:
-        */
+       if ((flags & BTREE_TRIGGER_GC) &&
+           (flags & BTREE_TRIGGER_BUCKET_INVALIDATE)) {
+               struct bch_alloc_v4 new_a_convert;
+               const struct bch_alloc_v4 *new_a = bch2_alloc_to_v4(new.s_c, &new_a_convert);
 
-       if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
-           old_a->cached_sectors) {
-               ret = bch2_update_cached_sectors_list(trans, new->k.p.inode,
-                                                     -((s64) old_a->cached_sectors));
-               if (ret)
-                       return ret;
+               percpu_down_read(&c->mark_lock);
+               struct bucket *g = gc_bucket(ca, new.k->p.offset);
+
+               bucket_lock(g);
+
+               g->gen_valid            = 1;
+               g->gen                  = new_a->gen;
+               g->data_type            = new_a->data_type;
+               g->stripe               = new_a->stripe;
+               g->stripe_redundancy    = new_a->stripe_redundancy;
+               g->dirty_sectors        = new_a->dirty_sectors;
+               g->cached_sectors       = new_a->cached_sectors;
+
+               bucket_unlock(g);
+               percpu_up_read(&c->mark_lock);
        }
 
        return 0;
@@ -903,7 +974,6 @@ static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos
 static bool next_bucket(struct bch_fs *c, struct bpos *bucket)
 {
        struct bch_dev *ca;
-       unsigned iter;
 
        if (bch2_dev_bucket_exists(c, *bucket))
                return true;
@@ -921,8 +991,7 @@ static bool next_bucket(struct bch_fs *c, struct bpos *bucket)
        }
 
        rcu_read_lock();
-       iter = bucket->inode;
-       ca = __bch2_next_dev(c, &iter, NULL);
+       ca = __bch2_next_dev_idx(c, bucket->inode, NULL);
        if (ca)
                *bucket = POS(ca->dev_idx, ca->mi.first_bucket);
        rcu_read_unlock();
@@ -1163,9 +1232,6 @@ int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
        unsigned i, gens_offset, gens_end_offset;
        int ret;
 
-       if (c->sb.version < bcachefs_metadata_version_bucket_gens)
-               return 0;
-
        bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset));
 
        k = bch2_btree_iter_peek_slot(bucket_gens_iter);
@@ -1471,8 +1537,7 @@ bkey_err:
                bch2_check_bucket_gens_key(trans, &iter, k));
 err:
        bch2_trans_put(trans);
-       if (ret)
-               bch_err_fn(c, ret);
+       bch_err_fn(c, ret);
        return ret;
 }
 
@@ -1551,9 +1616,6 @@ fsck_err:
 
 int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
 {
-       struct btree_iter iter;
-       struct bkey_s_c k;
-
        int ret = bch2_trans_run(c,
                for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
                                POS_MIN, BTREE_ITER_PREFETCH, k,
@@ -1682,8 +1744,6 @@ out:
 static void bch2_do_discards_work(struct work_struct *work)
 {
        struct bch_fs *c = container_of(work, struct bch_fs, discard_work);
-       struct btree_iter iter;
-       struct bkey_s_c k;
        u64 seen = 0, open = 0, need_journal_commit = 0, discarded = 0;
        struct bpos discard_pos_done = POS_MAX;
        int ret;
@@ -1805,22 +1865,18 @@ err:
 static void bch2_do_invalidates_work(struct work_struct *work)
 {
        struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
-       struct bch_dev *ca;
        struct btree_trans *trans = bch2_trans_get(c);
-       struct btree_iter iter;
-       struct bkey_s_c k;
-       unsigned i;
        int ret = 0;
 
        ret = bch2_btree_write_buffer_tryflush(trans);
        if (ret)
                goto err;
 
-       for_each_member_device(ca, c, i) {
+       for_each_member_device(c, ca) {
                s64 nr_to_invalidate =
                        should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
 
-               ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_lru,
+               ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
                                lru_pos(ca->dev_idx, 0, 0),
                                lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX),
                                BTREE_ITER_INTENT, k,
@@ -1945,8 +2001,6 @@ bkey_err:
 
 int bch2_fs_freespace_init(struct bch_fs *c)
 {
-       struct bch_dev *ca;
-       unsigned i;
        int ret = 0;
        bool doing_init = false;
 
@@ -1955,7 +2009,7 @@ int bch2_fs_freespace_init(struct bch_fs *c)
         * every mount:
         */
 
-       for_each_member_device(ca, c, i) {
+       for_each_member_device(c, ca) {
                if (ca->mi.freespace_initialized)
                        continue;
 
@@ -2015,15 +2069,13 @@ out:
 
 void bch2_recalc_capacity(struct bch_fs *c)
 {
-       struct bch_dev *ca;
        u64 capacity = 0, reserved_sectors = 0, gc_reserve;
        unsigned bucket_size_max = 0;
        unsigned long ra_pages = 0;
-       unsigned i;
 
        lockdep_assert_held(&c->state_lock);
 
-       for_each_online_member(ca, c, i) {
+       for_each_online_member(c, ca) {
                struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;
 
                ra_pages += bdi->ra_pages;
@@ -2031,7 +2083,7 @@ void bch2_recalc_capacity(struct bch_fs *c)
 
        bch2_set_ra_pages(c, ra_pages);
 
-       for_each_rw_member(ca, c, i) {
+       for_each_rw_member(c, ca) {
                u64 dev_reserve = 0;
 
                /*
@@ -2087,11 +2139,9 @@ void bch2_recalc_capacity(struct bch_fs *c)
 
 u64 bch2_min_rw_member_capacity(struct bch_fs *c)
 {
-       struct bch_dev *ca;
-       unsigned i;
        u64 ret = U64_MAX;
 
-       for_each_rw_member(ca, c, i)
+       for_each_rw_member(c, ca)
                ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size);
        return ret;
 }