+ return ret;
+}
+
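+/*
+ * Note: gen_after() below is assumed to be the helper from buckets.h - it
+ * returns how far gen @a has advanced past gen @b, or 0 if it hasn't; bucket
+ * gens are u8s that wrap. Roughly:
+ *
+ *	static inline u8 gen_after(u8 a, u8 b)
+ *	{
+ *		u8 r = a - b;
+ *
+ *		return r > 128U ? 0 : r;
+ *	}
+ */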
+static bool gc_btree_gens_key(struct bch_fs *c, struct bkey_s_c k)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
+
+ percpu_down_read(&c->mark_lock);
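+ /*
+ * First pass: if any pointer's bucket gen has advanced more than 16 past
+ * the pointer's gen, the key has stale pointers and needs rewriting:
+ */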
+ bkey_for_each_ptr(ptrs, ptr) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bucket *g = PTR_BUCKET(ca, ptr, false);
+
+ if (gen_after(g->mark.gen, ptr->gen) > 16) {
+ percpu_up_read(&c->mark_lock);
+ return true;
+ }
+ }
+
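+ /*
+ * Second pass: ratchet each bucket's gc_gen down to the oldest pointer
+ * gen we have for it:
+ */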
+ bkey_for_each_ptr(ptrs, ptr) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bucket *g = PTR_BUCKET(ca, ptr, false);
+
+ if (gen_after(g->gc_gen, ptr->gen))
+ g->gc_gen = ptr->gen;
+ }
+ percpu_up_read(&c->mark_lock);
+
+ return false;
+}
+
+/*
+ * For recalculating oldest gen, we only need to walk keys in leaf nodes; btree
+ * node pointers currently never have cached pointers that can become stale:
+ */
+static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id btree_id)
+{
+ struct btree_trans trans;
+ struct btree_iter *iter;
+ struct bkey_s_c k;
+ struct bkey_on_stack sk;
+ int ret = 0;
+
+ bkey_on_stack_init(&sk);
+ bch2_trans_init(&trans, c, 0, 0);
+
+ iter = bch2_trans_get_iter(&trans, btree_id, POS_MIN,
+ BTREE_ITER_PREFETCH);
+
+ while ((k = bch2_btree_iter_peek(iter)).k &&
+ !(ret = bkey_err(k))) {
+ if (gc_btree_gens_key(c, k)) {
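+ /*
+ * Rewrite the key: bch2_extent_normalize() drops pointers that are no
+ * longer live, e.g. stale cached pointers:
+ */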
+ bkey_on_stack_reassemble(&sk, c, k);
+ bch2_extent_normalize(c, bkey_i_to_s(sk.k));
+
+ bch2_btree_iter_set_pos(iter, bkey_start_pos(&sk.k->k));
+
+ bch2_trans_update(&trans, iter, sk.k, 0);
+
+ ret = bch2_trans_commit(&trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL);
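+ /* -EINTR means the transaction was restarted - retry this key: */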
+ if (ret == -EINTR)
+ continue;
+ if (ret)
+ break;
+ }
+
+ bch2_btree_iter_next(iter);
+ }
+
+ bch2_trans_exit(&trans);
+ bkey_on_stack_exit(&sk, c);
+
+ return ret;
+}
+
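+/*
+ * Recalculate the oldest gen of every bucket by walking every pointer in the
+ * btree; dropping overly stale cached pointers first is what keeps 8 bit
+ * bucket gens safe from wraparound ambiguity:
+ */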
+int bch2_gc_gens(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ struct bucket_array *buckets;
+ struct bucket *g;
+ unsigned i;
+ int ret = 0;
+
+ /*
+ * Ideally we would be using state_lock and not gc_lock here, but that
+ * introduces a deadlock in the RO path - we currently take the state
+ * lock at the start of going RO, thus the gc thread may get stuck:
+ */
+ down_read(&c->gc_lock);
+
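+ /*
+ * Seed gc_gen with the current bucket gen; the btree walk below ratchets
+ * it down to the oldest pointer gen it finds:
+ */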
+ for_each_member_device(ca, c, i) {
+ down_read(&ca->bucket_lock);
+ buckets = bucket_array(ca);
+
+ for_each_bucket(g, buckets)
+ g->gc_gen = g->mark.gen;
+ up_read(&ca->bucket_lock);
+ }
+
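+ /* Walk every btree whose keys contain pointers into buckets: */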
+ for (i = 0; i < BTREE_ID_NR; i++)
+ if (btree_node_type_needs_gc(i)) {
+ ret = bch2_gc_btree_gens(c, i);
+ if (ret) {
+ bch_err(c, "error recalculating oldest_gen: %i", ret);
+ goto err;
+ }
+ }
+
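+ /* Walk complete - publish the recalculated oldest gens: */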
+ for_each_member_device(ca, c, i) {
+ down_read(&ca->bucket_lock);
+ buckets = bucket_array(ca);
+
+ for_each_bucket(g, buckets)
+ g->oldest_gen = g->gc_gen;
+ up_read(&ca->bucket_lock);
+ }
+
+ c->gc_count++;
+err:
+ up_read(&c->gc_lock);
+ return ret;