+	for_each_rw_member(ca, c, dev_idx)
+		heap_size += ca->mi.nbuckets >> 7;
+
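+	/*
+	 * Reallocate the heap if the size estimate above (nbuckets / 128 per
+	 * rw device) no longer fits in the current allocation:
+	 */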
+	if (h->size < heap_size) {
+		free_heap(&c->copygc_heap);
+		if (!init_heap(&c->copygc_heap, heap_size, GFP_KERNEL)) {
+			bch_err(c, "error allocating copygc heap");
+			return 0;
+		}
+	}
+
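+	/* Scan each rw device and collect candidate buckets to evacuate: */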
+	for_each_rw_member(ca, c, dev_idx) {
+		closure_wait_event(&c->freelist_wait, have_copygc_reserve(ca));
+
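+		/* Count sectors currently set aside in the movinggc reserve: */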
+		spin_lock(&ca->fs->freelist_lock);
+		sectors_reserved += fifo_used(&ca->free[RESERVE_MOVINGGC]) * ca->mi.bucket_size;
+		spin_unlock(&ca->fs->freelist_lock);
+
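+		/* Walk this device's bucket array under bucket_lock: */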
+		down_read(&ca->bucket_lock);
+		buckets = bucket_array(ca);
+
+		for (b = buckets->first_bucket; b < buckets->nbuckets; b++) {
+			struct bucket *g = buckets->b + b;
+			struct bucket_mark m = READ_ONCE(g->mark);
+			struct copygc_heap_entry e;
+
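+			/*
+			 * Skip buckets that are owned by the allocator, don't
+			 * hold user data, are empty, or are completely full:
+			 */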
+			if (m.owned_by_allocator ||
+			    m.data_type != BCH_DATA_user ||
+			    !bucket_sectors_used(m) ||
+			    bucket_sectors_used(m) >= ca->mi.bucket_size)
+				continue;
+
+			WARN_ON(m.stripe && !g->stripe_redundancy);
+
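+			/*
+			 * fragmentation is the fraction of the bucket in use,
+			 * as a fixed-point value scaled to 1 << 15:
+			 */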
+			e = (struct copygc_heap_entry) {
+				.dev		= dev_idx,
+				.gen		= m.gen,
+				.replicas	= 1 + g->stripe_redundancy,
+				.fragmentation	= bucket_sectors_used(m) * (1U << 15)
+					/ ca->mi.bucket_size,
+				.sectors	= bucket_sectors_used(m),
+				.offset		= bucket_to_sector(ca, b),
+			};
+			heap_add_or_replace(h, e, -fragmentation_cmp, NULL);
+		}
+		up_read(&ca->bucket_lock);
+	}
+