e.nr++;
} else {
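/*
 * A new run of buckets starts here, so flush the previous run to the
 * heap first. The added NULL is, by my reading of the new signature,
 * an optional set_backpointer callback; alloc_heap entries don't track
 * their own heap position, so no callback is needed:
 */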
if (e.nr)
- heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);
+ heap_add_or_replace(&ca->alloc_heap, e,
+ -bucket_alloc_cmp, NULL);
e = (struct alloc_heap_entry) {
.bucket = b,
.nr = 1,
};
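/* Flush whatever run is left over when the scan ends: */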
if (e.nr)
- heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);
+ heap_add_or_replace(&ca->alloc_heap, e,
+ -bucket_alloc_cmp, NULL);
for (i = 0; i < ca->alloc_heap.used; i++)
nr += ca->alloc_heap.data[i].nr;
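/*
 * If the scan queued up more than one batch worth of buckets, discard
 * entries from the top until the total, not counting the current top
 * entry, drops back under ALLOC_SCAN_BATCH:
 */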
while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
nr -= ca->alloc_heap.data[0].nr;
- heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp);
+ heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL);
}
up_read(&ca->bucket_lock);
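/*
 * The other scan strategies are simpler: each reclaimable bucket
 * becomes its own single-bucket entry, until the heap fills up:
 */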
if (bch2_can_invalidate_bucket(ca, b, m)) {
struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
- heap_add(&ca->alloc_heap, e, bucket_alloc_cmp);
+ heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
if (heap_full(&ca->alloc_heap))
break;
}
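/* A second, near-identical call site gets the same extra argument: */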
if (bch2_can_invalidate_bucket(ca, b, m)) {
struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
- heap_add(&ca->alloc_heap, e, bucket_alloc_cmp);
+ heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
if (heap_full(&ca->alloc_heap))
break;
}
break;
}
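/*
 * The LRU scan above inserts with the comparator inverted, keeping the
 * worst candidate on top so heap_add_or_replace() can evict it; once
 * the scan is done, re-sort so the best candidate is on top, then count
 * what was collected:
 */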
- heap_resort(&ca->alloc_heap, bucket_alloc_cmp);
+ heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL);
for (i = 0; i < ca->alloc_heap.used; i++)
nr += ca->alloc_heap.data[i].nr;
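/*
 * Consumer side: buckets are handed out from the heap's top entry, and
 * once an entry is exhausted it is popped, again with no backpointer
 * callback:
 */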
return b;
}
- heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp);
+ heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
}
return -1;
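For readers unfamiliar with the extra argument, here is a minimal, self-contained sketch (not the kernel code; every name below is invented for illustration) of a heap whose sift path takes an optional set_backpointer callback, invoked whenever an element moves so elements can track their own index. Passing NULL, as every alloc_heap call site above does, simply skips that bookkeeping.

/*
 * Sketch only: a binary min-heap whose swap primitive reports each
 * element's new position through an optional callback.
 */
#include <stddef.h>
#include <stdio.h>

struct entry { int key; size_t idx; /* maintained via the backpointer */ };

struct sketch_heap { struct entry *data; size_t used; };

typedef int  (*cmp_fn)(struct entry l, struct entry r);
typedef void (*set_backpointer_fn)(struct sketch_heap *h, size_t i);

static void heap_swap(struct sketch_heap *h, size_t i, size_t j,
		      set_backpointer_fn set_backpointer)
{
	struct entry tmp = h->data[i];

	h->data[i] = h->data[j];
	h->data[j] = tmp;

	/* Tell the moved elements where they now live, if asked to: */
	if (set_backpointer) {
		set_backpointer(h, i);
		set_backpointer(h, j);
	}
}

static void sift_down(struct sketch_heap *h, size_t i, cmp_fn cmp,
		      set_backpointer_fn set_backpointer)
{
	while (1) {
		size_t l = 2 * i + 1, r = 2 * i + 2, m = i;

		if (l < h->used && cmp(h->data[l], h->data[m]) < 0)
			m = l;
		if (r < h->used && cmp(h->data[r], h->data[m]) < 0)
			m = r;
		if (m == i)
			break;

		heap_swap(h, i, m, set_backpointer);
		i = m;
	}
}

static int entry_cmp(struct entry l, struct entry r)
{
	return l.key - r.key;
}

static void entry_set_backpointer(struct sketch_heap *h, size_t i)
{
	h->data[i].idx = i;
}

int main(void)
{
	struct entry e[] = { {5, 0}, {1, 1}, {4, 2}, {2, 3}, {3, 4} };
	struct sketch_heap h = { e, 5 };
	size_t i;

	/* Heapify, keeping each entry's idx field up to date: */
	for (i = h.used / 2; i-- > 0;)
		sift_down(&h, i, entry_cmp, entry_set_backpointer);

	for (i = 0; i < h.used; i++)
		printf("key %d at idx %zu\n", e[i].key, e[i].idx);
	return 0;
}

Threading the callback through the swap primitive costs one indirect call per moved element when a backpointer is wanted, and the NULL check makes the common no-backpointer case, as in the alloc_heap hunks above, essentially free.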