"Force reads to use the reconstruct path, when reading" \
"from erasure coded extents") \
BCH_DEBUG_PARAM(test_restart_gc, \
- "Test restarting mark and sweep gc when bucket gens change")\
- BCH_DEBUG_PARAM(test_reconstruct_alloc, \
- "Test reconstructing the alloc btree")
+ "Test restarting mark and sweep gc when bucket gens change")
#define BCH_DEBUG_PARAMS_ALL() BCH_DEBUG_PARAMS_ALWAYS() BCH_DEBUG_PARAMS_DEBUG()
int idx = bch2_replicas_entry_idx(c, r);
BUG_ON(idx < 0);
- BUG_ON(!sectors);
switch (r->data_type) {
case BCH_DATA_BTREE:
{
struct replicas_delta_list *d;
struct replicas_delta *n;
- unsigned b = replicas_entry_bytes(r) + 8;
+ unsigned b;
+
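+ /*
+ * Callers no longer filter out zero-sector deltas; bail out here
+ * instead of asserting on them:
+ */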
+ if (!sectors)
+ return;
+ b = replicas_entry_bytes(r) + 8;
d = replicas_deltas_realloc(trans, b);
n = (void *) d->d + d->used;
fs_usage, journal_seq, flags);
if (p.ptr.cached) {
- if (disk_sectors && !stale)
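+ /* !disk_sectors is now a harmless no-op in update_cached_sectors() */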
+ if (!stale)
update_cached_sectors(c, fs_usage, p.ptr.dev,
disk_sectors);
} else if (!p.ec_nr) {
}
}
- if (dirty_sectors)
- update_replicas(c, fs_usage, &r.e, dirty_sectors);
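+ /* update_replicas() no longer asserts on zero sectors: */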
+ update_replicas(c, fs_usage, &r.e, dirty_sectors);
return 0;
}
struct bkey_s_c k;
struct bkey_alloc_unpacked u;
struct bkey_i_alloc *a;
+ unsigned old;
bool overflow;
int ret;
* Unless we're already updating that key:
*/
if (k.k->type != KEY_TYPE_alloc) {
- bch_err_ratelimited(c, "pointer to nonexistent bucket %u:%zu",
- p.ptr.dev,
- PTR_BUCKET_NR(ca, &p.ptr));
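+ /* iter->pos is the bucket we just looked up, and its fields are u64s: */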
+ bch_err_ratelimited(c, "pointer to nonexistent bucket %llu:%llu",
+ iter->pos.inode,
+ iter->pos.offset);
ret = -1;
goto out;
}
goto out;
}
- if (!p.ptr.cached)
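+ /* save the old count - checked_add() modifies its dst in place: */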
+ if (!p.ptr.cached) {
+ old = u.dirty_sectors;
overflow = checked_add(u.dirty_sectors, sectors);
- else
+ } else {
+ old = u.cached_sectors;
overflow = checked_add(u.cached_sectors, sectors);
+ }
u.data_type = u.dirty_sectors || u.cached_sectors
? data_type : 0;
bch2_fs_inconsistent_on(overflow, c,
"bucket sector count overflow: %u + %lli > U16_MAX",
- !p.ptr.cached
- ? u.dirty_sectors
- : u.cached_sectors, sectors);
+ old, sectors);
a = trans_update_key(trans, iter, BKEY_ALLOC_U64s_MAX);
ret = PTR_ERR_OR_ZERO(a);
? sectors
: ptr_disk_sectors_delta(p, offset, sectors, flags);
- /*
- * can happen due to rounding with compressed extents:
- */
- if (!disk_sectors)
- continue;
-
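+ /* zero disk_sectors (rounding with compressed extents) is now handled below */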
ret = bch2_trans_mark_pointer(trans, p, disk_sectors,
data_type);
if (ret < 0)
stale = ret > 0;
if (p.ptr.cached) {
- if (disk_sectors && !stale)
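+ /* update_cached_sectors_list() also handles !disk_sectors now */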
+ if (!stale)
update_cached_sectors_list(trans, p.ptr.dev,
disk_sectors);
} else if (!p.ec_nr) {
}
}
- if (dirty_sectors)
- update_replicas_list(trans, &r.e, dirty_sectors);
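+ /* update_replicas_list() returns early on zero sectors now: */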
+ update_replicas_list(trans, &r.e, dirty_sectors);
return 0;
}
bch2_disk_reservation_init(c, 0);
struct bkey_i *split;
struct bpos atomic_end;
- bool split_compressed = false;
+ /*
+ * Some extents aren't equivalent - w.r.t. what the triggers do -
+ * if they're split: compressed extents and reflink pointers have
+ * to be remarked if we end up having to split them:
+ */
+ bool remark_if_split = bch2_extent_is_compressed(bkey_i_to_s_c(k)) ||
+ k->k.type == KEY_TYPE_reflink_p;
+ bool remark = false;
int ret;
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
if (ret)
goto err;
- if (!split_compressed &&
- bch2_extent_is_compressed(bkey_i_to_s_c(k)) &&
+ if (!remark &&
+ remark_if_split &&
bkey_cmp(atomic_end, k->k.p) < 0) {
ret = bch2_disk_reservation_add(c, &disk_res,
k->k.size *
BCH_DISK_RESERVATION_NOFAIL);
BUG_ON(ret);
- split_compressed = true;
+ remark = true;
}
bkey_copy(split, k);
bch2_btree_iter_set_pos(iter, split->k.p);
} while (bkey_cmp(iter->pos, k->k.p) < 0);
- if (split_compressed) {
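+ /*
+ * We reserved the full key's worth of space above; now that the
+ * split pieces are inserted, re-run the mark on the original key
+ * with a negative sector count to drop its accounting:
+ */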
+ if (remark) {
ret = bch2_trans_mark_key(&trans, bkey_i_to_s_c(k),
0, -((s64) k->k.size),
BCH_BUCKET_MARK_OVERWRITE) ?:
continue;
if (i == BTREE_ID_ALLOC &&
- test_reconstruct_alloc(c)) {
+ c->opts.reconstruct_alloc) {
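+ /* skip this btree and clear the compat bit, forcing alloc info to be rebuilt */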
c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
continue;
}