#include <linux/mm.h>
-#define MAX_EXTENT_COMPRESS_RATIO_SHIFT 10
-
/*
* Convert from pos in backpointer btree to pos of corresponding bucket in alloc
* btree:
(bucket_to_sector(ca, bucket.offset) <<
MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);
- BUG_ON(bkey_cmp(bucket, bp_pos_to_bucket(c, ret)));
+ BUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(c, ret)));
return ret;
}
-void bch2_extent_ptr_to_bp(struct bch_fs *c,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c k, struct extent_ptr_decoded p,
- struct bpos *bucket_pos, struct bch_backpointer *bp)
-{
- enum bch_data_type data_type = level ? BCH_DATA_btree : BCH_DATA_user;
- s64 sectors = level ? btree_sectors(c) : k.k->size;
- u32 bucket_offset;
-
- *bucket_pos = PTR_BUCKET_POS_OFFSET(c, &p.ptr, &bucket_offset);
- *bp = (struct bch_backpointer) {
- .btree_id = btree_id,
- .level = level,
- .data_type = data_type,
- .bucket_offset = ((u64) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) +
- p.crc.offset,
- .bucket_len = ptr_disk_sectors(sectors, p),
- .pos = k.k->p,
- };
-}
-
static bool extent_matches_bp(struct bch_fs *c,
enum btree_id btree_id, unsigned level,
struct bkey_s_c k,
bch2_extent_ptr_to_bp(c, btree_id, level, k, p,
&bucket2, &bp2);
- if (!bpos_cmp(bucket, bucket2) &&
+ if (bpos_eq(bucket, bucket2) &&
!memcmp(&bp, &bp2, sizeof(bp)))
return true;
}
if (bkey_val_bytes(bp.k) < sizeof(*bp.v)) {
prt_str(err, "incorrect value size");
- return -EINVAL;
+ return -BCH_ERR_invalid_bkey;
}
- if (bpos_cmp(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset))) {
+ if (!bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset))) {
prt_str(err, "backpointer at wrong pos");
- return -EINVAL;
+ return -BCH_ERR_invalid_bkey;
}
return 0;
prt_printf(&buf, "for ");
bch2_bkey_val_to_text(&buf, c, orig_k);
- if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
- bch_err(c, "%s", buf.buf);
- } else {
+ bch_err(c, "%s", buf.buf);
+ if (test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
+ bch2_inconsistent_error(c);
ret = -EIO;
- bch2_trans_inconsistent(trans, "%s", buf.buf);
}
printbuf_exit(&buf);
goto err;
struct bkey_s_c orig_k)
{
struct bch_fs *c = trans->c;
- struct bch_dev *ca;
struct bch_backpointer *bps = alloc_v4_backpointers(&a->v);
unsigned i, nr = BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v);
struct bkey_i_backpointer *bp_k;
prt_printf(&buf, "for ");
bch2_bkey_val_to_text(&buf, c, orig_k);
- if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags))
- bch_err(c, "%s", buf.buf);
- else {
- bch2_trans_inconsistent(trans, "%s", buf.buf);
- printbuf_exit(&buf);
+ bch_err(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ if (test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
+ bch2_inconsistent_error(c);
return -EIO;
}
}
}
/* Overflow: use backpointer btree */
- bp_k = bch2_trans_kmalloc(trans, sizeof(*bp_k));
- ret = PTR_ERR_OR_ZERO(bp_k);
- if (ret)
- return ret;
-
- ca = bch_dev_bkey_exists(c, a->k.p.inode);
-
- bkey_backpointer_init(&bp_k->k_i);
- bp_k->k.p = bucket_pos_to_bp(c, a->k.p, bp.bucket_offset);
- bp_k->v = bp;
- bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers, bp_k->k.p,
+ bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
+ bucket_pos_to_bp(c, a->k.p, bp.bucket_offset),
BTREE_ITER_INTENT|
BTREE_ITER_SLOTS|
BTREE_ITER_WITH_UPDATES);
prt_printf(&buf, "for ");
bch2_bkey_val_to_text(&buf, c, orig_k);
- if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags))
- bch_err(c, "%s", buf.buf);
- else {
- bch2_trans_inconsistent(trans, "%s", buf.buf);
- printbuf_exit(&buf);
+ bch_err(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ if (test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
+ bch2_inconsistent_error(c);
ret = -EIO;
goto err;
}
}
+ bp_k = bch2_bkey_alloc(trans, &bp_iter, backpointer);
+ ret = PTR_ERR_OR_ZERO(bp_k);
+ if (ret)
+ goto err;
+
+ bp_k->v = bp;
+
ret = bch2_trans_update(trans, &bp_iter, &bp_k->k_i, 0);
err:
bch2_trans_iter_exit(trans, &bp_iter);
for_each_btree_key_norestart(trans, bp_iter, BTREE_ID_backpointers,
bp_pos, 0, k, ret) {
- if (bpos_cmp(k.k->p, bp_end_pos) >= 0)
+ if (bpos_ge(k.k->p, bp_end_pos))
break;
if (k.k->type != KEY_TYPE_backpointer)
struct bkey_s_c alloc_k, bp_k;
int ret;
- if (bpos_cmp(bucket_pos, bucket_start) < 0 ||
- bpos_cmp(bucket_pos, bucket_end) > 0)
+ if (bpos_lt(bucket_pos, bucket_start) ||
+ bpos_gt(bucket_pos, bucket_end))
return 0;
bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc, bucket_pos, 0);
return ret;
}
+/*
+ * Like bucket_pos_to_bp(), but tolerant of bucket positions whose device
+ * index does not correspond to an existing member device: converting such a
+ * position would be meaningless (bucket_to_sector() needs a valid ca), so
+ * the position is returned unchanged instead.
+ *
+ * NOTE(review): callers below use this when seeding/advancing the
+ * BTREE_ID_backpointers iterator from an alloc-btree position, where the
+ * start position may name a device that has been removed — confirm that
+ * returning @bucket unmodified is the intended ordering for that iterator.
+ */
+static struct bpos bucket_pos_to_bp_safe(const struct bch_fs *c,
+					 struct bpos bucket)
+{
+	return bch2_dev_exists2(c, bucket.inode)
+		? bucket_pos_to_bp(c, bucket, 0)
+		: bucket;
+}
+
+
int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
struct bpos start, struct bpos *end)
{
bch2_trans_node_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
start, 0, 1, 0);
bch2_trans_node_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
- bucket_pos_to_bp(trans->c, start, 0), 0, 1, 0);
+ bucket_pos_to_bp_safe(trans->c, start), 0, 1, 0);
while (1) {
alloc_k = !alloc_end
? __bch2_btree_iter_peek_and_restart(trans, &alloc_iter, 0)
break;
}
- if (bpos_cmp(alloc_iter.pos, SPOS_MAX) &&
- bpos_cmp(bucket_pos_to_bp(trans->c, alloc_iter.pos, 0), bp_iter.pos) < 0) {
+ if (bpos_lt(alloc_iter.pos, SPOS_MAX) &&
+ bpos_lt(bucket_pos_to_bp_safe(trans->c, alloc_iter.pos), bp_iter.pos)) {
if (!bch2_btree_iter_advance(&alloc_iter))
alloc_end = true;
} else {
if (ret)
break;
- if (!bpos_cmp(start, POS_MIN) && bpos_cmp(end, SPOS_MAX))
- bch_verbose(c, "check_extents_to_backpointers(): alloc info does not fit in ram,"
- "running in multiple passes with %zu nodes per pass",
- btree_nodes_fit_in_ram(c));
+ if (bpos_eq(start, POS_MIN) && !bpos_eq(end, SPOS_MAX))
+ bch_verbose(c, "%s(): alloc info does not fit in ram, running in multiple passes with %zu nodes per pass",
+ __func__, btree_nodes_fit_in_ram(c));
- if (bpos_cmp(start, POS_MIN) || bpos_cmp(end, SPOS_MAX)) {
+ if (!bpos_eq(start, POS_MIN) || !bpos_eq(end, SPOS_MAX)) {
struct printbuf buf = PRINTBUF;
prt_str(&buf, "check_extents_to_backpointers(): ");
}
ret = bch2_check_extents_to_backpointers_pass(&trans, start, end);
- if (ret || !bpos_cmp(end, SPOS_MAX))
+ if (ret || bpos_eq(end, SPOS_MAX))
break;
start = bpos_successor(end);
if (!bbpos_cmp(start, BBPOS_MIN) &&
bbpos_cmp(end, BBPOS_MAX))
- bch_verbose(c, "check_backpointers_to_extents(): extents do not fit in ram,"
- "running in multiple passes with %zu nodes per pass",
- btree_nodes_fit_in_ram(c));
+ bch_verbose(c, "%s(): extents do not fit in ram, running in multiple passes with %zu nodes per pass",
+ __func__, btree_nodes_fit_in_ram(c));
if (bbpos_cmp(start, BBPOS_MIN) ||
bbpos_cmp(end, BBPOS_MAX)) {