diff --git a/libbcachefs/backpointers.c b/libbcachefs/backpointers.c
index d74de1d..7d4367f 100644
--- a/libbcachefs/backpointers.c
+++ b/libbcachefs/backpointers.c
@@ -9,8 +9,6 @@
 
 #include <linux/mm.h>
 
-#define MAX_EXTENT_COMPRESS_RATIO_SHIFT	10
-
 /*
  * Convert from pos in backpointer btree to pos of corresponding bucket in alloc
  * btree:
@@ -38,32 +36,11 @@ static inline struct bpos bucket_pos_to_bp(const struct bch_fs *c,
 		  (bucket_to_sector(ca, bucket.offset) << MAX_EXTENT_COMPRESS_RATIO_SHIFT) +
 		  bucket_offset);
 
-	BUG_ON(bkey_cmp(bucket, bp_pos_to_bucket(c, ret)));
+	BUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(c, ret)));
 
 	return ret;
 }
 
-void bch2_extent_ptr_to_bp(struct bch_fs *c,
-			   enum btree_id btree_id, unsigned level,
-			   struct bkey_s_c k, struct extent_ptr_decoded p,
-			   struct bpos *bucket_pos, struct bch_backpointer *bp)
-{
-	enum bch_data_type data_type = level ? BCH_DATA_btree : BCH_DATA_user;
-	s64 sectors = level ? btree_sectors(c) : k.k->size;
-	u32 bucket_offset;
-
-	*bucket_pos = PTR_BUCKET_POS_OFFSET(c, &p.ptr, &bucket_offset);
-	*bp = (struct bch_backpointer) {
-		.btree_id	= btree_id,
-		.level		= level,
-		.data_type	= data_type,
-		.bucket_offset	= ((u64) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) +
-			p.crc.offset,
-		.bucket_len	= ptr_disk_sectors(sectors, p),
-		.pos		= k.k->p,
-	};
-}
-
 static bool extent_matches_bp(struct bch_fs *c,
 			      enum btree_id btree_id, unsigned level,
 			      struct bkey_s_c k,
@@ -83,7 +60,7 @@ static bool extent_matches_bp(struct bch_fs *c,
 		bch2_extent_ptr_to_bp(c, btree_id, level,
 				      k, p, &bucket2, &bp2);
 
-		if (!bpos_cmp(bucket, bucket2) &&
+		if (bpos_eq(bucket, bucket2) &&
 		    !memcmp(&bp, &bp2, sizeof(bp)))
 			return true;
 	}
@@ -99,12 +76,12 @@ int bch2_backpointer_invalid(const struct bch_fs *c, struct bkey_s_c k,
 
 	if (bkey_val_bytes(bp.k) < sizeof(*bp.v)) {
 		prt_str(err, "incorrect value size");
-		return -EINVAL;
+		return -BCH_ERR_invalid_bkey;
 	}
 
-	if (bpos_cmp(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset))) {
+	if (!bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset))) {
 		prt_str(err, "backpointer at wrong pos");
-		return -EINVAL;
+		return -BCH_ERR_invalid_bkey;
 	}
 
 	return 0;
@@ -284,11 +261,10 @@ btree:
 	prt_printf(&buf, "for ");
 	bch2_bkey_val_to_text(&buf, c, orig_k);
 
-	if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
-		bch_err(c, "%s", buf.buf);
-	} else {
+	bch_err(c, "%s", buf.buf);
+	if (test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
+		bch2_inconsistent_error(c);
 		ret = -EIO;
-		bch2_trans_inconsistent(trans, "%s", buf.buf);
 	}
 	printbuf_exit(&buf);
 	goto err;
@@ -306,7 +282,6 @@ int bch2_bucket_backpointer_add(struct btree_trans *trans,
 				struct bkey_s_c orig_k)
 {
 	struct bch_fs *c = trans->c;
-	struct bch_dev *ca;
 	struct bch_backpointer *bps = alloc_v4_backpointers(&a->v);
 	unsigned i, nr = BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v);
 	struct bkey_i_backpointer *bp_k;
@@ -340,11 +315,10 @@ int bch2_bucket_backpointer_add(struct btree_trans *trans,
 		prt_printf(&buf, "for ");
 		bch2_bkey_val_to_text(&buf, c, orig_k);
 
-		if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags))
-			bch_err(c, "%s", buf.buf);
-		else {
-			bch2_trans_inconsistent(trans, "%s", buf.buf);
-			printbuf_exit(&buf);
+		bch_err(c, "%s", buf.buf);
+		printbuf_exit(&buf);
+		if (test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
+			bch2_inconsistent_error(c);
 			return -EIO;
 		}
 	}
@@ -357,18 +331,9 @@ int bch2_bucket_backpointer_add(struct btree_trans *trans,
 	}
 
 	/* Overflow: use backpointer btree */
-	bp_k = bch2_trans_kmalloc(trans, sizeof(*bp_k));
-	ret = PTR_ERR_OR_ZERO(bp_k);
-	if (ret)
-		return ret;
-
-	ca = bch_dev_bkey_exists(c, a->k.p.inode);
-
-	bkey_backpointer_init(&bp_k->k_i);
-	bp_k->k.p = bucket_pos_to_bp(c, a->k.p, bp.bucket_offset);
-	bp_k->v = bp;
-
-	bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers, bp_k->k.p,
+	bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
+			     bucket_pos_to_bp(c, a->k.p, bp.bucket_offset),
 			     BTREE_ITER_INTENT|
 			     BTREE_ITER_SLOTS|
 			     BTREE_ITER_WITH_UPDATES);
@@ -392,16 +357,22 @@ int bch2_bucket_backpointer_add(struct btree_trans *trans,
 		prt_printf(&buf, "for ");
 		bch2_bkey_val_to_text(&buf, c, orig_k);
 
-		if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags))
-			bch_err(c, "%s", buf.buf);
-		else {
-			bch2_trans_inconsistent(trans, "%s", buf.buf);
-			printbuf_exit(&buf);
+		bch_err(c, "%s", buf.buf);
+		printbuf_exit(&buf);
+		if (test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
+			bch2_inconsistent_error(c);
 			ret = -EIO;
 			goto err;
 		}
 	}
 
+	bp_k = bch2_bkey_alloc(trans, &bp_iter, backpointer);
+	ret = PTR_ERR_OR_ZERO(bp_k);
+	if (ret)
+		goto err;
+
+	bp_k->v = bp;
+
 	ret = bch2_trans_update(trans, &bp_iter, &bp_k->k_i, 0);
 err:
 	bch2_trans_iter_exit(trans, &bp_iter);
@@ -457,7 +428,7 @@ int bch2_get_next_backpointer(struct btree_trans *trans,
 
 	for_each_btree_key_norestart(trans, bp_iter, BTREE_ID_backpointers,
 				     bp_pos, 0, k, ret) {
-		if (bpos_cmp(k.k->p, bp_end_pos) >= 0)
+		if (bpos_ge(k.k->p, bp_end_pos))
 			break;
 
 		if (k.k->type != KEY_TYPE_backpointer)
@@ -669,8 +640,8 @@ static int check_bp_exists(struct btree_trans *trans,
 	struct bkey_s_c alloc_k, bp_k;
 	int ret;
 
-	if (bpos_cmp(bucket_pos, bucket_start) < 0 ||
-	    bpos_cmp(bucket_pos, bucket_end) > 0)
+	if (bpos_lt(bucket_pos, bucket_start) ||
+	    bpos_gt(bucket_pos, bucket_end))
 		return 0;
 
 	bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc, bucket_pos, 0);
@@ -923,6 +894,14 @@ static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
 	return ret;
 }
 
+static struct bpos bucket_pos_to_bp_safe(const struct bch_fs *c,
+					 struct bpos bucket)
+{
+	return bch2_dev_exists2(c, bucket.inode)
+		? bucket_pos_to_bp(c, bucket, 0)
+		: bucket;
+}
+
 int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
 				 struct bpos start, struct bpos *end)
 {
@@ -936,7 +915,7 @@ int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
 	bch2_trans_node_iter_init(trans, &alloc_iter, BTREE_ID_alloc, start, 0, 1, 0);
 	bch2_trans_node_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
-				  bucket_pos_to_bp(trans->c, start, 0), 0, 1, 0);
+				  bucket_pos_to_bp_safe(trans->c, start), 0, 1, 0);
 
 	while (1) {
 		alloc_k = !alloc_end
 			? __bch2_btree_iter_peek_and_restart(trans, &alloc_iter, 0)
@@ -957,8 +936,8 @@ int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
 			break;
 		}
 
-		if (bpos_cmp(alloc_iter.pos, SPOS_MAX) &&
-		    bpos_cmp(bucket_pos_to_bp(trans->c, alloc_iter.pos, 0), bp_iter.pos) < 0) {
+		if (bpos_lt(alloc_iter.pos, SPOS_MAX) &&
+		    bpos_lt(bucket_pos_to_bp_safe(trans->c, alloc_iter.pos), bp_iter.pos)) {
 			if (!bch2_btree_iter_advance(&alloc_iter))
 				alloc_end = true;
 		} else {
@@ -983,12 +962,11 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c)
 		if (ret)
 			break;
 
-		if (!bpos_cmp(start, POS_MIN) && bpos_cmp(end, SPOS_MAX))
-			bch_verbose(c, "check_extents_to_backpointers(): alloc info does not fit in ram,"
-				    "running in multiple passes with %zu nodes per pass",
-				    btree_nodes_fit_in_ram(c));
+		if (bpos_eq(start, POS_MIN) && !bpos_eq(end, SPOS_MAX))
+			bch_verbose(c, "%s(): alloc info does not fit in ram, running in multiple passes with %zu nodes per pass",
+				    __func__, btree_nodes_fit_in_ram(c));
 
-		if (bpos_cmp(start, POS_MIN) || bpos_cmp(end, SPOS_MAX)) {
+		if (!bpos_eq(start, POS_MIN) || !bpos_eq(end, SPOS_MAX)) {
 			struct printbuf buf = PRINTBUF;
 
 			prt_str(&buf, "check_extents_to_backpointers(): ");
@@ -1001,7 +979,7 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c)
 		}
 
 		ret = bch2_check_extents_to_backpointers_pass(&trans, start, end);
-		if (ret || !bpos_cmp(end, SPOS_MAX))
+		if (ret || bpos_eq(end, SPOS_MAX))
 			break;
 
 		start = bpos_successor(end);
@@ -1099,9 +1077,8 @@ int bch2_check_backpointers_to_extents(struct bch_fs *c)
 
 		if (!bbpos_cmp(start, BBPOS_MIN) &&
 		    bbpos_cmp(end, BBPOS_MAX))
-			bch_verbose(c, "check_backpointers_to_extents(): extents do not fit in ram,"
-				    "running in multiple passes with %zu nodes per pass",
-				    btree_nodes_fit_in_ram(c));
+			bch_verbose(c, "%s(): extents do not fit in ram, running in multiple passes with %zu nodes per pass",
+				    __func__, btree_nodes_fit_in_ram(c));
 
 		if (bbpos_cmp(start, BBPOS_MIN) ||
 		    bbpos_cmp(end, BBPOS_MAX)) {
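
Most of the mechanical churn in this diff replaces open-coded three-way comparisons
(bkey_cmp()/bpos_cmp() returning <0, 0 or >0) with dedicated boolean helpers
(bkey_eq(), bpos_eq(), bpos_lt(), bpos_gt(), bpos_ge()), which make the intent of
each call site explicit and are harder to negate incorrectly. The following is a
minimal, self-contained sketch of that helper pattern; the struct here is a
simplified stand-in (the real struct bpos also carries a snapshot field, and the
real helpers live in the bcachefs headers), not the actual bcachefs definitions:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-in for the btree position type. */
	struct bpos {
		uint64_t inode;
		uint64_t offset;
	};

	/* Three-way comparison, as the old call sites used. */
	static inline int bpos_cmp(struct bpos l, struct bpos r)
	{
		if (l.inode != r.inode)
			return l.inode < r.inode ? -1 : 1;
		if (l.offset != r.offset)
			return l.offset < r.offset ? -1 : 1;
		return 0;
	}

	/* Boolean wrappers, in the style the diff converts call sites to. */
	static inline bool bpos_eq(struct bpos l, struct bpos r) { return !bpos_cmp(l, r); }
	static inline bool bpos_lt(struct bpos l, struct bpos r) { return bpos_cmp(l, r) < 0; }
	static inline bool bpos_gt(struct bpos l, struct bpos r) { return bpos_cmp(l, r) > 0; }
	static inline bool bpos_ge(struct bpos l, struct bpos r) { return bpos_cmp(l, r) >= 0; }

	int main(void)
	{
		struct bpos a = { .inode = 1, .offset = 8 };
		struct bpos b = { .inode = 1, .offset = 16 };

		/* Old style: bpos_cmp(a, b) < 0;  new style: bpos_lt(a, b). */
		printf("a < b: %d, a == b: %d\n", bpos_lt(a, b), bpos_eq(a, b));
		return 0;
	}

The same reasoning applies to the BUG_ON() change near the top of the diff:
BUG_ON(bkey_cmp(bucket, ...)) asserted equality via an implicit "nonzero means
unequal", whereas BUG_ON(!bkey_eq(bucket, ...)) states the invariant directly.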