From 87179c7a6e2a210ea57951d444a3055e883d08fa Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Tue, 1 Aug 2023 20:18:33 -0400 Subject: [PATCH] Update bcachefs sources to 33a60d9b05 bcachefs: Assorted fixes for clang --- .bcachefs_revision | 2 +- include/linux/kernel.h | 3 + include/linux/math.h | 15 ++ include/linux/rhashtable.h | 3 - include/linux/slab.h | 12 +- libbcachefs/alloc_background.c | 35 +---- libbcachefs/alloc_foreground.c | 6 +- libbcachefs/alloc_types.h | 2 +- libbcachefs/bcachefs_format.h | 79 +++++++--- libbcachefs/bkey_methods.c | 80 +--------- libbcachefs/bkey_methods.h | 11 +- libbcachefs/btree_gc.h | 2 +- libbcachefs/btree_iter.c | 22 --- libbcachefs/btree_locking.c | 6 +- libbcachefs/btree_locking.h | 4 +- libbcachefs/btree_types.h | 67 ++++---- libbcachefs/btree_update.h | 14 +- libbcachefs/btree_update_interior.c | 4 +- libbcachefs/btree_update_interior.h | 9 ++ libbcachefs/btree_update_leaf.c | 229 +++++++++++++++------------- libbcachefs/darray.h | 6 +- libbcachefs/ec.c | 108 +++++++------ libbcachefs/ec.h | 5 +- libbcachefs/extents.h | 2 +- libbcachefs/fsck.c | 76 +-------- libbcachefs/inode.c | 63 ++++++++ libbcachefs/inode.h | 2 + libbcachefs/opts.c | 22 +-- libbcachefs/subvolume.c | 13 +- libbcachefs/super-io.c | 7 +- tools-util.c | 12 +- tools-util.h | 16 +- 32 files changed, 460 insertions(+), 477 deletions(-) diff --git a/.bcachefs_revision b/.bcachefs_revision index deb0e34..7fb9f9a 100644 --- a/.bcachefs_revision +++ b/.bcachefs_revision @@ -1 +1 @@ -4b5917839c4b279b303133b87cd94cc1a352a0e6 +33a60d9b05f523be93973b25e0df1ab2d65fa4fc diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 01466c4..35a7207 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -13,6 +13,9 @@ #include #include +#define BIT(nr) (1UL << (nr)) +#define BIT_ULL(nr) (1ULL << (nr)) + #define __ARG_PLACEHOLDER_1 0, #define __take_second_arg(__ignored, val, ...) val diff --git a/include/linux/math.h b/include/linux/math.h index db7cdd2..85c8c8a 100644 --- a/include/linux/math.h +++ b/include/linux/math.h @@ -153,4 +153,19 @@ static inline u32 int_sqrt64(u64 x) } #endif +#define abs(x) __abs_choose_expr(x, long long, \ + __abs_choose_expr(x, long, \ + __abs_choose_expr(x, int, \ + __abs_choose_expr(x, short, \ + __abs_choose_expr(x, char, \ + __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(x), char), \ + (char)({ signed char __x = (x); __x<0?-__x:__x; }), \ + ((void)0))))))) + +#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(x), signed type) || \ + __builtin_types_compatible_p(typeof(x), unsigned type), \ + ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other) + #endif /* _LINUX_MATH_H */ diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index adeef32..1c6dbdc 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -27,9 +27,6 @@ #include #include -#define BIT(nr) (1UL << (nr)) -#define BIT_ULL(nr) (1ULL << (nr)) - #include /* * Objects in an rhashtable have an embedded struct rhash_head diff --git a/include/linux/slab.h b/include/linux/slab.h index 78f906a..25ccf1a 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -80,9 +80,15 @@ static inline void *krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t } #define kzalloc(size, flags) kmalloc(size, flags|__GFP_ZERO) -#define kmalloc_array(n, size, flags) \ - ((size) != 0 && (n) > SIZE_MAX / (size) \ - ? 
NULL : kmalloc((n) * (size), flags)) + +static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) +{ + size_t bytes; + + if (unlikely(check_mul_overflow(n, size, &bytes))) + return NULL; + return kmalloc(bytes, flags); +} #define kvmalloc_array(n, size, flags) \ ((size) != 0 && (n) > SIZE_MAX / (size) \ diff --git a/libbcachefs/alloc_background.c b/libbcachefs/alloc_background.c index 8d8481f..7bf2a50 100644 --- a/libbcachefs/alloc_background.c +++ b/libbcachefs/alloc_background.c @@ -79,36 +79,6 @@ static inline u64 alloc_field_v1_get(const struct bch_alloc *a, return v; } -static inline void alloc_field_v1_put(struct bkey_i_alloc *a, void **p, - unsigned field, u64 v) -{ - unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field]; - - if (!v) - return; - - a->v.fields |= 1 << field; - - switch (bytes) { - case 1: - *((u8 *) *p) = v; - break; - case 2: - *((__le16 *) *p) = cpu_to_le16(v); - break; - case 4: - *((__le32 *) *p) = cpu_to_le32(v); - break; - case 8: - *((__le64 *) *p) = cpu_to_le64(v); - break; - default: - BUG(); - } - - *p += bytes; -} - static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out, struct bkey_s_c k) { @@ -267,7 +237,8 @@ int bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k, } int bch2_alloc_v4_invalid(const struct bch_fs *c, struct bkey_s_c k, - unsigned flags, struct printbuf *err) + enum bkey_invalid_flags flags, + struct printbuf *err) { struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k); int rw = flags & WRITE; @@ -1334,7 +1305,7 @@ static int bch2_check_discard_freespace_key(struct btree_trans *trans, struct btree_iter *iter, struct bpos end) { - if (!btree_node_type_is_extents(iter->btree_id)) { + if (!btree_id_is_extents(iter->btree_id)) { return __bch2_check_discard_freespace_key(trans, iter); } else { int ret; diff --git a/libbcachefs/alloc_foreground.c b/libbcachefs/alloc_foreground.c index fcb7311..1f4c5b3 100644 --- a/libbcachefs/alloc_foreground.c +++ b/libbcachefs/alloc_foreground.c @@ -1042,8 +1042,12 @@ static bool should_drop_bucket(struct open_bucket *ob, struct bch_fs *c, unsigned i; if (!drop && ob->ec) { + unsigned nr_blocks; + mutex_lock(&ob->ec->lock); - for (i = 0; i < ob->ec->new_stripe.key.v.nr_blocks; i++) { + nr_blocks = bkey_i_to_stripe(&ob->ec->new_stripe.key)->v.nr_blocks; + + for (i = 0; i < nr_blocks; i++) { if (!ob->ec->blocks[i]) continue; diff --git a/libbcachefs/alloc_types.h b/libbcachefs/alloc_types.h index c33a299..804a843 100644 --- a/libbcachefs/alloc_types.h +++ b/libbcachefs/alloc_types.h @@ -32,7 +32,7 @@ enum bch_watermark { }; #define BCH_WATERMARK_BITS 3 -#define BCH_WATERMARK_MASK ~(~0 << BCH_WATERMARK_BITS) +#define BCH_WATERMARK_MASK ~(~0U << BCH_WATERMARK_BITS) #define OPEN_BUCKETS_COUNT 1024 diff --git a/libbcachefs/bcachefs_format.h b/libbcachefs/bcachefs_format.h index 5c308f8..b771d80 100644 --- a/libbcachefs/bcachefs_format.h +++ b/libbcachefs/bcachefs_format.h @@ -2138,7 +2138,7 @@ struct jset_entry_dev_usage { __le64 _buckets_unavailable; /* No longer used */ struct jset_entry_dev_usage_type d[]; -} __packed; +}; static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u) { @@ -2194,26 +2194,67 @@ LE32_BITMASK(JSET_NO_FLUSH, struct jset, flags, 5, 6); /* Btree: */ -#define BCH_BTREE_IDS() \ - x(extents, 0) \ - x(inodes, 1) \ - x(dirents, 2) \ - x(xattrs, 3) \ - x(alloc, 4) \ - x(quotas, 5) \ - x(stripes, 6) \ - x(reflink, 7) \ - x(subvolumes, 8) \ - x(snapshots, 9) \ - x(lru, 10) \ - x(freespace, 11) \ - x(need_discard, 12) \ - x(backpointers, 13) \ 
- x(bucket_gens, 14) \ - x(snapshot_trees, 15) +enum btree_id_flags { + BTREE_ID_EXTENTS = BIT(0), + BTREE_ID_SNAPSHOTS = BIT(1), + BTREE_ID_DATA = BIT(2), +}; + +#define BCH_BTREE_IDS() \ + x(extents, 0, BTREE_ID_EXTENTS|BTREE_ID_SNAPSHOTS|BTREE_ID_DATA,\ + BIT_ULL(KEY_TYPE_whiteout)| \ + BIT_ULL(KEY_TYPE_error)| \ + BIT_ULL(KEY_TYPE_cookie)| \ + BIT_ULL(KEY_TYPE_extent)| \ + BIT_ULL(KEY_TYPE_reservation)| \ + BIT_ULL(KEY_TYPE_reflink_p)| \ + BIT_ULL(KEY_TYPE_inline_data)) \ + x(inodes, 1, BTREE_ID_SNAPSHOTS, \ + BIT_ULL(KEY_TYPE_whiteout)| \ + BIT_ULL(KEY_TYPE_inode)| \ + BIT_ULL(KEY_TYPE_inode_v2)| \ + BIT_ULL(KEY_TYPE_inode_v3)| \ + BIT_ULL(KEY_TYPE_inode_generation)) \ + x(dirents, 2, BTREE_ID_SNAPSHOTS, \ + BIT_ULL(KEY_TYPE_whiteout)| \ + BIT_ULL(KEY_TYPE_hash_whiteout)| \ + BIT_ULL(KEY_TYPE_dirent)) \ + x(xattrs, 3, BTREE_ID_SNAPSHOTS, \ + BIT_ULL(KEY_TYPE_whiteout)| \ + BIT_ULL(KEY_TYPE_cookie)| \ + BIT_ULL(KEY_TYPE_hash_whiteout)| \ + BIT_ULL(KEY_TYPE_xattr)) \ + x(alloc, 4, 0, \ + BIT_ULL(KEY_TYPE_alloc)| \ + BIT_ULL(KEY_TYPE_alloc_v2)| \ + BIT_ULL(KEY_TYPE_alloc_v3)| \ + BIT_ULL(KEY_TYPE_alloc_v4)) \ + x(quotas, 5, 0, \ + BIT_ULL(KEY_TYPE_quota)) \ + x(stripes, 6, 0, \ + BIT_ULL(KEY_TYPE_stripe)) \ + x(reflink, 7, BTREE_ID_EXTENTS|BTREE_ID_DATA, \ + BIT_ULL(KEY_TYPE_reflink_v)| \ + BIT_ULL(KEY_TYPE_indirect_inline_data)) \ + x(subvolumes, 8, 0, \ + BIT_ULL(KEY_TYPE_subvolume)) \ + x(snapshots, 9, 0, \ + BIT_ULL(KEY_TYPE_snapshot)) \ + x(lru, 10, 0, \ + BIT_ULL(KEY_TYPE_set)) \ + x(freespace, 11, BTREE_ID_EXTENTS, \ + BIT_ULL(KEY_TYPE_set)) \ + x(need_discard, 12, 0, \ + BIT_ULL(KEY_TYPE_set)) \ + x(backpointers, 13, 0, \ + BIT_ULL(KEY_TYPE_backpointer)) \ + x(bucket_gens, 14, 0, \ + BIT_ULL(KEY_TYPE_bucket_gens)) \ + x(snapshot_trees, 15, 0, \ + BIT_ULL(KEY_TYPE_snapshot_tree)) enum btree_id { -#define x(kwd, val) BTREE_ID_##kwd = val, +#define x(name, nr, ...) 
BTREE_ID_##name = nr, BCH_BTREE_IDS() #undef x BTREE_ID_NR diff --git a/libbcachefs/bkey_methods.c b/libbcachefs/bkey_methods.c index 1381166..90557f4 100644 --- a/libbcachefs/bkey_methods.c +++ b/libbcachefs/bkey_methods.c @@ -140,78 +140,14 @@ int bch2_bkey_val_invalid(struct bch_fs *c, struct bkey_s_c k, return ops->key_invalid(c, k, flags, err); } -static unsigned bch2_key_types_allowed[] = { - [BKEY_TYPE_extents] = - (1U << KEY_TYPE_deleted)| - (1U << KEY_TYPE_whiteout)| - (1U << KEY_TYPE_error)| - (1U << KEY_TYPE_cookie)| - (1U << KEY_TYPE_extent)| - (1U << KEY_TYPE_reservation)| - (1U << KEY_TYPE_reflink_p)| - (1U << KEY_TYPE_inline_data), - [BKEY_TYPE_inodes] = - (1U << KEY_TYPE_deleted)| - (1U << KEY_TYPE_whiteout)| - (1U << KEY_TYPE_inode)| - (1U << KEY_TYPE_inode_v2)| - (1U << KEY_TYPE_inode_v3)| - (1U << KEY_TYPE_inode_generation), - [BKEY_TYPE_dirents] = - (1U << KEY_TYPE_deleted)| - (1U << KEY_TYPE_whiteout)| - (1U << KEY_TYPE_hash_whiteout)| - (1U << KEY_TYPE_dirent), - [BKEY_TYPE_xattrs] = - (1U << KEY_TYPE_deleted)| - (1U << KEY_TYPE_whiteout)| - (1U << KEY_TYPE_cookie)| - (1U << KEY_TYPE_hash_whiteout)| - (1U << KEY_TYPE_xattr), - [BKEY_TYPE_alloc] = - (1U << KEY_TYPE_deleted)| - (1U << KEY_TYPE_alloc)| - (1U << KEY_TYPE_alloc_v2)| - (1U << KEY_TYPE_alloc_v3)| - (1U << KEY_TYPE_alloc_v4), - [BKEY_TYPE_quotas] = - (1U << KEY_TYPE_deleted)| - (1U << KEY_TYPE_quota), - [BKEY_TYPE_stripes] = - (1U << KEY_TYPE_deleted)| - (1U << KEY_TYPE_stripe), - [BKEY_TYPE_reflink] = - (1U << KEY_TYPE_deleted)| - (1U << KEY_TYPE_reflink_v)| - (1U << KEY_TYPE_indirect_inline_data), - [BKEY_TYPE_subvolumes] = - (1U << KEY_TYPE_deleted)| - (1U << KEY_TYPE_subvolume), - [BKEY_TYPE_snapshots] = - (1U << KEY_TYPE_deleted)| - (1U << KEY_TYPE_snapshot), - [BKEY_TYPE_lru] = - (1U << KEY_TYPE_deleted)| - (1U << KEY_TYPE_set), - [BKEY_TYPE_freespace] = - (1U << KEY_TYPE_deleted)| - (1U << KEY_TYPE_set), - [BKEY_TYPE_need_discard] = - (1U << KEY_TYPE_deleted)| - (1U << KEY_TYPE_set), - [BKEY_TYPE_backpointers] = - (1U << KEY_TYPE_deleted)| - (1U << KEY_TYPE_backpointer), - [BKEY_TYPE_bucket_gens] = - (1U << KEY_TYPE_deleted)| - (1U << KEY_TYPE_bucket_gens), - [BKEY_TYPE_snapshot_trees] = - (1U << KEY_TYPE_deleted)| - (1U << KEY_TYPE_snapshot_tree), +static u64 bch2_key_types_allowed[] = { +#define x(name, nr, flags, keys) [BKEY_TYPE_##name] = BIT_ULL(KEY_TYPE_deleted)|keys, + BCH_BTREE_IDS() +#undef x [BKEY_TYPE_btree] = - (1U << KEY_TYPE_deleted)| - (1U << KEY_TYPE_btree_ptr)| - (1U << KEY_TYPE_btree_ptr_v2), + BIT_ULL(KEY_TYPE_deleted)| + BIT_ULL(KEY_TYPE_btree_ptr)| + BIT_ULL(KEY_TYPE_btree_ptr_v2), }; int __bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k, @@ -225,7 +161,7 @@ int __bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k, } if (flags & BKEY_INVALID_COMMIT && - !(bch2_key_types_allowed[type] & (1U << k.k->type))) { + !(bch2_key_types_allowed[type] & BIT_ULL(k.k->type))) { prt_printf(err, "invalid key type for btree %s (%s)", bch2_btree_ids[type], bch2_bkey_types[k.k->type]); return -BCH_ERR_invalid_bkey; diff --git a/libbcachefs/bkey_methods.h b/libbcachefs/bkey_methods.h index f4e60d2..d7b6376 100644 --- a/libbcachefs/bkey_methods.h +++ b/libbcachefs/bkey_methods.h @@ -55,11 +55,12 @@ static inline const struct bkey_ops *bch2_bkey_type_ops(enum bch_bkey_type type) : &bch2_bkey_null_ops; } -int bch2_bkey_val_invalid(struct bch_fs *, struct bkey_s_c, unsigned, struct printbuf *); -int __bch2_bkey_invalid(struct bch_fs *, struct bkey_s_c, - enum btree_node_type, unsigned, struct 
printbuf *); -int bch2_bkey_invalid(struct bch_fs *, struct bkey_s_c, - enum btree_node_type, unsigned, struct printbuf *); +int bch2_bkey_val_invalid(struct bch_fs *, struct bkey_s_c, + enum bkey_invalid_flags, struct printbuf *); +int __bch2_bkey_invalid(struct bch_fs *, struct bkey_s_c, enum btree_node_type, + enum bkey_invalid_flags, struct printbuf *); +int bch2_bkey_invalid(struct bch_fs *, struct bkey_s_c, enum btree_node_type, + enum bkey_invalid_flags, struct printbuf *); int bch2_bkey_in_btree_node(struct btree *, struct bkey_s_c, struct printbuf *); void bch2_bpos_to_text(struct printbuf *, struct bpos); diff --git a/libbcachefs/btree_gc.h b/libbcachefs/btree_gc.h index 402c691..b45e382 100644 --- a/libbcachefs/btree_gc.h +++ b/libbcachefs/btree_gc.h @@ -51,7 +51,7 @@ static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r) static inline enum gc_phase btree_id_to_gc_phase(enum btree_id id) { switch (id) { -#define x(name, v) case BTREE_ID_##name: return GC_PHASE_BTREE_##name; +#define x(name, v, ...) case BTREE_ID_##name: return GC_PHASE_BTREE_##name; BCH_BTREE_IDS() #undef x default: diff --git a/libbcachefs/btree_iter.c b/libbcachefs/btree_iter.c index e292c5a..dfb77b2 100644 --- a/libbcachefs/btree_iter.c +++ b/libbcachefs/btree_iter.c @@ -35,18 +35,6 @@ static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter) static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *); -/* - * Unlocks before scheduling - * Note: does not revalidate iterator - */ -static inline int bch2_trans_cond_resched(struct btree_trans *trans) -{ - if (need_resched() || race_fault()) - return drop_locks_do(trans, (schedule(), 0)); - else - return 0; -} - static inline int __btree_path_cmp(const struct btree_path *l, enum btree_id r_btree_id, bool r_cached, @@ -2732,16 +2720,6 @@ void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter) iter->key_cache_path = NULL; } -static inline void bch2_trans_iter_init_inlined(struct btree_trans *trans, - struct btree_iter *iter, - unsigned btree_id, struct bpos pos, - unsigned flags) -{ - bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0, - bch2_btree_iter_flags(trans, btree_id, flags), - _RET_IP_); -} - void bch2_trans_iter_init_outlined(struct btree_trans *trans, struct btree_iter *iter, unsigned btree_id, struct bpos pos, diff --git a/libbcachefs/btree_locking.c b/libbcachefs/btree_locking.c index d7fd871..0b0f9d6 100644 --- a/libbcachefs/btree_locking.c +++ b/libbcachefs/btree_locking.c @@ -387,7 +387,7 @@ int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *p six_lock_readers_add(&b->lock, readers); if (ret) - mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_intent); + mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_INTENT_LOCKED); return ret; } @@ -551,7 +551,7 @@ bool bch2_btree_node_upgrade(struct btree_trans *trans, trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level); return false; success: - mark_btree_node_locked_noreset(path, level, SIX_LOCK_intent); + mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED); return true; } @@ -666,7 +666,7 @@ void __bch2_btree_path_downgrade(struct btree_trans *trans, } else { if (btree_node_intent_locked(path, l)) { six_lock_downgrade(&path->l[l].b->c.lock); - mark_btree_node_locked_noreset(path, l, SIX_LOCK_read); + mark_btree_node_locked_noreset(path, l, BTREE_NODE_READ_LOCKED); } break; } diff --git a/libbcachefs/btree_locking.h 
b/libbcachefs/btree_locking.h index f3e58aa..ce3c7d9 100644 --- a/libbcachefs/btree_locking.h +++ b/libbcachefs/btree_locking.h @@ -181,7 +181,7 @@ bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_pat EBUG_ON(path->l[b->c.level].lock_seq != six_lock_seq(&b->c.lock)); EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write); - mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_intent); + mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED); trans_for_each_path_with_node(trans, b, linked) linked->l[b->c.level].lock_seq++; @@ -294,7 +294,7 @@ static inline int __btree_node_lock_write(struct btree_trans *trans, * write lock: thus, we need to tell the cycle detector we have a write * lock _before_ taking the lock: */ - mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_write); + mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_WRITE_LOCKED); return likely(six_trylock_write(&b->lock)) ? 0 diff --git a/libbcachefs/btree_types.h b/libbcachefs/btree_types.h index d953601..6b6333d 100644 --- a/libbcachefs/btree_types.h +++ b/libbcachefs/btree_types.h @@ -643,7 +643,7 @@ static inline unsigned bset_byte_offset(struct btree *b, void *i) } enum btree_node_type { -#define x(kwd, val) BKEY_TYPE_##kwd = val, +#define x(kwd, val, ...) BKEY_TYPE_##kwd = val, BCH_BTREE_IDS() #undef x BKEY_TYPE_btree, @@ -662,31 +662,37 @@ static inline enum btree_node_type btree_node_type(struct btree *b) } #define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS \ - ((1U << BKEY_TYPE_extents)| \ - (1U << BKEY_TYPE_alloc)| \ - (1U << BKEY_TYPE_inodes)| \ - (1U << BKEY_TYPE_stripes)| \ - (1U << BKEY_TYPE_reflink)| \ - (1U << BKEY_TYPE_btree)) + (BIT(BKEY_TYPE_extents)| \ + BIT(BKEY_TYPE_alloc)| \ + BIT(BKEY_TYPE_inodes)| \ + BIT(BKEY_TYPE_stripes)| \ + BIT(BKEY_TYPE_reflink)| \ + BIT(BKEY_TYPE_btree)) #define BTREE_NODE_TYPE_HAS_MEM_TRIGGERS \ - ((1U << BKEY_TYPE_alloc)| \ - (1U << BKEY_TYPE_inodes)| \ - (1U << BKEY_TYPE_stripes)| \ - (1U << BKEY_TYPE_snapshots)) + (BIT(BKEY_TYPE_alloc)| \ + BIT(BKEY_TYPE_inodes)| \ + BIT(BKEY_TYPE_stripes)| \ + BIT(BKEY_TYPE_snapshots)) #define BTREE_NODE_TYPE_HAS_TRIGGERS \ (BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS| \ BTREE_NODE_TYPE_HAS_MEM_TRIGGERS) -#define BTREE_ID_IS_EXTENTS \ - ((1U << BTREE_ID_extents)| \ - (1U << BTREE_ID_reflink)| \ - (1U << BTREE_ID_freespace)) +static inline bool btree_node_type_needs_gc(enum btree_node_type type) +{ + return BTREE_NODE_TYPE_HAS_TRIGGERS & (1U << type); +} static inline bool btree_node_type_is_extents(enum btree_node_type type) { - return (1U << type) & BTREE_ID_IS_EXTENTS; + const unsigned mask = 0 +#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_EXTENTS)) << nr) + BCH_BTREE_IDS() +#undef x + ; + + return (1U << type) & mask; } static inline bool btree_id_is_extents(enum btree_id btree) @@ -694,29 +700,26 @@ static inline bool btree_id_is_extents(enum btree_id btree) return btree_node_type_is_extents((enum btree_node_type) btree); } -#define BTREE_ID_HAS_SNAPSHOTS \ - ((1U << BTREE_ID_extents)| \ - (1U << BTREE_ID_inodes)| \ - (1U << BTREE_ID_dirents)| \ - (1U << BTREE_ID_xattrs)) - -#define BTREE_ID_HAS_PTRS \ - ((1U << BTREE_ID_extents)| \ - (1U << BTREE_ID_reflink)) - static inline bool btree_type_has_snapshots(enum btree_id id) { - return (1 << id) & BTREE_ID_HAS_SNAPSHOTS; + const unsigned mask = 0 +#define x(name, nr, flags, ...) 
|((!!((flags) & BTREE_ID_SNAPSHOTS)) << nr) + BCH_BTREE_IDS() +#undef x + ; + + return (1U << id) & mask; } static inline bool btree_type_has_ptrs(enum btree_id id) { - return (1 << id) & BTREE_ID_HAS_PTRS; -} + const unsigned mask = 0 +#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_DATA)) << nr) + BCH_BTREE_IDS() +#undef x + ; -static inline bool btree_node_type_needs_gc(enum btree_node_type type) -{ - return BTREE_NODE_TYPE_HAS_TRIGGERS & (1U << type); + return (1U << id) & mask; } struct btree_root { diff --git a/libbcachefs/btree_update.h b/libbcachefs/btree_update.h index f5700c2..2281140 100644 --- a/libbcachefs/btree_update.h +++ b/libbcachefs/btree_update.h @@ -74,15 +74,6 @@ int bch2_btree_delete_range(struct bch_fs *, enum btree_id, int bch2_btree_bit_mod(struct btree_trans *, enum btree_id, struct bpos, bool); -int bch2_btree_node_rewrite(struct btree_trans *, struct btree_iter *, - struct btree *, unsigned); -void bch2_btree_node_rewrite_async(struct bch_fs *, struct btree *); -int bch2_btree_node_update_key(struct btree_trans *, struct btree_iter *, - struct btree *, struct bkey_i *, - unsigned, bool); -int bch2_btree_node_update_key_get_iter(struct btree_trans *, struct btree *, - struct bkey_i *, unsigned, bool); - int __bch2_insert_snapshot_whiteouts(struct btree_trans *, enum btree_id, struct bpos, struct bpos); @@ -105,8 +96,9 @@ static inline int bch2_insert_snapshot_whiteouts(struct btree_trans *trans, return __bch2_insert_snapshot_whiteouts(trans, btree, old_pos, new_pos); } -int bch2_trans_update_extent(struct btree_trans *, struct btree_iter *, - struct bkey_i *, enum btree_update_flags); +int bch2_trans_update_extent_overwrite(struct btree_trans *, struct btree_iter *, + enum btree_update_flags, + struct bkey_s_c, struct bkey_s_c); int bch2_bkey_get_empty_slot(struct btree_trans *, struct btree_iter *, enum btree_id, struct bpos); diff --git a/libbcachefs/btree_update_interior.c b/libbcachefs/btree_update_interior.c index 3659b2c..f42ef46 100644 --- a/libbcachefs/btree_update_interior.c +++ b/libbcachefs/btree_update_interior.c @@ -188,7 +188,7 @@ static void bch2_btree_node_free_inmem(struct btree_trans *trans, bch2_btree_node_hash_remove(&c->btree_cache, b); __btree_node_free(c, b); six_unlock_write(&b->c.lock); - mark_btree_node_locked_noreset(path, level, SIX_LOCK_intent); + mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED); trans_for_each_path(trans, path) if (path->l[level].b == b) { @@ -720,7 +720,7 @@ err: mutex_unlock(&c->btree_interior_update_lock); - mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_intent); + mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED); six_unlock_write(&b->c.lock); btree_node_write_if_need(c, b, SIX_LOCK_intent); diff --git a/libbcachefs/btree_update_interior.h b/libbcachefs/btree_update_interior.h index 221b7ad..5e0a467 100644 --- a/libbcachefs/btree_update_interior.h +++ b/libbcachefs/btree_update_interior.h @@ -154,6 +154,15 @@ static inline int bch2_foreground_maybe_merge(struct btree_trans *trans, btree_next_sib); } +int bch2_btree_node_rewrite(struct btree_trans *, struct btree_iter *, + struct btree *, unsigned); +void bch2_btree_node_rewrite_async(struct bch_fs *, struct btree *); +int bch2_btree_node_update_key(struct btree_trans *, struct btree_iter *, + struct btree *, struct bkey_i *, + unsigned, bool); +int bch2_btree_node_update_key_get_iter(struct btree_trans *, struct btree *, + struct bkey_i *, unsigned, bool); + void 
bch2_btree_set_root_for_read(struct bch_fs *, struct btree *); void bch2_btree_root_alloc(struct bch_fs *, enum btree_id); diff --git a/libbcachefs/btree_update_leaf.c b/libbcachefs/btree_update_leaf.c index 336a91e..369e37a 100644 --- a/libbcachefs/btree_update_leaf.c +++ b/libbcachefs/btree_update_leaf.c @@ -413,7 +413,7 @@ static int run_one_mem_trigger(struct btree_trans *trans, if (unlikely(flags & BTREE_TRIGGER_NORUN)) return 0; - if (!btree_node_type_needs_gc(i->btree_id)) + if (!btree_node_type_needs_gc((enum btree_node_type) i->btree_id)) return 0; if (old_ops->atomic_trigger == new_ops->atomic_trigger && @@ -852,12 +852,11 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans, unsigned flags { struct bch_fs *c = trans->c; struct btree_insert_entry *i; - int ret, u64s_delta = 0; + int ret = 0, u64s_delta = 0; #ifdef CONFIG_BCACHEFS_DEBUG - struct printbuf buf = PRINTBUF; - trans_for_each_update(trans, i) { + struct printbuf buf = PRINTBUF; enum bkey_invalid_flags invalid_flags = 0; if (!(flags & BTREE_INSERT_JOURNAL_REPLAY)) @@ -865,10 +864,13 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans, unsigned flags if (unlikely(bch2_bkey_invalid(c, bkey_i_to_s_c(i->k), i->bkey_type, invalid_flags, &buf))) - return bch2_trans_commit_bkey_invalid(trans, flags, i, &buf); + ret = bch2_trans_commit_bkey_invalid(trans, flags, i, &buf); btree_insert_entry_checks(trans, i); + printbuf_exit(&buf); + + if (ret) + return ret; } - printbuf_exit(&buf); #endif trans_for_each_update(trans, i) { @@ -1327,7 +1329,7 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans, struct bpos new_pos) { struct bch_fs *c = trans->c; - struct btree_iter old_iter, new_iter; + struct btree_iter old_iter, new_iter = { NULL }; struct bkey_s_c old_k, new_k; snapshot_id_list s; struct bkey_i *update; @@ -1377,25 +1379,122 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans, if (ret) break; } + bch2_trans_iter_exit(trans, &new_iter); bch2_trans_iter_exit(trans, &old_iter); darray_exit(&s); return ret; } -int bch2_trans_update_extent(struct btree_trans *trans, - struct btree_iter *orig_iter, - struct bkey_i *insert, - enum btree_update_flags flags) +int bch2_trans_update_extent_overwrite(struct btree_trans *trans, + struct btree_iter *iter, + enum btree_update_flags flags, + struct bkey_s_c old, + struct bkey_s_c new) { - struct btree_iter iter; - struct bpos start = bkey_start_pos(&insert->k); + enum btree_id btree_id = iter->btree_id; struct bkey_i *update; + struct bpos new_start = bkey_start_pos(new.k); + bool front_split = bkey_lt(bkey_start_pos(old.k), new_start); + bool back_split = bkey_gt(old.k->p, new.k->p); + int ret = 0, compressed_sectors; + + /* + * If we're going to be splitting a compressed extent, note it + * so that __bch2_trans_commit() can increase our disk + * reservation: + */ + if (((front_split && back_split) || + ((front_split || back_split) && old.k->p.snapshot != new.k->p.snapshot)) && + (compressed_sectors = bch2_bkey_sectors_compressed(old))) + trans->extra_journal_res += compressed_sectors; + + if (front_split) { + update = bch2_bkey_make_mut_noupdate(trans, old); + if ((ret = PTR_ERR_OR_ZERO(update))) + return ret; + + bch2_cut_back(new_start, update); + + ret = bch2_insert_snapshot_whiteouts(trans, btree_id, + old.k->p, update->k.p) ?: + bch2_btree_insert_nonextent(trans, btree_id, update, + BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags); + if (ret) + return ret; + } + + /* If we're overwriting in a different snapshot - middle split: */ + if 
(old.k->p.snapshot != new.k->p.snapshot && + (front_split || back_split)) { + update = bch2_bkey_make_mut_noupdate(trans, old); + if ((ret = PTR_ERR_OR_ZERO(update))) + return ret; + + bch2_cut_front(new_start, update); + bch2_cut_back(new.k->p, update); + + ret = bch2_insert_snapshot_whiteouts(trans, btree_id, + old.k->p, update->k.p) ?: + bch2_btree_insert_nonextent(trans, btree_id, update, + BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags); + if (ret) + return ret; + } + + if (bkey_le(old.k->p, new.k->p)) { + update = bch2_trans_kmalloc(trans, sizeof(*update)); + if ((ret = PTR_ERR_OR_ZERO(update))) + return ret; + + bkey_init(&update->k); + update->k.p = old.k->p; + update->k.p.snapshot = new.k->p.snapshot; + + if (new.k->p.snapshot != old.k->p.snapshot) { + update->k.type = KEY_TYPE_whiteout; + } else if (btree_type_has_snapshots(btree_id)) { + ret = need_whiteout_for_snapshot(trans, btree_id, update->k.p); + if (ret < 0) + return ret; + if (ret) + update->k.type = KEY_TYPE_whiteout; + } + + ret = bch2_btree_insert_nonextent(trans, btree_id, update, + BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags); + if (ret) + return ret; + } + + if (back_split) { + update = bch2_bkey_make_mut_noupdate(trans, old); + if ((ret = PTR_ERR_OR_ZERO(update))) + return ret; + + bch2_cut_front(new.k->p, update); + + ret = bch2_trans_update_by_path(trans, iter->path, update, + BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE| + flags, _RET_IP_); + if (ret) + return ret; + } + + return 0; +} + +static int bch2_trans_update_extent(struct btree_trans *trans, + struct btree_iter *orig_iter, + struct bkey_i *insert, + enum btree_update_flags flags) +{ + struct btree_iter iter; struct bkey_s_c k; enum btree_id btree_id = orig_iter->btree_id; - int ret = 0, compressed_sectors; + int ret = 0; - bch2_trans_iter_init(trans, &iter, btree_id, start, + bch2_trans_iter_init(trans, &iter, btree_id, bkey_start_pos(&insert->k), BTREE_ITER_INTENT| BTREE_ITER_WITH_UPDATES| BTREE_ITER_NOT_EXTENTS); @@ -1416,90 +1515,14 @@ int bch2_trans_update_extent(struct btree_trans *trans, } while (bkey_gt(insert->k.p, bkey_start_pos(k.k))) { - bool front_split = bkey_lt(bkey_start_pos(k.k), start); - bool back_split = bkey_gt(k.k->p, insert->k.p); + bool done = bkey_lt(insert->k.p, k.k->p); - /* - * If we're going to be splitting a compressed extent, note it - * so that __bch2_trans_commit() can increase our disk - * reservation: - */ - if (((front_split && back_split) || - ((front_split || back_split) && k.k->p.snapshot != insert->k.p.snapshot)) && - (compressed_sectors = bch2_bkey_sectors_compressed(k))) - trans->extra_journal_res += compressed_sectors; - - if (front_split) { - update = bch2_bkey_make_mut_noupdate(trans, k); - if ((ret = PTR_ERR_OR_ZERO(update))) - goto err; - - bch2_cut_back(start, update); - - ret = bch2_insert_snapshot_whiteouts(trans, btree_id, - k.k->p, update->k.p) ?: - bch2_btree_insert_nonextent(trans, btree_id, update, - BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags); - if (ret) - goto err; - } - - if (k.k->p.snapshot != insert->k.p.snapshot && - (front_split || back_split)) { - update = bch2_bkey_make_mut_noupdate(trans, k); - if ((ret = PTR_ERR_OR_ZERO(update))) - goto err; - - bch2_cut_front(start, update); - bch2_cut_back(insert->k.p, update); - - ret = bch2_insert_snapshot_whiteouts(trans, btree_id, - k.k->p, update->k.p) ?: - bch2_btree_insert_nonextent(trans, btree_id, update, - BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags); - if (ret) - goto err; - } - - if (bkey_le(k.k->p, insert->k.p)) { - update = bch2_trans_kmalloc(trans, 
sizeof(*update)); - if ((ret = PTR_ERR_OR_ZERO(update))) - goto err; - - bkey_init(&update->k); - update->k.p = k.k->p; - update->k.p.snapshot = insert->k.p.snapshot; - - if (insert->k.p.snapshot != k.k->p.snapshot) { - update->k.type = KEY_TYPE_whiteout; - } else if (btree_type_has_snapshots(btree_id)) { - ret = need_whiteout_for_snapshot(trans, btree_id, update->k.p); - if (ret < 0) - goto err; - if (ret) - update->k.type = KEY_TYPE_whiteout; - } - - ret = bch2_btree_insert_nonextent(trans, btree_id, update, - BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags); - if (ret) - goto err; - } - - if (back_split) { - update = bch2_bkey_make_mut_noupdate(trans, k); - if ((ret = PTR_ERR_OR_ZERO(update))) - goto err; - - bch2_cut_front(insert->k.p, update); + ret = bch2_trans_update_extent_overwrite(trans, &iter, flags, k, bkey_i_to_s_c(insert)); + if (ret) + goto err; - ret = bch2_trans_update_by_path(trans, iter.path, update, - BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE| - flags, _RET_IP_); - if (ret) - goto err; + if (done) goto out; - } next: bch2_btree_iter_advance(&iter); k = bch2_btree_iter_peek_upto(&iter, POS(insert->k.p.inode, U64_MAX)); @@ -1515,18 +1538,8 @@ next: goto err; } out: - if (!bkey_deleted(&insert->k)) { - /* - * Rewinding iterators is expensive: get a new one and the one - * that points to the start of insert will be cloned from: - */ - bch2_trans_iter_exit(trans, &iter); - bch2_trans_iter_init(trans, &iter, btree_id, insert->k.p, - BTREE_ITER_NOT_EXTENTS| - BTREE_ITER_INTENT); - ret = bch2_btree_iter_traverse(&iter) ?: - bch2_trans_update(trans, &iter, insert, flags); - } + if (!bkey_deleted(&insert->k)) + ret = bch2_btree_insert_nonextent(trans, btree_id, insert, flags); err: bch2_trans_iter_exit(trans, &iter); diff --git a/libbcachefs/darray.h b/libbcachefs/darray.h index d4485fa..114f86b 100644 --- a/libbcachefs/darray.h +++ b/libbcachefs/darray.h @@ -59,13 +59,13 @@ static inline int __darray_make_room(darray_void *d, size_t t_size, size_t more, #define darray_first(_d) ((_d).data[0]) #define darray_last(_d) ((_d).data[(_d).nr - 1]) -#define darray_insert_item(_d, _pos, _item) \ +#define darray_insert_item(_d, pos, _item) \ ({ \ - size_t pos = (_pos); \ + size_t _pos = (pos); \ int _ret = darray_make_room((_d), 1); \ \ if (!_ret) \ - array_insert_item((_d)->data, (_d)->nr, pos, (_item)); \ + array_insert_item((_d)->data, (_d)->nr, _pos, (_item)); \ _ret; \ }) diff --git a/libbcachefs/ec.c b/libbcachefs/ec.c index efbb7cf..f58e84a 100644 --- a/libbcachefs/ec.c +++ b/libbcachefs/ec.c @@ -200,11 +200,14 @@ static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx) static void ec_stripe_buf_exit(struct ec_stripe_buf *buf) { - unsigned i; + if (buf->key.k.type == KEY_TYPE_stripe) { + struct bkey_i_stripe *s = bkey_i_to_stripe(&buf->key); + unsigned i; - for (i = 0; i < buf->key.v.nr_blocks; i++) { - kvpfree(buf->data[i], buf->size << 9); - buf->data[i] = NULL; + for (i = 0; i < s->v.nr_blocks; i++) { + kvpfree(buf->data[i], buf->size << 9); + buf->data[i] = NULL; + } } } @@ -212,7 +215,7 @@ static void ec_stripe_buf_exit(struct ec_stripe_buf *buf) static int ec_stripe_buf_init(struct ec_stripe_buf *buf, unsigned offset, unsigned size) { - struct bch_stripe *v = &buf->key.v; + struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v; unsigned csum_granularity = 1U << v->csum_granularity_bits; unsigned end = offset + size; unsigned i; @@ -228,7 +231,7 @@ static int ec_stripe_buf_init(struct ec_stripe_buf *buf, memset(buf->valid, 0xFF, sizeof(buf->valid)); - for (i = 0; i < 
buf->key.v.nr_blocks; i++) { + for (i = 0; i < v->nr_blocks; i++) { buf->data[i] = kvpmalloc(buf->size << 9, GFP_KERNEL); if (!buf->data[i]) goto err; @@ -245,7 +248,7 @@ err: static struct bch_csum ec_block_checksum(struct ec_stripe_buf *buf, unsigned block, unsigned offset) { - struct bch_stripe *v = &buf->key.v; + struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v; unsigned csum_granularity = 1 << v->csum_granularity_bits; unsigned end = buf->offset + buf->size; unsigned len = min(csum_granularity, end - offset); @@ -264,7 +267,7 @@ static struct bch_csum ec_block_checksum(struct ec_stripe_buf *buf, static void ec_generate_checksums(struct ec_stripe_buf *buf) { - struct bch_stripe *v = &buf->key.v; + struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v; unsigned i, j, csums_per_device = stripe_csums_per_device(v); if (!v->csum_type) @@ -281,7 +284,7 @@ static void ec_generate_checksums(struct ec_stripe_buf *buf) static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf) { - struct bch_stripe *v = &buf->key.v; + struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v; unsigned csum_granularity = 1 << v->csum_granularity_bits; unsigned i; @@ -304,7 +307,7 @@ static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf) if (bch2_crc_cmp(want, got)) { struct printbuf buf2 = PRINTBUF; - bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&buf->key.k_i)); + bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&buf->key)); bch_err_ratelimited(c, "stripe checksum error for %ps at %u:%u: csum type %u, expected %llx got %llx\n%s", @@ -324,7 +327,7 @@ static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf) static void ec_generate_ec(struct ec_stripe_buf *buf) { - struct bch_stripe *v = &buf->key.v; + struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v; unsigned nr_data = v->nr_blocks - v->nr_redundant; unsigned bytes = le16_to_cpu(v->sectors) << 9; @@ -333,13 +336,14 @@ static void ec_generate_ec(struct ec_stripe_buf *buf) static unsigned ec_nr_failed(struct ec_stripe_buf *buf) { - return buf->key.v.nr_blocks - - bitmap_weight(buf->valid, buf->key.v.nr_blocks); + struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v; + + return v->nr_blocks - bitmap_weight(buf->valid, v->nr_blocks); } static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf) { - struct bch_stripe *v = &buf->key.v; + struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v; unsigned i, failed[BCH_BKEY_PTRS_MAX], nr_failed = 0; unsigned nr_data = v->nr_blocks - v->nr_redundant; unsigned bytes = buf->size << 9; @@ -363,7 +367,7 @@ static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf) static void ec_block_endio(struct bio *bio) { struct ec_bio *ec_bio = container_of(bio, struct ec_bio, bio); - struct bch_stripe *v = &ec_bio->buf->key.v; + struct bch_stripe *v = &bkey_i_to_stripe(&ec_bio->buf->key)->v; struct bch_extent_ptr *ptr = &v->ptrs[ec_bio->idx]; struct bch_dev *ca = ec_bio->ca; struct closure *cl = bio->bi_private; @@ -388,11 +392,11 @@ static void ec_block_endio(struct bio *bio) static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf, blk_opf_t opf, unsigned idx, struct closure *cl) { - struct bch_stripe *v = &buf->key.v; + struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v; unsigned offset = 0, bytes = buf->size << 9; struct bch_extent_ptr *ptr = &v->ptrs[idx]; struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev); - enum bch_data_type data_type = idx < buf->key.v.nr_blocks - buf->key.v.nr_redundant + enum bch_data_type data_type = 
idx < v->nr_blocks - v->nr_redundant ? BCH_DATA_user : BCH_DATA_parity; int rw = op_is_write(opf); @@ -463,7 +467,7 @@ static int get_stripe_key_trans(struct btree_trans *trans, u64 idx, ret = -ENOENT; goto err; } - bkey_reassemble(&stripe->key.k_i, k); + bkey_reassemble(&stripe->key, k); err: bch2_trans_iter_exit(trans, &iter); return ret; @@ -499,7 +503,7 @@ int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio) return -EIO; } - v = &buf->key.v; + v = &bkey_i_to_stripe(&buf->key)->v; if (!bch2_ptr_matches_stripe(v, rbio->pick)) { bch_err_ratelimited(c, @@ -875,6 +879,7 @@ static int ec_stripe_update_extent(struct btree_trans *trans, struct ec_stripe_buf *s, struct bpos *bp_pos) { + struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v; struct bch_fs *c = trans->c; struct bch_backpointer bp; struct btree_iter iter; @@ -926,7 +931,7 @@ static int ec_stripe_update_extent(struct btree_trans *trans, if (extent_has_stripe_ptr(k, s->key.k.p.offset)) goto out; - ptr_c = bkey_matches_stripe(&s->key.v, k, &block); + ptr_c = bkey_matches_stripe(v, k, &block); /* * It doesn't generally make sense to erasure code cached ptrs: * XXX: should we be incrementing a counter? @@ -934,7 +939,7 @@ static int ec_stripe_update_extent(struct btree_trans *trans, if (!ptr_c || ptr_c->cached) goto out; - dev = s->key.v.ptrs[block].dev; + dev = v->ptrs[block].dev; n = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(stripe_ptr)); ret = PTR_ERR_OR_ZERO(n); @@ -950,7 +955,7 @@ static int ec_stripe_update_extent(struct btree_trans *trans, stripe_ptr = (struct bch_extent_stripe_ptr) { .type = 1 << BCH_EXTENT_ENTRY_stripe_ptr, .block = block, - .redundancy = s->key.v.nr_redundant, + .redundancy = v->nr_redundant, .idx = s->key.k.p.offset, }; @@ -968,7 +973,8 @@ static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_b unsigned block) { struct bch_fs *c = trans->c; - struct bch_extent_ptr bucket = s->key.v.ptrs[block]; + struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v; + struct bch_extent_ptr bucket = v->ptrs[block]; struct bpos bucket_pos = PTR_BUCKET_POS(c, &bucket); struct bpos bp_pos = POS_MIN; int ret = 0; @@ -993,7 +999,7 @@ static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_b static int ec_stripe_update_extents(struct bch_fs *c, struct ec_stripe_buf *s) { struct btree_trans trans; - struct bch_stripe *v = &s->key.v; + struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v; unsigned i, nr_data = v->nr_blocks - v->nr_redundant; int ret = 0; @@ -1057,7 +1063,7 @@ static void ec_stripe_create(struct ec_stripe_new *s) { struct bch_fs *c = s->c; struct open_bucket *ob; - struct bch_stripe *v = &s->new_stripe.key.v; + struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v; unsigned i, nr_data = v->nr_blocks - v->nr_redundant; int ret; @@ -1090,7 +1096,7 @@ static void ec_stripe_create(struct ec_stripe_new *s) } for (i = 0; i < nr_data; i++) - if (stripe_blockcount_get(&s->existing_stripe.key.v, i)) + if (stripe_blockcount_get(&bkey_i_to_stripe(&s->existing_stripe.key)->v, i)) swap(s->new_stripe.data[i], s->existing_stripe.data[i]); @@ -1117,8 +1123,9 @@ static void ec_stripe_create(struct ec_stripe_new *s) ret = bch2_trans_do(c, &s->res, NULL, BTREE_INSERT_NOCHECK_RW| BTREE_INSERT_NOFAIL, - ec_stripe_key_update(&trans, &s->new_stripe.key, - !s->have_existing_stripe)); + ec_stripe_key_update(&trans, + bkey_i_to_stripe(&s->new_stripe.key), + !s->have_existing_stripe)); if (ret) { bch_err(c, "error creating stripe: error creating stripe key"); goto 
err; @@ -1279,14 +1286,14 @@ static bool may_create_new_stripe(struct bch_fs *c) } static void ec_stripe_key_init(struct bch_fs *c, - struct bkey_i_stripe *s, + struct bkey_i *k, unsigned nr_data, unsigned nr_parity, unsigned stripe_size) { + struct bkey_i_stripe *s = bkey_stripe_init(k); unsigned u64s; - bkey_stripe_init(&s->k_i); s->v.sectors = cpu_to_le16(stripe_size); s->v.algorithm = 0; s->v.nr_blocks = nr_data + nr_parity; @@ -1325,8 +1332,8 @@ static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h) BCH_BKEY_PTRS_MAX) - h->redundancy; s->nr_parity = h->redundancy; - ec_stripe_key_init(c, &s->new_stripe.key, s->nr_data, - s->nr_parity, h->blocksize); + ec_stripe_key_init(c, &s->new_stripe.key, + s->nr_data, s->nr_parity, h->blocksize); h->s = s; return 0; @@ -1429,15 +1436,16 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_ struct bch_devs_mask devs = h->devs; struct open_bucket *ob; struct open_buckets buckets; + struct bch_stripe *v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v; unsigned i, j, nr_have_parity = 0, nr_have_data = 0; bool have_cache = true; int ret = 0; - BUG_ON(h->s->new_stripe.key.v.nr_blocks != h->s->nr_data + h->s->nr_parity); - BUG_ON(h->s->new_stripe.key.v.nr_redundant != h->s->nr_parity); + BUG_ON(v->nr_blocks != h->s->nr_data + h->s->nr_parity); + BUG_ON(v->nr_redundant != h->s->nr_parity); - for_each_set_bit(i, h->s->blocks_gotten, h->s->new_stripe.key.v.nr_blocks) { - __clear_bit(h->s->new_stripe.key.v.ptrs[i].dev, devs.d); + for_each_set_bit(i, h->s->blocks_gotten, v->nr_blocks) { + __clear_bit(v->ptrs[i].dev, devs.d); if (i < h->s->nr_data) nr_have_data++; else @@ -1466,7 +1474,7 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_ BUG_ON(j >= h->s->nr_data + h->s->nr_parity); h->s->blocks[j] = buckets.v[i]; - h->s->new_stripe.key.v.ptrs[j] = bch2_ob_ptr(c, ob); + v->ptrs[j] = bch2_ob_ptr(c, ob); __set_bit(j, h->s->blocks_gotten); } @@ -1492,7 +1500,7 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_ BUG_ON(j >= h->s->nr_data); h->s->blocks[j] = buckets.v[i]; - h->s->new_stripe.key.v.ptrs[j] = bch2_ob_ptr(c, ob); + v->ptrs[j] = bch2_ob_ptr(c, ob); __set_bit(j, h->s->blocks_gotten); } @@ -1542,6 +1550,8 @@ static s64 get_existing_stripe(struct bch_fs *c, static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stripe_head *h) { struct bch_fs *c = trans->c; + struct bch_stripe *new_v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v; + struct bch_stripe *existing_v; unsigned i; s64 idx; int ret; @@ -1562,9 +1572,11 @@ static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stri return ret; } - BUG_ON(h->s->existing_stripe.key.v.nr_redundant != h->s->nr_parity); - h->s->nr_data = h->s->existing_stripe.key.v.nr_blocks - - h->s->existing_stripe.key.v.nr_redundant; + existing_v = &bkey_i_to_stripe(&h->s->existing_stripe.key)->v; + + BUG_ON(existing_v->nr_redundant != h->s->nr_parity); + h->s->nr_data = existing_v->nr_blocks - + existing_v->nr_redundant; ret = ec_stripe_buf_init(&h->s->existing_stripe, 0, h->blocksize); if (ret) { @@ -1573,21 +1585,21 @@ static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stri } BUG_ON(h->s->existing_stripe.size != h->blocksize); - BUG_ON(h->s->existing_stripe.size != le16_to_cpu(h->s->existing_stripe.key.v.sectors)); + BUG_ON(h->s->existing_stripe.size != le16_to_cpu(existing_v->sectors)); /* * Free buckets we initially allocated - they might conflict 
with * blocks from the stripe we're reusing: */ - for_each_set_bit(i, h->s->blocks_gotten, h->s->new_stripe.key.v.nr_blocks) { + for_each_set_bit(i, h->s->blocks_gotten, new_v->nr_blocks) { bch2_open_bucket_put(c, c->open_buckets + h->s->blocks[i]); h->s->blocks[i] = 0; } memset(h->s->blocks_gotten, 0, sizeof(h->s->blocks_gotten)); memset(h->s->blocks_allocated, 0, sizeof(h->s->blocks_allocated)); - for (i = 0; i < h->s->existing_stripe.key.v.nr_blocks; i++) { - if (stripe_blockcount_get(&h->s->existing_stripe.key.v, i)) { + for (i = 0; i < existing_v->nr_blocks; i++) { + if (stripe_blockcount_get(existing_v, i)) { __set_bit(i, h->s->blocks_gotten); __set_bit(i, h->s->blocks_allocated); } @@ -1595,7 +1607,7 @@ static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stri ec_block_io(c, &h->s->existing_stripe, READ, i, &h->s->iodone); } - bkey_copy(&h->s->new_stripe.key.k_i, &h->s->existing_stripe.key.k_i); + bkey_copy(&h->s->new_stripe.key, &h->s->existing_stripe.key); h->s->have_existing_stripe = true; return 0; @@ -1764,7 +1776,7 @@ static void __bch2_ec_stop(struct bch_fs *c, struct bch_dev *ca) if (!ca) goto found; - for (i = 0; i < h->s->new_stripe.key.v.nr_blocks; i++) { + for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++) { if (!h->s->blocks[i]) continue; @@ -1922,7 +1934,7 @@ void bch2_fs_ec_exit(struct bch_fs *c) break; if (h->s) { - for (i = 0; i < h->s->new_stripe.key.v.nr_blocks; i++) + for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++) BUG_ON(h->s->blocks[i]); kfree(h->s); diff --git a/libbcachefs/ec.h b/libbcachefs/ec.h index 1b1848e..885ae5d 100644 --- a/libbcachefs/ec.h +++ b/libbcachefs/ec.h @@ -138,10 +138,7 @@ struct ec_stripe_buf { void *data[BCH_BKEY_PTRS_MAX]; - union { - struct bkey_i_stripe key; - u64 pad[255]; - }; + __BKEY_PADDED(key, 255); }; struct ec_stripe_head; diff --git a/libbcachefs/extents.h b/libbcachefs/extents.h index d359b3f..6e9d23a 100644 --- a/libbcachefs/extents.h +++ b/libbcachefs/extents.h @@ -688,7 +688,7 @@ bool bch2_extent_normalize(struct bch_fs *, struct bkey_s); void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); int bch2_bkey_ptrs_invalid(const struct bch_fs *, struct bkey_s_c, - unsigned, struct printbuf *); + enum bkey_invalid_flags, struct printbuf *); void bch2_ptr_swab(struct bkey_s); diff --git a/libbcachefs/fsck.c b/libbcachefs/fsck.c index c07ddfa..8c186ac 100644 --- a/libbcachefs/fsck.c +++ b/libbcachefs/fsck.c @@ -219,69 +219,6 @@ static int write_inode(struct btree_trans *trans, return ret; } -static int fsck_inode_rm(struct btree_trans *trans, u64 inum, u32 snapshot) -{ - struct bch_fs *c = trans->c; - struct btree_iter iter = { NULL }; - struct bkey_i_inode_generation delete; - struct bch_inode_unpacked inode_u; - struct bkey_s_c k; - int ret; - - do { - ret = bch2_btree_delete_range_trans(trans, BTREE_ID_extents, - SPOS(inum, 0, snapshot), - SPOS(inum, U64_MAX, snapshot), - 0, NULL) ?: - bch2_btree_delete_range_trans(trans, BTREE_ID_dirents, - SPOS(inum, 0, snapshot), - SPOS(inum, U64_MAX, snapshot), - 0, NULL) ?: - bch2_btree_delete_range_trans(trans, BTREE_ID_xattrs, - SPOS(inum, 0, snapshot), - SPOS(inum, U64_MAX, snapshot), - 0, NULL); - } while (ret == -BCH_ERR_transaction_restart_nested); - if (ret) - goto err; -retry: - bch2_trans_begin(trans); - - k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, - SPOS(0, inum, snapshot), BTREE_ITER_INTENT); - ret = bkey_err(k); - if (ret) - goto err; - - if (!bkey_is_inode(k.k)) { - 
bch2_fs_inconsistent(c, - "inode %llu:%u not found when deleting", - inum, snapshot); - ret = -EIO; - goto err; - } - - bch2_inode_unpack(k, &inode_u); - - /* Subvolume root? */ - if (inode_u.bi_subvol) - bch_warn(c, "deleting inode %llu marked as unlinked, but also a subvolume root!?", inode_u.bi_inum); - - bkey_inode_generation_init(&delete.k_i); - delete.k.p = iter.pos; - delete.v.bi_generation = cpu_to_le32(inode_u.bi_generation + 1); - - ret = bch2_trans_update(trans, &iter, &delete.k_i, 0) ?: - bch2_trans_commit(trans, NULL, NULL, - BTREE_INSERT_NOFAIL); -err: - bch2_trans_iter_exit(trans, &iter); - if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) - goto retry; - - return ret ?: -BCH_ERR_transaction_restart_nested; -} - static int __remove_dirent(struct btree_trans *trans, struct bpos pos) { struct bch_fs *c = trans->c; @@ -521,7 +458,6 @@ static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *see u32 id, u32 ancestor) { ssize_t i; - u32 top = seen->ids.nr ? seen->ids.data[seen->ids.nr - 1].equiv : 0; EBUG_ON(id > ancestor); EBUG_ON(!bch2_snapshot_is_equiv(c, id)); @@ -529,7 +465,7 @@ static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *see /* @ancestor should be the snapshot most recently added to @seen */ EBUG_ON(ancestor != seen->pos.snapshot); - EBUG_ON(ancestor != top); + EBUG_ON(ancestor != seen->ids.data[seen->ids.nr - 1].equiv); if (id == ancestor) return true; @@ -930,7 +866,7 @@ static int check_inode(struct btree_trans *trans, bch2_trans_unlock(trans); bch2_fs_lazy_rw(c); - ret = fsck_inode_rm(trans, u.bi_inum, iter->pos.snapshot); + ret = bch2_inode_rm_snapshot(trans, u.bi_inum, iter->pos.snapshot); if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart)) bch_err(c, "error in fsck: error while deleting inode: %s", bch2_err_str(ret)); @@ -1198,19 +1134,13 @@ static int overlapping_extents_found(struct btree_trans *trans, BUG_ON(bkey_le(pos1, bkey_start_pos(&pos2))); - prt_str(&buf, "\n "); - bch2_bpos_to_text(&buf, pos1); - prt_str(&buf, "\n "); - - bch2_bkey_to_text(&buf, &pos2); - prt_str(&buf, "\n "); - bch2_trans_iter_init(trans, &iter, btree, SPOS(pos1.inode, pos1.offset - 1, snapshot), 0); k = bch2_btree_iter_peek_upto(&iter, POS(pos1.inode, U64_MAX)); ret = bkey_err(k); if (ret) goto err; + prt_str(&buf, "\n "); bch2_bkey_val_to_text(&buf, c, k); if (!bpos_eq(pos1, k.k->p)) { diff --git a/libbcachefs/inode.c b/libbcachefs/inode.c index 8834809..755cf7d 100644 --- a/libbcachefs/inode.c +++ b/libbcachefs/inode.c @@ -923,3 +923,66 @@ void bch2_inode_opts_get(struct bch_io_opts *opts, struct bch_fs *c, if (opts->nocow) opts->compression = opts->background_compression = opts->data_checksum = opts->erasure_code = 0; } + +int bch2_inode_rm_snapshot(struct btree_trans *trans, u64 inum, u32 snapshot) +{ + struct bch_fs *c = trans->c; + struct btree_iter iter = { NULL }; + struct bkey_i_inode_generation delete; + struct bch_inode_unpacked inode_u; + struct bkey_s_c k; + int ret; + + do { + ret = bch2_btree_delete_range_trans(trans, BTREE_ID_extents, + SPOS(inum, 0, snapshot), + SPOS(inum, U64_MAX, snapshot), + 0, NULL) ?: + bch2_btree_delete_range_trans(trans, BTREE_ID_dirents, + SPOS(inum, 0, snapshot), + SPOS(inum, U64_MAX, snapshot), + 0, NULL) ?: + bch2_btree_delete_range_trans(trans, BTREE_ID_xattrs, + SPOS(inum, 0, snapshot), + SPOS(inum, U64_MAX, snapshot), + 0, NULL); + } while (ret == -BCH_ERR_transaction_restart_nested); + if (ret) + goto err; +retry: + bch2_trans_begin(trans); + + k = 
bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, + SPOS(0, inum, snapshot), BTREE_ITER_INTENT); + ret = bkey_err(k); + if (ret) + goto err; + + if (!bkey_is_inode(k.k)) { + bch2_fs_inconsistent(c, + "inode %llu:%u not found when deleting", + inum, snapshot); + ret = -EIO; + goto err; + } + + bch2_inode_unpack(k, &inode_u); + + /* Subvolume root? */ + if (inode_u.bi_subvol) + bch_warn(c, "deleting inode %llu marked as unlinked, but also a subvolume root!?", inode_u.bi_inum); + + bkey_inode_generation_init(&delete.k_i); + delete.k.p = iter.pos; + delete.v.bi_generation = cpu_to_le32(inode_u.bi_generation + 1); + + ret = bch2_trans_update(trans, &iter, &delete.k_i, 0) ?: + bch2_trans_commit(trans, NULL, NULL, + BTREE_INSERT_NOFAIL); +err: + bch2_trans_iter_exit(trans, &iter); + if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) + goto retry; + + return ret ?: -BCH_ERR_transaction_restart_nested; +} diff --git a/libbcachefs/inode.h b/libbcachefs/inode.h index 7809d1b..1b9dc27 100644 --- a/libbcachefs/inode.h +++ b/libbcachefs/inode.h @@ -198,4 +198,6 @@ struct bch_opts bch2_inode_opts_to_opts(struct bch_inode_unpacked *); void bch2_inode_opts_get(struct bch_io_opts *, struct bch_fs *, struct bch_inode_unpacked *); +int bch2_inode_rm_snapshot(struct btree_trans *, u64, u32); + #endif /* _BCACHEFS_INODE_H */ diff --git a/libbcachefs/opts.c b/libbcachefs/opts.c index 9dcc61e..4d0daeb 100644 --- a/libbcachefs/opts.c +++ b/libbcachefs/opts.c @@ -10,7 +10,7 @@ #include "super-io.h" #include "util.h" -#define x(t, n) [n] = #t, +#define x(t, n, ...) [n] = #t, const char * const bch2_error_actions[] = { BCH_ERROR_ACTIONS() @@ -95,8 +95,8 @@ const char * const bch2_fs_usage_types[] = { #undef x -int bch2_opt_fix_errors_parse(struct bch_fs *c, const char *val, u64 *res, - struct printbuf *err) +static int bch2_opt_fix_errors_parse(struct bch_fs *c, const char *val, u64 *res, + struct printbuf *err) { if (!val) { *res = FSCK_FIX_yes; @@ -113,18 +113,18 @@ int bch2_opt_fix_errors_parse(struct bch_fs *c, const char *val, u64 *res, return 0; } -void bch2_opt_fix_errors_to_text(struct printbuf *out, - struct bch_fs *c, - struct bch_sb *sb, - u64 v) +static void bch2_opt_fix_errors_to_text(struct printbuf *out, + struct bch_fs *c, + struct bch_sb *sb, + u64 v) { prt_str(out, bch2_fsck_fix_opts[v]); } -static const struct bch_opt_fn bch2_opt_fix_errors = { - .parse = bch2_opt_fix_errors_parse, - .to_text = bch2_opt_fix_errors_to_text, -}; +#define bch2_opt_fix_errors (struct bch_opt_fn) { \ + .parse = bch2_opt_fix_errors_parse, \ + .to_text = bch2_opt_fix_errors_to_text, \ +} const char * const bch2_d_types[BCH_DT_MAX] = { [DT_UNKNOWN] = "unknown", diff --git a/libbcachefs/subvolume.c b/libbcachefs/subvolume.c index 811a6f4..736afb6 100644 --- a/libbcachefs/subvolume.c +++ b/libbcachefs/subvolume.c @@ -83,7 +83,7 @@ static noinline struct snapshot_t *__snapshot_t_mut(struct bch_fs *c, u32 id) if (!new) return NULL; - old = c->snapshots; + old = rcu_dereference_protected(c->snapshots, true); if (old) memcpy(new->s, rcu_dereference_protected(c->snapshots, true)->s, @@ -698,6 +698,11 @@ err: return ret; } +static int cmp_le32(__le32 l, __le32 r) +{ + return cmp_int(le32_to_cpu(l), le32_to_cpu(r)); +} + static int check_snapshot(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k) @@ -830,7 +835,7 @@ static int check_snapshot(struct btree_trans *trans, for (i = 0; i < ARRAY_SIZE(u->v.skip); i++) u->v.skip[i] = cpu_to_le32(snapshot_skiplist_get(c, parent_id)); - bubble_sort(u->v.skip, 
ARRAY_SIZE(u->v.skip), cmp_int); + bubble_sort(u->v.skip, ARRAY_SIZE(u->v.skip), cmp_le32); s = u->v; } ret = 0; @@ -946,7 +951,7 @@ int bch2_check_subvols(struct bch_fs *c) void bch2_fs_snapshots_exit(struct bch_fs *c) { - kfree(c->snapshots); + kfree(rcu_dereference_protected(c->snapshots, true)); } int bch2_snapshots_read(struct bch_fs *c) @@ -1123,7 +1128,7 @@ static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree, for (j = 0; j < ARRAY_SIZE(n->v.skip); j++) n->v.skip[j] = cpu_to_le32(snapshot_skiplist_get(c, parent)); - bubble_sort(n->v.skip, ARRAY_SIZE(n->v.skip), cmp_int); + bubble_sort(n->v.skip, ARRAY_SIZE(n->v.skip), cmp_le32); SET_BCH_SNAPSHOT_SUBVOL(&n->v, true); ret = bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0, diff --git a/libbcachefs/super-io.c b/libbcachefs/super-io.c index d2d3eba..c9a5a7c 100644 --- a/libbcachefs/super-io.c +++ b/libbcachefs/super-io.c @@ -261,16 +261,13 @@ struct bch_sb_field *bch2_sb_field_resize(struct bch_sb_handle *sb, /* Superblock validate: */ -static inline void __bch2_sb_layout_size_assert(void) -{ - BUILD_BUG_ON(sizeof(struct bch_sb_layout) != 512); -} - static int validate_sb_layout(struct bch_sb_layout *layout, struct printbuf *out) { u64 offset, prev_offset, max_sectors; unsigned i; + BUILD_BUG_ON(sizeof(struct bch_sb_layout) != 512); + if (!uuid_equal(&layout->magic, &BCACHE_MAGIC) && !uuid_equal(&layout->magic, &BCHFS_MAGIC)) { prt_printf(out, "Not a bcachefs superblock layout"); diff --git a/tools-util.c b/tools-util.c index bb65093..624656a 100644 --- a/tools-util.c +++ b/tools-util.c @@ -330,21 +330,21 @@ struct fiemap_extent fiemap_iter_next(struct fiemap_iter *iter) { struct fiemap_extent e; - BUG_ON(iter->idx > iter->f.fm_mapped_extents); + BUG_ON(iter->idx > iter->f->fm_mapped_extents); - if (iter->idx == iter->f.fm_mapped_extents) { - xioctl(iter->fd, FS_IOC_FIEMAP, &iter->f); + if (iter->idx == iter->f->fm_mapped_extents) { + xioctl(iter->fd, FS_IOC_FIEMAP, iter->f); - if (!iter->f.fm_mapped_extents) + if (!iter->f->fm_mapped_extents) return (struct fiemap_extent) { .fe_length = 0 }; iter->idx = 0; } - e = iter->f.fm_extents[iter->idx++]; + e = iter->f->fm_extents[iter->idx++]; BUG_ON(!e.fe_length); - iter->f.fm_start = e.fe_logical + e.fe_length; + iter->f->fm_start = e.fe_logical + e.fe_length; return e; } diff --git a/tools-util.h b/tools-util.h index aa7c027..e7bdd2c 100644 --- a/tools-util.h +++ b/tools-util.h @@ -115,8 +115,7 @@ static inline struct range hole_iter_next(struct hole_iter *iter) #include struct fiemap_iter { - struct fiemap f; - struct fiemap_extent fe[1024]; + struct fiemap *f; unsigned idx; int fd; }; @@ -125,11 +124,20 @@ static inline void fiemap_iter_init(struct fiemap_iter *iter, int fd) { memset(iter, 0, sizeof(*iter)); - iter->f.fm_extent_count = ARRAY_SIZE(iter->fe); - iter->f.fm_length = FIEMAP_MAX_OFFSET; + iter->f = xmalloc(sizeof(struct fiemap) + + sizeof(struct fiemap_extent) * 1024); + + iter->f->fm_extent_count = 1024; + iter->f->fm_length = FIEMAP_MAX_OFFSET; iter->fd = fd; } +static inline void fiemap_iter_exit(struct fiemap_iter *iter) +{ + free(iter->f); + memset(iter, 0, sizeof(*iter)); +} + struct fiemap_extent fiemap_iter_next(struct fiemap_iter *); #define fiemap_for_each(fd, iter, extent) \ -- 2.39.2
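
Notes on a few techniques this sync leans on, with standalone sketches. None of the code below is from the bcachefs tree; identifiers called out as illustrative are made up for the sketch.

The central change is the BCH_BTREE_IDS() X-macro growing from (name, nr) to (name, nr, flags, allowed-key-type mask), so the btree_id enum, bch2_key_types_allowed[], and predicates such as btree_node_type_is_extents() are all generated from one list instead of hand-maintained per-btree bitmasks. A minimal sketch of the pattern, assuming GNU C (MY_IDS and ID_FLAG_EXTENTS are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define ID_FLAG_EXTENTS (1U << 0)

/* One list; every table and predicate below is generated from it: */
#define MY_IDS()				\
	x(extents, 0, ID_FLAG_EXTENTS)		\
	x(inodes,  1, 0)			\
	x(reflink, 2, ID_FLAG_EXTENTS)

enum my_id {
#define x(name, nr, ...) MY_ID_##name = nr,
	MY_IDS()
#undef x
	MY_ID_NR
};

static inline bool my_id_is_extents(enum my_id id)
{
	/* Fold the flags column into a compile-time bitmask of ids: */
	const unsigned mask = 0
#define x(name, nr, flags) | ((!!((flags) & ID_FLAG_EXTENTS)) << nr)
	MY_IDS()
#undef x
	;

	return (1U << id) & mask;
}

int main(void)
{
	printf("extents: %d, inodes: %d\n",
	       my_id_is_extents(MY_ID_extents),
	       my_id_is_extents(MY_ID_inodes));
	return 0;
}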
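
The slab.h hunk replaces the kmalloc_array() macro test `(size) != 0 && (n) > SIZE_MAX / (size)` with a static inline built on check_mul_overflow(), which evaluates n and size exactly once and makes the overflow check explicit. A userspace sketch of the same idiom; __builtin_mul_overflow is the GCC/clang builtin that the kernel helper wraps, and xmalloc_array is an illustrative name:

#include <stdlib.h>

/* Overflow-checked array allocation: refuse to allocate rather than
 * letting n * size silently wrap. */
static inline void *xmalloc_array(size_t n, size_t size)
{
	size_t bytes;

	/* Stores n * size into bytes, returns true if it wrapped: */
	if (__builtin_mul_overflow(n, size, &bytes))
		return NULL;
	return malloc(bytes);
}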
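
tools-util.h moves fiemap_iter's embedded `struct fiemap f` plus its fixed fe[1024] array onto the heap as a single allocation, paired with a new fiemap_iter_exit() to free it. The general shape is a flexible-array-member struct sized at allocation time; a sketch with illustrative stand-in types (struct ext_map stands in for struct fiemap and its trailing fm_extents[]):

#include <stdlib.h>

struct ext { unsigned long long logical, length; };

struct ext_map {
	unsigned	nr;		/* capacity of extents[] */
	struct ext	extents[];	/* flexible array member */
};

static struct ext_map *ext_map_alloc(unsigned nr)
{
	/* One allocation for header plus nr trailing entries, mirroring
	 * xmalloc(sizeof(struct fiemap) +
	 *         sizeof(struct fiemap_extent) * 1024): */
	struct ext_map *m = calloc(1, sizeof(*m) + nr * sizeof(m->extents[0]));

	if (m)
		m->nr = nr;
	return m;
}

static void ext_map_free(struct ext_map *m)
{
	free(m);
}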
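
math.h picks up the kernel's type-generic abs() built from __builtin_types_compatible_p() and __builtin_choose_expr(), which select an implementation per argument type at compile time (including the bare-char case, whose signedness is implementation-defined). A reduced two-type sketch of the dispatch; iabs() is an illustrative name:

#include <stdio.h>

/* Picks the long branch when x is a long, else falls back to int;
 * the unchosen branch is never evaluated at runtime. */
#define iabs(x) __builtin_choose_expr(					\
	__builtin_types_compatible_p(typeof(x), long),			\
	({ long __x = (x); __x < 0 ? -__x : __x; }),			\
	({ int  __y = (x); __y < 0 ? -__y : __y; }))

int main(void)
{
	printf("%ld %d\n", iabs(-5L), iabs(-3));
	return 0;
}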