7 * NOTE: we can't differentiate between __le64 and u64 with type_is - this
8 * assumes u64 is little endian:
10 #define __vstruct_u64s(_s) \
12 ( type_is((_s)->u64s, u64) ? le64_to_cpu((_s)->u64s) \
13 : type_is((_s)->u64s, u32) ? le32_to_cpu((_s)->u64s) \
14 : type_is((_s)->u64s, u16) ? le16_to_cpu((_s)->u64s) \
18 #define __vstruct_bytes(_type, _u64s) \
20 BUILD_BUG_ON(offsetof(_type, _data) % sizeof(u64)); \
22 (offsetof(_type, _data) + (_u64s) * sizeof(u64)); \
/*
 * vstruct_bytes() - total size of *(_s) in bytes: the fixed header up to
 * the _data member, plus the variable-length payload whose length (in
 * u64s) is read from (_s)->u64s via __vstruct_u64s().
 *
 * NOTE: evaluates _s more than once (macro argument reuse).
 */
25 #define vstruct_bytes(_s) \
26 __vstruct_bytes(typeof(*(_s)), __vstruct_u64s(_s))
/*
 * __vstruct_blocks() - size of a _type carrying a _u64s-long payload,
 * expressed in filesystem blocks of (512 << _sector_block_bits) bytes,
 * rounded up to a whole block.  The >> (9 + bits) converts the rounded
 * byte count back to block units (512 == 1 << 9).
 */
28 #define __vstruct_blocks(_type, _sector_block_bits, _u64s) \
29 (round_up(__vstruct_bytes(_type, _u64s), \
30 512 << (_sector_block_bits)) >> (9 + (_sector_block_bits)))
/*
 * vstruct_blocks() - number of blocks occupied by *(_s), using the
 * current payload length stored in (_s)->u64s.
 * NOTE: evaluates _s more than once.
 */
32 #define vstruct_blocks(_s, _sector_block_bits) \
33 __vstruct_blocks(typeof(*(_s)), _sector_block_bits, __vstruct_u64s(_s))
/*
 * vstruct_blocks_plus() - like vstruct_blocks(), but computed as if
 * _u64s additional u64s were appended to the payload — presumably used
 * to check whether a new entry would still fit (NOTE(review): inferred
 * from the signature; confirm against callers).
 */
35 #define vstruct_blocks_plus(_s, _sector_block_bits, _u64s) \
36 __vstruct_blocks(typeof(*(_s)), _sector_block_bits, \
37 __vstruct_u64s(_s) + (_u64s))
/*
 * vstruct_sectors() - size of *(_s) in 512-byte sectors, rounded up to
 * a whole block of (512 << _sector_block_bits) bytes first.
 * NOTE: evaluates _s more than once.
 */
39 #define vstruct_sectors(_s, _sector_block_bits) \
40 (round_up(vstruct_bytes(_s), 512 << (_sector_block_bits)) >> 9)
/*
 * vstruct_next() - pointer just past *(_s)'s payload, cast back to the
 * same pointer type as _s: the next vstruct when they are laid out
 * contiguously.  NOTE: evaluates _s more than once.
 */
42 #define vstruct_next(_s) \
43 ((typeof(_s)) ((_s)->_data + __vstruct_u64s(_s)))
/*
 * vstruct_last() - one-past-the-end of *(_s)'s payload, typed as a
 * pointer to (_s)->start's element type (the entry type); the loop
 * bound used by vstruct_for_each().  NOTE: evaluates _s more than once.
 */
44 #define vstruct_last(_s) \
45 ((typeof(&(_s)->start[0])) ((_s)->_data + __vstruct_u64s(_s)))
/*
 * vstruct_end() - untyped (void *) one-past-the-end of *(_s)'s payload.
 * NOTE: evaluates _s more than once.
 */
46 #define vstruct_end(_s) \
47 ((void *) ((_s)->_data + __vstruct_u64s(_s)))
/*
 * vstruct_for_each() - iterate _i over the variable-size entries of
 * *(_s), from (_s)->start up to vstruct_last().  Each step advances by
 * the entry's own length via vstruct_next(_i), so entries must
 * themselves carry a u64s field (they are vstructs too).
 *
 * Not safe if the body frees or resizes the current entry — no
 * safe-iteration variant is used here.
 */
49 #define vstruct_for_each(_s, _i) \
50 for (_i = (_s)->start; \
51 _i < vstruct_last(_s); \
52 _i = vstruct_next(_i))
54 #define vstruct_for_each_safe(_s, _i, _t) \
55 for (_i = (_s)->start; \
56 _i < vstruct_last(_s) && (_t = vstruct_next(_i), true); \
/*
 * vstruct_idx() - entry pointer at offset _idx into *(_s)'s payload,
 * typed as a pointer to (_s)->start's element type.  The offset unit is
 * _data's element size — assumed to be u64s, i.e. _idx is a u64 offset,
 * not an entry index (NOTE(review): _data's type is not visible here;
 * confirm).
 */
59 #define vstruct_idx(_s, _idx) \
60 ((typeof(&(_s)->start[0])) ((_s)->_data + (_idx)))
62 #endif /* _VSTRUCTS_H */