X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Futil.h;h=67f1a1d2a02d31a22edb411c5f7057b6cc8326a5;hb=5e215654da7e97a6395de6e7592fbaa426697897;hp=ecfe54012e3d0306b04e0c7c7e5ffa790b0a4742;hpb=0206d42daf4c4bd3bbcfa15a2bef34319524db49;p=bcachefs-tools-debian

diff --git a/libbcachefs/util.h b/libbcachefs/util.h
index ecfe540..67f1a1d 100644
--- a/libbcachefs/util.h
+++ b/libbcachefs/util.h
@@ -60,14 +60,12 @@ static inline void vpfree(void *p, size_t size)
 		free_pages((unsigned long) p, get_order(size));
 }
 
-static inline void *_vpmalloc(size_t size, gfp_t gfp_mask)
+static inline void *vpmalloc(size_t size, gfp_t gfp_mask)
 {
-	return (void *) _get_free_pages(gfp_mask|__GFP_NOWARN,
+	return (void *) __get_free_pages(gfp_mask|__GFP_NOWARN,
					 get_order(size)) ?:
 		__vmalloc(size, gfp_mask);
 }
-#define vpmalloc(_size, _gfp)					\
-	alloc_hooks(_vpmalloc(_size, _gfp), void *, NULL)
 
 static inline void kvpfree(void *p, size_t size)
 {
@@ -77,14 +75,12 @@ static inline void kvpfree(void *p, size_t size)
 		vpfree(p, size);
 }
 
-static inline void *_kvpmalloc(size_t size, gfp_t gfp_mask)
+static inline void *kvpmalloc(size_t size, gfp_t gfp_mask)
 {
 	return size < PAGE_SIZE
-		? _kmalloc(size, gfp_mask)
-		: _vpmalloc(size, gfp_mask);
+		? kmalloc(size, gfp_mask)
+		: vpmalloc(size, gfp_mask);
 }
-#define kvpmalloc(_size, _gfp)					\
-	alloc_hooks(_kvpmalloc(_size, _gfp), void *, NULL)
 
 int mempool_init_kvpmalloc_pool(mempool_t *, int, size_t);
 
@@ -470,8 +466,10 @@ struct bch_pd_controller {
 	s64			last_change;
 	s64			last_target;
 
-	/* If true, the rate will not increase if bch2_ratelimit_delay()
-	 * is not being called often enough. */
+	/*
+	 * If true, the rate will not increase if bch2_ratelimit_delay()
+	 * is not being called often enough.
+	 */
 	bool			backpressure;
 };
 
@@ -534,9 +532,7 @@ static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
 }
 
 void bch2_bio_map(struct bio *bio, void *base, size_t);
-int _bch2_bio_alloc_pages(struct bio *, size_t, gfp_t);
-#define bch2_bio_alloc_pages(_bio, _size, _gfp)			\
-	alloc_hooks(_bch2_bio_alloc_pages(_bio, _size, _gfp), int, -ENOMEM)
+int bch2_bio_alloc_pages(struct bio *, size_t, gfp_t);
 
 static inline sector_t bdev_sectors(struct block_device *bdev)
 {
@@ -609,6 +605,7 @@ static inline void __memcpy_u64s(void *dst, const void *src,
 {
 #ifdef CONFIG_X86_64
 	long d0, d1, d2;
+
 	asm volatile("rep ; movsq"
 		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
 		     : "0" (u64s), "1" (dst), "2" (src)
@@ -685,6 +682,7 @@ static inline void __memmove_u64s_up(void *_dst, const void *_src,
 
 #ifdef CONFIG_X86_64
 	long d0, d1, d2;
+
 	asm volatile("std ;\n"
 		     "rep ; movsq\n"
 		     "cld ;\n"
@@ -777,12 +775,12 @@ static inline void __move_gap(void *array, size_t element_size,
 
 #define bubble_sort(_base, _nr, _cmp)					\
 do {									\
-	ssize_t _i, _end;						\
+	ssize_t _i, _last;						\
 	bool _swapped = true;						\
 									\
-	for (_end = (ssize_t) (_nr) - 1; _end > 0 && _swapped; --_end) {\
+	for (_last= (ssize_t) (_nr) - 1; _last > 0 && _swapped; --_last) {\
 		_swapped = false;					\
-		for (_i = 0; _i < _end; _i++)				\
+		for (_i = 0; _i < _last; _i++)				\
 			if (_cmp((_base)[_i], (_base)[_i + 1]) > 0) {	\
 				swap((_base)[_i], (_base)[_i + 1]);	\
 				_swapped = true;			\
@@ -843,4 +841,11 @@ static inline int u8_cmp(u8 l, u8 r)
 	return cmp_int(l, r);
 }
 
+static inline int cmp_le32(__le32 l, __le32 r)
+{
+	return cmp_int(le32_to_cpu(l), le32_to_cpu(r));
+}
+
+#include
+
 #endif /* _BCACHEFS_UTIL_H */