free_pages((unsigned long) p, get_order(size));
}
-static inline void *_vpmalloc(size_t size, gfp_t gfp_mask)
+static inline void *vpmalloc(size_t size, gfp_t gfp_mask)
{
- return (void *) _get_free_pages(gfp_mask|__GFP_NOWARN,
+ return (void *) __get_free_pages(gfp_mask|__GFP_NOWARN,
get_order(size)) ?:
__vmalloc(size, gfp_mask);
}
-#define vpmalloc(_size, _gfp) \
- alloc_hooks(_vpmalloc(_size, _gfp), void *, NULL)
static inline void kvpfree(void *p, size_t size)
{
vpfree(p, size);
}
-static inline void *_kvpmalloc(size_t size, gfp_t gfp_mask)
+static inline void *kvpmalloc(size_t size, gfp_t gfp_mask)
{
return size < PAGE_SIZE
- ? _kmalloc(size, gfp_mask)
- : _vpmalloc(size, gfp_mask);
+ ? kmalloc(size, gfp_mask)
+ : vpmalloc(size, gfp_mask);
}
-#define kvpmalloc(_size, _gfp) \
- alloc_hooks(_kvpmalloc(_size, _gfp), void *, NULL)
int mempool_init_kvpmalloc_pool(mempool_t *, int, size_t);
s64 last_change;
s64 last_target;
- /* If true, the rate will not increase if bch2_ratelimit_delay()
- * is not being called often enough. */
+ /*
+ * If true, the rate will not increase if bch2_ratelimit_delay()
+ * is not being called often enough.
+ */
bool backpressure;
};
}
void bch2_bio_map(struct bio *bio, void *base, size_t);
-int _bch2_bio_alloc_pages(struct bio *, size_t, gfp_t);
-#define bch2_bio_alloc_pages(_bio, _size, _gfp) \
- alloc_hooks(_bch2_bio_alloc_pages(_bio, _size, _gfp), int, -ENOMEM)
+int bch2_bio_alloc_pages(struct bio *, size_t, gfp_t);
static inline sector_t bdev_sectors(struct block_device *bdev)
{
{
#ifdef CONFIG_X86_64
long d0, d1, d2;
+
asm volatile("rep ; movsq"
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
: "0" (u64s), "1" (dst), "2" (src)
#ifdef CONFIG_X86_64
long d0, d1, d2;
+
asm volatile("std ;\n"
"rep ; movsq\n"
"cld ;\n"
#define bubble_sort(_base, _nr, _cmp) \
do { \
- ssize_t _i, _end; \
+ ssize_t _i, _last; \
bool _swapped = true; \
\
- for (_end = (ssize_t) (_nr) - 1; _end > 0 && _swapped; --_end) {\
+ for (_last = (ssize_t) (_nr) - 1; _last > 0 && _swapped; --_last) {\
_swapped = false; \
- for (_i = 0; _i < _end; _i++) \
+ for (_i = 0; _i < _last; _i++) \
if (_cmp((_base)[_i], (_base)[_i + 1]) > 0) { \
swap((_base)[_i], (_base)[_i + 1]); \
_swapped = true; \
return cmp_int(l, r);
}
+static inline int cmp_le32(__le32 l, __le32 r)
+{
+ return cmp_int(le32_to_cpu(l), le32_to_cpu(r));
+}
+
+#include <linux/uuid.h>
+
#endif /* _BCACHEFS_UTIL_H */