Update bcachefs sources to a8115093df bcachefs: Fix divide by zero in rebalance_work()
diff --git a/libbcachefs/util.h b/libbcachefs/util.h
index 09e272932dff388b2962f853898f6f3786a34338..d06671a09852ada4329013206595ec00eaf1fbd0 100644
--- a/libbcachefs/util.h
+++ b/libbcachefs/util.h
@@ -19,6 +19,8 @@
 #include <linux/workqueue.h>
 #include <linux/mean_and_variance.h>
 
+#include "darray.h"
+
 struct closure;
 
 #ifdef CONFIG_BCACHEFS_DEBUG
@@ -58,12 +60,13 @@ static inline void vpfree(void *p, size_t size)
                free_pages((unsigned long) p, get_order(size));
 }
 
-static inline void *vpmalloc(size_t size, gfp_t gfp_mask)
+static inline void *vpmalloc_noprof(size_t size, gfp_t gfp_mask)
 {
-       return (void *) __get_free_pages(gfp_mask|__GFP_NOWARN,
-                                        get_order(size)) ?:
-               __vmalloc(size, gfp_mask);
+       return (void *) get_free_pages_noprof(gfp_mask|__GFP_NOWARN,
+                                             get_order(size)) ?:
+               __vmalloc_noprof(size, gfp_mask);
 }
+#define vpmalloc(_size, _gfp)  alloc_hooks(vpmalloc_noprof(_size, _gfp))
 
 static inline void kvpfree(void *p, size_t size)
 {
@@ -73,12 +76,13 @@ static inline void kvpfree(void *p, size_t size)
                vpfree(p, size);
 }
 
-static inline void *kvpmalloc(size_t size, gfp_t gfp_mask)
+static inline void *kvpmalloc_noprof(size_t size, gfp_t gfp_mask)
 {
        return size < PAGE_SIZE
-               ? kmalloc(size, gfp_mask)
-               : vpmalloc(size, gfp_mask);
+               ? kmalloc_noprof(size, gfp_mask)
+               : vpmalloc_noprof(size, gfp_mask);
 }
+#define kvpmalloc(_size, _gfp) alloc_hooks(kvpmalloc_noprof(_size, _gfp))
 
 int mempool_init_kvpmalloc_pool(mempool_t *, int, size_t);
 
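For context: the _noprof rename follows the kernel's memory-allocation-profiling convention. The function that actually allocates gets a _noprof suffix, and the old name becomes an alloc_hooks() macro wrapper, so profiling builds can attribute each allocation to its call site. Callers compile unchanged; a minimal sketch of a hypothetical caller:

	void *buf = kvpmalloc(bytes, GFP_KERNEL);	/* kmalloc() if < PAGE_SIZE, else page/vmalloc */

	if (!buf)
		return -ENOMEM;
	/* ... use buf ... */
	kvpfree(buf, bytes);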
@@ -233,7 +237,7 @@ do {                                                                        \
 #define prt_tab_rjust(_out)            bch2_prt_tab_rjust(_out)
 
 #define prt_bytes_indented(...)                bch2_prt_bytes_indented(__VA_ARGS__)
-#define prt_u64(_out, _v)              prt_printf(_out, "%llu", _v)
+#define prt_u64(_out, _v)              prt_printf(_out, "%llu", (u64) (_v))
 #define prt_human_readable_u64(...)    bch2_prt_human_readable_u64(__VA_ARGS__)
 #define prt_human_readable_s64(...)    bch2_prt_human_readable_s64(__VA_ARGS__)
 #define prt_units_u64(...)             bch2_prt_units_u64(__VA_ARGS__)
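The (u64) cast here is a correctness fix, not cosmetic: prt_u64() passes _v through varargs to a "%llu" format, which is undefined behavior when the argument is narrower than unsigned long long. With the cast, any integer type is safe; a small sketch using the printbuf idiom:

	struct printbuf buf = PRINTBUF;
	u32 nr = 42;			/* hypothetical 32-bit counter */

	prt_u64(&buf, nr);		/* promoted to u64 before hitting "%llu" */
	printbuf_exit(&buf);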
@@ -360,7 +364,11 @@ u64 bch2_read_flag_list(char *, const char * const[]);
 void bch2_prt_u64_binary(struct printbuf *, u64, unsigned);
 
 void bch2_print_string_as_lines(const char *prefix, const char *lines);
-int bch2_prt_backtrace(struct printbuf *, struct task_struct *);
+
+typedef DARRAY(unsigned long) bch_stacktrace;
+int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *);
+void bch2_prt_backtrace(struct printbuf *, bch_stacktrace *);
+int bch2_prt_task_backtrace(struct printbuf *, struct task_struct *);
 
 #define NR_QUANTILES   15
 #define QUANTILE_IDX(i)        inorder_to_eytzinger0(i, NR_QUANTILES)
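The old bch2_prt_backtrace() captured and printed in one step; it is now split so a trace can be saved once and printed (or inspected) later. bch_stacktrace is a darray of return addresses. A plausible caller, assuming the usual darray lifetime rules from darray.h:

	bch_stacktrace stack = { 0 };
	int ret = bch2_save_backtrace(&stack, task);

	if (!ret)
		bch2_prt_backtrace(out, &stack);
	darray_exit(&stack);

bch2_prt_task_backtrace() keeps the old one-call convenience for callers that don't need the intermediate darray.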
@@ -460,8 +468,10 @@ struct bch_pd_controller {
        s64                     last_change;
        s64                     last_target;
 
-       /* If true, the rate will not increase if bch2_ratelimit_delay()
-        * is not being called often enough. */
+       /*
+        * If true, the rate will not increase if bch2_ratelimit_delay()
+        * is not being called often enough.
+        */
        bool                    backpressure;
 };
 
@@ -524,7 +534,9 @@ static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
 }
 
 void bch2_bio_map(struct bio *bio, void *base, size_t);
-int bch2_bio_alloc_pages(struct bio *, size_t, gfp_t);
+int bch2_bio_alloc_pages_noprof(struct bio *, size_t, gfp_t);
+#define bch2_bio_alloc_pages(_bio, _size, _gfp)                                \
+       alloc_hooks(bch2_bio_alloc_pages_noprof(_bio, _size, _gfp))
 
 static inline sector_t bdev_sectors(struct block_device *bdev)
 {
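This is the same _noprof/alloc_hooks() split as kvpmalloc() above, applied to the bio page allocator; an existing call site such as

	ret = bch2_bio_alloc_pages(bio, size, GFP_NOFS);	/* hypothetical caller */

compiles unchanged and simply gains call-site attribution in profiling builds.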
@@ -537,6 +549,26 @@ do {                                                                       \
        submit_bio(bio);                                                \
 } while (0)
 
+#define kthread_wait(cond)                                             \
+({                                                                     \
+       int _ret = 0;                                                   \
+                                                                       \
+       while (1) {                                                     \
+               set_current_state(TASK_INTERRUPTIBLE);                  \
+               if (kthread_should_stop()) {                            \
+                       _ret = -1;                                      \
+                       break;                                          \
+               }                                                       \
+                                                                       \
+               if (cond)                                               \
+                       break;                                          \
+                                                                       \
+               schedule();                                             \
+       }                                                               \
+       set_current_state(TASK_RUNNING);                                \
+       _ret;                                                           \
+})
+
 #define kthread_wait_freezable(cond)                                   \
 ({                                                                     \
        int _ret = 0;                                                   \
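kthread_wait() is the standard stop-aware sleep pattern: set TASK_INTERRUPTIBLE before testing the condition (so a wake-up racing with the test isn't lost), return -1 if the thread was asked to stop, otherwise sleep until the condition holds. A hypothetical kthread body using it:

	static int example_thread(void *arg)
	{
		struct example_queue *q = arg;		/* hypothetical state */

		while (!kthread_wait(!list_empty(&q->work)))
			process_work(q);		/* hypothetical */

		return 0;				/* kthread_wait() returned -1: asked to stop */
	}

kthread_wait_freezable() below is the same loop, additionally letting the freezer run while the thread sleeps.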
@@ -577,6 +609,7 @@ static inline void __memcpy_u64s(void *dst, const void *src,
 {
 #ifdef CONFIG_X86_64
        long d0, d1, d2;
+
        asm volatile("rep ; movsq"
                     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
                     : "0" (u64s), "1" (dst), "2" (src)
@@ -653,6 +686,7 @@ static inline void __memmove_u64s_up(void *_dst, const void *_src,
 
 #ifdef CONFIG_X86_64
        long d0, d1, d2;
+
        asm volatile("std ;\n"
                     "rep ; movsq\n"
                     "cld ;\n"
@@ -811,4 +845,11 @@ static inline int u8_cmp(u8 l, u8 r)
        return cmp_int(l, r);
 }
 
+static inline int cmp_le32(__le32 l, __le32 r)
+{
+       return cmp_int(le32_to_cpu(l), le32_to_cpu(r));
+}
+
+#include <linux/uuid.h>
+
 #endif /* _BCACHEFS_UTIL_H */
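cmp_le32() rounds out the cmp_int()-style helpers for on-disk little-endian fields: byte-swap on big-endian hosts, then a three-way compare. A hypothetical sort comparator:

	struct entry { __le32 seq; };			/* hypothetical on-disk record */

	static int entry_cmp(const void *_l, const void *_r)
	{
		const struct entry *l = _l, *r = _r;

		return cmp_le32(l->seq, r->seq);	/* -1, 0 or 1 */
	}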