diff --git a/libbcachefs/util.h b/libbcachefs/util.h
index 8aa5c34..4290e0a 100644
--- a/libbcachefs/util.h
+++ b/libbcachefs/util.h
@@ -1,72 +1,38 @@
-#ifndef _BCACHE_UTIL_H
-#define _BCACHE_UTIL_H
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_UTIL_H
+#define _BCACHEFS_UTIL_H
 
 #include
 #include
 #include
 #include
-#include
 #include
 #include
+#include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
 #include
 
-#define PAGE_SECTOR_SHIFT	(PAGE_SHIFT - 9)
-#define PAGE_SECTORS		(1UL << PAGE_SECTOR_SHIFT)
+#include "mean_and_variance.h"
+
+#include "darray.h"
 
 struct closure;
 
 #ifdef CONFIG_BCACHEFS_DEBUG
-
 #define EBUG_ON(cond)		BUG_ON(cond)
-#define atomic_dec_bug(v)	BUG_ON(atomic_dec_return(v) < 0)
-#define atomic_inc_bug(v, i)	BUG_ON(atomic_inc_return(v) <= i)
-#define atomic_sub_bug(i, v)	BUG_ON(atomic_sub_return(i, v) < 0)
-#define atomic_add_bug(i, v)	BUG_ON(atomic_add_return(i, v) < 0)
-#define atomic_long_dec_bug(v)		BUG_ON(atomic_long_dec_return(v) < 0)
-#define atomic_long_sub_bug(i, v)	BUG_ON(atomic_long_sub_return(i, v) < 0)
-#define atomic64_dec_bug(v)	BUG_ON(atomic64_dec_return(v) < 0)
-#define atomic64_inc_bug(v, i)	BUG_ON(atomic64_inc_return(v) <= i)
-#define atomic64_sub_bug(i, v)	BUG_ON(atomic64_sub_return(i, v) < 0)
-#define atomic64_add_bug(i, v)	BUG_ON(atomic64_add_return(i, v) < 0)
-
-#define memcpy(_dst, _src, _len) \
-do { \
-	BUG_ON(!((void *) (_dst) >= (void *) (_src) + (_len) || \
-		 (void *) (_dst) + (_len) <= (void *) (_src))); \
-	memcpy(_dst, _src, _len); \
-} while (0)
-
-#else /* DEBUG */
-
-#define EBUG_ON(cond)
-#define atomic_dec_bug(v)	atomic_dec(v)
-#define atomic_inc_bug(v, i)	atomic_inc(v)
-#define atomic_sub_bug(i, v)	atomic_sub(i, v)
-#define atomic_add_bug(i, v)	atomic_add(i, v)
-#define atomic_long_dec_bug(v)		atomic_long_dec(v)
-#define atomic_long_sub_bug(i, v)	atomic_long_sub(i, v)
-#define atomic64_dec_bug(v)	atomic64_dec(v)
-#define atomic64_inc_bug(v, i)	atomic64_inc(v)
-#define atomic64_sub_bug(i, v)	atomic64_sub(i, v)
-#define atomic64_add_bug(i, v)	atomic64_add(i, v)
-
-#endif
-
-#ifndef __CHECKER__
-#define __flatten __attribute__((flatten))
+#else
-/* sparse doesn't know about attribute((flatten)) */
-#define __flatten
+#define EBUG_ON(cond)
 #endif
 
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 #define CPU_BIG_ENDIAN		0
-#else
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 #define CPU_BIG_ENDIAN		1
 #endif
 
@@ -79,30 +45,53 @@ do { \
 	(__builtin_types_compatible_p(typeof(_val), _type) || \
 	 __builtin_types_compatible_p(typeof(_val), const _type))
 
-static inline void kvpfree(void *p, size_t size)
+/* Userspace doesn't align allocations as nicely as the kernel allocators: */
+static inline size_t buf_pages(void *p, size_t len)
 {
-	if (size < PAGE_SIZE)
-		kfree(p);
-	else if (is_vmalloc_addr(p))
+	return DIV_ROUND_UP(len +
+			    ((unsigned long) p & (PAGE_SIZE - 1)),
+			    PAGE_SIZE);
+}
+
+static inline void vpfree(void *p, size_t size)
+{
+	if (is_vmalloc_addr(p))
 		vfree(p);
 	else
 		free_pages((unsigned long) p, get_order(size));
+}
 
+static inline void *vpmalloc(size_t size, gfp_t gfp_mask)
+{
+	return (void *) __get_free_pages(gfp_mask|__GFP_NOWARN,
+					 get_order(size)) ?:
+		__vmalloc(size, gfp_mask);
+}
+
+static inline void kvpfree(void *p, size_t size)
+{
+	if (size < PAGE_SIZE)
+		kfree(p);
+	else
+		vpfree(p, size);
 }
 
 static inline void *kvpmalloc(size_t size, gfp_t gfp_mask)
 {
-	return size < PAGE_SIZE ? kmalloc(size, gfp_mask)
-				: (void *) __get_free_pages(gfp_mask|__GFP_NOWARN,
-							    get_order(size))
-				  ?: __vmalloc(size, gfp_mask, PAGE_KERNEL);
+	return size < PAGE_SIZE
+		? kmalloc(size, gfp_mask)
+		: vpmalloc(size, gfp_mask);
+}
+
+int mempool_init_kvpmalloc_pool(mempool_t *, int, size_t);
+
+#define HEAP(type) \
+struct { \
+	size_t size, used; \
+	type *data; \
 }
 
-#define DECLARE_HEAP(type, name) \
-	struct { \
-		size_t size, used; \
-		type *data; \
-	} name
+#define DECLARE_HEAP(type, name) HEAP(type) name
 
 #define init_heap(heap, _size, gfp) \
 ({ \
@@ -118,142 +107,169 @@ do { \
 	(heap)->data = NULL; \
 } while (0)
 
-#define heap_swap(h, i, j)	swap((h)->data[i], (h)->data[j])
+#define heap_set_backpointer(h, i, _fn) \
+do { \
+	void (*fn)(typeof(h), size_t) = _fn; \
+	if (fn) \
+		fn(h, i); \
+} while (0)
 
-#define heap_sift(h, i, cmp) \
+#define heap_swap(h, i, j, set_backpointer) \
 do { \
-	size_t _r, _j = i; \
+	swap((h)->data[i], (h)->data[j]); \
+	heap_set_backpointer(h, i, set_backpointer); \
+	heap_set_backpointer(h, j, set_backpointer); \
+} while (0)
+
+#define heap_peek(h) \
+({ \
+	EBUG_ON(!(h)->used); \
+	(h)->data[0]; \
+})
+
+#define heap_full(h)	((h)->used == (h)->size)
+
+#define heap_sift_down(h, i, cmp, set_backpointer) \
+do { \
+	size_t _c, _j = i; \
 \
-	for (; _j * 2 + 1 < (h)->used; _j = _r) { \
-		_r = _j * 2 + 1; \
-		if (_r + 1 < (h)->used && \
-		    cmp((h)->data[_r], (h)->data[_r + 1])) \
-			_r++; \
+	for (; _j * 2 + 1 < (h)->used; _j = _c) { \
+		_c = _j * 2 + 1; \
+		if (_c + 1 < (h)->used && \
+		    cmp(h, (h)->data[_c], (h)->data[_c + 1]) >= 0) \
+			_c++; \
 \
-		if (cmp((h)->data[_r], (h)->data[_j])) \
+		if (cmp(h, (h)->data[_c], (h)->data[_j]) >= 0) \
 			break; \
-		heap_swap(h, _r, _j); \
+		heap_swap(h, _c, _j, set_backpointer); \
 	} \
 } while (0)
 
-#define heap_sift_down(h, i, cmp) \
+#define heap_sift_up(h, i, cmp, set_backpointer) \
 do { \
 	while (i) { \
 		size_t p = (i - 1) / 2; \
-		if (cmp((h)->data[i], (h)->data[p])) \
+		if (cmp(h, (h)->data[i], (h)->data[p]) >= 0) \
 			break; \
-		heap_swap(h, i, p); \
+		heap_swap(h, i, p, set_backpointer); \
 		i = p; \
 	} \
 } while (0)
 
-#define heap_add(h, d, cmp) \
+#define __heap_add(h, d, cmp, set_backpointer) \
 ({ \
-	bool _r = !heap_full(h); \
-	if (_r) { \
-		size_t _i = (h)->used++; \
-		(h)->data[_i] = d; \
+	size_t _i = (h)->used++; \
+	(h)->data[_i] = d; \
+	heap_set_backpointer(h, _i, set_backpointer); \
 \
-		heap_sift_down(h, _i, cmp); \
-		heap_sift(h, _i, cmp); \
-	} \
+	heap_sift_up(h, _i, cmp, set_backpointer); \
+	_i; \
+})
+
+#define heap_add(h, d, cmp, set_backpointer) \
+({ \
+	bool _r = !heap_full(h); \
+	if (_r) \
+		__heap_add(h, d, cmp, set_backpointer); \
 	_r; \
 })
 
-#define heap_del(h, i, cmp) \
+#define heap_add_or_replace(h, new, cmp, set_backpointer) \
+do { \
+	if (!heap_add(h, new, cmp, set_backpointer) && \
+	    cmp(h, new, heap_peek(h)) >= 0) { \
+		(h)->data[0] = new; \
+		heap_set_backpointer(h, 0, set_backpointer); \
+		heap_sift_down(h, 0, cmp, set_backpointer); \
+	} \
+} while (0)
+
+#define heap_del(h, i, cmp, set_backpointer) \
 do { \
 	size_t _i = (i); \
 \
 	BUG_ON(_i >= (h)->used); \
 	(h)->used--; \
-	heap_swap(h, _i, (h)->used); \
-	heap_sift_down(h, _i, cmp); \
-	heap_sift(h, _i, cmp); \
+	if ((_i) < (h)->used) { \
+		heap_swap(h, _i, (h)->used, set_backpointer); \
+		heap_sift_up(h, _i, cmp, set_backpointer); \
+		heap_sift_down(h, _i, cmp, set_backpointer); \
+	} \
 } while (0)
 
-#define heap_pop(h, d, cmp) \
+#define heap_pop(h, d, cmp, set_backpointer) \
 ({ \
 	bool _r = (h)->used; \
 	if (_r) { \
 		(d) = (h)->data[0]; \
-		heap_del(h, 0, cmp); \
+		heap_del(h, 0, cmp, set_backpointer); \
 	} \
 	_r; \
 })
 
-#define heap_peek(h) \
-({ \
-	EBUG_ON(!(h)->used); \
-	(h)->data[0]; \
-})
-
-#define heap_full(h)	((h)->used == (h)->size)
-
-#define heap_resort(heap, cmp) \
+#define heap_resort(heap, cmp, set_backpointer) \
 do { \
 	ssize_t _i; \
 	for (_i = (ssize_t) (heap)->used / 2 - 1; _i >= 0; --_i) \
-		heap_sift(heap, _i, cmp); \
+		heap_sift_down(heap, _i, cmp, set_backpointer); \
 } while (0)
 
-/*
- * Simple array based allocator - preallocates a number of elements and you can
- * never allocate more than that, also has no locking.
- *
- * Handy because if you know you only need a fixed number of elements you don't
- * have to worry about memory allocation failure, and sometimes a mempool isn't
- * what you want.
- *
- * We treat the free elements as entries in a singly linked list, and the
- * freelist as a stack - allocating and freeing push and pop off the freelist.
- */
-
-#define DECLARE_ARRAY_ALLOCATOR(type, name, size) \
-	struct { \
-		type *freelist; \
-		type data[size]; \
-	} name
-
-#define array_alloc(array) \
-({ \
-	typeof((array)->freelist) _ret = (array)->freelist; \
- \
-	if (_ret) \
-		(array)->freelist = *((typeof((array)->freelist) *) _ret);\
- \
-	_ret; \
-})
+#define ANYSINT_MAX(t) \
+	((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)
 
-#define array_free(array, ptr) \
-do { \
-	typeof((array)->freelist) _ptr = ptr; \
- \
-	*((typeof((array)->freelist) *) _ptr) = (array)->freelist; \
-	(array)->freelist = _ptr; \
-} while (0)
+#include "printbuf.h"
 
-#define array_allocator_init(array) \
-do { \
-	typeof((array)->freelist) _i; \
- \
-	BUILD_BUG_ON(sizeof((array)->data[0]) < sizeof(void *)); \
-	(array)->freelist = NULL; \
- \
-	for (_i = (array)->data; \
-	     _i < (array)->data + ARRAY_SIZE((array)->data); \
-	     _i++) \
-		array_free(array, _i); \
-} while (0)
+#define prt_vprintf(_out, ...)		bch2_prt_vprintf(_out, __VA_ARGS__)
+#define prt_printf(_out, ...)		bch2_prt_printf(_out, __VA_ARGS__)
+#define printbuf_str(_buf)		bch2_printbuf_str(_buf)
+#define printbuf_exit(_buf)		bch2_printbuf_exit(_buf)
 
-#define array_freelist_empty(array)	((array)->freelist == NULL)
+#define printbuf_tabstops_reset(_buf)	bch2_printbuf_tabstops_reset(_buf)
+#define printbuf_tabstop_pop(_buf)	bch2_printbuf_tabstop_pop(_buf)
+#define printbuf_tabstop_push(_buf, _n)	bch2_printbuf_tabstop_push(_buf, _n)
 
-#define ANYSINT_MAX(t) \
-	((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)
+#define printbuf_indent_add(_out, _n)	bch2_printbuf_indent_add(_out, _n)
+#define printbuf_indent_sub(_out, _n)	bch2_printbuf_indent_sub(_out, _n)
+
+#define prt_newline(_out)		bch2_prt_newline(_out)
+#define prt_tab(_out)			bch2_prt_tab(_out)
+#define prt_tab_rjust(_out)		bch2_prt_tab_rjust(_out)
+
+#define prt_bytes_indented(...)		bch2_prt_bytes_indented(__VA_ARGS__)
+#define prt_u64(_out, _v)		prt_printf(_out, "%llu", (u64) (_v))
+#define prt_human_readable_u64(...)	bch2_prt_human_readable_u64(__VA_ARGS__)
+#define prt_human_readable_s64(...)	bch2_prt_human_readable_s64(__VA_ARGS__)
+#define prt_units_u64(...)		bch2_prt_units_u64(__VA_ARGS__)
+#define prt_units_s64(...)		bch2_prt_units_s64(__VA_ARGS__)
+#define prt_string_option(...)		bch2_prt_string_option(__VA_ARGS__)
+#define prt_bitflags(...)		bch2_prt_bitflags(__VA_ARGS__)
+#define prt_bitflags_vector(...)	bch2_prt_bitflags_vector(__VA_ARGS__)
+
+void bch2_pr_time_units(struct printbuf *, u64);
+void bch2_prt_datetime(struct printbuf *, time64_t);
+
+#ifdef __KERNEL__
+static inline void uuid_unparse_lower(u8 *uuid, char *out)
+{
+	sprintf(out, "%pUb", uuid);
+}
+#else
+#include <uuid/uuid.h>
+#endif
+
+static inline void pr_uuid(struct printbuf *out, u8 *uuid)
+{
+	char uuid_str[40];
+
+	uuid_unparse_lower(uuid, uuid_str);
+	prt_printf(out, "%s", uuid_str);
+}
 
 int bch2_strtoint_h(const char *, int *);
 int bch2_strtouint_h(const char *, unsigned int *);
 int bch2_strtoll_h(const char *, long long *);
 int bch2_strtoull_h(const char *, unsigned long long *);
+int bch2_strtou64_h(const char *, u64 *);
 
 static inline int bch2_strtol_h(const char *cp, long *res)
 {
@@ -311,8 +327,8 @@ static inline int bch2_strtoul_h(const char *cp, long *res)
 	_r; \
 })
 
-#define snprint(buf, size, var) \
-	snprintf(buf, size, \
+#define snprint(out, var) \
+	prt_printf(out, \
 		   type_is(var, int)		? "%i\n" \
 		 : type_is(var, unsigned)	? "%u\n" \
 		 : type_is(var, long)		? "%li\n" \
@@ -322,96 +338,96 @@ static inline int bch2_strtoul_h(const char *cp, long *res)
 		 : type_is(var, char *)		? "%s\n" \
 		 : "%i\n", var)
 
-ssize_t bch2_hprint(char *buf, s64 v);
-
 bool bch2_is_zero(const void *, size_t);
 
-ssize_t bch2_snprint_string_list(char *buf, size_t size, const char * const list[],
-				 size_t selected);
+u64 bch2_read_flag_list(char *, const char * const[]);
+
+void bch2_prt_u64_binary(struct printbuf *, u64, unsigned);
+
+void bch2_print_string_as_lines(const char *prefix, const char *lines);
+
+typedef DARRAY(unsigned long) bch_stacktrace;
+int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *);
+void bch2_prt_backtrace(struct printbuf *, bch_stacktrace *);
+int bch2_prt_task_backtrace(struct printbuf *, struct task_struct *);
 
-ssize_t bch2_read_string_list(const char *buf, const char * const list[]);
+#define NR_QUANTILES	15
+#define QUANTILE_IDX(i)	inorder_to_eytzinger0(i, NR_QUANTILES)
+#define QUANTILE_FIRST	eytzinger0_first(NR_QUANTILES)
+#define QUANTILE_LAST	eytzinger0_last(NR_QUANTILES)
+
+struct bch2_quantiles {
+	struct bch2_quantile_entry {
+		u64 m;
+		u64 step;
+	} entries[NR_QUANTILES];
+};
+
+struct bch2_time_stat_buffer {
+	unsigned nr;
+	struct bch2_time_stat_buffer_entry {
+		u64 start;
+		u64 end;
+	} entries[32];
+};
 
-struct time_stats {
+struct bch2_time_stats {
 	spinlock_t lock;
-	u64 count;
-	/*
-	 * all fields are in nanoseconds, averages are ewmas stored left shifted
-	 * by 8
-	 */
-	u64 last_duration;
+	/* all fields are in nanoseconds */
+	u64 min_duration;
 	u64 max_duration;
-	u64 average_duration;
-	u64 average_frequency;
-	u64 last;
+	u64 total_duration;
+	u64 max_freq;
+	u64 min_freq;
+	u64 last_event;
+	struct bch2_quantiles quantiles;
+
+	struct mean_and_variance duration_stats;
+	struct mean_and_variance_weighted duration_stats_weighted;
+	struct mean_and_variance freq_stats;
+	struct mean_and_variance_weighted freq_stats_weighted;
+	struct bch2_time_stat_buffer __percpu *buffer;
 };
 
-void bch2_time_stats_clear(struct time_stats *stats);
-void __bch2_time_stats_update(struct time_stats *stats, u64 time);
-void bch2_time_stats_update(struct time_stats *stats, u64 time);
+#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
+void __bch2_time_stats_update(struct bch2_time_stats *stats, u64, u64);
 
-static inline unsigned local_clock_us(void)
+static inline void bch2_time_stats_update(struct bch2_time_stats *stats, u64 start)
 {
-	return local_clock() >> 10;
+	__bch2_time_stats_update(stats, start, local_clock());
 }
 
-#define NSEC_PER_ns	1L
-#define NSEC_PER_us	NSEC_PER_USEC
-#define NSEC_PER_ms	NSEC_PER_MSEC
-#define NSEC_PER_sec	NSEC_PER_SEC
-
-#define __print_time_stat(stats, name, stat, units) \
-	sysfs_print(name ## _ ## stat ## _ ## units, \
-		    div_u64((stats)->stat >> 8, NSEC_PER_ ## units))
+static inline bool track_event_change(struct bch2_time_stats *stats,
+				      u64 *start, bool v)
+{
+	if (v != !!*start) {
+		if (!v) {
+			bch2_time_stats_update(stats, *start);
+			*start = 0;
+		} else {
+			*start = local_clock() ?: 1;
+			return true;
+		}
+	}
 
-#define sysfs_print_time_stats(stats, name, \
-			       frequency_units, \
-			       duration_units) \
-do { \
-	__print_time_stat(stats, name, \
-			  average_frequency, frequency_units); \
-	__print_time_stat(stats, name, \
-			  average_duration, duration_units); \
-	sysfs_print(name ## _ ##count, (stats)->count); \
-	sysfs_print(name ## _ ##last_duration ## _ ## duration_units, \
-		    div_u64((stats)->last_duration, \
-			    NSEC_PER_ ## duration_units)); \
-	sysfs_print(name ## _ ##max_duration ## _ ## duration_units, \
-		    div_u64((stats)->max_duration, \
-			    NSEC_PER_ ## duration_units)); \
- \
-	sysfs_print(name ## _last_ ## frequency_units, (stats)->last \
-		    ? div_s64(local_clock() - (stats)->last, \
-			      NSEC_PER_ ## frequency_units) \
-		    : -1LL); \
-} while (0)
+	return false;
+}
+#else
+static inline void __bch2_time_stats_update(struct bch2_time_stats *stats, u64 start, u64 end) {}
+static inline void bch2_time_stats_update(struct bch2_time_stats *stats, u64 start) {}
+static inline bool track_event_change(struct bch2_time_stats *stats,
+				      u64 *start, bool v)
+{
+	bool ret = v && !*start;
+	*start = v;
+	return ret;
+}
+#endif
 
-#define sysfs_clear_time_stats(stats, name) \
-do { \
-	if (attr == &sysfs_ ## name ## _clear) \
-		bch2_time_stats_clear(stats); \
-} while (0)
+void bch2_time_stats_to_text(struct printbuf *, struct bch2_time_stats *);
 
-#define sysfs_time_stats_attribute(name, \
-				   frequency_units, \
-				   duration_units) \
-write_attribute(name ## _clear); \
-read_attribute(name ## _count); \
-read_attribute(name ## _average_frequency_ ## frequency_units); \
-read_attribute(name ## _average_duration_ ## duration_units); \
-read_attribute(name ## _last_duration_ ## duration_units); \
-read_attribute(name ## _max_duration_ ## duration_units); \
-read_attribute(name ## _last_ ## frequency_units)
-
-#define sysfs_time_stats_attribute_list(name, \
-					frequency_units, \
-					duration_units) \
-&sysfs_ ## name ## _clear, \
-&sysfs_ ## name ## _count, \
-&sysfs_ ## name ## _average_frequency_ ## frequency_units, \
-&sysfs_ ## name ## _average_duration_ ## duration_units, \
-&sysfs_ ## name ## _last_duration_ ## duration_units, \
-&sysfs_ ## name ## _max_duration_ ## duration_units, \
-&sysfs_ ## name ## _last_ ## frequency_units,
+void bch2_time_stats_exit(struct bch2_time_stats *);
+void bch2_time_stats_init(struct bch2_time_stats *);
 
 #define ewma_add(ewma, val, weight) \
 ({ \
@@ -440,7 +456,6 @@ static inline void bch2_ratelimit_reset(struct bch_ratelimit *d)
 u64 bch2_ratelimit_delay(struct bch_ratelimit *);
 void bch2_ratelimit_increment(struct bch_ratelimit *, u64);
 
-int bch2_ratelimit_wait_freezable_stoppable(struct bch_ratelimit *);
 
 struct bch_pd_controller {
 	struct bch_ratelimit rate;
@@ -459,14 +474,16 @@ struct bch_pd_controller {
 	s64 last_change;
 	s64 last_target;
 
-	/* If true, the rate will not increase if bch2_ratelimit_delay()
-	 * is not being called often enough. */
+	/*
+	 * If true, the rate will not increase if bch2_ratelimit_delay()
+	 * is not being called often enough.
+	 */
 	bool backpressure;
 };
 
 void bch2_pd_controller_update(struct bch_pd_controller *, s64, s64, int);
 void bch2_pd_controller_init(struct bch_pd_controller *);
-size_t bch2_pd_controller_print_debug(struct bch_pd_controller *, char *);
+void bch2_pd_controller_debug_to_text(struct printbuf *, struct bch_pd_controller *);
 
 #define sysfs_pd_controller_attribute(name) \
 	rw_attribute(name##_rate); \
@@ -490,7 +507,7 @@ do { \
 	sysfs_print(name##_rate_p_term_inverse, (var)->p_term_inverse); \
 \
 	if (attr == &sysfs_##name##_rate_debug) \
-		return bch2_pd_controller_print_debug(var, buf); \
+		bch2_pd_controller_debug_to_text(out, var); \
 } while (0)
 
 #define sysfs_pd_controller_store(name, var) \
@@ -504,96 +521,12 @@ do { \
 	(var)->p_term_inverse, 1, INT_MAX); \
 } while (0)
 
-#define __DIV_SAFE(n, d, zero) \
-({ \
-	typeof(n) _n = (n); \
-	typeof(d) _d = (d); \
-	_d ? _n / _d : zero; \
-})
-
-#define DIV_SAFE(n, d)	__DIV_SAFE(n, d, 0)
-
 #define container_of_or_null(ptr, type, member) \
 ({ \
 	typeof(ptr) _ptr = ptr; \
 	_ptr ? container_of(_ptr, type, member) : NULL; \
 })
 
-#define RB_INSERT(root, new, member, cmp) \
-({ \
-	__label__ dup; \
-	struct rb_node **n = &(root)->rb_node, *parent = NULL; \
-	typeof(new) this; \
-	int res, ret = -1; \
- \
-	while (*n) { \
-		parent = *n; \
-		this = container_of(*n, typeof(*(new)), member); \
-		res = cmp(new, this); \
-		if (!res) \
-			goto dup; \
-		n = res < 0 \
-			? &(*n)->rb_left \
-			: &(*n)->rb_right; \
-	} \
- \
-	rb_link_node(&(new)->member, parent, n); \
-	rb_insert_color(&(new)->member, root); \
-	ret = 0; \
-dup: \
-	ret; \
-})
-
-#define RB_SEARCH(root, search, member, cmp) \
-({ \
-	struct rb_node *n = (root)->rb_node; \
-	typeof(&(search)) this, ret = NULL; \
-	int res; \
- \
-	while (n) { \
-		this = container_of(n, typeof(search), member); \
-		res = cmp(&(search), this); \
-		if (!res) { \
-			ret = this; \
-			break; \
-		} \
-		n = res < 0 \
-			? n->rb_left \
-			: n->rb_right; \
-	} \
-	ret; \
-})
-
-#define RB_GREATER(root, search, member, cmp) \
-({ \
-	struct rb_node *n = (root)->rb_node; \
-	typeof(&(search)) this, ret = NULL; \
-	int res; \
- \
-	while (n) { \
-		this = container_of(n, typeof(search), member); \
-		res = cmp(&(search), this); \
-		if (res < 0) { \
-			ret = this; \
-			n = n->rb_left; \
-		} else \
-			n = n->rb_right; \
-	} \
-	ret; \
-})
-
-#define RB_FIRST(root, type, member) \
-	container_of_or_null(rb_first(root), type, member)
-
-#define RB_LAST(root, type, member) \
-	container_of_or_null(rb_last(root), type, member)
-
-#define RB_NEXT(ptr, member) \
-	container_of_or_null(rb_next(&(ptr)->member), typeof(*ptr), member)
-
-#define RB_PREV(ptr, member) \
-	container_of_or_null(rb_prev(&(ptr)->member), typeof(*ptr), member)
-
 /* Does linear interpolation between powers of two */
 static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
 {
@@ -606,7 +539,8 @@ static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
 	return x;
 }
 
-void bch2_bio_map(struct bio *bio, void *base);
+void bch2_bio_map(struct bio *bio, void *base, size_t);
+int bch2_bio_alloc_pages(struct bio *, size_t, gfp_t);
 
 static inline sector_t bdev_sectors(struct block_device *bdev)
 {
@@ -619,6 +553,26 @@ do { \
 	submit_bio(bio); \
 } while (0)
 
+#define kthread_wait(cond) \
+({ \
+	int _ret = 0; \
+ \
+	while (1) { \
+		set_current_state(TASK_INTERRUPTIBLE); \
+		if (kthread_should_stop()) { \
+			_ret = -1; \
+			break; \
+		} \
+ \
+		if (cond) \
+			break; \
+ \
+		schedule(); \
+	} \
+	set_current_state(TASK_RUNNING); \
+	_ret; \
+})
+
 #define kthread_wait_freezable(cond) \
 ({ \
 	int _ret = 0; \
@@ -641,14 +595,25 @@ do { \
 
 size_t bch2_rand_range(size_t);
 
-void memcpy_to_bio(struct bio *, struct bvec_iter, void *);
+void memcpy_to_bio(struct bio *, struct bvec_iter, const void *);
 void memcpy_from_bio(void *, struct bio *, struct bvec_iter);
 
+static inline void memcpy_u64s_small(void *dst, const void *src,
+				     unsigned u64s)
+{
+	u64 *d = dst;
+	const u64 *s = src;
+
+	while (u64s--)
+		*d++ = *s++;
+}
+
 static inline void __memcpy_u64s(void *dst, const void *src,
 				 unsigned u64s)
 {
 #ifdef CONFIG_X86_64
 	long d0, d1, d2;
+
 	asm volatile("rep ; movsq"
 		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
 		     : "0" (u64s), "1" (dst), "2" (src)
@@ -685,6 +650,38 @@ static inline void memmove_u64s_down(void *dst, const void *src,
 	__memmove_u64s_down(dst, src, u64s);
 }
 
+static inline void __memmove_u64s_down_small(void *dst, const void *src,
+					     unsigned u64s)
+{
+	memcpy_u64s_small(dst, src, u64s);
+}
+
+static inline void memmove_u64s_down_small(void *dst, const void *src,
+					   unsigned u64s)
+{
+	EBUG_ON(dst > src);
+
+	__memmove_u64s_down_small(dst, src, u64s);
+}
+
+static inline void __memmove_u64s_up_small(void *_dst, const void *_src,
+					   unsigned u64s)
+{
+	u64 *dst = (u64 *) _dst + u64s;
+	u64 *src = (u64 *) _src + u64s;
+
+	while (u64s--)
+		*--dst = *--src;
+}
+
+static inline void memmove_u64s_up_small(void *dst, const void *src,
+					 unsigned u64s)
+{
+	EBUG_ON(dst < src);
+
+	__memmove_u64s_up_small(dst, src, u64s);
+}
+
 static inline void __memmove_u64s_up(void *_dst, const void *_src,
 				     unsigned u64s)
 {
@@ -693,6 +690,7 @@ static inline void __memmove_u64s_up(void *_dst, const void *_src,
 
 #ifdef CONFIG_X86_64
 	long d0, d1, d2;
+
 	asm volatile("std ;\n"
 		     "rep ; movsq\n"
 		     "cld ;\n"
@@ -722,35 +720,150 @@ static inline void memmove_u64s(void *dst, const void *src,
 	__memmove_u64s_up(dst, src, u64s);
 }
 
-static inline struct bio_vec next_contig_bvec(struct bio *bio,
-					      struct bvec_iter *iter)
+/* Set the last few bytes up to a u64 boundary given an offset into a buffer. */
+static inline void memset_u64s_tail(void *s, int c, unsigned bytes)
 {
-	struct bio_vec bv = bio_iter_iovec(bio, *iter);
+	unsigned rem = round_up(bytes, sizeof(u64)) - bytes;
+
+	memset(s + bytes, c, rem);
+}
 
-	bio_advance_iter(bio, iter, bv.bv_len);
-#ifndef CONFIG_HIGHMEM
-	while (iter->bi_size) {
-		struct bio_vec next = bio_iter_iovec(bio, *iter);
+void sort_cmp_size(void *base, size_t num, size_t size,
+	  int (*cmp_func)(const void *, const void *, size_t),
+	  void (*swap_func)(void *, void *, size_t));
 
-		if (page_address(bv.bv_page) + bv.bv_offset + bv.bv_len !=
-		    page_address(next.bv_page) + next.bv_offset)
-			break;
+/* just the memmove, doesn't update @_nr */
+#define __array_insert_item(_array, _nr, _pos) \
+	memmove(&(_array)[(_pos) + 1], \
+		&(_array)[(_pos)], \
+		sizeof((_array)[0]) * ((_nr) - (_pos)))
 
-		bv.bv_len += next.bv_len;
-		bio_advance_iter(bio, iter, next.bv_len);
+#define array_insert_item(_array, _nr, _pos, _new_item) \
+do { \
+	__array_insert_item(_array, _nr, _pos); \
+	(_nr)++; \
+	(_array)[(_pos)] = (_new_item); \
+} while (0)
+
+#define array_remove_items(_array, _nr, _pos, _nr_to_remove) \
+do { \
+	(_nr) -= (_nr_to_remove); \
+	memmove(&(_array)[(_pos)], \
+		&(_array)[(_pos) + (_nr_to_remove)], \
+		sizeof((_array)[0]) * ((_nr) - (_pos))); \
+} while (0)
+
+#define array_remove_item(_array, _nr, _pos) \
+	array_remove_items(_array, _nr, _pos, 1)
+
+static inline void __move_gap(void *array, size_t element_size,
+			      size_t nr, size_t size,
+			      size_t old_gap, size_t new_gap)
+{
+	size_t gap_end = old_gap + size - nr;
+
+	if (new_gap < old_gap) {
+		size_t move = old_gap - new_gap;
+
+		memmove(array + element_size * (gap_end - move),
+			array + element_size * (old_gap - move),
+			element_size * move);
+	} else if (new_gap > old_gap) {
+		size_t move = new_gap - old_gap;
+
+		memmove(array + element_size * old_gap,
+			array + element_size * gap_end,
+			element_size * move);
 	}
-#endif
-	return bv;
 }
 
-#define __bio_for_each_contig_segment(bv, bio, iter, start) \
-	for (iter = (start); \
-	     (iter).bi_size && \
-		((bv = next_contig_bvec((bio), &(iter))), 1);)
+/* Move the gap in a gap buffer: */
+#define move_gap(_array, _nr, _size, _old_gap, _new_gap) \
+	__move_gap(_array, sizeof(_array[0]), _nr, _size, _old_gap, _new_gap)
+
+#define bubble_sort(_base, _nr, _cmp) \
+do { \
+	ssize_t _i, _last; \
+	bool _swapped = true; \
+ \
+	for (_last= (ssize_t) (_nr) - 1; _last > 0 && _swapped; --_last) {\
+		_swapped = false; \
+		for (_i = 0; _i < _last; _i++) \
+			if (_cmp((_base)[_i], (_base)[_i + 1]) > 0) { \
+				swap((_base)[_i], (_base)[_i + 1]); \
+				_swapped = true; \
+			} \
+	} \
+} while (0)
+
+static inline u64 percpu_u64_get(u64 __percpu *src)
+{
+	u64 ret = 0;
+	int cpu;
 
-#define bio_for_each_contig_segment(bv, bio, iter) \
-	__bio_for_each_contig_segment(bv, bio, iter, (bio)->bi_iter)
+	for_each_possible_cpu(cpu)
+		ret += *per_cpu_ptr(src, cpu);
+	return ret;
+}
+
+static inline void percpu_u64_set(u64 __percpu *dst, u64 src)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(dst, cpu) = 0;
+	this_cpu_write(*dst, src);
+}
+
+static inline void acc_u64s(u64 *acc, const u64 *src, unsigned nr)
+{
+	unsigned i;
+
+	for (i = 0; i < nr; i++)
+		acc[i] += src[i];
+}
+
+static inline void acc_u64s_percpu(u64 *acc, const u64 __percpu *src,
+				   unsigned nr)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		acc_u64s(acc, per_cpu_ptr(src, cpu), nr);
+}
+
+static inline void percpu_memset(void __percpu *p, int c, size_t bytes)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		memset(per_cpu_ptr(p, cpu), c, bytes);
+}
+
+u64 *bch2_acc_percpu_u64s(u64 __percpu *, unsigned);
+
+#define cmp_int(l, r)	((l > r) - (l < r))
+
+static inline int u8_cmp(u8 l, u8 r)
+{
+	return cmp_int(l, r);
+}
+
+static inline int cmp_le32(__le32 l, __le32 r)
+{
+	return cmp_int(le32_to_cpu(l), le32_to_cpu(r));
+}
+
+#include <linux/dcache.h>
+
+#define QSTR(n) { { { .len = strlen(n) } }, .name = n }
+
+static inline bool qstr_eq(const struct qstr l, const struct qstr r)
+{
+	return l.len == r.len && !memcmp(l.name, r.name, l.len);
+}
 
-size_t bch_scnmemcpy(char *, size_t, const char *, size_t);
+void bch2_darray_str_exit(darray_str *);
+int bch2_split_devs(const char *, darray_str *);
 
-#endif /* _BCACHE_UTIL_H */
+#endif /* _BCACHEFS_UTIL_H */
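
The helpers this patch touches are mostly plain C, so their behaviour can be checked outside the kernel. The sketches that follow are hypothetical userspace harnesses, not part of the patch: kernel symbols they need (PAGE_SIZE, DIV_ROUND_UP, u64, swap()) are re-declared locally under that assumption, and each compiles standalone with a C99 compiler.

First, buf_pages(): it counts how many pages a buffer touches by adding the pointer's offset within its first page before rounding up.

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE		4096UL	/* local stand-in for the kernel's */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Same arithmetic as the patch's buf_pages(): */
static size_t buf_pages(void *p, size_t len)
{
	return DIV_ROUND_UP(len + ((unsigned long) p & (PAGE_SIZE - 1)),
			    PAGE_SIZE);
}

int main(void)
{
	/* 4096 bytes starting 1 byte into a page straddle two pages: */
	printf("%zu\n", buf_pages((void *) 4097UL, 4096));	/* 2 */
	/* the same 4096 bytes, page aligned, fit in one: */
	printf("%zu\n", buf_pages((void *) 4096UL, 4096));	/* 1 */
	return 0;
}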
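
The reworked heap macros change the comparison contract: cmp() now takes the heap itself as its first argument and returns a cmp_int()-style tristate, and both sift directions stop on a result >= 0, giving a min-heap when elements compare with cmp_int(). A sketch of that sift logic with the macros unrolled into plain functions over int:

#include <stdio.h>
#include <stddef.h>

struct int_heap {	/* mirrors HEAP(int) */
	size_t size, used;
	int *data;
};

#define cmp_int(l, r) ((l > r) - (l < r))

static void swap_ints(int *a, int *b) { int t = *a; *a = *b; *b = t; }

/* Same stopping rule as heap_sift_up(): stop once cmp() >= 0. */
static void sift_up(struct int_heap *h, size_t i)
{
	while (i) {
		size_t p = (i - 1) / 2;

		if (cmp_int(h->data[i], h->data[p]) >= 0)
			break;
		swap_ints(&h->data[i], &h->data[p]);
		i = p;
	}
}

/* Same child selection and stopping rule as heap_sift_down(). */
static void sift_down(struct int_heap *h, size_t i)
{
	size_t c, j = i;

	for (; j * 2 + 1 < h->used; j = c) {
		c = j * 2 + 1;
		if (c + 1 < h->used &&
		    cmp_int(h->data[c], h->data[c + 1]) >= 0)
			c++;
		if (cmp_int(h->data[c], h->data[j]) >= 0)
			break;
		swap_ints(&h->data[c], &h->data[j]);
	}
}

int main(void)
{
	int buf[8], input[] = { 5, 1, 4, 2, 3 };
	struct int_heap h = { .size = 8, .used = 0, .data = buf };

	for (size_t i = 0; i < 5; i++) {	/* what heap_add() does */
		size_t n = h.used++;

		h.data[n] = input[i];
		sift_up(&h, n);
	}
	while (h.used) {			/* what heap_pop() does */
		printf("%d ", h.data[0]);
		h.data[0] = h.data[--h.used];
		sift_down(&h, 0);
	}
	printf("\n");				/* prints: 1 2 3 4 5 */
	return 0;
}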
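
memcpy_u64s_small() and __memmove_u64s_up_small() differ only in copy direction, and memmove_u64s() picks between the up/down variants by comparing dst against src so that overlapping ranges survive. The direction logic, runnable in userspace (u64 stands in for the kernel typedef):

#include <stdio.h>

typedef unsigned long long u64;

/* Forward copy, as in memcpy_u64s_small(): safe when dst <= src. */
static void copy_u64s_down(u64 *d, const u64 *s, unsigned u64s)
{
	while (u64s--)
		*d++ = *s++;
}

/* Backward copy, as in __memmove_u64s_up_small(): safe when dst >= src. */
static void copy_u64s_up(u64 *d, const u64 *s, unsigned u64s)
{
	d += u64s;
	s += u64s;
	while (u64s--)
		*--d = *--s;
}

int main(void)
{
	u64 buf[6] = { 1, 2, 3, 4, 5, 6 };
	int i;

	copy_u64s_up(buf + 1, buf, 5);		/* overlapping shift right */
	for (i = 0; i < 6; i++)
		printf("%llu ", buf[i]);	/* 1 1 2 3 4 5 */
	printf("\n");

	copy_u64s_down(buf, buf + 1, 5);	/* overlapping shift left */
	for (i = 0; i < 6; i++)
		printf("%llu ", buf[i]);	/* 1 2 3 4 5 5 */
	printf("\n");
	return 0;
}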
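
__move_gap() needs nothing but memmove(), so the gap-buffer bookkeeping can be exercised directly. A sketch with a char buffer (element_size 1); note that only the live elements are preserved and the vacated slots hold stale bytes:

#include <stdio.h>
#include <string.h>
#include <stddef.h>

/* Same body as the patch's __move_gap(), using char * so the pointer
 * arithmetic is strict ISO C rather than the kernel's void * extension. */
static void move_gap_demo(char *array, size_t element_size,
			  size_t nr, size_t size,
			  size_t old_gap, size_t new_gap)
{
	size_t gap_end = old_gap + size - nr;

	if (new_gap < old_gap) {
		size_t move = old_gap - new_gap;

		memmove(array + element_size * (gap_end - move),
			array + element_size * (old_gap - move),
			element_size * move);
	} else if (new_gap > old_gap) {
		size_t move = new_gap - old_gap;

		memmove(array + element_size * old_gap,
			array + element_size * gap_end,
			element_size * move);
	}
}

int main(void)
{
	/* 8 slots, 5 live elements "abcde", 3-slot gap starting at index 3: */
	char buf[] = "abc___de";

	/* Move the gap to index 1: live data now reads "a" + gap + "bcde",
	 * and slots 1..3 are stale. */
	move_gap_demo(buf, 1, 5, 8, 3, 1);
	printf("%s\n", buf);	/* prints "abc_bcde" */
	return 0;
}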
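
Finally bubble_sort(): its cmp follows the same cmp_int() sign convention, and the _swapped flag exits early once a pass makes no change. The macro body below is the patch's, with a local GNU C stand-in for the kernel's swap():

#include <stdio.h>
#include <stdbool.h>
#include <sys/types.h>

/* Local stand-in for the kernel's swap(): */
#define swap(a, b) do { __typeof__(a) _t = (a); (a) = (b); (b) = _t; } while (0)
#define cmp_int(l, r) ((l > r) - (l < r))

#define bubble_sort(_base, _nr, _cmp) \
do { \
	ssize_t _i, _last; \
	bool _swapped = true; \
 \
	for (_last = (ssize_t) (_nr) - 1; _last > 0 && _swapped; --_last) {\
		_swapped = false; \
		for (_i = 0; _i < _last; _i++) \
			if (_cmp((_base)[_i], (_base)[_i + 1]) > 0) { \
				swap((_base)[_i], (_base)[_i + 1]); \
				_swapped = true; \
			} \
	} \
} while (0)

int main(void)
{
	int i, a[] = { 3, 1, 4, 1, 5, 9, 2, 6 };

	bubble_sort(a, 8, cmp_int);
	for (i = 0; i < 8; i++)
		printf("%d ", a[i]);	/* 1 1 2 3 4 5 6 9 */
	printf("\n");
	return 0;
}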