X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Futil.h;h=09e272932dff388b2962f853898f6f3786a34338;hb=284c1f5148afb088e64c8a77983a43732c4d499b;hp=184915593e866aa89f4acae896c26c0f4328de8e;hpb=ff86d4722124c300c40b85b6eb8ef2d410ab303c;p=bcachefs-tools-debian

diff --git a/libbcachefs/util.h b/libbcachefs/util.h
index 1849155..09e2729 100644
--- a/libbcachefs/util.h
+++ b/libbcachefs/util.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _BCACHEFS_UTIL_H
 #define _BCACHEFS_UTIL_H

@@ -10,62 +11,20 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
 #include
-
-#define PAGE_SECTOR_SHIFT	(PAGE_SHIFT - 9)
-#define PAGE_SECTORS		(1UL << PAGE_SECTOR_SHIFT)
+#include

 struct closure;

 #ifdef CONFIG_BCACHEFS_DEBUG
-
 #define EBUG_ON(cond)		BUG_ON(cond)
-#define atomic_dec_bug(v)	BUG_ON(atomic_dec_return(v) < 0)
-#define atomic_inc_bug(v, i)	BUG_ON(atomic_inc_return(v) <= i)
-#define atomic_sub_bug(i, v)	BUG_ON(atomic_sub_return(i, v) < 0)
-#define atomic_add_bug(i, v)	BUG_ON(atomic_add_return(i, v) < 0)
-#define atomic_long_dec_bug(v)		BUG_ON(atomic_long_dec_return(v) < 0)
-#define atomic_long_sub_bug(i, v)	BUG_ON(atomic_long_sub_return(i, v) < 0)
-#define atomic64_dec_bug(v)	BUG_ON(atomic64_dec_return(v) < 0)
-#define atomic64_inc_bug(v, i)	BUG_ON(atomic64_inc_return(v) <= i)
-#define atomic64_sub_bug(i, v)	BUG_ON(atomic64_sub_return(i, v) < 0)
-#define atomic64_add_bug(i, v)	BUG_ON(atomic64_add_return(i, v) < 0)
-
-#define memcpy(dst, src, len)					\
-({								\
-	void *_dst = (dst);					\
-	const void *_src = (src);				\
-	size_t _len = (len);					\
-								\
-	BUG_ON(!((void *) (_dst) >= (void *) (_src) + (_len) ||\
-		 (void *) (_dst) + (_len) <= (void *) (_src)));	\
-	memcpy(_dst, _src, _len);				\
-})
-
-#else /* DEBUG */
-
-#define EBUG_ON(cond)
-#define atomic_dec_bug(v)	atomic_dec(v)
-#define atomic_inc_bug(v, i)	atomic_inc(v)
-#define atomic_sub_bug(i, v)	atomic_sub(i, v)
-#define atomic_add_bug(i, v)	atomic_add(i, v)
-#define atomic_long_dec_bug(v)	atomic_long_dec(v)
-#define atomic_long_sub_bug(i, v)	atomic_long_sub(i, v)
-#define atomic64_dec_bug(v)	atomic64_dec(v)
-#define atomic64_inc_bug(v, i)	atomic64_inc(v)
-#define atomic64_sub_bug(i, v)	atomic64_sub(i, v)
-#define atomic64_add_bug(i, v)	atomic64_add(i, v)
-
-#endif
-
-#ifndef __CHECKER__
-#define __flatten __attribute__((flatten))
 #else
-/* sparse doesn't know about attribute((flatten)) */
-#define __flatten
+#define EBUG_ON(cond)
 #endif

 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
@@ -83,6 +42,14 @@ struct closure;
 (__builtin_types_compatible_p(typeof(_val), _type) ||		\
  __builtin_types_compatible_p(typeof(_val), const _type))

+/* Userspace doesn't align allocations as nicely as the kernel allocators: */
+static inline size_t buf_pages(void *p, size_t len)
+{
+	return DIV_ROUND_UP(len +
+			    ((unsigned long) p & (PAGE_SIZE - 1)),
+			    PAGE_SIZE);
+}
+
 static inline void vpfree(void *p, size_t size)
 {
 	if (is_vmalloc_addr(p))
@@ -95,7 +62,7 @@ static inline void *vpmalloc(size_t size, gfp_t gfp_mask)
 {
 	return (void *) __get_free_pages(gfp_mask|__GFP_NOWARN,
 					 get_order(size)) ?:
-		__vmalloc(size, gfp_mask, PAGE_KERNEL);
+		__vmalloc(size, gfp_mask);
 }

 static inline void kvpfree(void *p, size_t size)
 {
@@ -137,7 +104,19 @@ do {								\
 	(heap)->data = NULL;					\
 } while (0)

-#define heap_swap(h, i, j)	swap((h)->data[i], (h)->data[j])
+#define heap_set_backpointer(h, i, _fn)				\
+do {								\
+	void (*fn)(typeof(h), size_t) = _fn;			\
+	if (fn)							\
+		fn(h, i);					\
+} while (0)
+
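/*
 * Illustrative sketch -- not part of the patch.  The new set_backpointer
 * argument threaded through the heap macros lets each element record its
 * own heap index, so a specific element can later be deleted in O(log n)
 * without a linear search.  Assuming the HEAP() declarator defined earlier
 * in this header; all names below are hypothetical:
 */
struct cached_item {
	u64	prio;
	size_t	heap_idx;	/* kept in sync by the heap via the callback */
};

typedef HEAP(struct cached_item) item_heap;

static inline void item_heap_set_backpointer(item_heap *h, size_t i)
{
	h->data[i].heap_idx = i;
}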
+#define heap_swap(h, i, j, set_backpointer)			\
+do {								\
+	swap((h)->data[i], (h)->data[j]);			\
+	heap_set_backpointer(h, i, set_backpointer);		\
+	heap_set_backpointer(h, j, set_backpointer);		\
+} while (0)

 #define heap_peek(h)						\
 ({								\
@@ -147,7 +126,7 @@ do {						\

 #define heap_full(h)	((h)->used == (h)->size)

-#define heap_sift_down(h, i, cmp)				\
+#define heap_sift_down(h, i, cmp, set_backpointer)		\
 do {								\
 	size_t _c, _j = i;					\
								\
@@ -159,132 +138,153 @@ do {					\
								\
 		if (cmp(h, (h)->data[_c], (h)->data[_j]) >= 0)	\
 			break;					\
-		heap_swap(h, _c, _j);				\
+		heap_swap(h, _c, _j, set_backpointer);		\
 	}							\
 } while (0)

-#define heap_sift_up(h, i, cmp)					\
+#define heap_sift_up(h, i, cmp, set_backpointer)		\
 do {								\
 	while (i) {						\
 		size_t p = (i - 1) / 2;				\
 		if (cmp(h, (h)->data[i], (h)->data[p]) >= 0)	\
 			break;					\
-		heap_swap(h, i, p);				\
+		heap_swap(h, i, p, set_backpointer);		\
 		i = p;						\
 	}							\
 } while (0)

-#define __heap_add(h, d, cmp)					\
-do {								\
+#define __heap_add(h, d, cmp, set_backpointer)			\
+({								\
 	size_t _i = (h)->used++;				\
 	(h)->data[_i] = d;					\
+	heap_set_backpointer(h, _i, set_backpointer);		\
								\
-	heap_sift_up(h, _i, cmp);				\
-} while (0)
+	heap_sift_up(h, _i, cmp, set_backpointer);		\
+	_i;							\
+})

-#define heap_add(h, d, cmp)					\
+#define heap_add(h, d, cmp, set_backpointer)			\
 ({								\
 	bool _r = !heap_full(h);				\
 	if (_r)							\
-		__heap_add(h, d, cmp);				\
+		__heap_add(h, d, cmp, set_backpointer);		\
 	_r;							\
 })

-#define heap_add_or_replace(h, new, cmp)			\
+#define heap_add_or_replace(h, new, cmp, set_backpointer)	\
 do {								\
-	if (!heap_add(h, new, cmp) &&				\
+	if (!heap_add(h, new, cmp, set_backpointer) &&		\
 	    cmp(h, new, heap_peek(h)) >= 0) {			\
 		(h)->data[0] = new;				\
-		heap_sift_down(h, 0, cmp);			\
+		heap_set_backpointer(h, 0, set_backpointer);	\
+		heap_sift_down(h, 0, cmp, set_backpointer);	\
 	}							\
 } while (0)

-#define heap_del(h, i, cmp)					\
+#define heap_del(h, i, cmp, set_backpointer)			\
 do {								\
 	size_t _i = (i);					\
								\
 	BUG_ON(_i >= (h)->used);				\
 	(h)->used--;						\
-	heap_swap(h, _i, (h)->used);				\
-	heap_sift_up(h, _i, cmp);				\
-	heap_sift_down(h, _i, cmp);				\
+	if ((_i) < (h)->used) {					\
+		heap_swap(h, _i, (h)->used, set_backpointer);	\
+		heap_sift_up(h, _i, cmp, set_backpointer);	\
+		heap_sift_down(h, _i, cmp, set_backpointer);	\
+	}							\
 } while (0)

-#define heap_pop(h, d, cmp)					\
+#define heap_pop(h, d, cmp, set_backpointer)			\
 ({								\
 	bool _r = (h)->used;					\
 	if (_r) {						\
 		(d) = (h)->data[0];				\
-		heap_del(h, 0, cmp);				\
+		heap_del(h, 0, cmp, set_backpointer);		\
 	}							\
 	_r;							\
 })

-#define heap_resort(heap, cmp)					\
+#define heap_resort(heap, cmp, set_backpointer)			\
 do {								\
 	ssize_t _i;						\
 	for (_i = (ssize_t) (heap)->used / 2 - 1; _i >= 0; --_i)\
-		heap_sift_down(heap, _i, cmp);			\
+		heap_sift_down(heap, _i, cmp, set_backpointer);	\
 } while (0)

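/*
 * Illustrative sketch -- not part of the patch.  How the comparison and
 * backpointer callbacks plug into the macros above, reusing the hypothetical
 * item_heap from the previous sketch.  An ascending cmp yields a min-heap,
 * so heap_pop() returns the smallest element; cmp_int() is defined at the
 * bottom of this header.
 */
static inline int item_cmp(item_heap *h,
			   struct cached_item l,
			   struct cached_item r)
{
	return cmp_int(l.prio, r.prio);
}

static void item_heap_example(item_heap *h)
{
	struct cached_item it = { .prio = 42 };

	if (!heap_add(h, it, item_cmp, item_heap_set_backpointer))
		return;		/* heap was full */

	if (heap_pop(h, it, item_cmp, item_heap_set_backpointer))
		pr_debug("popped prio %llu\n", it.prio);
}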
-/*
- * Simple array based allocator - preallocates a number of elements and you can
- * never allocate more than that, also has no locking.
- *
- * Handy because if you know you only need a fixed number of elements you don't
- * have to worry about memory allocation failure, and sometimes a mempool isn't
- * what you want.
- *
- * We treat the free elements as entries in a singly linked list, and the
- * freelist as a stack - allocating and freeing push and pop off the freelist.
- */
-
-#define DECLARE_ARRAY_ALLOCATOR(type, name, size)		\
-	struct {						\
-		type	*freelist;				\
-		type	data[size];				\
-	} name
-
-#define array_alloc(array)					\
-({								\
-	typeof((array)->freelist) _ret = (array)->freelist;	\
-								\
-	if (_ret)						\
-		(array)->freelist = *((typeof((array)->freelist) *) _ret);\
-								\
-	_ret;							\
-})
+#define ANYSINT_MAX(t)						\
+	((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)

-#define array_free(array, ptr)					\
-do {								\
-	typeof((array)->freelist) _ptr = ptr;			\
-								\
-	*((typeof((array)->freelist) *) _ptr) = (array)->freelist;\
-	(array)->freelist = _ptr;				\
-} while (0)
+#include "printbuf.h"

-#define array_allocator_init(array)				\
-do {								\
-	typeof((array)->freelist) _i;				\
-								\
-	BUILD_BUG_ON(sizeof((array)->data[0]) < sizeof(void *));\
-	(array)->freelist = NULL;				\
-								\
-	for (_i = (array)->data;				\
-	     _i < (array)->data + ARRAY_SIZE((array)->data);	\
-	     _i++)						\
-		array_free(array, _i);				\
-} while (0)
+#define prt_vprintf(_out, ...)		bch2_prt_vprintf(_out, __VA_ARGS__)
+#define prt_printf(_out, ...)		bch2_prt_printf(_out, __VA_ARGS__)
+#define printbuf_str(_buf)		bch2_printbuf_str(_buf)
+#define printbuf_exit(_buf)		bch2_printbuf_exit(_buf)

-#define array_freelist_empty(array)	((array)->freelist == NULL)
+#define printbuf_tabstops_reset(_buf)	bch2_printbuf_tabstops_reset(_buf)
+#define printbuf_tabstop_pop(_buf)	bch2_printbuf_tabstop_pop(_buf)
+#define printbuf_tabstop_push(_buf, _n)	bch2_printbuf_tabstop_push(_buf, _n)

-#define ANYSINT_MAX(t)						\
-	((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)
+#define printbuf_indent_add(_out, _n)	bch2_printbuf_indent_add(_out, _n)
+#define printbuf_indent_sub(_out, _n)	bch2_printbuf_indent_sub(_out, _n)
+
+#define prt_newline(_out)		bch2_prt_newline(_out)
+#define prt_tab(_out)			bch2_prt_tab(_out)
+#define prt_tab_rjust(_out)		bch2_prt_tab_rjust(_out)
+
+#define prt_bytes_indented(...)		bch2_prt_bytes_indented(__VA_ARGS__)
+#define prt_u64(_out, _v)		prt_printf(_out, "%llu", _v)
+#define prt_human_readable_u64(...)	bch2_prt_human_readable_u64(__VA_ARGS__)
+#define prt_human_readable_s64(...)	bch2_prt_human_readable_s64(__VA_ARGS__)
+#define prt_units_u64(...)		bch2_prt_units_u64(__VA_ARGS__)
+#define prt_units_s64(...)		bch2_prt_units_s64(__VA_ARGS__)
+#define prt_string_option(...)		bch2_prt_string_option(__VA_ARGS__)
+#define prt_bitflags(...)		bch2_prt_bitflags(__VA_ARGS__)
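/*
 * Illustrative sketch -- not part of the patch.  The wrappers above expose
 * the printbuf API under short names; a typical caller builds up a string
 * and frees it when done.  This assumes the PRINTBUF initializer from
 * "printbuf.h":
 */
static void printbuf_example(void)
{
	struct printbuf buf = PRINTBUF;

	prt_printf(&buf, "free: ");
	prt_human_readable_u64(&buf, 1ULL << 20);	/* human-readable units */
	prt_newline(&buf);

	pr_info("%s", printbuf_str(&buf));
	printbuf_exit(&buf);	/* printbufs heap-allocate; always exit them */
}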
+
+void bch2_pr_time_units(struct printbuf *, u64);
+
+#ifdef __KERNEL__
+static inline void pr_time(struct printbuf *out, u64 time)
+{
+	prt_printf(out, "%llu", time);
+}
+#else
+#include <time.h>
+static inline void pr_time(struct printbuf *out, u64 _time)
+{
+	char time_str[64];
+	time_t time = _time;
+	struct tm *tm = localtime(&time);
+	size_t err = strftime(time_str, sizeof(time_str), "%c", tm);
+	if (!err)
+		prt_printf(out, "(formatting error)");
+	else
+		prt_printf(out, "%s", time_str);
+}
+#endif
+
+#ifdef __KERNEL__
+static inline void uuid_unparse_lower(u8 *uuid, char *out)
+{
+	sprintf(out, "%pUb", uuid);
+}
+#else
+#include <uuid/uuid.h>
+#endif
+
+static inline void pr_uuid(struct printbuf *out, u8 *uuid)
+{
+	char uuid_str[40];
+
+	uuid_unparse_lower(uuid, uuid_str);
+	prt_printf(out, "%s", uuid_str);
+}

 int bch2_strtoint_h(const char *, int *);
 int bch2_strtouint_h(const char *, unsigned int *);
 int bch2_strtoll_h(const char *, long long *);
 int bch2_strtoull_h(const char *, unsigned long long *);
+int bch2_strtou64_h(const char *, u64 *);

 static inline int bch2_strtol_h(const char *cp, long *res)
 {
@@ -342,8 +342,8 @@ static inline int bch2_strtoul_h(const char *cp, long *res)
 	_r;							\
 })

-#define snprint(buf, size, var)					\
-	snprintf(buf, size,					\
+#define snprint(out, var)					\
+	prt_printf(out,						\
		   type_is(var, int)		? "%i\n"	\
		 : type_is(var, unsigned)	? "%u\n"	\
		 : type_is(var, long)		? "%li\n"	\
@@ -353,61 +353,67 @@ static inline int bch2_strtoul_h(const char *cp, long *res)
		 : type_is(var, char *)		? "%s\n"	\
		 : "%i\n", var)

-ssize_t bch2_hprint(char *buf, s64 v);
-
 bool bch2_is_zero(const void *, size_t);

-ssize_t bch2_scnprint_string_list(char *, size_t, const char * const[], size_t);
+u64 bch2_read_flag_list(char *, const char * const[]);

-ssize_t bch2_read_string_list(const char *, const char * const[]);
+void bch2_prt_u64_binary(struct printbuf *, u64, unsigned);

-ssize_t bch2_scnprint_flag_list(char *, size_t, const char * const[], u64);
-u64 bch2_read_flag_list(char *, const char * const[]);
+void bch2_print_string_as_lines(const char *prefix, const char *lines);
+int bch2_prt_backtrace(struct printbuf *, struct task_struct *);

 #define NR_QUANTILES	15
 #define QUANTILE_IDX(i)	inorder_to_eytzinger0(i, NR_QUANTILES)
 #define QUANTILE_FIRST	eytzinger0_first(NR_QUANTILES)
 #define QUANTILE_LAST	eytzinger0_last(NR_QUANTILES)

-struct quantiles {
-	struct quantile_entry {
+struct bch2_quantiles {
+	struct bch2_quantile_entry {
 		u64	m;
 		u64	step;
 	} entries[NR_QUANTILES];
 };

-struct time_stat_buffer {
+struct bch2_time_stat_buffer {
 	unsigned	nr;
-	struct time_stat_buffer_entry {
+	struct bch2_time_stat_buffer_entry {
 		u64	start;
 		u64	end;
 	} entries[32];
 };

-struct time_stats {
+struct bch2_time_stats {
 	spinlock_t	lock;
-	u64		count;
 	/* all fields are in nanoseconds */
-	u64		average_duration;
-	u64		average_frequency;
 	u64		max_duration;
+	u64		min_duration;
+	u64		max_freq;
+	u64		min_freq;
 	u64		last_event;
-	struct quantiles quantiles;
+	struct bch2_quantiles quantiles;

-	struct time_stat_buffer __percpu *buffer;
+	struct mean_and_variance	  duration_stats;
+	struct mean_and_variance_weighted duration_stats_weighted;
+	struct mean_and_variance	  freq_stats;
+	struct mean_and_variance_weighted freq_stats_weighted;
+	struct bch2_time_stat_buffer __percpu *buffer;
 };

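/*
 * Illustrative sketch -- not part of the patch.  Typical use of the
 * statistics machinery above: sample a start time, do the work, then feed
 * the start time to bch2_time_stats_update() (declared just below), which
 * accumulates duration and inter-event frequency statistics:
 */
static void time_stats_example(struct bch2_time_stats *stats)
{
	u64 start = local_clock();

	/* ... the operation being timed ... */

	bch2_time_stats_update(stats, start);
}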
-void __bch2_time_stats_update(struct time_stats *stats, u64, u64);
+#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
+void __bch2_time_stats_update(struct bch2_time_stats *stats, u64, u64);
+#else
+static inline void __bch2_time_stats_update(struct bch2_time_stats *stats, u64 start, u64 end) {}
+#endif

-static inline void bch2_time_stats_update(struct time_stats *stats, u64 start)
+static inline void bch2_time_stats_update(struct bch2_time_stats *stats, u64 start)
 {
 	__bch2_time_stats_update(stats, start, local_clock());
 }

-size_t bch2_time_stats_print(struct time_stats *, char *, size_t);
+void bch2_time_stats_to_text(struct printbuf *, struct bch2_time_stats *);

-void bch2_time_stats_exit(struct time_stats *);
-void bch2_time_stats_init(struct time_stats *);
+void bch2_time_stats_exit(struct bch2_time_stats *);
+void bch2_time_stats_init(struct bch2_time_stats *);

 #define ewma_add(ewma, val, weight)				\
 ({								\
@@ -436,7 +442,6 @@ static inline void bch2_ratelimit_reset(struct bch_ratelimit *d)

 u64 bch2_ratelimit_delay(struct bch_ratelimit *);
 void bch2_ratelimit_increment(struct bch_ratelimit *, u64);
-int bch2_ratelimit_wait_freezable_stoppable(struct bch_ratelimit *);

 struct bch_pd_controller {
 	struct bch_ratelimit	rate;
@@ -462,7 +467,7 @@ struct bch_pd_controller {
 void bch2_pd_controller_update(struct bch_pd_controller *, s64, s64, int);
 void bch2_pd_controller_init(struct bch_pd_controller *);

-size_t bch2_pd_controller_print_debug(struct bch_pd_controller *, char *);
+void bch2_pd_controller_debug_to_text(struct printbuf *, struct bch_pd_controller *);

 #define sysfs_pd_controller_attribute(name)				\
	rw_attribute(name##_rate);					\
@@ -486,7 +491,7 @@ do {									\
	sysfs_print(name##_rate_p_term_inverse,	(var)->p_term_inverse);	\
									\
	if (attr == &sysfs_##name##_rate_debug)				\
-		return bch2_pd_controller_print_debug(var, buf);	\
+		bch2_pd_controller_debug_to_text(out, var);		\
 } while (0)

 #define sysfs_pd_controller_store(name, var)				\
@@ -500,96 +505,12 @@ do {									\
			(var)->p_term_inverse, 1, INT_MAX);		\
 } while (0)

-#define __DIV_SAFE(n, d, zero)					\
-({								\
-	typeof(n) _n = (n);					\
-	typeof(d) _d = (d);					\
-	_d ? _n / _d : zero;					\
-})
-
-#define DIV_SAFE(n, d)	__DIV_SAFE(n, d, 0)
-
 #define container_of_or_null(ptr, type, member)			\
 ({								\
	typeof(ptr) _ptr = ptr;					\
	_ptr ? container_of(_ptr, type, member) : NULL;		\
 })

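/*
 * Illustrative sketch -- not part of the patch.  container_of_or_null() is
 * container_of() that also passes a NULL pointer straight through, which
 * tidies up lookup paths (types here are hypothetical):
 */
struct cache_entry {
	struct rb_node	node;
	u64		key;
};

static inline struct cache_entry *entry_or_null(struct rb_node *n)
{
	/* NULL when the tree walk found nothing: */
	return container_of_or_null(n, struct cache_entry, node);
}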
-#define RB_INSERT(root, new, member, cmp)				\
-({									\
-	__label__ dup;							\
-	struct rb_node **n = &(root)->rb_node, *parent = NULL;		\
-	typeof(new) this;						\
-	int res, ret = -1;						\
-									\
-	while (*n) {							\
-		parent = *n;						\
-		this = container_of(*n, typeof(*(new)), member);	\
-		res = cmp(new, this);					\
-		if (!res)						\
-			goto dup;					\
-		n = res < 0						\
-			? &(*n)->rb_left				\
-			: &(*n)->rb_right;				\
-	}								\
-									\
-	rb_link_node(&(new)->member, parent, n);			\
-	rb_insert_color(&(new)->member, root);				\
-	ret = 0;							\
-dup:									\
-	ret;								\
-})
-
-#define RB_SEARCH(root, search, member, cmp)				\
-({									\
-	struct rb_node *n = (root)->rb_node;				\
-	typeof(&(search)) this, ret = NULL;				\
-	int res;							\
-									\
-	while (n) {							\
-		this = container_of(n, typeof(search), member);		\
-		res = cmp(&(search), this);				\
-		if (!res) {						\
-			ret = this;					\
-			break;						\
-		}							\
-		n = res < 0						\
-			? n->rb_left					\
-			: n->rb_right;					\
-	}								\
-	ret;								\
-})
-
-#define RB_GREATER(root, search, member, cmp)				\
-({									\
-	struct rb_node *n = (root)->rb_node;				\
-	typeof(&(search)) this, ret = NULL;				\
-	int res;							\
-									\
-	while (n) {							\
-		this = container_of(n, typeof(search), member);		\
-		res = cmp(&(search), this);				\
-		if (res < 0) {						\
-			ret = this;					\
-			n = n->rb_left;					\
-		} else							\
-			n = n->rb_right;				\
-	}								\
-	ret;								\
-})
-
-#define RB_FIRST(root, type, member)					\
-	container_of_or_null(rb_first(root), type, member)
-
-#define RB_LAST(root, type, member)					\
-	container_of_or_null(rb_last(root), type, member)
-
-#define RB_NEXT(ptr, member)						\
-	container_of_or_null(rb_next(&(ptr)->member), typeof(*ptr), member)
-
-#define RB_PREV(ptr, member)						\
-	container_of_or_null(rb_prev(&(ptr)->member), typeof(*ptr), member)
-
 /* Does linear interpolation between powers of two */
 static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
 {
@@ -602,8 +523,8 @@ static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
 	return x;
 }

-void bch2_bio_map(struct bio *bio, void *base);
-int bch2_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask);
+void bch2_bio_map(struct bio *bio, void *base, size_t);
+int bch2_bio_alloc_pages(struct bio *, size_t, gfp_t);

 static inline sector_t bdev_sectors(struct block_device *bdev)
 {
@@ -638,9 +559,19 @@ do {								\

 size_t bch2_rand_range(size_t);

-void memcpy_to_bio(struct bio *, struct bvec_iter, void *);
+void memcpy_to_bio(struct bio *, struct bvec_iter, const void *);
 void memcpy_from_bio(void *, struct bio *, struct bvec_iter);

+static inline void memcpy_u64s_small(void *dst, const void *src,
+				     unsigned u64s)
+{
+	u64 *d = dst;
+	const u64 *s = src;
+
+	while (u64s--)
+		*d++ = *s++;
+}
+
 static inline void __memcpy_u64s(void *dst, const void *src,
				 unsigned u64s)
 {
@@ -682,6 +613,38 @@ static inline void memmove_u64s_down(void *dst, const void *src,
	__memmove_u64s_down(dst, src, u64s);
 }

+static inline void __memmove_u64s_down_small(void *dst, const void *src,
+					     unsigned u64s)
+{
+	memcpy_u64s_small(dst, src, u64s);
+}
+
+static inline void memmove_u64s_down_small(void *dst, const void *src,
+					   unsigned u64s)
+{
+	EBUG_ON(dst > src);
+
+	__memmove_u64s_down_small(dst, src, u64s);
+}
+
+static inline void __memmove_u64s_up_small(void *_dst, const void *_src,
+					   unsigned u64s)
+{
+	u64 *dst = (u64 *) _dst + u64s;
+	u64 *src = (u64 *) _src + u64s;
+
+	while (u64s--)
+		*--dst = *--src;
+}
+
+static inline void memmove_u64s_up_small(void *dst, const void *src,
+					 unsigned u64s)
+{
+	EBUG_ON(dst < src);
+
+	__memmove_u64s_up_small(dst, src, u64s);
+}
+
 static inline void __memmove_u64s_up(void *_dst, const void *_src,
				     unsigned u64s)
 {
@@ -719,37 +682,14 @@ static inline void memmove_u64s(void *dst, const void *src,
	__memmove_u64s_up(dst, src, u64s);
 }

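/*
 * Illustrative sketch -- not part of the patch.  The u64-granularity copy
 * helpers above assume size and alignment are multiples of 8 bytes (as
 * bcachefs bkeys are); memmove_u64s() picks the copy direction itself:
 */
static void u64s_copy_example(void)
{
	u64 buf[8] = { 1, 2, 3, 4 };

	/* non-overlapping copy of four u64s (32 bytes): */
	memcpy_u64s_small(buf + 4, buf, 4);

	/* overlapping regions: shift three u64s down one slot: */
	memmove_u64s(buf, buf + 1, 3);
}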
-static inline struct bio_vec next_contig_bvec(struct bio *bio,
-					      struct bvec_iter *iter)
+/* Set the last few bytes up to a u64 boundary given an offset into a buffer. */
+static inline void memset_u64s_tail(void *s, int c, unsigned bytes)
 {
-	struct bio_vec bv = bio_iter_iovec(bio, *iter);
+	unsigned rem = round_up(bytes, sizeof(u64)) - bytes;

-	bio_advance_iter(bio, iter, bv.bv_len);
-#ifndef CONFIG_HIGHMEM
-	while (iter->bi_size) {
-		struct bio_vec next = bio_iter_iovec(bio, *iter);
-
-		if (page_address(bv.bv_page) + bv.bv_offset + bv.bv_len !=
-		    page_address(next.bv_page) + next.bv_offset)
-			break;
-
-		bv.bv_len += next.bv_len;
-		bio_advance_iter(bio, iter, next.bv_len);
-	}
-#endif
-	return bv;
+	memset(s + bytes, c, rem);
 }

-#define __bio_for_each_contig_segment(bv, bio, iter, start)		\
-	for (iter = (start);						\
-	     (iter).bi_size &&						\
-		((bv = next_contig_bvec((bio), &(iter))), 1);)
-
-#define bio_for_each_contig_segment(bv, bio, iter)			\
-	__bio_for_each_contig_segment(bv, bio, iter, (bio)->bi_iter)
-
-size_t bch_scnmemcpy(char *, size_t, const char *, size_t);
-
 void sort_cmp_size(void *base, size_t num, size_t size,
	  int (*cmp_func)(const void *, const void *, size_t),
	  void (*swap_func)(void *, void *, size_t));
@@ -778,6 +718,31 @@ do {								\

 #define array_remove_item(_array, _nr, _pos)			\
	array_remove_items(_array, _nr, _pos, 1)

+static inline void __move_gap(void *array, size_t element_size,
+			      size_t nr, size_t size,
+			      size_t old_gap, size_t new_gap)
+{
+	size_t gap_end = old_gap + size - nr;
+
+	if (new_gap < old_gap) {
+		size_t move = old_gap - new_gap;
+
+		memmove(array + element_size * (gap_end - move),
+			array + element_size * (old_gap - move),
+			element_size * move);
+	} else if (new_gap > old_gap) {
+		size_t move = new_gap - old_gap;
+
+		memmove(array + element_size * old_gap,
+			array + element_size * gap_end,
+			element_size * move);
+	}
+}
+
+/* Move the gap in a gap buffer: */
+#define move_gap(_array, _nr, _size, _old_gap, _new_gap)	\
+	__move_gap(_array, sizeof(_array[0]), _nr, _size, _old_gap, _new_gap)
+
 #define bubble_sort(_base, _nr, _cmp)				\
 do {								\
	ssize_t _i, _end;					\
@@ -793,4 +758,57 @@ do {								\
	}							\
 } while (0)

+static inline u64 percpu_u64_get(u64 __percpu *src)
+{
+	u64 ret = 0;
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		ret += *per_cpu_ptr(src, cpu);
+	return ret;
+}
+
+static inline void percpu_u64_set(u64 __percpu *dst, u64 src)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(dst, cpu) = 0;
+	this_cpu_write(*dst, src);
+}
+
+static inline void acc_u64s(u64 *acc, const u64 *src, unsigned nr)
+{
+	unsigned i;
+
+	for (i = 0; i < nr; i++)
+		acc[i] += src[i];
+}
+
+static inline void acc_u64s_percpu(u64 *acc, const u64 __percpu *src,
+				   unsigned nr)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		acc_u64s(acc, per_cpu_ptr(src, cpu), nr);
+}
+
+static inline void percpu_memset(void __percpu *p, int c, size_t bytes)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		memset(per_cpu_ptr(p, cpu), c, bytes);
+}
+
+u64 *bch2_acc_percpu_u64s(u64 __percpu *, unsigned);
+
+#define cmp_int(l, r)		((l > r) - (l < r))
+
+static inline int u8_cmp(u8 l, u8 r)
+{
+	return cmp_int(l, r);
+}
+
 #endif /* _BCACHEFS_UTIL_H */
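/*
 * Illustrative sketch -- not part of the patch.  Usage of the percpu
 * accumulation helpers added above (the counters here are hypothetical):
 */
static u64 percpu_example_total(u64 __percpu *counter)
{
	/* sum one u64 across all possible CPUs: */
	return percpu_u64_get(counter);
}

static void percpu_example_fold(u64 *totals, u64 __percpu *stats, unsigned nr)
{
	/* fold an nr-wide percpu array into totals[0..nr): */
	acc_u64s_percpu(totals, stats, nr);
}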