// SPDX-License-Identifier: GPL-2.0
/*
 * random utility code, for bcache but in theory not specific to bcache
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/console.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/log2.h>
#include <linux/math64.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched/clock.h>
#include <linux/mean_and_variance.h>

#include "eytzinger.h"
#include "util.h"

static const char si_units[] = "?kMGTPEZY";

/* string_get_size units: */
static const char *const units_2[] = {
	"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"
};
static const char *const units_10[] = {
	"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"
};

static int parse_u64(const char *cp, u64 *res)
{
	const char *start = cp;
	u64 v = 0;

	if (!isdigit(*cp))
		return -EINVAL;

	do {
		if (v > U64_MAX / 10)
			return -ERANGE;
		v *= 10;
		if (v > U64_MAX - (*cp - '0'))
			return -ERANGE;
		v += *cp - '0';
		cp++;
	} while (isdigit(*cp));

	*res = v;
	return cp - start;
}

static int bch2_pow(u64 n, u64 p, u64 *res)
{
	*res = 1;

	while (p--) {
		if (*res > div_u64(U64_MAX, n))
			return -ERANGE;
		*res *= n;
	}
	return 0;
}

static int parse_unit_suffix(const char *cp, u64 *res)
{
	const char *start = cp;
	u64 base = 1024;
	unsigned u;
	int ret;

	if (*cp == ' ')
		cp++;

	for (u = 1; u < strlen(si_units); u++)
		if (*cp == si_units[u]) {
			cp++;
			goto got_unit;
		}

	for (u = 0; u < ARRAY_SIZE(units_2); u++)
		if (!strncmp(cp, units_2[u], strlen(units_2[u]))) {
			cp += strlen(units_2[u]);
			goto got_unit;
		}

	for (u = 0; u < ARRAY_SIZE(units_10); u++)
		if (!strncmp(cp, units_10[u], strlen(units_10[u]))) {
			cp += strlen(units_10[u]);
			base = 1000;
			goto got_unit;
		}

	*res = 1;
	return 0;
got_unit:
	ret = bch2_pow(base, u, res);
	if (ret)
		return ret;

	return cp - start;
}

#define parse_or_ret(cp, _f)			\
do {						\
	int _ret = _f;				\
	if (_ret < 0)				\
		return _ret;			\
	cp += _ret;				\
} while (0)

static int __bch2_strtou64_h(const char *cp, u64 *res)
{
	const char *start = cp;
	u64 v = 0, b, f_n = 0, f_d = 1;
	int ret;

	parse_or_ret(cp, parse_u64(cp, &v));

	if (*cp == '.') {
		cp++;
		ret = parse_u64(cp, &f_n);
		if (ret < 0)
			return ret;
		cp += ret;

		ret = bch2_pow(10, ret, &f_d);
		if (ret)
			return ret;
	}

	parse_or_ret(cp, parse_unit_suffix(cp, &b));

	if (v > div_u64(U64_MAX, b))
		return -ERANGE;
	v *= b;

	if (f_n > div_u64(U64_MAX, b))
		return -ERANGE;

	f_n = div_u64(f_n * b, f_d);
	if (v + f_n < v)
		return -ERANGE;
	v += f_n;

	*res = v;
	return cp - start;
}

static int __bch2_strtoh(const char *cp, u64 *res,
			 u64 t_max, bool t_signed)
{
	bool positive = *cp != '-';
	u64 v = 0;

	if (*cp == '+' || *cp == '-')
		cp++;

	parse_or_ret(cp, __bch2_strtou64_h(cp, &v));

	if (*cp == '\n')
		cp++;
	if (*cp)
		return -EINVAL;

	if (positive) {
		if (v > t_max)
			return -ERANGE;
	} else {
		if (v && !t_signed)
			return -ERANGE;

		if (v > t_max + 1)
			return -ERANGE;
		v = -v;
	}

	*res = v;
	return 0;
}

#define STRTO_H(name, type)					\
int bch2_ ## name ## _h(const char *cp, type *res)		\
{								\
	u64 v = 0;						\
	int ret = __bch2_strtoh(cp, &v, ANYSINT_MAX(type),	\
			ANYSINT_MAX(type) != ((type) ~0ULL));	\
	*res = v;						\
	return ret;						\
}

STRTO_H(strtoint, int)
STRTO_H(strtouint, unsigned int)
STRTO_H(strtoll, long long)
STRTO_H(strtoull, unsigned long long)
STRTO_H(strtou64, u64)

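/*
 * Example (illustrative sketch, not compiled into the kernel): the _h
 * parsers accept an optional decimal fraction and an SI/IEC unit suffix:
 *
 *	u64 v;
 *	int ret = bch2_strtou64_h("1.5G", &v);
 *	// on success ret == 0 and v == 1610612736 (1.5 * 2^30); a bare
 *	// SI letter like "G" means base 1024, while "GB" means base 1000
 */
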
u64 bch2_read_flag_list(char *opt, const char * const list[])
{
	u64 ret = 0;
	char *p, *s, *d = kstrdup(opt, GFP_KERNEL);

	if (!d)
		return -ENOMEM;

	s = strim(d);

	while ((p = strsep(&s, ","))) {
		int flag = match_string(list, -1, p);

		if (flag < 0) {
			ret = -1;
			break;
		}

		ret |= 1 << flag;
	}

	kfree(d);

	return ret;
}

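/*
 * Example (names hypothetical): with a NULL-terminated
 * list[] = { "discard", "verbose", NULL }, bch2_read_flag_list("discard,verbose", list)
 * returns 0b11; any name not in the list makes it return -1 (all bits set,
 * as a u64).
 */
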
bool bch2_is_zero(const void *_p, size_t n)
{
	const char *p = _p;
	size_t i;

	for (i = 0; i < n; i++)
		if (p[i])
			return false;
	return true;
}

static void bch2_quantiles_update(struct quantiles *q, u64 v)
{
	unsigned i = 0;

	while (i < ARRAY_SIZE(q->entries)) {
		struct quantile_entry *e = q->entries + i;

		if (unlikely(!e->step)) {
			e->m = v;
			e->step = max_t(unsigned, v / 2, 1024);
		} else if (e->m > v) {
			e->m = e->m >= e->step
				? e->m - e->step
				: 0;
		} else if (e->m < v) {
			e->m = e->m + e->step > e->m
				? e->m + e->step
				: U32_MAX;
		}

		if ((e->m > v ? e->m - v : v - e->m) < e->step)
			e->step = max_t(unsigned, e->step / 2, 1);

		if (v >= e->m)
			break;

		i = eytzinger0_child(i, v > e->m);
	}
}

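/*
 * The above is a frugal streaming estimator: each eytzinger-ordered entry
 * nudges its estimate e->m toward every sample it sees by e->step, and
 * halves the step once it lands within one step of the sample, so the
 * estimates converge on quantile boundaries without storing samples.
 * Worked example (illustrative): a first sample of 4096 seeds e->m = 4096,
 * e->step = 2048; a later sample of 1024 pulls e->m down to 2048, and since
 * |2048 - 1024| < 2048 the step then halves to 1024.
 */
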
void bch2_prt_u64_binary(struct printbuf *out, u64 v, unsigned nr_bits)
{
	while (nr_bits)
		prt_char(out, '0' + ((v >> --nr_bits) & 1));
}

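/*
 * Example: bch2_prt_u64_binary(out, 0xa, 4) appends "1010" - bits are
 * printed most significant first.
 */
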
void bch2_print_string_as_lines(const char *prefix, const char *lines)
{
	const char *p;

	if (!lines) {
		printk("%s (null)\n", prefix);
		return;
	}

	console_lock();
	while (1) {
		p = strchrnul(lines, '\n');
		printk("%s%.*s\n", prefix, (int) (p - lines), lines);
		if (!*p)
			break;
		lines = p + 1;
	}
	console_unlock();
}

int bch2_prt_backtrace(struct printbuf *out, struct task_struct *task)
{
	unsigned long entries[32];
	unsigned i, nr_entries;
	int ret;

	ret = down_read_killable(&task->signal->exec_update_lock);
	if (ret)
		return ret;

	nr_entries = stack_trace_save_tsk(task, entries, ARRAY_SIZE(entries), 0);
	for (i = 0; i < nr_entries; i++) {
		prt_printf(out, "[<0>] %pB", (void *)entries[i]);
		prt_newline(out);
	}

	up_read(&task->signal->exec_update_lock);
	return 0;
}

/* time stats: */

static inline void bch2_time_stats_update_one(struct time_stats *stats,
					      u64 start, u64 end)
{
	u64 duration, freq;

	if (time_after64(end, start)) {
		duration = end - start;
		stats->duration_stats = mean_and_variance_update_inlined(stats->duration_stats,
									 duration);
		stats->duration_stats_weighted = mean_and_variance_weighted_update(
			stats->duration_stats_weighted,
			duration);
		stats->max_duration = max(stats->max_duration, duration);
		stats->min_duration = min(stats->min_duration, duration);
		bch2_quantiles_update(&stats->quantiles, duration);
	}

	if (time_after64(end, stats->last_event)) {
		freq = end - stats->last_event;
		stats->freq_stats = mean_and_variance_update_inlined(stats->freq_stats, freq);
		stats->freq_stats_weighted = mean_and_variance_weighted_update(
			stats->freq_stats_weighted,
			freq);
		stats->max_freq = max(stats->max_freq, freq);
		stats->min_freq = min(stats->min_freq, freq);
		stats->last_event = end;
	}
}

static noinline void bch2_time_stats_clear_buffer(struct time_stats *stats,
						  struct time_stat_buffer *b)
{
	struct time_stat_buffer_entry *i;
	unsigned long flags;

	spin_lock_irqsave(&stats->lock, flags);
	for (i = b->entries;
	     i < b->entries + ARRAY_SIZE(b->entries);
	     i++)
		bch2_time_stats_update_one(stats, i->start, i->end);
	spin_unlock_irqrestore(&stats->lock, flags);

	b->nr = 0;
}

void __bch2_time_stats_update(struct time_stats *stats, u64 start, u64 end)
{
	unsigned long flags;

	WARN_RATELIMIT(!stats->min_duration || !stats->min_freq,
		       "time_stats: min_duration = %llu, min_freq = %llu",
		       stats->min_duration, stats->min_freq);

	if (!stats->buffer) {
		spin_lock_irqsave(&stats->lock, flags);
		bch2_time_stats_update_one(stats, start, end);

		if (mean_and_variance_weighted_get_mean(stats->freq_stats_weighted) < 32 &&
		    stats->duration_stats.n > 1024)
			stats->buffer =
				alloc_percpu_gfp(struct time_stat_buffer,
						 GFP_ATOMIC);
		spin_unlock_irqrestore(&stats->lock, flags);
	} else {
		struct time_stat_buffer *b;

		preempt_disable();
		b = this_cpu_ptr(stats->buffer);

		BUG_ON(b->nr >= ARRAY_SIZE(b->entries));
		b->entries[b->nr++] = (struct time_stat_buffer_entry) {
			.start	= start,
			.end	= end,
		};

		if (unlikely(b->nr == ARRAY_SIZE(b->entries)))
			bch2_time_stats_clear_buffer(stats, b);
		preempt_enable();
	}
}

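/*
 * Typical usage (sketch; do_io() is a hypothetical operation): callers
 * capture a timestamp before the operation and feed both endpoints in
 * afterwards:
 *
 *	u64 start = local_clock();
 *	do_io();
 *	__bch2_time_stats_update(&stats, start, local_clock());
 *
 * Per the condition above, once events are frequent (weighted mean
 * interval below 32 ns over more than 1024 samples) updates are batched
 * through the percpu buffer so the spinlock is taken far less often.
 */
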
static const struct time_unit {
	const char	*name;
	u64		nsecs;
} time_units[] = {
	{ "ns",		1		},
	{ "us",		NSEC_PER_USEC	},
	{ "ms",		NSEC_PER_MSEC	},
	{ "s",		NSEC_PER_SEC	},
	{ "m",		NSEC_PER_SEC * 60},
	{ "h",		NSEC_PER_SEC * 3600},
};

static const struct time_unit *pick_time_units(u64 ns)
{
	const struct time_unit *u;

	for (u = time_units;
	     u + 1 < time_units + ARRAY_SIZE(time_units) &&
	     ns >= u[1].nsecs << 1;
	     u++)
		;

	return u;
}

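/*
 * pick_time_units() keeps advancing while the next-larger unit would still
 * print at least 2 of itself: e.g. 90000 ns is shown as "90 us", and only
 * values of 2 s or more are printed in seconds.
 */
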
static void pr_time_units(struct printbuf *out, u64 ns)
{
	const struct time_unit *u = pick_time_units(ns);

	prt_printf(out, "%llu ", div64_u64(ns, u->nsecs));
	prt_tab_rjust(out);
	prt_printf(out, "%s", u->name);
}

#define TABSTOP_SIZE 12

static inline void pr_name_and_units(struct printbuf *out, const char *name, u64 ns)
{
	prt_str(out, name);
	prt_tab(out);
	pr_time_units(out, ns);
	prt_newline(out);
}

void bch2_time_stats_to_text(struct printbuf *out, struct time_stats *stats)
{
	const struct time_unit *u;
	s64 f_mean = 0, d_mean = 0;
	u64 q, last_q = 0, f_stddev = 0, d_stddev = 0;
	int i;
	/*
	 * avoid divide by zero
	 */
	if (stats->freq_stats.n) {
		f_mean = mean_and_variance_get_mean(stats->freq_stats);
		f_stddev = mean_and_variance_get_stddev(stats->freq_stats);
		d_mean = mean_and_variance_get_mean(stats->duration_stats);
		d_stddev = mean_and_variance_get_stddev(stats->duration_stats);
	}

	printbuf_tabstop_push(out, out->indent + TABSTOP_SIZE);
	prt_printf(out, "count:");
	prt_tab(out);
	prt_printf(out, "%llu ",
		   stats->duration_stats.n);
	printbuf_tabstop_pop(out);
	prt_newline(out);

	printbuf_tabstops_reset(out);

	printbuf_tabstop_push(out, out->indent + 20);
	printbuf_tabstop_push(out, TABSTOP_SIZE + 2);
	printbuf_tabstop_push(out, 0);
	printbuf_tabstop_push(out, TABSTOP_SIZE + 2);

	prt_tab(out);
	prt_printf(out, "since mount");
	prt_tab_rjust(out);
	prt_tab(out);
	prt_printf(out, "recent");
	prt_tab_rjust(out);
	prt_newline(out);

	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, out->indent + 20);
	printbuf_tabstop_push(out, TABSTOP_SIZE);
	printbuf_tabstop_push(out, 2);
	printbuf_tabstop_push(out, TABSTOP_SIZE);

	prt_printf(out, "duration of events");
	prt_newline(out);
	printbuf_indent_add(out, 2);

	pr_name_and_units(out, "min:", stats->min_duration);
	pr_name_and_units(out, "max:", stats->max_duration);

	prt_printf(out, "mean:");
	prt_tab(out);
	pr_time_units(out, d_mean);
	prt_tab(out);
	pr_time_units(out, mean_and_variance_weighted_get_mean(stats->duration_stats_weighted));
	prt_newline(out);

	prt_printf(out, "stddev:");
	prt_tab(out);
	pr_time_units(out, d_stddev);
	prt_tab(out);
	pr_time_units(out, mean_and_variance_weighted_get_stddev(stats->duration_stats_weighted));

	printbuf_indent_sub(out, 2);
	prt_newline(out);

	prt_printf(out, "time between events");
	prt_newline(out);
	printbuf_indent_add(out, 2);

	pr_name_and_units(out, "min:", stats->min_freq);
	pr_name_and_units(out, "max:", stats->max_freq);

	prt_printf(out, "mean:");
	prt_tab(out);
	pr_time_units(out, f_mean);
	prt_tab(out);
	pr_time_units(out, mean_and_variance_weighted_get_mean(stats->freq_stats_weighted));
	prt_newline(out);

	prt_printf(out, "stddev:");
	prt_tab(out);
	pr_time_units(out, f_stddev);
	prt_tab(out);
	pr_time_units(out, mean_and_variance_weighted_get_stddev(stats->freq_stats_weighted));

	printbuf_indent_sub(out, 2);
	prt_newline(out);

	printbuf_tabstops_reset(out);

	i = eytzinger0_first(NR_QUANTILES);
	u = pick_time_units(stats->quantiles.entries[i].m);

	prt_printf(out, "quantiles (%s):\t", u->name);
	eytzinger0_for_each(i, NR_QUANTILES) {
		bool is_last = eytzinger0_next(i, NR_QUANTILES) == -1;

		q = max(stats->quantiles.entries[i].m, last_q);
		prt_printf(out, "%llu ",
			   div_u64(q, u->nsecs));
		if (is_last)
			prt_newline(out);
		last_q = q;
	}
}

void bch2_time_stats_exit(struct time_stats *stats)
{
	free_percpu(stats->buffer);
}

void bch2_time_stats_init(struct time_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
	stats->duration_stats_weighted.w = 8;
	stats->freq_stats_weighted.w = 8;
	stats->min_duration = U64_MAX;
	stats->min_freq = U64_MAX;
	spin_lock_init(&stats->lock);
}

/* ratelimit: */

/**
 * bch2_ratelimit_delay() - return how long to delay until the next time to do
 * some work
 * @d:		the struct bch_ratelimit to update
 *
 * Returns: the amount of time to delay by, in jiffies
 */
u64 bch2_ratelimit_delay(struct bch_ratelimit *d)
{
	u64 now = local_clock();

	return time_after64(d->next, now)
		? nsecs_to_jiffies(d->next - now)
		: 0;
}

/**
 * bch2_ratelimit_increment() - increment @d by the amount of work done
 * @d:		the struct bch_ratelimit to update
 * @done:	the amount of work done, in arbitrary units
 */
void bch2_ratelimit_increment(struct bch_ratelimit *d, u64 done)
{
	u64 now = local_clock();

	d->next += div_u64(done * NSEC_PER_SEC, d->rate);

	if (time_before64(now + NSEC_PER_SEC, d->next))
		d->next = now + NSEC_PER_SEC;

	if (time_after64(now - NSEC_PER_SEC * 2, d->next))
		d->next = now - NSEC_PER_SEC * 2;
}

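/*
 * Usage sketch (hypothetical caller; work_left()/do_work() are not real
 * helpers): a throttled worker accounts each unit of work and sleeps for
 * whatever delay the limiter asks for:
 *
 *	while (work_left()) {
 *		bch2_ratelimit_increment(&d, sectors);
 *		schedule_timeout_interruptible(bch2_ratelimit_delay(&d));
 *		do_work();
 *	}
 *
 * The clamping above keeps d->next within [now - 2s, now + 1s], so a stall
 * or a rate change cannot build up unbounded credit or debt.
 */
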
/* pd controller: */

/*
 * Updates pd_controller. Attempts to scale inputted values to units per second.
 * @target: desired value
 * @actual: current value
 *
 * @sign: 1 or -1; 1 if increasing the rate makes actual go up, -1 if increasing
 * it makes actual go down.
 */
void bch2_pd_controller_update(struct bch_pd_controller *pd,
			       s64 target, s64 actual, int sign)
{
	s64 proportional, derivative, change;

	unsigned long seconds_since_update = (jiffies - pd->last_update) / HZ;

	if (seconds_since_update == 0)
		return;

	pd->last_update = jiffies;

	proportional = actual - target;
	proportional *= seconds_since_update;
	proportional = div_s64(proportional, pd->p_term_inverse);

	derivative = actual - pd->last_actual;
	derivative = div_s64(derivative, seconds_since_update);
	derivative = ewma_add(pd->smoothed_derivative, derivative,
			      (pd->d_term / seconds_since_update) ?: 1);
	derivative = derivative * pd->d_term;
	derivative = div_s64(derivative, pd->p_term_inverse);

	change = proportional + derivative;

	/* Don't increase rate if not keeping up */
	if (change > 0 &&
	    pd->backpressure &&
	    time_after64(local_clock(),
			 pd->rate.next + NSEC_PER_MSEC))
		change = 0;

	change *= (sign * -1);

	pd->rate.rate = clamp_t(s64, (s64) pd->rate.rate + change,
				1, UINT_MAX);

	pd->last_actual		= actual;
	pd->last_derivative	= derivative;
	pd->last_proportional	= proportional;
	pd->last_change		= change;
	pd->last_target		= target;
}

void bch2_pd_controller_init(struct bch_pd_controller *pd)
{
	pd->rate.rate		= 1024;
	pd->last_update		= jiffies;
	pd->p_term_inverse	= 6000;
	pd->d_term		= 30;
	pd->d_smooth		= pd->d_term;
	pd->backpressure	= 1;
}

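/*
 * Usage sketch (names hypothetical): a periodic worker feeds the controller
 * a setpoint and a measurement, then consumes the adjusted rate:
 *
 *	bch2_pd_controller_update(&pd, target_dirty, current_dirty, -1);
 *	writeback_rate = pd.rate.rate;
 *
 * sign is -1 here because raising the writeback rate lowers the amount of
 * dirty data, i.e. increasing the rate makes "actual" go down.
 */
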
void bch2_pd_controller_debug_to_text(struct printbuf *out, struct bch_pd_controller *pd)
{
	if (!out->nr_tabstops)
		printbuf_tabstop_push(out, 20);

	prt_printf(out, "rate:");
	prt_tab(out);
	prt_human_readable_s64(out, pd->rate.rate);
	prt_newline(out);

	prt_printf(out, "target:");
	prt_tab(out);
	prt_human_readable_u64(out, pd->last_target);
	prt_newline(out);

	prt_printf(out, "actual:");
	prt_tab(out);
	prt_human_readable_u64(out, pd->last_actual);
	prt_newline(out);

	prt_printf(out, "proportional:");
	prt_tab(out);
	prt_human_readable_s64(out, pd->last_proportional);
	prt_newline(out);

	prt_printf(out, "derivative:");
	prt_tab(out);
	prt_human_readable_s64(out, pd->last_derivative);
	prt_newline(out);

	prt_printf(out, "change:");
	prt_tab(out);
	prt_human_readable_s64(out, pd->last_change);
	prt_newline(out);

	prt_printf(out, "next io:");
	prt_tab(out);
	prt_printf(out, "%llims", div64_s64(pd->rate.next - local_clock(), NSEC_PER_MSEC));
	prt_newline(out);
}

/* misc: */

void bch2_bio_map(struct bio *bio, void *base, size_t size)
{
	while (size) {
		struct page *page = is_vmalloc_addr(base)
				? vmalloc_to_page(base)
				: virt_to_page(base);
		unsigned offset = offset_in_page(base);
		unsigned len = min_t(size_t, PAGE_SIZE - offset, size);

		BUG_ON(!bio_add_page(bio, page, len, offset));
		size -= len;
		base += len;
	}
}

int bch2_bio_alloc_pages(struct bio *bio, size_t size, gfp_t gfp_mask)
{
	while (size) {
		struct page *page = alloc_page(gfp_mask);
		unsigned len = min_t(size_t, PAGE_SIZE, size);

		if (!page)
			return -ENOMEM;

		if (unlikely(!bio_add_page(bio, page, len, 0))) {
			__free_page(page);
			break;
		}

		size -= len;
	}

	return 0;
}

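/*
 * Usage sketch (illustrative; buf/len are hypothetical): bch2_bio_map() maps
 * an existing buffer - including vmalloc'd memory - into a bio, while
 * bch2_bio_alloc_pages() backs a bio with freshly allocated pages:
 *
 *	bio = bio_alloc(bdev, DIV_ROUND_UP(len, PAGE_SIZE),
 *			REQ_OP_READ, GFP_KERNEL);
 *	bch2_bio_map(bio, buf, len);
 */
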
size_t bch2_rand_range(size_t max)
{
	size_t rand;

	if (!max)
		return 0;

	do {
		rand = get_random_long();
		rand &= roundup_pow_of_two(max) - 1;
	} while (rand >= max);

	return rand;
}

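/*
 * Rejection sampling avoids modulo bias: e.g. for max = 5 the random value
 * is masked to 0..7 (roundup_pow_of_two(5) - 1 == 7) and the loop simply
 * retries on 5..7, so each of 0..4 remains equally likely.
 */
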
void memcpy_to_bio(struct bio *dst, struct bvec_iter dst_iter, const void *src)
{
	struct bio_vec bv;
	struct bvec_iter iter;

	__bio_for_each_segment(bv, dst, iter, dst_iter) {
		void *dstp = kmap_atomic(bv.bv_page);

		memcpy(dstp + bv.bv_offset, src, bv.bv_len);
		kunmap_atomic(dstp);

		src += bv.bv_len;
	}
}

void memcpy_from_bio(void *dst, struct bio *src, struct bvec_iter src_iter)
{
	struct bio_vec bv;
	struct bvec_iter iter;

	__bio_for_each_segment(bv, src, iter, src_iter) {
		void *srcp = kmap_atomic(bv.bv_page);

		memcpy(dst, srcp + bv.bv_offset, bv.bv_len);
		kunmap_atomic(srcp);

		dst += bv.bv_len;
	}
}

static int alignment_ok(const void *base, size_t align)
{
	return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
		((unsigned long)base & (align - 1)) == 0;
}

static void u32_swap(void *a, void *b, size_t size)
{
	u32 t = *(u32 *)a;
	*(u32 *)a = *(u32 *)b;
	*(u32 *)b = t;
}

static void u64_swap(void *a, void *b, size_t size)
{
	u64 t = *(u64 *)a;
	*(u64 *)a = *(u64 *)b;
	*(u64 *)b = t;
}

static void generic_swap(void *a, void *b, size_t size)
{
	char t;

	do {
		t = *(char *)a;
		*(char *)a++ = *(char *)b;
		*(char *)b++ = t;
	} while (--size > 0);
}

static inline int do_cmp(void *base, size_t n, size_t size,
			 int (*cmp_func)(const void *, const void *, size_t),
			 size_t l, size_t r)
{
	return cmp_func(base + inorder_to_eytzinger0(l, n) * size,
			base + inorder_to_eytzinger0(r, n) * size,
			size);
}

static inline void do_swap(void *base, size_t n, size_t size,
			   void (*swap_func)(void *, void *, size_t),
			   size_t l, size_t r)
{
	swap_func(base + inorder_to_eytzinger0(l, n) * size,
		  base + inorder_to_eytzinger0(r, n) * size,
		  size);
}

void eytzinger0_sort(void *base, size_t n, size_t size,
		     int (*cmp_func)(const void *, const void *, size_t),
		     void (*swap_func)(void *, void *, size_t))
{
	int i, c, r;

	if (!swap_func) {
		if (size == 4 && alignment_ok(base, 4))
			swap_func = u32_swap;
		else if (size == 8 && alignment_ok(base, 8))
			swap_func = u64_swap;
		else
			swap_func = generic_swap;
	}

	/* heapify */
	for (i = n / 2 - 1; i >= 0; --i) {
		for (r = i; r * 2 + 1 < n; r = c) {
			c = r * 2 + 1;

			if (c + 1 < n &&
			    do_cmp(base, n, size, cmp_func, c, c + 1) < 0)
				c++;

			if (do_cmp(base, n, size, cmp_func, r, c) >= 0)
				break;

			do_swap(base, n, size, swap_func, r, c);
		}
	}

	/* sort */
	for (i = n - 1; i > 0; --i) {
		do_swap(base, n, size, swap_func, 0, i);

		for (r = 0; r * 2 + 1 < i; r = c) {
			c = r * 2 + 1;

			if (c + 1 < i &&
			    do_cmp(base, n, size, cmp_func, c, c + 1) < 0)
				c++;

			if (do_cmp(base, n, size, cmp_func, r, c) >= 0)
				break;

			do_swap(base, n, size, swap_func, r, c);
		}
	}
}

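/*
 * Example: sorting the values { 1 .. 7 } into 0-based eytzinger order
 * leaves the array as { 4, 2, 6, 1, 3, 5, 7 } - a BFS-layout binary search
 * tree (root at index 0, children of i at 2i+1 and 2i+2), whose in-order
 * walk via eytzinger0_for_each() visits 1 through 7 in sorted order.
 */
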
void sort_cmp_size(void *base, size_t num, size_t size,
		   int (*cmp_func)(const void *, const void *, size_t),
		   void (*swap_func)(void *, void *, size_t size))
{
	/* pre-scale counters for performance */
	int i = (num/2 - 1) * size, n = num * size, c, r;

	if (!swap_func) {
		if (size == 4 && alignment_ok(base, 4))
			swap_func = u32_swap;
		else if (size == 8 && alignment_ok(base, 8))
			swap_func = u64_swap;
		else
			swap_func = generic_swap;
	}

	/* heapify */
	for ( ; i >= 0; i -= size) {
		for (r = i; r * 2 + size < n; r = c) {
			c = r * 2 + size;
			if (c < n - size &&
			    cmp_func(base + c, base + c + size, size) < 0)
				c += size;
			if (cmp_func(base + r, base + c, size) >= 0)
				break;
			swap_func(base + r, base + c, size);
		}
	}

	/* sort */
	for (i = n - size; i > 0; i -= size) {
		swap_func(base, base + i, size);
		for (r = 0; r * 2 + size < i; r = c) {
			c = r * 2 + size;
			if (c < i - size &&
			    cmp_func(base + c, base + c + size, size) < 0)
				c += size;
			if (cmp_func(base + r, base + c, size) >= 0)
				break;
			swap_func(base + r, base + c, size);
		}
	}
}

static void mempool_free_vp(void *element, void *pool_data)
{
	size_t size = (size_t) pool_data;

	vpfree(element, size);
}

static void *mempool_alloc_vp(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t) pool_data;

	return vpmalloc(size, gfp_mask);
}

int mempool_init_kvpmalloc_pool(mempool_t *pool, int min_nr, size_t size)
{
	return size < PAGE_SIZE
		? mempool_init_kmalloc_pool(pool, min_nr, size)
		: mempool_init(pool, min_nr, mempool_alloc_vp,
			       mempool_free_vp, (void *) size);
}

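/*
 * Usage sketch (illustrative): callers that need large, rarely-failing
 * allocations can back them with a reserve element:
 *
 *	mempool_t pool;
 *	int ret = mempool_init_kvpmalloc_pool(&pool, 1, 1 << 20);
 *	void *buf = mempool_alloc(&pool, GFP_NOFS);
 *
 * Sub-page sizes fall back to a plain kmalloc pool; larger sizes go through
 * the vpmalloc()/vpfree() helpers above so the reserve may be vmalloc-backed.
 */
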
void eytzinger1_test(void)
{
	unsigned inorder, eytz, size;

	pr_info("1 based eytzinger test:");

	for (size = 2;
	     size < 65536;
	     size++) {
		unsigned extra = eytzinger1_extra(size);

		if (!(size % 4096))
			pr_info("tree size %u", size);

		BUG_ON(eytzinger1_prev(0, size) != eytzinger1_last(size));
		BUG_ON(eytzinger1_next(0, size) != eytzinger1_first(size));

		BUG_ON(eytzinger1_prev(eytzinger1_first(size), size) != 0);
		BUG_ON(eytzinger1_next(eytzinger1_last(size), size) != 0);

		inorder = 1;
		eytzinger1_for_each(eytz, size) {
			BUG_ON(__inorder_to_eytzinger1(inorder, size, extra) != eytz);
			BUG_ON(__eytzinger1_to_inorder(eytz, size, extra) != inorder);
			BUG_ON(eytz != eytzinger1_last(size) &&
			       eytzinger1_prev(eytzinger1_next(eytz, size), size) != eytz);

			inorder++;
		}
	}
}

void eytzinger0_test(void)
{
	unsigned inorder, eytz, size;

	pr_info("0 based eytzinger test:");

	for (size = 1;
	     size < 65536;
	     size++) {
		unsigned extra = eytzinger0_extra(size);

		if (!(size % 4096))
			pr_info("tree size %u", size);

		BUG_ON(eytzinger0_prev(-1, size) != eytzinger0_last(size));
		BUG_ON(eytzinger0_next(-1, size) != eytzinger0_first(size));

		BUG_ON(eytzinger0_prev(eytzinger0_first(size), size) != -1);
		BUG_ON(eytzinger0_next(eytzinger0_last(size), size) != -1);

		inorder = 0;
		eytzinger0_for_each(eytz, size) {
			BUG_ON(__inorder_to_eytzinger0(inorder, size, extra) != eytz);
			BUG_ON(__eytzinger0_to_inorder(eytz, size, extra) != inorder);
			BUG_ON(eytz != eytzinger0_last(size) &&
			       eytzinger0_prev(eytzinger0_next(eytz, size), size) != eytz);

			inorder++;
		}
	}
}

static inline int cmp_u16(const void *_l, const void *_r, size_t size)
{
	const u16 *l = _l, *r = _r;

	return (*l > *r) - (*r > *l);
}

static void eytzinger0_find_test_val(u16 *test_array, unsigned nr, u16 search)
{
	int i, c1 = -1, c2 = -1;
	ssize_t r;

	r = eytzinger0_find_le(test_array, nr,
			       sizeof(test_array[0]),
			       cmp_u16, &search);
	if (r >= 0)
		c1 = test_array[r];

	for (i = 0; i < nr; i++)
		if (test_array[i] <= search && test_array[i] > c2)
			c2 = test_array[i];

	if (c1 != c2) {
		eytzinger0_for_each(i, nr)
			pr_info("[%3u] = %12u", i, test_array[i]);
		pr_info("find_le(%2u) -> [%2zi] = %2i should be %2i",
			search, r, c1, c2);
	}
}

void eytzinger0_find_test(void)
{
	unsigned i, nr, allocated = 1 << 12;
	u16 *test_array = kmalloc_array(allocated, sizeof(test_array[0]), GFP_KERNEL);

	for (nr = 1; nr < allocated; nr++) {
		pr_info("testing %u elems", nr);

		get_random_bytes(test_array, nr * sizeof(test_array[0]));
		eytzinger0_sort(test_array, nr, sizeof(test_array[0]), cmp_u16, NULL);

		/* verify array is sorted correctly: */
		eytzinger0_for_each(i, nr)
			BUG_ON(i != eytzinger0_last(nr) &&
			       test_array[i] > test_array[eytzinger0_next(i, nr)]);

		for (i = 0; i < U16_MAX; i += 1 << 12)
			eytzinger0_find_test_val(test_array, nr, i);

		for (i = 0; i < nr; i++) {
			eytzinger0_find_test_val(test_array, nr, test_array[i] - 1);
			eytzinger0_find_test_val(test_array, nr, test_array[i]);
			eytzinger0_find_test_val(test_array, nr, test_array[i] + 1);
		}
	}

	kfree(test_array);
}

/*
 * Accumulate percpu counters onto one cpu's copy - only valid when access
 * against any percpu counter is guarded against
 */
u64 *bch2_acc_percpu_u64s(u64 __percpu *p, unsigned nr)
{
	u64 *ret;
	int cpu;

	/* access to pcpu vars has to be blocked by other locking */
	preempt_disable();
	ret = this_cpu_ptr(p);
	preempt_enable();

	for_each_possible_cpu(cpu) {
		u64 *i = per_cpu_ptr(p, cpu);

		if (i != ret) {
			acc_u64s(ret, i, nr);
			memset(i, 0, nr * sizeof(u64));
		}
	}

	return ret;
}

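/*
 * Usage sketch (illustrative; counters/nr are hypothetical): with all
 * writers to @p excluded by the caller's locking, a stats read folds every
 * cpu's counters into one cpu's copy and prints the totals:
 *
 *	u64 *totals = bch2_acc_percpu_u64s(counters, nr);
 *	for (i = 0; i < nr; i++)
 *		prt_printf(out, "%llu\n", totals[i]);
 */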