/*
 * random utility code, for bcache but in theory not specific to bcache
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/log2.h>
#include <linux/math64.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched/clock.h>

#include "eytzinger.h"
#include "util.h"
#define simple_strtoint(c, end, base)   simple_strtol(c, end, base)
#define simple_strtouint(c, end, base)  simple_strtoul(c, end, base)
#define STRTO_H(name, type)                                     \
int bch2_ ## name ## _h(const char *cp, type *res)              \
{                                                               \
        int u = 0;                                              \
        char *e;                                                \
        type i = simple_ ## name(cp, &e, 10);                   \
                                                                \
        switch (tolower(*e)) {                                  \
        default:                                                \
                return -EINVAL;                                 \
        case 'y':                                               \
        case 'z':                                               \
                u++;                                            \
        case 'e':                                               \
                u++;                                            \
        case 'p':                                               \
                u++;                                            \
        case 't':                                               \
                u++;                                            \
        case 'g':                                               \
                u++;                                            \
        case 'm':                                               \
                u++;                                            \
        case 'k':                                               \
                u++;                                            \
                if (e++ == cp)                                  \
                        return -EINVAL;                         \
        case '\n':                                              \
        case '\0':                                              \
                if (*e == '\n')                                 \
                        e++;                                    \
        }                                                       \
                                                                \
        if (*e)                                                 \
                return -EINVAL;                                 \
                                                                \
        while (u--) {                                           \
                if ((type) ~0 > 0 &&                            \
                    (type) ~0 / 1024 <= i)                      \
                        return -EINVAL;                         \
                if ((i > 0 && ANYSINT_MAX(type) / 1024 < i) ||  \
                    (i < 0 && -ANYSINT_MAX(type) / 1024 > i))   \
                        return -EINVAL;                         \
                i *= 1024;                                      \
        }                                                       \
                                                                \
        *res = i;                                               \
        return 0;                                               \
}
STRTO_H(strtoint, int)
STRTO_H(strtouint, unsigned int)
STRTO_H(strtoll, long long)
STRTO_H(strtoull, unsigned long long)
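
/*
 * Illustrative usage sketch (not part of the original source): the
 * generated helpers parse an integer with an optional size suffix:
 *
 *        unsigned long long v;
 *
 *        bch2_strtoull_h("16", &v);        // v == 16
 *        bch2_strtoull_h("16k", &v);       // v == 16 << 10
 *        bch2_strtoull_h("16m", &v);       // v == 16 << 20
 *
 * returning -EINVAL on an unknown suffix or on overflow.
 */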
ssize_t bch2_hprint(char *buf, s64 v)
{
        static const char units[] = "?kMGTPEZY";
        char dec[4] = "";
        int u, t = 0;

        for (u = 0; v >= 1024 || v <= -1024; u++) {
                t = v & ~(~0 << 10);
                v >>= 10;
        }

        if (!u)
                return sprintf(buf, "%lli", v);

        /*
         * 103 is magic: t is in the range [-1023, 1023] and we want
         * to turn it into [-9, 9]
         */
        if (v < 100 && v > -100)
                scnprintf(dec, sizeof(dec), ".%i", t / 103);

        return sprintf(buf, "%lli%s%c", v, dec, units[u]);
}
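
/*
 * Worked example (a sketch, not from the original source): for v = 2048
 * the loop shifts once, leaving v == 2, t == 0, u == 1, so the output is
 * "2.0k"; v = 1127 gives t == 103 and "1.1k"; values in (-1024, 1024)
 * print with no suffix.
 */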
ssize_t bch2_scnprint_string_list(char *buf, size_t size,
                                  const char * const list[],
                                  size_t selected)
{
        char *out = buf;
        size_t i;

        for (i = 0; list[i]; i++)
                out += scnprintf(out, buf + size - out,
                                 i == selected ? "[%s] " : "%s ", list[i]);

        out[-1] = '\n';

        return out - buf;
}
ssize_t bch2_read_string_list(const char *buf, const char * const list[])
{
        size_t i, len;

        buf = skip_spaces(buf);
        len = strlen(buf);
        while (len && isspace(buf[len - 1]))
                --len;

        for (i = 0; list[i]; i++)
                if (strlen(list[i]) == len &&
                    !memcmp(buf, list[i], len))
                        break;

        return list[i] ? i : -EINVAL;
}
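
/*
 * Illustrative sketch (not part of the original source): with
 *
 *        static const char * const opts[] = { "none", "lz4", "gzip", NULL };
 *
 * bch2_read_string_list("lz4\n", opts) returns 1; surrounding whitespace
 * is ignored and an unrecognized string returns -EINVAL.
 */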
ssize_t bch2_scnprint_flag_list(char *buf, size_t size,
                                const char * const list[], u64 flags)
{
        char *out = buf, *end = buf + size;
        unsigned bit, nr = 0;

        while (list[nr])
                nr++;

        while (flags && (bit = __ffs(flags)) < nr) {
                out += scnprintf(out, end - out, "%s,", list[bit]);
                flags ^= 1ULL << bit;
        }

        if (out != buf)
                *--out = '\0';

        return out - buf;
}
u64 bch2_read_flag_list(char *opt, const char * const list[])
{
        u64 ret = 0;
        char *p, *s, *d = kstrndup(opt, PAGE_SIZE - 1, GFP_KERNEL);

        s = strim(d);

        while ((p = strsep(&s, ","))) {
                int flag = bch2_read_string_list(p, list);

                if (flag < 0) {
                        ret = -1;
                        break;
                }

                ret |= 1ULL << flag;
        }

        kfree(d);
        return ret;
}
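
/*
 * Illustrative round trip (a sketch, not from the original source): with
 *
 *        static const char * const f[] = { "a", "b", "c", NULL };
 *
 * bch2_scnprint_flag_list(buf, size, f, 5) writes "a,c" (bits 0 and 2),
 * and bch2_read_flag_list("a,c", f) reconstructs the value 5.
 */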
bool bch2_is_zero(const void *_p, size_t n)
{
        const char *p = _p;
        size_t i;

        for (i = 0; i < n; i++)
                if (p[i])
                        return false;
        return true;
}
void bch2_quantiles_update(struct quantiles *q, u64 v)
{
        unsigned i = 0;

        while (i < ARRAY_SIZE(q->entries)) {
                struct quantile_entry *e = q->entries + i;

                if (unlikely(!e->step)) {
                        e->m = v;
                        e->step = max_t(unsigned, v / 2, 1024);
                } else if (e->m > v) {
                        e->m = e->m >= e->step
                                ? e->m - e->step
                                : 0;
                } else if (e->m < v) {
                        e->m = e->m + e->step > e->m
                                ? e->m + e->step
                                : U32_MAX;
                }

                if ((e->m > v ? e->m - v : v - e->m) < e->step)
                        e->step = max_t(unsigned, e->step / 2, 1);

                i = eytzinger0_child(i, v > e->m);
        }
}
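
/*
 * Note (not from the original source): the function above is an online
 * quantile estimator; each entry nudges its estimate m toward incoming
 * samples by step, halving step as the estimate converges. Entries live
 * in eytzinger (breadth-first binary tree) order, so
 * eytzinger0_child(i, right) evaluates to 2 * i + 1 + right, e.g. node 0's
 * children are nodes 1 and 2.
 */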
static void bch2_time_stats_update_one(struct time_stats *stats,
                                       u64 start, u64 end)
{
        u64 duration, freq;

        duration = time_after64(end, start)
                ? end - start : 0;
        freq = time_after64(end, stats->last_event)
                ? end - stats->last_event : 0;

        stats->count++;

        stats->average_duration = stats->average_duration
                ? ewma_add(stats->average_duration, duration, 6)
                : duration;

        stats->average_frequency = stats->average_frequency
                ? ewma_add(stats->average_frequency, freq, 6)
                : freq;

        stats->max_duration = max(stats->max_duration, duration);

        stats->last_event = end;

        bch2_quantiles_update(&stats->quantiles, duration);
}
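
/*
 * Note (not from the original source): ewma_add(avg, val, 6) above is an
 * exponentially weighted moving average with weight 2^-6: roughly
 * avg = avg - avg/64 + val/64, so each sample contributes ~1/64 of its
 * value and older history decays geometrically.
 */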
void __bch2_time_stats_update(struct time_stats *stats, u64 start, u64 end)
{
        unsigned long flags;

        if (!stats->buffer) {
                spin_lock_irqsave(&stats->lock, flags);
                bch2_time_stats_update_one(stats, start, end);

                if (stats->average_frequency < 32 &&
                    stats->count > 1024)
                        stats->buffer =
                                alloc_percpu_gfp(struct time_stat_buffer,
                                                 GFP_ATOMIC);
                spin_unlock_irqrestore(&stats->lock, flags);
        } else {
                struct time_stat_buffer_entry *i;
                struct time_stat_buffer *b;

                preempt_disable();
                b = this_cpu_ptr(stats->buffer);

                BUG_ON(b->nr >= ARRAY_SIZE(b->entries));
                b->entries[b->nr++] = (struct time_stat_buffer_entry) {
                        .start = start,
                        .end = end
                };

                if (b->nr == ARRAY_SIZE(b->entries)) {
                        spin_lock_irqsave(&stats->lock, flags);
                        for (i = b->entries;
                             i < b->entries + ARRAY_SIZE(b->entries);
                             i++)
                                bch2_time_stats_update_one(stats, i->start, i->end);
                        spin_unlock_irqrestore(&stats->lock, flags);
                        b->nr = 0;
                }

                preempt_enable();
        }
}
static const struct time_unit {
        const char      *name;
        u32             nsecs;
} time_units[] = {
        { "ns",         1               },
        { "us",         NSEC_PER_USEC   },
        { "ms",         NSEC_PER_MSEC   },
        { "sec",        NSEC_PER_SEC    },
};
static const struct time_unit *pick_time_units(u64 ns)
{
        const struct time_unit *u;

        for (u = time_units;
             u + 1 < time_units + ARRAY_SIZE(time_units) &&
             ns >= u[1].nsecs << 1;
             u++)
                ;

        return u;
}

static size_t pr_time_units(char *buf, size_t len, u64 ns)
{
        const struct time_unit *u = pick_time_units(ns);

        return scnprintf(buf, len, "%llu %s", div_u64(ns, u->nsecs), u->name);
}
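
/*
 * Worked example (a sketch, not from the original source): for
 * ns = 1500000, pick_time_units() advances past "ns" (1500000 >=
 * 2 * NSEC_PER_USEC) but stops before "ms" (1500000 < 2 * NSEC_PER_MSEC),
 * so pr_time_units() prints "1500 us". The factor of two ensures the
 * truncated value shown is always at least 2 in the chosen unit.
 */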
size_t bch2_time_stats_print(struct time_stats *stats, char *buf, size_t len)
{
        char *out = buf, *end = buf + len;
        const struct time_unit *u;
        u64 freq = READ_ONCE(stats->average_frequency);
        u64 q, last_q = 0;
        int i;

        out += scnprintf(out, end - out, "count:\t\t%llu\n",
                         stats->count);
        out += scnprintf(out, end - out, "rate:\t\t%llu/sec\n",
                         freq ? div64_u64(NSEC_PER_SEC, freq) : 0);

        out += scnprintf(out, end - out, "frequency:\t");
        out += pr_time_units(out, end - out, freq);

        out += scnprintf(out, end - out, "\navg duration:\t");
        out += pr_time_units(out, end - out, stats->average_duration);

        out += scnprintf(out, end - out, "\nmax duration:\t");
        out += pr_time_units(out, end - out, stats->max_duration);

        i = eytzinger0_first(NR_QUANTILES);
        u = pick_time_units(stats->quantiles.entries[i].m);

        out += scnprintf(out, end - out, "\nquantiles (%s):\t", u->name);
        eytzinger0_for_each(i, NR_QUANTILES) {
                bool is_last = eytzinger0_next(i, NR_QUANTILES) == -1;

                q = max(stats->quantiles.entries[i].m, last_q);
                out += scnprintf(out, end - out, "%llu%s",
                                 div_u64(q, u->nsecs),
                                 is_last ? "\n" : " ");
                last_q = q;
        }

        return out - buf;
}
void bch2_time_stats_exit(struct time_stats *stats)
{
        free_percpu(stats->buffer);
}

void bch2_time_stats_init(struct time_stats *stats)
{
        memset(stats, 0, sizeof(*stats));
        spin_lock_init(&stats->lock);
}
/**
 * bch2_ratelimit_delay() - return how long to delay until the next time to do
 * some work
 *
 * @d - the struct bch_ratelimit to update
 *
 * Returns the amount of time to delay by, in jiffies
 */
u64 bch2_ratelimit_delay(struct bch_ratelimit *d)
{
        u64 now = local_clock();

        return time_after64(d->next, now)
                ? nsecs_to_jiffies(d->next - now)
                : 0;
}
/**
 * bch2_ratelimit_increment() - increment @d by the amount of work done
 *
 * @d - the struct bch_ratelimit to update
 * @done - the amount of work done, in arbitrary units
 */
void bch2_ratelimit_increment(struct bch_ratelimit *d, u64 done)
{
        u64 now = local_clock();

        d->next += div_u64(done * NSEC_PER_SEC, d->rate);

        if (time_before64(now + NSEC_PER_SEC, d->next))
                d->next = now + NSEC_PER_SEC;

        if (time_after64(now - NSEC_PER_SEC * 2, d->next))
                d->next = now - NSEC_PER_SEC * 2;
}
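
/*
 * Worked example (a sketch, not from the original source): with
 * d->rate = 1000 units/sec, crediting 500 units of work advances d->next
 * by 500 * NSEC_PER_SEC / 1000 = 0.5s. The two clamps keep d->next within
 * [now - 2s, now + 1s], so idle time can't bank unbounded credit and a
 * burst can't push the next deadline more than a second out.
 */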
int bch2_ratelimit_wait_freezable_stoppable(struct bch_ratelimit *d)
{
        bool kthread = (current->flags & PF_KTHREAD) != 0;

        while (1) {
                u64 delay = bch2_ratelimit_delay(d);

                if (delay)
                        set_current_state(TASK_INTERRUPTIBLE);

                if (kthread && kthread_should_stop())
                        return 1;

                if (!delay)
                        return 0;

                schedule_timeout(delay);
                try_to_freeze();
        }
}
/*
 * Updates pd_controller. Attempts to scale input values to units per second.
 * @target: desired value
 * @actual: current value
 *
 * @sign: 1 or -1; 1 if increasing the rate makes actual go up, -1 if increasing
 * it makes actual go down.
 */
void bch2_pd_controller_update(struct bch_pd_controller *pd,
                               s64 target, s64 actual, int sign)
{
        s64 proportional, derivative, change;
        unsigned long seconds_since_update = (jiffies - pd->last_update) / HZ;

        if (seconds_since_update == 0)
                return;

        pd->last_update = jiffies;

        proportional = actual - target;
        proportional *= seconds_since_update;
        proportional = div_s64(proportional, pd->p_term_inverse);

        derivative = actual - pd->last_actual;
        derivative = div_s64(derivative, seconds_since_update);
        derivative = ewma_add(pd->smoothed_derivative, derivative,
                              (pd->d_term / seconds_since_update) ?: 1);
        derivative = derivative * pd->d_term;
        derivative = div_s64(derivative, pd->p_term_inverse);

        change = proportional + derivative;

        /* Don't increase rate if not keeping up */
        if (change > 0 &&
            pd->backpressure &&
            time_after64(local_clock(),
                         pd->rate.next + NSEC_PER_MSEC))
                change = 0;

        change *= (sign * -1);

        pd->rate.rate = clamp_t(s64, (s64) pd->rate.rate + change,
                                1, UINT_MAX);

        pd->last_actual       = actual;
        pd->last_derivative   = derivative;
        pd->last_proportional = proportional;
        pd->last_change       = change;
        pd->last_target       = target;
}
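
/*
 * Worked example (a sketch, not from the original source): with the init
 * defaults below (p_term_inverse = 6000), sign = -1 (increasing the rate
 * makes actual shrink, as for writeback), target = 0, actual = 12000 and
 * one second since the last update:
 *
 *        proportional = (12000 - 0) * 1 / 6000 = 2
 *
 * so, ignoring the derivative term, change = 2 * (-1 * -1) = 2 and the
 * rate rises by 2 units/sec. The backpressure check zeroes a positive
 * change when the rate limiter is already running behind.
 */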
void bch2_pd_controller_init(struct bch_pd_controller *pd)
{
        pd->rate.rate = 1024;
        pd->last_update = jiffies;
        pd->p_term_inverse = 6000;
        pd->d_smooth = pd->d_term;
        pd->backpressure = 1;
}
size_t bch2_pd_controller_print_debug(struct bch_pd_controller *pd, char *buf)
{
        /* 2^64 - 1 is 20 digits, plus null byte */
        char rate[21];
        char actual[21];
        char target[21];
        char proportional[21];
        char derivative[21];
        char change[21];
        s64 next_io;

        bch2_hprint(rate,       pd->rate.rate);
        bch2_hprint(actual,     pd->last_actual);
        bch2_hprint(target,     pd->last_target);
        bch2_hprint(proportional, pd->last_proportional);
        bch2_hprint(derivative, pd->last_derivative);
        bch2_hprint(change,     pd->last_change);

        next_io = div64_s64(pd->rate.next - local_clock(), NSEC_PER_MSEC);

        return sprintf(buf,
                       "rate:\t\t%s/sec\n"
                       "target:\t\t%s\n"
                       "actual:\t\t%s\n"
                       "proportional:\t%s\n"
                       "derivative:\t%s\n"
                       "change:\t\t%s/sec\n"
                       "next io:\t%llims\n",
                       rate, target, actual, proportional,
                       derivative, change, next_io);
}
void bch2_bio_map(struct bio *bio, void *base)
{
        size_t size = bio->bi_iter.bi_size;
        struct bio_vec *bv = bio->bi_io_vec;

        BUG_ON(!bio->bi_iter.bi_size);
        BUG_ON(bio->bi_vcnt);

        bv->bv_offset = base ? offset_in_page(base) : 0;
        goto start;

        for (; size; bio->bi_vcnt++, bv++) {
                bv->bv_offset = 0;
start:          bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset,
                                   size);
                BUG_ON(bio->bi_vcnt >= bio->bi_max_vecs);
                if (base) {
                        bv->bv_page = is_vmalloc_addr(base)
                                ? vmalloc_to_page(base)
                                : virt_to_page(base);

                        base += bv->bv_len;
                }

                size -= bv->bv_len;
        }
}
size_t bch2_rand_range(size_t max)
{
        size_t rand;

        do {
                rand = get_random_long();
                rand &= roundup_pow_of_two(max) - 1;
        } while (rand >= max);

        return rand;
}
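
/*
 * Note (not from the original source): the loop above is rejection
 * sampling. Masking with roundup_pow_of_two(max) - 1 confines the draw to
 * the smallest power-of-two range covering [0, max); draws >= max are
 * retried rather than reduced mod max, which would bias the low end. E.g.
 * for max = 5 the mask is 7 and draws of 5, 6 and 7 are discarded,
 * leaving 0..4 equally likely.
 */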
void memcpy_to_bio(struct bio *dst, struct bvec_iter dst_iter, void *src)
{
        struct bio_vec bv;
        struct bvec_iter iter;

        __bio_for_each_segment(bv, dst, iter, dst_iter) {
                void *dstp = kmap_atomic(bv.bv_page);
                memcpy(dstp + bv.bv_offset, src, bv.bv_len);
                kunmap_atomic(dstp);

                src += bv.bv_len;
        }
}
void memcpy_from_bio(void *dst, struct bio *src, struct bvec_iter src_iter)
{
        struct bio_vec bv;
        struct bvec_iter iter;

        __bio_for_each_segment(bv, src, iter, src_iter) {
                void *srcp = kmap_atomic(bv.bv_page);
                memcpy(dst, srcp + bv.bv_offset, bv.bv_len);
                kunmap_atomic(srcp);

                dst += bv.bv_len;
        }
}
size_t bch_scnmemcpy(char *buf, size_t size, const char *src, size_t len)
{
        size_t n;

        if (!size)
                return 0;

        n = min(size - 1, len);
        memcpy(buf, src, n);
        buf[n] = '\0';

        return n;
}
#include "eytzinger.h"
static int alignment_ok(const void *base, size_t align)
{
        return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
                ((unsigned long)base & (align - 1)) == 0;
}

static void u32_swap(void *a, void *b, size_t size)
{
        u32 t = *(u32 *)a;
        *(u32 *)a = *(u32 *)b;
        *(u32 *)b = t;
}

static void u64_swap(void *a, void *b, size_t size)
{
        u64 t = *(u64 *)a;
        *(u64 *)a = *(u64 *)b;
        *(u64 *)b = t;
}

static void generic_swap(void *a, void *b, size_t size)
{
        char t;

        do {
                t = *(char *)a;
                *(char *)a++ = *(char *)b;
                *(char *)b++ = t;
        } while (--size > 0);
}
static inline int do_cmp(void *base, size_t n, size_t size,
                         int (*cmp_func)(const void *, const void *, size_t),
                         size_t l, size_t r)
{
        return cmp_func(base + inorder_to_eytzinger0(l, n) * size,
                        base + inorder_to_eytzinger0(r, n) * size,
                        size);
}

static inline void do_swap(void *base, size_t n, size_t size,
                           void (*swap_func)(void *, void *, size_t),
                           size_t l, size_t r)
{
        swap_func(base + inorder_to_eytzinger0(l, n) * size,
                  base + inorder_to_eytzinger0(r, n) * size,
                  size);
}
void eytzinger0_sort(void *base, size_t n, size_t size,
                     int (*cmp_func)(const void *, const void *, size_t),
                     void (*swap_func)(void *, void *, size_t))
{
        int i, c, r;

        if (!swap_func) {
                if (size == 4 && alignment_ok(base, 4))
                        swap_func = u32_swap;
                else if (size == 8 && alignment_ok(base, 8))
                        swap_func = u64_swap;
                else
                        swap_func = generic_swap;
        }

        /* heapify */
        for (i = n / 2 - 1; i >= 0; --i) {
                for (r = i; r * 2 + 1 < n; r = c) {
                        c = r * 2 + 1;
                        if (c + 1 < n &&
                            do_cmp(base, n, size, cmp_func, c, c + 1) < 0)
                                c++;
                        if (do_cmp(base, n, size, cmp_func, r, c) >= 0)
                                break;
                        do_swap(base, n, size, swap_func, r, c);
                }
        }

        /* sort */
        for (i = n - 1; i > 0; --i) {
                do_swap(base, n, size, swap_func, 0, i);

                for (r = 0; r * 2 + 1 < i; r = c) {
                        c = r * 2 + 1;
                        if (c + 1 < i &&
                            do_cmp(base, n, size, cmp_func, c, c + 1) < 0)
                                c++;
                        if (do_cmp(base, n, size, cmp_func, r, c) >= 0)
                                break;
                        do_swap(base, n, size, swap_func, r, c);
                }
        }
}
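
/*
 * Note (not from the original source): the function above is ordinary
 * binary heapsort, except that array indices are remapped through
 * inorder_to_eytzinger0() in do_cmp()/do_swap(), so the result is sorted
 * in eytzinger (BFS tree) order rather than ascending order. Hypothetical
 * use, assuming the cmp_u16 helper defined later in this file:
 *
 *        u16 a[] = { 3, 1, 2 };
 *
 *        eytzinger0_sort(a, 3, sizeof(a[0]), cmp_u16, NULL);
 *        // a is now { 2, 1, 3 }: the root holds the median, its left
 *        // child the smallest element, its right child the largest.
 */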
void sort_cmp_size(void *base, size_t num, size_t size,
                   int (*cmp_func)(const void *, const void *, size_t),
                   void (*swap_func)(void *, void *, size_t size))
{
        /* pre-scale counters for performance */
        int i = (num / 2 - 1) * size, n = num * size, c, r;

        if (!swap_func) {
                if (size == 4 && alignment_ok(base, 4))
                        swap_func = u32_swap;
                else if (size == 8 && alignment_ok(base, 8))
                        swap_func = u64_swap;
                else
                        swap_func = generic_swap;
        }

        /* heapify */
        for ( ; i >= 0; i -= size) {
                for (r = i; r * 2 + size < n; r = c) {
                        c = r * 2 + size;
                        if (c < n - size &&
                            cmp_func(base + c, base + c + size, size) < 0)
                                c += size;
                        if (cmp_func(base + r, base + c, size) >= 0)
                                break;
                        swap_func(base + r, base + c, size);
                }
        }

        /* sort */
        for (i = n - size; i > 0; i -= size) {
                swap_func(base, base + i, size);
                for (r = 0; r * 2 + size < i; r = c) {
                        c = r * 2 + size;
                        if (c < i - size &&
                            cmp_func(base + c, base + c + size, size) < 0)
                                c += size;
                        if (cmp_func(base + r, base + c, size) >= 0)
                                break;
                        swap_func(base + r, base + c, size);
                }
        }
}
void mempool_free_vp(void *element, void *pool_data)
{
        size_t size = (size_t) pool_data;

        vpfree(element, size);
}

void *mempool_alloc_vp(gfp_t gfp_mask, void *pool_data)
{
        size_t size = (size_t) pool_data;

        return vpmalloc(size, gfp_mask);
}
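
/*
 * Illustrative usage sketch (not part of the original source): these are
 * mempool_alloc_t/mempool_free_t callbacks that pass the allocation size
 * through pool_data, so a pool of large, possibly vmalloc'd buffers can
 * be created as e.g.
 *
 *        mempool_t *pool = mempool_create(1, mempool_alloc_vp,
 *                                         mempool_free_vp,
 *                                         (void *) (size_t) (1 << 20));
 */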
void eytzinger1_test(void)
{
        unsigned inorder, eytz, size;

        pr_info("1 based eytzinger test:");

        for (size = 2;
             size < 65536;
             size++) {
                unsigned extra = eytzinger1_extra(size);

                if (!(size % 4096))
                        pr_info("tree size %u", size);

                BUG_ON(eytzinger1_prev(0, size) != eytzinger1_last(size));
                BUG_ON(eytzinger1_next(0, size) != eytzinger1_first(size));

                BUG_ON(eytzinger1_prev(eytzinger1_first(size), size) != 0);
                BUG_ON(eytzinger1_next(eytzinger1_last(size), size) != 0);

                inorder = 1;
                eytzinger1_for_each(eytz, size) {
                        BUG_ON(__inorder_to_eytzinger1(inorder, size, extra) != eytz);
                        BUG_ON(__eytzinger1_to_inorder(eytz, size, extra) != inorder);
                        BUG_ON(eytz != eytzinger1_last(size) &&
                               eytzinger1_prev(eytzinger1_next(eytz, size), size) != eytz);

                        inorder++;
                }
        }
}
void eytzinger0_test(void)
{
        unsigned inorder, eytz, size;

        pr_info("0 based eytzinger test:");

        for (size = 1;
             size < 65536;
             size++) {
                unsigned extra = eytzinger0_extra(size);

                if (!(size % 4096))
                        pr_info("tree size %u", size);

                BUG_ON(eytzinger0_prev(-1, size) != eytzinger0_last(size));
                BUG_ON(eytzinger0_next(-1, size) != eytzinger0_first(size));

                BUG_ON(eytzinger0_prev(eytzinger0_first(size), size) != -1);
                BUG_ON(eytzinger0_next(eytzinger0_last(size), size) != -1);

                inorder = 0;
                eytzinger0_for_each(eytz, size) {
                        BUG_ON(__inorder_to_eytzinger0(inorder, size, extra) != eytz);
                        BUG_ON(__eytzinger0_to_inorder(eytz, size, extra) != inorder);
                        BUG_ON(eytz != eytzinger0_last(size) &&
                               eytzinger0_prev(eytzinger0_next(eytz, size), size) != eytz);

                        inorder++;
                }
        }
}
static inline int cmp_u16(const void *_l, const void *_r, size_t size)
{
        const u16 *l = _l, *r = _r;

        return (*l > *r) - (*r > *l);
}
static void eytzinger0_find_test_val(u16 *test_array, unsigned nr, u16 search)
{
        int i, c1 = -1, c2 = -1;
        ssize_t r;

        r = eytzinger0_find_le(test_array, nr,
                               sizeof(test_array[0]),
                               cmp_u16, &search);
        if (r >= 0)
                c1 = test_array[r];

        for (i = 0; i < nr; i++)
                if (test_array[i] <= search && test_array[i] > c2)
                        c2 = test_array[i];

        if (c1 != c2) {
                eytzinger0_for_each(i, nr)
                        pr_info("[%3u] = %12u", i, test_array[i]);
                pr_info("find_le(%2u) -> [%2zi] = %2i should be %2i",
                        search, r, c1, c2);
        }
}
void eytzinger0_find_test(void)
{
        unsigned i, nr, allocated = 1 << 12;
        u16 *test_array = kmalloc_array(allocated, sizeof(test_array[0]), GFP_KERNEL);

        for (nr = 1; nr < allocated; nr++) {
                pr_info("testing %u elems", nr);

                get_random_bytes(test_array, nr * sizeof(test_array[0]));
                eytzinger0_sort(test_array, nr, sizeof(test_array[0]), cmp_u16, NULL);

                /* verify array is sorted correctly: */
                eytzinger0_for_each(i, nr)
                        BUG_ON(i != eytzinger0_last(nr) &&
                               test_array[i] > test_array[eytzinger0_next(i, nr)]);

                for (i = 0; i < U16_MAX; i += 1 << 12)
                        eytzinger0_find_test_val(test_array, nr, i);

                for (i = 0; i < nr; i++) {
                        eytzinger0_find_test_val(test_array, nr, test_array[i] - 1);
                        eytzinger0_find_test_val(test_array, nr, test_array[i]);
                        eytzinger0_find_test_val(test_array, nr, test_array[i] + 1);
                }
        }

        kfree(test_array);
}