/*
 * random utility code, for bcache but in theory not specific to bcache
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/log2.h>
#include <linux/math64.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched/clock.h>

#include "eytzinger.h"
#include "util.h"

static const char si_units[] = "?kMGTPEZY";

static int __bch2_strtoh(const char *cp, u64 *res,
			 u64 t_max, bool t_signed)
{
	bool positive = *cp != '-';
	unsigned u;
	u64 v = 0;

	if (*cp == '+' || *cp == '-')
		cp++;

	if (!isdigit(*cp))
		return -EINVAL;

	do {
		if (v > U64_MAX / 10)
			return -ERANGE;
		v *= 10;
		if (v > U64_MAX - (*cp - '0'))
			return -ERANGE;
		v += *cp - '0';
		cp++;
	} while (isdigit(*cp));

	for (u = 1; u < strlen(si_units); u++)
		if (*cp == si_units[u]) {
			cp++;
			goto got_unit;
		}
	u = 0;
got_unit:
	if (*cp == '\n')
		cp++;
	if (*cp)
		return -EINVAL;

	if (fls64(v) + u * 10 > 64)
		return -ERANGE;

	v <<= u * 10;

	if (positive) {
		if (v > t_max)
			return -ERANGE;
	} else {
		if (v && !t_signed)
			return -ERANGE;
		if (v > t_max + 1)
			return -ERANGE;
		v = -v;
	}

	*res = v;
	return 0;
}
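
/*
 * Illustrative behaviour (not part of the interface): "1k" parses to
 * 1 << 10 = 1024 and "2M" to 2 << 20; the fls64() check rejects values
 * whose SI suffix shift would overflow 64 bits before the shift is
 * performed.
 */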

#define STRTO_H(name, type)					\
int bch2_ ## name ## _h(const char *cp, type *res)		\
{								\
	u64 v;							\
	int ret = __bch2_strtoh(cp, &v, ANYSINT_MAX(type),	\
			ANYSINT_MAX(type) != ((type) ~0ULL));	\
	*res = v;						\
	return ret;						\
}

STRTO_H(strtoint, int)
STRTO_H(strtouint, unsigned int)
STRTO_H(strtoll, long long)
STRTO_H(strtoull, unsigned long long)
STRTO_H(strtou64, u64)

void bch2_hprint(struct printbuf *buf, s64 v)
{
	int u, t = 0;

	for (u = 0; v >= 1024 || v <= -1024; u++) {
		t = v & ~(~0U << 10);
		v >>= 10;
	}

	pr_buf(buf, "%lli", v);

	/*
	 * 103 is magic: t is in the range [-1023, 1023] and we want
	 * to turn it into [-9, 9]
	 */
	if (u && v < 100 && v > -100)
		pr_buf(buf, ".%i", t / 103);
	if (u)
		pr_buf(buf, "%c", si_units[u]);
}
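
/*
 * Worked example: v = 3500 runs the loop once, leaving v = 3 and
 * t = 3500 & 1023 = 428; 428 / 103 = 4, so the output is "3.4k".
 */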

void bch2_string_opt_to_text(struct printbuf *out,
			     const char * const list[],
			     size_t selected)
{
	size_t i;

	for (i = 0; list[i]; i++)
		pr_buf(out, i == selected ? "[%s] " : "%s ", list[i]);
}

void bch2_flags_to_text(struct printbuf *out,
			const char * const list[], u64 flags)
{
	unsigned bit, nr = 0;
	bool first = true;

	if (out->pos != out->end)
		*out->pos = '\0';

	while (list[nr])
		nr++;

	while (flags && (bit = __ffs(flags)) < nr) {
		if (!first)
			pr_buf(out, ",");
		first = false;
		pr_buf(out, "%s", list[bit]);
		flags ^= BIT_ULL(bit);
	}
}

u64 bch2_read_flag_list(char *opt, const char * const list[])
{
	u64 ret = 0;
	char *p, *s, *d = kstrndup(opt, PAGE_SIZE - 1, GFP_KERNEL);

	if (!d)
		return (u64) -1;

	s = strim(d);

	while ((p = strsep(&s, ","))) {
		int flag = match_string(list, -1, p);

		if (flag < 0) {
			ret = (u64) -1;
			break;
		}

		ret |= BIT_ULL(flag);
	}

	kfree(d);

	return ret;
}

bool bch2_is_zero(const void *_p, size_t n)
{
	const char *p = _p;
	size_t i;

	for (i = 0; i < n; i++)
		if (p[i])
			return false;
	return true;
}

/* time stats: */

static void bch2_quantiles_update(struct quantiles *q, u64 v)
{
	unsigned i = 0;

	while (i < ARRAY_SIZE(q->entries)) {
		struct quantile_entry *e = q->entries + i;

		if (unlikely(!e->step)) {
			e->m = v;
			e->step = max_t(unsigned, v / 2, 1024);
		} else if (e->m > v) {
			e->m = e->m >= e->step
				? e->m - e->step
				: 0;
		} else if (e->m < v) {
			e->m = e->m + e->step > e->m
				? e->m + e->step
				: U32_MAX;
		}

		if ((e->m > v ? e->m - v : v - e->m) < e->step)
			e->step = max_t(unsigned, e->step / 2, 1);

		if (v >= e->m)
			break;

		i = eytzinger0_child(i, v > e->m);
	}
}
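
/*
 * The estimator above is frugal-streaming style: each entry's estimate
 * e->m steps toward the observed value by e->step, and the step is
 * halved once the estimate lands within one step of a sample, so the
 * quantiles converge without storing any per-sample history.
 */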

static void bch2_time_stats_update_one(struct time_stats *stats,
				       u64 start, u64 end)
{
	u64 duration, freq;

	duration = time_after64(end, start)
		? end - start : 0;
	freq = time_after64(end, stats->last_event)
		? end - stats->last_event : 0;

	stats->count++;

	stats->average_duration = stats->average_duration
		? ewma_add(stats->average_duration, duration, 6)
		: duration;

	stats->average_frequency = stats->average_frequency
		? ewma_add(stats->average_frequency, freq, 6)
		: freq;

	stats->max_duration = max(stats->max_duration, duration);

	stats->last_event = end;

	bch2_quantiles_update(&stats->quantiles, duration);
}

void __bch2_time_stats_update(struct time_stats *stats, u64 start, u64 end)
{
	unsigned long flags;

	if (!stats->buffer) {
		spin_lock_irqsave(&stats->lock, flags);
		bch2_time_stats_update_one(stats, start, end);

		if (stats->average_frequency < 32 &&
		    stats->count > 1024)
			stats->buffer =
				alloc_percpu_gfp(struct time_stat_buffer,
						 GFP_ATOMIC);
		spin_unlock_irqrestore(&stats->lock, flags);
	} else {
		struct time_stat_buffer_entry *i;
		struct time_stat_buffer *b;

		preempt_disable();
		b = this_cpu_ptr(stats->buffer);

		BUG_ON(b->nr >= ARRAY_SIZE(b->entries));
		b->entries[b->nr++] = (struct time_stat_buffer_entry) {
			.start	= start,
			.end	= end
		};

		if (b->nr == ARRAY_SIZE(b->entries)) {
			spin_lock_irqsave(&stats->lock, flags);
			for (i = b->entries;
			     i < b->entries + ARRAY_SIZE(b->entries);
			     i++)
				bch2_time_stats_update_one(stats, i->start, i->end);
			spin_unlock_irqrestore(&stats->lock, flags);

			b->nr = 0;
		}

		preempt_enable();
	}
}

static const struct time_unit {
	const char	*name;
	u32		nsecs;
} time_units[] = {
	{ "ns",		1		},
	{ "us",		NSEC_PER_USEC	},
	{ "ms",		NSEC_PER_MSEC	},
	{ "sec",	NSEC_PER_SEC	},
};

static const struct time_unit *pick_time_units(u64 ns)
{
	const struct time_unit *u;

	for (u = time_units;
	     u + 1 < time_units + ARRAY_SIZE(time_units) &&
	     ns >= u[1].nsecs << 1;
	     u++)
		;

	return u;
}

static void pr_time_units(struct printbuf *out, u64 ns)
{
	const struct time_unit *u = pick_time_units(ns);

	pr_buf(out, "%llu %s", div_u64(ns, u->nsecs), u->name);
}
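
/*
 * e.g. pr_time_units(out, 2500) prints "2 us": pick_time_units()
 * advances to the largest unit in which the value is still at least 2,
 * and the division truncates.
 */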

size_t bch2_time_stats_print(struct time_stats *stats, char *buf, size_t len)
{
	struct printbuf out = _PBUF(buf, len);
	const struct time_unit *u;
	u64 freq = READ_ONCE(stats->average_frequency);
	u64 q, last_q = 0;
	int i;

	pr_buf(&out, "count:\t\t%llu\n",
	       stats->count);
	pr_buf(&out, "rate:\t\t%llu/sec\n",
	       freq ? div64_u64(NSEC_PER_SEC, freq) : 0);

	pr_buf(&out, "frequency:\t");
	pr_time_units(&out, freq);

	pr_buf(&out, "\navg duration:\t");
	pr_time_units(&out, stats->average_duration);

	pr_buf(&out, "\nmax duration:\t");
	pr_time_units(&out, stats->max_duration);

	i = eytzinger0_first(NR_QUANTILES);
	u = pick_time_units(stats->quantiles.entries[i].m);

	pr_buf(&out, "\nquantiles (%s):\t", u->name);
	eytzinger0_for_each(i, NR_QUANTILES) {
		bool is_last = eytzinger0_next(i, NR_QUANTILES) == -1;

		q = max(stats->quantiles.entries[i].m, last_q);
		pr_buf(&out, "%llu%s",
		       div_u64(q, u->nsecs),
		       is_last ? "\n" : " ");
		last_q = q;
	}

	return out.pos - buf;
}

void bch2_time_stats_exit(struct time_stats *stats)
{
	free_percpu(stats->buffer);
}

void bch2_time_stats_init(struct time_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
	spin_lock_init(&stats->lock);
}

/* ratelimits */

/**
 * bch2_ratelimit_delay() - return how long to delay until the next time to do
 * some work
 *
 * @d - the struct bch_ratelimit to update
 *
 * Returns the amount of time to delay by, in jiffies
 */
u64 bch2_ratelimit_delay(struct bch_ratelimit *d)
{
	u64 now = local_clock();

	return time_after64(d->next, now)
		? nsecs_to_jiffies(d->next - now)
		: 0;
}

/**
 * bch2_ratelimit_increment() - increment @d by the amount of work done
 *
 * @d - the struct bch_ratelimit to update
 * @done - the amount of work done, in arbitrary units
 */
void bch2_ratelimit_increment(struct bch_ratelimit *d, u64 done)
{
	u64 now = local_clock();

	d->next += div_u64(done * NSEC_PER_SEC, d->rate);

	if (time_before64(now + NSEC_PER_SEC, d->next))
		d->next = now + NSEC_PER_SEC;

	if (time_after64(now - NSEC_PER_SEC * 2, d->next))
		d->next = now - NSEC_PER_SEC * 2;
}
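
/*
 * Minimal usage sketch (hypothetical caller; more_work() and
 * do_one_unit() are stand-ins, not functions in this tree). The delay
 * is returned in jiffies, so it can be handed straight to the
 * scheduler:
 */
#if 0
static void example_rate_limited_loop(struct bch_ratelimit *d)
{
	while (more_work()) {
		u64 delay = bch2_ratelimit_delay(d);

		if (delay)
			schedule_timeout_interruptible(delay);

		bch2_ratelimit_increment(d, do_one_unit());
	}
}
#endif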

/* pd controller: */

/*
 * Updates pd_controller. Attempts to scale input values to units per second.
 * @target: desired value
 * @actual: current value
 *
 * @sign: 1 or -1; 1 if increasing the rate makes actual go up, -1 if increasing
 * it makes actual go down.
 */
void bch2_pd_controller_update(struct bch_pd_controller *pd,
			       s64 target, s64 actual, int sign)
{
	s64 proportional, derivative, change;
	unsigned long seconds_since_update = (jiffies - pd->last_update) / HZ;

	if (seconds_since_update == 0)
		return;

	pd->last_update = jiffies;

	proportional = actual - target;
	proportional *= seconds_since_update;
	proportional = div_s64(proportional, pd->p_term_inverse);

	derivative = actual - pd->last_actual;
	derivative = div_s64(derivative, seconds_since_update);
	derivative = ewma_add(pd->smoothed_derivative, derivative,
			      (pd->d_term / seconds_since_update) ?: 1);
	derivative = derivative * pd->d_term;
	derivative = div_s64(derivative, pd->p_term_inverse);

	change = proportional + derivative;

	/* Don't increase rate if not keeping up */
	if (change > 0 &&
	    pd->backpressure &&
	    time_after64(local_clock(),
			 pd->rate.next + NSEC_PER_MSEC))
		change = 0;

	change *= (sign * -1);

	pd->rate.rate = clamp_t(s64, (s64) pd->rate.rate + change,
				1, UINT_MAX);

	pd->last_actual		= actual;
	pd->last_derivative	= derivative;
	pd->last_proportional	= proportional;
	pd->last_change		= change;
	pd->last_target		= target;
}
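
/*
 * Worked example (illustrative numbers only): with p_term_inverse =
 * 6000 and sign = -1, an actual value 41943040 (40M) above target
 * sampled 6 seconds apart gives proportional = 41943040 * 6 / 6000
 * ~= 41943; the sign flip makes the change positive, raising the rate
 * so that actual is pushed back down toward target.
 */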

void bch2_pd_controller_init(struct bch_pd_controller *pd)
{
	pd->rate.rate		= 1024;
	pd->last_update		= jiffies;
	pd->p_term_inverse	= 6000;
	pd->d_term		= 30;
	pd->d_smooth		= pd->d_term;
	pd->backpressure	= 1;
}

size_t bch2_pd_controller_print_debug(struct bch_pd_controller *pd, char *buf)
{
	/* 2^64 - 1 is 20 digits, plus null byte */
	char rate[21];
	char actual[21];
	char target[21];
	char proportional[21];
	char derivative[21];
	char change[21];
	s64 next_io;

	bch2_hprint(&PBUF(rate),	pd->rate.rate);
	bch2_hprint(&PBUF(actual),	pd->last_actual);
	bch2_hprint(&PBUF(target),	pd->last_target);
	bch2_hprint(&PBUF(proportional), pd->last_proportional);
	bch2_hprint(&PBUF(derivative),	pd->last_derivative);
	bch2_hprint(&PBUF(change),	pd->last_change);

	next_io = div64_s64(pd->rate.next - local_clock(), NSEC_PER_MSEC);

	return sprintf(buf,
		       "rate:\t\t%s/sec\n"
		       "target:\t\t%s\n"
		       "actual:\t\t%s\n"
		       "proportional:\t%s\n"
		       "derivative:\t%s\n"
		       "change:\t\t%s/sec\n"
		       "next io:\t%llims\n",
		       rate, target, actual, proportional,
		       derivative, change, next_io);
}

/* misc: */

void bch2_bio_map(struct bio *bio, void *base)
{
	size_t size = bio->bi_iter.bi_size;
	struct bio_vec *bv = bio->bi_io_vec;

	BUG_ON(!bio->bi_iter.bi_size);
	BUG_ON(bio->bi_vcnt);
	BUG_ON(!bio->bi_max_vecs);

	bv->bv_offset = base ? offset_in_page(base) : 0;
	goto start;

	for (; size; bio->bi_vcnt++, bv++) {
		BUG_ON(bio->bi_vcnt >= bio->bi_max_vecs);

		bv->bv_offset	= 0;
start:		bv->bv_len	= min_t(size_t, PAGE_SIZE - bv->bv_offset,
					size);
		if (base) {
			bv->bv_page = is_vmalloc_addr(base)
				? vmalloc_to_page(base)
				: virt_to_page(base);

			base += bv->bv_len;
		}

		size -= bv->bv_len;
	}
}

int bch2_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		bv->bv_page = alloc_page(gfp_mask);
		if (!bv->bv_page) {
			while (--bv >= bio->bi_io_vec)
				__free_page(bv->bv_page);
			return -ENOMEM;
		}
	}

	return 0;
}

size_t bch2_rand_range(size_t max)
{
	size_t rand;

	if (!max)
		return 0;

	do {
		rand = get_random_long();
		rand &= roundup_pow_of_two(max) - 1;
	} while (rand >= max);

	return rand;
}
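
/*
 * Rejection sampling: the mask keeps only the low bits, and since
 * roundup_pow_of_two(max) < 2 * max, each iteration lands below max
 * with probability greater than 1/2, so the expected number of
 * iterations is under two and the result is uniform over [0, max).
 */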

void memcpy_to_bio(struct bio *dst, struct bvec_iter dst_iter, void *src)
{
	struct bio_vec bv;
	struct bvec_iter iter;

	__bio_for_each_segment(bv, dst, iter, dst_iter) {
		void *dstp = kmap_atomic(bv.bv_page);

		memcpy(dstp + bv.bv_offset, src, bv.bv_len);
		kunmap_atomic(dstp);

		src += bv.bv_len;
	}
}

void memcpy_from_bio(void *dst, struct bio *src, struct bvec_iter src_iter)
{
	struct bio_vec bv;
	struct bvec_iter iter;

	__bio_for_each_segment(bv, src, iter, src_iter) {
		void *srcp = kmap_atomic(bv.bv_page);

		memcpy(dst, srcp + bv.bv_offset, bv.bv_len);
		kunmap_atomic(srcp);

		dst += bv.bv_len;
	}
}

void bch_scnmemcpy(struct printbuf *out,
		   const char *src, size_t len)
{
	size_t n = printbuf_remaining(out);

	if (n) {
		n = min(n - 1, len);
		memcpy(out->pos, src, n);
		out->pos += n;
		*out->pos = '\0';
	}
}

static int alignment_ok(const void *base, size_t align)
{
	return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
		((unsigned long)base & (align - 1)) == 0;
}

static void u32_swap(void *a, void *b, size_t size)
{
	u32 t = *(u32 *)a;

	*(u32 *)a = *(u32 *)b;
	*(u32 *)b = t;
}

static void u64_swap(void *a, void *b, size_t size)
{
	u64 t = *(u64 *)a;

	*(u64 *)a = *(u64 *)b;
	*(u64 *)b = t;
}

static void generic_swap(void *a, void *b, size_t size)
{
	char t;

	do {
		t = *(char *)a;
		*(char *)a++ = *(char *)b;
		*(char *)b++ = t;
	} while (--size > 0);
}

static inline int do_cmp(void *base, size_t n, size_t size,
			 int (*cmp_func)(const void *, const void *, size_t),
			 size_t l, size_t r)
{
	return cmp_func(base + inorder_to_eytzinger0(l, n) * size,
			base + inorder_to_eytzinger0(r, n) * size,
			size);
}

static inline void do_swap(void *base, size_t n, size_t size,
			   void (*swap_func)(void *, void *, size_t),
			   size_t l, size_t r)
{
	swap_func(base + inorder_to_eytzinger0(l, n) * size,
		  base + inorder_to_eytzinger0(r, n) * size,
		  size);
}

void eytzinger0_sort(void *base, size_t n, size_t size,
		     int (*cmp_func)(const void *, const void *, size_t),
		     void (*swap_func)(void *, void *, size_t))
{
	int i, c, r;

	if (!swap_func) {
		if (size == 4 && alignment_ok(base, 4))
			swap_func = u32_swap;
		else if (size == 8 && alignment_ok(base, 8))
			swap_func = u64_swap;
		else
			swap_func = generic_swap;
	}

	/* heapify */
	for (i = n / 2 - 1; i >= 0; --i) {
		for (r = i; r * 2 + 1 < n; r = c) {
			c = r * 2 + 1;

			if (c + 1 < n &&
			    do_cmp(base, n, size, cmp_func, c, c + 1) < 0)
				c++;

			if (do_cmp(base, n, size, cmp_func, r, c) >= 0)
				break;

			do_swap(base, n, size, swap_func, r, c);
		}
	}

	/* sort */
	for (i = n - 1; i > 0; --i) {
		do_swap(base, n, size, swap_func, 0, i);

		for (r = 0; r * 2 + 1 < i; r = c) {
			c = r * 2 + 1;

			if (c + 1 < i &&
			    do_cmp(base, n, size, cmp_func, c, c + 1) < 0)
				c++;

			if (do_cmp(base, n, size, cmp_func, r, c) >= 0)
				break;

			do_swap(base, n, size, swap_func, r, c);
		}
	}
}
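
/*
 * Usage sketch (hypothetical helper names): sort an array into
 * eytzinger order so it can be binary searched cache-efficiently, as
 * the self tests below do with cmp_u16.
 */
#if 0
static int cmp_u32_example(const void *_l, const void *_r, size_t size)
{
	const u32 *l = _l, *r = _r;

	return (*l > *r) - (*r > *l);
}

static void eytzinger0_sort_example(void)
{
	u32 a[16];

	get_random_bytes(a, sizeof(a));
	eytzinger0_sort(a, ARRAY_SIZE(a), sizeof(a[0]), cmp_u32_example, NULL);
	/* a[] is now in eytzinger (BFS) order, not ascending order */
}
#endif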

void sort_cmp_size(void *base, size_t num, size_t size,
		   int (*cmp_func)(const void *, const void *, size_t),
		   void (*swap_func)(void *, void *, size_t size))
{
	/* pre-scale counters for performance */
	int i = (num/2 - 1) * size, n = num * size, c, r;

	if (!swap_func) {
		if (size == 4 && alignment_ok(base, 4))
			swap_func = u32_swap;
		else if (size == 8 && alignment_ok(base, 8))
			swap_func = u64_swap;
		else
			swap_func = generic_swap;
	}

	/* heapify */
	for ( ; i >= 0; i -= size) {
		for (r = i; r * 2 + size < n; r = c) {
			c = r * 2 + size;
			if (c < n - size &&
			    cmp_func(base + c, base + c + size, size) < 0)
				c += size;
			if (cmp_func(base + r, base + c, size) >= 0)
				break;
			swap_func(base + r, base + c, size);
		}
	}

	/* sort */
	for (i = n - size; i > 0; i -= size) {
		swap_func(base, base + i, size);
		for (r = 0; r * 2 + size < i; r = c) {
			c = r * 2 + size;
			if (c < i - size &&
			    cmp_func(base + c, base + c + size, size) < 0)
				c += size;
			if (cmp_func(base + r, base + c, size) >= 0)
				break;
			swap_func(base + r, base + c, size);
		}
	}
}

static void mempool_free_vp(void *element, void *pool_data)
{
	size_t size = (size_t) pool_data;

	vpfree(element, size);
}

static void *mempool_alloc_vp(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t) pool_data;

	return vpmalloc(size, gfp_mask);
}

int mempool_init_kvpmalloc_pool(mempool_t *pool, int min_nr, size_t size)
{
	return size < PAGE_SIZE
		? mempool_init_kmalloc_pool(pool, min_nr, size)
		: mempool_init(pool, min_nr, mempool_alloc_vp,
			       mempool_free_vp, (void *) size);
}
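
/*
 * e.g. mempool_init_kvpmalloc_pool(&pool, 1, 1 << 20) (hypothetical
 * pool) keeps one 1MB reserve element, allocated with vpmalloc() since
 * the size is >= PAGE_SIZE; sub-page sizes use a plain kmalloc pool
 * instead.
 */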

void eytzinger1_test(void)
{
	unsigned inorder, eytz, size;

	pr_info("1 based eytzinger test:");

	for (size = 2;
	     size < 65536;
	     size++) {
		unsigned extra = eytzinger1_extra(size);

		if (!(size % 4096))
			pr_info("tree size %u", size);

		BUG_ON(eytzinger1_prev(0, size) != eytzinger1_last(size));
		BUG_ON(eytzinger1_next(0, size) != eytzinger1_first(size));

		BUG_ON(eytzinger1_prev(eytzinger1_first(size), size) != 0);
		BUG_ON(eytzinger1_next(eytzinger1_last(size), size) != 0);

		inorder = 1;
		eytzinger1_for_each(eytz, size) {
			BUG_ON(__inorder_to_eytzinger1(inorder, size, extra) != eytz);
			BUG_ON(__eytzinger1_to_inorder(eytz, size, extra) != inorder);
			BUG_ON(eytz != eytzinger1_last(size) &&
			       eytzinger1_prev(eytzinger1_next(eytz, size), size) != eytz);

			inorder++;
		}
	}
}

void eytzinger0_test(void)
{
	unsigned inorder, eytz, size;

	pr_info("0 based eytzinger test:");

	for (size = 1;
	     size < 65536;
	     size++) {
		unsigned extra = eytzinger0_extra(size);

		if (!(size % 4096))
			pr_info("tree size %u", size);

		BUG_ON(eytzinger0_prev(-1, size) != eytzinger0_last(size));
		BUG_ON(eytzinger0_next(-1, size) != eytzinger0_first(size));

		BUG_ON(eytzinger0_prev(eytzinger0_first(size), size) != -1);
		BUG_ON(eytzinger0_next(eytzinger0_last(size), size) != -1);

		inorder = 0;
		eytzinger0_for_each(eytz, size) {
			BUG_ON(__inorder_to_eytzinger0(inorder, size, extra) != eytz);
			BUG_ON(__eytzinger0_to_inorder(eytz, size, extra) != inorder);
			BUG_ON(eytz != eytzinger0_last(size) &&
			       eytzinger0_prev(eytzinger0_next(eytz, size), size) != eytz);

			inorder++;
		}
	}
}

static inline int cmp_u16(const void *_l, const void *_r, size_t size)
{
	const u16 *l = _l, *r = _r;

	return (*l > *r) - (*r > *l);
}

static void eytzinger0_find_test_val(u16 *test_array, unsigned nr, u16 search)
{
	int i, c1 = -1, c2 = -1;
	ssize_t r;

	r = eytzinger0_find_le(test_array, nr,
			       sizeof(test_array[0]),
			       cmp_u16, &search);
	if (r >= 0)
		c1 = test_array[r];

	for (i = 0; i < nr; i++)
		if (test_array[i] <= search && test_array[i] > c2)
			c2 = test_array[i];

	if (c1 != c2) {
		eytzinger0_for_each(i, nr)
			pr_info("[%3u] = %12u", i, test_array[i]);
		pr_info("find_le(%2u) -> [%2zi] = %2i should be %2i",
			search, r, c1, c2);
	}
}

void eytzinger0_find_test(void)
{
	unsigned i, nr, allocated = 1 << 12;
	u16 *test_array = kmalloc_array(allocated, sizeof(test_array[0]), GFP_KERNEL);

	for (nr = 1; nr < allocated; nr++) {
		pr_info("testing %u elems", nr);

		get_random_bytes(test_array, nr * sizeof(test_array[0]));
		eytzinger0_sort(test_array, nr, sizeof(test_array[0]), cmp_u16, NULL);

		/* verify array is sorted correctly: */
		eytzinger0_for_each(i, nr)
			BUG_ON(i != eytzinger0_last(nr) &&
			       test_array[i] > test_array[eytzinger0_next(i, nr)]);

		for (i = 0; i < U16_MAX; i += 1 << 12)
			eytzinger0_find_test_val(test_array, nr, i);

		for (i = 0; i < nr; i++) {
			eytzinger0_find_test_val(test_array, nr, test_array[i] - 1);
			eytzinger0_find_test_val(test_array, nr, test_array[i]);
			eytzinger0_find_test_val(test_array, nr, test_array[i] + 1);
		}
	}

	kfree(test_array);
}

/*
 * Accumulate percpu counters onto one cpu's copy - only valid when
 * concurrent access to the percpu counters is otherwise guarded against
 */
u64 *bch2_acc_percpu_u64s(u64 __percpu *p, unsigned nr)
{
	u64 *ret;
	int cpu;

	preempt_disable();
	ret = this_cpu_ptr(p);
	preempt_enable();

	for_each_possible_cpu(cpu) {
		u64 *i = per_cpu_ptr(p, cpu);

		if (i != ret) {
			acc_u64s(ret, i, nr);
			memset(i, 0, nr * sizeof(u64));
		}
	}

	return ret;
}
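
/*
 * After the call, the returned cpu's copy holds the totals and every
 * other cpu's copy is zeroed. A reader (hypothetical counters array
 * and locking) would do:
 *
 *	u64 *v = bch2_acc_percpu_u64s(counters, nr);
 *
 *	for (i = 0; i < nr; i++)
 *		seq_printf(m, "%llu\n", v[i]);
 */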