/*
 * random utility code, for bcache but in theory not specific to bcache
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/log2.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched/clock.h>

#include "util.h"
#define simple_strtoint(c, end, base)	simple_strtol(c, end, base)
#define simple_strtouint(c, end, base)	simple_strtoul(c, end, base)
#define STRTO_H(name, type)					\
int bch2_ ## name ## _h(const char *cp, type *res)		\
{								\
	int u = 0;						\
	char *e;						\
	type i = simple_ ## name(cp, &e, 10);			\
								\
	switch (tolower(*e)) {					\
	/* fallthrough ladder mapping the suffixes 'y' down	\
	 * to 'k' onto u = 8..1 (elided) */			\
	}							\
								\
	/* scale by 1024 per suffix step, checking unsigned	\
	 * and signed overflow before each multiply */		\
	while (u--) {						\
		if ((type) ~0 > 0 &&				\
		    (type) ~0 / 1024 <= i)			\
			return -EINVAL;				\
		if ((i > 0 && ANYSINT_MAX(type) / 1024 < i) ||	\
		    (i < 0 && -ANYSINT_MAX(type) / 1024 > i))	\
			return -EINVAL;				\
		i *= 1024;					\
	}							\
								\
	*res = i;						\
	return 0;						\
}
STRTO_H(strtoint, int)
STRTO_H(strtouint, unsigned int)
STRTO_H(strtoll, long long)
STRTO_H(strtoull, unsigned long long)
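/*
 * Illustrative examples (not part of the original file): the generated
 * parsers accept an optional SI suffix and scale by powers of 1024,
 * returning -EINVAL on trailing garbage or overflow:
 *
 *	unsigned long long v;
 *
 *	bch2_strtoull_h("64k", &v);	// 0, v == 65536
 *	bch2_strtoull_h("1g", &v);	// 0, v == 1073741824
 *	bch2_strtoull_h("1z", &v);	// -EINVAL, 2^70 overflows u64
 */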
ssize_t bch2_hprint(char *buf, s64 v)
{
	static const char units[] = "?kMGTPEZY";
	char dec[4] = "";
	int u, t = 0;

	for (u = 0; v >= 1024 || v <= -1024; u++) {
		t = v & ~(~0 << 10);
		v >>= 10;
	}

	if (!u)
		return sprintf(buf, "%lli", v);

	/*
	 * 103 is magic: t is in the range [-1023, 1023] and we want
	 * to turn it into [-9, 9]
	 */
	if (v < 100 && v > -100)
		snprintf(dec, sizeof(dec), ".%i", t / 103);

	return sprintf(buf, "%lli%s%c", v, dec, units[u]);
}
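/*
 * Illustrative examples (not part of the original file):
 *
 *	char buf[32];
 *
 *	bch2_hprint(buf, 512);		// "512" - below 1024, no unit
 *	bch2_hprint(buf, 1127);		// "1.1k" - t == 103, 103 / 103 == 1
 */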
ssize_t bch2_snprint_string_list(char *buf, size_t size, const char * const list[],
				 size_t selected)
{
	char *out = buf;
	size_t i;

	for (i = 0; list[i]; i++)
		out += snprintf(out, buf + size - out,
				i == selected ? "[%s] " : "%s ", list[i]);

	out[-1] = '\n';
	return out - buf;
}
ssize_t bch2_read_string_list(const char *buf, const char * const list[])
{
	size_t i;
	char *s, *d = kstrndup(buf, PAGE_SIZE - 1, GFP_KERNEL);

	if (!d)
		return -ENOMEM;

	s = strim(d);

	for (i = 0; list[i]; i++)
		if (!strcmp(list[i], s))
			break;

	kfree(d);

	if (!list[i])
		return -EINVAL;

	return i;
}
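/*
 * Illustrative example (not part of the original file; the mode names
 * are hypothetical): the two helpers above are inverses, e.g. for
 * exposing an enum in sysfs:
 *
 *	static const char * const modes[] = { "none", "lz4", "gzip", NULL };
 *
 *	bch2_read_string_list("lz4\n", modes);		// returns 1
 *	bch2_snprint_string_list(buf, size, modes, 1);	// "none [lz4] gzip\n"
 */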
bool bch2_is_zero(const void *_p, size_t n)
{
	const char *p = _p;
	size_t i;

	for (i = 0; i < n; i++)
		if (p[i])
			return false;
	return true;
}
void bch2_time_stats_clear(struct time_stats *stats)
{
	spin_lock(&stats->lock);

	stats->count = 0;
	stats->last_duration = 0;
	stats->max_duration = 0;
	stats->average_duration = 0;
	stats->average_frequency = 0;
	stats->last = 0;

	spin_unlock(&stats->lock);
}
void __bch2_time_stats_update(struct time_stats *stats, u64 start_time)
{
	u64 now, duration, last;

	stats->count++;

	now		= local_clock();
	duration	= time_after64(now, start_time)
		? now - start_time : 0;
	last		= time_after64(now, stats->last)
		? now - stats->last : 0;

	stats->last_duration = duration;
	stats->max_duration = max(stats->max_duration, duration);

	if (stats->last) {
		stats->average_duration = ewma_add(stats->average_duration,
						   duration << 8, 3);

		if (stats->average_frequency)
			stats->average_frequency =
				ewma_add(stats->average_frequency,
					 last << 8, 3);
		else
			stats->average_frequency = last << 8;
	} else {
		stats->average_duration = duration << 8;
	}

	stats->last = now ?: 1;
}
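/*
 * Editor's note: the averages above are kept in fixed point - samples
 * are shifted left by 8 before being fed to ewma_add() (from util.h),
 * so they carry 8 fractional bits and readers must shift right by 8 to
 * recover nanoseconds; e.g. an average_duration of 2560 represents
 * 2560 >> 8 == 10ns.
 */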
void bch2_time_stats_update(struct time_stats *stats, u64 start_time)
{
	spin_lock(&stats->lock);
	__bch2_time_stats_update(stats, start_time);
	spin_unlock(&stats->lock);
}
/**
 * bch2_ratelimit_delay() - return how long to delay until the next time to do
 * some work
 *
 * @d - the struct bch_ratelimit to update
 *
 * Returns the amount of time to delay by, in jiffies
 */
u64 bch2_ratelimit_delay(struct bch_ratelimit *d)
{
	u64 now = local_clock();

	return time_after64(d->next, now)
		? nsecs_to_jiffies(d->next - now)
		: 0;
}
/**
 * bch2_ratelimit_increment() - increment @d by the amount of work done
 *
 * @d - the struct bch_ratelimit to update
 * @done - the amount of work done, in arbitrary units
 */
void bch2_ratelimit_increment(struct bch_ratelimit *d, u64 done)
{
	u64 now = local_clock();

	d->next += div_u64(done * NSEC_PER_SEC, d->rate);

	if (time_before64(now + NSEC_PER_SEC, d->next))
		d->next = now + NSEC_PER_SEC;

	if (time_after64(now - NSEC_PER_SEC * 2, d->next))
		d->next = now - NSEC_PER_SEC * 2;
}
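/*
 * Worked example (illustrative, not part of the original file): with
 * d->rate == 1024 units/sec, crediting done == 512 units advances
 * d->next by 512 * NSEC_PER_SEC / 1024, i.e. half a second. The two
 * clamps then keep d->next within [now - 2s, now + 1s], so an idle
 * period can't bank unbounded credit and a burst can't push the next
 * deadline arbitrarily far out.
 */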
int bch2_ratelimit_wait_freezable_stoppable(struct bch_ratelimit *d)
{
	while (1) {
		u64 delay = bch2_ratelimit_delay(d);

		if (delay)
			set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop())
			return 1;

		if (!delay)
			return 0;

		schedule_timeout(delay);
		try_to_freeze();
	}
}
/*
 * Updates pd_controller. Attempts to scale input values to units per second.
 * @target: desired value
 * @actual: current value
 *
 * @sign: 1 or -1; 1 if increasing the rate makes actual go up, -1 if increasing
 * it makes actual go down.
 */
void bch2_pd_controller_update(struct bch_pd_controller *pd,
			       s64 target, s64 actual, int sign)
{
	s64 proportional, derivative, change;
	unsigned long seconds_since_update = (jiffies - pd->last_update) / HZ;

	if (seconds_since_update == 0)
		return;

	pd->last_update = jiffies;

	proportional = actual - target;
	proportional *= seconds_since_update;
	proportional = div_s64(proportional, pd->p_term_inverse);

	derivative = actual - pd->last_actual;
	derivative = div_s64(derivative, seconds_since_update);
	derivative = ewma_add(pd->smoothed_derivative, derivative,
			      (pd->d_term / seconds_since_update) ?: 1);
	derivative = derivative * pd->d_term;
	derivative = div_s64(derivative, pd->p_term_inverse);

	change = proportional + derivative;

	/* Don't increase rate if not keeping up */
	if (change > 0 &&
	    pd->backpressure &&
	    time_after64(local_clock(),
			 pd->rate.next + NSEC_PER_MSEC))
		change = 0;

	change *= (sign * -1);

	pd->rate.rate = clamp_t(s64, (s64) pd->rate.rate + change,
				1, UINT_MAX);

	pd->last_actual		= actual;
	pd->last_derivative	= derivative;
	pd->last_proportional	= proportional;
	pd->last_change		= change;
	pd->last_target		= target;
}
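/*
 * Worked example (illustrative, not part of the original file): with
 * the default p_term_inverse of 6000 set in bch2_pd_controller_init()
 * below, if actual exceeds target by 600000 after one second, the
 * proportional term is 600000 / 6000 == 100; with sign == -1 the final
 * change is +100, i.e. the rate is raised by 100 units/sec (ignoring
 * the derivative term).
 */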
void bch2_pd_controller_init(struct bch_pd_controller *pd)
{
	pd->rate.rate		= 1024;
	pd->last_update		= jiffies;
	pd->p_term_inverse	= 6000;
	pd->d_term		= 30;
	pd->d_smooth		= pd->d_term;
	pd->backpressure	= 1;
}
size_t bch2_pd_controller_print_debug(struct bch_pd_controller *pd, char *buf)
{
	/* 2^64 - 1 is 20 digits, plus null byte */
	char rate[21];
	char actual[21];
	char target[21];
	char proportional[21];
	char derivative[21];
	char change[21];
	s64 next_io;

	bch2_hprint(rate,	pd->rate.rate);
	bch2_hprint(actual,	pd->last_actual);
	bch2_hprint(target,	pd->last_target);
	bch2_hprint(proportional, pd->last_proportional);
	bch2_hprint(derivative,	pd->last_derivative);
	bch2_hprint(change,	pd->last_change);

	next_io = div64_s64(pd->rate.next - local_clock(), NSEC_PER_MSEC);

	return sprintf(buf,
		       "rate:\t\t%s/sec\n"
		       "target:\t\t%s\n"
		       "actual:\t\t%s\n"
		       "proportional:\t%s\n"
		       "derivative:\t%s\n"
		       "change:\t\t%s/sec\n"
		       "next io:\t%llims\n",
		       rate, target, actual, proportional,
		       derivative, change, next_io);
}
void bch2_bio_map(struct bio *bio, void *base)
{
	size_t size = bio->bi_iter.bi_size;
	struct bio_vec *bv = bio->bi_io_vec;

	BUG_ON(!bio->bi_iter.bi_size);
	BUG_ON(bio->bi_vcnt);

	/* the first bvec may start at an offset within its page: */
	bv->bv_offset = base ? offset_in_page(base) : 0;
	goto start;

	for (; size; bio->bi_vcnt++, bv++) {
		bv->bv_offset	= 0;
start:		bv->bv_len	= min_t(size_t, PAGE_SIZE - bv->bv_offset,
					size);
		BUG_ON(bio->bi_vcnt >= bio->bi_max_vecs);
		if (base) {
			bv->bv_page = is_vmalloc_addr(base)
				? vmalloc_to_page(base)
				: virt_to_page(base);

			base += bv->bv_len;
		}

		size -= bv->bv_len;
	}
}
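/*
 * Illustrative usage sketch (not part of the original file, setup and
 * error handling omitted): point a freshly allocated bio at a
 * contiguous kmalloc or vmalloc buffer:
 *
 *	bio->bi_iter.bi_size = buf_len;
 *	bch2_bio_map(bio, buf);
 */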
size_t bch2_rand_range(size_t max)
{
	size_t rand;

	do {
		get_random_bytes(&rand, sizeof(rand));
		rand &= roundup_pow_of_two(max) - 1;
	} while (rand >= max);

	return rand;
}
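/*
 * Editor's note: masking to a power of two and retrying is rejection
 * sampling, which avoids the modulo bias of "rand % max". E.g. for
 * max == 10 the mask is 15; draws of 10..15 are rejected, so 0..9 stay
 * equally likely, at an expected cost of under two iterations.
 */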
void memcpy_to_bio(struct bio *dst, struct bvec_iter dst_iter, void *src)
{
	struct bio_vec bv;
	struct bvec_iter iter;

	__bio_for_each_segment(bv, dst, iter, dst_iter) {
		void *dstp = kmap_atomic(bv.bv_page);
		memcpy(dstp + bv.bv_offset, src, bv.bv_len);
		kunmap_atomic(dstp);

		src += bv.bv_len;
	}
}
void memcpy_from_bio(void *dst, struct bio *src, struct bvec_iter src_iter)
{
	struct bio_vec bv;
	struct bvec_iter iter;

	__bio_for_each_segment(bv, src, iter, src_iter) {
		void *srcp = kmap_atomic(bv.bv_page);
		memcpy(dst, srcp + bv.bv_offset, bv.bv_len);
		kunmap_atomic(srcp);

		dst += bv.bv_len;
	}
}
size_t bch_scnmemcpy(char *buf, size_t size, const char *src, size_t len)
{
	size_t n;

	if (!size)
		return 0;

	n = min(size - 1, len);
	memcpy(buf, src, n);
	buf[n] = '\0';

	return n;
}
#include "eytzinger.h"

static int alignment_ok(const void *base, size_t align)
{
	return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
		((unsigned long)base & (align - 1)) == 0;
}
static void u32_swap(void *a, void *b, size_t size)
{
	u32 t = *(u32 *)a;

	*(u32 *)a = *(u32 *)b;
	*(u32 *)b = t;
}
static void u64_swap(void *a, void *b, size_t size)
{
	u64 t = *(u64 *)a;

	*(u64 *)a = *(u64 *)b;
	*(u64 *)b = t;
}
static void generic_swap(void *a, void *b, size_t size)
{
	char t;

	do {
		t = *(char *)a;
		*(char *)a++ = *(char *)b;
		*(char *)b++ = t;
	} while (--size > 0);
}
static inline int do_cmp(void *base, size_t n, size_t size,
			 int (*cmp_func)(const void *, const void *, size_t),
			 size_t l, size_t r)
{
	return cmp_func(base + inorder_to_eytzinger0(l, n) * size,
			base + inorder_to_eytzinger0(r, n) * size,
			size);
}
static inline void do_swap(void *base, size_t n, size_t size,
			   void (*swap_func)(void *, void *, size_t),
			   size_t l, size_t r)
{
	swap_func(base + inorder_to_eytzinger0(l, n) * size,
		  base + inorder_to_eytzinger0(r, n) * size,
		  size);
}
void eytzinger0_sort(void *base, size_t n, size_t size,
		     int (*cmp_func)(const void *, const void *, size_t),
		     void (*swap_func)(void *, void *, size_t))
{
	int i, c, r;

	if (!swap_func) {
		if (size == 4 && alignment_ok(base, 4))
			swap_func = u32_swap;
		else if (size == 8 && alignment_ok(base, 8))
			swap_func = u64_swap;
		else
			swap_func = generic_swap;
	}

	/* heapify */
	for (i = n / 2 - 1; i >= 0; --i) {
		for (r = i; r * 2 + 1 < n; r = c) {
			c = r * 2 + 1;

			if (c + 1 < n &&
			    do_cmp(base, n, size, cmp_func, c, c + 1) < 0)
				c++;

			if (do_cmp(base, n, size, cmp_func, r, c) >= 0)
				break;

			do_swap(base, n, size, swap_func, r, c);
		}
	}

	/* sort */
	for (i = n - 1; i > 0; --i) {
		do_swap(base, n, size, swap_func, 0, i);

		for (r = 0; r * 2 + 1 < i; r = c) {
			c = r * 2 + 1;

			if (c + 1 < i &&
			    do_cmp(base, n, size, cmp_func, c, c + 1) < 0)
				c++;

			if (do_cmp(base, n, size, cmp_func, r, c) >= 0)
				break;

			do_swap(base, n, size, swap_func, r, c);
		}
	}
}
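/*
 * Illustrative usage sketch (not part of the original file; cmp_u32()
 * is hypothetical): sort an array kept in eytzinger0 (BFS) order for
 * cache-friendly binary search:
 *
 *	static int cmp_u32(const void *l, const void *r, size_t size)
 *	{
 *		u32 a = *(const u32 *)l, b = *(const u32 *)r;
 *
 *		return a < b ? -1 : a > b ? 1 : 0;
 *	}
 *
 *	eytzinger0_sort(keys, nr_keys, sizeof(u32), cmp_u32, NULL);
 */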
void sort_cmp_size(void *base, size_t num, size_t size,
		   int (*cmp_func)(const void *, const void *, size_t),
		   void (*swap_func)(void *, void *, size_t size))
{
	/* pre-scale counters for performance */
	int i = (num/2 - 1) * size, n = num * size, c, r;

	if (!swap_func) {
		if (size == 4 && alignment_ok(base, 4))
			swap_func = u32_swap;
		else if (size == 8 && alignment_ok(base, 8))
			swap_func = u64_swap;
		else
			swap_func = generic_swap;
	}

	/* heapify */
	for ( ; i >= 0; i -= size) {
		for (r = i; r * 2 + size < n; r = c) {
			c = r * 2 + size;
			if (c < n - size &&
			    cmp_func(base + c, base + c + size, size) < 0)
				c += size;
			if (cmp_func(base + r, base + c, size) >= 0)
				break;
			swap_func(base + r, base + c, size);
		}
	}

	/* sort */
	for (i = n - size; i > 0; i -= size) {
		swap_func(base, base + i, size);
		for (r = 0; r * 2 + size < i; r = c) {
			c = r * 2 + size;
			if (c < i - size &&
			    cmp_func(base + c, base + c + size, size) < 0)
				c += size;
			if (cmp_func(base + r, base + c, size) >= 0)
				break;
			swap_func(base + r, base + c, size);
		}
	}
}
void mempool_free_vp(void *element, void *pool_data)
{
	size_t size = (size_t) pool_data;

	vpfree(element, size);
}
void *mempool_alloc_vp(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t) pool_data;

	return vpmalloc(size, gfp_mask);
}
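/*
 * Illustrative usage sketch (not part of the original file): these are
 * mempool_alloc_t/mempool_free_t callbacks for pools of fixed-size
 * buffers, with the buffer size passed through pool_data:
 *
 *	pool = mempool_create(2, mempool_alloc_vp, mempool_free_vp,
 *			      (void *) (size_t) buf_size);
 */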