/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_UTIL_H
#define _BCACHEFS_UTIL_H

#include <linux/blkdev.h>
#include <linux/closure.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kernel.h>
#include <linux/sched/clock.h>
#include <linux/llist.h>
#include <linux/log2.h>
#include <linux/printbuf.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mean_and_variance.h>

#ifdef CONFIG_BCACHEFS_DEBUG
#define EBUG_ON(cond)		BUG_ON(cond)
#else
#define EBUG_ON(cond)
#endif

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define CPU_BIG_ENDIAN		0
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define CPU_BIG_ENDIAN		1
#endif

#define type_is_exact(_val, _type)					\
	__builtin_types_compatible_p(typeof(_val), _type)

#define type_is(_val, _type)						\
	(__builtin_types_compatible_p(typeof(_val), _type) ||		\
	 __builtin_types_compatible_p(typeof(_val), const _type))

/* Userspace doesn't align allocations as nicely as the kernel allocators: */
static inline size_t buf_pages(void *p, size_t len)
{
	return DIV_ROUND_UP(len +
			    ((unsigned long) p & (PAGE_SIZE - 1)),
			    PAGE_SIZE);
}

static inline void vpfree(void *p, size_t size)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		free_pages((unsigned long) p, get_order(size));
}

static inline void *vpmalloc(size_t size, gfp_t gfp_mask)
{
	return (void *) __get_free_pages(gfp_mask|__GFP_NOWARN,
					 get_order(size)) ?:
		__vmalloc(size, gfp_mask);
}

static inline void kvpfree(void *p, size_t size)
{
	if (size < PAGE_SIZE)
		kfree(p);
	else
		vpfree(p, size);
}

static inline void *kvpmalloc(size_t size, gfp_t gfp_mask)
{
	return size < PAGE_SIZE
		? kmalloc(size, gfp_mask)
		: vpmalloc(size, gfp_mask);
}

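/*
 * Illustrative sketch (not part of the original header; the example_* names
 * are hypothetical): kvpmalloc() picks the allocator by size, so the
 * matching kvpfree() must be passed the same size that was allocated:
 */
static inline u64 *example_alloc_counters(size_t nr)
{
	return kvpmalloc(nr * sizeof(u64), GFP_KERNEL);
}

static inline void example_free_counters(u64 *counters, size_t nr)
{
	kvpfree(counters, nr * sizeof(u64));
}
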
int mempool_init_kvpmalloc_pool(mempool_t *, int, size_t);

#define HEAP(type)							\
struct {								\
	size_t size, used;						\
	type *data;							\
}

#define DECLARE_HEAP(type, name) HEAP(type) name

#define init_heap(heap, _size, gfp)					\
({									\
	(heap)->used = 0;						\
	(heap)->size = (_size);						\
	(heap)->data = kvpmalloc((heap)->size * sizeof((heap)->data[0]),\
				 (gfp));				\
})

#define free_heap(heap)							\
do {									\
	kvpfree((heap)->data, (heap)->size * sizeof((heap)->data[0]));	\
	(heap)->data = NULL;						\
} while (0)

#define heap_set_backpointer(h, i, _fn)					\
do {									\
	void (*fn)(typeof(h), size_t) = _fn;				\
	if (fn)								\
		fn(h, i);						\
} while (0)

#define heap_swap(h, i, j, set_backpointer)				\
do {									\
	swap((h)->data[i], (h)->data[j]);				\
	heap_set_backpointer(h, i, set_backpointer);			\
	heap_set_backpointer(h, j, set_backpointer);			\
} while (0)

#define heap_peek(h)							\
({									\
	EBUG_ON(!(h)->used);						\
	(h)->data[0];							\
})

#define heap_full(h)	((h)->used == (h)->size)

#define heap_sift_down(h, i, cmp, set_backpointer)			\
do {									\
	size_t _c, _j = i;						\
									\
	for (; _j * 2 + 1 < (h)->used; _j = _c) {			\
		_c = _j * 2 + 1;					\
		if (_c + 1 < (h)->used &&				\
		    cmp(h, (h)->data[_c], (h)->data[_c + 1]) >= 0)	\
			_c++;						\
									\
		if (cmp(h, (h)->data[_c], (h)->data[_j]) >= 0)		\
			break;						\
		heap_swap(h, _c, _j, set_backpointer);			\
	}								\
} while (0)

#define heap_sift_up(h, i, cmp, set_backpointer)			\
do {									\
	while (i) {							\
		size_t p = (i - 1) / 2;					\
		if (cmp(h, (h)->data[i], (h)->data[p]) >= 0)		\
			break;						\
		heap_swap(h, i, p, set_backpointer);			\
		i = p;							\
	}								\
} while (0)

#define __heap_add(h, d, cmp, set_backpointer)				\
({									\
	size_t _i = (h)->used++;					\
	(h)->data[_i] = d;						\
	heap_set_backpointer(h, _i, set_backpointer);			\
									\
	heap_sift_up(h, _i, cmp, set_backpointer);			\
	_i;								\
})

#define heap_add(h, d, cmp, set_backpointer)				\
({									\
	bool _r = !heap_full(h);					\
	if (_r)								\
		__heap_add(h, d, cmp, set_backpointer);			\
	_r;								\
})

#define heap_add_or_replace(h, new, cmp, set_backpointer)		\
do {									\
	if (!heap_add(h, new, cmp, set_backpointer) &&			\
	    cmp(h, new, heap_peek(h)) >= 0) {				\
		(h)->data[0] = new;					\
		heap_set_backpointer(h, 0, set_backpointer);		\
		heap_sift_down(h, 0, cmp, set_backpointer);		\
	}								\
} while (0)

#define heap_del(h, i, cmp, set_backpointer)				\
do {									\
	size_t _i = (i);						\
									\
	BUG_ON(_i >= (h)->used);					\
	(h)->used--;							\
	if ((_i) < (h)->used) {						\
		heap_swap(h, _i, (h)->used, set_backpointer);		\
		heap_sift_up(h, _i, cmp, set_backpointer);		\
		heap_sift_down(h, _i, cmp, set_backpointer);		\
	}								\
} while (0)

#define heap_pop(h, d, cmp, set_backpointer)				\
({									\
	bool _r = (h)->used;						\
	if (_r) {							\
		(d) = (h)->data[0];					\
		heap_del(h, 0, cmp, set_backpointer);			\
	}								\
	_r;								\
})

#define heap_resort(heap, cmp, set_backpointer)			\
do {									\
	ssize_t _i;							\
									\
	for (_i = (ssize_t) (heap)->used / 2 - 1; _i >= 0; --_i)	\
		heap_sift_down(heap, _i, cmp, set_backpointer);		\
} while (0)

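/*
 * Usage sketch (illustrative; the example_* names are hypothetical): a
 * bounded min-heap of u64s. NULL is a valid set_backpointer argument, since
 * heap_set_backpointer() checks the function pointer before calling it.
 */
#define example_u64_cmp(_h, _l, _r)	(((_l) > (_r)) - ((_l) < (_r)))

static inline u64 example_heap_min(void)
{
	HEAP(u64) heap;
	u64 min = 0;

	init_heap(&heap, 16, GFP_KERNEL);
	if (!heap.data)
		return 0;

	heap_add(&heap, 3, example_u64_cmp, NULL);
	heap_add(&heap, 1, example_u64_cmp, NULL);
	heap_add(&heap, 2, example_u64_cmp, NULL);

	heap_pop(&heap, min, example_u64_cmp, NULL);	/* min is now 1 */
	free_heap(&heap);
	return min;
}
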
#define ANYSINT_MAX(t)							\
	((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)

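/*
 * e.g. ANYSINT_MAX(s8) == 127: ((1 << 6) - 1) * 2 + 1, built up without
 * ever shifting into the sign bit.
 */
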
#ifdef __KERNEL__
static inline void pr_time(struct printbuf *out, u64 time)
{
	prt_printf(out, "%llu", time);
}
#else
#include <time.h>
static inline void pr_time(struct printbuf *out, u64 _time)
{
	char time_str[64];
	time_t time = _time;
	struct tm *tm = localtime(&time);
	size_t err = strftime(time_str, sizeof(time_str), "%c", tm);

	if (!err)
		prt_printf(out, "(formatting error)");
	else
		prt_printf(out, "%s", time_str);
}
#endif

#ifdef __KERNEL__
static inline void uuid_unparse_lower(u8 *uuid, char *out)
{
	sprintf(out, "%pUb", uuid);
}
#else
#include <uuid/uuid.h>
#endif

static inline void pr_uuid(struct printbuf *out, u8 *uuid)
{
	char uuid_str[40];

	uuid_unparse_lower(uuid, uuid_str);
	prt_printf(out, "%s", uuid_str);
}

int bch2_strtoint_h(const char *, int *);
int bch2_strtouint_h(const char *, unsigned int *);
int bch2_strtoll_h(const char *, long long *);
int bch2_strtoull_h(const char *, unsigned long long *);
int bch2_strtou64_h(const char *, u64 *);

static inline int bch2_strtol_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
	return bch2_strtoint_h(cp, (int *) res);
#else
	return bch2_strtoll_h(cp, (long long *) res);
#endif
}

static inline int bch2_strtoul_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
	return bch2_strtouint_h(cp, (unsigned int *) res);
#else
	return bch2_strtoull_h(cp, (unsigned long long *) res);
#endif
}

#define strtoi_h(cp, res)						\
	( type_is(*res, int)		? bch2_strtoint_h(cp, (void *) res)\
	: type_is(*res, long)		? bch2_strtol_h(cp, (void *) res)\
	: type_is(*res, long long)	? bch2_strtoll_h(cp, (void *) res)\
	: type_is(*res, unsigned)	? bch2_strtouint_h(cp, (void *) res)\
	: type_is(*res, unsigned long)	? bch2_strtoul_h(cp, (void *) res)\
	: type_is(*res, unsigned long long) ? bch2_strtoull_h(cp, (void *) res)\
	: -EINVAL)

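/*
 * Usage sketch (illustrative; the helper name is hypothetical, and the
 * human-readable suffix handling is an assumption about the _h parsers):
 */
static inline int example_parse_capacity(const char *buf, unsigned long *res)
{
	return strtoi_h(buf, res);	/* e.g. "16M" */
}
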
#define strtoul_safe(cp, var)						\
do {									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r)							\
		var = _v;						\
} while (0)

#define strtoul_safe_clamp(cp, var, min, max)				\
do {									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r)							\
		var = clamp_t(typeof(var), _v, min, max);		\
} while (0)

#define strtoul_safe_restrict(cp, var, min, max)			\
do {									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r && _v >= min && _v <= max)				\
		var = _v;						\
} while (0)

#define snprint(out, var)						\
	prt_printf(out,							\
		   type_is(var, int)		? "%i\n"		\
		 : type_is(var, unsigned)	? "%u\n"		\
		 : type_is(var, long)		? "%li\n"		\
		 : type_is(var, unsigned long)	? "%lu\n"		\
		 : type_is(var, s64)		? "%lli\n"		\
		 : type_is(var, u64)		? "%llu\n"		\
		 : type_is(var, char *)		? "%s\n"		\
		 : "%i\n", var)

bool bch2_is_zero(const void *, size_t);

u64 bch2_read_flag_list(char *, const char * const[]);

void bch2_prt_u64_binary(struct printbuf *, u64, unsigned);

void bch2_print_string_as_lines(const char *prefix, const char *lines);
int bch2_prt_backtrace(struct printbuf *, struct task_struct *);

#define NR_QUANTILES	15
#define QUANTILE_IDX(i)	inorder_to_eytzinger0(i, NR_QUANTILES)
#define QUANTILE_FIRST	eytzinger0_first(NR_QUANTILES)
#define QUANTILE_LAST	eytzinger0_last(NR_QUANTILES)

struct quantiles {
	struct quantile_entry {
		u64	m;
		u64	step;
	}		entries[NR_QUANTILES];
};

struct time_stat_buffer {
	unsigned	nr;
	struct time_stat_buffer_entry {
		u64	start;
		u64	end;
	}		entries[32];
};

struct time_stats {
	spinlock_t	lock;
	/* all fields are in nanoseconds */
	u64		max_duration;
	u64		min_duration;
	u64		max_freq;
	u64		min_freq;
	u64		last_event;
	struct quantiles quantiles;

	struct mean_and_variance	  duration_stats;
	struct mean_and_variance_weighted duration_stats_weighted;
	struct mean_and_variance	  freq_stats;
	struct mean_and_variance_weighted freq_stats_weighted;
	struct time_stat_buffer __percpu *buffer;
};

void __bch2_time_stats_update(struct time_stats *stats, u64, u64);

static inline void bch2_time_stats_update(struct time_stats *stats, u64 start)
{
	__bch2_time_stats_update(stats, start, local_clock());
}

void bch2_time_stats_to_text(struct printbuf *, struct time_stats *);

void bch2_time_stats_exit(struct time_stats *);
void bch2_time_stats_init(struct time_stats *);

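/*
 * Usage sketch (illustrative; example_timed_op is hypothetical): sample the
 * clock before the work, then hand the start time to
 * bch2_time_stats_update(), which reads the clock again itself:
 */
static inline void example_timed_op(struct time_stats *stats)
{
	u64 start = local_clock();

	/* ... the operation being measured ... */

	bch2_time_stats_update(stats, start);
}
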
#define ewma_add(ewma, val, weight)					\
({									\
	typeof(ewma) _ewma = (ewma);					\
	typeof(weight) _weight = (weight);				\
									\
	(((_ewma << _weight) - _ewma) + (val)) >> _weight;		\
})

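/*
 * e.g. with weight 3 the new sample contributes 1/8th:
 * ewma_add(e, v, 3) == (e * 7 + v) >> 3, i.e. truncating integer division.
 */
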
struct bch_ratelimit {
	/* Next time we want to do some work, in nanoseconds */
	u64			next;

	/*
	 * Rate at which we want to do work, in units per nanosecond
	 * The units here correspond to the units passed to
	 * bch2_ratelimit_increment()
	 */
	unsigned		rate;
};

static inline void bch2_ratelimit_reset(struct bch_ratelimit *d)
{
	d->next = local_clock();
}

u64 bch2_ratelimit_delay(struct bch_ratelimit *);
void bch2_ratelimit_increment(struct bch_ratelimit *, u64);

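/*
 * Usage sketch (illustrative; work_to_do()/do_some_work()/units are
 * hypothetical, and treating the returned delay as a sleepable timeout is
 * an assumption): account each batch of work, then sleep for whatever
 * delay the ratelimit asks for:
 *
 *	while (work_to_do()) {
 *		do_some_work();
 *		bch2_ratelimit_increment(&d, units);
 *
 *		delay = bch2_ratelimit_delay(&d);
 *		if (delay)
 *			schedule_timeout_interruptible(delay);
 *	}
 */
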
struct bch_pd_controller {
	struct bch_ratelimit	rate;
	unsigned long		last_update;

	s64			last_actual;
	s64			smoothed_derivative;

	unsigned		p_term_inverse;
	unsigned		d_smooth;
	unsigned		d_term;

	/* for exporting to sysfs (no effect on behavior) */
	s64			last_derivative;
	s64			last_proportional;
	s64			last_change;
	s64			last_target;

	/*
	 * If true, the rate will not increase if bch2_ratelimit_delay()
	 * is not being called often enough.
	 */
	bool			backpressure;
};

void bch2_pd_controller_update(struct bch_pd_controller *, s64, s64, int);
void bch2_pd_controller_init(struct bch_pd_controller *);
void bch2_pd_controller_debug_to_text(struct printbuf *, struct bch_pd_controller *);

#define sysfs_pd_controller_attribute(name)				\
	rw_attribute(name##_rate);					\
	rw_attribute(name##_rate_bytes);				\
	rw_attribute(name##_rate_d_term);				\
	rw_attribute(name##_rate_p_term_inverse);			\
	read_attribute(name##_rate_debug)

#define sysfs_pd_controller_files(name)					\
	&sysfs_##name##_rate,						\
	&sysfs_##name##_rate_bytes,					\
	&sysfs_##name##_rate_d_term,					\
	&sysfs_##name##_rate_p_term_inverse,				\
	&sysfs_##name##_rate_debug

#define sysfs_pd_controller_show(name, var)				\
do {									\
	sysfs_hprint(name##_rate,	(var)->rate.rate);		\
	sysfs_print(name##_rate_bytes,	(var)->rate.rate);		\
	sysfs_print(name##_rate_d_term,	(var)->d_term);			\
	sysfs_print(name##_rate_p_term_inverse,	(var)->p_term_inverse);	\
									\
	if (attr == &sysfs_##name##_rate_debug)				\
		bch2_pd_controller_debug_to_text(out, var);		\
} while (0)

#define sysfs_pd_controller_store(name, var)				\
do {									\
	sysfs_strtoul_clamp(name##_rate,				\
			    (var)->rate.rate, 1, UINT_MAX);		\
	sysfs_strtoul_clamp(name##_rate_bytes,				\
			    (var)->rate.rate, 1, UINT_MAX);		\
	sysfs_strtoul(name##_rate_d_term,	(var)->d_term);		\
	sysfs_strtoul_clamp(name##_rate_p_term_inverse,			\
			    (var)->p_term_inverse, 1, INT_MAX);		\
} while (0)

#define container_of_or_null(ptr, type, member)				\
({									\
	typeof(ptr) _ptr = ptr;						\
	_ptr ? container_of(_ptr, type, member) : NULL;			\
})

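/*
 * e.g. (illustrative; struct foo is hypothetical): mapping a possibly-NULL
 * embedded node back to its container without a separate NULL check:
 *
 *	struct foo *f = container_of_or_null(rb_first(root),
 *					     struct foo, node);
 */
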
/* Does linear interpolation between powers of two */
static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
{
	unsigned fract = x & ~(~0 << fract_bits);

	x >>= fract_bits;
	x   = 1 << x;
	x  += (x * fract) >> fract_bits;

	return x;
}

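/*
 * e.g. fract_exp_two(0b10100, 3): fract = 4, x becomes 1 << 2 = 4, then
 * 4 + ((4 * 4) >> 3) = 6, halfway between 2^2 and 2^3 for an input of
 * "2.5" in 5.3 fixed point.
 */
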
void bch2_bio_map(struct bio *bio, void *base, size_t);
int bch2_bio_alloc_pages(struct bio *, size_t, gfp_t);

static inline sector_t bdev_sectors(struct block_device *bdev)
{
	return bdev->bd_inode->i_size >> 9;
}

#define closure_bio_submit(bio, cl)					\
do {									\
	closure_get(cl);						\
	submit_bio(bio);						\
} while (0)

#define kthread_wait_freezable(cond)					\
({									\
	int _ret = 0;							\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (kthread_should_stop()) {				\
			_ret = -1;					\
			break;						\
		}							\
									\
		if (cond)						\
			break;						\
									\
		schedule();						\
		try_to_freeze();					\
	}								\
	set_current_state(TASK_RUNNING);				\
	_ret;								\
})

size_t bch2_rand_range(size_t);

void memcpy_to_bio(struct bio *, struct bvec_iter, const void *);
void memcpy_from_bio(void *, struct bio *, struct bvec_iter);

static inline void memcpy_u64s_small(void *dst, const void *src,
				     unsigned u64s)
{
	u64 *d = dst;
	const u64 *s = src;

	while (u64s--)
		*d++ = *s++;
}

static inline void __memcpy_u64s(void *dst, const void *src,
				 unsigned u64s)
{
#ifdef CONFIG_X86_64
	long d0, d1, d2;

	asm volatile("rep ; movsq"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (u64s), "1" (dst), "2" (src)
		     : "memory");
#else
	u64 *d = dst;
	const u64 *s = src;

	while (u64s--)
		*d++ = *s++;
#endif
}

static inline void memcpy_u64s(void *dst, const void *src,
			       unsigned u64s)
{
	EBUG_ON(!(dst >= src + u64s * sizeof(u64) ||
		  dst + u64s * sizeof(u64) <= src));

	__memcpy_u64s(dst, src, u64s);
}

static inline void __memmove_u64s_down(void *dst, const void *src,
				       unsigned u64s)
{
	__memcpy_u64s(dst, src, u64s);
}

static inline void memmove_u64s_down(void *dst, const void *src,
				     unsigned u64s)
{
	EBUG_ON(dst > src);

	__memmove_u64s_down(dst, src, u64s);
}

static inline void __memmove_u64s_up_small(void *_dst, const void *_src,
					   unsigned u64s)
{
	u64 *dst = (u64 *) _dst + u64s;
	u64 *src = (u64 *) _src + u64s;

	while (u64s--)
		*--dst = *--src;
}

static inline void memmove_u64s_up_small(void *dst, const void *src,
					 unsigned u64s)
{
	EBUG_ON(dst < src);

	__memmove_u64s_up_small(dst, src, u64s);
}

static inline void __memmove_u64s_up(void *_dst, const void *_src,
				     unsigned u64s)
{
	u64 *dst = (u64 *) _dst + u64s - 1;
	u64 *src = (u64 *) _src + u64s - 1;

#ifdef CONFIG_X86_64
	long d0, d1, d2;

	asm volatile("std ;\n"
		     "rep ; movsq\n"
		     "cld ;\n"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (u64s), "1" (dst), "2" (src)
		     : "memory");
#else
	while (u64s--)
		*dst-- = *src--;
#endif
}

static inline void memmove_u64s_up(void *dst, const void *src,
				   unsigned u64s)
{
	EBUG_ON(dst < src);

	__memmove_u64s_up(dst, src, u64s);
}

static inline void memmove_u64s(void *dst, const void *src,
				unsigned u64s)
{
	if (dst < src)
		__memmove_u64s_down(dst, src, u64s);
	else
		__memmove_u64s_up(dst, src, u64s);
}

/* Set the last few bytes up to a u64 boundary given an offset into a buffer. */
static inline void memset_u64s_tail(void *s, int c, unsigned bytes)
{
	unsigned rem = round_up(bytes, sizeof(u64)) - bytes;

	memset(s + bytes, c, rem);
}

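/*
 * Usage sketch (illustrative; the field name is hypothetical): the _u64s
 * helpers copy in whole u64s, matching structures whose sizes are tracked
 * as a count of u64s rather than bytes:
 *
 *	memcpy_u64s(dst, src, src->u64s);
 */
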
void sort_cmp_size(void *base, size_t num, size_t size,
		   int (*cmp_func)(const void *, const void *, size_t),
		   void (*swap_func)(void *, void *, size_t));

/* just the memmove, doesn't update @_nr */
#define __array_insert_item(_array, _nr, _pos)				\
	memmove(&(_array)[(_pos) + 1],					\
		&(_array)[(_pos)],					\
		sizeof((_array)[0]) * ((_nr) - (_pos)))

#define array_insert_item(_array, _nr, _pos, _new_item)			\
do {									\
	__array_insert_item(_array, _nr, _pos);				\
	(_nr)++;							\
	(_array)[(_pos)] = (_new_item);					\
} while (0)

#define array_remove_items(_array, _nr, _pos, _nr_to_remove)		\
do {									\
	(_nr) -= (_nr_to_remove);					\
	memmove(&(_array)[(_pos)],					\
		&(_array)[(_pos) + (_nr_to_remove)],			\
		sizeof((_array)[0]) * ((_nr) - (_pos)));		\
} while (0)

#define array_remove_item(_array, _nr, _pos)				\
	array_remove_items(_array, _nr, _pos, 1)

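/*
 * Usage sketch (illustrative; the names are hypothetical): insert into a
 * sorted fixed-size array, shifting the later elements up by one:
 *
 *	if (nr < ARRAY_SIZE(entries))
 *		array_insert_item(entries, nr, pos, new_entry);
 */
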
static inline void __move_gap(void *array, size_t element_size,
			      size_t nr, size_t size,
			      size_t old_gap, size_t new_gap)
{
	size_t gap_end = old_gap + size - nr;

	if (new_gap < old_gap) {
		size_t move = old_gap - new_gap;

		memmove(array + element_size * (gap_end - move),
			array + element_size * (old_gap - move),
				element_size * move);
	} else if (new_gap > old_gap) {
		size_t move = new_gap - old_gap;

		memmove(array + element_size * old_gap,
			array + element_size * gap_end,
				element_size * move);
	}
}

/* Move the gap in a gap buffer: */
#define move_gap(_array, _nr, _size, _old_gap, _new_gap)	\
	__move_gap(_array, sizeof(_array[0]), _nr, _size, _old_gap, _new_gap)

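/*
 * e.g. (illustrative): a buffer of size 8 holding 6 elements has a two-slot
 * gap; move_gap(buf, 6, 8, 6, 2) shifts elements 2..5 up by two, leaving
 * the gap at index 2 so new elements can be inserted there.
 */
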
#define bubble_sort(_base, _nr, _cmp)					\
do {									\
	ssize_t _i, _end;						\
	bool _swapped = true;						\
									\
	for (_end = (ssize_t) (_nr) - 1; _end > 0 && _swapped; --_end) {\
		_swapped = false;					\
		for (_i = 0; _i < _end; _i++)				\
			if (_cmp((_base)[_i], (_base)[_i + 1]) > 0) {	\
				swap((_base)[_i], (_base)[_i + 1]);	\
				_swapped = true;			\
			}						\
	}								\
} while (0)

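/*
 * Usage sketch (illustrative): the comparison is applied to elements by
 * value, not by pointer, so u8_cmp() (defined below) works directly:
 *
 *	u8 keys[16];
 *	...
 *	bubble_sort(keys, ARRAY_SIZE(keys), u8_cmp);
 */
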
static inline u64 percpu_u64_get(u64 __percpu *src)
{
	u64 ret = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		ret += *per_cpu_ptr(src, cpu);
	return ret;
}

static inline void percpu_u64_set(u64 __percpu *dst, u64 src)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(dst, cpu) = 0;
	this_cpu_write(*dst, src);
}

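/*
 * Usage sketch (illustrative): a plain percpu u64 used as a counter,
 * updated locally and summed over all possible CPUs on read:
 *
 *	u64 __percpu *counter = alloc_percpu(u64);
 *
 *	this_cpu_add(*counter, 1);
 *	total = percpu_u64_get(counter);
 */
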
static inline void acc_u64s(u64 *acc, const u64 *src, unsigned nr)
{
	unsigned i;

	for (i = 0; i < nr; i++)
		acc[i] += src[i];
}

static inline void acc_u64s_percpu(u64 *acc, const u64 __percpu *src,
				   unsigned nr)
{
	int cpu;

	for_each_possible_cpu(cpu)
		acc_u64s(acc, per_cpu_ptr(src, cpu), nr);
}

static inline void percpu_memset(void __percpu *p, int c, size_t bytes)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memset(per_cpu_ptr(p, cpu), c, bytes);
}

u64 *bch2_acc_percpu_u64s(u64 __percpu *, unsigned);

#define cmp_int(l, r)		((l > r) - (l < r))

static inline int u8_cmp(u8 l, u8 r)
{
	return cmp_int(l, r);
}

#endif /* _BCACHEFS_UTIL_H */