/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_UTIL_H
#define _BCACHEFS_UTIL_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/closure.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kernel.h>
#include <linux/sched/clock.h>
#include <linux/llist.h>
#include <linux/log2.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#define PAGE_SECTOR_SHIFT	(PAGE_SHIFT - 9)
#define PAGE_SECTORS		(1UL << PAGE_SECTOR_SHIFT)

#ifdef CONFIG_BCACHEFS_DEBUG

#define EBUG_ON(cond)			BUG_ON(cond)
#define atomic_dec_bug(v)		BUG_ON(atomic_dec_return(v) < 0)
#define atomic_inc_bug(v, i)		BUG_ON(atomic_inc_return(v) <= i)
#define atomic_sub_bug(i, v)		BUG_ON(atomic_sub_return(i, v) < 0)
#define atomic_add_bug(i, v)		BUG_ON(atomic_add_return(i, v) < 0)
#define atomic_long_dec_bug(v)		BUG_ON(atomic_long_dec_return(v) < 0)
#define atomic_long_sub_bug(i, v)	BUG_ON(atomic_long_sub_return(i, v) < 0)
#define atomic64_dec_bug(v)		BUG_ON(atomic64_dec_return(v) < 0)
#define atomic64_inc_bug(v, i)		BUG_ON(atomic64_inc_return(v) <= i)
#define atomic64_sub_bug(i, v)		BUG_ON(atomic64_sub_return(i, v) < 0)
#define atomic64_add_bug(i, v)		BUG_ON(atomic64_add_return(i, v) < 0)

/* Debug wrapper: BUG if the source and destination ranges overlap */
#define memcpy(dst, src, len)						\
({									\
	void *_dst = (dst);						\
	const void *_src = (src);					\
	size_t _len = (len);						\
									\
	BUG_ON(!((void *) (_dst) >= (void *) (_src) + (_len) ||	\
		 (void *) (_dst) + (_len) <= (void *) (_src)));		\
	memcpy(_dst, _src, _len);					\
})

#else /* DEBUG */

#define EBUG_ON(cond)
#define atomic_dec_bug(v)		atomic_dec(v)
#define atomic_inc_bug(v, i)		atomic_inc(v)
#define atomic_sub_bug(i, v)		atomic_sub(i, v)
#define atomic_add_bug(i, v)		atomic_add(i, v)
#define atomic_long_dec_bug(v)		atomic_long_dec(v)
#define atomic_long_sub_bug(i, v)	atomic_long_sub(i, v)
#define atomic64_dec_bug(v)		atomic64_dec(v)
#define atomic64_inc_bug(v, i)		atomic64_inc(v)
#define atomic64_sub_bug(i, v)		atomic64_sub(i, v)
#define atomic64_add_bug(i, v)		atomic64_add(i, v)

#endif

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define CPU_BIG_ENDIAN		0
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define CPU_BIG_ENDIAN		1
#endif

/* type hackery */

#define type_is_exact(_val, _type)					\
	__builtin_types_compatible_p(typeof(_val), _type)

#define type_is(_val, _type)						\
	(__builtin_types_compatible_p(typeof(_val), _type) ||		\
	 __builtin_types_compatible_p(typeof(_val), const _type))

/* Userspace doesn't align allocations as nicely as the kernel allocators: */
static inline size_t buf_pages(void *p, size_t len)
{
	return DIV_ROUND_UP(len +
			    ((unsigned long) p & (PAGE_SIZE - 1)),
			    PAGE_SIZE);
}

static inline void vpfree(void *p, size_t size)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		free_pages((unsigned long) p, get_order(size));
}

static inline void *vpmalloc(size_t size, gfp_t gfp_mask)
{
	return (void *) __get_free_pages(gfp_mask|__GFP_NOWARN,
					 get_order(size)) ?:
		__vmalloc(size, gfp_mask, PAGE_KERNEL);
}

static inline void kvpfree(void *p, size_t size)
{
	if (size < PAGE_SIZE)
		kfree(p);
	else
		vpfree(p, size);
}

static inline void *kvpmalloc(size_t size, gfp_t gfp_mask)
{
	return size < PAGE_SIZE
		? kmalloc(size, gfp_mask)
		: vpmalloc(size, gfp_mask);
}

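/*
 * Example (illustrative sketch, not part of the API): allocations made with
 * kvpmalloc() must be freed with kvpfree() and the same size, since the size
 * determines whether the buffer came from kmalloc(), the page allocator or
 * vmalloc().  "struct foo", "buf" and "nr" are hypothetical names:
 *
 *	struct foo *buf = kvpmalloc(nr * sizeof(*buf), GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kvpfree(buf, nr * sizeof(*buf));
 */
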
int mempool_init_kvpmalloc_pool(mempool_t *, int, size_t);

/* Simple fixed-size heap; the ordering is determined by the cmp callback: */
#define HEAP(type)							\
struct {								\
	size_t size, used;						\
	type *data;							\
}

#define DECLARE_HEAP(type, name) HEAP(type) name

#define init_heap(heap, _size, gfp)					\
({									\
	memset((heap), 0, sizeof(*(heap)));				\
	(heap)->size = (_size);						\
	(heap)->data = kvpmalloc((heap)->size * sizeof((heap)->data[0]),\
				 (gfp));				\
})

#define free_heap(heap)							\
do {									\
	kvpfree((heap)->data, (heap)->size * sizeof((heap)->data[0]));	\
	(heap)->data = NULL;						\
} while (0)

#define heap_set_backpointer(h, i, _fn)					\
do {									\
	void (*fn)(typeof(h), size_t) = _fn;				\
	if (fn)								\
		fn(h, i);						\
} while (0)

#define heap_swap(h, i, j, set_backpointer)				\
do {									\
	swap((h)->data[i], (h)->data[j]);				\
	heap_set_backpointer(h, i, set_backpointer);			\
	heap_set_backpointer(h, j, set_backpointer);			\
} while (0)

#define heap_peek(h)							\
({									\
	EBUG_ON(!(h)->used);						\
	(h)->data[0];							\
})

#define heap_full(h)	((h)->used == (h)->size)

#define heap_sift_down(h, i, cmp, set_backpointer)			\
do {									\
	size_t _c, _j = i;						\
									\
	for (; _j * 2 + 1 < (h)->used; _j = _c) {			\
		_c = _j * 2 + 1;					\
		if (_c + 1 < (h)->used &&				\
		    cmp(h, (h)->data[_c], (h)->data[_c + 1]) >= 0)	\
			_c++;						\
									\
		if (cmp(h, (h)->data[_c], (h)->data[_j]) >= 0)		\
			break;						\
		heap_swap(h, _c, _j, set_backpointer);			\
	}								\
} while (0)

#define heap_sift_up(h, i, cmp, set_backpointer)			\
do {									\
	while (i) {							\
		size_t p = (i - 1) / 2;				\
		if (cmp(h, (h)->data[i], (h)->data[p]) >= 0)		\
			break;						\
		heap_swap(h, i, p, set_backpointer);			\
		i = p;							\
	}								\
} while (0)

#define __heap_add(h, d, cmp, set_backpointer)				\
({									\
	size_t _i = (h)->used++;					\
	(h)->data[_i] = d;						\
	heap_set_backpointer(h, _i, set_backpointer);			\
									\
	heap_sift_up(h, _i, cmp, set_backpointer);			\
	_i;								\
})

#define heap_add(h, d, cmp, set_backpointer)				\
({									\
	bool _r = !heap_full(h);					\
	if (_r)								\
		__heap_add(h, d, cmp, set_backpointer);			\
	_r;								\
})

#define heap_add_or_replace(h, new, cmp, set_backpointer)		\
do {									\
	if (!heap_add(h, new, cmp, set_backpointer) &&			\
	    cmp(h, new, heap_peek(h)) >= 0) {				\
		(h)->data[0] = new;					\
		heap_set_backpointer(h, 0, set_backpointer);		\
		heap_sift_down(h, 0, cmp, set_backpointer);		\
	}								\
} while (0)

#define heap_del(h, i, cmp, set_backpointer)				\
do {									\
	size_t _i = (i);						\
									\
	BUG_ON(_i >= (h)->used);					\
	(h)->used--;							\
	heap_swap(h, _i, (h)->used, set_backpointer);			\
	heap_sift_up(h, _i, cmp, set_backpointer);			\
	heap_sift_down(h, _i, cmp, set_backpointer);			\
} while (0)

#define heap_pop(h, d, cmp, set_backpointer)				\
({									\
	bool _r = (h)->used;						\
	if (_r) {							\
		(d) = (h)->data[0];					\
		heap_del(h, 0, cmp, set_backpointer);			\
	}								\
	_r;								\
})

#define heap_resort(heap, cmp, set_backpointer)				\
do {									\
	ssize_t _i;							\
	for (_i = (ssize_t) (heap)->used / 2 - 1; _i >= 0; --_i)	\
		heap_sift_down(heap, _i, cmp, set_backpointer);		\
} while (0)

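/*
 * Example (illustrative sketch): a heap of u64s ordered by cmp_int(), defined
 * at the bottom of this header.  The cmp callback has the form cmp(heap, l, r)
 * and the heap property maintained is cmp(child, parent) >= 0, so an
 * ascending comparison yields a min-heap.  The backpointer callback may be
 * NULL.  "my_heap", "my_cmp", "h" and "v" are hypothetical names:
 *
 *	typedef HEAP(u64) my_heap;
 *
 *	static int my_cmp(my_heap *h, u64 l, u64 r)
 *	{
 *		return cmp_int(l, r);
 *	}
 *
 *	my_heap h;
 *	u64 v;
 *
 *	init_heap(&h, 128, GFP_KERNEL);
 *	heap_add(&h, 42, my_cmp, NULL);
 *	heap_pop(&h, v, my_cmp, NULL);	// v == 42
 *	free_heap(&h);
 */
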
#define ANYSINT_MAX(t)							\
	((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)

struct printbuf {
	char		*pos;
	char		*end;
};

static inline size_t printbuf_remaining(struct printbuf *buf)
{
	return buf->end - buf->pos;
}

#define _PBUF(_buf, _len)						\
	((struct printbuf) {						\
		.pos	= _buf,						\
		.end	= _buf + _len,					\
	})

#define PBUF(_buf) _PBUF(_buf, sizeof(_buf))

#define pr_buf(_out, ...)						\
do {									\
	(_out)->pos += scnprintf((_out)->pos, printbuf_remaining(_out),	\
				 __VA_ARGS__);				\
} while (0)

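/*
 * Example (illustrative sketch): a printbuf wraps a char buffer and tracks
 * the current position, so successive pr_buf() calls append.  "buf" and "nr"
 * are hypothetical names:
 *
 *	char buf[128];
 *	struct printbuf out = PBUF(buf);
 *
 *	pr_buf(&out, "%u entries", nr);
 *	pr_buf(&out, " (%s)", "dirty");
 */
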
void bch_scnmemcpy(struct printbuf *, const char *, size_t);

int bch2_strtoint_h(const char *, int *);
int bch2_strtouint_h(const char *, unsigned int *);
int bch2_strtoll_h(const char *, long long *);
int bch2_strtoull_h(const char *, unsigned long long *);
int bch2_strtou64_h(const char *, u64 *);

static inline int bch2_strtol_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
	return bch2_strtoint_h(cp, (int *) res);
#else
	return bch2_strtoll_h(cp, (long long *) res);
#endif
}

static inline int bch2_strtoul_h(const char *cp, unsigned long *res)
{
#if BITS_PER_LONG == 32
	return bch2_strtouint_h(cp, (unsigned int *) res);
#else
	return bch2_strtoull_h(cp, (unsigned long long *) res);
#endif
}

#define strtoi_h(cp, res)						\
	( type_is(*res, int)		? bch2_strtoint_h(cp, (void *) res)\
	: type_is(*res, long)		? bch2_strtol_h(cp, (void *) res)\
	: type_is(*res, long long)	? bch2_strtoll_h(cp, (void *) res)\
	: type_is(*res, unsigned)	? bch2_strtouint_h(cp, (void *) res)\
	: type_is(*res, unsigned long)	? bch2_strtoul_h(cp, (void *) res)\
	: type_is(*res, unsigned long long) ? bch2_strtoull_h(cp, (void *) res)\
	: -EINVAL)

#define strtoul_safe(cp, var)						\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r)							\
		var = _v;						\
	_r;								\
})

#define strtoul_safe_clamp(cp, var, min, max)				\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r)							\
		var = clamp_t(typeof(var), _v, min, max);		\
	_r;								\
})

#define strtoul_safe_restrict(cp, var, min, max)			\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r && _v >= min && _v <= max)				\
		var = _v;						\
	else								\
		_r = -EINVAL;						\
	_r;								\
})

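/*
 * Example (illustrative sketch): parse a user-supplied string into a typed
 * variable.  strtoi_h() dispatches on the type of the destination;
 * strtoul_safe_clamp() only updates the variable on a successful parse,
 * clamping the result to the given range.  "buf", "val" and "nr" are
 * hypothetical names:
 *
 *	u64 val;
 *	unsigned nr;
 *
 *	if (strtoi_h(buf, &val))
 *		return -EINVAL;
 *
 *	strtoul_safe_clamp(buf, nr, 1, 128);
 */
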
#define snprint(buf, size, var)						\
	snprintf(buf, size,						\
		   type_is(var, int)		? "%i\n"		\
		 : type_is(var, unsigned)	? "%u\n"		\
		 : type_is(var, long)		? "%li\n"		\
		 : type_is(var, unsigned long)	? "%lu\n"		\
		 : type_is(var, s64)		? "%lli\n"		\
		 : type_is(var, u64)		? "%llu\n"		\
		 : type_is(var, char *)		? "%s\n"		\
		 : "%i\n", var)

void bch2_hprint(struct printbuf *, s64);

bool bch2_is_zero(const void *, size_t);

void bch2_string_opt_to_text(struct printbuf *,
			     const char * const [], size_t);

void bch2_flags_to_text(struct printbuf *, const char * const[], u64);
u64 bch2_read_flag_list(char *, const char * const[]);

#define NR_QUANTILES	15
#define QUANTILE_IDX(i)	inorder_to_eytzinger0(i, NR_QUANTILES)
#define QUANTILE_FIRST	eytzinger0_first(NR_QUANTILES)
#define QUANTILE_LAST	eytzinger0_last(NR_QUANTILES)

struct quantiles {
	struct quantile_entry {
		u64	m;
		u64	step;
	} entries[NR_QUANTILES];
};

struct time_stat_buffer {
	unsigned	nr;
	struct time_stat_buffer_entry {
		u64	start;
		u64	end;
	} entries[32];
};

struct time_stats {
	spinlock_t	lock;
	u64		count;
	/* all fields are in nanoseconds */
	u64		average_duration;
	u64		average_frequency;
	u64		max_duration;
	u64		last_event;
	struct quantiles quantiles;

	struct time_stat_buffer __percpu *buffer;
};

void __bch2_time_stats_update(struct time_stats *, u64 start, u64 end);

static inline void bch2_time_stats_update(struct time_stats *stats, u64 start)
{
	__bch2_time_stats_update(stats, start, local_clock());
}

size_t bch2_time_stats_print(struct time_stats *, char *, size_t);

void bch2_time_stats_exit(struct time_stats *);
void bch2_time_stats_init(struct time_stats *);

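/*
 * Example (illustrative sketch): time a section of code.  Capture
 * local_clock() before the work; bch2_time_stats_update() computes the
 * duration itself.  "stats" is a hypothetical struct time_stats that has been
 * through bch2_time_stats_init(), and do_work() is a placeholder:
 *
 *	u64 start = local_clock();
 *
 *	do_work();
 *	bch2_time_stats_update(&stats, start);
 */
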
#define ewma_add(ewma, val, weight)					\
({									\
	typeof(ewma) _ewma = (ewma);					\
	typeof(weight) _weight = (weight);				\
									\
	(((_ewma << _weight) - _ewma) + (val)) >> _weight;		\
})

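/*
 * Worked example: ewma_add() computes (ewma * (2^weight - 1) + val) /
 * 2^weight in integer arithmetic.  With ewma = 100, val = 200, weight = 2:
 * ((100 << 2) - 100 + 200) >> 2 = 500 >> 2 = 125, i.e. the average moves a
 * quarter of the way toward the new sample.  Note the result is returned,
 * not stored back:
 *
 *	avg = ewma_add(avg, sample, 2);
 */
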
struct bch_ratelimit {
	/* Next time we want to do some work, in nanoseconds */
	u64			next;

	/*
	 * Rate at which we want to do work, in units per nanosecond
	 * The units here correspond to the units passed to
	 * bch2_ratelimit_increment()
	 */
	unsigned		rate;
};

static inline void bch2_ratelimit_reset(struct bch_ratelimit *d)
{
	d->next = local_clock();
}

u64 bch2_ratelimit_delay(struct bch_ratelimit *);
void bch2_ratelimit_increment(struct bch_ratelimit *, u64);

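/*
 * Example (illustrative sketch): the usual pattern is to account completed
 * work with bch2_ratelimit_increment() and then sleep for whatever delay
 * bch2_ratelimit_delay() reports before doing more.  "d" and "sectors" are
 * hypothetical names:
 *
 *	bch2_ratelimit_increment(&d, sectors);
 *
 *	delay = bch2_ratelimit_delay(&d);
 *	if (delay)
 *		schedule_timeout(delay);
 */
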
struct bch_pd_controller {
	struct bch_ratelimit	rate;
	unsigned long		last_update;

	s64			last_actual;
	s64			smoothed_derivative;

	unsigned		p_term_inverse;
	unsigned		d_smooth;
	unsigned		d_term;

	/* for exporting to sysfs (no effect on behavior) */
	s64			last_derivative;
	s64			last_proportional;
	s64			last_change;
	s64			last_target;

	/* If true, the rate will not increase if bch2_ratelimit_delay()
	 * is not being called often enough. */
	bool			backpressure;
};

void bch2_pd_controller_update(struct bch_pd_controller *, s64, s64, int);
void bch2_pd_controller_init(struct bch_pd_controller *);
size_t bch2_pd_controller_print_debug(struct bch_pd_controller *, char *);

#define sysfs_pd_controller_attribute(name)				\
	rw_attribute(name##_rate);					\
	rw_attribute(name##_rate_bytes);				\
	rw_attribute(name##_rate_d_term);				\
	rw_attribute(name##_rate_p_term_inverse);			\
	read_attribute(name##_rate_debug)

#define sysfs_pd_controller_files(name)					\
	&sysfs_##name##_rate,						\
	&sysfs_##name##_rate_bytes,					\
	&sysfs_##name##_rate_d_term,					\
	&sysfs_##name##_rate_p_term_inverse,				\
	&sysfs_##name##_rate_debug

#define sysfs_pd_controller_show(name, var)				\
do {									\
	sysfs_hprint(name##_rate,	(var)->rate.rate);		\
	sysfs_print(name##_rate_bytes,	(var)->rate.rate);		\
	sysfs_print(name##_rate_d_term,	(var)->d_term);			\
	sysfs_print(name##_rate_p_term_inverse,	(var)->p_term_inverse);	\
									\
	if (attr == &sysfs_##name##_rate_debug)				\
		return bch2_pd_controller_print_debug(var, buf);	\
} while (0)

#define sysfs_pd_controller_store(name, var)				\
do {									\
	sysfs_strtoul_clamp(name##_rate,				\
			    (var)->rate.rate, 1, UINT_MAX);		\
	sysfs_strtoul_clamp(name##_rate_bytes,				\
			    (var)->rate.rate, 1, UINT_MAX);		\
	sysfs_strtoul(name##_rate_d_term,	(var)->d_term);		\
	sysfs_strtoul_clamp(name##_rate_p_term_inverse,			\
			    (var)->p_term_inverse, 1, INT_MAX);		\
} while (0)

#define container_of_or_null(ptr, type, member)				\
({									\
	typeof(ptr) _ptr = ptr;						\
	_ptr ? container_of(_ptr, type, member) : NULL;			\
})

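/*
 * Example (illustrative sketch): like container_of(), but passes a NULL
 * pointer through instead of producing a bogus address.  "struct foo", "node"
 * and "tree" are hypothetical names:
 *
 *	struct foo {
 *		struct rb_node	node;
 *	};
 *
 *	struct rb_node *n = rb_first(&tree);
 *	struct foo *f = container_of_or_null(n, struct foo, node);
 *	// f is NULL if the tree was empty
 */
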
/* Does linear interpolation between powers of two */
static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
{
	unsigned fract = x & ~(~0 << fract_bits);

	x >>= fract_bits;
	x = 1 << x;
	x += (x * fract) >> fract_bits;

	return x;
}

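/*
 * Worked example: with fract_bits = 4 the low four bits of x are a fractional
 * exponent, so x = 0x25 means 2^(2 + 5/16).  fract = 5, x >>= 4 gives 2,
 * 1 << 2 = 4, and 4 + ((4 * 5) >> 4) = 5.  Exact linear interpolation between
 * 2^2 = 4 and 2^3 = 8 would give 4 + 4 * 5/16 = 5.25; the integer math
 * truncates to 5.
 */
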
void bch2_bio_map(struct bio *, void *, size_t);
int bch2_bio_alloc_pages(struct bio *, size_t, gfp_t);

static inline sector_t bdev_sectors(struct block_device *bdev)
{
	return bdev->bd_inode->i_size >> 9;
}

#define closure_bio_submit(bio, cl)					\
do {									\
	closure_get(cl);						\
	submit_bio(bio);						\
} while (0)

#define kthread_wait_freezable(cond)					\
({									\
	int _ret = 0;							\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (kthread_should_stop()) {				\
			_ret = -1;					\
			break;						\
		}							\
									\
		if (cond)						\
			break;						\
									\
		schedule();						\
		try_to_freeze();					\
	}								\
	set_current_state(TASK_RUNNING);				\
	_ret;								\
})

size_t bch2_rand_range(size_t);

void memcpy_to_bio(struct bio *, struct bvec_iter, const void *);
void memcpy_from_bio(void *, struct bio *, struct bvec_iter);

/* memcpy()/memmove()-style helpers that copy in units of u64s: */

static inline void memcpy_u64s_small(void *dst, const void *src,
				     unsigned u64s)
{
	u64 *d = dst;
	const u64 *s = src;

	while (u64s--)
		*d++ = *s++;
}

static inline void __memcpy_u64s(void *dst, const void *src,
				 unsigned u64s)
{
#ifdef CONFIG_X86_64
	long d0, d1, d2;
	asm volatile("rep ; movsq"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (u64s), "1" (dst), "2" (src)
		     : "memory");
#else
	u64 *d = dst;
	const u64 *s = src;

	while (u64s--)
		*d++ = *s++;
#endif
}

static inline void memcpy_u64s(void *dst, const void *src,
			       unsigned u64s)
{
	EBUG_ON(!(dst >= src + u64s * sizeof(u64) ||
		  dst + u64s * sizeof(u64) <= src));

	__memcpy_u64s(dst, src, u64s);
}

static inline void __memmove_u64s_down(void *dst, const void *src,
				       unsigned u64s)
{
	__memcpy_u64s(dst, src, u64s);
}

static inline void memmove_u64s_down(void *dst, const void *src,
				     unsigned u64s)
{
	EBUG_ON(dst > src);

	__memmove_u64s_down(dst, src, u64s);
}

static inline void __memmove_u64s_up_small(void *_dst, const void *_src,
					   unsigned u64s)
{
	u64 *dst = (u64 *) _dst + u64s;
	u64 *src = (u64 *) _src + u64s;

	while (u64s--)
		*--dst = *--src;
}

static inline void memmove_u64s_up_small(void *dst, const void *src,
					 unsigned u64s)
{
	EBUG_ON(dst < src);

	__memmove_u64s_up_small(dst, src, u64s);
}

static inline void __memmove_u64s_up(void *_dst, const void *_src,
				     unsigned u64s)
{
	u64 *dst = (u64 *) _dst + u64s - 1;
	u64 *src = (u64 *) _src + u64s - 1;

#ifdef CONFIG_X86_64
	long d0, d1, d2;
	asm volatile("std ;\n"
		     "rep ; movsq\n"
		     "cld ;\n"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (u64s), "1" (dst), "2" (src)
		     : "memory");
#else
	while (u64s--)
		*dst-- = *src--;
#endif
}

static inline void memmove_u64s_up(void *dst, const void *src,
				   unsigned u64s)
{
	EBUG_ON(dst < src);

	__memmove_u64s_up(dst, src, u64s);
}

static inline void memmove_u64s(void *dst, const void *src,
				unsigned u64s)
{
	if (dst < src)
		__memmove_u64s_down(dst, src, u64s);
	else
		__memmove_u64s_up(dst, src, u64s);
}

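/*
 * Example (illustrative sketch): memmove_u64s() picks the copy direction so
 * overlapping ranges are handled correctly, like memmove().  "buf" is a
 * hypothetical name:
 *
 *	u64 buf[8];
 *
 *	// shift elements 1..7 down on top of element 0:
 *	memmove_u64s(buf, buf + 1, 7);
 */
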
/* Set the last few bytes up to a u64 boundary given an offset into a buffer. */
static inline void memset_u64s_tail(void *s, int c, unsigned bytes)
{
	unsigned rem = round_up(bytes, sizeof(u64)) - bytes;

	memset(s + bytes, c, rem);
}

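/*
 * Worked example: with bytes = 5, round_up(5, 8) - 5 = 3, so the three bytes
 * from offset 5 up to the next u64 boundary at offset 8 are set to c.
 */
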
void sort_cmp_size(void *base, size_t num, size_t size,
		   int (*cmp_func)(const void *, const void *, size_t),
		   void (*swap_func)(void *, void *, size_t));

/* just the memmove, doesn't update @_nr */
#define __array_insert_item(_array, _nr, _pos)				\
	memmove(&(_array)[(_pos) + 1],					\
		&(_array)[(_pos)],					\
		sizeof((_array)[0]) * ((_nr) - (_pos)))

#define array_insert_item(_array, _nr, _pos, _new_item)			\
do {									\
	__array_insert_item(_array, _nr, _pos);				\
	(_nr)++;							\
	(_array)[(_pos)] = (_new_item);					\
} while (0)

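/*
 * Example (illustrative sketch): insert 99 at index 1 of a plain array,
 * keeping the element count in sync.  The caller is responsible for capacity.
 * "vals" and "nr" are hypothetical names:
 *
 *	int vals[8] = { 1, 2, 3 };
 *	unsigned nr = 3;
 *
 *	array_insert_item(vals, nr, 1, 99);
 *	// vals is now { 1, 99, 2, 3 }, nr == 4
 */
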
#define array_remove_items(_array, _nr, _pos, _nr_to_remove)		\
do {									\
	(_nr) -= (_nr_to_remove);					\
	memmove(&(_array)[(_pos)],					\
		&(_array)[(_pos) + (_nr_to_remove)],			\
		sizeof((_array)[0]) * ((_nr) - (_pos)));		\
} while (0)

#define array_remove_item(_array, _nr, _pos)				\
	array_remove_items(_array, _nr, _pos, 1)

#define bubble_sort(_base, _nr, _cmp)					\
do {									\
	ssize_t _i, _end;						\
	bool _swapped = true;						\
									\
	for (_end = (ssize_t) (_nr) - 1; _end > 0 && _swapped; --_end) {\
		_swapped = false;					\
		for (_i = 0; _i < _end; _i++)				\
			if (_cmp((_base)[_i], (_base)[_i + 1]) > 0) {	\
				swap((_base)[_i], (_base)[_i + 1]);	\
				_swapped = true;			\
			}						\
	}								\
} while (0)

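/*
 * Example (illustrative sketch): bubble_sort() takes the comparison over
 * elements (not pointers), so cmp_int() from the bottom of this header works
 * directly.  "vals" is a hypothetical name:
 *
 *	u64 vals[4] = { 3, 1, 4, 1 };
 *
 *	bubble_sort(vals, 4, cmp_int);
 *	// vals is now { 1, 1, 3, 4 }
 */
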
static inline u64 percpu_u64_get(u64 __percpu *src)
{
	u64 ret = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		ret += *per_cpu_ptr(src, cpu);
	return ret;
}

static inline void percpu_u64_set(u64 __percpu *dst, u64 src)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(dst, cpu) = 0;

	preempt_disable();
	*this_cpu_ptr(dst) = src;
	preempt_enable();
}

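/*
 * Example (illustrative sketch): percpu_u64_get() reads a percpu counter by
 * summing every CPU's slot; percpu_u64_set() zeroes all slots and stores the
 * value in the current CPU's, so a subsequent get returns it.  "counter" and
 * "v" are hypothetical names:
 *
 *	u64 __percpu *counter = alloc_percpu(u64);
 *
 *	this_cpu_add(*counter, 10);
 *	v = percpu_u64_get(counter);	// sum across all CPUs
 */
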
static inline void acc_u64s(u64 *acc, const u64 *src, unsigned nr)
{
	unsigned i;

	for (i = 0; i < nr; i++)
		acc[i] += src[i];
}

static inline void acc_u64s_percpu(u64 *acc, const u64 __percpu *src,
				   unsigned nr)
{
	int cpu;

	for_each_possible_cpu(cpu)
		acc_u64s(acc, per_cpu_ptr(src, cpu), nr);
}

static inline void percpu_memset(void __percpu *p, int c, size_t bytes)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memset(per_cpu_ptr(p, cpu), c, bytes);
}

u64 *bch2_acc_percpu_u64s(u64 __percpu *, unsigned);

#define cmp_int(l, r)		((l > r) - (l < r))

#endif /* _BCACHEFS_UTIL_H */