Move c_src dirs back to toplevel

diff --git a/libbcachefs/util.c b/libbcachefs/util.c
index bb8a495e2290a847d222e9cc0f91d87830bc7bea..c2ef7cddaa4fcb0e9de9df263aadd019cc7a4965 100644
--- a/libbcachefs/util.c
+++ b/libbcachefs/util.c
@@ -22,9 +22,9 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/sched/clock.h>
-#include <linux/mean_and_variance.h>
 
 #include "eytzinger.h"
+#include "mean_and_variance.h"
 #include "util.h"
 
 static const char si_units[] = "?kMGTPEZY";
@@ -112,10 +112,10 @@ got_unit:
 
 #define parse_or_ret(cp, _f)                   \
 do {                                           \
-       int ret = _f;                           \
-       if (ret < 0)                            \
-               return ret;                     \
-       cp += ret;                              \
+       int _ret = _f;                          \
+       if (_ret < 0)                           \
+               return _ret;                    \
+       cp += _ret;                             \
 } while (0)
 
 static int __bch2_strtou64_h(const char *cp, u64 *res)
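A note on the macro above: parse_or_ret() runs a sub-parser, propagates a negative return as the error of the whole function, and otherwise advances the cursor by the number of characters the sub-parser consumed; the underscore-prefixed temporary is ordinary macro hygiene, keeping it from colliding with a "ret" in the caller. A hedged sketch of how such a parser is put together (the sub-parser names are illustrative, not the real body of __bch2_strtou64_h()):

	static int example_strtou64_h(const char *cp, u64 *res)
	{
		const char *start = cp;
		u64 v = 0, mult = 1;

		parse_or_ret(cp, parse_digits(cp, &v));		/* illustrative sub-parser */
		parse_or_ret(cp, parse_si_suffix(cp, &mult));	/* illustrative sub-parser */

		*res = v * mult;
		return cp - start;	/* characters consumed, for the caller's own parse_or_ret() */
	}
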
@@ -216,6 +216,7 @@ u64 bch2_read_flag_list(char *opt, const char * const list[])
 
        while ((p = strsep(&s, ","))) {
                int flag = match_string(list, -1, p);
+
                if (flag < 0) {
                        ret = -1;
                        break;
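bch2_read_flag_list() walks a comma-separated option string with strsep(), matches each token against a NULL-terminated name table, and bails out with -1 (all ones as a u64) on the first unknown token. A hedged usage sketch, assuming each matched token sets the bit at its table position (names and option string are illustrative):

	static const char * const example_flags[] = { "verbose", "degraded", "discard", NULL };
	char opt[] = "verbose,discard";

	u64 mask = bch2_read_flag_list(opt, example_flags);
	/* mask == BIT(0)|BIT(2); an unknown token would yield (u64) -1 */
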
@@ -240,36 +241,6 @@ bool bch2_is_zero(const void *_p, size_t n)
        return true;
 }
 
-static void bch2_quantiles_update(struct quantiles *q, u64 v)
-{
-       unsigned i = 0;
-
-       while (i < ARRAY_SIZE(q->entries)) {
-               struct quantile_entry *e = q->entries + i;
-
-               if (unlikely(!e->step)) {
-                       e->m = v;
-                       e->step = max_t(unsigned, v / 2, 1024);
-               } else if (e->m > v) {
-                       e->m = e->m >= e->step
-                               ? e->m - e->step
-                               : 0;
-               } else if (e->m < v) {
-                       e->m = e->m + e->step > e->m
-                               ? e->m + e->step
-                               : U32_MAX;
-               }
-
-               if ((e->m > v ? e->m - v : v - e->m) < e->step)
-                       e->step = max_t(unsigned, e->step / 2, 1);
-
-               if (v >= e->m)
-                       break;
-
-               i = eytzinger0_child(i, v > e->m);
-       }
-}
-
 void bch2_prt_u64_binary(struct printbuf *out, u64 v, unsigned nr_bits)
 {
        while (nr_bits)
@@ -292,85 +263,193 @@ void bch2_print_string_as_lines(const char *prefix, const char *lines)
                if (!*p)
                        break;
                lines = p + 1;
-               prefix = KERN_CONT;
        }
        console_unlock();
 }
 
-int bch2_prt_backtrace(struct printbuf *out, struct task_struct *task)
+int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *task, unsigned skipnr)
 {
-       unsigned long entries[32];
-       unsigned i, nr_entries;
-       int ret;
+#ifdef CONFIG_STACKTRACE
+       unsigned nr_entries = 0;
+       int ret = 0;
 
-       ret = down_read_killable(&task->signal->exec_update_lock);
+       stack->nr = 0;
+       ret = darray_make_room(stack, 32);
        if (ret)
                return ret;
 
-       nr_entries = stack_trace_save_tsk(task, entries, ARRAY_SIZE(entries), 0);
-       for (i = 0; i < nr_entries; i++) {
-               prt_printf(out, "[<0>] %pB", (void *)entries[i]);
-               prt_newline(out);
-       }
+       if (!down_read_trylock(&task->signal->exec_update_lock))
+               return -1;
+
+       do {
+               nr_entries = stack_trace_save_tsk(task, stack->data, stack->size, skipnr + 1);
+       } while (nr_entries == stack->size &&
+                !(ret = darray_make_room(stack, stack->size * 2)));
 
+       stack->nr = nr_entries;
        up_read(&task->signal->exec_update_lock);
+
+       return ret;
+#else
        return 0;
+#endif
+}
+
+void bch2_prt_backtrace(struct printbuf *out, bch_stacktrace *stack)
+{
+       darray_for_each(*stack, i) {
+               prt_printf(out, "[<0>] %pB", (void *) *i);
+               prt_newline(out);
+       }
+}
+
+int bch2_prt_task_backtrace(struct printbuf *out, struct task_struct *task, unsigned skipnr)
+{
+       bch_stacktrace stack = { 0 };
+       int ret = bch2_save_backtrace(&stack, task, skipnr + 1);
+
+       bch2_prt_backtrace(out, &stack);
+       darray_exit(&stack);
+       return ret;
+}
+
+#ifndef __KERNEL__
+#include <time.h>
+void bch2_prt_datetime(struct printbuf *out, time64_t sec)
+{
+       time_t t = sec;
+       char buf[64];
+       ctime_r(&t, buf);
+       strim(buf);
+       prt_str(out, buf);
+}
+#else
+void bch2_prt_datetime(struct printbuf *out, time64_t sec)
+{
+       char buf[64];
+       snprintf(buf, sizeof(buf), "%ptT", &sec);
+       prt_u64(out, sec);
+}
+#endif
+
+static const struct time_unit {
+       const char      *name;
+       u64             nsecs;
+} time_units[] = {
+       { "ns",         1                },
+       { "us",         NSEC_PER_USEC    },
+       { "ms",         NSEC_PER_MSEC    },
+       { "s",          NSEC_PER_SEC     },
+       { "m",          (u64) NSEC_PER_SEC * 60},
+       { "h",          (u64) NSEC_PER_SEC * 3600},
+       { "eon",        U64_MAX          },
+};
+
+static const struct time_unit *pick_time_units(u64 ns)
+{
+       const struct time_unit *u;
+
+       for (u = time_units;
+            u + 1 < time_units + ARRAY_SIZE(time_units) &&
+            ns >= u[1].nsecs << 1;
+            u++)
+               ;
+
+       return u;
+}
+
+void bch2_pr_time_units(struct printbuf *out, u64 ns)
+{
+       const struct time_unit *u = pick_time_units(ns);
+
+       prt_printf(out, "%llu %s", div_u64(ns, u->nsecs), u->name);
 }
 
 /* time stats: */
 
-static inline void bch2_time_stats_update_one(struct time_stats *stats,
+#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
+static void bch2_quantiles_update(struct bch2_quantiles *q, u64 v)
+{
+       unsigned i = 0;
+
+       while (i < ARRAY_SIZE(q->entries)) {
+               struct bch2_quantile_entry *e = q->entries + i;
+
+               if (unlikely(!e->step)) {
+                       e->m = v;
+                       e->step = max_t(unsigned, v / 2, 1024);
+               } else if (e->m > v) {
+                       e->m = e->m >= e->step
+                               ? e->m - e->step
+                               : 0;
+               } else if (e->m < v) {
+                       e->m = e->m + e->step > e->m
+                               ? e->m + e->step
+                               : U32_MAX;
+               }
+
+               if ((e->m > v ? e->m - v : v - e->m) < e->step)
+                       e->step = max_t(unsigned, e->step / 2, 1);
+
+               if (v >= e->m)
+                       break;
+
+               i = eytzinger0_child(i, v > e->m);
+       }
+}
+
+static inline void bch2_time_stats_update_one(struct bch2_time_stats *stats,
                                              u64 start, u64 end)
 {
        u64 duration, freq;
 
        if (time_after64(end, start)) {
                duration = end - start;
-               stats->duration_stats = mean_and_variance_update_inlined(stats->duration_stats,
-                                                                duration);
-               stats->duration_stats_weighted = mean_and_variance_weighted_update(
-                       stats->duration_stats_weighted,
-                       duration);
+               mean_and_variance_update(&stats->duration_stats, duration);
+               mean_and_variance_weighted_update(&stats->duration_stats_weighted, duration);
                stats->max_duration = max(stats->max_duration, duration);
                stats->min_duration = min(stats->min_duration, duration);
+               stats->total_duration += duration;
                bch2_quantiles_update(&stats->quantiles, duration);
        }
 
        if (time_after64(end, stats->last_event)) {
                freq = end - stats->last_event;
-               stats->freq_stats = mean_and_variance_update_inlined(stats->freq_stats, freq);
-               stats->freq_stats_weighted = mean_and_variance_weighted_update(
-                       stats->freq_stats_weighted,
-                       freq);
+               mean_and_variance_update(&stats->freq_stats, freq);
+               mean_and_variance_weighted_update(&stats->freq_stats_weighted, freq);
                stats->max_freq = max(stats->max_freq, freq);
                stats->min_freq = min(stats->min_freq, freq);
                stats->last_event = end;
        }
 }
 
-static noinline void bch2_time_stats_clear_buffer(struct time_stats *stats,
-                                                 struct time_stat_buffer *b)
+static void __bch2_time_stats_clear_buffer(struct bch2_time_stats *stats,
+                                          struct bch2_time_stat_buffer *b)
 {
-       struct time_stat_buffer_entry *i;
-       unsigned long flags;
-
-       spin_lock_irqsave(&stats->lock, flags);
-       for (i = b->entries;
+       for (struct bch2_time_stat_buffer_entry *i = b->entries;
             i < b->entries + ARRAY_SIZE(b->entries);
             i++)
                bch2_time_stats_update_one(stats, i->start, i->end);
-       spin_unlock_irqrestore(&stats->lock, flags);
-
        b->nr = 0;
 }
 
-void __bch2_time_stats_update(struct time_stats *stats, u64 start, u64 end)
+static noinline void bch2_time_stats_clear_buffer(struct bch2_time_stats *stats,
+                                                 struct bch2_time_stat_buffer *b)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&stats->lock, flags);
+       __bch2_time_stats_clear_buffer(stats, b);
+       spin_unlock_irqrestore(&stats->lock, flags);
+}
+
+void __bch2_time_stats_update(struct bch2_time_stats *stats, u64 start, u64 end)
 {
        unsigned long flags;
 
-       WARN_RATELIMIT(!stats->min_duration || !stats->min_freq,
-                      "time_stats: min_duration = %llu, min_freq = %llu",
-                      stats->min_duration, stats->min_freq);
+       WARN_ONCE(!stats->duration_stats_weighted.weight ||
+                 !stats->freq_stats_weighted.weight,
+                 "uninitialized time_stats");
 
        if (!stats->buffer) {
                spin_lock_irqsave(&stats->lock, flags);
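The new backtrace helpers at the top of this hunk separate capturing a trace from printing it: bch2_save_backtrace() fills a growable darray (doubling it while stack_trace_save_tsk() keeps filling it completely), bch2_prt_backtrace() renders the saved addresses, and bch2_prt_task_backtrace() combines the two. A hedged usage sketch for dumping the current task's stack (PRINTBUF and printbuf_exit() are assumed to be the usual bcachefs printbuf idioms):

	struct printbuf buf = PRINTBUF;

	bch2_prt_task_backtrace(&buf, current, 0);
	bch2_print_string_as_lines(KERN_ERR, buf.buf);
	printbuf_exit(&buf);
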
@@ -379,17 +458,17 @@ void __bch2_time_stats_update(struct time_stats *stats, u64 start, u64 end)
                if (mean_and_variance_weighted_get_mean(stats->freq_stats_weighted) < 32 &&
                    stats->duration_stats.n > 1024)
                        stats->buffer =
-                               alloc_percpu_gfp(struct time_stat_buffer,
+                               alloc_percpu_gfp(struct bch2_time_stat_buffer,
                                                 GFP_ATOMIC);
                spin_unlock_irqrestore(&stats->lock, flags);
        } else {
-               struct time_stat_buffer *b;
+               struct bch2_time_stat_buffer *b;
 
                preempt_disable();
                b = this_cpu_ptr(stats->buffer);
 
                BUG_ON(b->nr >= ARRAY_SIZE(b->entries));
-               b->entries[b->nr++] = (struct time_stat_buffer_entry) {
+               b->entries[b->nr++] = (struct bch2_time_stat_buffer_entry) {
                        .start = start,
                        .end = end
                };
@@ -400,33 +479,7 @@ void __bch2_time_stats_update(struct time_stats *stats, u64 start, u64 end)
        }
 }
 
-static const struct time_unit {
-       const char      *name;
-       u64             nsecs;
-} time_units[] = {
-       { "ns",         1                },
-       { "us",         NSEC_PER_USEC    },
-       { "ms",         NSEC_PER_MSEC    },
-       { "s",          NSEC_PER_SEC     },
-       { "m",          NSEC_PER_SEC * 60},
-       { "h",          NSEC_PER_SEC * 3600},
-       { "eon",        U64_MAX          },
-};
-
-static const struct time_unit *pick_time_units(u64 ns)
-{
-       const struct time_unit *u;
-
-       for (u = time_units;
-            u + 1 < time_units + ARRAY_SIZE(time_units) &&
-            ns >= u[1].nsecs << 1;
-            u++)
-               ;
-
-       return u;
-}
-
-static void pr_time_units(struct printbuf *out, u64 ns)
+static void bch2_pr_time_units_aligned(struct printbuf *out, u64 ns)
 {
        const struct time_unit *u = pick_time_units(ns);
 
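For reference, pick_time_units() only promotes to a coarser unit once the value is at least twice that unit, so small multiples stay in the finer unit. Worked examples against the table above:

	/*
	 *   bch2_pr_time_units(out, 1500);                        -> "1500 ns"
	 *   bch2_pr_time_units(out, 2500);                        -> "2 us"
	 *   bch2_pr_time_units(out, 90ULL * NSEC_PER_SEC);        -> "90 s"
	 *   bch2_pr_time_units(out, 3 * 3600ULL * NSEC_PER_SEC);  -> "3 h"
	 */
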
@@ -435,22 +488,32 @@ static void pr_time_units(struct printbuf *out, u64 ns)
        prt_printf(out, "%s", u->name);
 }
 
-#define TABSTOP_SIZE 12
-
 static inline void pr_name_and_units(struct printbuf *out, const char *name, u64 ns)
 {
        prt_str(out, name);
        prt_tab(out);
-       pr_time_units(out, ns);
+       bch2_pr_time_units_aligned(out, ns);
        prt_newline(out);
 }
 
-void bch2_time_stats_to_text(struct printbuf *out, struct time_stats *stats)
+#define TABSTOP_SIZE 12
+
+void bch2_time_stats_to_text(struct printbuf *out, struct bch2_time_stats *stats)
 {
        const struct time_unit *u;
        s64 f_mean = 0, d_mean = 0;
        u64 q, last_q = 0, f_stddev = 0, d_stddev = 0;
        int i;
+
+       if (stats->buffer) {
+               int cpu;
+
+               spin_lock_irq(&stats->lock);
+               for_each_possible_cpu(cpu)
+                       __bch2_time_stats_clear_buffer(stats, per_cpu_ptr(stats->buffer, cpu));
+               spin_unlock_irq(&stats->lock);
+       }
+
        /*
         * avoid divide by zero
         */
@@ -496,19 +559,20 @@ void bch2_time_stats_to_text(struct printbuf *out, struct time_stats *stats)
 
        pr_name_and_units(out, "min:", stats->min_duration);
        pr_name_and_units(out, "max:", stats->max_duration);
+       pr_name_and_units(out, "total:", stats->total_duration);
 
        prt_printf(out, "mean:");
        prt_tab(out);
-       pr_time_units(out, d_mean);
+       bch2_pr_time_units_aligned(out, d_mean);
        prt_tab(out);
-       pr_time_units(out, mean_and_variance_weighted_get_mean(stats->duration_stats_weighted));
+       bch2_pr_time_units_aligned(out, mean_and_variance_weighted_get_mean(stats->duration_stats_weighted));
        prt_newline(out);
 
        prt_printf(out, "stddev:");
        prt_tab(out);
-       pr_time_units(out, d_stddev);
+       bch2_pr_time_units_aligned(out, d_stddev);
        prt_tab(out);
-       pr_time_units(out, mean_and_variance_weighted_get_stddev(stats->duration_stats_weighted));
+       bch2_pr_time_units_aligned(out, mean_and_variance_weighted_get_stddev(stats->duration_stats_weighted));
 
        printbuf_indent_sub(out, 2);
        prt_newline(out);
@@ -522,16 +586,16 @@ void bch2_time_stats_to_text(struct printbuf *out, struct time_stats *stats)
 
        prt_printf(out, "mean:");
        prt_tab(out);
-       pr_time_units(out, f_mean);
+       bch2_pr_time_units_aligned(out, f_mean);
        prt_tab(out);
-       pr_time_units(out, mean_and_variance_weighted_get_mean(stats->freq_stats_weighted));
+       bch2_pr_time_units_aligned(out, mean_and_variance_weighted_get_mean(stats->freq_stats_weighted));
        prt_newline(out);
 
        prt_printf(out, "stddev:");
        prt_tab(out);
-       pr_time_units(out, f_stddev);
+       bch2_pr_time_units_aligned(out, f_stddev);
        prt_tab(out);
-       pr_time_units(out, mean_and_variance_weighted_get_stddev(stats->freq_stats_weighted));
+       bch2_pr_time_units_aligned(out, mean_and_variance_weighted_get_stddev(stats->freq_stats_weighted));
 
        printbuf_indent_sub(out, 2);
        prt_newline(out);
@@ -553,17 +617,20 @@ void bch2_time_stats_to_text(struct printbuf *out, struct time_stats *stats)
                last_q = q;
        }
 }
+#else
+void bch2_time_stats_to_text(struct printbuf *out, struct bch2_time_stats *stats) {}
+#endif
 
-void bch2_time_stats_exit(struct time_stats *stats)
+void bch2_time_stats_exit(struct bch2_time_stats *stats)
 {
        free_percpu(stats->buffer);
 }
 
-void bch2_time_stats_init(struct time_stats *stats)
+void bch2_time_stats_init(struct bch2_time_stats *stats)
 {
        memset(stats, 0, sizeof(*stats));
-       stats->duration_stats_weighted.w = 8;
-       stats->freq_stats_weighted.w = 8;
+       stats->duration_stats_weighted.weight = 8;
+       stats->freq_stats_weighted.weight = 8;
        stats->min_duration = U64_MAX;
        stats->min_freq = U64_MAX;
        spin_lock_init(&stats->lock);
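Putting the bch2_time_stats API together: initialize once, feed it (start, end) pairs as operations complete, render with bch2_time_stats_to_text(), and free the optional percpu buffer with bch2_time_stats_exit(). A hedged usage sketch; local_clock() as the timestamp source matches the <linux/sched/clock.h> include above, but the surrounding helper is illustrative:

	struct bch2_time_stats st;
	struct printbuf buf = PRINTBUF;

	bch2_time_stats_init(&st);

	u64 start = local_clock();
	do_one_operation();			/* illustrative */
	__bch2_time_stats_update(&st, start, local_clock());

	bch2_time_stats_to_text(&buf, &st);	/* duration/frequency means, stddev, quantiles */
	printk("%s", buf.buf);

	printbuf_exit(&buf);
	bch2_time_stats_exit(&st);
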
@@ -573,11 +640,9 @@ void bch2_time_stats_init(struct time_stats *stats)
 
 /**
  * bch2_ratelimit_delay() - return how long to delay until the next time to do
- * some work
- *
- * @d - the struct bch_ratelimit to update
- *
- * Returns the amount of time to delay by, in jiffies
+ *             some work
+ * @d:         the struct bch_ratelimit to update
+ * Returns:    the amount of time to delay by, in jiffies
  */
 u64 bch2_ratelimit_delay(struct bch_ratelimit *d)
 {
@@ -590,9 +655,8 @@ u64 bch2_ratelimit_delay(struct bch_ratelimit *d)
 
 /**
  * bch2_ratelimit_increment() - increment @d by the amount of work done
- *
- * @d - the struct bch_ratelimit to update
- * @done - the amount of work done, in arbitrary units
+ * @d:         the struct bch_ratelimit to update
+ * @done:      the amount of work done, in arbitrary units
  */
 void bch2_ratelimit_increment(struct bch_ratelimit *d, u64 done)
 {
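The two ratelimit calls pair up in a throttled work loop: ask how many jiffies to wait, sleep that long, do a unit of work, then credit the amount done so the next delay reflects it. A hedged sketch of a caller (the work helpers and the pre-initialized struct bch_ratelimit d are illustrative):

	while (have_work()) {				/* illustrative */
		u64 delay = bch2_ratelimit_delay(&d);

		if (delay)
			schedule_timeout_uninterruptible(delay);

		bch2_ratelimit_increment(&d, do_one_unit_of_work());
	}
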
@@ -732,7 +796,7 @@ void bch2_bio_map(struct bio *bio, void *base, size_t size)
 int bch2_bio_alloc_pages(struct bio *bio, size_t size, gfp_t gfp_mask)
 {
        while (size) {
-               struct page *page = alloc_page(gfp_mask);
+               struct page *page = alloc_pages(gfp_mask, 0);
                unsigned len = min_t(size_t, PAGE_SIZE, size);
 
                if (!page)
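bch2_bio_alloc_pages() backs a bio with order-0 pages, PAGE_SIZE at a time, until size bytes are covered. A hedged usage sketch; it assumes the modern bio_alloc() signature and that the function returns -ENOMEM when a page allocation fails:

	struct bio *bio = bio_alloc(bdev, DIV_ROUND_UP(size, PAGE_SIZE),
				    REQ_OP_READ, GFP_KERNEL);
	int ret = bch2_bio_alloc_pages(bio, size, GFP_KERNEL);

	if (ret) {			/* assumed to be -ENOMEM */
		bio_put(bio);
		return ret;
	}
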
@@ -770,9 +834,10 @@ void memcpy_to_bio(struct bio *dst, struct bvec_iter dst_iter, const void *src)
        struct bvec_iter iter;
 
        __bio_for_each_segment(bv, dst, iter, dst_iter) {
-               void *dstp = kmap_atomic(bv.bv_page);
+               void *dstp = kmap_local_page(bv.bv_page);
+
                memcpy(dstp + bv.bv_offset, src, bv.bv_len);
-               kunmap_atomic(dstp);
+               kunmap_local(dstp);
 
                src += bv.bv_len;
        }
@@ -784,9 +849,10 @@ void memcpy_from_bio(void *dst, struct bio *src, struct bvec_iter src_iter)
        struct bvec_iter iter;
 
        __bio_for_each_segment(bv, src, iter, src_iter) {
-               void *srcp = kmap_atomic(bv.bv_page);
+               void *srcp = kmap_local_page(bv.bv_page);
+
                memcpy(dst, srcp + bv.bv_offset, bv.bv_len);
-               kunmap_atomic(srcp);
+               kunmap_local(srcp);
 
                dst += bv.bv_len;
        }
@@ -1108,3 +1174,37 @@ u64 *bch2_acc_percpu_u64s(u64 __percpu *p, unsigned nr)
 
        return ret;
 }
+
+void bch2_darray_str_exit(darray_str *d)
+{
+       darray_for_each(*d, i)
+               kfree(*i);
+       darray_exit(d);
+}
+
+int bch2_split_devs(const char *_dev_name, darray_str *ret)
+{
+       darray_init(ret);
+
+       char *dev_name = kstrdup(_dev_name, GFP_KERNEL), *s = dev_name;
+       if (!dev_name)
+               return -ENOMEM;
+
+       while ((s = strsep(&dev_name, ":"))) {
+               char *p = kstrdup(s, GFP_KERNEL);
+               if (!p)
+                       goto err;
+
+               if (darray_push(ret, p)) {
+                       kfree(p);
+                       goto err;
+               }
+       }
+
+       kfree(dev_name);
+       return 0;
+err:
+       bch2_darray_str_exit(ret);
+       kfree(dev_name);
+       return -ENOMEM;
+}
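
bch2_split_devs() splits a colon-separated device list into a darray of individually allocated strings; the caller frees them with bch2_darray_str_exit(). A hedged usage sketch (the device string is illustrative):

	darray_str devs;

	if (!bch2_split_devs("/dev/sda:/dev/sdb", &devs)) {
		darray_for_each(devs, i)
			pr_info("device: %s\n", *i);
		bch2_darray_str_exit(&devs);
	}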