// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#ifndef NO_BCACHEFS_SYSFS

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "sysfs.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "ec.h"
#include "inode.h"
#include "journal.h"
#include "keylist.h"
#include "move.h"
#include "opts.h"
#include "rebalance.h"
#include "replicas.h"
#include "super-io.h"
#include "tests.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

#include "util.h"

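/*
 * SYSFS_OPS(type) emits the struct sysfs_ops for a kobject type, wiring
 * .show/.store to the type##_show()/type##_store() handlers generated by the
 * SHOW()/STORE() macros below.
 */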
#define SYSFS_OPS(type)                                                 \
const struct sysfs_ops type ## _sysfs_ops = {                                   \
        .show   = type ## _show,                                        \
        .store  = type ## _store                                        \
}

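/*
 * SHOW(fn) opens the definition of fn##_to_text(); the caller supplies the
 * body immediately after the macro. The generated fn##_show() wrapper renders
 * the attribute into a heap-allocated printbuf, appends a trailing newline if
 * one is missing, turns an allocation failure into -ENOMEM, and copies at
 * most PAGE_SIZE - 1 bytes into the sysfs buffer.
 */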
#define SHOW(fn)                                                        \
static ssize_t fn ## _to_text(struct printbuf *,                        \
                              struct kobject *, struct attribute *);\
                                                                        \
static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
                           char *buf)                                   \
{                                                                       \
        struct printbuf out = PRINTBUF;                                 \
        ssize_t ret = fn ## _to_text(&out, kobj, attr);                 \
                                                                        \
        if (out.pos && out.buf[out.pos - 1] != '\n')                    \
                pr_newline(&out);                                       \
                                                                        \
        if (!ret && out.allocation_failure)                             \
                ret = -ENOMEM;                                          \
                                                                        \
        if (!ret) {                                                     \
                ret = min_t(size_t, out.pos, PAGE_SIZE - 1);            \
                memcpy(buf, out.buf, ret);                              \
        }                                                               \
        printbuf_exit(&out);                                            \
        return ret;                                                     \
}                                                                       \
                                                                        \
static ssize_t fn ## _to_text(struct printbuf *out, struct kobject *kobj,\
                              struct attribute *attr)

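/*
 * STORE(fn) opens the definition of fn##_store(); the body follows the macro
 * and returns either the number of bytes consumed or a negative errno.
 */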
#define STORE(fn)                                                       \
static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
                            const char *buf, size_t size)               \

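/*
 * Attribute declarations: each of these defines a static struct attribute
 * named sysfs_<name> with the given mode. The show/store handlers dispatch by
 * comparing the attr pointer they are passed against these objects.
 */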
#define __sysfs_attribute(_name, _mode)                                 \
        static struct attribute sysfs_##_name =                         \
                { .name = #_name, .mode = _mode }

#define write_attribute(n)      __sysfs_attribute(n, S_IWUSR)
#define read_attribute(n)       __sysfs_attribute(n, S_IRUGO)
#define rw_attribute(n)         __sysfs_attribute(n, S_IRUGO|S_IWUSR)

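/*
 * Output helpers for show handlers: each emits only when the handler was
 * invoked for the matching attribute. sysfs_hprint() prints the value in
 * human-readable units; the var_*() variants pass var(_var), which the caller
 * is expected to define to resolve the field of the same name.
 */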
#define sysfs_printf(file, fmt, ...)                                    \
do {                                                                    \
        if (attr == &sysfs_ ## file)                                    \
                pr_buf(out, fmt "\n", __VA_ARGS__);                     \
} while (0)

#define sysfs_print(file, var)                                          \
do {                                                                    \
        if (attr == &sysfs_ ## file)                                    \
                snprint(out, var);                                      \
} while (0)

#define sysfs_hprint(file, val)                                         \
do {                                                                    \
        if (attr == &sysfs_ ## file)                                    \
                bch2_hprint(out, val);                                  \
} while (0)

#define var_printf(_var, fmt)   sysfs_printf(_var, fmt, var(_var))
#define var_print(_var)         sysfs_print(_var, var(_var))
#define var_hprint(_var)        sysfs_hprint(_var, var(_var))

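/*
 * Input helpers for store handlers: the sysfs_*() macros return from the
 * enclosing store handler, either with a parse error or with the full write
 * size; the *_or_return() statement expressions likewise return early on
 * failure. strtoi_h()/sysfs_hatoi() accept human-readable size suffixes.
 */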
#define sysfs_strtoul(file, var)                                        \
do {                                                                    \
        if (attr == &sysfs_ ## file)                                    \
                return strtoul_safe(buf, var) ?: (ssize_t) size;        \
} while (0)

#define sysfs_strtoul_clamp(file, var, min, max)                        \
do {                                                                    \
        if (attr == &sysfs_ ## file)                                    \
                return strtoul_safe_clamp(buf, var, min, max)           \
                        ?: (ssize_t) size;                              \
} while (0)

#define strtoul_or_return(cp)                                           \
({                                                                      \
        unsigned long _v;                                               \
        int _r = kstrtoul(cp, 10, &_v);                                 \
        if (_r)                                                         \
                return _r;                                              \
        _v;                                                             \
})

#define strtoul_restrict_or_return(cp, min, max)                        \
({                                                                      \
        unsigned long __v = 0;                                          \
        int _r = strtoul_safe_restrict(cp, __v, min, max);              \
        if (_r)                                                         \
                return _r;                                              \
        __v;                                                            \
})

#define strtoi_h_or_return(cp)                                          \
({                                                                      \
        u64 _v;                                                         \
        int _r = strtoi_h(cp, &_v);                                     \
        if (_r)                                                         \
                return _r;                                              \
        _v;                                                             \
})

#define sysfs_hatoi(file, var)                                          \
do {                                                                    \
        if (attr == &sysfs_ ## file)                                    \
                return strtoi_h(buf, &var) ?: (ssize_t) size;           \
} while (0)

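/*
 * Sketch of how a new filesystem attribute would be wired up; the "foo" name
 * and c->foo field below are hypothetical, for illustration only:
 *
 *      read_attribute(foo);
 *
 *      // in SHOW(bch2_fs):
 *      sysfs_print(foo, c->foo);
 *
 *      // and listed in bch2_fs_files[] (or bch2_fs_internal_files[]):
 *      &sysfs_foo,
 */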
write_attribute(trigger_gc);
write_attribute(trigger_discards);
write_attribute(prune_cache);
rw_attribute(btree_gc_periodic);
rw_attribute(gc_gens_pos);

read_attribute(uuid);
read_attribute(minor);
read_attribute(bucket_size);
read_attribute(first_bucket);
read_attribute(nbuckets);
read_attribute(durability);
read_attribute(iodone);

read_attribute(io_latency_read);
read_attribute(io_latency_write);
read_attribute(io_latency_stats_read);
read_attribute(io_latency_stats_write);
read_attribute(congested);

read_attribute(btree_avg_write_size);

read_attribute(btree_cache_size);
read_attribute(compression_stats);
read_attribute(journal_debug);
read_attribute(btree_updates);
read_attribute(btree_cache);
read_attribute(btree_key_cache);
read_attribute(btree_transactions);
read_attribute(stripes_heap);
read_attribute(open_buckets);

read_attribute(internal_uuid);

read_attribute(has_data);
read_attribute(alloc_debug);

read_attribute(read_realloc_races);
read_attribute(extent_migrate_done);
read_attribute(extent_migrate_raced);
read_attribute(bucket_alloc_fail);

#define x(t, n, ...) read_attribute(t);
BCH_PERSISTENT_COUNTERS()
#undef x

rw_attribute(discard);
rw_attribute(label);

rw_attribute(copy_gc_enabled);
read_attribute(copy_gc_wait);

rw_attribute(rebalance_enabled);
sysfs_pd_controller_attribute(rebalance);
read_attribute(rebalance_work);
rw_attribute(promote_whole_extents);

read_attribute(new_stripes);

read_attribute(io_timers_read);
read_attribute(io_timers_write);

read_attribute(data_jobs);

#ifdef CONFIG_BCACHEFS_TESTS
write_attribute(perf_test);
#endif /* CONFIG_BCACHEFS_TESTS */

#define x(_name)                                                \
        static struct attribute sysfs_time_stat_##_name =               \
                { .name = #_name, .mode = S_IRUGO };
        BCH_TIME_STATS()
#undef x

static struct attribute sysfs_state_rw = {
        .name = "state",
        .mode = S_IRUGO
};

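/*
 * Total memory consumed by the in-memory btree node cache: every node on the
 * live list is counted at the full btree node size, btree_bytes(c).
 */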
static size_t bch2_btree_cache_size(struct bch_fs *c)
{
        size_t ret = 0;
        struct btree *b;

        mutex_lock(&c->btree_cache.lock);
        list_for_each_entry(b, &c->btree_cache.live, list)
                ret += btree_bytes(c);

        mutex_unlock(&c->btree_cache.lock);
        return ret;
}

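/* Average sectors per btree node write, computed from the running counters. */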
static size_t bch2_btree_avg_write_size(struct bch_fs *c)
{
        u64 nr = atomic64_read(&c->btree_writes_nr);
        u64 sectors = atomic64_read(&c->btree_writes_sectors);

        return nr ? div64_u64(sectors, nr) : 0;
}

static long data_progress_to_text(struct printbuf *out, struct bch_fs *c)
{
        long ret = 0;
        struct bch_move_stats *stats;

        mutex_lock(&c->data_progress_lock);
        list_for_each_entry(stats, &c->data_progress_list, list) {
                pr_buf(out, "%s: data type %s btree_id %s position: ",
                       stats->name,
                       bch2_data_types[stats->data_type],
                       bch2_btree_ids[stats->btree_id]);
                bch2_bpos_to_text(out, stats->pos);
                pr_buf(out, "%s", "\n");
        }

        mutex_unlock(&c->data_progress_lock);
        return ret;
}

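/*
 * Walks every btree that can contain extent pointers and tallies, per
 * compression state (uncompressed, compressed, incompressible), the number of
 * extents and their size in sectors; only valid once the filesystem has
 * started.
 */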
static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c)
{
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        enum btree_id id;
        u64 nr_uncompressed_extents = 0,
            nr_compressed_extents = 0,
            nr_incompressible_extents = 0,
            uncompressed_sectors = 0,
            incompressible_sectors = 0,
            compressed_sectors_compressed = 0,
            compressed_sectors_uncompressed = 0;
        int ret;

        if (!test_bit(BCH_FS_STARTED, &c->flags))
                return -EPERM;

        bch2_trans_init(&trans, c, 0, 0);

        for (id = 0; id < BTREE_ID_NR; id++) {
                if (!((1U << id) & BTREE_ID_HAS_PTRS))
                        continue;

                for_each_btree_key(&trans, iter, id, POS_MIN,
                                   BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
                        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
                        const union bch_extent_entry *entry;
                        struct extent_ptr_decoded p;
                        bool compressed = false, uncompressed = false, incompressible = false;

                        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                                switch (p.crc.compression_type) {
                                case BCH_COMPRESSION_TYPE_none:
                                        uncompressed = true;
                                        uncompressed_sectors += k.k->size;
                                        break;
                                case BCH_COMPRESSION_TYPE_incompressible:
                                        incompressible = true;
                                        incompressible_sectors += k.k->size;
                                        break;
                                default:
                                        compressed_sectors_compressed +=
                                                p.crc.compressed_size;
                                        compressed_sectors_uncompressed +=
                                                p.crc.uncompressed_size;
                                        compressed = true;
                                        break;
                                }
                        }

                        if (incompressible)
                                nr_incompressible_extents++;
                        else if (uncompressed)
                                nr_uncompressed_extents++;
                        else if (compressed)
                                nr_compressed_extents++;
                }
                bch2_trans_iter_exit(&trans, &iter);
        }

        bch2_trans_exit(&trans);

        if (ret)
                return ret;

        pr_buf(out, "uncompressed:\n");
        pr_buf(out, "   nr extents:             %llu\n", nr_uncompressed_extents);
        pr_buf(out, "   size:                   ");
        bch2_hprint(out, uncompressed_sectors << 9);
        pr_buf(out, "\n");

        pr_buf(out, "compressed:\n");
        pr_buf(out, "   nr extents:             %llu\n", nr_compressed_extents);
        pr_buf(out, "   compressed size:        ");
        bch2_hprint(out, compressed_sectors_compressed << 9);
        pr_buf(out, "\n");
        pr_buf(out, "   uncompressed size:      ");
        bch2_hprint(out, compressed_sectors_uncompressed << 9);
        pr_buf(out, "\n");

        pr_buf(out, "incompressible:\n");
        pr_buf(out, "   nr extents:             %llu\n", nr_incompressible_extents);
        pr_buf(out, "   size:                   ");
        bch2_hprint(out, incompressible_sectors << 9);
        pr_buf(out, "\n");
        return 0;
}

static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c)
{
        pr_buf(out, "%s: ", bch2_btree_ids[c->gc_gens_btree]);
        bch2_bpos_to_text(out, c->gc_gens_pos);
        pr_buf(out, "\n");
}

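/*
 * Show handler for the top-level filesystem kobject; the internal/ directory
 * forwards here as well, so this covers both bch2_fs_files[] and
 * bch2_fs_internal_files[].
 */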
SHOW(bch2_fs)
{
        struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

        sysfs_print(minor,                      c->minor);
        sysfs_printf(internal_uuid, "%pU",      c->sb.uuid.b);

        sysfs_hprint(btree_cache_size,          bch2_btree_cache_size(c));
        sysfs_hprint(btree_avg_write_size,      bch2_btree_avg_write_size(c));

        sysfs_print(read_realloc_races,
                    atomic_long_read(&c->read_realloc_races));
        sysfs_print(extent_migrate_done,
                    atomic_long_read(&c->extent_migrate_done));
        sysfs_print(extent_migrate_raced,
                    atomic_long_read(&c->extent_migrate_raced));
        sysfs_print(bucket_alloc_fail,
                    atomic_long_read(&c->bucket_alloc_fail));

        sysfs_printf(btree_gc_periodic, "%u",   (int) c->btree_gc_periodic);

        if (attr == &sysfs_gc_gens_pos)
                bch2_gc_gens_pos_to_text(out, c);

        sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);

        sysfs_printf(rebalance_enabled,         "%i", c->rebalance.enabled);
        sysfs_pd_controller_show(rebalance,     &c->rebalance.pd); /* XXX */
        sysfs_hprint(copy_gc_wait,
                     max(0LL, c->copygc_wait -
                         atomic64_read(&c->io_clock[WRITE].now)) << 9);

        if (attr == &sysfs_rebalance_work)
                bch2_rebalance_work_to_text(out, c);

        sysfs_print(promote_whole_extents,      c->promote_whole_extents);

        /* Debugging: */

        if (attr == &sysfs_journal_debug)
                bch2_journal_debug_to_text(out, &c->journal);

        if (attr == &sysfs_btree_updates)
                bch2_btree_updates_to_text(out, c);

        if (attr == &sysfs_btree_cache)
                bch2_btree_cache_to_text(out, c);

        if (attr == &sysfs_btree_key_cache)
                bch2_btree_key_cache_to_text(out, &c->btree_key_cache);

        if (attr == &sysfs_btree_transactions)
                bch2_btree_trans_to_text(out, c);

        if (attr == &sysfs_stripes_heap)
                bch2_stripes_heap_to_text(out, c);

        if (attr == &sysfs_open_buckets)
                bch2_open_buckets_to_text(out, c);

        if (attr == &sysfs_compression_stats)
                bch2_compression_stats_to_text(out, c);

        if (attr == &sysfs_new_stripes)
                bch2_new_stripes_to_text(out, c);

        if (attr == &sysfs_io_timers_read)
                bch2_io_timers_to_text(out, &c->io_clock[READ]);

        if (attr == &sysfs_io_timers_write)
                bch2_io_timers_to_text(out, &c->io_clock[WRITE]);

        if (attr == &sysfs_data_jobs)
                data_progress_to_text(out, c);

        return 0;
}

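/*
 * Store handler for the filesystem kobject: tunables (gc, copygc, rebalance,
 * promote_whole_extents) are always writable; the debugging triggers further
 * down require a started, read-write filesystem.
 */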
STORE(bch2_fs)
{
        struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

        if (attr == &sysfs_btree_gc_periodic) {
                ssize_t ret = strtoul_safe(buf, c->btree_gc_periodic)
                        ?: (ssize_t) size;

                wake_up_process(c->gc_thread);
                return ret;
        }

        if (attr == &sysfs_copy_gc_enabled) {
                ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
                        ?: (ssize_t) size;

                if (c->copygc_thread)
                        wake_up_process(c->copygc_thread);
                return ret;
        }

        if (attr == &sysfs_rebalance_enabled) {
                ssize_t ret = strtoul_safe(buf, c->rebalance.enabled)
                        ?: (ssize_t) size;

                rebalance_wakeup(c);
                return ret;
        }

        sysfs_pd_controller_store(rebalance,    &c->rebalance.pd);

        sysfs_strtoul(promote_whole_extents,    c->promote_whole_extents);

        /* Debugging: */

        if (!test_bit(BCH_FS_STARTED, &c->flags))
                return -EPERM;

        /* Debugging: */

        if (!test_bit(BCH_FS_RW, &c->flags))
                return -EROFS;

        if (attr == &sysfs_prune_cache) {
                struct shrink_control sc;

                sc.gfp_mask = GFP_KERNEL;
                sc.nr_to_scan = strtoul_or_return(buf);
                c->btree_cache.shrink.scan_objects(&c->btree_cache.shrink, &sc);
        }

        if (attr == &sysfs_trigger_gc) {
                /*
                 * Full gc is currently incompatible with btree key cache:
                 */
#if 0
                down_read(&c->state_lock);
                bch2_gc(c, false, false);
                up_read(&c->state_lock);
#else
                bch2_gc_gens(c);
#endif
        }

        if (attr == &sysfs_trigger_discards)
                bch2_do_discards(c);

#ifdef CONFIG_BCACHEFS_TESTS
        if (attr == &sysfs_perf_test) {
                char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
                char *test              = strsep(&p, " \t\n");
                char *nr_str            = strsep(&p, " \t\n");
                char *threads_str       = strsep(&p, " \t\n");
                unsigned threads;
                u64 nr;
                int ret = -EINVAL;

                if (threads_str &&
                    !(ret = kstrtouint(threads_str, 10, &threads)) &&
                    !(ret = bch2_strtoull_h(nr_str, &nr)))
                        ret = bch2_btree_perf_test(c, test, nr, threads);
                kfree(tmp);

                if (ret)
                        size = ret;
        }
#endif
        return size;
}
SYSFS_OPS(bch2_fs);

struct attribute *bch2_fs_files[] = {
        &sysfs_minor,
        &sysfs_btree_cache_size,
        &sysfs_btree_avg_write_size,

        &sysfs_promote_whole_extents,

        &sysfs_compression_stats,

#ifdef CONFIG_BCACHEFS_TESTS
        &sysfs_perf_test,
#endif
        NULL
};

/* counters dir */

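/*
 * Each persistent counter is reported twice: the delta since mount and the
 * total since filesystem creation. Values are shifted left by 9 (sectors to
 * bytes) and printed human-readable.
 */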
SHOW(bch2_fs_counters)
{
        struct bch_fs *c = container_of(kobj, struct bch_fs, counters_kobj);
        u64 counter = 0;
        u64 counter_since_mount = 0;

        out->tabstops[0] = 32;
        #define x(t, ...) \
                if (attr == &sysfs_##t) {                                       \
                        counter             = percpu_u64_get(&c->counters[BCH_COUNTER_##t]);\
                        counter_since_mount = counter - c->counters_on_mount[BCH_COUNTER_##t];\
                        pr_buf(out, "since mount:");                            \
                        pr_tab(out);                                            \
                        bch2_hprint(out, counter_since_mount << 9);             \
                        pr_newline(out);                                        \
                                                                                \
                        pr_buf(out, "since filesystem creation:");              \
                        pr_tab(out);                                            \
                        bch2_hprint(out, counter << 9);                         \
                        pr_newline(out);                                        \
                }
        BCH_PERSISTENT_COUNTERS()
        #undef x
        return 0;
}

STORE(bch2_fs_counters) {
        return 0;
}

SYSFS_OPS(bch2_fs_counters);

struct attribute *bch2_fs_counters_files[] = {
#define x(t, ...) \
        &sysfs_##t,
        BCH_PERSISTENT_COUNTERS()
#undef x
        NULL
};

/* internal dir - just a wrapper */

SHOW(bch2_fs_internal)
{
        struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
        return bch2_fs_to_text(out, &c->kobj, attr);
}

STORE(bch2_fs_internal)
{
        struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
        return bch2_fs_store(&c->kobj, attr, buf, size);
}
SYSFS_OPS(bch2_fs_internal);

struct attribute *bch2_fs_internal_files[] = {
        &sysfs_journal_debug,
        &sysfs_btree_updates,
        &sysfs_btree_cache,
        &sysfs_btree_key_cache,
        &sysfs_btree_transactions,
        &sysfs_new_stripes,
        &sysfs_stripes_heap,
        &sysfs_open_buckets,
        &sysfs_io_timers_read,
        &sysfs_io_timers_write,

        &sysfs_trigger_gc,
        &sysfs_trigger_discards,
        &sysfs_prune_cache,

        &sysfs_read_realloc_races,
        &sysfs_extent_migrate_done,
        &sysfs_extent_migrate_raced,
        &sysfs_bucket_alloc_fail,

        &sysfs_gc_gens_pos,

        &sysfs_copy_gc_enabled,
        &sysfs_copy_gc_wait,

        &sysfs_rebalance_enabled,
        &sysfs_rebalance_work,
        sysfs_pd_controller_files(rebalance),

        &sysfs_data_jobs,

        &sysfs_internal_uuid,
        NULL
};

/* options */

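/*
 * The options directory exposes one file per filesystem-level (OPT_FS) option;
 * the files are created by bch2_opts_create_sysfs_files() below. Reads print
 * the current value; writes parse and validate the new value, persist it to
 * the superblock and update the in-memory options.
 */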
SHOW(bch2_fs_opts_dir)
{
        struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
        const struct bch_option *opt = container_of(attr, struct bch_option, attr);
        int id = opt - bch2_opt_table;
        u64 v = bch2_opt_get_by_id(&c->opts, id);

        bch2_opt_to_text(out, c, c->disk_sb.sb, opt, v, OPT_SHOW_FULL_LIST);
        pr_char(out, '\n');

        return 0;
}

STORE(bch2_fs_opts_dir)
{
        struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
        const struct bch_option *opt = container_of(attr, struct bch_option, attr);
        int ret, id = opt - bch2_opt_table;
        char *tmp;
        u64 v;

        /*
         * We don't need to take c->writes for correctness, but it eliminates an
         * unsightly error message in the dmesg log when we're RO:
         */
        if (unlikely(!percpu_ref_tryget(&c->writes)))
                return -EROFS;

        tmp = kstrdup(buf, GFP_KERNEL);
        if (!tmp) {
                ret = -ENOMEM;
                goto err;
        }

        ret = bch2_opt_parse(c, opt, strim(tmp), &v, NULL);
        kfree(tmp);

        if (ret < 0)
                goto err;

        ret = bch2_opt_check_may_set(c, id, v);
        if (ret < 0)
                goto err;

        bch2_opt_set_sb(c, opt, v);
        bch2_opt_set_by_id(&c->opts, id, v);

        if ((id == Opt_background_target ||
             id == Opt_background_compression) && v) {
                bch2_rebalance_add_work(c, S64_MAX);
                rebalance_wakeup(c);
        }

        ret = size;
err:
        percpu_ref_put(&c->writes);
        return ret;
}
SYSFS_OPS(bch2_fs_opts_dir);

struct attribute *bch2_fs_opts_dir_files[] = { NULL };

int bch2_opts_create_sysfs_files(struct kobject *kobj)
{
        const struct bch_option *i;
        int ret;

        for (i = bch2_opt_table;
             i < bch2_opt_table + bch2_opts_nr;
             i++) {
                if (!(i->flags & OPT_FS))
                        continue;

                ret = sysfs_create_file(kobj, &i->attr);
                if (ret)
                        return ret;
        }

        return 0;
}

/* time stats */

SHOW(bch2_fs_time_stats)
{
        struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);

#define x(name)                                                         \
        if (attr == &sysfs_time_stat_##name)                            \
                bch2_time_stats_to_text(out, &c->times[BCH_TIME_##name]);
        BCH_TIME_STATS()
#undef x

        return 0;
}

STORE(bch2_fs_time_stats)
{
        return size;
}
SYSFS_OPS(bch2_fs_time_stats);

struct attribute *bch2_fs_time_stats_files[] = {
#define x(name)                                         \
        &sysfs_time_stat_##name,
        BCH_TIME_STATS()
#undef x
        NULL
};

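/*
 * Per-device allocation debug info: bucket, sector and fragmentation counts
 * per data type, plus open bucket usage and allocator wait-list state.
 */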
static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
{
        struct bch_fs *c = ca->fs;
        struct bch_dev_usage stats = bch2_dev_usage_read(ca);
        unsigned i, nr[BCH_DATA_NR];

        memset(nr, 0, sizeof(nr));

        for (i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
                nr[c->open_buckets[i].data_type]++;

        pr_buf(out,
               "\t\t\t buckets\t sectors      fragmented\n"
               "capacity\t%16llu\n",
               ca->mi.nbuckets - ca->mi.first_bucket);

        for (i = 0; i < BCH_DATA_NR; i++)
                pr_buf(out, "%-16s%16llu%16llu%16llu\n",
                       bch2_data_types[i], stats.d[i].buckets,
                       stats.d[i].sectors, stats.d[i].fragmented);

        pr_buf(out,
               "ec\t\t%16llu\n"
               "\n"
               "freelist_wait\t\t%s\n"
               "open buckets allocated\t%u\n"
               "open buckets this dev\t%u\n"
               "open buckets total\t%u\n"
               "open_buckets_wait\t%s\n"
               "open_buckets_btree\t%u\n"
               "open_buckets_user\t%u\n"
               "btree reserve cache\t%u\n",
               stats.buckets_ec,
               c->freelist_wait.list.first              ? "waiting" : "empty",
               OPEN_BUCKETS_COUNT - c->open_buckets_nr_free,
               ca->nr_open_buckets,
               OPEN_BUCKETS_COUNT,
               c->open_buckets_wait.list.first          ? "waiting" : "empty",
               nr[BCH_DATA_btree],
               nr[BCH_DATA_user],
               c->btree_reserve_cache_nr);
}

static const char * const bch2_rw[] = {
        "read",
        "write",
        NULL
};

static void dev_iodone_to_text(struct printbuf *out, struct bch_dev *ca)
{
        int rw, i;

        for (rw = 0; rw < 2; rw++) {
                pr_buf(out, "%s:\n", bch2_rw[rw]);

                for (i = 1; i < BCH_DATA_NR; i++)
                        pr_buf(out, "%-12s:%12llu\n",
                               bch2_data_types[i],
                               percpu_u64_get(&ca->io_done->sectors[rw][i]) << 9);
        }
}

SHOW(bch2_dev)
{
        struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
        struct bch_fs *c = ca->fs;

        sysfs_printf(uuid,              "%pU\n", ca->uuid.b);

        sysfs_print(bucket_size,        bucket_bytes(ca));
        sysfs_print(first_bucket,       ca->mi.first_bucket);
        sysfs_print(nbuckets,           ca->mi.nbuckets);
        sysfs_print(durability,         ca->mi.durability);
        sysfs_print(discard,            ca->mi.discard);

        if (attr == &sysfs_label) {
                if (ca->mi.group) {
                        mutex_lock(&c->sb_lock);
                        bch2_disk_path_to_text(out, c->disk_sb.sb,
                                               ca->mi.group - 1);
                        mutex_unlock(&c->sb_lock);
                }

                pr_char(out, '\n');
        }

        if (attr == &sysfs_has_data) {
                bch2_flags_to_text(out, bch2_data_types,
                                   bch2_dev_has_data(c, ca));
                pr_char(out, '\n');
        }

        if (attr == &sysfs_state_rw) {
                bch2_string_opt_to_text(out, bch2_member_states,
                                        ca->mi.state);
                pr_char(out, '\n');
        }

        if (attr == &sysfs_iodone)
                dev_iodone_to_text(out, ca);

        sysfs_print(io_latency_read,            atomic64_read(&ca->cur_latency[READ]));
        sysfs_print(io_latency_write,           atomic64_read(&ca->cur_latency[WRITE]));

        if (attr == &sysfs_io_latency_stats_read)
                bch2_time_stats_to_text(out, &ca->io_latency[READ]);

        if (attr == &sysfs_io_latency_stats_write)
                bch2_time_stats_to_text(out, &ca->io_latency[WRITE]);

        sysfs_printf(congested,                 "%u%%",
                     clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX)
                     * 100 / CONGESTED_MAX);

        if (attr == &sysfs_alloc_debug)
                dev_alloc_debug_to_text(out, ca);

        return 0;
}

STORE(bch2_dev)
{
        struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
        struct bch_fs *c = ca->fs;
        struct bch_member *mi;

        if (attr == &sysfs_discard) {
                bool v = strtoul_or_return(buf);

                mutex_lock(&c->sb_lock);
                mi = &bch2_sb_get_members(c->disk_sb.sb)->members[ca->dev_idx];

                if (v != BCH_MEMBER_DISCARD(mi)) {
                        SET_BCH_MEMBER_DISCARD(mi, v);
                        bch2_write_super(c);
                }
                mutex_unlock(&c->sb_lock);
        }

        if (attr == &sysfs_label) {
                char *tmp;
                int ret;

                tmp = kstrdup(buf, GFP_KERNEL);
                if (!tmp)
                        return -ENOMEM;

                ret = bch2_dev_group_set(c, ca, strim(tmp));
                kfree(tmp);
                if (ret)
                        return ret;
        }

        return size;
}
SYSFS_OPS(bch2_dev);

struct attribute *bch2_dev_files[] = {
        &sysfs_uuid,
        &sysfs_bucket_size,
        &sysfs_first_bucket,
        &sysfs_nbuckets,
        &sysfs_durability,

        /* settings: */
        &sysfs_discard,
        &sysfs_state_rw,
        &sysfs_label,

        &sysfs_has_data,
        &sysfs_iodone,

        &sysfs_io_latency_read,
        &sysfs_io_latency_write,
        &sysfs_io_latency_stats_read,
        &sysfs_io_latency_stats_write,
        &sysfs_congested,

        /* debug: */
        &sysfs_alloc_debug,
        NULL
};

#endif  /* NO_BCACHEFS_SYSFS */