// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#ifndef NO_BCACHEFS_SYSFS

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "sysfs.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "ec.h"
#include "inode.h"
#include "journal.h"
#include "keylist.h"
#include "move.h"
#include "movinggc.h"
#include "nocow_locking.h"
#include "opts.h"
#include "rebalance.h"
#include "replicas.h"
#include "super-io.h"
#include "tests.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

#include "util.h"

#define SYSFS_OPS(type)                                                 \
const struct sysfs_ops type ## _sysfs_ops = {                           \
        .show   = type ## _show,                                        \
        .store  = type ## _store                                        \
}

#define SHOW(fn)                                                        \
static ssize_t fn ## _to_text(struct printbuf *,                        \
                              struct kobject *, struct attribute *);    \
                                                                        \
static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
                           char *buf)                                   \
{                                                                       \
        struct printbuf out = PRINTBUF;                                 \
        ssize_t ret = fn ## _to_text(&out, kobj, attr);                 \
                                                                        \
        if (out.pos && out.buf[out.pos - 1] != '\n')                    \
                prt_newline(&out);                                      \
                                                                        \
        if (!ret && out.allocation_failure)                             \
                ret = -ENOMEM;                                          \
                                                                        \
        if (!ret) {                                                     \
                ret = min_t(size_t, out.pos, PAGE_SIZE - 1);            \
                memcpy(buf, out.buf, ret);                              \
        }                                                               \
        printbuf_exit(&out);                                            \
        return bch2_err_class(ret);                                     \
}                                                                       \
                                                                        \
static ssize_t fn ## _to_text(struct printbuf *out, struct kobject *kobj,\
                              struct attribute *attr)

#define STORE(fn)                                                       \
static ssize_t fn ## _store_inner(struct kobject *, struct attribute *,\
                            const char *, size_t);                      \
                                                                        \
static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
                            const char *buf, size_t size)               \
{                                                                       \
        return bch2_err_class(fn##_store_inner(kobj, attr, buf, size)); \
}                                                                       \
                                                                        \
static ssize_t fn ## _store_inner(struct kobject *kobj, struct attribute *attr,\
                                  const char *buf, size_t size)

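/*
 * SHOW()/STORE() are used as function-definition headers; a typical
 * attribute group later in this file looks roughly like:
 *
 *      SHOW(bch2_fs)
 *      {
 *              struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
 *
 *              sysfs_print(minor, c->minor);
 *              return 0;
 *      }
 *
 *      STORE(bch2_fs)
 *      {
 *              ...
 *              return size;
 *      }
 *      SYSFS_OPS(bch2_fs);
 *
 * The generated _show() renders into a printbuf and copies at most
 * PAGE_SIZE - 1 bytes into the sysfs buffer; the generated _store()
 * maps private error codes through bch2_err_class() before returning
 * them to the VFS.
 */
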
#define __sysfs_attribute(_name, _mode)                                 \
        static struct attribute sysfs_##_name =                         \
                { .name = #_name, .mode = _mode }

#define write_attribute(n)      __sysfs_attribute(n, 0200)
#define read_attribute(n)       __sysfs_attribute(n, 0444)
#define rw_attribute(n)         __sysfs_attribute(n, 0644)

#define sysfs_printf(file, fmt, ...)                                    \
do {                                                                    \
        if (attr == &sysfs_ ## file)                                    \
                prt_printf(out, fmt "\n", __VA_ARGS__);                 \
} while (0)

#define sysfs_print(file, var)                                          \
do {                                                                    \
        if (attr == &sysfs_ ## file)                                    \
                snprint(out, var);                                      \
} while (0)

#define sysfs_hprint(file, val)                                         \
do {                                                                    \
        if (attr == &sysfs_ ## file)                                    \
                prt_human_readable_s64(out, val);                       \
} while (0)

#define sysfs_strtoul(file, var)                                        \
do {                                                                    \
        if (attr == &sysfs_ ## file)                                    \
                return strtoul_safe(buf, var) ?: (ssize_t) size;        \
} while (0)

#define sysfs_strtoul_clamp(file, var, min, max)                        \
do {                                                                    \
        if (attr == &sysfs_ ## file)                                    \
                return strtoul_safe_clamp(buf, var, min, max)           \
                        ?: (ssize_t) size;                              \
} while (0)

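/*
 * Like the sysfs_strtoul*() helpers above, this is only meant for use inside
 * a STORE() body: on a parse failure the kstrtoul() error is returned
 * directly from the enclosing _store_inner() function.
 */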
#define strtoul_or_return(cp)                                           \
({                                                                      \
        unsigned long _v;                                               \
        int _r = kstrtoul(cp, 10, &_v);                                 \
        if (_r)                                                         \
                return _r;                                              \
        _v;                                                             \
})

write_attribute(trigger_gc);
write_attribute(trigger_discards);
write_attribute(trigger_invalidates);
write_attribute(prune_cache);
write_attribute(btree_wakeup);
rw_attribute(btree_gc_periodic);
rw_attribute(gc_gens_pos);

read_attribute(uuid);
read_attribute(minor);
read_attribute(bucket_size);
read_attribute(first_bucket);
read_attribute(nbuckets);
rw_attribute(durability);
read_attribute(iodone);

read_attribute(io_latency_read);
read_attribute(io_latency_write);
read_attribute(io_latency_stats_read);
read_attribute(io_latency_stats_write);
read_attribute(congested);

read_attribute(btree_write_stats);

read_attribute(btree_cache_size);
read_attribute(compression_stats);
read_attribute(journal_debug);
read_attribute(btree_updates);
read_attribute(btree_cache);
read_attribute(btree_key_cache);
read_attribute(stripes_heap);
read_attribute(open_buckets);
read_attribute(open_buckets_partial);
read_attribute(write_points);
read_attribute(nocow_lock_table);

#ifdef BCH_WRITE_REF_DEBUG
read_attribute(write_refs);

static const char * const bch2_write_refs[] = {
#define x(n)    #n,
        BCH_WRITE_REFS()
#undef x
        NULL
};

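/* One line per BCH_WRITE_REFS() entry: the ref name and its current count. */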
static void bch2_write_refs_to_text(struct printbuf *out, struct bch_fs *c)
{
        bch2_printbuf_tabstop_push(out, 24);

        for (unsigned i = 0; i < ARRAY_SIZE(c->writes); i++) {
                prt_str(out, bch2_write_refs[i]);
                prt_tab(out);
                prt_printf(out, "%li", atomic_long_read(&c->writes[i]));
                prt_newline(out);
        }
}
#endif

read_attribute(internal_uuid);
read_attribute(disk_groups);

read_attribute(has_data);
read_attribute(alloc_debug);

#define x(t, n, ...) read_attribute(t);
BCH_PERSISTENT_COUNTERS()
#undef x

rw_attribute(discard);
rw_attribute(label);

rw_attribute(copy_gc_enabled);
read_attribute(copy_gc_wait);

rw_attribute(rebalance_enabled);
sysfs_pd_controller_attribute(rebalance);
read_attribute(rebalance_status);
rw_attribute(promote_whole_extents);

read_attribute(new_stripes);

read_attribute(io_timers_read);
read_attribute(io_timers_write);

read_attribute(moving_ctxts);

#ifdef CONFIG_BCACHEFS_TESTS
write_attribute(perf_test);
#endif /* CONFIG_BCACHEFS_TESTS */

#define x(_name)                                                \
        static struct attribute sysfs_time_stat_##_name =               \
                { .name = #_name, .mode = 0444 };
        BCH_TIME_STATS()
#undef x

static struct attribute sysfs_state_rw = {
        .name = "state",
        .mode =  0444,
};

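/*
 * Rough estimate of memory pinned by the in-memory btree node cache: every
 * node on the live list is accounted at the full btree_bytes(c) node size.
 */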
static size_t bch2_btree_cache_size(struct bch_fs *c)
{
        size_t ret = 0;
        struct btree *b;

        mutex_lock(&c->btree_cache.lock);
        list_for_each_entry(b, &c->btree_cache.live, list)
                ret += btree_bytes(c);

        mutex_unlock(&c->btree_cache.lock);
        return ret;
}

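/*
 * Walks every extent-carrying btree and classifies each extent by the
 * compression type of its pointers; sector counts are converted to bytes
 * (<< 9) when printed.
 */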
static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c)
{
        struct btree_trans *trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        enum btree_id id;
        u64 nr_uncompressed_extents = 0,
            nr_compressed_extents = 0,
            nr_incompressible_extents = 0,
            uncompressed_sectors = 0,
            incompressible_sectors = 0,
            compressed_sectors_compressed = 0,
            compressed_sectors_uncompressed = 0;
        int ret = 0;

        if (!test_bit(BCH_FS_STARTED, &c->flags))
                return -EPERM;

        trans = bch2_trans_get(c);

        for (id = 0; id < BTREE_ID_NR; id++) {
                if (!btree_type_has_ptrs(id))
                        continue;

                for_each_btree_key(trans, iter, id, POS_MIN,
                                   BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
                        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
                        const union bch_extent_entry *entry;
                        struct extent_ptr_decoded p;
                        bool compressed = false, uncompressed = false, incompressible = false;

                        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                                switch (p.crc.compression_type) {
                                case BCH_COMPRESSION_TYPE_none:
                                        uncompressed = true;
                                        uncompressed_sectors += k.k->size;
                                        break;
                                case BCH_COMPRESSION_TYPE_incompressible:
                                        incompressible = true;
                                        incompressible_sectors += k.k->size;
                                        break;
                                default:
                                        compressed_sectors_compressed +=
                                                p.crc.compressed_size;
                                        compressed_sectors_uncompressed +=
                                                p.crc.uncompressed_size;
                                        compressed = true;
                                        break;
                                }
                        }

                        if (incompressible)
                                nr_incompressible_extents++;
                        else if (uncompressed)
                                nr_uncompressed_extents++;
                        else if (compressed)
                                nr_compressed_extents++;
                }
                bch2_trans_iter_exit(trans, &iter);
        }

        bch2_trans_put(trans);

        if (ret)
                return ret;

        prt_printf(out, "uncompressed:\n");
        prt_printf(out, "       nr extents:             %llu\n", nr_uncompressed_extents);
        prt_printf(out, "       size:                   ");
        prt_human_readable_u64(out, uncompressed_sectors << 9);
        prt_printf(out, "\n");

        prt_printf(out, "compressed:\n");
        prt_printf(out, "       nr extents:             %llu\n", nr_compressed_extents);
        prt_printf(out, "       compressed size:        ");
        prt_human_readable_u64(out, compressed_sectors_compressed << 9);
        prt_printf(out, "\n");
        prt_printf(out, "       uncompressed size:      ");
        prt_human_readable_u64(out, compressed_sectors_uncompressed << 9);
        prt_printf(out, "\n");

        prt_printf(out, "incompressible:\n");
        prt_printf(out, "       nr extents:             %llu\n", nr_incompressible_extents);
        prt_printf(out, "       size:                   ");
        prt_human_readable_u64(out, incompressible_sectors << 9);
        prt_printf(out, "\n");
        return 0;
}

static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c)
{
        prt_printf(out, "%s: ", bch2_btree_id_str(c->gc_gens_btree));
        bch2_bpos_to_text(out, c->gc_gens_pos);
        prt_printf(out, "\n");
}

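/*
 * For each active btree transaction, wake all waiters on the node lock it is
 * currently blocked on; backs the write-only btree_wakeup attribute below.
 */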
static void bch2_btree_wakeup_all(struct bch_fs *c)
{
        struct btree_trans *trans;

        seqmutex_lock(&c->btree_trans_lock);
        list_for_each_entry(trans, &c->btree_trans_list, list) {
                struct btree_bkey_cached_common *b = READ_ONCE(trans->locking);

                if (b)
                        six_lock_wakeup_all(&b->lock);
        }
        seqmutex_unlock(&c->btree_trans_lock);
}

SHOW(bch2_fs)
{
        struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

        sysfs_print(minor,                      c->minor);
        sysfs_printf(internal_uuid, "%pU",      c->sb.uuid.b);

        sysfs_hprint(btree_cache_size,          bch2_btree_cache_size(c));

        if (attr == &sysfs_btree_write_stats)
                bch2_btree_write_stats_to_text(out, c);

        sysfs_printf(btree_gc_periodic, "%u",   (int) c->btree_gc_periodic);

        if (attr == &sysfs_gc_gens_pos)
                bch2_gc_gens_pos_to_text(out, c);

        sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);

        sysfs_printf(rebalance_enabled,         "%i", c->rebalance.enabled);
        sysfs_pd_controller_show(rebalance,     &c->rebalance.pd); /* XXX */

        if (attr == &sysfs_copy_gc_wait)
                bch2_copygc_wait_to_text(out, c);

        if (attr == &sysfs_rebalance_status)
                bch2_rebalance_status_to_text(out, c);

        sysfs_print(promote_whole_extents,      c->promote_whole_extents);

        /* Debugging: */

        if (attr == &sysfs_journal_debug)
                bch2_journal_debug_to_text(out, &c->journal);

        if (attr == &sysfs_btree_updates)
                bch2_btree_updates_to_text(out, c);

        if (attr == &sysfs_btree_cache)
                bch2_btree_cache_to_text(out, &c->btree_cache);

        if (attr == &sysfs_btree_key_cache)
                bch2_btree_key_cache_to_text(out, &c->btree_key_cache);

        if (attr == &sysfs_stripes_heap)
                bch2_stripes_heap_to_text(out, c);

        if (attr == &sysfs_open_buckets)
                bch2_open_buckets_to_text(out, c);

        if (attr == &sysfs_open_buckets_partial)
                bch2_open_buckets_partial_to_text(out, c);

        if (attr == &sysfs_write_points)
                bch2_write_points_to_text(out, c);

        if (attr == &sysfs_compression_stats)
                bch2_compression_stats_to_text(out, c);

        if (attr == &sysfs_new_stripes)
                bch2_new_stripes_to_text(out, c);

        if (attr == &sysfs_io_timers_read)
                bch2_io_timers_to_text(out, &c->io_clock[READ]);

        if (attr == &sysfs_io_timers_write)
                bch2_io_timers_to_text(out, &c->io_clock[WRITE]);

        if (attr == &sysfs_moving_ctxts)
                bch2_fs_moving_ctxts_to_text(out, c);

#ifdef BCH_WRITE_REF_DEBUG
        if (attr == &sysfs_write_refs)
                bch2_write_refs_to_text(out, c);
#endif

        if (attr == &sysfs_nocow_lock_table)
                bch2_nocow_locks_to_text(out, &c->nocow_locks);

        if (attr == &sysfs_disk_groups)
                bch2_disk_groups_to_text(out, c);

        return 0;
}

STORE(bch2_fs)
{
        struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

        if (attr == &sysfs_btree_gc_periodic) {
                ssize_t ret = strtoul_safe(buf, c->btree_gc_periodic)
                        ?: (ssize_t) size;

                wake_up_process(c->gc_thread);
                return ret;
        }

        if (attr == &sysfs_copy_gc_enabled) {
                ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
                        ?: (ssize_t) size;

                if (c->copygc_thread)
                        wake_up_process(c->copygc_thread);
                return ret;
        }

        if (attr == &sysfs_rebalance_enabled) {
                ssize_t ret = strtoul_safe(buf, c->rebalance.enabled)
                        ?: (ssize_t) size;

                rebalance_wakeup(c);
                return ret;
        }

        sysfs_pd_controller_store(rebalance,    &c->rebalance.pd);

        sysfs_strtoul(promote_whole_extents,    c->promote_whole_extents);

        /* Debugging: */

        if (!test_bit(BCH_FS_STARTED, &c->flags))
                return -EPERM;

        if (!test_bit(BCH_FS_RW, &c->flags))
                return -EROFS;

        if (attr == &sysfs_prune_cache) {
                struct shrink_control sc;

                sc.gfp_mask = GFP_KERNEL;
                sc.nr_to_scan = strtoul_or_return(buf);
                c->btree_cache.shrink.scan_objects(&c->btree_cache.shrink, &sc);
        }

        if (attr == &sysfs_btree_wakeup)
                bch2_btree_wakeup_all(c);

        if (attr == &sysfs_trigger_gc) {
                /*
                 * Full gc is currently incompatible with btree key cache:
                 */
#if 0
                down_read(&c->state_lock);
                bch2_gc(c, false, false);
                up_read(&c->state_lock);
#else
                bch2_gc_gens(c);
#endif
        }

        if (attr == &sysfs_trigger_discards)
                bch2_do_discards(c);

        if (attr == &sysfs_trigger_invalidates)
                bch2_do_invalidates(c);

#ifdef CONFIG_BCACHEFS_TESTS
        if (attr == &sysfs_perf_test) {
                char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
                char *test              = strsep(&p, " \t\n");
                char *nr_str            = strsep(&p, " \t\n");
                char *threads_str       = strsep(&p, " \t\n");
                unsigned threads;
                u64 nr;
                int ret = -EINVAL;

                if (threads_str &&
                    !(ret = kstrtouint(threads_str, 10, &threads)) &&
                    !(ret = bch2_strtoull_h(nr_str, &nr)))
                        ret = bch2_btree_perf_test(c, test, nr, threads);
                kfree(tmp);

                if (ret)
                        size = ret;
        }
#endif
        return size;
}
SYSFS_OPS(bch2_fs);

struct attribute *bch2_fs_files[] = {
        &sysfs_minor,
        &sysfs_btree_cache_size,
        &sysfs_btree_write_stats,

        &sysfs_promote_whole_extents,

        &sysfs_compression_stats,

#ifdef CONFIG_BCACHEFS_TESTS
        &sysfs_perf_test,
#endif
        NULL
};

/* counters dir */

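/*
 * One read-only file per BCH_PERSISTENT_COUNTERS() entry; each shows the
 * counter both since mount and since filesystem creation, human-readable.
 */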
SHOW(bch2_fs_counters)
{
        struct bch_fs *c = container_of(kobj, struct bch_fs, counters_kobj);
        u64 counter = 0;
        u64 counter_since_mount = 0;

        printbuf_tabstop_push(out, 32);

        #define x(t, ...) \
                if (attr == &sysfs_##t) {                                       \
                        counter             = percpu_u64_get(&c->counters[BCH_COUNTER_##t]);\
                        counter_since_mount = counter - c->counters_on_mount[BCH_COUNTER_##t];\
                        prt_printf(out, "since mount:");                                \
                        prt_tab(out);                                           \
                        prt_human_readable_u64(out, counter_since_mount);       \
                        prt_newline(out);                                       \
                                                                                \
                        prt_printf(out, "since filesystem creation:");          \
                        prt_tab(out);                                           \
                        prt_human_readable_u64(out, counter);                   \
                        prt_newline(out);                                       \
                }
        BCH_PERSISTENT_COUNTERS()
        #undef x
        return 0;
}

STORE(bch2_fs_counters)
{
        return 0;
}

SYSFS_OPS(bch2_fs_counters);

struct attribute *bch2_fs_counters_files[] = {
#define x(t, ...) \
        &sysfs_##t,
        BCH_PERSISTENT_COUNTERS()
#undef x
        NULL
};

/* internal dir - just a wrapper */

SHOW(bch2_fs_internal)
{
        struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

        return bch2_fs_to_text(out, &c->kobj, attr);
}

STORE(bch2_fs_internal)
{
        struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

        return bch2_fs_store(&c->kobj, attr, buf, size);
}
SYSFS_OPS(bch2_fs_internal);

struct attribute *bch2_fs_internal_files[] = {
        &sysfs_journal_debug,
        &sysfs_btree_updates,
        &sysfs_btree_cache,
        &sysfs_btree_key_cache,
        &sysfs_new_stripes,
        &sysfs_stripes_heap,
        &sysfs_open_buckets,
        &sysfs_open_buckets_partial,
        &sysfs_write_points,
#ifdef BCH_WRITE_REF_DEBUG
        &sysfs_write_refs,
#endif
        &sysfs_nocow_lock_table,
        &sysfs_io_timers_read,
        &sysfs_io_timers_write,

        &sysfs_trigger_gc,
        &sysfs_trigger_discards,
        &sysfs_trigger_invalidates,
        &sysfs_prune_cache,
        &sysfs_btree_wakeup,

        &sysfs_gc_gens_pos,

        &sysfs_copy_gc_enabled,
        &sysfs_copy_gc_wait,

        &sysfs_rebalance_enabled,
        &sysfs_rebalance_status,
        sysfs_pd_controller_files(rebalance),

        &sysfs_moving_ctxts,

        &sysfs_internal_uuid,

        &sysfs_disk_groups,
        NULL
};

/* options */

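/*
 * One file per filesystem option (OPT_FS); the files are created dynamically
 * by bch2_opts_create_sysfs_files() below. Writes are parsed and validated,
 * then applied to both the superblock and the in-memory options.
 */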
SHOW(bch2_fs_opts_dir)
{
        struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
        const struct bch_option *opt = container_of(attr, struct bch_option, attr);
        int id = opt - bch2_opt_table;
        u64 v = bch2_opt_get_by_id(&c->opts, id);

        bch2_opt_to_text(out, c, c->disk_sb.sb, opt, v, OPT_SHOW_FULL_LIST);
        prt_char(out, '\n');

        return 0;
}

STORE(bch2_fs_opts_dir)
{
        struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
        const struct bch_option *opt = container_of(attr, struct bch_option, attr);
        int ret, id = opt - bch2_opt_table;
        char *tmp;
        u64 v;

        /*
         * We don't need to take c->writes for correctness, but it eliminates an
         * unsightly error message in the dmesg log when we're RO:
         */
        if (unlikely(!bch2_write_ref_tryget(c, BCH_WRITE_REF_sysfs)))
                return -EROFS;

        tmp = kstrdup(buf, GFP_KERNEL);
        if (!tmp) {
                ret = -ENOMEM;
                goto err;
        }

        ret = bch2_opt_parse(c, opt, strim(tmp), &v, NULL);
        kfree(tmp);

        if (ret < 0)
                goto err;

        ret = bch2_opt_check_may_set(c, id, v);
        if (ret < 0)
                goto err;

        bch2_opt_set_sb(c, opt, v);
        bch2_opt_set_by_id(&c->opts, id, v);

        if ((id == Opt_background_target ||
             id == Opt_background_compression) && v)
                bch2_set_rebalance_needs_scan(c, 0);

        ret = size;
err:
        bch2_write_ref_put(c, BCH_WRITE_REF_sysfs);
        return ret;
}
SYSFS_OPS(bch2_fs_opts_dir);

struct attribute *bch2_fs_opts_dir_files[] = { NULL };

int bch2_opts_create_sysfs_files(struct kobject *kobj)
{
        const struct bch_option *i;
        int ret;

        for (i = bch2_opt_table;
             i < bch2_opt_table + bch2_opts_nr;
             i++) {
                if (!(i->flags & OPT_FS))
                        continue;

                ret = sysfs_create_file(kobj, &i->attr);
                if (ret)
                        return ret;
        }

        return 0;
}

/* time stats */

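/* One read-only file per BCH_TIME_STATS() entry, rendered by bch2_time_stats_to_text(). */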
SHOW(bch2_fs_time_stats)
{
        struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);

#define x(name)                                                         \
        if (attr == &sysfs_time_stat_##name)                            \
                bch2_time_stats_to_text(out, &c->times[BCH_TIME_##name]);
        BCH_TIME_STATS()
#undef x

        return 0;
}

STORE(bch2_fs_time_stats)
{
        return size;
}
SYSFS_OPS(bch2_fs_time_stats);

struct attribute *bch2_fs_time_stats_files[] = {
#define x(name)                                         \
        &sysfs_time_stat_##name,
        BCH_TIME_STATS()
#undef x
        NULL
};

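/*
 * Per-device allocator debug info: bucket/sector/fragmentation totals by data
 * type, reserved buckets per watermark, and open bucket / freelist state.
 */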
static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
{
        struct bch_fs *c = ca->fs;
        struct bch_dev_usage stats = bch2_dev_usage_read(ca);
        unsigned i, nr[BCH_DATA_NR];

        memset(nr, 0, sizeof(nr));

        for (i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
                nr[c->open_buckets[i].data_type]++;

        printbuf_tabstop_push(out, 8);
        printbuf_tabstop_push(out, 16);
        printbuf_tabstop_push(out, 16);
        printbuf_tabstop_push(out, 16);
        printbuf_tabstop_push(out, 16);

        prt_tab(out);
        prt_str(out, "buckets");
        prt_tab_rjust(out);
        prt_str(out, "sectors");
        prt_tab_rjust(out);
        prt_str(out, "fragmented");
        prt_tab_rjust(out);
        prt_newline(out);

        for (i = 0; i < BCH_DATA_NR; i++) {
                prt_str(out, bch2_data_types[i]);
                prt_tab(out);
                prt_u64(out, stats.d[i].buckets);
                prt_tab_rjust(out);
                prt_u64(out, stats.d[i].sectors);
                prt_tab_rjust(out);
                prt_u64(out, stats.d[i].fragmented);
                prt_tab_rjust(out);
                prt_newline(out);
        }

        prt_str(out, "ec");
        prt_tab(out);
        prt_u64(out, stats.buckets_ec);
        prt_tab_rjust(out);
        prt_newline(out);

        prt_newline(out);

        prt_printf(out, "reserves:");
        prt_newline(out);
        for (i = 0; i < BCH_WATERMARK_NR; i++) {
                prt_str(out, bch2_watermarks[i]);
                prt_tab(out);
                prt_u64(out, bch2_dev_buckets_reserved(ca, i));
                prt_tab_rjust(out);
                prt_newline(out);
        }

        prt_newline(out);

        printbuf_tabstops_reset(out);
        printbuf_tabstop_push(out, 24);

        prt_str(out, "freelist_wait");
        prt_tab(out);
        prt_str(out, c->freelist_wait.list.first ? "waiting" : "empty");
        prt_newline(out);

        prt_str(out, "open buckets allocated");
        prt_tab(out);
        prt_u64(out, OPEN_BUCKETS_COUNT - c->open_buckets_nr_free);
        prt_newline(out);

        prt_str(out, "open buckets this dev");
        prt_tab(out);
        prt_u64(out, ca->nr_open_buckets);
        prt_newline(out);

        prt_str(out, "open buckets total");
        prt_tab(out);
        prt_u64(out, OPEN_BUCKETS_COUNT);
        prt_newline(out);

        prt_str(out, "open_buckets_wait");
        prt_tab(out);
        prt_str(out, c->open_buckets_wait.list.first ? "waiting" : "empty");
        prt_newline(out);

        prt_str(out, "open_buckets_btree");
        prt_tab(out);
        prt_u64(out, nr[BCH_DATA_btree]);
        prt_newline(out);

        prt_str(out, "open_buckets_user");
        prt_tab(out);
        prt_u64(out, nr[BCH_DATA_user]);
        prt_newline(out);

        prt_str(out, "buckets_to_invalidate");
        prt_tab(out);
        prt_u64(out, should_invalidate_buckets(ca, stats));
        prt_newline(out);

        prt_str(out, "btree reserve cache");
        prt_tab(out);
        prt_u64(out, c->btree_reserve_cache_nr);
        prt_newline(out);
}

static const char * const bch2_rw[] = {
        "read",
        "write",
        NULL
};

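/* Bytes of completed I/O on this device, broken down by direction and data type. */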
static void dev_iodone_to_text(struct printbuf *out, struct bch_dev *ca)
{
        int rw, i;

        for (rw = 0; rw < 2; rw++) {
                prt_printf(out, "%s:\n", bch2_rw[rw]);

                for (i = 1; i < BCH_DATA_NR; i++)
                        prt_printf(out, "%-12s:%12llu\n",
                               bch2_data_types[i],
                               percpu_u64_get(&ca->io_done->sectors[rw][i]) << 9);
        }
}

SHOW(bch2_dev)
{
        struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
        struct bch_fs *c = ca->fs;

        sysfs_printf(uuid,              "%pU\n", ca->uuid.b);

        sysfs_print(bucket_size,        bucket_bytes(ca));
        sysfs_print(first_bucket,       ca->mi.first_bucket);
        sysfs_print(nbuckets,           ca->mi.nbuckets);
        sysfs_print(durability,         ca->mi.durability);
        sysfs_print(discard,            ca->mi.discard);

        if (attr == &sysfs_label) {
                if (ca->mi.group)
                        bch2_disk_path_to_text(out, c, ca->mi.group - 1);
                prt_char(out, '\n');
        }

        if (attr == &sysfs_has_data) {
                prt_bitflags(out, bch2_data_types, bch2_dev_has_data(c, ca));
                prt_char(out, '\n');
        }

        if (attr == &sysfs_state_rw) {
                prt_string_option(out, bch2_member_states, ca->mi.state);
                prt_char(out, '\n');
        }

        if (attr == &sysfs_iodone)
                dev_iodone_to_text(out, ca);

        sysfs_print(io_latency_read,            atomic64_read(&ca->cur_latency[READ]));
        sysfs_print(io_latency_write,           atomic64_read(&ca->cur_latency[WRITE]));

        if (attr == &sysfs_io_latency_stats_read)
                bch2_time_stats_to_text(out, &ca->io_latency[READ]);

        if (attr == &sysfs_io_latency_stats_write)
                bch2_time_stats_to_text(out, &ca->io_latency[WRITE]);

        sysfs_printf(congested,                 "%u%%",
                     clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX)
                     * 100 / CONGESTED_MAX);

        if (attr == &sysfs_alloc_debug)
                dev_alloc_debug_to_text(out, ca);

        return 0;
}

STORE(bch2_dev)
{
        struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
        struct bch_fs *c = ca->fs;
        struct bch_member *mi;

        if (attr == &sysfs_discard) {
                bool v = strtoul_or_return(buf);

                mutex_lock(&c->sb_lock);
                mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);

                if (v != BCH_MEMBER_DISCARD(mi)) {
                        SET_BCH_MEMBER_DISCARD(mi, v);
                        bch2_write_super(c);
                }
                mutex_unlock(&c->sb_lock);
        }

        if (attr == &sysfs_durability) {
                u64 v = strtoul_or_return(buf);

                mutex_lock(&c->sb_lock);
                mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);

                if (v + 1 != BCH_MEMBER_DURABILITY(mi)) {
                        SET_BCH_MEMBER_DURABILITY(mi, v + 1);
                        bch2_write_super(c);
                }
                mutex_unlock(&c->sb_lock);
        }

        if (attr == &sysfs_label) {
                char *tmp;
                int ret;

                tmp = kstrdup(buf, GFP_KERNEL);
                if (!tmp)
                        return -ENOMEM;

                ret = bch2_dev_group_set(c, ca, strim(tmp));
                kfree(tmp);
                if (ret)
                        return ret;
        }

        return size;
}
SYSFS_OPS(bch2_dev);

struct attribute *bch2_dev_files[] = {
        &sysfs_uuid,
        &sysfs_bucket_size,
        &sysfs_first_bucket,
        &sysfs_nbuckets,
        &sysfs_durability,

        /* settings: */
        &sysfs_discard,
        &sysfs_state_rw,
        &sysfs_label,

        &sysfs_has_data,
        &sysfs_iodone,

        &sysfs_io_latency_read,
        &sysfs_io_latency_write,
        &sysfs_io_latency_stats_read,
        &sysfs_io_latency_stats_write,
        &sysfs_congested,

        /* debug: */
        &sysfs_alloc_debug,
        NULL
};

#endif /* NO_BCACHEFS_SYSFS */