// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "buckets.h"
#include "journal.h"
#include "replicas.h"
#include "super-io.h"

static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
                                            struct bch_replicas_cpu *);

/* Replicas tracking - in memory: */

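/*
 * A replicas entry records one combination the filesystem replicates data
 * across: a data type, a sorted list of devices, and nr_required, the number
 * of those devices that must be readable to recover the data. Rendered by
 * bch2_replicas_entry_to_text() below, an entry prints as e.g.
 *
 *      user: 1/2 [0 3]
 *
 * i.e. user data on devices 0 and 3, any 1 of which suffices for reads. The
 * in-memory table (struct bch_replicas_cpu) is a flat array of @nr entries,
 * each zero padded out to @entry_size bytes and kept in eytzinger order, so
 * lookups can compare whole entries with a plain memcmp().
 */
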
static inline int u8_cmp(u8 l, u8 r)
{
        return cmp_int(l, r);
}

static void verify_replicas_entry_sorted(struct bch_replicas_entry *e)
{
#ifdef CONFIG_BCACHEFS_DEBUG
        unsigned i;

        for (i = 0; i + 1 < e->nr_devs; i++)
                BUG_ON(e->devs[i] >= e->devs[i + 1]);
#endif
}

static void replicas_entry_sort(struct bch_replicas_entry *e)
{
        bubble_sort(e->devs, e->nr_devs, u8_cmp);
}

static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
{
        eytzinger0_sort(r->entries, r->nr, r->entry_size, memcmp, NULL);
}

void bch2_replicas_entry_to_text(struct printbuf *out,
                                 struct bch_replicas_entry *e)
{
        unsigned i;

        pr_buf(out, "%s: %u/%u [",
               bch2_data_types[e->data_type],
               e->nr_required,
               e->nr_devs);

        for (i = 0; i < e->nr_devs; i++)
                pr_buf(out, i ? " %u" : "%u", e->devs[i]);
        pr_buf(out, "]");
}

void bch2_cpu_replicas_to_text(struct printbuf *out,
                               struct bch_replicas_cpu *r)
{
        struct bch_replicas_entry *e;
        bool first = true;

        for_each_cpu_replicas_entry(r, e) {
                if (!first)
                        pr_buf(out, " ");
                first = false;

                bch2_replicas_entry_to_text(out, e);
        }
}

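/*
 * Building a replicas entry from a key: an extent needs only 1 of its
 * non-cached pointers to be readable, so nr_required is 1; if any pointer
 * belongs to an erasure coded stripe (p.ec_nr below), the device list is
 * left empty and the extent is accounted via its stripe key instead. A
 * stripe itself needs nr_blocks - nr_redundant of its blocks to reconstruct
 * data.
 */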
static void extent_to_replicas(struct bkey_s_c k,
                               struct bch_replicas_entry *r)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;

        r->nr_required  = 1;

        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                if (p.ptr.cached)
                        continue;

                if (p.ec_nr) {
                        r->nr_devs = 0;
                        break;
                }

                r->devs[r->nr_devs++] = p.ptr.dev;
        }
}

static void stripe_to_replicas(struct bkey_s_c k,
                               struct bch_replicas_entry *r)
{
        struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
        const struct bch_extent_ptr *ptr;

        r->nr_required  = s.v->nr_blocks - s.v->nr_redundant;

        for (ptr = s.v->ptrs;
             ptr < s.v->ptrs + s.v->nr_blocks;
             ptr++)
                r->devs[r->nr_devs++] = ptr->dev;
}

void bch2_bkey_to_replicas(struct bch_replicas_entry *e,
                           struct bkey_s_c k)
{
        e->nr_devs = 0;

        switch (k.k->type) {
        case KEY_TYPE_btree_ptr:
                e->data_type = BCH_DATA_BTREE;
                extent_to_replicas(k, e);
                break;
        case KEY_TYPE_extent:
        case KEY_TYPE_reflink_v:
                e->data_type = BCH_DATA_USER;
                extent_to_replicas(k, e);
                break;
        case KEY_TYPE_stripe:
                e->data_type = BCH_DATA_USER;
                stripe_to_replicas(k, e);
                break;
        }

        replicas_entry_sort(e);
}

void bch2_devlist_to_replicas(struct bch_replicas_entry *e,
                              enum bch_data_type data_type,
                              struct bch_devs_list devs)
{
        unsigned i;

        BUG_ON(!data_type ||
               data_type == BCH_DATA_SB ||
               data_type >= BCH_DATA_NR);

        e->data_type    = data_type;
        e->nr_devs      = 0;
        e->nr_required  = 1;

        for (i = 0; i < devs.nr; i++)
                e->devs[e->nr_devs++] = devs.devs[i];

        replicas_entry_sort(e);
}

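/*
 * Returns a new table with @new_entry appended and re-sorted; the old table
 * is left untouched and the caller owns both. Allocation failure is
 * signalled by a NULL .entries in the returned struct, which callers check.
 */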
static struct bch_replicas_cpu
cpu_replicas_add_entry(struct bch_replicas_cpu *old,
                       struct bch_replicas_entry *new_entry)
{
        unsigned i;
        struct bch_replicas_cpu new = {
                .nr             = old->nr + 1,
                .entry_size     = max_t(unsigned, old->entry_size,
                                        replicas_entry_bytes(new_entry)),
        };

        BUG_ON(!new_entry->data_type);
        verify_replicas_entry_sorted(new_entry);

        new.entries = kcalloc(new.nr, new.entry_size, GFP_NOIO);
        if (!new.entries)
                return new;

        for (i = 0; i < old->nr; i++)
                memcpy(cpu_replicas_entry(&new, i),
                       cpu_replicas_entry(old, i),
                       old->entry_size);

        memcpy(cpu_replicas_entry(&new, old->nr),
               new_entry,
               replicas_entry_bytes(new_entry));

        bch2_cpu_replicas_sort(&new);
        return new;
}

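/*
 * Lookup by binary search over the eytzinger-ordered table. Only
 * replicas_entry_bytes(@search) bytes are compared; since that prefix
 * includes nr_devs and the device list it uniquely identifies an entry, and
 * table entries are zero padded out to entry_size. Returns -1 if the entry
 * is absent, or too large to fit in this table at all.
 */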
static inline int __replicas_entry_idx(struct bch_replicas_cpu *r,
                                       struct bch_replicas_entry *search)
{
        int idx, entry_size = replicas_entry_bytes(search);

        if (unlikely(entry_size > r->entry_size))
                return -1;

        verify_replicas_entry_sorted(search);

#define entry_cmp(_l, _r, size) memcmp(_l, _r, entry_size)
        idx = eytzinger0_find(r->entries, r->nr, r->entry_size,
                              entry_cmp, search);
#undef entry_cmp

        return idx < r->nr ? idx : -1;
}

int bch2_replicas_entry_idx(struct bch_fs *c,
                            struct bch_replicas_entry *search)
{
        replicas_entry_sort(search);

        return __replicas_entry_idx(&c->replicas, search);
}

static bool __replicas_has_entry(struct bch_replicas_cpu *r,
                                 struct bch_replicas_entry *search)
{
        return __replicas_entry_idx(r, search) >= 0;
}

static bool bch2_replicas_marked_locked(struct bch_fs *c,
                                        struct bch_replicas_entry *search,
                                        bool check_gc_replicas)
{
        if (!search->nr_devs)
                return true;

        verify_replicas_entry_sorted(search);

        return __replicas_has_entry(&c->replicas, search) &&
                (!check_gc_replicas ||
                 likely(!c->replicas_gc.entries) ||
                 __replicas_has_entry(&c->replicas_gc, search));
}

bool bch2_replicas_marked(struct bch_fs *c,
                          struct bch_replicas_entry *search,
                          bool check_gc_replicas)
{
        bool marked;

        percpu_down_read(&c->mark_lock);
        marked = bch2_replicas_marked_locked(c, search, check_gc_replicas);
        percpu_up_read(&c->mark_lock);

        return marked;
}

static void __replicas_table_update(struct bch_fs_usage *dst,
                                    struct bch_replicas_cpu *dst_r,
                                    struct bch_fs_usage *src,
                                    struct bch_replicas_cpu *src_r)
{
        int src_idx, dst_idx;

        *dst = *src;

        for (src_idx = 0; src_idx < src_r->nr; src_idx++) {
                if (!src->replicas[src_idx])
                        continue;

                dst_idx = __replicas_entry_idx(dst_r,
                                cpu_replicas_entry(src_r, src_idx));
                BUG_ON(dst_idx < 0);

                dst->replicas[dst_idx] = src->replicas[src_idx];
        }
}

static void __replicas_table_update_pcpu(struct bch_fs_usage __percpu *dst_p,
                                         struct bch_replicas_cpu *dst_r,
                                         struct bch_fs_usage __percpu *src_p,
                                         struct bch_replicas_cpu *src_r)
{
        unsigned src_nr = sizeof(struct bch_fs_usage) / sizeof(u64) + src_r->nr;
        struct bch_fs_usage *dst, *src = (void *)
                bch2_acc_percpu_u64s((void *) src_p, src_nr);

        preempt_disable();
        dst = this_cpu_ptr(dst_p);
        preempt_enable();

        __replicas_table_update(dst, dst_r, src, src_r);
}

/*
 * Resize filesystem accounting:
 */
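/*
 * The replicas[] counters in struct bch_fs_usage are indexed by position in
 * c->replicas, so when the table grows every usage array - usage_base, the
 * two percpu usage[] copies, the scratch buffer and (during gc) usage_gc -
 * must be reallocated at the new size and each counter copied across to its
 * entry's index in the new table:
 */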
static int replicas_table_update(struct bch_fs *c,
                                 struct bch_replicas_cpu *new_r)
{
        struct bch_fs_usage __percpu *new_usage[2] = { NULL, NULL };
        struct bch_fs_usage *new_scratch = NULL;
        struct bch_fs_usage __percpu *new_gc = NULL;
        struct bch_fs_usage *new_base = NULL;
        unsigned bytes = sizeof(struct bch_fs_usage) +
                sizeof(u64) * new_r->nr;
        int ret = -ENOMEM;

        if (!(new_base = kzalloc(bytes, GFP_NOIO)) ||
            !(new_usage[0] = __alloc_percpu_gfp(bytes, sizeof(u64),
                                                GFP_NOIO)) ||
            !(new_usage[1] = __alloc_percpu_gfp(bytes, sizeof(u64),
                                                GFP_NOIO)) ||
            !(new_scratch  = kmalloc(bytes, GFP_NOIO)) ||
            (c->usage_gc &&
             !(new_gc = __alloc_percpu_gfp(bytes, sizeof(u64), GFP_NOIO))))
                goto err;

        if (c->usage_base)
                __replicas_table_update(new_base,               new_r,
                                        c->usage_base,          &c->replicas);
        if (c->usage[0])
                __replicas_table_update_pcpu(new_usage[0],      new_r,
                                             c->usage[0],       &c->replicas);
        if (c->usage[1])
                __replicas_table_update_pcpu(new_usage[1],      new_r,
                                             c->usage[1],       &c->replicas);
        if (c->usage_gc)
                __replicas_table_update_pcpu(new_gc,            new_r,
                                             c->usage_gc,       &c->replicas);

        swap(c->usage_base,     new_base);
        swap(c->usage[0],       new_usage[0]);
        swap(c->usage[1],       new_usage[1]);
        swap(c->usage_scratch,  new_scratch);
        swap(c->usage_gc,       new_gc);
        swap(c->replicas,       *new_r);
        ret = 0;
err:
        free_percpu(new_gc);
        kfree(new_scratch);
        free_percpu(new_usage[1]);
        free_percpu(new_usage[0]);
        kfree(new_base);
        return ret;
}

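/*
 * Every journal write carries a snapshot of filesystem usage: fixed
 * jset_entry_usage entries (nr_inodes, key_version, one per persistent
 * reserve) plus one jset_entry_data_usage per replicas entry. Size that
 * reservation, in u64s, for the given replicas table:
 */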
static unsigned reserve_journal_replicas(struct bch_fs *c,
                                         struct bch_replicas_cpu *r)
{
        struct bch_replicas_entry *e;
        unsigned journal_res_u64s = 0;

        /* nr_inodes: */
        journal_res_u64s +=
                DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));

        /* key_version: */
        journal_res_u64s +=
                DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));

        /* persistent_reserved: */
        journal_res_u64s +=
                DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64)) *
                BCH_REPLICAS_MAX;

        for_each_cpu_replicas_entry(r, e)
                journal_res_u64s +=
                        DIV_ROUND_UP(sizeof(struct jset_entry_data_usage) +
                                     e->nr_devs, sizeof(u64));
        return journal_res_u64s;
}

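/*
 * Slowpath for marking a new replicas entry: the superblock copy is resized
 * and written out first; only once the new entry is persistent do the
 * in-memory tables (and the journal reservation sized above) get updated,
 * so the superblock never lacks an entry for data that may already exist.
 */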
noinline
static int bch2_mark_replicas_slowpath(struct bch_fs *c,
                                       struct bch_replicas_entry *new_entry)
{
        struct bch_replicas_cpu new_r, new_gc;
        int ret = -ENOMEM;

        memset(&new_r, 0, sizeof(new_r));
        memset(&new_gc, 0, sizeof(new_gc));

        mutex_lock(&c->sb_lock);

        if (c->replicas_gc.entries &&
            !__replicas_has_entry(&c->replicas_gc, new_entry)) {
                new_gc = cpu_replicas_add_entry(&c->replicas_gc, new_entry);
                if (!new_gc.entries)
                        goto err;
        }

        if (!__replicas_has_entry(&c->replicas, new_entry)) {
                new_r = cpu_replicas_add_entry(&c->replicas, new_entry);
                if (!new_r.entries)
                        goto err;

                ret = bch2_cpu_replicas_to_sb_replicas(c, &new_r);
                if (ret)
                        goto err;

                bch2_journal_entry_res_resize(&c->journal,
                                &c->replicas_journal_res,
                                reserve_journal_replicas(c, &new_r));
        }

        ret = 0;

        if (!new_r.entries &&
            !new_gc.entries)
                goto err;

        /* allocations done, now commit: */

        if (new_r.entries)
                bch2_write_super(c);

        /* don't update in memory replicas until changes are persistent */
        percpu_down_write(&c->mark_lock);
        if (new_r.entries)
                ret = replicas_table_update(c, &new_r);
        if (new_gc.entries)
                swap(new_gc, c->replicas_gc);
        percpu_up_write(&c->mark_lock);
err:
        mutex_unlock(&c->sb_lock);

        kfree(new_r.entries);
        kfree(new_gc.entries);

        return ret;
}

int bch2_mark_replicas(struct bch_fs *c,
                       struct bch_replicas_entry *r)
{
        return likely(bch2_replicas_marked(c, r, true))
                ? 0
                : bch2_mark_replicas_slowpath(c, r);
}

bool bch2_bkey_replicas_marked_locked(struct bch_fs *c,
                                      struct bkey_s_c k,
                                      bool check_gc_replicas)
{
        struct bch_replicas_padded search;
        struct bch_devs_list cached = bch2_bkey_cached_devs(k);
        unsigned i;

        for (i = 0; i < cached.nr; i++) {
                bch2_replicas_entry_cached(&search.e, cached.devs[i]);

                if (!bch2_replicas_marked_locked(c, &search.e,
                                                 check_gc_replicas))
                        return false;
        }

        bch2_bkey_to_replicas(&search.e, k);

        return bch2_replicas_marked_locked(c, &search.e, check_gc_replicas);
}

bool bch2_bkey_replicas_marked(struct bch_fs *c,
                               struct bkey_s_c k,
                               bool check_gc_replicas)
{
        bool marked;

        percpu_down_read(&c->mark_lock);
        marked = bch2_bkey_replicas_marked_locked(c, k, check_gc_replicas);
        percpu_up_read(&c->mark_lock);

        return marked;
}

int bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
{
        struct bch_replicas_padded search;
        struct bch_devs_list cached = bch2_bkey_cached_devs(k);
        unsigned i;
        int ret;

        for (i = 0; i < cached.nr; i++) {
                bch2_replicas_entry_cached(&search.e, cached.devs[i]);

                ret = bch2_mark_replicas(c, &search.e);
                if (ret)
                        return ret;
        }

        bch2_bkey_to_replicas(&search.e, k);

        return bch2_mark_replicas(c, &search.e);
}

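/*
 * Replicas gc is a mark and sweep over the replicas table:
 * bch2_replicas_gc_start() copies the entries whose types are _not_ being
 * gc'd (not in @typemask) into c->replicas_gc; while gc runs, marking
 * re-adds any entry still in use (see bch2_mark_replicas_slowpath()).
 * bch2_replicas_gc_end() then also keeps any entry that still has nonzero
 * sectors accounted against it, and drops the rest.
 */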
int bch2_replicas_gc_end(struct bch_fs *c, int ret)
{
        unsigned i;

        lockdep_assert_held(&c->replicas_gc_lock);

        mutex_lock(&c->sb_lock);
        percpu_down_write(&c->mark_lock);

        /*
         * this is kind of crappy; the replicas gc mechanism needs to be ripped
         * out
         */

        for (i = 0; i < c->replicas.nr; i++) {
                struct bch_replicas_entry *e =
                        cpu_replicas_entry(&c->replicas, i);
                struct bch_replicas_cpu n;

                if (!__replicas_has_entry(&c->replicas_gc, e) &&
                    (c->usage_base->replicas[i] ||
                     percpu_u64_get(&c->usage[0]->replicas[i]) ||
                     percpu_u64_get(&c->usage[1]->replicas[i]))) {
                        n = cpu_replicas_add_entry(&c->replicas_gc, e);
                        if (!n.entries) {
                                ret = -ENOMEM;
                                goto err;
                        }

                        swap(n, c->replicas_gc);
                        kfree(n.entries);
                }
        }

        if (bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc)) {
                ret = -ENOSPC;
                goto err;
        }

        ret = replicas_table_update(c, &c->replicas_gc);
err:
        kfree(c->replicas_gc.entries);
        c->replicas_gc.entries = NULL;

        percpu_up_write(&c->mark_lock);

        if (!ret)
                bch2_write_super(c);

        mutex_unlock(&c->sb_lock);

        return ret;
}

int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
{
        struct bch_replicas_entry *e;
        unsigned i = 0;

        lockdep_assert_held(&c->replicas_gc_lock);

        mutex_lock(&c->sb_lock);
        BUG_ON(c->replicas_gc.entries);

        c->replicas_gc.nr               = 0;
        c->replicas_gc.entry_size       = 0;

        for_each_cpu_replicas_entry(&c->replicas, e)
                if (!((1 << e->data_type) & typemask)) {
                        c->replicas_gc.nr++;
                        c->replicas_gc.entry_size =
                                max_t(unsigned, c->replicas_gc.entry_size,
                                      replicas_entry_bytes(e));
                }

        c->replicas_gc.entries = kcalloc(c->replicas_gc.nr,
                                         c->replicas_gc.entry_size,
                                         GFP_NOIO);
        if (!c->replicas_gc.entries) {
                mutex_unlock(&c->sb_lock);
                return -ENOMEM;
        }

        for_each_cpu_replicas_entry(&c->replicas, e)
                if (!((1 << e->data_type) & typemask))
                        memcpy(cpu_replicas_entry(&c->replicas_gc, i++),
                               e, c->replicas_gc.entry_size);

        bch2_cpu_replicas_sort(&c->replicas_gc);
        mutex_unlock(&c->sb_lock);

        return 0;
}

int bch2_replicas_gc2(struct bch_fs *c)
{
        struct bch_replicas_cpu new = { 0 };
        unsigned i, nr;
        int ret = 0;

        bch2_journal_meta(&c->journal);
retry:
        nr              = READ_ONCE(c->replicas.nr);
        new.entry_size  = READ_ONCE(c->replicas.entry_size);
        new.entries     = kcalloc(nr, new.entry_size, GFP_KERNEL);
        if (!new.entries)
                return -ENOMEM;

        mutex_lock(&c->sb_lock);
        percpu_down_write(&c->mark_lock);

        if (nr                  != c->replicas.nr ||
            new.entry_size      != c->replicas.entry_size) {
                percpu_up_write(&c->mark_lock);
                mutex_unlock(&c->sb_lock);
                kfree(new.entries);
                goto retry;
        }

        for (i = 0; i < c->replicas.nr; i++) {
                struct bch_replicas_entry *e =
                        cpu_replicas_entry(&c->replicas, i);

                if (e->data_type == BCH_DATA_JOURNAL ||
                    c->usage_base->replicas[i] ||
                    percpu_u64_get(&c->usage[0]->replicas[i]) ||
                    percpu_u64_get(&c->usage[1]->replicas[i]))
                        memcpy(cpu_replicas_entry(&new, new.nr++),
                               e, new.entry_size);
        }

        bch2_cpu_replicas_sort(&new);

        if (bch2_cpu_replicas_to_sb_replicas(c, &new)) {
                ret = -ENOSPC;
                goto err;
        }

        ret = replicas_table_update(c, &new);
err:
        kfree(new.entries);

        percpu_up_write(&c->mark_lock);

        if (!ret)
                bch2_write_super(c);

        mutex_unlock(&c->sb_lock);

        return ret;
}

int bch2_replicas_set_usage(struct bch_fs *c,
                            struct bch_replicas_entry *r,
                            u64 sectors)
{
        int ret, idx = bch2_replicas_entry_idx(c, r);

        if (idx < 0) {
                struct bch_replicas_cpu n;

                n = cpu_replicas_add_entry(&c->replicas, r);
                if (!n.entries)
                        return -ENOMEM;

                ret = replicas_table_update(c, &n);
                kfree(n.entries);
                if (ret)
                        return ret;

                idx = bch2_replicas_entry_idx(c, r);
                BUG_ON(idx < 0);
        }

        c->usage_base->replicas[idx] = sectors;

        return 0;
}

/* Replicas tracking - superblock: */

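/*
 * Two on-disk encodings exist: bch_sb_field_replicas_v0, whose entries have
 * no nr_required field, and the current bch_sb_field_replicas. Reading v0,
 * entries are widened and nr_required defaults to 1; writing, the v0 format
 * is kept unless some entry has nr_required != 1 (see
 * bch2_cpu_replicas_to_sb_replicas()). Only one of the two fields is ever
 * present in the superblock at a time.
 */
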
static int
__bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r,
                                   struct bch_replicas_cpu *cpu_r)
{
        struct bch_replicas_entry *e, *dst;
        unsigned nr = 0, entry_size = 0, idx = 0;

        for_each_replicas_entry(sb_r, e) {
                entry_size = max_t(unsigned, entry_size,
                                   replicas_entry_bytes(e));
                nr++;
        }

        cpu_r->entries = kcalloc(nr, entry_size, GFP_NOIO);
        if (!cpu_r->entries)
                return -ENOMEM;

        cpu_r->nr               = nr;
        cpu_r->entry_size       = entry_size;

        for_each_replicas_entry(sb_r, e) {
                dst = cpu_replicas_entry(cpu_r, idx++);
                memcpy(dst, e, replicas_entry_bytes(e));
                replicas_entry_sort(dst);
        }

        return 0;
}

static int
__bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
                                      struct bch_replicas_cpu *cpu_r)
{
        struct bch_replicas_entry_v0 *e;
        unsigned nr = 0, entry_size = 0, idx = 0;

        for_each_replicas_entry(sb_r, e) {
                entry_size = max_t(unsigned, entry_size,
                                   replicas_entry_bytes(e));
                nr++;
        }

        entry_size += sizeof(struct bch_replicas_entry) -
                sizeof(struct bch_replicas_entry_v0);

        cpu_r->entries = kcalloc(nr, entry_size, GFP_NOIO);
        if (!cpu_r->entries)
                return -ENOMEM;

        cpu_r->nr               = nr;
        cpu_r->entry_size       = entry_size;

        for_each_replicas_entry(sb_r, e) {
                struct bch_replicas_entry *dst =
                        cpu_replicas_entry(cpu_r, idx++);

                dst->data_type  = e->data_type;
                dst->nr_devs    = e->nr_devs;
                dst->nr_required = 1;
                memcpy(dst->devs, e->devs, e->nr_devs);
                replicas_entry_sort(dst);
        }

        return 0;
}

int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
{
        struct bch_sb_field_replicas *sb_v1;
        struct bch_sb_field_replicas_v0 *sb_v0;
        struct bch_replicas_cpu new_r = { 0, 0, NULL };
        int ret = 0;

        if ((sb_v1 = bch2_sb_get_replicas(c->disk_sb.sb)))
                ret = __bch2_sb_replicas_to_cpu_replicas(sb_v1, &new_r);
        else if ((sb_v0 = bch2_sb_get_replicas_v0(c->disk_sb.sb)))
                ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_v0, &new_r);

        if (ret)
                return -ENOMEM;

        bch2_cpu_replicas_sort(&new_r);

        percpu_down_write(&c->mark_lock);

        ret = replicas_table_update(c, &new_r);
        percpu_up_write(&c->mark_lock);

        kfree(new_r.entries);

        return ret;
}

static int bch2_cpu_replicas_to_sb_replicas_v0(struct bch_fs *c,
                                               struct bch_replicas_cpu *r)
{
        struct bch_sb_field_replicas_v0 *sb_r;
        struct bch_replicas_entry_v0 *dst;
        struct bch_replicas_entry *src;
        size_t bytes;

        bytes = sizeof(struct bch_sb_field_replicas);

        for_each_cpu_replicas_entry(r, src)
                bytes += replicas_entry_bytes(src) - 1;

        sb_r = bch2_sb_resize_replicas_v0(&c->disk_sb,
                        DIV_ROUND_UP(bytes, sizeof(u64)));
        if (!sb_r)
                return -ENOSPC;

        bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas);
        sb_r = bch2_sb_get_replicas_v0(c->disk_sb.sb);

        memset(&sb_r->entries, 0,
               vstruct_end(&sb_r->field) -
               (void *) &sb_r->entries);

        dst = sb_r->entries;
        for_each_cpu_replicas_entry(r, src) {
                dst->data_type  = src->data_type;
                dst->nr_devs    = src->nr_devs;
                memcpy(dst->devs, src->devs, src->nr_devs);

                dst = replicas_entry_next(dst);

                BUG_ON((void *) dst > vstruct_end(&sb_r->field));
        }

        return 0;
}

static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
                                            struct bch_replicas_cpu *r)
{
        struct bch_sb_field_replicas *sb_r;
        struct bch_replicas_entry *dst, *src;
        bool need_v1 = false;
        size_t bytes;

        bytes = sizeof(struct bch_sb_field_replicas);

        for_each_cpu_replicas_entry(r, src) {
                bytes += replicas_entry_bytes(src);
                if (src->nr_required != 1)
                        need_v1 = true;
        }

        if (!need_v1)
                return bch2_cpu_replicas_to_sb_replicas_v0(c, r);

        sb_r = bch2_sb_resize_replicas(&c->disk_sb,
                        DIV_ROUND_UP(bytes, sizeof(u64)));
        if (!sb_r)
                return -ENOSPC;

        bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas_v0);
        sb_r = bch2_sb_get_replicas(c->disk_sb.sb);

        memset(&sb_r->entries, 0,
               vstruct_end(&sb_r->field) -
               (void *) &sb_r->entries);

        dst = sb_r->entries;
        for_each_cpu_replicas_entry(r, src) {
                memcpy(dst, src, replicas_entry_bytes(src));

                dst = replicas_entry_next(dst);

                BUG_ON((void *) dst > vstruct_end(&sb_r->field));
        }

        return 0;
}

static const char *check_dup_replicas_entries(struct bch_replicas_cpu *cpu_r)
{
        unsigned i;

        sort_cmp_size(cpu_r->entries,
                      cpu_r->nr,
                      cpu_r->entry_size,
                      memcmp, NULL);

        for (i = 0; i + 1 < cpu_r->nr; i++) {
                struct bch_replicas_entry *l =
                        cpu_replicas_entry(cpu_r, i);
                struct bch_replicas_entry *r =
                        cpu_replicas_entry(cpu_r, i + 1);

                BUG_ON(memcmp(l, r, cpu_r->entry_size) > 0);

                if (!memcmp(l, r, cpu_r->entry_size))
                        return "duplicate replicas entry";
        }

        return NULL;
}

static const char *bch2_sb_validate_replicas(struct bch_sb *sb, struct bch_sb_field *f)
{
        struct bch_sb_field_replicas *sb_r = field_to_type(f, replicas);
        struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
        struct bch_replicas_cpu cpu_r = { .entries = NULL };
        struct bch_replicas_entry *e;
        const char *err;
        unsigned i;

        for_each_replicas_entry(sb_r, e) {
                err = "invalid replicas entry: invalid data type";
                if (e->data_type >= BCH_DATA_NR)
                        goto err;

                err = "invalid replicas entry: no devices";
                if (!e->nr_devs)
                        goto err;

                err = "invalid replicas entry: bad nr_required";
                if (!e->nr_required ||
                    (e->nr_required > 1 &&
                     e->nr_required >= e->nr_devs))
                        goto err;

                err = "invalid replicas entry: invalid device";
                for (i = 0; i < e->nr_devs; i++)
                        if (!bch2_dev_exists(sb, mi, e->devs[i]))
                                goto err;
        }

        err = "cannot allocate memory";
        if (__bch2_sb_replicas_to_cpu_replicas(sb_r, &cpu_r))
                goto err;

        err = check_dup_replicas_entries(&cpu_r);
err:
        kfree(cpu_r.entries);
        return err;
}

static void bch2_sb_replicas_to_text(struct printbuf *out,
                                     struct bch_sb *sb,
                                     struct bch_sb_field *f)
{
        struct bch_sb_field_replicas *r = field_to_type(f, replicas);
        struct bch_replicas_entry *e;
        bool first = true;

        for_each_replicas_entry(r, e) {
                if (!first)
                        pr_buf(out, " ");
                first = false;

                bch2_replicas_entry_to_text(out, e);
        }
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas = {
        .validate       = bch2_sb_validate_replicas,
        .to_text        = bch2_sb_replicas_to_text,
};

static const char *bch2_sb_validate_replicas_v0(struct bch_sb *sb, struct bch_sb_field *f)
{
        struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
        struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
        struct bch_replicas_cpu cpu_r = { .entries = NULL };
        struct bch_replicas_entry_v0 *e;
        const char *err;
        unsigned i;

        for_each_replicas_entry_v0(sb_r, e) {
                err = "invalid replicas entry: invalid data type";
                if (e->data_type >= BCH_DATA_NR)
                        goto err;

                err = "invalid replicas entry: no devices";
                if (!e->nr_devs)
                        goto err;

                err = "invalid replicas entry: invalid device";
                for (i = 0; i < e->nr_devs; i++)
                        if (!bch2_dev_exists(sb, mi, e->devs[i]))
                                goto err;
        }

        err = "cannot allocate memory";
        if (__bch2_sb_replicas_v0_to_cpu_replicas(sb_r, &cpu_r))
                goto err;

        err = check_dup_replicas_entries(&cpu_r);
err:
        kfree(cpu_r.entries);
        return err;
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0 = {
        .validate       = bch2_sb_validate_replicas_v0,
};

/* Query replicas: */

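/*
 * For each data type, report the worst case across all replicas entries of
 * that type, given the set of online devices: redundancy is the minimum of
 * (online devices in the entry - nr_required), so redundancy >= 0 means all
 * data of that type is currently readable, and its value is how many more
 * devices could go offline before that stops being true; nr_offline is the
 * largest number of offline devices in any single entry.
 */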
struct replicas_status __bch2_replicas_status(struct bch_fs *c,
                                              struct bch_devs_mask online_devs)
{
        struct bch_sb_field_members *mi;
        struct bch_replicas_entry *e;
        unsigned i, nr_online, nr_offline;
        struct replicas_status ret;

        memset(&ret, 0, sizeof(ret));

        for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
                ret.replicas[i].redundancy = INT_MAX;

        mi = bch2_sb_get_members(c->disk_sb.sb);

        percpu_down_read(&c->mark_lock);

        for_each_cpu_replicas_entry(&c->replicas, e) {
                if (e->data_type >= ARRAY_SIZE(ret.replicas))
                        panic("e %p data_type %u\n", e, e->data_type);

                nr_online = nr_offline = 0;

                for (i = 0; i < e->nr_devs; i++) {
                        BUG_ON(!bch2_dev_exists(c->disk_sb.sb, mi,
                                                e->devs[i]));

                        if (test_bit(e->devs[i], online_devs.d))
                                nr_online++;
                        else
                                nr_offline++;
                }

                ret.replicas[e->data_type].redundancy =
                        min(ret.replicas[e->data_type].redundancy,
                            (int) nr_online - (int) e->nr_required);

                ret.replicas[e->data_type].nr_offline =
                        max(ret.replicas[e->data_type].nr_offline,
                            nr_offline);
        }

        percpu_up_read(&c->mark_lock);

        for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
                if (ret.replicas[i].redundancy == INT_MAX)
                        ret.replicas[i].redundancy = 0;

        return ret;
}

struct replicas_status bch2_replicas_status(struct bch_fs *c)
{
        return __bch2_replicas_status(c, bch2_online_devs(c));
}

static bool have_enough_devs(struct replicas_status s,
                             enum bch_data_type type,
                             bool force_if_degraded,
                             bool force_if_lost)
{
        return (!s.replicas[type].nr_offline || force_if_degraded) &&
                (s.replicas[type].redundancy >= 0 || force_if_lost);
}

bool bch2_have_enough_devs(struct replicas_status s, unsigned flags)
{
        return (have_enough_devs(s, BCH_DATA_JOURNAL,
                                 flags & BCH_FORCE_IF_METADATA_DEGRADED,
                                 flags & BCH_FORCE_IF_METADATA_LOST) &&
                have_enough_devs(s, BCH_DATA_BTREE,
                                 flags & BCH_FORCE_IF_METADATA_DEGRADED,
                                 flags & BCH_FORCE_IF_METADATA_LOST) &&
                have_enough_devs(s, BCH_DATA_USER,
                                 flags & BCH_FORCE_IF_DATA_DEGRADED,
                                 flags & BCH_FORCE_IF_DATA_LOST));
}

int bch2_replicas_online(struct bch_fs *c, bool meta)
{
        struct replicas_status s = bch2_replicas_status(c);

        return (meta
                ? min(s.replicas[BCH_DATA_JOURNAL].redundancy,
                      s.replicas[BCH_DATA_BTREE].redundancy)
                : s.replicas[BCH_DATA_USER].redundancy) + 1;
}

unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
{
        struct bch_replicas_entry *e;
        unsigned i, ret = 0;

        percpu_down_read(&c->mark_lock);

        for_each_cpu_replicas_entry(&c->replicas, e)
                for (i = 0; i < e->nr_devs; i++)
                        if (e->devs[i] == ca->dev_idx)
                                ret |= 1 << e->data_type;

        percpu_up_read(&c->mark_lock);

        return ret;
}

int bch2_fs_replicas_init(struct bch_fs *c)
{
        c->journal.entry_u64s_reserved +=
                reserve_journal_replicas(c, &c->replicas);

        return replicas_table_update(c, &c->replicas);
}