/* libbcachefs/replicas.c */

#include "bcachefs.h"
#include "journal.h"
#include "replicas.h"
#include "super-io.h"

static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
                                            struct bch_replicas_cpu *);

/* Replicas tracking - in memory: */

static inline int u8_cmp(u8 l, u8 r)
{
        return (l > r) - (l < r);
}

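/*
 * In debug builds, assert that a replicas entry's device list is sorted in
 * strictly ascending order (no duplicates) - the invariant the lookups below
 * rely on.
 */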
static void verify_replicas_entry_sorted(struct bch_replicas_entry *e)
{
#ifdef CONFIG_BCACHEFS_DEBUG
        unsigned i;

        for (i = 0; i + 1 < e->nr_devs; i++)
                BUG_ON(e->devs[i] >= e->devs[i + 1]);
#endif
}

static void replicas_entry_sort(struct bch_replicas_entry *e)
{
        bubble_sort(e->devs, e->nr_devs, u8_cmp);
}

static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
{
        eytzinger0_sort(r->entries, r->nr, r->entry_size, memcmp, NULL);
}

void bch2_replicas_entry_to_text(struct printbuf *out,
                                 struct bch_replicas_entry *e)
{
        unsigned i;

        pr_buf(out, "%s: %u/%u [",
               bch2_data_types[e->data_type],
               e->nr_required,
               e->nr_devs);

        for (i = 0; i < e->nr_devs; i++)
                pr_buf(out, i ? " %u" : "%u", e->devs[i]);
        pr_buf(out, "]");
}

void bch2_cpu_replicas_to_text(struct printbuf *out,
                               struct bch_replicas_cpu *r)
{
        struct bch_replicas_entry *e;
        bool first = true;

        for_each_cpu_replicas_entry(r, e) {
                if (!first)
                        pr_buf(out, " ");
                first = false;

                bch2_replicas_entry_to_text(out, e);
        }
}

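/*
 * Build a replicas entry from an extent's non-cached pointers. If any pointer
 * is erasure coded, nr_devs is cleared and we bail out - those replicas are
 * tracked through the stripe key instead.
 */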
static void extent_to_replicas(struct bkey_s_c k,
                               struct bch_replicas_entry *r)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;

        r->nr_required  = 1;

        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                if (p.ptr.cached)
                        continue;

                if (p.ec_nr) {
                        r->nr_devs = 0;
                        break;
                }

                r->devs[r->nr_devs++] = p.ptr.dev;
        }
}

static void stripe_to_replicas(struct bkey_s_c k,
                               struct bch_replicas_entry *r)
{
        struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
        const struct bch_extent_ptr *ptr;

        r->nr_required  = s.v->nr_blocks - s.v->nr_redundant;

        for (ptr = s.v->ptrs;
             ptr < s.v->ptrs + s.v->nr_blocks;
             ptr++)
                r->devs[r->nr_devs++] = ptr->dev;
}

static void bkey_to_replicas(struct bch_replicas_entry *e,
                             struct bkey_s_c k)
{
        e->nr_devs = 0;

        switch (k.k->type) {
        case KEY_TYPE_btree_ptr:
                e->data_type = BCH_DATA_BTREE;
                extent_to_replicas(k, e);
                break;
        case KEY_TYPE_extent:
                e->data_type = BCH_DATA_USER;
                extent_to_replicas(k, e);
                break;
        case KEY_TYPE_stripe:
                e->data_type = BCH_DATA_USER;
                stripe_to_replicas(k, e);
                break;
        }

        replicas_entry_sort(e);
}

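/*
 * Fill in a replicas entry from an explicit device list rather than from a
 * key; superblock data is excluded by the assertion below.
 */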
void bch2_devlist_to_replicas(struct bch_replicas_entry *e,
                              enum bch_data_type data_type,
                              struct bch_devs_list devs)
{
        unsigned i;

        BUG_ON(!data_type ||
               data_type == BCH_DATA_SB ||
               data_type >= BCH_DATA_NR);

        e->data_type    = data_type;
        e->nr_devs      = 0;
        e->nr_required  = 1;

        for (i = 0; i < devs.nr; i++)
                e->devs[e->nr_devs++] = devs.devs[i];

        replicas_entry_sort(e);
}

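/*
 * Return a copy of @old with @new_entry appended and the table re-sorted. On
 * allocation failure the returned table has .entries == NULL; the caller is
 * responsible for freeing the new table's entries.
 */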
static struct bch_replicas_cpu
cpu_replicas_add_entry(struct bch_replicas_cpu *old,
                       struct bch_replicas_entry *new_entry)
{
        unsigned i;
        struct bch_replicas_cpu new = {
                .nr             = old->nr + 1,
                .entry_size     = max_t(unsigned, old->entry_size,
                                        replicas_entry_bytes(new_entry)),
        };

        BUG_ON(!new_entry->data_type);
        verify_replicas_entry_sorted(new_entry);

        new.entries = kcalloc(new.nr, new.entry_size, GFP_NOIO);
        if (!new.entries)
                return new;

        for (i = 0; i < old->nr; i++)
                memcpy(cpu_replicas_entry(&new, i),
                       cpu_replicas_entry(old, i),
                       old->entry_size);

        memcpy(cpu_replicas_entry(&new, old->nr),
               new_entry,
               replicas_entry_bytes(new_entry));

        bch2_cpu_replicas_sort(&new);
        return new;
}

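/*
 * Binary search (eytzinger layout) for @search in @r; returns the index, or -1
 * if not found. Entries are compared as raw bytes, so @search must already be
 * sorted and must not be larger than the table's entry_size.
 */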
static inline int __replicas_entry_idx(struct bch_replicas_cpu *r,
                                       struct bch_replicas_entry *search)
{
        int idx, entry_size = replicas_entry_bytes(search);

        if (unlikely(entry_size > r->entry_size))
                return -1;

        verify_replicas_entry_sorted(search);

#define entry_cmp(_l, _r, size) memcmp(_l, _r, entry_size)
        idx = eytzinger0_find(r->entries, r->nr, r->entry_size,
                              entry_cmp, search);
#undef entry_cmp

        return idx < r->nr ? idx : -1;
}

int bch2_replicas_entry_idx(struct bch_fs *c,
                            struct bch_replicas_entry *search)
{
        replicas_entry_sort(search);

        return __replicas_entry_idx(&c->replicas, search);
}

static bool __replicas_has_entry(struct bch_replicas_cpu *r,
                                 struct bch_replicas_entry *search)
{
        return __replicas_entry_idx(r, search) >= 0;
}

static bool bch2_replicas_marked_locked(struct bch_fs *c,
                          struct bch_replicas_entry *search,
                          bool check_gc_replicas)
{
        if (!search->nr_devs)
                return true;

        verify_replicas_entry_sorted(search);

        return __replicas_has_entry(&c->replicas, search) &&
                (!check_gc_replicas ||
                 likely((!c->replicas_gc.entries)) ||
                 __replicas_has_entry(&c->replicas_gc, search));
}

bool bch2_replicas_marked(struct bch_fs *c,
                          struct bch_replicas_entry *search,
                          bool check_gc_replicas)
{
        bool marked;

        percpu_down_read_preempt_disable(&c->mark_lock);
        marked = bch2_replicas_marked_locked(c, search, check_gc_replicas);
        percpu_up_read_preempt_enable(&c->mark_lock);

        return marked;
}

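/*
 * Copy per-cpu usage counters from @src_p (indexed by @src_r) into @dst_p
 * (indexed by @dst_r). Every entry of the old table must already exist in the
 * new one, hence the BUG_ON when a lookup fails.
 */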
static void __replicas_table_update(struct bch_fs_usage __percpu *dst_p,
                                    struct bch_replicas_cpu *dst_r,
                                    struct bch_fs_usage __percpu *src_p,
                                    struct bch_replicas_cpu *src_r)
{
        unsigned src_nr = sizeof(struct bch_fs_usage) / sizeof(u64) + src_r->nr;
        struct bch_fs_usage *dst, *src = (void *)
                bch2_acc_percpu_u64s((void *) src_p, src_nr);
        int src_idx, dst_idx;

        preempt_disable();
        dst = this_cpu_ptr(dst_p);
        preempt_enable();

        *dst = *src;

        for (src_idx = 0; src_idx < src_r->nr; src_idx++) {
                if (!src->replicas[src_idx])
                        continue;

                dst_idx = __replicas_entry_idx(dst_r,
                                cpu_replicas_entry(src_r, src_idx));
                BUG_ON(dst_idx < 0);

                dst->replicas[dst_idx] = src->replicas[src_idx];
        }
}

/*
 * Resize filesystem accounting:
 */
static int replicas_table_update(struct bch_fs *c,
                                 struct bch_replicas_cpu *new_r)
{
        struct bch_fs_usage __percpu *new_usage[2] = { NULL, NULL };
        struct bch_fs_usage *new_scratch = NULL;
        unsigned bytes = sizeof(struct bch_fs_usage) +
                sizeof(u64) * new_r->nr;
        int ret = -ENOMEM;

        if (!(new_usage[0] = __alloc_percpu_gfp(bytes, sizeof(u64),
                                                GFP_NOIO)) ||
            (c->usage[1] &&
             !(new_usage[1] = __alloc_percpu_gfp(bytes, sizeof(u64),
                                                 GFP_NOIO))) ||
            !(new_scratch  = kmalloc(bytes, GFP_NOIO)))
                goto err;

        if (c->usage[0])
                __replicas_table_update(new_usage[0],   new_r,
                                        c->usage[0],    &c->replicas);
        if (c->usage[1])
                __replicas_table_update(new_usage[1],   new_r,
                                        c->usage[1],    &c->replicas);

        swap(c->usage[0],       new_usage[0]);
        swap(c->usage[1],       new_usage[1]);
        swap(c->usage_scratch,  new_scratch);
        swap(c->replicas,       *new_r);
        ret = 0;
err:
        kfree(new_scratch);
        free_percpu(new_usage[1]);
        free_percpu(new_usage[0]);
        return ret;
}

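/*
 * Worst-case journal reservation (in u64s) needed for the fs usage entries
 * written with every journal commit: nr_inodes, key_version, the
 * persistent_reserved array, and one data usage entry per replicas entry.
 */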
static unsigned reserve_journal_replicas(struct bch_fs *c,
                                     struct bch_replicas_cpu *r)
{
        struct bch_replicas_entry *e;
        unsigned journal_res_u64s = 0;

        /* nr_inodes: */
        journal_res_u64s +=
                DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));

        /* key_version: */
        journal_res_u64s +=
                DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));

        /* persistent_reserved: */
        journal_res_u64s +=
                DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64)) *
                BCH_REPLICAS_MAX;

        for_each_cpu_replicas_entry(r, e)
                journal_res_u64s +=
                        DIV_ROUND_UP(sizeof(struct jset_entry_data_usage) +
                                     e->nr_devs, sizeof(u64));
        return journal_res_u64s;
}

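/*
 * Slow path for marking a new replicas entry: add it to the in-memory table
 * (and the GC table, if a GC pass is in flight), write it to the superblock,
 * and resize the journal reservation. The in-memory tables are only swapped
 * in after the superblock write, so nothing relies on the entry until it is
 * persistent.
 */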
noinline
static int bch2_mark_replicas_slowpath(struct bch_fs *c,
                                struct bch_replicas_entry *new_entry)
{
        struct bch_replicas_cpu new_r, new_gc;
        int ret = -ENOMEM;

        memset(&new_r, 0, sizeof(new_r));
        memset(&new_gc, 0, sizeof(new_gc));

        mutex_lock(&c->sb_lock);

        if (c->replicas_gc.entries &&
            !__replicas_has_entry(&c->replicas_gc, new_entry)) {
                new_gc = cpu_replicas_add_entry(&c->replicas_gc, new_entry);
                if (!new_gc.entries)
                        goto err;
        }

        if (!__replicas_has_entry(&c->replicas, new_entry)) {
                new_r = cpu_replicas_add_entry(&c->replicas, new_entry);
                if (!new_r.entries)
                        goto err;

                ret = bch2_cpu_replicas_to_sb_replicas(c, &new_r);
                if (ret)
                        goto err;

                bch2_journal_entry_res_resize(&c->journal,
                                &c->replicas_journal_res,
                                reserve_journal_replicas(c, &new_r));
        }

        if (!new_r.entries &&
            !new_gc.entries)
                goto out;

        /* allocations done, now commit: */

        if (new_r.entries)
                bch2_write_super(c);

        /* don't update in memory replicas until changes are persistent */
        percpu_down_write(&c->mark_lock);
        if (new_r.entries)
                ret = replicas_table_update(c, &new_r);
        if (new_gc.entries)
                swap(new_gc, c->replicas_gc);
        percpu_up_write(&c->mark_lock);
out:
        ret = 0;
err:
        mutex_unlock(&c->sb_lock);

        kfree(new_r.entries);
        kfree(new_gc.entries);

        return ret;
}

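/*
 * Fast path: if the entry is already marked (including in the GC table, when
 * one exists), do nothing; otherwise fall back to the slow path that updates
 * the superblock.
 */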
int bch2_mark_replicas(struct bch_fs *c,
                       struct bch_replicas_entry *r)
{
        return likely(bch2_replicas_marked(c, r, true))
                ? 0
                : bch2_mark_replicas_slowpath(c, r);
}

bool bch2_bkey_replicas_marked_locked(struct bch_fs *c,
                                      struct bkey_s_c k,
                                      bool check_gc_replicas)
{
        struct bch_replicas_padded search;
        struct bch_devs_list cached = bch2_bkey_cached_devs(k);
        unsigned i;

        for (i = 0; i < cached.nr; i++) {
                bch2_replicas_entry_cached(&search.e, cached.devs[i]);

                if (!bch2_replicas_marked_locked(c, &search.e,
                                                 check_gc_replicas))
                        return false;
        }

        bkey_to_replicas(&search.e, k);

        return bch2_replicas_marked_locked(c, &search.e, check_gc_replicas);
}

bool bch2_bkey_replicas_marked(struct bch_fs *c,
                               struct bkey_s_c k,
                               bool check_gc_replicas)
{
        bool marked;

        percpu_down_read_preempt_disable(&c->mark_lock);
        marked = bch2_bkey_replicas_marked_locked(c, k, check_gc_replicas);
        percpu_up_read_preempt_enable(&c->mark_lock);

        return marked;
}

int bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
{
        struct bch_replicas_padded search;
        struct bch_devs_list cached = bch2_bkey_cached_devs(k);
        unsigned i;
        int ret;

        for (i = 0; i < cached.nr; i++) {
                bch2_replicas_entry_cached(&search.e, cached.devs[i]);

                ret = bch2_mark_replicas(c, &search.e);
                if (ret)
                        return ret;
        }

        bkey_to_replicas(&search.e, k);

        return bch2_mark_replicas(c, &search.e);
}

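/*
 * Finish a replicas GC pass: any entry from the old table that still has a
 * nonzero sector count but wasn't re-marked during GC is carried over, then
 * the GC table is written to the superblock and becomes the live table.
 */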
int bch2_replicas_gc_end(struct bch_fs *c, int ret)
{
        unsigned i;

        lockdep_assert_held(&c->replicas_gc_lock);

        mutex_lock(&c->sb_lock);

        if (ret)
                goto err;

        /*
         * this is kind of crappy; the replicas gc mechanism needs to be ripped
         * out
         */

        for (i = 0; i < c->replicas.nr; i++) {
                struct bch_replicas_entry *e =
                        cpu_replicas_entry(&c->replicas, i);
                struct bch_replicas_cpu n;
                u64 v;

                if (__replicas_has_entry(&c->replicas_gc, e))
                        continue;

                v = percpu_u64_get(&c->usage[0]->replicas[i]);
                if (!v)
                        continue;

                n = cpu_replicas_add_entry(&c->replicas_gc, e);
                if (!n.entries) {
                        ret = -ENOSPC;
                        goto err;
                }

                percpu_down_write(&c->mark_lock);
                swap(n, c->replicas_gc);
                percpu_up_write(&c->mark_lock);

                kfree(n.entries);
        }

        if (bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc)) {
                ret = -ENOSPC;
                goto err;
        }

        bch2_write_super(c);

        /* don't update in memory replicas until changes are persistent */
err:
        percpu_down_write(&c->mark_lock);
        if (!ret)
                ret = replicas_table_update(c, &c->replicas_gc);

        kfree(c->replicas_gc.entries);
        c->replicas_gc.entries = NULL;
        percpu_up_write(&c->mark_lock);

        mutex_unlock(&c->sb_lock);
        return ret;
}

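/*
 * Start a replicas GC pass: build a GC table containing only the entries
 * whose data type is *not* in @typemask; entries of the masked types must be
 * re-marked while GC runs or they are dropped by bch2_replicas_gc_end().
 */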
int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
{
        struct bch_replicas_entry *e;
        unsigned i = 0;

        lockdep_assert_held(&c->replicas_gc_lock);

        mutex_lock(&c->sb_lock);
        BUG_ON(c->replicas_gc.entries);

        c->replicas_gc.nr               = 0;
        c->replicas_gc.entry_size       = 0;

        for_each_cpu_replicas_entry(&c->replicas, e)
                if (!((1 << e->data_type) & typemask)) {
                        c->replicas_gc.nr++;
                        c->replicas_gc.entry_size =
                                max_t(unsigned, c->replicas_gc.entry_size,
                                      replicas_entry_bytes(e));
                }

        c->replicas_gc.entries = kcalloc(c->replicas_gc.nr,
                                         c->replicas_gc.entry_size,
                                         GFP_NOIO);
        if (!c->replicas_gc.entries) {
                mutex_unlock(&c->sb_lock);
                return -ENOMEM;
        }

        for_each_cpu_replicas_entry(&c->replicas, e)
                if (!((1 << e->data_type) & typemask))
                        memcpy(cpu_replicas_entry(&c->replicas_gc, i++),
                               e, c->replicas_gc.entry_size);

        bch2_cpu_replicas_sort(&c->replicas_gc);
        mutex_unlock(&c->sb_lock);

        return 0;
}

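/*
 * Set the sector count for a replicas entry directly, adding the entry to the
 * in-memory table first if it isn't there yet.
 */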
int bch2_replicas_set_usage(struct bch_fs *c,
                            struct bch_replicas_entry *r,
                            u64 sectors)
{
        int ret, idx = bch2_replicas_entry_idx(c, r);

        if (idx < 0) {
                struct bch_replicas_cpu n;

                n = cpu_replicas_add_entry(&c->replicas, r);
                if (!n.entries)
                        return -ENOMEM;

                ret = replicas_table_update(c, &n);
                if (ret)
                        return ret;

                kfree(n.entries);

                idx = bch2_replicas_entry_idx(c, r);
                BUG_ON(idx < 0);
        }

        percpu_u64_set(&c->usage[0]->replicas[idx], sectors);

        return 0;
}

/* Replicas tracking - superblock: */

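/*
 * Unpack the superblock replicas section into an in-memory table: one pass to
 * size the allocation (all entries are padded to the largest entry), one pass
 * to copy and re-sort each entry's device list.
 */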
static int
__bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r,
                                   struct bch_replicas_cpu *cpu_r)
{
        struct bch_replicas_entry *e, *dst;
        unsigned nr = 0, entry_size = 0, idx = 0;

        for_each_replicas_entry(sb_r, e) {
                entry_size = max_t(unsigned, entry_size,
                                   replicas_entry_bytes(e));
                nr++;
        }

        cpu_r->entries = kcalloc(nr, entry_size, GFP_NOIO);
        if (!cpu_r->entries)
                return -ENOMEM;

        cpu_r->nr               = nr;
        cpu_r->entry_size       = entry_size;

        for_each_replicas_entry(sb_r, e) {
                dst = cpu_replicas_entry(cpu_r, idx++);
                memcpy(dst, e, replicas_entry_bytes(e));
                replicas_entry_sort(dst);
        }

        return 0;
}

static int
__bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
                                      struct bch_replicas_cpu *cpu_r)
{
        struct bch_replicas_entry_v0 *e;
        unsigned nr = 0, entry_size = 0, idx = 0;

        for_each_replicas_entry(sb_r, e) {
                entry_size = max_t(unsigned, entry_size,
                                   replicas_entry_bytes(e));
                nr++;
        }

        entry_size += sizeof(struct bch_replicas_entry) -
                sizeof(struct bch_replicas_entry_v0);

        cpu_r->entries = kcalloc(nr, entry_size, GFP_NOIO);
        if (!cpu_r->entries)
                return -ENOMEM;

        cpu_r->nr               = nr;
        cpu_r->entry_size       = entry_size;

        for_each_replicas_entry(sb_r, e) {
                struct bch_replicas_entry *dst =
                        cpu_replicas_entry(cpu_r, idx++);

                dst->data_type  = e->data_type;
                dst->nr_devs    = e->nr_devs;
                dst->nr_required = 1;
                memcpy(dst->devs, e->devs, e->nr_devs);
                replicas_entry_sort(dst);
        }

        return 0;
}

int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
{
        struct bch_sb_field_replicas *sb_v1;
        struct bch_sb_field_replicas_v0 *sb_v0;
        struct bch_replicas_cpu new_r = { 0, 0, NULL };
        int ret = 0;

        if ((sb_v1 = bch2_sb_get_replicas(c->disk_sb.sb)))
                ret = __bch2_sb_replicas_to_cpu_replicas(sb_v1, &new_r);
        else if ((sb_v0 = bch2_sb_get_replicas_v0(c->disk_sb.sb)))
                ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_v0, &new_r);

        if (ret)
                return -ENOMEM;

        bch2_cpu_replicas_sort(&new_r);

        percpu_down_write(&c->mark_lock);

        ret = replicas_table_update(c, &new_r);
        percpu_up_write(&c->mark_lock);

        kfree(new_r.entries);

        return 0;
}

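/*
 * Write the in-memory replicas table back to the superblock in the old v0
 * format, which has no nr_required field - only usable when every entry has
 * nr_required == 1.
 */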
static int bch2_cpu_replicas_to_sb_replicas_v0(struct bch_fs *c,
                                               struct bch_replicas_cpu *r)
{
        struct bch_sb_field_replicas_v0 *sb_r;
        struct bch_replicas_entry_v0 *dst;
        struct bch_replicas_entry *src;
        size_t bytes;

        bytes = sizeof(struct bch_sb_field_replicas);

        for_each_cpu_replicas_entry(r, src)
                bytes += replicas_entry_bytes(src) - 1;

        sb_r = bch2_sb_resize_replicas_v0(&c->disk_sb,
                        DIV_ROUND_UP(bytes, sizeof(u64)));
        if (!sb_r)
                return -ENOSPC;

        bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas);
        sb_r = bch2_sb_get_replicas_v0(c->disk_sb.sb);

        memset(&sb_r->entries, 0,
               vstruct_end(&sb_r->field) -
               (void *) &sb_r->entries);

        dst = sb_r->entries;
        for_each_cpu_replicas_entry(r, src) {
                dst->data_type  = src->data_type;
                dst->nr_devs    = src->nr_devs;
                memcpy(dst->devs, src->devs, src->nr_devs);

                dst = replicas_entry_next(dst);

                BUG_ON((void *) dst > vstruct_end(&sb_r->field));
        }

        return 0;
}

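/*
 * Write the in-memory table to the superblock, preferring the compact v0
 * format when possible and deleting whichever format isn't used.
 */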
static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
                                            struct bch_replicas_cpu *r)
{
        struct bch_sb_field_replicas *sb_r;
        struct bch_replicas_entry *dst, *src;
        bool need_v1 = false;
        size_t bytes;

        bytes = sizeof(struct bch_sb_field_replicas);

        for_each_cpu_replicas_entry(r, src) {
                bytes += replicas_entry_bytes(src);
                if (src->nr_required != 1)
                        need_v1 = true;
        }

        if (!need_v1)
                return bch2_cpu_replicas_to_sb_replicas_v0(c, r);

        sb_r = bch2_sb_resize_replicas(&c->disk_sb,
                        DIV_ROUND_UP(bytes, sizeof(u64)));
        if (!sb_r)
                return -ENOSPC;

        bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas_v0);
        sb_r = bch2_sb_get_replicas(c->disk_sb.sb);

        memset(&sb_r->entries, 0,
               vstruct_end(&sb_r->field) -
               (void *) &sb_r->entries);

        dst = sb_r->entries;
        for_each_cpu_replicas_entry(r, src) {
                memcpy(dst, src, replicas_entry_bytes(src));

                dst = replicas_entry_next(dst);

                BUG_ON((void *) dst > vstruct_end(&sb_r->field));
        }

        return 0;
}

static const char *check_dup_replicas_entries(struct bch_replicas_cpu *cpu_r)
{
        unsigned i;

        sort_cmp_size(cpu_r->entries,
                      cpu_r->nr,
                      cpu_r->entry_size,
                      memcmp, NULL);

        for (i = 0; i + 1 < cpu_r->nr; i++) {
                struct bch_replicas_entry *l =
                        cpu_replicas_entry(cpu_r, i);
                struct bch_replicas_entry *r =
                        cpu_replicas_entry(cpu_r, i + 1);

                BUG_ON(memcmp(l, r, cpu_r->entry_size) > 0);

                if (!memcmp(l, r, cpu_r->entry_size))
                        return "duplicate replicas entry";
        }

        return NULL;
}

static const char *bch2_sb_validate_replicas(struct bch_sb *sb, struct bch_sb_field *f)
{
        struct bch_sb_field_replicas *sb_r = field_to_type(f, replicas);
        struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
        struct bch_replicas_cpu cpu_r = { .entries = NULL };
        struct bch_replicas_entry *e;
        const char *err;
        unsigned i;

        for_each_replicas_entry(sb_r, e) {
                err = "invalid replicas entry: invalid data type";
                if (e->data_type >= BCH_DATA_NR)
                        goto err;

                err = "invalid replicas entry: no devices";
                if (!e->nr_devs)
                        goto err;

                err = "invalid replicas entry: bad nr_required";
                if (!e->nr_required ||
                    (e->nr_required > 1 &&
                     e->nr_required >= e->nr_devs))
                        goto err;

                err = "invalid replicas entry: invalid device";
                for (i = 0; i < e->nr_devs; i++)
                        if (!bch2_dev_exists(sb, mi, e->devs[i]))
                                goto err;
        }

        err = "cannot allocate memory";
        if (__bch2_sb_replicas_to_cpu_replicas(sb_r, &cpu_r))
                goto err;

        err = check_dup_replicas_entries(&cpu_r);
err:
        kfree(cpu_r.entries);
        return err;
}

static void bch2_sb_replicas_to_text(struct printbuf *out,
                                     struct bch_sb *sb,
                                     struct bch_sb_field *f)
{
        struct bch_sb_field_replicas *r = field_to_type(f, replicas);
        struct bch_replicas_entry *e;
        bool first = true;

        for_each_replicas_entry(r, e) {
                if (!first)
                        pr_buf(out, " ");
                first = false;

                bch2_replicas_entry_to_text(out, e);
        }
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas = {
        .validate       = bch2_sb_validate_replicas,
        .to_text        = bch2_sb_replicas_to_text,
};

static const char *bch2_sb_validate_replicas_v0(struct bch_sb *sb, struct bch_sb_field *f)
{
        struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
        struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
        struct bch_replicas_cpu cpu_r = { .entries = NULL };
        struct bch_replicas_entry_v0 *e;
        const char *err;
        unsigned i;

        for_each_replicas_entry_v0(sb_r, e) {
                err = "invalid replicas entry: invalid data type";
                if (e->data_type >= BCH_DATA_NR)
                        goto err;

                err = "invalid replicas entry: no devices";
                if (!e->nr_devs)
                        goto err;

                err = "invalid replicas entry: invalid device";
                for (i = 0; i < e->nr_devs; i++)
                        if (!bch2_dev_exists(sb, mi, e->devs[i]))
                                goto err;
        }

        err = "cannot allocate memory";
        if (__bch2_sb_replicas_v0_to_cpu_replicas(sb_r, &cpu_r))
                goto err;

        err = check_dup_replicas_entries(&cpu_r);
err:
        kfree(cpu_r.entries);
        return err;
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0 = {
        .validate       = bch2_sb_validate_replicas_v0,
};

/* Query replicas: */

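/*
 * For each data type, compute the worst-case redundancy across all replicas
 * entries given the set of online devices (how many more of an entry's
 * devices could go offline while still leaving nr_required online), plus the
 * maximum number of devices holding that data that are currently offline.
 */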
struct replicas_status __bch2_replicas_status(struct bch_fs *c,
                                              struct bch_devs_mask online_devs)
{
        struct bch_sb_field_members *mi;
        struct bch_replicas_entry *e;
        unsigned i, nr_online, nr_offline;
        struct replicas_status ret;

        memset(&ret, 0, sizeof(ret));

        for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
                ret.replicas[i].redundancy = INT_MAX;

        mi = bch2_sb_get_members(c->disk_sb.sb);

        percpu_down_read_preempt_disable(&c->mark_lock);

        for_each_cpu_replicas_entry(&c->replicas, e) {
                if (e->data_type >= ARRAY_SIZE(ret.replicas))
                        panic("e %p data_type %u\n", e, e->data_type);

                nr_online = nr_offline = 0;

                for (i = 0; i < e->nr_devs; i++) {
                        BUG_ON(!bch2_dev_exists(c->disk_sb.sb, mi,
                                                e->devs[i]));

                        if (test_bit(e->devs[i], online_devs.d))
                                nr_online++;
                        else
                                nr_offline++;
                }

                ret.replicas[e->data_type].redundancy =
                        min(ret.replicas[e->data_type].redundancy,
                            (int) nr_online - (int) e->nr_required);

                ret.replicas[e->data_type].nr_offline =
                        max(ret.replicas[e->data_type].nr_offline,
                            nr_offline);
        }

        percpu_up_read_preempt_enable(&c->mark_lock);

        for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
                if (ret.replicas[i].redundancy == INT_MAX)
                        ret.replicas[i].redundancy = 0;

        return ret;
}

struct replicas_status bch2_replicas_status(struct bch_fs *c)
{
        return __bch2_replicas_status(c, bch2_online_devs(c));
}

static bool have_enough_devs(struct replicas_status s,
                             enum bch_data_type type,
                             bool force_if_degraded,
                             bool force_if_lost)
{
        return (!s.replicas[type].nr_offline || force_if_degraded) &&
                (s.replicas[type].redundancy >= 0 || force_if_lost);
}

bool bch2_have_enough_devs(struct replicas_status s, unsigned flags)
{
        return (have_enough_devs(s, BCH_DATA_JOURNAL,
                                 flags & BCH_FORCE_IF_METADATA_DEGRADED,
                                 flags & BCH_FORCE_IF_METADATA_LOST) &&
                have_enough_devs(s, BCH_DATA_BTREE,
                                 flags & BCH_FORCE_IF_METADATA_DEGRADED,
                                 flags & BCH_FORCE_IF_METADATA_LOST) &&
                have_enough_devs(s, BCH_DATA_USER,
                                 flags & BCH_FORCE_IF_DATA_DEGRADED,
                                 flags & BCH_FORCE_IF_DATA_LOST));
}

int bch2_replicas_online(struct bch_fs *c, bool meta)
{
        struct replicas_status s = bch2_replicas_status(c);

        return (meta
                ? min(s.replicas[BCH_DATA_JOURNAL].redundancy,
                      s.replicas[BCH_DATA_BTREE].redundancy)
                : s.replicas[BCH_DATA_USER].redundancy) + 1;
}

unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
{
        struct bch_replicas_entry *e;
        unsigned i, ret = 0;

        percpu_down_read_preempt_disable(&c->mark_lock);

        for_each_cpu_replicas_entry(&c->replicas, e)
                for (i = 0; i < e->nr_devs; i++)
                        if (e->devs[i] == ca->dev_idx)
                                ret |= 1 << e->data_type;

        percpu_up_read_preempt_enable(&c->mark_lock);

        return ret;
}

int bch2_fs_replicas_init(struct bch_fs *c)
{
        c->journal.entry_u64s_reserved +=
                reserve_journal_replicas(c, &c->replicas);

        return replicas_table_update(c, &c->replicas);
}