1 // SPDX-License-Identifier: GPL-2.0
2
3 #include "bcachefs.h"
4 #include "buckets.h"
5 #include "journal.h"
6 #include "replicas.h"
7 #include "super-io.h"
8
9 static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
10                                             struct bch_replicas_cpu *);
11
12 /* Replicas tracking - in memory: */
13
14 static inline int u8_cmp(u8 l, u8 r)
15 {
16         return cmp_int(l, r);
17 }
18
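/*
 * Debug-only sanity checks on a replicas entry: the data type must be valid,
 * there must be at least one device, nr_required must make sense relative to
 * nr_devs, and the device list must be sorted with no duplicates.
 */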
19 static void verify_replicas_entry(struct bch_replicas_entry *e)
20 {
21 #ifdef CONFIG_BCACHEFS_DEBUG
22         unsigned i;
23
24         BUG_ON(e->data_type >= BCH_DATA_NR);
25         BUG_ON(!e->nr_devs);
26         BUG_ON(e->nr_required > 1 &&
27                e->nr_required >= e->nr_devs);
28
29         for (i = 0; i + 1 < e->nr_devs; i++)
30                 BUG_ON(e->devs[i] >= e->devs[i + 1]);
31 #endif
32 }
33
34 static void replicas_entry_sort(struct bch_replicas_entry *e)
35 {
36         bubble_sort(e->devs, e->nr_devs, u8_cmp);
37 }
38
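/*
 * The in-memory replicas table is kept in Eytzinger (BFS) order so that
 * __replicas_entry_idx() can binary search it with eytzinger0_find(). Entries
 * are compared as raw bytes; this works because every slot is entry_size
 * bytes and unused tail bytes are left zeroed by the allocators below.
 */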
39 static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
40 {
41         eytzinger0_sort(r->entries, r->nr, r->entry_size, memcmp, NULL);
42 }
43
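/*
 * Print an entry as "<type>: <nr_required>/<nr_devs> [dev ...]"; e.g. a
 * two-way replicated user-data entry on devices 0 and 1 would come out
 * roughly as "user: 1/2 [0 1]" (assuming "user" is the name in
 * bch2_data_types[]).
 */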
44 void bch2_replicas_entry_to_text(struct printbuf *out,
45                                  struct bch_replicas_entry *e)
46 {
47         unsigned i;
48
49         pr_buf(out, "%s: %u/%u [",
50                bch2_data_types[e->data_type],
51                e->nr_required,
52                e->nr_devs);
53
54         for (i = 0; i < e->nr_devs; i++)
55                 pr_buf(out, i ? " %u" : "%u", e->devs[i]);
56         pr_buf(out, "]");
57 }
58
59 void bch2_cpu_replicas_to_text(struct printbuf *out,
60                               struct bch_replicas_cpu *r)
61 {
62         struct bch_replicas_entry *e;
63         bool first = true;
64
65         for_each_cpu_replicas_entry(r, e) {
66                 if (!first)
67                         pr_buf(out, " ");
68                 first = false;
69
70                 bch2_replicas_entry_to_text(out, e);
71         }
72 }
73
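/*
 * Build a replicas entry from an extent: cached pointers are skipped, and if
 * any pointer belongs to an erasure coded stripe, nr_required is cleared
 * (the stripe's own replicas entry carries the redundancy requirement).
 */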
74 static void extent_to_replicas(struct bkey_s_c k,
75                                struct bch_replicas_entry *r)
76 {
77         struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
78         const union bch_extent_entry *entry;
79         struct extent_ptr_decoded p;
80
81         r->nr_required  = 1;
82
83         bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
84                 if (p.ptr.cached)
85                         continue;
86
87                 if (!p.has_ec)
88                         r->devs[r->nr_devs++] = p.ptr.dev;
89                 else
90                         r->nr_required = 0;
91         }
92 }
93
94 static void stripe_to_replicas(struct bkey_s_c k,
95                                struct bch_replicas_entry *r)
96 {
97         struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
98         const struct bch_extent_ptr *ptr;
99
100         r->nr_required  = s.v->nr_blocks - s.v->nr_redundant;
101
102         for (ptr = s.v->ptrs;
103              ptr < s.v->ptrs + s.v->nr_blocks;
104              ptr++)
105                 r->devs[r->nr_devs++] = ptr->dev;
106 }
107
108 void bch2_bkey_to_replicas(struct bch_replicas_entry *e,
109                            struct bkey_s_c k)
110 {
111         e->nr_devs = 0;
112
113         switch (k.k->type) {
114         case KEY_TYPE_btree_ptr:
115         case KEY_TYPE_btree_ptr_v2:
116                 e->data_type = BCH_DATA_btree;
117                 extent_to_replicas(k, e);
118                 break;
119         case KEY_TYPE_extent:
120         case KEY_TYPE_reflink_v:
121                 e->data_type = BCH_DATA_user;
122                 extent_to_replicas(k, e);
123                 break;
124         case KEY_TYPE_stripe:
125                 e->data_type = BCH_DATA_parity;
126                 stripe_to_replicas(k, e);
127                 break;
128         }
129
130         replicas_entry_sort(e);
131 }
132
133 void bch2_devlist_to_replicas(struct bch_replicas_entry *e,
134                               enum bch_data_type data_type,
135                               struct bch_devs_list devs)
136 {
137         unsigned i;
138
139         BUG_ON(!data_type ||
140                data_type == BCH_DATA_sb ||
141                data_type >= BCH_DATA_NR);
142
143         e->data_type    = data_type;
144         e->nr_devs      = 0;
145         e->nr_required  = 1;
146
147         for (i = 0; i < devs.nr; i++)
148                 e->devs[e->nr_devs++] = devs.devs[i];
149
150         replicas_entry_sort(e);
151 }
152
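/*
 * Return a copy of @old with @new_entry appended and the result re-sorted.
 * The caller owns the returned table and must check .entries for allocation
 * failure; @old itself is left untouched.
 */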
153 static struct bch_replicas_cpu
154 cpu_replicas_add_entry(struct bch_replicas_cpu *old,
155                        struct bch_replicas_entry *new_entry)
156 {
157         unsigned i;
158         struct bch_replicas_cpu new = {
159                 .nr             = old->nr + 1,
160                 .entry_size     = max_t(unsigned, old->entry_size,
161                                         replicas_entry_bytes(new_entry)),
162         };
163
164         BUG_ON(!new_entry->data_type);
165         verify_replicas_entry(new_entry);
166
167         new.entries = kcalloc(new.nr, new.entry_size, GFP_NOIO);
168         if (!new.entries)
169                 return new;
170
171         for (i = 0; i < old->nr; i++)
172                 memcpy(cpu_replicas_entry(&new, i),
173                        cpu_replicas_entry(old, i),
174                        old->entry_size);
175
176         memcpy(cpu_replicas_entry(&new, old->nr),
177                new_entry,
178                replicas_entry_bytes(new_entry));
179
180         bch2_cpu_replicas_sort(&new);
181         return new;
182 }
183
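/*
 * Binary search the eytzinger-ordered table for an exact match, comparing
 * only the bytes of @search; returns -1 if not found, or if @search is wider
 * than the table's entry_size and so cannot possibly be present.
 */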
184 static inline int __replicas_entry_idx(struct bch_replicas_cpu *r,
185                                        struct bch_replicas_entry *search)
186 {
187         int idx, entry_size = replicas_entry_bytes(search);
188
189         if (unlikely(entry_size > r->entry_size))
190                 return -1;
191
192         verify_replicas_entry(search);
193
194 #define entry_cmp(_l, _r, size) memcmp(_l, _r, entry_size)
195         idx = eytzinger0_find(r->entries, r->nr, r->entry_size,
196                               entry_cmp, search);
197 #undef entry_cmp
198
199         return idx < r->nr ? idx : -1;
200 }
201
202 int bch2_replicas_entry_idx(struct bch_fs *c,
203                             struct bch_replicas_entry *search)
204 {
205         replicas_entry_sort(search);
206
207         return __replicas_entry_idx(&c->replicas, search);
208 }
209
210 static bool __replicas_has_entry(struct bch_replicas_cpu *r,
211                                  struct bch_replicas_entry *search)
212 {
213         return __replicas_entry_idx(r, search) >= 0;
214 }
215
216 bool bch2_replicas_marked(struct bch_fs *c,
217                           struct bch_replicas_entry *search)
218 {
219         bool marked;
220
221         if (!search->nr_devs)
222                 return true;
223
224         verify_replicas_entry(search);
225
226         percpu_down_read(&c->mark_lock);
227         marked = __replicas_has_entry(&c->replicas, search) &&
228                 (likely(!c->replicas_gc.entries) ||
229                  __replicas_has_entry(&c->replicas_gc, search));
230         percpu_up_read(&c->mark_lock);
231
232         return marked;
233 }
234
235 static void __replicas_table_update(struct bch_fs_usage *dst,
236                                     struct bch_replicas_cpu *dst_r,
237                                     struct bch_fs_usage *src,
238                                     struct bch_replicas_cpu *src_r)
239 {
240         int src_idx, dst_idx;
241
242         *dst = *src;
243
244         for (src_idx = 0; src_idx < src_r->nr; src_idx++) {
245                 if (!src->replicas[src_idx])
246                         continue;
247
248                 dst_idx = __replicas_entry_idx(dst_r,
249                                 cpu_replicas_entry(src_r, src_idx));
250                 BUG_ON(dst_idx < 0);
251
252                 dst->replicas[dst_idx] = src->replicas[src_idx];
253         }
254 }
255
256 static void __replicas_table_update_pcpu(struct bch_fs_usage __percpu *dst_p,
257                                     struct bch_replicas_cpu *dst_r,
258                                     struct bch_fs_usage __percpu *src_p,
259                                     struct bch_replicas_cpu *src_r)
260 {
261         unsigned src_nr = sizeof(struct bch_fs_usage) / sizeof(u64) + src_r->nr;
262         struct bch_fs_usage *dst, *src = (void *)
263                 bch2_acc_percpu_u64s((void *) src_p, src_nr);
264
265         preempt_disable();
266         dst = this_cpu_ptr(dst_p);
267         preempt_enable();
268
269         __replicas_table_update(dst, dst_r, src, src_r);
270 }
271
272 /*
273  * Resize filesystem accounting:
274  */
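/*
 * When the replicas table grows, every per-replica usage array (usage_base,
 * the two percpu usage arrays, the scratch buffer and, during gc, usage_gc)
 * has to be reallocated at the new width, with existing counters carried
 * across to their entries' indices in the new table.
 */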
275 static int replicas_table_update(struct bch_fs *c,
276                                  struct bch_replicas_cpu *new_r)
277 {
278         struct bch_fs_usage __percpu *new_usage[2] = { NULL, NULL };
279         struct bch_fs_usage *new_scratch = NULL;
280         struct bch_fs_usage __percpu *new_gc = NULL;
281         struct bch_fs_usage *new_base = NULL;
282         unsigned bytes = sizeof(struct bch_fs_usage) +
283                 sizeof(u64) * new_r->nr;
284         int ret = -ENOMEM;
285
286         if (!(new_base = kzalloc(bytes, GFP_NOIO)) ||
287             !(new_usage[0] = __alloc_percpu_gfp(bytes, sizeof(u64),
288                                                 GFP_NOIO)) ||
289             !(new_usage[1] = __alloc_percpu_gfp(bytes, sizeof(u64),
290                                                 GFP_NOIO)) ||
291             !(new_scratch  = kmalloc(bytes, GFP_NOIO)) ||
292             (c->usage_gc &&
293              !(new_gc = __alloc_percpu_gfp(bytes, sizeof(u64), GFP_NOIO)))) {
294                 bch_err(c, "error updating replicas table: memory allocation failure");
295                 goto err;
296         }
297
298         if (c->usage_base)
299                 __replicas_table_update(new_base,               new_r,
300                                         c->usage_base,          &c->replicas);
301         if (c->usage[0])
302                 __replicas_table_update_pcpu(new_usage[0],      new_r,
303                                              c->usage[0],       &c->replicas);
304         if (c->usage[1])
305                 __replicas_table_update_pcpu(new_usage[1],      new_r,
306                                              c->usage[1],       &c->replicas);
307         if (c->usage_gc)
308                 __replicas_table_update_pcpu(new_gc,            new_r,
309                                              c->usage_gc,       &c->replicas);
310
311         swap(c->usage_base,     new_base);
312         swap(c->usage[0],       new_usage[0]);
313         swap(c->usage[1],       new_usage[1]);
314         swap(c->usage_scratch,  new_scratch);
315         swap(c->usage_gc,       new_gc);
316         swap(c->replicas,       *new_r);
317         ret = 0;
318 err:
319         free_percpu(new_gc);
320         kfree(new_scratch);
321         free_percpu(new_usage[1]);
322         free_percpu(new_usage[0]);
323         kfree(new_base);
324         return ret;
325 }
326
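/*
 * Size, in u64s, of the journal reservation needed to write fs usage into
 * every journal entry: one jset_entry_usage each for nr_inodes and
 * key_version, BCH_REPLICAS_MAX entries for persistent_reserved, plus one
 * jset_entry_data_usage per replicas entry.
 */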
327 static unsigned reserve_journal_replicas(struct bch_fs *c,
328                                      struct bch_replicas_cpu *r)
329 {
330         struct bch_replicas_entry *e;
331         unsigned journal_res_u64s = 0;
332
333         /* nr_inodes: */
334         journal_res_u64s +=
335                 DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));
336
337         /* key_version: */
338         journal_res_u64s +=
339                 DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));
340
341         /* persistent_reserved: */
342         journal_res_u64s +=
343                 DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64)) *
344                 BCH_REPLICAS_MAX;
345
346         for_each_cpu_replicas_entry(r, e)
347                 journal_res_u64s +=
348                         DIV_ROUND_UP(sizeof(struct jset_entry_data_usage) +
349                                      e->nr_devs, sizeof(u64));
350         return journal_res_u64s;
351 }
352
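/*
 * Slow path for marking a replicas entry that isn't in the table yet: build
 * the new in-memory table(s) and the new superblock section under sb_lock,
 * write the superblock first, and only then swap the new tables in under
 * mark_lock, so the on-disk replicas section never lags what's in memory.
 */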
353 noinline
354 static int bch2_mark_replicas_slowpath(struct bch_fs *c,
355                                 struct bch_replicas_entry *new_entry)
356 {
357         struct bch_replicas_cpu new_r, new_gc;
358         int ret = 0;
359
360         verify_replicas_entry(new_entry);
361
362         memset(&new_r, 0, sizeof(new_r));
363         memset(&new_gc, 0, sizeof(new_gc));
364
365         mutex_lock(&c->sb_lock);
366
367         if (c->replicas_gc.entries &&
368             !__replicas_has_entry(&c->replicas_gc, new_entry)) {
369                 new_gc = cpu_replicas_add_entry(&c->replicas_gc, new_entry);
370                 if (!new_gc.entries)
371                         goto err;
372         }
373
374         if (!__replicas_has_entry(&c->replicas, new_entry)) {
375                 new_r = cpu_replicas_add_entry(&c->replicas, new_entry);
376                 if (!new_r.entries)
377                         goto err;
378
379                 ret = bch2_cpu_replicas_to_sb_replicas(c, &new_r);
380                 if (ret)
381                         goto err;
382
383                 bch2_journal_entry_res_resize(&c->journal,
384                                 &c->replicas_journal_res,
385                                 reserve_journal_replicas(c, &new_r));
386         }
387
388         if (!new_r.entries &&
389             !new_gc.entries)
390                 goto out;
391
392         /* allocations done, now commit: */
393
394         if (new_r.entries)
395                 bch2_write_super(c);
396
397         /* don't update in-memory replicas until changes are persistent */
398         percpu_down_write(&c->mark_lock);
399         if (new_r.entries)
400                 ret = replicas_table_update(c, &new_r);
401         if (new_gc.entries)
402                 swap(new_gc, c->replicas_gc);
403         percpu_up_write(&c->mark_lock);
404 out:
405         mutex_unlock(&c->sb_lock);
406
407         kfree(new_r.entries);
408         kfree(new_gc.entries);
409
410         return ret;
411 err:
412         bch_err(c, "error adding replicas entry: memory allocation failure");
413         ret = -ENOMEM;
414         goto out;
415 }
416
417 static int __bch2_mark_replicas(struct bch_fs *c,
418                                 struct bch_replicas_entry *r,
419                                 bool check)
420 {
421         return likely(bch2_replicas_marked(c, r))       ? 0
422                 : check                                 ? -1
423                 : bch2_mark_replicas_slowpath(c, r);
424 }
425
426 int bch2_mark_replicas(struct bch_fs *c, struct bch_replicas_entry *r)
427 {
428         return __bch2_mark_replicas(c, r, false);
429 }
430
431 static int __bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k,
432                                      bool check)
433 {
434         struct bch_replicas_padded search;
435         struct bch_devs_list cached = bch2_bkey_cached_devs(k);
436         unsigned i;
437         int ret;
438
439         for (i = 0; i < cached.nr; i++) {
440                 bch2_replicas_entry_cached(&search.e, cached.devs[i]);
441
442                 ret = __bch2_mark_replicas(c, &search.e, check);
443                 if (ret)
444                         return ret;
445         }
446
447         bch2_bkey_to_replicas(&search.e, k);
448
449         ret = __bch2_mark_replicas(c, &search.e, check);
450         if (ret)
451                 return ret;
452
453         if (search.e.data_type == BCH_DATA_parity) {
454                 search.e.data_type = BCH_DATA_cached;
455                 ret = __bch2_mark_replicas(c, &search.e, check);
456                 if (ret)
457                         return ret;
458
459                 search.e.data_type = BCH_DATA_user;
460                 ret = __bch2_mark_replicas(c, &search.e, check);
461                 if (ret)
462                         return ret;
463         }
464
465         return 0;
466 }
467
468 bool bch2_bkey_replicas_marked(struct bch_fs *c,
469                                struct bkey_s_c k)
470 {
471         return __bch2_mark_bkey_replicas(c, k, true) == 0;
472 }
473
474 int bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
475 {
476         return __bch2_mark_bkey_replicas(c, k, false);
477 }
478
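/*
 * Replicas GC: bch2_replicas_gc_start() snapshots the current table, minus
 * the data types in @typemask, into c->replicas_gc; entries re-marked while
 * gc runs are added to it as well. bch2_replicas_gc_end() then keeps any
 * dropped-type entry that still has sectors accounted to it, writes the
 * result to the superblock and installs it as the new table.
 */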
479 int bch2_replicas_gc_end(struct bch_fs *c, int ret)
480 {
481         unsigned i;
482
483         lockdep_assert_held(&c->replicas_gc_lock);
484
485         mutex_lock(&c->sb_lock);
486         percpu_down_write(&c->mark_lock);
487
488         /*
489          * this is kind of crappy; the replicas gc mechanism needs to be ripped
490          * out
491          */
492
493         for (i = 0; i < c->replicas.nr; i++) {
494                 struct bch_replicas_entry *e =
495                         cpu_replicas_entry(&c->replicas, i);
496                 struct bch_replicas_cpu n;
497
498                 if (!__replicas_has_entry(&c->replicas_gc, e) &&
499                     (c->usage_base->replicas[i] ||
500                      percpu_u64_get(&c->usage[0]->replicas[i]) ||
501                      percpu_u64_get(&c->usage[1]->replicas[i]))) {
502                         n = cpu_replicas_add_entry(&c->replicas_gc, e);
503                         if (!n.entries) {
504                                 ret = -ENOMEM;
505                                 goto err;
506                         }
507
508                         swap(n, c->replicas_gc);
509                         kfree(n.entries);
510                 }
511         }
512
513         if (bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc)) {
514                 ret = -ENOSPC;
515                 goto err;
516         }
517
518         ret = replicas_table_update(c, &c->replicas_gc);
519 err:
520         kfree(c->replicas_gc.entries);
521         c->replicas_gc.entries = NULL;
522
523         percpu_up_write(&c->mark_lock);
524
525         if (!ret)
526                 bch2_write_super(c);
527
528         mutex_unlock(&c->sb_lock);
529
530         return ret;
531 }
532
533 int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
534 {
535         struct bch_replicas_entry *e;
536         unsigned i = 0;
537
538         lockdep_assert_held(&c->replicas_gc_lock);
539
540         mutex_lock(&c->sb_lock);
541         BUG_ON(c->replicas_gc.entries);
542
543         c->replicas_gc.nr               = 0;
544         c->replicas_gc.entry_size       = 0;
545
546         for_each_cpu_replicas_entry(&c->replicas, e)
547                 if (!((1 << e->data_type) & typemask)) {
548                         c->replicas_gc.nr++;
549                         c->replicas_gc.entry_size =
550                                 max_t(unsigned, c->replicas_gc.entry_size,
551                                       replicas_entry_bytes(e));
552                 }
553
554         c->replicas_gc.entries = kcalloc(c->replicas_gc.nr,
555                                          c->replicas_gc.entry_size,
556                                          GFP_NOIO);
557         if (!c->replicas_gc.entries) {
558                 mutex_unlock(&c->sb_lock);
559                 bch_err(c, "error allocating c->replicas_gc");
560                 return -ENOMEM;
561         }
562
563         for_each_cpu_replicas_entry(&c->replicas, e)
564                 if (!((1 << e->data_type) & typemask))
565                         memcpy(cpu_replicas_entry(&c->replicas_gc, i++),
566                                e, c->replicas_gc.entry_size);
567
568         bch2_cpu_replicas_sort(&c->replicas_gc);
569         mutex_unlock(&c->sb_lock);
570
571         return 0;
572 }
573
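/*
 * Alternate GC that doesn't need the start/end protocol: flush the journal,
 * then rebuild the table keeping only journal entries and entries that still
 * have sectors in use, retrying from scratch if the table changed size while
 * the new copy was being allocated.
 */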
574 int bch2_replicas_gc2(struct bch_fs *c)
575 {
576         struct bch_replicas_cpu new = { 0 };
577         unsigned i, nr;
578         int ret = 0;
579
580         bch2_journal_meta(&c->journal);
581 retry:
582         nr              = READ_ONCE(c->replicas.nr);
583         new.entry_size  = READ_ONCE(c->replicas.entry_size);
584         new.entries     = kcalloc(nr, new.entry_size, GFP_KERNEL);
585         if (!new.entries) {
586                 bch_err(c, "error allocating c->replicas_gc");
587                 return -ENOMEM;
588         }
589
590         mutex_lock(&c->sb_lock);
591         percpu_down_write(&c->mark_lock);
592
593         if (nr                  != c->replicas.nr ||
594             new.entry_size      != c->replicas.entry_size) {
595                 percpu_up_write(&c->mark_lock);
596                 mutex_unlock(&c->sb_lock);
597                 kfree(new.entries);
598                 goto retry;
599         }
600
601         for (i = 0; i < c->replicas.nr; i++) {
602                 struct bch_replicas_entry *e =
603                         cpu_replicas_entry(&c->replicas, i);
604
605                 if (e->data_type == BCH_DATA_journal ||
606                     c->usage_base->replicas[i] ||
607                     percpu_u64_get(&c->usage[0]->replicas[i]) ||
608                     percpu_u64_get(&c->usage[1]->replicas[i]))
609                         memcpy(cpu_replicas_entry(&new, new.nr++),
610                                e, new.entry_size);
611         }
612
613         bch2_cpu_replicas_sort(&new);
614
615         if (bch2_cpu_replicas_to_sb_replicas(c, &new)) {
616                 ret = -ENOSPC;
617                 goto err;
618         }
619
620         ret = replicas_table_update(c, &new);
621 err:
622         kfree(new.entries);
623
624         percpu_up_write(&c->mark_lock);
625
626         if (!ret)
627                 bch2_write_super(c);
628
629         mutex_unlock(&c->sb_lock);
630
631         return ret;
632 }
633
634 int bch2_replicas_set_usage(struct bch_fs *c,
635                             struct bch_replicas_entry *r,
636                             u64 sectors)
637 {
638         int ret, idx = bch2_replicas_entry_idx(c, r);
639
640         if (idx < 0) {
641                 struct bch_replicas_cpu n;
642
643                 n = cpu_replicas_add_entry(&c->replicas, r);
644                 if (!n.entries)
645                         return -ENOMEM;
646
647                 ret = replicas_table_update(c, &n);
648                 if (ret)
649                         return ret;
650
651                 kfree(n.entries);
652
653                 idx = bch2_replicas_entry_idx(c, r);
654                 BUG_ON(idx < 0);
655         }
656
657         c->usage_base->replicas[idx] = sectors;
658
659         return 0;
660 }
661
662 /* Replicas tracking - superblock: */
663
664 static int
665 __bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r,
666                                    struct bch_replicas_cpu *cpu_r)
667 {
668         struct bch_replicas_entry *e, *dst;
669         unsigned nr = 0, entry_size = 0, idx = 0;
670
671         for_each_replicas_entry(sb_r, e) {
672                 entry_size = max_t(unsigned, entry_size,
673                                    replicas_entry_bytes(e));
674                 nr++;
675         }
676
677         cpu_r->entries = kcalloc(nr, entry_size, GFP_NOIO);
678         if (!cpu_r->entries)
679                 return -ENOMEM;
680
681         cpu_r->nr               = nr;
682         cpu_r->entry_size       = entry_size;
683
684         for_each_replicas_entry(sb_r, e) {
685                 dst = cpu_replicas_entry(cpu_r, idx++);
686                 memcpy(dst, e, replicas_entry_bytes(e));
687                 replicas_entry_sort(dst);
688         }
689
690         return 0;
691 }
692
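/*
 * v0 superblock entries lack the nr_required field, so the cpu table's
 * entry_size is widened by the difference between the two structs and
 * nr_required defaults to 1.
 */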
693 static int
694 __bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
695                                       struct bch_replicas_cpu *cpu_r)
696 {
697         struct bch_replicas_entry_v0 *e;
698         unsigned nr = 0, entry_size = 0, idx = 0;
699
700         for_each_replicas_entry(sb_r, e) {
701                 entry_size = max_t(unsigned, entry_size,
702                                    replicas_entry_bytes(e));
703                 nr++;
704         }
705
706         entry_size += sizeof(struct bch_replicas_entry) -
707                 sizeof(struct bch_replicas_entry_v0);
708
709         cpu_r->entries = kcalloc(nr, entry_size, GFP_NOIO);
710         if (!cpu_r->entries)
711                 return -ENOMEM;
712
713         cpu_r->nr               = nr;
714         cpu_r->entry_size       = entry_size;
715
716         for_each_replicas_entry(sb_r, e) {
717                 struct bch_replicas_entry *dst =
718                         cpu_replicas_entry(cpu_r, idx++);
719
720                 dst->data_type  = e->data_type;
721                 dst->nr_devs    = e->nr_devs;
722                 dst->nr_required = 1;
723                 memcpy(dst->devs, e->devs, e->nr_devs);
724                 replicas_entry_sort(dst);
725         }
726
727         return 0;
728 }
729
730 int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
731 {
732         struct bch_sb_field_replicas *sb_v1;
733         struct bch_sb_field_replicas_v0 *sb_v0;
734         struct bch_replicas_cpu new_r = { 0, 0, NULL };
735         int ret = 0;
736
737         if ((sb_v1 = bch2_sb_get_replicas(c->disk_sb.sb)))
738                 ret = __bch2_sb_replicas_to_cpu_replicas(sb_v1, &new_r);
739         else if ((sb_v0 = bch2_sb_get_replicas_v0(c->disk_sb.sb)))
740                 ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_v0, &new_r);
741
742         if (ret)
743                 return -ENOMEM;
744
745         bch2_cpu_replicas_sort(&new_r);
746
747         percpu_down_write(&c->mark_lock);
748
749         ret = replicas_table_update(c, &new_r);
750         percpu_up_write(&c->mark_lock);
751
752         kfree(new_r.entries);
753
754         return ret;
755 }
756
757 static int bch2_cpu_replicas_to_sb_replicas_v0(struct bch_fs *c,
758                                                struct bch_replicas_cpu *r)
759 {
760         struct bch_sb_field_replicas_v0 *sb_r;
761         struct bch_replicas_entry_v0 *dst;
762         struct bch_replicas_entry *src;
763         size_t bytes;
764
765         bytes = sizeof(struct bch_sb_field_replicas);
766
767         for_each_cpu_replicas_entry(r, src)
768                 bytes += replicas_entry_bytes(src) - 1;
769
770         sb_r = bch2_sb_resize_replicas_v0(&c->disk_sb,
771                         DIV_ROUND_UP(bytes, sizeof(u64)));
772         if (!sb_r)
773                 return -ENOSPC;
774
775         bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas);
776         sb_r = bch2_sb_get_replicas_v0(c->disk_sb.sb);
777
778         memset(&sb_r->entries, 0,
779                vstruct_end(&sb_r->field) -
780                (void *) &sb_r->entries);
781
782         dst = sb_r->entries;
783         for_each_cpu_replicas_entry(r, src) {
784                 dst->data_type  = src->data_type;
785                 dst->nr_devs    = src->nr_devs;
786                 memcpy(dst->devs, src->devs, src->nr_devs);
787
788                 dst = replicas_entry_next(dst);
789
790                 BUG_ON((void *) dst > vstruct_end(&sb_r->field));
791         }
792
793         return 0;
794 }
795
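/*
 * Write the replicas table to the superblock, using the v1 section only if
 * some entry has nr_required != 1; otherwise the more compact v0 format is
 * kept. Whichever format is written, the other section is deleted.
 */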
796 static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
797                                             struct bch_replicas_cpu *r)
798 {
799         struct bch_sb_field_replicas *sb_r;
800         struct bch_replicas_entry *dst, *src;
801         bool need_v1 = false;
802         size_t bytes;
803
804         bytes = sizeof(struct bch_sb_field_replicas);
805
806         for_each_cpu_replicas_entry(r, src) {
807                 bytes += replicas_entry_bytes(src);
808                 if (src->nr_required != 1)
809                         need_v1 = true;
810         }
811
812         if (!need_v1)
813                 return bch2_cpu_replicas_to_sb_replicas_v0(c, r);
814
815         sb_r = bch2_sb_resize_replicas(&c->disk_sb,
816                         DIV_ROUND_UP(bytes, sizeof(u64)));
817         if (!sb_r)
818                 return -ENOSPC;
819
820         bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas_v0);
821         sb_r = bch2_sb_get_replicas(c->disk_sb.sb);
822
823         memset(&sb_r->entries, 0,
824                vstruct_end(&sb_r->field) -
825                (void *) &sb_r->entries);
826
827         dst = sb_r->entries;
828         for_each_cpu_replicas_entry(r, src) {
829                 memcpy(dst, src, replicas_entry_bytes(src));
830
831                 dst = replicas_entry_next(dst);
832
833                 BUG_ON((void *) dst > vstruct_end(&sb_r->field));
834         }
835
836         return 0;
837 }
838
839 static const char *check_dup_replicas_entries(struct bch_replicas_cpu *cpu_r)
840 {
841         unsigned i;
842
843         sort_cmp_size(cpu_r->entries,
844                       cpu_r->nr,
845                       cpu_r->entry_size,
846                       memcmp, NULL);
847
848         for (i = 0; i + 1 < cpu_r->nr; i++) {
849                 struct bch_replicas_entry *l =
850                         cpu_replicas_entry(cpu_r, i);
851                 struct bch_replicas_entry *r =
852                         cpu_replicas_entry(cpu_r, i + 1);
853
854                 BUG_ON(memcmp(l, r, cpu_r->entry_size) > 0);
855
856                 if (!memcmp(l, r, cpu_r->entry_size))
857                         return "duplicate replicas entry";
858         }
859
860         return NULL;
861 }
862
863 static const char *bch2_sb_validate_replicas(struct bch_sb *sb, struct bch_sb_field *f)
864 {
865         struct bch_sb_field_replicas *sb_r = field_to_type(f, replicas);
866         struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
867         struct bch_replicas_cpu cpu_r = { .entries = NULL };
868         struct bch_replicas_entry *e;
869         const char *err;
870         unsigned i;
871
872         for_each_replicas_entry(sb_r, e) {
873                 err = "invalid replicas entry: invalid data type";
874                 if (e->data_type >= BCH_DATA_NR)
875                         goto err;
876
877                 err = "invalid replicas entry: no devices";
878                 if (!e->nr_devs)
879                         goto err;
880
881                 err = "invalid replicas entry: bad nr_required";
882                 if (e->nr_required > 1 &&
883                     e->nr_required >= e->nr_devs)
884                         goto err;
885
886                 err = "invalid replicas entry: invalid device";
887                 for (i = 0; i < e->nr_devs; i++)
888                         if (!bch2_dev_exists(sb, mi, e->devs[i]))
889                                 goto err;
890         }
891
892         err = "cannot allocate memory";
893         if (__bch2_sb_replicas_to_cpu_replicas(sb_r, &cpu_r))
894                 goto err;
895
896         err = check_dup_replicas_entries(&cpu_r);
897 err:
898         kfree(cpu_r.entries);
899         return err;
900 }
901
902 static void bch2_sb_replicas_to_text(struct printbuf *out,
903                                      struct bch_sb *sb,
904                                      struct bch_sb_field *f)
905 {
906         struct bch_sb_field_replicas *r = field_to_type(f, replicas);
907         struct bch_replicas_entry *e;
908         bool first = true;
909
910         for_each_replicas_entry(r, e) {
911                 if (!first)
912                         pr_buf(out, " ");
913                 first = false;
914
915                 bch2_replicas_entry_to_text(out, e);
916         }
917 }
918
919 const struct bch_sb_field_ops bch_sb_field_ops_replicas = {
920         .validate       = bch2_sb_validate_replicas,
921         .to_text        = bch2_sb_replicas_to_text,
922 };
923
924 static const char *bch2_sb_validate_replicas_v0(struct bch_sb *sb, struct bch_sb_field *f)
925 {
926         struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
927         struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
928         struct bch_replicas_cpu cpu_r = { .entries = NULL };
929         struct bch_replicas_entry_v0 *e;
930         const char *err;
931         unsigned i;
932
933         for_each_replicas_entry_v0(sb_r, e) {
934                 err = "invalid replicas entry: invalid data type";
935                 if (e->data_type >= BCH_DATA_NR)
936                         goto err;
937
938                 err = "invalid replicas entry: no devices";
939                 if (!e->nr_devs)
940                         goto err;
941
942                 err = "invalid replicas entry: invalid device";
943                 for (i = 0; i < e->nr_devs; i++)
944                         if (!bch2_dev_exists(sb, mi, e->devs[i]))
945                                 goto err;
946         }
947
948         err = "cannot allocate memory";
949         if (__bch2_sb_replicas_v0_to_cpu_replicas(sb_r, &cpu_r))
950                 goto err;
951
952         err = check_dup_replicas_entries(&cpu_r);
953 err:
954         kfree(cpu_r.entries);
955         return err;
956 }
957
958 const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0 = {
959         .validate       = bch2_sb_validate_replicas_v0,
960 };
961
962 /* Query replicas: */
963
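/*
 * For each data type, report the worst-case redundancy (minimum over all
 * replicas entries of online devices minus nr_required) and the largest
 * number of offline devices any single entry depends on.
 */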
964 struct replicas_status __bch2_replicas_status(struct bch_fs *c,
965                                               struct bch_devs_mask online_devs)
966 {
967         struct bch_sb_field_members *mi;
968         struct bch_replicas_entry *e;
969         unsigned i, nr_online, nr_offline;
970         struct replicas_status ret;
971
972         memset(&ret, 0, sizeof(ret));
973
974         for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
975                 ret.replicas[i].redundancy = INT_MAX;
976
977         mi = bch2_sb_get_members(c->disk_sb.sb);
978
979         percpu_down_read(&c->mark_lock);
980
981         for_each_cpu_replicas_entry(&c->replicas, e) {
982                 if (e->data_type >= ARRAY_SIZE(ret.replicas))
983                         panic("e %p data_type %u\n", e, e->data_type);
984
985                 nr_online = nr_offline = 0;
986
987                 for (i = 0; i < e->nr_devs; i++) {
988                         BUG_ON(!bch2_dev_exists(c->disk_sb.sb, mi,
989                                                 e->devs[i]));
990
991                         if (test_bit(e->devs[i], online_devs.d))
992                                 nr_online++;
993                         else
994                                 nr_offline++;
995                 }
996
997                 ret.replicas[e->data_type].redundancy =
998                         min(ret.replicas[e->data_type].redundancy,
999                             (int) nr_online - (int) e->nr_required);
1000
1001                 ret.replicas[e->data_type].nr_offline =
1002                         max(ret.replicas[e->data_type].nr_offline,
1003                             nr_offline);
1004         }
1005
1006         percpu_up_read(&c->mark_lock);
1007
1008         for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
1009                 if (ret.replicas[i].redundancy == INT_MAX)
1010                         ret.replicas[i].redundancy = 0;
1011
1012         return ret;
1013 }
1014
1015 struct replicas_status bch2_replicas_status(struct bch_fs *c)
1016 {
1017         return __bch2_replicas_status(c, bch2_online_devs(c));
1018 }
1019
1020 static bool have_enough_devs(struct replicas_status s,
1021                              enum bch_data_type type,
1022                              bool force_if_degraded,
1023                              bool force_if_lost)
1024 {
1025         return (!s.replicas[type].nr_offline || force_if_degraded) &&
1026                 (s.replicas[type].redundancy >= 0 || force_if_lost);
1027 }
1028
1029 bool bch2_have_enough_devs(struct replicas_status s, unsigned flags)
1030 {
1031         return (have_enough_devs(s, BCH_DATA_journal,
1032                                  flags & BCH_FORCE_IF_METADATA_DEGRADED,
1033                                  flags & BCH_FORCE_IF_METADATA_LOST) &&
1034                 have_enough_devs(s, BCH_DATA_btree,
1035                                  flags & BCH_FORCE_IF_METADATA_DEGRADED,
1036                                  flags & BCH_FORCE_IF_METADATA_LOST) &&
1037                 have_enough_devs(s, BCH_DATA_user,
1038                                  flags & BCH_FORCE_IF_DATA_DEGRADED,
1039                                  flags & BCH_FORCE_IF_DATA_LOST));
1040 }
1041
1042 int bch2_replicas_online(struct bch_fs *c, bool meta)
1043 {
1044         struct replicas_status s = bch2_replicas_status(c);
1045
1046         return (meta
1047                 ? min(s.replicas[BCH_DATA_journal].redundancy,
1048                       s.replicas[BCH_DATA_btree].redundancy)
1049                 : s.replicas[BCH_DATA_user].redundancy) + 1;
1050 }
1051
1052 unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
1053 {
1054         struct bch_replicas_entry *e;
1055         unsigned i, ret = 0;
1056
1057         percpu_down_read(&c->mark_lock);
1058
1059         for_each_cpu_replicas_entry(&c->replicas, e)
1060                 for (i = 0; i < e->nr_devs; i++)
1061                         if (e->devs[i] == ca->dev_idx)
1062                                 ret |= 1 << e->data_type;
1063
1064         percpu_up_read(&c->mark_lock);
1065
1066         return ret;
1067 }
1068
1069 int bch2_fs_replicas_init(struct bch_fs *c)
1070 {
1071         c->journal.entry_u64s_reserved +=
1072                 reserve_journal_replicas(c, &c->replicas);
1073
1074         return replicas_table_update(c, &c->replicas);
1075 }