libbcachefs/replicas.c
// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "buckets.h"
#include "journal.h"
#include "replicas.h"
#include "super-io.h"

static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
                                            struct bch_replicas_cpu *);

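/*
 * Replicas entries describe, for a given data type, a set of devices that
 * some data is stored across (plus nr_required, the number of those devices
 * needed to read it).  They're tracked in memory in c->replicas - which also
 * indexes the filesystem usage counters - and mirrored in the superblock, so
 * that at mount time we know which device combinations are in use without
 * scanning the filesystem.
 */
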
/* Replicas tracking - in memory: */

static inline int u8_cmp(u8 l, u8 r)
{
        return cmp_int(l, r);
}

static void verify_replicas_entry(struct bch_replicas_entry *e)
{
#ifdef CONFIG_BCACHEFS_DEBUG
        unsigned i;

        BUG_ON(e->data_type >= BCH_DATA_NR);
        BUG_ON(!e->nr_devs);
        BUG_ON(e->nr_required > 1 &&
               e->nr_required >= e->nr_devs);

        for (i = 0; i + 1 < e->nr_devs; i++)
                BUG_ON(e->devs[i] >= e->devs[i + 1]);
#endif
}

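/*
 * Device lists within an entry are kept sorted (replicas_entry_sort()), so
 * entries covering the same devices are byte-for-byte identical, and the
 * entry array itself is kept in eytzinger order so it can be searched with
 * eytzinger0_find():
 */
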
static void replicas_entry_sort(struct bch_replicas_entry *e)
{
        bubble_sort(e->devs, e->nr_devs, u8_cmp);
}

static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
{
        eytzinger0_sort(r->entries, r->nr, r->entry_size, memcmp, NULL);
}

void bch2_replicas_entry_to_text(struct printbuf *out,
                                 struct bch_replicas_entry *e)
{
        unsigned i;

        pr_buf(out, "%s: %u/%u [",
               bch2_data_types[e->data_type],
               e->nr_required,
               e->nr_devs);

        for (i = 0; i < e->nr_devs; i++)
                pr_buf(out, i ? " %u" : "%u", e->devs[i]);
        pr_buf(out, "]");
}

void bch2_cpu_replicas_to_text(struct printbuf *out,
                              struct bch_replicas_cpu *r)
{
        struct bch_replicas_entry *e;
        bool first = true;

        for_each_cpu_replicas_entry(r, e) {
                if (!first)
                        pr_buf(out, " ");
                first = false;

                bch2_replicas_entry_to_text(out, e);
        }
}

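/*
 * Building a replicas entry from a key: cached pointers are skipped, and a
 * pointer into an erasure coded stripe isn't added but forces nr_required to
 * 0.  A stripe key lists all of its blocks, with nr_required set to the
 * number of non-redundant (data) blocks:
 */
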
static void extent_to_replicas(struct bkey_s_c k,
                               struct bch_replicas_entry *r)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;

        r->nr_required  = 1;

        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                if (p.ptr.cached)
                        continue;

                if (!p.has_ec)
                        r->devs[r->nr_devs++] = p.ptr.dev;
                else
                        r->nr_required = 0;
        }
}

static void stripe_to_replicas(struct bkey_s_c k,
                               struct bch_replicas_entry *r)
{
        struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
        const struct bch_extent_ptr *ptr;

        r->nr_required  = s.v->nr_blocks - s.v->nr_redundant;

        for (ptr = s.v->ptrs;
             ptr < s.v->ptrs + s.v->nr_blocks;
             ptr++)
                r->devs[r->nr_devs++] = ptr->dev;
}

void bch2_bkey_to_replicas(struct bch_replicas_entry *e,
                           struct bkey_s_c k)
{
        e->nr_devs = 0;

        switch (k.k->type) {
        case KEY_TYPE_btree_ptr:
        case KEY_TYPE_btree_ptr_v2:
                e->data_type = BCH_DATA_BTREE;
                extent_to_replicas(k, e);
                break;
        case KEY_TYPE_extent:
        case KEY_TYPE_reflink_v:
                e->data_type = BCH_DATA_USER;
                extent_to_replicas(k, e);
                break;
        case KEY_TYPE_stripe:
                e->data_type = BCH_DATA_USER;
                stripe_to_replicas(k, e);
                break;
        }

        replicas_entry_sort(e);
}

void bch2_devlist_to_replicas(struct bch_replicas_entry *e,
                              enum bch_data_type data_type,
                              struct bch_devs_list devs)
{
        unsigned i;

        BUG_ON(!data_type ||
               data_type == BCH_DATA_SB ||
               data_type >= BCH_DATA_NR);

        e->data_type    = data_type;
        e->nr_devs      = 0;
        e->nr_required  = 1;

        for (i = 0; i < devs.nr; i++)
                e->devs[e->nr_devs++] = devs.devs[i];

        replicas_entry_sort(e);
}

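/*
 * cpu_replicas_add_entry() builds a new table containing the old entries plus
 * @new_entry; on allocation failure the returned table has a NULL .entries
 * pointer, which is what callers check.  The old table is left untouched.
 */
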
static struct bch_replicas_cpu
cpu_replicas_add_entry(struct bch_replicas_cpu *old,
                       struct bch_replicas_entry *new_entry)
{
        unsigned i;
        struct bch_replicas_cpu new = {
                .nr             = old->nr + 1,
                .entry_size     = max_t(unsigned, old->entry_size,
                                        replicas_entry_bytes(new_entry)),
        };

        BUG_ON(!new_entry->data_type);
        verify_replicas_entry(new_entry);

        new.entries = kcalloc(new.nr, new.entry_size, GFP_NOIO);
        if (!new.entries)
                return new;

        for (i = 0; i < old->nr; i++)
                memcpy(cpu_replicas_entry(&new, i),
                       cpu_replicas_entry(old, i),
                       old->entry_size);

        memcpy(cpu_replicas_entry(&new, old->nr),
               new_entry,
               replicas_entry_bytes(new_entry));

        bch2_cpu_replicas_sort(&new);
        return new;
}

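/*
 * Lookups compare entries with plain memcmp(): because the table is
 * kcalloc()d, bytes past an entry's real length are zero, so comparing
 * entries of different lengths still gives a consistent ordering.
 */
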
static inline int __replicas_entry_idx(struct bch_replicas_cpu *r,
                                       struct bch_replicas_entry *search)
{
        int idx, entry_size = replicas_entry_bytes(search);

        if (unlikely(entry_size > r->entry_size))
                return -1;

        verify_replicas_entry(search);

#define entry_cmp(_l, _r, size) memcmp(_l, _r, entry_size)
        idx = eytzinger0_find(r->entries, r->nr, r->entry_size,
                              entry_cmp, search);
#undef entry_cmp

        return idx < r->nr ? idx : -1;
}

int bch2_replicas_entry_idx(struct bch_fs *c,
                            struct bch_replicas_entry *search)
{
        replicas_entry_sort(search);

        return __replicas_entry_idx(&c->replicas, search);
}

static bool __replicas_has_entry(struct bch_replicas_cpu *r,
                                 struct bch_replicas_entry *search)
{
        return __replicas_entry_idx(r, search) >= 0;
}

static bool bch2_replicas_marked_locked(struct bch_fs *c,
                          struct bch_replicas_entry *search,
                          bool check_gc_replicas)
{
        if (!search->nr_devs)
                return true;

        verify_replicas_entry(search);

        return __replicas_has_entry(&c->replicas, search) &&
                (!check_gc_replicas ||
                 likely(!c->replicas_gc.entries) ||
                 __replicas_has_entry(&c->replicas_gc, search));
}

bool bch2_replicas_marked(struct bch_fs *c,
                          struct bch_replicas_entry *search,
                          bool check_gc_replicas)
{
        bool marked;

        percpu_down_read(&c->mark_lock);
        marked = bch2_replicas_marked_locked(c, search, check_gc_replicas);
        percpu_up_read(&c->mark_lock);

        return marked;
}

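/*
 * struct bch_fs_usage ends in a flexible array of per-replicas-entry sector
 * counts, indexed by an entry's position in the replicas table.  When the
 * table changes, each counter has to be moved to its entry's new index:
 */
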
static void __replicas_table_update(struct bch_fs_usage *dst,
                                    struct bch_replicas_cpu *dst_r,
                                    struct bch_fs_usage *src,
                                    struct bch_replicas_cpu *src_r)
{
        int src_idx, dst_idx;

        *dst = *src;

        for (src_idx = 0; src_idx < src_r->nr; src_idx++) {
                if (!src->replicas[src_idx])
                        continue;

                dst_idx = __replicas_entry_idx(dst_r,
                                cpu_replicas_entry(src_r, src_idx));
                BUG_ON(dst_idx < 0);

                dst->replicas[dst_idx] = src->replicas[src_idx];
        }
}

static void __replicas_table_update_pcpu(struct bch_fs_usage __percpu *dst_p,
                                    struct bch_replicas_cpu *dst_r,
                                    struct bch_fs_usage __percpu *src_p,
                                    struct bch_replicas_cpu *src_r)
{
        unsigned src_nr = sizeof(struct bch_fs_usage) / sizeof(u64) + src_r->nr;
        struct bch_fs_usage *dst, *src = (void *)
                bch2_acc_percpu_u64s((void *) src_p, src_nr);

        preempt_disable();
        dst = this_cpu_ptr(dst_p);
        preempt_enable();

        __replicas_table_update(dst, dst_r, src, src_r);
}

/*
 * Resize filesystem accounting: reallocate every bch_fs_usage buffer to hold
 * new_r->nr replicas counters, migrate existing counters to their new
 * indices, then swap the new buffers and the new replicas table in:
 */
static int replicas_table_update(struct bch_fs *c,
                                 struct bch_replicas_cpu *new_r)
{
        struct bch_fs_usage __percpu *new_usage[2] = { NULL, NULL };
        struct bch_fs_usage *new_scratch = NULL;
        struct bch_fs_usage __percpu *new_gc = NULL;
        struct bch_fs_usage *new_base = NULL;
        unsigned bytes = sizeof(struct bch_fs_usage) +
                sizeof(u64) * new_r->nr;
        int ret = -ENOMEM;

        if (!(new_base = kzalloc(bytes, GFP_NOIO)) ||
            !(new_usage[0] = __alloc_percpu_gfp(bytes, sizeof(u64),
                                                GFP_NOIO)) ||
            !(new_usage[1] = __alloc_percpu_gfp(bytes, sizeof(u64),
                                                GFP_NOIO)) ||
            !(new_scratch  = kmalloc(bytes, GFP_NOIO)) ||
            (c->usage_gc &&
             !(new_gc = __alloc_percpu_gfp(bytes, sizeof(u64), GFP_NOIO)))) {
                bch_err(c, "error updating replicas table: memory allocation failure");
                goto err;
        }

        if (c->usage_base)
                __replicas_table_update(new_base,               new_r,
                                        c->usage_base,          &c->replicas);
        if (c->usage[0])
                __replicas_table_update_pcpu(new_usage[0],      new_r,
                                             c->usage[0],       &c->replicas);
        if (c->usage[1])
                __replicas_table_update_pcpu(new_usage[1],      new_r,
                                             c->usage[1],       &c->replicas);
        if (c->usage_gc)
                __replicas_table_update_pcpu(new_gc,            new_r,
                                             c->usage_gc,       &c->replicas);

        swap(c->usage_base,     new_base);
        swap(c->usage[0],       new_usage[0]);
        swap(c->usage[1],       new_usage[1]);
        swap(c->usage_scratch,  new_scratch);
        swap(c->usage_gc,       new_gc);
        swap(c->replicas,       *new_r);
        ret = 0;
err:
        free_percpu(new_gc);
        kfree(new_scratch);
        free_percpu(new_usage[1]);
        free_percpu(new_usage[0]);
        kfree(new_base);
        return ret;
}

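/*
 * Journal space reserved for usage information: one jset_entry_usage each for
 * nr_inodes and key_version, BCH_REPLICAS_MAX entries for persistent_reserved,
 * plus one jset_entry_data_usage per replicas entry:
 */
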
static unsigned reserve_journal_replicas(struct bch_fs *c,
                                     struct bch_replicas_cpu *r)
{
        struct bch_replicas_entry *e;
        unsigned journal_res_u64s = 0;

        /* nr_inodes: */
        journal_res_u64s +=
                DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));

        /* key_version: */
        journal_res_u64s +=
                DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));

        /* persistent_reserved: */
        journal_res_u64s +=
                DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64)) *
                BCH_REPLICAS_MAX;

        for_each_cpu_replicas_entry(r, e)
                journal_res_u64s +=
                        DIV_ROUND_UP(sizeof(struct jset_entry_data_usage) +
                                     e->nr_devs, sizeof(u64));
        return journal_res_u64s;
}

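/*
 * Slow path, taken when a replicas entry isn't already in the table: build
 * new in-memory tables, add the entry to the superblock and write it out, and
 * only then swap the new tables in under mark_lock - the superblock update
 * has to be persistent before the entry is considered marked.
 */
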
noinline
static int bch2_mark_replicas_slowpath(struct bch_fs *c,
                                struct bch_replicas_entry *new_entry)
{
        struct bch_replicas_cpu new_r, new_gc;
        int ret = 0;

        verify_replicas_entry(new_entry);

        memset(&new_r, 0, sizeof(new_r));
        memset(&new_gc, 0, sizeof(new_gc));

        mutex_lock(&c->sb_lock);

        if (c->replicas_gc.entries &&
            !__replicas_has_entry(&c->replicas_gc, new_entry)) {
                new_gc = cpu_replicas_add_entry(&c->replicas_gc, new_entry);
                if (!new_gc.entries)
                        goto err;
        }

        if (!__replicas_has_entry(&c->replicas, new_entry)) {
                new_r = cpu_replicas_add_entry(&c->replicas, new_entry);
                if (!new_r.entries)
                        goto err;

                ret = bch2_cpu_replicas_to_sb_replicas(c, &new_r);
                if (ret)
                        goto err;

                bch2_journal_entry_res_resize(&c->journal,
                                &c->replicas_journal_res,
                                reserve_journal_replicas(c, &new_r));
        }

        if (!new_r.entries &&
            !new_gc.entries)
                goto out;

        /* allocations done, now commit: */

        if (new_r.entries)
                bch2_write_super(c);

        /* don't update in memory replicas until changes are persistent */
        percpu_down_write(&c->mark_lock);
        if (new_r.entries)
                ret = replicas_table_update(c, &new_r);
        if (new_gc.entries)
                swap(new_gc, c->replicas_gc);
        percpu_up_write(&c->mark_lock);
out:
        mutex_unlock(&c->sb_lock);

        kfree(new_r.entries);
        kfree(new_gc.entries);

        return ret;
err:
        bch_err(c, "error adding replicas entry: memory allocation failure");
        ret = -ENOMEM;
        goto out;
}

int bch2_mark_replicas(struct bch_fs *c,
                       struct bch_replicas_entry *r)
{
        return likely(bch2_replicas_marked(c, r, true))
                ? 0
                : bch2_mark_replicas_slowpath(c, r);
}

bool bch2_bkey_replicas_marked_locked(struct bch_fs *c,
                                      struct bkey_s_c k,
                                      bool check_gc_replicas)
{
        struct bch_replicas_padded search;
        struct bch_devs_list cached = bch2_bkey_cached_devs(k);
        unsigned i;

        for (i = 0; i < cached.nr; i++) {
                bch2_replicas_entry_cached(&search.e, cached.devs[i]);

                if (!bch2_replicas_marked_locked(c, &search.e,
                                                 check_gc_replicas))
                        return false;
        }

        bch2_bkey_to_replicas(&search.e, k);

        return bch2_replicas_marked_locked(c, &search.e, check_gc_replicas);
}

bool bch2_bkey_replicas_marked(struct bch_fs *c,
                               struct bkey_s_c k,
                               bool check_gc_replicas)
{
        bool marked;

        percpu_down_read(&c->mark_lock);
        marked = bch2_bkey_replicas_marked_locked(c, k, check_gc_replicas);
        percpu_up_read(&c->mark_lock);

        return marked;
}

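/*
 * Marking a key marks one single-device cached entry per cached pointer, plus
 * the entry describing the key's dirty pointers:
 */
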
int bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
{
        struct bch_replicas_padded search;
        struct bch_devs_list cached = bch2_bkey_cached_devs(k);
        unsigned i;
        int ret;

        for (i = 0; i < cached.nr; i++) {
                bch2_replicas_entry_cached(&search.e, cached.devs[i]);

                ret = bch2_mark_replicas(c, &search.e);
                if (ret)
                        return ret;
        }

        bch2_bkey_to_replicas(&search.e, k);

        return bch2_mark_replicas(c, &search.e);
}

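/*
 * Replicas GC: bch2_replicas_gc_start() initializes c->replicas_gc with the
 * current entries except those of the data types being GC'd; entries seen
 * while GC runs are re-added via the mark slowpath, and bch2_replicas_gc_end()
 * also keeps any entry that still has sectors accounted against it before
 * making the pruned table the new c->replicas.
 */
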
int bch2_replicas_gc_end(struct bch_fs *c, int ret)
{
        unsigned i;

        lockdep_assert_held(&c->replicas_gc_lock);

        mutex_lock(&c->sb_lock);
        percpu_down_write(&c->mark_lock);

        /*
         * this is kind of crappy; the replicas gc mechanism needs to be ripped
         * out
         */

        for (i = 0; i < c->replicas.nr; i++) {
                struct bch_replicas_entry *e =
                        cpu_replicas_entry(&c->replicas, i);
                struct bch_replicas_cpu n;

                if (!__replicas_has_entry(&c->replicas_gc, e) &&
                    (c->usage_base->replicas[i] ||
                     percpu_u64_get(&c->usage[0]->replicas[i]) ||
                     percpu_u64_get(&c->usage[1]->replicas[i]))) {
                        n = cpu_replicas_add_entry(&c->replicas_gc, e);
                        if (!n.entries) {
                                ret = -ENOMEM;
                                goto err;
                        }

                        swap(n, c->replicas_gc);
                        kfree(n.entries);
                }
        }

        if (bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc)) {
                ret = -ENOSPC;
                goto err;
        }

        ret = replicas_table_update(c, &c->replicas_gc);
err:
        kfree(c->replicas_gc.entries);
        c->replicas_gc.entries = NULL;

        percpu_up_write(&c->mark_lock);

        if (!ret)
                bch2_write_super(c);

        mutex_unlock(&c->sb_lock);

        return ret;
}

int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
{
        struct bch_replicas_entry *e;
        unsigned i = 0;

        lockdep_assert_held(&c->replicas_gc_lock);

        mutex_lock(&c->sb_lock);
        BUG_ON(c->replicas_gc.entries);

        c->replicas_gc.nr               = 0;
        c->replicas_gc.entry_size       = 0;

        for_each_cpu_replicas_entry(&c->replicas, e)
                if (!((1 << e->data_type) & typemask)) {
                        c->replicas_gc.nr++;
                        c->replicas_gc.entry_size =
                                max_t(unsigned, c->replicas_gc.entry_size,
                                      replicas_entry_bytes(e));
                }

        c->replicas_gc.entries = kcalloc(c->replicas_gc.nr,
                                         c->replicas_gc.entry_size,
                                         GFP_NOIO);
        if (!c->replicas_gc.entries) {
                mutex_unlock(&c->sb_lock);
                bch_err(c, "error allocating c->replicas_gc");
                return -ENOMEM;
        }

        for_each_cpu_replicas_entry(&c->replicas, e)
                if (!((1 << e->data_type) & typemask))
                        memcpy(cpu_replicas_entry(&c->replicas_gc, i++),
                               e, c->replicas_gc.entry_size);

        bch2_cpu_replicas_sort(&c->replicas_gc);
        mutex_unlock(&c->sb_lock);

        return 0;
}

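/*
 * Standalone GC pass: flush the journal, then rebuild the table keeping only
 * journal entries and entries that still have sectors accounted against them.
 * The allocation is done before taking the locks, so retry if the table was
 * resized in the meantime:
 */
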
int bch2_replicas_gc2(struct bch_fs *c)
{
        struct bch_replicas_cpu new = { 0 };
        unsigned i, nr;
        int ret = 0;

        bch2_journal_meta(&c->journal);
retry:
        nr              = READ_ONCE(c->replicas.nr);
        new.entry_size  = READ_ONCE(c->replicas.entry_size);
        new.entries     = kcalloc(nr, new.entry_size, GFP_KERNEL);
        if (!new.entries) {
                bch_err(c, "error allocating c->replicas_gc");
                return -ENOMEM;
        }

        mutex_lock(&c->sb_lock);
        percpu_down_write(&c->mark_lock);

        if (nr                  != c->replicas.nr ||
            new.entry_size      != c->replicas.entry_size) {
                percpu_up_write(&c->mark_lock);
                mutex_unlock(&c->sb_lock);
                kfree(new.entries);
                goto retry;
        }

        for (i = 0; i < c->replicas.nr; i++) {
                struct bch_replicas_entry *e =
                        cpu_replicas_entry(&c->replicas, i);

                if (e->data_type == BCH_DATA_JOURNAL ||
                    c->usage_base->replicas[i] ||
                    percpu_u64_get(&c->usage[0]->replicas[i]) ||
                    percpu_u64_get(&c->usage[1]->replicas[i]))
                        memcpy(cpu_replicas_entry(&new, new.nr++),
                               e, new.entry_size);
        }

        bch2_cpu_replicas_sort(&new);

        if (bch2_cpu_replicas_to_sb_replicas(c, &new)) {
                ret = -ENOSPC;
                goto err;
        }

        ret = replicas_table_update(c, &new);
err:
        kfree(new.entries);

        percpu_up_write(&c->mark_lock);

        if (!ret)
                bch2_write_super(c);

        mutex_unlock(&c->sb_lock);

        return ret;
}

int bch2_replicas_set_usage(struct bch_fs *c,
                            struct bch_replicas_entry *r,
                            u64 sectors)
{
        int ret, idx = bch2_replicas_entry_idx(c, r);

        if (idx < 0) {
                struct bch_replicas_cpu n;

                n = cpu_replicas_add_entry(&c->replicas, r);
                if (!n.entries)
                        return -ENOMEM;

                ret = replicas_table_update(c, &n);
                if (ret)
                        return ret;

                kfree(n.entries);

                idx = bch2_replicas_entry_idx(c, r);
                BUG_ON(idx < 0);
        }

        c->usage_base->replicas[idx] = sectors;

        return 0;
}

/* Replicas tracking - superblock: */

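/*
 * The superblock stores replicas entries packed end to end, variable length;
 * the in-memory table pads every entry out to a fixed entry_size so it can be
 * sorted and searched with memcmp():
 */
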
static int
__bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r,
                                   struct bch_replicas_cpu *cpu_r)
{
        struct bch_replicas_entry *e, *dst;
        unsigned nr = 0, entry_size = 0, idx = 0;

        for_each_replicas_entry(sb_r, e) {
                entry_size = max_t(unsigned, entry_size,
                                   replicas_entry_bytes(e));
                nr++;
        }

        cpu_r->entries = kcalloc(nr, entry_size, GFP_NOIO);
        if (!cpu_r->entries)
                return -ENOMEM;

        cpu_r->nr               = nr;
        cpu_r->entry_size       = entry_size;

        for_each_replicas_entry(sb_r, e) {
                dst = cpu_replicas_entry(cpu_r, idx++);
                memcpy(dst, e, replicas_entry_bytes(e));
                replicas_entry_sort(dst);
        }

        return 0;
}

static int
__bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
                                      struct bch_replicas_cpu *cpu_r)
{
        struct bch_replicas_entry_v0 *e;
        unsigned nr = 0, entry_size = 0, idx = 0;

        for_each_replicas_entry(sb_r, e) {
                entry_size = max_t(unsigned, entry_size,
                                   replicas_entry_bytes(e));
                nr++;
        }

        entry_size += sizeof(struct bch_replicas_entry) -
                sizeof(struct bch_replicas_entry_v0);

        cpu_r->entries = kcalloc(nr, entry_size, GFP_NOIO);
        if (!cpu_r->entries)
                return -ENOMEM;

        cpu_r->nr               = nr;
        cpu_r->entry_size       = entry_size;

        for_each_replicas_entry(sb_r, e) {
                struct bch_replicas_entry *dst =
                        cpu_replicas_entry(cpu_r, idx++);

                dst->data_type  = e->data_type;
                dst->nr_devs    = e->nr_devs;
                dst->nr_required = 1;
                memcpy(dst->devs, e->devs, e->nr_devs);
                replicas_entry_sort(dst);
        }

        return 0;
}

int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
{
        struct bch_sb_field_replicas *sb_v1;
        struct bch_sb_field_replicas_v0 *sb_v0;
        struct bch_replicas_cpu new_r = { 0, 0, NULL };
        int ret = 0;

        if ((sb_v1 = bch2_sb_get_replicas(c->disk_sb.sb)))
                ret = __bch2_sb_replicas_to_cpu_replicas(sb_v1, &new_r);
        else if ((sb_v0 = bch2_sb_get_replicas_v0(c->disk_sb.sb)))
                ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_v0, &new_r);

        if (ret)
                return -ENOMEM;

        bch2_cpu_replicas_sort(&new_r);

        percpu_down_write(&c->mark_lock);

        ret = replicas_table_update(c, &new_r);
        percpu_up_write(&c->mark_lock);

        kfree(new_r.entries);

        return ret;
}

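/*
 * Writing the in-memory table back out: the v0 entry format has no
 * nr_required field (one byte less per entry) and is only used when every
 * entry has nr_required == 1.
 */
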
static int bch2_cpu_replicas_to_sb_replicas_v0(struct bch_fs *c,
                                               struct bch_replicas_cpu *r)
{
        struct bch_sb_field_replicas_v0 *sb_r;
        struct bch_replicas_entry_v0 *dst;
        struct bch_replicas_entry *src;
        size_t bytes;

        bytes = sizeof(struct bch_sb_field_replicas);

        for_each_cpu_replicas_entry(r, src)
                bytes += replicas_entry_bytes(src) - 1;

        sb_r = bch2_sb_resize_replicas_v0(&c->disk_sb,
                        DIV_ROUND_UP(bytes, sizeof(u64)));
        if (!sb_r)
                return -ENOSPC;

        bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas);
        sb_r = bch2_sb_get_replicas_v0(c->disk_sb.sb);

        memset(&sb_r->entries, 0,
               vstruct_end(&sb_r->field) -
               (void *) &sb_r->entries);

        dst = sb_r->entries;
        for_each_cpu_replicas_entry(r, src) {
                dst->data_type  = src->data_type;
                dst->nr_devs    = src->nr_devs;
                memcpy(dst->devs, src->devs, src->nr_devs);

                dst = replicas_entry_next(dst);

                BUG_ON((void *) dst > vstruct_end(&sb_r->field));
        }

        return 0;
}

static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
                                            struct bch_replicas_cpu *r)
{
        struct bch_sb_field_replicas *sb_r;
        struct bch_replicas_entry *dst, *src;
        bool need_v1 = false;
        size_t bytes;

        bytes = sizeof(struct bch_sb_field_replicas);

        for_each_cpu_replicas_entry(r, src) {
                bytes += replicas_entry_bytes(src);
                if (src->nr_required != 1)
                        need_v1 = true;
        }

        if (!need_v1)
                return bch2_cpu_replicas_to_sb_replicas_v0(c, r);

        sb_r = bch2_sb_resize_replicas(&c->disk_sb,
                        DIV_ROUND_UP(bytes, sizeof(u64)));
        if (!sb_r)
                return -ENOSPC;

        bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas_v0);
        sb_r = bch2_sb_get_replicas(c->disk_sb.sb);

        memset(&sb_r->entries, 0,
               vstruct_end(&sb_r->field) -
               (void *) &sb_r->entries);

        dst = sb_r->entries;
        for_each_cpu_replicas_entry(r, src) {
                memcpy(dst, src, replicas_entry_bytes(src));

                dst = replicas_entry_next(dst);

                BUG_ON((void *) dst > vstruct_end(&sb_r->field));
        }

        return 0;
}

static const char *check_dup_replicas_entries(struct bch_replicas_cpu *cpu_r)
{
        unsigned i;

        sort_cmp_size(cpu_r->entries,
                      cpu_r->nr,
                      cpu_r->entry_size,
                      memcmp, NULL);

        for (i = 0; i + 1 < cpu_r->nr; i++) {
                struct bch_replicas_entry *l =
                        cpu_replicas_entry(cpu_r, i);
                struct bch_replicas_entry *r =
                        cpu_replicas_entry(cpu_r, i + 1);

                BUG_ON(memcmp(l, r, cpu_r->entry_size) > 0);

                if (!memcmp(l, r, cpu_r->entry_size))
                        return "duplicate replicas entry";
        }

        return NULL;
}

static const char *bch2_sb_validate_replicas(struct bch_sb *sb, struct bch_sb_field *f)
{
        struct bch_sb_field_replicas *sb_r = field_to_type(f, replicas);
        struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
        struct bch_replicas_cpu cpu_r = { .entries = NULL };
        struct bch_replicas_entry *e;
        const char *err;
        unsigned i;

        for_each_replicas_entry(sb_r, e) {
                err = "invalid replicas entry: invalid data type";
                if (e->data_type >= BCH_DATA_NR)
                        goto err;

                err = "invalid replicas entry: no devices";
                if (!e->nr_devs)
                        goto err;

                err = "invalid replicas entry: bad nr_required";
                if (e->nr_required > 1 &&
                    e->nr_required >= e->nr_devs)
                        goto err;

                err = "invalid replicas entry: invalid device";
                for (i = 0; i < e->nr_devs; i++)
                        if (!bch2_dev_exists(sb, mi, e->devs[i]))
                                goto err;
        }

        err = "cannot allocate memory";
        if (__bch2_sb_replicas_to_cpu_replicas(sb_r, &cpu_r))
                goto err;

        err = check_dup_replicas_entries(&cpu_r);
err:
        kfree(cpu_r.entries);
        return err;
}

static void bch2_sb_replicas_to_text(struct printbuf *out,
                                     struct bch_sb *sb,
                                     struct bch_sb_field *f)
{
        struct bch_sb_field_replicas *r = field_to_type(f, replicas);
        struct bch_replicas_entry *e;
        bool first = true;

        for_each_replicas_entry(r, e) {
                if (!first)
                        pr_buf(out, " ");
                first = false;

                bch2_replicas_entry_to_text(out, e);
        }
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas = {
        .validate       = bch2_sb_validate_replicas,
        .to_text        = bch2_sb_replicas_to_text,
};

static const char *bch2_sb_validate_replicas_v0(struct bch_sb *sb, struct bch_sb_field *f)
{
        struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
        struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
        struct bch_replicas_cpu cpu_r = { .entries = NULL };
        struct bch_replicas_entry_v0 *e;
        const char *err;
        unsigned i;

        for_each_replicas_entry_v0(sb_r, e) {
                err = "invalid replicas entry: invalid data type";
                if (e->data_type >= BCH_DATA_NR)
                        goto err;

                err = "invalid replicas entry: no devices";
                if (!e->nr_devs)
                        goto err;

                err = "invalid replicas entry: invalid device";
                for (i = 0; i < e->nr_devs; i++)
                        if (!bch2_dev_exists(sb, mi, e->devs[i]))
                                goto err;
        }

        err = "cannot allocate memory";
        if (__bch2_sb_replicas_v0_to_cpu_replicas(sb_r, &cpu_r))
                goto err;

        err = check_dup_replicas_entries(&cpu_r);
err:
        kfree(cpu_r.entries);
        return err;
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0 = {
        .validate       = bch2_sb_validate_replicas_v0,
};

/* Query replicas: */

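/*
 * For each data type, compute the worst-case redundancy (fewest online
 * devices minus nr_required over all entries of that type) and the largest
 * number of offline devices in any one entry; bch2_have_enough_devs() uses
 * this to decide whether the filesystem can run with the devices currently
 * online:
 */
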
struct replicas_status __bch2_replicas_status(struct bch_fs *c,
                                              struct bch_devs_mask online_devs)
{
        struct bch_sb_field_members *mi;
        struct bch_replicas_entry *e;
        unsigned i, nr_online, nr_offline;
        struct replicas_status ret;

        memset(&ret, 0, sizeof(ret));

        for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
                ret.replicas[i].redundancy = INT_MAX;

        mi = bch2_sb_get_members(c->disk_sb.sb);

        percpu_down_read(&c->mark_lock);

        for_each_cpu_replicas_entry(&c->replicas, e) {
                if (e->data_type >= ARRAY_SIZE(ret.replicas))
                        panic("e %p data_type %u\n", e, e->data_type);

                nr_online = nr_offline = 0;

                for (i = 0; i < e->nr_devs; i++) {
                        BUG_ON(!bch2_dev_exists(c->disk_sb.sb, mi,
                                                e->devs[i]));

                        if (test_bit(e->devs[i], online_devs.d))
                                nr_online++;
                        else
                                nr_offline++;
                }

                ret.replicas[e->data_type].redundancy =
                        min(ret.replicas[e->data_type].redundancy,
                            (int) nr_online - (int) e->nr_required);

                ret.replicas[e->data_type].nr_offline =
                        max(ret.replicas[e->data_type].nr_offline,
                            nr_offline);
        }

        percpu_up_read(&c->mark_lock);

        for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
                if (ret.replicas[i].redundancy == INT_MAX)
                        ret.replicas[i].redundancy = 0;

        return ret;
}

struct replicas_status bch2_replicas_status(struct bch_fs *c)
{
        return __bch2_replicas_status(c, bch2_online_devs(c));
}

static bool have_enough_devs(struct replicas_status s,
                             enum bch_data_type type,
                             bool force_if_degraded,
                             bool force_if_lost)
{
        return (!s.replicas[type].nr_offline || force_if_degraded) &&
                (s.replicas[type].redundancy >= 0 || force_if_lost);
}

bool bch2_have_enough_devs(struct replicas_status s, unsigned flags)
{
        return (have_enough_devs(s, BCH_DATA_JOURNAL,
                                 flags & BCH_FORCE_IF_METADATA_DEGRADED,
                                 flags & BCH_FORCE_IF_METADATA_LOST) &&
                have_enough_devs(s, BCH_DATA_BTREE,
                                 flags & BCH_FORCE_IF_METADATA_DEGRADED,
                                 flags & BCH_FORCE_IF_METADATA_LOST) &&
                have_enough_devs(s, BCH_DATA_USER,
                                 flags & BCH_FORCE_IF_DATA_DEGRADED,
                                 flags & BCH_FORCE_IF_DATA_LOST));
}

int bch2_replicas_online(struct bch_fs *c, bool meta)
{
        struct replicas_status s = bch2_replicas_status(c);

        return (meta
                ? min(s.replicas[BCH_DATA_JOURNAL].redundancy,
                      s.replicas[BCH_DATA_BTREE].redundancy)
                : s.replicas[BCH_DATA_USER].redundancy) + 1;
}

unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
{
        struct bch_replicas_entry *e;
        unsigned i, ret = 0;

        percpu_down_read(&c->mark_lock);

        for_each_cpu_replicas_entry(&c->replicas, e)
                for (i = 0; i < e->nr_devs; i++)
                        if (e->devs[i] == ca->dev_idx)
                                ret |= 1 << e->data_type;

        percpu_up_read(&c->mark_lock);

        return ret;
}

int bch2_fs_replicas_init(struct bch_fs *c)
{
        c->journal.entry_u64s_reserved +=
                reserve_journal_replicas(c, &c->replicas);

        return replicas_table_update(c, &c->replicas);
}