
#include "bcachefs.h"
#include "replicas.h"
#include "super-io.h"

struct bch_replicas_entry_padded {
        struct bch_replicas_entry       e;
        u8                              pad[BCH_SB_MEMBERS_MAX];
};

static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
                                            struct bch_replicas_cpu *);

/* Replicas tracking - in memory: */

static inline int u8_cmp(u8 l, u8 r)
{
        return (l > r) - (l < r);
}

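/*
 * Device lists within an entry are kept sorted so that whole entries can be
 * compared with memcmp(); bubble_sort() is presumably acceptable here because
 * a list holds at most BCH_SB_MEMBERS_MAX single-byte device indices.
 */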
static void replicas_entry_sort(struct bch_replicas_entry *e)
{
        bubble_sort(e->devs, e->nr_devs, u8_cmp);
}

#define for_each_cpu_replicas_entry(_r, _i)                             \
        for (_i = (_r)->entries;                                        \
             (void *) (_i) < (void *) (_r)->entries + (_r)->nr * (_r)->entry_size;\
             _i = (void *) (_i) + (_r)->entry_size)

static inline struct bch_replicas_entry *
cpu_replicas_entry(struct bch_replicas_cpu *r, unsigned i)
{
        return (void *) r->entries + r->entry_size * i;
}

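/*
 * The in-memory table is kept in eytzinger (cache-friendly BFS) order so that
 * lookups can use eytzinger0_find(); entries are allocated zeroed and padded
 * to entry_size, which is what makes plain memcmp() a valid comparator.
 */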
static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
{
        eytzinger0_sort(r->entries, r->nr, r->entry_size, memcmp, NULL);
}

static void replicas_entry_to_text(struct printbuf *out,
                                   struct bch_replicas_entry *e)
{
        unsigned i;

        pr_buf(out, "%s: %u/%u [",
               bch2_data_types[e->data_type],
               e->nr_required,
               e->nr_devs);

        for (i = 0; i < e->nr_devs; i++)
                pr_buf(out, i ? " %u" : "%u", e->devs[i]);
        pr_buf(out, "]");
}

void bch2_cpu_replicas_to_text(struct printbuf *out,
                               struct bch_replicas_cpu *r)
{
        struct bch_replicas_entry *e;
        bool first = true;

        for_each_cpu_replicas_entry(r, e) {
                if (!first)
                        pr_buf(out, " ");
                first = false;

                replicas_entry_to_text(out, e);
        }
}

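/*
 * Build a replicas entry from an extent's non-cached pointers. If any pointer
 * is erasure coded (p.ec_nr) we bail out with nr_devs = 0 - callers treat an
 * empty device list as nothing to mark.
 */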
static void extent_to_replicas(struct bkey_s_c k,
                               struct bch_replicas_entry *r)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;

        r->nr_required  = 1;

        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                if (p.ptr.cached)
                        continue;

                if (p.ec_nr) {
                        r->nr_devs = 0;
                        break;
                }

                r->devs[r->nr_devs++] = p.ptr.dev;
        }
}

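/*
 * For a stripe, nr_required is the number of data blocks: the stripe remains
 * reconstructible as long as no more than nr_redundant of its blocks are lost.
 */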
static void stripe_to_replicas(struct bkey_s_c k,
                               struct bch_replicas_entry *r)
{
        struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
        const struct bch_extent_ptr *ptr;

        r->nr_required  = s.v->nr_blocks - s.v->nr_redundant;

        for (ptr = s.v->ptrs;
             ptr < s.v->ptrs + s.v->nr_blocks;
             ptr++)
                r->devs[r->nr_devs++] = ptr->dev;
}

static void bkey_to_replicas(struct bkey_s_c k,
                             struct bch_replicas_entry *e)
{
        e->nr_devs = 0;

        switch (k.k->type) {
        case KEY_TYPE_btree_ptr:
                e->data_type = BCH_DATA_BTREE;
                extent_to_replicas(k, e);
                break;
        case KEY_TYPE_extent:
                e->data_type = BCH_DATA_USER;
                extent_to_replicas(k, e);
                break;
        case KEY_TYPE_stripe:
                e->data_type = BCH_DATA_USER;
                stripe_to_replicas(k, e);
                break;
        }

        replicas_entry_sort(e);
}

static inline void devlist_to_replicas(struct bch_devs_list devs,
                                       enum bch_data_type data_type,
                                       struct bch_replicas_entry *e)
{
        unsigned i;

        BUG_ON(!data_type ||
               data_type == BCH_DATA_SB ||
               data_type >= BCH_DATA_NR);

        e->data_type    = data_type;
        e->nr_devs      = 0;
        e->nr_required  = 1;

        for (i = 0; i < devs.nr; i++)
                e->devs[e->nr_devs++] = devs.devs[i];

        replicas_entry_sort(e);
}

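/*
 * Returns a new table with @new_entry appended and re-sorted; entry_size is
 * grown if the new entry is wider than the existing ones. The caller
 * publishes the result with rcu_assign_pointer() and frees the old table
 * with kfree_rcu().
 */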
static struct bch_replicas_cpu *
cpu_replicas_add_entry(struct bch_replicas_cpu *old,
                       struct bch_replicas_entry *new_entry)
{
        struct bch_replicas_cpu *new;
        unsigned i, nr, entry_size;

        entry_size = max_t(unsigned, old->entry_size,
                           replicas_entry_bytes(new_entry));
        nr = old->nr + 1;

        new = kzalloc(sizeof(struct bch_replicas_cpu) +
                      nr * entry_size, GFP_NOIO);
        if (!new)
                return NULL;

        new->nr         = nr;
        new->entry_size = entry_size;

        for (i = 0; i < old->nr; i++)
                memcpy(cpu_replicas_entry(new, i),
                       cpu_replicas_entry(old, i),
                       old->entry_size);

        memcpy(cpu_replicas_entry(new, old->nr),
               new_entry,
               replicas_entry_bytes(new_entry));

        bch2_cpu_replicas_sort(new);
        return new;
}

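/*
 * Binary search in eytzinger order; a search entry wider than r->entry_size
 * cannot possibly be present, hence the size check first.
 */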
static bool __replicas_has_entry(struct bch_replicas_cpu *r,
                                 struct bch_replicas_entry *search)
{
        return replicas_entry_bytes(search) <= r->entry_size &&
                eytzinger0_find(r->entries, r->nr,
                                r->entry_size,
                                memcmp, search) < r->nr;
}

static bool replicas_has_entry(struct bch_fs *c,
                               struct bch_replicas_entry *search,
                               bool check_gc_replicas)
{
        struct bch_replicas_cpu *r, *gc_r;
        bool marked;

        rcu_read_lock();
        r = rcu_dereference(c->replicas);
        marked = __replicas_has_entry(r, search) &&
                (!check_gc_replicas ||
                 likely(!(gc_r = rcu_dereference(c->replicas_gc))) ||
                 __replicas_has_entry(gc_r, search));
        rcu_read_unlock();

        return marked;
}

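/*
 * Slowpath for marking a new replicas entry: allocate grown copies of the
 * normal and (if active) GC tables, write the new entry to the superblock,
 * and only then publish the new in-memory tables via RCU - so we never claim
 * to have replicas that aren't persisted.
 */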
noinline
static int bch2_mark_replicas_slowpath(struct bch_fs *c,
                                struct bch_replicas_entry *new_entry)
{
        struct bch_replicas_cpu *old_gc, *new_gc = NULL, *old_r, *new_r = NULL;
        int ret = -ENOMEM;

        mutex_lock(&c->sb_lock);

        old_gc = rcu_dereference_protected(c->replicas_gc,
                                           lockdep_is_held(&c->sb_lock));
        if (old_gc && !__replicas_has_entry(old_gc, new_entry)) {
                new_gc = cpu_replicas_add_entry(old_gc, new_entry);
                if (!new_gc)
                        goto err;
        }

        old_r = rcu_dereference_protected(c->replicas,
                                          lockdep_is_held(&c->sb_lock));
        if (!__replicas_has_entry(old_r, new_entry)) {
                new_r = cpu_replicas_add_entry(old_r, new_entry);
                if (!new_r)
                        goto err;

                ret = bch2_cpu_replicas_to_sb_replicas(c, new_r);
                if (ret)
                        goto err;
        }

        /* allocations done, now commit: */

        if (new_r)
                bch2_write_super(c);

        /* don't update in memory replicas until changes are persistent */

        if (new_gc) {
                rcu_assign_pointer(c->replicas_gc, new_gc);
                kfree_rcu(old_gc, rcu);
        }

        if (new_r) {
                rcu_assign_pointer(c->replicas, new_r);
                kfree_rcu(old_r, rcu);
        }

        mutex_unlock(&c->sb_lock);
        return 0;
err:
        mutex_unlock(&c->sb_lock);
        kfree(new_gc);
        kfree(new_r);
        return ret;
}

static int __bch2_mark_replicas(struct bch_fs *c,
                                struct bch_replicas_entry *devs)
{
        return likely(replicas_has_entry(c, devs, true))
                ? 0
                : bch2_mark_replicas_slowpath(c, devs);
}

int bch2_mark_replicas(struct bch_fs *c,
                       enum bch_data_type data_type,
                       struct bch_devs_list devs)
{
        struct bch_replicas_entry_padded search;

        if (!devs.nr)
                return 0;

        memset(&search, 0, sizeof(search));

        BUG_ON(devs.nr >= BCH_REPLICAS_MAX);

        devlist_to_replicas(devs, data_type, &search.e);

        return __bch2_mark_replicas(c, &search.e);
}

int bch2_mark_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
{
        struct bch_replicas_entry_padded search;
        struct bch_devs_list cached = bch2_bkey_cached_devs(k);
        unsigned i;
        int ret;

        memset(&search, 0, sizeof(search));

        for (i = 0; i < cached.nr; i++)
                if ((ret = bch2_mark_replicas(c, BCH_DATA_CACHED,
                                              bch2_dev_list_single(cached.devs[i]))))
                        return ret;

        bkey_to_replicas(k, &search.e);

        return search.e.nr_devs
                ? __bch2_mark_replicas(c, &search.e)
                : 0;
}

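/*
 * Finish a GC pass: the gc table (holding the entries kept or re-marked
 * during GC) replaces the normal table, again writing the superblock before
 * the RCU pointer swap. On error the gc table is simply discarded.
 */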
int bch2_replicas_gc_end(struct bch_fs *c, int ret)
{
        struct bch_replicas_cpu *new_r, *old_r;

        lockdep_assert_held(&c->replicas_gc_lock);

        mutex_lock(&c->sb_lock);

        new_r = rcu_dereference_protected(c->replicas_gc,
                                          lockdep_is_held(&c->sb_lock));
        rcu_assign_pointer(c->replicas_gc, NULL);

        if (ret)
                goto err;

        if (bch2_cpu_replicas_to_sb_replicas(c, new_r)) {
                ret = -ENOSPC;
                goto err;
        }

        bch2_write_super(c);

        /* don't update in memory replicas until changes are persistent */

        old_r = rcu_dereference_protected(c->replicas,
                                          lockdep_is_held(&c->sb_lock));

        rcu_assign_pointer(c->replicas, new_r);
        kfree_rcu(old_r, rcu);
out:
        mutex_unlock(&c->sb_lock);
        return ret;
err:
        kfree_rcu(new_r, rcu);
        goto out;
}

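/*
 * Start a GC pass for the data types in @typemask: the gc table is seeded
 * with only the entries *not* being GC'd; entries for the masked types are
 * expected to be re-added through the normal mark path as GC finds them
 * still in use.
 */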
int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
{
        struct bch_replicas_cpu *dst, *src;
        struct bch_replicas_entry *e;

        lockdep_assert_held(&c->replicas_gc_lock);

        mutex_lock(&c->sb_lock);
        BUG_ON(c->replicas_gc);

        src = rcu_dereference_protected(c->replicas,
                                        lockdep_is_held(&c->sb_lock));

        dst = kzalloc(sizeof(struct bch_replicas_cpu) +
                      src->nr * src->entry_size, GFP_NOIO);
        if (!dst) {
                mutex_unlock(&c->sb_lock);
                return -ENOMEM;
        }

        dst->nr         = 0;
        dst->entry_size = src->entry_size;

        for_each_cpu_replicas_entry(src, e)
                if (!((1 << e->data_type) & typemask))
                        memcpy(cpu_replicas_entry(dst, dst->nr++),
                               e, dst->entry_size);

        bch2_cpu_replicas_sort(dst);

        rcu_assign_pointer(c->replicas_gc, dst);
        mutex_unlock(&c->sb_lock);

        return 0;
}

/* Replicas tracking - superblock: */

static struct bch_replicas_cpu *
__bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r)
{
        struct bch_replicas_entry *e, *dst;
        struct bch_replicas_cpu *cpu_r;
        unsigned nr = 0, entry_size = 0, idx = 0;

        for_each_replicas_entry(sb_r, e) {
                entry_size = max_t(unsigned, entry_size,
                                   replicas_entry_bytes(e));
                nr++;
        }

        cpu_r = kzalloc(sizeof(struct bch_replicas_cpu) +
                        nr * entry_size, GFP_NOIO);
        if (!cpu_r)
                return NULL;

        cpu_r->nr               = nr;
        cpu_r->entry_size       = entry_size;

        for_each_replicas_entry(sb_r, e) {
                dst = cpu_replicas_entry(cpu_r, idx++);
                memcpy(dst, e, replicas_entry_bytes(e));
                replicas_entry_sort(dst);
        }

        return cpu_r;
}

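/*
 * v0 entries have no nr_required field; conversion grows entry_size by the
 * difference between the two structs and defaults nr_required to 1.
 */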
static struct bch_replicas_cpu *
__bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r)
{
        struct bch_replicas_entry_v0 *e;
        struct bch_replicas_cpu *cpu_r;
        unsigned nr = 0, entry_size = 0, idx = 0;

        for_each_replicas_entry(sb_r, e) {
                entry_size = max_t(unsigned, entry_size,
                                   replicas_entry_bytes(e));
                nr++;
        }

        entry_size += sizeof(struct bch_replicas_entry) -
                sizeof(struct bch_replicas_entry_v0);

        cpu_r = kzalloc(sizeof(struct bch_replicas_cpu) +
                        nr * entry_size, GFP_NOIO);
        if (!cpu_r)
                return NULL;

        cpu_r->nr               = nr;
        cpu_r->entry_size       = entry_size;

        for_each_replicas_entry(sb_r, e) {
                struct bch_replicas_entry *dst =
                        cpu_replicas_entry(cpu_r, idx++);

                dst->data_type  = e->data_type;
                dst->nr_devs    = e->nr_devs;
                dst->nr_required = 1;
                memcpy(dst->devs, e->devs, e->nr_devs);
                replicas_entry_sort(dst);
        }

        return cpu_r;
}

int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
{
        struct bch_sb_field_replicas *sb_v1;
        struct bch_sb_field_replicas_v0 *sb_v0;
        struct bch_replicas_cpu *cpu_r, *old_r;

        if ((sb_v1 = bch2_sb_get_replicas(c->disk_sb.sb)))
                cpu_r = __bch2_sb_replicas_to_cpu_replicas(sb_v1);
        else if ((sb_v0 = bch2_sb_get_replicas_v0(c->disk_sb.sb)))
                cpu_r = __bch2_sb_replicas_v0_to_cpu_replicas(sb_v0);
        else
                cpu_r = kzalloc(sizeof(struct bch_replicas_cpu), GFP_NOIO);

        if (!cpu_r)
                return -ENOMEM;

        bch2_cpu_replicas_sort(cpu_r);

        old_r = rcu_dereference_check(c->replicas, lockdep_is_held(&c->sb_lock));
        rcu_assign_pointer(c->replicas, cpu_r);
        if (old_r)
                kfree_rcu(old_r, rcu);

        return 0;
}

static int bch2_cpu_replicas_to_sb_replicas_v0(struct bch_fs *c,
                                               struct bch_replicas_cpu *r)
{
        struct bch_sb_field_replicas_v0 *sb_r;
        struct bch_replicas_entry_v0 *dst;
        struct bch_replicas_entry *src;
        size_t bytes;

        bytes = sizeof(struct bch_sb_field_replicas);

        for_each_cpu_replicas_entry(r, src)
                bytes += replicas_entry_bytes(src) - 1;

        sb_r = bch2_sb_resize_replicas_v0(&c->disk_sb,
                        DIV_ROUND_UP(bytes, sizeof(u64)));
        if (!sb_r)
                return -ENOSPC;

        bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas);
        sb_r = bch2_sb_get_replicas_v0(c->disk_sb.sb);

        memset(&sb_r->entries, 0,
               vstruct_end(&sb_r->field) -
               (void *) &sb_r->entries);

        dst = sb_r->entries;
        for_each_cpu_replicas_entry(r, src) {
                dst->data_type  = src->data_type;
                dst->nr_devs    = src->nr_devs;
                memcpy(dst->devs, src->devs, src->nr_devs);

                dst = replicas_entry_next(dst);

                BUG_ON((void *) dst > vstruct_end(&sb_r->field));
        }

        return 0;
}

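/*
 * If every entry has nr_required == 1 we write the older v0 field instead,
 * presumably so superblocks stay readable by versions that predate
 * nr_required; only one of the two replicas fields exists at a time.
 */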
static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
                                            struct bch_replicas_cpu *r)
{
        struct bch_sb_field_replicas *sb_r;
        struct bch_replicas_entry *dst, *src;
        bool need_v1 = false;
        size_t bytes;

        bytes = sizeof(struct bch_sb_field_replicas);

        for_each_cpu_replicas_entry(r, src) {
                bytes += replicas_entry_bytes(src);
                if (src->nr_required != 1)
                        need_v1 = true;
        }

        if (!need_v1)
                return bch2_cpu_replicas_to_sb_replicas_v0(c, r);

        sb_r = bch2_sb_resize_replicas(&c->disk_sb,
                        DIV_ROUND_UP(bytes, sizeof(u64)));
        if (!sb_r)
                return -ENOSPC;

        bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas_v0);
        sb_r = bch2_sb_get_replicas(c->disk_sb.sb);

        memset(&sb_r->entries, 0,
               vstruct_end(&sb_r->field) -
               (void *) &sb_r->entries);

        dst = sb_r->entries;
        for_each_cpu_replicas_entry(r, src) {
                memcpy(dst, src, replicas_entry_bytes(src));

                dst = replicas_entry_next(dst);

                BUG_ON((void *) dst > vstruct_end(&sb_r->field));
        }

        return 0;
}

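/*
 * Validation helper: sort (in plain, non-eytzinger order) and scan adjacent
 * entries for duplicates.
 */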
static const char *check_dup_replicas_entries(struct bch_replicas_cpu *cpu_r)
{
        unsigned i;

        sort_cmp_size(cpu_r->entries,
                      cpu_r->nr,
                      cpu_r->entry_size,
                      memcmp, NULL);

        for (i = 0; i + 1 < cpu_r->nr; i++) {
                struct bch_replicas_entry *l =
                        cpu_replicas_entry(cpu_r, i);
                struct bch_replicas_entry *r =
                        cpu_replicas_entry(cpu_r, i + 1);

                BUG_ON(memcmp(l, r, cpu_r->entry_size) > 0);

                if (!memcmp(l, r, cpu_r->entry_size))
                        return "duplicate replicas entry";
        }

        return NULL;
}

static const char *bch2_sb_validate_replicas(struct bch_sb *sb, struct bch_sb_field *f)
{
        struct bch_sb_field_replicas *sb_r = field_to_type(f, replicas);
        struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
        struct bch_replicas_cpu *cpu_r = NULL;
        struct bch_replicas_entry *e;
        const char *err;
        unsigned i;

        for_each_replicas_entry(sb_r, e) {
                err = "invalid replicas entry: invalid data type";
                if (e->data_type >= BCH_DATA_NR)
                        goto err;

                err = "invalid replicas entry: no devices";
                if (!e->nr_devs)
                        goto err;

                err = "invalid replicas entry: bad nr_required";
                if (!e->nr_required ||
                    (e->nr_required > 1 &&
                     e->nr_required >= e->nr_devs))
                        goto err;

                err = "invalid replicas entry: invalid device";
                for (i = 0; i < e->nr_devs; i++)
                        if (!bch2_dev_exists(sb, mi, e->devs[i]))
                                goto err;
        }

        err = "cannot allocate memory";
        cpu_r = __bch2_sb_replicas_to_cpu_replicas(sb_r);
        if (!cpu_r)
                goto err;

        err = check_dup_replicas_entries(cpu_r);
err:
        kfree(cpu_r);
        return err;
}

static void bch2_sb_replicas_to_text(struct printbuf *out,
                                     struct bch_sb *sb,
                                     struct bch_sb_field *f)
{
        struct bch_sb_field_replicas *r = field_to_type(f, replicas);
        struct bch_replicas_entry *e;
        bool first = true;

        for_each_replicas_entry(r, e) {
                if (!first)
                        pr_buf(out, " ");
                first = false;

                replicas_entry_to_text(out, e);
        }
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas = {
        .validate       = bch2_sb_validate_replicas,
        .to_text        = bch2_sb_replicas_to_text,
};

static const char *bch2_sb_validate_replicas_v0(struct bch_sb *sb, struct bch_sb_field *f)
{
        struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
        struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
        struct bch_replicas_cpu *cpu_r = NULL;
        struct bch_replicas_entry_v0 *e;
        const char *err;
        unsigned i;

        for_each_replicas_entry_v0(sb_r, e) {
                err = "invalid replicas entry: invalid data type";
                if (e->data_type >= BCH_DATA_NR)
                        goto err;

                err = "invalid replicas entry: no devices";
                if (!e->nr_devs)
                        goto err;

                err = "invalid replicas entry: invalid device";
                for (i = 0; i < e->nr_devs; i++)
                        if (!bch2_dev_exists(sb, mi, e->devs[i]))
                                goto err;
        }

        err = "cannot allocate memory";
        cpu_r = __bch2_sb_replicas_v0_to_cpu_replicas(sb_r);
        if (!cpu_r)
                goto err;

        err = check_dup_replicas_entries(cpu_r);
err:
        kfree(cpu_r);
        return err;
}

const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0 = {
        .validate       = bch2_sb_validate_replicas_v0,
};

/* Query replicas: */

bool bch2_replicas_marked(struct bch_fs *c,
                          enum bch_data_type data_type,
                          struct bch_devs_list devs,
                          bool check_gc_replicas)
{
        struct bch_replicas_entry_padded search;

        if (!devs.nr)
                return true;

        memset(&search, 0, sizeof(search));

        devlist_to_replicas(devs, data_type, &search.e);

        return replicas_has_entry(c, &search.e, check_gc_replicas);
}

bool bch2_bkey_replicas_marked(struct bch_fs *c,
                               struct bkey_s_c k,
                               bool check_gc_replicas)
{
        struct bch_replicas_entry_padded search;
        struct bch_devs_list cached = bch2_bkey_cached_devs(k);
        unsigned i;

        memset(&search, 0, sizeof(search));

        for (i = 0; i < cached.nr; i++)
                if (!bch2_replicas_marked(c, BCH_DATA_CACHED,
                                          bch2_dev_list_single(cached.devs[i]),
                                          check_gc_replicas))
                        return false;

        bkey_to_replicas(k, &search.e);

        return search.e.nr_devs
                ? replicas_has_entry(c, &search.e, check_gc_replicas)
                : true;
}

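/*
 * For each data type, redundancy is the worst case over all entries of
 * (online devices - nr_required), i.e. how many more devices can go offline
 * before some data of that type becomes unavailable; nr_offline is the worst
 * case number of offline devices in any single entry.
 */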
struct replicas_status __bch2_replicas_status(struct bch_fs *c,
                                              struct bch_devs_mask online_devs)
{
        struct bch_sb_field_members *mi;
        struct bch_replicas_entry *e;
        struct bch_replicas_cpu *r;
        unsigned i, nr_online, nr_offline;
        struct replicas_status ret;

        memset(&ret, 0, sizeof(ret));

        for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
                ret.replicas[i].redundancy = INT_MAX;

        mi = bch2_sb_get_members(c->disk_sb.sb);
        rcu_read_lock();
        r = rcu_dereference(c->replicas);

        for_each_cpu_replicas_entry(r, e) {
                if (e->data_type >= ARRAY_SIZE(ret.replicas))
                        panic("e %p data_type %u\n", e, e->data_type);

                nr_online = nr_offline = 0;

                for (i = 0; i < e->nr_devs; i++) {
                        BUG_ON(!bch2_dev_exists(c->disk_sb.sb, mi,
                                                e->devs[i]));

                        if (test_bit(e->devs[i], online_devs.d))
                                nr_online++;
                        else
                                nr_offline++;
                }

                ret.replicas[e->data_type].redundancy =
                        min(ret.replicas[e->data_type].redundancy,
                            (int) nr_online - (int) e->nr_required);

                ret.replicas[e->data_type].nr_offline =
                        max(ret.replicas[e->data_type].nr_offline,
                            nr_offline);
        }

        rcu_read_unlock();

        for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
                if (ret.replicas[i].redundancy == INT_MAX)
                        ret.replicas[i].redundancy = 0;

        return ret;
}

struct replicas_status bch2_replicas_status(struct bch_fs *c)
{
        return __bch2_replicas_status(c, bch2_online_devs(c));
}

static bool have_enough_devs(struct replicas_status s,
                             enum bch_data_type type,
                             bool force_if_degraded,
                             bool force_if_lost)
{
        return (!s.replicas[type].nr_offline || force_if_degraded) &&
                (s.replicas[type].redundancy >= 0 || force_if_lost);
}

bool bch2_have_enough_devs(struct replicas_status s, unsigned flags)
{
        return (have_enough_devs(s, BCH_DATA_JOURNAL,
                                 flags & BCH_FORCE_IF_METADATA_DEGRADED,
                                 flags & BCH_FORCE_IF_METADATA_LOST) &&
                have_enough_devs(s, BCH_DATA_BTREE,
                                 flags & BCH_FORCE_IF_METADATA_DEGRADED,
                                 flags & BCH_FORCE_IF_METADATA_LOST) &&
                have_enough_devs(s, BCH_DATA_USER,
                                 flags & BCH_FORCE_IF_DATA_DEGRADED,
                                 flags & BCH_FORCE_IF_DATA_LOST));
}

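/*
 * Effective number of online replicas: redundancy counts how many failures
 * we can still tolerate, so +1 gives the replication level actually online.
 */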
int bch2_replicas_online(struct bch_fs *c, bool meta)
{
        struct replicas_status s = bch2_replicas_status(c);

        return (meta
                ? min(s.replicas[BCH_DATA_JOURNAL].redundancy,
                      s.replicas[BCH_DATA_BTREE].redundancy)
                : s.replicas[BCH_DATA_USER].redundancy) + 1;
}

unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
{
        struct bch_replicas_entry *e;
        struct bch_replicas_cpu *r;
        unsigned i, ret = 0;

        rcu_read_lock();
        r = rcu_dereference(c->replicas);

        for_each_cpu_replicas_entry(r, e)
                for (i = 0; i < e->nr_devs; i++)
                        if (e->devs[i] == ca->dev_idx)
                                ret |= 1 << e->data_type;

        rcu_read_unlock();

        return ret;
}