// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "alloc_background.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets.h"
#include "dirent.h"
#include "ec.h"
#include "error.h"
#include "fs-common.h"
#include "fsck.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "quota.h"
#include "recovery.h"
#include "replicas.h"
#include "super-io.h"

#include <linux/sort.h>
#include <linux/stat.h>

#define QSTR(n) { { { .len = strlen(n) } }, .name = n }

/* for -o reconstruct_alloc: */
static void drop_alloc_keys(struct journal_keys *keys)
{
	size_t src, dst;

	for (src = 0, dst = 0; src < keys->nr; src++)
		if (keys->d[src].btree_id != BTREE_ID_alloc)
			keys->d[dst++] = keys->d[src];

	keys->nr = dst;
}

/* iterate over keys read from the journal: */

static int __journal_key_cmp(enum btree_id	l_btree_id,
			     unsigned		l_level,
			     struct bpos	l_pos,
			     struct journal_key *r)
{
	return (cmp_int(l_btree_id,	r->btree_id) ?:
		cmp_int(l_level,	r->level) ?:
		bkey_cmp(l_pos, r->k->k.p));
}

static int journal_key_cmp(struct journal_key *l, struct journal_key *r)
{
	return (cmp_int(l->btree_id,	r->btree_id) ?:
		cmp_int(l->level,	r->level) ?:
		bkey_cmp(l->k->k.p,	r->k->k.p));
}

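/*
 * Binary search for the first journal key that sorts >= the given
 * btree/level/position; returns journal_keys->nr if no such key exists:
 */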
static size_t journal_key_search(struct journal_keys *journal_keys,
				 enum btree_id id, unsigned level,
				 struct bpos pos)
{
	size_t l = 0, r = journal_keys->nr, m;

	while (l < r) {
		m = l + ((r - l) >> 1);
		if (__journal_key_cmp(id, level, pos, &journal_keys->d[m]) > 0)
			l = m + 1;
		else
			r = m;
	}

	BUG_ON(l < journal_keys->nr &&
	       __journal_key_cmp(id, level, pos, &journal_keys->d[l]) > 0);

	BUG_ON(l &&
	       __journal_key_cmp(id, level, pos, &journal_keys->d[l - 1]) <= 0);

	return l;
}

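/*
 * Inserting into the journal keys array shifts every entry at or after the
 * insertion point up by one; bump the position of any live iterator that
 * would otherwise skip a key or see one twice:
 */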
static void journal_iter_fix(struct bch_fs *c, struct journal_iter *iter, unsigned idx)
{
	struct bkey_i *n = iter->keys->d[idx].k;
	struct btree_and_journal_iter *biter =
		container_of(iter, struct btree_and_journal_iter, journal);

	if (iter->idx > idx ||
	    (iter->idx == idx &&
	     biter->last &&
	     bkey_cmp(n->k.p, biter->unpacked.p) <= 0))
		iter->idx++;
}

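/*
 * Insert (or overwrite) a key in the sorted journal keys array, growing the
 * array if necessary and fixing up any active iterators. Keys inserted here
 * (as opposed to keys read from the journal) are marked @allocated so they
 * get kfree()d when the journal keys are freed:
 */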
int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bkey_i *k)
{
	struct journal_key n = {
		.btree_id	= id,
		.level		= level,
		.k		= k,
		.allocated	= true
	};
	struct journal_keys *keys = &c->journal_keys;
	struct journal_iter *iter;
	unsigned idx = journal_key_search(keys, id, level, k->k.p);

	if (idx < keys->nr &&
	    journal_key_cmp(&n, &keys->d[idx]) == 0) {
		if (keys->d[idx].allocated)
			kfree(keys->d[idx].k);
		keys->d[idx] = n;
		return 0;
	}

	if (keys->nr == keys->size) {
		struct journal_keys new_keys = {
			.nr			= keys->nr,
			.size			= keys->size * 2,
			.journal_seq_base	= keys->journal_seq_base,
		};

		new_keys.d = kvmalloc(sizeof(new_keys.d[0]) * new_keys.size, GFP_KERNEL);
		if (!new_keys.d)
			return -ENOMEM;

		memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
		kvfree(keys->d);
		*keys = new_keys;
	}

	array_insert_item(keys->d, keys->nr, idx, n);

	list_for_each_entry(iter, &c->journal_iters, list)
		journal_iter_fix(c, iter, idx);

	return 0;
}

int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bpos pos)
{
	struct bkey_i *whiteout =
		kmalloc(sizeof(struct bkey), GFP_KERNEL);
	int ret;

	if (!whiteout)
		return -ENOMEM;

	bkey_init(&whiteout->k);
	whiteout->k.p = pos;

	ret = bch2_journal_key_insert(c, id, level, whiteout);
	if (ret)
		kfree(whiteout);
	return ret;
}

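/*
 * Peek at the key at the iterator's current position, or NULL once we've
 * stepped past the last key for this btree/level:
 */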
static struct bkey_i *bch2_journal_iter_peek(struct journal_iter *iter)
{
	struct journal_key *k = iter->idx - iter->keys->nr
		? iter->keys->d + iter->idx : NULL;

	if (k &&
	    k->btree_id == iter->btree_id &&
	    k->level    == iter->level)
		return k->k;

	iter->idx = iter->keys->nr;
	return NULL;
}

static void bch2_journal_iter_advance(struct journal_iter *iter)
{
	if (iter->idx < iter->keys->nr)
		iter->idx++;
}

static void bch2_journal_iter_exit(struct journal_iter *iter)
{
	list_del(&iter->list);
}

static void bch2_journal_iter_init(struct bch_fs *c,
				   struct journal_iter *iter,
				   enum btree_id id, unsigned level,
				   struct bpos pos)
{
	iter->btree_id	= id;
	iter->level	= level;
	iter->keys	= &c->journal_keys;
	iter->idx	= journal_key_search(&c->journal_keys, id, level, pos);
	list_add(&iter->list, &c->journal_iters);
}

static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter)
{
	return bch2_btree_node_iter_peek_unpack(&iter->node_iter,
						iter->b, &iter->unpacked);
}

static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
{
	bch2_btree_node_iter_advance(&iter->node_iter, iter->b);
}

void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
{
	switch (iter->last) {
	case none:
		break;
	case btree:
		bch2_journal_iter_advance_btree(iter);
		break;
	case journal:
		bch2_journal_iter_advance(&iter->journal);
		break;
	}

	iter->last = none;
}

struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
{
	struct bkey_s_c ret;

	while (1) {
		struct bkey_s_c btree_k		=
			bch2_journal_iter_peek_btree(iter);
		struct bkey_s_c journal_k	=
			bkey_i_to_s_c(bch2_journal_iter_peek(&iter->journal));

		if (btree_k.k && journal_k.k) {
			int cmp = bkey_cmp(btree_k.k->p, journal_k.k->p);

			if (!cmp)
				bch2_journal_iter_advance_btree(iter);

			iter->last = cmp < 0 ? btree : journal;
		} else if (btree_k.k) {
			iter->last = btree;
		} else if (journal_k.k) {
			iter->last = journal;
		} else {
			iter->last = none;
			return bkey_s_c_null;
		}

		ret = iter->last == journal ? journal_k : btree_k;

		if (iter->b &&
		    bkey_cmp(ret.k->p, iter->b->data->max_key) > 0) {
			iter->journal.idx = iter->journal.keys->nr;
			iter->last = none;
			return bkey_s_c_null;
		}

		if (!bkey_deleted(ret.k))
			break;

		bch2_btree_and_journal_iter_advance(iter);
	}

	return ret;
}

struct bkey_s_c bch2_btree_and_journal_iter_next(struct btree_and_journal_iter *iter)
{
	bch2_btree_and_journal_iter_advance(iter);

	return bch2_btree_and_journal_iter_peek(iter);
}

void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *iter)
{
	bch2_journal_iter_exit(&iter->journal);
}

void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
						struct bch_fs *c,
						struct btree *b)
{
	memset(iter, 0, sizeof(*iter));

	iter->b = b;
	bch2_btree_node_iter_init_from_start(&iter->node_iter, iter->b);
	bch2_journal_iter_init(c, &iter->journal,
			       b->c.btree_id, b->c.level, b->data->min_key);
}

/* Walk btree, overlaying keys from the journal: */

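/*
 * Prefetch the next few child nodes an interior node iterator will visit;
 * @iter is passed by value so the caller's iterator position is unaffected:
 */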
static void btree_and_journal_iter_prefetch(struct bch_fs *c, struct btree *b,
					    struct btree_and_journal_iter iter)
{
	unsigned i = 0, nr = b->c.level > 1 ? 2 : 16;
	struct bkey_s_c k;
	struct bkey_buf tmp;

	BUG_ON(!b->c.level);

	bch2_bkey_buf_init(&tmp);

	while (i < nr &&
	       (k = bch2_btree_and_journal_iter_peek(&iter)).k) {
		bch2_bkey_buf_reassemble(&tmp, c, k);

		bch2_btree_node_prefetch(c, NULL, tmp.k,
					 b->c.btree_id, b->c.level - 1);

		bch2_btree_and_journal_iter_advance(&iter);
		i++;
	}

	bch2_bkey_buf_exit(&tmp, c);
}

static int bch2_btree_and_journal_walk_recurse(struct bch_fs *c, struct btree *b,
				struct journal_keys *journal_keys,
				enum btree_id btree_id,
				btree_walk_node_fn node_fn,
				btree_walk_key_fn key_fn)
{
	struct btree_and_journal_iter iter;
	struct bkey_s_c k;
	struct bkey_buf tmp;
	struct btree *child;
	int ret = 0;

	bch2_bkey_buf_init(&tmp);
	bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);

	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
		ret = key_fn(c, btree_id, b->c.level, k);
		if (ret)
			break;

		if (b->c.level) {
			bch2_bkey_buf_reassemble(&tmp, c, k);

			bch2_btree_and_journal_iter_advance(&iter);

			child = bch2_btree_node_get_noiter(c, tmp.k,
						b->c.btree_id, b->c.level - 1,
						false);

			ret = PTR_ERR_OR_ZERO(child);
			if (ret)
				break;

			btree_and_journal_iter_prefetch(c, b, iter);

			ret   = (node_fn ? node_fn(c, b) : 0) ?:
				bch2_btree_and_journal_walk_recurse(c, child,
					journal_keys, btree_id, node_fn, key_fn);
			six_unlock_read(&child->c.lock);

			if (ret)
				break;
		} else {
			bch2_btree_and_journal_iter_advance(&iter);
		}
	}

	bch2_btree_and_journal_iter_exit(&iter);
	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}

int bch2_btree_and_journal_walk(struct bch_fs *c, struct journal_keys *journal_keys,
				enum btree_id btree_id,
				btree_walk_node_fn node_fn,
				btree_walk_key_fn key_fn)
{
	struct btree *b = c->btree_roots[btree_id].b;
	int ret = 0;

	if (btree_node_fake(b))
		return 0;

	six_lock_read(&b->c.lock, NULL, NULL);
	ret   = (node_fn ? node_fn(c, b) : 0) ?:
		bch2_btree_and_journal_walk_recurse(c, b, journal_keys, btree_id,
						    node_fn, key_fn) ?:
		key_fn(c, btree_id, b->c.level + 1, bkey_i_to_s_c(&b->key));
	six_unlock_read(&b->c.lock);

	return ret;
}

/* sort and dedup all keys in the journal: */

void bch2_journal_entries_free(struct list_head *list)
{
	while (!list_empty(list)) {
		struct journal_replay *i =
			list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kvpfree(i, offsetof(struct journal_replay, j) +
			vstruct_bytes(&i->j));
	}
}

/*
 * When keys compare equal, oldest compares first:
 */
static int journal_sort_key_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = _l;
	const struct journal_key *r = _r;

	return  cmp_int(l->btree_id,	r->btree_id) ?:
		cmp_int(l->level,	r->level) ?:
		bkey_cmp(l->k->k.p, r->k->k.p) ?:
		cmp_int(l->journal_seq, r->journal_seq) ?:
		cmp_int(l->journal_offset, r->journal_offset);
}

void bch2_journal_keys_free(struct journal_keys *keys)
{
	struct journal_key *i;

	for (i = keys->d; i < keys->d + keys->nr; i++)
		if (i->allocated)
			kfree(i->k);

	kvfree(keys->d);
	keys->d = NULL;
	keys->nr = 0;
}

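/*
 * Two passes over the journal entries: first count the keys so the array can
 * be sized, then fill it in. After sorting, updates to the same position are
 * deduplicated, keeping only the newest (highest journal seq/offset):
 */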
static struct journal_keys journal_keys_sort(struct list_head *journal_entries)
{
	struct journal_replay *i;
	struct jset_entry *entry;
	struct bkey_i *k, *_n;
	struct journal_keys keys = { NULL };
	struct journal_key *src, *dst;
	size_t nr_keys = 0;

	if (list_empty(journal_entries))
		return keys;

	list_for_each_entry(i, journal_entries, list) {
		if (i->ignore)
			continue;

		if (!keys.journal_seq_base)
			keys.journal_seq_base = le64_to_cpu(i->j.seq);

		for_each_jset_key(k, _n, entry, &i->j)
			nr_keys++;
	}

	keys.size = roundup_pow_of_two(nr_keys);

	keys.d = kvmalloc(sizeof(keys.d[0]) * keys.size, GFP_KERNEL);
	if (!keys.d)
		goto err;

	list_for_each_entry(i, journal_entries, list) {
		if (i->ignore)
			continue;

		BUG_ON(le64_to_cpu(i->j.seq) - keys.journal_seq_base > U32_MAX);

		for_each_jset_key(k, _n, entry, &i->j)
			keys.d[keys.nr++] = (struct journal_key) {
				.btree_id	= entry->btree_id,
				.level		= entry->level,
				.k		= k,
				.journal_seq	= le64_to_cpu(i->j.seq) -
					keys.journal_seq_base,
				.journal_offset	= k->_data - i->j._data,
			};
	}

	sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_key_cmp, NULL);

	src = dst = keys.d;
	while (src < keys.d + keys.nr) {
		while (src + 1 < keys.d + keys.nr &&
		       src[0].btree_id	== src[1].btree_id &&
		       src[0].level	== src[1].level &&
		       !bkey_cmp(src[0].k->k.p, src[1].k->k.p))
			src++;

		*dst++ = *src++;
	}

	keys.nr = dst - keys.d;
err:
	return keys;
}

/* journal replay: */

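/*
 * Advance the replay position, dropping journal pins for every sequence
 * number we're done replaying:
 */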
static void replay_now_at(struct journal *j, u64 seq)
{
	BUG_ON(seq < j->replay_journal_seq);
	BUG_ON(seq > j->replay_journal_seq_end);

	while (j->replay_journal_seq < seq)
		bch2_journal_pin_put(j, j->replay_journal_seq++);
}

static int __bch2_journal_replay_key(struct btree_trans *trans,
				     enum btree_id id, unsigned level,
				     struct bkey_i *k)
{
	struct btree_iter *iter;
	int ret;

	iter = bch2_trans_get_node_iter(trans, id, k->k.p,
					BTREE_MAX_DEPTH, level,
					BTREE_ITER_INTENT);

	/*
	 * iter->flags & BTREE_ITER_IS_EXTENTS triggers the update path to run
	 * extent_handle_overwrites() and extent_update_to_keys() - but we don't
	 * want that here, journal replay is supposed to treat extents like
	 * regular keys:
	 */
	__bch2_btree_iter_set_pos(iter, k->k.p, false);

	ret   = bch2_btree_iter_traverse(iter) ?:
		bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN);
	bch2_trans_iter_put(trans, iter);
	return ret;
}

static int bch2_journal_replay_key(struct bch_fs *c, struct journal_key *k)
{
	unsigned commit_flags = BTREE_INSERT_NOFAIL|
		BTREE_INSERT_LAZY_RW;

	if (!k->allocated)
		commit_flags |= BTREE_INSERT_JOURNAL_REPLAY;

	return bch2_trans_do(c, NULL, NULL, commit_flags,
			     __bch2_journal_replay_key(&trans, k->btree_id, k->level, k->k));
}

static int __bch2_alloc_replay_key(struct btree_trans *trans, struct bkey_i *k)
{
	struct btree_iter *iter;
	int ret;

	iter = bch2_trans_get_iter(trans, BTREE_ID_alloc, k->k.p,
				   BTREE_ITER_CACHED|
				   BTREE_ITER_CACHED_NOFILL|
				   BTREE_ITER_INTENT);
	ret = bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN);
	bch2_trans_iter_put(trans, iter);
	return ret;
}

static int bch2_alloc_replay_key(struct bch_fs *c, struct bkey_i *k)
{
	return bch2_trans_do(c, NULL, NULL,
			     BTREE_INSERT_NOFAIL|
			     BTREE_INSERT_USE_RESERVE|
			     BTREE_INSERT_LAZY_RW|
			     BTREE_INSERT_JOURNAL_REPLAY,
			__bch2_alloc_replay_key(&trans, k));
}

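/*
 * Sort order for replay: interior node updates first (note the reversed level
 * comparison), then by journal sequence number so updates to the same
 * position are replayed in the order they were journalled:
 */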
static int journal_sort_seq_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = _l;
	const struct journal_key *r = _r;

	return  cmp_int(r->level,	l->level) ?:
		cmp_int(l->journal_seq, r->journal_seq) ?:
		cmp_int(l->btree_id,	r->btree_id) ?:
		bkey_cmp(l->k->k.p,	r->k->k.p);
}

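/*
 * Replay runs in three phases: alloc keys (which only update the btree key
 * cache), then interior node updates, then - once journal reclaim can run -
 * leaf node updates:
 */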
static int bch2_journal_replay(struct bch_fs *c,
			       struct journal_keys keys)
{
	struct journal *j = &c->journal;
	struct journal_key *i;
	u64 seq;
	int ret;

	sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_seq_cmp, NULL);

	if (keys.nr)
		replay_now_at(j, keys.journal_seq_base);

	seq = j->replay_journal_seq;

	/*
	 * First replay updates to the alloc btree - these will only update the
	 * btree key cache:
	 */
	for_each_journal_key(keys, i) {
		cond_resched();

		if (!i->level && i->btree_id == BTREE_ID_alloc) {
			j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
			ret = bch2_alloc_replay_key(c, i->k);
			if (ret)
				goto err;
		}
	}

	/*
	 * Next replay updates to interior btree nodes:
	 */
	for_each_journal_key(keys, i) {
		cond_resched();

		if (i->level) {
			j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
			ret = bch2_journal_replay_key(c, i);
			if (ret)
				goto err;
		}
	}

	/*
	 * Now that the btree is in a consistent state, we can start journal
	 * reclaim (which will flush entries from the btree key cache back to
	 * the btree):
	 */
	set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
	set_bit(JOURNAL_RECLAIM_STARTED, &j->flags);
	journal_reclaim_kick(j);

	j->replay_journal_seq = seq;

	/*
	 * Now replay leaf node updates:
	 */
	for_each_journal_key(keys, i) {
		cond_resched();

		if (i->level || i->btree_id == BTREE_ID_alloc)
			continue;

		replay_now_at(j, keys.journal_seq_base + i->journal_seq);

		ret = bch2_journal_replay_key(c, i);
		if (ret)
			goto err;
	}

	replay_now_at(j, j->replay_journal_seq_end);
	j->replay_journal_seq = 0;

	bch2_journal_set_replay_done(j);
	bch2_journal_flush_all_pins(j);
	return bch2_journal_error(j);
err:
	bch_err(c, "journal replay: error %d while replaying key at btree %s level %u",
		ret, bch2_btree_ids[i->btree_id], i->level);
	return ret;
}

/* journal replay early: */

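/*
 * Non-btree-key journal entries (btree roots, usage info, blacklists, IO
 * clocks) are applied before the main replay, directly to in-memory state:
 */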
static int journal_replay_entry_early(struct bch_fs *c,
				      struct jset_entry *entry)
{
	int ret = 0;

	switch (entry->type) {
	case BCH_JSET_ENTRY_btree_root: {
		struct btree_root *r;

		if (entry->btree_id >= BTREE_ID_NR) {
			bch_err(c, "filesystem has unknown btree type %u",
				entry->btree_id);
			return -EINVAL;
		}

		r = &c->btree_roots[entry->btree_id];

		if (entry->u64s) {
			r->level = entry->level;
			bkey_copy(&r->key, &entry->start[0]);
			r->error = 0;
		} else {
			r->error = -EIO;
		}
		r->alive = true;
		break;
	}
	case BCH_JSET_ENTRY_usage: {
		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);

		switch (entry->btree_id) {
		case FS_USAGE_RESERVED:
			if (entry->level < BCH_REPLICAS_MAX)
				c->usage_base->persistent_reserved[entry->level] =
					le64_to_cpu(u->v);
			break;
		case FS_USAGE_INODES:
			c->usage_base->nr_inodes = le64_to_cpu(u->v);
			break;
		case FS_USAGE_KEY_VERSION:
			atomic64_set(&c->key_version,
				     le64_to_cpu(u->v));
			break;
		}

		break;
	}
	case BCH_JSET_ENTRY_data_usage: {
		struct jset_entry_data_usage *u =
			container_of(entry, struct jset_entry_data_usage, entry);

		ret = bch2_replicas_set_usage(c, &u->r,
					      le64_to_cpu(u->v));
		break;
	}
	case BCH_JSET_ENTRY_dev_usage: {
		struct jset_entry_dev_usage *u =
			container_of(entry, struct jset_entry_dev_usage, entry);
		struct bch_dev *ca = bch_dev_bkey_exists(c, u->dev);
		unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
		unsigned nr_types = (bytes - sizeof(struct jset_entry_dev_usage)) /
			sizeof(struct jset_entry_dev_usage_type);
		unsigned i;

		ca->usage_base->buckets_ec		= le64_to_cpu(u->buckets_ec);
		ca->usage_base->buckets_unavailable	= le64_to_cpu(u->buckets_unavailable);

		for (i = 0; i < nr_types; i++) {
			ca->usage_base->d[i].buckets	= le64_to_cpu(u->d[i].buckets);
			ca->usage_base->d[i].sectors	= le64_to_cpu(u->d[i].sectors);
			ca->usage_base->d[i].fragmented	= le64_to_cpu(u->d[i].fragmented);
		}

		break;
	}
	case BCH_JSET_ENTRY_blacklist: {
		struct jset_entry_blacklist *bl_entry =
			container_of(entry, struct jset_entry_blacklist, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->seq),
				le64_to_cpu(bl_entry->seq) + 1);
		break;
	}
	case BCH_JSET_ENTRY_blacklist_v2: {
		struct jset_entry_blacklist_v2 *bl_entry =
			container_of(entry, struct jset_entry_blacklist_v2, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->start),
				le64_to_cpu(bl_entry->end) + 1);
		break;
	}
	case BCH_JSET_ENTRY_clock: {
		struct jset_entry_clock *clock =
			container_of(entry, struct jset_entry_clock, entry);

		atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
	}
	}

	return ret;
}

static int journal_replay_early(struct bch_fs *c,
				struct bch_sb_field_clean *clean,
				struct list_head *journal)
{
	struct journal_replay *i;
	struct jset_entry *entry;
	int ret;

	if (clean) {
		for (entry = clean->start;
		     entry != vstruct_end(&clean->field);
		     entry = vstruct_next(entry)) {
			ret = journal_replay_entry_early(c, entry);
			if (ret)
				return ret;
		}
	} else {
		list_for_each_entry(i, journal, list) {
			if (i->ignore)
				continue;

			vstruct_for_each(&i->j, entry) {
				ret = journal_replay_entry_early(c, entry);
				if (ret)
					return ret;
			}
		}
	}

	bch2_fs_usage_initialize(c);

	return 0;
}

/* sb clean section: */

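/*
 * Look up the btree root entry for @id in either the superblock clean section
 * or a journal entry, returning the key and the level it was found at:
 */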
static struct bkey_i *btree_root_find(struct bch_fs *c,
				      struct bch_sb_field_clean *clean,
				      struct jset *j,
				      enum btree_id id, unsigned *level)
{
	struct bkey_i *k;
	struct jset_entry *entry, *start, *end;

	if (clean) {
		start = clean->start;
		end = vstruct_end(&clean->field);
	} else {
		start = j->start;
		end = vstruct_last(j);
	}

	for (entry = start; entry < end; entry = vstruct_next(entry))
		if (entry->type == BCH_JSET_ENTRY_btree_root &&
		    entry->btree_id == id)
			goto found;

	return NULL;
found:
	if (!entry->u64s)
		return ERR_PTR(-EINVAL);

	k = entry->start;
	*level = entry->level;
	return k;
}

static int verify_superblock_clean(struct bch_fs *c,
				   struct bch_sb_field_clean **cleanp,
				   struct jset *j)
{
	unsigned i;
	struct bch_sb_field_clean *clean = *cleanp;
	int ret = 0;

	if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c,
			"superblock journal seq (%llu) doesn't match journal (%llu) after clean shutdown",
			le64_to_cpu(clean->journal_seq),
			le64_to_cpu(j->seq))) {
		kfree(clean);
		*cleanp = NULL;
		return 0;
	}

	for (i = 0; i < BTREE_ID_NR; i++) {
		char buf1[200], buf2[200];
		struct bkey_i *k1, *k2;
		unsigned l1 = 0, l2 = 0;

		k1 = btree_root_find(c, clean, NULL, i, &l1);
		k2 = btree_root_find(c, NULL, j, i, &l2);

		if (!k1 && !k2)
			continue;

		mustfix_fsck_err_on(!k1 || !k2 ||
				    IS_ERR(k1) ||
				    IS_ERR(k2) ||
				    k1->k.u64s != k2->k.u64s ||
				    memcmp(k1, k2, bkey_bytes(k1)) ||
				    l1 != l2, c,
			"superblock btree root %u doesn't match journal after clean shutdown\n"
			"sb:      l=%u %s\n"
			"journal: l=%u %s\n", i,
			l1, (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(k1)), buf1),
			l2, (bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(k2)), buf2));
	}
fsck_err:
	return ret;
}

static struct bch_sb_field_clean *read_superblock_clean(struct bch_fs *c)
{
	struct bch_sb_field_clean *clean, *sb_clean;
	int ret;

	mutex_lock(&c->sb_lock);
	sb_clean = bch2_sb_get_clean(c->disk_sb.sb);

	if (fsck_err_on(!sb_clean, c,
			"superblock marked clean but clean section not present")) {
		SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
		c->sb.clean = false;
		mutex_unlock(&c->sb_lock);
		return NULL;
	}

	clean = kmemdup(sb_clean, vstruct_bytes(&sb_clean->field),
			GFP_KERNEL);
	if (!clean) {
		mutex_unlock(&c->sb_lock);
		return ERR_PTR(-ENOMEM);
	}

	if (le16_to_cpu(c->disk_sb.sb->version) <
	    bcachefs_metadata_version_bkey_renumber)
		bch2_sb_clean_renumber(clean, READ);

	mutex_unlock(&c->sb_lock);

	return clean;
fsck_err:
	mutex_unlock(&c->sb_lock);
	return ERR_PTR(ret);
}

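/*
 * Read in the btree roots found by journal_replay_entry_early(); errors on
 * the alloc btree are only FSCK_CAN_IGNORE since we clear the alloc info
 * compat bit and regenerate alloc info via mark and sweep:
 */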
static int read_btree_roots(struct bch_fs *c)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < BTREE_ID_NR; i++) {
		struct btree_root *r = &c->btree_roots[i];

		if (!r->alive)
			continue;

		if (i == BTREE_ID_alloc &&
		    c->opts.reconstruct_alloc) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
			continue;
		}

		if (r->error) {
			__fsck_err(c, i == BTREE_ID_alloc
				   ? FSCK_CAN_IGNORE : 0,
				   "invalid btree root %s",
				   bch2_btree_ids[i]);
			if (i == BTREE_ID_alloc)
				c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
		}

		ret = bch2_btree_root_read(c, i, &r->key, r->level);
		if (ret) {
			__fsck_err(c, i == BTREE_ID_alloc
				   ? FSCK_CAN_IGNORE : 0,
				   "error reading btree root %s",
				   bch2_btree_ids[i]);
			if (i == BTREE_ID_alloc)
				c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
		}
	}

	for (i = 0; i < BTREE_ID_NR; i++)
		if (!c->btree_roots[i].b)
			bch2_btree_root_alloc(c, i);
fsck_err:
	return ret;
}

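/*
 * Main recovery path: read the superblock clean section and/or the journal,
 * apply early (non-key) entries, read btree roots and alloc/stripe info, run
 * mark and sweep GC if needed, replay the journal, then run fsck and quota
 * reads before updating the superblock:
 */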
int bch2_fs_recovery(struct bch_fs *c)
{
	const char *err = "cannot allocate memory";
	struct bch_sb_field_clean *clean = NULL;
	struct jset *last_journal_entry = NULL;
	u64 blacklist_seq, journal_seq;
	bool write_sb = false;
	int ret;

	if (c->sb.clean)
		clean = read_superblock_clean(c);
	ret = PTR_ERR_OR_ZERO(clean);
	if (ret)
		goto err;

	if (c->sb.clean)
		bch_info(c, "recovering from clean shutdown, journal seq %llu",
			 le64_to_cpu(clean->journal_seq));

	if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) {
		bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported");
		ret = -EINVAL;
		goto err;
	}

	if (!(c->sb.features & (1ULL << BCH_FEATURE_alloc_v2))) {
		bch_info(c, "alloc_v2 feature bit not set, fsck required");
		c->opts.fsck = true;
		c->opts.fix_errors = FSCK_OPT_YES;
		c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_alloc_v2;
	}

	if (!c->replicas.entries ||
	    c->opts.rebuild_replicas) {
		bch_info(c, "building replicas info");
		set_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
	}

	ret = bch2_blacklist_table_initialize(c);
	if (ret) {
		bch_err(c, "error initializing blacklist table");
		goto err;
	}

	if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) {
		struct journal_replay *i;

		ret = bch2_journal_read(c, &c->journal_entries,
					&blacklist_seq, &journal_seq);
		if (ret)
			goto err;

		list_for_each_entry_reverse(i, &c->journal_entries, list)
			if (!i->ignore) {
				last_journal_entry = &i->j;
				break;
			}

		if (mustfix_fsck_err_on(c->sb.clean &&
					last_journal_entry &&
					!journal_entry_empty(last_journal_entry), c,
				"filesystem marked clean but journal not empty")) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
			SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
			c->sb.clean = false;
		}

		if (!last_journal_entry) {
			fsck_err_on(!c->sb.clean, c, "no journal entries found");
			goto use_clean;
		}

		c->journal_keys = journal_keys_sort(&c->journal_entries);
		if (!c->journal_keys.d) {
			ret = -ENOMEM;
			goto err;
		}

		if (c->sb.clean && last_journal_entry) {
			ret = verify_superblock_clean(c, &clean,
						      last_journal_entry);
			if (ret)
				goto err;
		}
	} else {
use_clean:
		if (!clean) {
			bch_err(c, "no superblock clean section found");
			ret = BCH_FSCK_REPAIR_IMPOSSIBLE;
			goto err;
		}
		blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1;
	}

	if (!c->sb.clean &&
	    !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) {
		bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix");
		ret = -EINVAL;
		goto err;
	}

	if (c->opts.reconstruct_alloc) {
		c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
		drop_alloc_keys(&c->journal_keys);
	}

	ret = journal_replay_early(c, clean, &c->journal_entries);
	if (ret)
		goto err;

	/*
	 * After an unclean shutdown, skip the next few journal sequence
	 * numbers: they may have been referenced by btree writes that happened
	 * before their corresponding journal writes, and those btree writes
	 * need to be ignored, by skipping and blacklisting those sequence
	 * numbers:
	 */
	if (!c->sb.clean)
		journal_seq += 8;

	if (blacklist_seq != journal_seq) {
		ret = bch2_journal_seq_blacklist_add(c,
					blacklist_seq, journal_seq);
		if (ret) {
			bch_err(c, "error creating new journal seq blacklist entry");
			goto err;
		}
	}

	ret = bch2_fs_journal_start(&c->journal, journal_seq,
				    &c->journal_entries);
	if (ret)
		goto err;

	ret = read_btree_roots(c);
	if (ret)
		goto err;

	bch_verbose(c, "starting alloc read");
	err = "error reading allocation information";
	ret = bch2_alloc_read(c, &c->journal_keys);
	if (ret)
		goto err;
	bch_verbose(c, "alloc read done");

	bch_verbose(c, "starting stripes_read");
	err = "error reading stripes";
	ret = bch2_stripes_read(c, &c->journal_keys);
	if (ret)
		goto err;
	bch_verbose(c, "stripes_read done");

	set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);

	if (c->opts.fsck ||
	    !(c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_INFO)) ||
	    !(c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_METADATA)) ||
	    test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags)) {
		bch_info(c, "starting mark and sweep");
		err = "error in mark and sweep";
		ret = bch2_gc(c, true);
		if (ret)
			goto err;
		bch_verbose(c, "mark and sweep done");
	}

	bch2_stripes_heap_start(c);

	clear_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
	set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);

	/*
	 * Skip past versions that might have possibly been used (as nonces),
	 * but hadn't had their pointers written:
	 */
	if (c->sb.encryption_type && !c->sb.clean)
		atomic64_add(1 << 16, &c->key_version);

	if (c->opts.norecovery)
		goto out;

	bch_verbose(c, "starting journal replay");
	err = "journal replay failed";
	ret = bch2_journal_replay(c, c->journal_keys);
	if (ret)
		goto err;
	bch_verbose(c, "journal replay done");

	if (test_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags) &&
	    !c->opts.nochanges) {
		/*
		 * note that even when filesystem was clean there might be work
		 * to do here, if we ran gc (because of fsck) which recalculated
		 * oldest_gen:
		 */
		bch_verbose(c, "writing allocation info");
		err = "error writing out alloc info";
		ret = bch2_stripes_write(c, BTREE_INSERT_LAZY_RW) ?:
			bch2_alloc_write(c, BTREE_INSERT_LAZY_RW);
		if (ret) {
			bch_err(c, "error writing alloc info");
			goto err;
		}
		bch_verbose(c, "alloc write done");
	}

	if (!c->sb.clean) {
		if (!(c->sb.features & (1ULL << BCH_FEATURE_atomic_nlink))) {
			bch_info(c, "checking inode link counts");
			err = "error in recovery";
			ret = bch2_fsck_inode_nlink(c);
			if (ret)
				goto err;
			bch_verbose(c, "check inodes done");
		} else {
			bch_verbose(c, "checking for deleted inodes");
			err = "error in recovery";
			ret = bch2_fsck_walk_inodes_only(c);
			if (ret)
				goto err;
			bch_verbose(c, "check inodes done");
		}
	}

	if (c->opts.fsck) {
		bch_info(c, "starting fsck");
		err = "error in fsck";
		ret = bch2_fsck_full(c);
		if (ret)
			goto err;
		bch_verbose(c, "fsck done");
	}

	if (enabled_qtypes(c)) {
		bch_verbose(c, "reading quotas");
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
		bch_verbose(c, "quotas done");
	}

	mutex_lock(&c->sb_lock);
	if (c->opts.version_upgrade) {
		if (c->sb.version < bcachefs_metadata_version_new_versioning)
			c->disk_sb.sb->version_min =
				cpu_to_le16(bcachefs_metadata_version_min);
		c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
		c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALL;
		write_sb = true;
	}

	if (!test_bit(BCH_FS_ERROR, &c->flags)) {
		c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_FEAT_ALLOC_INFO;
		write_sb = true;
	}

	if (c->opts.fsck &&
	    !test_bit(BCH_FS_ERROR, &c->flags)) {
		c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_atomic_nlink;
		SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
		write_sb = true;
	}

	if (write_sb)
		bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	if (c->journal_seq_blacklist_table &&
	    c->journal_seq_blacklist_table->nr > 128)
		queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work);
out:
	ret = 0;
err:
fsck_err:
	set_bit(BCH_FS_FSCK_DONE, &c->flags);
	bch2_flush_fsck_errs(c);

	if (!c->opts.keep_journal) {
		bch2_journal_keys_free(&c->journal_keys);
		bch2_journal_entries_free(&c->journal_entries);
	}
	kfree(clean);
	if (ret)
		bch_err(c, "Error in recovery: %s (%i)", err, ret);
	else
		bch_verbose(c, "ret %i", ret);
	return ret;
}

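/*
 * Format-time initialization of a new filesystem: write the superblock,
 * allocate journal buckets and btree roots, then create the root directory
 * and lost+found:
 */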
int bch2_fs_initialize(struct bch_fs *c)
{
	struct bch_inode_unpacked root_inode, lostfound_inode;
	struct bkey_inode_buf packed_inode;
	struct qstr lostfound = QSTR("lost+found");
	const char *err = "cannot allocate memory";
	struct bch_dev *ca;
	LIST_HEAD(journal);
	unsigned i;
	int ret;

	bch_notice(c, "initializing new filesystem");

	mutex_lock(&c->sb_lock);
	for_each_online_member(ca, c, i)
		bch2_mark_dev_superblock(c, ca, 0);
	mutex_unlock(&c->sb_lock);

	mutex_lock(&c->sb_lock);
	c->disk_sb.sb->version = c->disk_sb.sb->version_min =
		cpu_to_le16(bcachefs_metadata_version_current);
	c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_atomic_nlink;
	c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALL;

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);
	set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);

	for (i = 0; i < BTREE_ID_NR; i++)
		bch2_btree_root_alloc(c, i);

	set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
	set_bit(JOURNAL_RECLAIM_STARTED, &c->journal.flags);

	err = "unable to allocate journal buckets";
	for_each_online_member(ca, c, i) {
		ret = bch2_dev_journal_alloc(ca);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			goto err;
		}
	}

	/*
	 * journal_res_get() will crash if called before this has
	 * set up the journal.pin FIFO and journal.cur pointer:
	 */
	bch2_fs_journal_start(&c->journal, 1, &journal);
	bch2_journal_set_replay_done(&c->journal);

	err = "error going read-write";
	ret = bch2_fs_read_write_early(c);
	if (ret)
		goto err;

	/*
	 * Write out the superblock and journal buckets, now that we can do
	 * btree updates
	 */
	err = "error writing alloc info";
	ret = bch2_alloc_write(c, 0);
	if (ret)
		goto err;

	bch2_inode_init(c, &root_inode, 0, 0,
			S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0, NULL);
	root_inode.bi_inum = BCACHEFS_ROOT_INO;
	bch2_inode_pack(c, &packed_inode, &root_inode);

	err = "error creating root directory";
	ret = bch2_btree_insert(c, BTREE_ID_inodes,
				&packed_inode.inode.k_i,
				NULL, NULL, 0);
	if (ret)
		goto err;

	bch2_inode_init_early(c, &lostfound_inode);

	err = "error creating lost+found";
	ret = bch2_trans_do(c, NULL, NULL, 0,
		bch2_create_trans(&trans, BCACHEFS_ROOT_INO,
				  &root_inode, &lostfound_inode,
				  &lostfound,
				  0, 0, S_IFDIR|0700, 0,
				  NULL, NULL));
	if (ret)
		goto err;

	if (enabled_qtypes(c)) {
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
	}

	err = "error writing first journal entry";
	ret = bch2_journal_meta(&c->journal);
	if (ret)
		goto err;

	mutex_lock(&c->sb_lock);
	SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
	SET_BCH_SB_CLEAN(c->disk_sb.sb, false);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
err:
	pr_err("Error initializing new filesystem: %s (%i)", err, ret);
	return ret;
}