// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "checksum.h"
#include "super.h"
#include "super-io.h"

#include <linux/crc32c.h>
#include <linux/crypto.h>
#include <linux/xxhash.h>
#include <linux/key.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <crypto/chacha.h>
#include <crypto/hash.h>
#include <crypto/poly1305.h>
#include <crypto/skcipher.h>
#include <keys/user-type.h>

/*
 * struct bch2_checksum_state abstracts the checksum state accumulated over
 * multiple pages: data can be fed in page-sized chunks without the checksum
 * algorithm losing its state. For native checksum algorithms (like CRC), a
 * running seed value is enough; for hash-like algorithms (xxhash), the full
 * internal state needs to be stored.
 */

struct bch2_checksum_state {
        union {
                u64 seed;
                struct xxh64_state h64state;
        };
        unsigned int type;
};

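/*
 * Typical use of the state helpers below (a minimal sketch; this is how
 * bch2_checksum() and __bch2_checksum_bio() drive them for the non-encrypted
 * checksum types):
 *
 *      struct bch2_checksum_state state;
 *
 *      state.type = type;
 *      bch2_checksum_init(&state);
 *      bch2_checksum_update(&state, data, len);
 *      csum = bch2_checksum_final(&state);
 */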
static void bch2_checksum_init(struct bch2_checksum_state *state)
{
        switch (state->type) {
        case BCH_CSUM_none:
        case BCH_CSUM_crc32c:
        case BCH_CSUM_crc64:
                state->seed = 0;
                break;
        case BCH_CSUM_crc32c_nonzero:
                state->seed = U32_MAX;
                break;
        case BCH_CSUM_crc64_nonzero:
                state->seed = U64_MAX;
                break;
        case BCH_CSUM_xxhash:
                xxh64_reset(&state->h64state, 0);
                break;
        default:
                BUG();
        }
}

static u64 bch2_checksum_final(const struct bch2_checksum_state *state)
{
        switch (state->type) {
        case BCH_CSUM_none:
        case BCH_CSUM_crc32c:
        case BCH_CSUM_crc64:
                return state->seed;
        case BCH_CSUM_crc32c_nonzero:
                return state->seed ^ U32_MAX;
        case BCH_CSUM_crc64_nonzero:
                return state->seed ^ U64_MAX;
        case BCH_CSUM_xxhash:
                return xxh64_digest(&state->h64state);
        default:
                BUG();
        }
}

static void bch2_checksum_update(struct bch2_checksum_state *state, const void *data, size_t len)
{
        switch (state->type) {
        case BCH_CSUM_none:
                return;
        case BCH_CSUM_crc32c_nonzero:
        case BCH_CSUM_crc32c:
                state->seed = crc32c(state->seed, data, len);
                break;
        case BCH_CSUM_crc64_nonzero:
        case BCH_CSUM_crc64:
                state->seed = crc64_be(state->seed, data, len);
                break;
        case BCH_CSUM_xxhash:
                xxh64_update(&state->h64state, data, len);
                break;
        default:
                BUG();
        }
}

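/*
 * do_encrypt_sg() runs ChaCha20 in place over a scatterlist via the
 * synchronous skcipher API, using the nonce as the IV; do_encrypt() wraps it
 * for plain buffers, building a multi-page scatterlist when the buffer is
 * vmalloc'd (and therefore not physically contiguous).
 */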
static inline int do_encrypt_sg(struct crypto_sync_skcipher *tfm,
                                struct nonce nonce,
                                struct scatterlist *sg, size_t len)
{
        SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
        int ret;

        skcipher_request_set_sync_tfm(req, tfm);
        skcipher_request_set_crypt(req, sg, sg, len, nonce.d);

        ret = crypto_skcipher_encrypt(req);
        if (ret)
                pr_err("got error %i from crypto_skcipher_encrypt()", ret);

        return ret;
}

static inline int do_encrypt(struct crypto_sync_skcipher *tfm,
                              struct nonce nonce,
                              void *buf, size_t len)
{
        if (!is_vmalloc_addr(buf)) {
                struct scatterlist sg;

                sg_init_table(&sg, 1);
                sg_set_page(&sg, virt_to_page(buf), len, offset_in_page(buf));
                return do_encrypt_sg(tfm, nonce, &sg, len);
        } else {
                unsigned pages = buf_pages(buf, len);
                struct scatterlist *sg;
                size_t orig_len = len;
                int ret, i;

                sg = kmalloc_array(pages, sizeof(*sg), GFP_KERNEL);
                if (!sg)
                        return -ENOMEM;

                sg_init_table(sg, pages);

                for (i = 0; i < pages; i++) {
                        unsigned offset = offset_in_page(buf);
                        unsigned pg_len = min(len, PAGE_SIZE - offset);

                        sg_set_page(sg + i, vmalloc_to_page(buf), pg_len, offset);
                        buf += pg_len;
                        len -= pg_len;
                }

                ret = do_encrypt_sg(tfm, nonce, sg, orig_len);
                kfree(sg);
                return ret;
        }
}

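/*
 * Encrypt @buf in place with ChaCha20 under @key. Since ChaCha20 is a stream
 * cipher (XOR with a keystream), the same call also decrypts; this is how the
 * superblock key is wrapped and unwrapped with the user's key below.
 */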
int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
                            void *buf, size_t len)
{
        struct crypto_sync_skcipher *chacha20 =
                crypto_alloc_sync_skcipher("chacha20", 0, 0);
        int ret;

        if (IS_ERR(chacha20)) {
                pr_err("error requesting chacha20 module: %li", PTR_ERR(chacha20));
                return PTR_ERR(chacha20);
        }

        ret = crypto_skcipher_setkey(&chacha20->base,
                                     (void *) key, sizeof(*key));
        if (ret) {
                pr_err("crypto_skcipher_setkey() error: %i", ret);
                goto err;
        }

        ret = do_encrypt(chacha20, nonce, buf, len);
err:
        crypto_free_sync_skcipher(chacha20);
        return ret;
}

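/*
 * Derive the per-call Poly1305 key by encrypting a block of zeroes with
 * ChaCha20 under the given nonce (with BCH_NONCE_POLY folded in), then feed
 * that key to the poly1305 shash: the standard ChaCha20-Poly1305
 * construction, with the MAC keyed per nonce.
 */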
static int gen_poly_key(struct bch_fs *c, struct shash_desc *desc,
                        struct nonce nonce)
{
        u8 key[POLY1305_KEY_SIZE];
        int ret;

        nonce.d[3] ^= BCH_NONCE_POLY;

        memset(key, 0, sizeof(key));
        ret = do_encrypt(c->chacha20, nonce, key, sizeof(key));
        if (ret)
                return ret;

        desc->tfm = c->poly1305;
        crypto_shash_init(desc);
        crypto_shash_update(desc, key, sizeof(key));
        return 0;
}

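/*
 * bch2_checksum() dispatches on the checksum type: plain types go through the
 * bch2_checksum_state helpers above, while the chacha20_poly1305 types compute
 * a Poly1305 MAC over the data and truncate it to bch_crc_bytes[type]
 * (80 or 128 bits).
 */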
struct bch_csum bch2_checksum(struct bch_fs *c, unsigned type,
                              struct nonce nonce, const void *data, size_t len)
{
        switch (type) {
        case BCH_CSUM_none:
        case BCH_CSUM_crc32c_nonzero:
        case BCH_CSUM_crc64_nonzero:
        case BCH_CSUM_crc32c:
        case BCH_CSUM_xxhash:
        case BCH_CSUM_crc64: {
                struct bch2_checksum_state state;

                state.type = type;

                bch2_checksum_init(&state);
                bch2_checksum_update(&state, data, len);

                return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
        }

        case BCH_CSUM_chacha20_poly1305_80:
        case BCH_CSUM_chacha20_poly1305_128: {
                SHASH_DESC_ON_STACK(desc, c->poly1305);
                u8 digest[POLY1305_DIGEST_SIZE];
                struct bch_csum ret = { 0 };

                gen_poly_key(c, desc, nonce);

                crypto_shash_update(desc, data, len);
                crypto_shash_final(desc, digest);

                memcpy(&ret, digest, bch_crc_bytes[type]);
                return ret;
        }
        default:
                BUG();
        }
}

int bch2_encrypt(struct bch_fs *c, unsigned type,
                  struct nonce nonce, void *data, size_t len)
{
        if (!bch2_csum_type_is_encryption(type))
                return 0;

        return do_encrypt(c->chacha20, nonce, data, len);
}

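/*
 * Same as bch2_checksum(), but over the data described by a bio's iterator;
 * with CONFIG_HIGHMEM each segment is kmapped before being checksummed.
 */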
static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
                                           struct nonce nonce, struct bio *bio,
                                           struct bvec_iter *iter)
{
        struct bio_vec bv;

        switch (type) {
        case BCH_CSUM_none:
                return (struct bch_csum) { 0 };
        case BCH_CSUM_crc32c_nonzero:
        case BCH_CSUM_crc64_nonzero:
        case BCH_CSUM_crc32c:
        case BCH_CSUM_xxhash:
        case BCH_CSUM_crc64: {
                struct bch2_checksum_state state;

                state.type = type;
                bch2_checksum_init(&state);

#ifdef CONFIG_HIGHMEM
                __bio_for_each_segment(bv, bio, *iter, *iter) {
                        void *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
                        bch2_checksum_update(&state, p, bv.bv_len);
                        kunmap_atomic(p);
                }
#else
                __bio_for_each_bvec(bv, bio, *iter, *iter)
                        bch2_checksum_update(&state, page_address(bv.bv_page) + bv.bv_offset,
                                bv.bv_len);
#endif
                return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
        }

        case BCH_CSUM_chacha20_poly1305_80:
        case BCH_CSUM_chacha20_poly1305_128: {
                SHASH_DESC_ON_STACK(desc, c->poly1305);
                u8 digest[POLY1305_DIGEST_SIZE];
                struct bch_csum ret = { 0 };

                gen_poly_key(c, desc, nonce);

#ifdef CONFIG_HIGHMEM
                __bio_for_each_segment(bv, bio, *iter, *iter) {
                        void *p = kmap_atomic(bv.bv_page) + bv.bv_offset;

                        crypto_shash_update(desc, p, bv.bv_len);
                        kunmap_atomic(p);
                }
#else
                __bio_for_each_bvec(bv, bio, *iter, *iter)
                        crypto_shash_update(desc,
                                page_address(bv.bv_page) + bv.bv_offset,
                                bv.bv_len);
#endif
                crypto_shash_final(desc, digest);

                memcpy(&ret, digest, bch_crc_bytes[type]);
                return ret;
        }
        default:
                BUG();
        }
}

struct bch_csum bch2_checksum_bio(struct bch_fs *c, unsigned type,
                                  struct nonce nonce, struct bio *bio)
{
        struct bvec_iter iter = bio->bi_iter;

        return __bch2_checksum_bio(c, type, nonce, bio, &iter);
}

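/*
 * Encrypt a bio's data in place: bvecs are batched into a fixed 16-entry
 * scatterlist, and each time the table fills it is encrypted and the nonce is
 * advanced by the number of bytes consumed, so the keystream stays contiguous
 * across batches.
 */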
int bch2_encrypt_bio(struct bch_fs *c, unsigned type,
                     struct nonce nonce, struct bio *bio)
{
        struct bio_vec bv;
        struct bvec_iter iter;
        struct scatterlist sgl[16], *sg = sgl;
        size_t bytes = 0;
        int ret = 0;

        if (!bch2_csum_type_is_encryption(type))
                return 0;

        sg_init_table(sgl, ARRAY_SIZE(sgl));

        bio_for_each_segment(bv, bio, iter) {
                if (sg == sgl + ARRAY_SIZE(sgl)) {
                        sg_mark_end(sg - 1);

                        ret = do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
                        if (ret)
                                return ret;

                        nonce = nonce_add(nonce, bytes);
                        bytes = 0;

                        sg_init_table(sgl, ARRAY_SIZE(sgl));
                        sg = sgl;
                }

                sg_set_page(sg++, bv.bv_page, bv.bv_len, bv.bv_offset);
                bytes += bv.bv_len;
        }

        sg_mark_end(sg - 1);
        return do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
}

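/*
 * Combine the checksums of two adjacent regions into the checksum of their
 * concatenation. For the mergeable types (the plain CRC variants), the
 * checksum of A followed by B equals the checksum of A extended with len(B)
 * zero bytes, XORed with the checksum of B; so @a is extended with zeroes
 * from ZERO_PAGE and @b is XORed in.
 */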
struct bch_csum bch2_checksum_merge(unsigned type, struct bch_csum a,
                                    struct bch_csum b, size_t b_len)
{
        struct bch2_checksum_state state;

        state.type = type;
        bch2_checksum_init(&state);
        state.seed = le64_to_cpu(a.lo);

        BUG_ON(!bch2_checksum_mergeable(type));

        while (b_len) {
                unsigned page_len = min_t(unsigned, b_len, PAGE_SIZE);

                bch2_checksum_update(&state,
                                page_address(ZERO_PAGE(0)), page_len);
                b_len -= page_len;
        }
        a.lo = cpu_to_le64(bch2_checksum_final(&state));
        a.lo ^= b.lo;
        a.hi ^= b.hi;
        return a;
}

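/*
 * Recompute checksums over parts of an extent: the bio covered by crc_old is
 * cut into up to three consecutive ranges (crc_a, crc_b and the remainder),
 * each checksummed with new_csum_type. The old checksum is verified first,
 * either by merging the per-range checksums when the types allow it or by
 * rechecksumming the whole bio, and -EIO is returned on mismatch before the
 * new unpacked crc entries are filled in.
 */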
int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
                        struct bversion version,
                        struct bch_extent_crc_unpacked crc_old,
                        struct bch_extent_crc_unpacked *crc_a,
                        struct bch_extent_crc_unpacked *crc_b,
                        unsigned len_a, unsigned len_b,
                        unsigned new_csum_type)
{
        struct bvec_iter iter = bio->bi_iter;
        struct nonce nonce = extent_nonce(version, crc_old);
        struct bch_csum merged = { 0 };
        struct crc_split {
                struct bch_extent_crc_unpacked  *crc;
                unsigned                        len;
                unsigned                        csum_type;
                struct bch_csum                 csum;
        } splits[3] = {
                { crc_a, len_a, new_csum_type },
                { crc_b, len_b, new_csum_type },
                { NULL,  bio_sectors(bio) - len_a - len_b, new_csum_type },
        }, *i;
        bool mergeable = crc_old.csum_type == new_csum_type &&
                bch2_checksum_mergeable(new_csum_type);
        unsigned crc_nonce = crc_old.nonce;

        BUG_ON(len_a + len_b > bio_sectors(bio));
        BUG_ON(crc_old.uncompressed_size != bio_sectors(bio));
        BUG_ON(crc_is_compressed(crc_old));
        BUG_ON(bch2_csum_type_is_encryption(crc_old.csum_type) !=
               bch2_csum_type_is_encryption(new_csum_type));

        for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
                iter.bi_size = i->len << 9;
                if (mergeable || i->crc)
                        i->csum = __bch2_checksum_bio(c, i->csum_type,
                                                      nonce, bio, &iter);
                else
                        bio_advance_iter(bio, &iter, i->len << 9);
                nonce = nonce_add(nonce, i->len << 9);
        }

        if (mergeable)
                for (i = splits; i < splits + ARRAY_SIZE(splits); i++)
                        merged = bch2_checksum_merge(new_csum_type, merged,
                                                     i->csum, i->len << 9);
        else
                merged = bch2_checksum_bio(c, crc_old.csum_type,
                                extent_nonce(version, crc_old), bio);

        if (bch2_crc_cmp(merged, crc_old.csum)) {
                bch_err(c, "checksum error in bch2_rechecksum_bio() (memory corruption or bug?)\n"
                        "expected %0llx:%0llx got %0llx:%0llx (old type %s new type %s)",
                        crc_old.csum.hi,
                        crc_old.csum.lo,
                        merged.hi,
                        merged.lo,
                        bch2_csum_types[crc_old.csum_type],
                        bch2_csum_types[new_csum_type]);
                return -EIO;
        }

        for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
                if (i->crc)
                        *i->crc = (struct bch_extent_crc_unpacked) {
                                .csum_type              = i->csum_type,
                                .compression_type       = crc_old.compression_type,
                                .compressed_size        = i->len,
                                .uncompressed_size      = i->len,
                                .offset                 = 0,
                                .live_size              = i->len,
                                .nonce                  = crc_nonce,
                                .csum                   = i->csum,
                        };

                if (bch2_csum_type_is_encryption(new_csum_type))
                        crc_nonce += i->len;
        }

        return 0;
}

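/*
 * Fetch the user's key from the keyring. In-kernel this goes through
 * request_key() and the "user" key type; the userspace build (bcachefs-tools)
 * uses libkeyutils instead.
 */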
#ifdef __KERNEL__
static int __bch2_request_key(char *key_description, struct bch_key *key)
{
        struct key *keyring_key;
        const struct user_key_payload *ukp;
        int ret;

        keyring_key = request_key(&key_type_user, key_description, NULL);
        if (IS_ERR(keyring_key))
                return PTR_ERR(keyring_key);

        down_read(&keyring_key->sem);
        ukp = dereference_key_locked(keyring_key);
        if (ukp->datalen == sizeof(*key)) {
                memcpy(key, ukp->data, ukp->datalen);
                ret = 0;
        } else {
                ret = -EINVAL;
        }
        up_read(&keyring_key->sem);
        key_put(keyring_key);

        return ret;
}
#else
#include <keyutils.h>

static int __bch2_request_key(char *key_description, struct bch_key *key)
{
        key_serial_t key_id;

        key_id = request_key("user", key_description, NULL,
                             KEY_SPEC_USER_KEYRING);
        if (key_id < 0)
                return -errno;

        if (keyctl_read(key_id, (void *) key, sizeof(*key)) != sizeof(*key))
                return -1;

        return 0;
}
#endif

int bch2_request_key(struct bch_sb *sb, struct bch_key *key)
{
        struct printbuf key_description = PRINTBUF;
        int ret;

        prt_printf(&key_description, "bcachefs:");
        pr_uuid(&key_description, sb->user_uuid.b);

        ret = __bch2_request_key(key_description.buf, key);
        printbuf_exit(&key_description);
        return ret;
}

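/*
 * The superblock's crypt section holds the filesystem key, optionally wrapped
 * (encrypted) with the user's key. If it is wrapped, request the user key
 * from the keyring and unwrap it with ChaCha20; if the result still looks
 * encrypted, the supplied key was wrong.
 */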
int bch2_decrypt_sb_key(struct bch_fs *c,
                        struct bch_sb_field_crypt *crypt,
                        struct bch_key *key)
{
        struct bch_encrypted_key sb_key = crypt->key;
        struct bch_key user_key;
        int ret = 0;

        /* is key encrypted? */
        if (!bch2_key_is_encrypted(&sb_key))
                goto out;

        ret = bch2_request_key(c->disk_sb.sb, &user_key);
        if (ret) {
                bch_err(c, "error requesting encryption key: %i", ret);
                goto err;
        }

        /* decrypt real key: */
        ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
                             &sb_key, sizeof(sb_key));
        if (ret)
                goto err;

        if (bch2_key_is_encrypted(&sb_key)) {
                bch_err(c, "incorrect encryption key");
                ret = -EINVAL;
                goto err;
        }
out:
        *key = sb_key.key;
err:
        memzero_explicit(&sb_key, sizeof(sb_key));
        memzero_explicit(&user_key, sizeof(user_key));
        return ret;
}

static int bch2_alloc_ciphers(struct bch_fs *c)
{
        if (!c->chacha20)
                c->chacha20 = crypto_alloc_sync_skcipher("chacha20", 0, 0);
        if (IS_ERR(c->chacha20)) {
                bch_err(c, "error requesting chacha20 module: %li",
                        PTR_ERR(c->chacha20));
                return PTR_ERR(c->chacha20);
        }

        if (!c->poly1305)
                c->poly1305 = crypto_alloc_shash("poly1305", 0, 0);
        if (IS_ERR(c->poly1305)) {
                bch_err(c, "error requesting poly1305 module: %li",
                        PTR_ERR(c->poly1305));
                return PTR_ERR(c->poly1305);
        }

        return 0;
}

int bch2_disable_encryption(struct bch_fs *c)
{
        struct bch_sb_field_crypt *crypt;
        struct bch_key key;
        int ret = -EINVAL;

        mutex_lock(&c->sb_lock);

        crypt = bch2_sb_get_crypt(c->disk_sb.sb);
        if (!crypt)
                goto out;

        /* is key encrypted? */
        ret = 0;
        if (bch2_key_is_encrypted(&crypt->key))
                goto out;

        ret = bch2_decrypt_sb_key(c, crypt, &key);
        if (ret)
                goto out;

        crypt->key.magic        = BCH_KEY_MAGIC;
        crypt->key.key          = key;

        SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 0);
        bch2_write_super(c);
out:
        mutex_unlock(&c->sb_lock);

        return ret;
}

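/*
 * Set up encryption on a filesystem that doesn't have it yet: generate a
 * fresh random key, optionally wrap it with the user's keyring key, program
 * it into the ChaCha20 cipher, and persist it in a new crypt section in the
 * superblock.
 */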
int bch2_enable_encryption(struct bch_fs *c, bool keyed)
{
        struct bch_encrypted_key key;
        struct bch_key user_key;
        struct bch_sb_field_crypt *crypt;
        int ret = -EINVAL;

        mutex_lock(&c->sb_lock);

        /* Do we already have an encryption key? */
        if (bch2_sb_get_crypt(c->disk_sb.sb))
                goto err;

        ret = bch2_alloc_ciphers(c);
        if (ret)
                goto err;

        key.magic = BCH_KEY_MAGIC;
        get_random_bytes(&key.key, sizeof(key.key));

        if (keyed) {
                ret = bch2_request_key(c->disk_sb.sb, &user_key);
                if (ret) {
                        bch_err(c, "error requesting encryption key: %i", ret);
                        goto err;
                }

                ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
                                              &key, sizeof(key));
                if (ret)
                        goto err;
        }

        ret = crypto_skcipher_setkey(&c->chacha20->base,
                        (void *) &key.key, sizeof(key.key));
        if (ret)
                goto err;

        crypt = bch2_sb_resize_crypt(&c->disk_sb, sizeof(*crypt) / sizeof(u64));
        if (!crypt) {
                ret = -ENOMEM; /* XXX this technically could be -ENOSPC */
                goto err;
        }

        crypt->key = key;

        /* write superblock */
        SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 1);
        bch2_write_super(c);
err:
        mutex_unlock(&c->sb_lock);
        memzero_explicit(&user_key, sizeof(user_key));
        memzero_explicit(&key, sizeof(key));
        return ret;
}

void bch2_fs_encryption_exit(struct bch_fs *c)
{
        if (!IS_ERR_OR_NULL(c->poly1305))
                crypto_free_shash(c->poly1305);
        if (!IS_ERR_OR_NULL(c->chacha20))
                crypto_free_sync_skcipher(c->chacha20);
        if (!IS_ERR_OR_NULL(c->sha256))
                crypto_free_shash(c->sha256);
}

int bch2_fs_encryption_init(struct bch_fs *c)
{
        struct bch_sb_field_crypt *crypt;
        struct bch_key key;
        int ret = 0;

        pr_verbose_init(c->opts, "");

        c->sha256 = crypto_alloc_shash("sha256", 0, 0);
        if (IS_ERR(c->sha256)) {
                bch_err(c, "error requesting sha256 module");
                ret = PTR_ERR(c->sha256);
                goto out;
        }

        crypt = bch2_sb_get_crypt(c->disk_sb.sb);
        if (!crypt)
                goto out;

        ret = bch2_alloc_ciphers(c);
        if (ret)
                goto out;

        ret = bch2_decrypt_sb_key(c, crypt, &key);
        if (ret)
                goto out;

        ret = crypto_skcipher_setkey(&c->chacha20->base,
                        (void *) &key.key, sizeof(key.key));
        if (ret)
                goto out;
out:
        memzero_explicit(&key, sizeof(key));
        pr_verbose_init(c->opts, "ret %i", ret);
        return ret;
}