// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Code for managing the extent btree and dynamically updating the writeback
 * dirty sector count.
 */

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_iter.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "error.h"
#include "extents.h"
#include "util.h"

#include <trace/events/bcachefs.h>

static union bch_extent_entry *__bch2_bkey_drop_ptr(struct bkey_s, struct bch_extent_ptr *);

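/*
 * Maximum extent size, in sectors, that each packed crc entry variant can
 * represent - used when merging extents, to check that the combined size
 * still fits in the smaller crc field:
 */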
static unsigned bch2_crc_field_size_max[] = {
	[BCH_EXTENT_ENTRY_crc32]	= CRC32_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc64]	= CRC64_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc128]	= CRC128_SIZE_MAX,
};

static void bch2_extent_crc_pack(union bch_extent_crc *,
				 struct bch_extent_crc_unpacked,
				 enum bch_extent_entry_type);

static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
						   unsigned dev)
{
	struct bch_dev_io_failures *i;

	for (i = f->devs; i < f->devs + f->nr; i++)
		if (i->dev == dev)
			return i;

	return NULL;
}

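/*
 * Record an IO failure against the device @p points to: retries against the
 * same pointer index are counted, and switching to a different index resets
 * the failure count:
 */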
void bch2_mark_io_failure(struct bch_io_failures *failed,
			  struct extent_ptr_decoded *p)
{
	struct bch_dev_io_failures *f = dev_io_failures(failed, p->ptr.dev);

	if (!f) {
		BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));

		f = &failed->devs[failed->nr++];
		f->dev		= p->ptr.dev;
		f->idx		= p->idx;
		f->nr_failed	= 1;
		f->nr_retries	= 0;
	} else if (p->idx != f->idx) {
		f->idx		= p->idx;
		f->nr_failed	= 1;
		f->nr_retries	= 0;
	} else {
		f->nr_failed++;
	}
}

/*
 * returns true if p1 is better than p2:
 */
static inline bool ptr_better(struct bch_fs *c,
			      const struct extent_ptr_decoded p1,
			      const struct extent_ptr_decoded p2)
{
	if (likely(!p1.idx && !p2.idx)) {
		struct bch_dev *dev1 = bch_dev_bkey_exists(c, p1.ptr.dev);
		struct bch_dev *dev2 = bch_dev_bkey_exists(c, p2.ptr.dev);

		u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
		u64 l2 = atomic64_read(&dev2->cur_latency[READ]);

		/* Pick at random, biased in favor of the faster device: */
		return bch2_rand_range(l1 + l2) > l1;
	}

	if (bch2_force_reconstruct_read)
		return p1.idx > p2.idx;

	return p1.idx < p2.idx;
}

/*
 * This picks a non-stale pointer, preferably from a device other than @avoid.
 * Avoid can be NULL, meaning pick any. If there are no non-stale pointers to
 * other devices, it will still pick a pointer from avoid.
 */
int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
			       struct bch_io_failures *failed,
			       struct extent_ptr_decoded *pick)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_dev_io_failures *f;
	struct bch_dev *ca;
	int ret = 0;

	if (k.k->type == KEY_TYPE_error)
		return -EIO;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		/*
		 * Unwritten extent: no need to actually read, treat it as a
		 * hole and return 0s:
		 */
		if (p.ptr.unwritten)
			return 0;

		ca = bch_dev_bkey_exists(c, p.ptr.dev);

		/*
		 * If there are any dirty pointers it's an error if we can't
		 * read:
		 */
		if (!ret && !p.ptr.cached)
			ret = -EIO;

		if (p.ptr.cached && ptr_stale(ca, &p.ptr))
			continue;

		f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
		if (f)
			p.idx = f->nr_failed < f->nr_retries
				? f->idx
				: f->idx + 1;

		if (!p.idx &&
		    !bch2_dev_is_readable(ca))
			p.idx++;

		if (bch2_force_reconstruct_read &&
		    !p.has_ec)
			p.idx++;

		if (p.idx >= (unsigned) p.has_ec + 1)
			continue;

		if (ret > 0 && !ptr_better(c, p, *pick))
			continue;

		*pick = p;
		ret = 1;
	}

	return ret;
}

/* KEY_TYPE_btree_ptr: */

int bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k,
			   unsigned flags, struct printbuf *err)
{
	if (bkey_val_u64s(k.k) > BCH_REPLICAS_MAX) {
		prt_printf(err, "value too big (%zu > %u)",
			   bkey_val_u64s(k.k), BCH_REPLICAS_MAX);
		return -BCH_ERR_invalid_bkey;
	}

	return bch2_bkey_ptrs_invalid(c, k, flags, err);
}

void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	bch2_bkey_ptrs_to_text(out, c, k);
}

int bch2_btree_ptr_v2_invalid(const struct bch_fs *c, struct bkey_s_c k,
			      unsigned flags, struct printbuf *err)
{
	struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);

	if (bkey_val_bytes(k.k) <= sizeof(*bp.v)) {
		prt_printf(err, "value too small (%zu <= %zu)",
			   bkey_val_bytes(k.k), sizeof(*bp.v));
		return -BCH_ERR_invalid_bkey;
	}

	if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX) {
		prt_printf(err, "value too big (%zu > %zu)",
			   bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX);
		return -BCH_ERR_invalid_bkey;
	}

	if (c->sb.version < bcachefs_metadata_version_snapshot &&
	    bp.v->min_key.snapshot) {
		prt_printf(err, "invalid min_key.snapshot (%u != 0)",
			   bp.v->min_key.snapshot);
		return -BCH_ERR_invalid_bkey;
	}

	return bch2_bkey_ptrs_invalid(c, k, flags, err);
}

void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
			       struct bkey_s_c k)
{
	struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);

	prt_printf(out, "seq %llx written %u min_key %s",
		   le64_to_cpu(bp.v->seq),
		   le16_to_cpu(bp.v->sectors_written),
		   BTREE_PTR_RANGE_UPDATED(bp.v) ? "R " : "");

	bch2_bpos_to_text(out, bp.v->min_key);
	prt_printf(out, " ");
	bch2_bkey_ptrs_to_text(out, c, k);
}

void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
			      unsigned big_endian, int write,
			      struct bkey_s k)
{
	struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(k);

	compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key);

	if (version < bcachefs_metadata_version_inode_btree_change &&
	    btree_node_type_is_extents(btree_id) &&
	    !bkey_eq(bp.v->min_key, POS_MIN))
		bp.v->min_key = write
			? bpos_nosnap_predecessor(bp.v->min_key)
			: bpos_nosnap_successor(bp.v->min_key);
}

/* KEY_TYPE_extent: */

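/*
 * Attempt to merge two adjacent extents, @l followed by @r: first check that
 * both values have the same sequence of entry types, then that each pair of
 * pointers/checksums is mergeable (same device, bucket and generation,
 * contiguous offsets), then combine the entries in place, merging or reusing
 * checksum entries as needed. Returns true and extends @l on success:
 */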
bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
{
	struct bkey_ptrs l_ptrs = bch2_bkey_ptrs(l);
	struct bkey_ptrs_c r_ptrs = bch2_bkey_ptrs_c(r);
	union bch_extent_entry *en_l;
	const union bch_extent_entry *en_r;
	struct extent_ptr_decoded lp, rp;
	struct bch_dev *ca;
	bool use_right_ptr;

	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
		if (extent_entry_type(en_l) != extent_entry_type(en_r))
			return false;

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	if (en_l < l_ptrs.end || en_r < r_ptrs.end)
		return false;

	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	lp.crc = bch2_extent_crc_unpack(l.k, NULL);
	rp.crc = bch2_extent_crc_unpack(r.k, NULL);

	while (__bkey_ptr_next_decode(l.k, l_ptrs.end, lp, en_l) &&
	       __bkey_ptr_next_decode(r.k, r_ptrs.end, rp, en_r)) {
		if (lp.ptr.offset + lp.crc.offset + lp.crc.live_size !=
		    rp.ptr.offset + rp.crc.offset ||
		    lp.ptr.dev		!= rp.ptr.dev ||
		    lp.ptr.gen		!= rp.ptr.gen ||
		    lp.ptr.unwritten	!= rp.ptr.unwritten ||
		    lp.has_ec		!= rp.has_ec)
			return false;

		/* Extents may not straddle buckets: */
		ca = bch_dev_bkey_exists(c, lp.ptr.dev);
		if (PTR_BUCKET_NR(ca, &lp.ptr) != PTR_BUCKET_NR(ca, &rp.ptr))
			return false;

		if (lp.has_ec != rp.has_ec ||
		    (lp.has_ec &&
		     (lp.ec.block	!= rp.ec.block ||
		      lp.ec.redundancy	!= rp.ec.redundancy ||
		      lp.ec.idx		!= rp.ec.idx)))
			return false;

		if (lp.crc.compression_type != rp.crc.compression_type ||
		    lp.crc.nonce != rp.crc.nonce)
			return false;

		if (lp.crc.offset + lp.crc.live_size + rp.crc.live_size <=
		    lp.crc.uncompressed_size) {
			/* can use left extent's crc entry */
		} else if (lp.crc.live_size <= rp.crc.offset) {
			/* can use right extent's crc entry */
		} else {
			/* check if checksums can be merged: */
			if (lp.crc.csum_type != rp.crc.csum_type ||
			    lp.crc.nonce != rp.crc.nonce ||
			    crc_is_compressed(lp.crc) ||
			    !bch2_checksum_mergeable(lp.crc.csum_type))
				return false;

			if (lp.crc.offset + lp.crc.live_size != lp.crc.compressed_size ||
			    rp.crc.offset)
				return false;

			if (lp.crc.csum_type &&
			    lp.crc.uncompressed_size +
			    rp.crc.uncompressed_size > (c->opts.encoded_extent_max >> 9))
				return false;
		}

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
		if (extent_entry_is_crc(en_l)) {
			struct bch_extent_crc_unpacked crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
			struct bch_extent_crc_unpacked crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

			if (crc_l.uncompressed_size + crc_r.uncompressed_size >
			    bch2_crc_field_size_max[extent_entry_type(en_l)])
				return false;
		}

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	use_right_ptr = false;
	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	while (en_l < l_ptrs.end) {
		if (extent_entry_type(en_l) == BCH_EXTENT_ENTRY_ptr &&
		    use_right_ptr)
			en_l->ptr = en_r->ptr;

		if (extent_entry_is_crc(en_l)) {
			struct bch_extent_crc_unpacked crc_l =
				bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
			struct bch_extent_crc_unpacked crc_r =
				bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

			use_right_ptr = false;

			if (crc_l.offset + crc_l.live_size + crc_r.live_size <=
			    crc_l.uncompressed_size) {
				/* can use left extent's crc entry */
			} else if (crc_l.live_size <= crc_r.offset) {
				/* can use right extent's crc entry */
				crc_r.offset -= crc_l.live_size;
				bch2_extent_crc_pack(entry_to_crc(en_l), crc_r,
						     extent_entry_type(en_l));
				use_right_ptr = true;
			} else {
				crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
								 crc_l.csum,
								 crc_r.csum,
								 crc_r.uncompressed_size << 9);

				crc_l.uncompressed_size	+= crc_r.uncompressed_size;
				crc_l.compressed_size	+= crc_r.compressed_size;
				bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
						     extent_entry_type(en_l));
			}
		}

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	bch2_key_resize(l.k, l.k->size + r.k->size);
	return true;
}

/* KEY_TYPE_reservation: */

int bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k,
			     unsigned flags, struct printbuf *err)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation)) {
		prt_printf(err, "incorrect value size (%zu != %zu)",
			   bkey_val_bytes(k.k), sizeof(*r.v));
		return -BCH_ERR_invalid_bkey;
	}

	if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX) {
		prt_printf(err, "invalid nr_replicas (%u)",
			   r.v->nr_replicas);
		return -BCH_ERR_invalid_bkey;
	}

	return 0;
}

void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
			      struct bkey_s_c k)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	prt_printf(out, "generation %u replicas %u",
		   le32_to_cpu(r.v->generation),
		   r.v->nr_replicas);
}

bool bch2_reservation_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
{
	struct bkey_s_reservation l = bkey_s_to_reservation(_l);
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(_r);

	if (l.v->generation != r.v->generation ||
	    l.v->nr_replicas != r.v->nr_replicas)
		return false;

	bch2_key_resize(l.k, l.k->size + r.k->size);
	return true;
}

/* Extent checksum entries: */

/* returns true if not equal */
static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
					 struct bch_extent_crc_unpacked r)
{
	return (l.csum_type		!= r.csum_type ||
		l.compression_type	!= r.compression_type ||
		l.compressed_size	!= r.compressed_size ||
		l.uncompressed_size	!= r.uncompressed_size ||
		l.offset		!= r.offset ||
		l.live_size		!= r.live_size ||
		l.nonce			!= r.nonce ||
		bch2_crc_cmp(l.csum, r.csum));
}

static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
				  struct bch_extent_crc_unpacked n)
{
	return !crc_is_compressed(u) &&
		u.csum_type &&
		u.uncompressed_size > u.live_size &&
		bch2_csum_type_is_encryption(u.csum_type) ==
		bch2_csum_type_is_encryption(n.csum_type);
}

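/*
 * Returns true if some checksum entry in @k could be narrowed to @n - see
 * bch2_bkey_narrow_crcs() below:
 */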
bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
				 struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	if (!n.csum_type)
		return false;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (can_narrow_crc(crc, n))
			return true;

	return false;
}

/*
 * We're writing another replica for this extent, so while we've got the data in
 * memory we'll be computing a new checksum for the currently live data.
 *
 * If there are other replicas we aren't moving, and they are checksummed but
 * not compressed, we can modify them to point to only the data that is
 * currently live (so that readers won't have to bounce) while we've got the
 * checksum we need:
 */
bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked u;
	struct extent_ptr_decoded p;
	union bch_extent_entry *i;
	bool ret = false;

	/* Find a checksum entry that covers only live data: */
	if (!n.csum_type) {
		bkey_for_each_crc(&k->k, ptrs, u, i)
			if (!crc_is_compressed(u) &&
			    u.csum_type &&
			    u.live_size == u.uncompressed_size) {
				n = u;
				goto found;
			}
		return false;
	}
found:
	BUG_ON(crc_is_compressed(n));
	BUG_ON(n.offset);
	BUG_ON(n.live_size != k->k.size);

restart_narrow_pointers:
	ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));

	bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
		if (can_narrow_crc(p.crc, n)) {
			__bch2_bkey_drop_ptr(bkey_i_to_s(k), &i->ptr);
			p.ptr.offset += p.crc.offset;
			p.crc = n;
			bch2_extent_ptr_decoded_append(k, &p);
			ret = true;
			goto restart_narrow_pointers;
		}

	return ret;
}

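/*
 * Pack an unpacked crc into the given packed entry variant; the caller is
 * responsible for having picked a variant large enough to hold the size,
 * nonce and checksum fields:
 */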
static void bch2_extent_crc_pack(union bch_extent_crc *dst,
				 struct bch_extent_crc_unpacked src,
				 enum bch_extent_entry_type type)
{
#define set_common_fields(_dst, _src)					\
		_dst.type		= 1 << type;			\
		_dst.csum_type		= _src.csum_type,		\
		_dst.compression_type	= _src.compression_type,	\
		_dst._compressed_size	= _src.compressed_size - 1,	\
		_dst._uncompressed_size	= _src.uncompressed_size - 1,	\
		_dst.offset		= _src.offset

	switch (type) {
	case BCH_EXTENT_ENTRY_crc32:
		set_common_fields(dst->crc32, src);
		dst->crc32.csum		= *((__le32 *) &src.csum.lo);
		break;
	case BCH_EXTENT_ENTRY_crc64:
		set_common_fields(dst->crc64, src);
		dst->crc64.nonce	= src.nonce;
		dst->crc64.csum_lo	= src.csum.lo;
		dst->crc64.csum_hi	= *((__le16 *) &src.csum.hi);
		break;
	case BCH_EXTENT_ENTRY_crc128:
		set_common_fields(dst->crc128, src);
		dst->crc128.nonce	= src.nonce;
		dst->crc128.csum	= src.csum;
		break;
	default:
		BUG();
	}
#undef set_common_fields
}

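/*
 * Append a checksum entry to @k, using the smallest entry variant that can
 * represent the checksum, size and nonce:
 */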
void bch2_extent_crc_append(struct bkey_i *k,
			    struct bch_extent_crc_unpacked new)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	union bch_extent_crc *crc = (void *) ptrs.end;
	enum bch_extent_entry_type type;

	if (bch_crc_bytes[new.csum_type]	<= 4 &&
	    new.uncompressed_size		<= CRC32_SIZE_MAX &&
	    new.nonce				<= CRC32_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc32;
	else if (bch_crc_bytes[new.csum_type]	<= 10 &&
		 new.uncompressed_size		<= CRC64_SIZE_MAX &&
		 new.nonce			<= CRC64_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc64;
	else if (bch_crc_bytes[new.csum_type]	<= 16 &&
		 new.uncompressed_size		<= CRC128_SIZE_MAX &&
		 new.nonce			<= CRC128_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc128;
	else
		BUG();

	bch2_extent_crc_pack(crc, new, type);

	k->k.u64s += extent_entry_u64s(ptrs.end);

	EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
}

/* Generic code for keys with pointers: */

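/*
 * Pointer counting helpers: nr_ptrs counts every device the key points to;
 * nr_ptrs_allocated skips cached pointers; nr_ptrs_fully_allocated also skips
 * compressed pointers. Reservations count as nr_replicas:
 */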
unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
{
	return bch2_bkey_devs(k).nr;
}

unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
{
	return k.k->type == KEY_TYPE_reservation
		? bkey_s_c_to_reservation(k).v->nr_replicas
		: bch2_bkey_dirty_devs(k).nr;
}

unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
{
	unsigned ret = 0;

	if (k.k->type == KEY_TYPE_reservation) {
		ret = bkey_s_c_to_reservation(k).v->nr_replicas;
	} else {
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			ret += !p.ptr.cached && !crc_is_compressed(p.crc);
	}

	return ret;
}

unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned ret = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (!p.ptr.cached && crc_is_compressed(p.crc))
			ret += p.crc.compressed_size;

	return ret;
}

bool bch2_bkey_is_incompressible(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;

	bkey_for_each_crc(k.k, ptrs, crc, entry)
		if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
			return true;

	return false;
}

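/*
 * Replica/durability accounting: cached pointers don't count, and erasure
 * coded pointers additionally count their stripe's redundancy:
 */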
unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p = { 0 };
	unsigned replicas = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if (p.ptr.cached)
			continue;

		if (p.has_ec)
			replicas += p.ec.redundancy;

		replicas++;
	}

	return replicas;
}

unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
{
	unsigned durability = 0;
	struct bch_dev *ca;

	if (p->ptr.cached)
		return 0;

	ca = bch_dev_bkey_exists(c, p->ptr.dev);

	if (ca->mi.state != BCH_MEMBER_STATE_failed)
		durability = max_t(unsigned, durability, ca->mi.durability);

	if (p->has_ec)
		durability += p->ec.redundancy;

	return durability;
}

unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned durability = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		durability += bch2_extent_ptr_durability(c, &p);

	return durability;
}

void bch2_bkey_extent_entry_drop(struct bkey_i *k, union bch_extent_entry *entry)
{
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
	union bch_extent_entry *next = extent_entry_next(entry);

	memmove_u64s(entry, next, (u64 *) end - (u64 *) next);
	k->k.u64s -= extent_entry_u64s(entry);
}

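/*
 * Insert a new entry at @dst, shifting the existing entries from @dst to the
 * end of the value up to make room:
 */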
static inline void __extent_entry_insert(struct bkey_i *k,
					 union bch_extent_entry *dst,
					 union bch_extent_entry *new)
{
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));

	memmove_u64s_up_small((u64 *) dst + extent_entry_u64s(new),
			      dst, (u64 *) end - (u64 *) dst);
	k->k.u64s += extent_entry_u64s(new);
	memcpy_u64s_small(dst, new, extent_entry_u64s(new));
}

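/*
 * Re-pack a decoded pointer into @k: reuse an existing matching checksum
 * entry if there is one, otherwise append a new one, then insert the pointer
 * (and stripe pointer, if any) after it:
 */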
void bch2_extent_ptr_decoded_append(struct bkey_i *k,
				    struct extent_ptr_decoded *p)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked crc =
		bch2_extent_crc_unpack(&k->k, NULL);
	union bch_extent_entry *pos;

	if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
		pos = ptrs.start;
		goto found;
	}

	bkey_for_each_crc(&k->k, ptrs, crc, pos)
		if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
			pos = extent_entry_next(pos);
			goto found;
		}

	bch2_extent_crc_append(k, p->crc);
	pos = bkey_val_end(bkey_i_to_s(k));
found:
	p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
	__extent_entry_insert(k, pos, to_entry(&p->ptr));

	if (p->has_ec) {
		p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
		__extent_entry_insert(k, pos, to_entry(&p->ec));
	}
}

static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
						 union bch_extent_entry *entry)
{
	union bch_extent_entry *i = ptrs.start;

	if (i == entry)
		return NULL;

	while (extent_entry_next(i) != entry)
		i = extent_entry_next(i);
	return i;
}

static void extent_entry_drop(struct bkey_s k, union bch_extent_entry *entry)
{
	union bch_extent_entry *next = extent_entry_next(entry);

	/* stripes have ptrs, but their layout doesn't work with this code */
	BUG_ON(k.k->type == KEY_TYPE_stripe);

	memmove_u64s_down(entry, next,
			  (u64 *) bkey_val_end(k) - (u64 *) next);
	k.k->u64s -= (u64 *) next - (u64 *) entry;
}

/*
 * Returns pointer to the next entry after the one being dropped:
 */
static union bch_extent_entry *__bch2_bkey_drop_ptr(struct bkey_s k,
						    struct bch_extent_ptr *ptr)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry = to_entry(ptr), *next;
	union bch_extent_entry *ret = entry;
	bool drop_crc = true;

	EBUG_ON(ptr < &ptrs.start->ptr ||
		ptr >= &ptrs.end->ptr);
	EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);

	for (next = extent_entry_next(entry);
	     next != ptrs.end;
	     next = extent_entry_next(next)) {
		if (extent_entry_is_crc(next)) {
			break;
		} else if (extent_entry_is_ptr(next)) {
			drop_crc = false;
			break;
		}
	}

	extent_entry_drop(k, entry);

	while ((entry = extent_entry_prev(ptrs, entry))) {
		if (extent_entry_is_ptr(entry))
			break;

		if ((extent_entry_is_crc(entry) && drop_crc) ||
		    extent_entry_is_stripe_ptr(entry)) {
			ret = (void *) ret - extent_entry_bytes(entry);
			extent_entry_drop(k, entry);
		}
	}

	return ret;
}

union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
					   struct bch_extent_ptr *ptr)
{
	bool have_dirty = bch2_bkey_dirty_devs(k.s_c).nr;
	union bch_extent_entry *ret =
		__bch2_bkey_drop_ptr(k, ptr);

	/*
	 * If we deleted all the dirty pointers and there's still cached
	 * pointers, we could set the cached pointers to dirty if they're not
	 * stale - but to do that correctly we'd need to grab an open_bucket
	 * reference so that we don't race with bucket reuse:
	 */
	if (have_dirty &&
	    !bch2_bkey_dirty_devs(k.s_c).nr) {
		k.k->type = KEY_TYPE_error;
		set_bkey_val_u64s(k.k, 0);
		ret = NULL;
	} else if (!bch2_bkey_nr_ptrs(k.s_c)) {
		k.k->type = KEY_TYPE_deleted;
		set_bkey_val_u64s(k.k, 0);
		ret = NULL;
	}

	return ret;
}

void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
{
	struct bch_extent_ptr *ptr;

	bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
}

void bch2_bkey_drop_device_noerror(struct bkey_s k, unsigned dev)
{
	struct bch_extent_ptr *ptr = (void *) bch2_bkey_has_device(k.s_c, dev);

	if (ptr)
		__bch2_bkey_drop_ptr(k, ptr);
}

const struct bch_extent_ptr *
bch2_bkey_has_device(struct bkey_s_c k, unsigned dev)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)
		if (ptr->dev == dev)
			return ptr;

	return NULL;
}

bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)
		if (bch2_dev_in_target(c, ptr->dev, target) &&
		    (!ptr->cached ||
		     !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
			return true;

	return false;
}

bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
			   struct bch_extent_ptr m, u64 offset)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (p.ptr.dev	== m.dev &&
		    p.ptr.gen	== m.gen &&
		    (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
		    (s64) m.offset - offset)
			return true;

	return false;
}

/*
 * Returns true if two extents refer to the same data:
 */
bool bch2_extents_match(struct bkey_s_c k1, struct bkey_s_c k2)
{
	if (k1.k->type != k2.k->type)
		return false;

	if (bkey_extent_is_direct_data(k1.k)) {
		struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(k1);
		struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(k2);
		const union bch_extent_entry *entry1, *entry2;
		struct extent_ptr_decoded p1, p2;

		if (bkey_extent_is_unwritten(k1) != bkey_extent_is_unwritten(k2))
			return false;

		bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1)
			bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
				if (p1.ptr.dev	== p2.ptr.dev &&
				    p1.ptr.gen	== p2.ptr.gen &&
				    (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
				    (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
					return true;

		return false;
	} else {
		/* KEY_TYPE_deleted, etc. */
		return true;
	}
}

bool bch2_extent_has_ptr(struct bkey_s_c k1, struct extent_ptr_decoded p1,
			 struct bkey_s_c k2)
{
	struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(k2);
	const union bch_extent_entry *entry2;
	struct extent_ptr_decoded p2;

	bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
		if (p1.ptr.dev	== p2.ptr.dev &&
		    p1.ptr.gen	== p2.ptr.gen &&
		    (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
		    (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
			return true;

	return false;
}

/*
 * bch_extent_normalize - clean up an extent, dropping stale pointers etc.
 *
 * Returns true if @k should be dropped entirely
 *
 * For existing keys, only called when btree nodes are being rewritten, not when
 * they're merely being compacted/resorted in memory.
 */
bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
{
	struct bch_extent_ptr *ptr;

	bch2_bkey_drop_ptrs(k, ptr,
		ptr->cached &&
		ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));

	return bkey_deleted(k.k);
}

void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	const struct bch_extent_ptr *ptr;
	const struct bch_extent_stripe_ptr *ec;
	struct bch_dev *ca;
	bool first = true;

	bkey_extent_entry_for_each(ptrs, entry) {
		if (!first)
			prt_printf(out, " ");

		switch (__extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			ptr = entry_to_ptr(entry);
			ca = c && ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
				? bch_dev_bkey_exists(c, ptr->dev)
				: NULL;

			if (!ca) {
				prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
					   (u64) ptr->offset, ptr->gen,
					   ptr->cached ? " cached" : "");
			} else {
				u32 offset;
				u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);

				prt_printf(out, "ptr: %u:%llu:%u gen %u",
					   ptr->dev, b, offset, ptr->gen);
				if (ptr->cached)
					prt_str(out, " cached");
				if (ptr->unwritten)
					prt_str(out, " unwritten");
				if (ca && ptr_stale(ca, ptr))
					prt_printf(out, " stale");
			}
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum %s compress %s",
				   crc.compressed_size,
				   crc.uncompressed_size,
				   crc.offset, crc.nonce,
				   bch2_csum_types[crc.csum_type],
				   bch2_compression_types[crc.compression_type]);
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			ec = &entry->stripe_ptr;

			prt_printf(out, "ec: idx %llu block %u",
				   (u64) ec->idx, ec->block);
			break;
		default:
			prt_printf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
			return;
		}

		first = false;
	}
}

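/*
 * Validate a single pointer: it must point to a valid device, not be
 * duplicated within the key, and lie entirely within one bucket of that
 * device:
 */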
static int extent_ptr_invalid(const struct bch_fs *c,
			      struct bkey_s_c k,
			      const struct bch_extent_ptr *ptr,
			      unsigned size_ondisk,
			      bool metadata,
			      struct printbuf *err)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr2;
	u64 bucket;
	u32 bucket_offset;
	struct bch_dev *ca;

	if (!bch2_dev_exists2(c, ptr->dev)) {
		prt_printf(err, "pointer to invalid device (%u)", ptr->dev);
		return -BCH_ERR_invalid_bkey;
	}

	ca = bch_dev_bkey_exists(c, ptr->dev);
	bkey_for_each_ptr(ptrs, ptr2)
		if (ptr != ptr2 && ptr->dev == ptr2->dev) {
			prt_printf(err, "multiple pointers to same device (%u)", ptr->dev);
			return -BCH_ERR_invalid_bkey;
		}

	bucket = sector_to_bucket_and_offset(ca, ptr->offset, &bucket_offset);

	if (bucket >= ca->mi.nbuckets) {
		prt_printf(err, "pointer past last bucket (%llu > %llu)",
			   bucket, ca->mi.nbuckets);
		return -BCH_ERR_invalid_bkey;
	}

	if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket)) {
		prt_printf(err, "pointer before first bucket (%llu < %u)",
			   bucket, ca->mi.first_bucket);
		return -BCH_ERR_invalid_bkey;
	}

	if (bucket_offset + size_ondisk > ca->mi.bucket_size) {
		prt_printf(err, "pointer spans multiple buckets (%u + %u > %u)",
			   bucket_offset, size_ondisk, ca->mi.bucket_size);
		return -BCH_ERR_invalid_bkey;
	}

	return 0;
}

int bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k,
			   unsigned flags, struct printbuf *err)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	unsigned size_ondisk = k.k->size;
	unsigned nonce = UINT_MAX;
	unsigned nr_ptrs = 0;
	bool unwritten = false;
	int ret;

	if (bkey_is_btree_ptr(k.k))
		size_ondisk = btree_sectors(c);

	bkey_extent_entry_for_each(ptrs, entry) {
		if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX) {
			prt_printf(err, "invalid extent entry type (got %u, max %u)",
				   __extent_entry_type(entry), BCH_EXTENT_ENTRY_MAX);
			return -BCH_ERR_invalid_bkey;
		}

		if (bkey_is_btree_ptr(k.k) &&
		    !extent_entry_is_ptr(entry)) {
			prt_printf(err, "has non ptr field");
			return -BCH_ERR_invalid_bkey;
		}

		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			ret = extent_ptr_invalid(c, k, &entry->ptr, size_ondisk,
						 false, err);
			if (ret)
				return ret;

			if (nr_ptrs && unwritten != entry->ptr.unwritten) {
				prt_printf(err, "extent with unwritten and written ptrs");
				return -BCH_ERR_invalid_bkey;
			}

			if (k.k->type != KEY_TYPE_extent && entry->ptr.unwritten) {
				prt_printf(err, "has unwritten ptrs");
				return -BCH_ERR_invalid_bkey;
			}

			unwritten = entry->ptr.unwritten;
			nr_ptrs++;
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			if (crc.offset + crc.live_size >
			    crc.uncompressed_size) {
				prt_printf(err, "checksum offset + key size > uncompressed size");
				return -BCH_ERR_invalid_bkey;
			}

			size_ondisk = crc.compressed_size;

			if (!bch2_checksum_type_valid(c, crc.csum_type)) {
				prt_printf(err, "invalid checksum type");
				return -BCH_ERR_invalid_bkey;
			}

			if (crc.compression_type >= BCH_COMPRESSION_TYPE_NR) {
				prt_printf(err, "invalid compression type");
				return -BCH_ERR_invalid_bkey;
			}

			if (bch2_csum_type_is_encryption(crc.csum_type)) {
				if (nonce == UINT_MAX)
					nonce = crc.offset + crc.nonce;
				else if (nonce != crc.offset + crc.nonce) {
					prt_printf(err, "incorrect nonce");
					return -BCH_ERR_invalid_bkey;
				}
			}
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			break;
		}
	}

	if (nr_ptrs >= BCH_BKEY_PTRS_MAX) {
		prt_str(err, "too many ptrs");
		return -BCH_ERR_invalid_bkey;
	}

	return 0;
}

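/*
 * Byteswap the value of a pointer-containing key: every u64 is swabbed
 * wholesale, then the checksum fields, which are narrower than a u64, are
 * fixed up per entry type:
 */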
void bch2_ptr_swab(struct bkey_s k)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	u64 *d;

	for (d = (u64 *) ptrs.start;
	     d != (u64 *) ptrs.end;
	     d++)
		*d = swab64(*d);

	for (entry = ptrs.start;
	     entry < ptrs.end;
	     entry = extent_entry_next(entry)) {
		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			break;
		case BCH_EXTENT_ENTRY_crc32:
			entry->crc32.csum = swab32(entry->crc32.csum);
			break;
		case BCH_EXTENT_ENTRY_crc64:
			entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
			entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
			break;
		case BCH_EXTENT_ENTRY_crc128:
			entry->crc128.csum.hi = (__force __le64)
				swab64((__force u64) entry->crc128.csum.hi);
			entry->crc128.csum.lo = (__force __le64)
				swab64((__force u64) entry->crc128.csum.lo);
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			break;
		}
	}
}

/* Generic extent code: */

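/*
 * bch2_cut_front_s()/bch2_cut_back_s() trim an extent to start/end at @where,
 * adjusting pointer and checksum offsets (or the inline data/reflink index)
 * to match; both return the change in the size of the value, in u64s (zero or
 * negative):
 */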
int bch2_cut_front_s(struct bpos where, struct bkey_s k)
{
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 sub;

	if (bkey_le(where, bkey_start_pos(k.k)))
		return 0;

	EBUG_ON(bkey_gt(where, k.k->p));

	sub = where.offset - bkey_start_offset(k.k);
	k.k->size -= sub;

	if (!k.k->size) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
	}

	switch (k.k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v: {
		struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
		union bch_extent_entry *entry;
		bool seen_crc = false;

		bkey_extent_entry_for_each(ptrs, entry) {
			switch (extent_entry_type(entry)) {
			case BCH_EXTENT_ENTRY_ptr:
				if (!seen_crc)
					entry->ptr.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc32:
				entry->crc32.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc64:
				entry->crc64.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc128:
				entry->crc128.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_stripe_ptr:
				break;
			}

			if (extent_entry_is_crc(entry))
				seen_crc = true;
		}

		break;
	}
	case KEY_TYPE_reflink_p: {
		struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);

		le64_add_cpu(&p.v->idx, sub);
		break;
	}
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data: {
		void *p = bkey_inline_data_p(k);
		unsigned bytes = bkey_inline_data_bytes(k.k);

		sub = min_t(u64, sub << 9, bytes);
		memmove(p, p + sub, bytes - sub);
		new_val_u64s -= sub >> 3;
		break;
	}
	}

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
}

int bch2_cut_back_s(struct bpos where, struct bkey_s k)
{
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 len = 0;

	if (bkey_ge(where, k.k->p))
		return 0;

	EBUG_ON(bkey_lt(where, bkey_start_pos(k.k)));

	len = where.offset - bkey_start_offset(k.k);

	k.k->p.offset = where.offset;
	k.k->size = len;

	if (!len) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
	}

	switch (k.k->type) {
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data:
		new_val_u64s = (bkey_inline_data_offset(k.k) +
				min(bkey_inline_data_bytes(k.k), k.k->size << 9)) >> 3;
		break;
	}

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
}