// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Code for managing the extent btree and dynamically updating the writeback
 * dirty sector count.
 */

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_iter.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "extents.h"
#include "util.h"

#include <trace/events/bcachefs.h>
static unsigned bch2_crc_field_size_max[] = {
	[BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
};

static void bch2_extent_crc_pack(union bch_extent_crc *,
				 struct bch_extent_crc_unpacked,
				 enum bch_extent_entry_type);
static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
						   unsigned dev)
{
	struct bch_dev_io_failures *i;

	for (i = f->devs; i < f->devs + f->nr; i++)
		if (i->dev == dev)
			return i;

	return NULL;
}
void bch2_mark_io_failure(struct bch_io_failures *failed,
			  struct extent_ptr_decoded *p)
{
	struct bch_dev_io_failures *f = dev_io_failures(failed, p->ptr.dev);

	if (!f) {
		BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));

		f = &failed->devs[failed->nr++];
	} else if (p->idx != f->idx) {
/*
 * returns true if p1 is better than p2:
 */
static inline bool ptr_better(struct bch_fs *c,
			      const struct extent_ptr_decoded p1,
			      const struct extent_ptr_decoded p2)
{
	if (likely(!p1.idx && !p2.idx)) {
		struct bch_dev *dev1 = bch_dev_bkey_exists(c, p1.ptr.dev);
		struct bch_dev *dev2 = bch_dev_bkey_exists(c, p2.ptr.dev);

		u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
		u64 l2 = atomic64_read(&dev2->cur_latency[READ]);

		/* Pick at random, biased in favor of the faster device: */

		return bch2_rand_range(l1 + l2) > l1;
	}

	if (bch2_force_reconstruct_read)
		return p1.idx > p2.idx;

	return p1.idx < p2.idx;
}
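/*
 * Worked example (editor's sketch, not part of the original source): with
 * hypothetical measured read latencies l1 and l2, the faster device wins with
 * probability proportional to the other device's latency:
 *
 *	u64 l1 = 1000, l2 = 3000;
 *	// bch2_rand_range(l1 + l2) returns a value in [0, 4000);
 *	// it exceeds l1 with probability ~3000/4000, so p1 is picked for
 *	// roughly 75% of reads while p2 still gets a share of the traffic.
 *	bool p1_wins = bch2_rand_range(l1 + l2) > l1;
 */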
/*
 * This picks a non-stale pointer, preferably from a device that hasn't already
 * failed for this read (per @failed, which may be NULL, meaning any pointer is
 * fine). If every non-stale pointer has already failed, it will still pick one
 * of them.
 */
int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
			       struct bch_io_failures *failed,
			       struct extent_ptr_decoded *pick)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_dev_io_failures *f;
	struct bch_dev *ca;
	int ret = 0;

	if (k.k->type == KEY_TYPE_error)
		return -EIO;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		ca = bch_dev_bkey_exists(c, p.ptr.dev);

		/*
		 * If there are any dirty pointers it's an error if we can't
		 * read:
		 */
		if (!ret && !p.ptr.cached)
			ret = -EIO;

		if (p.ptr.cached && ptr_stale(ca, &p.ptr))
			continue;

		f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
		if (f)
			p.idx = f->nr_failed < f->nr_retries
				? f->idx
				: f->idx + 1;

		if (!p.idx &&
		    !bch2_dev_is_readable(ca))
			p.idx++;

		if (bch2_force_reconstruct_read &&
		    !p.idx && p.has_ec)
			p.idx++;

		if (p.idx >= (unsigned) p.has_ec + 1)
			continue;

		if (ret > 0 && !ptr_better(c, p, *pick))
			continue;

		*pick = p;
		ret = 1;
	}

	return ret;
}
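/*
 * Usage sketch (editor's addition, not part of this file): a read path can
 * combine bch2_bkey_pick_read_device() with bch2_mark_io_failure() to retry
 * a read on other replicas. do_read() below is a hypothetical placeholder:
 *
 *	struct bch_io_failures failed = { .nr = 0 };
 *	struct extent_ptr_decoded pick;
 *
 *	while (bch2_bkey_pick_read_device(c, k, &failed, &pick) > 0) {
 *		if (!do_read(c, &pick))			// hypothetical I/O helper
 *			break;				// success
 *		bch2_mark_io_failure(&failed, &pick);	// try the next replica
 *	}
 */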
/* KEY_TYPE_btree_ptr: */

int bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k,
			   int rw, struct printbuf *err)
{
	if (bkey_val_u64s(k.k) > BCH_REPLICAS_MAX) {
		pr_buf(err, "value too big (%zu > %u)",
		       bkey_val_u64s(k.k), BCH_REPLICAS_MAX);
		return -EINVAL;
	}

	return bch2_bkey_ptrs_invalid(c, k, rw, err);
}
void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	bch2_bkey_ptrs_to_text(out, c, k);
}
int bch2_btree_ptr_v2_invalid(const struct bch_fs *c, struct bkey_s_c k,
			      int rw, struct printbuf *err)
{
	struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);

	if (bkey_val_bytes(k.k) <= sizeof(*bp.v)) {
		pr_buf(err, "value too small (%zu <= %zu)",
		       bkey_val_bytes(k.k), sizeof(*bp.v));
		return -EINVAL;
	}

	if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX) {
		pr_buf(err, "value too big (%zu > %zu)",
		       bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX);
		return -EINVAL;
	}

	if (c->sb.version < bcachefs_metadata_version_snapshot &&
	    bp.v->min_key.snapshot) {
		pr_buf(err, "invalid min_key.snapshot (%u != 0)",
		       bp.v->min_key.snapshot);
		return -EINVAL;
	}

	return bch2_bkey_ptrs_invalid(c, k, rw, err);
}
void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
			       struct bkey_s_c k)
{
	struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);

	pr_buf(out, "seq %llx written %u min_key %s",
	       le64_to_cpu(bp.v->seq),
	       le16_to_cpu(bp.v->sectors_written),
	       BTREE_PTR_RANGE_UPDATED(bp.v) ? "R " : "");

	bch2_bpos_to_text(out, bp.v->min_key);
	pr_buf(out, " ");
	bch2_bkey_ptrs_to_text(out, c, k);
}
void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
			      unsigned big_endian, int write,
			      struct bkey_s k)
{
	struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(k);

	compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key);

	if (version < bcachefs_metadata_version_inode_btree_change &&
	    btree_node_type_is_extents(btree_id) &&
	    bkey_cmp(bp.v->min_key, POS_MIN))
		bp.v->min_key = write
			? bpos_nosnap_predecessor(bp.v->min_key)
			: bpos_nosnap_successor(bp.v->min_key);
}
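/*
 * Editor's sketch (not in the original): the compat path shifts min_key by a
 * single position, and the read and write directions undo each other, e.g.
 * assuming a hypothetical key:
 *
 *	struct bpos min_key = POS(42, 100);
 *	// writing out in the old on-disk convention:
 *	struct bpos old = bpos_nosnap_predecessor(min_key);	// 42:99
 *	// reading that old format back in:
 *	struct bpos new = bpos_nosnap_successor(old);		// 42:100 again
 */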
/* KEY_TYPE_extent: */

bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
{
	struct bkey_ptrs l_ptrs = bch2_bkey_ptrs(l);
	struct bkey_ptrs_c r_ptrs = bch2_bkey_ptrs_c(r);
	union bch_extent_entry *en_l;
	const union bch_extent_entry *en_r;
	struct extent_ptr_decoded lp, rp;
	bool use_right_ptr;
	struct bch_dev *ca;

	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
		if (extent_entry_type(en_l) != extent_entry_type(en_r))
			return false;

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	if (en_l < l_ptrs.end || en_r < r_ptrs.end)
		return false;

	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	lp.crc = bch2_extent_crc_unpack(l.k, NULL);
	rp.crc = bch2_extent_crc_unpack(r.k, NULL);

	while (__bkey_ptr_next_decode(l.k, l_ptrs.end, lp, en_l) &&
	       __bkey_ptr_next_decode(r.k, r_ptrs.end, rp, en_r)) {
		if (lp.ptr.offset + lp.crc.offset + lp.crc.live_size !=
		    rp.ptr.offset + rp.crc.offset ||
		    lp.ptr.dev != rp.ptr.dev ||
		    lp.ptr.gen != rp.ptr.gen ||
		    lp.has_ec != rp.has_ec)
			return false;

		/* Extents may not straddle buckets: */
		ca = bch_dev_bkey_exists(c, lp.ptr.dev);
		if (PTR_BUCKET_NR(ca, &lp.ptr) != PTR_BUCKET_NR(ca, &rp.ptr))
			return false;

		if (lp.has_ec != rp.has_ec ||
		    (lp.has_ec &&
		     (lp.ec.block != rp.ec.block ||
		      lp.ec.redundancy != rp.ec.redundancy ||
		      lp.ec.idx != rp.ec.idx)))
			return false;

		if (lp.crc.compression_type != rp.crc.compression_type ||
		    lp.crc.nonce != rp.crc.nonce)
			return false;

		if (lp.crc.offset + lp.crc.live_size + rp.crc.live_size <=
		    lp.crc.uncompressed_size) {
			/* can use left extent's crc entry */
		} else if (lp.crc.live_size <= rp.crc.offset) {
			/* can use right extent's crc entry */
		} else {
			/* check if checksums can be merged: */
			if (lp.crc.csum_type != rp.crc.csum_type ||
			    lp.crc.nonce != rp.crc.nonce ||
			    crc_is_compressed(lp.crc) ||
			    !bch2_checksum_mergeable(lp.crc.csum_type))
				return false;

			if (lp.crc.offset + lp.crc.live_size != lp.crc.compressed_size ||
			    rp.crc.offset)
				return false;

			if (lp.crc.csum_type &&
			    lp.crc.uncompressed_size +
			    rp.crc.uncompressed_size > (c->opts.encoded_extent_max >> 9))
				return false;

			if (lp.crc.uncompressed_size + rp.crc.uncompressed_size >
			    bch2_crc_field_size_max[extent_entry_type(en_l)])
				return false;
		}

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	use_right_ptr = false;
	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	while (en_l < l_ptrs.end) {
		if (extent_entry_type(en_l) == BCH_EXTENT_ENTRY_ptr &&
		    use_right_ptr)
			en_l->ptr = en_r->ptr;

		if (extent_entry_is_crc(en_l)) {
			struct bch_extent_crc_unpacked crc_l =
				bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
			struct bch_extent_crc_unpacked crc_r =
				bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

			use_right_ptr = false;

			if (crc_l.offset + crc_l.live_size + crc_r.live_size <=
			    crc_l.uncompressed_size) {
				/* can use left extent's crc entry */
			} else if (crc_l.live_size <= crc_r.offset) {
				/* can use right extent's crc entry */
				crc_r.offset -= crc_l.live_size;
				bch2_extent_crc_pack(entry_to_crc(en_l), crc_r,
						     extent_entry_type(en_l));
				use_right_ptr = true;
			} else {
				crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
								 crc_l.csum,
								 crc_r.csum,
								 crc_r.uncompressed_size << 9);

				crc_l.uncompressed_size += crc_r.uncompressed_size;
				crc_l.compressed_size += crc_r.compressed_size;
				bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
						     extent_entry_type(en_l));
			}
		}

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	bch2_key_resize(l.k, l.k->size + r.k->size);
	return true;
}
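/*
 * Worked example (editor's addition): two extents are only mergeable if, for
 * every pointer, the right extent's data starts exactly where the left
 * extent's live data ends on the same device and bucket. Assuming
 * uncompressed data with no checksum offsets:
 *
 *	left:  ptr.offset = 1024, crc.offset = 0, crc.live_size = 8
 *	right: ptr.offset = 1032, crc.offset = 0
 *
 *	1024 + 0 + 8 == 1032 + 0, so the pointers line up and the merged
 *	key's size becomes l.k->size + r.k->size.
 */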
/* KEY_TYPE_reservation: */

int bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k,
			     int rw, struct printbuf *err)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation)) {
		pr_buf(err, "incorrect value size (%zu != %zu)",
		       bkey_val_bytes(k.k), sizeof(*r.v));
		return -EINVAL;
	}

	if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX) {
		pr_buf(err, "invalid nr_replicas (%u)",
		       r.v->nr_replicas);
		return -EINVAL;
	}

	return 0;
}

void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
			      struct bkey_s_c k)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	pr_buf(out, "generation %u replicas %u",
	       le32_to_cpu(r.v->generation),
	       r.v->nr_replicas);
}

bool bch2_reservation_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
{
	struct bkey_s_reservation l = bkey_s_to_reservation(_l);
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(_r);

	if (l.v->generation != r.v->generation ||
	    l.v->nr_replicas != r.v->nr_replicas)
		return false;

	bch2_key_resize(l.k, l.k->size + r.k->size);
	return true;
}
/* Extent checksum entries: */

/* returns true if not equal */
static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
					 struct bch_extent_crc_unpacked r)
{
	return (l.csum_type != r.csum_type ||
		l.compression_type != r.compression_type ||
		l.compressed_size != r.compressed_size ||
		l.uncompressed_size != r.uncompressed_size ||
		l.offset != r.offset ||
		l.live_size != r.live_size ||
		l.nonce != r.nonce ||
		bch2_crc_cmp(l.csum, r.csum));
}

static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
				  struct bch_extent_crc_unpacked n)
{
	return !crc_is_compressed(u) &&
		u.csum_type &&
		u.uncompressed_size > u.live_size &&
		bch2_csum_type_is_encryption(u.csum_type) ==
		bch2_csum_type_is_encryption(n.csum_type);
}
bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
				 struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	if (!n.csum_type)
		return false;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (can_narrow_crc(crc, n))
			return true;

	return false;
}
/*
 * We're writing another replica for this extent, so while we've got the data in
 * memory we'll be computing a new checksum for the currently live data.
 *
 * If there are other replicas we aren't moving, and they are checksummed but
 * not compressed, we can modify them to point to only the data that is
 * currently live (so that readers won't have to bounce) while we've got the
 * checksum we need:
 */
bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked u;
	struct extent_ptr_decoded p;
	union bch_extent_entry *i;
	bool ret = false;

	/* Find a checksum entry that covers only live data: */
	if (!n.csum_type) {
		bkey_for_each_crc(&k->k, ptrs, u, i)
			if (!crc_is_compressed(u) &&
			    u.csum_type &&
			    u.live_size == u.uncompressed_size) {
				n = u;
				goto found;
			}
		return false;
	}
found:
	BUG_ON(crc_is_compressed(n));
	BUG_ON(n.offset);
	BUG_ON(n.live_size != k->k.size);

restart_narrow_pointers:
	ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));

	bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
		if (can_narrow_crc(p.crc, n)) {
			__bch2_bkey_drop_ptr(bkey_i_to_s(k), &i->ptr);
			p.ptr.offset += p.crc.offset;
			p.crc = n;
			bch2_extent_ptr_decoded_append(k, &p);
			ret = true;
			goto restart_narrow_pointers;
		}

	return ret;
}
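/*
 * Worked example (editor's addition): "narrowing" rewrites a pointer whose
 * checksum covers more data than is live so that it covers only the live
 * range. Assuming an uncompressed pointer with
 *
 *	crc.uncompressed_size = 16, crc.live_size = 8, crc.offset = 4
 *
 * and a freshly computed checksum n with n.live_size == k->k.size == 8, the
 * pointer is dropped and re-added with
 *
 *	p.ptr.offset += p.crc.offset;	// skip the 4 dead sectors up front
 *	p.crc = n;			// checksum now covers exactly 8 sectors
 *
 * so a reader verifying the checksum only has to read 8 sectors instead of
 * the original 16.
 */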
static void bch2_extent_crc_pack(union bch_extent_crc *dst,
				 struct bch_extent_crc_unpacked src,
				 enum bch_extent_entry_type type)
{
#define set_common_fields(_dst, _src)				\
	_dst.type = 1 << type;					\
	_dst.csum_type = _src.csum_type,			\
	_dst.compression_type = _src.compression_type,		\
	_dst._compressed_size = _src.compressed_size - 1,	\
	_dst._uncompressed_size = _src.uncompressed_size - 1,	\
	_dst.offset = _src.offset

	switch (type) {
	case BCH_EXTENT_ENTRY_crc32:
		set_common_fields(dst->crc32, src);
		dst->crc32.csum = *((__le32 *) &src.csum.lo);
		break;
	case BCH_EXTENT_ENTRY_crc64:
		set_common_fields(dst->crc64, src);
		dst->crc64.nonce = src.nonce;
		dst->crc64.csum_lo = src.csum.lo;
		dst->crc64.csum_hi = *((__le16 *) &src.csum.hi);
		break;
	case BCH_EXTENT_ENTRY_crc128:
		set_common_fields(dst->crc128, src);
		dst->crc128.nonce = src.nonce;
		dst->crc128.csum = src.csum;
		break;
	default:
		BUG();
	}
#undef set_common_fields
}
void bch2_extent_crc_append(struct bkey_i *k,
			    struct bch_extent_crc_unpacked new)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	union bch_extent_crc *crc = (void *) ptrs.end;
	enum bch_extent_entry_type type;

	if (bch_crc_bytes[new.csum_type] <= 4 &&
	    new.uncompressed_size <= CRC32_SIZE_MAX &&
	    new.nonce <= CRC32_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc32;
	else if (bch_crc_bytes[new.csum_type] <= 10 &&
		 new.uncompressed_size <= CRC64_SIZE_MAX &&
		 new.nonce <= CRC64_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc64;
	else if (bch_crc_bytes[new.csum_type] <= 16 &&
		 new.uncompressed_size <= CRC128_SIZE_MAX &&
		 new.nonce <= CRC128_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc128;
	else
		BUG();

	bch2_extent_crc_pack(crc, new, type);

	k->k.u64s += extent_entry_u64s(ptrs.end);

	EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
}
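/*
 * Editor's note (sketch): the entry type chosen above is the smallest packed
 * crc variant that can hold the checksum bytes, the extent size and the
 * nonce, e.g.:
 *
 *	// crc32c (4 byte csum), small extent, no nonce  -> crc32 entry
 *	// crc64 / xxhash (8 byte csum)                  -> crc64 entry
 *	// chacha20/poly1305 (16 byte csum), large nonce -> crc128 entry
 *
 * using the CRC{32,64,128}_SIZE_MAX / _NONCE_MAX limits checked above.
 */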
/* Generic code for keys with pointers: */

unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
{
	return bch2_bkey_devs(k).nr;
}

unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
{
	return k.k->type == KEY_TYPE_reservation
		? bkey_s_c_to_reservation(k).v->nr_replicas
		: bch2_bkey_dirty_devs(k).nr;
}

unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
{
	unsigned ret = 0;

	if (k.k->type == KEY_TYPE_reservation) {
		ret = bkey_s_c_to_reservation(k).v->nr_replicas;
	} else {
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			ret += !p.ptr.cached && !crc_is_compressed(p.crc);
	}

	return ret;
}
unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned ret = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (!p.ptr.cached && crc_is_compressed(p.crc))
			ret += p.crc.compressed_size;

	return ret;
}

bool bch2_bkey_is_incompressible(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;

	bkey_for_each_crc(k.k, ptrs, crc, entry)
		if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
			return true;

	return false;
}
unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p = { 0 };
	unsigned replicas = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if (p.ptr.cached)
			continue;

		if (p.has_ec)
			replicas += p.ec.redundancy;

		replicas++;
	}

	return replicas;
}
static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
					   struct extent_ptr_decoded p)
{
	unsigned durability = 0;
	struct bch_dev *ca;

	if (p.ptr.cached)
		return 0;

	ca = bch_dev_bkey_exists(c, p.ptr.dev);

	if (ca->mi.state != BCH_MEMBER_STATE_failed)
		durability = max_t(unsigned, durability, ca->mi.durability);

	if (p.has_ec)
		durability += p.ec.redundancy;

	return durability;
}

unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned durability = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		durability += bch2_extent_ptr_durability(c, p);

	return durability;
}
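/*
 * Worked example (editor's addition): durability estimates how many device
 * failures a key can survive. Assuming two dirty pointers, one on a device
 * with mi.durability = 1, the other on a device with mi.durability = 2 that
 * also belongs to a stripe with ec.redundancy = 1:
 *
 *	bch2_bkey_durability() = 1 + (2 + 1) = 4
 *
 * Cached pointers contribute nothing, and a pointer to a failed device only
 * contributes its stripe's redundancy.
 */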
void bch2_bkey_mark_replicas_cached(struct bch_fs *c, struct bkey_s k,
				    unsigned target,
				    unsigned nr_desired_replicas)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	int extra = bch2_bkey_durability(c, k.s_c) - nr_desired_replicas;

	if (target && extra > 0)
		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			int n = bch2_extent_ptr_durability(c, p);

			if (n && n <= extra &&
			    !bch2_dev_in_target(c, p.ptr.dev, target)) {
				entry->ptr.cached = true;
				extra -= n;
			}
		}

	if (extra > 0)
		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			int n = bch2_extent_ptr_durability(c, p);

			if (n && n <= extra) {
				entry->ptr.cached = true;
				extra -= n;
			}
		}
}
void bch2_bkey_extent_entry_drop(struct bkey_i *k, union bch_extent_entry *entry)
{
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
	union bch_extent_entry *next = extent_entry_next(entry);

	memmove_u64s(entry, next, (u64 *) end - (u64 *) next);
	k->k.u64s -= extent_entry_u64s(entry);
}
void bch2_bkey_append_ptr(struct bkey_i *k,
			  struct bch_extent_ptr ptr)
{
	EBUG_ON(bch2_bkey_has_device(bkey_i_to_s_c(k), ptr.dev));

	switch (k->k.type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
	case KEY_TYPE_extent:
		EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);

		ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;

		memcpy((void *) &k->v + bkey_val_bytes(&k->k),
		       &ptr,
		       sizeof(ptr));
		k->k.u64s++;
		break;
	default:
		BUG();
	}
}
static inline void __extent_entry_insert(struct bkey_i *k,
					 union bch_extent_entry *dst,
					 union bch_extent_entry *new)
{
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));

	memmove_u64s_up_small((u64 *) dst + extent_entry_u64s(new),
			      dst, (u64 *) end - (u64 *) dst);
	k->k.u64s += extent_entry_u64s(new);
	memcpy(dst, new, extent_entry_bytes(new));
}
void bch2_extent_ptr_decoded_append(struct bkey_i *k,
				    struct extent_ptr_decoded *p)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked crc =
		bch2_extent_crc_unpack(&k->k, NULL);
	union bch_extent_entry *pos;

	if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
		pos = ptrs.start;
		goto found;
	}

	bkey_for_each_crc(&k->k, ptrs, crc, pos)
		if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
			pos = extent_entry_next(pos);
			goto found;
		}

	bch2_extent_crc_append(k, p->crc);
	pos = bkey_val_end(bkey_i_to_s(k));
found:
	p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
	__extent_entry_insert(k, pos, to_entry(&p->ptr));

	if (p->has_ec) {
		p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
		__extent_entry_insert(k, pos, to_entry(&p->ec));
	}
}
static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
						 union bch_extent_entry *entry)
{
	union bch_extent_entry *i = ptrs.start;

	if (i == entry)
		return NULL;

	while (extent_entry_next(i) != entry)
		i = extent_entry_next(i);
	return i;
}

static void extent_entry_drop(struct bkey_s k, union bch_extent_entry *entry)
{
	union bch_extent_entry *next = extent_entry_next(entry);

	/* stripes have ptrs, but their layout doesn't work with this code */
	BUG_ON(k.k->type == KEY_TYPE_stripe);

	memmove_u64s_down(entry, next,
			  (u64 *) bkey_val_end(k) - (u64 *) next);
	k.k->u64s -= (u64 *) next - (u64 *) entry;
}
/*
 * Returns pointer to the next entry after the one being dropped:
 */
union bch_extent_entry *__bch2_bkey_drop_ptr(struct bkey_s k,
					     struct bch_extent_ptr *ptr)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry = to_entry(ptr), *next;
	union bch_extent_entry *ret = entry;
	bool drop_crc = true;

	EBUG_ON(ptr < &ptrs.start->ptr ||
		ptr >= &ptrs.end->ptr);
	EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);

	for (next = extent_entry_next(entry);
	     next != ptrs.end;
	     next = extent_entry_next(next)) {
		if (extent_entry_is_crc(next)) {
			break;
		} else if (extent_entry_is_ptr(next)) {
			drop_crc = false;
			break;
		}
	}

	extent_entry_drop(k, entry);

	while ((entry = extent_entry_prev(ptrs, entry))) {
		if (extent_entry_is_ptr(entry))
			break;

		if ((extent_entry_is_crc(entry) && drop_crc) ||
		    extent_entry_is_stripe_ptr(entry)) {
			ret = (void *) ret - extent_entry_bytes(entry);
			extent_entry_drop(k, entry);
		}
	}

	return ret;
}
union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
					   struct bch_extent_ptr *ptr)
{
	bool have_dirty = bch2_bkey_dirty_devs(k.s_c).nr;
	union bch_extent_entry *ret =
		__bch2_bkey_drop_ptr(k, ptr);

	/*
	 * If we deleted all the dirty pointers and there's still cached
	 * pointers, we could set the cached pointers to dirty if they're not
	 * stale - but to do that correctly we'd need to grab an open_bucket
	 * reference so that we don't race with bucket reuse:
	 */
	if (have_dirty &&
	    !bch2_bkey_dirty_devs(k.s_c).nr) {
		k.k->type = KEY_TYPE_error;
		set_bkey_val_u64s(k.k, 0);
		return NULL;
	} else if (!bch2_bkey_nr_ptrs(k.s_c)) {
		k.k->type = KEY_TYPE_deleted;
		set_bkey_val_u64s(k.k, 0);
		return NULL;
	} else
		return ret;
}
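/*
 * Usage sketch (editor's addition): dropping the last dirty pointer while
 * cached pointers remain turns the key into KEY_TYPE_error (reads must not
 * silently succeed from possibly-stale cached copies); dropping the last
 * pointer of any kind leaves KEY_TYPE_deleted. A caller dropping every
 * pointer on one device might look like:
 *
 *	struct bch_extent_ptr *ptr;
 *
 *	bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
 *
 * which is exactly what bch2_bkey_drop_device() below does.
 */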
void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
{
	struct bch_extent_ptr *ptr;

	bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
}

const struct bch_extent_ptr *
bch2_bkey_has_device(struct bkey_s_c k, unsigned dev)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)
		if (ptr->dev == dev)
			return ptr;

	return NULL;
}
bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)
		if (bch2_dev_in_target(c, ptr->dev, target) &&
		    (!ptr->cached ||
		     !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
			return true;

	return false;
}
bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
			   struct bch_extent_ptr m, u64 offset)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (p.ptr.dev == m.dev &&
		    p.ptr.gen == m.gen &&
		    (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
		    (s64) m.offset - offset)
			return true;

	return false;
}
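/*
 * Worked example (editor's addition): bch2_bkey_matches_ptr() checks whether
 * a pointer @m taken from an older copy of the extent (which then started at
 * @offset) still refers to the same on-disk data after the extent was
 * trimmed. Assuming the extent now starts at sector 108 and @m was recorded
 * when it started at sector 100:
 *
 *	old: m.offset = 1000, offset = 100
 *	new: p.ptr.offset = 1008, p.crc.offset = 0, bkey_start_offset = 108
 *
 *	1008 + 0 - 108 == 1000 - 100  ->  match
 */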
/*
 * bch_extent_normalize - clean up an extent, dropping stale pointers etc.
 *
 * Returns true if @k should be dropped entirely
 *
 * For existing keys, only called when btree nodes are being rewritten, not when
 * they're merely being compacted/resorted in memory.
 */
bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
{
	struct bch_extent_ptr *ptr;

	bch2_bkey_drop_ptrs(k, ptr,
			    ptr->cached &&
			    ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));

	return bkey_deleted(k.k);
}
void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	const struct bch_extent_ptr *ptr;
	const struct bch_extent_stripe_ptr *ec;
	struct bch_dev *ca;

	bkey_extent_entry_for_each(ptrs, entry) {
		switch (__extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			ptr = entry_to_ptr(entry);
			ca = c && ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
				? bch_dev_bkey_exists(c, ptr->dev)
				: NULL;

			if (!ca) {
				pr_buf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
				       (u64) ptr->offset, ptr->gen,
				       ptr->cached ? " cached" : "");
			} else {
				u32 offset;
				u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);

				pr_buf(out, "ptr: %u:%llu:%u gen %u%s", ptr->dev,
				       b, offset, ptr->gen,
				       ptr->cached ? " cached" : "");

				if (ca && ptr_stale(ca, ptr))
					pr_buf(out, " stale");
			}
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			pr_buf(out, "crc: c_size %u size %u offset %u nonce %u csum %s compress %s",
			       crc.compressed_size,
			       crc.uncompressed_size,
			       crc.offset, crc.nonce,
			       bch2_csum_types[crc.csum_type],
			       bch2_compression_types[crc.compression_type]);
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			ec = &entry->stripe_ptr;

			pr_buf(out, "ec: idx %llu block %u",
			       (u64) ec->idx, ec->block);
			break;
		default:
			pr_buf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
static int extent_ptr_invalid(const struct bch_fs *c,
			      struct bkey_s_c k,
			      const struct bch_extent_ptr *ptr,
			      unsigned size_ondisk,
			      struct printbuf *err)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr2;
	u64 bucket;
	u32 bucket_offset;
	struct bch_dev *ca;

	if (!bch2_dev_exists2(c, ptr->dev)) {
		pr_buf(err, "pointer to invalid device (%u)", ptr->dev);
		return -EINVAL;
	}

	ca = bch_dev_bkey_exists(c, ptr->dev);
	bkey_for_each_ptr(ptrs, ptr2)
		if (ptr != ptr2 && ptr->dev == ptr2->dev) {
			pr_buf(err, "multiple pointers to same device (%u)", ptr->dev);
			return -EINVAL;
		}

	bucket = sector_to_bucket_and_offset(ca, ptr->offset, &bucket_offset);

	if (bucket >= ca->mi.nbuckets) {
		pr_buf(err, "pointer past last bucket (%llu > %llu)",
		       bucket, ca->mi.nbuckets);
		return -EINVAL;
	}

	if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket)) {
		pr_buf(err, "pointer before first bucket (%llu < %u)",
		       bucket, ca->mi.first_bucket);
		return -EINVAL;
	}

	if (bucket_offset + size_ondisk > ca->mi.bucket_size) {
		pr_buf(err, "pointer spans multiple buckets (%u + %u > %u)",
		       bucket_offset, size_ondisk, ca->mi.bucket_size);
		return -EINVAL;
	}

	return 0;
}
int bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k,
			   int rw, struct printbuf *err)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	unsigned size_ondisk = k.k->size;
	unsigned nonce = UINT_MAX;
	int ret = 0;

	if (bkey_is_btree_ptr(k.k))
		size_ondisk = btree_sectors(c);

	bkey_extent_entry_for_each(ptrs, entry) {
		if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX) {
			pr_buf(err, "invalid extent entry type (got %u, max %u)",
			       __extent_entry_type(entry), BCH_EXTENT_ENTRY_MAX);
			return -EINVAL;
		}

		if (bkey_is_btree_ptr(k.k) &&
		    !extent_entry_is_ptr(entry)) {
			pr_buf(err, "has non ptr field");
			return -EINVAL;
		}

		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			ret = extent_ptr_invalid(c, k, &entry->ptr, size_ondisk,
						 err);
			if (ret)
				return ret;
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			if (crc.offset + crc.live_size >
			    crc.uncompressed_size) {
				pr_buf(err, "checksum offset + key size > uncompressed size");
				return -EINVAL;
			}

			size_ondisk = crc.compressed_size;

			if (!bch2_checksum_type_valid(c, crc.csum_type)) {
				pr_buf(err, "invalid checksum type");
				return -EINVAL;
			}

			if (crc.compression_type >= BCH_COMPRESSION_TYPE_NR) {
				pr_buf(err, "invalid compression type");
				return -EINVAL;
			}

			if (bch2_csum_type_is_encryption(crc.csum_type)) {
				if (nonce == UINT_MAX)
					nonce = crc.offset + crc.nonce;
				else if (nonce != crc.offset + crc.nonce) {
					pr_buf(err, "incorrect nonce");
					return -EINVAL;
				}
			}
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			break;
		}
	}

	return 0;
}
void bch2_ptr_swab(struct bkey_s k)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	u64 *d;

	for (d =  (u64 *) ptrs.start;
	     d != (u64 *) ptrs.end;
	     d++)
		*d = swab64(*d);

	for (entry = ptrs.start;
	     entry < ptrs.end;
	     entry = extent_entry_next(entry)) {
		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			break;
		case BCH_EXTENT_ENTRY_crc32:
			entry->crc32.csum = swab32(entry->crc32.csum);
			break;
		case BCH_EXTENT_ENTRY_crc64:
			entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
			entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
			break;
		case BCH_EXTENT_ENTRY_crc128:
			entry->crc128.csum.hi = (__force __le64)
				swab64((__force u64) entry->crc128.csum.hi);
			entry->crc128.csum.lo = (__force __le64)
				swab64((__force u64) entry->crc128.csum.lo);
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			break;
		}
	}
}
/* Generic extent code: */

int bch2_cut_front_s(struct bpos where, struct bkey_s k)
{
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 sub;

	if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
		return 0;

	EBUG_ON(bkey_cmp(where, k.k->p) > 0);

	sub = where.offset - bkey_start_offset(k.k);

	k.k->size -= sub;

	if (!k.k->size) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
	}

	switch (k.k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v: {
		struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
		union bch_extent_entry *entry;
		bool seen_crc = false;

		bkey_extent_entry_for_each(ptrs, entry) {
			switch (extent_entry_type(entry)) {
			case BCH_EXTENT_ENTRY_ptr:
				if (!seen_crc)
					entry->ptr.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc32:
				entry->crc32.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc64:
				entry->crc64.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc128:
				entry->crc128.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_stripe_ptr:
				break;
			}

			if (extent_entry_is_crc(entry))
				seen_crc = true;
		}

		break;
	}
	case KEY_TYPE_reflink_p: {
		struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);

		le64_add_cpu(&p.v->idx, sub);
		break;
	}
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data: {
		void *p = bkey_inline_data_p(k);
		unsigned bytes = bkey_inline_data_bytes(k.k);

		sub = min_t(u64, sub << 9, bytes);

		memmove(p, p + sub, bytes - sub);

		new_val_u64s -= sub >> 3;
		break;
	}
	}

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
}
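/*
 * Worked example (editor's addition): cutting the front off an extent.
 * Assuming a 16-sector extent covering 120..136 cut at where.offset = 124:
 *
 *	sub = 124 - 120 = 4
 *	k.k->size becomes 12
 *	// checksummed pointers: crc*.offset += 4 (the data stays where it is,
 *	// the live region just starts later)
 *	// bare pointers: ptr.offset += 4
 *	// inline data: the value is memmove()d down by sub << 9 bytes
 *
 * The return value is minus the change in value u64s, i.e. 0 unless inline
 * data shrank.
 */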
int bch2_cut_back_s(struct bpos where, struct bkey_s k)
{
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 len = 0;

	if (bkey_cmp(where, k.k->p) >= 0)
		return 0;

	EBUG_ON(bkey_cmp(where, bkey_start_pos(k.k)) < 0);

	len = where.offset - bkey_start_offset(k.k);

	k.k->p.offset = where.offset;
	k.k->size = len;

	if (!len) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
	}

	switch (k.k->type) {
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data:
		new_val_u64s = (bkey_inline_data_offset(k.k) +
				min(bkey_inline_data_bytes(k.k), k.k->size << 9)) >> 3;
		break;
	}

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
}