// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Code for managing the extent btree and dynamically updating the writeback
 * dirty sector count.
 */

#include "bkey_methods.h"
#include "btree_iter.h"
#include "disk_groups.h"
static unsigned bch2_crc_field_size_max[] = {
	[BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,

static void bch2_extent_crc_pack(union bch_extent_crc *,
				 struct bch_extent_crc_unpacked,
				 enum bch_extent_entry_type);

static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
	struct bch_dev_io_failures *i;

	for (i = f->devs; i < f->devs + f->nr; i++)

void bch2_mark_io_failure(struct bch_io_failures *failed,
			  struct extent_ptr_decoded *p)
	struct bch_dev_io_failures *f = dev_io_failures(failed, p->ptr.dev);

		BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));

		f = &failed->devs[failed->nr++];
	} else if (p->idx != f->idx) {
/*
 * returns true if p1 is better than p2:
 */
static inline bool ptr_better(struct bch_fs *c,
			      const struct extent_ptr_decoded p1,
			      const struct extent_ptr_decoded p2)
	if (likely(!p1.idx && !p2.idx)) {
		struct bch_dev *dev1 = bch_dev_bkey_exists(c, p1.ptr.dev);
		struct bch_dev *dev2 = bch_dev_bkey_exists(c, p2.ptr.dev);

		u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
		u64 l2 = atomic64_read(&dev2->cur_latency[READ]);

		/* Pick at random, biased in favor of the faster device: */

		return bch2_rand_range(l1 + l2) > l1;

	if (bch2_force_reconstruct_read)
		return p1.idx > p2.idx;

	return p1.idx < p2.idx;
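/*
 * Illustrative arithmetic (not from this file): with observed read latencies
 * l1 = 1ms and l2 = 3ms, bch2_rand_range(l1 + l2) is uniform over [0, 4ms),
 * so p1 wins with probability ~3/4 and p2 with ~1/4 - each device is picked
 * in inverse proportion to its share of the combined latency.
 */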
/*
 * This picks a non-stale pointer, preferably from a device other than @avoid.
 * Avoid can be NULL, meaning pick any. If there are no non-stale pointers to
 * other devices, it will still pick a pointer from avoid.
 */
int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
			       struct bch_io_failures *failed,
			       struct extent_ptr_decoded *pick)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_dev_io_failures *f;

	if (k.k->type == KEY_TYPE_error)

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		/*
		 * Unwritten extent: no need to actually read, treat it as a
		 * hole and return 0s:
		 */

		ca = bch_dev_bkey_exists(c, p.ptr.dev);

		/*
		 * If there are any dirty pointers it's an error if we can't
		 * read:
		 */
		if (!ret && !p.ptr.cached)

		if (p.ptr.cached && ptr_stale(ca, &p.ptr))

		f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
			p.idx = f->nr_failed < f->nr_retries

		    !bch2_dev_is_readable(ca))

		if (bch2_force_reconstruct_read &&

		if (p.idx >= (unsigned) p.has_ec + 1)

		if (ret > 0 && !ptr_better(c, p, *pick))
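/*
 * Sketch of a typical caller (not code from this file): the read path keeps a
 * bch_io_failures list across retries, so a pointer that failed is skipped on
 * the next pick:
 *
 *	struct bch_io_failures failed = { .nr = 0 };
 *	struct extent_ptr_decoded pick;
 *
 *	while (bch2_bkey_pick_read_device(c, k, &failed, &pick) > 0) {
 *		if (read_from(&pick) == 0)
 *			break;
 *		bch2_mark_io_failure(&failed, &pick);
 *	}
 *
 * read_from() is a placeholder for the actual I/O submission.
 */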
/* KEY_TYPE_btree_ptr: */

int bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k,
			   enum bkey_invalid_flags flags,
			   struct printbuf *err)
	if (bkey_val_u64s(k.k) > BCH_REPLICAS_MAX) {
		prt_printf(err, "value too big (%zu > %u)",
			   bkey_val_u64s(k.k), BCH_REPLICAS_MAX);
		return -BCH_ERR_invalid_bkey;

	return bch2_bkey_ptrs_invalid(c, k, flags, err);

void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
	bch2_bkey_ptrs_to_text(out, c, k);

int bch2_btree_ptr_v2_invalid(const struct bch_fs *c, struct bkey_s_c k,
			      enum bkey_invalid_flags flags,
			      struct printbuf *err)
	if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX) {
		prt_printf(err, "value too big (%zu > %zu)",
			   bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX);
		return -BCH_ERR_invalid_bkey;

	return bch2_bkey_ptrs_invalid(c, k, flags, err);

void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
	struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);

	prt_printf(out, "seq %llx written %u min_key %s",
		   le64_to_cpu(bp.v->seq),
		   le16_to_cpu(bp.v->sectors_written),
		   BTREE_PTR_RANGE_UPDATED(bp.v) ? "R " : "");

	bch2_bpos_to_text(out, bp.v->min_key);
	prt_printf(out, " ");
	bch2_bkey_ptrs_to_text(out, c, k);

void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
			      unsigned big_endian, int write,
	struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(k);

	compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key);

	if (version < bcachefs_metadata_version_inode_btree_change &&
	    btree_id_is_extents(btree_id) &&
	    !bkey_eq(bp.v->min_key, POS_MIN))
		bp.v->min_key = write
			? bpos_nosnap_predecessor(bp.v->min_key)
			: bpos_nosnap_successor(bp.v->min_key);

/* KEY_TYPE_extent: */
bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
	struct bkey_ptrs l_ptrs = bch2_bkey_ptrs(l);
	struct bkey_ptrs_c r_ptrs = bch2_bkey_ptrs_c(r);
	union bch_extent_entry *en_l;
	const union bch_extent_entry *en_r;
	struct extent_ptr_decoded lp, rp;

	while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
		if (extent_entry_type(en_l) != extent_entry_type(en_r))

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);

	if (en_l < l_ptrs.end || en_r < r_ptrs.end)

	lp.crc = bch2_extent_crc_unpack(l.k, NULL);
	rp.crc = bch2_extent_crc_unpack(r.k, NULL);

	while (__bkey_ptr_next_decode(l.k, l_ptrs.end, lp, en_l) &&
	       __bkey_ptr_next_decode(r.k, r_ptrs.end, rp, en_r)) {
		if (lp.ptr.offset + lp.crc.offset + lp.crc.live_size !=
		    rp.ptr.offset + rp.crc.offset ||
		    lp.ptr.dev != rp.ptr.dev ||
		    lp.ptr.gen != rp.ptr.gen ||
		    lp.ptr.unwritten != rp.ptr.unwritten ||
		    lp.has_ec != rp.has_ec)

		/* Extents may not straddle buckets: */
		ca = bch_dev_bkey_exists(c, lp.ptr.dev);
		if (PTR_BUCKET_NR(ca, &lp.ptr) != PTR_BUCKET_NR(ca, &rp.ptr))

		if (lp.has_ec != rp.has_ec ||
		    (lp.ec.block != rp.ec.block ||
		     lp.ec.redundancy != rp.ec.redundancy ||
		     lp.ec.idx != rp.ec.idx)))

		if (lp.crc.compression_type != rp.crc.compression_type ||
		    lp.crc.nonce != rp.crc.nonce)

		if (lp.crc.offset + lp.crc.live_size + rp.crc.live_size <=
		    lp.crc.uncompressed_size) {
			/* can use left extent's crc entry */
		} else if (lp.crc.live_size <= rp.crc.offset) {
			/* can use right extent's crc entry */
			/* check if checksums can be merged: */
			if (lp.crc.csum_type != rp.crc.csum_type ||
			    lp.crc.nonce != rp.crc.nonce ||
			    crc_is_compressed(lp.crc) ||
			    !bch2_checksum_mergeable(lp.crc.csum_type))

			if (lp.crc.offset + lp.crc.live_size != lp.crc.compressed_size ||

			if (lp.crc.csum_type &&
			    lp.crc.uncompressed_size +
			    rp.crc.uncompressed_size > (c->opts.encoded_extent_max >> 9))

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
		if (extent_entry_is_crc(en_l)) {
			struct bch_extent_crc_unpacked crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
			struct bch_extent_crc_unpacked crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

			if (crc_l.uncompressed_size + crc_r.uncompressed_size >
			    bch2_crc_field_size_max[extent_entry_type(en_l)])

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);

	use_right_ptr = false;
	while (en_l < l_ptrs.end) {
		if (extent_entry_type(en_l) == BCH_EXTENT_ENTRY_ptr &&
			en_l->ptr = en_r->ptr;

		if (extent_entry_is_crc(en_l)) {
			struct bch_extent_crc_unpacked crc_l =
				bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
			struct bch_extent_crc_unpacked crc_r =
				bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

			use_right_ptr = false;

			if (crc_l.offset + crc_l.live_size + crc_r.live_size <=
			    crc_l.uncompressed_size) {
				/* can use left extent's crc entry */
			} else if (crc_l.live_size <= crc_r.offset) {
				/* can use right extent's crc entry */
				crc_r.offset -= crc_l.live_size;
				bch2_extent_crc_pack(entry_to_crc(en_l), crc_r,
						     extent_entry_type(en_l));
				use_right_ptr = true;
				crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
								 crc_r.uncompressed_size << 9);

				crc_l.uncompressed_size += crc_r.uncompressed_size;
				crc_l.compressed_size += crc_r.compressed_size;
				bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
						     extent_entry_type(en_l));

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);

	bch2_key_resize(l.k, l.k->size + r.k->size);
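/*
 * Worked example (hypothetical numbers): two 8-sector extents on the same
 * device with the same generation, the left one's data at sectors 1000..1007
 * and the right one's at 1008..1015, satisfy
 *	lp.ptr.offset + lp.crc.offset + lp.crc.live_size ==
 *	rp.ptr.offset + rp.crc.offset
 * (1000 + 0 + 8 == 1008 + 0), so as long as both sit in the same bucket and
 * their checksums can be combined, they merge into one 16-sector extent.
 */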
/* KEY_TYPE_reservation: */

int bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k,
			     enum bkey_invalid_flags flags,
			     struct printbuf *err)
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX) {
		prt_printf(err, "invalid nr_replicas (%u)",
		return -BCH_ERR_invalid_bkey;

void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	prt_printf(out, "generation %u replicas %u",
		   le32_to_cpu(r.v->generation),

bool bch2_reservation_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
	struct bkey_s_reservation l = bkey_s_to_reservation(_l);
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(_r);

	if (l.v->generation != r.v->generation ||
	    l.v->nr_replicas != r.v->nr_replicas)

	bch2_key_resize(l.k, l.k->size + r.k->size);

/* Extent checksum entries: */

/* returns true if not equal */
static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
					 struct bch_extent_crc_unpacked r)
	return (l.csum_type != r.csum_type ||
		l.compression_type != r.compression_type ||
		l.compressed_size != r.compressed_size ||
		l.uncompressed_size != r.uncompressed_size ||
		l.offset != r.offset ||
		l.live_size != r.live_size ||
		l.nonce != r.nonce ||
		bch2_crc_cmp(l.csum, r.csum));

static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
				  struct bch_extent_crc_unpacked n)
	return !crc_is_compressed(u) &&
		u.uncompressed_size > u.live_size &&
		bch2_csum_type_is_encryption(u.csum_type) ==
		bch2_csum_type_is_encryption(n.csum_type);

bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
				 struct bch_extent_crc_unpacked n)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (can_narrow_crc(crc, n))
/*
 * We're writing another replica for this extent, so while we've got the data in
 * memory we'll be computing a new checksum for the currently live data.
 *
 * If there are other replicas we aren't moving, and they are checksummed but
 * not compressed, we can modify them to point to only the data that is
 * currently live (so that readers won't have to bounce) while we've got the
 * checksum we need:
 */
bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked u;
	struct extent_ptr_decoded p;
	union bch_extent_entry *i;

	/* Find a checksum entry that covers only live data: */
		bkey_for_each_crc(&k->k, ptrs, u, i)
			if (!crc_is_compressed(u) &&
			    u.live_size == u.uncompressed_size) {

	BUG_ON(crc_is_compressed(n));
	BUG_ON(n.live_size != k->k.size);

restart_narrow_pointers:
	ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));

	bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
		if (can_narrow_crc(p.crc, n)) {
			bch2_bkey_drop_ptr_noerror(bkey_i_to_s(k), &i->ptr);
			p.ptr.offset += p.crc.offset;
			bch2_extent_ptr_decoded_append(k, &p);
			goto restart_narrow_pointers;
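/*
 * Example of the narrowing this enables (illustrative sizes): an extent was
 * written and checksummed as one 128k unit, but overwrites have left only 4k
 * of it live. When another replica of the live 4k is written, @n is the fresh
 * checksum of just that 4k; uncompressed replicas are then repointed at the
 * live range and given the new crc entry, so future reads fetch 4k instead of
 * reading and bouncing the whole 128k to verify the old checksum.
 */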
static void bch2_extent_crc_pack(union bch_extent_crc *dst,
				 struct bch_extent_crc_unpacked src,
				 enum bch_extent_entry_type type)
#define set_common_fields(_dst, _src)					\
	_dst.type		= 1 << type;				\
	_dst.csum_type		= _src.csum_type,			\
	_dst.compression_type	= _src.compression_type,		\
	_dst._compressed_size	= _src.compressed_size - 1,		\
	_dst._uncompressed_size	= _src.uncompressed_size - 1,		\
	_dst.offset		= _src.offset

	case BCH_EXTENT_ENTRY_crc32:
		set_common_fields(dst->crc32, src);
		memcpy(&dst->crc32.csum, &src.csum.lo, sizeof(dst->crc32.csum));
	case BCH_EXTENT_ENTRY_crc64:
		set_common_fields(dst->crc64, src);
		dst->crc64.nonce	= src.nonce;
		dst->crc64.csum_lo	= src.csum.lo;
		dst->crc64.csum_hi	= *((__le16 *) &src.csum.hi);
	case BCH_EXTENT_ENTRY_crc128:
		set_common_fields(dst->crc128, src);
		dst->crc128.nonce	= src.nonce;
		dst->crc128.csum	= src.csum;

#undef set_common_fields

void bch2_extent_crc_append(struct bkey_i *k,
			    struct bch_extent_crc_unpacked new)
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	union bch_extent_crc *crc = (void *) ptrs.end;
	enum bch_extent_entry_type type;

	if (bch_crc_bytes[new.csum_type] <= 4 &&
	    new.uncompressed_size <= CRC32_SIZE_MAX &&
	    new.nonce <= CRC32_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc32;
	else if (bch_crc_bytes[new.csum_type] <= 10 &&
		 new.uncompressed_size <= CRC64_SIZE_MAX &&
		 new.nonce <= CRC64_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc64;
	else if (bch_crc_bytes[new.csum_type] <= 16 &&
		 new.uncompressed_size <= CRC128_SIZE_MAX &&
		 new.nonce <= CRC128_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc128;

	bch2_extent_crc_pack(crc, new, type);

	k->k.u64s += extent_entry_u64s(ptrs.end);

	EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
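/*
 * The cascade above packs a checksum into the smallest entry format that
 * fits: e.g. a crc32c checksum (4 bytes per bch_crc_bytes[]) over a small
 * extent with no nonce fits the compact crc32 entry, while a 16-byte checksum
 * or an oversized nonce falls through to the crc128 format.
 */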
/* Generic code for keys with pointers: */

unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
	return bch2_bkey_devs(k).nr;

unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
	return k.k->type == KEY_TYPE_reservation
		? bkey_s_c_to_reservation(k).v->nr_replicas
		: bch2_bkey_dirty_devs(k).nr;

unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
	if (k.k->type == KEY_TYPE_reservation) {
		ret = bkey_s_c_to_reservation(k).v->nr_replicas;
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			ret += !p.ptr.cached && !crc_is_compressed(p.crc);

unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (!p.ptr.cached && crc_is_compressed(p.crc))
			ret += p.crc.compressed_size;

bool bch2_bkey_is_incompressible(struct bkey_s_c k)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;

	bkey_for_each_crc(k.k, ptrs, crc, entry)
		if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)

unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p = { 0 };
	unsigned replicas = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			replicas += p.ec.redundancy;

unsigned bch2_extent_ptr_desired_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
	ca = bch_dev_bkey_exists(c, p->ptr.dev);

	return ca->mi.durability +

unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
	ca = bch_dev_bkey_exists(c, p->ptr.dev);

	if (ca->mi.state == BCH_MEMBER_STATE_failed)

	return ca->mi.durability +
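/*
 * For illustration (assumed member settings): a pointer on a device whose
 * member info reports durability 1, where the pointer also belongs to an
 * erasure coded stripe with redundancy 1, counts as durability 2, while a
 * pointer on a failed device counts as 0.
 */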
unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned durability = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		durability += bch2_extent_ptr_durability(c, &p);

static unsigned bch2_bkey_durability_safe(struct bch_fs *c, struct bkey_s_c k)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned durability = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (p.ptr.dev < c->sb.nr_devices && c->devs[p.ptr.dev])
			durability += bch2_extent_ptr_durability(c, &p);

void bch2_bkey_extent_entry_drop(struct bkey_i *k, union bch_extent_entry *entry)
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
	union bch_extent_entry *next = extent_entry_next(entry);

	memmove_u64s(entry, next, (u64 *) end - (u64 *) next);
	k->k.u64s -= extent_entry_u64s(entry);

void bch2_extent_ptr_decoded_append(struct bkey_i *k,
				    struct extent_ptr_decoded *p)
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked crc =
		bch2_extent_crc_unpack(&k->k, NULL);
	union bch_extent_entry *pos;

	if (!bch2_crc_unpacked_cmp(crc, p->crc)) {

	bkey_for_each_crc(&k->k, ptrs, crc, pos)
		if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
			pos = extent_entry_next(pos);

	bch2_extent_crc_append(k, p->crc);
	pos = bkey_val_end(bkey_i_to_s(k));

	p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
	__extent_entry_insert(k, pos, to_entry(&p->ptr));

		p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
		__extent_entry_insert(k, pos, to_entry(&p->ec));

static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
						 union bch_extent_entry *entry)
	union bch_extent_entry *i = ptrs.start;

	while (extent_entry_next(i) != entry)
		i = extent_entry_next(i);

static void extent_entry_drop(struct bkey_s k, union bch_extent_entry *entry)
	union bch_extent_entry *next = extent_entry_next(entry);

	/* stripes have ptrs, but their layout doesn't work with this code */
	BUG_ON(k.k->type == KEY_TYPE_stripe);

	memmove_u64s_down(entry, next,
			  (u64 *) bkey_val_end(k) - (u64 *) next);
	k.k->u64s -= (u64 *) next - (u64 *) entry;
/*
 * Returns pointer to the next entry after the one being dropped:
 */
union bch_extent_entry *bch2_bkey_drop_ptr_noerror(struct bkey_s k,
						   struct bch_extent_ptr *ptr)
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry = to_entry(ptr), *next;
	union bch_extent_entry *ret = entry;
	bool drop_crc = true;

	EBUG_ON(ptr < &ptrs.start->ptr ||
		ptr >= &ptrs.end->ptr);
	EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);

	for (next = extent_entry_next(entry);
	     next = extent_entry_next(next)) {
		if (extent_entry_is_crc(next)) {
		} else if (extent_entry_is_ptr(next)) {

	extent_entry_drop(k, entry);

	while ((entry = extent_entry_prev(ptrs, entry))) {
		if (extent_entry_is_ptr(entry))

		if ((extent_entry_is_crc(entry) && drop_crc) ||
		    extent_entry_is_stripe_ptr(entry)) {
			ret = (void *) ret - extent_entry_bytes(entry);
			extent_entry_drop(k, entry);

union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
					   struct bch_extent_ptr *ptr)
	bool have_dirty = bch2_bkey_dirty_devs(k.s_c).nr;
	union bch_extent_entry *ret =
		bch2_bkey_drop_ptr_noerror(k, ptr);
	/*
	 * If we deleted all the dirty pointers and there's still cached
	 * pointers, we could set the cached pointers to dirty if they're not
	 * stale - but to do that correctly we'd need to grab an open_bucket
	 * reference so that we don't race with bucket reuse:
	 */
	    !bch2_bkey_dirty_devs(k.s_c).nr) {
		k.k->type = KEY_TYPE_error;
		set_bkey_val_u64s(k.k, 0);
	} else if (!bch2_bkey_nr_ptrs(k.s_c)) {
		k.k->type = KEY_TYPE_deleted;
		set_bkey_val_u64s(k.k, 0);

void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
	struct bch_extent_ptr *ptr;

	bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);

void bch2_bkey_drop_device_noerror(struct bkey_s k, unsigned dev)
	struct bch_extent_ptr *ptr = bch2_bkey_has_device(k, dev);

		bch2_bkey_drop_ptr_noerror(k, ptr);

const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c k, unsigned dev)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)

bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)
		if (bch2_dev_in_target(c, ptr->dev, target) &&
		     !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))

bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
			   struct bch_extent_ptr m, u64 offset)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (p.ptr.dev == m.dev &&
		    p.ptr.gen == m.gen &&
		    (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
		    (s64) m.offset - offset)
/*
 * Returns true if two extents refer to the same data:
 */
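/*
 * For example (hypothetical offsets): a 16-sector extent starting at file
 * offset 0 whose pointer is at device 0, sector 1000, and an 8-sector extent
 * starting at file offset 8 whose pointer is at device 0, sector 1008 with
 * the same generation, match: 1000 + 0 - 0 == 1008 + 0 - 8.
 */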
bool bch2_extents_match(struct bkey_s_c k1, struct bkey_s_c k2)
	if (k1.k->type != k2.k->type)

	if (bkey_extent_is_direct_data(k1.k)) {
		struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(k1);
		struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(k2);
		const union bch_extent_entry *entry1, *entry2;
		struct extent_ptr_decoded p1, p2;

		if (bkey_extent_is_unwritten(k1) != bkey_extent_is_unwritten(k2))

		bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1)
			bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
				if (p1.ptr.dev == p2.ptr.dev &&
				    p1.ptr.gen == p2.ptr.gen &&
				    (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
				    (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
		/* KEY_TYPE_deleted, etc. */

struct bch_extent_ptr *
bch2_extent_has_ptr(struct bkey_s_c k1, struct extent_ptr_decoded p1, struct bkey_s k2)
	struct bkey_ptrs ptrs2 = bch2_bkey_ptrs(k2);
	union bch_extent_entry *entry2;
	struct extent_ptr_decoded p2;

	bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
		if (p1.ptr.dev == p2.ptr.dev &&
		    p1.ptr.gen == p2.ptr.gen &&
		    (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
		    (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))

void bch2_extent_ptr_set_cached(struct bkey_s k, struct bch_extent_ptr *ptr)
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	union bch_extent_entry *ec = NULL;

	bkey_extent_entry_for_each(ptrs, entry) {
		if (&entry->ptr == ptr) {
				extent_entry_drop(k, ec);

		if (extent_entry_is_stripe_ptr(entry))
		else if (extent_entry_is_ptr(entry))
/*
 * bch_extent_normalize - clean up an extent, dropping stale pointers etc.
 *
 * Returns true if @k should be dropped entirely
 *
 * For existing keys, only called when btree nodes are being rewritten, not when
 * they're merely being compacted/resorted in memory.
 */
bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
	struct bch_extent_ptr *ptr;

	bch2_bkey_drop_ptrs(k, ptr,
		ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));

	return bkey_deleted(k.k);

void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	const struct bch_extent_ptr *ptr;
	const struct bch_extent_stripe_ptr *ec;

		prt_printf(out, "durability: %u ", bch2_bkey_durability_safe(c, k));

	bkey_extent_entry_for_each(ptrs, entry) {
			prt_printf(out, " ");

		switch (__extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			ptr = entry_to_ptr(entry);
			ca = c && ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
				? bch_dev_bkey_exists(c, ptr->dev)

				prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
					   (u64) ptr->offset, ptr->gen,
					   ptr->cached ? " cached" : "");

				u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);

				prt_printf(out, "ptr: %u:%llu:%u gen %u",
					   ptr->dev, b, offset, ptr->gen);
					prt_str(out, " cached");
					prt_str(out, " unwritten");
				if (ca && ptr_stale(ca, ptr))
					prt_printf(out, " stale");
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum %s compress %s",
				   crc.compressed_size,
				   crc.uncompressed_size,
				   crc.offset, crc.nonce,
				   bch2_csum_types[crc.csum_type],
				   bch2_compression_types[crc.compression_type]);
		case BCH_EXTENT_ENTRY_stripe_ptr:
			ec = &entry->stripe_ptr;

			prt_printf(out, "ec: idx %llu block %u",
				   (u64) ec->idx, ec->block);
			prt_printf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
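/*
 * With the format strings above, an extent with one checksummed, uncompressed
 * pointer renders roughly as follows (illustrative values, assuming a crc32c
 * checksum and no compression):
 *
 *	durability: 1 ptr: 0:231:8 gen 3 crc: c_size 8 size 8 offset 0 nonce 0 csum crc32c compress none
 */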
static int extent_ptr_invalid(const struct bch_fs *c,
			      const struct bch_extent_ptr *ptr,
			      unsigned size_ondisk,
			      struct printbuf *err)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr2;

	if (!bch2_dev_exists2(c, ptr->dev)) {
		prt_printf(err, "pointer to invalid device (%u)", ptr->dev);
		return -BCH_ERR_invalid_bkey;

	ca = bch_dev_bkey_exists(c, ptr->dev);
	bkey_for_each_ptr(ptrs, ptr2)
		if (ptr != ptr2 && ptr->dev == ptr2->dev) {
			prt_printf(err, "multiple pointers to same device (%u)", ptr->dev);
			return -BCH_ERR_invalid_bkey;

	bucket = sector_to_bucket_and_offset(ca, ptr->offset, &bucket_offset);

	if (bucket >= ca->mi.nbuckets) {
		prt_printf(err, "pointer past last bucket (%llu > %llu)",
			   bucket, ca->mi.nbuckets);
		return -BCH_ERR_invalid_bkey;

	if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket)) {
		prt_printf(err, "pointer before first bucket (%llu < %u)",
			   bucket, ca->mi.first_bucket);
		return -BCH_ERR_invalid_bkey;

	if (bucket_offset + size_ondisk > ca->mi.bucket_size) {
		prt_printf(err, "pointer spans multiple buckets (%u + %u > %u)",
			   bucket_offset, size_ondisk, ca->mi.bucket_size);
		return -BCH_ERR_invalid_bkey;
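/*
 * Worked example of the bucket checks (made-up geometry): with 512-sector
 * buckets, first_bucket 16 and nbuckets 1000, a pointer at sector offset 8200
 * decodes to bucket 16, bucket_offset 8, which is valid for size_ondisk up to
 * 504 sectors; the same pointer at offset 8700 (bucket 16, bucket_offset 508)
 * with size_ondisk 8 fails the last check, since 508 + 8 > 512.
 */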
int bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k,
			   enum bkey_invalid_flags flags,
			   struct printbuf *err)
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	unsigned size_ondisk = k.k->size;
	unsigned nonce = UINT_MAX;
	unsigned nr_ptrs = 0;
	bool unwritten = false, have_ec = false, crc_since_last_ptr = false;

	if (bkey_is_btree_ptr(k.k))
		size_ondisk = btree_sectors(c);

	bkey_extent_entry_for_each(ptrs, entry) {
		if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX) {
			prt_printf(err, "invalid extent entry type (got %u, max %u)",
				   __extent_entry_type(entry), BCH_EXTENT_ENTRY_MAX);
			return -BCH_ERR_invalid_bkey;

		if (bkey_is_btree_ptr(k.k) &&
		    !extent_entry_is_ptr(entry)) {
			prt_printf(err, "has non ptr field");
			return -BCH_ERR_invalid_bkey;

		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			ret = extent_ptr_invalid(c, k, &entry->ptr, size_ondisk,

			if (nr_ptrs && unwritten != entry->ptr.unwritten) {
				prt_printf(err, "extent with unwritten and written ptrs");
				return -BCH_ERR_invalid_bkey;

			if (k.k->type != KEY_TYPE_extent && entry->ptr.unwritten) {
				prt_printf(err, "has unwritten ptrs");
				return -BCH_ERR_invalid_bkey;

			if (entry->ptr.cached && have_ec) {
				prt_printf(err, "cached, erasure coded ptr");
				return -BCH_ERR_invalid_bkey;

			unwritten = entry->ptr.unwritten;
			crc_since_last_ptr = false;

		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			if (crc.offset + crc.live_size >
			    crc.uncompressed_size) {
				prt_printf(err, "checksum offset + key size > uncompressed size");
				return -BCH_ERR_invalid_bkey;

			size_ondisk = crc.compressed_size;

			if (!bch2_checksum_type_valid(c, crc.csum_type)) {
				prt_printf(err, "invalid checksum type");
				return -BCH_ERR_invalid_bkey;

			if (crc.compression_type >= BCH_COMPRESSION_TYPE_NR) {
				prt_printf(err, "invalid compression type");
				return -BCH_ERR_invalid_bkey;

			if (bch2_csum_type_is_encryption(crc.csum_type)) {
				if (nonce == UINT_MAX)
					nonce = crc.offset + crc.nonce;
				else if (nonce != crc.offset + crc.nonce) {
					prt_printf(err, "incorrect nonce");
					return -BCH_ERR_invalid_bkey;

			if (crc_since_last_ptr) {
				prt_printf(err, "redundant crc entry");
				return -BCH_ERR_invalid_bkey;
			crc_since_last_ptr = true;

		case BCH_EXTENT_ENTRY_stripe_ptr:
				prt_printf(err, "redundant stripe entry");
				return -BCH_ERR_invalid_bkey;

		case BCH_EXTENT_ENTRY_rebalance:

		prt_str(err, "no ptrs");
		return -BCH_ERR_invalid_bkey;

	if (nr_ptrs >= BCH_BKEY_PTRS_MAX) {
		prt_str(err, "too many ptrs");
		return -BCH_ERR_invalid_bkey;

	if (crc_since_last_ptr) {
		prt_printf(err, "redundant crc entry");
		return -BCH_ERR_invalid_bkey;

		prt_printf(err, "redundant stripe entry");
		return -BCH_ERR_invalid_bkey;
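/*
 * The checks above reflect the entry ordering within a key's value: a crc or
 * stripe_ptr entry applies to the ptr entries that follow it, so a crc entry
 * that is never followed by a ptr ("redundant crc entry") or a stripe_ptr
 * left dangling at the end ("redundant stripe entry") marks a malformed key.
 */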
void bch2_ptr_swab(struct bkey_s k)
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;

	for (d = (u64 *) ptrs.start;
	     d != (u64 *) ptrs.end;

	for (entry = ptrs.start;
	     entry = extent_entry_next(entry)) {
		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
		case BCH_EXTENT_ENTRY_crc32:
			entry->crc32.csum = swab32(entry->crc32.csum);
		case BCH_EXTENT_ENTRY_crc64:
			entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
			entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
		case BCH_EXTENT_ENTRY_crc128:
			entry->crc128.csum.hi = (__force __le64)
				swab64((__force u64) entry->crc128.csum.hi);
			entry->crc128.csum.lo = (__force __le64)
				swab64((__force u64) entry->crc128.csum.lo);
		case BCH_EXTENT_ENTRY_stripe_ptr:
		case BCH_EXTENT_ENTRY_rebalance:

/* Generic extent code: */

int bch2_cut_front_s(struct bpos where, struct bkey_s k)
	unsigned new_val_u64s = bkey_val_u64s(k.k);

	if (bkey_le(where, bkey_start_pos(k.k)))

	EBUG_ON(bkey_gt(where, k.k->p));

	sub = where.offset - bkey_start_offset(k.k);

		k.k->type = KEY_TYPE_deleted;

	switch (k.k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v: {
		struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
		union bch_extent_entry *entry;
		bool seen_crc = false;

		bkey_extent_entry_for_each(ptrs, entry) {
			switch (extent_entry_type(entry)) {
			case BCH_EXTENT_ENTRY_ptr:
					entry->ptr.offset += sub;
			case BCH_EXTENT_ENTRY_crc32:
				entry->crc32.offset += sub;
			case BCH_EXTENT_ENTRY_crc64:
				entry->crc64.offset += sub;
			case BCH_EXTENT_ENTRY_crc128:
				entry->crc128.offset += sub;
			case BCH_EXTENT_ENTRY_stripe_ptr:
			case BCH_EXTENT_ENTRY_rebalance:

			if (extent_entry_is_crc(entry))

	case KEY_TYPE_reflink_p: {
		struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);

		le64_add_cpu(&p.v->idx, sub);

	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data: {
		void *p = bkey_inline_data_p(k);
		unsigned bytes = bkey_inline_data_bytes(k.k);

		sub = min_t(u64, sub << 9, bytes);

		memmove(p, p + sub, bytes - sub);

		new_val_u64s -= sub >> 3;

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
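/*
 * Example (illustrative): cutting the front off an extent spanning sectors
 * [0, 16) at where.offset == 8 gives sub == 8; crc offsets grow by 8 so the
 * checksummed region is unchanged, pointers not covered by a crc entry
 * advance by 8 sectors, and a reflink_p index advances by 8, so the remaining
 * key still describes sectors [8, 16) of the original data.
 */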
int bch2_cut_back_s(struct bpos where, struct bkey_s k)
	unsigned new_val_u64s = bkey_val_u64s(k.k);

	if (bkey_ge(where, k.k->p))

	EBUG_ON(bkey_lt(where, bkey_start_pos(k.k)));

	len = where.offset - bkey_start_offset(k.k);

	k.k->p.offset = where.offset;

		k.k->type = KEY_TYPE_deleted;

	switch (k.k->type) {
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data:
		new_val_u64s = (bkey_inline_data_offset(k.k) +
				min(bkey_inline_data_bytes(k.k), k.k->size << 9)) >> 3;

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
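/*
 * Example (illustrative): cutting an extent spanning [0, 16) back to
 * where.offset == 8 leaves k.k->p.offset == 8 and size 8; pointers and crc
 * offsets are untouched since the dropped data was at the end, and for inline
 * data the value is simply truncated to the bytes still covered.
 */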