// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Code for managing the extent btree and dynamically updating the writeback
 * dirty sector count.
 */

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "disk_groups.h"
#include "extents.h"
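
/*
 * An extent value is a list of variable sized entries (union bch_extent_entry):
 * data pointers, checksum/compression entries (crc32/crc64/crc128), stripe
 * pointers for erasure coded data, and rebalance entries.  A checksum entry
 * applies to the pointers that follow it, up to the next checksum entry.
 */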

static unsigned bch2_crc_field_size_max[] = {
	[BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
};

static void bch2_extent_crc_pack(union bch_extent_crc *,
				 struct bch_extent_crc_unpacked,
				 enum bch_extent_entry_type);

static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
						   unsigned dev)
{
	struct bch_dev_io_failures *i;

	for (i = f->devs; i < f->devs + f->nr; i++)
		if (i->dev == dev)
			return i;

	return NULL;
}

void bch2_mark_io_failure(struct bch_io_failures *failed,
			  struct extent_ptr_decoded *p)
{
	struct bch_dev_io_failures *f = dev_io_failures(failed, p->ptr.dev);

	if (!f) {
		BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));

		f = &failed->devs[failed->nr++];
		f->dev		= p->ptr.dev;
		f->idx		= p->idx;
		f->nr_failed	= 1;
		f->nr_retries	= 0;
	} else if (p->idx != f->idx) {
		f->idx		= p->idx;
		f->nr_failed	= 1;
		f->nr_retries	= 0;
	} else {
		f->nr_failed++;
	}
}
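
/*
 * When both pointers are directly readable (idx 0), the read path picks
 * between them pseudo-randomly, weighted by each device's measured read
 * latency: with latencies l1 and l2, p1 wins with probability roughly
 * l2 / (l1 + l2).  E.g. if dev1 averages 1ms and dev2 averages 3ms, p1 is
 * chosen about 75% of the time.
 */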

/*
 * returns true if p1 is better than p2:
 */
static inline bool ptr_better(struct bch_fs *c,
			      const struct extent_ptr_decoded p1,
			      const struct extent_ptr_decoded p2)
{
	if (likely(!p1.idx && !p2.idx)) {
		struct bch_dev *dev1 = bch_dev_bkey_exists(c, p1.ptr.dev);
		struct bch_dev *dev2 = bch_dev_bkey_exists(c, p2.ptr.dev);

		u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
		u64 l2 = atomic64_read(&dev2->cur_latency[READ]);

		/* Pick at random, biased in favor of the faster device: */

		return bch2_rand_range(l1 + l2) > l1;
	}

	if (bch2_force_reconstruct_read)
		return p1.idx > p2.idx;

	return p1.idx < p2.idx;
}
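
/*
 * In struct extent_ptr_decoded, idx selects how a pointer would be read:
 * idx 0 means read the replica directly, higher values mean reconstructing
 * via erasure coding; a pointer is skipped once idx exceeds the options it
 * actually has (p.has_ec + 1).  bch2_bkey_pick_read_device() returns a
 * negative error if the data ought to be readable but isn't (error key, or
 * dirty pointers with no usable device), 0 when there is nothing to read
 * (hole or unwritten extent), and > 0 once *pick has been filled out.
 */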

/*
 * This picks a non-stale pointer, preferably from a device other than @avoid.
 * Avoid can be NULL, meaning pick any. If there are no non-stale pointers to
 * other devices, it will still pick a pointer from avoid.
 */
int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
			       struct bch_io_failures *failed,
			       struct extent_ptr_decoded *pick)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_dev_io_failures *f;
	struct bch_dev *ca;
	int ret = 0;

	if (k.k->type == KEY_TYPE_error)
		return -EIO;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		/*
		 * Unwritten extent: no need to actually read, treat it as a
		 * hole and return 0s:
		 */
		if (p.ptr.unwritten)
			return 0;

		ca = bch_dev_bkey_exists(c, p.ptr.dev);

		/*
		 * If there are any dirty pointers it's an error if we can't
		 * read:
		 */
		if (!ret && !p.ptr.cached)
			ret = -EIO;

		if (p.ptr.cached && ptr_stale(ca, &p.ptr))
			continue;

		f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
		if (f)
			p.idx = f->nr_failed < f->nr_retries
				? f->idx
				: f->idx + 1;

		if (!p.idx &&
		    !bch2_dev_is_readable(ca))
			p.idx++;

		if (bch2_force_reconstruct_read &&
		    !p.idx && p.has_ec)
			p.idx++;

		if (p.idx >= (unsigned) p.has_ec + 1)
			continue;

		if (ret > 0 && !ptr_better(c, p, *pick))
			continue;

		*pick = p;
		ret = 1;
	}

	return ret;
}

/* KEY_TYPE_btree_ptr: */

int bch2_btree_ptr_invalid(struct bch_fs *c, struct bkey_s_c k,
			   enum bkey_invalid_flags flags,
			   struct printbuf *err)
{
	int ret = 0;

	bkey_fsck_err_on(bkey_val_u64s(k.k) > BCH_REPLICAS_MAX, c, err,
			 btree_ptr_val_too_big,
			 "value too big (%zu > %u)", bkey_val_u64s(k.k), BCH_REPLICAS_MAX);

	ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
fsck_err:
	return ret;
}

void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	bch2_bkey_ptrs_to_text(out, c, k);
}

int bch2_btree_ptr_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
			      enum bkey_invalid_flags flags,
			      struct printbuf *err)
{
	int ret = 0;

	bkey_fsck_err_on(bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX, c, err,
			 btree_ptr_v2_val_too_big,
			 "value too big (%zu > %zu)",
			 bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX);

	ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
fsck_err:
	return ret;
}

void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
			       struct bkey_s_c k)
{
	struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);

	prt_printf(out, "seq %llx written %u min_key %s",
		   le64_to_cpu(bp.v->seq),
		   le16_to_cpu(bp.v->sectors_written),
		   BTREE_PTR_RANGE_UPDATED(bp.v) ? "R " : "");

	bch2_bpos_to_text(out, bp.v->min_key);
	prt_printf(out, " ");
	bch2_bkey_ptrs_to_text(out, c, k);
}

void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
			      unsigned big_endian, int write,
			      struct bkey_s k)
{
	struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(k);

	compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key);

	if (version < bcachefs_metadata_version_inode_btree_change &&
	    btree_id_is_extents(btree_id) &&
	    !bkey_eq(bp.v->min_key, POS_MIN))
		bp.v->min_key = write
			? bpos_nosnap_predecessor(bp.v->min_key)
			: bpos_nosnap_successor(bp.v->min_key);
}

/* KEY_TYPE_extent: */
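
/*
 * Extent merging happens in three passes over the two values: first check that
 * the entry types line up pairwise, then check that each pointer/checksum pair
 * is actually mergeable (same device, contiguous on disk, compatible crcs),
 * and only then rewrite the left key's entries and resize it to cover both.
 */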

bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
{
	struct bkey_ptrs   l_ptrs = bch2_bkey_ptrs(l);
	struct bkey_ptrs_c r_ptrs = bch2_bkey_ptrs_c(r);
	union bch_extent_entry *en_l;
	const union bch_extent_entry *en_r;
	struct extent_ptr_decoded lp, rp;
	bool use_right_ptr;
	struct bch_dev *ca;

	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
		if (extent_entry_type(en_l) != extent_entry_type(en_r))
			return false;

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	if (en_l < l_ptrs.end || en_r < r_ptrs.end)
		return false;

	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	lp.crc = bch2_extent_crc_unpack(l.k, NULL);
	rp.crc = bch2_extent_crc_unpack(r.k, NULL);

	while (__bkey_ptr_next_decode(l.k, l_ptrs.end, lp, en_l) &&
	       __bkey_ptr_next_decode(r.k, r_ptrs.end, rp, en_r)) {
		if (lp.ptr.offset + lp.crc.offset + lp.crc.live_size !=
		    rp.ptr.offset + rp.crc.offset ||
		    lp.ptr.dev			!= rp.ptr.dev ||
		    lp.ptr.gen			!= rp.ptr.gen ||
		    lp.ptr.unwritten		!= rp.ptr.unwritten ||
		    lp.has_ec			!= rp.has_ec)
			return false;

		/* Extents may not straddle buckets: */
		ca = bch_dev_bkey_exists(c, lp.ptr.dev);
		if (PTR_BUCKET_NR(ca, &lp.ptr) != PTR_BUCKET_NR(ca, &rp.ptr))
			return false;

		if (lp.has_ec			!= rp.has_ec ||
		    (lp.has_ec &&
		     (lp.ec.block		!= rp.ec.block ||
		      lp.ec.redundancy		!= rp.ec.redundancy ||
		      lp.ec.idx			!= rp.ec.idx)))
			return false;

		if (lp.crc.compression_type	!= rp.crc.compression_type ||
		    lp.crc.nonce		!= rp.crc.nonce)
			return false;

		if (lp.crc.offset + lp.crc.live_size + rp.crc.live_size <=
		    lp.crc.uncompressed_size) {
			/* can use left extent's crc entry */
		} else if (lp.crc.live_size <= rp.crc.offset) {
			/* can use right extent's crc entry */
		} else {
			/* check if checksums can be merged: */
			if (lp.crc.csum_type		!= rp.crc.csum_type ||
			    lp.crc.nonce		!= rp.crc.nonce ||
			    crc_is_compressed(lp.crc) ||
			    !bch2_checksum_mergeable(lp.crc.csum_type))
				return false;

			if (lp.crc.offset + lp.crc.live_size != lp.crc.compressed_size ||
			    rp.crc.offset)
				return false;

			if (lp.crc.csum_type &&
			    lp.crc.uncompressed_size +
			    rp.crc.uncompressed_size > (c->opts.encoded_extent_max >> 9))
				return false;
		}

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
		if (extent_entry_is_crc(en_l)) {
			struct bch_extent_crc_unpacked crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
			struct bch_extent_crc_unpacked crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

			if (crc_l.uncompressed_size + crc_r.uncompressed_size >
			    bch2_crc_field_size_max[extent_entry_type(en_l)])
				return false;
		}

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	use_right_ptr = false;
	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	while (en_l < l_ptrs.end) {
		if (extent_entry_type(en_l) == BCH_EXTENT_ENTRY_ptr &&
		    use_right_ptr)
			en_l->ptr = en_r->ptr;

		if (extent_entry_is_crc(en_l)) {
			struct bch_extent_crc_unpacked crc_l =
				bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
			struct bch_extent_crc_unpacked crc_r =
				bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

			use_right_ptr = false;

			if (crc_l.offset + crc_l.live_size + crc_r.live_size <=
			    crc_l.uncompressed_size) {
				/* can use left extent's crc entry */
			} else if (crc_l.live_size <= crc_r.offset) {
				/* can use right extent's crc entry */
				crc_r.offset -= crc_l.live_size;
				bch2_extent_crc_pack(entry_to_crc(en_l), crc_r,
						     extent_entry_type(en_l));
				use_right_ptr = true;
			} else {
				crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
								 crc_l.csum,
								 crc_r.csum,
								 crc_r.uncompressed_size << 9);

				crc_l.uncompressed_size	+= crc_r.uncompressed_size;
				crc_l.compressed_size	+= crc_r.compressed_size;
				bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
						     extent_entry_type(en_l));
			}
		}

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	bch2_key_resize(l.k, l.k->size + r.k->size);
	return true;
}

/* KEY_TYPE_reservation: */
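
/*
 * Reservation keys carry no data pointers; they just reserve space for
 * nr_replicas worth of data at this position (e.g. for fallocate), so
 * validation and merging only need to look at generation and nr_replicas.
 */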

int bch2_reservation_invalid(struct bch_fs *c, struct bkey_s_c k,
			     enum bkey_invalid_flags flags,
			     struct printbuf *err)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
	int ret = 0;

	bkey_fsck_err_on(!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX, c, err,
			 reservation_key_nr_replicas_invalid,
			 "invalid nr_replicas (%u)", r.v->nr_replicas);
fsck_err:
	return ret;
}

void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
			      struct bkey_s_c k)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	prt_printf(out, "generation %u replicas %u",
		   le32_to_cpu(r.v->generation),
		   r.v->nr_replicas);
}

bool bch2_reservation_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
{
	struct bkey_s_reservation l = bkey_s_to_reservation(_l);
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(_r);

	if (l.v->generation != r.v->generation ||
	    l.v->nr_replicas != r.v->nr_replicas)
		return false;

	bch2_key_resize(l.k, l.k->size + r.k->size);
	return true;
}

/* Extent checksum entries: */

/* returns true if not equal */
static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
					 struct bch_extent_crc_unpacked r)
{
	return (l.csum_type		!= r.csum_type ||
		l.compression_type	!= r.compression_type ||
		l.compressed_size	!= r.compressed_size ||
		l.uncompressed_size	!= r.uncompressed_size ||
		l.offset		!= r.offset ||
		l.live_size		!= r.live_size ||
		l.nonce			!= r.nonce ||
		bch2_crc_cmp(l.csum, r.csum));
}

static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
				  struct bch_extent_crc_unpacked n)
{
	return !crc_is_compressed(u) &&
		u.csum_type &&
		u.uncompressed_size > u.live_size &&
		bch2_csum_type_is_encryption(u.csum_type) ==
		bch2_csum_type_is_encryption(n.csum_type);
}

bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
				 struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	if (!n.csum_type)
		return false;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (can_narrow_crc(crc, n))
			return true;

	return false;
}

/*
 * We're writing another replica for this extent, so while we've got the data in
 * memory we'll be computing a new checksum for the currently live data.
 *
 * If there are other replicas we aren't moving, and they are checksummed but
 * not compressed, we can modify them to point to only the data that is
 * currently live (so that readers won't have to bounce) while we've got the
 * checksum we need:
 */
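/*
 * For example: a fully checksummed 128k extent that has been mostly
 * overwritten so that only 4k is still live normally forces readers to fetch
 * and checksum the whole 128k and then bounce out the live 4k.  Narrowing
 * replaces that crc entry with one covering just the live 4k, so later reads
 * can go straight to the data.
 */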

bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked u;
	struct extent_ptr_decoded p;
	union bch_extent_entry *i;
	bool ret = false;

	/* Find a checksum entry that covers only live data: */
	if (!n.live_size) {
		bkey_for_each_crc(&k->k, ptrs, u, i)
			if (!crc_is_compressed(u) &&
			    u.csum_type &&
			    u.live_size == u.uncompressed_size) {
				n = u;
				goto found;
			}
		return false;
	}
found:
	BUG_ON(crc_is_compressed(n));
	BUG_ON(n.offset);
	BUG_ON(n.live_size != k->k.size);

restart_narrow_pointers:
	ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));

	bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
		if (can_narrow_crc(p.crc, n)) {
			bch2_bkey_drop_ptr_noerror(bkey_i_to_s(k), &i->ptr);
			p.ptr.offset += p.crc.offset;
			p.crc = n;
			bch2_extent_ptr_decoded_append(k, &p);
			ret = true;
			goto restart_narrow_pointers;
		}

	return ret;
}

static void bch2_extent_crc_pack(union bch_extent_crc *dst,
				 struct bch_extent_crc_unpacked src,
				 enum bch_extent_entry_type type)
{
#define set_common_fields(_dst, _src)					\
		_dst.type		= 1 << type;			\
		_dst.csum_type		= _src.csum_type,		\
		_dst.compression_type	= _src.compression_type,	\
		_dst._compressed_size	= _src.compressed_size - 1,	\
		_dst._uncompressed_size	= _src.uncompressed_size - 1,	\
		_dst.offset		= _src.offset

	switch (type) {
	case BCH_EXTENT_ENTRY_crc32:
		set_common_fields(dst->crc32, src);
		dst->crc32.csum		= (u32 __force) *((__le32 *) &src.csum.lo);
		break;
	case BCH_EXTENT_ENTRY_crc64:
		set_common_fields(dst->crc64, src);
		dst->crc64.nonce	= src.nonce;
		dst->crc64.csum_lo	= (u64 __force) src.csum.lo;
		dst->crc64.csum_hi	= (u64 __force) *((__le16 *) &src.csum.hi);
		break;
	case BCH_EXTENT_ENTRY_crc128:
		set_common_fields(dst->crc128, src);
		dst->crc128.nonce	= src.nonce;
		dst->crc128.csum	= src.csum;
		break;
	default:
		BUG();
	}
#undef set_common_fields
}
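
/*
 * bch2_extent_crc_append() picks the smallest on-disk crc entry format that
 * can hold the checksum, size and nonce: crc32 packs into a single u64,
 * crc64 into two and crc128 into three, so small checksums keep keys compact.
 */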

void bch2_extent_crc_append(struct bkey_i *k,
			    struct bch_extent_crc_unpacked new)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	union bch_extent_crc *crc = (void *) ptrs.end;
	enum bch_extent_entry_type type;

	if (bch_crc_bytes[new.csum_type]	<= 4 &&
	    new.uncompressed_size		<= CRC32_SIZE_MAX &&
	    new.nonce				<= CRC32_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc32;
	else if (bch_crc_bytes[new.csum_type]	<= 10 &&
		 new.uncompressed_size		<= CRC64_SIZE_MAX &&
		 new.nonce			<= CRC64_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc64;
	else if (bch_crc_bytes[new.csum_type]	<= 16 &&
		 new.uncompressed_size		<= CRC128_SIZE_MAX &&
		 new.nonce			<= CRC128_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc128;
	else
		BUG();

	bch2_extent_crc_pack(crc, new, type);

	k->k.u64s += extent_entry_u64s(ptrs.end);

	EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
}

/* Generic code for keys with pointers: */

unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
{
	return bch2_bkey_devs(k).nr;
}

unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
{
	return k.k->type == KEY_TYPE_reservation
		? bkey_s_c_to_reservation(k).v->nr_replicas
		: bch2_bkey_dirty_devs(k).nr;
}

unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
{
	unsigned ret = 0;

	if (k.k->type == KEY_TYPE_reservation) {
		ret = bkey_s_c_to_reservation(k).v->nr_replicas;
	} else {
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			ret += !p.ptr.cached && !crc_is_compressed(p.crc);
	}

	return ret;
}

unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned ret = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (!p.ptr.cached && crc_is_compressed(p.crc))
			ret += p.crc.compressed_size;

	return ret;
}

bool bch2_bkey_is_incompressible(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;

	bkey_for_each_crc(k.k, ptrs, crc, entry)
		if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
			return true;
	return false;
}

unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p = { 0 };
	unsigned replicas = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if (p.ptr.cached)
			continue;

		if (p.has_ec)
			replicas += p.ec.redundancy;

		replicas++;
	}

	return replicas;
}
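
/*
 * Durability of a single pointer: cached pointers count for nothing, erasure
 * coded pointers count for the stripe's redundancy plus the copy itself, and
 * plain pointers count for the device's configured durability (normally 1,
 * 0 for devices whose contents are considered expendable).
 */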

static inline unsigned __extent_ptr_durability(struct bch_dev *ca, struct extent_ptr_decoded *p)
{
	if (p->ptr.cached)
		return 0;

	return p->has_ec
		? p->ec.redundancy + 1
		: ca->mi.durability;
}

unsigned bch2_extent_ptr_desired_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, p->ptr.dev);

	return __extent_ptr_durability(ca, p);
}

unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, p->ptr.dev);

	if (ca->mi.state == BCH_MEMBER_STATE_failed)
		return 0;

	return __extent_ptr_durability(ca, p);
}

unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned durability = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		durability += bch2_extent_ptr_durability(c, &p);

	return durability;
}

static unsigned bch2_bkey_durability_safe(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned durability = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (p.ptr.dev < c->sb.nr_devices && c->devs[p.ptr.dev])
			durability += bch2_extent_ptr_durability(c, &p);

	return durability;
}

void bch2_bkey_extent_entry_drop(struct bkey_i *k, union bch_extent_entry *entry)
{
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
	union bch_extent_entry *next = extent_entry_next(entry);

	memmove_u64s(entry, next, (u64 *) end - (u64 *) next);
	k->k.u64s -= extent_entry_u64s(entry);
}

void bch2_extent_ptr_decoded_append(struct bkey_i *k,
				    struct extent_ptr_decoded *p)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked crc =
		bch2_extent_crc_unpack(&k->k, NULL);
	union bch_extent_entry *pos;

	if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
		pos = ptrs.start;
		goto found;
	}

	bkey_for_each_crc(&k->k, ptrs, crc, pos)
		if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
			pos = extent_entry_next(pos);
			goto found;
		}

	bch2_extent_crc_append(k, p->crc);
	pos = bkey_val_end(bkey_i_to_s(k));
found:
	p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
	__extent_entry_insert(k, pos, to_entry(&p->ptr));

	if (p->has_ec) {
		p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
		__extent_entry_insert(k, pos, to_entry(&p->ec));
	}
}
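
/*
 * Dropping a pointer may leave dead entries behind: the crc entry covering it
 * (if no other pointer still needs it) and its stripe pointer.
 * bch2_bkey_drop_ptr_noerror() cleans those up as well and returns where the
 * next entry now lives, since everything after the dropped entries shifts down.
 */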

static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
					  union bch_extent_entry *entry)
{
	union bch_extent_entry *i = ptrs.start;

	if (i == entry)
		return NULL;

	while (extent_entry_next(i) != entry)
		i = extent_entry_next(i);
	return i;
}

/*
 * Returns pointer to the next entry after the one being dropped:
 */
union bch_extent_entry *bch2_bkey_drop_ptr_noerror(struct bkey_s k,
						   struct bch_extent_ptr *ptr)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry = to_entry(ptr), *next;
	union bch_extent_entry *ret = entry;
	bool drop_crc = true;

	EBUG_ON(ptr < &ptrs.start->ptr ||
		ptr >= &ptrs.end->ptr);
	EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);

	for (next = extent_entry_next(entry);
	     next != ptrs.end;
	     next = extent_entry_next(next)) {
		if (extent_entry_is_crc(next)) {
			break;
		} else if (extent_entry_is_ptr(next)) {
			drop_crc = false;
			break;
		}
	}

	extent_entry_drop(k, entry);

	while ((entry = extent_entry_prev(ptrs, entry))) {
		if (extent_entry_is_ptr(entry))
			break;

		if ((extent_entry_is_crc(entry) && drop_crc) ||
		    extent_entry_is_stripe_ptr(entry)) {
			ret = (void *) ret - extent_entry_bytes(entry);
			extent_entry_drop(k, entry);
		}
	}

	return ret;
}

union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
					   struct bch_extent_ptr *ptr)
{
	bool have_dirty = bch2_bkey_dirty_devs(k.s_c).nr;
	union bch_extent_entry *ret =
		bch2_bkey_drop_ptr_noerror(k, ptr);

	/*
	 * If we deleted all the dirty pointers and there's still cached
	 * pointers, we could set the cached pointers to dirty if they're not
	 * stale - but to do that correctly we'd need to grab an open_bucket
	 * reference so that we don't race with bucket reuse:
	 */
	if (have_dirty &&
	    !bch2_bkey_dirty_devs(k.s_c).nr) {
		k.k->type = KEY_TYPE_error;
		set_bkey_val_u64s(k.k, 0);
		ret = NULL;
	} else if (!bch2_bkey_nr_ptrs(k.s_c)) {
		k.k->type = KEY_TYPE_deleted;
		set_bkey_val_u64s(k.k, 0);
		ret = NULL;
	}

	return ret;
}

void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
{
	struct bch_extent_ptr *ptr;

	bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
}

void bch2_bkey_drop_device_noerror(struct bkey_s k, unsigned dev)
{
	struct bch_extent_ptr *ptr = bch2_bkey_has_device(k, dev);

	if (ptr)
		bch2_bkey_drop_ptr_noerror(k, ptr);
}

const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c k, unsigned dev)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(ptrs, ptr)
		if (ptr->dev == dev)
			return ptr;

	return NULL;
}

bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(ptrs, ptr)
		if (bch2_dev_in_target(c, ptr->dev, target) &&
		    (!ptr->cached ||
		     !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
			return true;

	return false;
}

bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
			   struct bch_extent_ptr m, u64 offset)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (p.ptr.dev	== m.dev &&
		    p.ptr.gen	== m.gen &&
		    (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
		    (s64) m.offset - offset)
			return true;

	return false;
}

/*
 * Returns true if two extents refer to the same data:
 */
bool bch2_extents_match(struct bkey_s_c k1, struct bkey_s_c k2)
{
	if (k1.k->type != k2.k->type)
		return false;

	if (bkey_extent_is_direct_data(k1.k)) {
		struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(k1);
		struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(k2);
		const union bch_extent_entry *entry1, *entry2;
		struct extent_ptr_decoded p1, p2;

		if (bkey_extent_is_unwritten(k1) != bkey_extent_is_unwritten(k2))
			return false;

		bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1)
			bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
				if (p1.ptr.dev		== p2.ptr.dev &&
				    p1.ptr.gen		== p2.ptr.gen &&
				    (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
				    (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
					return true;

		return false;
	} else {
		/* KEY_TYPE_deleted, etc. */
		return true;
	}
}

struct bch_extent_ptr *
bch2_extent_has_ptr(struct bkey_s_c k1, struct extent_ptr_decoded p1, struct bkey_s k2)
{
	struct bkey_ptrs ptrs2 = bch2_bkey_ptrs(k2);
	union bch_extent_entry *entry2;
	struct extent_ptr_decoded p2;

	bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
		if (p1.ptr.dev	== p2.ptr.dev &&
		    p1.ptr.gen	== p2.ptr.gen &&
		    (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
		    (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
			return &entry2->ptr;

	return NULL;
}

void bch2_extent_ptr_set_cached(struct bkey_s k, struct bch_extent_ptr *ptr)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	union bch_extent_entry *ec = NULL;

	bkey_extent_entry_for_each(ptrs, entry) {
		if (&entry->ptr == ptr) {
			ptr->cached = true;
			if (ec)
				extent_entry_drop(k, ec);
			return;
		}

		if (extent_entry_is_stripe_ptr(entry))
			ec = entry;
		else if (extent_entry_is_ptr(entry))
			ec = NULL;
	}

	BUG();
}

/*
 * bch_extent_normalize - clean up an extent, dropping stale pointers etc.
 *
 * Returns true if @k should be dropped entirely
 *
 * For existing keys, only called when btree nodes are being rewritten, not when
 * they're merely being compacted/resorted in memory.
 */
bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
{
	struct bch_extent_ptr *ptr;

	bch2_bkey_drop_ptrs(k, ptr,
		ptr->cached &&
		ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));

	return bkey_deleted(k.k);
}
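
/*
 * Prints each entry in order, producing something like:
 *   durability: 1 crc: c_size 8 size 8 offset 0 nonce 0 csum crc32c compress none ptr: 0:937:24 gen 0
 * Pointers are shown as device:bucket:offset when the device is known,
 * device:offset otherwise.
 */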

void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	bool first = true;

	if (c)
		prt_printf(out, "durability: %u ", bch2_bkey_durability_safe(c, k));

	bkey_extent_entry_for_each(ptrs, entry) {
		if (!first)
			prt_printf(out, " ");

		switch (__extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr: {
			const struct bch_extent_ptr *ptr = entry_to_ptr(entry);
			struct bch_dev *ca = c && ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
				? bch_dev_bkey_exists(c, ptr->dev)
				: NULL;

			if (!ca) {
				prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
					   (u64) ptr->offset, ptr->gen,
					   ptr->cached ? " cached" : "");
			} else {
				u32 offset;
				u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);

				prt_printf(out, "ptr: %u:%llu:%u gen %u",
					   ptr->dev, b, offset, ptr->gen);
				if (ptr->cached)
					prt_str(out, " cached");
				if (ptr->unwritten)
					prt_str(out, " unwritten");
				if (ca && ptr_stale(ca, ptr))
					prt_printf(out, " stale");
			}
			break;
		}
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128: {
			struct bch_extent_crc_unpacked crc =
				bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum %s compress ",
				   crc.compressed_size,
				   crc.uncompressed_size,
				   crc.offset, crc.nonce,
				   bch2_csum_types[crc.csum_type]);
			bch2_prt_compression_type(out, crc.compression_type);
			break;
		}
		case BCH_EXTENT_ENTRY_stripe_ptr: {
			const struct bch_extent_stripe_ptr *ec = &entry->stripe_ptr;

			prt_printf(out, "ec: idx %llu block %u",
				   (u64) ec->idx, ec->block);
			break;
		}
		case BCH_EXTENT_ENTRY_rebalance: {
			const struct bch_extent_rebalance *r = &entry->rebalance;

			prt_str(out, "rebalance: target ");
			if (c)
				bch2_target_to_text(out, c, r->target);
			else
				prt_printf(out, "%u", r->target);
			prt_str(out, " compression ");
			bch2_compression_opt_to_text(out, r->compression);
			break;
		}
		default:
			prt_printf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
			return;
		}

		first = false;
	}
}

static int extent_ptr_invalid(struct bch_fs *c,
			      struct bkey_s_c k,
			      enum bkey_invalid_flags flags,
			      const struct bch_extent_ptr *ptr,
			      unsigned size_ondisk,
			      bool metadata,
			      struct printbuf *err)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	u64 bucket;
	u32 bucket_offset;
	struct bch_dev *ca;
	int ret = 0;

	if (!bch2_dev_exists2(c, ptr->dev)) {
		/*
		 * If we're in the write path this key might have already been
		 * overwritten, and we could be seeing a device that doesn't
		 * exist anymore due to racing with device removal:
		 */
		if (flags & BKEY_INVALID_WRITE)
			return 0;

		bkey_fsck_err(c, err, ptr_to_invalid_device,
			      "pointer to invalid device (%u)", ptr->dev);
	}

	ca = bch_dev_bkey_exists(c, ptr->dev);
	bkey_for_each_ptr(ptrs, ptr2)
		bkey_fsck_err_on(ptr != ptr2 && ptr->dev == ptr2->dev, c, err,
				 ptr_to_duplicate_device,
				 "multiple pointers to same device (%u)", ptr->dev);

	bucket = sector_to_bucket_and_offset(ca, ptr->offset, &bucket_offset);

	bkey_fsck_err_on(bucket >= ca->mi.nbuckets, c, err,
			 ptr_after_last_bucket,
			 "pointer past last bucket (%llu > %llu)", bucket, ca->mi.nbuckets);
	bkey_fsck_err_on(ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket), c, err,
			 ptr_before_first_bucket,
			 "pointer before first bucket (%llu < %u)", bucket, ca->mi.first_bucket);
	bkey_fsck_err_on(bucket_offset + size_ondisk > ca->mi.bucket_size, c, err,
			 ptr_spans_multiple_buckets,
			 "pointer spans multiple buckets (%u + %u > %u)",
			 bucket_offset, size_ondisk, ca->mi.bucket_size);
fsck_err:
	return ret;
}

int bch2_bkey_ptrs_invalid(struct bch_fs *c, struct bkey_s_c k,
			   enum bkey_invalid_flags flags,
			   struct printbuf *err)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	unsigned size_ondisk = k.k->size;
	unsigned nonce = UINT_MAX;
	unsigned nr_ptrs = 0;
	bool have_written = false, have_unwritten = false, have_ec = false, crc_since_last_ptr = false;
	int ret = 0;

	if (bkey_is_btree_ptr(k.k))
		size_ondisk = btree_sectors(c);

	bkey_extent_entry_for_each(ptrs, entry) {
		bkey_fsck_err_on(__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX, c, err,
				 extent_ptrs_invalid_entry,
				 "invalid extent entry type (got %u, max %u)",
				 __extent_entry_type(entry), BCH_EXTENT_ENTRY_MAX);

		bkey_fsck_err_on(bkey_is_btree_ptr(k.k) &&
				 !extent_entry_is_ptr(entry), c, err,
				 btree_ptr_has_non_ptr,
				 "has non ptr field");

		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			ret = extent_ptr_invalid(c, k, flags, &entry->ptr,
						 size_ondisk, false, err);
			if (ret)
				return ret;

			bkey_fsck_err_on(entry->ptr.cached && have_ec, c, err,
					 ptr_cached_and_erasure_coded,
					 "cached, erasure coded ptr");

			if (!entry->ptr.unwritten)
				have_written = true;
			else
				have_unwritten = true;

			have_ec = false;
			crc_since_last_ptr = false;
			nr_ptrs++;
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			bkey_fsck_err_on(crc.offset + crc.live_size > crc.uncompressed_size, c, err,
					 ptr_crc_uncompressed_size_too_small,
					 "checksum offset + key size > uncompressed size");
			bkey_fsck_err_on(!bch2_checksum_type_valid(c, crc.csum_type), c, err,
					 ptr_crc_csum_type_unknown,
					 "invalid checksum type");
			bkey_fsck_err_on(crc.compression_type >= BCH_COMPRESSION_TYPE_NR, c, err,
					 ptr_crc_compression_type_unknown,
					 "invalid compression type");

			if (bch2_csum_type_is_encryption(crc.csum_type)) {
				if (nonce == UINT_MAX)
					nonce = crc.offset + crc.nonce;
				else if (nonce != crc.offset + crc.nonce)
					bkey_fsck_err(c, err, ptr_crc_nonce_mismatch,
						      "incorrect nonce");
			}

			bkey_fsck_err_on(crc_since_last_ptr, c, err,
					 ptr_crc_redundant,
					 "redundant crc entry");
			crc_since_last_ptr = true;

			bkey_fsck_err_on(crc_is_encoded(crc) &&
					 (crc.uncompressed_size > c->opts.encoded_extent_max >> 9) &&
					 (flags & (BKEY_INVALID_WRITE|BKEY_INVALID_COMMIT)), c, err,
					 ptr_crc_uncompressed_size_too_big,
					 "too large encoded extent");

			size_ondisk = crc.compressed_size;
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			bkey_fsck_err_on(have_ec, c, err,
					 ptr_stripe_redundant,
					 "redundant stripe entry");
			have_ec = true;
			break;
		case BCH_EXTENT_ENTRY_rebalance: {
			const struct bch_extent_rebalance *r = &entry->rebalance;

			if (!bch2_compression_opt_valid(r->compression)) {
				struct bch_compression_opt opt = __bch2_compression_decode(r->compression);
				prt_printf(err, "invalid compression opt %u:%u",
					   opt.type, opt.level);
				return -BCH_ERR_invalid_bkey;
			}
			break;
		}
		}
	}

	bkey_fsck_err_on(!nr_ptrs, c, err,
			 extent_ptrs_no_ptrs,
			 "no ptrs");
	bkey_fsck_err_on(nr_ptrs > BCH_BKEY_PTRS_MAX, c, err,
			 extent_ptrs_too_many_ptrs,
			 "too many ptrs: %u > %u", nr_ptrs, BCH_BKEY_PTRS_MAX);
	bkey_fsck_err_on(have_written && have_unwritten, c, err,
			 extent_ptrs_written_and_unwritten,
			 "extent with unwritten and written ptrs");
	bkey_fsck_err_on(k.k->type != KEY_TYPE_extent && have_unwritten, c, err,
			 extent_ptrs_unwritten,
			 "has unwritten ptrs");
	bkey_fsck_err_on(crc_since_last_ptr, c, err,
			 extent_ptrs_redundant_crc,
			 "redundant crc entry");
	bkey_fsck_err_on(have_ec, c, err,
			 extent_ptrs_redundant_stripe,
			 "redundant stripe entry");
fsck_err:
	return ret;
}

void bch2_ptr_swab(struct bkey_s k)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	u64 *d;

	for (d =  (u64 *) ptrs.start;
	     d != (u64 *) ptrs.end;
	     d++)
		*d = swab64(*d);

	for (entry = ptrs.start;
	     entry < ptrs.end;
	     entry = extent_entry_next(entry)) {
		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			break;
		case BCH_EXTENT_ENTRY_crc32:
			entry->crc32.csum = swab32(entry->crc32.csum);
			break;
		case BCH_EXTENT_ENTRY_crc64:
			entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
			entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
			break;
		case BCH_EXTENT_ENTRY_crc128:
			entry->crc128.csum.hi = (__force __le64)
				swab64((__force u64) entry->crc128.csum.hi);
			entry->crc128.csum.lo = (__force __le64)
				swab64((__force u64) entry->crc128.csum.lo);
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			break;
		case BCH_EXTENT_ENTRY_rebalance:
			break;
		}
	}
}
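
/*
 * A rebalance entry records the target and compression options an extent is
 * supposed to end up with; the rebalance thread scans for keys whose pointers
 * don't yet match those options and rewrites them.
 */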

const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;

	bkey_extent_entry_for_each(ptrs, entry)
		if (__extent_entry_type(entry) == BCH_EXTENT_ENTRY_rebalance)
			return &entry->rebalance;

	return NULL;
}

unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c, struct bkey_s_c k,
				       unsigned target, unsigned compression)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	unsigned rewrite_ptrs = 0;

	if (compression) {
		unsigned compression_type = bch2_compression_opt_to_type(compression);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		unsigned i = 0;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible ||
			    p.ptr.unwritten) {
				rewrite_ptrs = 0;
				goto incompressible;
			}

			if (!p.ptr.cached && p.crc.compression_type != compression_type)
				rewrite_ptrs |= 1U << i;
			i++;
		}
	}
incompressible:
	if (target && bch2_target_accepts_data(c, BCH_DATA_user, target)) {
		unsigned i = 0;

		bkey_for_each_ptr(ptrs, ptr) {
			if (!ptr->cached && !bch2_dev_in_target(c, ptr->dev, target))
				rewrite_ptrs |= 1U << i;
			i++;
		}
	}

	return rewrite_ptrs;
}

bool bch2_bkey_needs_rebalance(struct bch_fs *c, struct bkey_s_c k)
{
	const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k);

	/*
	 * If it's an indirect extent, we don't delete the rebalance entry when
	 * done so that we know what options were applied - check if it still
	 * needs to be done:
	 */
	if (r &&
	    k.k->type == KEY_TYPE_reflink_v &&
	    !bch2_bkey_ptrs_need_rebalance(c, k, r->target, r->compression))
		r = NULL;

	return r != NULL;
}

int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bkey_i *_k,
				  struct bch_io_opts *opts)
{
	struct bkey_s k = bkey_i_to_s(_k);
	struct bch_extent_rebalance *r;
	unsigned target = opts->background_target;
	unsigned compression = background_compression(*opts);
	bool needs_rebalance;

	if (!bkey_extent_is_direct_data(k.k))
		return 0;

	/* get existing rebalance entry: */
	r = (struct bch_extent_rebalance *) bch2_bkey_rebalance_opts(k.s_c);
	if (r) {
		if (k.k->type == KEY_TYPE_reflink_v) {
			/*
			 * indirect extents: existing options take precedence,
			 * so that we don't move extents back and forth if
			 * they're referenced by different inodes with different
			 * options:
			 */
			if (r->target)
				target = r->target;
			if (r->compression)
				compression = r->compression;
		}

		r->target	= target;
		r->compression	= compression;
	}

	needs_rebalance = bch2_bkey_ptrs_need_rebalance(c, k.s_c, target, compression);

	if (needs_rebalance && !r) {
		union bch_extent_entry *new = bkey_val_end(k);

		new->rebalance.type		= 1U << BCH_EXTENT_ENTRY_rebalance;
		new->rebalance.compression	= compression;
		new->rebalance.target		= target;
		new->rebalance.unused		= 0;
		k.k->u64s += extent_entry_u64s(new);
	} else if (!needs_rebalance && r && k.k->type != KEY_TYPE_reflink_v) {
		/*
		 * For indirect extents, don't delete the rebalance entry when
		 * we're finished so that we know we specifically moved it or
		 * compressed it to its current location/compression type
		 */
		extent_entry_drop(k, (union bch_extent_entry *) r);
	}

	return 0;
}

/* Generic extent code: */
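
/*
 * bch2_cut_front_s()/bch2_cut_back_s() trim an extent style key to start or
 * end at @where, fixing up the value as needed (pointer/crc offsets, reflink
 * index, inline data).  Both return minus the change in the value's size in
 * u64s, i.e. 0 or negative.
 */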

int bch2_cut_front_s(struct bpos where, struct bkey_s k)
{
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 sub;

	if (bkey_le(where, bkey_start_pos(k.k)))
		return 0;

	EBUG_ON(bkey_gt(where, k.k->p));

	sub = where.offset - bkey_start_offset(k.k);

	k.k->size -= sub;

	if (!k.k->size) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
	}

	switch (k.k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v: {
		struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
		union bch_extent_entry *entry;
		bool seen_crc = false;

		bkey_extent_entry_for_each(ptrs, entry) {
			switch (extent_entry_type(entry)) {
			case BCH_EXTENT_ENTRY_ptr:
				if (!seen_crc)
					entry->ptr.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc32:
				entry->crc32.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc64:
				entry->crc64.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc128:
				entry->crc128.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_stripe_ptr:
				break;
			case BCH_EXTENT_ENTRY_rebalance:
				break;
			}

			if (extent_entry_is_crc(entry))
				seen_crc = true;
		}

		break;
	}
	case KEY_TYPE_reflink_p: {
		struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);

		le64_add_cpu(&p.v->idx, sub);
		break;
	}
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data: {
		void *p = bkey_inline_data_p(k);
		unsigned bytes = bkey_inline_data_bytes(k.k);

		sub = min_t(u64, sub << 9, bytes);

		memmove(p, p + sub, bytes - sub);

		new_val_u64s -= sub >> 3;
		break;
	}
	}

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
}

int bch2_cut_back_s(struct bpos where, struct bkey_s k)
{
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 len = 0;

	if (bkey_ge(where, k.k->p))
		return 0;

	EBUG_ON(bkey_lt(where, bkey_start_pos(k.k)));

	len = where.offset - bkey_start_offset(k.k);

	k.k->p.offset = where.offset;
	k.k->size = len;

	if (!len) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
	}

	switch (k.k->type) {
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data:
		new_val_u64s = (bkey_inline_data_offset(k.k) +
				min(bkey_inline_data_bytes(k.k), k.k->size << 9)) >> 3;
		break;
	}

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
}