// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Code for managing the extent btree and dynamically updating the writeback
 * dirty sector count.
 */

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "buckets.h"
#include "checksum.h"
#include "debug.h"
#include "disk_groups.h"
#include "error.h"
#include "extents.h"
#include "inode.h"
#include "journal.h"
#include "replicas.h"
#include "super.h"
#include "super-io.h"
#include "util.h"

#include <trace/events/bcachefs.h>
static unsigned bch2_crc_field_size_max[] = {
	[BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
};

static void bch2_extent_crc_pack(union bch_extent_crc *,
				 struct bch_extent_crc_unpacked,
				 enum bch_extent_entry_type);
static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
						   unsigned dev)
{
	struct bch_dev_io_failures *i;

	for (i = f->devs; i < f->devs + f->nr; i++)
		if (i->dev == dev)
			return i;

	return NULL;
}

void bch2_mark_io_failure(struct bch_io_failures *failed,
			  struct extent_ptr_decoded *p)
{
	struct bch_dev_io_failures *f = dev_io_failures(failed, p->ptr.dev);

	if (!f) {
		BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));

		f = &failed->devs[failed->nr++];
		f->dev		= p->ptr.dev;
		f->idx		= p->idx;
		f->nr_failed	= 1;
		f->nr_retries	= 0;
	} else if (p->idx != f->idx) {
		f->idx		= p->idx;
		f->nr_failed	= 1;
		f->nr_retries	= 0;
	} else {
		f->nr_failed++;
	}
}
/*
 * returns true if p1 is better than p2:
 */
static inline bool ptr_better(struct bch_fs *c,
			      const struct extent_ptr_decoded p1,
			      const struct extent_ptr_decoded p2)
{
	if (likely(!p1.idx && !p2.idx)) {
		struct bch_dev *dev1 = bch_dev_bkey_exists(c, p1.ptr.dev);
		struct bch_dev *dev2 = bch_dev_bkey_exists(c, p2.ptr.dev);

		u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
		u64 l2 = atomic64_read(&dev2->cur_latency[READ]);

		/* Pick at random, biased in favor of the faster device: */

		return bch2_rand_range(l1 + l2) > l1;
	}

	if (bch2_force_reconstruct_read)
		return p1.idx > p2.idx;

	return p1.idx < p2.idx;
}
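/*
 * Note on the biased random pick above: assuming bch2_rand_range(n) returns
 * a uniform value in [0, n), p1 wins with probability l2 / (l1 + l2), i.e.
 * each replica is chosen with probability inversely proportional to its
 * device's currently measured read latency.
 */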
/*
 * This picks a non-stale pointer, preferably from a device other than those
 * that have already failed. @failed can be NULL, meaning pick any. If there
 * are no non-stale pointers to other devices, it will still pick a pointer
 * from a failed device.
 */
int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
			       struct bch_io_failures *failed,
			       struct extent_ptr_decoded *pick)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_dev_io_failures *f;
	struct bch_dev *ca;
	int ret = 0;

	if (k.k->type == KEY_TYPE_error)
		return -EIO;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		ca = bch_dev_bkey_exists(c, p.ptr.dev);

		/*
		 * If there are any dirty pointers it's an error if we can't
		 * read:
		 */
		if (!ret && !p.ptr.cached)
			ret = -EIO;

		if (p.ptr.cached && ptr_stale(ca, &p.ptr))
			continue;

		f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
		if (f)
			p.idx = f->nr_failed < f->nr_retries
				? f->idx
				: f->idx + 1;

		if (!p.idx &&
		    !bch2_dev_is_readable(ca))
			p.idx++;

		if (bch2_force_reconstruct_read &&
		    !p.idx && p.has_ec)
			p.idx++;

		if (p.idx >= (unsigned) p.has_ec + 1)
			continue;

		if (ret > 0 && !ptr_better(c, p, *pick))
			continue;

		*pick = p;
		ret = 1;
	}

	return ret;
}
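/*
 * Illustrative sketch of how a read path would use the picker: retry by
 * marking failures and re-picking until no usable pointer remains
 * (issue_read() is a hypothetical helper, not part of this file):
 *
 *	struct bch_io_failures failed = { .nr = 0 };
 *	struct extent_ptr_decoded pick;
 *
 *	while (bch2_bkey_pick_read_device(c, k, &failed, &pick) > 0) {
 *		if (!issue_read(&pick))
 *			break;
 *		bch2_mark_io_failure(&failed, &pick);
 *	}
 */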
/* KEY_TYPE_btree_ptr: */

const char *bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	if (bkey_val_u64s(k.k) > BCH_REPLICAS_MAX)
		return "value too big";

	return bch2_bkey_ptrs_invalid(c, k);
}

void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	bch2_bkey_ptrs_to_text(out, c, k);
}

const char *bch2_btree_ptr_v2_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);

	if (bkey_val_bytes(k.k) <= sizeof(*bp.v))
		return "value too small";

	if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
		return "value too big";

	if (bp.v->min_key.snapshot)
		return "invalid min_key.snapshot";

	return bch2_bkey_ptrs_invalid(c, k);
}
void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
			       struct bkey_s_c k)
{
	struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);

	pr_buf(out, "seq %llx written %u min_key ",
	       le64_to_cpu(bp.v->seq),
	       le16_to_cpu(bp.v->sectors_written));

	bch2_bpos_to_text(out, bp.v->min_key);
	pr_buf(out, " ");
	bch2_bkey_ptrs_to_text(out, c, k);
}

void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
			      unsigned big_endian, int write,
			      struct bkey_s k)
{
	struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(k);

	compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key);

	if (version < bcachefs_metadata_version_inode_btree_change &&
	    btree_node_type_is_extents(btree_id) &&
	    bkey_cmp(bp.v->min_key, POS_MIN))
		bp.v->min_key = write
			? bkey_predecessor(bp.v->min_key)
			: bkey_successor(bp.v->min_key);
}

/* KEY_TYPE_extent: */
const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	return bch2_bkey_ptrs_invalid(c, k);
}

void bch2_extent_to_text(struct printbuf *out, struct bch_fs *c,
			 struct bkey_s_c k)
{
	bch2_bkey_ptrs_to_text(out, c, k);
}
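/*
 * Extent merging below is done in two passes: the first walks both extents'
 * entries in lockstep and bails out with BCH_MERGE_NOMERGE unless every pair
 * of entries is compatible; only then does the second pass combine the
 * checksum entries and resize the key, so a failed merge never leaves a
 * half-modified key behind.
 */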
enum merge_result bch2_extent_merge(struct bch_fs *c,
				    struct bkey_s _l, struct bkey_s _r)
{
	struct bkey_s_extent l = bkey_s_to_extent(_l);
	struct bkey_s_extent r = bkey_s_to_extent(_r);
	union bch_extent_entry *en_l = l.v->start;
	union bch_extent_entry *en_r = r.v->start;
	struct bch_extent_crc_unpacked crc_l, crc_r;

	if (bkey_val_u64s(l.k) != bkey_val_u64s(r.k))
		return BCH_MERGE_NOMERGE;

	crc_l = bch2_extent_crc_unpack(l.k, NULL);

	extent_for_each_entry(l, en_l) {
		en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);

		if (extent_entry_type(en_l) != extent_entry_type(en_r))
			return BCH_MERGE_NOMERGE;

		switch (extent_entry_type(en_l)) {
		case BCH_EXTENT_ENTRY_ptr: {
			const struct bch_extent_ptr *lp = &en_l->ptr;
			const struct bch_extent_ptr *rp = &en_r->ptr;
			struct bch_dev *ca;

			if (lp->offset + crc_l.compressed_size != rp->offset ||
			    lp->dev != rp->dev ||
			    lp->gen != rp->gen)
				return BCH_MERGE_NOMERGE;

			/* We don't allow extents to straddle buckets: */
			ca = bch_dev_bkey_exists(c, lp->dev);

			if (PTR_BUCKET_NR(ca, lp) != PTR_BUCKET_NR(ca, rp))
				return BCH_MERGE_NOMERGE;

			break;
		}
		case BCH_EXTENT_ENTRY_stripe_ptr:
			if (en_l->stripe_ptr.block != en_r->stripe_ptr.block ||
			    en_l->stripe_ptr.idx != en_r->stripe_ptr.idx)
				return BCH_MERGE_NOMERGE;
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
			crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

			if (crc_l.csum_type != crc_r.csum_type ||
			    crc_l.compression_type != crc_r.compression_type ||
			    crc_l.nonce != crc_r.nonce)
				return BCH_MERGE_NOMERGE;

			if (crc_l.offset + crc_l.live_size != crc_l.compressed_size ||
			    crc_r.offset)
				return BCH_MERGE_NOMERGE;

			if (!bch2_checksum_mergeable(crc_l.csum_type))
				return BCH_MERGE_NOMERGE;

			if (crc_is_compressed(crc_l))
				return BCH_MERGE_NOMERGE;

			if (crc_l.csum_type &&
			    crc_l.uncompressed_size +
			    crc_r.uncompressed_size > c->sb.encoded_extent_max)
				return BCH_MERGE_NOMERGE;

			if (crc_l.uncompressed_size + crc_r.uncompressed_size >
			    bch2_crc_field_size_max[extent_entry_type(en_l)])
				return BCH_MERGE_NOMERGE;

			break;
		default:
			return BCH_MERGE_NOMERGE;
		}
	}
	extent_for_each_entry(l, en_l) {
		struct bch_extent_crc_unpacked crc_l, crc_r;

		en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);

		if (!extent_entry_is_crc(en_l))
			continue;

		crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
		crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

		crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
						 crc_l.csum,
						 crc_r.csum,
						 crc_r.uncompressed_size << 9);

		crc_l.uncompressed_size += crc_r.uncompressed_size;
		crc_l.compressed_size += crc_r.compressed_size;

		bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
				     extent_entry_type(en_l));
	}

	bch2_key_resize(l.k, l.k->size + r.k->size);

	return BCH_MERGE_MERGE;
}
/* KEY_TYPE_reservation: */

const char *bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
		return "incorrect value size";

	if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
		return "invalid nr_replicas";

	return NULL;
}

void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
			      struct bkey_s_c k)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	pr_buf(out, "generation %u replicas %u",
	       le32_to_cpu(r.v->generation),
	       r.v->nr_replicas);
}

enum merge_result bch2_reservation_merge(struct bch_fs *c,
					 struct bkey_s _l, struct bkey_s _r)
{
	struct bkey_s_reservation l = bkey_s_to_reservation(_l);
	struct bkey_s_reservation r = bkey_s_to_reservation(_r);

	if (l.v->generation != r.v->generation ||
	    l.v->nr_replicas != r.v->nr_replicas)
		return BCH_MERGE_NOMERGE;

	if ((u64) l.k->size + r.k->size > KEY_SIZE_MAX) {
		bch2_key_resize(l.k, KEY_SIZE_MAX);
		bch2_cut_front_s(l.k->p, r.s);
		return BCH_MERGE_PARTIAL;
	}

	bch2_key_resize(l.k, l.k->size + r.k->size);

	return BCH_MERGE_MERGE;
}
/* Extent checksum entries: */

/* returns true if not equal */
static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
					 struct bch_extent_crc_unpacked r)
{
	return (l.csum_type != r.csum_type ||
		l.compression_type != r.compression_type ||
		l.compressed_size != r.compressed_size ||
		l.uncompressed_size != r.uncompressed_size ||
		l.offset != r.offset ||
		l.live_size != r.live_size ||
		l.nonce != r.nonce ||
		bch2_crc_cmp(l.csum, r.csum));
}

static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
				  struct bch_extent_crc_unpacked n)
{
	return !crc_is_compressed(u) &&
		u.csum_type &&
		u.uncompressed_size > u.live_size &&
		bch2_csum_type_is_encryption(u.csum_type) ==
		bch2_csum_type_is_encryption(n.csum_type);
}

bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
				 struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	if (!n.csum_type)
		return false;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (can_narrow_crc(crc, n))
			return true;

	return false;
}
/*
 * We're writing another replica for this extent, so while we've got the data in
 * memory we'll be computing a new checksum for the currently live data.
 *
 * If there are other replicas we aren't moving, and they are checksummed but
 * not compressed, we can modify them to point to only the data that is
 * currently live (so that readers won't have to bounce) while we've got the
 * checksum we need:
 */
bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked u;
	struct extent_ptr_decoded p;
	union bch_extent_entry *i;
	bool ret = false;

	/* Find a checksum entry that covers only live data: */
	if (!n.live_size) {
		bkey_for_each_crc(&k->k, ptrs, u, i)
			if (!crc_is_compressed(u) &&
			    u.csum_type &&
			    u.live_size == u.uncompressed_size) {
				n = u;
				goto found;
			}
		return false;
	}
found:
	BUG_ON(crc_is_compressed(n));
	BUG_ON(n.offset);
	BUG_ON(n.live_size != k->k.size);

restart_narrow_pointers:
	ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));

	bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
		if (can_narrow_crc(p.crc, n)) {
			bch2_bkey_drop_ptr(bkey_i_to_s(k), &i->ptr);
			p.ptr.offset += p.crc.offset;
			p.crc = n;
			bch2_extent_ptr_decoded_append(k, &p);
			ret = true;
			goto restart_narrow_pointers;
		}

	return ret;
}
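/*
 * Example (illustrative): an extent that was partially overwritten might
 * carry a crc entry covering 128 sectors of which only 32 are still live
 * (offset 64, live_size 32). Given a freshly computed checksum @n for just
 * those 32 sectors, narrowing bumps the pointer by the old crc offset and
 * replaces the wide crc entry with @n, so readers no longer have to read
 * and checksum the full 128 sectors just to return the live region.
 */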
static void bch2_extent_crc_pack(union bch_extent_crc *dst,
				 struct bch_extent_crc_unpacked src,
				 enum bch_extent_entry_type type)
{
#define set_common_fields(_dst, _src)					\
		_dst.type		= 1 << type;			\
		_dst.csum_type		= _src.csum_type,		\
		_dst.compression_type	= _src.compression_type,	\
		_dst._compressed_size	= _src.compressed_size - 1,	\
		_dst._uncompressed_size	= _src.uncompressed_size - 1,	\
		_dst.offset		= _src.offset

	switch (type) {
	case BCH_EXTENT_ENTRY_crc32:
		set_common_fields(dst->crc32, src);
		dst->crc32.csum = *((__le32 *) &src.csum.lo);
		break;
	case BCH_EXTENT_ENTRY_crc64:
		set_common_fields(dst->crc64, src);
		dst->crc64.nonce	= src.nonce;
		dst->crc64.csum_lo	= src.csum.lo;
		dst->crc64.csum_hi	= *((__le16 *) &src.csum.hi);
		break;
	case BCH_EXTENT_ENTRY_crc128:
		set_common_fields(dst->crc128, src);
		dst->crc128.nonce	= src.nonce;
		dst->crc128.csum	= src.csum;
		break;
	default:
		BUG();
	}
#undef set_common_fields
}
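/*
 * bch2_extent_crc_append() below picks the smallest on-disk crc variant
 * (crc32, crc64 or crc128) that can hold the new entry's checksum bytes,
 * size and nonce, packs it with bch2_extent_crc_pack(), and appends it to
 * the end of the key's value.
 */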
void bch2_extent_crc_append(struct bkey_i *k,
			    struct bch_extent_crc_unpacked new)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	union bch_extent_crc *crc = (void *) ptrs.end;
	enum bch_extent_entry_type type;

	if (bch_crc_bytes[new.csum_type] <= 4 &&
	    new.uncompressed_size <= CRC32_SIZE_MAX &&
	    new.nonce <= CRC32_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc32;
	else if (bch_crc_bytes[new.csum_type] <= 10 &&
		 new.uncompressed_size <= CRC64_SIZE_MAX &&
		 new.nonce <= CRC64_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc64;
	else if (bch_crc_bytes[new.csum_type] <= 16 &&
		 new.uncompressed_size <= CRC128_SIZE_MAX &&
		 new.nonce <= CRC128_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc128;
	else
		BUG();

	bch2_extent_crc_pack(crc, new, type);

	k->k.u64s += extent_entry_u64s(ptrs.end);

	EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
}
/* Generic code for keys with pointers: */

unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
{
	return bch2_bkey_devs(k).nr;
}

unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
{
	return k.k->type == KEY_TYPE_reservation
		? bkey_s_c_to_reservation(k).v->nr_replicas
		: bch2_bkey_dirty_devs(k).nr;
}

unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
{
	unsigned ret = 0;

	if (k.k->type == KEY_TYPE_reservation) {
		ret = bkey_s_c_to_reservation(k).v->nr_replicas;
	} else {
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			ret += !p.ptr.cached && !crc_is_compressed(p.crc);
	}

	return ret;
}
unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned ret = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (!p.ptr.cached && crc_is_compressed(p.crc))
			ret += p.crc.compressed_size;

	return ret;
}

bool bch2_bkey_is_incompressible(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;

	bkey_for_each_crc(k.k, ptrs, crc, entry)
		if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
			return true;
	return false;
}
bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
				unsigned nr_replicas, bool compressed)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bpos end = pos;
	struct bkey_s_c k;
	bool ret = true;
	int err;

	end.offset += size;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_extents, pos,
			   BTREE_ITER_SLOTS, k, err) {
		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
			break;

		if (nr_replicas > bch2_bkey_replicas(c, k) ||
		    (!compressed && bch2_bkey_sectors_compressed(k))) {
			ret = false;
			break;
		}
	}
	bch2_trans_iter_put(&trans, iter);

	bch2_trans_exit(&trans);

	return ret;
}
unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned replicas = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if (p.ptr.cached)
			continue;

		if (p.has_ec)
			replicas += p.ec.redundancy;

		replicas++;
	}

	return replicas;
}
static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
					   struct extent_ptr_decoded p)
{
	unsigned durability = 0;
	struct bch_dev *ca;

	if (p.ptr.cached)
		return 0;

	ca = bch_dev_bkey_exists(c, p.ptr.dev);

	if (ca->mi.state != BCH_MEMBER_STATE_failed)
		durability = max_t(unsigned, durability, ca->mi.durability);

	if (p.has_ec)
		durability += p.ec.redundancy;

	return durability;
}
unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned durability = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		durability += bch2_extent_ptr_durability(c, p);

	return durability;
}
void bch2_bkey_mark_replicas_cached(struct bch_fs *c, struct bkey_s k,
				    unsigned target,
				    unsigned nr_desired_replicas)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	int extra = bch2_bkey_durability(c, k.s_c) - nr_desired_replicas;

	if (target && extra > 0)
		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			int n = bch2_extent_ptr_durability(c, p);

			if (n && n <= extra &&
			    !bch2_dev_in_target(c, p.ptr.dev, target)) {
				entry->ptr.cached = true;
				extra -= n;
			}
		}

	if (extra > 0)
		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			int n = bch2_extent_ptr_durability(c, p);

			if (n && n <= extra) {
				entry->ptr.cached = true;
				extra -= n;
			}
		}
}
void bch2_bkey_extent_entry_drop(struct bkey_i *k, union bch_extent_entry *entry)
{
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
	union bch_extent_entry *next = extent_entry_next(entry);

	memmove_u64s(entry, next, (u64 *) end - (u64 *) next);
	k->k.u64s -= extent_entry_u64s(entry);
}
void bch2_bkey_append_ptr(struct bkey_i *k,
			  struct bch_extent_ptr ptr)
{
	EBUG_ON(bch2_bkey_has_device(bkey_i_to_s_c(k), ptr.dev));

	switch (k->k.type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
	case KEY_TYPE_extent:
		EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);

		ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;

		memcpy((void *) &k->v + bkey_val_bytes(&k->k),
		       &ptr,
		       sizeof(ptr));
		k->u64s++;
		break;
	default:
		BUG();
	}
}
static inline void __extent_entry_insert(struct bkey_i *k,
					 union bch_extent_entry *dst,
					 union bch_extent_entry *new)
{
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));

	memmove_u64s_up_small((u64 *) dst + extent_entry_u64s(new),
			      dst, (u64 *) end - (u64 *) dst);
	k->k.u64s += extent_entry_u64s(new);
	memcpy(dst, new, extent_entry_bytes(new));
}
void bch2_extent_ptr_decoded_append(struct bkey_i *k,
				    struct extent_ptr_decoded *p)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked crc =
		bch2_extent_crc_unpack(&k->k, NULL);
	union bch_extent_entry *pos;

	if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
		pos = ptrs.start;
		goto found;
	}

	bkey_for_each_crc(&k->k, ptrs, crc, pos)
		if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
			pos = extent_entry_next(pos);
			goto found;
		}

	bch2_extent_crc_append(k, p->crc);
	pos = bkey_val_end(bkey_i_to_s(k));
found:
	p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
	__extent_entry_insert(k, pos, to_entry(&p->ptr));

	if (p->has_ec) {
		p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
		__extent_entry_insert(k, pos, to_entry(&p->ec));
	}
}
static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
					  union bch_extent_entry *entry)
{
	union bch_extent_entry *i = ptrs.start;

	if (i == entry)
		return NULL;

	while (extent_entry_next(i) != entry)
		i = extent_entry_next(i);
	return i;
}
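/*
 * When dropping a pointer we may also have to drop the crc entry that
 * precedes it: the crc entry is only kept if the entry immediately after
 * the dropped pointer is another pointer that still refers to that crc
 * (see drop_crc below).
 */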
union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
					   struct bch_extent_ptr *ptr)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *dst, *src, *prev;
	bool drop_crc = true;

	EBUG_ON(ptr < &ptrs.start->ptr ||
		ptr >= &ptrs.end->ptr);
	EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);

	src = extent_entry_next(to_entry(ptr));
	if (src != ptrs.end &&
	    !extent_entry_is_crc(src))
		drop_crc = false;

	dst = to_entry(ptr);
	while ((prev = extent_entry_prev(ptrs, dst))) {
		if (extent_entry_is_ptr(prev))
			break;

		if (extent_entry_is_crc(prev)) {
			if (drop_crc)
				dst = prev;
			break;
		}

		dst = prev;
	}

	memmove_u64s_down(dst, src,
			  (u64 *) ptrs.end - (u64 *) src);
	k.k->u64s -= (u64 *) src - (u64 *) dst;

	return dst;
}
void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
{
	struct bch_extent_ptr *ptr;

	bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
}

const struct bch_extent_ptr *
bch2_bkey_has_device(struct bkey_s_c k, unsigned dev)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)
		if (ptr->dev == dev)
			return ptr;

	return NULL;
}

bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)
		if (bch2_dev_in_target(c, ptr->dev, target) &&
		    (!ptr->cached ||
		     !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
			return true;

	return false;
}
bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
			   struct bch_extent_ptr m, u64 offset)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (p.ptr.dev == m.dev &&
		    p.ptr.gen == m.gen &&
		    (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
		    (s64) m.offset - offset)
			return true;

	return false;
}
/*
 * bch_extent_normalize - clean up an extent, dropping stale pointers etc.
 *
 * Returns true if @k should be dropped entirely
 *
 * For existing keys, only called when btree nodes are being rewritten, not when
 * they're merely being compacted/resorted in memory.
 */
bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
{
	struct bch_extent_ptr *ptr;

	bch2_bkey_drop_ptrs(k, ptr,
		ptr->cached &&
		ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));

	/* will only happen if all pointers were cached: */
	if (!bch2_bkey_nr_ptrs(k.s_c))
		k.k->type = KEY_TYPE_deleted;

	return bkey_deleted(k.k);
}
void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	const struct bch_extent_ptr *ptr;
	const struct bch_extent_stripe_ptr *ec;
	struct bch_dev *ca;
	bool first = true;

	bkey_extent_entry_for_each(ptrs, entry) {
		if (!first)
			pr_buf(out, " ");

		switch (__extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			ptr = entry_to_ptr(entry);
			ca = ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
				? bch_dev_bkey_exists(c, ptr->dev)
				: NULL;

			pr_buf(out, "ptr: %u:%llu gen %u%s%s", ptr->dev,
			       (u64) ptr->offset, ptr->gen,
			       ptr->cached ? " cached" : "",
			       ca && ptr_stale(ca, ptr)
			       ? " stale" : "");
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			pr_buf(out, "crc: c_size %u size %u offset %u nonce %u csum %u compress %u",
			       crc.compressed_size,
			       crc.uncompressed_size,
			       crc.offset, crc.nonce,
			       crc.csum_type,
			       crc.compression_type);
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			ec = &entry->stripe_ptr;

			pr_buf(out, "ec: idx %llu block %u",
			       (u64) ec->idx, ec->block);
			break;
		default:
			pr_buf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
			return;
		}

		first = false;
	}
}
static const char *extent_ptr_invalid(const struct bch_fs *c,
				      struct bkey_s_c k,
				      const struct bch_extent_ptr *ptr,
				      unsigned size_ondisk,
				      bool metadata)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr2;
	struct bch_dev *ca;

	if (!bch2_dev_exists2(c, ptr->dev))
		return "pointer to invalid device";

	ca = bch_dev_bkey_exists(c, ptr->dev);
	if (!ca)
		return "pointer to invalid device";

	bkey_for_each_ptr(ptrs, ptr2)
		if (ptr != ptr2 && ptr->dev == ptr2->dev)
			return "multiple pointers to same device";

	if (ptr->offset + size_ondisk > bucket_to_sector(ca, ca->mi.nbuckets))
		return "offset past end of device";

	if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket))
		return "offset before first bucket";

	if (bucket_remainder(ca, ptr->offset) +
	    size_ondisk > ca->mi.bucket_size)
		return "spans multiple buckets";

	return NULL;
}
const char *bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_devs_list devs;
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	unsigned size_ondisk = k.k->size;
	const char *reason;
	unsigned nonce = UINT_MAX;
	unsigned i;

	if (k.k->type == KEY_TYPE_btree_ptr ||
	    k.k->type == KEY_TYPE_btree_ptr_v2)
		size_ondisk = c->opts.btree_node_size;

	bkey_extent_entry_for_each(ptrs, entry) {
		if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
			return "invalid extent entry type";

		if (k.k->type == KEY_TYPE_btree_ptr &&
		    !extent_entry_is_ptr(entry))
			return "has non ptr field";

		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			reason = extent_ptr_invalid(c, k, &entry->ptr,
						    size_ondisk, false);
			if (reason)
				return reason;
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			if (crc.offset + crc.live_size >
			    crc.uncompressed_size)
				return "checksum offset + key size > uncompressed size";

			size_ondisk = crc.compressed_size;

			if (!bch2_checksum_type_valid(c, crc.csum_type))
				return "invalid checksum type";

			if (crc.compression_type >= BCH_COMPRESSION_TYPE_NR)
				return "invalid compression type";

			if (bch2_csum_type_is_encryption(crc.csum_type)) {
				if (nonce == UINT_MAX)
					nonce = crc.offset + crc.nonce;
				else if (nonce != crc.offset + crc.nonce)
					return "incorrect nonce";
			}
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			break;
		}
	}

	devs = bch2_bkey_devs(k);
	bubble_sort(devs.devs, devs.nr, u8_cmp);
	for (i = 0; i + 1 < devs.nr; i++)
		if (devs.devs[i] == devs.devs[i + 1])
			return "multiple ptrs to same device";

	return NULL;
}
void bch2_ptr_swab(struct bkey_s k)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	u64 *d;

	for (d = (u64 *) ptrs.start;
	     d != (u64 *) ptrs.end;
	     d++)
		*d = swab64(*d);

	for (entry = ptrs.start;
	     entry < ptrs.end;
	     entry = extent_entry_next(entry)) {
		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			break;
		case BCH_EXTENT_ENTRY_crc32:
			entry->crc32.csum = swab32(entry->crc32.csum);
			break;
		case BCH_EXTENT_ENTRY_crc64:
			entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
			entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
			break;
		case BCH_EXTENT_ENTRY_crc128:
			entry->crc128.csum.hi = (__force __le64)
				swab64((__force u64) entry->crc128.csum.hi);
			entry->crc128.csum.lo = (__force __le64)
				swab64((__force u64) entry->crc128.csum.lo);
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			break;
		}
	}
}
/* Generic extent code: */

int bch2_cut_front_s(struct bpos where, struct bkey_s k)
{
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 sub;

	if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
		return 0;

	EBUG_ON(bkey_cmp(where, k.k->p) > 0);

	sub = where.offset - bkey_start_offset(k.k);

	k.k->size -= sub;

	if (!k.k->size) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
	}

	switch (k.k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v: {
		struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
		union bch_extent_entry *entry;
		bool seen_crc = false;

		bkey_extent_entry_for_each(ptrs, entry) {
			switch (extent_entry_type(entry)) {
			case BCH_EXTENT_ENTRY_ptr:
				if (!seen_crc)
					entry->ptr.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc32:
				entry->crc32.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc64:
				entry->crc64.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc128:
				entry->crc128.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_stripe_ptr:
				break;
			}

			if (extent_entry_is_crc(entry))
				seen_crc = true;
		}

		break;
	}
	case KEY_TYPE_reflink_p: {
		struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);

		le64_add_cpu(&p.v->idx, sub);
		break;
	}
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data: {
		void *p = bkey_inline_data_p(k);
		unsigned bytes = bkey_inline_data_bytes(k.k);

		sub = min_t(u64, sub << 9, bytes);

		memmove(p, p + sub, bytes - sub);

		new_val_u64s -= sub >> 3;
		break;
	}
	}

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
}
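/*
 * Example (illustrative): cutting 8 sectors off the front of a checksummed
 * extent bumps each crc entry's offset by 8; pointers that come after a crc
 * entry are left alone, since the crc offset already accounts for the
 * discarded data, while pointers with no preceding crc entry have their
 * offset bumped directly.
 */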
int bch2_cut_back_s(struct bpos where, struct bkey_s k)
{
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 len = 0;

	if (bkey_cmp(where, k.k->p) >= 0)
		return 0;

	EBUG_ON(bkey_cmp(where, bkey_start_pos(k.k)) < 0);

	len = where.offset - bkey_start_offset(k.k);

	k.k->p.offset = where.offset;
	k.k->size = len;

	if (!len) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
	}

	switch (k.k->type) {
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data:
		new_val_u64s = (bkey_inline_data_offset(k.k) +
				min(bkey_inline_data_bytes(k.k), k.k->size << 9)) >> 3;
		break;
	}

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
}