// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Code for managing the extent btree and dynamically updating the writeback
 * dirty sector count.
 */

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_iter.h"
#include "buckets.h"
#include "checksum.h"
#include "debug.h"
#include "disk_groups.h"
#include "error.h"
#include "extents.h"
#include "replicas.h"
#include "super.h"
#include "super-io.h"
#include "util.h"

#include <trace/events/bcachefs.h>
static unsigned bch2_crc_field_size_max[] = {
	[BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
};

static void bch2_extent_crc_pack(union bch_extent_crc *,
				 struct bch_extent_crc_unpacked,
				 enum bch_extent_entry_type);
static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
						   unsigned dev)
{
	struct bch_dev_io_failures *i;

	for (i = f->devs; i < f->devs + f->nr; i++)
		if (i->dev == dev)
			return i;

	return NULL;
}
void bch2_mark_io_failure(struct bch_io_failures *failed,
			  struct extent_ptr_decoded *p)
{
	struct bch_dev_io_failures *f = dev_io_failures(failed, p->ptr.dev);

	if (!f) {
		BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));

		f = &failed->devs[failed->nr++];
		f->dev		= p->ptr.dev;
		f->idx		= p->idx;
		f->nr_failed	= 1;
		f->nr_retries	= 0;
	} else if (p->idx != f->idx) {
		f->idx		= p->idx;
		f->nr_failed	= 1;
		f->nr_retries	= 0;
	} else {
		f->nr_failed++;
	}
}
/*
 * Returns true if p1 is better than p2:
 */
static inline bool ptr_better(struct bch_fs *c,
			      const struct extent_ptr_decoded p1,
			      const struct extent_ptr_decoded p2)
{
	if (likely(!p1.idx && !p2.idx)) {
		struct bch_dev *dev1 = bch_dev_bkey_exists(c, p1.ptr.dev);
		struct bch_dev *dev2 = bch_dev_bkey_exists(c, p2.ptr.dev);

		u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
		u64 l2 = atomic64_read(&dev2->cur_latency[READ]);

		/* Pick at random, biased in favor of the faster device: */

		return bch2_rand_range(l1 + l2) > l1;
	}

	if (force_reconstruct_read(c))
		return p1.idx > p2.idx;

	return p1.idx < p2.idx;
}
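
/*
 * Worked example of the biased coin above (illustrative numbers only): with
 * dev1 averaging 1ms and dev2 averaging 3ms, bch2_rand_range(4) > 1 holds
 * roughly 3/4 of the time, so the faster device serves ~75% of reads while
 * the slower one still sees enough traffic to keep its latency estimate
 * current.
 */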
/*
 * This picks a non-stale pointer, preferably from a device other than @avoid.
 * Avoid can be NULL, meaning pick any. If there are no non-stale pointers to
 * other devices, it will still pick a pointer from avoid.
 */
int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
			       struct bch_io_failures *failed,
			       struct extent_ptr_decoded *pick)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_dev_io_failures *f;
	struct bch_dev *ca;
	int ret = 0;

	if (k.k->type == KEY_TYPE_error)
		return -EIO;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		ca = bch_dev_bkey_exists(c, p.ptr.dev);

		/*
		 * If there are any dirty pointers it's an error if we can't
		 * read:
		 */
		if (!ret && !p.ptr.cached)
			ret = -EIO;

		if (p.ptr.cached && ptr_stale(ca, &p.ptr))
			continue;

		f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
		if (f)
			p.idx = f->nr_failed < f->nr_retries
				? f->idx
				: f->idx + 1;

		if (!p.idx &&
		    !bch2_dev_is_readable(ca))
			p.idx++;

		if (force_reconstruct_read(c) &&
		    !p.idx && p.has_ec)
			p.idx++;

		if (p.idx >= (unsigned) p.has_ec + 1)
			continue;

		if (ret > 0 && !ptr_better(c, p, *pick))
			continue;

		*pick = p;
		ret = 1;
	}

	return ret;
}
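
/*
 * A note on p.idx above (a reading of the logic, for clarity): idx 0 means
 * "read this replica directly" and idx 1 means "reconstruct via the erasure
 * coded stripe", so a pointer offers p.has_ec + 1 read strategies. Recorded
 * failures bump idx past strategies that already failed (retrying the same
 * one until nr_retries is exhausted), and the
 * `p.idx >= (unsigned) p.has_ec + 1` check skips pointers with nothing
 * left to try.
 */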
/* KEY_TYPE_btree_ptr: */
const char *bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
		return "value too big";

	return bch2_bkey_ptrs_invalid(c, k);
}
void bch2_btree_ptr_debugcheck(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;
	const char *err;
	char buf[160];
	struct bucket_mark mark;
	struct bch_dev *ca;

	if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
		return;

	if (!percpu_down_read_trylock(&c->mark_lock))
		return;

	bkey_for_each_ptr(ptrs, ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);

		mark = ptr_bucket_mark(ca, ptr);

		err = "stale";
		if (gen_after(mark.gen, ptr->gen))
			goto err;

		err = "inconsistent";
		if (mark.data_type != BCH_DATA_btree ||
		    mark.dirty_sectors < c->opts.btree_node_size)
			goto err;
	}

	percpu_up_read(&c->mark_lock);
	return;
err:
	percpu_up_read(&c->mark_lock);
	bch2_fs_inconsistent(c, "%s btree pointer %s: bucket %zi gen %i mark %08x",
		err, (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf),
		PTR_BUCKET_NR(ca, ptr),
		mark.gen, (unsigned) mark.v.counter);
}
void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	bch2_bkey_ptrs_to_text(out, c, k);
}
void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
			       struct bkey_s_c k)
{
	struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);

	pr_buf(out, "seq %llx sectors %u written %u min_key ",
	       le64_to_cpu(bp.v->seq),
	       le16_to_cpu(bp.v->sectors),
	       le16_to_cpu(bp.v->sectors_written));

	bch2_bpos_to_text(out, bp.v->min_key);
	pr_buf(out, " ");
	bch2_bkey_ptrs_to_text(out, c, k);
}
void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
			      unsigned big_endian, int write,
			      struct bkey_s k)
{
	struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(k);

	compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key);

	if (version < bcachefs_metadata_version_inode_btree_change &&
	    btree_node_type_is_extents(btree_id) &&
	    bkey_cmp(bp.v->min_key, POS_MIN))
		bp.v->min_key = write
			? bkey_predecessor(bp.v->min_key)
			: bkey_successor(bp.v->min_key);
}
/* KEY_TYPE_extent: */
const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	return bch2_bkey_ptrs_invalid(c, k);
}
void bch2_extent_debugcheck(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	char buf[160];

	if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags) ||
	    !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
		return;

	if (!percpu_down_read_trylock(&c->mark_lock))
		return;

	extent_for_each_ptr_decode(e, p, entry) {
		struct bch_dev *ca	= bch_dev_bkey_exists(c, p.ptr.dev);
		struct bucket_mark mark	= ptr_bucket_mark(ca, &p.ptr);
		unsigned stale		= gen_after(mark.gen, p.ptr.gen);
		unsigned disk_sectors	= ptr_disk_sectors(p);
		unsigned mark_sectors	= p.ptr.cached
			? mark.cached_sectors
			: mark.dirty_sectors;

		bch2_fs_inconsistent_on(stale && !p.ptr.cached, c,
			"stale dirty pointer (ptr gen %u bucket %u)",
			p.ptr.gen, mark.gen);

		bch2_fs_inconsistent_on(stale > 96, c,
			"key too stale: %i", stale);

		bch2_fs_inconsistent_on(!stale &&
			(mark.data_type != BCH_DATA_user ||
			 mark_sectors < disk_sectors), c,
			"extent pointer not marked: %s:\n"
			"type %u sectors %u < %u",
			(bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf),
			mark.data_type,
			mark_sectors, disk_sectors);
	}

	percpu_up_read(&c->mark_lock);
}
void bch2_extent_to_text(struct printbuf *out, struct bch_fs *c,
			 struct bkey_s_c k)
{
	bch2_bkey_ptrs_to_text(out, c, k);
}
enum merge_result bch2_extent_merge(struct bch_fs *c,
				    struct bkey_s _l, struct bkey_s _r)
{
	struct bkey_s_extent l = bkey_s_to_extent(_l);
	struct bkey_s_extent r = bkey_s_to_extent(_r);
	union bch_extent_entry *en_l = l.v->start;
	union bch_extent_entry *en_r = r.v->start;
	struct bch_extent_crc_unpacked crc_l, crc_r;

	if (bkey_val_u64s(l.k) != bkey_val_u64s(r.k))
		return BCH_MERGE_NOMERGE;

	crc_l = bch2_extent_crc_unpack(l.k, NULL);

	extent_for_each_entry(l, en_l) {
		en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);

		if (extent_entry_type(en_l) != extent_entry_type(en_r))
			return BCH_MERGE_NOMERGE;

		switch (extent_entry_type(en_l)) {
		case BCH_EXTENT_ENTRY_ptr: {
			const struct bch_extent_ptr *lp = &en_l->ptr;
			const struct bch_extent_ptr *rp = &en_r->ptr;
			struct bch_dev *ca;

			if (lp->offset + crc_l.compressed_size != rp->offset ||
			    lp->dev			!= rp->dev ||
			    lp->gen			!= rp->gen)
				return BCH_MERGE_NOMERGE;

			/* We don't allow extents to straddle buckets: */
			ca = bch_dev_bkey_exists(c, lp->dev);

			if (PTR_BUCKET_NR(ca, lp) != PTR_BUCKET_NR(ca, rp))
				return BCH_MERGE_NOMERGE;

			break;
		}
		case BCH_EXTENT_ENTRY_stripe_ptr:
			if (en_l->stripe_ptr.block	!= en_r->stripe_ptr.block ||
			    en_l->stripe_ptr.idx	!= en_r->stripe_ptr.idx)
				return BCH_MERGE_NOMERGE;
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
			crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

			if (crc_l.csum_type		!= crc_r.csum_type ||
			    crc_l.compression_type	!= crc_r.compression_type ||
			    crc_l.nonce			!= crc_r.nonce)
				return BCH_MERGE_NOMERGE;

			if (crc_l.offset + crc_l.live_size != crc_l.compressed_size ||
			    crc_r.offset)
				return BCH_MERGE_NOMERGE;

			if (!bch2_checksum_mergeable(crc_l.csum_type))
				return BCH_MERGE_NOMERGE;

			if (crc_is_compressed(crc_l))
				return BCH_MERGE_NOMERGE;

			if (crc_l.csum_type &&
			    crc_l.uncompressed_size +
			    crc_r.uncompressed_size > c->sb.encoded_extent_max)
				return BCH_MERGE_NOMERGE;

			if (crc_l.uncompressed_size + crc_r.uncompressed_size >
			    bch2_crc_field_size_max[extent_entry_type(en_l)])
				return BCH_MERGE_NOMERGE;

			break;
		default:
			return BCH_MERGE_NOMERGE;
		}
	}

	extent_for_each_entry(l, en_l) {
		struct bch_extent_crc_unpacked crc_l, crc_r;

		en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);

		if (!extent_entry_is_crc(en_l))
			continue;

		crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
		crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

		crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
						 crc_l.csum,
						 crc_r.csum,
						 crc_r.uncompressed_size << 9);

		crc_l.uncompressed_size	+= crc_r.uncompressed_size;
		crc_l.compressed_size	+= crc_r.compressed_size;

		bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
				     extent_entry_type(en_l));
	}

	bch2_key_resize(l.k, l.k->size + r.k->size);

	return BCH_MERGE_MERGE;
}
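
/*
 * Example of a mergeable pair, assuming the checksums are a mergeable type
 * such as crc32c (what bch2_checksum_mergeable() accepts): l covers sectors
 * [0,8) at dev 0 offset 1024 and r covers [8,16) at offset 1032, same gen,
 * same bucket, neither compressed. The entry layouts then match pairwise,
 * the crc entries are combined with bch2_checksum_merge(), and the result is
 * one 16-sector extent; any mismatch along the way returns BCH_MERGE_NOMERGE
 * instead.
 */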
/* KEY_TYPE_reservation: */
const char *bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
		return "incorrect value size";

	if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
		return "invalid nr_replicas";

	return NULL;
}
void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
			      struct bkey_s_c k)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	pr_buf(out, "generation %u replicas %u",
	       le32_to_cpu(r.v->generation),
	       r.v->nr_replicas);
}
enum merge_result bch2_reservation_merge(struct bch_fs *c,
					 struct bkey_s _l, struct bkey_s _r)
{
	struct bkey_s_reservation l = bkey_s_to_reservation(_l);
	struct bkey_s_reservation r = bkey_s_to_reservation(_r);

	if (l.v->generation != r.v->generation ||
	    l.v->nr_replicas != r.v->nr_replicas)
		return BCH_MERGE_NOMERGE;

	if ((u64) l.k->size + r.k->size > KEY_SIZE_MAX) {
		bch2_key_resize(l.k, KEY_SIZE_MAX);
		bch2_cut_front_s(l.k->p, r.s);
		return BCH_MERGE_PARTIAL;
	}

	bch2_key_resize(l.k, l.k->size + r.k->size);

	return BCH_MERGE_MERGE;
}
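
/*
 * Sketch of the partial-merge path above: KEY_SIZE_MAX caps a single key's
 * size, so merging reservations of, say, KEY_SIZE_MAX - 10 and 100 sectors
 * grows l to exactly KEY_SIZE_MAX, cuts the now-overlapping front off r with
 * bch2_cut_front_s(), and returns BCH_MERGE_PARTIAL so the caller keeps both
 * keys.
 */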
/* Extent checksum entries: */

/* returns true if not equal */
static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
					 struct bch_extent_crc_unpacked r)
{
	return (l.csum_type		!= r.csum_type ||
		l.compression_type	!= r.compression_type ||
		l.compressed_size	!= r.compressed_size ||
		l.uncompressed_size	!= r.uncompressed_size ||
		l.offset		!= r.offset ||
		l.live_size		!= r.live_size ||
		l.nonce			!= r.nonce ||
		bch2_crc_cmp(l.csum, r.csum));
}
static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
				  struct bch_extent_crc_unpacked n)
{
	return !crc_is_compressed(u) &&
		u.csum_type &&
		u.uncompressed_size > u.live_size &&
		bch2_csum_type_is_encryption(u.csum_type) ==
		bch2_csum_type_is_encryption(n.csum_type);
}
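
/*
 * Narrowing by example: a crc entry with uncompressed_size 128, offset 32 and
 * live_size 64 forces readers to read and checksum all 128 sectors just to
 * return the live 64. Given a fresh checksum n covering exactly the live
 * region (and matching encryption-ness, since nonces depend on the layout),
 * the entry can be rewritten so reads no longer have to bounce.
 */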
bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
				 struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	if (!n.csum_type)
		return false;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (can_narrow_crc(crc, n))
			return true;

	return false;
}
/*
 * We're writing another replica for this extent, so while we've got the data in
 * memory we'll be computing a new checksum for the currently live data.
 *
 * If there are other replicas we aren't moving, and they are checksummed but
 * not compressed, we can modify them to point to only the data that is
 * currently live (so that readers won't have to bounce) while we've got the
 * checksum we need:
 */
bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked u;
	struct extent_ptr_decoded p;
	union bch_extent_entry *i;
	bool ret = false;

	/* Find a checksum entry that covers only live data: */
	if (!n.csum_type) {
		bkey_for_each_crc(&k->k, ptrs, u, i)
			if (!crc_is_compressed(u) &&
			    u.csum_type &&
			    u.live_size == u.uncompressed_size) {
				n = u;
				goto found;
			}
		return false;
	}
found:
	BUG_ON(crc_is_compressed(n));
	BUG_ON(n.offset);
	BUG_ON(n.live_size != k->k.size);

restart_narrow_pointers:
	ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));

	bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
		if (can_narrow_crc(p.crc, n)) {
			bch2_bkey_drop_ptr(bkey_i_to_s(k), &i->ptr);
			p.ptr.offset += p.crc.offset;
			p.crc = n;
			bch2_extent_ptr_decoded_append(k, &p);
			ret = true;
			goto restart_narrow_pointers;
		}

	return ret;
}
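
/*
 * Typical use, as a sketch: the write path computes a checksum over the live
 * data it's about to write for one replica, then calls
 * bch2_bkey_narrow_crcs() with it to retroactively tighten the other
 * uncompressed replicas' crc entries. Each narrowed pointer is dropped and
 * re-appended, which mutates the value's entry layout mid-walk; that is why
 * the loop restarts from scratch after every modification.
 */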
static void bch2_extent_crc_pack(union bch_extent_crc *dst,
				 struct bch_extent_crc_unpacked src,
				 enum bch_extent_entry_type type)
{
#define set_common_fields(_dst, _src)					\
		_dst.type		= 1 << type;			\
		_dst.csum_type		= _src.csum_type,		\
		_dst.compression_type	= _src.compression_type,	\
		_dst._compressed_size	= _src.compressed_size - 1,	\
		_dst._uncompressed_size	= _src.uncompressed_size - 1,	\
		_dst.offset		= _src.offset

	switch (type) {
	case BCH_EXTENT_ENTRY_crc32:
		set_common_fields(dst->crc32, src);
		dst->crc32.csum	= *((__le32 *) &src.csum.lo);
		break;
	case BCH_EXTENT_ENTRY_crc64:
		set_common_fields(dst->crc64, src);
		dst->crc64.nonce	= src.nonce;
		dst->crc64.csum_lo	= src.csum.lo;
		dst->crc64.csum_hi	= *((__le16 *) &src.csum.hi);
		break;
	case BCH_EXTENT_ENTRY_crc128:
		set_common_fields(dst->crc128, src);
		dst->crc128.nonce	= src.nonce;
		dst->crc128.csum	= src.csum;
		break;
	default:
		BUG();
	}
#undef set_common_fields
}
void bch2_extent_crc_append(struct bkey_i *k,
			    struct bch_extent_crc_unpacked new)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	union bch_extent_crc *crc = (void *) ptrs.end;
	enum bch_extent_entry_type type;

	if (bch_crc_bytes[new.csum_type]	<= 4 &&
	    new.uncompressed_size		<= CRC32_SIZE_MAX &&
	    new.nonce				<= CRC32_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc32;
	else if (bch_crc_bytes[new.csum_type]	<= 10 &&
		 new.uncompressed_size		<= CRC64_SIZE_MAX &&
		 new.nonce			<= CRC64_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc64;
	else if (bch_crc_bytes[new.csum_type]	<= 16 &&
		 new.uncompressed_size		<= CRC128_SIZE_MAX &&
		 new.nonce			<= CRC128_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc128;
	else
		BUG();

	bch2_extent_crc_pack(crc, new, type);

	k->k.u64s += extent_entry_u64s(ptrs.end);

	EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
}
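
/*
 * The three thresholds above mirror the packed formats' field widths: a
 * bch_extent_crc32 entry holds a 4-byte checksum, crc64 holds 10 bytes
 * (64-bit csum_lo plus 16-bit csum_hi), and crc128 a full 16 bytes; each
 * format likewise caps _uncompressed_size and nonce at its
 * CRC{32,64,128}_{SIZE,NONCE}_MAX. So e.g. a crc32c checksum on a small,
 * nonce-free extent packs into the smallest entry, while a crc64 checksum
 * falls through to one of the wider formats.
 */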
/* Generic code for keys with pointers: */
unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
{
	return bch2_bkey_devs(k).nr;
}
unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
{
	return k.k->type == KEY_TYPE_reservation
		? bkey_s_c_to_reservation(k).v->nr_replicas
		: bch2_bkey_dirty_devs(k).nr;
}
unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
{
	unsigned ret = 0;

	if (k.k->type == KEY_TYPE_reservation) {
		ret = bkey_s_c_to_reservation(k).v->nr_replicas;
	} else {
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			ret += !p.ptr.cached && !crc_is_compressed(p.crc);
	}

	return ret;
}
unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned ret = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (!p.ptr.cached && crc_is_compressed(p.crc))
			ret += p.crc.compressed_size;

	return ret;
}
bool bch2_bkey_is_incompressible(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;

	bkey_for_each_crc(k.k, ptrs, crc, entry)
		if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
			return true;

	return false;
}
bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
				unsigned nr_replicas)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bpos end = pos;
	struct bkey_s_c k;
	bool ret = true;
	int err;

	end.offset += size;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, pos,
			   BTREE_ITER_SLOTS, k, err) {
		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
			break;

		if (nr_replicas > bch2_bkey_nr_ptrs_fully_allocated(k)) {
			ret = false;
			break;
		}
	}
	bch2_trans_exit(&trans);

	return ret;
}
static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
					   struct extent_ptr_decoded p)
{
	unsigned durability = 0;
	struct bch_dev *ca;

	if (p.ptr.cached)
		return 0;

	ca = bch_dev_bkey_exists(c, p.ptr.dev);

	if (ca->mi.state != BCH_MEMBER_STATE_FAILED)
		durability = max_t(unsigned, durability, ca->mi.durability);

	if (p.has_ec) {
		struct stripe *s =
			genradix_ptr(&c->stripes[0], p.ec.idx);

		if (WARN_ON(!s))
			goto out;

		durability += s->nr_redundant;
	}
out:
	return durability;
}
unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned durability = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		durability += bch2_extent_ptr_durability(c, p);

	return durability;
}
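
/*
 * Durability arithmetic, by example: a dirty pointer on a device with
 * mi.durability = 1 contributes 1; if that pointer also participates in an
 * erasure coded stripe with nr_redundant = 2, it contributes 3 in total.
 * Cached pointers contribute 0, and a pointer to a failed device contributes
 * nothing for the device itself.
 */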
void bch2_bkey_mark_replicas_cached(struct bch_fs *c, struct bkey_s k,
				    unsigned target,
				    unsigned nr_desired_replicas)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	int extra = bch2_bkey_durability(c, k.s_c) - nr_desired_replicas;

	if (target && extra > 0)
		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			int n = bch2_extent_ptr_durability(c, p);

			if (n && n <= extra &&
			    !bch2_dev_in_target(c, p.ptr.dev, target)) {
				entry->ptr.cached = true;
				extra -= n;
			}
		}

	if (extra > 0)
		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			int n = bch2_extent_ptr_durability(c, p);

			if (n && n <= extra) {
				entry->ptr.cached = true;
				extra -= n;
			}
		}
}
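
/*
 * Example: durability 3 with nr_desired_replicas 2 leaves extra = 1, so one
 * durability-1 pointer gets demoted to cached. With a target, pointers
 * outside the target are demoted first, then (if still over) any pointer.
 * A pointer is only demoted when its whole durability n fits within extra,
 * so the key never drops below the desired replication level.
 */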
void bch2_bkey_append_ptr(struct bkey_i *k,
			  struct bch_extent_ptr ptr)
{
	EBUG_ON(bch2_bkey_has_device(bkey_i_to_s_c(k), ptr.dev));

	switch (k->k.type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
	case KEY_TYPE_extent:
		EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);

		ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;

		memcpy((void *) &k->v + bkey_val_bytes(&k->k),
		       &ptr,
		       sizeof(ptr));
		k->k.u64s++;
		break;
	default:
		BUG();
	}
}
static inline void __extent_entry_insert(struct bkey_i *k,
					 union bch_extent_entry *dst,
					 union bch_extent_entry *new)
{
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));

	memmove_u64s_up_small((u64 *) dst + extent_entry_u64s(new),
			      dst, (u64 *) end - (u64 *) dst);
	k->k.u64s += extent_entry_u64s(new);
	memcpy(dst, new, extent_entry_bytes(new));
}
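
/*
 * Layout reminder for the insert above: a value is a flat array of entries,
 * e.g. [crc32][ptr][ptr][crc64][ptr], where a crc entry applies to the
 * pointers that follow it. Inserting at dst shifts everything from dst
 * upward by the new entry's u64s (memmove_u64s_up_small handles the overlap),
 * bumps k.u64s, then copies the new entry into the hole.
 */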
void bch2_extent_ptr_decoded_append(struct bkey_i *k,
				    struct extent_ptr_decoded *p)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked crc =
		bch2_extent_crc_unpack(&k->k, NULL);
	union bch_extent_entry *pos;

	if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
		pos = ptrs.start;
		goto found;
	}

	bkey_for_each_crc(&k->k, ptrs, crc, pos)
		if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
			pos = extent_entry_next(pos);
			goto found;
		}

	bch2_extent_crc_append(k, p->crc);
	pos = bkey_val_end(bkey_i_to_s(k));
found:
	p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
	__extent_entry_insert(k, pos, to_entry(&p->ptr));

	if (p->has_ec) {
		p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
		__extent_entry_insert(k, pos, to_entry(&p->ec));
	}
}
static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
					  union bch_extent_entry *entry)
{
	union bch_extent_entry *i = ptrs.start;

	if (i == entry)
		return NULL;

	while (extent_entry_next(i) != entry)
		i = extent_entry_next(i);
	return i;
}
union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
					   struct bch_extent_ptr *ptr)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *dst, *src, *prev;
	bool drop_crc = true;

	EBUG_ON(ptr < &ptrs.start->ptr ||
		ptr >= &ptrs.end->ptr);
	EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);

	src = extent_entry_next(to_entry(ptr));
	if (src != ptrs.end &&
	    !extent_entry_is_crc(src))
		drop_crc = false;

	dst = to_entry(ptr);
	while ((prev = extent_entry_prev(ptrs, dst))) {
		if (extent_entry_is_ptr(prev))
			break;

		if (extent_entry_is_crc(prev)) {
			if (drop_crc)
				dst = prev;
			break;
		}

		dst = prev;
	}

	memmove_u64s_down(dst, src,
			  (u64 *) ptrs.end - (u64 *) src);
	k.k->u64s -= (u64 *) src - (u64 *) dst;

	return dst;
}
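
/*
 * Example for the drop_crc logic above: in [crc A][ptr 1][ptr 2], dropping
 * ptr 2 keeps crc A because ptr 1 still needs it, so only the pointer goes.
 * In [crc A][ptr 1][crc B][ptr 2], dropping ptr 2 would leave crc B covering
 * nothing, so dst backs up to include crc B and both entries are removed.
 * Dropping ptr 1 in the first layout keeps crc A, which then covers ptr 2.
 */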
void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
{
	struct bch_extent_ptr *ptr;

	bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
}
const struct bch_extent_ptr *
bch2_bkey_has_device(struct bkey_s_c k, unsigned dev)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)
		if (ptr->dev == dev)
			return ptr;

	return NULL;
}
bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)
		if (bch2_dev_in_target(c, ptr->dev, target) &&
		    (!ptr->cached ||
		     !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
			return true;

	return false;
}
bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
			   struct bch_extent_ptr m, u64 offset)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (p.ptr.dev	== m.dev &&
		    p.ptr.gen	== m.gen &&
		    (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
		    (s64) m.offset - offset)
			return true;

	return false;
}
/*
 * bch_extent_normalize - clean up an extent, dropping stale pointers etc.
 *
 * Returns true if @k should be dropped entirely
 *
 * For existing keys, only called when btree nodes are being rewritten, not when
 * they're merely being compacted/resorted in memory.
 */
bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
{
	struct bch_extent_ptr *ptr;

	bch2_bkey_drop_ptrs(k, ptr,
		ptr->cached &&
		ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));

	/* will only happen if all pointers were cached: */
	if (!bch2_bkey_nr_ptrs(k.s_c))
		k.k->type = KEY_TYPE_discard;

	return bkey_whiteout(k.k);
}
void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	const struct bch_extent_ptr *ptr;
	const struct bch_extent_stripe_ptr *ec;
	struct bch_dev *ca;
	bool first = true;

	bkey_extent_entry_for_each(ptrs, entry) {
		if (!first)
			pr_buf(out, " ");

		switch (__extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			ptr = entry_to_ptr(entry);
			ca = ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
				? bch_dev_bkey_exists(c, ptr->dev)
				: NULL;

			pr_buf(out, "ptr: %u:%llu gen %u%s%s", ptr->dev,
			       (u64) ptr->offset, ptr->gen,
			       ptr->cached ? " cached" : "",
			       ca && ptr_stale(ca, ptr)
			       ? " stale" : "");
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			pr_buf(out, "crc: c_size %u size %u offset %u nonce %u csum %u compress %u",
			       crc.compressed_size,
			       crc.uncompressed_size,
			       crc.offset, crc.nonce,
			       crc.csum_type,
			       crc.compression_type);
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			ec = &entry->stripe_ptr;

			pr_buf(out, "ec: idx %llu block %u",
			       (u64) ec->idx, ec->block);
			break;
		default:
			pr_buf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
			return;
		}

		first = false;
	}
}
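
/*
 * Sample of the resulting output (illustrative values): an extent with one
 * crc entry followed by one pointer prints roughly
 *   crc: c_size 8 size 8 offset 0 nonce 0 csum 1 compress 0 ptr: 0:1024 gen 3
 * with " cached" and/or " stale" appended to pointers as applicable.
 */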
static const char *extent_ptr_invalid(const struct bch_fs *c,
				      struct bkey_s_c k,
				      const struct bch_extent_ptr *ptr,
				      unsigned size_ondisk,
				      bool metadata)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr2;
	struct bch_dev *ca;

	if (!bch2_dev_exists2(c, ptr->dev))
		return "pointer to invalid device";

	ca = bch_dev_bkey_exists(c, ptr->dev);
	if (!ca)
		return "pointer to invalid device";

	bkey_for_each_ptr(ptrs, ptr2)
		if (ptr != ptr2 && ptr->dev == ptr2->dev)
			return "multiple pointers to same device";

	if (ptr->offset + size_ondisk > bucket_to_sector(ca, ca->mi.nbuckets))
		return "offset past end of device";

	if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket))
		return "offset before first bucket";

	if (bucket_remainder(ca, ptr->offset) +
	    size_ondisk > ca->mi.bucket_size)
		return "spans multiple buckets";

	return NULL;
}
const char *bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	unsigned size_ondisk = k.k->size;
	const char *reason;
	unsigned nonce = UINT_MAX;

	if (k.k->type == KEY_TYPE_btree_ptr)
		size_ondisk = c->opts.btree_node_size;
	if (k.k->type == KEY_TYPE_btree_ptr_v2)
		size_ondisk = le16_to_cpu(bkey_s_c_to_btree_ptr_v2(k).v->sectors);

	bkey_extent_entry_for_each(ptrs, entry) {
		if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
			return "invalid extent entry type";

		if (k.k->type == KEY_TYPE_btree_ptr &&
		    !extent_entry_is_ptr(entry))
			return "has non ptr field";

		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			reason = extent_ptr_invalid(c, k, &entry->ptr,
						    size_ondisk, false);
			if (reason)
				return reason;
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			if (crc.offset + crc.live_size >
			    crc.uncompressed_size)
				return "checksum offset + key size > uncompressed size";

			size_ondisk = crc.compressed_size;

			if (!bch2_checksum_type_valid(c, crc.csum_type))
				return "invalid checksum type";

			if (crc.compression_type >= BCH_COMPRESSION_TYPE_NR)
				return "invalid compression type";

			if (bch2_csum_type_is_encryption(crc.csum_type)) {
				if (nonce == UINT_MAX)
					nonce = crc.offset + crc.nonce;
				else if (nonce != crc.offset + crc.nonce)
					return "incorrect nonce";
			}
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			break;
		}
	}

	return NULL;
}
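
/*
 * The nonce rule above, unpacked: encryption nonces advance per sector, so
 * crc.offset + crc.nonce must land on the same value for every crc entry in
 * the key; two entries whose sums disagree would decrypt overlapping data
 * with different nonces, hence "incorrect nonce".
 */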
void bch2_ptr_swab(struct bkey_s k)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	u64 *d;

	for (d =  (u64 *) ptrs.start;
	     d != (u64 *) ptrs.end;
	     d++)
		*d = swab64(*d);

	for (entry = ptrs.start;
	     entry < ptrs.end;
	     entry = extent_entry_next(entry)) {
		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			break;
		case BCH_EXTENT_ENTRY_crc32:
			entry->crc32.csum = swab32(entry->crc32.csum);
			break;
		case BCH_EXTENT_ENTRY_crc64:
			entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
			entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
			break;
		case BCH_EXTENT_ENTRY_crc128:
			entry->crc128.csum.hi = (__force __le64)
				swab64((__force u64) entry->crc128.csum.hi);
			entry->crc128.csum.lo = (__force __le64)
				swab64((__force u64) entry->crc128.csum.lo);
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			break;
		}
	}
}
/* Generic extent code: */
int bch2_cut_front_s(struct bpos where, struct bkey_s k)
{
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 sub;

	if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
		return 0;

	EBUG_ON(bkey_cmp(where, k.k->p) > 0);

	sub = where.offset - bkey_start_offset(k.k);

	k.k->size -= sub;

	if (!k.k->size) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
	}

	switch (k.k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v: {
		struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
		union bch_extent_entry *entry;
		bool seen_crc = false;

		bkey_extent_entry_for_each(ptrs, entry) {
			switch (extent_entry_type(entry)) {
			case BCH_EXTENT_ENTRY_ptr:
				if (!seen_crc)
					entry->ptr.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc32:
				entry->crc32.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc64:
				entry->crc64.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc128:
				entry->crc128.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_stripe_ptr:
				break;
			}

			if (extent_entry_is_crc(entry))
				seen_crc = true;
		}

		break;
	}
	case KEY_TYPE_reflink_p: {
		struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);

		le64_add_cpu(&p.v->idx, sub);
		break;
	}
	case KEY_TYPE_inline_data: {
		struct bkey_s_inline_data d = bkey_s_to_inline_data(k);

		sub = min_t(u64, sub << 9, bkey_val_bytes(d.k));

		memmove(d.v->data,
			d.v->data + sub,
			bkey_val_bytes(d.k) - sub);

		new_val_u64s -= sub >> 3;
		break;
	}
	}

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
}
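
/*
 * Example of the offset bookkeeping above: cutting 8 sectors off the front of
 * a checksummed extent can't move the pointers (the checksum still covers the
 * original on-disk data), so crc.offset grows by 8 instead, while a bare
 * pointer before any crc entry simply advances ptr.offset by 8. reflink_p
 * advances its index into the reflink btree, and inline_data memmoves the
 * payload down and shrinks the value.
 */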
int bch2_cut_back_s(struct bpos where, struct bkey_s k)
{
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 len = 0;

	if (bkey_cmp(where, k.k->p) >= 0)
		return 0;

	EBUG_ON(bkey_cmp(where, bkey_start_pos(k.k)) < 0);

	len = where.offset - bkey_start_offset(k.k);

	k.k->p = where;
	k.k->size = len;

	if (!len) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
	}

	switch (k.k->type) {
	case KEY_TYPE_inline_data:
		new_val_u64s = min(new_val_u64s, k.k->size << 6);
		break;
	}

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
}
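
/*
 * The `k.k->size << 6` above converts sectors to u64s for inline data: one
 * 512-byte sector is 64 u64s. Cutting an inline extent back to 2 sectors
 * therefore keeps at most 128 u64s of payload; the tail is zeroed by the
 * memset after the switch.
 */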