+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
*
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_update.h"
+#include "btree_update_interior.h"
+#include "buckets.h"
#include "checksum.h"
#include "debug.h"
#include "dirent.h"
+#include "disk_groups.h"
#include "error.h"
#include "extents.h"
#include "inode.h"
#include "journal.h"
+#include "replicas.h"
+#include "super.h"
#include "super-io.h"
+#include "util.h"
#include "xattr.h"
#include <trace/events/bcachefs.h>
-static enum merge_result bch2_extent_merge(struct bch_fs *, struct btree *,
- struct bkey_i *, struct bkey_i *);
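+/* Count all pointers in a key's value, cached or not: */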
+unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
+{
+ struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
+ unsigned nr_ptrs = 0;
+
+ bkey_for_each_ptr(p, ptr)
+ nr_ptrs++;
-static void sort_key_next(struct btree_node_iter *iter,
- struct btree *b,
- struct btree_node_iter_set *i)
+ return nr_ptrs;
+}
+
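+/*
+ * Count only dirty (non-cached) pointers; reservations count as the number of
+ * replicas reserved:
+ */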
+unsigned bch2_bkey_nr_dirty_ptrs(struct bkey_s_c k)
{
- i->k += __btree_node_offset_to_key(b, i->k)->u64s;
+ unsigned nr_ptrs = 0;
+
+ switch (k.k->type) {
+ case KEY_TYPE_btree_ptr:
+ case KEY_TYPE_extent:
+ case KEY_TYPE_reflink_v: {
+ struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
+
+ bkey_for_each_ptr(p, ptr)
+ nr_ptrs += !ptr->cached;
+ BUG_ON(!nr_ptrs);
+ break;
+ }
+ case KEY_TYPE_reservation:
+ nr_ptrs = bkey_s_c_to_reservation(k).v->nr_replicas;
+ break;
+ }
+
+ return nr_ptrs;
+}
+
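+/*
+ * Durability contributed by one pointer: cached pointers count for nothing;
+ * otherwise it's the device's durability (zero if the device has failed),
+ * raised to the stripe's redundancy for erasure coded pointers:
+ */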
+static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
+ struct extent_ptr_decoded p)
+{
+ unsigned durability = 0;
+ struct bch_dev *ca;
+
+ if (p.ptr.cached)
+ return 0;
+
+ ca = bch_dev_bkey_exists(c, p.ptr.dev);
+
+ if (ca->mi.state != BCH_MEMBER_STATE_FAILED)
+ durability = max_t(unsigned, durability, ca->mi.durability);
+
+ if (p.has_ec) {
+ struct stripe *s =
+ genradix_ptr(&c->stripes[0], p.ec.idx);
+
+ if (WARN_ON(!s))
+ goto out;
+
+ durability = max_t(unsigned, durability, s->nr_redundant);
+ }
+out:
+ return durability;
+}
+
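+/* Total durability of a key: sum of the durability of each of its pointers: */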
+unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ unsigned durability = 0;
+
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+ durability += bch2_extent_ptr_durability(c, p);
- if (i->k == i->end)
- *i = iter->data[--iter->used];
+ return durability;
+}
+
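+/* Find the failure tracking entry for @dev, if one has been recorded: */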
+static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
+ unsigned dev)
+{
+ struct bch_dev_io_failures *i;
+
+ for (i = f->devs; i < f->devs + f->nr; i++)
+ if (i->dev == dev)
+ return i;
+
+ return NULL;
+}
+
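+/*
+ * Record an I/O failure against the device in @p; switching to a new retry
+ * strategy (idx) resets the failure count:
+ */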
+void bch2_mark_io_failure(struct bch_io_failures *failed,
+ struct extent_ptr_decoded *p)
+{
+ struct bch_dev_io_failures *f = dev_io_failures(failed, p->ptr.dev);
+
+ if (!f) {
+ BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));
+
+ f = &failed->devs[failed->nr++];
+ f->dev = p->ptr.dev;
+ f->idx = p->idx;
+ f->nr_failed = 1;
+ f->nr_retries = 0;
+ } else if (p->idx != f->idx) {
+ f->idx = p->idx;
+ f->nr_failed = 1;
+ f->nr_retries = 0;
+ } else {
+ f->nr_failed++;
+ }
}
/*
- * Returns true if l > r - unless l == r, in which case returns true if l is
- * older than r.
- *
- * Necessary for btree_sort_fixup() - if there are multiple keys that compare
- * equal in different sets, we have to process them newest to oldest.
+ * returns true if p1 is better than p2:
*/
-#define key_sort_cmp(h, l, r) \
-({ \
- bkey_cmp_packed(b, \
- __btree_node_offset_to_key(b, (l).k), \
- __btree_node_offset_to_key(b, (r).k)) \
- \
- ?: (l).k - (r).k; \
-})
-
-static inline bool should_drop_next_key(struct btree_node_iter *iter,
- struct btree *b)
+static inline bool ptr_better(struct bch_fs *c,
+ const struct extent_ptr_decoded p1,
+ const struct extent_ptr_decoded p2)
{
- struct btree_node_iter_set *l = iter->data, *r = iter->data + 1;
- struct bkey_packed *k = __btree_node_offset_to_key(b, l->k);
+ if (likely(!p1.idx && !p2.idx)) {
+ struct bch_dev *dev1 = bch_dev_bkey_exists(c, p1.ptr.dev);
+ struct bch_dev *dev2 = bch_dev_bkey_exists(c, p2.ptr.dev);
- if (bkey_whiteout(k))
- return true;
+ u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
+ u64 l2 = atomic64_read(&dev2->cur_latency[READ]);
- if (iter->used < 2)
- return false;
+ /* Pick at random, biased in favor of the faster device: */
- if (iter->used > 2 &&
- key_sort_cmp(iter, r[0], r[1]) >= 0)
- r++;
+ return bch2_rand_range(l1 + l2) > l1;
+ }
- /*
- * key_sort_cmp() ensures that when keys compare equal the older key
- * comes first; so if l->k compares equal to r->k then l->k is older and
- * should be dropped.
- */
- return !bkey_cmp_packed(b,
- __btree_node_offset_to_key(b, l->k),
- __btree_node_offset_to_key(b, r->k));
+ if (force_reconstruct_read(c))
+ return p1.idx > p2.idx;
+
+ return p1.idx < p2.idx;
}
-struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *dst,
- struct btree *b,
- struct btree_node_iter *iter)
+/*
+ * Pick a non-stale pointer to read from, preferring devices without recorded
+ * I/O failures; @failed may be NULL. For pointers that have failed we advance
+ * to the next recovery strategy (e.g. erasure coding reconstruction) rather
+ * than giving up immediately. Returns 1 if a pointer was picked, 0 if there
+ * was nothing to read, or -EIO if dirty data couldn't be read.
+ */
+int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
+ struct bch_io_failures *failed,
+ struct extent_ptr_decoded *pick)
{
- struct bkey_packed *out = dst->start;
- struct btree_nr_keys nr;
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ struct bch_dev_io_failures *f;
+ struct bch_dev *ca;
+ int ret = 0;
- memset(&nr, 0, sizeof(nr));
+ if (k.k->type == KEY_TYPE_error)
+ return -EIO;
- heap_resort(iter, key_sort_cmp);
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ ca = bch_dev_bkey_exists(c, p.ptr.dev);
- while (!bch2_btree_node_iter_end(iter)) {
- if (!should_drop_next_key(iter, b)) {
- struct bkey_packed *k =
- __btree_node_offset_to_key(b, iter->data->k);
+ /*
+ * If there are any dirty pointers it's an error if we can't
+ * read:
+ */
+ if (!ret && !p.ptr.cached)
+ ret = -EIO;
- bkey_copy(out, k);
- btree_keys_account_key_add(&nr, 0, out);
- out = bkey_next(out);
- }
+ if (p.ptr.cached && ptr_stale(ca, &p.ptr))
+ continue;
+
+ f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
+ if (f)
+ p.idx = f->nr_failed < f->nr_retries
+ ? f->idx
+ : f->idx + 1;
+
+ if (!p.idx &&
+ !bch2_dev_is_readable(ca))
+ p.idx++;
- sort_key_next(iter, b, iter->data);
- heap_sift_down(iter, 0, key_sort_cmp);
+ if (force_reconstruct_read(c) &&
+ !p.idx && p.has_ec)
+ p.idx++;
+
+ if (p.idx >= (unsigned) p.has_ec + 1)
+ continue;
+
+ if (ret > 0 && !ptr_better(c, p, *pick))
+ continue;
+
+ *pick = p;
+ ret = 1;
}
- dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
- return nr;
+ return ret;
}
-/* Common among btree and extent ptrs */
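+/*
+ * Append a pointer to a btree_ptr or extent key; the device must not already
+ * be present:
+ */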
+void bch2_bkey_append_ptr(struct bkey_i *k,
+ struct bch_extent_ptr ptr)
+{
+ EBUG_ON(bch2_bkey_has_device(bkey_i_to_s_c(k), ptr.dev));
+
+ switch (k->k.type) {
+ case KEY_TYPE_btree_ptr:
+ case KEY_TYPE_extent:
+ EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);
+
+ ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
+
+ memcpy((void *) &k->v + bkey_val_bytes(&k->k),
+ &ptr,
+ sizeof(ptr));
+ k->u64s++;
+ break;
+ default:
+ BUG();
+ }
+}
+
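+/* Drop every pointer to the given device: */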
+void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
+{
+ struct bch_extent_ptr *ptr;
+
+ bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
+}
const struct bch_extent_ptr *
-bch2_extent_has_device(struct bkey_s_c_extent e, unsigned dev)
+bch2_bkey_has_device(struct bkey_s_c k, unsigned dev)
{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
- extent_for_each_ptr(e, ptr)
+ bkey_for_each_ptr(ptrs, ptr)
if (ptr->dev == dev)
return ptr;
return NULL;
}
-unsigned bch2_extent_nr_ptrs(struct bkey_s_c_extent e)
+bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
- unsigned nr_ptrs = 0;
- extent_for_each_ptr(e, ptr)
- nr_ptrs++;
+ bkey_for_each_ptr(ptrs, ptr)
+ if (bch2_dev_in_target(c, ptr->dev, target) &&
+ (!ptr->cached ||
+ !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
+ return true;
- return nr_ptrs;
+ return false;
}
-unsigned bch2_extent_nr_dirty_ptrs(struct bkey_s_c k)
+/* extent specific utility code */
+
+const struct bch_extent_ptr *
+bch2_extent_has_device(struct bkey_s_c_extent e, unsigned dev)
{
- struct bkey_s_c_extent e;
const struct bch_extent_ptr *ptr;
- unsigned nr_ptrs = 0;
- switch (k.k->type) {
- case BCH_EXTENT:
- case BCH_EXTENT_CACHED:
- e = bkey_s_c_to_extent(k);
+ extent_for_each_ptr(e, ptr)
+ if (ptr->dev == dev)
+ return ptr;
- extent_for_each_ptr(e, ptr)
- nr_ptrs += !ptr->cached;
- break;
+ return NULL;
+}
- case BCH_RESERVATION:
- nr_ptrs = bkey_s_c_to_reservation(k).v->nr_replicas;
- break;
+const struct bch_extent_ptr *
+bch2_extent_has_group(struct bch_fs *c, struct bkey_s_c_extent e, unsigned group)
+{
+ const struct bch_extent_ptr *ptr;
+
+ extent_for_each_ptr(e, ptr) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+
+ if (ca->mi.group &&
+ ca->mi.group - 1 == group)
+ return ptr;
}
- return nr_ptrs;
+ return NULL;
}
-/* returns true if equal */
-static bool crc_cmp(union bch_extent_crc *l, union bch_extent_crc *r)
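+/* Sum of compressed sectors referenced by this extent's dirty pointers: */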
+unsigned bch2_extent_is_compressed(struct bkey_s_c k)
{
- return extent_crc_type(l) == extent_crc_type(r) &&
- !memcmp(l, r, extent_entry_bytes(to_entry(l)));
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ unsigned ret = 0;
+
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+ if (!p.ptr.cached &&
+ p.crc.compression_type != BCH_COMPRESSION_NONE)
+ ret += p.crc.compressed_size;
+
+ return ret;
}
-/* Increment pointers after @crc by crc's offset until the next crc entry: */
-void bch2_extent_crc_narrow_pointers(struct bkey_s_extent e, union bch_extent_crc *crc)
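+/*
+ * Check whether @k still contains a pointer matching @m: same device and
+ * generation, pointing at the same data (@offset is where the extent started
+ * when @m was recorded):
+ */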
+bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
+ struct bch_extent_ptr m, u64 offset)
{
- union bch_extent_entry *entry;
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
- extent_for_each_entry_from(e, entry, extent_entry_next(to_entry(crc))) {
- if (!extent_entry_is_ptr(entry))
- return;
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+ if (p.ptr.dev == m.dev &&
+ p.ptr.gen == m.gen &&
+ (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
+ (s64) m.offset - offset)
+ return true;
- entry->ptr.offset += crc_offset(crc);
- }
+ return false;
}
-/*
- * We're writing another replica for this extent, so while we've got the data in
- * memory we'll be computing a new checksum for the currently live data.
- *
- * If there are other replicas we aren't moving, and they are checksummed but
- * not compressed, we can modify them to point to only the data that is
- * currently live (so that readers won't have to bounce) while we've got the
- * checksum we need:
- *
- * XXX: to guard against data being corrupted while in memory, instead of
- * recomputing the checksum here, it would be better in the read path to instead
- * of computing the checksum of the entire extent:
- *
- * | extent |
- *
- * compute the checksums of the live and dead data separately
- * | dead data || live data || dead data |
- *
- * and then verify that crc_dead1 + crc_live + crc_dead2 == orig_crc, and then
- * use crc_live here (that we verified was correct earlier)
- *
- * note: doesn't work with encryption
- */
-void bch2_extent_narrow_crcs(struct bkey_s_extent e)
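+/* Walk from the start of the value to find the entry preceding @entry: */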
+static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
+ union bch_extent_entry *entry)
{
- union bch_extent_crc *crc;
- bool have_wide = false, have_narrow = false;
- struct bch_csum csum = { 0 };
- unsigned csum_type = 0;
-
- extent_for_each_crc(e, crc) {
- if (crc_compression_type(crc) ||
- bch2_csum_type_is_encryption(crc_csum_type(crc)))
- continue;
-
- if (crc_uncompressed_size(e.k, crc) != e.k->size) {
- have_wide = true;
- } else {
- have_narrow = true;
- csum = crc_csum(crc);
- csum_type = crc_csum_type(crc);
- }
- }
-
- if (!have_wide || !have_narrow)
- return;
+ union bch_extent_entry *i = ptrs.start;
- extent_for_each_crc(e, crc) {
- if (crc_compression_type(crc))
- continue;
+ if (i == entry)
+ return NULL;
- if (crc_uncompressed_size(e.k, crc) != e.k->size) {
- switch (extent_crc_type(crc)) {
- case BCH_EXTENT_CRC_NONE:
- BUG();
- case BCH_EXTENT_CRC32:
- if (bch_crc_bytes[csum_type] > 4)
- continue;
-
- bch2_extent_crc_narrow_pointers(e, crc);
- crc->crc32._compressed_size = e.k->size - 1;
- crc->crc32._uncompressed_size = e.k->size - 1;
- crc->crc32.offset = 0;
- crc->crc32.csum_type = csum_type;
- crc->crc32.csum = csum.lo;
- break;
- case BCH_EXTENT_CRC64:
- if (bch_crc_bytes[csum_type] > 10)
- continue;
-
- bch2_extent_crc_narrow_pointers(e, crc);
- crc->crc64._compressed_size = e.k->size - 1;
- crc->crc64._uncompressed_size = e.k->size - 1;
- crc->crc64.offset = 0;
- crc->crc64.csum_type = csum_type;
- crc->crc64.csum_lo = csum.lo;
- crc->crc64.csum_hi = csum.hi;
- break;
- case BCH_EXTENT_CRC128:
- if (bch_crc_bytes[csum_type] > 16)
- continue;
-
- bch2_extent_crc_narrow_pointers(e, crc);
- crc->crc128._compressed_size = e.k->size - 1;
- crc->crc128._uncompressed_size = e.k->size - 1;
- crc->crc128.offset = 0;
- crc->crc128.csum_type = csum_type;
- crc->crc128.csum = csum;
- break;
- }
- }
- }
+ while (extent_entry_next(i) != entry)
+ i = extent_entry_next(i);
+ return i;
}
-void bch2_extent_drop_redundant_crcs(struct bkey_s_extent e)
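+/*
+ * Remove @ptr from the key, along with preceding crc/stripe entries that no
+ * longer apply to any remaining pointer; returns where the entries that
+ * followed @ptr now live:
+ */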
+union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
+ struct bch_extent_ptr *ptr)
{
- union bch_extent_entry *entry = e.v->start;
- union bch_extent_crc *crc, *prev = NULL;
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
+ union bch_extent_entry *dst, *src, *prev;
+ bool drop_crc = true;
+
+ EBUG_ON(ptr < &ptrs.start->ptr ||
+ ptr >= &ptrs.end->ptr);
+ EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);
+
+ src = extent_entry_next(to_entry(ptr));
+ if (src != ptrs.end &&
+ !extent_entry_is_crc(src))
+ drop_crc = false;
+
+ dst = to_entry(ptr);
+ while ((prev = extent_entry_prev(ptrs, dst))) {
+ if (extent_entry_is_ptr(prev))
+ break;
- while (entry != extent_entry_last(e)) {
- union bch_extent_entry *next = extent_entry_next(entry);
- size_t crc_u64s = extent_entry_u64s(entry);
+ if (extent_entry_is_crc(prev)) {
+ if (drop_crc)
+ dst = prev;
+ break;
+ }
- if (!extent_entry_is_crc(entry))
- goto next;
+ dst = prev;
+ }
- crc = entry_to_crc(entry);
+ memmove_u64s_down(dst, src,
+ (u64 *) ptrs.end - (u64 *) src);
+ k.k->u64s -= (u64 *) src - (u64 *) dst;
- if (next == extent_entry_last(e)) {
- /* crc entry with no pointers after it: */
- goto drop;
- }
+ return dst;
+}
- if (extent_entry_is_crc(next)) {
- /* no pointers before next crc entry: */
- goto drop;
- }
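+/*
+ * A crc entry can be narrowed to @n if it's uncompressed, checksummed, covers
+ * more than the live data, and has the same encryption as @n:
+ */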
+static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
+ struct bch_extent_crc_unpacked n)
+{
+ return !u.compression_type &&
+ u.csum_type &&
+ u.uncompressed_size > u.live_size &&
+ bch2_csum_type_is_encryption(u.csum_type) ==
+ bch2_csum_type_is_encryption(n.csum_type);
+}
- if (prev && crc_cmp(crc, prev)) {
- /* identical to previous crc entry: */
- goto drop;
- }
+bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
+ struct bch_extent_crc_unpacked n)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ struct bch_extent_crc_unpacked crc;
+ const union bch_extent_entry *i;
- if (!prev &&
- !crc_csum_type(crc) &&
- !crc_compression_type(crc)) {
- /* null crc entry: */
- bch2_extent_crc_narrow_pointers(e, crc);
- goto drop;
- }
+ if (!n.csum_type)
+ return false;
- prev = crc;
-next:
- entry = next;
- continue;
-drop:
- memmove_u64s_down(crc, next,
- (u64 *) extent_entry_last(e) - (u64 *) next);
- e.k->u64s -= crc_u64s;
- }
+ bkey_for_each_crc(k.k, ptrs, crc, i)
+ if (can_narrow_crc(crc, n))
+ return true;
- EBUG_ON(bkey_val_u64s(e.k) && !bch2_extent_nr_ptrs(e.c));
+ return false;
}
-static bool should_drop_ptr(const struct bch_fs *c,
- struct bkey_s_c_extent e,
- const struct bch_extent_ptr *ptr)
+/*
+ * We're writing another replica for this extent, so while we've got the data in
+ * memory we'll be computing a new checksum for the currently live data.
+ *
+ * If there are other replicas we aren't moving, and they are checksummed but
+ * not compressed, we can modify them to point to only the data that is
+ * currently live (so that readers won't have to bounce) while we've got the
+ * checksum we need:
+ */
+bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
{
- return ptr->cached && ptr_stale(c->devs[ptr->dev], ptr);
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
+ struct bch_extent_crc_unpacked u;
+ struct extent_ptr_decoded p;
+ union bch_extent_entry *i;
+ bool ret = false;
+
+ /* Find a checksum entry that covers only live data: */
+ if (!n.csum_type) {
+ bkey_for_each_crc(&k->k, ptrs, u, i)
+ if (!u.compression_type &&
+ u.csum_type &&
+ u.live_size == u.uncompressed_size) {
+ n = u;
+ goto found;
+ }
+ return false;
+ }
+found:
+ BUG_ON(n.compression_type);
+ BUG_ON(n.offset);
+ BUG_ON(n.live_size != k->k.size);
+
+restart_narrow_pointers:
+ ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
+
+ bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
+ if (can_narrow_crc(p.crc, n)) {
+ bch2_bkey_drop_ptr(bkey_i_to_s(k), &i->ptr);
+ p.ptr.offset += p.crc.offset;
+ p.crc = n;
+ bch2_extent_ptr_decoded_append(k, &p);
+ ret = true;
+ goto restart_narrow_pointers;
+ }
+
+ return ret;
}
-static void bch2_extent_drop_stale(struct bch_fs *c, struct bkey_s_extent e)
+/* returns true if not equal */
+static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
+ struct bch_extent_crc_unpacked r)
{
- struct bch_extent_ptr *ptr = &e.v->start->ptr;
- bool dropped = false;
-
- while ((ptr = extent_ptr_next(e, ptr)))
- if (should_drop_ptr(c, e.c, ptr)) {
- __bch2_extent_drop_ptr(e, ptr);
- dropped = true;
- } else
- ptr++;
-
- if (dropped)
- bch2_extent_drop_redundant_crcs(e);
+ return (l.csum_type != r.csum_type ||
+ l.compression_type != r.compression_type ||
+ l.compressed_size != r.compressed_size ||
+ l.uncompressed_size != r.uncompressed_size ||
+ l.offset != r.offset ||
+ l.live_size != r.live_size ||
+ l.nonce != r.nonce ||
+ bch2_crc_cmp(l.csum, r.csum));
}
-static bool bch2_ptr_normalize(struct bch_fs *c, struct btree *bk,
- struct bkey_s k)
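+/* Byte swap a value; csums in crc entries need per-width fixups: */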
+void bch2_ptr_swab(const struct bkey_format *f, struct bkey_packed *k)
{
- return bch2_extent_normalize(c, k);
+ union bch_extent_entry *entry;
+ u64 *d = (u64 *) bkeyp_val(f, k);
+ unsigned i;
+
+ for (i = 0; i < bkeyp_val_u64s(f, k); i++)
+ d[i] = swab64(d[i]);
+
+ for (entry = (union bch_extent_entry *) d;
+ entry < (union bch_extent_entry *) (d + bkeyp_val_u64s(f, k));
+ entry = extent_entry_next(entry)) {
+ switch (extent_entry_type(entry)) {
+ case BCH_EXTENT_ENTRY_ptr:
+ break;
+ case BCH_EXTENT_ENTRY_crc32:
+ entry->crc32.csum = swab32(entry->crc32.csum);
+ break;
+ case BCH_EXTENT_ENTRY_crc64:
+ entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
+ entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
+ break;
+ case BCH_EXTENT_ENTRY_crc128:
+ entry->crc128.csum.hi = (__force __le64)
+ swab64((__force u64) entry->crc128.csum.hi);
+ entry->crc128.csum.lo = (__force __le64)
+ swab64((__force u64) entry->crc128.csum.lo);
+ break;
+ case BCH_EXTENT_ENTRY_stripe_ptr:
+ break;
+ }
+ }
}
-static void bch2_ptr_swab(const struct bkey_format *f, struct bkey_packed *k)
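+/* Print a key's pointer, crc and stripe entries in human readable form: */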
+void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
{
- switch (k->type) {
- case BCH_EXTENT:
- case BCH_EXTENT_CACHED: {
- union bch_extent_entry *entry;
- u64 *d = (u64 *) bkeyp_val(f, k);
- unsigned i;
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct bch_extent_crc_unpacked crc;
+ const struct bch_extent_ptr *ptr;
+ const struct bch_extent_stripe_ptr *ec;
+ struct bch_dev *ca;
+ bool first = true;
- for (i = 0; i < bkeyp_val_u64s(f, k); i++)
- d[i] = swab64(d[i]);
+ bkey_extent_entry_for_each(ptrs, entry) {
+ if (!first)
+ pr_buf(out, " ");
- for (entry = (union bch_extent_entry *) d;
- entry < (union bch_extent_entry *) (d + bkeyp_val_u64s(f, k));
- entry = extent_entry_next(entry)) {
- switch (extent_entry_type(entry)) {
- case BCH_EXTENT_ENTRY_crc32:
- entry->crc32.csum = swab32(entry->crc32.csum);
- break;
- case BCH_EXTENT_ENTRY_crc64:
- entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
- entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
- break;
- case BCH_EXTENT_ENTRY_crc128:
- entry->crc128.csum.hi = swab64(entry->crc64.csum_hi);
- entry->crc128.csum.lo = swab64(entry->crc64.csum_lo);
- break;
- case BCH_EXTENT_ENTRY_ptr:
- break;
- }
+ switch (__extent_entry_type(entry)) {
+ case BCH_EXTENT_ENTRY_ptr:
+ ptr = entry_to_ptr(entry);
+ ca = ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
+ ? bch_dev_bkey_exists(c, ptr->dev)
+ : NULL;
+
+ pr_buf(out, "ptr: %u:%llu gen %u%s%s", ptr->dev,
+ (u64) ptr->offset, ptr->gen,
+ ptr->cached ? " cached" : "",
+ ca && ptr_stale(ca, ptr)
+ ? " stale" : "");
+ break;
+ case BCH_EXTENT_ENTRY_crc32:
+ case BCH_EXTENT_ENTRY_crc64:
+ case BCH_EXTENT_ENTRY_crc128:
+ crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
+
+ pr_buf(out, "crc: c_size %u size %u offset %u nonce %u csum %u compress %u",
+ crc.compressed_size,
+ crc.uncompressed_size,
+ crc.offset, crc.nonce,
+ crc.csum_type,
+ crc.compression_type);
+ break;
+ case BCH_EXTENT_ENTRY_stripe_ptr:
+ ec = &entry->stripe_ptr;
+
+ pr_buf(out, "ec: idx %llu block %u",
+ (u64) ec->idx, ec->block);
+ break;
+ default:
+ pr_buf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
+ return;
}
- break;
- }
+
+ first = false;
}
}
static const char *extent_ptr_invalid(const struct bch_fs *c,
- struct bkey_s_c_extent e,
+ struct bkey_s_c k,
const struct bch_extent_ptr *ptr,
unsigned size_ondisk,
bool metadata)
{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr2;
struct bch_dev *ca;
- if (ptr->dev >= c->sb.nr_devices)
+ if (!bch2_dev_exists2(c, ptr->dev))
return "pointer to invalid device";
- ca = c->devs[ptr->dev];
+ ca = bch_dev_bkey_exists(c, ptr->dev);
if (!ca)
return "pointer to invalid device";
- extent_for_each_ptr(e, ptr2)
+ bkey_for_each_ptr(ptrs, ptr2)
if (ptr != ptr2 && ptr->dev == ptr2->dev)
return "multiple pointers to same device";
- if (ptr->offset + size_ondisk > ca->mi.bucket_size * ca->mi.nbuckets)
+ if (ptr->offset + size_ondisk > bucket_to_sector(ca, ca->mi.nbuckets))
return "offset past end of device";
- if (ptr->offset < ca->mi.bucket_size * ca->mi.first_bucket)
+ if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket))
return "offset before first bucket";
- if ((ptr->offset & (ca->mi.bucket_size - 1)) +
+ if (bucket_remainder(ca, ptr->offset) +
size_ondisk > ca->mi.bucket_size)
return "spans multiple buckets";
- if (!(metadata ? ca->mi.has_metadata : ca->mi.has_data))
- return "device not marked as containing data";
-
return NULL;
}
-static size_t extent_print_ptrs(struct bch_fs *c, char *buf,
- size_t size, struct bkey_s_c_extent e)
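+/* Validate all pointer/crc/stripe entries; returns an error string or NULL: */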
+const char *bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
- char *out = buf, *end = buf + size;
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
- const union bch_extent_crc *crc;
- const struct bch_extent_ptr *ptr;
- struct bch_dev *ca;
- bool first = true;
+ struct bch_extent_crc_unpacked crc;
+ unsigned size_ondisk = k.k->size;
+ const char *reason;
+ unsigned nonce = UINT_MAX;
-#define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
+ if (k.k->type == KEY_TYPE_btree_ptr)
+ size_ondisk = c->opts.btree_node_size;
- extent_for_each_entry(e, entry) {
- if (!first)
- p(" ");
+ bkey_extent_entry_for_each(ptrs, entry) {
+ if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
+ return "invalid extent entry type";
- switch (__extent_entry_type(entry)) {
+ if (k.k->type == KEY_TYPE_btree_ptr &&
+ !extent_entry_is_ptr(entry))
+ return "has non ptr field";
+
+ switch (extent_entry_type(entry)) {
+ case BCH_EXTENT_ENTRY_ptr:
+ reason = extent_ptr_invalid(c, k, &entry->ptr,
+ size_ondisk, false);
+ if (reason)
+ return reason;
+ break;
case BCH_EXTENT_ENTRY_crc32:
case BCH_EXTENT_ENTRY_crc64:
case BCH_EXTENT_ENTRY_crc128:
- crc = entry_to_crc(entry);
+ crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
- p("crc: c_size %u size %u offset %u csum %u compress %u",
- crc_compressed_size(e.k, crc),
- crc_uncompressed_size(e.k, crc),
- crc_offset(crc), crc_csum_type(crc),
- crc_compression_type(crc));
- break;
- case BCH_EXTENT_ENTRY_ptr:
- ptr = entry_to_ptr(entry);
- ca = c->devs[ptr->dev];
+ if (crc.offset + crc.live_size >
+ crc.uncompressed_size)
+ return "checksum offset + key size > uncompressed size";
- p("ptr: %u:%llu gen %u%s", ptr->dev,
- (u64) ptr->offset, ptr->gen,
- ca && ptr_stale(ca, ptr)
- ? " stale" : "");
+ size_ondisk = crc.compressed_size;
+
+ if (!bch2_checksum_type_valid(c, crc.csum_type))
+ return "invalid checksum type";
+
+ if (crc.compression_type >= BCH_COMPRESSION_NR)
+ return "invalid compression type";
+
+ if (bch2_csum_type_is_encryption(crc.csum_type)) {
+ if (nonce == UINT_MAX)
+ nonce = crc.offset + crc.nonce;
+ else if (nonce != crc.offset + crc.nonce)
+ return "incorrect nonce";
+ }
+ break;
+ case BCH_EXTENT_ENTRY_stripe_ptr:
break;
- default:
- p("(invalid extent entry %.16llx)", *((u64 *) entry));
- goto out;
}
-
- first = false;
}
-out:
- if (bkey_extent_is_cached(e.k))
- p(" cached");
-#undef p
- return out - buf;
+
+ return NULL;
}
/* Btree ptrs */
-static const char *bch2_btree_ptr_invalid(const struct bch_fs *c,
- struct bkey_s_c k)
+const char *bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
- if (bkey_extent_is_cached(k.k))
- return "cached";
-
- if (k.k->size)
- return "nonzero key size";
-
if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
return "value too big";
- switch (k.k->type) {
- case BCH_EXTENT: {
- struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
- const union bch_extent_entry *entry;
- const struct bch_extent_ptr *ptr;
- const union bch_extent_crc *crc;
- const char *reason;
-
- extent_for_each_entry(e, entry)
- if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
- return "invalid extent entry type";
-
- extent_for_each_ptr_crc(e, ptr, crc) {
- reason = extent_ptr_invalid(c, e, ptr,
- c->sb.btree_node_size,
- true);
- if (reason)
- return reason;
- }
-
- if (crc)
- return "has crc field";
-
- return NULL;
- }
-
- default:
- return "invalid value type";
- }
+ return bch2_bkey_ptrs_invalid(c, k);
}
-static void btree_ptr_debugcheck(struct bch_fs *c, struct btree *b,
- struct bkey_s_c k)
+void bch2_btree_ptr_debugcheck(struct bch_fs *c, struct bkey_s_c k)
{
- struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
- unsigned seq;
const char *err;
char buf[160];
- struct bucket *g;
+ struct bucket_mark mark;
struct bch_dev *ca;
- unsigned replicas = 0;
- bool bad;
- extent_for_each_ptr(e, ptr) {
- ca = c->devs[ptr->dev];
- g = PTR_BUCKET(ca, ptr);
- replicas++;
+ bch2_fs_bug_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
+ !bch2_bkey_replicas_marked(c, k, false), c,
+ "btree key bad (replicas not marked in superblock):\n%s",
+ (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
- err = "stale";
- if (ptr_stale(ca, ptr))
- goto err;
-
- do {
- seq = read_seqcount_begin(&c->gc_pos_lock);
- bad = gc_pos_cmp(c->gc_pos, gc_pos_btree_node(b)) > 0 &&
- g->mark.data_type != BUCKET_BTREE;
- } while (read_seqcount_retry(&c->gc_pos_lock, seq));
-
- err = "inconsistent";
- if (bad)
- goto err;
- }
-
- if (replicas < c->sb.meta_replicas_have) {
- bch2_bkey_val_to_text(c, btree_node_type(b),
- buf, sizeof(buf), k);
- bch2_fs_bug(c,
- "btree key bad (too few replicas, %u < %u): %s",
- replicas, c->sb.meta_replicas_have, buf);
+ if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
return;
- }
-
- return;
-err:
- bch2_bkey_val_to_text(c, btree_node_type(b), buf, sizeof(buf), k);
- bch2_fs_bug(c, "%s btree pointer %s: bucket %zi "
- "gen %i last_gc %i mark %08x",
- err, buf, PTR_BUCKET_NR(ca, ptr),
- PTR_BUCKET(ca, ptr)->mark.gen,
- ca->oldest_gens[PTR_BUCKET_NR(ca, ptr)],
- (unsigned) g->mark.counter);
-}
-
-static void bch2_btree_ptr_to_text(struct bch_fs *c, char *buf,
- size_t size, struct bkey_s_c k)
-{
- char *out = buf, *end = buf + size;
- const char *invalid;
-#define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
+ bkey_for_each_ptr(ptrs, ptr) {
+ ca = bch_dev_bkey_exists(c, ptr->dev);
- if (bkey_extent_is_data(k.k))
- out += extent_print_ptrs(c, buf, size, bkey_s_c_to_extent(k));
-
- invalid = bch2_btree_ptr_invalid(c, k);
- if (invalid)
- p(" invalid: %s", invalid);
-#undef p
-}
-
-struct extent_pick_ptr
-bch2_btree_pick_ptr(struct bch_fs *c, const struct btree *b)
-{
- struct bkey_s_c_extent e = bkey_i_to_s_c_extent(&b->key);
- const union bch_extent_crc *crc;
- const struct bch_extent_ptr *ptr;
- struct extent_pick_ptr pick = { .ca = NULL };
-
- extent_for_each_ptr_crc(e, ptr, crc) {
- struct bch_dev *ca = c->devs[ptr->dev];
- struct btree *root = btree_node_root(c, b);
-
- if (bch2_fs_inconsistent_on(crc, c,
- "btree node pointer with crc at btree %u level %u/%u bucket %zu",
- b->btree_id, b->level, root ? root->level : -1,
- PTR_BUCKET_NR(ca, ptr)))
- break;
-
- if (ca->mi.state == BCH_MEMBER_STATE_FAILED)
- continue;
-
- if (pick.ca && pick.ca->mi.tier < ca->mi.tier)
- continue;
-
- if (!percpu_ref_tryget(&ca->io_ref))
- continue;
+ mark = ptr_bucket_mark(ca, ptr);
- if (pick.ca)
- percpu_ref_put(&pick.ca->io_ref);
+ err = "stale";
+ if (gen_after(mark.gen, ptr->gen))
+ goto err;
- pick.ca = ca;
- pick.ptr = *ptr;
+ err = "inconsistent";
+ if (mark.data_type != BCH_DATA_BTREE ||
+ mark.dirty_sectors < c->opts.btree_node_size)
+ goto err;
}
- return pick;
+ return;
+err:
+ bch2_bkey_val_to_text(&PBUF(buf), c, k);
+ bch2_fs_bug(c, "%s btree pointer %s: bucket %zi gen %i mark %08x",
+ err, buf, PTR_BUCKET_NR(ca, ptr),
+ mark.gen, (unsigned) mark.v.counter);
}
-const struct bkey_ops bch2_bkey_btree_ops = {
- .key_invalid = bch2_btree_ptr_invalid,
- .key_debugcheck = btree_ptr_debugcheck,
- .val_to_text = bch2_btree_ptr_to_text,
- .swab = bch2_ptr_swab,
-};
+void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ bch2_bkey_ptrs_to_text(out, c, k);
+}
/* Extents */
-static bool __bch2_cut_front(struct bpos where, struct bkey_s k)
+void __bch2_cut_front(struct bpos where, struct bkey_s k)
{
- u64 len = 0;
+ u64 sub;
if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
- return false;
+ return;
EBUG_ON(bkey_cmp(where, k.k->p) > 0);
- len = k.k->p.offset - where.offset;
+ sub = where.offset - bkey_start_offset(k.k);
- BUG_ON(len > k.k->size);
+ k.k->size -= sub;
- /*
- * Don't readjust offset if the key size is now 0, because that could
- * cause offset to point to the next bucket:
- */
- if (!len)
- __set_bkey_deleted(k.k);
- else if (bkey_extent_is_data(k.k)) {
- struct bkey_s_extent e = bkey_s_to_extent(k);
- struct bch_extent_ptr *ptr;
- union bch_extent_crc *crc, *prev_crc = NULL;
-
- extent_for_each_ptr_crc(e, ptr, crc) {
- switch (extent_crc_type(crc)) {
- case BCH_EXTENT_CRC_NONE:
- ptr->offset += e.k->size - len;
+ if (!k.k->size)
+ k.k->type = KEY_TYPE_deleted;
+
+ switch (k.k->type) {
+ case KEY_TYPE_deleted:
+ case KEY_TYPE_discard:
+ case KEY_TYPE_error:
+ case KEY_TYPE_cookie:
+ break;
+ case KEY_TYPE_extent:
+ case KEY_TYPE_reflink_v: {
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
+ union bch_extent_entry *entry;
+ bool seen_crc = false;
+
+ bkey_extent_entry_for_each(ptrs, entry) {
+ switch (extent_entry_type(entry)) {
+ case BCH_EXTENT_ENTRY_ptr:
+ if (!seen_crc)
+ entry->ptr.offset += sub;
break;
- case BCH_EXTENT_CRC32:
- if (prev_crc != crc)
- crc->crc32.offset += e.k->size - len;
+ case BCH_EXTENT_ENTRY_crc32:
+ entry->crc32.offset += sub;
+ break;
+ case BCH_EXTENT_ENTRY_crc64:
+ entry->crc64.offset += sub;
break;
- case BCH_EXTENT_CRC64:
- if (prev_crc != crc)
- crc->crc64.offset += e.k->size - len;
+ case BCH_EXTENT_ENTRY_crc128:
+ entry->crc128.offset += sub;
break;
- case BCH_EXTENT_CRC128:
- if (prev_crc != crc)
- crc->crc128.offset += e.k->size - len;
+ case BCH_EXTENT_ENTRY_stripe_ptr:
break;
}
- prev_crc = crc;
- }
- }
- k.k->size = len;
+ if (extent_entry_is_crc(entry))
+ seen_crc = true;
+ }
- return true;
-}
+ break;
+ }
+ case KEY_TYPE_reflink_p: {
+ struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);
-bool bch2_cut_front(struct bpos where, struct bkey_i *k)
-{
- return __bch2_cut_front(where, bkey_i_to_s(k));
+ le64_add_cpu(&p.v->idx, sub);
+ break;
+ }
+ case KEY_TYPE_reservation:
+ break;
+ default:
+ BUG();
+ }
}
bool bch2_cut_back(struct bpos where, struct bkey *k)
len = where.offset - bkey_start_offset(k);
- BUG_ON(len > k->size);
-
k->p = where;
k->size = len;
if (!len)
- __set_bkey_deleted(k);
+ k->type = KEY_TYPE_deleted;
return true;
}
-/**
- * bch_key_resize - adjust size of @k
- *
- * bkey_start_offset(k) will be preserved, modifies where the extent ends
- */
-void bch2_key_resize(struct bkey *k,
- unsigned new_size)
-{
- k->p.offset -= k->size;
- k->p.offset += new_size;
- k->size = new_size;
-}
-
-/*
- * In extent_sort_fix_overlapping(), insert_fixup_extent(),
- * extent_merge_inline() - we're modifying keys in place that are packed. To do
- * that we have to unpack the key, modify the unpacked key - then this
- * copies/repacks the unpacked to the original as necessary.
- */
-static bool __extent_save(struct btree *b, struct btree_node_iter *iter,
- struct bkey_packed *dst, struct bkey *src)
+static unsigned bch2_bkey_nr_alloc_ptrs(struct bkey_s_c k)
{
- struct bkey_format *f = &b->format;
- struct bkey_i *dst_unpacked;
- bool ret;
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ unsigned ret = 0;
- if ((dst_unpacked = packed_to_bkey(dst))) {
- dst_unpacked->k = *src;
- ret = true;
- } else {
- ret = bch2_bkey_pack_key(dst, src, f);
+ bkey_extent_entry_for_each(ptrs, entry) {
+ switch (__extent_entry_type(entry)) {
+ case BCH_EXTENT_ENTRY_ptr:
+ case BCH_EXTENT_ENTRY_stripe_ptr:
+ ret++;
+ }
}
- if (ret && iter)
- bch2_verify_key_order(b, iter, dst);
-
return ret;
}
-static void extent_save(struct btree *b, struct btree_node_iter *iter,
- struct bkey_packed *dst, struct bkey *src)
-{
- BUG_ON(!__extent_save(b, iter, dst, src));
-}
-
-/*
- * If keys compare equal, compare by pointer order:
- *
- * Necessary for sort_fix_overlapping() - if there are multiple keys that
- * compare equal in different sets, we have to process them newest to oldest.
- */
-#define extent_sort_cmp(h, l, r) \
-({ \
- struct bkey _ul = bkey_unpack_key(b, \
- __btree_node_offset_to_key(b, (l).k)); \
- struct bkey _ur = bkey_unpack_key(b, \
- __btree_node_offset_to_key(b, (r).k)); \
- \
- bkey_cmp(bkey_start_pos(&_ul), \
- bkey_start_pos(&_ur)) ?: (r).k - (l).k; \
-})
-
-static inline void extent_sort_sift(struct btree_node_iter *iter,
- struct btree *b, size_t i)
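+/*
+ * Count the btree iterators needed to process @k (including indirect extents
+ * behind reflink pointers); once @max_iters is reached, trim *end:
+ */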
+static int count_iters_for_insert(struct btree_trans *trans,
+ struct bkey_s_c k,
+ unsigned offset,
+ struct bpos *end,
+ unsigned *nr_iters,
+ unsigned max_iters,
+ bool overwrite)
{
- heap_sift_down(iter, i, extent_sort_cmp);
-}
+ int ret = 0;
-static inline void extent_sort_next(struct btree_node_iter *iter,
- struct btree *b,
- struct btree_node_iter_set *i)
-{
- sort_key_next(iter, b, i);
- heap_sift_down(iter, i - iter->data, extent_sort_cmp);
-}
+ switch (k.k->type) {
+ case KEY_TYPE_extent:
+ case KEY_TYPE_reflink_v:
+ *nr_iters += bch2_bkey_nr_alloc_ptrs(k);
-static void extent_sort_append(struct bch_fs *c,
- struct btree *b,
- struct btree_nr_keys *nr,
- struct bkey_packed *start,
- struct bkey_packed **prev,
- struct bkey_packed *k)
-{
- struct bkey_format *f = &b->format;
- BKEY_PADDED(k) tmp;
+ if (*nr_iters >= max_iters) {
+ *end = bpos_min(*end, k.k->p);
+ ret = 1;
+ }
- if (bkey_whiteout(k))
- return;
+ break;
+ case KEY_TYPE_reflink_p: {
+ struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
+ u64 idx = le64_to_cpu(p.v->idx);
+ unsigned sectors = bpos_min(*end, p.k->p).offset -
+ bkey_start_offset(p.k);
+ struct btree_iter *iter;
+ struct bkey_s_c r_k;
+
+ for_each_btree_key(trans, iter,
+ BTREE_ID_REFLINK, POS(0, idx + offset),
+ BTREE_ITER_SLOTS, r_k, ret) {
+ if (bkey_cmp(bkey_start_pos(r_k.k),
+ POS(0, idx + sectors)) >= 0)
+ break;
- bch2_bkey_unpack(b, &tmp.k, k);
+ *nr_iters += 1 + bch2_bkey_nr_alloc_ptrs(r_k);
- if (*prev &&
- bch2_extent_merge(c, b, (void *) *prev, &tmp.k))
- return;
+ if (*nr_iters >= max_iters) {
+ struct bpos pos = bkey_start_pos(k.k);
+ pos.offset += r_k.k->p.offset - idx;
- if (*prev) {
- bch2_bkey_pack(*prev, (void *) *prev, f);
+ *end = bpos_min(*end, pos);
+ ret = 1;
+ break;
+ }
+ }
- btree_keys_account_key_add(nr, 0, *prev);
- *prev = bkey_next(*prev);
- } else {
- *prev = start;
+ bch2_trans_iter_put(trans, iter);
+ break;
+ }
}
- bkey_copy(*prev, &tmp.k);
+ return ret;
}
-struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c,
- struct bset *dst,
- struct btree *b,
- struct btree_node_iter *iter)
-{
- struct bkey_format *f = &b->format;
- struct btree_node_iter_set *_l = iter->data, *_r;
- struct bkey_packed *prev = NULL, *out, *lk, *rk;
- struct bkey l_unpacked, r_unpacked;
- struct bkey_s l, r;
- struct btree_nr_keys nr;
-
- memset(&nr, 0, sizeof(nr));
-
- heap_resort(iter, extent_sort_cmp);
-
- while (!bch2_btree_node_iter_end(iter)) {
- lk = __btree_node_offset_to_key(b, _l->k);
-
- if (iter->used == 1) {
- extent_sort_append(c, b, &nr, dst->start, &prev, lk);
- extent_sort_next(iter, b, _l);
- continue;
- }
-
- _r = iter->data + 1;
- if (iter->used > 2 &&
- extent_sort_cmp(iter, _r[0], _r[1]) >= 0)
- _r++;
-
- rk = __btree_node_offset_to_key(b, _r->k);
+#define EXTENT_ITERS_MAX (BTREE_ITER_MAX / 3)
- l = __bkey_disassemble(b, lk, &l_unpacked);
- r = __bkey_disassemble(b, rk, &r_unpacked);
-
- /* If current key and next key don't overlap, just append */
- if (bkey_cmp(l.k->p, bkey_start_pos(r.k)) <= 0) {
- extent_sort_append(c, b, &nr, dst->start, &prev, lk);
- extent_sort_next(iter, b, _l);
- continue;
- }
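+/*
+ * Compute the farthest position (*end) an extent insert can reach in a single
+ * atomic transaction, limited by the btree node and the iterator budget:
+ */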
+int bch2_extent_atomic_end(struct btree_iter *iter,
+ struct bkey_i *insert,
+ struct bpos *end)
+{
+ struct btree_trans *trans = iter->trans;
+ struct btree *b;
+ struct btree_node_iter node_iter;
+ struct bkey_packed *_k;
+ unsigned nr_iters = 0;
+ int ret;
+
+ ret = bch2_btree_iter_traverse(iter);
+ if (ret)
+ return ret;
- /* Skip 0 size keys */
- if (!r.k->size) {
- extent_sort_next(iter, b, _r);
- continue;
- }
+ b = iter->l[0].b;
+ node_iter = iter->l[0].iter;
- /*
- * overlap: keep the newer key and trim the older key so they
- * don't overlap. comparing pointers tells us which one is
- * newer, since the bsets are appended one after the other.
- */
+ BUG_ON(bkey_cmp(bkey_start_pos(&insert->k), b->data->min_key) < 0);
- /* can't happen because of comparison func */
- BUG_ON(_l->k < _r->k &&
- !bkey_cmp(bkey_start_pos(l.k), bkey_start_pos(r.k)));
-
- if (_l->k > _r->k) {
- /* l wins, trim r */
- if (bkey_cmp(l.k->p, r.k->p) >= 0) {
- sort_key_next(iter, b, _r);
- } else {
- __bch2_cut_front(l.k->p, r);
- extent_save(b, NULL, rk, r.k);
- }
+ *end = bpos_min(insert->k.p, b->key.k.p);
- extent_sort_sift(iter, b, _r - iter->data);
- } else if (bkey_cmp(l.k->p, r.k->p) > 0) {
- BKEY_PADDED(k) tmp;
+ ret = count_iters_for_insert(trans, bkey_i_to_s_c(insert), 0, end,
+ &nr_iters, EXTENT_ITERS_MAX / 2, false);
+ if (ret < 0)
+ return ret;
- /*
- * r wins, but it overlaps in the middle of l - split l:
- */
- bkey_reassemble(&tmp.k, l.s_c);
- bch2_cut_back(bkey_start_pos(r.k), &tmp.k.k);
+ while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
+ KEY_TYPE_discard))) {
+ struct bkey unpacked;
+ struct bkey_s_c k = bkey_disassemble(b, _k, &unpacked);
+ unsigned offset = 0;
- __bch2_cut_front(r.k->p, l);
- extent_save(b, NULL, lk, l.k);
+ if (bkey_cmp(bkey_start_pos(k.k), *end) >= 0)
+ break;
- extent_sort_sift(iter, b, 0);
+ if (bkey_cmp(bkey_start_pos(&insert->k),
+ bkey_start_pos(k.k)) > 0)
+ offset = bkey_start_offset(&insert->k) -
+ bkey_start_offset(k.k);
- extent_sort_append(c, b, &nr, dst->start, &prev,
- bkey_to_packed(&tmp.k));
- } else {
- bch2_cut_back(bkey_start_pos(r.k), l.k);
- extent_save(b, NULL, lk, l.k);
- }
- }
+ ret = count_iters_for_insert(trans, k, offset, end,
+ &nr_iters, EXTENT_ITERS_MAX, true);
+ if (ret)
+ break;
- if (prev) {
- bch2_bkey_pack(prev, (void *) prev, f);
- btree_keys_account_key_add(&nr, 0, prev);
- out = bkey_next(prev);
- } else {
- out = dst->start;
+ bch2_btree_node_iter_advance(&node_iter, b);
}
- dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
- return nr;
-}
-
-struct extent_insert_state {
- struct btree_insert *trans;
- struct btree_insert_entry *insert;
- struct bpos committed;
- struct bch_fs_usage stats;
-
- /* for deleting: */
- struct bkey_i whiteout;
- bool do_journal;
- bool deleting;
-};
-
-static void bch2_add_sectors(struct extent_insert_state *s,
- struct bkey_s_c k, u64 offset, s64 sectors)
-{
- struct bch_fs *c = s->trans->c;
- struct btree *b = s->insert->iter->nodes[0];
-
- EBUG_ON(bkey_cmp(bkey_start_pos(k.k), b->data->min_key) < 0);
-
- if (!sectors)
- return;
-
- bch2_mark_key(c, k, sectors, false, gc_pos_btree_node(b),
- &s->stats, s->trans->journal_res.seq);
+ return ret < 0 ? ret : 0;
}
-static void bch2_subtract_sectors(struct extent_insert_state *s,
- struct bkey_s_c k, u64 offset, s64 sectors)
+int bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
{
- bch2_add_sectors(s, k, offset, -sectors);
-}
+ struct bpos end;
+ int ret;
-/* These wrappers subtract exactly the sectors that we're removing from @k */
-static void bch2_cut_subtract_back(struct extent_insert_state *s,
- struct bpos where, struct bkey_s k)
-{
- bch2_subtract_sectors(s, k.s_c, where.offset,
- k.k->p.offset - where.offset);
- bch2_cut_back(where, k.k);
-}
+ ret = bch2_extent_atomic_end(iter, k, &end);
+ if (ret)
+ return ret;
-static void bch2_cut_subtract_front(struct extent_insert_state *s,
- struct bpos where, struct bkey_s k)
-{
- bch2_subtract_sectors(s, k.s_c, bkey_start_offset(k.k),
- where.offset - bkey_start_offset(k.k));
- __bch2_cut_front(where, k);
+ bch2_cut_back(end, &k->k);
+ return 0;
}
-static void bch2_drop_subtract(struct extent_insert_state *s, struct bkey_s k)
+int bch2_extent_is_atomic(struct bkey_i *k, struct btree_iter *iter)
{
- if (k.k->size)
- bch2_subtract_sectors(s, k.s_c,
- bkey_start_offset(k.k), k.k->size);
- k.k->size = 0;
- __set_bkey_deleted(k.k);
-}
-
-static bool bch2_extent_merge_inline(struct bch_fs *,
- struct btree_iter *,
- struct bkey_packed *,
- struct bkey_packed *,
- bool);
+ struct bpos end;
+ int ret;
-#define MAX_LOCK_HOLD_TIME (5 * NSEC_PER_MSEC)
-
-static enum btree_insert_ret
-extent_insert_should_stop(struct extent_insert_state *s)
-{
- struct btree *b = s->insert->iter->nodes[0];
+ ret = bch2_extent_atomic_end(iter, k, &end);
+ if (ret)
+ return ret;
- /*
- * Check if we have sufficient space in both the btree node and the
- * journal reservation:
- *
- * Each insert checks for room in the journal entry, but we check for
- * room in the btree node up-front. In the worst case, bkey_cmpxchg()
- * will insert two keys, and one iteration of this room will insert one
- * key, so we need room for three keys.
- */
- if (!bch2_btree_node_insert_fits(s->trans->c, b, s->insert->k->k.u64s))
- return BTREE_INSERT_BTREE_NODE_FULL;
- else if (!journal_res_insert_fits(s->trans, s->insert))
- return BTREE_INSERT_JOURNAL_RES_FULL; /* XXX worth tracing */
- else
- return BTREE_INSERT_OK;
+ return !bkey_cmp(end, k->k.p);
}
-static void extent_bset_insert(struct bch_fs *c, struct btree_iter *iter,
- struct bkey_i *insert)
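+/*
+ * Check whether an extent insert can proceed: accounts for whiteouts and for
+ * splitting an existing extent in *u64s, and takes a disk reservation when a
+ * compressed extent would be split:
+ */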
+enum btree_insert_ret
+bch2_extent_can_insert(struct btree_trans *trans,
+ struct btree_insert_entry *insert,
+ unsigned *u64s)
{
- struct btree *b = iter->nodes[0];
- struct btree_node_iter *node_iter = &iter->node_iters[0];
- struct bset_tree *t = bset_tree_last(b);
- struct bkey_packed *where =
- bch2_btree_node_iter_bset_pos(node_iter, b, t);
- struct bkey_packed *prev = bch2_bkey_prev(b, t, where);
- struct bkey_packed *next_live_key = where;
- unsigned clobber_u64s;
-
- if (prev)
- where = bkey_next(prev);
-
- while (next_live_key != btree_bkey_last(b, t) &&
- bkey_deleted(next_live_key))
- next_live_key = bkey_next(next_live_key);
+ struct btree_iter_level *l = &insert->iter->l[0];
+ struct btree_node_iter node_iter = l->iter;
+ enum bch_extent_overlap overlap;
+ struct bkey_packed *_k;
+ struct bkey unpacked;
+ struct bkey_s_c k;
+ int sectors;
/*
- * Everything between where and next_live_key is now deleted keys, and
- * is overwritten:
+ * We avoid creating whiteouts whenever possible when deleting, but
+ * those optimizations mean we may potentially insert two whiteouts
+ * instead of one (when we overlap with the front of one extent and the
+ * back of another):
*/
- clobber_u64s = (u64 *) next_live_key - (u64 *) where;
-
- if (prev &&
- bch2_extent_merge_inline(c, iter, prev, bkey_to_packed(insert), true))
- goto drop_deleted_keys;
-
- if (next_live_key != btree_bkey_last(b, t) &&
- bch2_extent_merge_inline(c, iter, bkey_to_packed(insert),
- next_live_key, false))
- goto drop_deleted_keys;
-
- bch2_bset_insert(b, node_iter, where, insert, clobber_u64s);
- bch2_btree_node_iter_fix(iter, b, node_iter, t, where,
- clobber_u64s, where->u64s);
- return;
-drop_deleted_keys:
- bch2_bset_delete(b, where, clobber_u64s);
- bch2_btree_node_iter_fix(iter, b, node_iter, t, where, clobber_u64s, 0);
-}
+ if (bkey_whiteout(&insert->k->k))
+ *u64s += BKEY_U64s;
-static void extent_insert_committed(struct extent_insert_state *s)
-{
- struct bch_fs *c = s->trans->c;
- struct btree_iter *iter = s->insert->iter;
- struct bkey_i *insert = !s->deleting
- ? s->insert->k
- : &s->whiteout;
- BKEY_PADDED(k) split;
+ _k = bch2_btree_node_iter_peek_filter(&node_iter, l->b,
+ KEY_TYPE_discard);
+ if (!_k)
+ return BTREE_INSERT_OK;
- EBUG_ON(bkey_cmp(insert->k.p, s->committed) < 0);
- EBUG_ON(bkey_cmp(s->committed, bkey_start_pos(&insert->k)) < 0);
+ k = bkey_disassemble(l->b, _k, &unpacked);
- if (!bkey_cmp(s->committed, bkey_start_pos(&insert->k)))
- return;
+ overlap = bch2_extent_overlap(&insert->k->k, k.k);
- if (s->deleting && !s->do_journal) {
- bch2_cut_front(s->committed, insert);
- goto done;
- }
+ /* account for having to split existing extent: */
+ if (overlap == BCH_EXTENT_OVERLAP_MIDDLE)
+ *u64s += _k->u64s;
- EBUG_ON(bkey_deleted(&insert->k) || !insert->k.size);
+ if (overlap == BCH_EXTENT_OVERLAP_MIDDLE &&
+ (sectors = bch2_extent_is_compressed(k))) {
+ int flags = trans->flags & BTREE_INSERT_NOFAIL
+ ? BCH_DISK_RESERVATION_NOFAIL : 0;
- bkey_copy(&split.k, insert);
-
- if (!(s->trans->flags & BTREE_INSERT_JOURNAL_REPLAY) &&
- bkey_cmp(s->committed, insert->k.p) &&
- bkey_extent_is_compressed(bkey_i_to_s_c(insert))) {
- /* XXX: possibly need to increase our reservation? */
- bch2_cut_subtract_back(s, s->committed,
- bkey_i_to_s(&split.k));
- bch2_cut_front(s->committed, insert);
- bch2_add_sectors(s, bkey_i_to_s_c(insert),
- bkey_start_offset(&insert->k),
- insert->k.size);
- } else {
- bch2_cut_back(s->committed, &split.k.k);
- bch2_cut_front(s->committed, insert);
+ switch (bch2_disk_reservation_add(trans->c,
+ trans->disk_res,
+ sectors, flags)) {
+ case 0:
+ break;
+ case -ENOSPC:
+ return BTREE_INSERT_ENOSPC;
+ default:
+ BUG();
+ }
}
- if (debug_check_bkeys(c))
- bch2_bkey_debugcheck(c, iter->nodes[iter->level],
- bkey_i_to_s_c(&split.k));
-
- bch2_btree_journal_key(s->trans, iter, &split.k);
-
- if (!s->deleting)
- extent_bset_insert(c, iter, &split.k);
-done:
- bch2_btree_iter_set_pos_same_leaf(iter, s->committed);
-
- insert->k.needs_whiteout = false;
- s->do_journal = false;
- s->trans->did_work = true;
+ return BTREE_INSERT_OK;
}
-static enum extent_insert_hook_ret
-__extent_insert_advance_pos(struct extent_insert_state *s,
- struct bpos next_pos,
- struct bkey_s_c k)
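+/* Debug check: @insert must not overlap the extents on either side of it: */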
+static void verify_extent_nonoverlapping(struct bch_fs *c,
+ struct btree *b,
+ struct btree_node_iter *_iter,
+ struct bkey_i *insert)
{
- struct extent_insert_hook *hook = s->trans->hook;
- enum extent_insert_hook_ret ret;
-#if 0
- /*
- * Currently disabled for encryption - broken with fcollapse. Will have
- * to reenable when versions are exposed for send/receive - versions
- * will have to be monotonic then:
- */
- if (k.k && k.k->size &&
- !bversion_zero(s->insert->k->k.version) &&
- bversion_cmp(k.k->version, s->insert->k->k.version) > 0) {
- ret = BTREE_HOOK_NO_INSERT;
- } else
-#endif
- if (hook)
- ret = hook->fn(hook, s->committed, next_pos, k, s->insert->k);
- else
- ret = BTREE_HOOK_DO_INSERT;
-
- EBUG_ON(bkey_deleted(&s->insert->k->k) || !s->insert->k->k.size);
-
- switch (ret) {
- case BTREE_HOOK_DO_INSERT:
- break;
- case BTREE_HOOK_NO_INSERT:
- extent_insert_committed(s);
- bch2_cut_subtract_front(s, next_pos, bkey_i_to_s(s->insert->k));
+#ifdef CONFIG_BCACHEFS_DEBUG
+ struct btree_node_iter iter;
+ struct bkey_packed *k;
+ struct bkey uk;
- bch2_btree_iter_set_pos_same_leaf(s->insert->iter, next_pos);
- break;
- case BTREE_HOOK_RESTART_TRANS:
- return ret;
- }
+ if (!expensive_debug_checks(c))
+ return;
- s->committed = next_pos;
- return ret;
-}
+ iter = *_iter;
+ k = bch2_btree_node_iter_prev_filter(&iter, b, KEY_TYPE_discard);
+ BUG_ON(k &&
+ (uk = bkey_unpack_key(b, k),
+ bkey_cmp(uk.p, bkey_start_pos(&insert->k)) > 0));
-/*
- * Update iter->pos, marking how much of @insert we've processed, and call hook
- * fn:
- */
-static enum extent_insert_hook_ret
-extent_insert_advance_pos(struct extent_insert_state *s, struct bkey_s_c k)
-{
- struct btree *b = s->insert->iter->nodes[0];
- struct bpos next_pos = bpos_min(s->insert->k->k.p,
- k.k ? k.k->p : b->key.k.p);
-
- /* hole? */
- if (k.k && bkey_cmp(s->committed, bkey_start_pos(k.k)) < 0) {
- bool have_uncommitted = bkey_cmp(s->committed,
- bkey_start_pos(&s->insert->k->k)) > 0;
-
- switch (__extent_insert_advance_pos(s, bkey_start_pos(k.k),
- bkey_s_c_null)) {
- case BTREE_HOOK_DO_INSERT:
- break;
- case BTREE_HOOK_NO_INSERT:
- /*
- * we had to split @insert and insert the committed
- * part - need to bail out and recheck journal
- * reservation/btree node before we advance pos past @k:
- */
- if (have_uncommitted)
- return BTREE_HOOK_NO_INSERT;
- break;
- case BTREE_HOOK_RESTART_TRANS:
- return BTREE_HOOK_RESTART_TRANS;
- }
+ iter = *_iter;
+ k = bch2_btree_node_iter_peek_filter(&iter, b, KEY_TYPE_discard);
+#if 0
+ BUG_ON(k &&
+ (uk = bkey_unpack_key(b, k),
+ bkey_cmp(insert->k.p, bkey_start_pos(&uk))) > 0);
+#else
+ if (k &&
+ (uk = bkey_unpack_key(b, k),
+ bkey_cmp(insert->k.p, bkey_start_pos(&uk))) > 0) {
+ char buf1[100];
+ char buf2[100];
+
+ bch2_bkey_to_text(&PBUF(buf1), &insert->k);
+ bch2_bkey_to_text(&PBUF(buf2), &uk);
+
+ bch2_dump_btree_node(b);
+ panic("insert > next :\n"
+ "insert %s\n"
+ "next %s\n",
+ buf1, buf2);
}
+#endif
- /* avoid redundant calls to hook fn: */
- if (!bkey_cmp(s->committed, next_pos))
- return BTREE_HOOK_DO_INSERT;
-
- return __extent_insert_advance_pos(s, next_pos, k);
+#endif
}
-static enum btree_insert_ret
-extent_insert_check_split_compressed(struct extent_insert_state *s,
- struct bkey_s_c k,
- enum bch_extent_overlap overlap)
+static void extent_bset_insert(struct bch_fs *c, struct btree_iter *iter,
+ struct bkey_i *insert)
{
- struct bch_fs *c = s->trans->c;
- unsigned sectors;
+ struct btree_iter_level *l = &iter->l[0];
+ struct bkey_packed *k =
+ bch2_btree_node_iter_bset_pos(&l->iter, l->b, bset_tree_last(l->b));
- if (overlap == BCH_EXTENT_OVERLAP_MIDDLE &&
- (sectors = bkey_extent_is_compressed(k))) {
- int flags = BCH_DISK_RESERVATION_BTREE_LOCKS_HELD;
+ BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(c, l->b));
- if (s->trans->flags & BTREE_INSERT_NOFAIL)
- flags |= BCH_DISK_RESERVATION_NOFAIL;
+ EBUG_ON(bkey_deleted(&insert->k) || !insert->k.size);
+ verify_extent_nonoverlapping(c, l->b, &l->iter, insert);
- switch (bch2_disk_reservation_add(c,
- s->trans->disk_res,
- sectors, flags)) {
- case 0:
- break;
- case -ENOSPC:
- return BTREE_INSERT_ENOSPC;
- case -EINTR:
- return BTREE_INSERT_NEED_GC_LOCK;
- default:
- BUG();
- }
- }
+ if (debug_check_bkeys(c))
+ bch2_bkey_debugcheck(c, l->b, bkey_i_to_s_c(insert));
- return BTREE_INSERT_OK;
+ bch2_bset_insert(l->b, &l->iter, k, insert, 0);
+ bch2_btree_node_iter_fix(iter, l->b, &l->iter, k, 0, k->u64s);
}
-static enum btree_insert_ret
-extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
- struct bset_tree *t, struct bkey_packed *_k, struct bkey_s k,
+static void
+extent_squash(struct bch_fs *c, struct btree_iter *iter,
+ struct bkey_i *insert,
+ struct bkey_packed *_k, struct bkey_s k,
enum bch_extent_overlap overlap)
{
- struct bch_fs *c = s->trans->c;
- struct btree_iter *iter = s->insert->iter;
- struct btree *b = iter->nodes[0];
- struct btree_node_iter *node_iter = &iter->node_iters[0];
+ struct btree_iter_level *l = &iter->l[0];
switch (overlap) {
case BCH_EXTENT_OVERLAP_FRONT:
/* insert overlaps with start of k: */
- bch2_cut_subtract_front(s, insert->k.p, k);
- BUG_ON(bkey_deleted(k.k));
- extent_save(b, node_iter, _k, k.k);
+ __bch2_cut_front(insert->k.p, k);
+ EBUG_ON(bkey_deleted(k.k));
+ extent_save(l->b, _k, k.k);
+ bch2_btree_iter_fix_key_modified(iter, l->b, _k);
break;
case BCH_EXTENT_OVERLAP_BACK:
/* insert overlaps with end of k: */
- bch2_cut_subtract_back(s, bkey_start_pos(&insert->k), k);
- BUG_ON(bkey_deleted(k.k));
- extent_save(b, node_iter, _k, k.k);
+ bch2_cut_back(bkey_start_pos(&insert->k), k.k);
+ EBUG_ON(bkey_deleted(k.k));
+ extent_save(l->b, _k, k.k);
/*
* As the auxiliary tree is indexed by the end of the
* key and we've just changed the end, update the
* auxiliary tree.
*/
- bch2_bset_fix_invalidated_key(b, t, _k);
- bch2_btree_node_iter_fix(iter, b, node_iter, t,
- _k, _k->u64s, _k->u64s);
+ bch2_bset_fix_invalidated_key(l->b, _k);
+ bch2_btree_node_iter_fix(iter, l->b, &l->iter,
+ _k, _k->u64s, _k->u64s);
break;
case BCH_EXTENT_OVERLAP_ALL: {
- struct bpos orig_pos = k.k->p;
-
/* The insert key completely covers k, invalidate k */
if (!bkey_whiteout(k.k))
- btree_keys_account_key_drop(&b->nr,
- t - b->set, _k);
-
- bch2_drop_subtract(s, k);
- k.k->p = bkey_start_pos(&insert->k);
- if (!__extent_save(b, node_iter, _k, k.k)) {
- /*
- * Couldn't repack: we aren't necessarily able
- * to repack if the new key is outside the range
- * of the old extent, so we have to split
- * @insert:
- */
- k.k->p = orig_pos;
- extent_save(b, node_iter, _k, k.k);
-
- if (extent_insert_advance_pos(s, k.s_c) ==
- BTREE_HOOK_RESTART_TRANS)
- return BTREE_INSERT_NEED_TRAVERSE;
-
- extent_insert_committed(s);
- /*
- * We split and inserted upto at k.k->p - that
- * has to coincide with iter->pos, so that we
- * don't have anything more we have to insert
- * until we recheck our journal reservation:
- */
- EBUG_ON(bkey_cmp(s->committed, k.k->p));
+ btree_account_key_drop(l->b, _k);
+
+ k.k->size = 0;
+ k.k->type = KEY_TYPE_deleted;
+
+ if (_k >= btree_bset_last(l->b)->start) {
+ unsigned u64s = _k->u64s;
+
+ bch2_bset_delete(l->b, _k, _k->u64s);
+ bch2_btree_node_iter_fix(iter, l->b, &l->iter,
+ _k, u64s, 0);
} else {
- bch2_bset_fix_invalidated_key(b, t, _k);
- bch2_btree_node_iter_fix(iter, b, node_iter, t,
- _k, _k->u64s, _k->u64s);
+ extent_save(l->b, _k, k.k);
+ bch2_btree_iter_fix_key_modified(iter, l->b, _k);
}
break;
* what k points to)
*/
bkey_reassemble(&split.k, k.s_c);
- split.k.k.needs_whiteout |= bset_written(b, bset(b, t));
+ split.k.k.needs_whiteout |= bkey_written(l->b, _k);
bch2_cut_back(bkey_start_pos(&insert->k), &split.k.k);
BUG_ON(bkey_deleted(&split.k.k));
- bch2_cut_subtract_front(s, insert->k.p, k);
+ __bch2_cut_front(insert->k.p, k);
BUG_ON(bkey_deleted(k.k));
- extent_save(b, node_iter, _k, k.k);
+ extent_save(l->b, _k, k.k);
+ bch2_btree_iter_fix_key_modified(iter, l->b, _k);
- bch2_add_sectors(s, bkey_i_to_s_c(&split.k),
- bkey_start_offset(&split.k.k),
- split.k.k.size);
extent_bset_insert(c, iter, &split.k);
break;
}
- }
-
- return BTREE_INSERT_OK;
-}
-
-static enum btree_insert_ret
-bch2_delete_fixup_extent(struct extent_insert_state *s)
-{
- struct bch_fs *c = s->trans->c;
- struct btree_iter *iter = s->insert->iter;
- struct btree *b = iter->nodes[0];
- struct btree_node_iter *node_iter = &iter->node_iters[0];
- struct bkey_packed *_k;
- struct bkey unpacked;
- struct bkey_i *insert = s->insert->k;
- enum btree_insert_ret ret = BTREE_INSERT_OK;
-
- EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k)));
-
- s->whiteout = *insert;
- s->do_journal = false;
-
- while (bkey_cmp(s->committed, insert->k.p) < 0 &&
- (ret = extent_insert_should_stop(s)) == BTREE_INSERT_OK &&
- (_k = bch2_btree_node_iter_peek_all(node_iter, b))) {
- struct bset_tree *t = bch2_bkey_to_bset(b, _k);
- struct bkey_s k = __bkey_disassemble(b, _k, &unpacked);
- enum bch_extent_overlap overlap;
-
- EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k)));
- EBUG_ON(bkey_cmp(iter->pos, k.k->p) >= 0);
-
- if (bkey_cmp(bkey_start_pos(k.k), insert->k.p) >= 0)
- break;
-
- if (bkey_whiteout(k.k)) {
- s->committed = bpos_min(insert->k.p, k.k->p);
- goto next;
- }
-
- overlap = bch2_extent_overlap(&insert->k, k.k);
-
- ret = extent_insert_check_split_compressed(s, k.s_c, overlap);
- if (ret != BTREE_INSERT_OK)
- goto stop;
-
- switch (extent_insert_advance_pos(s, k.s_c)) {
- case BTREE_HOOK_DO_INSERT:
- break;
- case BTREE_HOOK_NO_INSERT:
- continue;
- case BTREE_HOOK_RESTART_TRANS:
- ret = BTREE_INSERT_NEED_TRAVERSE;
- goto stop;
- }
-
- s->do_journal = true;
-
- if (overlap == BCH_EXTENT_OVERLAP_ALL) {
- btree_keys_account_key_drop(&b->nr,
- t - b->set, _k);
- bch2_subtract_sectors(s, k.s_c,
- bkey_start_offset(k.k), k.k->size);
- _k->type = KEY_TYPE_DISCARD;
- reserve_whiteout(b, t, _k);
- } else if (k.k->needs_whiteout ||
- bset_written(b, bset(b, t))) {
- struct bkey_i discard = *insert;
-
- switch (overlap) {
- case BCH_EXTENT_OVERLAP_FRONT:
- bch2_cut_front(bkey_start_pos(k.k), &discard);
- break;
- case BCH_EXTENT_OVERLAP_BACK:
- bch2_cut_back(k.k->p, &discard.k);
- break;
- default:
- break;
- }
-
- discard.k.needs_whiteout = true;
-
- ret = extent_squash(s, insert, t, _k, k, overlap);
- BUG_ON(ret != BTREE_INSERT_OK);
-
- extent_bset_insert(c, iter, &discard);
- } else {
- ret = extent_squash(s, insert, t, _k, k, overlap);
- BUG_ON(ret != BTREE_INSERT_OK);
- }
-next:
- bch2_cut_front(s->committed, insert);
- bch2_btree_iter_set_pos_same_leaf(iter, s->committed);
- }
-
- if (bkey_cmp(s->committed, insert->k.p) < 0 &&
- ret == BTREE_INSERT_OK &&
- extent_insert_advance_pos(s, bkey_s_c_null) == BTREE_HOOK_RESTART_TRANS)
- ret = BTREE_INSERT_NEED_TRAVERSE;
-stop:
- extent_insert_committed(s);
-
- bch2_fs_usage_apply(c, &s->stats, s->trans->disk_res,
- gc_pos_btree_node(b));
-
- EBUG_ON(bkey_cmp(iter->pos, s->committed));
- EBUG_ON((bkey_cmp(iter->pos, b->key.k.p) == 0) !=
- !!(iter->flags & BTREE_ITER_AT_END_OF_LEAF));
-
- bch2_cut_front(iter->pos, insert);
-
- if (insert->k.size && (iter->flags & BTREE_ITER_AT_END_OF_LEAF))
- ret = BTREE_INSERT_NEED_TRAVERSE;
-
- EBUG_ON(insert->k.size && ret == BTREE_INSERT_OK);
-
- return ret;
+ }
}
/**
* If, on return, iter->pos doesn't match the end of @insert, then key
* insertion needs to continue or be retried.
*/
-enum btree_insert_ret
-bch2_insert_fixup_extent(struct btree_insert *trans,
- struct btree_insert_entry *insert)
+void bch2_insert_fixup_extent(struct btree_trans *trans,
+ struct btree_insert_entry *insert_entry)
{
struct bch_fs *c = trans->c;
- struct btree_iter *iter = insert->iter;
- struct btree *b = iter->nodes[0];
- struct btree_node_iter *node_iter = &iter->node_iters[0];
+ struct btree_iter *iter = insert_entry->iter;
+ struct bkey_i *insert = insert_entry->k;
+ struct btree_iter_level *l = &iter->l[0];
+ struct btree_node_iter node_iter = l->iter;
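+ /*
+ * For deletions, we only journal and/or insert a whiteout if we
+ * actually overlap with existing keys - tracked by update_journal
+ * and update_btree below:
+ */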
+ bool deleting = bkey_whiteout(&insert->k);
+ bool update_journal = !deleting;
+ bool update_btree = !deleting;
+ struct bkey_i whiteout = *insert;
struct bkey_packed *_k;
struct bkey unpacked;
- enum btree_insert_ret ret = BTREE_INSERT_OK;
-
- struct extent_insert_state s = {
- .trans = trans,
- .insert = insert,
- .committed = insert->iter->pos,
- .deleting = bkey_whiteout(&insert->k->k),
- };
+ BKEY_PADDED(k) tmp;
EBUG_ON(iter->level);
- EBUG_ON(bkey_deleted(&insert->k->k) || !insert->k->k.size);
-
- if (s.deleting)
- return bch2_delete_fixup_extent(&s);
-
- /*
- * As we process overlapping extents, we advance @iter->pos both to
- * signal to our caller (btree_insert_key()) how much of @insert->k has
- * been inserted, and also to keep @iter->pos consistent with
- * @insert->k and the node iterator that we're advancing:
- */
- EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
-
- if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
- bch2_add_sectors(&s, bkey_i_to_s_c(insert->k),
- bkey_start_offset(&insert->k->k),
- insert->k->k.size);
-
- while (bkey_cmp(s.committed, insert->k->k.p) < 0 &&
- (ret = extent_insert_should_stop(&s)) == BTREE_INSERT_OK &&
- (_k = bch2_btree_node_iter_peek_all(node_iter, b))) {
- struct bset_tree *t = bch2_bkey_to_bset(b, _k);
- struct bkey_s k = __bkey_disassemble(b, _k, &unpacked);
- enum bch_extent_overlap overlap;
+ EBUG_ON(!insert->k.size);
+ EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k)));
- EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
- EBUG_ON(bkey_cmp(iter->pos, k.k->p) >= 0);
+ while ((_k = bch2_btree_node_iter_peek_filter(&l->iter, l->b,
+ KEY_TYPE_discard))) {
+ struct bkey_s k = __bkey_disassemble(l->b, _k, &unpacked);
+ struct bpos cur_end = bpos_min(insert->k.p, k.k->p);
+ enum bch_extent_overlap overlap =
+ bch2_extent_overlap(&insert->k, k.k);
- if (bkey_cmp(bkey_start_pos(k.k), insert->k->k.p) >= 0)
+ if (bkey_cmp(bkey_start_pos(k.k), insert->k.p) >= 0)
break;
- overlap = bch2_extent_overlap(&insert->k->k, k.k);
-
- ret = extent_insert_check_split_compressed(&s, k.s_c, overlap);
- if (ret != BTREE_INSERT_OK)
- goto stop;
+ if (!bkey_whiteout(k.k))
+ update_journal = true;
- if (!k.k->size)
- goto squash;
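+ /*
+ * We're deleting and have only overlapped whiteouts so far -
+ * nothing to journal for this range, just skip past it:
+ */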
+ if (!update_journal) {
+ bch2_cut_front(cur_end, insert);
+ bch2_cut_front(cur_end, &whiteout);
+ bch2_btree_iter_set_pos_same_leaf(iter, cur_end);
+ goto next;
+ }
/*
- * Only call advance pos & call hook for nonzero size extents:
- * If hook returned BTREE_HOOK_NO_INSERT, @insert->k no longer
- * overlaps with @k:
+ * When deleting, if possible just do it by switching the type
+ * of the key we're deleting, instead of creating and inserting
+ * a new whiteout:
*/
- switch (extent_insert_advance_pos(&s, k.s_c)) {
- case BTREE_HOOK_DO_INSERT:
+ if (deleting &&
+ !update_btree &&
+ !bkey_cmp(insert->k.p, k.k->p) &&
+ !bkey_cmp(bkey_start_pos(&insert->k), bkey_start_pos(k.k))) {
+ if (!bkey_whiteout(k.k)) {
+ btree_account_key_drop(l->b, _k);
+ _k->type = KEY_TYPE_discard;
+ reserve_whiteout(l->b, _k);
+ bch2_btree_iter_fix_key_modified(iter,
+ l->b, _k);
+ }
break;
- case BTREE_HOOK_NO_INSERT:
- continue;
- case BTREE_HOOK_RESTART_TRANS:
- ret = BTREE_INSERT_NEED_TRAVERSE;
- goto stop;
}
- if (k.k->size &&
- (k.k->needs_whiteout || bset_written(b, bset(b, t))))
- insert->k->k.needs_whiteout = true;
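+ /*
+ * The key we're overwriting has been written out (or needs a
+ * whiteout itself): our key must carry needs_whiteout, and for
+ * deletions the whiteout has to go into the btree:
+ */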
+ if (k.k->needs_whiteout || bkey_written(l->b, _k)) {
+ insert->k.needs_whiteout = true;
+ update_btree = true;
+ }
- if (overlap == BCH_EXTENT_OVERLAP_ALL &&
+ if (update_btree &&
+ overlap == BCH_EXTENT_OVERLAP_ALL &&
bkey_whiteout(k.k) &&
k.k->needs_whiteout) {
- unreserve_whiteout(b, t, _k);
+ unreserve_whiteout(l->b, _k);
_k->needs_whiteout = false;
}
-squash:
- ret = extent_squash(&s, insert->k, t, _k, k, overlap);
- if (ret != BTREE_INSERT_OK)
- goto stop;
- }
-
- if (bkey_cmp(s.committed, insert->k->k.p) < 0 &&
- ret == BTREE_INSERT_OK &&
- extent_insert_advance_pos(&s, bkey_s_c_null) == BTREE_HOOK_RESTART_TRANS)
- ret = BTREE_INSERT_NEED_TRAVERSE;
-stop:
- extent_insert_committed(&s);
- /*
- * Subtract any remaining sectors from @insert, if we bailed out early
- * and didn't fully insert @insert:
- */
- if (insert->k->k.size &&
- !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
- bch2_subtract_sectors(&s, bkey_i_to_s_c(insert->k),
- bkey_start_offset(&insert->k->k),
- insert->k->k.size);
-
- bch2_fs_usage_apply(c, &s.stats, trans->disk_res,
- gc_pos_btree_node(b));
-
- EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
- EBUG_ON(bkey_cmp(iter->pos, s.committed));
- EBUG_ON((bkey_cmp(iter->pos, b->key.k.p) == 0) !=
- !!(iter->flags & BTREE_ITER_AT_END_OF_LEAF));
-
- if (insert->k->k.size && (iter->flags & BTREE_ITER_AT_END_OF_LEAF))
- ret = BTREE_INSERT_NEED_TRAVERSE;
-
- EBUG_ON(insert->k->k.size && ret == BTREE_INSERT_OK);
-
- return ret;
-}
-
-static const char *bch2_extent_invalid(const struct bch_fs *c,
- struct bkey_s_c k)
-{
- if (bkey_val_u64s(k.k) > BKEY_EXTENT_VAL_U64s_MAX)
- return "value too big";
-
- if (!k.k->size)
- return "zero key size";
-
- switch (k.k->type) {
- case BCH_EXTENT:
- case BCH_EXTENT_CACHED: {
- struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
- const union bch_extent_entry *entry;
- const union bch_extent_crc *crc;
- const struct bch_extent_ptr *ptr;
- unsigned size_ondisk = e.k->size;
- const char *reason;
- extent_for_each_entry(e, entry) {
- if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
- return "invalid extent entry type";
+ extent_squash(c, iter, insert, _k, k, overlap);
- if (extent_entry_is_crc(entry)) {
- crc = entry_to_crc(entry);
+ if (!update_btree)
+ bch2_cut_front(cur_end, insert);
+next:
+ node_iter = l->iter;
- if (crc_offset(crc) + e.k->size >
- crc_uncompressed_size(e.k, crc))
- return "checksum offset + key size > uncompressed size";
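+ /*
+ * If insert only overlapped the front or middle of k, then k
+ * extends past the end of insert - nothing else can overlap,
+ * so we're done:
+ */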
+ if (overlap == BCH_EXTENT_OVERLAP_FRONT ||
+ overlap == BCH_EXTENT_OVERLAP_MIDDLE)
+ break;
+ }
- size_ondisk = crc_compressed_size(e.k, crc);
+ l->iter = node_iter;
+ bch2_btree_iter_set_pos_same_leaf(iter, insert->k.p);
- if (!bch2_checksum_type_valid(c, crc_csum_type(crc)))
- return "invalid checksum type";
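+ /* insert the new key (or, when deleting, a discard whiteout): */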
+ if (update_btree) {
+ bkey_copy(&tmp.k, insert);
- if (crc_compression_type(crc) >= BCH_COMPRESSION_NR)
- return "invalid compression type";
- } else {
- ptr = entry_to_ptr(entry);
+ if (deleting)
+ tmp.k.k.type = KEY_TYPE_discard;
- reason = extent_ptr_invalid(c, e, &entry->ptr,
- size_ondisk, false);
- if (reason)
- return reason;
- }
- }
+ EBUG_ON(bkey_deleted(&tmp.k.k) || !tmp.k.k.size);
- return NULL;
+ extent_bset_insert(c, iter, &tmp.k);
}
- case BCH_RESERVATION: {
- struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
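+ /*
+ * Journal either the key we inserted or, when deleting, the
+ * whiteout covering the range we overlapped with:
+ */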
+ if (update_journal) {
+ bkey_copy(&tmp.k, !deleting ? insert : &whiteout);
- if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
- return "incorrect value size";
+ if (deleting)
+ tmp.k.k.type = KEY_TYPE_discard;
- if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
- return "invalid nr_replicas";
+ EBUG_ON(bkey_deleted(&tmp.k.k) || !tmp.k.k.size);
- return NULL;
+ bch2_btree_journal_key(trans, iter, &tmp.k);
}
- default:
- return "invalid value type";
- }
+ bch2_cut_front(insert->k.p, insert);
}
-static void bch2_extent_debugcheck_extent(struct bch_fs *c, struct btree *b,
- struct bkey_s_c_extent e)
+const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
- const struct bch_extent_ptr *ptr;
- struct bch_dev *ca;
- struct bucket *g;
- unsigned seq, stale;
+ return bch2_bkey_ptrs_invalid(c, k);
+}
+
+void bch2_extent_debugcheck(struct bch_fs *c, struct bkey_s_c k)
+{
+ struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
char buf[160];
- bool bad;
- unsigned ptrs_per_tier[BCH_TIER_MAX];
- unsigned replicas = 0;
/*
* XXX: we should be doing most/all of these checks at startup time
* (keys we see here might still be going to get overwritten during
* journal replay)
*/
- memset(ptrs_per_tier, 0, sizeof(ptrs_per_tier));
-
- extent_for_each_ptr(e, ptr) {
- ca = c->devs[ptr->dev];
- g = PTR_BUCKET(ca, ptr);
- replicas++;
- ptrs_per_tier[ca->mi.tier]++;
-
- /*
- * If journal replay hasn't finished, we might be seeing keys
- * that will be overwritten by the time journal replay is done:
- */
- if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
- continue;
-
- stale = 0;
-
- do {
- struct bucket_mark mark;
-
- seq = read_seqcount_begin(&c->gc_pos_lock);
- mark = READ_ONCE(g->mark);
-
- /* between mark and bucket gen */
- smp_rmb();
-
- stale = ptr_stale(ca, ptr);
-
- bch2_fs_bug_on(stale && !ptr->cached, c,
- "stale dirty pointer");
-
- bch2_fs_bug_on(stale > 96, c,
- "key too stale: %i",
- stale);
-
- if (stale)
- break;
-
- bad = (mark.data_type != BUCKET_DATA ||
- (gc_pos_cmp(c->gc_pos, gc_pos_btree_node(b)) > 0 &&
- !mark.owned_by_allocator &&
- !(ptr->cached
- ? mark.cached_sectors
- : mark.dirty_sectors)));
- } while (read_seqcount_retry(&c->gc_pos_lock, seq));
-
- if (bad)
- goto bad_ptr;
+ if (percpu_down_read_trylock(&c->mark_lock)) {
+ bch2_fs_bug_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
+ !bch2_bkey_replicas_marked_locked(c, e.s_c, false), c,
+ "extent key bad (replicas not marked in superblock):\n%s",
+ (bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf));
+ percpu_up_read(&c->mark_lock);
}
-
- if (replicas > BCH_REPLICAS_MAX) {
- bch2_bkey_val_to_text(c, btree_node_type(b), buf,
- sizeof(buf), e.s_c);
- bch2_fs_bug(c,
- "extent key bad (too many replicas: %u): %s",
- replicas, buf);
+ /*
+ * If journal replay hasn't finished, we might be seeing keys
+ * that will be overwritten by the time journal replay is done:
+ */
+ if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
return;
- }
- if (!bkey_extent_is_cached(e.k) &&
- replicas < c->sb.data_replicas_have) {
- bch2_bkey_val_to_text(c, btree_node_type(b), buf,
- sizeof(buf), e.s_c);
- bch2_fs_bug(c,
- "extent key bad (too few replicas, %u < %u): %s",
- replicas, c->sb.data_replicas_have, buf);
- return;
+ extent_for_each_ptr_decode(e, p, entry) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
+ struct bucket_mark mark = ptr_bucket_mark(ca, &p.ptr);
+ unsigned stale = gen_after(mark.gen, p.ptr.gen);
+ unsigned disk_sectors = ptr_disk_sectors(p);
+ unsigned mark_sectors = p.ptr.cached
+ ? mark.cached_sectors
+ : mark.dirty_sectors;
+
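+ /*
+ * A pointer is stale if the bucket's gen has been incremented
+ * past the pointer's gen (i.e. the bucket has been reused):
+ */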
+ bch2_fs_bug_on(stale && !p.ptr.cached, c,
+ "stale dirty pointer (ptr gen %u bucket gen %u)",
+ p.ptr.gen, mark.gen);
+
+ bch2_fs_bug_on(stale > 96, c, "key too stale: %i", stale);
+
+ bch2_fs_bug_on(!stale &&
+ (mark.data_type != BCH_DATA_USER ||
+ mark_sectors < disk_sectors), c,
+ "extent pointer not marked: %s:\n"
+ "type %u sectors %u < %u",
+ (bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf),
+ mark.data_type,
+ mark_sectors, disk_sectors);
}
+}
- return;
-
-bad_ptr:
- bch2_bkey_val_to_text(c, btree_node_type(b), buf,
- sizeof(buf), e.s_c);
- bch2_fs_bug(c, "extent pointer bad gc mark: %s:\nbucket %zu "
- "gen %i last_gc %i mark 0x%08x",
- buf, PTR_BUCKET_NR(ca, ptr), PTR_BUCKET(ca, ptr)->mark.gen,
- ca->oldest_gens[PTR_BUCKET_NR(ca, ptr)],
- (unsigned) g->mark.counter);
- return;
+void bch2_extent_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ bch2_bkey_ptrs_to_text(out, c, k);
}
-static void bch2_extent_debugcheck(struct bch_fs *c, struct btree *b,
- struct bkey_s_c k)
+static unsigned bch2_crc_field_size_max[] = {
+ [BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
+ [BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
+ [BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
+};
+
+static void bch2_extent_crc_pack(union bch_extent_crc *dst,
+ struct bch_extent_crc_unpacked src)
{
- switch (k.k->type) {
- case BCH_EXTENT:
- case BCH_EXTENT_CACHED:
- bch2_extent_debugcheck_extent(c, b, bkey_s_c_to_extent(k));
+#define set_common_fields(_dst, _src) \
+ _dst.csum_type = _src.csum_type, \
+ _dst.compression_type = _src.compression_type, \
+ _dst._compressed_size = _src.compressed_size - 1, \
+ _dst._uncompressed_size = _src.uncompressed_size - 1, \
+ _dst.offset = _src.offset
+
+ switch (extent_entry_type(to_entry(dst))) {
+ case BCH_EXTENT_ENTRY_crc32:
+ set_common_fields(dst->crc32, src);
+ dst->crc32.csum = *((__le32 *) &src.csum.lo);
break;
- case BCH_RESERVATION:
+ case BCH_EXTENT_ENTRY_crc64:
+ set_common_fields(dst->crc64, src);
+ dst->crc64.nonce = src.nonce;
+ dst->crc64.csum_lo = src.csum.lo;
+ dst->crc64.csum_hi = *((__le16 *) &src.csum.hi);
+ break;
+ case BCH_EXTENT_ENTRY_crc128:
+ set_common_fields(dst->crc128, src);
+ dst->crc128.nonce = src.nonce;
+ dst->crc128.csum = src.csum;
break;
default:
BUG();
}
+#undef set_common_fields
}
-static void bch2_extent_to_text(struct bch_fs *c, char *buf,
- size_t size, struct bkey_s_c k)
+void bch2_extent_crc_append(struct bkey_i *k,
+ struct bch_extent_crc_unpacked new)
{
- char *out = buf, *end = buf + size;
- const char *invalid;
-
-#define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
+ union bch_extent_crc *crc = (void *) ptrs.end;
+
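+ /* use the smallest crc entry type that can hold the csum, size and nonce: */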
+ if (bch_crc_bytes[new.csum_type] <= 4 &&
+ new.uncompressed_size - 1 <= CRC32_SIZE_MAX &&
+ new.nonce <= CRC32_NONCE_MAX)
+ crc->type = 1 << BCH_EXTENT_ENTRY_crc32;
+ else if (bch_crc_bytes[new.csum_type] <= 10 &&
+ new.uncompressed_size - 1 <= CRC64_SIZE_MAX &&
+ new.nonce <= CRC64_NONCE_MAX)
+ crc->type = 1 << BCH_EXTENT_ENTRY_crc64;
+ else if (bch_crc_bytes[new.csum_type] <= 16 &&
+ new.uncompressed_size - 1 <= CRC128_SIZE_MAX &&
+ new.nonce <= CRC128_NONCE_MAX)
+ crc->type = 1 << BCH_EXTENT_ENTRY_crc128;
+ else
+ BUG();
- if (bkey_extent_is_data(k.k))
- out += extent_print_ptrs(c, buf, size, bkey_s_c_to_extent(k));
+ bch2_extent_crc_pack(crc, new);
- invalid = bch2_extent_invalid(c, k);
- if (invalid)
- p(" invalid: %s", invalid);
-#undef p
-}
+ k->k.u64s += extent_entry_u64s(ptrs.end);
-static unsigned PTR_TIER(struct bch_fs *c,
- const struct bch_extent_ptr *ptr)
-{
- return c->devs[ptr->dev]->mi.tier;
+ EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
}
-static void bch2_extent_crc_init(union bch_extent_crc *crc,
- unsigned compressed_size,
- unsigned uncompressed_size,
- unsigned compression_type,
- unsigned nonce,
- struct bch_csum csum, unsigned csum_type)
+static inline void __extent_entry_insert(struct bkey_i *k,
+ union bch_extent_entry *dst,
+ union bch_extent_entry *new)
{
- if (bch_crc_bytes[csum_type] <= 4 &&
- uncompressed_size <= CRC32_SIZE_MAX &&
- nonce <= CRC32_NONCE_MAX) {
- crc->crc32 = (struct bch_extent_crc32) {
- .type = 1 << BCH_EXTENT_ENTRY_crc32,
- ._compressed_size = compressed_size - 1,
- ._uncompressed_size = uncompressed_size - 1,
- .offset = 0,
- .compression_type = compression_type,
- .csum_type = csum_type,
- .csum = *((__le32 *) &csum.lo),
- };
- return;
- }
-
- if (bch_crc_bytes[csum_type] <= 10 &&
- uncompressed_size <= CRC64_SIZE_MAX &&
- nonce <= CRC64_NONCE_MAX) {
- crc->crc64 = (struct bch_extent_crc64) {
- .type = 1 << BCH_EXTENT_ENTRY_crc64,
- ._compressed_size = compressed_size - 1,
- ._uncompressed_size = uncompressed_size - 1,
- .offset = 0,
- .nonce = nonce,
- .compression_type = compression_type,
- .csum_type = csum_type,
- .csum_lo = csum.lo,
- .csum_hi = *((__le16 *) &csum.hi),
- };
- return;
- }
-
- if (bch_crc_bytes[csum_type] <= 16 &&
- uncompressed_size <= CRC128_SIZE_MAX &&
- nonce <= CRC128_NONCE_MAX) {
- crc->crc128 = (struct bch_extent_crc128) {
- .type = 1 << BCH_EXTENT_ENTRY_crc128,
- ._compressed_size = compressed_size - 1,
- ._uncompressed_size = uncompressed_size - 1,
- .offset = 0,
- .nonce = nonce,
- .compression_type = compression_type,
- .csum_type = csum_type,
- .csum = csum,
- };
- return;
- }
+ union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
- BUG();
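+ /* shift the entries from @dst onwards up to make room, then copy @new in: */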
+ memmove_u64s_up_small((u64 *) dst + extent_entry_u64s(new),
+ dst, (u64 *) end - (u64 *) dst);
+ k->k.u64s += extent_entry_u64s(new);
+ memcpy(dst, new, extent_entry_bytes(new));
}
-void bch2_extent_crc_append(struct bkey_i_extent *e,
- unsigned compressed_size,
- unsigned uncompressed_size,
- unsigned compression_type,
- unsigned nonce,
- struct bch_csum csum, unsigned csum_type)
+void bch2_extent_ptr_decoded_append(struct bkey_i *k,
+ struct extent_ptr_decoded *p)
{
- union bch_extent_crc *crc;
-
- BUG_ON(compressed_size > uncompressed_size);
- BUG_ON(uncompressed_size != e->k.size);
- BUG_ON(!compressed_size || !uncompressed_size);
-
- /*
- * Look up the last crc entry, so we can check if we need to add
- * another:
- */
- extent_for_each_crc(extent_i_to_s(e), crc)
- ;
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
+ struct bch_extent_crc_unpacked crc =
+ bch2_extent_crc_unpack(&k->k, NULL);
+ union bch_extent_entry *pos;
+
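+ /*
+ * A pointer goes right after the crc entry that describes it: find
+ * a matching crc entry (or the start of the value, if p->crc is the
+ * null crc), appending a new crc entry if necessary:
+ */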
+ if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
+ pos = ptrs.start;
+ goto found;
+ }
- if (!crc && !csum_type && !compression_type)
- return;
+ bkey_for_each_crc(&k->k, ptrs, crc, pos)
+ if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
+ pos = extent_entry_next(pos);
+ goto found;
+ }
- if (crc &&
- crc_compressed_size(&e->k, crc) == compressed_size &&
- crc_uncompressed_size(&e->k, crc) == uncompressed_size &&
- crc_offset(crc) == 0 &&
- crc_nonce(crc) == nonce &&
- crc_csum_type(crc) == csum_type &&
- crc_compression_type(crc) == compression_type &&
- crc_csum(crc).lo == csum.lo &&
- crc_csum(crc).hi == csum.hi)
- return;
+ bch2_extent_crc_append(k, p->crc);
+ pos = bkey_val_end(bkey_i_to_s(k));
+found:
+ p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
+ __extent_entry_insert(k, pos, to_entry(&p->ptr));
- bch2_extent_crc_init((void *) extent_entry_last(extent_i_to_s(e)),
- compressed_size,
- uncompressed_size,
- compression_type,
- nonce, csum, csum_type);
- __extent_entry_push(e);
+ if (p->has_ec) {
+ p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
+ __extent_entry_insert(k, pos, to_entry(&p->ec));
+ }
}
/*
* bch2_extent_normalize - drop stale cached pointers from @k.
*
* Returns true if @k is now a whiteout and can be dropped entirely.
*/
bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
{
- struct bkey_s_extent e;
+ struct bch_extent_ptr *ptr;
- switch (k.k->type) {
- case KEY_TYPE_ERROR:
- return false;
+ bch2_bkey_drop_ptrs(k, ptr,
+ ptr->cached &&
+ ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));
- case KEY_TYPE_DELETED:
- case KEY_TYPE_COOKIE:
- return true;
+ /* will only happen if all pointers were cached: */
+ if (!bkey_val_u64s(k.k))
+ k.k->type = KEY_TYPE_discard;
- case KEY_TYPE_DISCARD:
- return bversion_zero(k.k->version);
+ return bkey_whiteout(k.k);
+}
- case BCH_EXTENT:
- case BCH_EXTENT_CACHED:
- e = bkey_s_to_extent(k);
+void bch2_bkey_mark_replicas_cached(struct bch_fs *c, struct bkey_s k,
+ unsigned target,
+ unsigned nr_desired_replicas)
+{
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
+ union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ int extra = bch2_bkey_durability(c, k.s_c) - nr_desired_replicas;
- bch2_extent_drop_stale(c, e);
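+ /*
+ * First mark pointers outside of @target as cached, then any
+ * pointer, until durability is down to nr_desired_replicas:
+ */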
+ if (target && extra > 0)
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ int n = bch2_extent_ptr_durability(c, p);
- if (!bkey_val_u64s(e.k)) {
- if (bkey_extent_is_cached(e.k)) {
- k.k->type = KEY_TYPE_DISCARD;
- if (bversion_zero(k.k->version))
- return true;
- } else {
- k.k->type = KEY_TYPE_ERROR;
+ if (n && n <= extra &&
+ !bch2_dev_in_target(c, p.ptr.dev, target)) {
+ entry->ptr.cached = true;
+ extra -= n;
}
}
- return false;
- case BCH_RESERVATION:
- return false;
- default:
- BUG();
- }
-}
-
-void bch2_extent_mark_replicas_cached(struct bch_fs *c,
- struct bkey_s_extent e,
- unsigned nr_cached)
-{
- struct bch_extent_ptr *ptr;
- bool have_higher_tier;
- unsigned tier = 0;
-
- if (!nr_cached)
- return;
-
- do {
- have_higher_tier = false;
+ if (extra > 0)
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ int n = bch2_extent_ptr_durability(c, p);
- extent_for_each_ptr(e, ptr) {
- if (!ptr->cached &&
- PTR_TIER(c, ptr) == tier) {
- ptr->cached = true;
- nr_cached--;
- if (!nr_cached)
- return;
+ if (n && n <= extra) {
+ entry->ptr.cached = true;
+ extra -= n;
}
-
- if (PTR_TIER(c, ptr) > tier)
- have_higher_tier = true;
}
-
- tier++;
- } while (have_higher_tier);
}
-/*
- * This picks a non-stale pointer, preferabbly from a device other than
- * avoid. Avoid can be NULL, meaning pick any. If there are no non-stale
- * pointers to other devices, it will still pick a pointer from avoid.
- * Note that it prefers lowered-numbered pointers to higher-numbered pointers
- * as the pointers are sorted by tier, hence preferring pointers to tier 0
- * rather than pointers to tier 1.
- */
-void bch2_extent_pick_ptr_avoiding(struct bch_fs *c, struct bkey_s_c k,
- struct bch_dev *avoid,
- struct extent_pick_ptr *ret)
+enum merge_result bch2_extent_merge(struct bch_fs *c,
+ struct bkey_s _l, struct bkey_s _r)
{
- struct bkey_s_c_extent e;
- const union bch_extent_crc *crc;
- const struct bch_extent_ptr *ptr;
+ struct bkey_s_extent l = bkey_s_to_extent(_l);
+ struct bkey_s_extent r = bkey_s_to_extent(_r);
+ union bch_extent_entry *en_l = l.v->start;
+ union bch_extent_entry *en_r = r.v->start;
+ struct bch_extent_crc_unpacked crc_l, crc_r;
- switch (k.k->type) {
- case KEY_TYPE_DELETED:
- case KEY_TYPE_DISCARD:
- case KEY_TYPE_COOKIE:
- ret->ca = NULL;
- return;
-
- case KEY_TYPE_ERROR:
- ret->ca = ERR_PTR(-EIO);
- return;
+ if (bkey_val_u64s(l.k) != bkey_val_u64s(r.k))
+ return BCH_MERGE_NOMERGE;
- case BCH_EXTENT:
- case BCH_EXTENT_CACHED:
- e = bkey_s_c_to_extent(k);
- ret->ca = NULL;
+ crc_l = bch2_extent_crc_unpack(l.k, NULL);
- extent_for_each_ptr_crc(e, ptr, crc) {
- struct bch_dev *ca = c->devs[ptr->dev];
+ extent_for_each_entry(l, en_l) {
+ en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);
- if (ptr->cached && ptr_stale(ca, ptr))
- continue;
+ if (extent_entry_type(en_l) != extent_entry_type(en_r))
+ return BCH_MERGE_NOMERGE;
- if (ca->mi.state == BCH_MEMBER_STATE_FAILED)
- continue;
+ switch (extent_entry_type(en_l)) {
+ case BCH_EXTENT_ENTRY_ptr: {
+ const struct bch_extent_ptr *lp = &en_l->ptr;
+ const struct bch_extent_ptr *rp = &en_r->ptr;
+ struct bch_dev *ca;
- if (ret->ca &&
- (ca == avoid ||
- ret->ca->mi.tier < ca->mi.tier))
- continue;
+ if (lp->offset + crc_l.compressed_size != rp->offset ||
+ lp->dev != rp->dev ||
+ lp->gen != rp->gen)
+ return BCH_MERGE_NOMERGE;
- if (!percpu_ref_tryget(&ca->io_ref))
- continue;
+ /* We don't allow extents to straddle buckets: */
+ ca = bch_dev_bkey_exists(c, lp->dev);
- if (ret->ca)
- percpu_ref_put(&ret->ca->io_ref);
+ if (PTR_BUCKET_NR(ca, lp) != PTR_BUCKET_NR(ca, rp))
+ return BCH_MERGE_NOMERGE;
- *ret = (struct extent_pick_ptr) {
- .crc = crc_to_128(e.k, crc),
- .ptr = *ptr,
- .ca = ca,
- };
+ break;
}
+ case BCH_EXTENT_ENTRY_stripe_ptr:
+ if (en_l->stripe_ptr.block != en_r->stripe_ptr.block ||
+ en_l->stripe_ptr.idx != en_r->stripe_ptr.idx)
+ return BCH_MERGE_NOMERGE;
+ break;
+ case BCH_EXTENT_ENTRY_crc32:
+ case BCH_EXTENT_ENTRY_crc64:
+ case BCH_EXTENT_ENTRY_crc128:
+ crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
+ crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
- if (!ret->ca && !bkey_extent_is_cached(e.k))
- ret->ca = ERR_PTR(-EIO);
- return;
-
- case BCH_RESERVATION:
- ret->ca = NULL;
- return;
-
- default:
- BUG();
- }
-}
-
-static enum merge_result bch2_extent_merge(struct bch_fs *c,
- struct btree *bk,
- struct bkey_i *l, struct bkey_i *r)
-{
- struct bkey_s_extent el, er;
- union bch_extent_entry *en_l, *en_r;
-
- if (key_merging_disabled(c))
- return BCH_MERGE_NOMERGE;
-
- /*
- * Generic header checks
- * Assumes left and right are in order
- * Left and right must be exactly aligned
- */
-
- if (l->k.u64s != r->k.u64s ||
- l->k.type != r->k.type ||
- bversion_cmp(l->k.version, r->k.version) ||
- bkey_cmp(l->k.p, bkey_start_pos(&r->k)))
- return BCH_MERGE_NOMERGE;
+ if (crc_l.csum_type != crc_r.csum_type ||
+ crc_l.compression_type != crc_r.compression_type ||
+ crc_l.nonce != crc_r.nonce)
+ return BCH_MERGE_NOMERGE;
- switch (l->k.type) {
- case KEY_TYPE_DELETED:
- case KEY_TYPE_DISCARD:
- case KEY_TYPE_ERROR:
- /* These types are mergeable, and no val to check */
- break;
+ if (crc_l.offset + crc_l.live_size != crc_l.compressed_size ||
+ crc_r.offset)
+ return BCH_MERGE_NOMERGE;
- case BCH_EXTENT:
- case BCH_EXTENT_CACHED:
- el = bkey_i_to_s_extent(l);
- er = bkey_i_to_s_extent(r);
+ if (!bch2_checksum_mergeable(crc_l.csum_type))
+ return BCH_MERGE_NOMERGE;
- extent_for_each_entry(el, en_l) {
- struct bch_extent_ptr *lp, *rp;
- unsigned bucket_size;
+ if (crc_l.compression_type)
+ return BCH_MERGE_NOMERGE;
- en_r = vstruct_idx(er.v, (u64 *) en_l - el.v->_data);
+ if (crc_l.csum_type &&
+ crc_l.uncompressed_size +
+ crc_r.uncompressed_size > c->sb.encoded_extent_max)
+ return BCH_MERGE_NOMERGE;
- if ((extent_entry_type(en_l) !=
- extent_entry_type(en_r)) ||
- extent_entry_is_crc(en_l))
+ if (crc_l.uncompressed_size + crc_r.uncompressed_size - 1 >
+ bch2_crc_field_size_max[extent_entry_type(en_l)])
return BCH_MERGE_NOMERGE;
- lp = &en_l->ptr;
- rp = &en_r->ptr;
+ break;
+ default:
+ return BCH_MERGE_NOMERGE;
+ }
+ }
- if (lp->offset + el.k->size != rp->offset ||
- lp->dev != rp->dev ||
- lp->gen != rp->gen)
- return BCH_MERGE_NOMERGE;
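+ /*
+ * Everything is mergeable - do the merge, combining checksums and
+ * sizes of adjacent crc entries:
+ */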
+ extent_for_each_entry(l, en_l) {
+ struct bch_extent_crc_unpacked crc_l, crc_r;
- /* We don't allow extents to straddle buckets: */
- bucket_size = c->devs[lp->dev]->mi.bucket_size;
+ en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);
- if ((lp->offset & ~((u64) bucket_size - 1)) !=
- (rp->offset & ~((u64) bucket_size - 1)))
- return BCH_MERGE_NOMERGE;
- }
+ if (!extent_entry_is_crc(en_l))
+ continue;
- break;
- case BCH_RESERVATION: {
- struct bkey_i_reservation *li = bkey_i_to_reservation(l);
- struct bkey_i_reservation *ri = bkey_i_to_reservation(r);
+ crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
+ crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
- if (li->v.generation != ri->v.generation ||
- li->v.nr_replicas != ri->v.nr_replicas)
- return BCH_MERGE_NOMERGE;
- break;
- }
- default:
- return BCH_MERGE_NOMERGE;
- }
+ crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
+ crc_l.csum,
+ crc_r.csum,
+ crc_r.uncompressed_size << 9);
- l->k.needs_whiteout |= r->k.needs_whiteout;
+ crc_l.uncompressed_size += crc_r.uncompressed_size;
+ crc_l.compressed_size += crc_r.compressed_size;
- /* Keys with no pointers aren't restricted to one bucket and could
- * overflow KEY_SIZE
- */
- if ((u64) l->k.size + r->k.size > KEY_SIZE_MAX) {
- bch2_key_resize(&l->k, KEY_SIZE_MAX);
- bch2_cut_front(l->k.p, r);
- return BCH_MERGE_PARTIAL;
+ bch2_extent_crc_pack(entry_to_crc(en_l), crc_l);
}
- bch2_key_resize(&l->k, l->k.size + r->k.size);
+ bch2_key_resize(l.k, l.k->size + r.k->size);
return BCH_MERGE_MERGE;
}
-static void extent_i_save(struct btree *b, struct bkey_packed *dst,
- struct bkey_i *src)
+bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
+ unsigned nr_replicas)
{
- struct bkey_format *f = &b->format;
- struct bkey_i *dst_unpacked;
-
- BUG_ON(bkeyp_val_u64s(f, dst) != bkey_val_u64s(&src->k));
-
- /*
- * We don't want the bch2_verify_key_order() call in extent_save(),
- * because we may be out of order with deleted keys that are about to be
- * removed by extent_bset_insert()
- */
+ struct btree_trans trans;
+ struct btree_iter *iter;
+ struct bpos end = pos;
+ struct bkey_s_c k;
+ bool ret = true;
+ int err;
- if ((dst_unpacked = packed_to_bkey(dst)))
- bkey_copy(dst_unpacked, src);
- else
- BUG_ON(!bch2_bkey_pack(dst, src, f));
-}
+ end.offset += size;
-static bool extent_merge_one_overlapping(struct btree_iter *iter,
- struct bpos new_pos,
- struct bset_tree *t,
- struct bkey_packed *k, struct bkey uk,
- bool check, bool could_pack)
-{
- struct btree *b = iter->nodes[0];
- struct btree_node_iter *node_iter = &iter->node_iters[0];
+ bch2_trans_init(&trans, c, 0, 0);
- BUG_ON(!bkey_deleted(k));
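+ /*
+ * Walk each key in [pos, pos + size): the range is only fully
+ * allocated if every key has at least nr_replicas allocated
+ * pointers:
+ */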
+ for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, pos,
+ BTREE_ITER_SLOTS, k, err) {
+ if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
+ break;
- if (check) {
- return !bkey_packed(k) || could_pack;
- } else {
- uk.p = new_pos;
- extent_save(b, node_iter, k, &uk);
- bch2_bset_fix_invalidated_key(b, t, k);
- bch2_btree_node_iter_fix(iter, b, node_iter, t,
- k, k->u64s, k->u64s);
- return true;
+ if (nr_replicas > bch2_bkey_nr_ptrs_allocated(k)) {
+ ret = false;
+ break;
+ }
}
+ bch2_trans_exit(&trans);
+
+ return ret;
}
-static bool extent_merge_do_overlapping(struct btree_iter *iter,
- struct bkey *m, bool back_merge)
+unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
{
- struct btree *b = iter->nodes[0];
- struct btree_node_iter *node_iter = &iter->node_iters[0];
- struct bset_tree *t;
- struct bkey_packed *k;
- struct bkey uk;
- struct bpos new_pos = back_merge ? m->p : bkey_start_pos(m);
- bool could_pack = bkey_pack_pos((void *) &uk, new_pos, b);
- bool check = true;
+ unsigned ret = 0;
- /*
- * @m is the new merged extent:
- *
- * The merge took place in the last bset; we know there can't be any 0
- * size extents overlapping with m there because if so they would have
- * been between the two extents we merged.
- *
- * But in the other bsets, we have to check for and fix such extents:
- */
-do_fixup:
- for_each_bset(b, t) {
- if (t == bset_tree_last(b))
- break;
-
- /*
- * if we don't find this bset in the iterator we already got to
- * the end of that bset, so start searching from the end.
- */
- k = bch2_btree_node_iter_bset_pos(node_iter, b, t);
-
- if (k == btree_bkey_last(b, t))
- k = bch2_bkey_prev_all(b, t, k);
- if (!k)
- continue;
+ switch (k.k->type) {
+ case KEY_TYPE_extent: {
+ struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
- if (back_merge) {
- /*
- * Back merge: 0 size extents will be before the key
- * that was just inserted (and thus the iterator
- * position) - walk backwards to find them
- */
- for (;
- k &&
- (uk = bkey_unpack_key(b, k),
- bkey_cmp(uk.p, bkey_start_pos(m)) > 0);
- k = bch2_bkey_prev_all(b, t, k)) {
- if (bkey_cmp(uk.p, m->p) >= 0)
- continue;
-
- if (!extent_merge_one_overlapping(iter, new_pos,
- t, k, uk, check, could_pack))
- return false;
- }
- } else {
- /* Front merge - walk forwards */
- for (;
- k != btree_bkey_last(b, t) &&
- (uk = bkey_unpack_key(b, k),
- bkey_cmp(uk.p, m->p) < 0);
- k = bkey_next(k)) {
- if (bkey_cmp(uk.p,
- bkey_start_pos(m)) <= 0)
- continue;
-
- if (!extent_merge_one_overlapping(iter, new_pos,
- t, k, uk, check, could_pack))
- return false;
- }
- }
+ extent_for_each_ptr_decode(e, p, entry)
+ ret += !p.ptr.cached &&
+ p.crc.compression_type == BCH_COMPRESSION_NONE;
+ break;
}
-
- if (check) {
- check = false;
- goto do_fixup;
+ case KEY_TYPE_reservation:
+ ret = bkey_s_c_to_reservation(k).v->nr_replicas;
+ break;
}
- return true;
+ return ret;
}
-/*
- * When merging an extent that we're inserting into a btree node, the new merged
- * extent could overlap with an existing 0 size extent - if we don't fix that,
- * it'll break the btree node iterator so this code finds those 0 size extents
- * and shifts them out of the way.
- *
- * Also unpacks and repacks.
- */
-static bool bch2_extent_merge_inline(struct bch_fs *c,
- struct btree_iter *iter,
- struct bkey_packed *l,
- struct bkey_packed *r,
- bool back_merge)
-{
- struct btree *b = iter->nodes[0];
- struct btree_node_iter *node_iter = &iter->node_iters[0];
- const struct bkey_format *f = &b->format;
- struct bset_tree *t = bset_tree_last(b);
- struct bkey_packed *m;
- BKEY_PADDED(k) li;
- BKEY_PADDED(k) ri;
- struct bkey_i *mi;
- struct bkey tmp;
-
- /*
- * We need to save copies of both l and r, because we might get a
- * partial merge (which modifies both) and then fails to repack
- */
- bch2_bkey_unpack(b, &li.k, l);
- bch2_bkey_unpack(b, &ri.k, r);
+/* KEY_TYPE_reservation: */
- m = back_merge ? l : r;
- mi = back_merge ? &li.k : &ri.k;
+const char *bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k)
+{
+ struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
- /* l & r should be in last bset: */
- EBUG_ON(bch2_bkey_to_bset(b, m) != t);
+ if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
+ return "incorrect value size";
- switch (bch2_extent_merge(c, b, &li.k, &ri.k)) {
- case BCH_MERGE_NOMERGE:
- return false;
- case BCH_MERGE_PARTIAL:
- if (bkey_packed(m) && !bch2_bkey_pack_key((void *) &tmp, &mi->k, f))
- return false;
+ if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
+ return "invalid nr_replicas";
- if (!extent_merge_do_overlapping(iter, &li.k.k, back_merge))
- return false;
+ return NULL;
+}
- extent_i_save(b, m, mi);
- bch2_bset_fix_invalidated_key(b, t, m);
+void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
- /*
- * Update iterator to reflect what we just inserted - otherwise,
- * the iter_fix() call is going to put us _before_ the key we
- * just partially merged with:
- */
- if (back_merge)
- bch2_btree_iter_set_pos_same_leaf(iter, li.k.k.p);
+ pr_buf(out, "generation %u replicas %u",
+ le32_to_cpu(r.v->generation),
+ r.v->nr_replicas);
+}
- bch2_btree_node_iter_fix(iter, iter->nodes[0], node_iter,
- t, m, m->u64s, m->u64s);
+enum merge_result bch2_reservation_merge(struct bch_fs *c,
+ struct bkey_s _l, struct bkey_s _r)
+{
+ struct bkey_s_reservation l = bkey_s_to_reservation(_l);
+ struct bkey_s_reservation r = bkey_s_to_reservation(_r);
- if (!back_merge)
- bkey_copy(packed_to_bkey(l), &li.k);
- else
- bkey_copy(packed_to_bkey(r), &ri.k);
- return false;
- case BCH_MERGE_MERGE:
- if (bkey_packed(m) && !bch2_bkey_pack_key((void *) &tmp, &li.k.k, f))
- return false;
+ if (l.v->generation != r.v->generation ||
+ l.v->nr_replicas != r.v->nr_replicas)
+ return BCH_MERGE_NOMERGE;
- if (!extent_merge_do_overlapping(iter, &li.k.k, back_merge))
- return false;
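+ /*
+ * Reservations have no pointers and aren't limited by bucket size,
+ * so the merged key could overflow the max key size:
+ */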
+ if ((u64) l.k->size + r.k->size > KEY_SIZE_MAX) {
+ bch2_key_resize(l.k, KEY_SIZE_MAX);
+ __bch2_cut_front(l.k->p, r.s);
+ return BCH_MERGE_PARTIAL;
+ }
- extent_i_save(b, m, &li.k);
- bch2_bset_fix_invalidated_key(b, t, m);
+ bch2_key_resize(l.k, l.k->size + r.k->size);
- bch2_btree_node_iter_fix(iter, iter->nodes[0], node_iter,
- t, m, m->u64s, m->u64s);
- return true;
- default:
- BUG();
- }
+ return BCH_MERGE_MERGE;
}
-
-const struct bkey_ops bch2_bkey_extent_ops = {
- .key_invalid = bch2_extent_invalid,
- .key_debugcheck = bch2_extent_debugcheck,
- .val_to_text = bch2_extent_to_text,
- .swab = bch2_ptr_swab,
- .key_normalize = bch2_ptr_normalize,
- .key_merge = bch2_extent_merge,
- .is_extents = true,
-};