void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
struct bch_alloc_v4 _a;
- const struct bch_alloc_v4 *a = &_a;
+ const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
const struct bch_backpointer *bps;
unsigned i;
- if (k.k->type == KEY_TYPE_alloc_v4)
- a = bkey_s_c_to_alloc_v4(k).v;
- else
- bch2_alloc_to_v4(k, &_a);
-
prt_newline(out);
printbuf_indent_add(out, 2);
printbuf_indent_sub(out, 4);
}
-void bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
+void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
{
if (k.k->type == KEY_TYPE_alloc_v4) {
int d;
* Not sketchy at doing it this way, nope...
*/
struct bkey_i_alloc_v4 *ret =
- bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(struct bch_backpointer));
- if (!IS_ERR(ret))
+ bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k) + sizeof(struct bch_backpointer));
+ if (!IS_ERR(ret)) {
bkey_reassemble(&ret->k_i, k);
+ memset((void *) ret + bkey_bytes(k.k), 0, sizeof(struct bch_backpointer));
+ }
return ret;
}
continue;
ca = bch_dev_bkey_exists(c, k.k->p.inode);
- bch2_alloc_to_v4(k, &a);
- *bucket_gen(ca, k.k->p.offset) = a.gen;
+ *bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
}
bch2_trans_iter_exit(&trans, &iter);
a->data_type != BCH_DATA_need_discard)
return 0;
- k = bch2_trans_kmalloc(trans, sizeof(*k));
+ k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k));
if (IS_ERR(k))
return PTR_ERR(k);
unsigned flags)
{
struct bch_fs *c = trans->c;
- struct bch_alloc_v4 old_a, *new_a;
+ struct bch_alloc_v4 old_a_convert, *new_a;
+ const struct bch_alloc_v4 *old_a;
u64 old_lru, new_lru;
int ret = 0;
*/
BUG_ON(new->k.type != KEY_TYPE_alloc_v4);
- bch2_alloc_to_v4(old, &old_a);
+ old_a = bch2_alloc_to_v4(old, &old_a_convert);
new_a = &bkey_i_to_alloc_v4(new)->v;
new_a->data_type = alloc_data_type(*new_a, new_a->data_type);
- if (new_a->dirty_sectors > old_a.dirty_sectors ||
- new_a->cached_sectors > old_a.cached_sectors) {
+ if (new_a->dirty_sectors > old_a->dirty_sectors ||
+ new_a->cached_sectors > old_a->cached_sectors) {
new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
new_a->io_time[WRITE] = max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
}
- if (old_a.data_type != new_a->data_type ||
+ if (old_a->data_type != new_a->data_type ||
(new_a->data_type == BCH_DATA_free &&
- alloc_freespace_genbits(old_a) != alloc_freespace_genbits(*new_a))) {
- ret = bch2_bucket_do_index(trans, old, &old_a, false) ?:
+ alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
+ ret = bch2_bucket_do_index(trans, old, old_a, false) ?:
bch2_bucket_do_index(trans, bkey_i_to_s_c(new), new_a, true);
if (ret)
return ret;
!new_a->io_time[READ])
new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
- old_lru = alloc_lru_idx(old_a);
+ old_lru = alloc_lru_idx(*old_a);
new_lru = alloc_lru_idx(*new_a);
if (old_lru != new_lru) {
{
struct bch_fs *c = trans->c;
struct bch_dev *ca;
- struct bch_alloc_v4 a;
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a;
unsigned discard_key_type, freespace_key_type;
struct bkey_s_c alloc_k, k;
struct printbuf buf = PRINTBUF;
if (!ca->mi.freespace_initialized)
return 0;
- bch2_alloc_to_v4(alloc_k, &a);
+ a = bch2_alloc_to_v4(alloc_k, &a_convert);
- discard_key_type = a.data_type == BCH_DATA_need_discard
+ discard_key_type = a->data_type == BCH_DATA_need_discard
? KEY_TYPE_set : 0;
- freespace_key_type = a.data_type == BCH_DATA_free
+ freespace_key_type = a->data_type == BCH_DATA_free
? KEY_TYPE_set : 0;
bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
- bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, a));
+ bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
k = bch2_btree_iter_peek_slot(discard_iter);
ret = bkey_err(k);
struct bch_fs *c = trans->c;
struct btree_iter alloc_iter;
struct bkey_s_c alloc_k;
- struct bch_alloc_v4 a;
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a;
u64 genbits;
struct bpos pos;
enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
if (ret)
goto err;
- bch2_alloc_to_v4(alloc_k, &a);
+ a = bch2_alloc_to_v4(alloc_k, &a_convert);
- if (fsck_err_on(a.data_type != state ||
+ if (fsck_err_on(a->data_type != state ||
(state == BCH_DATA_free &&
- genbits != alloc_freespace_genbits(a)), c,
+ genbits != alloc_freespace_genbits(*a)), c,
"%s\n incorrectly set in %s index (free %u, genbits %llu should be %llu)",
(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
bch2_btree_ids[iter->btree_id],
- a.data_type == state,
- genbits >> 56, alloc_freespace_genbits(a) >> 56))
+ a->data_type == state,
+ genbits >> 56, alloc_freespace_genbits(*a) >> 56))
goto delete;
out:
err:
{
struct bch_fs *c = trans->c;
struct btree_iter lru_iter;
- struct bch_alloc_v4 a;
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a;
struct bkey_s_c alloc_k, k;
struct printbuf buf = PRINTBUF;
struct printbuf buf2 = PRINTBUF;
if (ret)
return ret;
- bch2_alloc_to_v4(alloc_k, &a);
+ a = bch2_alloc_to_v4(alloc_k, &a_convert);
- if (a.data_type != BCH_DATA_cached)
+ if (a->data_type != BCH_DATA_cached)
return 0;
bch2_trans_iter_init(trans, &lru_iter, BTREE_ID_lru,
- POS(alloc_k.k->p.inode, a.io_time[READ]), 0);
+ POS(alloc_k.k->p.inode, a->io_time[READ]), 0);
k = bch2_btree_iter_peek_slot(&lru_iter);
ret = bkey_err(k);
if (ret)
goto err;
- if (fsck_err_on(!a.io_time[READ], c,
+ if (fsck_err_on(!a->io_time[READ], c,
"cached bucket with read_time 0\n"
" %s",
(printbuf_reset(&buf),
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
(bch2_bkey_val_to_text(&buf2, c, k), buf2.buf))) {
- u64 read_time = a.io_time[READ];
-
- if (!a.io_time[READ])
- a.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
+ u64 read_time = a->io_time[READ] ?:
+ atomic64_read(&c->io_clock[READ].now);
ret = bch2_lru_set(trans,
alloc_k.k->p.inode,
alloc_k.k->p.offset,
- &a.io_time[READ]);
+ &read_time);
if (ret)
goto err;
- if (a.io_time[READ] != read_time) {
+ if (a->io_time[READ] != read_time) {
struct bkey_i_alloc_v4 *a_mut =
bch2_alloc_to_v4_mut(trans, alloc_k);
ret = PTR_ERR_OR_ZERO(a_mut);
if (ret)
goto err;
- a_mut->v.io_time[READ] = a.io_time[READ];
+ a_mut->v.io_time[READ] = read_time;
ret = bch2_trans_update(trans, alloc_iter,
&a_mut->k_i, BTREE_TRIGGER_NORUN);
if (ret)
percpu_ref_put(&c->writes);
}
-static int bucket_freespace_init(struct btree_trans *trans, struct btree_iter *iter,
- struct bkey_s_c k, struct bch_dev *ca)
-{
- struct bch_alloc_v4 a;
-
- if (iter->pos.offset >= ca->mi.nbuckets)
- return 1;
-
- bch2_alloc_to_v4(k, &a);
- return bch2_bucket_do_index(trans, k, &a, true);
-}
-
static int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca)
{
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
+ struct bpos end = POS(ca->dev_idx, ca->mi.nbuckets);
struct bch_member *m;
int ret;
bch2_trans_init(&trans, c, 0, 0);
- ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc,
- POS(ca->dev_idx, ca->mi.first_bucket),
- BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k,
- NULL, NULL, BTREE_INSERT_LAZY_RW,
- bucket_freespace_init(&trans, &iter, k, ca));
+ bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc,
+ POS(ca->dev_idx, ca->mi.first_bucket),
+ BTREE_ITER_PREFETCH);
+ /*
+ * Scan the alloc btree for every bucket on @ca, and add buckets to the
+ * freespace/need_discard/need_gc_gens btrees as needed:
+ */
+ while (1) {
+ bch2_trans_begin(&trans);
+ ret = 0;
+
+ if (bkey_ge(iter.pos, end))
+ break;
+ k = bch2_btree_iter_peek_slot(&iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto bkey_err;
+
+ if (k.k->type) {
+ /*
+ * We process live keys in the alloc btree one at a
+ * time:
+ */
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
+
+ ret = bch2_bucket_do_index(&trans, k, a, true) ?:
+ bch2_trans_commit(&trans, NULL, NULL,
+ BTREE_INSERT_LAZY_RW|
+ BTREE_INSERT_NOFAIL);
+ if (ret)
+ goto bkey_err;
+
+ bch2_btree_iter_advance(&iter);
+ } else {
+ /*
+ * When there's a hole, process a whole range of keys
+ * all at once:
+ *
+ * This is similar to how extent btree iterators in
+ * slots mode will synthesize a whole range - a
+ * KEY_TYPE_deleted extent.
+ *
+ * But alloc keys aren't extents (they have zero size),
+ * so we're open coding it here:
+ */
+ struct btree_iter iter2;
+ struct bkey_i *freespace;
+ struct bpos next;
+
+ bch2_trans_copy_iter(&iter2, &iter);
+ k = bch2_btree_iter_peek_upto(&iter2,
+ bkey_min(bkey_min(end,
+ iter.path->l[0].b->key.k.p),
+ POS(iter.pos.inode, iter.pos.offset + U32_MAX - 1)));
+ next = iter2.pos;
+ ret = bkey_err(k);
+ bch2_trans_iter_exit(&trans, &iter2);
+
+ BUG_ON(next.offset >= iter.pos.offset + U32_MAX);
+
+ if (ret)
+ goto bkey_err;
+
+ freespace = bch2_trans_kmalloc(&trans, sizeof(*freespace));
+ ret = PTR_ERR_OR_ZERO(freespace);
+ if (ret)
+ goto bkey_err;
+
+ bkey_init(&freespace->k);
+ freespace->k.type = KEY_TYPE_set;
+ freespace->k.p = iter.pos;
+
+ bch2_key_resize(&freespace->k, next.offset - iter.pos.offset);
+
+ ret = __bch2_btree_insert(&trans, BTREE_ID_freespace, freespace) ?:
+ bch2_trans_commit(&trans, NULL, NULL,
+ BTREE_INSERT_LAZY_RW|
+ BTREE_INSERT_NOFAIL);
+ if (ret)
+ goto bkey_err;
+
+ bch2_btree_iter_set_pos(&iter, next);
+ }
+bkey_err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ continue;
+ if (ret)
+ break;
+ }
+
+ bch2_trans_iter_exit(&trans, &iter);
bch2_trans_exit(&trans);
if (ret < 0) {
struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update(struct btree_trans *, struct btree_iter *, struct bpos);
-void bch2_alloc_to_v4(struct bkey_s_c, struct bch_alloc_v4 *);
+void __bch2_alloc_to_v4(struct bkey_s_c, struct bch_alloc_v4 *);
+
+static inline const struct bch_alloc_v4 *bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *convert)
+{
+ const struct bch_alloc_v4 *ret;
+
+ if (unlikely(k.k->type != KEY_TYPE_alloc_v4))
+ goto slowpath;
+
+ ret = bkey_s_c_to_alloc_v4(k).v;
+ if (BCH_ALLOC_V4_BACKPOINTERS_START(ret) != BCH_ALLOC_V4_U64s)
+ goto slowpath;
+
+ return ret;
+slowpath:
+ __bch2_alloc_to_v4(k, convert);
+ return convert;
+}
+
struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *, struct bkey_s_c);
int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int);
static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
u64 bucket,
enum alloc_reserve reserve,
- struct bch_alloc_v4 *a,
+ const struct bch_alloc_v4 *a,
struct bucket_alloc_state *s,
struct closure *cl)
{
struct btree_iter iter = { NULL };
struct bkey_s_c k;
struct open_bucket *ob;
- struct bch_alloc_v4 a;
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a;
u64 b = free_entry & ~(~0ULL << 56);
unsigned genbits = free_entry >> 56;
struct printbuf buf = PRINTBUF;
goto err;
}
- bch2_alloc_to_v4(k, &a);
+ a = bch2_alloc_to_v4(k, &a_convert);
- if (genbits != (alloc_freespace_genbits(a) >> 56)) {
+ if (genbits != (alloc_freespace_genbits(*a) >> 56)) {
prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
" freespace key ",
- genbits, alloc_freespace_genbits(a) >> 56);
+ genbits, alloc_freespace_genbits(*a) >> 56);
bch2_bkey_val_to_text(&buf, c, freespace_k);
prt_printf(&buf, "\n ");
bch2_bkey_val_to_text(&buf, c, k);
}
- if (a.data_type != BCH_DATA_free) {
+ if (a->data_type != BCH_DATA_free) {
prt_printf(&buf, "non free bucket in freespace btree\n"
" freespace key ");
bch2_bkey_val_to_text(&buf, c, freespace_k);
}
}
- ob = __try_alloc_bucket(c, ca, b, reserve, &a, s, cl);
+ ob = __try_alloc_bucket(c, ca, b, reserve, a, s, cl);
if (!ob)
iter.path->preserve = false;
err:
for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, s->cur_bucket),
BTREE_ITER_SLOTS, k, ret) {
- struct bch_alloc_v4 a;
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a;
if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
break;
is_superblock_bucket(ca, k.k->p.offset))
continue;
- bch2_alloc_to_v4(k, &a);
+ a = bch2_alloc_to_v4(k, &a_convert);
- if (a.data_type != BCH_DATA_free)
+ if (a->data_type != BCH_DATA_free)
continue;
s->buckets_seen++;
- ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, &a, s, cl);
+ ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, a, s, cl);
if (ob)
break;
}
#define x(id, field) if (!set_inc_field(&state, id, in->field)) return false;
bkey_fields()
#undef x
-
- /*
- * Extents - we have to guarantee that if an extent is packed, a trimmed
- * version will also pack:
- */
- if (bkey_start_offset(in) <
- le64_to_cpu(format->field_offset[BKEY_FIELD_OFFSET]))
- return false;
-
pack_state_finish(&state, out);
out->u64s = format->key_u64s + in->u64s - BKEY_U64s;
out->format = KEY_FORMAT_LOCAL_BTREE;
#define x(id, field) __bkey_format_add(s, id, k->field);
bkey_fields()
#undef x
- __bkey_format_add(s, BKEY_FIELD_OFFSET, bkey_start_offset(k));
}
void bch2_bkey_format_add_pos(struct bkey_format_state *, struct bpos);
struct bch_dev *ca = bch_dev_bkey_exists(c, iter->pos.inode);
struct bucket gc, *b;
struct bkey_i_alloc_v4 *a;
- struct bch_alloc_v4 old, new;
+ struct bch_alloc_v4 old_convert, new;
+ const struct bch_alloc_v4 *old;
enum bch_data_type type;
int ret;
if (bkey_ge(iter->pos, POS(ca->dev_idx, ca->mi.nbuckets)))
return 1;
- bch2_alloc_to_v4(k, &old);
- new = old;
+ old = bch2_alloc_to_v4(k, &old_convert);
+ new = *old;
percpu_down_read(&c->mark_lock);
b = gc_bucket(ca, iter->pos.offset);
type = __alloc_data_type(b->dirty_sectors,
b->cached_sectors,
b->stripe,
- old,
+ *old,
b->data_type);
if (b->data_type != type) {
struct bch_dev_usage *u;
gc.data_type != BCH_DATA_btree)
return 0;
- if (gen_after(old.gen, gc.gen))
+ if (gen_after(old->gen, gc.gen))
return 0;
#define copy_bucket_field(_f) \
copy_bucket_field(stripe);
#undef copy_bucket_field
- if (!bch2_alloc_v4_cmp(old, new))
+ if (!bch2_alloc_v4_cmp(*old, new))
return 0;
a = bch2_alloc_to_v4_mut(trans, k);
struct btree_iter iter;
struct bkey_s_c k;
struct bucket *g;
- struct bch_alloc_v4 a;
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a;
unsigned i;
int ret;
ca = bch_dev_bkey_exists(c, k.k->p.inode);
g = gc_bucket(ca, k.k->p.offset);
- bch2_alloc_to_v4(k, &a);
+ a = bch2_alloc_to_v4(k, &a_convert);
g->gen_valid = 1;
- g->gen = a.gen;
+ g->gen = a->gen;
if (metadata_only &&
- (a.data_type == BCH_DATA_user ||
- a.data_type == BCH_DATA_cached ||
- a.data_type == BCH_DATA_parity)) {
- g->data_type = a.data_type;
- g->dirty_sectors = a.dirty_sectors;
- g->cached_sectors = a.cached_sectors;
- g->stripe = a.stripe;
- g->stripe_redundancy = a.stripe_redundancy;
+ (a->data_type == BCH_DATA_user ||
+ a->data_type == BCH_DATA_cached ||
+ a->data_type == BCH_DATA_parity)) {
+ g->data_type = a->data_type;
+ g->dirty_sectors = a->dirty_sectors;
+ g->cached_sectors = a->cached_sectors;
+ g->stripe = a->stripe;
+ g->stripe_redundancy = a->stripe_redundancy;
}
}
bch2_trans_iter_exit(&trans, &iter);
" should be %u",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf),
r->refcount)) {
- struct bkey_i *new;
+ struct bkey_i *new = bch2_bkey_make_mut(trans, k);
- new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
ret = PTR_ERR_OR_ZERO(new);
if (ret)
return ret;
- bkey_reassemble(new, k);
-
if (!r->refcount)
new->k.type = KEY_TYPE_deleted;
else
percpu_up_read(&c->mark_lock);
return 0;
update:
- u = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+ u = bch2_bkey_make_mut(trans, k);
ret = PTR_ERR_OR_ZERO(u);
if (ret)
return ret;
- bkey_reassemble(u, k);
-
bch2_extent_normalize(c, bkey_i_to_s(u));
return bch2_trans_update(trans, iter, u, 0);
}
struct bkey_s_c k)
{
struct bch_dev *ca = bch_dev_bkey_exists(trans->c, iter->pos.inode);
- struct bch_alloc_v4 a;
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
struct bkey_i_alloc_v4 *a_mut;
int ret;
- bch2_alloc_to_v4(k, &a);
-
- if (a.oldest_gen == ca->oldest_gen[iter->pos.offset])
+ if (a->oldest_gen == ca->oldest_gen[iter->pos.offset])
return 0;
a_mut = bch2_alloc_to_v4_mut(trans, k);
int ret;
EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS);
+ EBUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) && bkey_eq(end, POS_MAX));
if (iter->update_path) {
bch2_path_put_nokeep(trans, iter->update_path,
while (1) {
k = __bch2_btree_iter_peek(iter, search_key);
- if (!k.k || bkey_err(k))
+ if (unlikely(!k.k))
+ goto end;
+ if (unlikely(bkey_err(k)))
goto out_no_locked;
/*
else
iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));
- if (bkey_gt(iter_pos, end)) {
- bch2_btree_iter_set_pos(iter, end);
- k = bkey_s_c_null;
- goto out_no_locked;
- }
+ if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS)
+ ? bkey_gt(iter_pos, end)
+ : bkey_ge(iter_pos, end)))
+ goto end;
if (iter->update_path &&
!bkey_eq(iter->update_path->pos, k.k->p)) {
bch2_btree_iter_verify_entry_exit(iter);
return k;
+end:
+ bch2_btree_iter_set_pos(iter, end);
+ k = bkey_s_c_null;
+ goto out_no_locked;
}
/**
goto out_no_locked;
} else {
struct bpos next;
+ struct bpos end = iter->pos;
+
+ if (iter->flags & BTREE_ITER_IS_EXTENTS)
+ end.offset = U64_MAX;
EBUG_ON(iter->path->level);
if (iter->flags & BTREE_ITER_INTENT) {
struct btree_iter iter2;
- struct bpos end = iter->pos;
-
- if (iter->flags & BTREE_ITER_IS_EXTENTS)
- end.offset = U64_MAX;
bch2_trans_copy_iter(&iter2, iter);
k = bch2_btree_iter_peek_upto(&iter2, end);
} else {
struct bpos pos = iter->pos;
- k = bch2_btree_iter_peek(iter);
+ k = bch2_btree_iter_peek_upto(iter, end);
if (unlikely(bkey_err(k)))
bch2_btree_iter_set_pos(iter, pos);
else
static inline void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
- unsigned new_top = trans->mem_top + size;
- void *p = trans->mem + trans->mem_top;
+ size = roundup(size, 8);
+
+ if (likely(trans->mem_top + size <= trans->mem_bytes)) {
+ void *p = trans->mem + trans->mem_top;
- if (likely(new_top <= trans->mem_bytes)) {
trans->mem_top += size;
memset(p, 0, size);
return p;
} else {
return __bch2_trans_kmalloc(trans, size);
+ }
+}
+
+static inline void *bch2_trans_kmalloc_nomemzero(struct btree_trans *trans, size_t size)
+{
+ size = roundup(size, 8);
+
+ if (likely(trans->mem_top + size <= trans->mem_bytes)) {
+ void *p = trans->mem + trans->mem_top;
+ trans->mem_top += size;
+ return p;
+ } else {
+ return __bch2_trans_kmalloc(trans, size);
}
}
+static inline struct bkey_i *bch2_bkey_make_mut(struct btree_trans *trans, struct bkey_s_c k)
+{
+ struct bkey_i *mut = bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k));
+
+ if (!IS_ERR(mut))
+ bkey_reassemble(mut, k);
+ return mut;
+}
+
+static inline struct bkey_i *bch2_bkey_get_mut(struct btree_trans *trans,
+ struct btree_iter *iter)
+{
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
+
+ return unlikely(IS_ERR(k.k))
+ ? ERR_CAST(k.k)
+ : bch2_bkey_make_mut(trans, k);
+}
+
+#define bch2_bkey_get_mut_typed(_trans, _iter, _type) \
+({ \
+ struct bkey_i *_k = bch2_bkey_get_mut(_trans, _iter); \
+ struct bkey_i_##_type *_ret; \
+ \
+ if (IS_ERR(_k)) \
+ _ret = ERR_CAST(_k); \
+ else if (unlikely(_k->k.type != KEY_TYPE_##_type)) \
+ _ret = ERR_PTR(-ENOENT); \
+ else \
+ _ret = bkey_i_to_##_type(_k); \
+ _ret; \
+})
+
+#define bch2_bkey_alloc(_trans, _iter, _type) \
+({ \
+ struct bkey_i_##_type *_k = bch2_trans_kmalloc(_trans, sizeof(*_k));\
+ if (!IS_ERR(_k)) { \
+ bkey_##_type##_init(&_k->k_i); \
+ _k->k.p = (_iter)->pos; \
+ } \
+ _k; \
+})
+
u32 bch2_trans_begin(struct btree_trans *);
static inline struct btree *
_ret; \
})
+#define for_each_btree_key2_upto(_trans, _iter, _btree_id, \
+ _start, _end, _flags, _k, _do) \
+({ \
+ int _ret = 0; \
+ \
+ bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
+ (_start), (_flags)); \
+ \
+ while (1) { \
+ u32 _restart_count = bch2_trans_begin(_trans); \
+ \
+ _ret = 0; \
+ (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, (_flags));\
+ if (!(_k).k) \
+ break; \
+ \
+ _ret = bkey_err(_k) ?: (_do); \
+ if (bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
+ continue; \
+ if (_ret) \
+ break; \
+ bch2_trans_verify_not_restarted(_trans, _restart_count);\
+ if (!bch2_btree_iter_advance(&(_iter))) \
+ break; \
+ } \
+ \
+ bch2_trans_iter_exit((_trans), &(_iter)); \
+ _ret; \
+})
+
#define for_each_btree_key_reverse(_trans, _iter, _btree_id, \
_start, _flags, _k, _do) \
({ \
(_do) ?: bch2_trans_commit(_trans, (_disk_res),\
(_journal_seq), (_commit_flags)))
+#define for_each_btree_key_upto_commit(_trans, _iter, _btree_id, \
+ _start, _end, _iter_flags, _k, \
+ _disk_res, _journal_seq, _commit_flags,\
+ _do) \
+ for_each_btree_key2_upto(_trans, _iter, _btree_id, _start, _end, _iter_flags, _k,\
+ (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
+ (_journal_seq), (_commit_flags)))
+
#define for_each_btree_key(_trans, _iter, _btree_id, \
_start, _flags, _k, _ret) \
for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
!((_ret) = bkey_err(_k)) && (_k).k; \
bch2_btree_iter_advance(&(_iter)))
+#define for_each_btree_key_upto_continue_norestart(_iter, _end, _flags, _k, _ret)\
+ for (; \
+ (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags), \
+ !((_ret) = bkey_err(_k)) && (_k).k; \
+ bch2_btree_iter_advance(&(_iter)))
+
/* new multiple iterator interface: */
void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
if (ret)
goto nomerge1;
- update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+ update = bch2_bkey_make_mut(trans, k);
if ((ret = PTR_ERR_OR_ZERO(update)))
goto err;
- bkey_reassemble(update, k);
-
if (bch2_bkey_merge(c, bkey_i_to_s(update), bkey_i_to_s_c(insert))) {
ret = bch2_btree_delete_at(trans, &iter, flags);
if (ret)
trans->extra_journal_res += compressed_sectors;
if (front_split) {
- update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+ update = bch2_bkey_make_mut(trans, k);
if ((ret = PTR_ERR_OR_ZERO(update)))
goto err;
- bkey_reassemble(update, k);
-
bch2_cut_back(start, update);
bch2_trans_iter_init(trans, &update_iter, btree_id, update->k.p,
if (k.k->p.snapshot != insert->k.p.snapshot &&
(front_split || back_split)) {
- update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+ update = bch2_bkey_make_mut(trans, k);
if ((ret = PTR_ERR_OR_ZERO(update)))
goto err;
- bkey_reassemble(update, k);
-
bch2_cut_front(start, update);
bch2_cut_back(insert->k.p, update);
}
if (back_split) {
- update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+ update = bch2_bkey_make_mut(trans, k);
if ((ret = PTR_ERR_OR_ZERO(update)))
goto err;
- bkey_reassemble(update, k);
bch2_cut_front(insert->k.p, update);
ret = bch2_trans_update_by_path(trans, iter.path, update,
int ret = 0;
bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_INTENT);
- while ((k = bch2_btree_iter_peek(&iter)).k) {
+ while ((k = bch2_btree_iter_peek_upto(&iter, end)).k) {
struct disk_reservation disk_res =
bch2_disk_reservation_init(trans->c, 0);
struct bkey_i delete;
if (ret)
goto err;
- if (bkey_ge(iter.pos, end))
- break;
-
bkey_init(&delete.k);
/*
{
bool gc = flags & BTREE_TRIGGER_GC;
u64 journal_seq = trans->journal_res.seq;
+ u64 bucket_journal_seq;
struct bch_fs *c = trans->c;
- struct bch_alloc_v4 old_a, new_a;
+ struct bch_alloc_v4 old_a_convert, new_a_convert;
+ const struct bch_alloc_v4 *old_a, *new_a;
struct bch_dev *ca;
int ret = 0;
ca = bch_dev_bkey_exists(c, new.k->p.inode);
- bch2_alloc_to_v4(old, &old_a);
- bch2_alloc_to_v4(new, &new_a);
+ old_a = bch2_alloc_to_v4(old, &old_a_convert);
+ new_a = bch2_alloc_to_v4(new, &new_a_convert);
+
+ bucket_journal_seq = new_a->journal_seq;
if ((flags & BTREE_TRIGGER_INSERT) &&
- data_type_is_empty(old_a.data_type) !=
- data_type_is_empty(new_a.data_type) &&
+ data_type_is_empty(old_a->data_type) !=
+ data_type_is_empty(new_a->data_type) &&
new.k->type == KEY_TYPE_alloc_v4) {
struct bch_alloc_v4 *v = (struct bch_alloc_v4 *) new.v;
- BUG_ON(!journal_seq);
+ EBUG_ON(!journal_seq);
/*
* If the btree updates referring to a bucket weren't flushed
* before the bucket became empty again, then we don't have
* to wait on a journal flush before we can reuse the bucket:
*/
- new_a.journal_seq = data_type_is_empty(new_a.data_type) &&
+ v->journal_seq = bucket_journal_seq =
+ data_type_is_empty(new_a->data_type) &&
(journal_seq == v->journal_seq ||
bch2_journal_noflush_seq(&c->journal, v->journal_seq))
? 0 : journal_seq;
- v->journal_seq = new_a.journal_seq;
}
- if (!data_type_is_empty(old_a.data_type) &&
- data_type_is_empty(new_a.data_type) &&
- new_a.journal_seq) {
+ if (!data_type_is_empty(old_a->data_type) &&
+ data_type_is_empty(new_a->data_type) &&
+ bucket_journal_seq) {
ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
c->journal.flushed_seq_ondisk,
new.k->p.inode, new.k->p.offset,
- new_a.journal_seq);
+ bucket_journal_seq);
if (ret) {
bch2_fs_fatal_error(c,
"error setting bucket_needs_journal_commit: %i", ret);
}
percpu_down_read(&c->mark_lock);
- if (!gc && new_a.gen != old_a.gen)
- *bucket_gen(ca, new.k->p.offset) = new_a.gen;
+ if (!gc && new_a->gen != old_a->gen)
+ *bucket_gen(ca, new.k->p.offset) = new_a->gen;
- bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);
+ bch2_dev_usage_update(c, ca, *old_a, *new_a, journal_seq, gc);
if (gc) {
struct bucket *g = gc_bucket(ca, new.k->p.offset);
bucket_lock(g);
g->gen_valid = 1;
- g->gen = new_a.gen;
- g->data_type = new_a.data_type;
- g->stripe = new_a.stripe;
- g->stripe_redundancy = new_a.stripe_redundancy;
- g->dirty_sectors = new_a.dirty_sectors;
- g->cached_sectors = new_a.cached_sectors;
+ g->gen = new_a->gen;
+ g->data_type = new_a->data_type;
+ g->stripe = new_a->stripe;
+ g->stripe_redundancy = new_a->stripe_redundancy;
+ g->dirty_sectors = new_a->dirty_sectors;
+ g->cached_sectors = new_a->cached_sectors;
bucket_unlock(g);
}
*/
if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
- old_a.cached_sectors) {
+ old_a->cached_sectors) {
ret = update_cached_sectors(c, new, ca->dev_idx,
- -((s64) old_a.cached_sectors),
+ -((s64) old_a->cached_sectors),
journal_seq, gc);
if (ret) {
bch2_fs_fatal_error(c, "%s(): no replicas entry while updating cached sectors",
}
}
- if (new_a.data_type == BCH_DATA_free &&
- (!new_a.journal_seq || new_a.journal_seq < c->journal.flushed_seq_ondisk))
+ if (new_a->data_type == BCH_DATA_free &&
+ (!new_a->journal_seq || new_a->journal_seq < c->journal.flushed_seq_ondisk))
closure_wake_up(&c->freelist_wait);
- if (new_a.data_type == BCH_DATA_need_discard &&
- (!new_a.journal_seq || new_a.journal_seq < c->journal.flushed_seq_ondisk))
+ if (new_a->data_type == BCH_DATA_need_discard &&
+ (!bucket_journal_seq || bucket_journal_seq < c->journal.flushed_seq_ondisk))
bch2_do_discards(c);
- if (old_a.data_type != BCH_DATA_cached &&
- new_a.data_type == BCH_DATA_cached &&
+ if (old_a->data_type != BCH_DATA_cached &&
+ new_a->data_type == BCH_DATA_cached &&
should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
bch2_do_invalidates(c);
- if (new_a.data_type == BCH_DATA_need_gc_gens)
+ if (new_a->data_type == BCH_DATA_need_gc_gens)
bch2_do_gc_gens(c);
return 0;
s64 sectors, enum bch_data_type data_type)
{
struct btree_iter iter;
- struct bkey_s_c k;
struct bkey_i_stripe *s;
struct bch_replicas_padded r;
int ret = 0;
bch2_trans_iter_init(trans, &iter, BTREE_ID_stripes, POS(0, p.ec.idx),
BTREE_ITER_INTENT|
BTREE_ITER_WITH_UPDATES);
- k = bch2_btree_iter_peek_slot(&iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- if (k.k->type != KEY_TYPE_stripe) {
- bch2_trans_inconsistent(trans,
+ s = bch2_bkey_get_mut_typed(trans, &iter, stripe);
+ ret = PTR_ERR_OR_ZERO(s);
+ if (unlikely(ret)) {
+ bch2_trans_inconsistent_on(ret == -ENOENT, trans,
"pointer to nonexistent stripe %llu",
(u64) p.ec.idx);
- ret = -EIO;
goto err;
}
- if (!bch2_ptr_matches_stripe(bkey_s_c_to_stripe(k).v, p)) {
+ if (!bch2_ptr_matches_stripe(&s->v, p)) {
bch2_trans_inconsistent(trans,
"stripe pointer doesn't match stripe %llu",
(u64) p.ec.idx);
goto err;
}
- s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
- ret = PTR_ERR_OR_ZERO(s);
- if (ret)
- goto err;
-
- bkey_reassemble(&s->k_i, k);
stripe_blockcount_set(&s->v, p.ec.block,
stripe_blockcount_get(&s->v, p.ec.block) +
sectors);
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
- struct bkey_s_c k;
- struct bkey_i *n;
+ struct bkey_i *k;
__le64 *refcount;
int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
struct printbuf buf = PRINTBUF;
bch2_trans_iter_init(trans, &iter, BTREE_ID_reflink, POS(0, *idx),
BTREE_ITER_INTENT|
BTREE_ITER_WITH_UPDATES);
- k = bch2_btree_iter_peek_slot(&iter);
- ret = bkey_err(k);
+ k = bch2_bkey_get_mut(trans, &iter);
+ ret = PTR_ERR_OR_ZERO(k);
if (ret)
goto err;
- n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
- ret = PTR_ERR_OR_ZERO(n);
- if (ret)
- goto err;
-
- bkey_reassemble(n, k);
-
- refcount = bkey_refcount(n);
+ refcount = bkey_refcount(k);
if (!refcount) {
bch2_bkey_val_to_text(&buf, c, p.s_c);
bch2_trans_inconsistent(trans,
u64 pad;
pad = max_t(s64, le32_to_cpu(v->front_pad),
- le64_to_cpu(v->idx) - bkey_start_offset(k.k));
+ le64_to_cpu(v->idx) - bkey_start_offset(&k->k));
BUG_ON(pad > U32_MAX);
v->front_pad = cpu_to_le32(pad);
pad = max_t(s64, le32_to_cpu(v->back_pad),
- k.k->p.offset - p.k->size - le64_to_cpu(v->idx));
+ k->k.p.offset - p.k->size - le64_to_cpu(v->idx));
BUG_ON(pad > U32_MAX);
v->back_pad = cpu_to_le32(pad);
}
le64_add_cpu(refcount, add);
bch2_btree_iter_set_pos_to_extent_start(&iter);
- ret = bch2_trans_update(trans, &iter, n, 0);
+ ret = bch2_trans_update(trans, &iter, k, 0);
if (ret)
goto err;
- *idx = k.k->p.offset;
+ *idx = k->k.p.offset;
err:
bch2_trans_iter_exit(trans, &iter);
printbuf_exit(&buf);
{
return bch2_btree_delete_range(c, BTREE_ID_stripes,
POS(0, idx),
- POS(0, idx + 1),
+ POS(0, idx),
0, NULL);
}
dev = s->key.v.ptrs[block].dev;
- n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+ n = bch2_bkey_make_mut(trans, k);
ret = PTR_ERR_OR_ZERO(n);
if (ret)
return ret;
- bkey_reassemble(n, k);
-
bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, ptr->dev != dev);
ec_ptr = (void *) bch2_bkey_has_device(bkey_i_to_s_c(n), dev);
BUG_ON(!ec_ptr);
bch2_trans_copy_iter(&copy, iter);
- for_each_btree_key_continue_norestart(copy, 0, k, ret) {
+ for_each_btree_key_upto_continue_norestart(copy, insert->k.p, 0, k, ret) {
unsigned offset = 0;
- if (bkey_ge(bkey_start_pos(k.k), *end))
- break;
-
if (bkey_gt(bkey_start_pos(&insert->k), bkey_start_pos(k.k)))
offset = bkey_start_offset(&insert->k) -
bkey_start_offset(k.k);
if (ret)
goto err;
- for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents, start, 0, k, ret) {
- if (bkey_ge(bkey_start_pos(k.k), end))
- break;
-
+ for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents, start, end, 0, k, ret)
if (bkey_extent_is_data(k.k)) {
ret = 1;
break;
}
- }
start = iter.pos;
bch2_trans_iter_exit(&trans, &iter);
err:
* page
*/
ret = range_has_data(c, inode->ei_subvol,
- POS(inode->v.i_ino, index << PAGE_SECTORS_SHIFT),
- POS(inode->v.i_ino, (index + 1) << PAGE_SECTORS_SHIFT));
+ POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT)),
+ POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT) + PAGE_SECTORS));
if (ret <= 0)
return ret;
k = insert
? bch2_btree_iter_peek_prev(&src)
- : bch2_btree_iter_peek(&src);
+ : bch2_btree_iter_peek_upto(&src, POS(inode->v.i_ino, U64_MAX));
if ((ret = bkey_err(k)))
continue;
return bch2_err_class(ret);
}
+/*
+ * Take a quota reservation for unallocated blocks in a given file range
+ * Does not check pagecache
+ */
static int quota_reserve_range(struct bch_inode_info *inode,
struct quota_res *res,
u64 start, u64 end)
if (ret)
goto err;
- for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
- SPOS(inode->v.i_ino, offset >> 9, snapshot), 0, k, ret) {
- if (k.k->p.inode != inode->v.i_ino) {
- break;
- } else if (bkey_extent_is_data(k.k)) {
+ for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents,
+ SPOS(inode->v.i_ino, offset >> 9, snapshot),
+ POS(inode->v.i_ino, U64_MAX),
+ 0, k, ret) {
+ if (bkey_extent_is_data(k.k)) {
next_data = max(offset, bkey_start_offset(k.k) << 9);
break;
} else if (k.k->p.offset >> 9 > isize)
if (IS_ERR(delete))
return PTR_ERR(delete);
- tmp = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+ tmp = bch2_bkey_make_mut(trans, k);
if (IS_ERR(tmp))
return PTR_ERR(tmp);
- bkey_reassemble(tmp, k);
-
bkey_init(&delete->k);
delete->k.p = k_iter->pos;
return bch2_btree_iter_traverse(k_iter) ?:
return bch2_trans_update(trans, iter, &inode_p->inode.k_i, 0);
}
-struct bkey_s_c bch2_inode_to_v3(struct btree_trans *trans, struct bkey_s_c k)
+struct bkey_i *bch2_inode_to_v3(struct btree_trans *trans, struct bkey_i *k)
{
struct bch_inode_unpacked u;
struct bkey_inode_buf *inode_p;
int ret;
+ if (!bkey_is_inode(&k->k))
+ return ERR_PTR(-ENOENT);
+
inode_p = bch2_trans_kmalloc(trans, sizeof(*inode_p));
if (IS_ERR(inode_p))
- return bkey_s_c_err(PTR_ERR(inode_p));
+ return ERR_CAST(inode_p);
- ret = bch2_inode_unpack(k, &u);
+ ret = bch2_inode_unpack(bkey_i_to_s_c(k), &u);
if (ret)
- return bkey_s_c_err(ret);
+ return ERR_PTR(ret);
bch2_inode_pack(inode_p, &u);
- return bkey_i_to_s_c(&inode_p->inode.k_i);
+ return &inode_p->inode.k_i;
}
static int __bch2_inode_invalid(struct bkey_s_c k, struct printbuf *err)
void bch2_inode_pack(struct bkey_inode_buf *, const struct bch_inode_unpacked *);
int bch2_inode_unpack(struct bkey_s_c, struct bch_inode_unpacked *);
-struct bkey_s_c bch2_inode_to_v3(struct btree_trans *, struct bkey_s_c);
+struct bkey_i *bch2_inode_to_v3(struct btree_trans *, struct bkey_i *);
void bch2_inode_unpacked_to_text(struct printbuf *, struct bch_inode_unpacked *);
s64 i_sectors_delta)
{
struct btree_iter iter;
- struct bkey_s_c inode_k;
- struct bkey_s_c_inode_v3 inode;
- struct bkey_i_inode_v3 *new_inode;
+ struct bkey_i *k;
+ struct bkey_i_inode_v3 *inode;
int ret;
bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
extent_iter->pos.inode,
extent_iter->snapshot),
BTREE_ITER_INTENT|BTREE_ITER_CACHED);
- inode_k = bch2_btree_iter_peek_slot(&iter);
- ret = bkey_err(inode_k);
- if (unlikely(ret))
- goto err;
-
- ret = bkey_is_inode(inode_k.k) ? 0 : -ENOENT;
+ k = bch2_bkey_get_mut(trans, &iter);
+ ret = PTR_ERR_OR_ZERO(k);
if (unlikely(ret))
goto err;
- if (unlikely(inode_k.k->type != KEY_TYPE_inode_v3)) {
- inode_k = bch2_inode_to_v3(trans, inode_k);
- ret = bkey_err(inode_k);
+ if (unlikely(k->k.type != KEY_TYPE_inode_v3)) {
+ k = bch2_inode_to_v3(trans, k);
+ ret = PTR_ERR_OR_ZERO(k);
if (unlikely(ret))
goto err;
}
- inode = bkey_s_c_to_inode_v3(inode_k);
-
- new_inode = bch2_trans_kmalloc(trans, bkey_bytes(inode_k.k));
- ret = PTR_ERR_OR_ZERO(new_inode);
- if (unlikely(ret))
- goto err;
-
- bkey_reassemble(&new_inode->k_i, inode.s_c);
+ inode = bkey_i_to_inode_v3(k);
- if (!(le64_to_cpu(inode.v->bi_flags) & BCH_INODE_I_SIZE_DIRTY) &&
- new_i_size > le64_to_cpu(inode.v->bi_size))
- new_inode->v.bi_size = cpu_to_le64(new_i_size);
+ if (!(le64_to_cpu(inode->v.bi_flags) & BCH_INODE_I_SIZE_DIRTY) &&
+ new_i_size > le64_to_cpu(inode->v.bi_size))
+ inode->v.bi_size = cpu_to_le64(new_i_size);
- le64_add_cpu(&new_inode->v.bi_sectors, i_sectors_delta);
+ le64_add_cpu(&inode->v.bi_sectors, i_sectors_delta);
- new_inode->k.p.snapshot = iter.snapshot;
+ inode->k.p.snapshot = iter.snapshot;
- ret = bch2_trans_update(trans, &iter, &new_inode->k_i,
+ ret = bch2_trans_update(trans, &iter, &inode->k_i,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
err:
bch2_trans_iter_exit(trans, &iter);
bch2_btree_iter_set_snapshot(iter, snapshot);
- k = bch2_btree_iter_peek(iter);
- if (bkey_ge(iter->pos, end_pos)) {
- bch2_btree_iter_set_pos(iter, end_pos);
+ /*
+ * peek_upto() doesn't have ideal semantics for extents:
+ */
+ k = bch2_btree_iter_peek_upto(iter, end_pos);
+ if (!k.k)
break;
- }
ret = bkey_err(k);
if (ret)
continue;
+ BUG_ON(bkey_ge(iter->pos, end_pos));
+
bkey_init(&delete.k);
delete.k.p = iter->pos;
bch2_disk_reservation_put(c, &disk_res);
}
+ BUG_ON(bkey_gt(iter->pos, end_pos));
+
return ret ?: ret2;
}
return 0;
}
- new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+ new = bch2_bkey_make_mut(trans, k);
ret = PTR_ERR_OR_ZERO(new);
if (ret)
return ret;
- bkey_reassemble(new, k);
-
bch2_cut_front(bkey_start_pos(&orig->k), new);
bch2_cut_back(orig->k.p, new);
bch2_trans_init(&trans, c, 0, 0);
for_each_keylist_key(&op->insert_keys, orig) {
- ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_extents,
- bkey_start_pos(&orig->k),
+ ret = for_each_btree_key_upto_commit(&trans, iter, BTREE_ID_extents,
+ bkey_start_pos(&orig->k), orig->k.p,
BTREE_ITER_INTENT, k,
NULL, NULL, BTREE_INSERT_NOFAIL, ({
- if (bkey_ge(bkey_start_pos(k.k), orig->k.p))
- break;
+ BUG_ON(bkey_ge(bkey_start_pos(k.k), orig->k.p));
bch2_nocow_write_convert_one_unwritten(&trans, &iter, orig, k, op->new_i_size);
}));
BUG_ON(iter.pos.inode != lru_id);
*time = iter.pos.offset;
- lru = bch2_trans_kmalloc(trans, sizeof(*lru));
+ lru = bch2_bkey_alloc(trans, &iter, lru);
ret = PTR_ERR_OR_ZERO(lru);
if (ret)
goto err;
- bkey_lru_init(&lru->k_i);
- lru->k.p = iter.pos;
- lru->v.idx = cpu_to_le64(idx);
+ lru->v.idx = cpu_to_le64(idx);
ret = bch2_trans_update(trans, &iter, &lru->k_i, 0);
if (ret)
" for %s",
(bch2_bkey_val_to_text(&buf1, c, lru_k), buf1.buf),
(bch2_bkey_val_to_text(&buf2, c, k), buf2.buf))) {
- struct bkey_i *update =
- bch2_trans_kmalloc(trans, sizeof(*update));
-
- ret = PTR_ERR_OR_ZERO(update);
- if (ret)
- goto err;
-
- bkey_init(&update->k);
- update->k.p = lru_iter->pos;
-
- ret = bch2_trans_update(trans, lru_iter, update, 0);
+ ret = bch2_btree_delete_at(trans, lru_iter, 0);
if (ret)
goto err;
}
if (!bch2_bkey_has_device(k, dev_idx))
return 0;
- n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+ n = bch2_bkey_make_mut(trans, k);
ret = PTR_ERR_OR_ZERO(n);
if (ret)
return ret;
- bkey_reassemble(n, k);
-
ret = drop_dev_ptrs(c, bkey_i_to_s(n), dev_idx, flags, false);
if (ret)
return ret;
struct bkey_i *n;
int ret;
- n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+ n = bch2_bkey_make_mut(trans, k);
ret = PTR_ERR_OR_ZERO(n);
if (ret)
return ret;
- bkey_reassemble(n, k);
-
while (data_opts.kill_ptrs) {
unsigned i = 0, drop = __fls(data_opts.kill_ptrs);
struct bch_extent_ptr *ptr;
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
- struct bch_alloc_v4 a;
int ret;
bch2_trans_init(&trans, c, 0, 0);
BTREE_ITER_PREFETCH, k, ret) {
struct bch_dev *ca = bch_dev_bkey_exists(c, iter.pos.inode);
struct copygc_heap_entry e;
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a;
- bch2_alloc_to_v4(k, &a);
+ a = bch2_alloc_to_v4(k, &a_convert);
- if ((a.data_type != BCH_DATA_btree &&
- a.data_type != BCH_DATA_user) ||
- a.dirty_sectors >= ca->mi.bucket_size ||
+ if ((a->data_type != BCH_DATA_btree &&
+ a->data_type != BCH_DATA_user) ||
+ a->dirty_sectors >= ca->mi.bucket_size ||
bch2_bucket_is_open(c, iter.pos.inode, iter.pos.offset))
continue;
e = (struct copygc_heap_entry) {
.dev = iter.pos.inode,
- .gen = a.gen,
- .replicas = 1 + a.stripe_redundancy,
- .fragmentation = div_u64((u64) a.dirty_sectors * (1ULL << 31),
+ .gen = a->gen,
+ .replicas = 1 + a->stripe_redundancy,
+ .fragmentation = div_u64((u64) a->dirty_sectors * (1ULL << 31),
ca->mi.bucket_size),
- .sectors = a.dirty_sectors,
+ .sectors = a->dirty_sectors,
.bucket = iter.pos.offset,
};
heap_add_or_replace(h, e, -fragmentation_cmp, NULL);
case BCH_OPT_BOOL:
ret = kstrtou64(val, 10, res);
if (ret < 0 || (*res != 0 && *res != 1)) {
- prt_printf(err, "%s: must be bool",
- opt->attr.name);
+ if (err)
+ prt_printf(err, "%s: must be bool",
+ opt->attr.name);
return ret;
}
break;
ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
POS(QTYP_USR, 0),
- POS(QTYP_USR + 1, 0),
+ POS(QTYP_USR, U64_MAX),
0, NULL);
if (ret)
return ret;
ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
POS(QTYP_GRP, 0),
- POS(QTYP_GRP + 1, 0),
+ POS(QTYP_GRP, U64_MAX),
0, NULL);
if (ret)
return ret;
ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
POS(QTYP_PRJ, 0),
- POS(QTYP_PRJ + 1, 0),
+ POS(QTYP_PRJ, U64_MAX),
0, NULL);
if (ret)
return ret;
struct bkey_s_c k;
int ret;
- for_each_btree_key_continue_norestart(*iter, 0, k, ret) {
- if (bkey_ge(iter->pos, end))
- break;
-
+ for_each_btree_key_upto_continue_norestart(*iter, end, 0, k, ret) {
if (bkey_extent_is_unwritten(k))
continue;
static int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id)
{
struct btree_iter iter;
- struct bkey_s_c k;
struct bkey_i_snapshot *s;
int ret = 0;
bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots, POS(0, id),
BTREE_ITER_INTENT);
- k = bch2_btree_iter_peek_slot(&iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- if (k.k->type != KEY_TYPE_snapshot) {
- bch2_fs_inconsistent(trans->c, "missing snapshot %u", id);
- ret = -ENOENT;
+ s = bch2_bkey_get_mut_typed(trans, &iter, snapshot);
+ ret = PTR_ERR_OR_ZERO(s);
+ if (unlikely(ret)) {
+ bch2_fs_inconsistent_on(ret == -ENOENT, trans->c, "missing snapshot %u", id);
goto err;
}
/* already deleted? */
- if (BCH_SNAPSHOT_DELETED(bkey_s_c_to_snapshot(k).v))
+ if (BCH_SNAPSHOT_DELETED(&s->v))
goto err;
- s = bch2_trans_kmalloc(trans, sizeof(*s));
- ret = PTR_ERR_OR_ZERO(s);
- if (ret)
- goto err;
-
- bkey_reassemble(&s->k_i, k);
SET_BCH_SNAPSHOT_DELETED(&s->v, true);
SET_BCH_SNAPSHOT_SUBVOL(&s->v, false);
s->v.subvol = 0;
struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
struct bkey_s_c k;
struct bkey_s_c_snapshot s;
- struct bkey_i_snapshot *parent;
u32 parent_id;
unsigned i;
int ret = 0;
parent_id = le32_to_cpu(s.v->parent);
if (parent_id) {
+ struct bkey_i_snapshot *parent;
+
bch2_trans_iter_init(trans, &p_iter, BTREE_ID_snapshots,
POS(0, parent_id),
BTREE_ITER_INTENT);
- k = bch2_btree_iter_peek_slot(&p_iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- if (k.k->type != KEY_TYPE_snapshot) {
- bch2_fs_inconsistent(trans->c, "missing snapshot %u", parent_id);
- ret = -ENOENT;
- goto err;
- }
-
- parent = bch2_trans_kmalloc(trans, sizeof(*parent));
+ parent = bch2_bkey_get_mut_typed(trans, &p_iter, snapshot);
ret = PTR_ERR_OR_ZERO(parent);
- if (ret)
+ if (unlikely(ret)) {
+ bch2_fs_inconsistent_on(ret == -ENOENT, trans->c, "missing snapshot %u", parent_id);
goto err;
-
- bkey_reassemble(&parent->k_i, k);
+ }
for (i = 0; i < 2; i++)
if (le32_to_cpu(parent->v.children[i]) == id)
goto err;
}
- n = bch2_trans_kmalloc(trans, sizeof(*n));
+ n = bch2_bkey_alloc(trans, &iter, snapshot);
ret = PTR_ERR_OR_ZERO(n);
if (ret)
goto err;
- bkey_snapshot_init(&n->k_i);
- n->k.p = iter.pos;
n->v.flags = 0;
n->v.parent = cpu_to_le32(parent);
n->v.subvol = cpu_to_le32(snapshot_subvols[i]);
if (parent) {
bch2_btree_iter_set_pos(&iter, POS(0, parent));
- k = bch2_btree_iter_peek(&iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- if (k.k->type != KEY_TYPE_snapshot) {
- bch_err(trans->c, "snapshot %u not found", parent);
- ret = -ENOENT;
- goto err;
- }
-
- n = bch2_trans_kmalloc(trans, sizeof(*n));
+ n = bch2_bkey_get_mut_typed(trans, &iter, snapshot);
ret = PTR_ERR_OR_ZERO(n);
- if (ret)
+ if (unlikely(ret)) {
+ if (ret == -ENOENT)
+ bch_err(trans->c, "snapshot %u not found", parent);
goto err;
-
- bkey_reassemble(&n->k_i, k);
+ }
if (n->v.children[0] || n->v.children[1]) {
bch_err(trans->c, "Trying to add child snapshot nodes to parent that already has children");
int bch2_subvolume_unlink(struct btree_trans *trans, u32 subvolid)
{
struct btree_iter iter;
- struct bkey_s_c k;
struct bkey_i_subvolume *n;
struct subvolume_unlink_hook *h;
int ret = 0;
POS(0, subvolid),
BTREE_ITER_CACHED|
BTREE_ITER_INTENT);
- k = bch2_btree_iter_peek_slot(&iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- if (k.k->type != KEY_TYPE_subvolume) {
- bch2_fs_inconsistent(trans->c, "missing subvolume %u", subvolid);
- ret = -EIO;
- goto err;
- }
-
- n = bch2_trans_kmalloc(trans, sizeof(*n));
+ n = bch2_bkey_get_mut_typed(trans, &iter, subvolume);
ret = PTR_ERR_OR_ZERO(n);
- if (ret)
+ if (unlikely(ret)) {
+ bch2_fs_inconsistent_on(ret == -ENOENT, trans->c, "missing subvolume %u", subvolid);
goto err;
+ }
- bkey_reassemble(&n->k_i, k);
SET_BCH_SUBVOLUME_UNLINKED(&n->v, true);
ret = bch2_trans_update(trans, &iter, &n->k_i, 0);
if (src_subvolid) {
/* Creating a snapshot: */
- src_subvol = bch2_trans_kmalloc(trans, sizeof(*src_subvol));
- ret = PTR_ERR_OR_ZERO(src_subvol);
- if (ret)
- goto err;
bch2_trans_iter_init(trans, &src_iter, BTREE_ID_subvolumes,
POS(0, src_subvolid),
BTREE_ITER_CACHED|
BTREE_ITER_INTENT);
- k = bch2_btree_iter_peek_slot(&src_iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
- if (k.k->type != KEY_TYPE_subvolume) {
- bch_err(c, "subvolume %u not found", src_subvolid);
- ret = -ENOENT;
+ src_subvol = bch2_bkey_get_mut_typed(trans, &src_iter, subvolume);
+ ret = PTR_ERR_OR_ZERO(src_subvol);
+ if (unlikely(ret)) {
+ bch2_fs_inconsistent_on(ret == -ENOENT, trans->c,
+ "subvolume %u not found", src_subvolid);
goto err;
}
- bkey_reassemble(&src_subvol->k_i, k);
parent = le32_to_cpu(src_subvol->v.snapshot);
}
goto err;
}
- new_subvol = bch2_trans_kmalloc(trans, sizeof(*new_subvol));
+ new_subvol = bch2_bkey_alloc(trans, &dst_iter, subvolume);
ret = PTR_ERR_OR_ZERO(new_subvol);
if (ret)
goto err;
- bkey_subvolume_init(&new_subvol->k_i);
new_subvol->v.flags = 0;
new_subvol->v.snapshot = cpu_to_le32(new_nodes[0]);
new_subvol->v.inode = cpu_to_le64(inode);
SET_BCH_SUBVOLUME_RO(&new_subvol->v, ro);
SET_BCH_SUBVOLUME_SNAP(&new_subvol->v, src_subvolid != 0);
- new_subvol->k.p = dst_iter.pos;
ret = bch2_trans_update(trans, &dst_iter, &new_subvol->k_i, 0);
if (ret)
goto err;
int ret;
ret = bch2_btree_delete_range(c, BTREE_ID_extents,
- SPOS(0, 0, U32_MAX), SPOS_MAX,
- 0,
- NULL);
+ SPOS(0, 0, U32_MAX),
+ POS(0, U64_MAX),
+ 0, NULL);
BUG_ON(ret);
ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
- SPOS(0, 0, U32_MAX), SPOS_MAX,
+ SPOS(0, 0, U32_MAX),
+ POS(0, U64_MAX),
0, NULL);
BUG_ON(ret);
}
i = 0;
- ret = for_each_btree_key2(&trans, iter, BTREE_ID_xattrs,
- SPOS(0, 0, U32_MAX), 0, k, ({
+ ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_xattrs,
+ SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
+ 0, k, ({
BUG_ON(k.k->p.offset != i++);
0;
}));
i = 0;
- ret = for_each_btree_key2(&trans, iter, BTREE_ID_extents,
- SPOS(0, 0, U32_MAX), 0, k, ({
+ ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_extents,
+ SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
+ 0, k, ({
BUG_ON(bkey_start_offset(k.k) != i);
i = k.k->p.offset;
0;
i = 0;
- ret = for_each_btree_key2(&trans, iter, BTREE_ID_xattrs,
- SPOS(0, 0, U32_MAX), 0, k, ({
+ ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_xattrs,
+ SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
+ 0, k, ({
BUG_ON(k.k->p.offset != i);
i += 2;
0;
i = 0;
- ret = for_each_btree_key2(&trans, iter, BTREE_ID_xattrs,
- SPOS(0, 0, U32_MAX),
+ ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_xattrs,
+ SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
BTREE_ITER_SLOTS, k, ({
if (i >= nr * 2)
break;
i = 0;
- ret = for_each_btree_key2(&trans, iter, BTREE_ID_extents,
- SPOS(0, 0, U32_MAX), 0, k, ({
+ ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_extents,
+ SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
+ 0, k, ({
BUG_ON(bkey_start_offset(k.k) != i + 8);
BUG_ON(k.k->size != 8);
i += 16;
i = 0;
- ret = for_each_btree_key2(&trans, iter, BTREE_ID_extents,
- SPOS(0, 0, U32_MAX),
+ ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_extents,
+ SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
BTREE_ITER_SLOTS, k, ({
if (i == nr)
break;
bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
SPOS(0, 0, U32_MAX), 0);
- lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
+ lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
BUG_ON(k.k);
- lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
+ lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
BUG_ON(k.k);
bch2_trans_iter_exit(&trans, &iter);
bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
SPOS(0, 0, U32_MAX), 0);
- lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
+ lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
BUG_ON(k.k);
- lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
+ lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
BUG_ON(k.k);
bch2_trans_iter_exit(&trans, &iter);
bch2_trans_init(&trans, c, 0, 0);
bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
SPOS(0, 0, snapid_lo), 0);
- lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
+ lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
BUG_ON(k.k->p.snapshot != U32_MAX);
bch2_trans_init(&trans, c, 0, 0);
- ret = for_each_btree_key2(&trans, iter, BTREE_ID_xattrs,
- SPOS(0, 0, U32_MAX), 0, k,
+ ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_xattrs,
+ SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
+ 0, k,
0);
if (ret)
bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
int ret;
ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
- SPOS(0, 0, U32_MAX), SPOS_MAX,
+ SPOS(0, 0, U32_MAX),
+ POS(0, U64_MAX),
0, NULL);
if (ret)
bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));