-ce9293e9d063f7f1a22209f9cc2f5cb7478e886c
+7e03c1ab0ef2e3148ba70656eab67471c85a0419
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_SIGNAL_H
+#define _LINUX_SCHED_SIGNAL_H
+
+static inline int fatal_signal_pending(struct task_struct *p)
+{
+ return 0;
+}
+
+#endif /* _LINUX_SCHED_SIGNAL_H */
+
/* Btree: */
-#define BCH_BTREE_IDS() \
+#define BCH_BTREE_IDS() \
x(EXTENTS, 0, "extents") \
x(INODES, 1, "inodes") \
x(DIRENTS, 2, "dirents") \
x(XATTRS, 3, "xattrs") \
x(ALLOC, 4, "alloc") \
x(QUOTAS, 5, "quotas") \
- x(EC, 6, "erasure_coding") \
+ x(EC, 6, "stripes") \
x(REFLINK, 7, "reflink")
enum btree_id {
: expensive_debug_checks(c) ? 0
: !btree_node_type_needs_gc(btree_id) ? 1
: 0;
- u8 max_stale;
+ u8 max_stale = 0;
int ret = 0;
bch2_trans_init(&trans, c, 0, 0);
{
struct bch_dev *ca;
unsigned i;
-
- /*
- * indicate to stripe code that we need to allocate for the gc stripes
- * radix tree, too
- */
- gc_pos_set(c, gc_phase(GC_PHASE_START));
+ int ret;
BUG_ON(c->usage_gc);
}
}
+ ret = bch2_ec_mem_alloc(c, true);
+ if (ret)
+ return ret;
+
+ percpu_down_write(&c->mark_lock);
+
+ /*
+ * indicate to stripe code that we need to allocate for the gc stripes
+ * radix tree, too
+ */
+ gc_pos_set(c, gc_phase(GC_PHASE_START));
+
for_each_member_device(ca, c, i) {
struct bucket_array *dst = __bucket_array(ca, 1);
struct bucket_array *src = __bucket_array(ca, 0);
}
};
- return bch2_ec_mem_alloc(c, true);
+ percpu_up_write(&c->mark_lock);
+
+ return 0;
}
/**
down_write(&c->gc_lock);
again:
- percpu_down_write(&c->mark_lock);
ret = bch2_gc_start(c, metadata_only);
- percpu_up_write(&c->mark_lock);
-
if (ret)
goto out;
return bch2_btree_iter_peek(iter);
}
+ if (unlikely(bkey_deleted(&iter->k))) {
+ /*
+ * we're currently pointed at a hole, because previously we were
+ * iterating over slots:
+ */
+ return bch2_btree_iter_peek(iter);
+ }
+
do {
bch2_btree_node_iter_advance(&l->iter, l->b);
p = bch2_btree_node_iter_peek_all(&l->iter, l->b);
struct bch_fs *c = trans->c;
struct bch_fs_usage *fs_usage = NULL;
struct btree_insert_entry *i;
+ struct btree_iter *iter;
unsigned mark_flags = trans->flags & BTREE_INSERT_BUCKET_INVALIDATE
? BCH_BUCKET_MARK_BUCKET_INVALIDATE
: 0;
goto out_clear_replicas;
}
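+ /*
+ * Iterators where nodes_locked != nodes_intent_locked are holding read
+ * locks; drop them before the commit path takes write locks, so we
+ * can't deadlock against nodes we ourselves have read locked:
+ */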
+ trans_for_each_iter(trans, iter) {
+ if (iter->nodes_locked != iter->nodes_intent_locked) {
+ BUG_ON(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT);
+ BUG_ON(trans->iters_live & (1ULL << iter->idx));
+ __bch2_btree_iter_unlock(iter);
+ }
+ }
+
if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
trans_for_each_update(trans, i)
btree_insert_entry_checks(trans, i);
preempt_enable();
}
-static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
- unsigned offset, s64 delta,
- unsigned flags)
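+/*
+ * Scale a live sector count by the compression ratio n/d, rounding up so a
+ * nonempty extent never accounts to zero disk sectors: e.g. 8 live sectors
+ * compressed 4:1 (n = 2, d = 8) scale to DIV_ROUND_UP(8 * 2, 8) = 2 sectors:
+ */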
+static s64 disk_sectors_scaled(unsigned n, unsigned d, unsigned sectors)
{
+ return DIV_ROUND_UP(sectors * n, d);
+}
+
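+/*
+ * Change in disk sector usage for an overwrite, in on disk (compressed)
+ * sectors: an OVERWRITE_SPLIT punches a hole out of the middle of the old
+ * extent, a plain OVERWRITE shrinks it by -delta sectors, and otherwise
+ * we're adding delta sectors of new data:
+ */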
+static s64 __ptr_disk_sectors_delta(unsigned old_size,
+ unsigned offset, s64 delta,
+ unsigned flags,
+ unsigned n, unsigned d)
+{
+ BUG_ON(!n || !d);
+
if (flags & BCH_BUCKET_MARK_OVERWRITE_SPLIT) {
- BUG_ON(offset + -delta > p.crc.live_size);
+ BUG_ON(offset + -delta > old_size);
- return -((s64) ptr_disk_sectors(p)) +
- __ptr_disk_sectors(p, offset) +
- __ptr_disk_sectors(p, p.crc.live_size -
- offset + delta);
+ return -disk_sectors_scaled(n, d, old_size) +
+ disk_sectors_scaled(n, d, offset) +
+ disk_sectors_scaled(n, d, old_size - offset + delta);
} else if (flags & BCH_BUCKET_MARK_OVERWRITE) {
- BUG_ON(offset + -delta > p.crc.live_size);
+ BUG_ON(offset + -delta > old_size);
- return -((s64) ptr_disk_sectors(p)) +
- __ptr_disk_sectors(p, p.crc.live_size +
- delta);
+ return -disk_sectors_scaled(n, d, old_size) +
+ disk_sectors_scaled(n, d, old_size + delta);
} else {
- return ptr_disk_sectors(p);
+ return disk_sectors_scaled(n, d, delta);
}
}
+static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
+ unsigned offset, s64 delta,
+ unsigned flags)
+{
+ return __ptr_disk_sectors_delta(p.crc.live_size,
+ offset, delta, flags,
+ p.crc.compressed_size,
+ p.crc.uncompressed_size);
+}
+
static void bucket_set_stripe(struct bch_fs *c,
const struct bch_stripe *v,
struct bch_fs_usage *fs_usage,
struct bch_extent_stripe_ptr p,
enum bch_data_type data_type,
struct bch_fs_usage *fs_usage,
- s64 sectors, unsigned flags)
+ s64 sectors, unsigned flags,
+ struct bch_replicas_padded *r,
+ unsigned *nr_data,
+ unsigned *nr_parity)
{
bool gc = flags & BCH_BUCKET_MARK_GC;
struct stripe *m;
- unsigned old, new, nr_data;
+ unsigned old, new;
int blocks_nonempty_delta;
- s64 parity_sectors;
-
- BUG_ON(!sectors);
m = genradix_ptr(&c->stripes[gc], p.idx);
BUG_ON(m->r.e.data_type != data_type);
- nr_data = m->nr_blocks - m->nr_redundant;
-
- parity_sectors = DIV_ROUND_UP(abs(sectors) * m->nr_redundant, nr_data);
-
- if (sectors < 0)
- parity_sectors = -parity_sectors;
- sectors += parity_sectors;
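+ /*
+ * Return the stripe's geometry and replicas entry - the caller now
+ * does the parity scaling and replicas accounting itself:
+ */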
+ *nr_data = m->nr_blocks - m->nr_redundant;
+ *nr_parity = m->nr_redundant;
+ *r = m->r;
old = m->block_sectors[p.block];
m->block_sectors[p.block] += sectors;
spin_unlock(&c->ec_stripes_heap_lock);
- update_replicas(c, fs_usage, &m->r.e, sectors);
-
return 0;
}
struct extent_ptr_decoded p;
struct bch_replicas_padded r;
s64 dirty_sectors = 0;
- unsigned i;
int ret;
r.e.data_type = data_type;
? sectors
: ptr_disk_sectors_delta(p, offset, sectors, flags);
bool stale = bch2_mark_pointer(c, p, disk_sectors, data_type,
- fs_usage, journal_seq, flags);
+ fs_usage, journal_seq, flags);
if (p.ptr.cached) {
if (!stale)
update_cached_sectors(c, fs_usage, p.ptr.dev,
disk_sectors);
- } else if (!p.ec_nr) {
+ } else if (!p.has_ec) {
dirty_sectors += disk_sectors;
r.e.devs[r.e.nr_devs++] = p.ptr.dev;
} else {
- for (i = 0; i < p.ec_nr; i++) {
- ret = bch2_mark_stripe_ptr(c, p.ec[i],
- data_type, fs_usage,
- disk_sectors, flags);
- if (ret)
- return ret;
- }
-
+ struct bch_replicas_padded ec_r;
+ unsigned nr_data, nr_parity;
+ s64 parity_sectors;
+
+ ret = bch2_mark_stripe_ptr(c, p.ec, data_type,
+ fs_usage, disk_sectors, flags,
+ &ec_r, &nr_data, &nr_parity);
+ if (ret)
+ return ret;
+
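+ /*
+ * Parity is striped across the nr_data blocks, so this pointer's
+ * share of the parity overhead is its disk sectors scaled by
+ * nr_parity/nr_data - folded into the compression ratio by
+ * scaling numerator and denominator:
+ */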
+ parity_sectors =
+ __ptr_disk_sectors_delta(p.crc.live_size,
+ offset, sectors, flags,
+ p.crc.compressed_size * nr_parity,
+ p.crc.uncompressed_size * nr_data);
+
+ update_replicas(c, fs_usage, &ec_r.e,
+ disk_sectors + parity_sectors);
+
+ /*
+ * There may be other dirty pointers in this extent, but
+ * if so they're not required for mounting, since we
+ * already have an erasure coded pointer in this extent:
+ */
r.e.nr_required = 0;
}
}
- update_replicas(c, fs_usage, &r.e, dirty_sectors);
+ if (r.e.nr_devs)
+ update_replicas(c, fs_usage, &r.e, dirty_sectors);
return 0;
}
static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
struct bch_extent_stripe_ptr p,
- s64 sectors, enum bch_data_type data_type)
+ s64 sectors, enum bch_data_type data_type,
+ struct bch_replicas_padded *r,
+ unsigned *nr_data,
+ unsigned *nr_parity)
{
struct bch_fs *c = trans->c;
- struct bch_replicas_padded r;
struct btree_iter *iter;
struct bkey_i *new_k;
struct bkey_s_c k;
struct bkey_s_stripe s;
- unsigned nr_data;
- s64 parity_sectors;
int ret = 0;
ret = trans_get_key(trans, BTREE_ID_EC, POS(0, p.idx), &iter, &k);
bkey_reassemble(new_k, k);
s = bkey_i_to_s_stripe(new_k);
- nr_data = s.v->nr_blocks - s.v->nr_redundant;
-
- parity_sectors = DIV_ROUND_UP(abs(sectors) * s.v->nr_redundant, nr_data);
-
- if (sectors < 0)
- parity_sectors = -parity_sectors;
-
stripe_blockcount_set(s.v, p.block,
stripe_blockcount_get(s.v, p.block) +
- sectors + parity_sectors);
-
- bch2_bkey_to_replicas(&r.e, s.s_c);
+ sectors);
- update_replicas_list(trans, &r.e, sectors);
+ *nr_data = s.v->nr_blocks - s.v->nr_redundant;
+ *nr_parity = s.v->nr_redundant;
+ bch2_bkey_to_replicas(&r->e, s.s_c);
out:
bch2_trans_iter_put(trans, iter);
return ret;
struct bch_replicas_padded r;
s64 dirty_sectors = 0;
bool stale;
- unsigned i;
int ret;
r.e.data_type = data_type;
if (!stale)
update_cached_sectors_list(trans, p.ptr.dev,
disk_sectors);
- } else if (!p.ec_nr) {
+ } else if (!p.has_ec) {
dirty_sectors += disk_sectors;
r.e.devs[r.e.nr_devs++] = p.ptr.dev;
} else {
- for (i = 0; i < p.ec_nr; i++) {
- ret = bch2_trans_mark_stripe_ptr(trans, p.ec[i],
- disk_sectors, data_type);
- if (ret)
- return ret;
- }
+ struct bch_replicas_padded ec_r;
+ unsigned nr_data, nr_parity;
+ s64 parity_sectors;
+
+ ret = bch2_trans_mark_stripe_ptr(trans, p.ec,
+ disk_sectors, data_type,
+ &ec_r, &nr_data, &nr_parity);
+ if (ret)
+ return ret;
+
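+ /* same parity overhead scaling as in the mark path above: */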
+ parity_sectors =
+ __ptr_disk_sectors_delta(p.crc.live_size,
+ offset, sectors, flags,
+ p.crc.compressed_size * nr_parity,
+ p.crc.uncompressed_size * nr_data);
+
+ update_replicas_list(trans, &ec_r.e,
+ disk_sectors + parity_sectors);
r.e.nr_required = 0;
}
}
- update_replicas_list(trans, &r.e, dirty_sectors);
+ if (r.e.nr_devs)
+ update_replicas_list(trans, &r.e, dirty_sectors);
return 0;
}
return gen_after(ptr_bucket_mark(ca, ptr).gen, ptr->gen);
}
-static inline unsigned __ptr_disk_sectors(struct extent_ptr_decoded p,
- unsigned live_size)
+static inline s64 __ptr_disk_sectors(struct extent_ptr_decoded p,
+ unsigned live_size)
{
return live_size && p.crc.compression_type
? max(1U, DIV_ROUND_UP(live_size * p.crc.compressed_size,
: live_size;
}
-static inline unsigned ptr_disk_sectors(struct extent_ptr_decoded p)
+static inline s64 ptr_disk_sectors(struct extent_ptr_decoded p)
{
return __ptr_disk_sectors(p, p.crc.live_size);
}
struct btree_iter *
__bch2_dirent_lookup_trans(struct btree_trans *trans, u64 dir_inum,
const struct bch_hash_info *hash_info,
- const struct qstr *name)
+ const struct qstr *name, unsigned flags)
{
return bch2_hash_lookup(trans, bch2_dirent_hash_desc,
- hash_info, dir_inum, name, 0);
+ hash_info, dir_inum, name, flags);
}
u64 bch2_dirent_lookup(struct bch_fs *c, u64 dir_inum,
bch2_trans_init(&trans, c, 0, 0);
- iter = __bch2_dirent_lookup_trans(&trans, dir_inum, hash_info, name);
+ iter = __bch2_dirent_lookup_trans(&trans, dir_inum,
+ hash_info, name, 0);
if (IS_ERR(iter)) {
BUG_ON(PTR_ERR(iter) == -EINTR);
goto out;
struct btree_iter *iter;
struct bkey_s_c k;
struct bkey_s_c_dirent dirent;
- unsigned len;
int ret;
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS,
POS(inum, ctx->pos), 0, k, ret) {
+ if (k.k->p.inode > inum)
+ break;
+
if (k.k->type != KEY_TYPE_dirent)
continue;
dirent = bkey_s_c_to_dirent(k);
- if (bkey_cmp(k.k->p, POS(inum, ctx->pos)) < 0)
- continue;
-
- if (k.k->p.inode > inum)
- break;
-
- len = bch2_dirent_name_bytes(dirent);
-
/*
* XXX: dir_emit() can fault and block, while we're holding
* locks
*/
- if (!dir_emit(ctx, dirent.v->d_name, len,
+ ctx->pos = dirent.k->p.offset;
+ if (!dir_emit(ctx, dirent.v->d_name,
+ bch2_dirent_name_bytes(dirent),
le64_to_cpu(dirent.v->d_inum),
dirent.v->d_type))
break;
-
- ctx->pos = k.k->p.offset + 1;
+ ctx->pos = dirent.k->p.offset + 1;
}
ret = bch2_trans_exit(&trans) ?: ret;
struct btree_iter *
__bch2_dirent_lookup_trans(struct btree_trans *, u64,
const struct bch_hash_info *,
- const struct qstr *);
+ const struct qstr *, unsigned);
u64 bch2_dirent_lookup(struct bch_fs *, u64, const struct bch_hash_info *,
const struct qstr *);
pr_buf(out, " %u:%llu:%u", s->ptrs[i].dev,
(u64) s->ptrs[i].offset,
stripe_blockcount_get(s, i));
-
- bch2_bkey_ptrs_to_text(out, c, k);
}
static int ptr_matches_stripe(struct bch_fs *c,
closure_init_stack(&cl);
- BUG_ON(!rbio->pick.idx ||
- rbio->pick.idx - 1 >= rbio->pick.ec_nr);
+ BUG_ON(!rbio->pick.has_ec);
- stripe_idx = rbio->pick.ec[rbio->pick.idx - 1].idx;
+ stripe_idx = rbio->pick.ec.idx;
buf = kzalloc(sizeof(*buf), GFP_NOIO);
if (!buf)
size_t idx = iter->pos.offset;
int ret = 0;
- if (!__ec_stripe_mem_alloc(c, idx, GFP_NOWAIT))
+ if (!__ec_stripe_mem_alloc(c, idx, GFP_NOWAIT|__GFP_NOWARN))
return ret;
bch2_trans_unlock(iter->trans);
struct btree_trans trans;
struct btree_iter *btree_iter;
struct journal_iter journal_iter;
- struct bkey_s_c btree_k, journal_k, k;
+ struct bkey_s_c btree_k, journal_k;
int ret;
ret = bch2_fs_ec_start(c);
journal_k = bch2_journal_iter_peek(&journal_iter);
while (1) {
+ bool btree;
+
if (btree_k.k && journal_k.k) {
int cmp = bkey_cmp(btree_k.k->p, journal_k.k->p);
- if (cmp < 0) {
- k = btree_k;
- btree_k = bch2_btree_iter_next(btree_iter);
- } else if (cmp == 0) {
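+ /*
+ * journal keys are newer than btree keys: on a tie, skip
+ * the btree key and use the journal key:
+ */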
+ if (!cmp)
btree_k = bch2_btree_iter_next(btree_iter);
- k = journal_k;
- journal_k = bch2_journal_iter_next(&journal_iter);
- } else {
- k = journal_k;
- journal_k = bch2_journal_iter_next(&journal_iter);
- }
+ btree = cmp < 0;
} else if (btree_k.k) {
- k = btree_k;
- btree_k = bch2_btree_iter_next(btree_iter);
+ btree = true;
} else if (journal_k.k) {
- k = journal_k;
- journal_k = bch2_journal_iter_next(&journal_iter);
+ btree = false;
} else {
break;
}
- bch2_mark_key(c, k, 0, 0, NULL, 0,
+ bch2_mark_key(c, btree ? btree_k : journal_k,
+ 0, 0, NULL, 0,
BCH_BUCKET_MARK_ALLOC_READ|
BCH_BUCKET_MARK_NOATOMIC);
+
+ if (btree)
+ btree_k = bch2_btree_iter_next(btree_iter);
+ else
+ journal_k = bch2_journal_iter_next(&journal_iter);
}
ret = bch2_trans_exit(&trans) ?: ret;
static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
struct extent_ptr_decoded p)
{
- unsigned i, durability = 0;
+ unsigned durability = 0;
struct bch_dev *ca;
if (p.ptr.cached)
if (ca->mi.state != BCH_MEMBER_STATE_FAILED)
durability = max_t(unsigned, durability, ca->mi.durability);
- for (i = 0; i < p.ec_nr; i++) {
+ if (p.has_ec) {
struct stripe *s =
- genradix_ptr(&c->stripes[0], p.idx);
+ genradix_ptr(&c->stripes[0], p.ec.idx);
if (WARN_ON(!s))
- continue;
+ goto out;
durability = max_t(unsigned, durability, s->nr_redundant);
}
-
+out:
return durability;
}
p.idx++;
if (force_reconstruct_read(c) &&
- !p.idx && p.ec_nr)
+ !p.idx && p.has_ec)
p.idx++;
- if (p.idx >= p.ec_nr + 1)
+ if (p.idx >= (unsigned) p.has_ec + 1)
continue;
if (ret > 0 && !ptr_better(c, p, *pick))
struct bpos *end)
{
struct btree_trans *trans = iter->trans;
- struct btree *b = iter->l[0].b;
- struct btree_node_iter node_iter = iter->l[0].iter;
+ struct btree *b;
+ struct btree_node_iter node_iter;
struct bkey_packed *_k;
unsigned nr_iters = 0;
int ret;
- BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
+ ret = bch2_btree_iter_traverse(iter);
+ if (ret)
+ return ret;
+
+ b = iter->l[0].b;
+ node_iter = iter->l[0].iter;
+
BUG_ON(bkey_cmp(bkey_start_pos(&insert->k), b->data->min_key) < 0);
*end = bpos_min(insert->k.p, b->key.k.p);
struct bch_extent_crc_unpacked crc =
bch2_extent_crc_unpack(&k->k, NULL);
union bch_extent_entry *pos;
- unsigned i;
if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
pos = ptrs.start;
p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
__extent_entry_insert(k, pos, to_entry(&p->ptr));
- for (i = 0; i < p->ec_nr; i++) {
- p->ec[i].type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
- __extent_entry_insert(k, pos, to_entry(&p->ec[i]));
+ if (p->has_ec) {
+ p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
+ __extent_entry_insert(k, pos, to_entry(&p->ec));
}
}
__label__ out; \
\
(_ptr).idx = 0; \
- (_ptr).ec_nr = 0; \
+ (_ptr).has_ec = false; \
\
__bkey_extent_entry_for_each_from(_entry, _end, _entry) \
switch (extent_entry_type(_entry)) { \
entry_to_crc(_entry)); \
break; \
case BCH_EXTENT_ENTRY_stripe_ptr: \
- (_ptr).ec[(_ptr).ec_nr++] = _entry->stripe_ptr; \
+ (_ptr).ec = _entry->stripe_ptr; \
+ (_ptr).has_ec = true; \
break; \
} \
out: \
struct extent_ptr_decoded {
unsigned idx;
- unsigned ec_nr;
+ bool has_ec;
struct bch_extent_crc_unpacked crc;
struct bch_extent_ptr ptr;
- struct bch_extent_stripe_ptr ec[4];
+ struct bch_extent_stripe_ptr ec;
};
struct bch_io_failures {
u64 now = bch2_current_time(trans->c);
int ret;
- dir_iter = bch2_inode_peek(trans, dir_u, dir_inum,
- name ? BTREE_ITER_INTENT : 0);
+ dir_iter = bch2_inode_peek(trans, dir_u, dir_inum, BTREE_ITER_INTENT);
if (IS_ERR(dir_iter))
return PTR_ERR(dir_iter);
return 0;
}
-int bch2_link_trans(struct btree_trans *trans,
- u64 dir_inum,
+int bch2_link_trans(struct btree_trans *trans, u64 dir_inum,
u64 inum, struct bch_inode_unpacked *inode_u,
const struct qstr *name)
{
struct bch_hash_info dir_hash;
u64 now = bch2_current_time(trans->c);
- dir_iter = bch2_inode_peek(trans, &dir_u, dir_inum, 0);
- if (IS_ERR(dir_iter))
- return PTR_ERR(dir_iter);
-
inode_iter = bch2_inode_peek(trans, inode_u, inum, BTREE_ITER_INTENT);
if (IS_ERR(inode_iter))
return PTR_ERR(inode_iter);
- dir_hash = bch2_hash_info_init(trans->c, &dir_u);
-
inode_u->bi_ctime = now;
bch2_inode_nlink_inc(inode_u);
+ dir_iter = bch2_inode_peek(trans, &dir_u, dir_inum, 0);
+ if (IS_ERR(dir_iter))
+ return PTR_ERR(dir_iter);
+
+ /* XXX: shouldn't we be updating mtime/ctime on the directory? */
+
+ dir_hash = bch2_hash_info_init(trans->c, &dir_u);
+ bch2_trans_iter_put(trans, dir_iter);
+
return bch2_dirent_create(trans, dir_inum, &dir_hash,
mode_to_type(inode_u->bi_mode),
name, inum, BCH_HASH_SET_MUST_CREATE) ?:
dir_hash = bch2_hash_info_init(trans->c, dir_u);
- dirent_iter = __bch2_dirent_lookup_trans(trans, dir_inum,
- &dir_hash, name);
+ dirent_iter = __bch2_dirent_lookup_trans(trans, dir_inum, &dir_hash,
+ name, BTREE_ITER_INTENT);
if (IS_ERR(dirent_iter))
return PTR_ERR(dirent_iter);
struct posix_acl *,
struct posix_acl *);
-int bch2_link_trans(struct btree_trans *,
- u64,
+int bch2_link_trans(struct btree_trans *, u64,
u64, struct bch_inode_unpacked *,
const struct qstr *);
u64 sectors;
};
-struct bchfs_write_op {
- struct bch_inode_info *inode;
- s64 sectors_added;
- bool is_dio;
- bool unalloc;
- u64 new_i_size;
-
- /* must be last: */
- struct bch_write_op op;
-};
-
struct bch_writepage_io {
struct closure cl;
- u64 new_sectors;
+ struct bch_inode_info *inode;
/* must be last: */
- struct bchfs_write_op op;
+ struct bch_write_op op;
};
struct dio_write {
struct iovec inline_vecs[2];
/* must be last: */
- struct bchfs_write_op iop;
+ struct bch_write_op op;
};
struct dio_read {
mutex_unlock(&inode->ei_quota_lock);
}
-/* normal i_size/i_sectors update machinery: */
-
-static int sum_sector_overwrites(struct btree_trans *trans,
- struct btree_iter *extent_iter,
- struct bkey_i *new,
- bool may_allocate,
- s64 *delta)
-{
- struct btree_iter *iter;
- struct bkey_s_c old;
- int ret = 0;
-
- *delta = 0;
-
- iter = bch2_trans_copy_iter(trans, extent_iter);
- if (IS_ERR(iter))
- return PTR_ERR(iter);
-
- for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, old, ret) {
- if (!may_allocate &&
- bch2_bkey_nr_ptrs_allocated(old) <
- bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(new))) {
- ret = -ENOSPC;
- break;
- }
-
- *delta += (min(new->k.p.offset,
- old.k->p.offset) -
- max(bkey_start_offset(&new->k),
- bkey_start_offset(old.k))) *
- (bkey_extent_is_allocation(&new->k) -
- bkey_extent_is_allocation(old.k));
-
- if (bkey_cmp(old.k->p, new->k.p) >= 0)
- break;
- }
-
- bch2_trans_iter_put(trans, iter);
- return ret;
-}
-
-int bch2_extent_update(struct btree_trans *trans,
- struct bch_inode_info *inode,
- struct disk_reservation *disk_res,
- struct quota_res *quota_res,
- struct btree_iter *extent_iter,
- struct bkey_i *k,
- u64 new_i_size,
- bool may_allocate,
- bool direct,
- s64 *total_delta)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter *inode_iter = NULL;
- struct bch_inode_unpacked inode_u;
- struct bkey_inode_buf inode_p;
- bool extended = false;
- s64 i_sectors_delta;
- int ret;
-
- ret = bch2_btree_iter_traverse(extent_iter);
- if (ret)
- return ret;
-
- ret = bch2_extent_trim_atomic(k, extent_iter);
- if (ret)
- return ret;
-
- ret = sum_sector_overwrites(trans, extent_iter, k,
- may_allocate, &i_sectors_delta);
- if (ret)
- return ret;
-
- bch2_trans_update(trans, extent_iter, k);
-
- new_i_size = min(k->k.p.offset << 9, new_i_size);
-
- /* XXX: inode->i_size locking */
- if (i_sectors_delta ||
- new_i_size > inode->ei_inode.bi_size) {
- inode_iter = bch2_inode_peek(trans, &inode_u,
- k->k.p.inode, BTREE_ITER_INTENT);
- if (IS_ERR(inode_iter))
- return PTR_ERR(inode_iter);
-
- inode_u.bi_sectors += i_sectors_delta;
-
- /*
- * XXX: can BCH_INODE_I_SIZE_DIRTY be true here? i.e. can we
- * race with truncate?
- */
- if (!(inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
- new_i_size > inode_u.bi_size) {
- inode_u.bi_size = new_i_size;
- extended = true;
- }
-
- if (i_sectors_delta || extended) {
- bch2_inode_pack(&inode_p, &inode_u);
- bch2_trans_update(trans, inode_iter,
- &inode_p.inode.k_i);
- }
- }
-
- ret = bch2_trans_commit(trans, disk_res,
- &inode->ei_journal_seq,
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_ATOMIC|
- BTREE_INSERT_NOUNLOCK|
- BTREE_INSERT_USE_RESERVE);
- if (ret)
- goto err;
-
- if (i_sectors_delta || extended) {
- inode->ei_inode.bi_sectors = inode_u.bi_sectors;
- inode->ei_inode.bi_size = inode_u.bi_size;
- }
-
- if (direct)
- i_sectors_acct(c, inode, quota_res, i_sectors_delta);
- if (direct && extended) {
- spin_lock(&inode->v.i_lock);
- if (new_i_size > inode->v.i_size)
- i_size_write(&inode->v, new_i_size);
- spin_unlock(&inode->v.i_lock);
- }
-
- if (total_delta)
- *total_delta += i_sectors_delta;
-err:
- if (!IS_ERR_OR_NULL(inode_iter))
- bch2_trans_iter_put(trans, inode_iter);
- return ret;
-}
-
-static int bchfs_write_index_update(struct bch_write_op *wop)
-{
- struct bch_fs *c = wop->c;
- struct bchfs_write_op *op = container_of(wop,
- struct bchfs_write_op, op);
- struct quota_res *quota_res = op->is_dio
- ? &container_of(op, struct dio_write, iop)->quota_res
- : NULL;
- struct bch_inode_info *inode = op->inode;
- struct keylist *keys = &op->op.insert_keys;
- struct bkey_i *k = bch2_keylist_front(keys);
- struct btree_trans trans;
- struct btree_iter *iter;
- int ret;
-
- BUG_ON(k->k.p.inode != inode->v.i_ino);
-
- bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
-
- iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
- bkey_start_pos(&k->k),
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
-
- do {
- BKEY_PADDED(k) tmp;
-
- bkey_copy(&tmp.k, bch2_keylist_front(keys));
-
- bch2_trans_begin_updates(&trans);
-
- ret = bch2_extent_update(&trans, inode,
- &wop->res, quota_res,
- iter, &tmp.k,
- op->new_i_size,
- !op->unalloc,
- op->is_dio,
- &op->sectors_added);
- if (ret == -EINTR)
- continue;
- if (ret)
- break;
-
- if (bkey_cmp(iter->pos, bch2_keylist_front(keys)->k.p) < 0)
- bch2_cut_front(iter->pos, bch2_keylist_front(keys));
- else
- bch2_keylist_pop_front(keys);
- } while (!bch2_keylist_empty(keys));
-
- bch2_trans_exit(&trans);
-
- return ret;
-}
-
-static inline void bch2_fswrite_op_init(struct bchfs_write_op *op,
- struct bch_fs *c,
- struct bch_inode_info *inode,
- struct bch_io_opts opts,
- bool is_dio)
-{
- op->inode = inode;
- op->sectors_added = 0;
- op->is_dio = is_dio;
- op->unalloc = false;
- op->new_i_size = U64_MAX;
-
- bch2_write_op_init(&op->op, c, opts);
- op->op.target = opts.foreground_target;
- op->op.index_update_fn = bchfs_write_index_update;
- op_journal_seq_set(&op->op, &inode->ei_journal_seq);
-}
-
-static inline struct bch_io_opts io_opts(struct bch_fs *c, struct bch_inode_info *inode)
-{
- struct bch_io_opts opts = bch2_opts_to_inode_opts(c->opts);
-
- bch2_io_opts_apply(&opts, bch2_inode_opts_get(&inode->ei_inode));
- return opts;
-}
-
/* page state: */
/* stored in page->private: */
};
struct bch_page_state {
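+ /* serializes changes to the sector state array below: */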
+ spinlock_t lock;
atomic_t write_count;
struct bch_page_sector s[PAGE_SECTORS];
};
if (!s)
return NULL;
+ spin_lock_init(&s->lock);
/*
* migrate_page_move_mapping() assumes that pages with private data
* have their count elevated by 1.
if (!s)
return;
+ EBUG_ON(!PageLocked(page));
+ EBUG_ON(PageWriteback(page));
+
for (i = 0; i < ARRAY_SIZE(s->s); i++) {
disk_res.sectors += s->s[i].replicas_reserved;
s->s[i].replicas_reserved = 0;
WARN_ON((u64) page_offset(page) + offset + len >
round_up((u64) i_size_read(&inode->v), block_bytes(c)));
+ spin_lock(&s->lock);
+
for (i = round_down(offset, block_bytes(c)) >> 9;
i < round_up(offset + len, block_bytes(c)) >> 9;
i++) {
s->s[i].state = max_t(unsigned, s->s[i].state, SECTOR_DIRTY);
}
+ spin_unlock(&s->lock);
+
if (dirty_sectors)
i_sectors_acct(c, inode, &res->quota, dirty_sectors);
void bch2_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
- EBUG_ON(!PageLocked(page));
- EBUG_ON(PageWriteback(page));
-
if (offset || length < PAGE_SIZE)
return;
int bch2_releasepage(struct page *page, gfp_t gfp_mask)
{
- /* XXX: this can't take locks that are held while we allocate memory */
- EBUG_ON(!PageLocked(page));
- EBUG_ON(PageWriteback(page));
-
if (PageDirty(page))
return 0;
{
struct bch_inode_info *inode = to_bch_ei(mapping->host);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch_io_opts opts = io_opts(c, inode);
+ struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
struct btree_trans trans;
struct btree_iter *iter;
struct page *page;
{
struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch_io_opts opts = io_opts(c, inode);
+ struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
struct bch_read_bio *rbio;
rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts);
DECLARE_COMPLETION_ONSTACK(done);
rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read),
- io_opts(c, inode));
+ io_opts(c, &inode->ei_inode));
rbio->bio.bi_private = &done;
rbio->bio.bi_end_io = bch2_read_single_page_end_io;
static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
struct bch_inode_info *inode)
{
- return (struct bch_writepage_state) { .opts = io_opts(c, inode) };
+ return (struct bch_writepage_state) {
+ .opts = io_opts(c, &inode->ei_inode)
+ };
}
static void bch2_writepage_io_free(struct closure *cl)
struct bch_writepage_io *io = container_of(cl,
struct bch_writepage_io, cl);
- bio_put(&io->op.op.wbio.bio);
+ bio_put(&io->op.wbio.bio);
}
static void bch2_writepage_io_done(struct closure *cl)
{
struct bch_writepage_io *io = container_of(cl,
struct bch_writepage_io, cl);
- struct bch_fs *c = io->op.op.c;
- struct bio *bio = &io->op.op.wbio.bio;
+ struct bch_fs *c = io->op.c;
+ struct bio *bio = &io->op.wbio.bio;
struct bvec_iter_all iter;
struct bio_vec *bvec;
unsigned i;
- if (io->op.op.error) {
+ if (io->op.error) {
bio_for_each_segment_all(bvec, bio, iter) {
struct bch_page_state *s;
SetPageError(bvec->bv_page);
mapping_set_error(bvec->bv_page->mapping, -EIO);
- lock_page(bvec->bv_page);
- s = bch2_page_state(bvec->bv_page);
+ s = __bch2_page_state(bvec->bv_page);
+ spin_lock(&s->lock);
for (i = 0; i < PAGE_SECTORS; i++)
s->s[i].nr_replicas = 0;
- unlock_page(bvec->bv_page);
+ spin_unlock(&s->lock);
}
}
* racing with fallocate can cause us to add fewer sectors than
* expected - but we shouldn't add more sectors than expected:
*/
- BUG_ON(io->op.sectors_added > (s64) io->new_sectors);
+ BUG_ON(io->op.i_sectors_delta > 0);
/*
* (error (due to going RO) halfway through a page can screw that up
* slightly)
* XXX wtf?
- BUG_ON(io->op.sectors_added - io->new_sectors >= (s64) PAGE_SECTORS);
+ BUG_ON(io->op.i_sectors_delta >= PAGE_SECTORS);
*/
/*
* PageWriteback is effectively our ref on the inode - fixup i_blocks
* before calling end_page_writeback:
*/
- if (io->op.sectors_added != io->new_sectors)
- i_sectors_acct(c, io->op.inode, NULL,
- io->op.sectors_added - (s64) io->new_sectors);
+ i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
bio_for_each_segment_all(bvec, bio, iter) {
struct bch_page_state *s = __bch2_page_state(bvec->bv_page);
struct bch_writepage_io *io = w->io;
w->io = NULL;
- closure_call(&io->op.op.cl, bch2_write, NULL, &io->cl);
+ closure_call(&io->op.cl, bch2_write, NULL, &io->cl);
continue_at(&io->cl, bch2_writepage_io_done, NULL);
}
w->io = container_of(bio_alloc_bioset(GFP_NOFS,
BIO_MAX_PAGES,
&c->writepage_bioset),
- struct bch_writepage_io, op.op.wbio.bio);
+ struct bch_writepage_io, op.wbio.bio);
closure_init(&w->io->cl, NULL);
- w->io->new_sectors = 0;
- bch2_fswrite_op_init(&w->io->op, c, inode, w->opts, false);
- op = &w->io->op.op;
+ w->io->inode = inode;
+
+ op = &w->io->op;
+ bch2_write_op_init(op, c, w->opts);
+ op->target = w->opts.foreground_target;
+ op_journal_seq_set(op, &inode->ei_journal_seq);
op->nr_replicas = nr_replicas;
op->res.nr_replicas = nr_replicas;
op->write_point = writepoint_hashed(inode->ei_last_dirtied);
}
if (w->io &&
- (w->io->op.op.res.nr_replicas != nr_replicas_this_write ||
- bio_full(&w->io->op.op.wbio.bio) ||
- bio_end_sector(&w->io->op.op.wbio.bio) != sector))
+ (w->io->op.res.nr_replicas != nr_replicas_this_write ||
+ bio_full(&w->io->op.wbio.bio) ||
+ bio_end_sector(&w->io->op.wbio.bio) != sector))
bch2_writepage_do_io(w);
if (!w->io)
bch2_writepage_io_alloc(c, w, inode, sector,
nr_replicas_this_write);
- w->io->new_sectors += dirty_sectors;
-
atomic_inc(&s->write_count);
- BUG_ON(inode != w->io->op.inode);
- BUG_ON(!bio_add_page(&w->io->op.op.wbio.bio, page,
+ BUG_ON(inode != w->io->inode);
+ BUG_ON(!bio_add_page(&w->io->op.wbio.bio, page,
sectors << 9, offset << 9));
/* Check for writing past i_size: */
- WARN_ON((bio_end_sector(&w->io->op.op.wbio.bio) << 9) >
+ WARN_ON((bio_end_sector(&w->io->op.wbio.bio) << 9) >
round_up(i_size, block_bytes(c)));
- w->io->op.op.res.sectors += reserved_sectors;
+ w->io->op.res.sectors += reserved_sectors;
+ w->io->op.i_sectors_delta -= dirty_sectors;
w->io->op.new_i_size = i_size;
if (wbc->sync_mode == WB_SYNC_ALL)
- w->io->op.op.wbio.bio.bi_opf |= REQ_SYNC;
+ w->io->op.wbio.bio.bi_opf |= REQ_SYNC;
offset += sectors;
}
struct file *file = req->ki_filp;
struct bch_inode_info *inode = file_bch_inode(file);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch_io_opts opts = io_opts(c, inode);
+ struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
struct dio_read *dio;
struct bio *bio;
loff_t offset = req->ki_pos;
static long bch2_dio_write_loop(struct dio_write *dio)
{
bool kthread = (current->flags & PF_KTHREAD) != 0;
- struct bch_fs *c = dio->iop.op.c;
+ struct bch_fs *c = dio->op.c;
struct kiocb *req = dio->req;
struct address_space *mapping = req->ki_filp->f_mapping;
- struct bch_inode_info *inode = dio->iop.inode;
- struct bio *bio = &dio->iop.op.wbio.bio;
+ struct bch_inode_info *inode = file_bch_inode(req->ki_filp);
+ struct bio *bio = &dio->op.wbio.bio;
struct bvec_iter_all iter;
struct bio_vec *bv;
unsigned unaligned;
+ u64 new_i_size;
loff_t offset;
bool sync;
long ret;
__pagecache_block_get(&mapping->add_lock);
/* Write and invalidate pagecache range that we're writing to: */
- offset = req->ki_pos + (dio->iop.op.written << 9);
+ offset = req->ki_pos + (dio->op.written << 9);
ret = write_invalidate_inode_pages_range(mapping,
offset,
offset + iov_iter_count(&dio->iter) - 1);
goto err;
while (1) {
- offset = req->ki_pos + (dio->iop.op.written << 9);
+ offset = req->ki_pos + (dio->op.written << 9);
BUG_ON(current->pagecache_lock);
current->pagecache_lock = &mapping->add_lock;
if (unlikely(ret))
goto err;
- dio->iop.op.pos = POS(inode->v.i_ino, offset >> 9);
+ dio->op.pos = POS(inode->v.i_ino, offset >> 9);
task_io_account_write(bio->bi_iter.bi_size);
- closure_call(&dio->iop.op.cl, bch2_write, NULL, &dio->cl);
+ closure_call(&dio->op.cl, bch2_write, NULL, &dio->cl);
if (!dio->sync && !dio->loop && dio->iter.count) {
struct iovec *iov = dio->inline_vecs;
iov = kmalloc(dio->iter.nr_segs * sizeof(*iov),
GFP_KERNEL);
if (unlikely(!iov)) {
- dio->iop.op.error = -ENOMEM;
+ dio->op.error = -ENOMEM;
goto err_wait_io;
}
closure_sync(&dio->cl);
loop:
+ i_sectors_acct(c, inode, &dio->quota_res,
+ dio->op.i_sectors_delta);
+ dio->op.i_sectors_delta = 0;
+
+ new_i_size = req->ki_pos + ((u64) dio->op.written << 9);
+
+ spin_lock(&inode->v.i_lock);
+ if (new_i_size > inode->v.i_size)
+ i_size_write(&inode->v, new_i_size);
+ spin_unlock(&inode->v.i_lock);
+
bio_for_each_segment_all(bv, bio, iter)
put_page(bv->bv_page);
- if (!dio->iter.count || dio->iop.op.error)
+ if (!dio->iter.count || dio->op.error)
break;
bio_reset(bio);
}
- ret = dio->iop.op.error ?: ((long) dio->iop.op.written << 9);
+ ret = dio->op.error ?: ((long) dio->op.written << 9);
err:
__pagecache_block_put(&mapping->add_lock);
- bch2_disk_reservation_put(c, &dio->iop.op.res);
+ bch2_disk_reservation_put(c, &dio->op.res);
bch2_quota_reservation_put(c, inode, &dio->quota_res);
if (dio->free_iov)
struct file *file = req->ki_filp;
struct bch_inode_info *inode = file_bch_inode(file);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
struct dio_write *dio;
struct bio *bio;
ssize_t ret;
bio = bio_alloc_bioset(GFP_KERNEL,
iov_iter_npages(iter, BIO_MAX_PAGES),
&c->dio_write_bioset);
- dio = container_of(bio, struct dio_write, iop.op.wbio.bio);
+ dio = container_of(bio, struct dio_write, op.wbio.bio);
closure_init(&dio->cl, NULL);
dio->req = req;
dio->mm = current->mm;
dio->free_iov = false;
dio->quota_res.sectors = 0;
dio->iter = *iter;
- bch2_fswrite_op_init(&dio->iop, c, inode, io_opts(c, inode), true);
- dio->iop.op.write_point = writepoint_hashed((unsigned long) current);
- dio->iop.op.flags |= BCH_WRITE_NOPUT_RESERVATION;
+
+ bch2_write_op_init(&dio->op, c, opts);
+ dio->op.target = opts.foreground_target;
+ op_journal_seq_set(&dio->op, &inode->ei_journal_seq);
+ dio->op.write_point = writepoint_hashed((unsigned long) current);
+ dio->op.flags |= BCH_WRITE_NOPUT_RESERVATION;
if ((req->ki_flags & IOCB_DSYNC) &&
!c->opts.journal_flush_disabled)
- dio->iop.op.flags |= BCH_WRITE_FLUSH;
+ dio->op.flags |= BCH_WRITE_FLUSH;
ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
iter->count >> 9, true);
if (unlikely(ret))
goto err;
- dio->iop.op.nr_replicas = dio->iop.op.opts.data_replicas;
-
- ret = bch2_disk_reservation_get(c, &dio->iop.op.res, iter->count >> 9,
- dio->iop.op.opts.data_replicas, 0);
- if (unlikely(ret)) {
- if (!bch2_check_range_allocated(c, POS(inode->v.i_ino,
- req->ki_pos >> 9),
- iter->count >> 9,
- dio->iop.op.opts.data_replicas))
- goto err;
+ dio->op.nr_replicas = dio->op.opts.data_replicas;
- dio->iop.unalloc = true;
- }
+ ret = bch2_disk_reservation_get(c, &dio->op.res, iter->count >> 9,
+ dio->op.opts.data_replicas, 0);
+ if (unlikely(ret) &&
+ !bch2_check_range_allocated(c, POS(inode->v.i_ino,
+ req->ki_pos >> 9),
+ iter->count >> 9,
+ dio->op.opts.data_replicas))
+ goto err;
return bch2_dio_write_loop(dio);
err:
- bch2_disk_reservation_put(c, &dio->iop.op.res);
+ bch2_disk_reservation_put(c, &dio->op.res);
bch2_quota_reservation_put(c, inode, &dio->quota_res);
closure_debug_destroy(&dio->cl);
bio_put(bio);
/* truncate: */
-int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
- struct bpos end, struct bch_inode_info *inode,
- u64 new_i_size)
-{
- struct bch_fs *c = trans->c;
- unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
- struct bkey_s_c k;
- int ret = 0, ret2 = 0;
-
- while ((k = bch2_btree_iter_peek(iter)).k &&
- bkey_cmp(iter->pos, end) < 0) {
- struct disk_reservation disk_res =
- bch2_disk_reservation_init(c, 0);
- struct bkey_i delete;
-
- ret = bkey_err(k);
- if (ret)
- goto btree_err;
-
- bkey_init(&delete.k);
- delete.k.p = iter->pos;
-
- /* create the biggest key we can */
- bch2_key_resize(&delete.k, max_sectors);
- bch2_cut_back(end, &delete.k);
-
- bch2_trans_begin_updates(trans);
-
- ret = bch2_extent_update(trans, inode,
- &disk_res, NULL, iter, &delete,
- new_i_size, false, true, NULL);
- bch2_disk_reservation_put(c, &disk_res);
-btree_err:
- if (ret == -EINTR) {
- ret2 = ret;
- ret = 0;
- }
- if (ret)
- break;
- }
-
- if (bkey_cmp(iter->pos, end) > 0) {
- bch2_btree_iter_set_pos(iter, end);
- ret = bch2_btree_iter_traverse(iter);
- }
-
- return ret ?: ret2;
-}
-
-static int __bch2_fpunch(struct bch_fs *c, struct bch_inode_info *inode,
- u64 start_offset, u64 end_offset)
-{
- struct btree_trans trans;
- struct btree_iter *iter;
- int ret = 0;
-
- bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
-
- iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
- POS(inode->v.i_ino, start_offset),
- BTREE_ITER_INTENT);
-
- ret = bch2_fpunch_at(&trans, iter,
- POS(inode->v.i_ino, end_offset),
- inode, 0);
-
- bch2_trans_exit(&trans);
-
- if (ret == -EINTR)
- ret = 0;
-
- return ret;
-}
-
static inline int range_has_data(struct bch_fs *c,
struct bpos start,
struct bpos end)
from, round_up(from, PAGE_SIZE));
}
-static int bch2_extend(struct bch_inode_info *inode, struct iattr *iattr)
+static int bch2_extend(struct bch_inode_info *inode,
+ struct bch_inode_unpacked *inode_u,
+ struct iattr *iattr)
{
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct address_space *mapping = inode->v.i_mapping;
int ret;
- ret = filemap_write_and_wait_range(mapping,
- inode->ei_inode.bi_size, S64_MAX);
+ /*
+ * sync appends:
+ *
+ * this has to be done _before_ extending i_size:
+ */
+ ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);
if (ret)
return ret;
{
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct address_space *mapping = inode->v.i_mapping;
+ struct bch_inode_unpacked inode_u;
+ struct btree_trans trans;
+ struct btree_iter *iter;
u64 new_i_size = iattr->ia_size;
- bool shrink;
+ s64 i_sectors_delta = 0;
int ret = 0;
inode_dio_wait(&inode->v);
pagecache_block_get(&mapping->add_lock);
- BUG_ON(inode->v.i_size < inode->ei_inode.bi_size);
+ /*
+ * fetch current on disk i_size: inode is locked, i_size can only
+ * increase underneath us:
+ */
+ bch2_trans_init(&trans, c, 0, 0);
+ iter = bch2_inode_peek(&trans, &inode_u, inode->v.i_ino, 0);
+ ret = PTR_ERR_OR_ZERO(iter);
+ bch2_trans_exit(&trans);
+
+ if (ret)
+ goto err;
- shrink = iattr->ia_size <= inode->v.i_size;
+ BUG_ON(inode->v.i_size < inode_u.bi_size);
- if (!shrink) {
- ret = bch2_extend(inode, iattr);
+ if (iattr->ia_size > inode->v.i_size) {
+ ret = bch2_extend(inode, &inode_u, iattr);
goto err;
}
* userspace has to redirty it and call .mkwrite -> set_page_dirty
* again to allocate the part of the page that was extended.
*/
- if (iattr->ia_size > inode->ei_inode.bi_size)
+ if (iattr->ia_size > inode_u.bi_size)
ret = filemap_write_and_wait_range(mapping,
- inode->ei_inode.bi_size,
+ inode_u.bi_size,
iattr->ia_size - 1);
else if (iattr->ia_size & (PAGE_SIZE - 1))
ret = filemap_write_and_wait_range(mapping,
truncate_setsize(&inode->v, iattr->ia_size);
- ret = __bch2_fpunch(c, inode,
+ ret = bch2_fpunch(c, inode->v.i_ino,
round_up(iattr->ia_size, block_bytes(c)) >> 9,
- U64_MAX);
+ U64_MAX, &inode->ei_journal_seq, &i_sectors_delta);
+ i_sectors_acct(c, inode, NULL, i_sectors_delta);
+
if (unlikely(ret))
goto err;
/* fallocate: */
-static long bch2_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
+static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
{
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct address_space *mapping = inode->v.i_mapping;
truncate_pagecache_range(&inode->v, offset, offset + len - 1);
- if (discard_start < discard_end)
- ret = __bch2_fpunch(c, inode, discard_start, discard_end);
+ if (discard_start < discard_end) {
+ s64 i_sectors_delta = 0;
+
+ ret = bch2_fpunch(c, inode->v.i_ino,
+ discard_start, discard_end,
+ &inode->ei_journal_seq,
+ &i_sectors_delta);
+ i_sectors_acct(c, inode, NULL, i_sectors_delta);
+ }
err:
pagecache_block_put(&mapping->add_lock);
inode_unlock(&inode->v);
return ret;
}
-static long bch2_fcollapse_finsert(struct bch_inode_info *inode,
+static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
loff_t offset, loff_t len,
bool insert)
{
ATTR_MTIME|ATTR_CTIME);
mutex_unlock(&inode->ei_update_lock);
} else {
- ret = __bch2_fpunch(c, inode, offset >> 9,
- (offset + len) >> 9);
+ s64 i_sectors_delta = 0;
+
+ ret = bch2_fpunch(c, inode->v.i_ino,
+ offset >> 9, (offset + len) >> 9,
+ &inode->ei_journal_seq,
+ &i_sectors_delta);
+ i_sectors_acct(c, inode, NULL, i_sectors_delta);
+
if (ret)
goto err;
}
copy.k.k.p.offset += shift >> 9;
bch2_btree_iter_set_pos(dst, bkey_start_pos(©.k.k));
- ret = bch2_btree_iter_traverse(dst);
- if (ret)
- goto bkey_err;
-
ret = bch2_extent_atomic_end(dst, ©.k, &atomic_end);
if (ret)
goto bkey_err;
return ret;
}
-static long bch2_fallocate(struct bch_inode_info *inode, int mode,
- loff_t offset, loff_t len)
+static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
+ loff_t offset, loff_t len)
{
struct address_space *mapping = inode->v.i_mapping;
struct bch_fs *c = inode->v.i_sb->s_fs_info;
loff_t block_start = round_down(offset, block_bytes(c));
loff_t block_end = round_up(end, block_bytes(c));
unsigned sectors;
- unsigned replicas = io_opts(c, inode).data_replicas;
+ unsigned replicas = io_opts(c, &inode->ei_inode).data_replicas;
int ret;
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
end_pos = POS(inode->v.i_ino, block_end >> 9);
while (bkey_cmp(iter->pos, end_pos) < 0) {
+ s64 i_sectors_delta = 0;
struct disk_reservation disk_res = { 0 };
struct quota_res quota_res = { 0 };
struct bkey_i_reservation reservation;
bch2_trans_begin_updates(&trans);
- ret = bch2_extent_update(&trans, inode,
- &disk_res, "a_res,
- iter, &reservation.k_i,
- 0, true, true, NULL);
+ ret = bch2_extent_update(&trans, iter, &reservation.k_i,
+ &disk_res, &inode->ei_journal_seq,
+ 0, &i_sectors_delta);
+ i_sectors_acct(c, inode, "a_res, i_sectors_delta);
bkey_err:
bch2_quota_reservation_put(c, inode, "a_res);
bch2_disk_reservation_put(c, &disk_res);
if (ret)
goto err;
}
- bch2_trans_unlock(&trans);
- if (!(mode & FALLOC_FL_KEEP_SIZE) &&
- end > inode->v.i_size) {
- i_size_write(&inode->v, end);
+ /*
+ * Do we need to extend the file?
+ *
+ * If we zeroed up to the end of the file, we dropped whatever writes
+ * were going to write out the current i_size, so we have to extend
+ * manually even if FL_KEEP_SIZE was set:
+ */
+ if (end >= inode->v.i_size &&
+ (!(mode & FALLOC_FL_KEEP_SIZE) ||
+ (mode & FALLOC_FL_ZERO_RANGE))) {
+ struct btree_iter *inode_iter;
+ struct bch_inode_unpacked inode_u;
- mutex_lock(&inode->ei_update_lock);
- ret = bch2_write_inode_size(c, inode, inode->v.i_size, 0);
- mutex_unlock(&inode->ei_update_lock);
- }
+ do {
+ bch2_trans_begin(&trans);
+ inode_iter = bch2_inode_peek(&trans, &inode_u,
+ inode->v.i_ino, 0);
+ ret = PTR_ERR_OR_ZERO(inode_iter);
+ } while (ret == -EINTR);
+
+ bch2_trans_unlock(&trans);
+
+ if (ret)
+ goto err;
- /* blech */
- if ((mode & FALLOC_FL_KEEP_SIZE) &&
- (mode & FALLOC_FL_ZERO_RANGE) &&
- inode->ei_inode.bi_size != inode->v.i_size) {
- /* sync appends.. */
+ /*
+ * Sync existing appends before extending i_size,
+ * as in bch2_extend():
+ */
ret = filemap_write_and_wait_range(mapping,
- inode->ei_inode.bi_size, S64_MAX);
+ inode_u.bi_size, S64_MAX);
if (ret)
goto err;
- if (inode->ei_inode.bi_size != inode->v.i_size) {
- mutex_lock(&inode->ei_update_lock);
- ret = bch2_write_inode_size(c, inode,
- inode->v.i_size, 0);
- mutex_unlock(&inode->ei_update_lock);
- }
+ if (mode & FALLOC_FL_KEEP_SIZE)
+ end = inode->v.i_size;
+ else
+ i_size_write(&inode->v, end);
+
+ mutex_lock(&inode->ei_update_lock);
+ ret = bch2_write_inode_size(c, inode, end, 0);
+ mutex_unlock(&inode->ei_update_lock);
}
err:
bch2_trans_exit(&trans);
struct bch_inode_info *inode = file_bch_inode(file);
if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
- return bch2_fallocate(inode, mode, offset, len);
+ return bchfs_fallocate(inode, mode, offset, len);
if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
- return bch2_fpunch(inode, offset, len);
+ return bchfs_fpunch(inode, offset, len);
if (mode == FALLOC_FL_INSERT_RANGE)
- return bch2_fcollapse_finsert(inode, offset, len, true);
+ return bchfs_fcollapse_finsert(inode, offset, len, true);
if (mode == FALLOC_FL_COLLAPSE_RANGE)
- return bch2_fcollapse_finsert(inode, offset, len, false);
+ return bchfs_fcollapse_finsert(inode, offset, len, false);
return -EOPNOTSUPP;
}
lock_page(page);
s = bch2_page_state(page);
- if (s)
+ if (s) {
+ spin_lock(&s->lock);
for (j = 0; j < PAGE_SECTORS; j++)
s->s[j].nr_replicas = 0;
+ spin_unlock(&s->lock);
+ }
unlock_page(page);
}
struct bch_inode_info *src = file_bch_inode(file_src);
struct bch_inode_info *dst = file_bch_inode(file_dst);
struct bch_fs *c = src->v.i_sb->s_fs_info;
+ s64 i_sectors_delta = 0;
loff_t ret = 0;
loff_t aligned_len;
bch2_lock_inodes(INODE_LOCK, src, dst);
+ file_update_time(file_dst);
+
inode_dio_wait(&src->v);
inode_dio_wait(&dst->v);
file_dst, pos_dst,
&len, remap_flags);
if (ret < 0 || len == 0)
- goto out_unlock;
+ goto err;
aligned_len = round_up(len, block_bytes(c));
ret = write_invalidate_inode_pages_range(dst->v.i_mapping,
pos_dst, pos_dst + aligned_len);
if (ret)
- goto out_unlock;
+ goto err;
mark_range_unallocated(src, pos_src, pos_src + aligned_len);
- ret = bch2_remap_range(c, dst,
+ ret = bch2_remap_range(c,
POS(dst->v.i_ino, pos_dst >> 9),
POS(src->v.i_ino, pos_src >> 9),
aligned_len >> 9,
- pos_dst + len);
- if (ret > 0)
- ret = min(ret << 9, len);
+ &dst->ei_journal_seq,
+ pos_dst + len, &i_sectors_delta);
+ if (ret < 0)
+ goto err;
+
+ ret <<= 9;
+ /*
+ * due to alignment, we might have remapped slightly more than requested
+ */
+ ret = min(ret, len);
+
+ /* XXX get a quota reservation */
+ i_sectors_acct(c, dst, NULL, i_sectors_delta);
-out_unlock:
+ spin_lock(&dst->v.i_lock);
+ if (pos_dst + len > dst->v.i_size)
+ i_size_write(&dst->v, pos_dst + len);
+ spin_unlock(&dst->v.i_lock);
+err:
__pagecache_block_put(&dst->v.i_mapping->add_lock);
__pagecache_block_put(&src->v.i_mapping->add_lock);
pr_verbose_init(c->opts, "");
if (bioset_init(&c->writepage_bioset,
- 4, offsetof(struct bch_writepage_io, op.op.wbio.bio),
+ 4, offsetof(struct bch_writepage_io, op.wbio.bio),
BIOSET_NEED_BVECS) ||
bioset_init(&c->dio_read_bioset,
4, offsetof(struct dio_read, rbio.bio),
BIOSET_NEED_BVECS) ||
bioset_init(&c->dio_write_bioset,
- 4, offsetof(struct dio_write, iop.op.wbio.bio),
+ 4, offsetof(struct dio_write, op.wbio.bio),
BIOSET_NEED_BVECS))
ret = -ENOMEM;
struct quota_res;
-int bch2_extent_update(struct btree_trans *,
- struct bch_inode_info *,
- struct disk_reservation *,
- struct quota_res *,
- struct btree_iter *,
- struct bkey_i *,
- u64, bool, bool, s64 *);
-int bch2_fpunch_at(struct btree_trans *, struct btree_iter *,
- struct bpos, struct bch_inode_info *, u64);
-
int __must_check bch2_write_inode_size(struct bch_fs *,
struct bch_inode_info *,
loff_t, unsigned);
} while ((v = cmpxchg(&dst->ei_journal_seq, old, journal_seq)) != old);
}
-/*
- * I_SIZE_DIRTY requires special handling:
- *
- * To the recovery code, the flag means that there is stale data past i_size
- * that needs to be deleted; it's used for implementing atomic appends and
- * truncates.
- *
- * On append, we set I_SIZE_DIRTY before doing the write, then after the write
- * we clear I_SIZE_DIRTY atomically with updating i_size to the new larger size
- * that exposes the data we just wrote.
- *
- * On truncate, it's the reverse: We set I_SIZE_DIRTY atomically with setting
- * i_size to the new smaller size, then we delete the data that we just made
- * invisible, and then we clear I_SIZE_DIRTY.
- *
- * Because there can be multiple appends in flight at a time, we need a refcount
- * (i_size_dirty_count) instead of manipulating the flag directly. Nonzero
- * refcount means I_SIZE_DIRTY is set, zero means it's cleared.
- *
- * Because write_inode() can be called at any time, i_size_dirty_count means
- * something different to the runtime code - it means to write_inode() "don't
- * update i_size yet".
- *
- * We don't clear I_SIZE_DIRTY directly, we let write_inode() clear it when
- * i_size_dirty_count is zero - but the reverse is not true, I_SIZE_DIRTY must
- * be set explicitly.
- */
-
void bch2_inode_update_after_write(struct bch_fs *c,
struct bch_inode_info *inode,
struct bch_inode_unpacked *bi,
}
}
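+/* IO options for an inode: filesystem defaults, overridden per inode: */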
+static inline struct bch_io_opts
+io_opts(struct bch_fs *c, struct bch_inode_unpacked *inode)
+{
+ struct bch_io_opts opts = bch2_opts_to_inode_opts(c->opts);
+
+ bch2_io_opts_apply(&opts, bch2_inode_opts_get(inode));
+ return opts;
+}
+
static inline u8 mode_to_type(umode_t mode)
{
return (mode >> 12) & 15;
#include "ec.h"
#include "error.h"
#include "extents.h"
+#include "inode.h"
#include "io.h"
#include "journal.h"
#include "keylist.h"
mutex_unlock(&c->bio_bounce_pages_lock);
}
+/* Extent update path: */
+
+static int sum_sector_overwrites(struct btree_trans *trans,
+ struct btree_iter *extent_iter,
+ struct bkey_i *new,
+ bool may_allocate,
+ bool *maybe_extending,
+ s64 *delta)
+{
+ struct btree_iter *iter;
+ struct bkey_s_c old;
+ int ret = 0;
+
+ *maybe_extending = true;
+ *delta = 0;
+
+ iter = bch2_trans_copy_iter(trans, extent_iter);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, old, ret) {
+ if (!may_allocate &&
+ bch2_bkey_nr_ptrs_allocated(old) <
+ bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(new))) {
+ ret = -ENOSPC;
+ break;
+ }
+
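+ /*
+ * sectors in the overlap of the old and new keys, weighted by
+ * allocation status: +1 when allocating over a hole, -1 when a
+ * hole replaces an allocation, 0 when both or neither allocate:
+ */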
+ *delta += (min(new->k.p.offset,
+ old.k->p.offset) -
+ max(bkey_start_offset(&new->k),
+ bkey_start_offset(old.k))) *
+ (bkey_extent_is_allocation(&new->k) -
+ bkey_extent_is_allocation(old.k));
+
+ if (bkey_cmp(old.k->p, new->k.p) >= 0) {
+ /*
+ * Check if there's already data above where we're
+ * going to be writing to - this means we're definitely
+ * not extending the file:
+ *
+ * Note that it's not sufficient to check if there's
+ * data up to the sector offset we're going to be
+ * writing to, because i_size could be up to one block
+ * less:
+ */
+ if (!bkey_cmp(old.k->p, new->k.p))
+ old = bch2_btree_iter_next(iter);
+
+ if (old.k && !bkey_err(old) &&
+ old.k->p.inode == extent_iter->pos.inode &&
+ bkey_extent_is_data(old.k))
+ *maybe_extending = false;
+
+ break;
+ }
+ }
+
+ bch2_trans_iter_put(trans, iter);
+ return ret;
+}
+
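+/*
+ * Insert an extent and update the inode's i_size and i_sectors in the same
+ * transaction commit, so the inode can never be observed inconsistent with
+ * the extents btree:
+ */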
+int bch2_extent_update(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_i *k,
+ struct disk_reservation *disk_res,
+ u64 *journal_seq,
+ u64 new_i_size,
+ s64 *i_sectors_delta)
+{
+ /* this must live until after bch2_trans_commit(): */
+ struct bkey_inode_buf inode_p;
+ bool extending = false;
+ s64 delta = 0;
+ int ret;
+
+ ret = bch2_extent_trim_atomic(k, iter);
+ if (ret)
+ return ret;
+
+ ret = sum_sector_overwrites(trans, iter, k,
+ disk_res && disk_res->sectors != 0,
+ &extending, &delta);
+ if (ret)
+ return ret;
+
+ new_i_size = extending
+ ? min(k->k.p.offset << 9, new_i_size)
+ : 0;
+
+ if (delta || new_i_size) {
+ struct btree_iter *inode_iter;
+ struct bch_inode_unpacked inode_u;
+
+ inode_iter = bch2_inode_peek(trans, &inode_u,
+ k->k.p.inode, BTREE_ITER_INTENT);
+ if (IS_ERR(inode_iter))
+ return PTR_ERR(inode_iter);
+
+ /*
+ * XXX:
+ * writeback can race a bit with truncate, because truncate
+ * first updates the inode then truncates the pagecache. This is
+ * ugly, but lets us preserve the invariant that the in memory
+ * i_size is always >= the on disk i_size.
+ *
+ BUG_ON(new_i_size > inode_u.bi_size &&
+ (inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY));
+ */
+ BUG_ON(new_i_size > inode_u.bi_size && !extending);
+
+ if (!(inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
+ new_i_size > inode_u.bi_size)
+ inode_u.bi_size = new_i_size;
+ else
+ new_i_size = 0;
+
+ inode_u.bi_sectors += delta;
+
+ if (delta || new_i_size) {
+ bch2_inode_pack(&inode_p, &inode_u);
+ bch2_trans_update(trans, inode_iter,
+ &inode_p.inode.k_i);
+ }
+
+ bch2_trans_iter_put(trans, inode_iter);
+ }
+
+ bch2_trans_update(trans, iter, k);
+
+ ret = bch2_trans_commit(trans, disk_res, journal_seq,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_ATOMIC|
+ BTREE_INSERT_USE_RESERVE);
+ if (!ret && i_sectors_delta)
+ *i_sectors_delta += delta;
+
+ return ret;
+}
+
+int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos end, u64 *journal_seq,
+ s64 *i_sectors_delta)
+{
+ struct bch_fs *c = trans->c;
+ unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
+ struct bkey_s_c k;
+ int ret = 0, ret2 = 0;
+
+ while ((k = bch2_btree_iter_peek(iter)).k &&
+ bkey_cmp(iter->pos, end) < 0) {
+ struct disk_reservation disk_res =
+ bch2_disk_reservation_init(c, 0);
+ struct bkey_i delete;
+
+ ret = bkey_err(k);
+ if (ret)
+ goto btree_err;
+
+ bkey_init(&delete.k);
+ delete.k.p = iter->pos;
+
+ /* create the biggest key we can */
+ bch2_key_resize(&delete.k, max_sectors);
+ bch2_cut_back(end, &delete.k);
+
+ bch2_trans_begin_updates(trans);
+
+ ret = bch2_extent_update(trans, iter, &delete,
+ &disk_res, journal_seq,
+ 0, i_sectors_delta);
+ bch2_disk_reservation_put(c, &disk_res);
+btree_err:
+ if (ret == -EINTR) {
+ ret2 = ret;
+ ret = 0;
+ }
+ if (ret)
+ break;
+ }
+
+ if (bkey_cmp(iter->pos, end) > 0) {
+ bch2_btree_iter_set_pos(iter, end);
+ ret = bch2_btree_iter_traverse(iter);
+ }
+
+ return ret ?: ret2;
+}
+
+int bch2_fpunch(struct bch_fs *c, u64 inum, u64 start, u64 end,
+ u64 *journal_seq, s64 *i_sectors_delta)
+{
+ struct btree_trans trans;
+ struct btree_iter *iter;
+ int ret = 0;
+
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+ POS(inum, start),
+ BTREE_ITER_INTENT);
+
+ ret = bch2_fpunch_at(&trans, iter, POS(inum, end),
+ journal_seq, i_sectors_delta);
+ bch2_trans_exit(&trans);
+
+ if (ret == -EINTR)
+ ret = 0;
+
+ return ret;
+}
+
+int bch2_write_index_default(struct bch_write_op *op)
+{
+ struct bch_fs *c = op->c;
+ struct keylist *keys = &op->insert_keys;
+ struct bkey_i *k = bch2_keylist_front(keys);
+ struct btree_trans trans;
+ struct btree_iter *iter;
+ int ret;
+
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
+
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+ bkey_start_pos(&k->k),
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+
+ do {
+ BKEY_PADDED(k) tmp;
+
+ bkey_copy(&tmp.k, bch2_keylist_front(keys));
+
+ bch2_trans_begin_updates(&trans);
+
+ ret = bch2_extent_update(&trans, iter, &tmp.k,
+ &op->res, op_journal_seq(op),
+ op->new_i_size, &op->i_sectors_delta);
+ if (ret == -EINTR)
+ continue;
+ if (ret)
+ break;
+
+ if (bkey_cmp(iter->pos, bch2_keylist_front(keys)->k.p) < 0)
+ bch2_cut_front(iter->pos, bch2_keylist_front(keys));
+ else
+ bch2_keylist_pop_front(keys);
+ } while (!bch2_keylist_empty(keys));
+
+ bch2_trans_exit(&trans);
+
+ return ret;
+}
+
/* Writes */
void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
closure_return(cl);
}
-int bch2_write_index_default(struct bch_write_op *op)
-{
- struct bch_fs *c = op->c;
- struct btree_trans trans;
- struct btree_iter *iter;
- struct keylist *keys = &op->insert_keys;
- int ret;
-
- BUG_ON(bch2_keylist_empty(keys));
- bch2_verify_keylist_sorted(keys);
-
- bch2_trans_init(&trans, c, BTREE_ITER_MAX, 256);
-retry:
- bch2_trans_begin(&trans);
-
- iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
- bkey_start_pos(&bch2_keylist_front(keys)->k),
- BTREE_ITER_INTENT);
-
- do {
- BKEY_PADDED(k) split;
-
- bkey_copy(&split.k, bch2_keylist_front(keys));
-
- ret = bch2_extent_trim_atomic(&split.k, iter);
- if (ret)
- break;
-
- bch2_trans_update(&trans, iter, &split.k);
-
- ret = bch2_trans_commit(&trans, &op->res, op_journal_seq(op),
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_USE_RESERVE);
- if (ret)
- break;
-
- if (bkey_cmp(iter->pos, bch2_keylist_front(keys)->k.p) < 0)
- bch2_cut_front(iter->pos, bch2_keylist_front(keys));
- else
- bch2_keylist_pop_front(keys);
- } while (!bch2_keylist_empty(keys));
-
- if (ret == -EINTR) {
- ret = 0;
- goto retry;
- }
-
- bch2_trans_exit(&trans);
-
- return ret;
-}
-
/**
* bch_write_index - after a write, update index to point to new data
*/
: op->c->wq;
}
+int bch2_extent_update(struct btree_trans *, struct btree_iter *,
+ struct bkey_i *, struct disk_reservation *,
+ u64 *, u64, s64 *);
+int bch2_fpunch_at(struct btree_trans *, struct btree_iter *,
+ struct bpos, u64 *, s64 *);
+int bch2_fpunch(struct bch_fs *c, u64, u64, u64, u64 *, s64 *);
+
int bch2_write_index_default(struct bch_write_op *);
static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
op->write_point = (struct write_point_specifier) { 0 };
op->res = (struct disk_reservation) { 0 };
op->journal_seq = 0;
+ op->new_i_size = U64_MAX;
+ op->i_sectors_delta = 0;
op->index_update_fn = bch2_write_index_default;
}
u64 *journal_seq_p;
u64 journal_seq;
};
+ u64 new_i_size;
+ s64 i_sectors_delta;
int (*index_update_fn)(struct bch_write_op *);
if (ret)
goto err;
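+ /* extent inserts can't span btree node boundaries: */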
+ atomic_end = bpos_min(k->k.p, iter->l[0].b->key.k.p);
+
split_iter = bch2_trans_copy_iter(&trans, iter);
ret = PTR_ERR_OR_ZERO(split_iter);
if (ret)
if (ret)
goto err;
- ret = bch2_extent_atomic_end(split_iter, k, &atomic_end);
- if (ret)
- goto err;
-
if (!remark &&
remark_if_split &&
bkey_cmp(atomic_end, k->k.p) < 0) {
#include "bcachefs.h"
#include "btree_update.h"
#include "extents.h"
-#include "fs.h"
-#include "fs-io.h"
+#include "inode.h"
+#include "io.h"
#include "reflink.h"
#include <linux/sched/signal.h>
bch2_bkey_ptrs_to_text(out, c, k);
}
-/*
- * bch2_remap_range() depends on bch2_extent_update(), which depends on various
- * things tied to the linux vfs for inode updates, for now:
- */
-#ifndef NO_BCACHEFS_FS
-
static int bch2_make_extent_indirect(struct btree_trans *trans,
struct btree_iter *extent_iter,
struct bkey_i_extent *e)
static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
{
struct bkey_s_c k = bch2_btree_iter_peek(iter);
+ int ret;
- while (1) {
- if (bkey_err(k))
- return k;
-
+ for_each_btree_key_continue(iter, 0, k, ret) {
if (bkey_cmp(iter->pos, end) >= 0)
return bkey_s_c_null;
if (k.k->type == KEY_TYPE_extent ||
k.k->type == KEY_TYPE_reflink_p)
- return k;
-
- k = bch2_btree_iter_next(iter);
+ break;
}
+
+ return k;
}
s64 bch2_remap_range(struct bch_fs *c,
- struct bch_inode_info *dst_inode,
struct bpos dst_start, struct bpos src_start,
- u64 remap_sectors, u64 new_i_size)
+ u64 remap_sectors, u64 *journal_seq,
+ u64 new_i_size, s64 *i_sectors_delta)
{
struct btree_trans trans;
struct btree_iter *dst_iter, *src_iter;
struct bpos dst_end = dst_start, src_end = src_start;
struct bpos dst_want, src_want;
u64 src_done, dst_done;
- int ret = 0;
+ int ret = 0, ret2 = 0;
if (!(c->sb.features & (1ULL << BCH_FEATURE_REFLINK))) {
mutex_lock(&c->sb_lock);
if (bkey_cmp(dst_iter->pos, dst_want) < 0) {
ret = bch2_fpunch_at(&trans, dst_iter, dst_want,
- dst_inode, new_i_size);
+ journal_seq, i_sectors_delta);
if (ret)
goto btree_err;
continue;
min(src_k.k->p.offset - src_iter->pos.offset,
dst_end.offset - dst_iter->pos.offset));
- ret = bch2_extent_update(&trans, dst_inode, NULL, NULL,
- dst_iter, &new_dst.k,
- new_i_size, false, true, NULL);
+ ret = bch2_extent_update(&trans, dst_iter, &new_dst.k,
+ NULL, journal_seq,
+ new_i_size, i_sectors_delta);
if (ret)
goto btree_err;
dst_done = dst_iter->pos.offset - dst_start.offset;
new_i_size = min(dst_iter->pos.offset << 9, new_i_size);
- ret = bch2_trans_exit(&trans) ?: ret;
+ bch2_trans_begin(&trans);
- mutex_lock(&dst_inode->ei_update_lock);
- if (dst_inode->v.i_size < new_i_size) {
- i_size_write(&dst_inode->v, new_i_size);
- ret = bch2_write_inode_size(c, dst_inode, new_i_size,
- ATTR_MTIME|ATTR_CTIME);
- }
- mutex_unlock(&dst_inode->ei_update_lock);
+ do {
+ struct bch_inode_unpacked inode_u;
+ struct btree_iter *inode_iter;
- return dst_done ?: ret;
-}
+ inode_iter = bch2_inode_peek(&trans, &inode_u,
+ dst_start.inode, BTREE_ITER_INTENT);
+ ret2 = PTR_ERR_OR_ZERO(inode_iter);
+
+ if (!ret2 &&
+ inode_u.bi_size < new_i_size) {
+ inode_u.bi_size = new_i_size;
+ ret2 = bch2_inode_write(&trans, inode_iter, &inode_u) ?:
+ bch2_trans_commit(&trans, NULL, journal_seq,
+ BTREE_INSERT_ATOMIC);
+ }
+ } while (ret2 == -EINTR);
-#endif /* NO_BCACHEFS_FS */
+ ret = bch2_trans_exit(&trans) ?: ret;
+
+ return dst_done ?: ret ?: ret2;
+}
.val_to_text = bch2_reflink_v_to_text, \
}
-#ifndef NO_BCACHEFS_FS
-s64 bch2_remap_range(struct bch_fs *, struct bch_inode_info *,
- struct bpos, struct bpos, u64, u64);
-#endif /* NO_BCACHEFS_FS */
+s64 bch2_remap_range(struct bch_fs *, struct bpos, struct bpos,
+ u64, u64 *, u64, s64 *);
#endif /* _BCACHEFS_REFLINK_H */
if (p.ptr.cached)
continue;
- if (p.ec_nr) {
+ if (p.has_ec)
r->nr_required = 0;
- break;
- }
r->devs[r->nr_devs++] = p.ptr.dev;
}
struct jset_entry_data_usage *u =
container_of(entry, struct jset_entry_data_usage, entry);
- memset(u, 0, sizeof(*u));
- u->entry.u64s = DIV_ROUND_UP(sizeof(*u) + e->nr_devs,
- sizeof(u64)) - 1;
+ int u64s = DIV_ROUND_UP(sizeof(*u) + e->nr_devs,
+ sizeof(u64)) - 1;
+ memset(u, 0, u64s * sizeof(u64));
+ u->entry.u64s = u64s;
u->entry.type = BCH_JSET_ENTRY_data_usage;
u->v = cpu_to_le64(c->usage_base->replicas[i]);
memcpy(&u->r, e, replicas_entry_bytes(e));