-0cd3e1d27a6252b6c0cc32f237c2b2414540e2e8
+ac3ab6a511717db1644ded49a6f417304abba048
memset(&b->nr, 0, sizeof(b->nr));
b->data->magic = cpu_to_le64(bset_magic(c));
+ memset(&b->data->_ptr, 0, sizeof(b->data->_ptr));
b->data->flags = 0;
SET_BTREE_NODE_ID(b->data, as->btree_id);
SET_BTREE_NODE_LEVEL(b->data, level);
}
break;
case BTREE_INSERT_ENOSPC:
+ BUG_ON(flags & BTREE_INSERT_NOFAIL);
ret = -ENOSPC;
break;
case BTREE_INSERT_NEED_MARK_REPLICAS:
break;
}
+ BUG_ON(ret == -ENOSPC && (flags & BTREE_INSERT_NOFAIL));
+
return ret;
}
if (i < trans->updates + trans->nr_updates &&
i->btree_id == n.btree_id &&
bkey_cmp(n.k->k.p, bkey_start_pos(&i->k->k)) > 0) {
- /* We don't handle splitting extents here: */
- BUG_ON(bkey_cmp(bkey_start_pos(&n.k->k),
- bkey_start_pos(&i->k->k)) > 0);
+ if (bkey_cmp(bkey_start_pos(&n.k->k),
+ bkey_start_pos(&i->k->k)) > 0) {
+ struct btree_insert_entry split = *i;
+ int ret;
+
+ BUG_ON(trans->nr_updates + 1 >= BTREE_ITER_MAX);
+
+ split.k = bch2_trans_kmalloc(trans, bkey_bytes(&i->k->k));
+ ret = PTR_ERR_OR_ZERO(split.k);
+ if (ret)
+ return ret;
+
+ bkey_copy(split.k, i->k);
+ bch2_cut_back(bkey_start_pos(&n.k->k), split.k);
+
+ split.iter = bch2_trans_get_iter(trans, split.btree_id,
+ bkey_start_pos(&split.k->k),
+ BTREE_ITER_INTENT);
+ split.iter->flags |= BTREE_ITER_KEEP_UNTIL_COMMIT;
+ bch2_trans_iter_put(trans, split.iter);
+ array_insert_item(trans->updates, trans->nr_updates,
+ i - trans->updates, split);
+ i++;
+ }
/*
* When we have an extent that overwrites the start of another
}
}
-#define RESERVE_FACTOR 6
-
/*
 * Pad r by ~1/2^RESERVE_FACTOR (~1.6% for RESERVE_FACTOR == 6): rounds r up
 * to the next multiple of 2^RESERVE_FACTOR, then adds 1/2^RESERVE_FACTOR of
 * that as headroom.  Inverse direction of avail_factor() — NOTE(review):
 * RESERVE_FACTOR's #define is being moved by this patch; confirm it is still
 * visible at this point in the file after the move.
 */
static u64 reserve_factor(u64 r)
{
	return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
}
-static u64 avail_factor(u64 r)
-{
- return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
-}
-
u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
{
return min(fs_usage->u.hidden +
return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}
+#define RESERVE_FACTOR 6
+
+/*
+ * Scale r down by 2^RESERVE_FACTOR / (2^RESERVE_FACTOR + 1) (64/65 for
+ * RESERVE_FACTOR == 6), i.e. the portion of r left after the reserve
+ * headroom added by reserve_factor() is discounted.
+ */
+static inline u64 avail_factor(u64 r)
+{
+	return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
+}
+
+
int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);
if (mode != BCH_RENAME)
*dst_inum = le64_to_cpu(bkey_s_c_to_dirent(old_dst).v->d_inum);
+ if (mode != BCH_RENAME_EXCHANGE)
+ *src_offset = dst_iter->pos.offset;
/* Lookup src: */
src_iter = bch2_hash_lookup(trans, bch2_dirent_hash_desc,
bch2_trans_update(trans, src_iter, &new_src->k_i, 0);
bch2_trans_update(trans, dst_iter, &new_dst->k_i, 0);
out_set_offset:
- *src_offset = new_src->k.p.offset;
+ if (mode == BCH_RENAME_EXCHANGE)
+ *src_offset = new_src->k.p.offset;
*dst_offset = new_dst->k.p.offset;
out:
bch2_trans_iter_put(trans, src_iter);
dst_inode_u->bi_dir = src_dir_u->bi_inum;
dst_inode_u->bi_dir_offset = src_offset;
}
+
+ if (mode == BCH_RENAME_OVERWRITE &&
+ dst_inode_u->bi_dir == dst_dir_u->bi_inum &&
+ dst_inode_u->bi_dir_offset == src_offset) {
+ dst_inode_u->bi_dir = 0;
+ dst_inode_u->bi_dir_offset = 0;
+ }
}
if (mode == BCH_RENAME_OVERWRITE) {
struct bio_vec *bvec;
unsigned i;
+ up(&io->op.c->io_in_flight);
+
if (io->op.error) {
set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
{
struct bch_writepage_io *io = w->io;
+ down(&io->op.c->io_in_flight);
+
w->io = NULL;
closure_call(&io->op.cl, bch2_write, NULL, &io->cl);
continue_at(&io->cl, bch2_writepage_io_done, NULL);
if (dio->loop)
goto loop;
+ down(&c->io_in_flight);
+
while (1) {
iter_count = dio->iter.count;
if ((req->ki_flags & IOCB_DSYNC) &&
!c->opts.journal_flush_disabled)
dio->op.flags |= BCH_WRITE_FLUSH;
+ dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;
ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
dio->op.opts.data_replicas, 0);
ret = dio->op.error ?: ((long) dio->written << 9);
err:
+ up(&c->io_in_flight);
bch2_pagecache_block_put(&inode->ei_pagecache_lock);
bch2_quota_reservation_put(c, inode, &dio->quota_res);
ret = bch2_extent_update(&trans, iter, &reservation.k_i,
&disk_res, &inode->ei_journal_seq,
- 0, &i_sectors_delta);
+ 0, &i_sectors_delta, true);
i_sectors_acct(c, inode, "a_res, i_sectors_delta);
bkey_err:
bch2_quota_reservation_put(c, inode, "a_res);
if (pos_dst + ret > dst->v.i_size)
i_size_write(&dst->v, pos_dst + ret);
spin_unlock(&dst->v.i_lock);
+
+ if (((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
+ IS_SYNC(file_inode(file_dst))) &&
+ !c->opts.journal_flush_disabled)
+ ret = bch2_journal_flush_seq(&c->journal, dst->ei_journal_seq);
err:
bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
buf->f_type = BCACHEFS_STATFS_MAGIC;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = usage.capacity >> shift;
- buf->f_bfree = (usage.capacity - usage.used) >> shift;
- buf->f_bavail = buf->f_bfree;
+ buf->f_bfree = usage.free >> shift;
+ buf->f_bavail = avail_factor(usage.free) >> shift;
buf->f_files = usage.nr_inodes + avail_inodes;
buf->f_ffree = avail_inodes;
* just switch units to bytes and that issue goes away
*/
ret = bch2_btree_delete_range_trans(trans, BTREE_ID_extents,
- POS(u.bi_inum, round_up(u.bi_size, block_bytes(c))),
+ POS(u.bi_inum, round_up(u.bi_size, block_bytes(c)) >> 9),
POS(u.bi_inum, U64_MAX),
NULL);
if (ret) {
k.k->type, k.k->p.offset, k.k->p.inode, w.inode.bi_size)) {
bch2_fs_lazy_rw(c);
return bch2_btree_delete_range_trans(&trans, BTREE_ID_extents,
- POS(k.k->p.inode, round_up(w.inode.bi_size, block_bytes(c))),
+ POS(k.k->p.inode, round_up(w.inode.bi_size, block_bytes(c)) >> 9),
POS(k.k->p.inode, U64_MAX),
NULL) ?: -EINTR;
}
struct btree_iter *extent_iter,
struct bkey_i *new,
bool *maybe_extending,
- bool *should_check_enospc,
+ bool *usage_increasing,
s64 *i_sectors_delta,
s64 *disk_sectors_delta)
{
int ret = 0;
*maybe_extending = true;
- *should_check_enospc = false;
+ *usage_increasing = false;
*i_sectors_delta = 0;
*disk_sectors_delta = 0;
? sectors * bch2_bkey_nr_ptrs_fully_allocated(old)
: 0;
- if (!*should_check_enospc &&
+ if (!*usage_increasing &&
(new_replicas > bch2_bkey_replicas(c, old) ||
(!new_compressed && bch2_bkey_sectors_compressed(old))))
- *should_check_enospc = true;
+ *usage_increasing = true;
if (bkey_cmp(old.k->p, new->k.p) >= 0) {
/*
struct disk_reservation *disk_res,
u64 *journal_seq,
u64 new_i_size,
- s64 *i_sectors_delta_total)
+ s64 *i_sectors_delta_total,
+ bool check_enospc)
{
/* this must live until after bch2_trans_commit(): */
struct bkey_inode_buf inode_p;
- bool extending = false, should_check_enospc;
+ bool extending = false, usage_increasing;
s64 i_sectors_delta = 0, disk_sectors_delta = 0;
int ret;
ret = bch2_sum_sector_overwrites(trans, iter, k,
&extending,
- &should_check_enospc,
+ &usage_increasing,
&i_sectors_delta,
&disk_sectors_delta);
if (ret)
return ret;
+ if (!usage_increasing)
+ check_enospc = false;
+
if (disk_res &&
disk_sectors_delta > (s64) disk_res->sectors) {
ret = bch2_disk_reservation_add(trans->c, disk_res,
disk_sectors_delta - disk_res->sectors,
- !should_check_enospc
+ !check_enospc
? BCH_DISK_RESERVATION_NOFAIL : 0);
if (ret)
return ret;
bch2_trans_iter_put(trans, inode_iter);
}
- bch2_trans_update(trans, iter, k, 0);
-
- ret = bch2_trans_commit(trans, disk_res, journal_seq,
+ ret = bch2_trans_update(trans, iter, k, 0) ?:
+ bch2_trans_commit(trans, disk_res, journal_seq,
BTREE_INSERT_NOCHECK_RW|
BTREE_INSERT_NOFAIL);
+ BUG_ON(ret == -ENOSPC);
if (ret)
return ret;
ret = bch2_extent_update(trans, iter, &delete,
&disk_res, journal_seq,
- 0, i_sectors_delta);
+ 0, i_sectors_delta, false);
bch2_disk_reservation_put(c, &disk_res);
btree_err:
if (ret == -EINTR) {
ret = bch2_extent_update(&trans, iter, sk.k,
&op->res, op_journal_seq(op),
- op->new_i_size, &op->i_sectors_delta);
+ op->new_i_size, &op->i_sectors_delta,
+ op->flags & BCH_WRITE_CHECK_ENOSPC);
if (ret == -EINTR)
continue;
if (ret)
bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
- if (!(op->flags & BCH_WRITE_FROM_INTERNAL))
- up(&c->io_in_flight);
-
if (op->end_io) {
EBUG_ON(cl->parent);
closure_debug_destroy(cl);
goto err;
}
- /*
- * Can't ratelimit copygc - we'd deadlock:
- */
- if (!(op->flags & BCH_WRITE_FROM_INTERNAL))
- down(&c->io_in_flight);
-
bch2_increment_clock(c, bio_sectors(bio), WRITE);
data_len = min_t(u64, bio->bi_iter.bi_size,
BCH_WRITE_ONLY_SPECIFIED_DEVS = (1 << 6),
BCH_WRITE_WROTE_DATA_INLINE = (1 << 7),
BCH_WRITE_FROM_INTERNAL = (1 << 8),
+ BCH_WRITE_CHECK_ENOSPC = (1 << 9),
/* Internal: */
- BCH_WRITE_JOURNAL_SEQ_PTR = (1 << 9),
- BCH_WRITE_SKIP_CLOSURE_PUT = (1 << 10),
- BCH_WRITE_DONE = (1 << 11),
+ BCH_WRITE_JOURNAL_SEQ_PTR = (1 << 10),
+ BCH_WRITE_SKIP_CLOSURE_PUT = (1 << 11),
+ BCH_WRITE_DONE = (1 << 12),
};
static inline u64 *op_journal_seq(struct bch_write_op *op)
struct bkey_i *, bool *, bool *, s64 *, s64 *);
int bch2_extent_update(struct btree_trans *, struct btree_iter *,
struct bkey_i *, struct disk_reservation *,
- u64 *, u64, s64 *);
+ u64 *, u64, s64 *, bool);
int bch2_fpunch_at(struct btree_trans *, struct btree_iter *,
struct bpos, u64 *, s64 *);
int bch2_fpunch(struct bch_fs *c, u64, u64, u64, u64 *, s64 *);
if (qdq->d_fieldmask & QC_INO_HARD)
new_quota->v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit);
- return bch2_trans_update(trans, iter, &new_quota->k_i, 0);
+ ret = bch2_trans_update(trans, iter, &new_quota->k_i, 0);
+ bch2_trans_iter_put(trans, iter);
+ return ret;
}
static int bch2_set_quota(struct super_block *sb, struct kqid qid,
*refcount = 0;
memcpy(refcount + 1, &orig->v, bkey_val_bytes(&orig->k));
- bch2_trans_update(trans, reflink_iter, r_v, 0);
+ ret = bch2_trans_update(trans, reflink_iter, r_v, 0);
+ if (ret)
+ goto err;
r_p = bch2_trans_kmalloc(trans, sizeof(*r_p));
if (IS_ERR(r_p)) {
set_bkey_val_bytes(&r_p->k, sizeof(r_p->v));
r_p->v.idx = cpu_to_le64(bkey_start_offset(&r_v->k));
- bch2_trans_update(trans, extent_iter, &r_p->k_i, 0);
+ ret = bch2_trans_update(trans, extent_iter, &r_p->k_i, 0);
err:
if (!IS_ERR(reflink_iter))
c->reflink_hint = reflink_iter->pos.offset;
ret = bch2_extent_update(&trans, dst_iter, new_dst.k,
&disk_res, journal_seq,
- new_i_size, i_sectors_delta);
+ new_i_size, i_sectors_delta,
+ true);
bch2_disk_reservation_put(c, &disk_res);
if (ret)
continue;