#include "bcachefs.h"
#include "alloc_foreground.h"
+#include "bkey_on_stack.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
while (1) {
- struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
+ struct bkey_s_c k;
struct bkey_i *insert;
- struct bkey_i_extent *new =
- bkey_i_to_extent(bch2_keylist_front(keys));
+ struct bkey_i_extent *new;
BKEY_PADDED(k) _new, _insert;
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
bool did_work = false;
int nr;
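+ /* reset transaction state at the top of each pass */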
+ bch2_trans_reset(&trans, 0);
+
+ k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
- if (ret)
+ if (ret) {
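+ /* -EINTR: the transaction was restarted, so just retry */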
+ if (ret == -EINTR)
+ continue;
break;
+ }
+
+ new = bkey_i_to_extent(bch2_keylist_front(keys));
if (bversion_cmp(k.k->version, new->k.version) ||
!bch2_bkey_matches_ptr(c, k, m->ptr, m->offset))
goto nomatch;
bkey_copy(&_new.k, bch2_keylist_front(keys));
new = bkey_i_to_extent(&_new.k);
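+ /* trim new and insert down to the same range: */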
- bch2_cut_front(iter->pos, insert);
- bch2_cut_back(new->k.p, &insert->k);
- bch2_cut_back(insert->k.p, &new->k);
+ bch2_cut_front(iter->pos, &new->k_i);
+ bch2_cut_front(iter->pos, insert);
+ bch2_cut_back(new->k.p, insert);
+ bch2_cut_back(insert->k.p, &new->k_i);
if (m->data_cmd == DATA_REWRITE)
bch2_bkey_drop_device(bkey_i_to_s(insert),
m->data_opts.rewrite_dev);
/*
* If we're not fully overwriting @k, and it's compressed, we
* need a reservation for all the pointers in @insert
*/
- nr = bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(insert)) -
+ nr = bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(insert)) -
m->nr_ptrs_reserved;
if (insert->k.size < k.k->size &&
- bch2_extent_is_compressed(k) &&
+ bch2_bkey_sectors_compressed(k) &&
nr > 0) {
ret = bch2_disk_reservation_add(c, &op->res,
keylist_sectors(keys) * nr, 0);
goto next;
}
- bch2_trans_update(&trans, iter, insert);
+ bch2_trans_update(&trans, iter, insert, 0);
ret = bch2_trans_commit(&trans, &op->res,
op_journal_seq(op),
- BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOFAIL|
BTREE_INSERT_USE_RESERVE|
m->data_opts.btree_insert_flags);
if (bch2_keylist_empty(keys))
goto out;
}
-
- bch2_cut_front(iter->pos, bch2_keylist_front(keys));
continue;
nomatch:
if (m->ctxt)
enum btree_id btree_id,
struct bkey_s_c k)
{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
int ret;
m->btree_id = btree_id;
m->nr_ptrs_reserved = 0;
bch2_write_op_init(&m->op, c, io_opts);
- m->op.compression_type =
- bch2_compression_opt_to_type[io_opts.background_compression ?:
- io_opts.compression];
+
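+ /* if the data is already marked incompressible, carry that through
+ * instead of trying to recompress it: */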
+ if (!bch2_bkey_is_incompressible(k))
+ m->op.compression_type =
+ bch2_compression_opt_to_type[io_opts.background_compression ?:
+ io_opts.compression];
+ else
+ m->op.incompressible = true;
+
m->op.target = data_opts.target;
m->op.write_point = wp;
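+ /* BCH_WRITE_FROM_INTERNAL: this write moves existing data, it isn't a new user write */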
m->op.flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS|
BCH_WRITE_PAGES_STABLE|
BCH_WRITE_PAGES_OWNED|
- BCH_WRITE_DATA_ENCODED;
+ BCH_WRITE_DATA_ENCODED|
+ BCH_WRITE_FROM_INTERNAL;
m->op.nr_replicas = 1;
m->op.nr_replicas_required = 1;
*/
#if 0
int nr = (int) io_opts.data_replicas -
- bch2_bkey_nr_dirty_ptrs(k);
+ bch2_bkey_nr_ptrs_allocated(k);
#endif
int nr = (int) io_opts.data_replicas;
break;
}
case DATA_REWRITE: {
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
unsigned compressed_sectors = 0;
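+ /* compressed extents being rewritten need a disk reservation: */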
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
if (!p.ptr.cached &&
- p.crc.compression_type != BCH_COMPRESSION_NONE &&
+ crc_is_compressed(p.crc) &&
bch2_dev_in_target(c, p.ptr.dev, data_opts.target))
compressed_sectors += p.crc.compressed_size;
{
bool kthread = (current->flags & PF_KTHREAD) != 0;
struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
- BKEY_PADDED(k) tmp;
+ struct bkey_on_stack sk;
struct btree_trans trans;
struct btree_iter *iter;
struct bkey_s_c k;
u64 delay, cur_inum = U64_MAX;
int ret = 0, ret2;
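+ /* sk holds a copy of the current key; dynamically sized, unlike BKEY_PADDED */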
+ bkey_on_stack_init(&sk);
bch2_trans_init(&trans, c, 0, 0);
stats->data_type = BCH_DATA_USER;
if (!bkey_extent_is_direct_data(k.k))
goto next_nondata;
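+ /* only extents btree keys belong to an inode whose IO options we can look up: */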
- if (cur_inum != k.k->p.inode) {
+ if (btree_id == BTREE_ID_EXTENTS &&
+ cur_inum != k.k->p.inode) {
struct bch_inode_unpacked inode;
/* don't hold btree locks while looking up inode: */
}
/* unlock before doing IO: */
- bkey_reassemble(&tmp.k, k);
- k = bkey_i_to_s_c(&tmp.k);
+ bkey_on_stack_reassemble(&sk, c, k);
+ k = bkey_i_to_s_c(sk.k);
bch2_trans_unlock(&trans);
ret2 = bch2_move_extent(c, ctxt, wp, io_opts, btree_id, k,
data_cmd, data_opts);
if (rate)
bch2_ratelimit_increment(rate, k.k->size);
next:
- atomic64_add(k.k->size * bch2_bkey_nr_dirty_ptrs(k),
+ atomic64_add(k.k->size * bch2_bkey_nr_ptrs_allocated(k),
&stats->sectors_seen);
next_nondata:
bch2_btree_iter_next(iter);
}
out:
ret = bch2_trans_exit(&trans) ?: ret;
+ bkey_on_stack_exit(&sk, c);
return ret;
}
ret = bch2_move_btree(c, rereplicate_pred, c, stats) ?: ret;
- while (1) {
- closure_wait_event(&c->btree_interior_update_wait,
- !bch2_btree_interior_updates_nr_pending(c) ||
- c->btree_roots_dirty);
- if (!bch2_btree_interior_updates_nr_pending(c))
- break;
- bch2_journal_meta(&c->journal);
- }
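+ /* just wait for in-flight interior btree updates to complete: */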
+ closure_wait_event(&c->btree_interior_update_wait,
+ !bch2_btree_interior_updates_nr_pending(c));
ret = bch2_replicas_gc2(c) ?: ret;