struct btree_iter *iter,
struct btree_write_buffered_key *wb)
{
- bch2_btree_node_unlock_write(trans, iter->path, iter->path->l[0].b);
+ struct btree_path *path = btree_iter_path(trans, iter);
+
+ bch2_btree_node_unlock_write(trans, path, path->l[0].b);
trans->journal_res.seq = wb->journal_seq;
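/*
 * Note on the conversion above: direct iter->path dereferences are being
 * replaced with a btree_iter_path() accessor. A minimal sketch of the
 * assumed helper; routing every access through it is what later lets
 * iter->path become an index into trans->paths instead of a pointer:
 */
static inline struct btree_path *btree_iter_path(struct btree_trans *trans,
						 struct btree_iter *iter)
{
	return iter->path;	/* eventually: trans->paths + iter->path */
}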
/*
* We can't clone a path that has write locks: unshare it now, before
* set_pos and traverse():
*/
- if (iter->path->ref > 1)
+ if (btree_iter_path(trans, iter)->ref > 1)
iter->path = __bch2_btree_path_make_mut(trans, iter->path, true, _THIS_IP_);
- path = iter->path;
+ path = btree_iter_path(trans, iter);
if (!*write_locked) {
ret = bch2_btree_node_lock_write(trans, path, &path->l[0].b->c);
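/*
 * Why the unshare above matters (assumed semantics, expanding on the
 * comment in this hunk): write locks are tracked per-path, so if this
 * path were still shared (ref > 1) when set_pos()/traverse() cloned it,
 * the clone and the original would disagree about who holds the node's
 * write lock. Making the iterator's path private first keeps the lock
 * bookkeeping single-owner.
 */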
struct bch_fs *c = trans->c;
struct journal *j = &c->journal;
struct btree_write_buffer *wb = &c->btree_write_buffer;
- struct wb_key_ref *i;
struct btree_iter iter = { NULL };
size_t skipped = 0, fast = 0, slowpath = 0;
bool write_locked = false;
continue;
}
- if (write_locked &&
- (iter.path->btree_id != k->btree ||
- bpos_gt(k->k.k.p, iter.path->l[0].b->key.k.p))) {
- bch2_btree_node_unlock_write(trans, iter.path, iter.path->l[0].b);
- write_locked = false;
+ if (write_locked) {
+ struct btree_path *path = btree_iter_path(trans, &iter);
+
+ if (path->btree_id != i->btree ||
+ bpos_gt(k->k.k.p, path->l[0].b->key.k.p)) {
+ bch2_btree_node_unlock_write(trans, path, path->l[0].b);
+ write_locked = false;
+ }
}
- if (!iter.path || iter.path->btree_id != k->btree) {
+ if (!iter.path || iter.btree_id != k->btree) {
bch2_trans_iter_exit(trans, &iter);
bch2_trans_iter_init(trans, &iter, k->btree, k->k.k.p,
BTREE_ITER_INTENT|BTREE_ITER_ALL_SNAPSHOTS);
}
bch2_btree_iter_set_pos(&iter, k->k.k.p);
- iter.path->preserve = false;
+ btree_iter_path(trans, &iter)->preserve = false;
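/*
 * Reading the write_locked check above: "i" is assumed to be the sorted
 * wb_key_ref (now declared by darray_for_each itself, hence the dropped
 * local) and "k" the btree_write_buffered_key it refers to, which is why
 * the btree comes from i while positions come from k. The node write
 * lock is retained across consecutive keys while
 *
 *	path->btree_id == i->btree &&
 *	bpos_le(k->k.k.p, path->l[0].b->key.k.p)
 *
 * still holds (a hypothetical restatement of the condition being negated
 * above); preserve = false because the path is repositioned on every key
 * and not worth keeping around afterwards.
 */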
do {
if (race_fault()) {
break;
}
- if (write_locked)
- bch2_btree_node_unlock_write(trans, iter.path, iter.path->l[0].b);
+ if (write_locked) {
+ struct btree_path *path = btree_iter_path(trans, &iter);
+ bch2_btree_node_unlock_write(trans, path, path->l[0].b);
+ }
bch2_trans_iter_exit(trans, &iter);
if (ret)
/*
* The fastpath zapped the seq of keys that were successfully flushed, so
* we can skip those here.
*/
- trace_write_buffer_flush_slowpath(trans, slowpath, wb->flushing.keys.nr);
+ trace_and_count(c, write_buffer_flush_slowpath, trans, slowpath, wb->flushing.keys.nr);
- struct btree_write_buffered_key *i;
darray_for_each(wb->flushing.keys, i) {
if (!i->journal_seq)
continue;
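/*
 * (journal_seq == 0 is the "zapped" sentinel the fastpath leaves behind
 * for keys it already flushed; see the comment above this loop)
 */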
struct journal_buf *buf;
int ret = 0;
- mutex_lock(&j->buf_lock);
- while ((buf = bch2_next_write_buffer_flush_journal_buf(j, seq)))
- if (bch2_journal_keys_to_write_buffer(c, buf)) {
- ret = -ENOMEM;
- break;
- }
- mutex_unlock(&j->buf_lock);
+ while (!ret && (buf = bch2_next_write_buffer_flush_journal_buf(j, seq))) {
+ ret = bch2_journal_keys_to_write_buffer(c, buf);
+ mutex_unlock(&j->buf_lock);
+ }
return ret;
}
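/*
 * Locking note, inferred from the shape of the new loop: it unlocks
 * j->buf_lock without ever taking it, which only balances if
 * bch2_next_write_buffer_flush_journal_buf() returns with j->buf_lock
 * held whenever it hands back a buf (and drops the lock itself when it
 * returns NULL). That contract replaces the old lock/unlock pair
 * wrapped around the whole loop.
 */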
-int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans)
+static int btree_write_buffer_flush_seq(struct btree_trans *trans, u64 seq)
{
struct bch_fs *c = trans->c;
struct btree_write_buffer *wb = &c->btree_write_buffer;
int ret = 0, fetch_from_journal_err;
- trace_write_buffer_flush_sync(trans, _RET_IP_);
-retry:
- bch2_trans_unlock(trans);
+ do {
+ bch2_trans_unlock(trans);
- bch2_journal_block_reservations(&c->journal);
- fetch_from_journal_err = fetch_wb_keys_from_journal(c, U64_MAX);
- bch2_journal_unblock(&c->journal);
+ fetch_from_journal_err = fetch_wb_keys_from_journal(c, seq);
- /*
- * On memory allocation failure, bch2_btree_write_buffer_flush_locked()
- * is not guaranteed to empty wb->inc:
- */
- mutex_lock(&wb->flushing.lock);
- while (!ret &&
- (wb->flushing.keys.nr || wb->inc.keys.nr))
+ /*
+ * On memory allocation failure, bch2_btree_write_buffer_flush_locked()
+ * is not guaranteed to empty wb->inc:
+ */
+ mutex_lock(&wb->flushing.lock);
ret = bch2_btree_write_buffer_flush_locked(trans);
- mutex_unlock(&wb->flushing.lock);
-
- if (!ret && fetch_from_journal_err)
- goto retry;
+ mutex_unlock(&wb->flushing.lock);
+ } while (!ret &&
+ (fetch_from_journal_err ||
+ (wb->inc.pin.seq && wb->inc.pin.seq <= seq) ||
+ (wb->flushing.pin.seq && wb->flushing.pin.seq <= seq)));
return ret;
}
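/*
 * The retry: label plus journal block/unblock is gone; the do-while
 * above simply loops until a pass both fetched everything pending from
 * the journal and left no write-buffer pin at or before the target seq.
 * A hypothetical restatement of the continue condition:
 */
static inline bool wb_flush_seq_incomplete(struct btree_write_buffer *wb,
					   int fetch_from_journal_err, u64 seq)
{
	return fetch_from_journal_err ||
	       (wb->inc.pin.seq && wb->inc.pin.seq <= seq) ||
	       (wb->flushing.pin.seq && wb->flushing.pin.seq <= seq);
}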
+static int bch2_btree_write_buffer_journal_flush(struct journal *j,
+ struct journal_entry_pin *_pin, u64 seq)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+
+ return bch2_trans_run(c, btree_write_buffer_flush_seq(trans, seq));
+}
+
+int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans)
+{
+ struct bch_fs *c = trans->c;
+
+ trace_and_count(c, write_buffer_flush_sync, trans, _RET_IP_);
+
+ return btree_write_buffer_flush_seq(trans, journal_cur_seq(&c->journal));
+}
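/*
 * Both entry points now funnel through btree_write_buffer_flush_seq():
 * flush_sync targets the current journal seq, while the journal-pin
 * callback is invoked by journal reclaim with the seq it needs released.
 * Presumed hook-up for that callback (illustrative sketch only; the
 * actual registration site is outside this excerpt, and "seq" stands
 * for the journal seq of the first key moved into wb->flushing):
 */
static void wb_pin_flushing_sketch(struct bch_fs *c, u64 seq)
{
	struct btree_write_buffer *wb = &c->btree_write_buffer;

	bch2_journal_pin_add(&c->journal, seq, &wb->flushing.pin,
			     bch2_btree_write_buffer_journal_flush);
}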
+
int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *trans)
{
struct bch_fs *c = trans->c;
return ret;
}
-static int bch2_btree_write_buffer_journal_flush(struct journal *j,
- struct journal_entry_pin *_pin, u64 seq)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- struct btree_write_buffer *wb = &c->btree_write_buffer;
- int ret, fetch_from_journal_err;
-
- do {
- fetch_from_journal_err = fetch_wb_keys_from_journal(c, seq);
-
- mutex_lock(&wb->flushing.lock);
- ret = bch2_trans_run(c, bch2_btree_write_buffer_flush_locked(trans));
- mutex_unlock(&wb->flushing.lock);
- } while (!ret &&
- (fetch_from_journal_err ||
- (wb->flushing.pin.seq && wb->flushing.pin.seq <= seq) ||
- (wb->inc.pin.seq && wb->inc.pin.seq <= seq)));
-
- return ret;
-}
-
static void bch2_btree_write_buffer_flush_work(struct work_struct *work)
{
struct bch_fs *c = container_of(work, struct bch_fs, btree_write_buffer.flush_work);