bch2_trans_copy_iter(&iter, extent_iter);
- for_each_btree_key_continue_norestart(iter, BTREE_ITER_SLOTS, old, ret) {
+ for_each_btree_key_upto_continue_norestart(iter,
+ new->k.p, BTREE_ITER_SLOTS, old, ret) {
s64 sectors = min(new->k.p.offset, old.k->p.offset) -
max(bkey_start_offset(&new->k),
bkey_start_offset(old.k));
struct bch_fs *c = op->c;
bch2_disk_reservation_put(c, &op->res);
- bch2_write_ref_put(c, BCH_WRITE_REF_write);
+ if (!(op->flags & BCH_WRITE_MOVE))
+ bch2_write_ref_put(c, BCH_WRITE_REF_write);
bch2_keylist_free(&op->insert_keys, op->inline_keys);
bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
struct write_point *wp = op->wp;
struct workqueue_struct *wq = index_update_wq(op);
+ unsigned long flags;
if ((op->flags & BCH_WRITE_DONE) &&
(op->flags & BCH_WRITE_MOVE))
bch2_bio_free_pages_pool(op->c, &op->wbio.bio);
- barrier();
-
- /*
- * We're not using wp->writes_lock here, so this is racey: that's ok,
- * because this is just for diagnostic purposes, and we're running out
- * of interrupt context here so if we were to take the log we'd have to
- * switch to spin_lock_irq()/irqsave(), which is not free:
- */
+ spin_lock_irqsave(&wp->writes_lock, flags);
if (wp->state == WRITE_POINT_waiting_io)
__wp_update_state(wp, WRITE_POINT_waiting_work);
+ list_add_tail(&op->wp_list, &wp->writes);
+ spin_unlock_irqrestore (&wp->writes_lock, flags);
- op->btree_update_ready = true;
queue_work(wq, &wp->index_update_work);
}
static inline void bch2_write_queue(struct bch_write_op *op, struct write_point *wp)
{
- op->btree_update_ready = false;
op->wp = wp;
- spin_lock(&wp->writes_lock);
- list_add_tail(&op->wp_list, &wp->writes);
+ /*
+  * The op is no longer added to wp->writes here: insertion now happens
+  * in the write completion path, under writes_lock with interrupts
+  * disabled, so btree_update_ready (and the lockless scan for it) is
+  * gone. Here we only record the write point and restart its state
+  * machine if it was stopped.
+  */
- if (wp->state == WRITE_POINT_stopped)
+ if (wp->state == WRITE_POINT_stopped) {
+ /*
+  * NOTE(review): wp->state is tested before taking writes_lock —
+  * presumably benign because only this path moves a stopped write
+  * point forward; confirm against the other wp->state writers.
+  */
+ spin_lock_irq(&wp->writes_lock);
__wp_update_state(wp, WRITE_POINT_waiting_io);
- spin_unlock(&wp->writes_lock);
+ spin_unlock_irq(&wp->writes_lock);
+ }
}
void bch2_write_point_do_index_updates(struct work_struct *work)
struct bch_write_op *op;
while (1) {
- spin_lock(&wp->writes_lock);
- list_for_each_entry(op, &wp->writes, wp_list)
- if (op->btree_update_ready) {
- list_del(&op->wp_list);
- goto unlock;
- }
- op = NULL;
-unlock:
+ spin_lock_irq(&wp->writes_lock);
+ op = list_first_entry_or_null(&wp->writes, struct bch_write_op, wp_list);
+ if (op)
+ list_del(&op->wp_list);
wp_update_state(wp, op != NULL);
- spin_unlock(&wp->writes_lock);
+ spin_unlock_irq(&wp->writes_lock);
if (!op)
break;
}
again:
memset(&op->failed, 0, sizeof(op->failed));
- op->btree_update_ready = false;
do {
struct bkey_i *key_to_write;
goto err;
}
- if (c->opts.nochanges ||
+ if (c->opts.nochanges) {
+ op->error = -BCH_ERR_erofs_no_writes;
+ goto err;
+ }
+
+ if (!(op->flags & BCH_WRITE_MOVE) &&
!bch2_write_ref_tryget(c, BCH_WRITE_REF_write)) {
op->error = -BCH_ERR_erofs_no_writes;
goto err;
op->end_io(op);
}
+/*
+ * Human-readable names for the BCH_WRITE_* flag bits, generated from the
+ * BCH_WRITE_FLAGS() x-macro; NULL-terminated so it can be fed to
+ * prt_bitflags() when formatting op->flags.
+ */
+const char * const bch2_write_flags[] = {
+#define x(f) #f,
+ BCH_WRITE_FLAGS()
+#undef x
+ NULL
+};
+
+/*
+ * Debug/diagnostic formatter for an in-flight write op: prints its target
+ * position, how long ago it started (local_clock() - op->start_time), and
+ * the symbolic names of the flags currently set in op->flags.
+ */
+void bch2_write_op_to_text(struct printbuf *out, struct bch_write_op *op)
+{
+ prt_str(out, "pos: ");
+ bch2_bpos_to_text(out, op->pos);
+ prt_newline(out);
+
+ prt_str(out, "started: ");
+ bch2_pr_time_units(out, local_clock() - op->start_time);
+ prt_newline(out);
+
+ prt_str(out, "flags: ");
+ prt_bitflags(out, bch2_write_flags, op->flags);
+ prt_newline(out);
+}
+
/* Cache promotion on read */
struct promote_op {