#include "error.h"
#include "io_misc.h"
#include "logged_ops.h"
+#include "super.h"
/*
 * NOTE(review): this span is a unified-diff hunk fragment, not contiguous
 * source.  The struct below is truncated and the statements following it
 * belong to a different function (its signature is outside the visible
 * region) -- left byte-identical; do not attempt to compile as-is.
 */
struct bch_logged_op_fn {
	u8	type;
	/*
	 * NOTE(review): context lines missing here.  The diff below makes the
	 * resume path go read-write (bch2_fs_lazy_rw) with transaction locks
	 * dropped before calling fn->resume -- presumably to avoid blocking on
	 * going RW while holding btree locks; confirm against the full patch.
	 */
	bch2_bkey_buf_init(&sk);
	bch2_bkey_buf_reassemble(&sk, c, k);
-	ret = fn->resume(trans, sk.k) ?: trans_was_restarted(trans, restart_count);
+	ret = drop_locks_do(trans, (bch2_fs_lazy_rw(c), 0)) ?:
+		fn->resume(trans, sk.k) ?: trans_was_restarted(trans, restart_count);
	bch2_bkey_buf_exit(&sk, c);
	return ret;
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
+#include "bkey_methods.h"
#include "nocow_locking.h"
#include "util.h"
	/*
	 * NOTE(review): diff hunk fragment -- the enclosing function (the nocow
	 * bucket unlock path) starts above the visible region.  The change
	 * folds the sign check into the atomic decrement: atomic_sub_return
	 * gives the post-subtraction value, so the BUG_ON now tolerates the
	 * count reaching exactly zero, which the old pre-read check did not.
	 */
	for (i = 0; i < ARRAY_SIZE(l->b); i++)
		if (l->b[i] == dev_bucket) {
-			BUG_ON(sign(atomic_read(&l->l[i])) != lock_val);
+			int v = atomic_sub_return(lock_val, &l->l[i]);
-			if (!atomic_sub_return(lock_val, &l->l[i]))
+			BUG_ON(v && sign(v) != lock_val);
+			if (!v)
				closure_wake_up(&l->wait);
			return;
		}
	/*
	 * NOTE(review): diff hunk fragment -- tail of the nocow trylock
	 * function; its signature and the earlier use of 'v' are outside the
	 * visible region.  Negative counts mean copy/read locks, positive mean
	 * update/write locks, so opposite signs conflict.
	 */
	if (lock_val > 0 ? v < 0 : v > 0)
		goto fail;

take_lock:
+	v = atomic_read(&l->l[i]);
+	/* Overflow? */
+	if (v && sign(v + lock_val) != sign(v))
+		goto fail;
+
	atomic_add(lock_val, &l->l[i]);
	spin_unlock(&l->lock);
	return true;
}
/*
 * NOTE(review): diff hunk fragment -- interior context lines (the loop over
 * the lock table that assigns 'l' and accumulates nr_zero) are missing, which
 * is why 'l' appears used without visible initialization and why a second
 * "(%u empty entries)" print and closing brace trail the function body.  The
 * stray '+' line between the signature and '{' looks like diff corruption --
 * TODO confirm against the original patch.  Left byte-identical.
 */
void bch2_nocow_locks_to_text(struct printbuf *out, struct bucket_nocow_lock_table *t)
+
{
	unsigned i, nr_zero = 0;
	struct nocow_lock_bucket *l;
	prt_printf(out, "(%u empty entries)\n", nr_zero);
	nr_zero = 0;
	/*
	 * New form prints the bucket as a bpos plus a human-readable lock kind:
	 * negative counts are "copy" (read) holders, positive are "update".
	 */
-	for (i = 0; i < ARRAY_SIZE(l->l); i++)
-		if (atomic_read(&l->l[i]))
-			prt_printf(out, "%llu: %i ", l->b[i], atomic_read(&l->l[i]));
+	for (i = 0; i < ARRAY_SIZE(l->l); i++) {
+		int v = atomic_read(&l->l[i]);
+		if (v) {
+			bch2_bpos_to_text(out, u64_to_bucket(l->b[i]));
+			prt_printf(out, ": %s %u ", v < 0 ? "copy" : "update", abs(v));
+		}
+	}
	prt_newline(out);
}
	prt_printf(out, "(%u empty entries)\n", nr_zero);
}
+/*
+ * Shutdown-time sanity check for the nocow lock table: every per-bucket
+ * lock count must have dropped back to zero by the time the filesystem
+ * tears down; a nonzero count means a leaked nocow lock.
+ */
+void bch2_fs_nocow_locking_exit(struct bch_fs *c)
+{
+	struct bucket_nocow_lock_table *t = &c->nocow_locks;
+
+	for (struct nocow_lock_bucket *l = t->l; l < t->l + ARRAY_SIZE(t->l); l++)
+		for (unsigned j = 0; j < ARRAY_SIZE(l->l); j++)
+			BUG_ON(atomic_read(&l->l[j]));
+}
+
/*
 * Initialize the per-filesystem nocow lock table: only the spinlocks need
 * explicit init here; the atomic lock counts start out zeroed with the
 * containing bch_fs allocation.  (Diff hunk: the '-' lines are the old
 * index-based loop, the '+' lines the pointer-based replacement.)
 */
int bch2_fs_nocow_locking_init(struct bch_fs *c)
{
-	unsigned i;
+	struct bucket_nocow_lock_table *t = &c->nocow_locks;
-	for (i = 0; i < ARRAY_SIZE(c->nocow_locks.l); i++)
-		spin_lock_init(&c->nocow_locks.l[i].lock);
+	for (struct nocow_lock_bucket *l = t->l; l < t->l + ARRAY_SIZE(t->l); l++)
+		spin_lock_init(&l->lock);
	return 0;
}
x(check_alloc_to_lru_refs, PASS_FSCK) \
x(fs_freespace_init, PASS_ALWAYS|PASS_SILENT) \
x(bucket_gens_init, 0) \
- x(resume_logged_ops, PASS_ALWAYS) \
x(check_snapshot_trees, PASS_FSCK) \
x(check_snapshots, PASS_FSCK) \
x(check_subvols, PASS_FSCK) \
x(delete_dead_snapshots, PASS_FSCK|PASS_UNCLEAN) \
x(fs_upgrade_for_subvolumes, 0) \
+ x(resume_logged_ops, PASS_ALWAYS) \
x(check_inodes, PASS_FSCK) \
x(check_extents, PASS_FSCK) \
x(check_dirents, PASS_FSCK) \