-5074caad6a72d6e38c21a7e02f5e62048f2046d7
+400f275d46228e0ca08d8c931a674a534db0f4fb
* six_trylock_type(lock, type)
* six_trylock_convert(lock, from, to)
*
- * A lock may be held multiple types by the same thread (for read or intent,
+ * A lock may be held multiple times by the same thread (for read or intent,
* not write). However, the six locks code does _not_ implement the actual
* recursive checks itself - rather, if your code (e.g. btree iterator
* code) knows that the current thread already has a lock held, and for the
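The comment above describes the type-generic entry points; below is a minimal sketch of how a caller might use them. It is an illustration, not part of this patch: it assumes the SIX_LOCK_read/SIX_LOCK_intent lock types and the six_unlock_type() helper from six.h, and example_read_to_intent() is a made-up name.

static bool example_read_to_intent(struct six_lock *lock)
{
	/* Take a read lock without blocking: */
	if (!six_trylock_type(lock, SIX_LOCK_read))
		return false;

	/*
	 * Try to convert the read hold into an intent hold; on failure the
	 * read lock is still held, so drop it before returning:
	 */
	if (!six_trylock_convert(lock, SIX_LOCK_read, SIX_LOCK_intent)) {
		six_unlock_type(lock, SIX_LOCK_read);
		return false;
	}

	six_unlock_type(lock, SIX_LOCK_intent);
	return true;
}

Read and intent holds can coexist with other readers, so the trylock/convert pair above never blocks; a caller that needs to wait would use the blocking six_lock_*() variants instead.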
goto err;
bkey_reassemble(update, k);
ret = bch2_trans_update_extent(trans, iter, update, 0);
- if (!ret)
+ if (ret)
goto err;
}
}
BUG_ON(u64s > j->entry_u64s_reserved);
le32_add_cpu(&jset->u64s, u64s);
- BUG_ON(vstruct_sectors(jset, c->block_bits) > w->sectors);
+
+ sectors = vstruct_sectors(jset, c->block_bits);
+ bytes = vstruct_bytes(jset);
+
+ if (sectors > w->sectors) {
+ bch2_fs_fatal_error(c, "aieeee! journal write overran available space, %zu > %u (extra %u reserved %u/%u)",
+ vstruct_bytes(jset), w->sectors << 9,
+ u64s, w->u64s_reserved, j->entry_u64s_reserved);
+ goto err;
+ }
jset->magic = cpu_to_le64(jset_magic(c));
jset->version = c->sb.version < bcachefs_metadata_version_bkey_renumber
jset_validate(c, NULL, jset, 0, WRITE))
goto err;
- sectors = vstruct_sectors(jset, c->block_bits);
- BUG_ON(sectors > w->sectors);
-
- bytes = vstruct_bytes(jset);
memset((void *) jset + bytes, 0, (sectors << 9) - bytes);
retry_alloc:
#include "bcachefs.h"
#include "btree_update.h"
#include "errcode.h"
+#include "error.h"
#include "inode.h"
#include "quota.h"
#include "subvolume.h"
ret = bch2_snapshot_tree_lookup(trans,
snapshot_t(c, k.k->p.snapshot)->tree, &s_t);
+ bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
+ "%s: snapshot tree %u not found", __func__,
+ snapshot_t(c, k.k->p.snapshot)->tree);
if (ret)
return ret;
int bch2_replicas_gc_end(struct bch_fs *c, int ret)
{
- unsigned i;
-
lockdep_assert_held(&c->replicas_gc_lock);
mutex_lock(&c->sb_lock);
percpu_down_write(&c->mark_lock);
- /*
- * this is kind of crappy; the replicas gc mechanism needs to be ripped
- * out
- */
-
- for (i = 0; i < c->replicas.nr; i++) {
- struct bch_replicas_entry *e =
- cpu_replicas_entry(&c->replicas, i);
- struct bch_replicas_cpu n;
-
- if (!__replicas_has_entry(&c->replicas_gc, e) &&
- bch2_fs_usage_read_one(c, &c->usage_base->replicas[i])) {
- n = cpu_replicas_add_entry(&c->replicas_gc, e);
- if (!n.entries) {
- ret = -BCH_ERR_ENOMEM_cpu_replicas;
- goto err;
- }
-
- swap(n, c->replicas_gc);
- kfree(n.entries);
- }
- }
-
ret = bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc);
if (ret)
goto err;
int bch2_snapshot_tree_lookup(struct btree_trans *trans, u32 id,
struct bch_snapshot_tree *s)
{
- int ret;
-
- ret = bch2_bkey_get_val_typed(trans, BTREE_ID_snapshot_trees, POS(0, id),
- BTREE_ITER_WITH_UPDATES, snapshot_tree, s);
-
- if (bch2_err_matches(ret, ENOENT))
- bch_err(trans->c, "snapshot tree %u not found", id);
- return ret;
+ return bch2_bkey_get_val_typed(trans, BTREE_ID_snapshot_trees, POS(0, id),
+ BTREE_ITER_WITH_UPDATES, snapshot_tree, s);
}
static struct bkey_i_snapshot_tree *
struct bch_snapshot_tree s_t;
int ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
+ if (bch2_err_matches(ret, ENOENT))
+ return 0;
if (ret)
return ret;
tree_id = le32_to_cpu(root.v->tree);
ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
- if (ret)
+ if (ret && !bch2_err_matches(ret, ENOENT))
return ret;
- if (le32_to_cpu(s_t.root_snapshot) != root_id) {
+ if (ret || le32_to_cpu(s_t.root_snapshot) != root_id) {
u = bch2_bkey_make_mut_typed(trans, &root_iter, root.s_c, 0, snapshot);
ret = PTR_ERR_OR_ZERO(u) ?:
snapshot_tree_create(trans, root_id,
struct bch_snapshot_tree st;
ret = bch2_snapshot_tree_lookup(trans, snapshot_tree, &st);
+
+ bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
+ "%s: snapshot tree %u not found", __func__, snapshot_tree);
+
if (ret)
return ret;