if (!new_keys.d) {
bch_err(c, "%s: error allocating new key array (size %zu)",
__func__, new_keys.size);
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_journal_key_insert;
}
/* Since @keys was full, there was no gap: */
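+ /* the _take() variant takes ownership of @k, so insert a heap-allocated copy: */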
n = kmalloc(bkey_bytes(&k->k), GFP_KERNEL);
if (!n)
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_journal_key_insert;
bkey_copy(n, k);
ret = bch2_journal_key_insert_take(c, id, level, n);
keys->nr = keys->gap = keys->size = 0;
}
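+/*
+ * Sort keys by (btree_id, level, pos) and compact, keeping only the last of
+ * each run of duplicates: journal_sort_key_cmp breaks ties by journal_seq, so
+ * that's the newest version of each key.
+ */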
+static void __journal_keys_sort(struct journal_keys *keys)
+{
+ struct journal_key *src, *dst;
+
+ sort(keys->d, keys->nr, sizeof(keys->d[0]), journal_sort_key_cmp, NULL);
+
+ src = dst = keys->d;
+ while (src < keys->d + keys->nr) {
+ while (src + 1 < keys->d + keys->nr &&
+ src[0].btree_id == src[1].btree_id &&
+ src[0].level == src[1].level &&
+ bpos_eq(src[0].k->k.p, src[1].k->k.p))
+ src++;
+
+ *dst++ = *src++;
+ }
+
+ keys->nr = dst - keys->d;
+}
+
static int journal_keys_sort(struct bch_fs *c)
{
struct genradix_iter iter;
struct journal_replay *i, **_i;
struct jset_entry *entry;
- struct bkey_i *k, *_n;
+ struct bkey_i *k;
struct journal_keys *keys = &c->journal_keys;
- struct journal_key *src, *dst;
- size_t nr_keys = 0;
+ size_t nr_keys = 0, nr_read = 0;
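+ /* first pass: count keys so we can size the sort buffer */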
genradix_for_each(&c->journal_entries, iter, _i) {
i = *_i;
if (!i || i->ignore)
continue;
- for_each_jset_key(k, _n, entry, &i->j)
+ for_each_jset_key(k, entry, &i->j)
nr_keys++;
}
keys->size = roundup_pow_of_two(nr_keys);
keys->d = kvmalloc_array(keys->size, sizeof(keys->d[0]), GFP_KERNEL);
- if (!keys->d)
- return -ENOMEM;
+ if (!keys->d) {
+ bch_err(c, "Failed to allocate buffer for sorted journal keys (%zu keys); trying slowpath",
+ nr_keys);
+
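+ /*
+ * Keep halving the buffer until the allocation succeeds, but don't
+ * bother shrinking below 1/8th of the full key count:
+ */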
+ do {
+ keys->size >>= 1;
+ keys->d = kvmalloc_array(keys->size, sizeof(keys->d[0]), GFP_KERNEL);
+ } while (!keys->d && keys->size > nr_keys / 8);
+
+ if (!keys->d) {
+ bch_err(c, "Failed to allocate %zu size buffer for sorted journal keys; exiting",
+ keys->size);
+ return -BCH_ERR_ENOMEM_journal_keys_sort;
+ }
+ }
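+ /* second pass: read keys into the buffer, sorting and compacting whenever it fills */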
genradix_for_each(&c->journal_entries, iter, _i) {
i = *_i;
if (!i || i->ignore)
continue;
- for_each_jset_key(k, _n, entry, &i->j)
+ cond_resched();
+
+ for_each_jset_key(k, entry, &i->j) {
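+ /*
+ * Buffer full: sort and compact what we have in place; if that
+ * frees up less than 1/8th of the buffer, give up:
+ */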
+ if (keys->nr == keys->size) {
+ __journal_keys_sort(keys);
+
+ if (keys->nr > keys->size * 7 / 8) {
+ bch_err(c, "Too many journal keys for slowpath; have %zu compacted, buf size %zu, processed %zu/%zu",
+ keys->nr, keys->size, nr_read, nr_keys);
+ return -BCH_ERR_ENOMEM_journal_keys_sort;
+ }
+ }
+
keys->d[keys->nr++] = (struct journal_key) {
.btree_id = entry->btree_id,
.level = entry->level,
.k = k,
.journal_seq = le64_to_cpu(i->j.seq),
.journal_offset = k->_data - i->j._data,
};
- }
-
- sort(keys->d, keys->nr, sizeof(keys->d[0]), journal_sort_key_cmp, NULL);
-
- src = dst = keys->d;
- while (src < keys->d + keys->nr) {
- while (src + 1 < keys->d + keys->nr &&
- src[0].btree_id == src[1].btree_id &&
- src[0].level == src[1].level &&
- bpos_eq(src[0].k->k.p, src[1].k->k.p))
- src++;
- *dst++ = *src++;
+ nr_read++;
+ }
}
- keys->nr = dst - keys->d;
+ __journal_keys_sort(keys);
keys->gap = keys->nr;
+
+ bch_verbose(c, "Journal keys: %zu read, %zu after sorting and compacting", nr_keys, keys->nr);
return 0;
}
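+ /*
+ * Replay has to happen in journal-sequence order, so sort an array of
+ * pointers by sequence number, leaving keys->d itself sorted by position:
+ */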
keys_sorted = kvmalloc_array(keys->nr, sizeof(*keys_sorted), GFP_KERNEL);
if (!keys_sorted)
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_journal_replay;
for (i = 0; i < keys->nr; i++)
keys_sorted[i] = &keys->d[i];
sort(keys_sorted, keys->nr, sizeof(keys_sorted[0]),
journal_sort_seq_cmp, NULL);
if (keys->nr) {
- ret = bch2_fs_log_msg(c, "Starting journal replay (%zu keys in entries %llu-%llu)",
- keys->nr, start_seq, end_seq);
+ ret = bch2_journal_log_msg(c, "Starting journal replay (%zu keys in entries %llu-%llu)",
+ keys->nr, start_seq, end_seq);
if (ret)
goto err;
}
ret = bch2_journal_error(j);
if (keys->nr && !ret)
- bch2_fs_log_msg(c, "journal replay finished");
+ bch2_journal_log_msg(c, "journal replay finished");
err:
kvfree(keys_sorted);
return ret;
IS_ERR(k1) ||
IS_ERR(k2) ||
k1->k.u64s != k2->k.u64s ||
- memcmp(k1, k2, bkey_bytes(k1)) ||
+ memcmp(k1, k2, bkey_bytes(&k1->k)) ||
l1 != l2, c,
"superblock btree root %u doesn't match journal after clean shutdown\n"
"sb: l=%u %s\n"
GFP_KERNEL);
if (!clean) {
mutex_unlock(&c->sb_lock);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(-BCH_ERR_ENOMEM_read_superblock_clean);
}
ret = bch2_sb_clean_validate_late(c, clean, READ);
}
if (!c->opts.nochanges) {
- if (c->sb.version < bcachefs_metadata_version_lru_v2) {
- bch_info(c, "version prior to backpointers, upgrade and fsck required");
+ if (c->sb.version < bcachefs_metadata_version_no_bps_in_alloc_keys) {
+ bch_info(c, "version prior to no_bps_in_alloc_keys, upgrade and fsck required");
c->opts.version_upgrade = true;
c->opts.fsck = true;
c->opts.fix_errors = FSCK_OPT_YES;
- } else if (c->sb.version < bcachefs_metadata_version_fragmentation_lru) {
- bch_info(c, "version prior to backpointers, upgrade required");
- c->opts.version_upgrade = true;
}
}
journal_seq += 8;
if (blacklist_seq != journal_seq) {
- ret = bch2_fs_log_msg(c, "blacklisting entries %llu-%llu",
- blacklist_seq, journal_seq) ?:
+ ret = bch2_journal_log_msg(c, "blacklisting entries %llu-%llu",
+ blacklist_seq, journal_seq) ?:
bch2_journal_seq_blacklist_add(c,
blacklist_seq, journal_seq);
if (ret) {
}
}
- ret = bch2_fs_log_msg(c, "starting journal at entry %llu, replaying %llu-%llu",
- journal_seq, last_seq, blacklist_seq - 1) ?:
+ ret = bch2_journal_log_msg(c, "starting journal at entry %llu, replaying %llu-%llu",
+ journal_seq, last_seq, blacklist_seq - 1) ?:
bch2_fs_journal_start(&c->journal, journal_seq);
if (ret)
goto err;
if (c->opts.reconstruct_alloc)
- bch2_fs_log_msg(c, "dropping alloc info");
+ bch2_journal_log_msg(c, "dropping alloc info");
/*
* Skip past versions that might have possibly been used (as nonces),