-9b77e72c474e11130b514abd41a3c06e3f67c2ab
+e2b8120595b8d82ad51f3b4310deaef1c96b3e26
opt_set(opts, nochanges, true);
opt_set(opts, norecovery, true);
opt_set(opts, degraded, true);
- opt_set(opts, errors, BCH_ON_ERROR_CONTINUE);
+ opt_set(opts, errors, BCH_ON_ERROR_continue);
opt_set(opts, fix_errors, FSCK_OPT_YES);
while ((opt = getopt(argc, argv, "o:fvh")) != -1)
opt_set(opts, nochanges, true);
opt_set(opts, norecovery, true);
opt_set(opts, degraded, true);
- opt_set(opts, errors, BCH_ON_ERROR_CONTINUE);
+ opt_set(opts, errors, BCH_ON_ERROR_continue);
while ((opt = getopt(argc, argv, "b:s:e:i:m:fvh")) != -1)
switch (opt) {
opt_set(opts, nochanges, true);
opt_set(opts, norecovery, true);
opt_set(opts, degraded, true);
- opt_set(opts, errors, BCH_ON_ERROR_CONTINUE);
+ opt_set(opts, errors, BCH_ON_ERROR_continue);
opt_set(opts, fix_errors, FSCK_OPT_YES);
opt_set(opts, keep_journal, true);
struct bch_ioctl_dev_usage u = bchu_dev_usage(fs, dev_idx);
- if (u.state == BCH_MEMBER_STATE_RW) {
+ if (u.state == BCH_MEMBER_STATE_rw) {
printf("Setting %s readonly\n", dev_path);
- bchu_disk_set_state(fs, dev_idx, BCH_MEMBER_STATE_RO, 0);
+ bchu_disk_set_state(fs, dev_idx, BCH_MEMBER_STATE_ro, 0);
}
return bchu_data(fs, (struct bch_ioctl_data) {
die("Please supply a device state");
unsigned new_state = read_string_list_or_die(new_state_str,
- bch2_dev_state, "device state");
+ bch2_member_states, "device state");
if (!offline) {
unsigned dev_idx;
printf("\n");
printf_pad(20, "%s (device %u):", d->label ?: "(no label)", d->idx);
- printf("%30s%16s\n", d->dev ?: "(device not found)", bch2_dev_state[u.state]);
+ printf("%30s%16s\n", d->dev ?: "(device not found)", bch2_member_states[u.state]);
printf("%-20s%16s%16s%16s\n",
"", "data", "buckets", "fragmented");
int ret;
bch2_inode_pack(c, &packed, inode);
- ret = bch2_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i,
+ ret = bch2_btree_insert(c, BTREE_ID_inodes, &packed.inode.k_i,
NULL, NULL, 0);
if (ret)
die("error updating inode: %s", strerror(-ret));
bch2_mark_bkey_replicas(c, extent_i_to_s_c(e).s_c);
- ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &e->k_i,
+ ret = bch2_btree_insert(c, BTREE_ID_extents, &e->k_i,
&res, NULL, 0);
if (ret)
die("btree insert error %s", strerror(-ret));
m->first_bucket = 0;
m->bucket_size = cpu_to_le16(i->bucket_size);
- SET_BCH_MEMBER_REPLACEMENT(m, CACHE_REPLACEMENT_LRU);
+ SET_BCH_MEMBER_REPLACEMENT(m, BCH_CACHE_REPLACEMENT_lru);
SET_BCH_MEMBER_DISCARD(m, i->discard);
SET_BCH_MEMBER_DATA_ALLOWED(m, i->data_allowed);
SET_BCH_MEMBER_DURABILITY(m, i->durability + 1);
time_str,
BCH_MEMBER_STATE(m) < BCH_MEMBER_STATE_NR
- ? bch2_dev_state[BCH_MEMBER_STATE(m)]
+ ? bch2_member_states[BCH_MEMBER_STATE(m)]
: "unknown",
group,
data_allowed_str,
data_has_str,
- BCH_MEMBER_REPLACEMENT(m) < CACHE_REPLACEMENT_NR
+ BCH_MEMBER_REPLACEMENT(m) < BCH_CACHE_REPLACEMENT_NR
? bch2_cache_replacement_policies[BCH_MEMBER_REPLACEMENT(m)]
: "unknown",
pr_units(le16_to_cpu(sb->block_size), units),
pr_units(BCH_SB_BTREE_NODE_SIZE(sb), units),
- BCH_SB_ERROR_ACTION(sb) < BCH_NR_ERROR_ACTIONS
+ BCH_SB_ERROR_ACTION(sb) < BCH_ON_ERROR_NR
? bch2_error_actions[BCH_SB_ERROR_ACTION(sb)]
: "unknown",
int ret;
down_read(&c->gc_lock);
- ret = bch2_btree_and_journal_walk(c, journal_keys, BTREE_ID_ALLOC,
+ ret = bch2_btree_and_journal_walk(c, journal_keys, BTREE_ID_alloc,
NULL, bch2_alloc_read_fn);
up_read(&c->gc_lock);
bch2_trans_begin(trans);
ret = bch2_btree_key_cache_flush(trans,
- BTREE_ID_ALLOC, iter->pos);
+ BTREE_ID_alloc, iter->pos);
if (ret)
goto err;
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC, POS_MIN,
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_alloc, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
for_each_member_device(ca, c, i) {
u64 *time, now;
int ret = 0;
- iter = bch2_trans_get_iter(trans, BTREE_ID_ALLOC, POS(dev, bucket_nr),
+ iter = bch2_trans_get_iter(trans, BTREE_ID_alloc, POS(dev, bucket_nr),
BTREE_ITER_CACHED|
BTREE_ITER_CACHED_NOFILL|
BTREE_ITER_INTENT);
ca->inc_gen_needs_gc = 0;
switch (ca->mi.replacement) {
- case CACHE_REPLACEMENT_LRU:
+ case BCH_CACHE_REPLACEMENT_lru:
find_reclaimable_buckets_lru(c, ca);
break;
- case CACHE_REPLACEMENT_FIFO:
+ case BCH_CACHE_REPLACEMENT_fifo:
find_reclaimable_buckets_fifo(c, ca);
break;
- case CACHE_REPLACEMENT_RANDOM:
+ case BCH_CACHE_REPLACEMENT_random:
find_reclaimable_buckets_random(c, ca);
break;
}
bch2_trans_init(&trans, c, 0, 0);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC,
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_alloc,
POS(ca->dev_idx, 0),
BTREE_ITER_CACHED|
BTREE_ITER_CACHED_NOFILL|
GC_PHASE_START,
GC_PHASE_SB,
- GC_PHASE_BTREE_EC,
- GC_PHASE_BTREE_EXTENTS,
- GC_PHASE_BTREE_INODES,
- GC_PHASE_BTREE_DIRENTS,
- GC_PHASE_BTREE_XATTRS,
- GC_PHASE_BTREE_ALLOC,
- GC_PHASE_BTREE_QUOTAS,
- GC_PHASE_BTREE_REFLINK,
+ GC_PHASE_BTREE_stripes,
+ GC_PHASE_BTREE_extents,
+ GC_PHASE_BTREE_inodes,
+ GC_PHASE_BTREE_dirents,
+ GC_PHASE_BTREE_xattrs,
+ GC_PHASE_BTREE_alloc,
+ GC_PHASE_BTREE_quotas,
+ GC_PHASE_BTREE_reflink,
GC_PHASE_PENDING_DELETE,
GC_PHASE_ALLOC,
* Tracks GC's progress - everything in the range [ZERO_KEY..gc_cur_pos]
* has been marked by GC.
*
- * gc_cur_phase is a superset of btree_ids (BTREE_ID_EXTENTS etc.)
+ * gc_cur_phase is a superset of btree_ids (BTREE_ID_extents etc.)
*
* Protected by gc_pos_lock. Only written to by GC thread, so GC thread
* can read without a lock.
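Note: marking code decides whether GC has already covered a key by comparing against gc_cur_pos. A minimal sketch of that ordering test, assuming the usual lexicographic phase/pos/level comparison (field names assumed from the comment above, not shown in this hunk):

	static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
	{
		if (l.phase != r.phase)
			return l.phase < r.phase ? -1 : 1;
		if (bkey_cmp(l.pos, r.pos))
			return bkey_cmp(l.pos, r.pos);
		return (l.level > r.level) - (l.level < r.level);
	}

	/* a position p has been marked by GC iff gc_pos_cmp(p, c->gc_pos) <= 0 */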
x(discard, 1) \
x(error, 2) \
x(cookie, 3) \
- x(whiteout, 4) \
+ x(hash_whiteout, 4) \
x(btree_ptr, 5) \
x(extent, 6) \
x(reservation, 7) \
KEY_TYPE_MAX,
};
+struct bch_deleted {
+ struct bch_val v;
+};
+
+struct bch_discard {
+ struct bch_val v;
+};
+
+struct bch_error {
+ struct bch_val v;
+};
+
struct bch_cookie {
struct bch_val v;
__le64 cookie;
};
+struct bch_hash_whiteout {
+ struct bch_val v;
+};
+
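Note: these key types carry no payload; the empty value structs exist so the x-macro accessor generator later in this patch can expand uniformly for every key type. Roughly what it produces for hash_whiteout (a sketch; the full macro body lies outside this hunk):

	struct bkey_i_hash_whiteout {
		union {
			struct bkey		k;
			struct bkey_i		k_i;
		};
		struct bch_hash_whiteout	v;
	};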
/* Extents */
/*
LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
#endif
+#define BCH_MEMBER_STATES() \
+ x(rw, 0) \
+ x(ro, 1) \
+ x(failed, 2) \
+ x(spare, 3)
+
enum bch_member_state {
- BCH_MEMBER_STATE_RW = 0,
- BCH_MEMBER_STATE_RO = 1,
- BCH_MEMBER_STATE_FAILED = 2,
- BCH_MEMBER_STATE_SPARE = 3,
- BCH_MEMBER_STATE_NR = 4,
+#define x(t, n) BCH_MEMBER_STATE_##t = n,
+ BCH_MEMBER_STATES()
+#undef x
+ BCH_MEMBER_STATE_NR
};
-enum cache_replacement {
- CACHE_REPLACEMENT_LRU = 0,
- CACHE_REPLACEMENT_FIFO = 1,
- CACHE_REPLACEMENT_RANDOM = 2,
- CACHE_REPLACEMENT_NR = 3,
+#define BCH_CACHE_REPLACEMENT_POLICIES() \
+ x(lru, 0) \
+ x(fifo, 1) \
+ x(random, 2)
+
+enum bch_cache_replacement_policies {
+#define x(t, n) BCH_CACHE_REPLACEMENT_##t = n,
+ BCH_CACHE_REPLACEMENT_POLICIES()
+#undef x
+ BCH_CACHE_REPLACEMENT_NR
};
struct bch_sb_field_members {
#define BCH_BKEY_PTRS_MAX 16U
+#define BCH_ERROR_ACTIONS() \
+ x(continue, 0) \
+ x(ro, 1) \
+ x(panic, 2)
+
enum bch_error_actions {
- BCH_ON_ERROR_CONTINUE = 0,
- BCH_ON_ERROR_RO = 1,
- BCH_ON_ERROR_PANIC = 2,
- BCH_NR_ERROR_ACTIONS = 3,
+#define x(t, n) BCH_ON_ERROR_##t = n,
+ BCH_ERROR_ACTIONS()
+#undef x
+ BCH_ON_ERROR_NR
};
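Note: this is the conversion pattern used throughout the patch: a single x-macro list is expanded once into enum constants and once into the matching string table (see opts.c below), so the two cannot drift apart. A standalone illustration with made-up identifiers:

	#define EXAMPLE_STATES()		\
		x(rw,		0)		\
		x(ro,		1)

	enum example_state {
	#define x(t, n) EXAMPLE_STATE_##t = n,
		EXAMPLE_STATES()
	#undef x
		EXAMPLE_STATE_NR
	};

	const char * const example_states[] = {
	#define x(t, n) #t,
		EXAMPLE_STATES()
	#undef x
		NULL
	};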
enum bch_str_hash_type {
BCH_STR_HASH_NR = 4,
};
+#define BCH_STR_HASH_OPTS() \
+ x(crc32c, 0) \
+ x(crc64, 1) \
+ x(siphash, 2)
+
enum bch_str_hash_opts {
- BCH_STR_HASH_OPT_CRC32C = 0,
- BCH_STR_HASH_OPT_CRC64 = 1,
- BCH_STR_HASH_OPT_SIPHASH = 2,
- BCH_STR_HASH_OPT_NR = 3,
+#define x(t, n) BCH_STR_HASH_OPT_##t = n,
+ BCH_STR_HASH_OPTS()
+#undef x
+ BCH_STR_HASH_OPT_NR
};
enum bch_csum_type {
}
}
+#define BCH_CSUM_OPTS() \
+ x(none, 0) \
+ x(crc32c, 1) \
+ x(crc64, 2)
+
enum bch_csum_opts {
- BCH_CSUM_OPT_NONE = 0,
- BCH_CSUM_OPT_CRC32C = 1,
- BCH_CSUM_OPT_CRC64 = 2,
- BCH_CSUM_OPT_NR = 3,
+#define x(t, n) BCH_CSUM_OPT_##t = n,
+ BCH_CSUM_OPTS()
+#undef x
+ BCH_CSUM_OPT_NR
};
#define BCH_COMPRESSION_TYPES() \
x(incompressible, 5)
enum bch_compression_type {
-#define x(t, n) BCH_COMPRESSION_TYPE_##t,
+#define x(t, n) BCH_COMPRESSION_TYPE_##t = n,
BCH_COMPRESSION_TYPES()
#undef x
BCH_COMPRESSION_TYPE_NR
x(zstd, 3)
enum bch_compression_opts {
-#define x(t, n) BCH_COMPRESSION_OPT_##t,
+#define x(t, n) BCH_COMPRESSION_OPT_##t = n,
BCH_COMPRESSION_OPTS()
#undef x
BCH_COMPRESSION_OPT_NR
/* Btree: */
-#define BCH_BTREE_IDS() \
- x(EXTENTS, 0, "extents") \
- x(INODES, 1, "inodes") \
- x(DIRENTS, 2, "dirents") \
- x(XATTRS, 3, "xattrs") \
- x(ALLOC, 4, "alloc") \
- x(QUOTAS, 5, "quotas") \
- x(EC, 6, "stripes") \
- x(REFLINK, 7, "reflink")
+#define BCH_BTREE_IDS() \
+ x(extents, 0) \
+ x(inodes, 1) \
+ x(dirents, 2) \
+ x(xattrs, 3) \
+ x(alloc, 4) \
+ x(quotas, 5) \
+ x(stripes, 6) \
+ x(reflink, 7)
enum btree_id {
-#define x(kwd, val, name) BTREE_ID_##kwd = val,
+#define x(kwd, val) BTREE_ID_##kwd = val,
BCH_BTREE_IDS()
#undef x
BTREE_ID_NR
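Note: with the names lowercased, the same list also generates bch2_btree_ids[] (moved to opts.c later in this patch), so iterating every btree by id and printing its name needs no hand-maintained table; e.g. (out here is an illustrative printbuf):

	for (unsigned id = 0; id < BTREE_ID_NR; id++)
		pr_buf(&out, "%s\n", bch2_btree_ids[id]);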
* bkey_i_extent to a bkey_i - since that's always safe, instead of conversion
* functions.
*/
-#define BKEY_VAL_ACCESSORS(name) \
+#define x(name, ...) \
struct bkey_i_##name { \
union { \
struct bkey k; \
return k; \
}
-BKEY_VAL_ACCESSORS(cookie);
-BKEY_VAL_ACCESSORS(btree_ptr);
-BKEY_VAL_ACCESSORS(extent);
-BKEY_VAL_ACCESSORS(reservation);
-BKEY_VAL_ACCESSORS(inode);
-BKEY_VAL_ACCESSORS(inode_generation);
-BKEY_VAL_ACCESSORS(dirent);
-BKEY_VAL_ACCESSORS(xattr);
-BKEY_VAL_ACCESSORS(alloc);
-BKEY_VAL_ACCESSORS(quota);
-BKEY_VAL_ACCESSORS(stripe);
-BKEY_VAL_ACCESSORS(reflink_p);
-BKEY_VAL_ACCESSORS(reflink_v);
-BKEY_VAL_ACCESSORS(inline_data);
-BKEY_VAL_ACCESSORS(btree_ptr_v2);
-BKEY_VAL_ACCESSORS(indirect_inline_data);
-BKEY_VAL_ACCESSORS(alloc_v2);
+BCH_BKEY_TYPES();
+#undef x
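Note: usage of the generated wrappers is unchanged; the value stays typed while &k.k_i still passes anywhere a struct bkey_i * is expected, exactly as the tests below rely on:

	struct bkey_i_cookie k;

	bkey_cookie_init(&k.k_i);
	k.k.p.offset	= 1;
	k.v.cookie	= cpu_to_le64(42);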
/* byte order helpers */
.key_invalid = key_type_cookie_invalid, \
}
-#define bch2_bkey_ops_whiteout (struct bkey_ops) { \
+#define bch2_bkey_ops_hash_whiteout (struct bkey_ops) { \
.key_invalid = empty_val_key_invalid, \
}
if (k.k->u64s < BKEY_U64s)
return "u64s too small";
- if (type == BKEY_TYPE_BTREE &&
+ if (type == BKEY_TYPE_btree &&
bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
return "value too big";
if (k.k->p.snapshot)
return "nonzero snapshot";
- if (type != BKEY_TYPE_BTREE &&
+ if (type != BKEY_TYPE_btree &&
!bkey_cmp(k.k->p, POS_MAX))
return "POS_MAX key";
u8 old;
u8 new;
} bkey_renumber_table[] = {
- {BKEY_TYPE_BTREE, 128, KEY_TYPE_btree_ptr },
- {BKEY_TYPE_EXTENTS, 128, KEY_TYPE_extent },
- {BKEY_TYPE_EXTENTS, 129, KEY_TYPE_extent },
- {BKEY_TYPE_EXTENTS, 130, KEY_TYPE_reservation },
- {BKEY_TYPE_INODES, 128, KEY_TYPE_inode },
- {BKEY_TYPE_INODES, 130, KEY_TYPE_inode_generation },
- {BKEY_TYPE_DIRENTS, 128, KEY_TYPE_dirent },
- {BKEY_TYPE_DIRENTS, 129, KEY_TYPE_whiteout },
- {BKEY_TYPE_XATTRS, 128, KEY_TYPE_xattr },
- {BKEY_TYPE_XATTRS, 129, KEY_TYPE_whiteout },
- {BKEY_TYPE_ALLOC, 128, KEY_TYPE_alloc },
- {BKEY_TYPE_QUOTAS, 128, KEY_TYPE_quota },
+ {BKEY_TYPE_btree, 128, KEY_TYPE_btree_ptr },
+ {BKEY_TYPE_extents, 128, KEY_TYPE_extent },
+ {BKEY_TYPE_extents, 129, KEY_TYPE_extent },
+ {BKEY_TYPE_extents, 130, KEY_TYPE_reservation },
+ {BKEY_TYPE_inodes, 128, KEY_TYPE_inode },
+ {BKEY_TYPE_inodes, 130, KEY_TYPE_inode_generation },
+ {BKEY_TYPE_dirents, 128, KEY_TYPE_dirent },
+ {BKEY_TYPE_dirents, 129, KEY_TYPE_hash_whiteout },
+ {BKEY_TYPE_xattrs, 128, KEY_TYPE_xattr },
+ {BKEY_TYPE_xattrs, 129, KEY_TYPE_hash_whiteout },
+ {BKEY_TYPE_alloc, 128, KEY_TYPE_alloc },
+ {BKEY_TYPE_quotas, 128, KEY_TYPE_quota },
};
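Note: this table maps the old per-btree key type numbers (which started at 128 within each btree) to the new global KEY_TYPE_* numbering; bch2_bkey_renumber() walks it in whichever direction read/write requires. A sketch of the old-to-new direction (the first field's name is not visible in this hunk and is assumed here):

	static u8 old_to_new(enum btree_node_type type, u8 old)
	{
		unsigned i;

		for (i = 0; i < ARRAY_SIZE(bkey_renumber_table); i++)
			if (bkey_renumber_table[i].btree_node_type == type &&
			    bkey_renumber_table[i].old == old)
				return bkey_renumber_table[i].new;
		return old;
	}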
void bch2_bkey_renumber(enum btree_node_type btree_node_type,
break;
case 2:
if (version < bcachefs_metadata_version_inode_btree_change &&
- btree_id == BTREE_ID_INODES) {
+ btree_id == BTREE_ID_inodes) {
if (!bkey_packed(k)) {
struct bkey_i *u = packed_to_bkey(k);
swap(u->k.p.inode, u->k.p.offset);
#include <linux/sched/mm.h>
#include <trace/events/bcachefs.h>
-const char * const bch2_btree_ids[] = {
-#define x(kwd, val, name) name,
- BCH_BTREE_IDS()
-#undef x
- NULL
-};
-
void bch2_recalc_btree_reserve(struct bch_fs *c)
{
unsigned i, reserve = 16;
struct btree_iter;
-extern const char * const bch2_btree_ids[];
-
void bch2_recalc_btree_reserve(struct bch_fs *);
void bch2_btree_node_hash_remove(struct btree_cache *, struct btree *);
static inline enum gc_phase btree_id_to_gc_phase(enum btree_id id)
{
switch (id) {
-#define x(n, v, s) case BTREE_ID_##n: return GC_PHASE_BTREE_##n;
+#define x(name, v) case BTREE_ID_##name: return GC_PHASE_BTREE_##name;
BCH_BTREE_IDS()
#undef x
default:
bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
- if (ca->mi.state != BCH_MEMBER_STATE_RW)
+ if (ca->mi.state != BCH_MEMBER_STATE_rw)
set_btree_node_need_rewrite(b);
}
out:
unsigned whiteout_u64s = 0;
int ret;
- if (bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key), BKEY_TYPE_BTREE))
+ if (bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key), BKEY_TYPE_btree))
return -1;
ret = validate_bset(c, NULL, b, i, sectors, WRITE, false) ?:
int write, struct bkey_format *f)
{
if (version < bcachefs_metadata_version_inode_btree_change &&
- btree_id == BTREE_ID_INODES) {
+ btree_id == BTREE_ID_inodes) {
swap(f->bits_per_field[BKEY_FIELD_INODE],
f->bits_per_field[BKEY_FIELD_OFFSET]);
swap(f->field_offset[BKEY_FIELD_INODE],
bch2_bpos_swab(p);
if (version < bcachefs_metadata_version_inode_btree_change &&
- btree_id == BTREE_ID_INODES)
+ btree_id == BTREE_ID_inodes)
swap(p->inode, p->offset);
}
}
enum btree_node_type {
-#define x(kwd, val, name) BKEY_TYPE_##kwd = val,
+#define x(kwd, val) BKEY_TYPE_##kwd = val,
BCH_BTREE_IDS()
#undef x
- BKEY_TYPE_BTREE,
+ BKEY_TYPE_btree,
};
/* Type of a key in btree @id at level @level: */
static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
{
- return level ? BKEY_TYPE_BTREE : (enum btree_node_type) id;
+ return level ? BKEY_TYPE_btree : (enum btree_node_type) id;
}
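Note: interior nodes hold btree_ptr keys regardless of which btree they belong to, hence the single BKEY_TYPE_btree for any level > 0; at level 0 the key type namespace is simply the btree id:

	/*
	 * __btree_node_type(0, BTREE_ID_extents) == BKEY_TYPE_extents (leaf)
	 * __btree_node_type(1, BTREE_ID_extents) == BKEY_TYPE_btree   (interior)
	 */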
/* Type of keys @b contains: */
static inline bool btree_node_type_is_extents(enum btree_node_type type)
{
switch (type) {
- case BKEY_TYPE_EXTENTS:
- case BKEY_TYPE_REFLINK:
+ case BKEY_TYPE_extents:
+ case BKEY_TYPE_reflink:
return true;
default:
return false;
}
#define BTREE_NODE_TYPE_HAS_TRIGGERS \
- ((1U << BKEY_TYPE_EXTENTS)| \
- (1U << BKEY_TYPE_ALLOC)| \
- (1U << BKEY_TYPE_INODES)| \
- (1U << BKEY_TYPE_REFLINK)| \
- (1U << BKEY_TYPE_EC)| \
- (1U << BKEY_TYPE_BTREE))
+ ((1U << BKEY_TYPE_extents)| \
+ (1U << BKEY_TYPE_alloc)| \
+ (1U << BKEY_TYPE_inodes)| \
+ (1U << BKEY_TYPE_reflink)| \
+ (1U << BKEY_TYPE_stripes)| \
+ (1U << BKEY_TYPE_btree))
#define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS \
- ((1U << BKEY_TYPE_EXTENTS)| \
- (1U << BKEY_TYPE_INODES)| \
- (1U << BKEY_TYPE_EC)| \
- (1U << BKEY_TYPE_REFLINK))
+ ((1U << BKEY_TYPE_extents)| \
+ (1U << BKEY_TYPE_inodes)| \
+ (1U << BKEY_TYPE_stripes)| \
+ (1U << BKEY_TYPE_reflink))
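Note: these masks are consumed with a plain bit test, along the lines of:

	static inline bool btree_node_type_needs_gc(enum btree_node_type type)
	{
		return BTREE_NODE_TYPE_HAS_TRIGGERS & (1U << type);
	}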
enum btree_trigger_flags {
__BTREE_TRIGGER_NORUN, /* Don't run triggers at all */
struct bkey_packed *src, *dst, *n;
struct bset *i;
- BUG_ON(btree_node_type(b) != BKEY_TYPE_BTREE);
+ BUG_ON(btree_node_type(b) != BKEY_TYPE_btree);
bch2_btree_node_iter_init(&node_iter, b, &k->k.p);
{
return (((BTREE_NODE_TYPE_HAS_TRIGGERS &
~BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS)) |
- (1U << BTREE_ID_EC)) &
+ (1U << BTREE_ID_stripes)) &
(1U << iter->btree_id);
}
struct btree_iter **iter,
struct bkey_s_c *k)
{
- unsigned flags = btree_id != BTREE_ID_ALLOC
+ unsigned flags = btree_id != BTREE_ID_alloc
? BTREE_ITER_SLOTS
: BTREE_ITER_CACHED;
int ret;
if (IS_ERR(a))
return a;
- iter = trans_get_update(trans, BTREE_ID_ALLOC, pos, &k);
+ iter = trans_get_update(trans, BTREE_ID_alloc, pos, &k);
if (iter) {
*u = bch2_alloc_unpack(k);
} else {
- iter = bch2_trans_get_iter(trans, BTREE_ID_ALLOC, pos,
+ iter = bch2_trans_get_iter(trans, BTREE_ID_alloc, pos,
BTREE_ITER_CACHED|
BTREE_ITER_CACHED_NOFILL|
BTREE_ITER_INTENT);
struct bch_replicas_padded r;
int ret = 0;
- ret = trans_get_key(trans, BTREE_ID_EC, POS(0, p.ec.idx), &iter, &k);
+ ret = trans_get_key(trans, BTREE_ID_stripes, POS(0, p.ec.idx), &iter, &k);
if (ret < 0)
return ret;
__le64 *refcount;
s64 ret;
- ret = trans_get_key(trans, BTREE_ID_REFLINK,
+ ret = trans_get_key(trans, BTREE_ID_reflink,
POS(0, idx), &iter, &k);
if (ret < 0)
return ret;
bool data)
{
switch (type) {
- case BCH_CSUM_OPT_NONE:
+ case BCH_CSUM_OPT_none:
return BCH_CSUM_NONE;
- case BCH_CSUM_OPT_CRC32C:
+ case BCH_CSUM_OPT_crc32c:
return data ? BCH_CSUM_CRC32C : BCH_CSUM_CRC32C_NONZERO;
- case BCH_CSUM_OPT_CRC64:
+ case BCH_CSUM_OPT_crc64:
return data ? BCH_CSUM_CRC64 : BCH_CSUM_CRC64_NONZERO;
default:
BUG();
}
const struct bch_hash_desc bch2_dirent_hash_desc = {
- .btree_id = BTREE_ID_DIRENTS,
+ .btree_id = BTREE_ID_dirents,
.key_type = KEY_TYPE_dirent,
.hash_key = dirent_hash_key,
.hash_bkey = dirent_hash_bkey,
* overwrite old_dst - just make sure to use a
* whiteout when deleting src:
*/
- new_src->k.type = KEY_TYPE_whiteout;
+ new_src->k.type = KEY_TYPE_hash_whiteout;
}
} else {
/* Check if we need a whiteout to delete src: */
goto out;
if (ret)
- new_src->k.type = KEY_TYPE_whiteout;
+ new_src->k.type = KEY_TYPE_hash_whiteout;
}
}
struct bkey_s_c k;
int ret;
- for_each_btree_key(trans, iter, BTREE_ID_DIRENTS,
+ for_each_btree_key(trans, iter, BTREE_ID_dirents,
POS(dir_inum, 0), 0, k, ret) {
if (k.k->p.inode > dir_inum)
break;
bch2_trans_init(&trans, c, 0, 0);
- for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS,
+ for_each_btree_key(&trans, iter, BTREE_ID_dirents,
POS(inum, ctx->pos), 0, k, ret) {
if (k.k->p.inode > inum)
break;
int ret;
bch2_trans_init(&trans, c, 0, 0);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS(0, idx), BTREE_ITER_SLOTS);
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_stripes, POS(0, idx), BTREE_ITER_SLOTS);
k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
if (ret)
static int ec_stripe_delete(struct bch_fs *c, size_t idx)
{
- return bch2_btree_delete_range(c, BTREE_ID_EC,
+ return bch2_btree_delete_range(c, BTREE_ID_stripes,
POS(0, idx),
POS(0, idx + 1),
NULL);
retry:
bch2_trans_begin(&trans);
- for_each_btree_key(&trans, iter, BTREE_ID_EC, start_pos,
+ for_each_btree_key(&trans, iter, BTREE_ID_stripes, start_pos,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0) {
if (start_pos.offset) {
unsigned i;
int ret;
- iter = bch2_trans_get_iter(trans, BTREE_ID_EC,
+ iter = bch2_trans_get_iter(trans, BTREE_ID_stripes,
new->k.p, BTREE_ITER_INTENT);
k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
/* XXX this doesn't support the reflink btree */
- iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_extents,
bkey_start_pos(pos),
BTREE_ITER_INTENT);
bch2_trans_init(&trans, c, 0, 0);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS_MIN,
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_stripes, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
genradix_for_each(&c->stripes[0], giter, m) {
int bch2_stripes_read(struct bch_fs *c, struct journal_keys *journal_keys)
{
- int ret = bch2_btree_and_journal_walk(c, journal_keys, BTREE_ID_EC,
+ int ret = bch2_btree_and_journal_walk(c, journal_keys, BTREE_ID_stripes,
NULL, bch2_stripes_read_fn);
if (ret)
bch_err(c, "error reading stripes: %i", ret);
bch2_trans_init(&trans, c, 0, 0);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS(0, U64_MAX), 0);
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_stripes, POS(0, U64_MAX), 0);
k = bch2_btree_iter_prev(iter);
if (!IS_ERR_OR_NULL(k.k))
set_bit(BCH_FS_ERROR, &c->flags);
switch (c->opts.errors) {
- case BCH_ON_ERROR_CONTINUE:
+ case BCH_ON_ERROR_continue:
return false;
- case BCH_ON_ERROR_RO:
+ case BCH_ON_ERROR_ro:
if (bch2_fs_emergency_read_only(c))
bch_err(c, "emergency read only");
return true;
- case BCH_ON_ERROR_PANIC:
+ case BCH_ON_ERROR_panic:
panic(bch2_fmt(c, "panic after error"));
return true;
default:
bool dev;
down_write(&c->state_lock);
- dev = bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_RO,
+ dev = bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_ro,
BCH_FORCE_IF_DEGRADED);
if (dev
- ? __bch2_dev_set_state(c, ca, BCH_MEMBER_STATE_RO,
+ ? __bch2_dev_set_state(c, ca, BCH_MEMBER_STATE_ro,
BCH_FORCE_IF_DEGRADED)
: bch2_fs_emergency_read_only(c))
bch_err(ca,
struct bkey_s_c r_k;
for_each_btree_key(trans, iter,
- BTREE_ID_REFLINK, POS(0, idx + offset),
+ BTREE_ID_reflink, POS(0, idx + offset),
BTREE_ITER_SLOTS, r_k, ret2) {
if (bkey_cmp(bkey_start_pos(r_k.k),
POS(0, idx + sectors)) >= 0)
bch2_trans_init(&trans, c, 0, 0);
- for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, pos,
+ for_each_btree_key(&trans, iter, BTREE_ID_extents, pos,
BTREE_ITER_SLOTS, k, err) {
if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
break;
ca = bch_dev_bkey_exists(c, p.ptr.dev);
- if (ca->mi.state != BCH_MEMBER_STATE_FAILED)
+ if (ca->mi.state != BCH_MEMBER_STATE_failed)
durability = max_t(unsigned, durability, ca->mi.durability);
if (p.has_ec)
bch2_trans_init(&trans, c, 0, 0);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, POS_MIN,
BTREE_ITER_SLOTS);
bch2_pagecache_add_get(&inode->ei_pagecache_lock);
BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
bch2_trans_init(&trans, c, 0, 0);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, POS_MIN,
BTREE_ITER_SLOTS);
bchfs_read(&trans, iter, rbio, inum, NULL);
bch2_trans_init(&trans, c, 0, 0);
- for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, start, 0, k, ret) {
+ for_each_btree_key(&trans, iter, BTREE_ID_extents, start, 0, k, ret) {
if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
break;
goto err;
}
- src = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+ src = bch2_trans_get_iter(&trans, BTREE_ID_extents,
POS(inode->v.i_ino, src_start >> 9),
BTREE_ITER_INTENT);
dst = bch2_trans_copy_iter(&trans, src);
truncate_pagecache_range(&inode->v, offset, end - 1);
}
- iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_extents,
POS(inode->v.i_ino, block_start >> 9),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
end_pos = POS(inode->v.i_ino, block_end >> 9);
bch2_trans_init(&trans, c, 0, 0);
- for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
+ for_each_btree_key(&trans, iter, BTREE_ID_extents,
POS(inode->v.i_ino, offset >> 9), 0, k, ret) {
if (k.k->p.inode != inode->v.i_ino) {
break;
bch2_trans_init(&trans, c, 0, 0);
- for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
+ for_each_btree_key(&trans, iter, BTREE_ID_extents,
POS(inode->v.i_ino, offset >> 9),
BTREE_ITER_SLOTS, k, ret) {
if (k.k->p.inode != inode->v.i_ino) {
bch2_bkey_buf_init(&prev);
bch2_trans_init(&trans, c, 0, 0);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_extents,
POS(ei->v.i_ino, start >> 9), 0);
retry:
while ((k = bch2_btree_iter_peek(iter)).k &&
u64 sectors = 0;
int ret;
- for_each_btree_key(trans, iter, BTREE_ID_EXTENTS,
+ for_each_btree_key(trans, iter, BTREE_ID_extents,
POS(inum, 0), 0, k, ret) {
if (k.k->p.inode != inum)
break;
struct hash_check *h,
struct btree_iter *k_iter, struct bkey_s_c k)
{
- bool hole = (k.k->type != KEY_TYPE_whiteout &&
+ bool hole = (k.k->type != KEY_TYPE_hash_whiteout &&
k.k->type != desc.key_type);
if (hole || k.k->p.offset > h->chain_end + 1)
if (fsck_err(c, "cannot fix dirent by removing trailing garbage %s (%zu)\n"
"hash table key at wrong offset: btree %u, offset %llu, "
"hashed to %llu chain starts at %llu\n%s",
- buf, strlen(buf), BTREE_ID_DIRENTS,
+ buf, strlen(buf), BTREE_ID_dirents,
k->k->p.offset, hash, h->chain->pos.offset,
(bch2_bkey_val_to_text(&PBUF(buf), c,
*k), buf))) {
static int bch2_inode_truncate(struct bch_fs *c, u64 inode_nr, u64 new_size)
{
- return bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
+ return bch2_btree_delete_range(c, BTREE_ID_extents,
POS(inode_nr, round_up(new_size, block_bytes(c)) >> 9),
POS(inode_nr + 1, 0), NULL);
}
bch_verbose(c, "checking extents");
- iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_extents,
POS(BCACHEFS_ROOT_INO, 0),
BTREE_ITER_INTENT);
retry:
bch2_inode_pack(c, &p, &w.inode);
- ret = bch2_btree_insert(c, BTREE_ID_INODES,
+ ret = bch2_btree_insert(c, BTREE_ID_inodes,
&p.inode.k_i, NULL, NULL,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_LAZY_RW);
hash_check_init(&h);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS,
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_dirents,
POS(BCACHEFS_ROOT_INO, 0), 0);
retry:
for_each_btree_key_continue(iter, 0, k, ret) {
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS,
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs,
POS(BCACHEFS_ROOT_INO, 0), 0);
retry:
for_each_btree_key_continue(iter, 0, k, ret) {
bch2_inode_pack(c, &packed, root_inode);
- return bch2_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i,
+ return bch2_btree_insert(c, BTREE_ID_inodes, &packed.inode.k_i,
NULL, NULL,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_LAZY_RW);
if (e->offset == U64_MAX)
goto up;
- for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS,
+ for_each_btree_key(&trans, iter, BTREE_ID_dirents,
POS(e->inum, e->offset + 1), 0, k, ret) {
if (k.k->p.inode != e->inum)
break;
path.nr--;
}
- iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES, POS_MIN, 0);
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_inodes, POS_MIN, 0);
retry:
for_each_btree_key_continue(iter, 0, k, ret) {
if (k.k->type != KEY_TYPE_inode)
inc_link(c, links, range_start, range_end, BCACHEFS_ROOT_INO, false);
- for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN, 0, k, ret) {
+ for_each_btree_key(&trans, iter, BTREE_ID_dirents, POS_MIN, 0, k, ret) {
switch (k.k->type) {
case KEY_TYPE_dirent:
d = bkey_s_c_to_dirent(k);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES,
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_inodes,
POS(0, range_start), 0);
nlinks_iter = genradix_iter_init(links, 0);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
- for_each_btree_key(&trans, iter, BTREE_ID_INODES, POS_MIN, 0, k, ret) {
+ for_each_btree_key(&trans, iter, BTREE_ID_inodes, POS_MIN, 0, k, ret) {
if (k.k->type != KEY_TYPE_inode)
continue;
struct bkey_s_c k;
int ret;
- iter = bch2_trans_get_iter(trans, BTREE_ID_INODES, POS(0, inum),
+ iter = bch2_trans_get_iter(trans, BTREE_ID_inodes, POS(0, inum),
BTREE_ITER_CACHED|flags);
k = bch2_btree_iter_peek_cached(iter);
ret = bkey_err(k);
if (IS_ERR(inode_p))
return PTR_ERR(inode_p);
again:
- for_each_btree_key(trans, iter, BTREE_ID_INODES, POS(0, start),
+ for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, start),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
if (bkey_cmp(iter->pos, POS(0, max)) > 0)
break;
* cache before using a slot:
*/
if (k.k->type != KEY_TYPE_inode &&
- !bch2_btree_key_cache_find(c, BTREE_ID_INODES, iter->pos))
+ !bch2_btree_key_cache_find(c, BTREE_ID_inodes, iter->pos))
goto found_slot;
}
 * XXX: ideally the dirent code would delete whiteouts when they're no
 * longer needed
*/
- ret = bch2_btree_delete_range_trans(&trans, BTREE_ID_EXTENTS,
+ ret = bch2_btree_delete_range_trans(&trans, BTREE_ID_extents,
start, end, NULL) ?:
- bch2_btree_delete_range_trans(&trans, BTREE_ID_XATTRS,
+ bch2_btree_delete_range_trans(&trans, BTREE_ID_xattrs,
start, end, NULL) ?:
- bch2_btree_delete_range_trans(&trans, BTREE_ID_DIRENTS,
+ bch2_btree_delete_range_trans(&trans, BTREE_ID_dirents,
start, end, NULL);
if (ret)
goto err;
bi_generation = 0;
if (cached) {
- iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES, POS(0, inode_nr),
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_inodes, POS(0, inode_nr),
BTREE_ITER_CACHED|BTREE_ITER_INTENT);
k = bch2_btree_iter_peek_cached(iter);
} else {
- iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES, POS(0, inode_nr),
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_inodes, POS(0, inode_nr),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
k = bch2_btree_iter_peek_slot(iter);
}
struct bkey_s_c k;
int ret;
- iter = bch2_trans_get_iter(trans, BTREE_ID_INODES,
+ iter = bch2_trans_get_iter(trans, BTREE_ID_inodes,
POS(0, inode_nr), BTREE_ITER_CACHED);
k = bch2_btree_iter_peek_cached(iter);
ret = bkey_err(k);
int ret = 0;
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_extents,
POS(inum, start),
BTREE_ITER_INTENT);
bch2_bkey_buf_init(&sk);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_extents,
bkey_start_pos(&k->k),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
promote = __promote_alloc(c,
k.k->type == KEY_TYPE_reflink_v
- ? BTREE_ID_REFLINK
- : BTREE_ID_EXTENTS,
+ ? BTREE_ID_reflink
+ : BTREE_ID_extents,
k, pos, pick, opts, sectors, rbio);
if (!promote)
return NULL;
bch2_bkey_buf_init(&sk);
bch2_trans_init(&trans, c, 0, 0);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_extents,
rbio->pos, BTREE_ITER_SLOTS);
retry:
rbio->bio.bi_status = 0;
retry:
bch2_trans_begin(&trans);
- for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
+ for_each_btree_key(&trans, iter, BTREE_ID_extents,
POS(inode, bvec_iter.bi_sector),
BTREE_ITER_SLOTS, k, ret) {
unsigned bytes, sectors, offset_into_extent;
if (crc_is_compressed(rbio->pick.crc))
return 0;
- iter = bch2_trans_get_iter(trans, BTREE_ID_EXTENTS, rbio->pos,
+ iter = bch2_trans_get_iter(trans, BTREE_ID_extents, rbio->pos,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
k = bch2_btree_iter_peek_slot(iter);
if ((ret = bkey_err(k)))
reflink_offset = le64_to_cpu(bkey_i_to_reflink_p(orig_k->k)->v.idx) +
*offset_into_extent;
- iter = bch2_trans_get_iter(trans, BTREE_ID_REFLINK,
+ iter = bch2_trans_get_iter(trans, BTREE_ID_reflink,
POS(0, reflink_offset),
BTREE_ITER_SLOTS);
k = bch2_btree_iter_peek_slot(iter);
retry:
bch2_trans_begin(&trans);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_extents,
POS(inode, rbio->bio.bi_iter.bi_sector),
BTREE_ITER_SLOTS);
while (1) {
for (i = 0; i < j->nr_ptrs; i++) {
struct bch_dev *ca = c->devs[j->ptrs[i].dev];
+ u64 offset;
+
+ div64_u64_rem(j->ptrs[i].offset, ca->mi.bucket_size, &offset);
if (i)
pr_buf(out, " ");
pr_buf(out, "%u:%llu (offset %llu)",
j->ptrs[i].dev,
- (u64) j->ptrs[i].offset,
- (u64) j->ptrs[i].offset % ca->mi.bucket_size);
+ (u64) j->ptrs[i].offset, offset);
}
}
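Note: the open-coded % with a u64 divisor doesn't link on 32-bit kernels (the compiler emits a call to a libgcc helper the kernel doesn't provide), hence the switch to div64_u64_rem() from <linux/math64.h>:

	u64 rem;

	div64_u64_rem(a, b, &rem);	/* rem = a % b, safe on 32-bit */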
!(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
continue;
- if ((ca->mi.state == BCH_MEMBER_STATE_RW ||
- ca->mi.state == BCH_MEMBER_STATE_RO) &&
+ if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
+ ca->mi.state == BCH_MEMBER_STATE_ro) &&
percpu_ref_tryget(&ca->io_ref))
closure_call(&ca->journal.read,
bch2_journal_read_device,
* it:
*/
if (!ca->mi.durability ||
- ca->mi.state != BCH_MEMBER_STATE_RW ||
+ ca->mi.state != BCH_MEMBER_STATE_rw ||
!ja->nr ||
bch2_bkey_has_device(bkey_i_to_s_c(&w->key),
ca->dev_idx) ||
static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
{
- return __bch2_dev_usrdata_drop(c, dev_idx, flags, BTREE_ID_EXTENTS) ?:
- __bch2_dev_usrdata_drop(c, dev_idx, flags, BTREE_ID_REFLINK);
+ return __bch2_dev_usrdata_drop(c, dev_idx, flags, BTREE_ID_extents) ?:
+ __bch2_dev_usrdata_drop(c, dev_idx, flags, BTREE_ID_reflink);
}
static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
if (!bkey_extent_is_direct_data(k.k))
goto next_nondata;
- if (btree_id == BTREE_ID_EXTENTS &&
+ if (btree_id == BTREE_ID_extents &&
cur_inum != k.k->p.inode) {
struct bch_inode_unpacked inode;
stats->data_type = BCH_DATA_user;
ret = __bch2_move_data(c, &ctxt, rate, wp, start, end,
- pred, arg, stats, BTREE_ID_EXTENTS) ?:
+ pred, arg, stats, BTREE_ID_extents) ?:
__bch2_move_data(c, &ctxt, rate, wp, start, end,
- pred, arg, stats, BTREE_ID_REFLINK);
+ pred, arg, stats, BTREE_ID_reflink);
move_ctxt_wait_event(&ctxt, list_empty(&ctxt.reads));
closure_sync(&ctxt.cl);
#include "super-io.h"
#include "util.h"
+#define x(t, n) #t,
+
const char * const bch2_error_actions[] = {
- "continue",
- "remount-ro",
- "panic",
+ BCH_ERROR_ACTIONS()
NULL
};
const char * const bch2_sb_features[] = {
-#define x(f, n) #f,
BCH_SB_FEATURES()
-#undef x
+ NULL
+};
+
+const char * const bch2_btree_ids[] = {
+ BCH_BTREE_IDS()
NULL
};
const char * const bch2_csum_opts[] = {
- "none",
- "crc32c",
- "crc64",
+ BCH_CSUM_OPTS()
NULL
};
const char * const bch2_compression_opts[] = {
-#define x(t, n) #t,
BCH_COMPRESSION_OPTS()
-#undef x
NULL
};
const char * const bch2_str_hash_types[] = {
- "crc32c",
- "crc64",
- "siphash",
+ BCH_STR_HASH_OPTS()
NULL
};
const char * const bch2_data_types[] = {
-#define x(t, n) #t,
BCH_DATA_TYPES()
-#undef x
NULL
};
const char * const bch2_cache_replacement_policies[] = {
- "lru",
- "fifo",
- "random",
+ BCH_CACHE_REPLACEMENT_POLICIES()
NULL
};
-/* Default is -1; we skip past it for struct cached_dev's cache mode */
-const char * const bch2_cache_modes[] = {
- "default",
- "writethrough",
- "writeback",
- "writearound",
- "none",
+const char * const bch2_member_states[] = {
+ BCH_MEMBER_STATES()
NULL
};
-const char * const bch2_dev_state[] = {
- "readwrite",
- "readonly",
- "failed",
- "spare",
- NULL
-};
+#undef x
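Note: these NULL-terminated tables are indexed by enum value for printing and scanned linearly for parsing; the name-to-index direction used by read_string_list_or_die() and friends is roughly:

	static int string_list_index(const char * const list[], const char *s)
	{
		unsigned i;

		for (i = 0; list[i]; i++)
			if (!strcmp(list[i], s))
				return i;
		return -1;
	}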
void bch2_opts_apply(struct bch_opts *dst, struct bch_opts src)
{
extern const char * const bch2_error_actions[];
extern const char * const bch2_sb_features[];
+extern const char * const bch2_btree_ids[];
extern const char * const bch2_csum_opts[];
extern const char * const bch2_compression_opts[];
extern const char * const bch2_str_hash_types[];
extern const char * const bch2_data_types[];
extern const char * const bch2_cache_replacement_policies[];
-extern const char * const bch2_cache_modes[];
-extern const char * const bch2_dev_state[];
+extern const char * const bch2_member_states[];
/*
* Mount options; we also store defaults in the superblock.
x(errors, u8, \
OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
OPT_STR(bch2_error_actions), \
- BCH_SB_ERROR_ACTION, BCH_ON_ERROR_RO, \
+ BCH_SB_ERROR_ACTION, BCH_ON_ERROR_ro, \
NULL, "Action to take on filesystem error") \
x(metadata_replicas, u8, \
OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
x(metadata_checksum, u8, \
OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
OPT_STR(bch2_csum_opts), \
- BCH_SB_META_CSUM_TYPE, BCH_CSUM_OPT_CRC32C, \
+ BCH_SB_META_CSUM_TYPE, BCH_CSUM_OPT_crc32c, \
NULL, NULL) \
x(data_checksum, u8, \
OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME|OPT_INODE, \
OPT_STR(bch2_csum_opts), \
- BCH_SB_DATA_CSUM_TYPE, BCH_CSUM_OPT_CRC32C, \
+ BCH_SB_DATA_CSUM_TYPE, BCH_CSUM_OPT_crc32c, \
NULL, NULL) \
x(compression, u8, \
OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME|OPT_INODE, \
x(str_hash, u8, \
OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
OPT_STR(bch2_str_hash_types), \
- BCH_SB_STR_HASH_TYPE, BCH_STR_HASH_OPT_SIPHASH, \
+ BCH_SB_STR_HASH_TYPE, BCH_STR_HASH_OPT_siphash, \
NULL, "Hash function for directory entries and xattrs")\
x(metadata_target, u16, \
OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME|OPT_INODE, \
bch2_trans_init(&trans, c, 0, 0);
- for_each_btree_key(&trans, iter, BTREE_ID_QUOTAS, POS(type, 0),
+ for_each_btree_key(&trans, iter, BTREE_ID_quotas, POS(type, 0),
BTREE_ITER_PREFETCH, k, ret) {
if (k.k->p.inode != type)
break;
bch2_trans_init(&trans, c, 0, 0);
- for_each_btree_key(&trans, iter, BTREE_ID_INODES, POS_MIN,
+ for_each_btree_key(&trans, iter, BTREE_ID_inodes, POS_MIN,
BTREE_ITER_PREFETCH, k, ret) {
switch (k.k->type) {
case KEY_TYPE_inode:
if (c->opts.usrquota)
return -EINVAL;
- ret = bch2_btree_delete_range(c, BTREE_ID_QUOTAS,
+ ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
POS(QTYP_USR, 0),
POS(QTYP_USR + 1, 0),
NULL);
if (c->opts.grpquota)
return -EINVAL;
- ret = bch2_btree_delete_range(c, BTREE_ID_QUOTAS,
+ ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
POS(QTYP_GRP, 0),
POS(QTYP_GRP + 1, 0),
NULL);
if (c->opts.prjquota)
return -EINVAL;
- ret = bch2_btree_delete_range(c, BTREE_ID_QUOTAS,
+ ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
POS(QTYP_PRJ, 0),
POS(QTYP_PRJ + 1, 0),
NULL);
struct bkey_s_c k;
int ret;
- iter = bch2_trans_get_iter(trans, BTREE_ID_QUOTAS, new_quota->k.p,
+ iter = bch2_trans_get_iter(trans, BTREE_ID_quotas, new_quota->k.p,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
k = bch2_btree_iter_peek_slot(iter);
size_t src, dst;
for (src = 0, dst = 0; src < keys->nr; src++)
- if (keys->d[src].btree_id != BTREE_ID_ALLOC)
+ if (keys->d[src].btree_id != BTREE_ID_alloc)
keys->d[dst++] = keys->d[src];
keys->nr = dst;
struct btree_iter *iter;
int ret;
- iter = bch2_trans_get_iter(trans, BTREE_ID_ALLOC, k->k.p,
+ iter = bch2_trans_get_iter(trans, BTREE_ID_alloc, k->k.p,
BTREE_ITER_CACHED|
BTREE_ITER_CACHED_NOFILL|
BTREE_ITER_INTENT);
for_each_journal_key(keys, i) {
cond_resched();
- if (!i->level && i->btree_id == BTREE_ID_ALLOC) {
+ if (!i->level && i->btree_id == BTREE_ID_alloc) {
j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
ret = bch2_alloc_replay_key(c, i->k);
if (ret)
for_each_journal_key(keys, i) {
cond_resched();
- if (i->level || i->btree_id == BTREE_ID_ALLOC)
+ if (i->level || i->btree_id == BTREE_ID_alloc)
continue;
replay_now_at(j, keys.journal_seq_base + i->journal_seq);
if (!r->alive)
continue;
- if (i == BTREE_ID_ALLOC &&
+ if (i == BTREE_ID_alloc &&
c->opts.reconstruct_alloc) {
c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
continue;
}
if (r->error) {
- __fsck_err(c, i == BTREE_ID_ALLOC
+ __fsck_err(c, i == BTREE_ID_alloc
? FSCK_CAN_IGNORE : 0,
"invalid btree root %s",
bch2_btree_ids[i]);
- if (i == BTREE_ID_ALLOC)
+ if (i == BTREE_ID_alloc)
c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
}
ret = bch2_btree_root_read(c, i, &r->key, r->level);
if (ret) {
- __fsck_err(c, i == BTREE_ID_ALLOC
+ __fsck_err(c, i == BTREE_ID_alloc
? FSCK_CAN_IGNORE : 0,
"error reading btree root %s",
bch2_btree_ids[i]);
- if (i == BTREE_ID_ALLOC)
+ if (i == BTREE_ID_alloc)
c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
}
}
bch2_inode_pack(c, &packed_inode, &root_inode);
err = "error creating root directory";
- ret = bch2_btree_insert(c, BTREE_ID_INODES,
+ ret = bch2_btree_insert(c, BTREE_ID_inodes,
&packed_inode.inode.k_i,
NULL, NULL, 0);
if (ret)
if (orig->k.type == KEY_TYPE_inline_data)
bch2_check_set_feature(c, BCH_FEATURE_reflink_inline_data);
- for_each_btree_key(trans, reflink_iter, BTREE_ID_REFLINK,
+ for_each_btree_key(trans, reflink_iter, BTREE_ID_reflink,
POS(0, c->reflink_hint),
BTREE_ITER_INTENT|BTREE_ITER_SLOTS, k, ret) {
if (reflink_iter->pos.inode) {
bch2_bkey_buf_init(&new_src);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 4096);
- src_iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, src_start,
+ src_iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, src_start,
BTREE_ITER_INTENT);
- dst_iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, dst_start,
+ dst_iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, dst_start,
BTREE_ITER_INTENT);
while (1) {
bch2_str_hash_opt_to_type(struct bch_fs *c, enum bch_str_hash_opts opt)
{
switch (opt) {
- case BCH_STR_HASH_OPT_CRC32C:
+ case BCH_STR_HASH_OPT_crc32c:
return BCH_STR_HASH_CRC32C;
- case BCH_STR_HASH_OPT_CRC64:
+ case BCH_STR_HASH_OPT_crc64:
return BCH_STR_HASH_CRC64;
- case BCH_STR_HASH_OPT_SIPHASH:
+ case BCH_STR_HASH_OPT_siphash:
return c->sb.features & (1ULL << BCH_FEATURE_new_siphash)
? BCH_STR_HASH_SIPHASH
: BCH_STR_HASH_SIPHASH_OLD;
if (k.k->type == desc.key_type) {
if (!desc.cmp_key(k, key))
return iter;
- } else if (k.k->type == KEY_TYPE_whiteout) {
+ } else if (k.k->type == KEY_TYPE_hash_whiteout) {
;
} else {
/* hole, not found */
for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k, ret) {
if (k.k->type != desc.key_type &&
- k.k->type != KEY_TYPE_whiteout)
+ k.k->type != KEY_TYPE_hash_whiteout)
break;
if (k.k->type == desc.key_type &&
!(flags & BCH_HASH_SET_MUST_REPLACE))
slot = bch2_trans_copy_iter(trans, iter);
- if (k.k->type != KEY_TYPE_whiteout)
+ if (k.k->type != KEY_TYPE_hash_whiteout)
goto not_found;
}
bkey_init(&delete->k);
delete->k.p = iter->pos;
- delete->k.type = ret ? KEY_TYPE_whiteout : KEY_TYPE_deleted;
+ delete->k.type = ret ? KEY_TYPE_hash_whiteout : KEY_TYPE_deleted;
bch2_trans_update(trans, iter, delete, 0);
return 0;
for (entry = clean->start;
entry < (struct jset_entry *) vstruct_end(&clean->field);
entry = vstruct_next(entry))
- bch2_bkey_renumber(BKEY_TYPE_BTREE, bkey_to_packed(entry->start), write);
+ bch2_bkey_renumber(BKEY_TYPE_btree, bkey_to_packed(entry->start), write);
}
int bch2_fs_mark_dirty(struct bch_fs *c)
lockdep_assert_held(&c->state_lock);
switch (new_state) {
- case BCH_MEMBER_STATE_RW:
+ case BCH_MEMBER_STATE_rw:
return true;
- case BCH_MEMBER_STATE_RO:
- if (ca->mi.state != BCH_MEMBER_STATE_RW)
+ case BCH_MEMBER_STATE_ro:
+ if (ca->mi.state != BCH_MEMBER_STATE_rw)
return true;
/* do we have enough devices to write to? */
for_each_member_device(ca2, c, i)
if (ca2 != ca)
- nr_rw += ca2->mi.state == BCH_MEMBER_STATE_RW;
+ nr_rw += ca2->mi.state == BCH_MEMBER_STATE_rw;
required = max(!(flags & BCH_FORCE_IF_METADATA_DEGRADED)
? c->opts.metadata_replicas
: c->opts.data_replicas_required);
return nr_rw >= required;
- case BCH_MEMBER_STATE_FAILED:
- case BCH_MEMBER_STATE_SPARE:
- if (ca->mi.state != BCH_MEMBER_STATE_RW &&
- ca->mi.state != BCH_MEMBER_STATE_RO)
+ case BCH_MEMBER_STATE_failed:
+ case BCH_MEMBER_STATE_spare:
+ if (ca->mi.state != BCH_MEMBER_STATE_rw &&
+ ca->mi.state != BCH_MEMBER_STATE_ro)
return true;
/* do we have enough devices to read from? */
ca = bch_dev_locked(c, i);
if (!bch2_dev_is_online(ca) &&
- (ca->mi.state == BCH_MEMBER_STATE_RW ||
- ca->mi.state == BCH_MEMBER_STATE_RO)) {
+ (ca->mi.state == BCH_MEMBER_STATE_rw ||
+ ca->mi.state == BCH_MEMBER_STATE_ro)) {
mutex_unlock(&c->sb_lock);
return false;
}
{
lockdep_assert_held(&c->state_lock);
- BUG_ON(ca->mi.state != BCH_MEMBER_STATE_RW);
+ BUG_ON(ca->mi.state != BCH_MEMBER_STATE_rw);
bch2_dev_allocator_add(c, ca);
bch2_recalc_capacity(c);
if (!bch2_dev_state_allowed(c, ca, new_state, flags))
return -EINVAL;
- if (new_state != BCH_MEMBER_STATE_RW)
+ if (new_state != BCH_MEMBER_STATE_rw)
__bch2_dev_read_only(c, ca);
- bch_notice(ca, "%s", bch2_dev_state[new_state]);
+ bch_notice(ca, "%s", bch2_member_states[new_state]);
mutex_lock(&c->sb_lock);
mi = bch2_sb_get_members(c->disk_sb.sb);
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
- if (new_state == BCH_MEMBER_STATE_RW &&
+ if (new_state == BCH_MEMBER_STATE_rw &&
__bch2_dev_read_write(c, ca))
ret = -ENOMEM;
for (i = 0; i < ca->mi.nbuckets; i++) {
ret = bch2_btree_key_cache_flush(&trans,
- BTREE_ID_ALLOC, POS(ca->dev_idx, i));
+ BTREE_ID_alloc, POS(ca->dev_idx, i));
if (ret)
break;
}
if (ret)
return ret;
- return bch2_btree_delete_range(c, BTREE_ID_ALLOC,
+ return bch2_btree_delete_range(c, BTREE_ID_alloc,
POS(ca->dev_idx, 0),
POS(ca->dev_idx + 1, 0),
NULL);
*/
percpu_ref_put(&ca->ref);
- if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) {
+ if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
bch_err(ca, "Cannot remove without losing data");
goto err;
}
bch2_dev_usage_journal_reserve(c);
return 0;
err:
- if (ca->mi.state == BCH_MEMBER_STATE_RW &&
+ if (ca->mi.state == BCH_MEMBER_STATE_rw &&
!percpu_ref_is_zero(&ca->io_ref))
__bch2_dev_read_write(c, ca);
up_write(&c->state_lock);
if (ret)
goto err_late;
- if (ca->mi.state == BCH_MEMBER_STATE_RW) {
+ if (ca->mi.state == BCH_MEMBER_STATE_rw) {
err = __bch2_dev_read_write(c, ca);
if (err)
goto err_late;
goto err;
}
- if (ca->mi.state == BCH_MEMBER_STATE_RW) {
+ if (ca->mi.state == BCH_MEMBER_STATE_rw) {
err = __bch2_dev_read_write(c, ca);
if (err)
goto err;
return 0;
}
- if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) {
+ if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
bch_err(ca, "Cannot offline required disk");
up_write(&c->state_lock);
return -EINVAL;
static inline bool bch2_dev_is_readable(struct bch_dev *ca)
{
return bch2_dev_is_online(ca) &&
- ca->mi.state != BCH_MEMBER_STATE_FAILED;
+ ca->mi.state != BCH_MEMBER_STATE_failed;
}
static inline bool bch2_dev_get_ioref(struct bch_dev *ca, int rw)
if (!percpu_ref_tryget(&ca->io_ref))
return false;
- if (ca->mi.state == BCH_MEMBER_STATE_RW ||
- (ca->mi.state == BCH_MEMBER_STATE_RO && rw == READ))
+ if (ca->mi.state == BCH_MEMBER_STATE_rw ||
+ (ca->mi.state == BCH_MEMBER_STATE_ro && rw == READ))
return true;
percpu_ref_put(&ca->io_ref);
__for_each_online_member(ca, c, iter, ~0)
#define for_each_rw_member(ca, c, iter) \
- __for_each_online_member(ca, c, iter, 1 << BCH_MEMBER_STATE_RW)
+ __for_each_online_member(ca, c, iter, 1 << BCH_MEMBER_STATE_rw)
#define for_each_readable_member(ca, c, iter) \
__for_each_online_member(ca, c, iter, \
- (1 << BCH_MEMBER_STATE_RW)|(1 << BCH_MEMBER_STATE_RO))
+ (1 << BCH_MEMBER_STATE_rw)|(1 << BCH_MEMBER_STATE_ro))
/*
* If a key exists that references a device, the device won't be going away and
bch2_trans_init(&trans, c, 0, 0);
- for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN, 0, k, ret)
+ for_each_btree_key(&trans, iter, BTREE_ID_extents, POS_MIN, 0, k, ret)
if (k.k->type == KEY_TYPE_extent) {
struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
const union bch_extent_entry *entry;
}
if (attr == &sysfs_state_rw) {
- bch2_string_opt_to_text(&out, bch2_dev_state,
+ bch2_string_opt_to_text(&out, bch2_member_states,
ca->mi.state);
pr_buf(&out, "\n");
return out.pos - buf;
{
int ret;
- ret = bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
+ ret = bch2_btree_delete_range(c, BTREE_ID_extents,
POS(0, 0), POS(0, U64_MAX),
NULL);
BUG_ON(ret);
- ret = bch2_btree_delete_range(c, BTREE_ID_XATTRS,
+ ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
POS(0, 0), POS(0, U64_MAX),
NULL);
BUG_ON(ret);
bch2_trans_init(&trans, c, 0, 0);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS, k.k.p,
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs, k.k.p,
BTREE_ITER_INTENT);
ret = bch2_btree_iter_traverse(iter);
bch2_trans_init(&trans, c, 0, 0);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS, k.k.p,
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs, k.k.p,
BTREE_ITER_INTENT);
ret = bch2_btree_iter_traverse(iter);
bkey_cookie_init(&k.k_i);
k.k.p.offset = i;
- ret = bch2_btree_insert(c, BTREE_ID_XATTRS, &k.k_i,
+ ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
NULL, NULL, 0);
if (ret) {
bch_err(c, "insert error in test_iterate: %i", ret);
i = 0;
- for_each_btree_key(&trans, iter, BTREE_ID_XATTRS,
+ for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
POS_MIN, 0, k, ret) {
if (k.k->p.inode)
break;
k.k.p.offset = i + 8;
k.k.size = 8;
- ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
+ ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
NULL, NULL, 0);
if (ret) {
bch_err(c, "insert error in test_iterate_extents: %i", ret);
i = 0;
- for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
+ for_each_btree_key(&trans, iter, BTREE_ID_extents,
POS_MIN, 0, k, ret) {
BUG_ON(bkey_start_offset(k.k) != i);
i = k.k->p.offset;
bkey_cookie_init(&k.k_i);
k.k.p.offset = i * 2;
- ret = bch2_btree_insert(c, BTREE_ID_XATTRS, &k.k_i,
+ ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
NULL, NULL, 0);
if (ret) {
bch_err(c, "insert error in test_iterate_slots: %i", ret);
i = 0;
- for_each_btree_key(&trans, iter, BTREE_ID_XATTRS, POS_MIN,
+ for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
0, k, ret) {
if (k.k->p.inode)
break;
i = 0;
- for_each_btree_key(&trans, iter, BTREE_ID_XATTRS, POS_MIN,
+ for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
BTREE_ITER_SLOTS, k, ret) {
BUG_ON(k.k->p.offset != i);
BUG_ON(bkey_deleted(k.k) != (i & 1));
k.k.p.offset = i + 16;
k.k.size = 8;
- ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
+ ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
NULL, NULL, 0);
if (ret) {
bch_err(c, "insert error in test_iterate_slots_extents: %i", ret);
i = 0;
- for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN,
+ for_each_btree_key(&trans, iter, BTREE_ID_extents, POS_MIN,
0, k, ret) {
BUG_ON(bkey_start_offset(k.k) != i + 8);
BUG_ON(k.k->size != 8);
i = 0;
- for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN,
+ for_each_btree_key(&trans, iter, BTREE_ID_extents, POS_MIN,
BTREE_ITER_SLOTS, k, ret) {
BUG_ON(bkey_deleted(k.k) != !(i % 16));
bch2_trans_init(&trans, c, 0, 0);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS, POS_MIN, 0);
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs, POS_MIN, 0);
k = bch2_btree_iter_peek(iter);
BUG_ON(k.k);
bch2_trans_init(&trans, c, 0, 0);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN, 0);
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, POS_MIN, 0);
k = bch2_btree_iter_peek(iter);
BUG_ON(k.k);
k.k_i.k.size = end - start;
k.k_i.k.version.lo = test_version++;
- ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
+ ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
NULL, NULL, 0);
if (ret)
bch_err(c, "insert error in insert_test_extent: %i", ret);
k.k.p.offset = test_rand();
ret = __bch2_trans_do(&trans, NULL, NULL, 0,
- __bch2_btree_insert(&trans, BTREE_ID_XATTRS, &k.k_i));
+ __bch2_btree_insert(&trans, BTREE_ID_xattrs, &k.k_i));
if (ret) {
bch_err(c, "error in rand_insert: %i", ret);
break;
u64 i;
bch2_trans_init(&trans, c, 0, 0);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS, POS_MIN, 0);
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs, POS_MIN, 0);
for (i = 0; i < nr; i++) {
bch2_btree_iter_set_pos(iter, POS(0, test_rand()));
u64 i;
bch2_trans_init(&trans, c, 0, 0);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS, POS_MIN, 0);
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs, POS_MIN, 0);
for (i = 0; i < nr; i++) {
bch2_btree_iter_set_pos(iter, POS(0, test_rand()));
struct bkey_s_c k;
int ret = 0;
- iter = bch2_trans_get_iter(trans, BTREE_ID_XATTRS, pos,
+ iter = bch2_trans_get_iter(trans, BTREE_ID_xattrs, pos,
BTREE_ITER_INTENT);
k = bch2_btree_iter_peek(iter);
ret = bkey_err(k);
bch2_trans_init(&trans, c, 0, 0);
- for_each_btree_key(&trans, iter, BTREE_ID_XATTRS, POS_MIN,
+ for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
insert.k.p = iter->pos;
bch2_trans_init(&trans, c, 0, 0);
- for_each_btree_key(&trans, iter, BTREE_ID_XATTRS, POS_MIN, 0, k, ret)
+ for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN, 0, k, ret)
;
bch2_trans_exit(&trans);
return ret;
bch2_trans_init(&trans, c, 0, 0);
- for_each_btree_key(&trans, iter, BTREE_ID_XATTRS, POS_MIN,
+ for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
BTREE_ITER_INTENT, k, ret) {
struct bkey_i_cookie u;
{
int ret;
- ret = bch2_btree_delete_range(c, BTREE_ID_XATTRS,
+ ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
POS(0, 0), POS(0, U64_MAX),
NULL);
if (ret)
}
const struct bch_hash_desc bch2_xattr_hash_desc = {
- .btree_id = BTREE_ID_XATTRS,
+ .btree_id = BTREE_ID_xattrs,
.key_type = KEY_TYPE_xattr,
.hash_key = xattr_hash_key,
.hash_bkey = xattr_hash_bkey,
bch2_trans_init(&trans, c, 0, 0);
- for_each_btree_key(&trans, iter, BTREE_ID_XATTRS,
+ for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
POS(inum, 0), 0, k, ret) {
BUG_ON(k.k->p.inode < inum);