if (ca->mi.freespace_initialized &&
test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags) &&
bch2_trans_inconsistent_on(old.k->type != old_type, trans,
- "incorrect key when %s %s btree (got %s should be %s)\n"
+ "incorrect key when %s %s:%llu:%llu:0 (got %s should be %s)\n"
" for %s",
set ? "setting" : "clearing",
bch2_btree_ids[btree],
+ iter.pos.inode,
+ iter.pos.offset,
bch2_bkey_types[old.k->type],
bch2_bkey_types[old_type],
(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
return ret;
}
-static int bch2_check_discard_freespace_key(struct btree_trans *trans,
- struct btree_iter *iter)
+static int __bch2_check_discard_freespace_key(struct btree_trans *trans,
+ struct btree_iter *iter)
{
struct bch_fs *c = trans->c;
struct btree_iter alloc_iter;
goto delete;
out:
fsck_err:
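+ /* alloc_iter won't be used again; don't preserve its btree path: */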
+ set_btree_iter_dontneed(&alloc_iter);
bch2_trans_iter_exit(trans, &alloc_iter);
printbuf_exit(&buf);
return ret;
goto out;
}
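+/*
+ * In extents btrees, a single key may cover multiple bucket positions; check
+ * each position the key covers individually:
+ */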
+static int bch2_check_discard_freespace_key(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bpos end)
+{
+ if (!btree_node_type_is_extents(iter->btree_id)) {
+ return __bch2_check_discard_freespace_key(trans, iter);
+ } else {
+ int ret = 0;
+
+ while (!bkey_eq(iter->pos, end) &&
+ !(ret = btree_trans_too_many_iters(trans) ?:
+ __bch2_check_discard_freespace_key(trans, iter)))
+ bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
+
+ return ret;
+ }
+}
+
/*
* We've already checked that generation numbers in the bucket_gens btree are
* valid for buckets that exist; this just checks for keys for nonexistent
BTREE_ID_need_discard, POS_MIN,
BTREE_ITER_PREFETCH, k,
NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
- bch2_check_discard_freespace_key(&trans, &iter)) ?:
+ bch2_check_discard_freespace_key(&trans, &iter, k.k->p)) ?:
for_each_btree_key_commit(&trans, iter,
BTREE_ID_freespace, POS_MIN,
BTREE_ITER_PREFETCH, k,
NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
- bch2_check_discard_freespace_key(&trans, &iter)) ?:
+ bch2_check_discard_freespace_key(&trans, &iter, k.k->p)) ?:
for_each_btree_key_commit(&trans, iter,
BTREE_ID_bucket_gens, POS_MIN,
BTREE_ITER_PREFETCH, k,
/* Unlock before doing IO: */
if (trans && sync)
- bch2_trans_unlock(trans);
+ bch2_trans_unlock_noassert(trans);
bch2_btree_node_read(c, b, sync);
u32 bch2_trans_begin(struct btree_trans *trans)
{
struct btree_path *path;
+ u64 now;
bch2_trans_reset_updates(trans);
path->preserve = false;
}
+ now = local_clock();
if (!trans->restarted &&
(need_resched() ||
- local_clock() - trans->last_begin_time > BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS))
+ now - trans->last_begin_time > BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS)) {
drop_locks_do(trans, (cond_resched(), 0));
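+ /* Re-read the clock so time spent rescheduling doesn't count towards the next hold time check: */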
+ now = local_clock();
+ }
+ trans->last_begin_time = now;
if (unlikely(time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
bch2_trans_reset_srcu_lock(trans);
trans->notrace_relock_fail = false;
}
- trans->last_begin_time = local_clock();
return trans->restart_count;
}
return 0;
}
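+/* Like bch2_trans_unlock(), but skips asserting that no btree nodes are still locked: */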
+void bch2_trans_unlock_noassert(struct btree_trans *trans)
+{
+ struct btree_path *path;
+
+ trans_for_each_path(trans, path)
+ __bch2_btree_path_unlock(trans, path);
+}
+
void bch2_trans_unlock(struct btree_trans *trans)
{
struct btree_path *path;
static inline void bch2_assert_btree_nodes_not_locked(void) {}
#endif
+void bch2_trans_unlock_noassert(struct btree_trans *);
+
static inline bool is_btree_node(struct btree_path *path, unsigned l)
{
return l < BTREE_MAX_DEPTH && !IS_ERR_OR_NULL(path->l[l].b);
BKEY_BTREE_PTR_U64s_MAX * (1 + split)))
break;
- split = true;
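+ /* This node will only actually split if it's above the split threshold: */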
+ split = path->l[update_level].b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c);
}
if (flags & BTREE_INSERT_GC_LOCK_HELD)
if (crc_is_compressed(p.crc))
reserve_sectors += k.k->size;
- m->op.nr_replicas += bch2_extent_ptr_durability(c, &p);
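+ /* Use desired durability, so pointers on failed devices still count: */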
+ m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p);
} else if (!p.ptr.cached) {
bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
}
return replicas;
}
-unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
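+/*
+ * The durability a pointer is intended to provide, including pointers on
+ * failed devices:
+ */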
+unsigned bch2_extent_ptr_desired_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
{
- unsigned durability = 0;
struct bch_dev *ca;
if (p->ptr.cached)
ca = bch_dev_bkey_exists(c, p->ptr.dev);
- if (ca->mi.state != BCH_MEMBER_STATE_failed)
- durability = max_t(unsigned, durability, ca->mi.durability);
+ return ca->mi.durability +
+ (p->has_ec
+ ? p->ec.redundancy
+ : 0);
+}
- if (p->has_ec)
- durability += p->ec.redundancy;
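+/* The durability a pointer actually provides: 0 if cached or the device has failed: */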
+unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
+{
+ struct bch_dev *ca;
- return durability;
+ if (p->ptr.cached)
+ return 0;
+
+ ca = bch_dev_bkey_exists(c, p->ptr.dev);
+
+ if (ca->mi.state == BCH_MEMBER_STATE_failed)
+ return 0;
+
+ return ca->mi.durability +
+ (p->has_ec
+ ? p->ec.redundancy
+ : 0);
}
unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
unsigned bch2_bkey_sectors_compressed(struct bkey_s_c);
unsigned bch2_bkey_replicas(struct bch_fs *, struct bkey_s_c);
+unsigned bch2_extent_ptr_desired_durability(struct bch_fs *, struct extent_ptr_decoded *);
unsigned bch2_extent_ptr_durability(struct bch_fs *, struct extent_ptr_decoded *);
unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);
enum six_lock_type unlock_wakeup;
};
-#define LOCK_VALS { \
- [SIX_LOCK_read] = { \
- .lock_val = 1U << SIX_LOCK_HELD_read_OFFSET, \
- .lock_fail = SIX_LOCK_HELD_write, \
- .held_mask = SIX_LOCK_HELD_read, \
- .unlock_wakeup = SIX_LOCK_write, \
- }, \
- [SIX_LOCK_intent] = { \
- .lock_val = SIX_LOCK_HELD_intent, \
- .lock_fail = SIX_LOCK_HELD_intent, \
- .held_mask = SIX_LOCK_HELD_intent, \
- .unlock_wakeup = SIX_LOCK_intent, \
- }, \
- [SIX_LOCK_write] = { \
- .lock_val = SIX_LOCK_HELD_write, \
- .lock_fail = SIX_LOCK_HELD_read, \
- .held_mask = SIX_LOCK_HELD_write, \
- .unlock_wakeup = SIX_LOCK_read, \
- }, \
-}
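+/*
+ * For each lock type: the state bits set when it's acquired, the bits that
+ * block acquisition, and which lock type to wake up on unlock:
+ */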
+static const struct six_lock_vals l[] = {
+ [SIX_LOCK_read] = {
+ .lock_val = 1U << SIX_LOCK_HELD_read_OFFSET,
+ .lock_fail = SIX_LOCK_HELD_write,
+ .held_mask = SIX_LOCK_HELD_read,
+ .unlock_wakeup = SIX_LOCK_write,
+ },
+ [SIX_LOCK_intent] = {
+ .lock_val = SIX_LOCK_HELD_intent,
+ .lock_fail = SIX_LOCK_HELD_intent,
+ .held_mask = SIX_LOCK_HELD_intent,
+ .unlock_wakeup = SIX_LOCK_intent,
+ },
+ [SIX_LOCK_write] = {
+ .lock_val = SIX_LOCK_HELD_write,
+ .lock_fail = SIX_LOCK_HELD_read,
+ .held_mask = SIX_LOCK_HELD_write,
+ .unlock_wakeup = SIX_LOCK_read,
+ },
+};
static inline void six_set_bitmask(struct six_lock *lock, u32 mask)
{
static int __do_six_trylock(struct six_lock *lock, enum six_lock_type type,
struct task_struct *task, bool try)
{
- const struct six_lock_vals l[] = LOCK_VALS;
int ret;
u32 old;
bool six_relock_ip(struct six_lock *lock, enum six_lock_type type,
unsigned seq, unsigned long ip)
{
- if (lock->seq != seq || !six_trylock_ip(lock, type, ip))
+ if (six_lock_seq(lock) != seq || !six_trylock_ip(lock, type, ip))
return false;
- if (lock->seq != seq) {
+ if (six_lock_seq(lock) != seq) {
six_unlock_ip(lock, type, ip);
return false;
}
__always_inline
static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type)
{
- const struct six_lock_vals l[] = LOCK_VALS;
u32 state;
if (type == SIX_LOCK_intent)
if (type != SIX_LOCK_write)
six_release(&lock->dep_map, ip);
+ else
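+ /* seq is only incremented when dropping a write lock: */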
+ lock->seq++;
if (type == SIX_LOCK_intent &&
lock->intent_lock_recurse) {
return;
}
- lock->seq += type == SIX_LOCK_write;
-
do_six_unlock_type(lock, type);
}
EXPORT_SYMBOL_GPL(six_unlock_ip);
*/
bool six_lock_tryupgrade(struct six_lock *lock)
{
- const struct six_lock_vals l[] = LOCK_VALS;
u32 old = atomic_read(&lock->state), new;
do {
*/
void six_lock_increment(struct six_lock *lock, enum six_lock_type type)
{
- const struct six_lock_vals l[] = LOCK_VALS;
-
six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read, _RET_IP_);
/* XXX: assert already locked, and that we don't overflow: */